public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
{
    using (var tmpdb = new Library.Utility.TempFile())
    using (var db = new Database.LocalDatabase(System.IO.File.Exists(m_options.Dbpath) ? m_options.Dbpath : (string)tmpdb, "ListControlFiles"))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        m_result.SetDatabase(db);

        var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);

        try
        {
            var filteredList = ListFilesHandler.ParseAndFilterFilesets(backend.List(), m_options);
            if (filteredList.Count == 0)
                throw new Exception("No filesets found on remote target");

            Exception lastEx = new Exception("No suitable files found on remote target");

            foreach (var fileversion in filteredList)
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        return;

                    var file = fileversion.Value.File;

                    long size;
                    string hash;
                    RemoteVolumeType type;
                    RemoteVolumeState state;
                    if (!db.GetRemoteVolume(file.Name, out hash, out size, out type, out state))
                        size = file.Size;

                    var files = new List<Library.Interface.IListResultFile>();
                    using (var tmpfile = backend.Get(file.Name, size, hash))
                    using (var tmp = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(file.Name), tmpfile, m_options))
                        foreach (var cf in tmp.ControlFiles)
                            if (Library.Utility.FilterExpression.Matches(filter, cf.Key))
                                files.Add(new ListResultFile(cf.Key, null));

                    m_result.SetResult(new Library.Interface.IListResultFileset[] { new ListResultFileset(fileversion.Key, fileversion.Value.Time, -1, -1) }, files);
                    lastEx = null;
                    break;
                }
                catch (Exception ex)
                {
                    lastEx = ex;
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }

            if (lastEx != null)
                throw lastEx;
        }
        finally
        {
            backend.WaitForComplete(db, null);
        }
    }
}
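// The loop above is a "first success wins" scan over fileset versions: each
// candidate is tried in order, the last failure is remembered, and the scan
// stops at the first version that yields a result. A minimal, self-contained
// sketch of the same pattern (names here are illustrative, not from the
// original source):
static T FirstSuccessful<T>(IEnumerable<Func<T>> candidates)
{
    Exception lastEx = new Exception("No suitable candidate found");
    foreach (var candidate in candidates)
    {
        try { return candidate(); }
        catch (Exception ex) { lastEx = ex; }
    }
    throw lastEx;
}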
/// <summary>
/// Run the recreate procedure
/// </summary>
/// <param name="dbparent">The database to restore into</param>
/// <param name="filter">Filters the files in a filelist to prevent downloading unwanted data</param>
/// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
/// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be used to recover data blocks while processing blocklists</param>
internal void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
{
    var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
    if (hashalg == null)
        throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
    var hashsize = hashalg.HashSize / 8;

    //We build a local database in steps.
    using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
    {
        var volumeIds = new Dictionary<string, long>();

        var rawlist = backend.List();

        //First step is to examine the remote storage to see what
        // kind of data we can find
        var remotefiles = (from x in rawlist
                           let n = VolumeBase.ParseFilename(x)
                           where n != null && n.Prefix == m_options.Prefix
                           select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

        if (remotefiles.Length == 0)
        {
            if (rawlist.Count == 0)
                throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
            else
            {
                var tmp = (from x in rawlist let n = VolumeBase.ParseFilename(x) where n != null select n.Prefix).ToArray();
                var types = tmp.Distinct().ToArray();
                if (tmp.Length == 0)
                    throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                else if (types.Length == 1)
                    throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                else
                    throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
            }
        }

        //Then we select the filelist we should work with,
        // and create the filelist table to fit
        IEnumerable<IParsedVolume> filelists = from n in remotefiles
                                               where n.FileType == RemoteVolumeType.Files
                                               orderby n.Time descending
                                               select n;

        if (filelistfilter != null)
            filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();

        foreach (var fl in remotefiles)
            volumeIds[fl.File.Name] = restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, RemoteVolumeState.Uploaded);

        //Record all blocksets and files needed
        using (var tr = restoredb.BeginTransaction())
        {
            var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
            foreach (var entry in new AsyncDownloader(filelistWork, backend))
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(restoredb, null);
                        return;
                    }

                    using (var tmpfile = entry.TempFile)
                    {
                        if (entry.Hash != null && entry.Size > 0)
                            restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);

                        var parsed = VolumeBase.ParseFilename(entry.Name);

                        // Create timestamped operations based on the file timestamp
                        var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                        using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                            foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                            {
                                try
                                {
                                    if (fe.Type == FilelistEntryType.Folder)
                                    {
                                        restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                    }
                                    else if (fe.Type == FilelistEntryType.File)
                                    {
                                        var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                        restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                    }
                                    else if (fe.Type == FilelistEntryType.Symlink)
                                    {
                                        restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                    }
                                    else
                                    {
                                        m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1}", fe.Type, fe.Path), null);
                                    }
                                }
                                catch (Exception ex)
                                {
                                    m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                }
                            }
                    }
                }
                catch (Exception ex)
                {
                    m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }

            using (new Logging.Timer("CommitUpdateFilesetFromRemote"))
                tr.Commit();
        }

        //Grab all index files, and update the block table
        using (var tr = restoredb.BeginTransaction())
        {
            var indexfiles = from n in remotefiles
                             where n.FileType == RemoteVolumeType.Index
                             select new RemoteVolume(n.File) as IRemoteVolume;

            foreach (var sf in new AsyncDownloader(indexfiles.ToList(), backend))
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(restoredb, null);
                        return;
                    }

                    using (var tmpfile = sf.TempFile)
                    {
                        if (sf.Hash != null && sf.Size > 0)
                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);

                        using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                        {
                            Utility.VerifyParameters(restoredb, m_options);

                            foreach (var a in svr.Volumes)
                            {
                                var volumeID = restoredb.GetRemoteVolumeID(a.Filename);

                                //Add all block/volume mappings
                                foreach (var b in a.Blocks)
                                    restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);

                                restoredb.UpdateRemoteVolume(a.Filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                            }

                            //If there are blocklists in the index file, update the blocklists
                            foreach (var b in svr.BlockLists)
                                restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                        }
                    }
                }
                catch (Exception ex)
                {
                    //Not fatal
                    m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }

            using (new Logging.Timer("CommitRecreatedDb"))
                tr.Commit();

            // TODO: In some cases, we can avoid downloading all index files,
            // if we are lucky and pick the right ones
        }

        // We have now grabbed as much information as possible,
        // if we are still missing data, we must now fetch block files
        restoredb.FindMissingBlocklistHashes(hashsize, null);

        //We do this in three passes
        for (var i = 0; i < 3; i++)
        {
            // Grab the list matching the pass type
            var lst = restoredb.GetMissingBlockListVolumes(i).ToList();

            foreach (var sf in new AsyncDownloader(lst, backend))
                using (var tmpfile = sf.TempFile)
                using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                using (var tr = restoredb.BeginTransaction())
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(restoredb, null);
                        return;
                    }

                    var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                    // Update the block table so we know about the block/volume map
                    foreach (var h in rd.Blocks)
                        restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);

                    // Grab all known blocklists from the volume
                    foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                        restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);

                    // Update tables so we know if we are done
                    restoredb.FindMissingBlocklistHashes(hashsize, tr);

                    using (new Logging.Timer("CommitRestoredBlocklist"))
                        tr.Commit();

                    //At this point we can patch files with data from the block volume
                    if (blockprocessor != null)
                        blockprocessor(sf.Name, rd);
                }
        }

        backend.WaitForComplete(restoredb, null);

        //All done, we must verify that we have all blocklists fully intact
        // if this fails, the db will not be deleted, so it can be used,
        // except to continue a backup
        restoredb.VerifyConsistency(null);
    }
}
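// The three-pass loop above escalates which block volumes are downloaded:
// pass 0 covers volumes known to be required, pass 1 probes likely candidates,
// and pass 2 processes everything remaining (the later variant of this method
// logs exactly these descriptions). A hedged sketch of the escalation shape,
// with the volume source and download step abstracted as delegates:
static void FetchInPasses(Func<int, List<string>> volumesForPass, Action<string> download)
{
    for (var pass = 0; pass < 3; pass++)
    {
        // Mirrors GetMissingBlockListVolumes(pass) above; an empty list means
        // this pass has nothing left to contribute.
        foreach (var volume in volumesForPass(pass))
            download(volume);
    }
}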
public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
{
    using (var tmpdb = new Library.Utility.TempFile())
    using (var db = new Database.LocalDatabase(System.IO.File.Exists(m_options.Dbpath) ? m_options.Dbpath : (string)tmpdb, "ListControlFiles", true))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        m_result.SetDatabase(db);

        var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);

        try
        {
            var filteredList = ListFilesHandler.ParseAndFilterFilesets(backend.List(), m_options);
            if (filteredList.Count == 0) { throw new Exception("No filesets found on remote target"); }

            Exception lastEx = new Exception("No suitable files found on remote target");

            foreach (var fileversion in filteredList)
            {
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { return; }

                    var file = fileversion.Value.File;
                    var entry = db.GetRemoteVolume(file.Name);

                    var files = new List<Library.Interface.IListResultFile>();
                    using (var tmpfile = backend.Get(file.Name, entry.Size < 0 ? file.Size : entry.Size, entry.Hash))
                    using (var tmp = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(file.Name), tmpfile, m_options))
                        foreach (var cf in tmp.ControlFiles)
                        {
                            if (Library.Utility.FilterExpression.Matches(filter, cf.Key)) { files.Add(new ListResultFile(cf.Key, null)); }
                        }

                    m_result.SetResult(new Library.Interface.IListResultFileset[] { new ListResultFileset(fileversion.Key, LocalDatabase.BackupType.PARTIAL_BACKUP, fileversion.Value.Time, -1, -1) }, files);
                    lastEx = null;
                    break;
                }
                catch (Exception ex)
                {
                    lastEx = ex;
                    if (ex is System.Threading.ThreadAbortException) { throw; }
                }
            }

            if (lastEx != null) { throw lastEx; }
        }
        finally
        {
            backend.WaitForComplete(db, null);
        }
    }
}
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to report results to</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();

    var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix == options.Prefix select p).ToList();
    var otherlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix != options.Prefix select p).ToList();
    var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList();
    var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList();

    log.KnownFileCount = remotelist.Count();
    log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
    log.UnknownFileCount = unknownlist.Count();
    log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
    log.BackupListCount = filesets.Count;
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    if (backend is Library.Interface.IQuotaEnabledBackend)
    {
        log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
        log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
    }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist) { lookup[s.File.Name] = s; }

    var missing = new List<RemoteVolumeEntry>();
    var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
    var locallist = database.GetRemoteVolumes();

    foreach (var i in locallist)
    {
        Volumes.IParsedVolume r;
        var remoteFound = lookup.TryGetValue(i.Name, out r);
        var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

        lookup.Remove(i.Name);

        switch (i.State)
        {
            case RemoteVolumeState.Deleted:
                if (remoteFound) { log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name)); }
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
                if (remoteFound)
                {
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                else
                {
                    log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                    database.RemoveRemoteVolume(i.Name, null);
                }
                break;

            case RemoteVolumeState.Uploading:
                if (remoteFound && correctSize && r.File.Size >= 0)
                {
                    log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                }
                else if (!remoteFound)
                {
                    log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                    database.RemoveRemoteVolume(i.Name, null);
                    database.RegisterRemoteVolume(i.Name, i.Type, RemoteVolumeState.Deleting, null);
                }
                else
                {
                    log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                break;

            case RemoteVolumeState.Uploaded:
                if (!remoteFound) { missing.Add(i); }
                else if (correctSize) { database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash); }
                else { missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); }
                break;

            case RemoteVolumeState.Verified:
                if (!remoteFound) { missing.Add(i); }
                else if (!correctSize) { missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); }
                break;

            default:
                log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                break;
        }

        backend.FlushDbMessages();
    }

    foreach (var i in missingHash) { log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null); }

    return new RemoteAnalysisResult()
    {
        ParsedVolumes = remotelist,
        OtherVolumes = otherlist,
        ExtraVolumes = lookup.Values,
        MissingVolumes = missing,
        VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
    };
}
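// A sketch of how a caller might act on the returned RemoteAnalysisResult;
// the checks and exception messages here are illustrative assumptions, not
// the actual caller behavior:
static void ThrowIfRemoteStorageUnhealthy(RemoteAnalysisResult result)
{
    if (result.MissingVolumes.Any())
        throw new Exception(string.Format("Found {0} files that are missing from the remote storage", result.MissingVolumes.Count()));
    if (result.ExtraVolumes.Any())
        throw new Exception(string.Format("Found {0} remote files that are not recorded in the local database", result.ExtraVolumes.Count()));
}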
/// <summary>
/// Run the recreate procedure
/// </summary>
/// <param name="dbparent">The database to restore into</param>
/// <param name="filter">Filters the files in a filelist to prevent downloading unwanted data</param>
/// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
/// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be used to recover data blocks while processing blocklists</param>
internal void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
{
    var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
    if (hashalg == null) { throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm)); }
    var hashsize = hashalg.HashSize / 8;

    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

    //We build a local database in steps.
    using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
    {
        var volumeIds = new Dictionary<string, long>();

        var rawlist = backend.List();

        //First step is to examine the remote storage to see what
        // kind of data we can find
        var remotefiles = (from x in rawlist
                           let n = VolumeBase.ParseFilename(x)
                           where n != null && n.Prefix == m_options.Prefix
                           select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

        if (remotefiles.Length == 0)
        {
            if (rawlist.Count == 0) { throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?"); }
            else
            {
                var tmp = (from x in rawlist let n = VolumeBase.ParseFilename(x) where n != null select n.Prefix).ToArray();
                var types = tmp.Distinct().ToArray();
                if (tmp.Length == 0) { throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count)); }
                else if (types.Length == 1) { throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0])); }
                else { throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types))); }
            }
        }

        //Then we select the filelist we should work with,
        // and create the filelist table to fit
        IEnumerable<IParsedVolume> filelists = from n in remotefiles
                                               where n.FileType == RemoteVolumeType.Files
                                               orderby n.Time descending
                                               select n;

        if (filelistfilter != null) { filelists = filelistfilter(filelists).Select(x => x.Value).ToArray(); }

        foreach (var fl in remotefiles) { volumeIds[fl.File.Name] = restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, RemoteVolumeState.Uploaded); }

        //Record all blocksets and files needed
        using (var tr = restoredb.BeginTransaction())
        {
            var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
            var progress = 0;

            foreach (var entry in new AsyncDownloader(filelistWork, backend))
            {
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(restoredb, null);
                        return;
                    }

                    progress++;
                    m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * 0.2f);

                    using (var tmpfile = entry.TempFile)
                    {
                        if (entry.Hash != null && entry.Size > 0) { restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr); }

                        var parsed = VolumeBase.ParseFilename(entry.Name);

                        // Create timestamped operations based on the file timestamp
                        var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                        using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                            foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                            {
                                try
                                {
                                    if (fe.Type == FilelistEntryType.Folder)
                                    {
                                        restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                    }
                                    else if (fe.Type == FilelistEntryType.File)
                                    {
                                        var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                        restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                    }
                                    else if (fe.Type == FilelistEntryType.Symlink)
                                    {
                                        restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                    }
                                    else
                                    {
                                        m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1}", fe.Type, fe.Path), null);
                                    }
                                }
                                catch (Exception ex)
                                {
                                    m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                }
                            }
                    }
                }
                catch (Exception ex)
                {
                    m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                    if (ex is System.Threading.ThreadAbortException) { throw; }
                }
            }

            using (new Logging.Timer("CommitUpdateFilesetFromRemote"))
                tr.Commit();
        }

        //Grab all index files, and update the block table
        using (var tr = restoredb.BeginTransaction())
        {
            var indexfiles = (from n in remotefiles
                              where n.FileType == RemoteVolumeType.Index
                              select new RemoteVolume(n.File) as IRemoteVolume).ToList();
            var progress = 0;

            foreach (var sf in new AsyncDownloader(indexfiles, backend))
            {
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(restoredb, null);
                        return;
                    }

                    progress++;
                    m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                    using (var tmpfile = sf.TempFile)
                    {
                        if (sf.Hash != null && sf.Size > 0) { restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr); }

                        using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                        {
                            Utility.VerifyParameters(restoredb, m_options);

                            foreach (var a in svr.Volumes)
                            {
                                var volumeID = restoredb.GetRemoteVolumeID(a.Filename);

                                //Add all block/volume mappings
                                foreach (var b in a.Blocks) { restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr); }

                                restoredb.UpdateRemoteVolume(a.Filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                            }

                            //If there are blocklists in the index file, update the blocklists
                            foreach (var b in svr.BlockLists) { restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr); }
                        }
                    }
                }
                catch (Exception ex)
                {
                    //Not fatal
                    m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                    if (ex is System.Threading.ThreadAbortException) { throw; }
                }
            }

            using (new Logging.Timer("CommitRecreatedDb"))
                tr.Commit();

            // TODO: In some cases, we can avoid downloading all index files,
            // if we are lucky and pick the right ones
        }

        // We have now grabbed as much information as possible,
        // if we are still missing data, we must now fetch block files
        restoredb.FindMissingBlocklistHashes(hashsize, null);

        //We do this in three passes
        for (var i = 0; i < 3; i++)
        {
            // Grab the list matching the pass type
            var lst = restoredb.GetMissingBlockListVolumes(i).ToList();
            if (lst.Count > 0)
            {
                switch (i)
                {
                    case 0:
                        if (m_options.Verbose) { m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name))); }
                        else { m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count)); }
                        break;
                    case 1:
                        if (m_options.Verbose) { m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name))); }
                        else { m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count)); }
                        break;
                    default:
                        if (m_options.Verbose) { m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name))); }
                        else { m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count)); }
                        break;
                }
            }

            var progress = 0;
            foreach (var sf in new AsyncDownloader(lst, backend))
            {
                using (var tmpfile = sf.TempFile)
                using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                using (var tr = restoredb.BeginTransaction())
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(restoredb, null);
                        return;
                    }

                    progress++;
                    m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                    var volumeid = restoredb.GetRemoteVolumeID(sf.Name);
                    restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);

                    // Update the block table so we know about the block/volume map
                    foreach (var h in rd.Blocks) { restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr); }

                    // Grab all known blocklists from the volume
                    foreach (var blocklisthash in restoredb.GetBlockLists(volumeid)) { restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr); }

                    // Update tables so we know if we are done
                    restoredb.FindMissingBlocklistHashes(hashsize, tr);

                    using (new Logging.Timer("CommitRestoredBlocklist"))
                        tr.Commit();

                    //At this point we can patch files with data from the block volume
                    if (blockprocessor != null) { blockprocessor(sf.Name, rd); }
                }
            }
        }

        backend.WaitForComplete(restoredb, null);

        //All done, we must verify that we have all blocklists fully intact
        // if this fails, the db will not be deleted, so it can be used,
        // except to continue a backup
        restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);
    }
}
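// The progress updates above carve the recreate operation into fixed weights:
// filelist downloads fill [0.0, 0.2), index files fill [0.2, 0.7), and each of
// the three block-volume passes fills a 0.1-wide slice of [0.7, 1.0). The same
// mapping as a pure helper (a sketch; the original inlines this arithmetic):
static float RecreateProgress(int phase, int done, int total, int blockPass = 0)
{
    var fraction = total == 0 ? 1f : (float)done / total;
    switch (phase)
    {
        case 0: return fraction * 0.2f;                              // filelist volumes
        case 1: return 0.2f + fraction * 0.5f;                       // index volumes
        default: return 0.7f + blockPass * 0.1f + fraction * 0.1f;   // block passes 0..2
    }
}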
public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
{
    if (string.IsNullOrEmpty(m_options.Restorepath))
        throw new Exception("Cannot restore control files without --restore-path");
    if (!System.IO.Directory.Exists(m_options.Restorepath))
        System.IO.Directory.CreateDirectory(m_options.Restorepath);

    using (var tmpdb = new Library.Utility.TempFile())
    using (var db = new Database.LocalDatabase(System.IO.File.Exists(m_options.Dbpath) ? m_options.Dbpath : (string)tmpdb, "RestoreControlFiles"))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        m_result.SetDatabase(db);

        var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);

        try
        {
            var filteredList = ListFilesHandler.ParseAndFilterFilesets(backend.List(), m_options);
            if (filteredList.Count == 0)
                throw new Exception("No filesets found on remote target");

            Exception lastEx = new Exception("No suitable files found on remote target");

            foreach (var fileversion in filteredList)
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, null);
                        return;
                    }

                    var file = fileversion.Value.File;

                    long size;
                    string hash;
                    RemoteVolumeType type;
                    RemoteVolumeState state;
                    if (!db.GetRemoteVolume(file.Name, out hash, out size, out type, out state))
                        size = file.Size;

                    var res = new List<string>();
                    using (var tmpfile = backend.Get(file.Name, size, hash))
                    using (var tmp = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(file.Name), tmpfile, m_options))
                        foreach (var cf in tmp.ControlFiles)
                            if (Library.Utility.FilterExpression.Matches(filter, cf.Key))
                            {
                                var targetpath = System.IO.Path.Combine(m_options.Restorepath, cf.Key);
                                using (var ts = System.IO.File.Create(targetpath))
                                    Library.Utility.Utility.CopyStream(cf.Value, ts);
                                res.Add(targetpath);
                            }

                    m_result.SetResult(res);
                    lastEx = null;
                    break;
                }
                catch (Exception ex)
                {
                    lastEx = ex;
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }

            if (lastEx != null)
                throw lastEx;
        }
        finally
        {
            backend.WaitForComplete(db, null);
        }

        db.WriteResults();
    }
}
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to report results to</param>
/// <param name="protectedFiles">Filenames that should be exempted from deletion</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, IEnumerable<string> protectedFiles)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();
    protectedFiles = protectedFiles ?? Enumerable.Empty<string>();

    var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix == options.Prefix select p).ToList();
    var otherlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix != options.Prefix select p).ToList();
    var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList();
    var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList();

    log.KnownFileCount = remotelist.Count;
    long knownFileSize = remotelist.Select(x => Math.Max(0, x.File.Size)).Sum();
    log.KnownFileSize = knownFileSize;
    log.UnknownFileCount = unknownlist.Count;
    log.UnknownFileSize = unknownlist.Select(x => Math.Max(0, x.Size)).Sum();
    log.BackupListCount = database.FilesetTimes.Count();
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    // TODO: We should query through the backendmanager
    using (var bk = DynamicLoader.BackendLoader.GetBackend(backend.BackendUrl, options.RawOptions))
        if (bk is IQuotaEnabledBackend enabledBackend)
        {
            Library.Interface.IQuotaInfo quota = enabledBackend.Quota;
            if (quota != null)
            {
                log.TotalQuotaSpace = quota.TotalQuotaSpace;
                log.FreeQuotaSpace = quota.FreeQuotaSpace;

                // Check to see if there should be a warning or error about the quota.
                // Since this processor may be called multiple times during a backup
                // (both at the start and end, for example), the log keeps track of
                // whether a quota error or warning has been sent already.
                // Note that an error can still be sent later even if a warning was sent earlier.
                if (!log.ReportedQuotaError && quota.FreeQuotaSpace == 0)
                {
                    log.ReportedQuotaError = true;
                    Logging.Log.WriteErrorMessage(LOGTAG, "BackendQuotaExceeded", null, "Backend quota has been exceeded: Using {0} of {1} ({2} available)", Library.Utility.Utility.FormatSizeString(knownFileSize), Library.Utility.Utility.FormatSizeString(quota.TotalQuotaSpace), Library.Utility.Utility.FormatSizeString(quota.FreeQuotaSpace));
                }
                else if (!log.ReportedQuotaWarning && !log.ReportedQuotaError && quota.FreeQuotaSpace >= 0) // Negative value means the backend didn't return the quota info
                {
                    // Warnings are sent if the available free space is less than the given percentage of the total backup size.
                    double warningThreshold = options.QuotaWarningThreshold / (double)100;
                    if (quota.FreeQuotaSpace < warningThreshold * knownFileSize)
                    {
                        log.ReportedQuotaWarning = true;
                        Logging.Log.WriteWarningMessage(LOGTAG, "BackendQuotaNear", null, "Backend quota is close to being exceeded: Using {0} of {1} ({2} available)", Library.Utility.Utility.FormatSizeString(knownFileSize), Library.Utility.Utility.FormatSizeString(quota.TotalQuotaSpace), Library.Utility.Utility.FormatSizeString(quota.FreeQuotaSpace));
                    }
                }
            }
        }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist) { lookup[s.File.Name] = s; }

    var missing = new List<RemoteVolumeEntry>();
    var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
    var cleanupRemovedRemoteVolumes = new HashSet<string>();

    foreach (var e in database.DuplicateRemoteVolumes())
    {
        if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary) { database.UnlinkRemoteVolume(e.Key, e.Value); }
        else { throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString())); }
    }

    var locallist = database.GetRemoteVolumes();
    foreach (var i in locallist)
    {
        Volumes.IParsedVolume r;
        var remoteFound = lookup.TryGetValue(i.Name, out r);
        var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

        lookup.Remove(i.Name);

        switch (i.State)
        {
            case RemoteVolumeState.Deleted:
                if (remoteFound) { Logging.Log.WriteInformationMessage(LOGTAG, "IgnoreRemoteDeletedFile", "ignoring remote file listed as {0}: {1}", i.State, i.Name); }
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
                if (remoteFound)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "RemoveUnwantedRemoteFile", "removing remote file listed as {0}: {1}", i.State, i.Name);
                    backend.Delete(i.Name, i.Size, true);
                }
                else
                {
                    if (i.DeleteGracePeriod > DateTime.UtcNow)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "KeepDeleteRequest", "keeping delete request for {0} until {1}", i.Name, i.DeleteGracePeriod.ToLocalTime());
                    }
                    else
                    {
                        if (i.State == RemoteVolumeState.Temporary && protectedFiles.Any(pf => pf == i.Name))
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                        }
                        else
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "RemoteUnwantedMissingFile", "removing file listed as {0}: {1}", i.State, i.Name);
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                        }
                    }
                }
                break;

            case RemoteVolumeState.Uploading:
                if (remoteFound && correctSize && r.File.Size >= 0)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "PromotingCompleteFile", "promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded);
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                }
                else if (!remoteFound)
                {
                    if (protectedFiles.Any(pf => pf == i.Name))
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Temporary, i.Size, i.Hash, false, new TimeSpan(0), null);
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "SchedulingMissingFileForDelete", "scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name);
                        cleanupRemovedRemoteVolumes.Add(i.Name);
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                    }
                }
                else
                {
                    if (protectedFiles.Any(pf => pf == i.Name))
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RemoveIncompleteFile", "removing incomplete remote file listed as {0}: {1}", i.State, i.Name);
                        backend.Delete(i.Name, i.Size, true);
                    }
                }
                break;

            case RemoteVolumeState.Uploaded:
                if (!remoteFound) { missing.Add(i); }
                else if (correctSize) { database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash); }
                else { missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); }
                break;

            case RemoteVolumeState.Verified:
                if (!remoteFound) { missing.Add(i); }
                else if (!correctSize) { missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); }
                break;

            default:
                Logging.Log.WriteWarningMessage(LOGTAG, "UnknownFileState", null, "unknown state for remote file listed as {0}: {1}", i.State, i.Name);
                break;
        }

        backend.FlushDbMessages();
    }

    // cleanup deleted volumes in DB en bloc
    database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

    foreach (var i in missingHash) { Logging.Log.WriteWarningMessage(LOGTAG, "MissingRemoteHash", null, "remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash); }

    return new RemoteAnalysisResult()
    {
        ParsedVolumes = remotelist,
        OtherVolumes = otherlist,
        ExtraVolumes = lookup.Values,
        MissingVolumes = missing,
        VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
    };
}
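// The quota check above warns when free space falls below a percentage of the
// total backup size. Worked example (values invented): with
// QuotaWarningThreshold = 10, knownFileSize = 50 GiB and 4 GiB free,
// 4 GiB < 0.10 * 50 GiB = 5 GiB, so the warning fires. As a standalone helper:
static bool QuotaWarningNeeded(long freeQuotaSpace, long knownFileSize, int warningThresholdPercent)
{
    // A negative value means the backend did not report quota information.
    if (freeQuotaSpace < 0)
        return false;
    return freeQuotaSpace < (warningThresholdPercent / 100.0) * knownFileSize;
}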
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to report results to</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();

    var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null select p).ToList();
    var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList();
    var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList();

    log.KnownFileCount = remotelist.Count();
    log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
    log.UnknownFileCount = unknownlist.Count();
    log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
    log.BackupListCount = filesets.Count;
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    if (backend is Library.Interface.IQuotaEnabledBackend)
    {
        log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
        log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
    }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist)
    {
        if (s.Prefix == options.Prefix) { lookup[s.File.Name] = s; }
    }

    var missing = new List<RemoteVolumeEntry>();
    var locallist = database.GetRemoteVolumes();

    foreach (var i in locallist)
    {
        //Ignore those that are deleted
        if (i.State == RemoteVolumeState.Deleted) { continue; }

        if (i.State == RemoteVolumeState.Temporary)
        {
            log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
            database.RemoveRemoteVolume(i.Name, null);
        }
        else if (i.State == RemoteVolumeState.Deleting && lookup.ContainsKey(i.Name))
        {
            log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
            backend.Delete(i.Name, i.Size, true);
            lookup.Remove(i.Name);
        }
        else
        {
            Volumes.IParsedVolume r;
            if (!lookup.TryGetValue(i.Name, out r))
            {
                if (i.State == RemoteVolumeState.Uploading || i.State == RemoteVolumeState.Deleting || (r != null && r.File.Size != i.Size && r.File.Size >= 0 && i.Size >= 0))
                {
                    log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                    database.RemoveRemoteVolume(i.Name, null);
                }
                else { missing.Add(i); }
            }
            else if (i.State != RemoteVolumeState.Verified)
            {
                database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
            }

            lookup.Remove(i.Name);
        }
    }

    return new RemoteAnalysisResult() { ParsedVolumes = remotelist, ExtraVolumes = lookup.Values, MissingVolumes = missing };
}
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to report results to</param>
/// <param name="protectedfile">A filename that should be exempted from deletion</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, string protectedfile)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();
    protectedfile = protectedfile ?? string.Empty;

    var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix == options.Prefix select p).ToList();
    var otherlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix != options.Prefix select p).ToList();
    var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList();
    var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList();

    log.KnownFileCount = remotelist.Count;
    log.KnownFileSize = remotelist.Select(x => Math.Max(0, x.File.Size)).Sum();
    log.UnknownFileCount = unknownlist.Count;
    log.UnknownFileSize = unknownlist.Select(x => Math.Max(0, x.Size)).Sum();
    log.BackupListCount = filesets.Count;
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    // TODO: We should query through the backendmanager
    using (var bk = DynamicLoader.BackendLoader.GetBackend(backend.BackendUrl, options.RawOptions))
        if (bk is Library.Interface.IQuotaEnabledBackend)
        {
            Library.Interface.IQuotaInfo quota = ((Library.Interface.IQuotaEnabledBackend)bk).Quota;
            if (quota != null)
            {
                log.TotalQuotaSpace = quota.TotalQuotaSpace;
                log.FreeQuotaSpace = quota.FreeQuotaSpace;
            }
        }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist) { lookup[s.File.Name] = s; }

    var missing = new List<RemoteVolumeEntry>();
    var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
    var cleanupRemovedRemoteVolumes = new HashSet<string>();

    foreach (var e in database.DuplicateRemoteVolumes())
    {
        if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary) { database.UnlinkRemoteVolume(e.Key, e.Value); }
        else { throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString())); }
    }

    var locallist = database.GetRemoteVolumes();
    foreach (var i in locallist)
    {
        Volumes.IParsedVolume r;
        var remoteFound = lookup.TryGetValue(i.Name, out r);
        var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

        lookup.Remove(i.Name);

        switch (i.State)
        {
            case RemoteVolumeState.Deleted:
                if (remoteFound) { log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name)); }
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
                if (remoteFound)
                {
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                else
                {
                    if (i.deleteGracePeriod > DateTime.UtcNow)
                    {
                        log.AddMessage(string.Format("keeping delete request for {0} until {1}", i.Name, i.deleteGracePeriod.ToLocalTime()));
                    }
                    else
                    {
                        if (string.Equals(i.Name, protectedfile) && i.State == RemoteVolumeState.Temporary)
                        {
                            log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        }
                        else
                        {
                            log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                        }
                    }
                }
                break;

            case RemoteVolumeState.Uploading:
                if (remoteFound && correctSize && r.File.Size >= 0)
                {
                    log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                }
                else if (!remoteFound)
                {
                    if (string.Equals(i.Name, protectedfile))
                    {
                        log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Temporary, i.Size, i.Hash, false, new TimeSpan(0), null);
                    }
                    else
                    {
                        log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                        cleanupRemovedRemoteVolumes.Add(i.Name);
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                    }
                }
                else
                {
                    if (string.Equals(i.Name, protectedfile))
                    {
                        log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                    }
                    else
                    {
                        log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        backend.Delete(i.Name, i.Size, true);
                    }
                }
                break;

            case RemoteVolumeState.Uploaded:
                if (!remoteFound) { missing.Add(i); }
                else if (correctSize) { database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash); }
                else { missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); }
                break;

            case RemoteVolumeState.Verified:
                if (!remoteFound) { missing.Add(i); }
                else if (!correctSize) { missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); }
                break;

            default:
                log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                break;
        }

        backend.FlushDbMessages();
    }

    // cleanup deleted volumes in DB en bloc
    database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

    foreach (var i in missingHash) { log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null); }

    return new RemoteAnalysisResult()
    {
        ParsedVolumes = remotelist,
        OtherVolumes = otherlist,
        ExtraVolumes = lookup.Values,
        MissingVolumes = missing,
        VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
    };
}
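// The correctSize predicate used in each variant above accepts a volume when
// the local size is known and either matches the remote listing exactly or
// the remote listing reports no size. Restated as a standalone helper:
static bool IsCorrectSize(bool remoteFound, long localSize, long remoteSize)
{
    return remoteFound && localSize >= 0 && (localSize == remoteSize || remoteSize < 0);
}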
public void Run(string baseVersion, string compareVersion, IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null, Action<IListChangesResults, IEnumerable<Tuple<Library.Interface.ListChangesChangeType, Library.Interface.ListChangesElementType, string>>> callback = null)
{
    var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);

    var useLocalDb = !m_options.NoLocalDb && System.IO.File.Exists(m_options.Dbpath);

    baseVersion = string.IsNullOrEmpty(baseVersion) ? "1" : baseVersion;
    compareVersion = string.IsNullOrEmpty(compareVersion) ? "0" : compareVersion;

    long baseVersionIndex = -1;
    long compareVersionIndex = -1;
    DateTime baseVersionTime = new DateTime(0);
    DateTime compareVersionTime = new DateTime(0);

    using (var tmpdb = useLocalDb ? null : new Library.Utility.TempFile())
    using (var db = new Database.LocalListChangesDatabase(useLocalDb ? m_options.Dbpath : (string)tmpdb))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    using (var storageKeeper = db.CreateStorageHelper())
    {
        m_result.SetDatabase(db);

        if (useLocalDb)
        {
            var dbtimes = db.FilesetTimes.ToList();
            if (dbtimes.Count < 2) { throw new UserInformationException(string.Format("Need at least two backups to show differences, database contains {0} backups", dbtimes.Count), "NeedTwoBackupsToStartDiff"); }

            long baseVersionId;
            long compareVersionId;

            var times = dbtimes.Zip(Enumerable.Range(0, dbtimes.Count), (a, b) => new Tuple<long, DateTime, long>(b, a.Value, a.Key)).ToList();
            var bt = SelectTime(baseVersion, times, out baseVersionIndex, out baseVersionTime, out baseVersionId);
            times.Remove(bt);
            SelectTime(compareVersion, times, out compareVersionIndex, out compareVersionTime, out compareVersionId);

            storageKeeper.AddFromDb(baseVersionId, false, filter);
            storageKeeper.AddFromDb(compareVersionId, true, filter);
        }
        else
        {
            Logging.Log.WriteInformationMessage(LOGTAG, "NoLocalDatabase", "No local database, accessing remote store");

            var parsedlist = (from n in backend.List()
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null && p.FileType == RemoteVolumeType.Files
                              orderby p.Time descending
                              select p).ToArray();

            var numberedList = parsedlist.Zip(Enumerable.Range(0, parsedlist.Length), (a, b) => new Tuple<long, DateTime, Volumes.IParsedVolume>(b, a.Time, a)).ToList();
            if (numberedList.Count < 2) { throw new UserInformationException(string.Format("Need at least two backups to show differences, database contains {0} backups", numberedList.Count), "NeedTwoBackupsToStartDiff"); }

            Volumes.IParsedVolume baseFile;
            Volumes.IParsedVolume compareFile;

            var bt = SelectTime(baseVersion, numberedList, out baseVersionIndex, out baseVersionTime, out baseFile);
            numberedList.Remove(bt);
            SelectTime(compareVersion, numberedList, out compareVersionIndex, out compareVersionTime, out compareFile);

            Func<FilelistEntryType, Library.Interface.ListChangesElementType> conv = (x) =>
            {
                switch (x)
                {
                    case FilelistEntryType.File:
                        return Library.Interface.ListChangesElementType.File;
                    case FilelistEntryType.Folder:
                        return Library.Interface.ListChangesElementType.Folder;
                    case FilelistEntryType.Symlink:
                        return Library.Interface.ListChangesElementType.Symlink;
                    default:
                        return (Library.Interface.ListChangesElementType)(-1);
                }
            };

            if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { return; }

            using (var tmpfile = backend.Get(baseFile.File.Name, baseFile.File.Size, null))
            using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(baseFile.File.Name), tmpfile, m_options))
                foreach (var f in rd.Files)
                {
                    if (Library.Utility.FilterExpression.Matches(filter, f.Path)) { storageKeeper.AddElement(f.Path, f.Hash, f.Metahash, f.Size, conv(f.Type), false); }
                }

            if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { return; }

            using (var tmpfile = backend.Get(compareFile.File.Name, compareFile.File.Size, null))
            using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(compareFile.File.Name), tmpfile, m_options))
                foreach (var f in rd.Files)
                {
                    if (Library.Utility.FilterExpression.Matches(filter, f.Path)) { storageKeeper.AddElement(f.Path, f.Hash, f.Metahash, f.Size, conv(f.Type), true); }
                }
        }

        var changes = storageKeeper.CreateChangeCountReport();
        var sizes = storageKeeper.CreateChangeSizeReport();
        var lst = (m_options.FullResult || callback != null) ? (from n in storageKeeper.CreateChangedFileReport() select n) : null;

        m_result.SetResult(
            baseVersionTime, baseVersionIndex, compareVersionTime, compareVersionIndex,
            changes.AddedFolders, changes.AddedSymlinks, changes.AddedFiles,
            changes.DeletedFolders, changes.DeletedSymlinks, changes.DeletedFiles,
            changes.ModifiedFolders, changes.ModifiedSymlinks, changes.ModifiedFiles,
            sizes.AddedSize, sizes.DeletedSize, sizes.PreviousSize, sizes.CurrentSize,
            (lst == null || callback == null) ? null : lst.ToArray()
        );

        if (callback != null) { callback(m_result, lst); }

        return;
    }
}
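// Both branches above assign version numbers by zipping the time-descending
// fileset list with a 0-based range, so version 0 is always the most recent
// backup. A minimal sketch of that numbering (the tuple shape is a simplified
// stand-in for the triples used above):
static List<Tuple<long, DateTime>> NumberVersions(IEnumerable<DateTime> timesNewestFirst)
{
    return timesNewestFirst.Select((time, index) => new Tuple<long, DateTime>((long)index, time)).ToList();
}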
public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
{
    var parsedfilter = new Library.Utility.FilterExpression(filterstrings);
    var simpleList = !(parsedfilter.Type == Library.Utility.FilterType.Simple || m_options.AllVersions);
    var filter = Library.Utility.JoinedFilterExpression.Join(parsedfilter, compositefilter);

    //Use a speedy local query
    if (!m_options.NoLocalDb && System.IO.File.Exists(m_options.Dbpath))
        using (var db = new Database.LocalListDatabase(m_options.Dbpath))
        {
            m_result.SetDatabase(db);
            using (var filesets = db.SelectFileSets(m_options.Time, m_options.Version))
            {
                if (parsedfilter.Type != Library.Utility.FilterType.Empty)
                {
                    if (simpleList || (m_options.ListFolderContents && !m_options.AllVersions))
                        filesets.TakeFirst();
                }

                IEnumerable<Database.LocalListDatabase.IFileversion> files;
                if (m_options.ListFolderContents)
                    files = filesets.SelectFolderContents(filter);
                else if (m_options.ListPrefixOnly)
                    files = filesets.GetLargestPrefix(filter);
                else if (parsedfilter.Type == Duplicati.Library.Utility.FilterType.Empty)
                    files = null;
                else
                    files = filesets.SelectFiles(filter);

                if (m_options.ListSetsOnly)
                    m_result.SetResult(
                        filesets.QuickSets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(),
                        null
                    );
                else
                    m_result.SetResult(
                        filesets.Sets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(),
                        files == null ? null :
                            (from n in files
                             select (Duplicati.Library.Interface.IListResultFile)(new ListResultFile(n.Path, n.Sizes.ToArray())))
                            .ToArray()
                    );

                return;
            }
        }

    m_result.AddMessage("No local database, accessing remote store");

    //TODO: Add prefix and foldercontents

    // Otherwise, grab info from remote location
    using (var tmpdb = new Library.Utility.TempFile())
    using (var db = new Database.LocalDatabase(tmpdb, "List"))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        m_result.SetDatabase(db);

        var filteredList = ParseAndFilterFilesets(backend.List(), m_options);
        if (filteredList.Count == 0)
            throw new Exception("No filesets found on remote target");

        var numberSeq = CreateResultSequence(filteredList);
        if (parsedfilter.Type == Library.Utility.FilterType.Empty)
        {
            m_result.SetResult(numberSeq, null);
            m_result.EncryptedFiles = filteredList.Any(x => !string.IsNullOrWhiteSpace(x.Value.EncryptionModule));
            return;
        }

        var firstEntry = filteredList[0].Value;
        filteredList.RemoveAt(0);
        Dictionary<string, List<long>> res;

        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
            return;

        using (var tmpfile = backend.Get(firstEntry.File.Name, firstEntry.File.Size, null))
        using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(firstEntry.File.Name), tmpfile, m_options))
            if (simpleList)
            {
                m_result.SetResult(
                    numberSeq.Take(1),
                    (from n in rd.Files
                     where Library.Utility.FilterExpression.Matches(filter, n.Path)
                     orderby n.Path
                     select new ListResultFile(n.Path, new long[] { n.Size }))
                    .ToArray()
                );

                return;
            }
            else
            {
                res = rd.Files
                    .Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path))
                    .ToDictionary(
                        x => x.Path,
                        y => { var lst = new List<long>(); lst.Add(y.Size); return lst; },
                        Library.Utility.Utility.ClientFilenameStringComparer
                    );
            }

        long flindex = 1;
        foreach (var flentry in filteredList)
            using (var tmpfile = backend.Get(flentry.Value.File.Name, flentry.Value.File == null ? -1 : flentry.Value.File.Size, null))
            using (var rd = new Volumes.FilesetVolumeReader(flentry.Value.CompressionModule, tmpfile, m_options))
            {
                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    return;

                foreach (var p in from n in rd.Files where Library.Utility.FilterExpression.Matches(filter, n.Path) select n)
                {
                    List<long> lst;
                    if (!res.TryGetValue(p.Path, out lst))
                    {
                        lst = new List<long>();
                        res[p.Path] = lst;
                        for (var i = 0; i < flindex; i++)
                            lst.Add(-1);
                    }

                    lst.Add(p.Size);
                }

                foreach (var n in from i in res where i.Value.Count < flindex + 1 select i)
                    n.Value.Add(-1);

                flindex++;
            }

        m_result.SetResult(
            numberSeq,
            from n in res
            orderby n.Key
            select (Duplicati.Library.Interface.IListResultFile)(new ListResultFile(n.Key, n.Value))
        );
    }
}
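// The accumulation loop above builds a path -> size-per-version matrix in
// which -1 means "absent in this version": a path first seen at version k is
// back-filled with k leading -1 entries, and any known path missing from the
// current version gets a trailing -1. A compact sketch of the same
// bookkeeping, one version column at a time:
static void AddVersionColumn(Dictionary<string, List<long>> matrix, long versionIndex, IEnumerable<KeyValuePair<string, long>> sizesInThisVersion)
{
    foreach (var kv in sizesInThisVersion)
    {
        List<long> row;
        if (!matrix.TryGetValue(kv.Key, out row))
        {
            row = new List<long>();
            for (var i = 0L; i < versionIndex; i++)
                row.Add(-1); // path did not exist in earlier versions
            matrix[kv.Key] = row;
        }

        row.Add(kv.Value);
    }

    // Pad paths that were seen before but are absent from this version.
    foreach (var row in matrix.Values)
        if (row.Count < versionIndex + 1)
            row.Add(-1);
}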
/// <summary> /// Helper method that verifies uploaded volumes and updates their state in the database. /// Throws an error if there are issues with the remote storage /// </summary> /// <param name="backend">The backend instance to use</param> /// <param name="options">The options used</param> /// <param name="database">The database to compare with</param> public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log) { var rawlist = backend.List(); var lookup = new Dictionary<string, Volumes.IParsedVolume>(); var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix == options.Prefix select p).ToList(); var otherlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null && p.Prefix != options.Prefix select p).ToList(); var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList(); var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList(); log.KnownFileCount = remotelist.Count(); log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum(); log.UnknownFileCount = unknownlist.Count(); log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum(); log.BackupListCount = filesets.Count; log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime(); if (backend is Library.Interface.IQuotaEnabledBackend) { log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace; log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace; } log.AssignedQuotaSpace = options.QuotaSize; foreach(var s in remotelist) lookup[s.File.Name] = s; var missing = new List<RemoteVolumeEntry>(); var missingHash = new List<Tuple<long, RemoteVolumeEntry>>(); var cleanupRemovedRemoteVolumes = new HashSet<string>(); foreach(var e in database.DuplicateRemoteVolumes()) { if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary) database.UnlinkRemoteVolume(e.Key, e.Value); else throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString())); } var locallist = database.GetRemoteVolumes(); foreach(var i in locallist) { Volumes.IParsedVolume r; var remoteFound = lookup.TryGetValue(i.Name, out r); var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0); lookup.Remove(i.Name); switch (i.State) { case RemoteVolumeState.Deleted: if (remoteFound) log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name)); break; case RemoteVolumeState.Temporary: case RemoteVolumeState.Deleting: if (remoteFound) { log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name)); backend.Delete(i.Name, i.Size, true); } else { if (i.deleteGracePeriod > DateTime.UtcNow) { log.AddMessage(string.Format("keeping delete request for {0} until {1}", i.Name, i.deleteGracePeriod.ToLocalTime())); } else { log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name)); cleanupRemovedRemoteVolumes.Add(i.Name); } } break; case RemoteVolumeState.Uploading: if (remoteFound && correctSize && r.File.Size >= 0) { log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded)); database.UpdateRemoteVolume(i.Name, 
RemoteVolumeState.Uploaded, i.Size, i.Hash); } else if (!remoteFound) { log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name)); cleanupRemovedRemoteVolumes.Add(i.Name); database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null); } else { log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name)); backend.Delete(i.Name, i.Size, true); } break; case RemoteVolumeState.Uploaded: if (!remoteFound) missing.Add(i); else if (correctSize) database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash); else missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); break; case RemoteVolumeState.Verified: if (!remoteFound) missing.Add(i); else if (!correctSize) missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); break; default: log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null); break; } backend.FlushDbMessages(); } // clean up deleted volumes in the DB en bloc database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null); foreach(var i in missingHash) log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null); return new RemoteAnalysisResult() { ParsedVolumes = remotelist, OtherVolumes = otherlist, ExtraVolumes = lookup.Values, MissingVolumes = missing, VerificationRequiredVolumes = missingHash.Select(x => x.Item2) }; }
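// Hedged usage sketch (hypothetical caller, not the actual Duplicati call site):
// acting on the analysis result using only the properties populated in the return
// statement above (requires System.Linq).
private static void ReportAnalysis(RemoteAnalysisResult analysis, IBackendWriter log)
{
    // Volumes whose recorded and listed sizes disagree must be downloaded and re-hashed
    foreach (var v in analysis.VerificationRequiredVolumes)
        log.AddWarning(string.Format("volume {0} has a size mismatch and requires verification", v.Name), null);

    // Volumes known to the database but absent from the remote listing are fatal here
    if (analysis.MissingVolumes.Any())
        throw new Exception(string.Format("found {0} missing volume(s), consider running repair", analysis.MissingVolumes.Count()));
}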
/// <summary> /// Run the recreate procedure /// </summary> /// <param name="dbparent">The database to restore into</param> /// <param name="updating">True if this is an update call, false otherwise</param> /// <param name="filter">A filter applied to the file paths inside each filelist, preventing unwanted entries from being processed</param> /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param> /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be used to recover data blocks while processing blocklists</param> internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running); //We build a local database in steps. using(var restoredb = new LocalRecreateDatabase(dbparent, m_options)) using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb)) { restoredb.RepairInProgress = true; var volumeIds = new Dictionary<string, long>(); var rawlist = backend.List(); //First step is to examine the remote storage to see what // kind of data we can find var remotefiles = (from x in rawlist let n = VolumeBase.ParseFilename(x) where n != null && n.Prefix == m_options.Prefix select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times if (remotefiles.Length == 0) { if (rawlist.Count == 0) throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?"); else { var tmp = (from x in rawlist let n = VolumeBase.ParseFilename(x) where n != null select n.Prefix).ToArray(); var types = tmp.Distinct().ToArray(); if (tmp.Length == 0) throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count)); else if (types.Length == 1) throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0])); else throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types))); } } //Then we select the filelist we should work with, // and create the filelist table to fit IEnumerable<IParsedVolume> filelists = from n in remotefiles where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n; if (filelists.Count() <= 0) throw new Exception(string.Format("No filelists found on the remote destination")); if (filelistfilter != null) filelists = filelistfilter(filelists).Select(x => x.Value).ToArray(); if (filelists.Count() <= 0) throw new Exception(string.Format("No filelists")); // If we are updating, all files should be accounted for foreach(var fl in remotefiles) volumeIds[fl.File.Name] = updating ?
restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded); var hasUpdatedOptions = false; if (updating) { Utility.UpdateOptionsFromDb(restoredb, m_options); Utility.VerifyParameters(restoredb, m_options); } //Record all blocksets and files needed using(var tr = restoredb.BeginTransaction()) { var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList(); m_result.AddMessage(string.Format("Rebuild database started, downloading {0} filelists", filelistWork.Count)); var progress = 0; // Register the files we are working with, if not already updated if (updating) { foreach(var n in filelists) if (volumeIds[n.File.Name] == -1) volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr); } var isFirstFilelist = true; var blocksize = m_options.Blocksize; var hashes_pr_block = blocksize / m_options.BlockhashSize; foreach(var entry in new AsyncDownloader(filelistWork, backend)) try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(restoredb, null); return; } progress++; if (filelistWork.Count == 1 && m_options.RepairOnlyPaths) m_result.OperationProgressUpdater.UpdateProgress(0.5f); else m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f)); using(var tmpfile = entry.TempFile) { isFirstFilelist = false; if (entry.Hash != null && entry.Size > 0) restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr); var parsed = VolumeBase.ParseFilename(entry.Name); if (!hasUpdatedOptions && !updating) { VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options); hasUpdatedOptions = true; // Recompute the cached sizes blocksize = m_options.Blocksize; hashes_pr_block = blocksize / m_options.BlockhashSize; } // Create timestamped operations based on the file timestamp var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr); using(var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options)) foreach(var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path))) { try { if (fe.Type == FilelistEntryType.Folder) { restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr); } else if (fe.Type == FilelistEntryType.File) { var expectedblocks = (fe.Size + blocksize - 1) / blocksize; var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block; if (expectedblocks <= 1) expectedblocklisthashes = 0; var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr); restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr); } else if (fe.Type == FilelistEntryType.Symlink) { restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? 
-1 : fe.Metasize, tr); } else { m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null); } } catch (Exception ex) { m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex); } } } } catch (Exception ex) { m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex); if (ex is System.Threading.ThreadAbortException) throw; if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException) throw; } //Make sure we write the config if (!updating) Utility.VerifyParameters(restoredb, m_options, tr); using(new Logging.Timer("CommitUpdateFilesetFromRemote")) tr.Commit(); } if (!m_options.RepairOnlyPaths) { var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm); if (hashalg == null) throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm)); var hashsize = hashalg.HashSize / 8; //Grab all index files, and update the block table using(var tr = restoredb.BeginTransaction()) { var indexfiles = ( from n in remotefiles where n.FileType == RemoteVolumeType.Index select new RemoteVolume(n.File) as IRemoteVolume).ToList(); m_result.AddMessage(string.Format("Filelists restored, downloading {0} index files", indexfiles.Count)); var progress = 0; foreach(var sf in new AsyncDownloader(indexfiles, backend)) try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(restoredb, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f); using(var tmpfile = sf.TempFile) { if (sf.Hash != null && sf.Size > 0) restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr); using(var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize)) { foreach(var a in svr.Volumes) { var filename = a.Filename; var volumeID = restoredb.GetRemoteVolumeID(filename); // No such file if (volumeID < 0) volumeID = ProbeForMatchingFilename(ref filename, restoredb); // Still broken, register a missing item if (volumeID < 0) { var p = VolumeBase.ParseFilename(filename); if (p == null) throw new Exception(string.Format("Unable to parse filename: {0}", filename)); m_result.AddError(string.Format("Remote file referenced as {0}, but not found in list, registering a missing remote file", filename), null); volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr); } //Add all block/volume mappings foreach(var b in a.Blocks) restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr); restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr); restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr); } //If there are blocklists in the index file, update the blocklists foreach(var b in svr.BlockLists) restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr); } } } catch (Exception ex) { //Not fatal m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex); if (ex is System.Threading.ThreadAbortException) throw; } using(new Logging.Timer("CommitRecreatedDb")) tr.Commit(); // TODO: In some cases, we can avoid downloading all index files, // if we are lucky and pick the right ones } // We have now grabbed as much information as possible, // if we are still missing data, we must now fetch block files restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null); //We do this in 
three passes for(var i = 0; i < 3; i++) { // Grab the list matching the pass type var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList(); if (lst.Count > 0) { switch (i) { case 0: if (m_options.Verbose) m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name))); else m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count)); break; case 1: if (m_options.Verbose) m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name))); else m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count)); break; default: if (m_options.Verbose) m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name))); else m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count)); break; } } var progress = 0; foreach(var sf in new AsyncDownloader(lst, backend)) using(var tmpfile = sf.TempFile) using(var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options)) using(var tr = restoredb.BeginTransaction()) { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(restoredb, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f)); var volumeid = restoredb.GetRemoteVolumeID(sf.Name); restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr); // Update the block table so we know about the block/volume map foreach(var h in rd.Blocks) restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr); // Grab all known blocklists from the volume foreach(var blocklisthash in restoredb.GetBlockLists(volumeid)) restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr); // Update tables so we know if we are done restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr); using(new Logging.Timer("CommitRestoredBlocklist")) tr.Commit(); //At this point we can patch files with data from the block volume if (blockprocessor != null) blockprocessor(sf.Name, rd); } } } backend.WaitForComplete(restoredb, null); m_result.AddMessage("Recreate completed, verifying the database consistency"); //All done, we must verify that we have all blocklist fully intact // if this fails, the db will not be deleted, so it can be used, // except to continue a backup restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize); m_result.AddMessage("Recreate completed, and consistency checks completed, marking database as complete"); restoredb.RepairInProgress = false; } }
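// Editor's note: the expectedblocks / expectedblocklisthashes values computed while
// reading the filelists above are plain ceiling divisions. A standalone model of
// that arithmetic (hypothetical helper mirroring the inline expressions):
private static long ExpectedBlocklistHashes(long filesize, long blocksize, long hashesPrBlock)
{
    var expectedBlocks = (filesize + blocksize - 1) / blocksize;               // ceil(filesize / blocksize)
    var expectedHashes = (expectedBlocks + hashesPrBlock - 1) / hashesPrBlock; // ceil(blocks / hashes per blocklist)
    return expectedBlocks <= 1 ? 0 : expectedHashes;                           // single-block files carry no blocklist
}
// Example: a 300 KiB file with 100 KiB blocks and 32-byte block hashes yields
// 3 blocks and 102400 / 32 = 3200 hashes per blocklist entry, hence 1 blocklist hash.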
/// <summary> /// Helper method that verifies uploaded volumes and updates their state in the database. /// Throws an error if there are issues with the remote storage /// </summary> /// <param name="backend">The backend instance to use</param> /// <param name="options">The options used</param> /// <param name="database">The database to compare with</param> public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log) { var rawlist = backend.List(); var lookup = new Dictionary<string, Volumes.IParsedVolume>(); var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null select p).ToList(); var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList(); var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList(); log.KnownFileCount = remotelist.Count(); log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum(); log.UnknownFileCount = unknownlist.Count(); log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum(); log.BackupListCount = filesets.Count; log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime(); if (backend is Library.Interface.IQuotaEnabledBackend) { log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace; log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace; } log.AssignedQuotaSpace = options.QuotaSize; foreach(var s in remotelist) if (s.Prefix == options.Prefix) lookup[s.File.Name] = s; var missing = new List<RemoteVolumeEntry>(); var missingHash = new List<Tuple<long, RemoteVolumeEntry>>(); var locallist = database.GetRemoteVolumes(); foreach(var i in locallist) { Volumes.IParsedVolume r; var remoteFound = lookup.TryGetValue(i.Name, out r); var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0); lookup.Remove(i.Name); switch (i.State) { case RemoteVolumeState.Deleted: if (remoteFound) log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name)); break; case RemoteVolumeState.Temporary: case RemoteVolumeState.Deleting: if (remoteFound) { log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name)); backend.Delete(i.Name, i.Size, true); } else { log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name)); database.RemoveRemoteVolume(i.Name, null); } break; case RemoteVolumeState.Uploading: if (remoteFound && correctSize && r.File.Size >= 0) { log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded)); database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash); } else { log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name)); backend.Delete(i.Name, i.Size, true); } break; case RemoteVolumeState.Uploaded: if (!remoteFound) missing.Add(i); else if (correctSize) database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash); else missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); break; case RemoteVolumeState.Verified: if (!remoteFound) missing.Add(i); else if (!correctSize) missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i)); break; default: log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null); 
break; } } foreach(var i in missingHash) log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null); return new RemoteAnalysisResult() { ParsedVolumes = remotelist, ExtraVolumes = lookup.Values, MissingVolumes = missing, VerificationRequiredVolumes = missingHash.Select(x => x.Item2) }; }
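// Editor's sketch of the correctSize rule shared by the variants above: a negative
// remote size means the backend did not report one, and is accepted as matching.
private static bool CorrectSize(bool remoteFound, long databaseSize, long remoteSize)
{
    return remoteFound && databaseSize >= 0 && (databaseSize == remoteSize || remoteSize < 0);
}
// CorrectSize(true, 1024, 1024) -> true   (sizes agree)
// CorrectSize(true, 1024, -1)   -> true   (backend reported no size)
// CorrectSize(true, 1024, 999)  -> false  (mismatch, volume needs re-verification)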
public void Run(IEnumerable <string> filterstrings = null, Library.Utility.IFilter compositefilter = null) { var parsedfilter = new Library.Utility.FilterExpression(filterstrings); var simpleList = !(parsedfilter.Type == Library.Utility.FilterType.Simple || m_options.AllVersions); var filter = Library.Utility.JoinedFilterExpression.Join(parsedfilter, compositefilter); //Use a speedy local query if (!m_options.NoLocalDb && System.IO.File.Exists(m_options.Dbpath)) { using (var db = new Database.LocalListDatabase(m_options.Dbpath)) { m_result.SetDatabase(db); using (var filesets = db.SelectFileSets(m_options.Time, m_options.Version)) { if (parsedfilter.Type != Library.Utility.FilterType.Empty) { if (simpleList || (m_options.ListFolderContents && !m_options.AllVersions)) { filesets.TakeFirst(); } } IEnumerable <Database.LocalListDatabase.IFileversion> files; if (m_options.ListFolderContents) { files = filesets.SelectFolderContents(filter); } else if (m_options.ListPrefixOnly) { files = filesets.GetLargestPrefix(filter); } else if (parsedfilter.Type == Duplicati.Library.Utility.FilterType.Empty) { files = null; } else { files = filesets.SelectFiles(filter); } if (m_options.ListSetsOnly) { m_result.SetResult( filesets.QuickSets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(), null ); } else { m_result.SetResult( filesets.Sets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(), files == null ? null : (from n in files select(Duplicati.Library.Interface.IListResultFile)(new ListResultFile(n.Path, n.Sizes.ToArray()))) .ToArray() ); } return; } } } m_result.AddMessage("No local database, accessing remote store"); //TODO: Add prefix and foldercontents if (m_options.ListFolderContents) { throw new Exception("Listing folder contents is not supported without a local database, consider using the \"repair\" option to rebuild the database."); } else if (m_options.ListPrefixOnly) { throw new Exception("Listing prefixes is not supported without a local database, consider using the \"repair\" option to rebuild the database."); } // Otherwise, grab info from remote location using (var tmpdb = new Library.Utility.TempFile()) using (var db = new Database.LocalDatabase(tmpdb, "List", true)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) { m_result.SetDatabase(db); var filteredList = ParseAndFilterFilesets(backend.List(), m_options); if (filteredList.Count == 0) { throw new Exception("No filesets found on remote target"); } var numberSeq = CreateResultSequence(filteredList); if (parsedfilter.Type == Library.Utility.FilterType.Empty) { m_result.SetResult(numberSeq, null); m_result.EncryptedFiles = filteredList.Any(x => !string.IsNullOrWhiteSpace(x.Value.EncryptionModule)); return; } var firstEntry = filteredList[0].Value; filteredList.RemoveAt(0); Dictionary <string, List <long> > res; if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { return; } using (var tmpfile = backend.Get(firstEntry.File.Name, firstEntry.File.Size, null)) using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(firstEntry.File.Name), tmpfile, m_options)) if (simpleList) { m_result.SetResult( numberSeq.Take(1), (from n in rd.Files where Library.Utility.FilterExpression.Matches(filter, n.Path) orderby n.Path select new ListResultFile(n.Path, new long[] { n.Size })) .ToArray() ); return; } else { res = rd.Files .Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)) 
.ToDictionary( x => x.Path, y => { var lst = new List <long>(); lst.Add(y.Size); return(lst); }, Library.Utility.Utility.ClientFilenameStringComparer ); } long flindex = 1; foreach (var flentry in filteredList) { using (var tmpfile = backend.Get(flentry.Value.File.Name, flentry.Value.File == null ? -1 : flentry.Value.File.Size, null)) using (var rd = new Volumes.FilesetVolumeReader(flentry.Value.CompressionModule, tmpfile, m_options)) { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { return; } foreach (var p in from n in rd.Files where Library.Utility.FilterExpression.Matches(filter, n.Path) select n) { List <long> lst; if (!res.TryGetValue(p.Path, out lst)) { lst = new List <long>(); res[p.Path] = lst; for (var i = 0; i < flindex; i++) { lst.Add(-1); } } lst.Add(p.Size); } foreach (var n in from i in res where i.Value.Count < flindex + 1 select i) { n.Value.Add(-1); } flindex++; } } m_result.SetResult( numberSeq, from n in res orderby n.Key select(Duplicati.Library.Interface.IListResultFile)(new ListResultFile(n.Key, n.Value)) ); } }
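// Editor's note: the result dictionary above is keyed with
// Library.Utility.Utility.ClientFilenameStringComparer so lookups follow the client's
// filename case rules. A minimal stand-in using a framework comparer (assumption:
// ordinal-ignore-case approximates the Windows filesystem behavior):
var sizesByPath = new Dictionary<string, List<long>>(StringComparer.OrdinalIgnoreCase);
sizesByPath["C:\\Data\\File.txt"] = new List<long> { 10 };
var found = sizesByPath.ContainsKey("c:\\data\\file.txt"); // true under this comparer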
/// <summary> /// Helper method that verifies uploaded volumes and updates their state in the database. /// Throws an error if there are issues with the remote storage /// </summary> /// <param name="backend">The backend instance to use</param> /// <param name="options">The options used</param> /// <param name="database">The database to compare with</param> public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log) { var rawlist = backend.List(); var lookup = new Dictionary<string, Volumes.IParsedVolume>(); var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null select p).ToList(); var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList(); var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList(); log.KnownFileCount = remotelist.Count(); log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum(); log.UnknownFileCount = unknownlist.Count(); log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum(); log.BackupListCount = filesets.Count; log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime(); if (backend is Library.Interface.IQuotaEnabledBackend) { log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace; log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace; } log.AssignedQuotaSpace = options.QuotaSize; foreach (var s in remotelist) if (s.Prefix == options.Prefix) lookup[s.File.Name] = s; var missing = new List<RemoteVolumeEntry>(); var locallist = database.GetRemoteVolumes(); foreach (var i in locallist) { //Ignore those that are deleted if (i.State == RemoteVolumeState.Deleted) continue; if (i.State == RemoteVolumeState.Temporary) { log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name)); database.RemoveRemoteVolume(i.Name, null); } else if (i.State == RemoteVolumeState.Deleting && lookup.ContainsKey(i.Name)) { log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name)); backend.Delete(i.Name, i.Size, true); lookup.Remove(i.Name); } else { Volumes.IParsedVolume r; if (!lookup.TryGetValue(i.Name, out r)) { if (i.State == RemoteVolumeState.Uploading || i.State == RemoteVolumeState.Deleting || (r != null && r.File.Size != i.Size && r.File.Size >= 0 && i.Size >= 0)) { log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name)); database.RemoveRemoteVolume(i.Name, null); } else missing.Add(i); } else if (i.State != RemoteVolumeState.Verified) { database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash); } lookup.Remove(i.Name); } } return new RemoteAnalysisResult() { ParsedVolumes = remotelist, ExtraVolumes = lookup.Values, MissingVolumes = missing }; }
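// Editor's note: all three RemoteListAnalysis variants above probe the backend quota
// with the same double cast. A behaviorally identical modern C# pattern-matching
// form, shown for comparison only:
if (backend is Library.Interface.IQuotaEnabledBackend quotaBackend)
{
    log.TotalQuotaSpace = quotaBackend.TotalQuotaSpace;
    log.FreeQuotaSpace = quotaBackend.FreeQuotaSpace;
}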
/// <summary> /// Run the recreate procedure /// </summary> /// <param name="dbparent">The database to restore into</param> /// <param name="updating">True if this is an update call, false otherwise</param> /// <param name="filter">A filter applied to the file paths inside each filelist, preventing unwanted entries from being processed</param> /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param> /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be used to recover data blocks while processing blocklists</param> internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running); //We build a local database in steps. using (var restoredb = new LocalRecreateDatabase(dbparent, m_options)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb)) { restoredb.RepairInProgress = true; var volumeIds = new Dictionary <string, long>(); var rawlist = backend.List(); //First step is to examine the remote storage to see what // kind of data we can find var remotefiles = (from x in rawlist let n = VolumeBase.ParseFilename(x) where n != null && n.Prefix == m_options.Prefix select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times if (remotefiles.Length == 0) { if (rawlist.Count == 0) { throw new UserInformationException("No files were found at the remote location, perhaps the target url is incorrect?", "EmptyRemoteLocation"); } else { var tmp = (from x in rawlist let n = VolumeBase.ParseFilename(x) where n != null select n.Prefix).ToArray(); var types = tmp.Distinct().ToArray(); if (tmp.Length == 0) { throw new UserInformationException(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count), "EmptyRemoteLocation"); } else if (types.Length == 1) { throw new UserInformationException(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup prefix?", tmp.Length, types[0]), "EmptyRemoteLocationWithPrefix"); } else { throw new UserInformationException(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)), "EmptyRemoteLocationWithPrefix"); } } } //Then we select the filelist we should work with, // and create the filelist table to fit IEnumerable <IParsedVolume> filelists = from n in remotefiles where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n; if (filelists.Count() <= 0) { throw new UserInformationException(string.Format("No filelists found on the remote destination"), "EmptyRemoteLocation"); } if (filelistfilter != null) { filelists = filelistfilter(filelists).Select(x => x.Value).ToArray(); } if (filelists.Count() <= 0) { throw new UserInformationException(string.Format("No filelists"), "NoMatchingRemoteFilelists"); } // If we are updating, all files should be accounted for foreach (var fl in remotefiles) { volumeIds[fl.File.Name] = updating ?
restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded); } var hasUpdatedOptions = false; if (updating) { Utility.UpdateOptionsFromDb(restoredb, m_options); Utility.VerifyParameters(restoredb, m_options); } //Record all blocksets and files needed using (var tr = restoredb.BeginTransaction()) { var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList(); Logging.Log.WriteInformationMessage(LOGTAG, "RebuildStarted", "Rebuild database started, downloading {0} filelists", filelistWork.Count); var progress = 0; // Register the files we are working with, if not already updated if (updating) { foreach (var n in filelists) { if (volumeIds[n.File.Name] == -1) { volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr); } } } var isFirstFilelist = true; var blocksize = m_options.Blocksize; var hashes_pr_block = blocksize / m_options.BlockhashSize; foreach (var entry in new AsyncDownloader(filelistWork, backend)) { try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(restoredb, null); m_result.EndTime = DateTime.UtcNow; return; } progress++; if (filelistWork.Count == 1 && m_options.RepairOnlyPaths) { m_result.OperationProgressUpdater.UpdateProgress(0.5f); } else { m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f)); } using (var tmpfile = entry.TempFile) { isFirstFilelist = false; if (entry.Hash != null && entry.Size > 0) { restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr); } var parsed = VolumeBase.ParseFilename(entry.Name); if (!hasUpdatedOptions && !updating) { VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options); hasUpdatedOptions = true; // Recompute the cached sizes blocksize = m_options.Blocksize; hashes_pr_block = blocksize / m_options.BlockhashSize; } // Create timestamped operations based on the file timestamp var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr); using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options)) foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path))) { try { var expectedmetablocks = (fe.Metasize + blocksize - 1) / blocksize; var expectedmetablocklisthashes = (expectedmetablocks + hashes_pr_block - 1) / hashes_pr_block; if (expectedmetablocks <= 1) { expectedmetablocklisthashes = 0; } var metadataid = long.MinValue; switch (fe.Type) { case FilelistEntryType.Folder: metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr); restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, metadataid, tr); break; case FilelistEntryType.File: var expectedblocks = (fe.Size + blocksize - 1) / blocksize; var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block; if (expectedblocks <= 1) { expectedblocklisthashes = 0; } var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr); metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr); restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, metadataid, tr); if (fe.Size <= blocksize) 
{ if (!string.IsNullOrWhiteSpace(fe.Blockhash)) { restoredb.AddSmallBlocksetLink(fe.Hash, fe.Blockhash, fe.Blocksize, tr); } else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm) { restoredb.AddSmallBlocksetLink(fe.Hash, fe.Hash, fe.Size, tr); } else { Logging.Log.WriteWarningMessage(LOGTAG, "MissingBlockHash", null, "No block hash found for file: {0}", fe.Path); } } break; case FilelistEntryType.Symlink: metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr); restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, metadataid, tr); break; default: Logging.Log.WriteWarningMessage(LOGTAG, "SkippingUnknownFileEntry", null, "Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path); break; } if (fe.Metasize <= blocksize && (fe.Type == FilelistEntryType.Folder || fe.Type == FilelistEntryType.File || fe.Type == FilelistEntryType.Symlink)) { if (!string.IsNullOrWhiteSpace(fe.Metablockhash)) { restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metablockhash, fe.Metasize, tr); } else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm) { restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metahash, fe.Metasize, tr); } else { Logging.Log.WriteWarningMessage(LOGTAG, "MissingMetadataBlockHash", null, "No block hash found for file metadata: {0}", fe.Path); } } } catch (Exception ex) { Logging.Log.WriteWarningMessage(LOGTAG, "FileEntryProcessingFailed", ex, "Failed to process file-entry: {0}", fe.Path); } } } } catch (Exception ex) { Logging.Log.WriteWarningMessage(LOGTAG, "FileProcessingFailed", ex, "Failed to process file: {0}", entry.Name); if (ex is System.Threading.ThreadAbortException) { m_result.EndTime = DateTime.UtcNow; throw; } if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException) { m_result.EndTime = DateTime.UtcNow; throw; } } } //Make sure we write the config if (!updating) { Utility.VerifyParameters(restoredb, m_options, tr); } using (new Logging.Timer(LOGTAG, "CommitUpdateFilesetFromRemote", "CommitUpdateFilesetFromRemote")) tr.Commit(); } if (!m_options.RepairOnlyPaths) { var hashalg = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm); if (hashalg == null) { throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported"); } var hashsize = hashalg.HashSize / 8; //Grab all index files, and update the block table using (var tr = restoredb.BeginTransaction()) { var indexfiles = ( from n in remotefiles where n.FileType == RemoteVolumeType.Index select new RemoteVolume(n.File) as IRemoteVolume).ToList(); Logging.Log.WriteInformationMessage(LOGTAG, "FilelistsRestored", "Filelists restored, downloading {0} index files", indexfiles.Count); var progress = 0; foreach (var sf in new AsyncDownloader(indexfiles, backend)) { try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(restoredb, null); m_result.EndTime = DateTime.UtcNow; return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f); using (var tmpfile = sf.TempFile) { if (sf.Hash != null && sf.Size > 0) { restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr); } using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize)) { foreach (var a in svr.Volumes) { var filename = a.Filename; var volumeID = restoredb.GetRemoteVolumeID(filename); // No 
such file if (volumeID < 0) { volumeID = ProbeForMatchingFilename(ref filename, restoredb); } // Still broken, register a missing item if (volumeID < 0) { var p = VolumeBase.ParseFilename(filename); if (p == null) { throw new Exception(string.Format("Unable to parse filename: {0}", filename)); } Logging.Log.WriteErrorMessage(LOGTAG, "MissingFileDetected", null, "Remote file referenced as {0}, but not found in list, registering a missing remote file", filename); volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr); } //Add all block/volume mappings foreach (var b in a.Blocks) { restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr); } restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr); restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr); } //If there are blocklists in the index file, update the blocklists foreach (var b in svr.BlockLists) { restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr); } } } } catch (Exception ex) { //Not fatal Logging.Log.WriteErrorMessage(LOGTAG, "IndexFileProcessingFailed", ex, "Failed to process index file: {0}", sf.Name); if (ex is System.Threading.ThreadAbortException) { m_result.EndTime = DateTime.UtcNow; throw; } } } using (new Logging.Timer(LOGTAG, "CommitRecreateDb", "CommitRecreatedDb")) tr.Commit(); // TODO: In some cases, we can avoid downloading all index files, // if we are lucky and pick the right ones } // We have now grabbed as much information as possible, // if we are still missing data, we must now fetch block files restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null); //We do this in three passes for (var i = 0; i < 3; i++) { // Grab the list matching the pass type var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList(); if (lst.Count > 0) { var fullist = ": " + string.Join(", ", lst.Select(x => x.Name)); switch (i) { case 0: Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, fullist); Logging.Log.WriteInformationMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty); break; case 1: Logging.Log.WriteVerboseMessage(LOGTAG, "ProbingCandidateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, fullist); Logging.Log.WriteInformationMessage(LOGTAG, "ProbingCandidateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty); break; default: Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, fullist); Logging.Log.WriteInformationMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, m_options.FullResult ?
fullist : string.Empty); break; } } var progress = 0; foreach (var sf in new AsyncDownloader(lst, backend)) { using (var tmpfile = sf.TempFile) using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options)) using (var tr = restoredb.BeginTransaction()) { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(restoredb, null); m_result.EndTime = DateTime.UtcNow; return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f)); var volumeid = restoredb.GetRemoteVolumeID(sf.Name); restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr); // Update the block table so we know about the block/volume map foreach (var h in rd.Blocks) { restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr); } // Grab all known blocklists from the volume foreach (var blocklisthash in restoredb.GetBlockLists(volumeid)) { restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr); } // Update tables so we know if we are done restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr); using (new Logging.Timer(LOGTAG, "CommitRestoredBlocklist", "CommitRestoredBlocklist")) tr.Commit(); //At this point we can patch files with data from the block volume if (blockprocessor != null) { blockprocessor(sf.Name, rd); } } } } } backend.WaitForComplete(restoredb, null); if (m_options.RepairOnlyPaths) { Logging.Log.WriteInformationMessage(LOGTAG, "RecreateOrUpdateOnly", "Recreate/path-update completed, not running consistency checks"); } else { Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompletedCheckingDatabase", "Recreate completed, verifying the database consistency"); //All done, we must verify that we have all blocklist fully intact // if this fails, the db will not be deleted, so it can be used, // except to continue a backup m_result.EndTime = DateTime.UtcNow; using (var lbfdb = new LocalListBrokenFilesDatabase(restoredb)) { var broken = lbfdb.GetBrokenFilesets(new DateTime(0), null, null).Count(); if (broken != 0) { throw new UserInformationException(string.Format("Recreated database has missing blocks and {0} broken filelists. Consider using \"{1}\" and \"{2}\" to purge broken data from the remote store and the database.", broken, "list-broken-files", "purge-broken-files"), "DatabaseIsBrokenConsiderPurge"); } } restoredb.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, true, null); Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompleted", "Recreate completed, and consistency checks completed, marking database as complete"); restoredb.RepairInProgress = false; } m_result.EndTime = DateTime.UtcNow; } }
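// Editor's note: the UpdateProgress calls in DoRun above partition overall progress
// as filelists 0.0-0.2, index files 0.2-0.7, then one 0.1-wide band per blocklist
// pass (0.7-0.8, 0.8-0.9, 0.9-1.0). A standalone model of the per-pass formula:
private static float BlocklistPassProgress(int pass, int downloaded, int total)
{
    return ((float)downloaded / total) * 0.1f + 0.7f + pass * 0.1f;
}
// BlocklistPassProgress(0, 5, 10) -> 0.75f; BlocklistPassProgress(2, 10, 10) -> 1.0f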
public void Run(IEnumerable <string> filterstrings = null, Library.Utility.IFilter compositefilter = null) { if (string.IsNullOrEmpty(m_options.Restorepath)) { throw new Exception("Cannot restore control files without --restore-path"); } if (!System.IO.Directory.Exists(m_options.Restorepath)) { System.IO.Directory.CreateDirectory(m_options.Restorepath); } using (var tmpdb = new Library.Utility.TempFile()) using (var db = new Database.LocalDatabase(System.IO.File.Exists(m_options.Dbpath) ? m_options.Dbpath : (string)tmpdb, "RestoreControlFiles")) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) { m_result.SetDatabase(db); var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter); try { var filteredList = ListFilesHandler.ParseAndFilterFilesets(backend.List(), m_options); if (filteredList.Count == 0) { throw new Exception("No filesets found on remote target"); } Exception lastEx = new Exception("No suitable files found on remote target"); foreach (var fileversion in filteredList) { try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, null); return; } var file = fileversion.Value.File; long size; string hash; RemoteVolumeType type; RemoteVolumeState state; if (!db.GetRemoteVolume(file.Name, out hash, out size, out type, out state)) { size = file.Size; } var res = new List <string>(); using (var tmpfile = backend.Get(file.Name, size, hash)) using (var tmp = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(file.Name), tmpfile, m_options)) foreach (var cf in tmp.ControlFiles) { if (Library.Utility.FilterExpression.Matches(filter, cf.Key)) { var targetpath = System.IO.Path.Combine(m_options.Restorepath, cf.Key); using (var ts = System.IO.File.Create(targetpath)) Library.Utility.Utility.CopyStream(cf.Value, ts); res.Add(targetpath); } } m_result.SetResult(res); lastEx = null; break; } catch (Exception ex) { lastEx = ex; if (ex is System.Threading.ThreadAbortException) { throw; } } } if (lastEx != null) { throw lastEx; } } finally { backend.WaitForComplete(db, null); } db.WriteResults(); } }
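// Editor's sketch: the extraction loop above writes each matching control file
// verbatim under --restore-path. An equivalent standalone copy step, assuming a
// hypothetical archive entry stream and name (Stream.CopyTo stands in for
// Library.Utility.Utility.CopyStream):
private static string WriteControlFile(string restorepath, string name, System.IO.Stream source)
{
    var targetpath = System.IO.Path.Combine(restorepath, name);
    using (var ts = System.IO.File.Create(targetpath)) // create or overwrite the target
        source.CopyTo(ts);                             // stream the entry to disk
    return targetpath;
}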