public void Run(long samples) { if (!System.IO.File.Exists(m_options.Dbpath)) { throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath), "DatabaseDoesNotExist"); } using (var db = new LocalTestDatabase(m_options.Dbpath)) using (var backend = new BackendManager(m_backendurl, m_options, m_results.BackendWriter, db)) { db.SetResult(m_results); Utility.UpdateOptionsFromDb(db, m_options); Utility.VerifyParameters(db, m_options); db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, true, null); if (!m_options.NoBackendverification) { var backupDatabase = new LocalBackupDatabase(db, m_options); var latestFilelist = backupDatabase.GetTemporaryFilelistVolumeNames(latestOnly: true); FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_results.BackendWriter, latestFilelist); } DoRun(samples, db, backend); db.WriteResults(); } }
private void PreBackupVerify(BackendManager backend, string protectedfile) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify); using (new Logging.Timer(LOGTAG, "PreBackupVerify", "PreBackupVerify")) { try { if (m_options.NoBackendverification) { FilelistProcessor.VerifyLocalList(backend, m_options, m_database, m_result.BackendWriter); UpdateStorageStatsFromDatabase(); } else { FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, protectedfile); } } catch (Exception ex) { if (m_options.AutoCleanup) { Logging.Log.WriteWarningMessage(LOGTAG, "BackendVerifyFailedAttemptingCleanup", ex, "Backend verification failed, attempting automatic cleanup"); m_result.RepairResults = new RepairResults(m_result); new RepairHandler(backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run(); Logging.Log.WriteInformationMessage(LOGTAG, "BackendCleanupFinished", "Backend cleanup finished, retrying verification"); FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter); } else { throw; } } } }
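// --- Editor's note (added): summary of the pattern above, not part of the original source ---
// PreBackupVerify implements a verify/repair/re-verify pattern: if the initial remote
// verification throws and --auto-cleanup is enabled, a RepairHandler run is attempted and
// the verification is retried once more; without --auto-cleanup the original exception
// simply propagates to the caller.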
public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter backendWriter, bool latestVolumesOnly, IDbTransaction transaction) { if (!options.NoBackendverification) { LocalBackupDatabase backupDatabase = new LocalBackupDatabase(database, options); IEnumerable <string> protectedFiles = backupDatabase.GetTemporaryFilelistVolumeNames(latestVolumesOnly, transaction); FilelistProcessor.VerifyRemoteList(backend, options, database, backendWriter, protectedFiles); } }
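// --- Editor's note (added): illustrative usage, not part of the original source ---
// A minimal sketch of how the static VerifyRemoteList overload above might be invoked from
// an operation handler; m_backendurl, m_options, m_database and m_result are assumed
// members modeled on the surrounding snippets.
using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
{
    // Restrict verification to the volumes of the most recent fileset, outside any transaction.
    FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, latestVolumesOnly: true, transaction: null);
}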
public virtual void Run() { if (!System.IO.File.Exists(m_options.Dbpath)) { throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath)); } using (var db = new LocalDeleteDatabase(m_options.Dbpath, "Compact")) { var tr = db.BeginTransaction(); try { m_result.SetDatabase(db); Utility.UpdateOptionsFromDb(db, m_options); Utility.VerifyParameters(db, m_options); var changed = DoCompact(db, false, ref tr, null); if (changed && m_options.UploadVerificationFile) { FilelistProcessor.UploadVerificationFile(m_backendurl, m_options, m_result.BackendWriter, db, null); } if (!m_options.Dryrun) { using (new Logging.Timer(LOGTAG, "CommitCompact", "CommitCompact")) tr.Commit(); if (changed) { db.WriteResults(); if (m_options.AutoVacuum) { m_result.VacuumResults = new VacuumResults(m_result); new VacuumHandler(m_options, (VacuumResults)m_result.VacuumResults).Run(); } } } else { tr.Rollback(); } tr = null; } finally { if (tr != null) { try { tr.Rollback(); } catch { } } } } }
public static Tuple <DateTime, long, long>[] GetBrokenFilesetsFromRemote(string backendurl, BasicResults result, Database.LocalListBrokenFilesDatabase db, System.Data.IDbTransaction transaction, Options options, out List <Database.RemoteVolumeEntry> missing) { missing = null; var brokensets = db.GetBrokenFilesets(options.Time, options.Version, transaction).ToArray(); if (brokensets.Length == 0) { if (db.RepairInProgress) { throw new UserInformationException("Cannot continue because the database is marked as being under repair, but does not have broken files.", "CannotListOnDatabaseInRepair"); } Logging.Log.WriteInformationMessage(LOGTAG, "NoBrokenFilesetsInDatabase", "No broken filesets found in database, checking for missing remote files"); using (var backend = new BackendManager(backendurl, options, result.BackendWriter, db)) { var remotestate = FilelistProcessor.RemoteListAnalysis(backend, options, db, result.BackendWriter, null); if (!remotestate.ParsedVolumes.Any()) { throw new UserInformationException("No remote volumes were found, refusing purge", "CannotPurgeWithNoRemoteVolumes"); } missing = remotestate.MissingVolumes.ToList(); if (missing.Count == 0) { Logging.Log.WriteInformationMessage(LOGTAG, "NoMissingFilesFound", "Skipping operation because no files were found to be missing, and no filesets were recorded as broken."); return(null); } // Mark all volumes as disposable foreach (var f in missing) { db.UpdateRemoteVolume(f.Name, RemoteVolumeState.Deleting, f.Size, f.Hash, transaction); } Logging.Log.WriteInformationMessage(LOGTAG, "MarkedRemoteFilesForDeletion", "Marked {0} remote files for deletion", missing.Count); // Drop all content from tables db.RemoveMissingBlocks(missing.Select(x => x.Name), transaction); } brokensets = db.GetBrokenFilesets(options.Time, options.Version, transaction).ToArray(); } return(brokensets); }
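// --- Editor's note (added): illustrative usage, not part of the original source ---
// GetBrokenFilesetsFromRemote returns null when neither broken filesets nor missing remote
// volumes are found, so callers can treat null as "nothing to purge". The locals below are
// assumptions for the sketch.
List <Database.RemoteVolumeEntry> missing;
var brokensets = GetBrokenFilesetsFromRemote(backendurl, result, db, transaction, options, out missing);
if (brokensets == null)
    return; // nothing is broken and nothing is missing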
/// <summary> /// Uploads the verification file. /// </summary> /// <param name="backendurl">The backend url</param> /// <param name="options">The options to use</param> /// <param name="result">The result writer</param> /// <param name="db">The attached database</param> /// <param name="transaction">An optional transaction object</param> public static void UploadVerificationFile(string backendurl, Options options, IBackendWriter result, LocalDatabase db, System.Data.IDbTransaction transaction) { using (var backend = new BackendManager(backendurl, options, result, db)) using (var tempfile = new Library.Utility.TempFile()) { var remotename = options.Prefix + "-verification.json"; using (var stream = new System.IO.StreamWriter(tempfile, false, System.Text.Encoding.UTF8)) FilelistProcessor.CreateVerificationFile(db, stream); if (options.Dryrun) { result.AddDryrunMessage(string.Format("Would upload verification file: {0}, size: {1}", remotename, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(tempfile).Length))); } else { backend.PutUnencrypted(remotename, tempfile); backend.WaitForComplete(db, transaction); } } }
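// --- Editor's note (added): illustrative usage, not part of the original source ---
// A dry-run invocation only reports the remote name ("<prefix>-verification.json") and the
// file size; nothing is uploaded. The option dictionary and backend URL are hypothetical,
// and the Options constructor taking a dictionary is an assumption.
var options = new Options(new Dictionary <string, string> { ["dry-run"] = "true" });
FilelistProcessor.UploadVerificationFile("file:///backups/target", options, result.BackendWriter, db, null);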
private void PostBackupVerification() { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify); using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database)) { using (new Logging.Timer(LOGTAG, "AfterBackupVerify", "AfterBackupVerify")) FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter); backend.WaitForComplete(m_database, null); } if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest); m_result.TestResults = new TestResults(m_result); using (var testdb = new LocalTestDatabase(m_database)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb)) new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults) .DoRun(m_options.BackupTestSampleCount, testdb, backend); } }
public void Run(long samples) { if (!System.IO.File.Exists(m_options.Dbpath)) { throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath)); } using (var db = new LocalTestDatabase(m_options.Dbpath)) using (var backend = new BackendManager(m_backendurl, m_options, m_results.BackendWriter, db)) { db.SetResult(m_results); Utility.VerifyParameters(db, m_options); if (!m_options.NoBackendverification) { FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_results.BackendWriter); } DoRun(samples, db, backend); db.WriteResults(); } }
public virtual void Run() { if (!System.IO.File.Exists(m_options.Dbpath)) { throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath)); } using (var db = new LocalDeleteDatabase(m_options.Dbpath, true)) using (var tr = db.BeginTransaction()) { m_result.SetDatabase(db); Utility.UpdateOptionsFromDb(db, m_options); Utility.VerifyParameters(db, m_options); var changed = DoCompact(db, false, tr); if (changed && m_options.UploadVerificationFile) { FilelistProcessor.UploadVerificationFile(m_backendurl, m_options, m_result.BackendWriter, db, null); } if (!m_options.Dryrun) { using (new Logging.Timer("CommitCompact")) tr.Commit(); if (changed) { db.WriteResults(); db.Vacuum(); } } else { tr.Rollback(); } } }
private void PostBackupVerification() { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify); using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database)) { using (new Logging.Timer(LOGTAG, "AfterBackupVerify", "AfterBackupVerify")) FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter); backend.WaitForComplete(m_database, null); } long remoteVolumeCount = m_database.GetRemoteVolumes().LongCount(x => x.State == RemoteVolumeState.Verified); long samplesToTest = Math.Max(m_options.BackupTestSampleCount, (long)Math.Round(remoteVolumeCount * (m_options.BackupTestPercentage / 100D), MidpointRounding.AwayFromZero)); if (samplesToTest > 0 && remoteVolumeCount > 0) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest); m_result.TestResults = new TestResults(m_result); using (var testdb = new LocalTestDatabase(m_database)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb)) new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults) .DoRun(samplesToTest, testdb, backend); } }
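// --- Editor's note (added): worked example, not part of the original source ---
// The sample size above is max(BackupTestSampleCount, round(verifiedCount * BackupTestPercentage / 100)).
// For example, with 250 verified volumes, BackupTestSampleCount = 1 and BackupTestPercentage = 10:
//   Math.Max(1L, (long)Math.Round(250 * (10 / 100D), MidpointRounding.AwayFromZero)) == 25
// so 25 volumes would be handed to TestHandler.DoRun.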
public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact, BackendManager sharedManager) { // Workaround where we allow a running backendmanager to be used using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null) { var backend = bk ?? sharedManager; if (!hasVerifiedBackend && !m_options.NoBackendverification) { var backupDatabase = new LocalBackupDatabase(db, m_options); var latestFilelist = backupDatabase.GetTemporaryFilelistVolumeNames(latestOnly: true, transaction: transaction); FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, latestFilelist); } IListResultFileset[] filesets = db.FilesetsWithBackupVersion.ToArray(); List <IListResultFileset> versionsToDelete = new List <IListResultFileset>(); versionsToDelete.AddRange(new SpecificVersionsRemover(this.m_options).GetFilesetsToDelete(filesets)); versionsToDelete.AddRange(new KeepTimeRemover(this.m_options).GetFilesetsToDelete(filesets)); versionsToDelete.AddRange(new RetentionPolicyRemover(this.m_options).GetFilesetsToDelete(filesets)); // When determining the number of full versions to keep, we need to ignore the versions already marked for removal. versionsToDelete.AddRange(new KeepVersionsRemover(this.m_options).GetFilesetsToDelete(filesets.Except(versionsToDelete))); if (!m_options.AllowFullRemoval && filesets.Length == versionsToDelete.Count) { Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal"); versionsToDelete = versionsToDelete.OrderBy(x => x.Version).Skip(1).ToList(); } if (versionsToDelete.Count > 0) { Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", versionsToDelete.Count); } var lst = db.DropFilesetsFromTable(versionsToDelete.Select(x => x.Time).ToArray(), transaction).ToArray(); foreach (var f in lst) { db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction); } if (!m_options.Dryrun) { transaction.Commit(); transaction = db.BeginTransaction(); } foreach (var f in lst) { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, transaction); return; } if (!m_options.Dryrun) { backend.Delete(f.Key, f.Value); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key); } } if (sharedManager == null) { backend.WaitForComplete(db, transaction); } else { backend.WaitForEmpty(db, transaction); } var count = lst.Length; if (!m_options.Dryrun) { if (count == 0) { Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted"); } else { Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count); } } else { if (count == 0) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted"); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count); } if (count > 0 && m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files"); } } if (!m_options.NoAutoCompact && (forceCompact || versionsToDelete.Count > 0)) { m_result.CompactResults = new CompactResults(m_result); new CompactHandler(m_backendurl, m_options,
(CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager); } m_result.SetResults(versionsToDelete.Select(v => new Tuple <long, DateTime>(v.Version, v.Time)), m_options.Dryrun); } }
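// --- Editor's note (added): not part of the original source ---
// In the DoRun variant above, the set of filesets to delete is the union of four policies,
// applied in order (the option names in parentheses are assumptions about what drives each
// remover):
//   SpecificVersionsRemover  - versions the user listed explicitly (--version)
//   KeepTimeRemover          - filesets older than the cutoff (--keep-time)
//   RetentionPolicyRemover   - filesets thinned out by interval rules (--retention-policy)
//   KeepVersionsRemover      - surplus versions (--keep-versions), evaluated against the
//                              filesets that remain after the other removers have run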
public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact) { using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) { if (!hasVerifiedBackend && !m_options.NoBackendverification) { FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter); } var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple <long, DateTime>(b, a.Value)); var toDelete = m_options.GetFilesetsToDelete(db.FilesetTimes.Select(x => x.Value).ToArray()); if (toDelete != null && toDelete.Length > 0) { m_result.AddMessage(string.Format("Deleting {0} remote fileset(s) ...", toDelete.Length)); } var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray(); foreach (var f in lst) { db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction); } if (!m_options.Dryrun) { transaction.Commit(); transaction = db.BeginTransaction(); } foreach (var f in lst) { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, transaction); return; } if (!m_options.Dryrun) { backend.Delete(f.Key, f.Value); } else { m_result.AddDryrunMessage(string.Format("Would delete remote fileset: {0}", f.Key)); } } backend.WaitForComplete(db, transaction); var count = lst.Length; if (!m_options.Dryrun) { if (count == 0) { m_result.AddMessage("No remote filesets were deleted"); } else { m_result.AddMessage(string.Format("Deleted {0} remote fileset(s)", count)); } } else { if (count == 0) { m_result.AddDryrunMessage("No remote filesets would be deleted"); } else { m_result.AddDryrunMessage(string.Format("{0} remote fileset(s) would be deleted", count)); } if (count > 0 && m_options.Dryrun) { m_result.AddDryrunMessage("Remove --dry-run to actually delete files"); } } if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0))) { m_result.CompactResults = new CompactResults(m_result); new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction); } m_result.SetResults( from n in filesetNumbers where toDelete.Contains(n.Item2) select n, m_options.Dryrun); } }
public void RunRepairRemote() { if (!System.IO.File.Exists(m_options.Dbpath)) { throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath), "RepairDatabaseFileDoesNotExist"); } m_result.OperationProgressUpdater.UpdateProgress(0); using (var db = new LocalRepairDatabase(m_options.Dbpath)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) { m_result.SetDatabase(db); Utility.UpdateOptionsFromDb(db, m_options); Utility.VerifyParameters(db, m_options); if (db.PartiallyRecreated) { throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated"); } if (db.RepairInProgress) { throw new UserInformationException("An attempt was made to repair the database, but the repair did not complete. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsInRepairState"); } var tp = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter, null); var buffer = new byte[m_options.Blocksize]; var blockhasher = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm); if (blockhasher == null) { throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported"); } if (!blockhasher.CanReuseTransform) { throw new UserInformationException(Strings.Common.InvalidCryptoSystem(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported"); } var hashsize = blockhasher.HashSize / 8; var progress = 0; var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count(); if (m_options.Dryrun) { if (tp.ParsedVolumes.Count() == 0 && tp.OtherVolumes.Count() > 0) { if (tp.BackupPrefixes.Length == 1) { throw new UserInformationException(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup prefix?", m_options.Prefix, tp.BackupPrefixes[0]), "RemoteFolderEmptyWithPrefix"); } else { throw new UserInformationException(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes)), "RemoteFolderEmptyWithPrefix"); } } else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0) { throw new UserInformationException(string.Format("No files were missing, but {0} remote files were found, did you mean to run recreate-database?", tp.ExtraVolumes.Count()), "NoRemoteFilesMissing"); } } if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0) { if (tp.VerificationRequiredVolumes.Any()) { using (var testdb = new LocalTestDatabase(db)) { foreach (var n in tp.VerificationRequiredVolumes) { try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress); long size; string hash; KeyValuePair <string, IEnumerable <KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string> > > res; using (var tf = backend.GetWithInfo(n.Name, out size, out hash)) res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, 1); if (res.Value.Any()) { throw new
Exception(string.Format("Remote verification failure: {0}", res.Value.First())); } if (!m_options.Dryrun) { Logging.Log.WriteInformationMessage(LOGTAG, "CapturedRemoteFileHash", "Sucessfully captured hash for {0}, updating database", n.Name); db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash); } } catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RemoteFileVerificationError", ex, "Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message); if (ex is System.Threading.ThreadAbortException) { throw; } } } } } // TODO: It is actually possible to use the extra files if we parse them foreach (var n in tp.ExtraVolumes) { try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgess); // If this is a new index file, we can accept it if it matches our local data // This makes it possible to augment the remote store with new index data if (n.FileType == RemoteVolumeType.Index && m_options.IndexfilePolicy != Options.IndexFileStrategy.None) { try { string hash; long size; using (var tf = backend.GetWithInfo(n.File.Name, out size, out hash)) using (var ifr = new IndexVolumeReader(n.CompressionModule, tf, m_options, m_options.BlockhashSize)) { foreach (var rv in ifr.Volumes) { var entry = db.GetRemoteVolume(rv.Filename); if (entry.ID < 0) { throw new Exception(string.Format("Unknown remote file {0} detected", rv.Filename)); } if (!new [] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(entry.State)) { throw new Exception(string.Format("Volume {0} has local state {1}", rv.Filename, entry.State)); } if (entry.Hash != rv.Hash || entry.Size != rv.Length || !new [] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(entry.State)) { throw new Exception(string.Format("Volume {0} hash/size mismatch ({1} - {2}) vs ({3} - {4})", rv.Filename, entry.Hash, entry.Size, rv.Hash, rv.Length)); } db.CheckAllBlocksAreInVolume(rv.Filename, rv.Blocks); } var blocksize = m_options.Blocksize; foreach (var ixb in ifr.BlockLists) { db.CheckBlocklistCorrect(ixb.Hash, ixb.Length, ixb.Blocklist, blocksize, hashsize); } var selfid = db.GetRemoteVolumeID(n.File.Name); foreach (var rv in ifr.Volumes) { db.AddIndexBlockLink(selfid, db.GetRemoteVolumeID(rv.Filename), null); } } // All checks fine, we accept the new index file Logging.Log.WriteInformationMessage(LOGTAG, "AcceptNewIndexFile", "Accepting new index file {0}", n.File.Name); db.RegisterRemoteVolume(n.File.Name, RemoteVolumeType.Index, size, RemoteVolumeState.Uploading); db.UpdateRemoteVolume(n.File.Name, RemoteVolumeState.Verified, size, hash); continue; } catch (Exception rex) { Logging.Log.WriteErrorMessage(LOGTAG, "FailedNewIndexFile", rex, "Failed to accept new index file: {0}, message: {1}", n.File.Name, rex.Message); } } if (!m_options.Dryrun) { db.RegisterRemoteVolume(n.File.Name, n.FileType, n.File.Size, RemoteVolumeState.Deleting); backend.Delete(n.File.Name, n.File.Size); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteFile", "would delete file {0}", n.File.Name); } } catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "FailedExtraFileCleanup", ex, "Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message); if (ex is System.Threading.ThreadAbortException) { throw; } } } if (!m_options.RebuildMissingDblockFiles) 
{ var missingDblocks = tp.MissingVolumes.Where(x => x.Type == RemoteVolumeType.Blocks).ToArray(); if (missingDblocks.Length > 0) { throw new UserInformationException($"The backup storage destination is missing data files. You can either enable `--rebuild-missing-dblock-files` or run the purge command to remove these files. The following files are missing: {string.Join(", ", missingDblocks.Select(x => x.Name))}", "MissingDblockFiles"); } } foreach (var n in tp.MissingVolumes) { IDisposable newEntry = null; try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress); if (n.Type == RemoteVolumeType.Files) { var filesetId = db.GetFilesetIdFromRemotename(n.Name); var w = new FilesetVolumeWriter(m_options, DateTime.UtcNow); newEntry = w; w.SetRemoteFilename(n.Name); db.WriteFileset(w, filesetId, null); w.Close(); if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadFileset", "would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)); } else { db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null); backend.Put(w); } } else if (n.Type == RemoteVolumeType.Index) { var w = new IndexVolumeWriter(m_options); newEntry = w; w.SetRemoteFilename(n.Name); var h = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm); foreach (var blockvolume in db.GetBlockVolumesFromIndexName(n.Name)) { w.StartVolume(blockvolume.Name); var volumeid = db.GetRemoteVolumeID(blockvolume.Name); foreach (var b in db.GetBlocks(volumeid)) { w.AddBlock(b.Hash, b.Size); } w.FinishVolume(blockvolume.Hash, blockvolume.Size); if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full) { foreach (var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize)) { var bh = Convert.ToBase64String(h.ComputeHash(b.Item2, 0, b.Item3)); if (bh != b.Item1) { throw new Exception(string.Format("Internal consistency check failed, generated index block has wrong hash, {0} vs {1}", bh, b.Item1)); } w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3); } } } w.Close(); if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadIndexFile", "would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)); } else { db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null); backend.Put(w); } } else if (n.Type == RemoteVolumeType.Blocks) { var w = new BlockVolumeWriter(m_options); newEntry = w; w.SetRemoteFilename(n.Name); using (var mbl = db.CreateBlockList(n.Name)) { //First we grab all known blocks from local files foreach (var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize)) { var hash = block.Hash; var size = (int)block.Size; foreach (var source in block.Sources) { var file = source.File; var offset = source.Offset; try { if (System.IO.File.Exists(file)) { using (var f = System.IO.File.OpenRead(file)) { f.Position = offset; if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size)) { var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size)); if (newhash == hash) { if (mbl.SetBlockRestored(hash, size)) { w.AddBlock(hash, buffer, 0, size,
Duplicati.Library.Interface.CompressionHint.Default); } break; } } } } } catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "FileAccessError", ex, "Failed to access file: {0}", file); } } } //Then we grab all remote volumes that have the missing blocks foreach (var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend)) { try { using (var tmpfile = vol.TempFile) using (var f = new BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options)) foreach (var b in f.Blocks) { if (mbl.SetBlockRestored(b.Key, b.Value)) { if (f.ReadBlock(b.Key, buffer) == b.Value) { w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default); } } } } catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RemoteFileAccessError", ex, "Failed to access remote file: {0}", vol.Name); } } // If we managed to recover all blocks, NICE! var missingBlocks = mbl.GetMissingBlocks().Count(); if (missingBlocks > 0) { Logging.Log.WriteInformationMessage(LOGTAG, "RepairMissingBlocks", "Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name); foreach (var f in mbl.GetFilesetsUsingMissingBlocks()) { Logging.Log.WriteInformationMessage(LOGTAG, "AffectedFilesetName", f.Name); } var recoverymsg = string.Format("If you want to continue working with the database, you can use the \"{0}\" and \"{1}\" commands to purge the missing data from the database and the remote storage.", "list-broken-files", "purge-broken-files"); if (!m_options.Dryrun) { Logging.Log.WriteInformationMessage(LOGTAG, "RecoverySuggestion", "This may be fixed by deleting the filesets and running repair again"); throw new UserInformationException(string.Format("Repair not possible, missing {0} blocks.\n" + recoverymsg, missingBlocks), "RepairIsNotPossible"); } else { Logging.Log.WriteInformationMessage(LOGTAG, "RecoverySuggestion", recoverymsg); } } else { if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadBlockFile", "would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)); } else { db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null); backend.Put(w); } } } } } catch (Exception ex) { if (newEntry != null) { try { newEntry.Dispose(); } catch { } finally { newEntry = null; } } Logging.Log.WriteErrorMessage(LOGTAG, "CleanupMissingFileError", ex, "Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message); if (ex is System.Threading.ThreadAbortException) { throw; } } } } else { Logging.Log.WriteInformationMessage(LOGTAG, "DatabaseIsSynchronized", "Destination and database are synchronized, not making any changes"); } m_result.OperationProgressUpdater.UpdateProgress(1); backend.WaitForComplete(db, null); db.WriteResults(); } }
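// --- Editor's note (added): not part of the original source ---
// The dblock rebuild above recovers missing blocks from two sources, in order: first from
// local source files, re-reading each recorded (file, offset) pair and accepting the bytes
// only if they still hash to the expected block hash; then from other remote volumes that
// contain copies of the missing blocks. Only if both passes leave blocks unrecovered does
// the repair fail and point the user at list-broken-files / purge-broken-files.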
private void DoRun(Database.LocalPurgeDatabase db, Library.Utility.IFilter filter, Action <System.Data.IDbCommand, long, string> filtercommand, float pgoffset, float pgspan) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Begin); Logging.Log.WriteInformationMessage(LOGTAG, "StartingPurge", "Starting purge operation"); var doCompactStep = !m_options.NoAutoCompact && filtercommand == null; using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) { if (db.PartiallyRecreated) { throw new UserInformationException("The purge command does not work on partially recreated databases", "PurgeNotAllowedOnPartialDatabase"); } if (db.RepairInProgress && filtercommand == null) { throw new UserInformationException(string.Format("The purge command does not work on an incomplete database, try the {0} operation.", "purge-broken-files"), "PurgeNotAllowedOnIncompleteDatabase"); } var versions = db.GetFilesetIDs(m_options.Time, m_options.Version).OrderByDescending(x => x).ToArray(); if (versions.Length <= 0) { throw new UserInformationException("No filesets matched the supplied time or versions", "NoFilesetFoundForTimeOrVersion"); } var orphans = db.CountOrphanFiles(null); if (orphans != 0) { throw new UserInformationException(string.Format("Unable to start the purge process as there are {0} orphan file(s)", orphans), "CannotPurgeWithOrphans"); } Utility.UpdateOptionsFromDb(db, m_options); Utility.VerifyParameters(db, m_options); if (filtercommand == null) { db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, false, null); if (m_options.NoBackendverification) { FilelistProcessor.VerifyLocalList(backend, db); } else { FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, null); } } var filesets = db.FilesetTimes.OrderByDescending(x => x.Value).ToArray(); var versionprogress = ((doCompactStep ? 
0.75f : 1.0f) / versions.Length) * pgspan; var currentprogress = pgoffset; var progress = 0; m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Process); m_result.OperationProgressUpdater.UpdateProgress(currentprogress); // Reverse makes sure we re-write the old versions first foreach (var versionid in versions.Reverse()) { progress++; Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingFilelistVolumes", "Processing filelist volume {0} of {1}", progress, versions.Length); using (var tr = db.BeginTransaction()) { var ix = -1; for (var i = 0; i < filesets.Length; i++) { if (filesets[i].Key == versionid) { ix = i; break; } } if (ix < 0) { throw new InvalidProgramException(string.Format("Fileset was reported with id {0}, but could not be found?", versionid)); } var secs = 0; while (secs < 60) { secs++; var tfn = Volumes.VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, filesets[ix].Value.AddSeconds(secs)); if (db.GetRemoteVolumeID(tfn, tr) < 0) { break; } } var tsOriginal = filesets[ix].Value; var ts = tsOriginal.AddSeconds(secs); var prevfilename = db.GetRemoteVolumeNameForFileset(filesets[ix].Key, tr); if (secs >= 60) { throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is more than 60 seconds away from the original", prevfilename, ts)); } if (ix != 0 && filesets[ix - 1].Value <= ts) { throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is larger than the next timestamp {2}", prevfilename, ts, filesets[ix - 1].Value)); } using (var tempset = db.CreateTemporaryFileset(versionid, tr)) { if (filtercommand == null) { tempset.ApplyFilter(filter); } else { tempset.ApplyFilter(filtercommand); } if (tempset.RemovedFileCount == 0) { Logging.Log.WriteInformationMessage(LOGTAG, "NotWritingNewFileset", "Not writing a new fileset for {0} as it was not changed", prevfilename); currentprogress += versionprogress; tr.Rollback(); continue; } else { using (var tf = new Library.Utility.TempFile()) using (var vol = new Volumes.FilesetVolumeWriter(m_options, ts)) { var isOriginalFilesetFullBackup = db.IsFilesetFullBackup(tsOriginal); var newids = tempset.ConvertToPermanentFileset(vol.RemoteFilename, ts, isOriginalFilesetFullBackup); vol.VolumeID = newids.Item1; vol.CreateFilesetFile(isOriginalFilesetFullBackup); Logging.Log.WriteInformationMessage(LOGTAG, "ReplacingFileset", "Replacing fileset {0} with {1} which has {2} fewer file(s) ({3} reduction)", prevfilename, vol.RemoteFilename, tempset.RemovedFileCount, Library.Utility.Utility.FormatSizeString(tempset.RemovedFileSize)); db.WriteFileset(vol, newids.Item2, tr); m_result.RemovedFileSize += tempset.RemovedFileSize; m_result.RemovedFileCount += tempset.RemovedFileCount; m_result.RewrittenFileLists++; currentprogress += (versionprogress / 2); m_result.OperationProgressUpdater.UpdateProgress(currentprogress); if (m_options.Dryrun || m_options.FullResult) { foreach (var fe in tempset.ListAllDeletedFiles()) { var msg = string.Format(" Purging file {0} ({1})", fe.Key, Library.Utility.Utility.FormatSizeString(fe.Value)); Logging.Log.WriteProfilingMessage(LOGTAG, "PurgeFile", msg); Logging.Log.WriteVerboseMessage(LOGTAG, "PurgeFile", msg); if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldPurgeFile", msg); } } if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldWriteRemoteFiles", "Would write files to remote storage"); } Logging.Log.WriteVerboseMessage(LOGTAG, "WritingRemoteFiles", "Writing
files to remote storage"); } if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadAndDelete", "Would upload file {0} ({1}) and delete file {2}, removing {3} files", vol.RemoteFilename, Library.Utility.Utility.FormatSizeString(vol.Filesize), prevfilename, tempset.RemovedFileCount); tr.Rollback(); } else { var lst = db.DropFilesetsFromTable(new[] { filesets[ix].Value }, tr).ToArray(); foreach (var f in lst) { db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, tr); } tr.Commit(); backend.Put(vol, synchronous: true); backend.Delete(prevfilename, -1, true); backend.FlushDbMessages(); } } } } } currentprogress += (versionprogress / 2); m_result.OperationProgressUpdater.UpdateProgress(currentprogress); } if (doCompactStep) { if (m_result.RewrittenFileLists == 0) { Logging.Log.WriteInformationMessage(LOGTAG, "SkippingCompacting", "Skipping compacting as no new volumes were written"); } else { m_result.OperationProgressUpdater.UpdateProgress(pgoffset + (0.75f * pgspan)); m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Compact); m_result.CompactResults = new CompactResults(m_result); using (var cdb = new Database.LocalDeleteDatabase(db)) { var tr = cdb.BeginTransaction(); try { new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(cdb, true, ref tr, backend); } catch { try { tr.Rollback(); } catch { } } finally { try { tr.Commit(); } catch { } } } } m_result.OperationProgressUpdater.UpdateProgress(pgoffset + pgspan); m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Complete); } backend.WaitForComplete(db, null); } }
public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact, BackendManager sharedManager) { // Workaround where we allow a running backendmanager to be used using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null) { var backend = bk ?? sharedManager; if (!hasVerifiedBackend && !m_options.NoBackendverification) { FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter); } var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple <long, DateTime>(b, a.Value)).ToList(); var sets = db.FilesetTimes.Select(x => x.Value).ToArray(); var toDelete = GetFilesetsToDelete(db, sets); if (!m_options.AllowFullRemoval && sets.Length == toDelete.Length) { Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal"); toDelete = toDelete.Skip(1).ToArray(); } if (toDelete != null && toDelete.Length > 0) { Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", toDelete.Length); } var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray(); foreach (var f in lst) { db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction); } if (!m_options.Dryrun) { transaction.Commit(); transaction = db.BeginTransaction(); } foreach (var f in lst) { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, transaction); return; } if (!m_options.Dryrun) { backend.Delete(f.Key, f.Value); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key); } } if (sharedManager == null) { backend.WaitForComplete(db, transaction); } else { backend.WaitForEmpty(db, transaction); } var count = lst.Length; if (!m_options.Dryrun) { if (count == 0) { Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted"); } else { Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count); } } else { if (count == 0) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted"); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count); } if (count > 0 && m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files"); } } if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0))) { m_result.CompactResults = new CompactResults(m_result); new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager); } m_result.SetResults( from n in filesetNumbers where toDelete.Contains(n.Item2) select n, m_options.Dryrun); } }
private async Task RunAsync(string[] sources, Library.Utility.IFilter filter) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin); // New isolated scope for each operation using (new IsolatedChannelScope()) using (m_database = new LocalBackupDatabase(m_options.Dbpath, m_options)) { m_result.SetDatabase(m_database); m_result.Dryrun = m_options.Dryrun; // Check the database integrity Utility.UpdateOptionsFromDb(m_database, m_options); Utility.VerifyParameters(m_database, m_options); var probe_path = m_database.GetFirstPath(); if (probe_path != null && Duplicati.Library.Utility.Utility.GuessDirSeparator(probe_path) != System.IO.Path.DirectorySeparatorChar.ToString()) { throw new UserInformationException(string.Format("The backup contains files that belong to another operating system. Proceeding with a backup would cause the database to contain paths from two different operating systems, which is not supported. To proceed without losing remote data, delete all filesets and make sure the --{0} option is set, then run the backup again to re-use the existing data on the remote store.", "no-auto-compact"), "CrossOsDatabaseReuseNotSupported"); } if (m_database.PartiallyRecreated) { throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated"); } if (m_database.RepairInProgress) { throw new UserInformationException("An attempt was made to repair the database, but the repair did not complete. This database may be incomplete and the backup process cannot continue. You may delete the local database and attempt to repair it again.", "DatabaseRepairInProgress"); } // If there is no filter, we set an empty filter to simplify the code // If there is a filter, we make sure that the sources are included m_filter = filter ??
new Library.Utility.FilterExpression(); m_sourceFilter = new Library.Utility.FilterExpression(sources, true); Task parallelScanner = null; Task uploader = null; try { // Setup runners and instances here using (var db = new Backup.BackupDatabase(m_database, m_options)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database)) using (var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp)) using (var stats = new Backup.BackupStatsCollector(m_result)) using (var bk = new Common.BackendHandler(m_options, m_backendurl, db, stats, m_result.TaskReader)) // Keep a reference to these channels to avoid shutdown using (var uploadtarget = ChannelManager.GetChannel(Backup.Channels.BackendRequest.ForWrite)) { long filesetid; var counterToken = new CancellationTokenSource(); using (var snapshot = GetSnapshot(sources, m_options)) { try { // Start parallel scan, or use the database if (m_options.DisableFileScanner) { var d = m_database.GetLastBackupFileCountAndSize(); m_result.OperationProgressUpdater.UpdatefileCount(d.Item1, d.Item2, true); } else { parallelScanner = Backup.CountFilesHandler.Run(sources, snapshot, m_result, m_options, m_sourceFilter, m_filter, m_result.TaskReader, counterToken.Token); } // Make sure the database is sane await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, true); // Start the uploader process uploader = Backup.BackendUploader.Run(bk, m_options, db, m_result, m_result.TaskReader, stats); // If we have an interrupted backup, grab the last incomplete fileset string lasttempfilelist = null; long lasttempfileid = -1; if (!m_options.DisableSyntheticFilelist) { var candidates = (await db.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToArray(); if (candidates.Length > 0) { lasttempfileid = candidates.Last().Key; lasttempfilelist = m_database.GetRemoteVolumeFromID(lasttempfileid).Name; } } // TODO: Rewrite to use the uploader process, or the BackendHandler interface // Do a remote verification, unless disabled PreBackupVerify(backend, lasttempfilelist); // If the previous backup was interrupted, send a synthetic list await Backup.UploadSyntheticFilelist.Run(db, m_options, m_result, m_result.TaskReader, lasttempfilelist, lasttempfileid); // Grab the previous backup ID, if any var prevfileset = m_database.FilesetTimes.FirstOrDefault(); if (prevfileset.Value.ToUniversalTime() > m_database.OperationTimestamp.ToUniversalTime()) { throw new Exception(string.Format("The previous backup has time {0}, but this backup has time {1}. Something is wrong with the clock.", prevfileset.Value.ToLocalTime(), m_database.OperationTimestamp.ToLocalTime())); } var lastfilesetid = prevfileset.Value.Ticks == 0 ?
-1 : prevfileset.Key; // Rebuild any index files that are missing await Backup.RecreateMissingIndexFiles.Run(db, m_options, m_result, m_result.TaskReader); // This should be removed as the lookups are no longer used m_database.BuildLookupTable(m_options); // Prepare the operation by registering the filelist m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles); var repcnt = 0; while (repcnt < 100 && await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0) { filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++)); } if (await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0) { throw new Exception("Unable to generate a unique fileset name"); } var filesetvolumeid = await db.RegisterRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary); filesetid = await db.CreateFilesetAsync(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time); // create USN-based scanner if enabled var journalService = GetJournalService(sources, snapshot, filter, lastfilesetid); // Run the backup operation if (await m_result.TaskReader.ProgressAsync) { await RunMainOperation(sources, snapshot, journalService, db, stats, m_options, m_sourceFilter, m_filter, m_result, m_result.TaskReader, lastfilesetid).ConfigureAwait(false); } } finally { //If the scanner is still running for some reason, make sure we kill it now counterToken.Cancel(); } } // Ensure the database is in a sane state after adding data using (new Logging.Timer(LOGTAG, "VerifyConsistency", "VerifyConsistency")) await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, false); // Send the actual filelist if (await m_result.TaskReader.ProgressAsync) { await Backup.UploadRealFilelist.Run(m_result, db, m_options, filesetvolume, filesetid, m_result.TaskReader); } // Wait for upload completion m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload); var lastVolumeSize = await FlushBackend(m_result, uploadtarget, uploader).ConfigureAwait(false); // Make sure we have the database up-to-date await db.CommitTransactionAsync("CommitAfterUpload", false); // TODO: Remove this later m_transaction = m_database.BeginTransaction(); if (await m_result.TaskReader.ProgressAsync) { CompactIfRequired(backend, lastVolumeSize); } if (m_options.UploadVerificationFile && await m_result.TaskReader.ProgressAsync) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload); FilelistProcessor.UploadVerificationFile(backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction); } if (m_options.Dryrun) { m_transaction.Rollback(); m_transaction = null; } else { using (new Logging.Timer(LOGTAG, "CommitFinalizingBackup", "CommitFinalizingBackup")) m_transaction.Commit(); m_transaction = null; if (m_result.TaskControlRendevouz() != TaskControlState.Stop) { if (m_options.NoBackendverification) { UpdateStorageStatsFromDatabase(); } else { PostBackupVerification(); } } } m_database.WriteResults(); m_database.PurgeLogData(m_options.LogRetention); if (m_options.AutoVacuum) { m_database.Vacuum(); } m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete); return; } } catch (Exception ex) { var aex = BuildException(ex, uploader, parallelScanner); Logging.Log.WriteErrorMessage(LOGTAG, "FatalError", ex, "Fatal error"); if (aex == ex) { throw; } throw aex; } finally { if (parallelScanner != null && !parallelScanner.IsCompleted) 
{ parallelScanner.Wait(500); } // TODO: We want to commit? always? if (m_transaction != null) { try { m_transaction.Rollback(); } catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RollbackError", ex, "Rollback error: {0}", ex.Message); } } } } }
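// --- Editor's note (added): not part of the original source ---
// Phase order driven by RunAsync above, as reported through OperationProgressUpdater:
//   Backup_Begin -> Backup_PreBackupVerify -> Backup_ProcessingFiles -> Backup_WaitForUpload
//   -> Backup_VerificationUpload (optional) -> Backup_PostBackupVerify -> Backup_PostBackupTest
//   (optional) -> Backup_Complete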
internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction, BackendManager sharedBackend) { var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction); report.ReportCompactData(); if (report.ShouldReclaim || report.ShouldCompact) { // Workaround where we allow a running backendmanager to be used using (var bk = sharedBackend == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null) { var backend = bk ?? sharedBackend; if (!hasVerifiedBackend && !m_options.NoBackendverification) { FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter); } BlockVolumeWriter newvol = new BlockVolumeWriter(m_options); newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction); IndexVolumeWriter newvolindex = null; if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None) { newvolindex = new IndexVolumeWriter(m_options); newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction); db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction); } long blocksInVolume = 0; byte[] buffer = new byte[m_options.Blocksize]; var remoteList = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray(); //These are for bookkeeping var uploadedVolumes = new List <KeyValuePair <string, long> >(); var deletedVolumes = new List <KeyValuePair <string, long> >(); var downloadedVolumes = new List <KeyValuePair <string, long> >(); //We start by deleting unused volumes to save space before uploading new stuff var fullyDeleteable = (from v in remoteList where report.DeleteableVolumes.Contains(v.Name) select(IRemoteVolume) v).ToList(); deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction)); // This list is used to pick up unused volumes, // so they can be deleted once the upload of the // required fragments is complete var deleteableVolumes = new List <IRemoteVolume>(); if (report.ShouldCompact) { newvolindex?.StartVolume(newvol.RemoteFilename); var volumesToDownload = (from v in remoteList where report.CompactableVolumes.Contains(v.Name) select(IRemoteVolume) v).ToList(); using (var q = db.CreateBlockQueryHelper(transaction)) { foreach (var entry in new AsyncDownloader(volumesToDownload, backend)) { using (var tmpfile = entry.TempFile) { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, transaction); return(false); } downloadedVolumes.Add(new KeyValuePair <string, long>(entry.Name, entry.Size)); var inst = VolumeBase.ParseFilename(entry.Name); using (var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options)) { foreach (var e in f.Blocks) { if (q.UseBlock(e.Key, e.Value, transaction)) { //TODO: How do we get the compression hint? Reverse query for filename in db? 
var s = f.ReadBlock(e.Key, buffer); if (s != e.Value) { throw new Exception(string.Format("Size mismatch problem for block {0}, {1} vs {2}", e.Key, s, e.Value)); } newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible); if (newvolindex != null) { newvolindex.AddBlock(e.Key, e.Value); } db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction); blocksInVolume++; if (newvol.Filesize > m_options.VolumeSize) { uploadedVolumes.Add(new KeyValuePair <string, long>(newvol.RemoteFilename, newvol.Filesize)); if (newvolindex != null) { uploadedVolumes.Add(new KeyValuePair <string, long>(newvolindex.RemoteFilename, newvolindex.Filesize)); } if (!m_options.Dryrun) { backend.Put(newvol, newvolindex); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize)); } newvol = new BlockVolumeWriter(m_options); newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction); if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None) { newvolindex = new IndexVolumeWriter(m_options); newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction); db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction); newvolindex.StartVolume(newvol.RemoteFilename); } blocksInVolume = 0; //After we upload this volume, we can delete all previously encountered volumes deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction)); deleteableVolumes = new List <IRemoteVolume>(); } } } } deleteableVolumes.Add(entry); } } if (blocksInVolume > 0) { uploadedVolumes.Add(new KeyValuePair <string, long>(newvol.RemoteFilename, newvol.Filesize)); if (newvolindex != null) { uploadedVolumes.Add(new KeyValuePair <string, long>(newvolindex.RemoteFilename, newvolindex.Filesize)); } if (!m_options.Dryrun) { backend.Put(newvol, newvolindex); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize)); } } else { db.RemoveRemoteVolume(newvol.RemoteFilename, transaction); if (newvolindex != null) { db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction); newvolindex.FinishVolume(null, 0); } } } } else { newvolindex?.Dispose(); newvol.Dispose(); } deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction)); var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value); var deletedSize = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value); var uploadSize = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value); m_result.DeletedFileCount = deletedVolumes.Count; m_result.DownloadedFileCount = downloadedVolumes.Count; m_result.UploadedFileCount = uploadedVolumes.Count; m_result.DeletedFileSize = deletedSize; m_result.DownloadedFileSize = downloadSize; m_result.UploadedFileSize = uploadSize; m_result.Dryrun = m_options.Dryrun; if (m_result.Dryrun) { if (downloadedVolumes.Count == 0) { Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults", "Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)); } else { Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults",
"Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)); } } else { if (m_result.DownloadedFileCount == 0) { Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)); } else { Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(downloadSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)); } } backend.WaitForComplete(db, transaction); } m_result.EndTime = DateTime.UtcNow; return((m_result.DeletedFileCount + m_result.UploadedFileCount) > 0); } else { m_result.EndTime = DateTime.UtcNow; return(false); } }
public void RunRepairRemote() { if (!System.IO.File.Exists(m_options.Dbpath)) { throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath)); } m_result.OperationProgressUpdater.UpdateProgress(0); using (var db = new LocalRepairDatabase(m_options.Dbpath)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) { m_result.SetDatabase(db); Utility.VerifyParameters(db, m_options); var tp = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter); var buffer = new byte[m_options.Blocksize]; var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm); if (blockhasher == null) { throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm)); } if (!blockhasher.CanReuseTransform) { throw new Exception(Strings.Foresthash.InvalidCryptoSystem(m_options.BlockHashAlgorithm)); } var hashsize = blockhasher.HashSize / 8; var progress = 0; var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count(); if (m_options.Dryrun) { if (tp.ParsedVolumes.Count() == 0 && tp.OtherVolumes.Count() > 0) { if (tp.BackupPrefixes.Length == 1) { throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup-prefix?", m_options.Prefix, tp.BackupPrefixes[0])); } else { throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup-prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes))); } } else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0) { throw new Exception(string.Format("No files were missing, but {0} remote files were found, did you mean to run recreate-database?", tp.ExtraVolumes.Count())); } } if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0) { if (tp.VerificationRequiredVolumes.Any()) { using (var testdb = new LocalTestDatabase(db)) { foreach (var n in tp.VerificationRequiredVolumes) { try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress); long size; string hash; KeyValuePair <string, IEnumerable <KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string> > > res; using (var tf = backend.GetWithInfo(n.Name, out size, out hash)) res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, m_result, 1); if (res.Value.Any()) { throw new Exception(string.Format("Remote verification failure: {0}", res.Value.First())); } if (!m_options.Dryrun) { m_result.AddMessage(string.Format("Successfully captured hash for {0}, updating database", n.Name)); db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash); } } catch (Exception ex) { m_result.AddError(string.Format("Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message), ex); if (ex is System.Threading.ThreadAbortException) { throw; } } } } } // TODO: It is actually possible to use the extra files if we parse them foreach (var n in tp.ExtraVolumes) { try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress); if (!m_options.Dryrun) { db.RegisterRemoteVolume(n.File.Name,
n.FileType, RemoteVolumeState.Deleting); backend.Delete(n.File.Name, n.File.Size); } else { m_result.AddDryrunMessage(string.Format("would delete file {0}", n.File.Name)); } } catch (Exception ex) { m_result.AddError(string.Format("Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message), ex); if (ex is System.Threading.ThreadAbortException) { throw; } } } foreach (var n in tp.MissingVolumes) { IDisposable newEntry = null; try { if (m_result.TaskControlRendevouz() == TaskControlState.Stop) { backend.WaitForComplete(db, null); return; } progress++; m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgess); if (n.Type == RemoteVolumeType.Files) { var filesetId = db.GetFilesetIdFromRemotename(n.Name); var w = new FilesetVolumeWriter(m_options, DateTime.UtcNow); newEntry = w; w.SetRemoteFilename(n.Name); db.WriteFileset(w, null, filesetId); w.Close(); if (m_options.Dryrun) { m_result.AddDryrunMessage(string.Format("would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size))); } else { db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null); backend.Put(w); } } else if (n.Type == RemoteVolumeType.Index) { var w = new IndexVolumeWriter(m_options); newEntry = w; w.SetRemoteFilename(n.Name); foreach (var blockvolume in db.GetBlockVolumesFromIndexName(n.Name)) { w.StartVolume(blockvolume.Name); var volumeid = db.GetRemoteVolumeID(blockvolume.Name); foreach (var b in db.GetBlocks(volumeid)) { w.AddBlock(b.Hash, b.Size); } w.FinishVolume(blockvolume.Hash, blockvolume.Size); if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full) { foreach (var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize)) { w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3); } } } w.Close(); if (m_options.Dryrun) { m_result.AddDryrunMessage(string.Format("would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size))); } else { db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null); backend.Put(w); } } else if (n.Type == RemoteVolumeType.Blocks) { var w = new BlockVolumeWriter(m_options); newEntry = w; w.SetRemoteFilename(n.Name); using (var mbl = db.CreateBlockList(n.Name)) { //First we grab all known blocks from local files foreach (var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize)) { var hash = block.Hash; var size = (int)block.Size; foreach (var source in block.Sources) { var file = source.File; var offset = source.Offset; try { if (System.IO.File.Exists(file)) { using (var f = System.IO.File.OpenRead(file)) { f.Position = offset; if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size)) { var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size)); if (newhash == hash) { if (mbl.SetBlockRestored(hash, size)) { w.AddBlock(hash, buffer, 0, size, Duplicati.Library.Interface.CompressionHint.Default); } break; } } } } } catch (Exception ex) { m_result.AddError(string.Format("Failed to access file: {0}", file), ex); } } } //Then we grab all remote volumes that have the missing blocks foreach (var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend)) { try { using (var tmpfile = vol.TempFile) using (var f = new 
BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options)) foreach (var b in f.Blocks) { if (mbl.SetBlockRestored(b.Key, b.Value)) { if (f.ReadBlock(b.Key, buffer) == b.Value) { w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default); } } } } catch (Exception ex) { m_result.AddError(string.Format("Failed to access remote file: {0}", vol.Name), ex); } } // If we managed to recover all blocks, NICE! var missingBlocks = mbl.GetMissingBlocks().Count(); if (missingBlocks > 0) { //TODO: How do we handle this situation? m_result.AddMessage(string.Format("Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name)); foreach (var f in mbl.GetFilesetsUsingMissingBlocks()) { m_result.AddMessage(f.Name); } if (!m_options.Dryrun) { m_result.AddMessage("This may be fixed by deleting the filesets and running repair again"); throw new Exception(string.Format("Repair not possible, missing {0} blocks!!!", missingBlocks)); } } else { if (m_options.Dryrun) { m_result.AddDryrunMessage(string.Format("would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size))); } else { db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null); backend.Put(w); } } } } } catch (Exception ex) { if (newEntry != null) { try { newEntry.Dispose(); } catch { } finally { newEntry = null; } } m_result.AddError(string.Format("Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message), ex); if (ex is System.Threading.ThreadAbortException) { throw; } } } } else { m_result.AddMessage("Destination and database are synchronized, not making any changes"); } m_result.OperationProgressUpdater.UpdateProgress(1); backend.WaitForComplete(db, null); db.WriteResults(); } }
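// A minimal standalone sketch of the local block-recovery check performed in the
// Blocks branch above: read a candidate block from a source file at a recorded
// offset and accept it only if it still hashes to the recorded value. The class
// and method names here are hypothetical illustrations, not Duplicati API, and
// SHA-256 is assumed as the block hash (the default, but configurable).
using System;
using System.IO;
using System.Security.Cryptography;

static class BlockRecoverySketch
{
    // Fills 'buffer' with the block at 'offset' in 'path' and returns true only
    // when exactly 'size' bytes could be read and their hash matches.
    public static bool TryReadVerifiedBlock(string path, long offset, int size, string expectedBase64Hash, byte[] buffer)
    {
        if (!File.Exists(path))
            return false;

        using (var f = File.OpenRead(path))
        {
            f.Position = offset;

            // Read exactly 'size' bytes; a short read means the file shrank or changed
            var read = 0;
            while (read < size)
            {
                var r = f.Read(buffer, read, size - read);
                if (r <= 0)
                    return false;
                read += r;
            }
        }

        using (var hasher = SHA256.Create())
            return Convert.ToBase64String(hasher.ComputeHash(buffer, 0, size)) == expectedBase64Hash;
    }
}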
private void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter, RestoreResults result)
{
    // In this case, we check that the remote storage fits with the database.
    // We can then query the database and find the blocks that we need to do the restore.
    using (var database = new LocalRestoreDatabase(dbparent, m_options.Blocksize))
    using (var backend = new BackendManager(m_backendurl, m_options, result.BackendWriter, database))
    {
        database.SetResult(m_result);
        Utility.VerifyParameters(database, m_options);

        var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
        var filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);

        if (blockhasher == null)
            throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
        if (!blockhasher.CanReuseTransform)
            throw new Exception(Strings.Foresthash.InvalidCryptoSystem(m_options.BlockHashAlgorithm));
        if (filehasher == null)
            throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.FileHashAlgorithm));
        if (!filehasher.CanReuseTransform)
            throw new Exception(Strings.Foresthash.InvalidCryptoSystem(m_options.FileHashAlgorithm));

        if (!m_options.NoBackendverification)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PreRestoreVerify);
            FilelistProcessor.VerifyRemoteList(backend, m_options, database, result.BackendWriter);
        }

        // Figure out what files are to be patched, and what blocks are needed
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateFileList);
        using (new Logging.Timer("PrepareBlockList"))
            PrepareBlockAndFileList(database, m_options, filter, result);

        // Make the entire output setup
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateTargetFolders);
        using (new Logging.Timer("CreateDirectory"))
            CreateDirectoryStructure(database, m_options, result);

        // If we are patching an existing target folder, do not touch stuff that is already updated
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForExistingFiles);
        using (new Logging.Timer("ScanForExistingTargetBlocks"))
            ScanForExistingTargetBlocks(database, m_blockbuffer, blockhasher, filehasher, m_options, result);

        // Look for existing blocks in the original source files only
        using (new Logging.Timer("ScanForExistingSourceBlocksFast"))
#if DEBUG
        if (!m_options.NoLocalBlocks && !string.IsNullOrEmpty(m_options.Restorepath))
#else
        if (!string.IsNullOrEmpty(m_options.Restorepath))
#endif
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForLocalBlocks);
            ScanForExistingSourceBlocksFast(database, m_options, m_blockbuffer, blockhasher, result);
        }

        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
        {
            backend.WaitForComplete(database, null);
            return;
        }

        // If other local files already have the blocks we want, we use them instead of downloading
        if (m_options.PatchWithLocalBlocks)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PatchWithLocalBlocks);
            using (new Logging.Timer("PatchWithLocalBlocks"))
                ScanForExistingSourceBlocks(database, m_options, m_blockbuffer, blockhasher, result);
        }

        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
        {
            backend.WaitForComplete(database, null);
            return;
        }

        // Fill BLOCKS with remote sources
        var volumes = database.GetMissingVolumes().ToList();
        if (volumes.Count > 0)
        {
            m_result.AddMessage(string.Format("{0} remote files are required to restore", volumes.Count));
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_DownloadingRemoteFiles);
        }

        var brokenFiles = new List<string>();
        foreach (var blockvolume in new AsyncDownloader(volumes, backend))
        {
            try
            {
                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }

                using (var tmpfile = blockvolume.TempFile)
                using (var blocks = new BlockVolumeReader(GetCompressionModule(blockvolume.Name), tmpfile, m_options))
                    PatchWithBlocklist(database, blocks, m_options, result, m_blockbuffer);
            }
            catch (Exception ex)
            {
                brokenFiles.Add(blockvolume.Name);
                result.AddError(string.Format("Failed to patch with remote file: \"{0}\", message: {1}", blockvolume.Name, ex.Message), ex);
                if (ex is System.Threading.ThreadAbortException)
                    throw;
            }
        }

        // Reset the filehasher if it was used to verify existing files
        filehasher.Initialize();

        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
            return;

        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PostRestoreVerify);

        var fileErrors = 0L;

        // After all blocks in the files are restored, verify the file hash
        using (new Logging.Timer("RestoreVerification"))
            foreach (var file in database.GetFilesToRestore())
            {
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(database, null);
                        return;
                    }

                    result.AddVerboseMessage("Testing restored file integrity: {0}", file.Path);

                    string key;
                    long size;

                    using (var fs = m_systemIO.FileOpenRead(file.Path))
                    {
                        size = fs.Length;
                        key = Convert.ToBase64String(filehasher.ComputeHash(fs));
                    }

                    if (key != file.Hash)
                        throw new Exception(string.Format("Failed to restore file: \"{0}\". File hash is {1}, expected hash is {2}", file.Path, key, file.Hash));

                    result.FilesRestored++;
                    result.SizeOfRestoredFiles += size;
                }
                catch (Exception ex)
                {
                    fileErrors++;
                    result.AddWarning(ex.Message, ex);
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }
            }

        if (fileErrors > 0 && brokenFiles.Count > 0)
            m_result.AddMessage(string.Format("Failed to restore {0} files; additionally the following files failed to download, which may be the cause:{1}{2}", fileErrors, Environment.NewLine, string.Join(Environment.NewLine, brokenFiles)));

        // Drop the temp tables
        database.DropRestoreTable();
        backend.WaitForComplete(database, null);
    }

    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_Complete);
    result.EndTime = DateTime.UtcNow;
}
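// A minimal sketch of the post-restore verification loop above: re-hash each
// restored file and compare against the Base64-encoded hash recorded in the
// database. VerifyRestoredFile is a hypothetical helper name (Duplicati performs
// this inline), and SHA-256 is assumed as the file hash, matching the default
// file-hash-algorithm setting.
using System;
using System.IO;
using System.Security.Cryptography;

static class RestoreVerificationSketch
{
    // Returns the verified file's size, or throws when the hash does not match.
    public static long VerifyRestoredFile(string path, string expectedBase64Hash)
    {
        using (var hasher = SHA256.Create())
        using (var fs = File.OpenRead(path))
        {
            var size = fs.Length;
            var actual = Convert.ToBase64String(hasher.ComputeHash(fs));

            if (actual != expectedBase64Hash)
                throw new Exception(string.Format(
                    "Restored file \"{0}\" hashed to {1}, expected {2}", path, actual, expectedBase64Hash));

            return size;
        }
    }
}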