// Brings the remote destination back in sync with the local database: volumes flagged for
// verification are re-checked, files the database does not know about are deleted, and
// missing fileset, index and block volumes are rebuilt from local data and re-uploaded.
public void RunRepairRemote() {
    if (!System.IO.File.Exists(m_options.Dbpath)) {
        throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));
    }

    m_result.OperationProgressUpdater.UpdateProgress(0);

    using (var db = new LocalRepairDatabase(m_options.Dbpath))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) {
        m_result.SetDatabase(db);
        Utility.VerifyParameters(db, m_options);

        var tp = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter);
        var buffer = new byte[m_options.Blocksize];
        var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

        if (blockhasher == null) {
            throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
        }
        if (!blockhasher.CanReuseTransform) {
            throw new Exception(Strings.Foresthash.InvalidCryptoSystem(m_options.BlockHashAlgorithm));
        }

        var hashsize = blockhasher.HashSize / 8;
        var progress = 0;
        var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count();

        if (m_options.Dryrun) {
            if (tp.ParsedVolumes.Count() == 0 && tp.OtherVolumes.Count() > 0) {
                if (tp.BackupPrefixes.Length == 1) {
                    throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup-prefix?", m_options.Prefix, tp.BackupPrefixes[0]));
                } else {
                    throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup-prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes)));
                }
            } else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0) {
                throw new Exception(string.Format("No files were missing, but {0} remote files were found, did you mean to run recreate-database?", tp.ExtraVolumes.Count()));
            }
        }

        if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0) {
            // Re-check volumes that could not be verified against the remote listing
            if (tp.VerificationRequiredVolumes.Any()) {
                using (var testdb = new LocalTestDatabase(db)) {
                    foreach (var n in tp.VerificationRequiredVolumes) {
                        try {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop) {
                                backend.WaitForComplete(db, null);
                                return;
                            }

                            progress++;
                            m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                            long size;
                            string hash;
                            KeyValuePair<string, IEnumerable<KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>>> res;

                            using (var tf = backend.GetWithInfo(n.Name, out size, out hash))
                                res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, m_result, 1);

                            if (res.Value.Any()) {
                                throw new Exception(string.Format("Remote verification failure: {0}", res.Value.First()));
                            }

                            if (!m_options.Dryrun) {
                                m_result.AddMessage(string.Format("Successfully captured hash for {0}, updating database", n.Name));
                                db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash);
                            }
                        } catch (Exception ex) {
                            m_result.AddError(string.Format("Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message), ex);
                            if (ex is System.Threading.ThreadAbortException) { throw; }
                        }
                    }
                }
            }

            // Remove files on the remote destination that the database does not know about
            // TODO: It is actually possible to use the extra files if we parse them
            foreach (var n in tp.ExtraVolumes) {
                try {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop) {
                        backend.WaitForComplete(db, null);
                        return;
                    }

                    progress++;
                    m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                    if (!m_options.Dryrun) {
                        db.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Deleting);
                        backend.Delete(n.File.Name, n.File.Size);
                    } else {
                        m_result.AddDryrunMessage(string.Format("would delete file {0}", n.File.Name));
                    }
                } catch (Exception ex) {
                    m_result.AddError(string.Format("Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message), ex);
                    if (ex is System.Threading.ThreadAbortException) { throw; }
                }
            }

            // Rebuild volumes that are missing from the remote destination
            foreach (var n in tp.MissingVolumes) {
                IDisposable newEntry = null;
                try {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop) {
                        backend.WaitForComplete(db, null);
                        return;
                    }

                    progress++;
                    m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                    if (n.Type == RemoteVolumeType.Files) {
                        var filesetId = db.GetFilesetIdFromRemotename(n.Name);
                        var w = new FilesetVolumeWriter(m_options, DateTime.UtcNow);
                        newEntry = w;
                        w.SetRemoteFilename(n.Name);

                        db.WriteFileset(w, null, filesetId);
                        w.Close();

                        if (m_options.Dryrun) {
                            m_result.AddDryrunMessage(string.Format("would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                        } else {
                            db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                            backend.Put(w);
                        }
                    } else if (n.Type == RemoteVolumeType.Index) {
                        var w = new IndexVolumeWriter(m_options);
                        newEntry = w;
                        w.SetRemoteFilename(n.Name);

                        foreach (var blockvolume in db.GetBlockVolumesFromIndexName(n.Name)) {
                            w.StartVolume(blockvolume.Name);
                            var volumeid = db.GetRemoteVolumeID(blockvolume.Name);

                            foreach (var b in db.GetBlocks(volumeid)) {
                                w.AddBlock(b.Hash, b.Size);
                            }

                            w.FinishVolume(blockvolume.Hash, blockvolume.Size);

                            if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full) {
                                foreach (var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize)) {
                                    w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3);
                                }
                            }
                        }

                        w.Close();

                        if (m_options.Dryrun) {
                            m_result.AddDryrunMessage(string.Format("would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                        } else {
                            db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                            backend.Put(w);
                        }
                    } else if (n.Type == RemoteVolumeType.Blocks) {
                        var w = new BlockVolumeWriter(m_options);
                        newEntry = w;
                        w.SetRemoteFilename(n.Name);

                        using (var mbl = db.CreateBlockList(n.Name)) {
                            //First we grab all known blocks from local files
                            foreach (var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize)) {
                                var hash = block.Hash;
                                var size = (int)block.Size;

                                foreach (var source in block.Sources) {
                                    var file = source.File;
                                    var offset = source.Offset;

                                    try {
                                        if (System.IO.File.Exists(file)) {
                                            using (var f = System.IO.File.OpenRead(file)) {
                                                f.Position = offset;
                                                if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size)) {
                                                    var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size));
                                                    if (newhash == hash) {
                                                        if (mbl.SetBlockRestored(hash, size)) {
                                                            w.AddBlock(hash, buffer, 0, size, Duplicati.Library.Interface.CompressionHint.Default);
                                                        }
                                                        break;
                                                    }
                                                }
                                            }
                                        }
                                    } catch (Exception ex) {
                                        m_result.AddError(string.Format("Failed to access file: {0}", file), ex);
                                    }
                                }
                            }

                            //Then we grab all remote volumes that have the missing blocks
                            foreach (var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend)) {
                                try {
                                    using (var tmpfile = vol.TempFile)
                                    using (var f = new BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options)) {
                                        foreach (var b in f.Blocks) {
                                            if (mbl.SetBlockRestored(b.Key, b.Value)) {
                                                if (f.ReadBlock(b.Key, buffer) == b.Value) {
                                                    w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default);
                                                }
                                            }
                                        }
                                    }
                                } catch (Exception ex) {
                                    m_result.AddError(string.Format("Failed to access remote file: {0}", vol.Name), ex);
                                }
                            }

                            // If we managed to recover all blocks, NICE!
                            var missingBlocks = mbl.GetMissingBlocks().Count();
                            if (missingBlocks > 0) {
                                //TODO: How do we handle this situation?
                                m_result.AddMessage(string.Format("Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name));
                                foreach (var f in mbl.GetFilesetsUsingMissingBlocks()) {
                                    m_result.AddMessage(f.Name);
                                }

                                if (!m_options.Dryrun) {
                                    m_result.AddMessage("This may be fixed by deleting the filesets and running repair again");
                                    throw new Exception(string.Format("Repair not possible, missing {0} blocks!", missingBlocks));
                                }
                            } else {
                                if (m_options.Dryrun) {
                                    m_result.AddDryrunMessage(string.Format("would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                } else {
                                    db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                    backend.Put(w);
                                }
                            }
                        }
                    }
                } catch (Exception ex) {
                    if (newEntry != null) {
                        try { newEntry.Dispose(); } catch { } finally { newEntry = null; }
                    }

                    m_result.AddError(string.Format("Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message), ex);
                    if (ex is System.Threading.ThreadAbortException) { throw; }
                }
            }
        } else {
            m_result.AddMessage("Destination and database are synchronized, not making any changes");
        }

        m_result.OperationProgressUpdater.UpdateProgress(1);
        backend.WaitForComplete(db, null);
        db.WriteResults();
    }
}
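
// Variant of RunRepairRemote that reports through Logging.Log with a LOGTAG and per-message ids,
// refuses to alter remote files when the database is only partially recreated or a previous repair
// did not complete, validates regenerated index blocklists against the configured block hash, and
// adopts unknown remote index files that match the local database instead of deleting them.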
public void RunRepairRemote() {
    if (!System.IO.File.Exists(m_options.Dbpath)) {
        throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath), "RepairDatabaseFileDoesNotExist");
    }

    m_result.OperationProgressUpdater.UpdateProgress(0);

    using (var db = new LocalRepairDatabase(m_options.Dbpath))
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) {
        m_result.SetDatabase(db);
        Utility.UpdateOptionsFromDb(db, m_options);
        Utility.VerifyParameters(db, m_options);

        if (db.PartiallyRecreated) {
            throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated");
        }

        if (db.RepairInProgress) {
            throw new UserInformationException("An attempted repair of the database did not complete. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsInRepairState");
        }

        var tp = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter, null);
        var buffer = new byte[m_options.Blocksize];
        var blockhasher = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm);

        if (blockhasher == null) {
            throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
        }
        if (!blockhasher.CanReuseTransform) {
            throw new UserInformationException(Strings.Common.InvalidCryptoSystem(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
        }

        var hashsize = blockhasher.HashSize / 8;
        var progress = 0;
        var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count();

        if (m_options.Dryrun) {
            if (tp.ParsedVolumes.Count() == 0 && tp.OtherVolumes.Count() > 0) {
                if (tp.BackupPrefixes.Length == 1) {
                    throw new UserInformationException(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup prefix?", m_options.Prefix, tp.BackupPrefixes[0]), "RemoteFolderEmptyWithPrefix");
                } else {
                    throw new UserInformationException(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes)), "RemoteFolderEmptyWithPrefix");
                }
            } else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0) {
                throw new UserInformationException(string.Format("No files were missing, but {0} remote files were found, did you mean to run recreate-database?", tp.ExtraVolumes.Count()), "NoRemoteFilesMissing");
            }
        }

        if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0) {
            // Re-check volumes that could not be verified against the remote listing
            if (tp.VerificationRequiredVolumes.Any()) {
                using (var testdb = new LocalTestDatabase(db)) {
                    foreach (var n in tp.VerificationRequiredVolumes) {
                        try {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop) {
                                backend.WaitForComplete(db, null);
                                return;
                            }

                            progress++;
                            m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                            long size;
                            string hash;
                            KeyValuePair<string, IEnumerable<KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>>> res;

                            using (var tf = backend.GetWithInfo(n.Name, out size, out hash))
                                res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, 1);

                            if (res.Value.Any()) {
                                throw new Exception(string.Format("Remote verification failure: {0}", res.Value.First()));
                            }

                            if (!m_options.Dryrun) {
                                Logging.Log.WriteInformationMessage(LOGTAG, "CapturedRemoteFileHash", "Successfully captured hash for {0}, updating database", n.Name);
                                db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash);
                            }
                        } catch (Exception ex) {
                            Logging.Log.WriteErrorMessage(LOGTAG, "RemoteFileVerificationError", ex, "Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message);
                            if (ex is System.Threading.ThreadAbortException) { throw; }
                        }
                    }
                }
            }

            // Remove (or adopt) files on the remote destination that the database does not know about
            // TODO: It is actually possible to use the extra files if we parse them
            foreach (var n in tp.ExtraVolumes) {
                try {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop) {
                        backend.WaitForComplete(db, null);
                        return;
                    }

                    progress++;
                    m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                    // If this is a new index file, we can accept it if it matches our local data
                    // This makes it possible to augment the remote store with new index data
                    if (n.FileType == RemoteVolumeType.Index && m_options.IndexfilePolicy != Options.IndexFileStrategy.None) {
                        try {
                            string hash;
                            long size;

                            using (var tf = backend.GetWithInfo(n.File.Name, out size, out hash))
                            using (var ifr = new IndexVolumeReader(n.CompressionModule, tf, m_options, m_options.BlockhashSize)) {
                                foreach (var rv in ifr.Volumes) {
                                    string cmphash;
                                    long cmpsize;
                                    RemoteVolumeType cmptype;
                                    RemoteVolumeState cmpstate;

                                    if (!db.GetRemoteVolume(rv.Filename, out cmphash, out cmpsize, out cmptype, out cmpstate)) {
                                        throw new Exception(string.Format("Unknown remote file {0} detected", rv.Filename));
                                    }

                                    if (!new[] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(cmpstate)) {
                                        throw new Exception(string.Format("Volume {0} has local state {1}", rv.Filename, cmpstate));
                                    }

                                    if (cmphash != rv.Hash || cmpsize != rv.Length || !new[] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(cmpstate)) {
                                        throw new Exception(string.Format("Volume {0} hash/size mismatch ({1} - {2}) vs ({3} - {4})", rv.Filename, cmphash, cmpsize, rv.Hash, rv.Length));
                                    }

                                    db.CheckAllBlocksAreInVolume(rv.Filename, rv.Blocks);
                                }

                                var blocksize = m_options.Blocksize;
                                foreach (var ixb in ifr.BlockLists) {
                                    db.CheckBlocklistCorrect(ixb.Hash, ixb.Length, ixb.Blocklist, blocksize, hashsize);
                                }

                                var selfid = db.GetRemoteVolumeID(n.File.Name);
                                foreach (var rv in ifr.Volumes) {
                                    db.AddIndexBlockLink(selfid, db.GetRemoteVolumeID(rv.Filename), null);
                                }
                            }

                            // All checks fine, we accept the new index file
                            Logging.Log.WriteInformationMessage(LOGTAG, "AcceptNewIndexFile", "Accepting new index file {0}", n.File.Name);
                            db.RegisterRemoteVolume(n.File.Name, RemoteVolumeType.Index, size, RemoteVolumeState.Uploading);
                            db.UpdateRemoteVolume(n.File.Name, RemoteVolumeState.Verified, size, hash);
                            continue;
                        } catch (Exception rex) {
                            Logging.Log.WriteErrorMessage(LOGTAG, "FailedNewIndexFile", rex, "Failed to accept new index file: {0}, message: {1}", n.File.Name, rex.Message);
                        }
                    }

                    if (!m_options.Dryrun) {
                        db.RegisterRemoteVolume(n.File.Name, n.FileType, n.File.Size, RemoteVolumeState.Deleting);
                        backend.Delete(n.File.Name, n.File.Size);
                    } else {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteFile", "would delete file {0}", n.File.Name);
                    }
                } catch (Exception ex) {
                    Logging.Log.WriteErrorMessage(LOGTAG, "FailedExtraFileCleanup", ex, "Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message);
                    if (ex is System.Threading.ThreadAbortException) { throw; }
                }
            }

            // Rebuild volumes that are missing from the remote destination
            foreach (var n in tp.MissingVolumes) {
                IDisposable newEntry = null;
                try {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop) {
                        backend.WaitForComplete(db, null);
                        return;
                    }

                    progress++;
                    m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                    if (n.Type == RemoteVolumeType.Files) {
                        var filesetId = db.GetFilesetIdFromRemotename(n.Name);
                        var w = new FilesetVolumeWriter(m_options, DateTime.UtcNow);
                        newEntry = w;
                        w.SetRemoteFilename(n.Name);

                        db.WriteFileset(w, null, filesetId);
                        w.Close();

                        if (m_options.Dryrun) {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadFileset", "would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size));
                        } else {
                            db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                            backend.Put(w);
                        }
                    } else if (n.Type == RemoteVolumeType.Index) {
                        var w = new IndexVolumeWriter(m_options);
                        newEntry = w;
                        w.SetRemoteFilename(n.Name);

                        var h = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm);

                        foreach (var blockvolume in db.GetBlockVolumesFromIndexName(n.Name)) {
                            w.StartVolume(blockvolume.Name);
                            var volumeid = db.GetRemoteVolumeID(blockvolume.Name);

                            foreach (var b in db.GetBlocks(volumeid)) {
                                w.AddBlock(b.Hash, b.Size);
                            }

                            w.FinishVolume(blockvolume.Hash, blockvolume.Size);

                            if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full) {
                                foreach (var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize)) {
                                    var bh = Convert.ToBase64String(h.ComputeHash(b.Item2, 0, b.Item3));
                                    if (bh != b.Item1) {
                                        throw new Exception(string.Format("Internal consistency check failed, generated index block has wrong hash, {0} vs {1}", bh, b.Item1));
                                    }

                                    w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3);
                                }
                            }
                        }

                        w.Close();

                        if (m_options.Dryrun) {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadIndexFile", "would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size));
                        } else {
                            db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                            backend.Put(w);
                        }
                    } else if (n.Type == RemoteVolumeType.Blocks) {
                        var w = new BlockVolumeWriter(m_options);
                        newEntry = w;
                        w.SetRemoteFilename(n.Name);

                        using (var mbl = db.CreateBlockList(n.Name)) {
                            //First we grab all known blocks from local files
                            foreach (var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize)) {
                                var hash = block.Hash;
                                var size = (int)block.Size;

                                foreach (var source in block.Sources) {
                                    var file = source.File;
                                    var offset = source.Offset;

                                    try {
                                        if (System.IO.File.Exists(file)) {
                                            using (var f = System.IO.File.OpenRead(file)) {
                                                f.Position = offset;
                                                if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size)) {
                                                    var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size));
                                                    if (newhash == hash) {
                                                        if (mbl.SetBlockRestored(hash, size)) {
                                                            w.AddBlock(hash, buffer, 0, size, Duplicati.Library.Interface.CompressionHint.Default);
                                                        }
                                                        break;
                                                    }
                                                }
                                            }
                                        }
                                    } catch (Exception ex) {
                                        Logging.Log.WriteErrorMessage(LOGTAG, "FileAccessError", ex, "Failed to access file: {0}", file);
                                    }
                                }
                            }

                            //Then we grab all remote volumes that have the missing blocks
                            foreach (var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend)) {
                                try {
                                    using (var tmpfile = vol.TempFile)
                                    using (var f = new BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options)) {
                                        foreach (var b in f.Blocks) {
                                            if (mbl.SetBlockRestored(b.Key, b.Value)) {
                                                if (f.ReadBlock(b.Key, buffer) == b.Value) {
                                                    w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default);
                                                }
                                            }
                                        }
                                    }
                                } catch (Exception ex) {
                                    Logging.Log.WriteErrorMessage(LOGTAG, "RemoteFileAccessError", ex, "Failed to access remote file: {0}", vol.Name);
                                }
                            }

                            // If we managed to recover all blocks, NICE!
                            var missingBlocks = mbl.GetMissingBlocks().Count();
                            if (missingBlocks > 0) {
                                Logging.Log.WriteInformationMessage(LOGTAG, "RepairMissingBlocks", "Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name);
                                foreach (var f in mbl.GetFilesetsUsingMissingBlocks()) {
                                    Logging.Log.WriteInformationMessage(LOGTAG, "AffectedFilesetName", f.Name);
                                }

                                var recoverymsg = string.Format("If you want to continue working with the database, you can use the \"{0}\" and \"{1}\" commands to purge the missing data from the database and the remote storage.", "list-broken-files", "purge-broken-files");

                                if (!m_options.Dryrun) {
                                    Logging.Log.WriteInformationMessage(LOGTAG, "RecoverySuggestion", "This may be fixed by deleting the filesets and running repair again");
                                    throw new UserInformationException(string.Format("Repair not possible, missing {0} blocks.\n" + recoverymsg, missingBlocks), "RepairIsNotPossible");
                                } else {
                                    Logging.Log.WriteInformationMessage(LOGTAG, "RecoverySuggestion", recoverymsg);
                                }
                            } else {
                                if (m_options.Dryrun) {
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadBlockFile", "would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size));
                                } else {
                                    db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                    backend.Put(w);
                                }
                            }
                        }
                    }
                } catch (Exception ex) {
                    if (newEntry != null) {
                        try { newEntry.Dispose(); } catch { } finally { newEntry = null; }
                    }

                    Logging.Log.WriteErrorMessage(LOGTAG, "CleanupMissingFileError", ex, "Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message);
                    if (ex is System.Threading.ThreadAbortException) { throw; }
                }
            }
        } else {
            Logging.Log.WriteInformationMessage(LOGTAG, "DatabaseIsSynchronized", "Destination and database are synchronized, not making any changes");
        }

        m_result.OperationProgressUpdater.UpdateProgress(1);
        backend.WaitForComplete(db, null);
        db.WriteResults();
    }
}
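
// Compacts the remote store: space is reclaimed by deleting volumes that contain only unused
// blocks, and by downloading partially used volumes, repacking their still-used blocks into new
// block (and index) volumes, uploading the replacements and deleting the originals, with all
// bookkeeping recorded against the supplied database transaction.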
internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction) {
    var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction);
    report.ReportCompactData(m_result);

    if (report.ShouldReclaim || report.ShouldCompact) {
        using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db)) {
            if (!hasVerifiedBackend && !m_options.NoBackendverification) {
                FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
            }

            BlockVolumeWriter newvol = new BlockVolumeWriter(m_options);
            newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

            IndexVolumeWriter newvolindex = null;
            if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None) {
                newvolindex = new IndexVolumeWriter(m_options);
                newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                newvolindex.StartVolume(newvol.RemoteFilename);
            }

            long blocksInVolume = 0;
            long discardedBlocks = 0;
            long discardedSize = 0;
            byte[] buffer = new byte[m_options.Blocksize];
            var remoteList = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray();

            //These are for bookkeeping
            var uploadedVolumes = new List<KeyValuePair<string, long>>();
            var deletedVolumes = new List<KeyValuePair<string, long>>();
            var downloadedVolumes = new List<KeyValuePair<string, long>>();

            //We start by deleting unused volumes to save space before uploading new stuff
            var fullyDeleteable = (from v in remoteList where report.DeleteableVolumes.Contains(v.Name) select (IRemoteVolume)v).ToList();
            deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction));

            // This list is used to pick up unused volumes,
            // so they can be deleted once the upload of the
            // required fragments is complete
            var deleteableVolumes = new List<IRemoteVolume>();

            if (report.ShouldCompact) {
                var volumesToDownload = (from v in remoteList where report.CompactableVolumes.Contains(v.Name) select (IRemoteVolume)v).ToList();

                using (var q = db.CreateBlockQueryHelper(m_options, transaction)) {
                    foreach (var entry in new AsyncDownloader(volumesToDownload, backend)) {
                        using (var tmpfile = entry.TempFile) {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop) {
                                backend.WaitForComplete(db, transaction);
                                return false;
                            }

                            downloadedVolumes.Add(new KeyValuePair<string, long>(entry.Name, entry.Size));
                            var inst = VolumeBase.ParseFilename(entry.Name);

                            using (var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options)) {
                                foreach (var e in f.Blocks) {
                                    if (q.UseBlock(e.Key, e.Value, transaction)) {
                                        //TODO: How do we get the compression hint? Reverse query for filename in db?
                                        var s = f.ReadBlock(e.Key, buffer);
                                        if (s != e.Value) {
                                            throw new Exception(string.Format("Size mismatch problem for block {0}, {1} vs {2}", e.Key, s, e.Value));
                                        }

                                        newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible);
                                        if (newvolindex != null) {
                                            newvolindex.AddBlock(e.Key, e.Value);
                                        }

                                        db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction);
                                        blocksInVolume++;

                                        if (newvol.Filesize > m_options.VolumeSize) {
                                            uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
                                            if (newvolindex != null) {
                                                uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));
                                            }

                                            if (!m_options.Dryrun) {
                                                backend.Put(newvol, newvolindex);
                                            } else {
                                                m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));
                                            }

                                            newvol = new BlockVolumeWriter(m_options);
                                            newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                                            if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None) {
                                                newvolindex = new IndexVolumeWriter(m_options);
                                                newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                                                db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                                                newvolindex.StartVolume(newvol.RemoteFilename);
                                            }

                                            blocksInVolume = 0;

                                            //After we upload this volume, we can delete all previous encountered volumes
                                            deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
                                            deleteableVolumes = new List<IRemoteVolume>();
                                        }
                                    } else {
                                        discardedBlocks++;
                                        discardedSize += e.Value;
                                    }
                                }
                            }

                            deleteableVolumes.Add(entry);
                        }
                    }

                    if (blocksInVolume > 0) {
                        uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
                        if (newvolindex != null) {
                            uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));
                        }

                        if (!m_options.Dryrun) {
                            backend.Put(newvol, newvolindex);
                        } else {
                            m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));
                        }
                    } else {
                        db.RemoveRemoteVolume(newvol.RemoteFilename, transaction);
                        if (newvolindex != null) {
                            db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction);
                            newvolindex.FinishVolume(null, 0);
                        }
                    }
                }
            }

            deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));

            var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
            var deletedSize = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
            var uploadSize = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);

            m_result.DeletedFileCount = deletedVolumes.Count;
            m_result.DownloadedFileCount = downloadedVolumes.Count;
            m_result.UploadedFileCount = uploadedVolumes.Count;
            m_result.DeletedFileSize = deletedSize;
            m_result.DownloadedFileSize = downloadSize;
            m_result.UploadedFileSize = uploadSize;
            m_result.Dryrun = m_options.Dryrun;

            if (m_result.Dryrun) {
                if (downloadedVolumes.Count == 0) {
                    m_result.AddDryrunMessage(string.Format("Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                } else {
                    m_result.AddDryrunMessage(string.Format("Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
                }
            } else {
                if (m_result.DownloadedFileCount == 0) {
                    m_result.AddMessage(string.Format("Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                } else {
                    m_result.AddMessage(string.Format("Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(downloadSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
                }
            }

            backend.WaitForComplete(db, transaction);
        }

        return (m_result.DeletedFileCount + m_result.UploadedFileCount) > 0;
    } else {
        return false;
    }
}