/// <summary>
/// Entry point for the delete operation: verifies that the local database
/// exists, executes the delete pass, and commits the transaction on a real
/// run or rolls it back on a dry-run.
/// </summary>
public void Run()
{
    var dbpath = m_options.Dbpath;
    if (!System.IO.File.Exists(dbpath))
        throw new Exception(string.Format("Database file does not exist: {0}", dbpath));

    using (var database = new Database.LocalDeleteDatabase(dbpath, false))
    using (var transaction = database.BeginTransaction())
    {
        m_result.SetDatabase(database);
        Utility.VerifyParameters(database, m_options);

        DoRun(database, transaction, false, false);

        if (m_options.Dryrun)
        {
            // A dry-run must leave the database untouched
            transaction.Rollback();
        }
        else
        {
            // Time the commit so slow flushes show up in the log
            using (new Logging.Timer("CommitDelete"))
                transaction.Commit();
            database.WriteResults();
        }
    }
}
/// <summary>
/// Runs the delete operation against the local database.
/// Commits the transaction on a normal run; a dry-run is rolled back so no
/// local state is modified.
/// </summary>
public void Run()
{
    if (!System.IO.File.Exists(m_options.Dbpath))
        throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));

    using (var deleteDb = new Database.LocalDeleteDatabase(m_options.Dbpath, false))
    using (var transaction = deleteDb.BeginTransaction())
    {
        m_result.SetDatabase(deleteDb);
        Utility.VerifyParameters(deleteDb, m_options);
        DoRun(deleteDb, transaction, false, false);

        if (m_options.Dryrun)
        {
            // Dry-run: discard every local change made by DoRun
            transaction.Rollback();
            return;
        }

        using (new Logging.Timer("CommitDelete"))
            transaction.Commit();
        deleteDb.WriteResults();
    }
}
// Entry point for the delete operation.
// Verifies the local database exists, wires up the result sink, then runs the
// delete pass inside a transaction that is committed only on success of a
// non-dry-run; any other exit path rolls the transaction back.
public void Run()
{
    if (!System.IO.File.Exists(m_options.Dbpath))
    {
        throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath), "DatabaseFileMissing");
    }

    using (var db = new Database.LocalDeleteDatabase(m_options.Dbpath, "Delete"))
    {
        var tr = db.BeginTransaction();
        try
        {
            m_result.SetDatabase(db);
            // Pull stored settings from the database before validating them
            Utility.UpdateOptionsFromDb(db, m_options);
            Utility.VerifyParameters(db, m_options);

            // DoRun may commit and re-open the transaction, hence the ref parameter
            DoRun(db, ref tr, false, false, null);

            if (!m_options.Dryrun)
            {
                using (new Logging.Timer(LOGTAG, "CommitDelete", "CommitDelete")) tr.Commit();
                db.WriteResults();
            }
            else
            {
                tr.Rollback();
            }

            // Sentinel: signals the finally block that the transaction completed
            tr = null;
        }
        finally
        {
            // Roll back if we left the try block with the transaction still open
            if (tr != null)
            {
                try { tr.Rollback(); }
                catch { }
            }
        }
    }
}
// Performs the delete operation: selects filesets to remove according to the
// configured retention rules, drops them from the local database, deletes the
// matching remote filelist volumes, and optionally compacts afterwards.
//
// db: the local delete database
// transaction: active transaction; may be committed and re-opened here, hence ref
// hasVerifiedBacked: true if the caller already verified the remote file list
// forceCompact: true to run the compact step even if nothing was deleted
// sharedManager: an already-running BackendManager to reuse, or null to create one
public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBacked, bool forceCompact, BackendManager sharedManager)
{
    // Workaround where we allow a running backendmanager to be used
    using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
    {
        var backend = bk ?? sharedManager;

        if (!hasVerifiedBacked && !m_options.NoBackendverification)
        {
            // Verify against the latest filelist volume only
            var backupDatabase = new LocalBackupDatabase(db, m_options);
            var latestFilelist = backupDatabase.GetTemporaryFilelistVolumeNames(latestOnly: true, transaction: transaction);
            FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, latestFilelist);
        }

        // Collect deletion candidates from each retention rule.
        // NOTE(review): the same fileset could in principle be selected by more
        // than one remover and then appear twice in this list — confirm the
        // removers are mutually exclusive or that duplicates are harmless.
        IListResultFileset[] filesets = db.FilesetsWithBackupVersion.ToArray();
        List<IListResultFileset> versionsToDelete = new List<IListResultFileset>();
        versionsToDelete.AddRange(new SpecificVersionsRemover(this.m_options).GetFilesetsToDelete(filesets));
        versionsToDelete.AddRange(new KeepTimeRemover(this.m_options).GetFilesetsToDelete(filesets));
        versionsToDelete.AddRange(new RetentionPolicyRemover(this.m_options).GetFilesetsToDelete(filesets));

        // When determining the number of full versions to keep, we need to ignore the versions already marked for removal.
        versionsToDelete.AddRange(new KeepVersionsRemover(this.m_options).GetFilesetsToDelete(filesets.Except(versionsToDelete)));

        if (!m_options.AllowFullRemoval && filesets.Length == versionsToDelete.Count)
        {
            // Always keep at least one fileset: skip the first entry by
            // version number so a single version survives the delete
            Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal");
            versionsToDelete = versionsToDelete.OrderBy(x => x.Version).Skip(1).ToList();
        }

        if (versionsToDelete.Count > 0)
        {
            Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", versionsToDelete.Count);
        }

        // Drop the filesets locally and mark their remote volumes as Deleting,
        // so an interrupted run can be repaired later
        var lst = db.DropFilesetsFromTable(versionsToDelete.Select(x => x.Time).ToArray(), transaction).ToArray();
        foreach (var f in lst)
        {
            db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
        }

        if (!m_options.Dryrun)
        {
            // Persist the Deleting markers before touching remote storage
            transaction.Commit();
            transaction = db.BeginTransaction();
        }

        foreach (var f in lst)
        {
            // Honor a stop request between remote deletes
            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
            {
                backend.WaitForComplete(db, transaction);
                return;
            }

            if (!m_options.Dryrun)
            {
                backend.Delete(f.Key, f.Value);
            }
            else
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key);
            }
        }

        // A shared manager is still owned by the caller, so only drain it;
        // a manager we created ourselves is shut down completely
        if (sharedManager == null)
        {
            backend.WaitForComplete(db, transaction);
        }
        else
        {
            backend.WaitForEmpty(db, transaction);
        }

        var count = lst.Length;
        if (!m_options.Dryrun)
        {
            if (count == 0)
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted");
            }
            else
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count);
            }
        }
        else
        {
            if (count == 0)
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted");
            }
            else
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count);
            }

            if (count > 0 && m_options.Dryrun)
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files");
            }
        }

        if (!m_options.NoAutoCompact && (forceCompact || versionsToDelete.Count > 0))
        {
            m_result.CompactResults = new CompactResults(m_result);
            new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager);
        }

        m_result.SetResults(versionsToDelete.Select(v => new Tuple<long, DateTime>(v.Version, v.Time)), m_options.Dryrun);
    }
}
/// <summary>
/// Performs the delete operation: drops the selected filesets from the local
/// database, deletes the corresponding remote filelist volumes, and optionally
/// triggers a compact pass afterwards.
/// </summary>
/// <param name="db">The local delete database</param>
/// <param name="transaction">Active transaction; may be committed and re-opened here, hence ref</param>
/// <param name="hasVerifiedBacked">True if the caller already verified the remote file list</param>
/// <param name="forceCompact">True to run the compact step even if nothing was deleted</param>
public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBacked, bool forceCompact)
{
    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        if (!hasVerifiedBacked && !m_options.NoBackendverification)
        {
            FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
        }

        // Snapshot the fileset times once, instead of re-querying the
        // database property for each of the three uses below
        var filesetTimes = db.FilesetTimes.ToList();

        // Fix: materialize the (version, time) pairs BEFORE any filesets are
        // dropped. The previous deferred Zip query was only enumerated inside
        // SetResults at the end of this method — after the filesets had been
        // removed from the database — so the deleted versions never showed up
        // in the reported results. (The newer overload of this method applies
        // the same .ToList() fix.)
        var filesetNumbers = filesetTimes.Zip(Enumerable.Range(0, filesetTimes.Count), (a, b) => new Tuple<long, DateTime>(b, a.Value)).ToList();

        var toDelete = m_options.GetFilesetsToDelete(filesetTimes.Select(x => x.Value).ToArray());
        if (toDelete != null && toDelete.Length > 0)
        {
            m_result.AddMessage(string.Format("Deleting {0} remote fileset(s) ...", toDelete.Length));
        }

        // Drop the filesets locally and mark the matching remote volumes as
        // Deleting, so an interrupted run can be repaired later
        var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray();
        foreach (var f in lst)
        {
            db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
        }

        if (!m_options.Dryrun)
        {
            // Persist the Deleting markers before touching remote storage
            transaction.Commit();
            transaction = db.BeginTransaction();
        }

        foreach (var f in lst)
        {
            // Honor a stop request between remote deletes
            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
            {
                backend.WaitForComplete(db, transaction);
                return;
            }

            if (!m_options.Dryrun)
            {
                backend.Delete(f.Key, f.Value);
            }
            else
            {
                m_result.AddDryrunMessage(string.Format("Would delete remote fileset: {0}", f.Key));
            }
        }

        backend.WaitForComplete(db, transaction);

        var count = lst.Length;
        if (!m_options.Dryrun)
        {
            if (count == 0)
            {
                m_result.AddMessage("No remote filesets were deleted");
            }
            else
            {
                m_result.AddMessage(string.Format("Deleted {0} remote fileset(s)", count));
            }
        }
        else
        {
            if (count == 0)
            {
                m_result.AddDryrunMessage("No remote filesets would be deleted");
            }
            else
            {
                m_result.AddDryrunMessage(string.Format("{0} remote fileset(s) would be deleted", count));
            }

            if (count > 0 && m_options.Dryrun)
            {
                m_result.AddDryrunMessage("Remove --dry-run to actually delete files");
            }
        }

        if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
        {
            m_result.CompactResults = new CompactResults(m_result);
            new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction);
        }

        m_result.SetResults(
            from n in filesetNumbers
            where toDelete.Contains(n.Item2)
            select n,
            m_options.Dryrun);
    }
}
// Runs the purge operation: for each selected fileset version, rewrites the
// fileset without the purged files (uploading a replacement filelist volume
// and deleting the old one), then optionally compacts remote storage.
//
// db: the local purge database
// filter: file filter selecting entries to purge (used when filtercommand is null)
// filtercommand: optional callback that marks files for removal directly in
//   the database; when set, the consistency checks and the compact step are skipped
// pgoffset/pgspan: the progress-reporting window assigned to this run
private void DoRun(Database.LocalPurgeDatabase db, Library.Utility.IFilter filter, Action<System.Data.IDbCommand, long, string> filtercommand, float pgoffset, float pgspan)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Begin);
    Logging.Log.WriteInformationMessage(LOGTAG, "StartingPurge", "Starting purge operation");

    // Compacting only applies to filter-based purges
    var doCompactStep = !m_options.NoAutoCompact && filtercommand == null;

    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        // Guard clauses: refuse to run on databases in unsafe states
        if (db.PartiallyRecreated)
        {
            throw new UserInformationException("The purge command does not work on partially recreated databases", "PurgeNotAllowedOnPartialDatabase");
        }

        if (db.RepairInProgress && filtercommand == null)
        {
            throw new UserInformationException(string.Format("The purge command does not work on an incomplete database, try the {0} operation.", "purge-broken-files"), "PurgeNotAllowedOnIncompleteDatabase");
        }

        var versions = db.GetFilesetIDs(m_options.Time, m_options.Version).OrderByDescending(x => x).ToArray();
        if (versions.Length <= 0)
        {
            throw new UserInformationException("No filesets matched the supplied time or versions", "NoFilesetFoundForTimeOrVersion");
        }

        var orphans = db.CountOrphanFiles(null);
        if (orphans != 0)
        {
            throw new UserInformationException(string.Format("Unable to start the purge process as there are {0} orphan file(s)", orphans), "CannotPurgeWithOrphans");
        }

        Utility.UpdateOptionsFromDb(db, m_options);
        Utility.VerifyParameters(db, m_options);

        if (filtercommand == null)
        {
            db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, false, null);

            if (m_options.NoBackendverification)
            {
                FilelistProcessor.VerifyLocalList(backend, db);
            }
            else
            {
                FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, null);
            }
        }

        // Filesets ordered newest first; used to map version ids to timestamps
        var filesets = db.FilesetTimes.OrderByDescending(x => x.Value).ToArray();

        // Reserve 25% of the progress window for the compact step, if any
        var versionprogress = ((doCompactStep ? 0.75f : 1.0f) / versions.Length) * pgspan;
        var currentprogress = pgoffset;
        var progress = 0;

        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Process);
        m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

        // Reverse makes sure we re-write the old versions first
        foreach (var versionid in versions.Reverse())
        {
            progress++;
            Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingFilelistVolumes", "Processing filelist volume {0} of {1}", progress, versions.Length);

            using (var tr = db.BeginTransaction())
            {
                // Locate the fileset entry for this version id
                var ix = -1;
                for (var i = 0; i < filesets.Length; i++)
                {
                    if (filesets[i].Key == versionid)
                    {
                        ix = i;
                        break;
                    }
                }

                if (ix < 0)
                {
                    throw new InvalidProgramException(string.Format("Fileset was reported with id {0}, but could not be found?", versionid));
                }

                // The replacement fileset needs an unused remote filename;
                // probe timestamps up to 60 seconds past the original
                var secs = 0;
                while (secs < 60)
                {
                    secs++;
                    var tfn = Volumes.VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, filesets[ix].Value.AddSeconds(secs));
                    if (db.GetRemoteVolumeID(tfn, tr) < 0)
                    {
                        break;
                    }
                }

                var tsOriginal = filesets[ix].Value;
                var ts = tsOriginal.AddSeconds(secs);
                var prevfilename = db.GetRemoteVolumeNameForFileset(filesets[ix].Key, tr);

                if (secs >= 60)
                {
                    throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is more than 60 seconds away", prevfilename, ts));
                }

                // The shifted timestamp must not overtake the next (newer) fileset
                if (ix != 0 && filesets[ix - 1].Value <= ts)
                {
                    throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is larger than the next timestamp {2}", prevfilename, ts, filesets[ix - 1].Value));
                }

                using (var tempset = db.CreateTemporaryFileset(versionid, tr))
                {
                    if (filtercommand == null)
                    {
                        tempset.ApplyFilter(filter);
                    }
                    else
                    {
                        tempset.ApplyFilter(filtercommand);
                    }

                    if (tempset.RemovedFileCount == 0)
                    {
                        // Nothing matched in this version; skip the rewrite
                        Logging.Log.WriteInformationMessage(LOGTAG, "NotWritingNewFileset", "Not writing a new fileset for {0} as it was not changed", prevfilename);
                        currentprogress += versionprogress;
                        tr.Rollback();
                        continue;
                    }
                    else
                    {
                        // NOTE(review): tf appears unused in this block —
                        // possibly a leftover; confirm before removing
                        using (var tf = new Library.Utility.TempFile())
                        using (var vol = new Volumes.FilesetVolumeWriter(m_options, ts))
                        {
                            // Preserve the full/partial flag of the original fileset
                            var isOriginalFilesetFullBackup = db.IsFilesetFullBackup(tsOriginal);
                            var newids = tempset.ConvertToPermanentFileset(vol.RemoteFilename, ts, isOriginalFilesetFullBackup);
                            vol.VolumeID = newids.Item1;
                            vol.CreateFilesetFile(isOriginalFilesetFullBackup);

                            Logging.Log.WriteInformationMessage(LOGTAG, "ReplacingFileset", "Replacing fileset {0} with {1} which has with {2} fewer file(s) ({3} reduction)", prevfilename, vol.RemoteFilename, tempset.RemovedFileCount, Library.Utility.Utility.FormatSizeString(tempset.RemovedFileSize));

                            db.WriteFileset(vol, newids.Item2, tr);

                            m_result.RemovedFileSize += tempset.RemovedFileSize;
                            m_result.RemovedFileCount += tempset.RemovedFileCount;
                            m_result.RewrittenFileLists++;

                            currentprogress += (versionprogress / 2);
                            m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

                            if (m_options.Dryrun || m_options.FullResult)
                            {
                                foreach (var fe in tempset.ListAllDeletedFiles())
                                {
                                    var msg = string.Format(" Purging file {0} ({1})", fe.Key, Library.Utility.Utility.FormatSizeString(fe.Value));
                                    Logging.Log.WriteProfilingMessage(LOGTAG, "PurgeFile", msg);
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "PurgeFile", msg);

                                    if (m_options.Dryrun)
                                    {
                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldPurgeFile", msg);
                                    }
                                }

                                if (m_options.Dryrun)
                                {
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldWriteRemoteFiles", "Would write files to remote storage");
                                }

                                Logging.Log.WriteVerboseMessage(LOGTAG, "WritingRemoteFiles", "Writing files to remote storage");
                            }

                            if (m_options.Dryrun)
                            {
                                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadAndDelete", "Would upload file {0} ({1}) and delete file {2}, removing {3} files", vol.RemoteFilename, Library.Utility.Utility.FormatSizeString(vol.Filesize), prevfilename, tempset.RemovedFileCount);
                                tr.Rollback();
                            }
                            else
                            {
                                // Drop the old fileset, commit the local change,
                                // then swap the volumes on the remote store
                                var lst = db.DropFilesetsFromTable(new[] { filesets[ix].Value }, tr).ToArray();
                                foreach (var f in lst)
                                {
                                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, tr);
                                }

                                tr.Commit();
                                backend.Put(vol, synchronous: true);
                                backend.Delete(prevfilename, -1, true);
                                backend.FlushDbMessages();
                            }
                        }
                    }
                }
            }

            currentprogress += (versionprogress / 2);
            m_result.OperationProgressUpdater.UpdateProgress(currentprogress);
        }

        if (doCompactStep)
        {
            if (m_result.RewrittenFileLists == 0)
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "SkippingCompacting", "Skipping compacting as no new volumes were written");
            }
            else
            {
                m_result.OperationProgressUpdater.UpdateProgress(pgoffset + (0.75f * pgspan));
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Compact);
                m_result.CompactResults = new CompactResults(m_result);

                using (var cdb = new Database.LocalDeleteDatabase(db))
                {
                    var tr = cdb.BeginTransaction();
                    try
                    {
                        new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(cdb, true, ref tr, backend);
                    }
                    catch
                    {
                        // NOTE(review): the exception is swallowed here, and the
                        // finally block below still attempts a Commit after this
                        // Rollback (that Commit then throws and is swallowed too).
                        // Confirm this error handling is intentional.
                        try { tr.Rollback(); }
                        catch { }
                    }
                    finally
                    {
                        try { tr.Commit(); }
                        catch { }
                    }
                }
            }

            m_result.OperationProgressUpdater.UpdateProgress(pgoffset + pgspan);
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Complete);
        }

        backend.WaitForComplete(db, null);
    }
}
/// <summary>
/// Removes broken filesets, i.e. filesets referencing data lost from the
/// remote store: filesets where every file is broken are deleted outright,
/// the remaining filesets have just their broken files purged, and any
/// remote files known to be missing data are deleted at the end.
/// </summary>
/// <param name="filter">Must be null or empty; filters are not supported for this operation</param>
public void Run(Library.Utility.IFilter filter)
{
    if (!System.IO.File.Exists(m_options.Dbpath))
        throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath));

    if (filter != null && !filter.Empty)
        throw new UserInformationException("Filters are not supported for this operation");

    List<Database.RemoteVolumeEntry> missing = null;

    using (var db = new Database.LocalListBrokenFilesDatabase(m_options.Dbpath))
    using (var tr = db.BeginTransaction())
    {
        if (db.PartiallyRecreated)
            throw new UserInformationException("The command does not work on partially recreated databases");

        var sets = ListBrokenFilesHandler.GetBrokenFilesetsFromRemote(m_backendurl, m_result, db, tr, m_options, out missing);
        if (sets == null)
            return;

        if (sets.Length == 0)
        {
            if (missing == null)
                m_result.AddMessage("Found no broken filesets");
            else if (missing.Count == 0)
                m_result.AddMessage("Found no broken filesets and no missing remote files");
            else
                // Fix: report the number of missing remote files; the previous
                // code formatted sets.Length, which is always 0 in this branch
                throw new UserInformationException(string.Format("Found no broken filesets, but {0} missing remote files", missing.Count));
        }

        m_result.AddMessage(string.Format("Found {0} broken filesets with {1} affected files, purging files", sets.Length, sets.Sum(x => x.Item3)));

        var pgoffset = 0.0f;
        var pgspan = 0.95f / sets.Length;
        var filesets = db.FilesetTimes.ToList();

        // For each broken fileset: id, timestamp, number of broken files,
        // version index, and the total file count in the fileset
        var compare_list = sets.Select(x => new
        {
            FilesetID = x.Item2,
            Timestamp = x.Item1,
            RemoveCount = x.Item3,
            Version = filesets.FindIndex(y => y.Key == x.Item2),
            SetCount = db.GetFilesetFileCount(x.Item2, tr)
        }).ToArray();

        // Filesets where every file is broken are deleted entirely;
        // the rest only have their broken files purged
        var fully_emptied = compare_list.Where(x => x.RemoveCount == x.SetCount).ToArray();
        var to_purge = compare_list.Where(x => x.RemoveCount != x.SetCount).ToArray();

        if (fully_emptied.Length != 0)
        {
            if (fully_emptied.Length == 1)
                // Fix: the placeholders were swapped ({1} for the fileset, {0}
                // for the count), printing the count as the fileset timestamp
                m_result.AddMessage(string.Format("Removing entire fileset {0} as all {1} file(s) are broken", fully_emptied.First().Timestamp, fully_emptied.First().RemoveCount));
            else
                m_result.AddMessage(string.Format("Removing {0} filesets where all file(s) are broken: {1}", fully_emptied.Length, string.Join(", ", fully_emptied.Select(x => x.Timestamp.ToLocalTime().ToString()))));

            m_result.DeleteResults = new DeleteResults(m_result);
            using (var rmdb = new Database.LocalDeleteDatabase(db))
            {
                var deltr = rmdb.BeginTransaction();
                try
                {
                    // Run a delete operation restricted to the fully broken versions
                    var opts = new Options(new Dictionary<string, string>(m_options.RawOptions));
                    opts.RawOptions["version"] = string.Join(",", fully_emptied.Select(x => x.Version.ToString()));
                    opts.RawOptions.Remove("time");
                    opts.RawOptions["no-auto-compact"] = "true";

                    new DeleteHandler(m_backendurl, opts, (DeleteResults)m_result.DeleteResults)
                        .DoRun(rmdb, ref deltr, true, false);

                    if (!m_options.Dryrun)
                    {
                        using (new Logging.Timer("CommitDelete"))
                            deltr.Commit();
                        rmdb.WriteResults();
                    }
                    else
                    {
                        deltr.Rollback();
                    }

                    // Fix: mark the transaction as completed so the finally
                    // block does not attempt a redundant rollback after a
                    // successful commit (same sentinel pattern as the other handlers)
                    deltr = null;
                }
                finally
                {
                    if (deltr != null)
                    {
                        try { deltr.Rollback(); }
                        catch { }
                    }
                }
            }

            pgoffset += (pgspan * fully_emptied.Length);
            m_result.OperationProgressUpdater.UpdateProgress(pgoffset);
        }

        if (to_purge.Length > 0)
        {
            m_result.PurgeResults = new PurgeFilesResults(m_result);

            foreach (var bs in to_purge)
            {
                m_result.AddMessage(string.Format("Purging {0} file(s) from fileset {1}", bs.RemoveCount, bs.Timestamp.ToLocalTime()));
                var opts = new Options(new Dictionary<string, string>(m_options.RawOptions));

                using (var pgdb = new Database.LocalPurgeDatabase(db))
                {
                    // Recompute the version number after we deleted the versions before
                    filesets = pgdb.FilesetTimes.ToList();
                    var thisversion = filesets.FindIndex(y => y.Key == bs.FilesetID);
                    if (thisversion < 0)
                        throw new Exception(string.Format("Failed to find match for {0} ({1}) in {2}", bs.FilesetID, bs.Timestamp.ToLocalTime(), string.Join(", ", filesets.Select(x => x.ToString()))));

                    opts.RawOptions["version"] = thisversion.ToString();
                    opts.RawOptions.Remove("time");
                    opts.RawOptions["no-auto-compact"] = "true";

                    new PurgeFilesHandler(m_backendurl, opts, (PurgeFilesResults)m_result.PurgeResults).Run(pgdb, pgoffset, pgspan, (cmd, filesetid, tablename) =>
                    {
                        // Sanity check: the callback must run for the fileset we requested
                        if (filesetid != bs.FilesetID)
                            throw new Exception(string.Format("Unexpected filesetid: {0}, expected {1}", filesetid, bs.FilesetID));
                        db.InsertBrokenFileIDsIntoTable(filesetid, tablename, "FileID", cmd.Transaction);
                    });
                }

                pgoffset += pgspan;
                m_result.OperationProgressUpdater.UpdateProgress(pgoffset);
            }
        }

        if (m_options.Dryrun)
            tr.Rollback();
        else
            tr.Commit();

        m_result.OperationProgressUpdater.UpdateProgress(0.95f);

        // Delete remote files that are known to be missing data
        if (missing != null && missing.Count > 0)
        {
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                foreach (var f in missing)
                {
                    if (m_options.Dryrun)
                        m_result.AddDryrunMessage(string.Format("Would delete remote file: {0}, size: {1}", f.Name, Library.Utility.Utility.FormatSizeString(f.Size)));
                    else
                        backend.Delete(f.Name, f.Size);
                }
            }
        }

        if (!m_options.Dryrun && db.RepairInProgress)
        {
            m_result.AddMessage("Database was previously marked as in-progress, checking if it is valid after purging files");
            db.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize, true);
            m_result.AddMessage("Purge completed, and consistency checks completed, marking database as complete");
            db.RepairInProgress = false;
        }

        m_result.OperationProgressUpdater.UpdateProgress(1.0f);
    }
}
// Performs the delete operation using the shared fileset-selection helper
// GetFilesetsToDelete: drops the selected filesets from the local database,
// deletes the matching remote filelist volumes, and optionally compacts.
//
// db: the local delete database
// transaction: active transaction; may be committed and re-opened here, hence ref
// hasVerifiedBacked: true if the caller already verified the remote file list
// forceCompact: true to run the compact step even if nothing was deleted
// sharedManager: an already-running BackendManager to reuse, or null to create one
public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBacked, bool forceCompact, BackendManager sharedManager)
{
    // Workaround where we allow a running backendmanager to be used
    using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
    {
        var backend = bk ?? sharedManager;

        if (!hasVerifiedBacked && !m_options.NoBackendverification)
        {
            FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
        }

        // Materialize the (version, time) pairs up front — before any filesets
        // are dropped — so the final SetResults reports the pre-delete numbering
        var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple<long, DateTime>(b, a.Value)).ToList();
        var sets = db.FilesetTimes.Select(x => x.Value).ToArray();
        var toDelete = GetFilesetsToDelete(db, sets);

        if (!m_options.AllowFullRemoval && sets.Length == toDelete.Length)
        {
            // Always keep at least one fileset unless full removal is allowed
            Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal");
            toDelete = toDelete.Skip(1).ToArray();
        }

        if (toDelete != null && toDelete.Length > 0)
        {
            Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", toDelete.Length);
        }

        // Drop the filesets locally and mark their remote volumes as Deleting,
        // so an interrupted run can be repaired later
        var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray();
        foreach (var f in lst)
        {
            db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
        }

        if (!m_options.Dryrun)
        {
            // Persist the Deleting markers before touching remote storage
            transaction.Commit();
            transaction = db.BeginTransaction();
        }

        foreach (var f in lst)
        {
            // Honor a stop request between remote deletes
            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
            {
                backend.WaitForComplete(db, transaction);
                return;
            }

            if (!m_options.Dryrun)
            {
                backend.Delete(f.Key, f.Value);
            }
            else
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key);
            }
        }

        // A shared manager is still owned by the caller, so only drain it;
        // a manager we created ourselves is shut down completely
        if (sharedManager == null)
        {
            backend.WaitForComplete(db, transaction);
        }
        else
        {
            backend.WaitForEmpty(db, transaction);
        }

        var count = lst.Length;
        if (!m_options.Dryrun)
        {
            if (count == 0)
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted");
            }
            else
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count);
            }
        }
        else
        {
            if (count == 0)
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted");
            }
            else
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count);
            }

            if (count > 0 && m_options.Dryrun)
            {
                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files");
            }
        }

        if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
        {
            m_result.CompactResults = new CompactResults(m_result);
            new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager);
        }

        m_result.SetResults(
            from n in filesetNumbers
            where toDelete.Contains(n.Item2)
            select n,
            m_options.Dryrun);
    }
}