/// <summary>
/// Verifies the remote storage against the local database before the backup starts.
/// If verification fails and auto-cleanup is enabled, a repair is attempted and the
/// verification is retried once.
/// </summary>
private void PreBackupVerify(BackendManager backend, string protectedfile)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
    using (new Logging.Timer(LOGTAG, "PreBackupVerify", "PreBackupVerify"))
    {
        try
        {
            if (m_options.NoBackendverification)
            {
                // Skip the remote listing and only check the local database
                FilelistProcessor.VerifyLocalList(backend, m_database);
                UpdateStorageStatsFromDatabase();
            }
            else
            {
                FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, protectedfile);
            }
        }
        catch (Exception ex)
        {
            if (m_options.AutoCleanup)
            {
                Logging.Log.WriteWarningMessage(LOGTAG, "BackendVerifyFailedAttemptingCleanup", ex, "Backend verification failed, attempting automatic cleanup");
                m_result.RepairResults = new RepairResults(m_result);
                new RepairHandler(backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                Logging.Log.WriteInformationMessage(LOGTAG, "BackendCleanupFinished", "Backend cleanup finished, retrying verification");
                // Retry with the protected file still excluded, so the cleanup
                // cannot have invalidated the in-flight fileset
                FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, protectedfile);
            }
            else
            {
                throw;
            }
        }
    }
}
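/// <summary>
/// Rewrites each selected fileset without the purged entries: for every matching
/// version, a temporary fileset is filtered, converted to a permanent fileset with a
/// bumped timestamp, written to a new remote filelist volume, and uploaded; the old
/// filelist volume is then deleted. When no explicit filter command is supplied,
/// the remote storage is optionally compacted afterwards.
/// </summary>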
private void DoRun(Database.LocalPurgeDatabase db, Library.Utility.IFilter filter, Action<System.Data.IDbCommand, long, string> filtercommand, float pgoffset, float pgspan)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Begin);
    Logging.Log.WriteInformationMessage(LOGTAG, "StartingPurge", "Starting purge operation");

    var doCompactStep = !m_options.NoAutoCompact && filtercommand == null;

    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        // Guard against database states the purge operation cannot handle
        if (db.PartiallyRecreated)
            throw new UserInformationException("The purge command does not work on partially recreated databases", "PurgeNotAllowedOnPartialDatabase");

        if (db.RepairInProgress && filtercommand == null)
            throw new UserInformationException(string.Format("The purge command does not work on an incomplete database, try the {0} operation.", "purge-broken-files"), "PurgeNotAllowedOnIncompleteDatabase");

        var versions = db.GetFilesetIDs(m_options.Time, m_options.Version).OrderByDescending(x => x).ToArray();
        if (versions.Length <= 0)
            throw new UserInformationException("No filesets matched the supplied time or versions", "NoFilesetFoundForTimeOrVersion");

        var orphans = db.CountOrphanFiles(null);
        if (orphans != 0)
            throw new UserInformationException(string.Format("Unable to start the purge process as there are {0} orphan file(s)", orphans), "CannotPurgeWithOrphans");

        Utility.UpdateOptionsFromDb(db, m_options);
        Utility.VerifyParameters(db, m_options);

        if (filtercommand == null)
        {
            db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, false, null);

            if (m_options.NoBackendverification)
                FilelistProcessor.VerifyLocalList(backend, db);
            else
                FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, null);
        }

        var filesets = db.FilesetTimes.OrderByDescending(x => x.Value).ToArray();

        var versionprogress = ((doCompactStep ? 0.75f : 1.0f) / versions.Length) * pgspan;
        var currentprogress = pgoffset;
        var progress = 0;

        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Process);
        m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

        // Reverse makes sure we re-write the old versions first
        foreach (var versionid in versions.Reverse())
        {
            progress++;
            Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingFilelistVolumes", "Processing filelist volume {0} of {1}", progress, versions.Length);

            using (var tr = db.BeginTransaction())
            {
                // Locate the fileset entry that belongs to this version id
                var ix = -1;
                for (var i = 0; i < filesets.Length; i++)
                    if (filesets[i].Key == versionid)
                    {
                        ix = i;
                        break;
                    }

                if (ix < 0)
                    throw new InvalidProgramException(string.Format("Fileset was reported with id {0}, but could not be found?", versionid));

                // The replacement fileset needs a fresh timestamp (and thus a fresh
                // remote filename), so probe forward in one-second steps until an
                // unused name is found, giving up after 60 seconds
                var secs = 0;
                while (secs < 60)
                {
                    secs++;
                    var tfn = Volumes.VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, filesets[ix].Value.AddSeconds(secs));
                    if (db.GetRemoteVolumeID(tfn, tr) < 0)
                        break;
                }

                var tsOriginal = filesets[ix].Value;
                var ts = tsOriginal.AddSeconds(secs);
                var prevfilename = db.GetRemoteVolumeNameForFileset(filesets[ix].Key, tr);

                if (secs >= 60)
                    throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is more than 60 seconds away", prevfilename, ts));

                if (ix != 0 && filesets[ix - 1].Value <= ts)
                    throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is larger than the next timestamp {2}", prevfilename, ts, filesets[ix - 1].Value));

                using (var tempset = db.CreateTemporaryFileset(versionid, tr))
                {
                    if (filtercommand == null)
                        tempset.ApplyFilter(filter);
                    else
                        tempset.ApplyFilter(filtercommand);

                    if (tempset.RemovedFileCount == 0)
                    {
                        // Nothing matched in this version, so keep the old fileset untouched
                        Logging.Log.WriteInformationMessage(LOGTAG, "NotWritingNewFileset", "Not writing a new fileset for {0} as it was not changed", prevfilename);
                        currentprogress += versionprogress;
                        tr.Rollback();
                        continue;
                    }
                    else
                    {
                        using (var tf = new Library.Utility.TempFile())
                        using (var vol = new Volumes.FilesetVolumeWriter(m_options, ts))
                        {
                            var isOriginalFilesetFullBackup = db.IsFilesetFullBackup(tsOriginal);
                            var newids = tempset.ConvertToPermanentFileset(vol.RemoteFilename, ts, isOriginalFilesetFullBackup);
                            vol.VolumeID = newids.Item1;
                            vol.CreateFilesetFile(isOriginalFilesetFullBackup);

                            Logging.Log.WriteInformationMessage(LOGTAG, "ReplacingFileset", "Replacing fileset {0} with {1} which has {2} fewer file(s) ({3} reduction)", prevfilename, vol.RemoteFilename, tempset.RemovedFileCount, Library.Utility.Utility.FormatSizeString(tempset.RemovedFileSize));

                            db.WriteFileset(vol, newids.Item2, tr);

                            m_result.RemovedFileSize += tempset.RemovedFileSize;
                            m_result.RemovedFileCount += tempset.RemovedFileCount;
                            m_result.RewrittenFileLists++;

                            currentprogress += (versionprogress / 2);
                            m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

                            if (m_options.Dryrun || m_options.FullResult)
                            {
                                foreach (var fe in tempset.ListAllDeletedFiles())
                                {
                                    var msg = string.Format("  Purging file {0} ({1})", fe.Key, Library.Utility.Utility.FormatSizeString(fe.Value));

                                    Logging.Log.WriteProfilingMessage(LOGTAG, "PurgeFile", msg);
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "PurgeFile", msg);

                                    if (m_options.Dryrun)
                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldPurgeFile", msg);
                                }

                                if (m_options.Dryrun)
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldWriteRemoteFiles", "Would write files to remote storage");

                                Logging.Log.WriteVerboseMessage(LOGTAG, "WritingRemoteFiles", "Writing files to remote storage");
                            }

                            if (m_options.Dryrun)
                            {
                                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadAndDelete", "Would upload file {0} ({1}) and delete file {2}, removing {3} files", vol.RemoteFilename, Library.Utility.Utility.FormatSizeString(vol.Filesize), prevfilename, tempset.RemovedFileCount);
                                tr.Rollback();
                            }
                            else
                            {
                                // Mark the old fileset volume for deletion and commit first,
                                // then upload the replacement and delete the old filelist volume
                                var lst = db.DropFilesetsFromTable(new[] { filesets[ix].Value }, tr).ToArray();
                                foreach (var f in lst)
                                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, tr);

                                tr.Commit();

                                backend.Put(vol, synchronous: true);
                                backend.Delete(prevfilename, -1, true);
                                backend.FlushDbMessages();
                            }
                        }
                    }
                }
            }

            currentprogress += (versionprogress / 2);
            m_result.OperationProgressUpdater.UpdateProgress(currentprogress);
        }

        if (doCompactStep)
        {
            if (m_result.RewrittenFileLists == 0)
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "SkippingCompacting", "Skipping compacting as no new volumes were written");
            }
            else
            {
                m_result.OperationProgressUpdater.UpdateProgress(pgoffset + (0.75f * pgspan));
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Compact);
                m_result.CompactResults = new CompactResults(m_result);
                using (var cdb = new Database.LocalDeleteDatabase(db))
                {
                    var tr = cdb.BeginTransaction();
                    try
                    {
                        new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(cdb, true, ref tr, backend);
                    }
                    catch
                    {
                        try { tr.Rollback(); }
                        catch { }

                        // Re-throw so a failed compact is not silently swallowed;
                        // the commit attempt in the finally block is then a no-op
                        throw;
                    }
                    finally
                    {
                        try { tr.Commit(); }
                        catch { }
                    }
                }
            }
        }

        // Report completion even when the compact step was skipped
        m_result.OperationProgressUpdater.UpdateProgress(pgoffset + pgspan);
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Complete);

        backend.WaitForComplete(db, null);
    }
}
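// A minimal standalone sketch of the timestamp-bump technique used in DoRun above,
// shown in isolation for clarity. The 'nameExists' callback is a hypothetical
// stand-in for the db.GetRemoteVolumeID(GenerateFilename(...)) lookup and is not
// part of the original handler; only 'System' is assumed to be imported.
private static DateTime BumpTimestamp(DateTime original, Func<DateTime, bool> nameExists)
{
    // Probe +1s..+60s for a timestamp whose generated remote filename is unused
    for (var secs = 1; secs <= 60; secs++)
    {
        var candidate = original.AddSeconds(secs);
        if (!nameExists(candidate))
            return candidate;
    }

    throw new Exception(string.Format("No unused timestamp within 60 seconds of {0}", original));
}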