/// <summary>
/// Helper method that removes remote volumes listed as temporary, deleting, or uploading
/// in the local database, treating them as leftovers from an interrupted operation.
/// Failures to delete are logged as warnings and treated as if the file was already deleted.
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to use</param>
public static void VerifyLocalList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
{
    var locallist = database.GetRemoteVolumes();
    foreach (var i in locallist)
    {
        switch (i.State)
        {
            case RemoteVolumeState.Uploaded:
            case RemoteVolumeState.Verified:
            case RemoteVolumeState.Deleted:
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
            case RemoteVolumeState.Uploading:
                log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                try
                {
                    backend.Delete(i.Name, i.Size, true);
                }
                catch (Exception ex)
                {
                    log.AddWarning(string.Format("Failed to erase file {0}, treating as deleted: {1}", i.Name, ex.Message), ex);
                }
                break;

            default:
                log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                break;
        }

        backend.FlushDbMessages();
    }
}
/// <summary>
/// Helper method that removes remote volumes listed as temporary, deleting, or uploading
/// in the local database, treating them as leftovers from an interrupted operation.
/// Failures to delete are logged as warnings and treated as if the file was already deleted.
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="database">The database to compare with</param>
public static void VerifyLocalList(BackendManager backend, LocalDatabase database)
{
    var locallist = database.GetRemoteVolumes();
    foreach (var i in locallist)
    {
        switch (i.State)
        {
            case RemoteVolumeState.Uploaded:
            case RemoteVolumeState.Verified:
            case RemoteVolumeState.Deleted:
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
            case RemoteVolumeState.Uploading:
                Logging.Log.WriteInformationMessage(LOGTAG, "RemovingStaleFile", "Removing remote file listed as {0}: {1}", i.State, i.Name);
                try
                {
                    backend.Delete(i.Name, i.Size, true);
                }
                catch (Exception ex)
                {
                    Logging.Log.WriteWarningMessage(LOGTAG, "DeleteFileFailed", ex, "Failed to erase file {0}, treating as deleted: {1}", i.Name, ex.Message);
                }
                break;

            default:
                Logging.Log.WriteWarningMessage(LOGTAG, "UnknownFileState", null, "Unknown state for remote file listed as {0}: {1}", i.State, i.Name);
                break;
        }

        backend.FlushDbMessages();
    }
}
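// For orientation, the RemoteVolumeState values handled above and in the
// RemoteListAnalysis overloads below follow a simple lifecycle, sketched here
// as inferred from the switch statements in this file (not an authoritative
// diagram):
//
//   Temporary -> Uploading -> Uploaded -> Verified
//   (any state) -> Deleting -> Deleted
//
// Volumes found in Temporary, Uploading, or Deleting are leftovers from an
// interrupted operation and are removed, rescheduled, or promoted accordingly.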
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to use</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();

    var remotelist = (from n in rawlist
                      let p = Volumes.VolumeBase.ParseFilename(n)
                      where p != null && p.Prefix == options.Prefix
                      select p).ToList();
    var otherlist = (from n in rawlist
                     let p = Volumes.VolumeBase.ParseFilename(n)
                     where p != null && p.Prefix != options.Prefix
                     select p).ToList();
    var unknownlist = (from n in rawlist
                       let p = Volumes.VolumeBase.ParseFilename(n)
                       where p == null
                       select n).ToList();
    var filesets = (from n in remotelist
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n).ToList();

    log.KnownFileCount = remotelist.Count();
    log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
    log.UnknownFileCount = unknownlist.Count();
    log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
    log.BackupListCount = filesets.Count;
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    if (backend is Library.Interface.IQuotaEnabledBackend)
    {
        log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
        log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
    }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist)
    {
        lookup[s.File.Name] = s;
    }

    var missing = new List<RemoteVolumeEntry>();
    var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
    var locallist = database.GetRemoteVolumes();

    foreach (var i in locallist)
    {
        Volumes.IParsedVolume r;
        var remoteFound = lookup.TryGetValue(i.Name, out r);
        var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

        lookup.Remove(i.Name);

        switch (i.State)
        {
            case RemoteVolumeState.Deleted:
                if (remoteFound)
                {
                    log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));
                }
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
                if (remoteFound)
                {
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                else
                {
                    log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                    database.RemoveRemoteVolume(i.Name, null);
                }
                break;

            case RemoteVolumeState.Uploading:
                if (remoteFound && correctSize && r.File.Size >= 0)
                {
                    log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                }
                else if (!remoteFound)
                {
                    log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                    database.RemoveRemoteVolume(i.Name, null);
                    database.RegisterRemoteVolume(i.Name, i.Type, RemoteVolumeState.Deleting, null);
                }
                else
                {
                    log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                break;

            case RemoteVolumeState.Uploaded:
                if (!remoteFound)
                {
                    missing.Add(i);
                }
                else if (correctSize)
                {
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                }
                else
                {
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                }
                break;

            case RemoteVolumeState.Verified:
                if (!remoteFound)
                {
                    missing.Add(i);
                }
                else if (!correctSize)
                {
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                }
                break;

            default:
                log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                break;
        }

        backend.FlushDbMessages();
    }

    foreach (var i in missingHash)
    {
        log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);
    }

    return new RemoteAnalysisResult()
    {
        ParsedVolumes = remotelist,
        OtherVolumes = otherlist,
        ExtraVolumes = lookup.Values,
        MissingVolumes = missing,
        VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
    };
}
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to use</param>
/// <param name="protectedFiles">Filenames that should be exempted from deletion</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, IEnumerable<string> protectedFiles)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();
    protectedFiles = protectedFiles ?? Enumerable.Empty<string>();

    var remotelist = (from n in rawlist
                      let p = Volumes.VolumeBase.ParseFilename(n)
                      where p != null && p.Prefix == options.Prefix
                      select p).ToList();
    var otherlist = (from n in rawlist
                     let p = Volumes.VolumeBase.ParseFilename(n)
                     where p != null && p.Prefix != options.Prefix
                     select p).ToList();
    var unknownlist = (from n in rawlist
                       let p = Volumes.VolumeBase.ParseFilename(n)
                       where p == null
                       select n).ToList();
    var filesets = (from n in remotelist
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n).ToList();

    log.KnownFileCount = remotelist.Count;
    long knownFileSize = remotelist.Select(x => Math.Max(0, x.File.Size)).Sum();
    log.KnownFileSize = knownFileSize;
    log.UnknownFileCount = unknownlist.Count;
    log.UnknownFileSize = unknownlist.Select(x => Math.Max(0, x.Size)).Sum();
    log.BackupListCount = database.FilesetTimes.Count();
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    // TODO: We should query through the backendmanager
    using (var bk = DynamicLoader.BackendLoader.GetBackend(backend.BackendUrl, options.RawOptions))
        if (bk is IQuotaEnabledBackend enabledBackend)
        {
            Library.Interface.IQuotaInfo quota = enabledBackend.Quota;
            if (quota != null)
            {
                log.TotalQuotaSpace = quota.TotalQuotaSpace;
                log.FreeQuotaSpace = quota.FreeQuotaSpace;

                // Check to see if there should be a warning or error about the quota.
                // Since this processor may be called multiple times during a backup
                // (both at the start and end, for example), the log keeps track of
                // whether a quota error or warning has been sent already.
                // Note that an error can still be sent later even if a warning was sent earlier.
                if (!log.ReportedQuotaError && quota.FreeQuotaSpace == 0)
                {
                    log.ReportedQuotaError = true;
                    Logging.Log.WriteErrorMessage(LOGTAG, "BackendQuotaExceeded", null, "Backend quota has been exceeded: Using {0} of {1} ({2} available)", Library.Utility.Utility.FormatSizeString(knownFileSize), Library.Utility.Utility.FormatSizeString(quota.TotalQuotaSpace), Library.Utility.Utility.FormatSizeString(quota.FreeQuotaSpace));
                }
                else if (!log.ReportedQuotaWarning && !log.ReportedQuotaError && quota.FreeQuotaSpace >= 0) // Negative value means the backend didn't return the quota info
                {
                    // Warnings are sent if the available free space is less than the given percentage of the total backup size.
                    double warningThreshold = options.QuotaWarningThreshold / (double)100;
                    if (quota.FreeQuotaSpace < warningThreshold * knownFileSize)
                    {
                        log.ReportedQuotaWarning = true;
                        Logging.Log.WriteWarningMessage(LOGTAG, "BackendQuotaNear", null, "Backend quota is close to being exceeded: Using {0} of {1} ({2} available)", Library.Utility.Utility.FormatSizeString(knownFileSize), Library.Utility.Utility.FormatSizeString(quota.TotalQuotaSpace), Library.Utility.Utility.FormatSizeString(quota.FreeQuotaSpace));
                    }
                }
            }
        }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist)
    {
        lookup[s.File.Name] = s;
    }

    var missing = new List<RemoteVolumeEntry>();
    var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
    var cleanupRemovedRemoteVolumes = new HashSet<string>();

    foreach (var e in database.DuplicateRemoteVolumes())
    {
        if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary)
        {
            database.UnlinkRemoteVolume(e.Key, e.Value);
        }
        else
        {
            throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString()));
        }
    }

    var locallist = database.GetRemoteVolumes();
    foreach (var i in locallist)
    {
        Volumes.IParsedVolume r;
        var remoteFound = lookup.TryGetValue(i.Name, out r);
        var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

        lookup.Remove(i.Name);

        switch (i.State)
        {
            case RemoteVolumeState.Deleted:
                if (remoteFound)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "IgnoreRemoteDeletedFile", "ignoring remote file listed as {0}: {1}", i.State, i.Name);
                }
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
                if (remoteFound)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "RemoveUnwantedRemoteFile", "removing remote file listed as {0}: {1}", i.State, i.Name);
                    backend.Delete(i.Name, i.Size, true);
                }
                else
                {
                    if (i.DeleteGracePeriod > DateTime.UtcNow)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "KeepDeleteRequest", "keeping delete request for {0} until {1}", i.Name, i.DeleteGracePeriod.ToLocalTime());
                    }
                    else
                    {
                        if (i.State == RemoteVolumeState.Temporary && protectedFiles.Any(pf => pf == i.Name))
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                        }
                        else
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "RemoteUnwantedMissingFile", "removing file listed as {0}: {1}", i.State, i.Name);
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                        }
                    }
                }
                break;

            case RemoteVolumeState.Uploading:
                if (remoteFound && correctSize && r.File.Size >= 0)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "PromotingCompleteFile", "promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded);
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                }
                else if (!remoteFound)
                {
                    if (protectedFiles.Any(pf => pf == i.Name))
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Temporary, i.Size, i.Hash, false, new TimeSpan(0), null);
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "SchedulingMissingFileForDelete", "scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name);
                        cleanupRemovedRemoteVolumes.Add(i.Name);
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                    }
                }
                else
                {
                    if (protectedFiles.Any(pf => pf == i.Name))
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RemoveIncompleteFile", "removing incomplete remote file listed as {0}: {1}", i.State, i.Name);
                        backend.Delete(i.Name, i.Size, true);
                    }
                }
                break;

            case RemoteVolumeState.Uploaded:
                if (!remoteFound)
                {
                    missing.Add(i);
                }
                else if (correctSize)
                {
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                }
                else
                {
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                }
                break;

            case RemoteVolumeState.Verified:
                if (!remoteFound)
                {
                    missing.Add(i);
                }
                else if (!correctSize)
                {
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                }
                break;

            default:
                Logging.Log.WriteWarningMessage(LOGTAG, "UnknownFileState", null, "unknown state for remote file listed as {0}: {1}", i.State, i.Name);
                break;
        }

        backend.FlushDbMessages();
    }

    // Clean up deleted volumes in the database en bloc
    database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

    foreach (var i in missingHash)
    {
        Logging.Log.WriteWarningMessage(LOGTAG, "MissingRemoteHash", null, "remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash);
    }

    return new RemoteAnalysisResult()
    {
        ParsedVolumes = remotelist,
        OtherVolumes = otherlist,
        ExtraVolumes = lookup.Values,
        MissingVolumes = missing,
        VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
    };
}
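// A hypothetical caller-side sketch (not part of this file) showing how the
// RemoteAnalysisResult returned above might be consumed; the helper name and
// the exception texts below are illustrative assumptions, not the actual
// verification logic used elsewhere in this class.
private static void ThrowIfRemoteStateAbnormal(RemoteAnalysisResult result)
{
    // Volumes recorded as Uploaded/Verified in the database but absent remotely
    if (result.MissingVolumes.Any())
        throw new Exception(string.Format("Found {0} missing remote file(s)", result.MissingVolumes.Count()));

    // Volumes whose remote size disagrees with the database record and whose
    // hash therefore needs to be re-verified
    if (result.VerificationRequiredVolumes.Any())
        throw new Exception(string.Format("Found {0} remote file(s) that require hash verification", result.VerificationRequiredVolumes.Count()));
}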
private void DoRun(Database.LocalPurgeDatabase db, Library.Utility.IFilter filter, Action<System.Data.IDbCommand, long, string> filtercommand, float pgoffset, float pgspan)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Begin);
    Logging.Log.WriteInformationMessage(LOGTAG, "StartingPurge", "Starting purge operation");

    var doCompactStep = !m_options.NoAutoCompact && filtercommand == null;

    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
    {
        if (db.PartiallyRecreated)
            throw new UserInformationException("The purge command does not work on partially recreated databases", "PurgeNotAllowedOnPartialDatabase");

        if (db.RepairInProgress && filtercommand == null)
            throw new UserInformationException(string.Format("The purge command does not work on an incomplete database, try the {0} operation.", "purge-broken-files"), "PurgeNotAllowedOnIncompleteDatabase");

        var versions = db.GetFilesetIDs(m_options.Time, m_options.Version).OrderByDescending(x => x).ToArray();
        if (versions.Length <= 0)
            throw new UserInformationException("No filesets matched the supplied time or versions", "NoFilesetFoundForTimeOrVersion");

        var orphans = db.CountOrphanFiles(null);
        if (orphans != 0)
            throw new UserInformationException(string.Format("Unable to start the purge process as there are {0} orphan file(s)", orphans), "CannotPurgeWithOrphans");

        Utility.UpdateOptionsFromDb(db, m_options);
        Utility.VerifyParameters(db, m_options);

        if (filtercommand == null)
        {
            db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, false, null);

            if (m_options.NoBackendverification)
                FilelistProcessor.VerifyLocalList(backend, db);
            else
                FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, null);
        }

        var filesets = db.FilesetTimes.OrderByDescending(x => x.Value).ToArray();

        var versionprogress = ((doCompactStep ? 0.75f : 1.0f) / versions.Length) * pgspan;
        var currentprogress = pgoffset;
        var progress = 0;

        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Process);
        m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

        // Reverse makes sure we re-write the old versions first
        foreach (var versionid in versions.Reverse())
        {
            progress++;
            Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingFilelistVolumes", "Processing filelist volume {0} of {1}", progress, versions.Length);

            using (var tr = db.BeginTransaction())
            {
                var ix = -1;
                for (var i = 0; i < filesets.Length; i++)
                {
                    if (filesets[i].Key == versionid)
                    {
                        ix = i;
                        break;
                    }
                }

                if (ix < 0)
                    throw new InvalidProgramException(string.Format("Fileset was reported with id {0}, but could not be found?", versionid));

                // Find an unused timestamp at most 60 seconds after the original
                var secs = 0;
                while (secs < 60)
                {
                    secs++;
                    var tfn = Volumes.VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, filesets[ix].Value.AddSeconds(secs));
                    if (db.GetRemoteVolumeID(tfn, tr) < 0)
                        break;
                }

                var tsOriginal = filesets[ix].Value;
                var ts = tsOriginal.AddSeconds(secs);

                var prevfilename = db.GetRemoteVolumeNameForFileset(filesets[ix].Key, tr);

                if (secs >= 60)
                    throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is more than 60 seconds away", prevfilename, ts));

                if (ix != 0 && filesets[ix - 1].Value <= ts)
                    throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is larger than the next timestamp {2}", prevfilename, ts, filesets[ix - 1].Value));

                using (var tempset = db.CreateTemporaryFileset(versionid, tr))
                {
                    if (filtercommand == null)
                        tempset.ApplyFilter(filter);
                    else
                        tempset.ApplyFilter(filtercommand);

                    if (tempset.RemovedFileCount == 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "NotWritingNewFileset", "Not writing a new fileset for {0} as it was not changed", prevfilename);
                        currentprogress += versionprogress;
                        tr.Rollback();
                        continue;
                    }
                    else
                    {
                        using (var tf = new Library.Utility.TempFile())
                        using (var vol = new Volumes.FilesetVolumeWriter(m_options, ts))
                        {
                            var isOriginalFilesetFullBackup = db.IsFilesetFullBackup(tsOriginal);
                            var newids = tempset.ConvertToPermanentFileset(vol.RemoteFilename, ts, isOriginalFilesetFullBackup);
                            vol.VolumeID = newids.Item1;
                            vol.CreateFilesetFile(isOriginalFilesetFullBackup);

                            Logging.Log.WriteInformationMessage(LOGTAG, "ReplacingFileset", "Replacing fileset {0} with {1} which has {2} fewer file(s) ({3} reduction)", prevfilename, vol.RemoteFilename, tempset.RemovedFileCount, Library.Utility.Utility.FormatSizeString(tempset.RemovedFileSize));

                            db.WriteFileset(vol, newids.Item2, tr);

                            m_result.RemovedFileSize += tempset.RemovedFileSize;
                            m_result.RemovedFileCount += tempset.RemovedFileCount;
                            m_result.RewrittenFileLists++;

                            currentprogress += (versionprogress / 2);
                            m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

                            if (m_options.Dryrun || m_options.FullResult)
                            {
                                foreach (var fe in tempset.ListAllDeletedFiles())
                                {
                                    var msg = string.Format(" Purging file {0} ({1})", fe.Key, Library.Utility.Utility.FormatSizeString(fe.Value));

                                    Logging.Log.WriteProfilingMessage(LOGTAG, "PurgeFile", msg);
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "PurgeFile", msg);

                                    if (m_options.Dryrun)
                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldPurgeFile", msg);
                                }

                                if (m_options.Dryrun)
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldWriteRemoteFiles", "Would write files to remote storage");

                                Logging.Log.WriteVerboseMessage(LOGTAG, "WritingRemoteFiles", "Writing files to remote storage");
                            }

                            if (m_options.Dryrun)
                            {
                                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadAndDelete", "Would upload file {0} ({1}) and delete file {2}, removing {3} files", vol.RemoteFilename, Library.Utility.Utility.FormatSizeString(vol.Filesize), prevfilename, tempset.RemovedFileCount);
                                tr.Rollback();
                            }
                            else
                            {
                                var lst = db.DropFilesetsFromTable(new[] { filesets[ix].Value }, tr).ToArray();
                                foreach (var f in lst)
                                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, tr);

                                tr.Commit();

                                backend.Put(vol, synchronous: true);
                                backend.Delete(prevfilename, -1, true);
                                backend.FlushDbMessages();
                            }
                        }
                    }
                }
            }

            currentprogress += (versionprogress / 2);
            m_result.OperationProgressUpdater.UpdateProgress(currentprogress);
        }

        if (doCompactStep)
        {
            if (m_result.RewrittenFileLists == 0)
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "SkippingCompacting", "Skipping compacting as no new volumes were written");
            }
            else
            {
                m_result.OperationProgressUpdater.UpdateProgress(pgoffset + (0.75f * pgspan));
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Compact);

                m_result.CompactResults = new CompactResults(m_result);
                using (var cdb = new Database.LocalDeleteDatabase(db))
                {
                    var tr = cdb.BeginTransaction();
                    try
                    {
                        new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(cdb, true, ref tr, backend);
                    }
                    catch
                    {
                        try { tr.Rollback(); }
                        catch { }
                    }
                    finally
                    {
                        try { tr.Commit(); }
                        catch { }
                    }
                }
            }

            m_result.OperationProgressUpdater.UpdateProgress(pgoffset + pgspan);
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Complete);
        }

        backend.WaitForComplete(db, null);
    }
}
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to use</param>
/// <param name="protectedfile">A filename that should be exempted from deletion</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, string protectedfile)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();
    protectedfile = protectedfile ?? string.Empty;

    var remotelist = (from n in rawlist
                      let p = Volumes.VolumeBase.ParseFilename(n)
                      where p != null && p.Prefix == options.Prefix
                      select p).ToList();
    var otherlist = (from n in rawlist
                     let p = Volumes.VolumeBase.ParseFilename(n)
                     where p != null && p.Prefix != options.Prefix
                     select p).ToList();
    var unknownlist = (from n in rawlist
                       let p = Volumes.VolumeBase.ParseFilename(n)
                       where p == null
                       select n).ToList();
    var filesets = (from n in remotelist
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n).ToList();

    log.KnownFileCount = remotelist.Count;
    log.KnownFileSize = remotelist.Select(x => Math.Max(0, x.File.Size)).Sum();
    log.UnknownFileCount = unknownlist.Count;
    log.UnknownFileSize = unknownlist.Select(x => Math.Max(0, x.Size)).Sum();
    log.BackupListCount = filesets.Count;
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    // TODO: We should query through the backendmanager
    using (var bk = DynamicLoader.BackendLoader.GetBackend(backend.BackendUrl, options.RawOptions))
        if (bk is Library.Interface.IQuotaEnabledBackend)
        {
            Library.Interface.IQuotaInfo quota = ((Library.Interface.IQuotaEnabledBackend)bk).Quota;
            if (quota != null)
            {
                log.TotalQuotaSpace = quota.TotalQuotaSpace;
                log.FreeQuotaSpace = quota.FreeQuotaSpace;
            }
        }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist)
    {
        lookup[s.File.Name] = s;
    }

    var missing = new List<RemoteVolumeEntry>();
    var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
    var cleanupRemovedRemoteVolumes = new HashSet<string>();

    foreach (var e in database.DuplicateRemoteVolumes())
    {
        if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary)
        {
            database.UnlinkRemoteVolume(e.Key, e.Value);
        }
        else
        {
            throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString()));
        }
    }

    var locallist = database.GetRemoteVolumes();
    foreach (var i in locallist)
    {
        Volumes.IParsedVolume r;
        var remoteFound = lookup.TryGetValue(i.Name, out r);
        var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

        lookup.Remove(i.Name);

        switch (i.State)
        {
            case RemoteVolumeState.Deleted:
                if (remoteFound)
                {
                    log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));
                }
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
                if (remoteFound)
                {
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                else
                {
                    if (i.DeleteGracePeriod > DateTime.UtcNow)
                    {
                        log.AddMessage(string.Format("keeping delete request for {0} until {1}", i.Name, i.DeleteGracePeriod.ToLocalTime()));
                    }
                    else
                    {
                        if (string.Equals(i.Name, protectedfile) && i.State == RemoteVolumeState.Temporary)
                        {
                            log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        }
                        else
                        {
                            log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                        }
                    }
                }
                break;

            case RemoteVolumeState.Uploading:
                if (remoteFound && correctSize && r.File.Size >= 0)
                {
                    log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                }
                else if (!remoteFound)
                {
                    if (string.Equals(i.Name, protectedfile))
                    {
                        log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Temporary, i.Size, i.Hash, false, new TimeSpan(0), null);
                    }
                    else
                    {
                        log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                        cleanupRemovedRemoteVolumes.Add(i.Name);
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                    }
                }
                else
                {
                    if (string.Equals(i.Name, protectedfile))
                    {
                        log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                    }
                    else
                    {
                        log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        backend.Delete(i.Name, i.Size, true);
                    }
                }
                break;

            case RemoteVolumeState.Uploaded:
                if (!remoteFound)
                {
                    missing.Add(i);
                }
                else if (correctSize)
                {
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                }
                else
                {
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                }
                break;

            case RemoteVolumeState.Verified:
                if (!remoteFound)
                {
                    missing.Add(i);
                }
                else if (!correctSize)
                {
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                }
                break;

            default:
                log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                break;
        }

        backend.FlushDbMessages();
    }

    // Clean up deleted volumes in the database en bloc
    database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

    foreach (var i in missingHash)
    {
        log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);
    }

    return new RemoteAnalysisResult()
    {
        ParsedVolumes = remotelist,
        OtherVolumes = otherlist,
        ExtraVolumes = lookup.Values,
        MissingVolumes = missing,
        VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
    };
}
/// <summary>
/// Helper method that verifies uploaded volumes and updates their state in the database.
/// Throws an error if there are issues with the remote storage
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="options">The options used</param>
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to use</param>
public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
{
    var rawlist = backend.List();
    var lookup = new Dictionary<string, Volumes.IParsedVolume>();

    var remotelist = (from n in rawlist
                      let p = Volumes.VolumeBase.ParseFilename(n)
                      where p != null && p.Prefix == options.Prefix
                      select p).ToList();
    var otherlist = (from n in rawlist
                     let p = Volumes.VolumeBase.ParseFilename(n)
                     where p != null && p.Prefix != options.Prefix
                     select p).ToList();
    var unknownlist = (from n in rawlist
                       let p = Volumes.VolumeBase.ParseFilename(n)
                       where p == null
                       select n).ToList();
    var filesets = (from n in remotelist
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n).ToList();

    log.KnownFileCount = remotelist.Count();
    log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
    log.UnknownFileCount = unknownlist.Count();
    log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
    log.BackupListCount = filesets.Count;
    log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

    if (backend is Library.Interface.IQuotaEnabledBackend)
    {
        log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
        log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
    }

    log.AssignedQuotaSpace = options.QuotaSize;

    foreach (var s in remotelist)
        lookup[s.File.Name] = s;

    var missing = new List<RemoteVolumeEntry>();
    var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
    var cleanupRemovedRemoteVolumes = new HashSet<string>();

    foreach (var e in database.DuplicateRemoteVolumes())
    {
        if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary)
            database.UnlinkRemoteVolume(e.Key, e.Value);
        else
            throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString()));
    }

    var locallist = database.GetRemoteVolumes();
    foreach (var i in locallist)
    {
        Volumes.IParsedVolume r;
        var remoteFound = lookup.TryGetValue(i.Name, out r);
        var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

        lookup.Remove(i.Name);

        switch (i.State)
        {
            case RemoteVolumeState.Deleted:
                if (remoteFound)
                    log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));
                break;

            case RemoteVolumeState.Temporary:
            case RemoteVolumeState.Deleting:
                if (remoteFound)
                {
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                else
                {
                    if (i.DeleteGracePeriod > DateTime.UtcNow)
                    {
                        log.AddMessage(string.Format("keeping delete request for {0} until {1}", i.Name, i.DeleteGracePeriod.ToLocalTime()));
                    }
                    else
                    {
                        log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                        cleanupRemovedRemoteVolumes.Add(i.Name);
                    }
                }
                break;

            case RemoteVolumeState.Uploading:
                if (remoteFound && correctSize && r.File.Size >= 0)
                {
                    log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                }
                else if (!remoteFound)
                {
                    log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                    cleanupRemovedRemoteVolumes.Add(i.Name);
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                }
                else
                {
                    log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                }
                break;

            case RemoteVolumeState.Uploaded:
                if (!remoteFound)
                    missing.Add(i);
                else if (correctSize)
                    database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                else
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                break;

            case RemoteVolumeState.Verified:
                if (!remoteFound)
                    missing.Add(i);
                else if (!correctSize)
                    missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                break;

            default:
                log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                break;
        }

        backend.FlushDbMessages();
    }

    // Clean up deleted volumes in the database en bloc
    database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

    foreach (var i in missingHash)
        log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);

    return new RemoteAnalysisResult()
    {
        ParsedVolumes = remotelist,
        OtherVolumes = otherlist,
        ExtraVolumes = lookup.Values,
        MissingVolumes = missing,
        VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
    };
}
private bool HandleFilesystemEntry(Snapshots.ISnapshotService snapshot, BackendManager backend, string path, System.IO.FileAttributes attributes)
{
    // If we lost the connection, there is no point in keeping on processing
    if (backend.HasDied)
        throw backend.LastException;

    try
    {
        m_result.OperationProgressUpdater.StartFile(path, -1);

        if (m_backendLogFlushTimer < DateTime.Now)
        {
            m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
            backend.FlushDbMessages(m_database, null);
        }

        DateTime lastwrite = new DateTime(0, DateTimeKind.Utc);
        try
        {
            lastwrite = snapshot.GetLastWriteTimeUtc(path);
        }
        catch (Exception ex)
        {
            m_result.AddWarning(string.Format("Failed to read timestamp on \"{0}\"", path), ex);
        }

        if ((attributes & FileAttributes.ReparsePoint) == FileAttributes.ReparsePoint)
        {
            if (m_options.SymlinkPolicy == Options.SymlinkStrategy.Ignore)
            {
                m_result.AddVerboseMessage("Ignoring symlink {0}", path);
                return false;
            }

            if (m_options.SymlinkPolicy == Options.SymlinkStrategy.Store)
            {
                Dictionary<string, string> metadata = GenerateMetadata(snapshot, path, attributes);

                if (!metadata.ContainsKey("CoreSymlinkTarget"))
                {
                    var p = snapshot.GetSymlinkTarget(path);
                    if (string.IsNullOrWhiteSpace(p))
                        m_result.AddVerboseMessage("Ignoring empty symlink {0}", path);
                    else
                        metadata["CoreSymlinkTarget"] = p;
                }

                var metahash = Utility.WrapMetadata(metadata, m_options);
                AddSymlinkToOutput(backend, path, DateTime.UtcNow, metahash);
                m_result.AddVerboseMessage("Stored symlink {0}", path);

                //Do not recurse symlinks
                return false;
            }
        }

        if ((attributes & FileAttributes.Directory) == FileAttributes.Directory)
        {
            IMetahash metahash;
            if (m_options.StoreMetadata)
                metahash = Utility.WrapMetadata(GenerateMetadata(snapshot, path, attributes), m_options);
            else
                metahash = EMPTY_METADATA;

            m_result.AddVerboseMessage("Adding directory {0}", path);
            AddFolderToOutput(backend, path, lastwrite, metahash);
            return true;
        }

        m_result.OperationProgressUpdater.UpdatefilesProcessed(++m_result.ExaminedFiles, m_result.SizeOfExaminedFiles);

        bool changed = false;

        // Last scan time
        DateTime oldModified;
        long lastFileSize = -1;
        string oldMetahash;
        long oldMetasize;
        var oldId = m_database.GetFileEntry(path, out oldModified, out lastFileSize, out oldMetahash, out oldMetasize);

        long filestatsize = -1;
        try { filestatsize = snapshot.GetFileSize(path); }
        catch { }

        IMetahash metahashandsize = m_options.StoreMetadata ? Utility.WrapMetadata(GenerateMetadata(snapshot, path, attributes), m_options) : EMPTY_METADATA;

        var timestampChanged = lastwrite != oldModified || lastwrite.Ticks == 0 || oldModified.Ticks == 0;
        var filesizeChanged = filestatsize < 0 || lastFileSize < 0 || filestatsize != lastFileSize;
        var tooLargeFile = m_options.SkipFilesLargerThan != long.MaxValue && m_options.SkipFilesLargerThan != 0 && filestatsize >= 0 && filestatsize > m_options.SkipFilesLargerThan;
        var metadatachanged = !m_options.SkipMetadata && (metahashandsize.Size != oldMetasize || metahashandsize.Hash != oldMetahash);

        if ((oldId < 0 || m_options.DisableFiletimeCheck || timestampChanged || filesizeChanged || metadatachanged) && !tooLargeFile)
        {
            m_result.AddVerboseMessage("Checking file for changes {0}, new: {1}, timestamp changed: {2}, size changed: {3}, metadatachanged: {4}, {5} vs {6}", path, oldId <= 0, timestampChanged, filesizeChanged, metadatachanged, lastwrite, oldModified);

            m_result.OpenedFiles++;

            long filesize = 0;
            var hint = m_options.GetCompressionHintFromFilename(path);
            var oldHash = oldId < 0 ? null : m_database.GetFileHash(oldId);

            using (var blocklisthashes = new Library.Utility.FileBackedStringList())
            using (var hashcollector = new Library.Utility.FileBackedStringList())
            {
                using (var fs = new Blockprocessor(snapshot.OpenRead(path), m_blockbuffer))
                {
                    try { m_result.OperationProgressUpdater.StartFile(path, fs.Length); }
                    catch (Exception ex) { m_result.AddWarning(string.Format("Failed to read file length for file {0}", path), ex); }

                    int blocklistoffset = 0;

                    m_filehasher.Initialize();

                    var offset = 0;
                    var remaining = fs.Readblock();

                    do
                    {
                        var size = Math.Min(m_blocksize, remaining);

                        m_filehasher.TransformBlock(m_blockbuffer, offset, size, m_blockbuffer, offset);
                        var blockkey = m_blockhasher.ComputeHash(m_blockbuffer, offset, size);
                        if (m_blocklistbuffer.Length - blocklistoffset < blockkey.Length)
                        {
                            var blkey = Convert.ToBase64String(m_blockhasher.ComputeHash(m_blocklistbuffer, 0, blocklistoffset));
                            blocklisthashes.Add(blkey);
                            AddBlockToOutput(backend, blkey, m_blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true);
                            blocklistoffset = 0;
                        }

                        Array.Copy(blockkey, 0, m_blocklistbuffer, blocklistoffset, blockkey.Length);
                        blocklistoffset += blockkey.Length;

                        var key = Convert.ToBase64String(blockkey);
                        AddBlockToOutput(backend, key, m_blockbuffer, offset, size, hint, false);
                        hashcollector.Add(key);
                        filesize += size;

                        m_result.OperationProgressUpdater.UpdateFileProgress(filesize);
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            return false;

                        remaining -= size;
                        offset += size;

                        if (remaining == 0)
                        {
                            offset = 0;
                            remaining = fs.Readblock();
                        }
                    } while (remaining > 0);

                    //If all fits in a single block, don't bother with blocklists
                    if (hashcollector.Count > 1)
                    {
                        var blkeyfinal = Convert.ToBase64String(m_blockhasher.ComputeHash(m_blocklistbuffer, 0, blocklistoffset));
                        blocklisthashes.Add(blkeyfinal);
                        AddBlockToOutput(backend, blkeyfinal, m_blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true);
                    }
                }

                m_result.SizeOfOpenedFiles += filesize;
                m_filehasher.TransformFinalBlock(m_blockbuffer, 0, 0);

                var filekey = Convert.ToBase64String(m_filehasher.Hash);
                if (oldHash != filekey)
                {
                    if (oldHash == null)
                        m_result.AddVerboseMessage("New file {0}", path);
                    else
                        m_result.AddVerboseMessage("File has changed {0}", path);

                    if (oldId < 0)
                    {
                        m_result.AddedFiles++;
                        m_result.SizeOfAddedFiles += filesize;

                        if (m_options.Dryrun)
                            m_result.AddDryrunMessage(string.Format("Would add new file {0}, size {1}", path, Library.Utility.Utility.FormatSizeString(filesize)));
                    }
                    else
                    {
                        m_result.ModifiedFiles++;
                        m_result.SizeOfModifiedFiles += filesize;

                        if (m_options.Dryrun)
                            m_result.AddDryrunMessage(string.Format("Would add changed file {0}, size {1}", path, Library.Utility.Utility.FormatSizeString(filesize)));
                    }

                    AddFileToOutput(backend, path, filesize, lastwrite, metahashandsize, hashcollector, filekey, blocklisthashes);
                    changed = true;
                }
                else if (metadatachanged)
                {
                    m_result.AddVerboseMessage("File has only metadata changes {0}", path);
                    AddFileToOutput(backend, path, filesize, lastwrite, metahashandsize, hashcollector, filekey, blocklisthashes);
                    changed = true;
                }
                else
                {
                    // When we write the file to output, update the last modified time
                    oldModified = lastwrite;
                    m_result.AddVerboseMessage("File has not changed {0}", path);
                }
            }
        }
        else
        {
            if (m_options.SkipFilesLargerThan == long.MaxValue || m_options.SkipFilesLargerThan == 0 || snapshot.GetFileSize(path) < m_options.SkipFilesLargerThan)
                m_result.AddVerboseMessage("Skipped checking file, because timestamp was not updated {0}", path);
            else
                m_result.AddVerboseMessage("Skipped checking file, because the size exceeds limit {0}", path);
        }

        if (!changed)
            AddUnmodifiedFile(oldId, lastwrite);

        m_result.SizeOfExaminedFiles += filestatsize;
        if (filestatsize != 0)
            m_result.OperationProgressUpdater.UpdatefilesProcessed(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles);
    }
    catch (Exception ex)
    {
        m_result.AddWarning(string.Format("Failed to process path: {0}", path), ex);
        m_result.FilesWithError++;
    }

    return true;
}
/// <summary>
/// Adds the found file data to the output unless the block already exists
/// </summary>
/// <param name="backend">The backend instance to use</param>
/// <param name="key">The block hash</param>
/// <param name="data">The data matching the hash</param>
/// <param name="len">The size of the data</param>
/// <param name="offset">The offset into the data</param>
/// <param name="hint">Hint for compression module</param>
/// <param name="isBlocklistData">Indicates if the block is list data</param>
private bool AddBlockToOutput(BackendManager backend, string key, byte[] data, int offset, int len, CompressionHint hint, bool isBlocklistData)
{
    if (m_blockvolume == null)
    {
        m_blockvolume = new BlockVolumeWriter(m_options);
        m_blockvolume.VolumeID = m_database.RegisterRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, m_transaction);

        if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
        {
            m_indexvolume = new IndexVolumeWriter(m_options);
            m_indexvolume.VolumeID = m_database.RegisterRemoteVolume(m_indexvolume.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, m_transaction);
        }
    }

    if (m_database.AddBlock(key, len, m_blockvolume.VolumeID, m_transaction))
    {
        m_blockvolume.AddBlock(key, data, offset, len, hint);

        //TODO: In theory a normal data block and blocklist block could be equal.
        // This would cause the index file to not contain all data,
        // if the data file is added before the blocklist data
        // ... highly theoretical ...
        if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full && isBlocklistData)
            m_indexvolume.WriteBlocklist(key, data, offset, len);

        if (m_blockvolume.Filesize > m_options.VolumeSize - m_options.Blocksize)
        {
            if (m_options.Dryrun)
            {
                m_blockvolume.Close();
                m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));

                if (m_indexvolume != null)
                {
                    UpdateIndexVolume();
                    m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                    m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));
                    m_indexvolume.Dispose();
                    m_indexvolume = null;
                }

                m_blockvolume.Dispose();
                m_blockvolume = null;
            }
            else
            {
                //When uploading a new volume, we register the volumes and then flush the transaction
                // this ensures that the local database and remote storage are as closely related as possible
                m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                m_blockvolume.Close();

                UpdateIndexVolume();

                backend.FlushDbMessages(m_database, m_transaction);
                m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);

                using (new Logging.Timer("CommitAddBlockToOutputFlush"))
                    m_transaction.Commit();
                m_transaction = m_database.BeginTransaction();

                backend.Put(m_blockvolume, m_indexvolume);
                m_blockvolume = null;
                m_indexvolume = null;

                using (new Logging.Timer("CommitAddBlockToOutputFlush"))
                    m_transaction.Commit();
                m_transaction = m_database.BeginTransaction();
            }
        }

        return true;
    }

    return false;
}
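// Illustrative arithmetic for the rollover check in AddBlockToOutput: a
// volume is closed and queued for upload once Filesize exceeds
// VolumeSize - Blocksize. With, say, 50 MiB volumes and 100 KiB blocks, that
// threshold is 52,428,800 - 102,400 = 52,326,400 bytes, which keeps the
// finished volume close to, and not meaningfully above, the requested size.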