public virtual void Run()
{
    if (!System.IO.File.Exists(m_options.Dbpath))
        throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));

    using(var db = new LocalDeleteDatabase(m_options.Dbpath, true))
    {
        // DoCompact commits and replaces the transaction as it flushes deletions,
        // so it must be passed by ref and cannot live in a using-block
        var tr = db.BeginTransaction();
        try
        {
            m_result.SetDatabase(db);
            Utility.UpdateOptionsFromDb(db, m_options);
            Utility.VerifyParameters(db, m_options);

            var changed = DoCompact(db, false, ref tr);

            if (changed && m_options.UploadVerificationFile)
                FilelistProcessor.UploadVerificationFile(m_backendurl, m_options, m_result.BackendWriter, db, null);

            if (!m_options.Dryrun)
            {
                using(new Logging.Timer("CommitCompact"))
                    tr.Commit();
                if (changed)
                {
                    db.WriteResults();
                    db.Vacuum();
                }
            }
            else
                tr.Rollback();
        }
        finally
        {
            // Disposing an uncommitted transaction rolls it back,
            // matching the semantics of the using-block this replaces
            if (tr != null)
                tr.Dispose();
        }
    }
}
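/// <summary>
/// Runs a full backup: finalizes any fileset left behind by an interrupted
/// backup, verifies the remote file list, scans the sources (or processes a
/// supplied change list), uploads block/index/fileset volumes, and then
/// optionally deletes, compacts, and tests remote volumes.
/// </summary>
/// <param name="sources">The source paths to back up</param>
/// <param name="filter">An optional include/exclude filter; null is treated as an empty filter</param>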
public void Run(string[] sources, Library.Utility.IFilter filter)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

    using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
    {
        m_result.SetDatabase(m_database);
        m_result.Dryrun = m_options.Dryrun;

        Utility.VerifyParameters(m_database, m_options);
        m_database.VerifyConsistency(null);

        // If there is no filter, we set an empty filter to simplify the code.
        // If there is a filter, we make sure that the sources are included.
        m_filter = filter ?? new Library.Utility.FilterExpression();
        m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

        var lastVolumeSize = -1L;
        m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
        System.Threading.Thread parallelScanner = null;

        try
        {
            m_snapshot = GetSnapshot(sources, m_options, m_result);

            // Start the parallel scanner, unless a pre-computed change list was supplied
            if (m_options.ChangedFilelist == null || m_options.ChangedFilelist.Length < 1)
            {
                parallelScanner = new System.Threading.Thread(CountFilesThread) {
                    Name = "Read ahead file counter",
                    IsBackground = true
                };
                parallelScanner.Start();
            }

            using(m_backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            using(m_filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
            {
                var incompleteFilesets = m_database.GetIncompleteFilesets(null).OrderBy(x => x.Value).ToArray();
                if (incompleteFilesets.Length != 0)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
                    m_result.AddMessage("Uploading filelist from previous interrupted backup");

                    using(var trn = m_database.BeginTransaction())
                    {
                        var incompleteSet = incompleteFilesets.Last();
                        var badIds = from n in incompleteFilesets select n.Key;
                        var prevs = (from n in m_database.FilesetTimes
                                     where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                                     orderby n.Key
                                     select n.Key).ToArray();
                        var prevId = prevs.Length == 0 ? -1 : prevs.Last();

                        FilesetVolumeWriter fsw = null;
                        try
                        {
                            var s = 1;
                            var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                            var oldFilesetID = incompleteSet.Key;

                            // Probe for an unused filename
                            while (s < 60)
                            {
                                var id = m_database.GetRemoteVolumeID(VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, fileTime));
                                if (id < 0)
                                    break;
                                fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                            }

                            fsw = new FilesetVolumeWriter(m_options, fileTime);
                            fsw.VolumeID = m_database.RegisterRemoteVolume(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, trn);
                            var newFilesetID = m_database.CreateFileset(fsw.VolumeID, fileTime, trn);
                            m_database.LinkFilesetToVolume(newFilesetID, fsw.VolumeID, trn);
                            m_database.AppendFilesFromPreviousSet(trn, null, newFilesetID, prevId, fileTime);
                            m_database.WriteFileset(fsw, trn, newFilesetID);

                            if (m_options.Dryrun)
                            {
                                m_result.AddDryrunMessage(string.Format("Would upload fileset: {0}, size: {1}", fsw.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(fsw.LocalFilename).Length)));
                            }
                            else
                            {
                                m_database.UpdateRemoteVolume(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null, trn);

                                using(new Logging.Timer("CommitUpdateFilelistVolume"))
                                    trn.Commit();

                                m_backend.Put(fsw);
                                fsw = null;
                            }
                        }
                        finally
                        {
                            if (fsw != null)
                                try { fsw.Dispose(); }
                                catch { }
                                finally { fsw = null; }
                        }
                    }
                }

                if (!m_options.NoBackendverification)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
                    using(new Logging.Timer("PreBackupVerify"))
                    {
                        try
                        {
                            FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                        }
                        catch (Exception ex)
                        {
                            if (m_options.AutoCleanup)
                            {
                                m_result.AddWarning("Backend verification failed, attempting automatic cleanup", ex);
                                m_result.RepairResults = new RepairResults(m_result);
                                new RepairHandler(m_backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                                m_result.AddMessage("Backend cleanup finished, retrying verification");
                                FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                            }
                            else
                                throw;
                        }
                    }
                }

                m_database.BuildLookupTable(m_options);
                m_transaction = m_database.BeginTransaction();

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);
                var filesetvolumeid = m_database.RegisterRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                m_database.CreateFileset(filesetvolumeid, VolumeBase.ParseFilename(m_filesetvolume.RemoteFilename).Time, m_transaction);

                m_blockvolume = new BlockVolumeWriter(m_options);
                m_blockvolume.VolumeID = m_database.RegisterRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, m_transaction);

                if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                {
                    m_indexvolume = new IndexVolumeWriter(m_options);
                    m_indexvolume.VolumeID = m_database.RegisterRemoteVolume(m_indexvolume.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, m_transaction);
                }

                var filterhandler = new FilterHandler(m_snapshot, m_attributeFilter, m_sourceFilter, m_filter, m_symlinkPolicy, m_options.HardlinkPolicy, m_result);

                using(new Logging.Timer("BackupMainOperation"))
                {
                    if (m_options.ChangedFilelist != null && m_options.ChangedFilelist.Length >= 1)
                    {
                        m_result.AddVerboseMessage("Processing supplied change list instead of enumerating filesystem");
                        m_result.OperationProgressUpdater.UpdatefileCount(m_options.ChangedFilelist.Length, 0, true);

                        foreach(var p in m_options.ChangedFilelist)
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                m_result.AddMessage("Stopping backup operation on request");
                                break;
                            }

                            FileAttributes fa = new FileAttributes();
                            try
                            {
                                fa = m_snapshot.GetAttributes(p);
                            }
                            catch (Exception ex)
                            {
                                m_result.AddWarning(string.Format("Failed to read attributes: {0}, message: {1}", p, ex.Message), ex);
                            }

                            if (filterhandler.AttributeFilter(null, p, fa))
                            {
                                try
                                {
                                    this.HandleFilesystemEntry(p, fa);
                                }
                                catch (Exception ex)
                                {
                                    m_result.AddWarning(string.Format("Failed to process element: {0}, message: {1}", p, ex.Message), ex);
                                }
                            }
                        }

                        m_database.AppendFilesFromPreviousSet(m_transaction, m_options.DeletedFilelist);
                    }
                    else
                    {
                        foreach(var path in m_snapshot.EnumerateFilesAndFolders(filterhandler.AttributeFilter))
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                m_result.AddMessage("Stopping backup operation on request");
                                break;
                            }

                            this.HandleFilesystemEntry(path, m_snapshot.GetAttributes(path));
                        }
                    }

                    // If the scanner is still running for some reason, make sure we kill it now
                    if (parallelScanner != null && parallelScanner.IsAlive)
                        parallelScanner.Abort();

                    // We no longer need the snapshot active
                    try { m_snapshot.Dispose(); }
                    finally { m_snapshot = null; }

                    m_result.OperationProgressUpdater.UpdatefileCount(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles, true);
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Finalize);
                using(new Logging.Timer("FinalizeRemoteVolumes"))
                {
                    if (m_blockvolume.SourceSize > 0)
                    {
                        lastVolumeSize = m_blockvolume.SourceSize;

                        if (m_options.Dryrun)
                        {
                            m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));

                            if (m_indexvolume != null)
                            {
                                m_blockvolume.Close();
                                UpdateIndexVolume();
                                m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                                m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));

                                // Only dispose the index volume if one was created,
                                // otherwise this would throw a NullReferenceException
                                m_indexvolume.Dispose();
                                m_indexvolume = null;
                            }

                            m_blockvolume.Dispose();
                            m_blockvolume = null;
                        }
                        else
                        {
                            m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                            m_blockvolume.Close();
                            UpdateIndexVolume();

                            using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                m_transaction.Commit();
                            m_transaction = m_database.BeginTransaction();

                            m_backend.Put(m_blockvolume, m_indexvolume);
                            m_blockvolume = null;
                            m_indexvolume = null;
                        }
                    }
                    else
                    {
                        m_database.RemoveRemoteVolume(m_blockvolume.RemoteFilename, m_transaction);
                        if (m_indexvolume != null)
                            m_database.RemoveRemoteVolume(m_indexvolume.RemoteFilename, m_transaction);
                    }
                }

                using(new Logging.Timer("UpdateChangeStatistics"))
                    m_database.UpdateChangeStatistics(m_result);
                using(new Logging.Timer("VerifyConsistency"))
                    m_database.VerifyConsistency(m_transaction);

                var changeCount =
                    m_result.AddedFiles + m_result.ModifiedFiles + m_result.DeletedFiles +
                    m_result.AddedFolders + m_result.ModifiedFolders + m_result.DeletedFolders +
                    m_result.AddedSymlinks + m_result.ModifiedSymlinks + m_result.DeletedSymlinks;

                // Changes in the filelist trigger a filelist upload
                if (m_options.UploadUnchangedBackups || changeCount > 0)
                {
                    using(new Logging.Timer("Uploading a new fileset"))
                    {
                        if (!string.IsNullOrEmpty(m_options.ControlFiles))
                            foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                                m_filesetvolume.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

                        m_database.WriteFileset(m_filesetvolume, m_transaction);
                        m_filesetvolume.Close();

                        if (m_options.Dryrun)
                            m_result.AddDryrunMessage(string.Format("Would upload fileset volume: {0}, size: {1}", m_filesetvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_filesetvolume.LocalFilename).Length)));
                        else
                        {
                            m_database.UpdateRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);

                            using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                m_transaction.Commit();
                            m_transaction = m_database.BeginTransaction();

                            m_backend.Put(m_filesetvolume);
                        }
                    }
                }
                else
                {
                    m_result.AddVerboseMessage("Removing temp files, as no data needs to be uploaded");
                    m_database.RemoveRemoteVolume(m_filesetvolume.RemoteFilename, m_transaction);
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                using(new Logging.Timer("Async backend wait"))
                    m_backend.WaitForComplete(m_database, m_transaction);

                if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                {
                    if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
                        m_result.DeleteResults = new DeleteResults(m_result);
                        using(var db = new LocalDeleteDatabase(m_database))
                            new DeleteHandler(m_backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, ref m_transaction, true, lastVolumeSize <= m_options.SmallFileSize);
                    }
                    else if (lastVolumeSize <= m_options.SmallFileSize && !m_options.NoAutoCompact)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
                        m_result.CompactResults = new CompactResults(m_result);
                        using(var db = new LocalDeleteDatabase(m_database))
                            new CompactHandler(m_backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref m_transaction);
                    }
                }

                if (m_options.UploadVerificationFile)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                    FilelistProcessor.UploadVerificationFile(m_backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                }

                if (m_options.Dryrun)
                {
                    m_transaction.Rollback();
                    m_transaction = null;
                }
                else
                {
                    using(new Logging.Timer("CommitFinalizingBackup"))
                        m_transaction.Commit();
                    m_transaction = null;
                    m_database.Vacuum();

                    if (m_result.TaskControlRendevouz() != TaskControlState.Stop && !m_options.NoBackendverification)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
                        using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                        {
                            using(new Logging.Timer("AfterBackupVerify"))
                                FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                            backend.WaitForComplete(m_database, null);
                        }

                        if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                            m_result.TestResults = new TestResults(m_result);

                            using(var testdb = new LocalTestDatabase(m_database))
                            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                                new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                                    .DoRun(m_options.BackupTestSampleCount, testdb, backend);
                        }
                    }
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                m_database.WriteResults();
                return;
            }
        }
        catch (Exception ex)
        {
            m_result.AddError("Fatal error", ex);
            throw;
        }
        finally
        {
            if (parallelScanner != null && parallelScanner.IsAlive)
            {
                parallelScanner.Abort();
                parallelScanner.Join(500);
                if (parallelScanner.IsAlive)
                    m_result.AddWarning("Failed to terminate filecounter thread", null);
            }

            if (m_snapshot != null)
                try { m_snapshot.Dispose(); }
                catch (Exception ex) { m_result.AddError("Failed to dispose snapshot", ex); }
                finally { m_snapshot = null; }

            if (m_transaction != null)
                try { m_transaction.Rollback(); }
                catch (Exception ex) { m_result.AddError(string.Format("Rollback error: {0}", ex.Message), ex); }
        }
    }
}
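/// <summary>
/// Queries the database for a compact report and, if reclaiming or compacting
/// is indicated, first deletes fully unused volumes and then repacks the still
/// referenced blocks from sparsely used volumes into new block volumes.
/// The transaction is passed by ref because it is committed and replaced
/// whenever a batch of deletions is flushed in DoDelete.
/// </summary>
/// <returns>True if any remote volumes were deleted or uploaded, false otherwise</returns>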
internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction)
{
    var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction);
    report.ReportCompactData(m_result);

    if (report.ShouldReclaim || report.ShouldCompact)
    {
        using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
        {
            if (!hasVerifiedBackend && !m_options.NoBackendverification)
                FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);

            BlockVolumeWriter newvol = new BlockVolumeWriter(m_options);
            newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

            IndexVolumeWriter newvolindex = null;
            if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
            {
                newvolindex = new IndexVolumeWriter(m_options);
                newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                newvolindex.StartVolume(newvol.RemoteFilename);
            }

            long blocksInVolume = 0;
            long discardedBlocks = 0;
            long discardedSize = 0;
            byte[] buffer = new byte[m_options.Blocksize];

            var remoteList = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray();

            // These are for bookkeeping
            var uploadedVolumes = new List<KeyValuePair<string, long>>();
            var deletedVolumes = new List<KeyValuePair<string, long>>();
            var downloadedVolumes = new List<KeyValuePair<string, long>>();

            // We start by deleting unused volumes to save space before uploading new stuff
            var fullyDeleteable = (from v in remoteList
                                   where report.DeleteableVolumes.Contains(v.Name)
                                   select (IRemoteVolume)v).ToList();
            deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction));

            // This list is used to pick up unused volumes,
            // so they can be deleted once the upload of the
            // required fragments is complete
            var deleteableVolumes = new List<IRemoteVolume>();

            if (report.ShouldCompact)
            {
                var volumesToDownload = (from v in remoteList
                                         where report.CompactableVolumes.Contains(v.Name)
                                         select (IRemoteVolume)v).ToList();

                using(var q = db.CreateBlockQueryHelper(m_options, transaction))
                {
                    foreach(var entry in new AsyncDownloader(volumesToDownload, backend))
                    using(var tmpfile = entry.TempFile)
                    {
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(db, transaction);
                            return false;
                        }

                        downloadedVolumes.Add(new KeyValuePair<string, long>(entry.Name, entry.Size));
                        var inst = VolumeBase.ParseFilename(entry.Name);
                        using(var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options))
                        {
                            foreach(var e in f.Blocks)
                            {
                                if (q.UseBlock(e.Key, e.Value, transaction))
                                {
                                    //TODO: How do we get the compression hint? Reverse query for filename in db?
                                    var s = f.ReadBlock(e.Key, buffer);
                                    if (s != e.Value)
                                        throw new Exception(string.Format("Size mismatch problem, {0} vs {1}", s, e.Value));

                                    newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible);
                                    if (newvolindex != null)
                                        newvolindex.AddBlock(e.Key, e.Value);

                                    db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction);
                                    blocksInVolume++;

                                    if (newvol.Filesize > m_options.VolumeSize)
                                    {
                                        uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
                                        if (newvolindex != null)
                                            uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));

                                        if (!m_options.Dryrun)
                                            backend.Put(newvol, newvolindex);
                                        else
                                            m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));

                                        newvol = new BlockVolumeWriter(m_options);
                                        newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                                        if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                                        {
                                            newvolindex = new IndexVolumeWriter(m_options);
                                            newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                                            db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                                            newvolindex.StartVolume(newvol.RemoteFilename);
                                        }

                                        blocksInVolume = 0;

                                        // After we upload this volume, we can delete all previously encountered volumes
                                        deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
                                        deleteableVolumes = new List<IRemoteVolume>();
                                    }
                                }
                                else
                                {
                                    discardedBlocks++;
                                    discardedSize += e.Value;
                                }
                            }
                        }

                        deleteableVolumes.Add(entry);
                    }

                    if (blocksInVolume > 0)
                    {
                        uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
                        if (newvolindex != null)
                            uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));

                        if (!m_options.Dryrun)
                            backend.Put(newvol, newvolindex);
                        else
                            m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));
                    }
                    else
                    {
                        db.RemoveRemoteVolume(newvol.RemoteFilename, transaction);
                        if (newvolindex != null)
                        {
                            db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction);
                            newvolindex.FinishVolume(null, 0);
                        }
                    }
                }
            }

            deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));

            var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
            var deletedSize = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
            var uploadSize = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);

            m_result.DeletedFileCount = deletedVolumes.Count;
            m_result.DownloadedFileCount = downloadedVolumes.Count;
            m_result.UploadedFileCount = uploadedVolumes.Count;
            m_result.DeletedFileSize = deletedSize;
            m_result.DownloadedFileSize = downloadSize;
            m_result.UploadedFileSize = uploadSize;
            m_result.Dryrun = m_options.Dryrun;

            if (m_result.Dryrun)
            {
                if (downloadedVolumes.Count == 0)
                    m_result.AddDryrunMessage(string.Format("Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                else
                    m_result.AddDryrunMessage(string.Format("Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
            }
            else
            {
                if (m_result.DownloadedFileCount == 0)
                    m_result.AddMessage(string.Format("Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                else
                    m_result.AddMessage(string.Format("Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(downloadSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
            }

            backend.WaitForComplete(db, transaction);
        }

        return (m_result.DeletedFileCount + m_result.UploadedFileCount) > 0;
    }
    else
    {
        return false;
    }
}
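/// <summary>
/// Marks the given volumes as Deleting, waits for the backend queue to drain,
/// and commits the recorded state before any remote data is removed (unless
/// this is a dry run), so an interruption can never lose state that the
/// remote deletions depend on. Returns the sequence produced by PerformDelete
/// (defined elsewhere in this class); judging by the iterator overload below,
/// the actual remote deletes presumably happen lazily as the result is enumerated.
/// </summary>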
private IEnumerable<KeyValuePair<string, long>> DoDelete(LocalDeleteDatabase db, BackendManager backend, IEnumerable<IRemoteVolume> deleteableVolumes, ref System.Data.IDbTransaction transaction)
{
    // Mark all volumes as disposable
    foreach(var f in deleteableVolumes)
        db.UpdateRemoteVolume(f.Name, RemoteVolumeState.Deleting, f.Size, f.Hash, transaction);

    // Before we commit the current state, make sure the backend has caught up
    backend.WaitForEmpty(db, transaction);

    if (!m_options.Dryrun)
    {
        transaction.Commit();
        transaction = db.BeginTransaction();
    }

    return PerformDelete(backend, db.GetDeletableVolumes(deleteableVolumes, transaction));
}
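/// <summary>
/// After a backup, either runs the retention-based delete handler (which
/// compacts as a side effect) or, if the last uploaded block volume was
/// smaller than the small-file threshold, triggers a standalone compact
/// so that small volumes do not accumulate on the backend.
/// </summary>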
private void CompactIfRequired(BackendManager backend, long lastVolumeSize)
{
    var currentIsSmall = lastVolumeSize != -1 && lastVolumeSize <= m_options.SmallFileSize;

    if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0)
    {
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
        m_result.DeleteResults = new DeleteResults(m_result);
        using(var db = new LocalDeleteDatabase(m_database))
            new DeleteHandler(backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, ref m_transaction, true, currentIsSmall);
    }
    else if (currentIsSmall && !m_options.NoAutoCompact)
    {
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
        m_result.CompactResults = new CompactResults(m_result);
        using(var db = new LocalDeleteDatabase(m_database))
            new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref m_transaction);
    }
}
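/// <summary>
/// Iterator overload that deletes (or, in a dry run, only reports) each
/// deletable volume as the sequence is enumerated, yielding name/size pairs
/// for bookkeeping and clearing the input list once enumeration completes.
/// </summary>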
private IEnumerable<KeyValuePair<string, long>> DoDelete(LocalDeleteDatabase db, BackendManager backend, List<IRemoteVolume> deleteableVolumes, System.Data.IDbTransaction transaction)
{
    foreach(var f in db.GetDeletableVolumes(deleteableVolumes, transaction))
    {
        if (!m_options.Dryrun)
            backend.Delete(f.Name, f.Size);
        else
            m_result.AddDryrunMessage(string.Format("Would delete remote file: {0}, size: {1}", f.Name, Library.Utility.Utility.FormatSizeString(f.Size)));

        yield return new KeyValuePair<string, long>(f.Name, f.Size);
    }

    deleteableVolumes.Clear();
}