/// <summary>
/// Uploads the fileset volume for the current backup if anything changed
/// (or if unchanged backups are requested); otherwise discards the temporary volume.
/// </summary>
private void UploadRealFileList(BackendManager backend, FilesetVolumeWriter filesetvolume)
{
    var changeCount =
        m_result.AddedFiles + m_result.ModifiedFiles + m_result.DeletedFiles +
        m_result.AddedFolders + m_result.ModifiedFolders + m_result.DeletedFolders +
        m_result.AddedSymlinks + m_result.ModifiedSymlinks + m_result.DeletedSymlinks;

    // Changes in the file list trigger a file list upload
    if (m_options.UploadUnchangedBackups || changeCount > 0)
    {
        using(new Logging.Timer("Uploading a new fileset"))
        {
            if (!string.IsNullOrEmpty(m_options.ControlFiles))
                foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                    filesetvolume.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

            m_database.WriteFileset(filesetvolume, m_transaction);
            filesetvolume.Close();

            if (m_options.Dryrun)
                m_result.AddDryrunMessage(string.Format("Would upload fileset volume: {0}, size: {1}", filesetvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(filesetvolume.LocalFilename).Length)));
            else
            {
                m_database.UpdateRemoteVolume(filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);

                using(new Logging.Timer("CommitUpdateRemoteVolume"))
                    m_transaction.Commit();
                m_transaction = m_database.BeginTransaction();

                backend.Put(filesetvolume);

                using(new Logging.Timer("CommitUpdateRemoteVolume"))
                    m_transaction.Commit();
                m_transaction = m_database.BeginTransaction();
            }
        }
    }
    else
    {
        m_result.AddVerboseMessage("Removing temp files, as no data needs to be uploaded");
        m_database.RemoveRemoteVolume(filesetvolume.RemoteFilename, m_transaction);
    }
}
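/// <summary>
/// Runs the backup operation: finalizes any interrupted backup, verifies the remote
/// file list, enumerates the sources (or processes a supplied change list), uploads
/// block, index and fileset volumes, and finally applies retention, compaction and
/// post-backup verification as configured.
/// </summary>
/// <param name="sources">The source paths to back up</param>
/// <param name="filter">An optional filter applied to the enumerated paths</param>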
public void Run(string[] sources, Library.Utility.IFilter filter)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

    using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
    {
        m_result.SetDatabase(m_database);
        m_result.Dryrun = m_options.Dryrun;

        Utility.VerifyParameters(m_database, m_options);
        m_database.VerifyConsistency(null);

        // If there is no filter, we set an empty filter to simplify the code
        // If there is a filter, we make sure that the sources are included
        m_filter = filter ?? new Library.Utility.FilterExpression();
        m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

        var lastVolumeSize = -1L;
        m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
        System.Threading.Thread parallelScanner = null;

        try
        {
            m_snapshot = GetSnapshot(sources, m_options, m_result);

            // Start parallel scan
            if (m_options.ChangedFilelist == null || m_options.ChangedFilelist.Length < 1)
            {
                parallelScanner = new System.Threading.Thread(CountFilesThread) {
                    Name = "Read ahead file counter",
                    IsBackground = true
                };
                parallelScanner.Start();
            }

            using(m_backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            using(m_filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
            {
                var incompleteFilesets = m_database.GetIncompleteFilesets(null).OrderBy(x => x.Value).ToArray();
                if (incompleteFilesets.Length != 0)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
                    m_result.AddMessage("Uploading filelist from previous interrupted backup");

                    using(var trn = m_database.BeginTransaction())
                    {
                        var incompleteSet = incompleteFilesets.Last();
                        var badIds = from n in incompleteFilesets select n.Key;
                        var prevs = (from n in m_database.FilesetTimes
                                     where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                                     orderby n.Key
                                     select n.Key).ToArray();
                        var prevId = prevs.Length == 0 ? -1 : prevs.Last();

                        FilesetVolumeWriter fsw = null;
                        try
                        {
                            var s = 1;
                            var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                            var oldFilesetID = incompleteSet.Key;

                            // Probe for an unused filename
                            while (s < 60)
                            {
                                var id = m_database.GetRemoteVolumeID(VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, fileTime));
                                if (id < 0)
                                    break;

                                fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                            }

                            fsw = new FilesetVolumeWriter(m_options, fileTime);
                            fsw.VolumeID = m_database.RegisterRemoteVolume(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                            var newFilesetID = m_database.CreateFileset(fsw.VolumeID, fileTime, trn);
                            m_database.LinkFilesetToVolume(newFilesetID, fsw.VolumeID, trn);
                            m_database.AppendFilesFromPreviousSet(trn, null, newFilesetID, prevId, fileTime);

                            m_database.WriteFileset(fsw, trn, newFilesetID);

                            if (m_options.Dryrun)
                            {
                                m_result.AddDryrunMessage(string.Format("Would upload fileset: {0}, size: {1}", fsw.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(fsw.LocalFilename).Length)));
                            }
                            else
                            {
                                m_database.UpdateRemoteVolume(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null, trn);

                                using(new Logging.Timer("CommitUpdateFilelistVolume"))
                                    trn.Commit();

                                m_backend.Put(fsw);
                                fsw = null;
                            }
                        }
                        finally
                        {
                            if (fsw != null)
                                try { fsw.Dispose(); }
                                catch { fsw = null; }
                        }
                    }
                }

                if (!m_options.NoBackendverification)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
                    using(new Logging.Timer("PreBackupVerify"))
                    {
                        try
                        {
                            FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                        }
                        catch (Exception ex)
                        {
                            if (m_options.AutoCleanup)
                            {
                                m_result.AddWarning("Backend verification failed, attempting automatic cleanup", ex);
                                m_result.RepairResults = new RepairResults(m_result);
                                new RepairHandler(m_backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                                m_result.AddMessage("Backend cleanup finished, retrying verification");
                                FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                            }
                            else
                                throw;
                        }
                    }
                }

                m_database.BuildLookupTable(m_options);
                m_transaction = m_database.BeginTransaction();

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);
                var filesetvolumeid = m_database.RegisterRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                m_database.CreateFileset(filesetvolumeid, VolumeBase.ParseFilename(m_filesetvolume.RemoteFilename).Time, m_transaction);

                m_blockvolume = new BlockVolumeWriter(m_options);
                m_blockvolume.VolumeID = m_database.RegisterRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, m_transaction);

                if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                {
                    m_indexvolume = new IndexVolumeWriter(m_options);
                    m_indexvolume.VolumeID = m_database.RegisterRemoteVolume(m_indexvolume.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, m_transaction);
                }

                var filterhandler = new FilterHandler(m_snapshot, m_attributeFilter, m_sourceFilter, m_filter, m_symlinkPolicy, m_options.HardlinkPolicy, m_result);

                using(new Logging.Timer("BackupMainOperation"))
                {
                    if (m_options.ChangedFilelist != null && m_options.ChangedFilelist.Length >= 1)
                    {
                        m_result.AddVerboseMessage("Processing supplied change list instead of enumerating filesystem");
                        m_result.OperationProgressUpdater.UpdatefileCount(m_options.ChangedFilelist.Length, 0, true);

                        foreach(var p in m_options.ChangedFilelist)
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                m_result.AddMessage("Stopping backup operation on request");
                                break;
                            }

                            FileAttributes fa = new FileAttributes();
                            try
                            {
                                fa = m_snapshot.GetAttributes(p);
                            }
                            catch (Exception ex)
                            {
                                m_result.AddWarning(string.Format("Failed to read attributes: {0}, message: {1}", p, ex.Message), ex);
                            }

                            if (filterhandler.AttributeFilter(null, p, fa))
                            {
                                try
                                {
                                    this.HandleFilesystemEntry(p, fa);
                                }
                                catch (Exception ex)
                                {
                                    m_result.AddWarning(string.Format("Failed to process element: {0}, message: {1}", p, ex.Message), ex);
                                }
                            }
                        }

                        m_database.AppendFilesFromPreviousSet(m_transaction, m_options.DeletedFilelist);
                    }
                    else
                    {
                        foreach(var path in m_snapshot.EnumerateFilesAndFolders(filterhandler.AttributeFilter))
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                m_result.AddMessage("Stopping backup operation on request");
                                break;
                            }

                            this.HandleFilesystemEntry(path, m_snapshot.GetAttributes(path));
                        }
                    }

                    // If the scanner is still running for some reason, make sure we kill it now
                    if (parallelScanner != null && parallelScanner.IsAlive)
                        parallelScanner.Abort();

                    // We no longer need the snapshot to be active
                    try { m_snapshot.Dispose(); }
                    finally { m_snapshot = null; }

                    m_result.OperationProgressUpdater.UpdatefileCount(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles, true);
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Finalize);
                using(new Logging.Timer("FinalizeRemoteVolumes"))
                {
                    if (m_blockvolume.SourceSize > 0)
                    {
                        lastVolumeSize = m_blockvolume.SourceSize;

                        if (m_options.Dryrun)
                        {
                            m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));
                            if (m_indexvolume != null)
                            {
                                m_blockvolume.Close();
                                UpdateIndexVolume();
                                m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                                m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));
                            }

                            m_blockvolume.Dispose();
                            m_blockvolume = null;

                            // The index volume only exists if index files are enabled
                            if (m_indexvolume != null)
                            {
                                m_indexvolume.Dispose();
                                m_indexvolume = null;
                            }
                        }
                        else
                        {
                            m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                            m_blockvolume.Close();
                            UpdateIndexVolume();

                            using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                m_transaction.Commit();
                            m_transaction = m_database.BeginTransaction();

                            m_backend.Put(m_blockvolume, m_indexvolume);
                            m_blockvolume = null;
                            m_indexvolume = null;
                        }
                    }
                    else
                    {
                        m_database.RemoveRemoteVolume(m_blockvolume.RemoteFilename, m_transaction);
                        if (m_indexvolume != null)
                            m_database.RemoveRemoteVolume(m_indexvolume.RemoteFilename, m_transaction);
                    }
                }

                using(new Logging.Timer("UpdateChangeStatistics"))
                    m_database.UpdateChangeStatistics(m_result);
                using(new Logging.Timer("VerifyConsistency"))
                    m_database.VerifyConsistency(m_transaction);

                var changeCount =
                    m_result.AddedFiles + m_result.ModifiedFiles + m_result.DeletedFiles +
                    m_result.AddedFolders + m_result.ModifiedFolders + m_result.DeletedFolders +
                    m_result.AddedSymlinks + m_result.ModifiedSymlinks + m_result.DeletedSymlinks;

                // Changes in the file list trigger a file list upload
                if (m_options.UploadUnchangedBackups || changeCount > 0)
                {
                    using(new Logging.Timer("Uploading a new fileset"))
                    {
                        if (!string.IsNullOrEmpty(m_options.ControlFiles))
                            foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                                m_filesetvolume.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

                        m_database.WriteFileset(m_filesetvolume, m_transaction);
                        m_filesetvolume.Close();

                        if (m_options.Dryrun)
                            m_result.AddDryrunMessage(string.Format("Would upload fileset volume: {0}, size: {1}", m_filesetvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_filesetvolume.LocalFilename).Length)));
                        else
                        {
                            m_database.UpdateRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);

                            using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                m_transaction.Commit();
                            m_transaction = m_database.BeginTransaction();

                            m_backend.Put(m_filesetvolume);
                        }
                    }
                }
                else
                {
                    m_result.AddVerboseMessage("Removing temp files, as no data needs to be uploaded");
                    m_database.RemoveRemoteVolume(m_filesetvolume.RemoteFilename, m_transaction);
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                using(new Logging.Timer("Async backend wait"))
                    m_backend.WaitForComplete(m_database, m_transaction);

                if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                {
                    if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
                        m_result.DeleteResults = new DeleteResults(m_result);
                        using(var db = new LocalDeleteDatabase(m_database))
                            new DeleteHandler(m_backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, m_transaction, true, lastVolumeSize <= m_options.SmallFileSize);
                    }
                    else if (lastVolumeSize <= m_options.SmallFileSize && !m_options.NoAutoCompact)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
                        m_result.CompactResults = new CompactResults(m_result);
                        using(var db = new LocalDeleteDatabase(m_database))
                            new CompactHandler(m_backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, m_transaction);
                    }
                }

                if (m_options.UploadVerificationFile)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                    FilelistProcessor.UploadVerificationFile(m_backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                }

                if (m_options.Dryrun)
                {
                    m_transaction.Rollback();
                    m_transaction = null;
                }
                else
                {
                    using(new Logging.Timer("CommitFinalizingBackup"))
                        m_transaction.Commit();

                    m_transaction = null;
                    m_database.Vacuum();

                    if (m_result.TaskControlRendevouz() != TaskControlState.Stop && !m_options.NoBackendverification)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
                        using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                        {
                            using(new Logging.Timer("AfterBackupVerify"))
                                FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                            backend.WaitForComplete(m_database, null);
                        }

                        if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                            m_result.TestResults = new TestResults(m_result);

                            using(var testdb = new LocalTestDatabase(m_database))
                            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                                new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                                    .DoRun(m_options.BackupTestSampleCount, testdb, backend);
                        }
                    }
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                m_database.WriteResults();
                return;
            }
        }
        catch (Exception ex)
        {
            m_result.AddError("Fatal error", ex);
            throw;
        }
        finally
        {
            if (parallelScanner != null && parallelScanner.IsAlive)
            {
                parallelScanner.Abort();
                parallelScanner.Join(500);
                if (parallelScanner.IsAlive)
                    m_result.AddWarning("Failed to terminate filecounter thread", null);
            }

            if (m_snapshot != null)
                try { m_snapshot.Dispose(); }
                catch (Exception ex) { m_result.AddError("Failed to dispose snapshot", ex); }
                finally { m_snapshot = null; }

            if (m_transaction != null)
                try { m_transaction.Rollback(); }
                catch (Exception ex) { m_result.AddError(string.Format("Rollback error: {0}", ex.Message), ex); }
        }
    }
}
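/// <summary>
/// Creates and uploads a synthetic fileset for the most recent interrupted backup,
/// built from the files recorded in the previous complete fileset, and re-creates
/// any index files that are missing for already uploaded block volumes.
/// </summary>
/// <param name="backend">The backend manager used for the uploads</param>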
private void UploadSyntheticFilelist(BackendManager backend)
{
    var incompleteFilesets = m_database.GetIncompleteFilesets(null).OrderBy(x => x.Value).ToArray();
    if (incompleteFilesets.Length != 0)
    {
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
        m_result.AddMessage("Uploading filelist from previous interrupted backup");

        using(var trn = m_database.BeginTransaction())
        {
            var incompleteSet = incompleteFilesets.Last();
            var badIds = from n in incompleteFilesets select n.Key;
            var prevs = (from n in m_database.FilesetTimes
                         where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                         orderby n.Key
                         select n.Key).ToArray();
            var prevId = prevs.Length == 0 ? -1 : prevs.Last();

            FilesetVolumeWriter fsw = null;
            try
            {
                var s = 1;
                var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                var oldFilesetID = incompleteSet.Key;

                // Probe for an unused filename
                while (s < 60)
                {
                    var id = m_database.GetRemoteVolumeID(VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, fileTime));
                    if (id < 0)
                        break;

                    fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                }

                fsw = new FilesetVolumeWriter(m_options, fileTime);
                fsw.VolumeID = m_database.RegisterRemoteVolume(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);

                if (!string.IsNullOrEmpty(m_options.ControlFiles))
                    foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                        fsw.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

                var newFilesetID = m_database.CreateFileset(fsw.VolumeID, fileTime, trn);
                m_database.LinkFilesetToVolume(newFilesetID, fsw.VolumeID, trn);
                m_database.AppendFilesFromPreviousSet(trn, null, newFilesetID, prevId, fileTime);

                m_database.WriteFileset(fsw, trn, newFilesetID);

                if (m_options.Dryrun)
                {
                    m_result.AddDryrunMessage(string.Format("Would upload fileset: {0}, size: {1}", fsw.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(fsw.LocalFilename).Length)));
                }
                else
                {
                    m_database.UpdateRemoteVolume(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null, trn);

                    using(new Logging.Timer("CommitUpdateFilelistVolume"))
                        trn.Commit();

                    backend.Put(fsw);
                    fsw = null;
                }
            }
            finally
            {
                if (fsw != null)
                    try { fsw.Dispose(); }
                    catch { fsw = null; }
            }
        }
    }

    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
    {
        var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
        var hashsize = blockhasher.HashSize / 8;

        foreach(var blockfile in m_database.GetMissingIndexFiles())
        {
            m_result.AddMessage(string.Format("Re-creating missing index file for {0}", blockfile));
            var w = new IndexVolumeWriter(m_options);
            w.VolumeID = m_database.RegisterRemoteVolume(w.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, null);

            var blockvolume = m_database.GetRemoteVolumeFromName(blockfile);
            w.StartVolume(blockvolume.Name);
            var volumeid = m_database.GetRemoteVolumeID(blockvolume.Name);

            foreach(var b in m_database.GetBlocks(volumeid))
                w.AddBlock(b.Hash, b.Size);

            w.FinishVolume(blockvolume.Hash, blockvolume.Size);

            if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full)
                foreach(var b in m_database.GetBlocklists(volumeid, m_options.Blocksize, hashsize))
                    w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3);

            w.Close();

            m_database.AddIndexBlockLink(w.VolumeID, volumeid, null);

            if (m_options.Dryrun)
                m_result.AddDryrunMessage(string.Format("Would upload new index file {0}, with size {1}, previous size {2}", w.RemoteFilename, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(w.Filesize)));
            else
            {
                m_database.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                backend.Put(w);
            }
        }
    }
}