public void Run(string[] sources, Library.Utility.IFilter filter)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

    using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
    {
        m_result.SetDatabase(m_database);
        m_result.Dryrun = m_options.Dryrun;

        Utility.VerifyParameters(m_database, m_options);
        m_database.VerifyConsistency(null);

        // If there is no filter, we set an empty filter to simplify the code
        // If there is a filter, we make sure that the sources are included
        m_filter = filter ?? new Library.Utility.FilterExpression();
        m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

        var lastVolumeSize = -1L;
        m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
        System.Threading.Thread parallelScanner = null;

        try
        {
            m_snapshot = GetSnapshot(sources, m_options, m_result);

            // Start parallel scan
            if (m_options.ChangedFilelist == null || m_options.ChangedFilelist.Length < 1)
            {
                parallelScanner = new System.Threading.Thread(CountFilesThread)
                {
                    Name = "Read ahead file counter",
                    IsBackground = true
                };
                parallelScanner.Start();
            }

            using(m_backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            using(m_filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
            {
                var incompleteFilesets = m_database.GetIncompleteFilesets(null).OrderBy(x => x.Value).ToArray();
                if (incompleteFilesets.Length != 0)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
                    m_result.AddMessage("Uploading filelist from previous interrupted backup");

                    using(var trn = m_database.BeginTransaction())
                    {
                        var incompleteSet = incompleteFilesets.Last();
                        var badIds = from n in incompleteFilesets select n.Key;

                        var prevs = (from n in m_database.FilesetTimes
                                     where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                                     orderby n.Key
                                     select n.Key).ToArray();

                        var prevId = prevs.Length == 0 ? -1 : prevs.Last();
                        FilesetVolumeWriter fsw = null;
                        try
                        {
                            var s = 1;
                            var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                            var oldFilesetID = incompleteSet.Key;

                            // Probe for an unused filename
                            while (s < 60)
                            {
                                var id = m_database.GetRemoteVolumeID(VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, fileTime));
                                if (id < 0)
                                    break;

                                fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                            }

                            fsw = new FilesetVolumeWriter(m_options, fileTime);
                            fsw.VolumeID = m_database.RegisterRemoteVolume(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                            var newFilesetID = m_database.CreateFileset(fsw.VolumeID, fileTime, trn);
                            m_database.LinkFilesetToVolume(newFilesetID, fsw.VolumeID, trn);
                            m_database.AppendFilesFromPreviousSet(trn, null, newFilesetID, prevId, fileTime);
                            m_database.WriteFileset(fsw, trn, newFilesetID);

                            if (m_options.Dryrun)
                            {
                                m_result.AddDryrunMessage(string.Format("Would upload fileset: {0}, size: {1}", fsw.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(fsw.LocalFilename).Length)));
                            }
                            else
                            {
                                m_database.UpdateRemoteVolume(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null, trn);

                                using(new Logging.Timer("CommitUpdateFilelistVolume"))
                                    trn.Commit();

                                m_backend.Put(fsw);
                                fsw = null;
                            }
                        }
                        finally
                        {
                            if (fsw != null)
                                try { fsw.Dispose(); }
                                catch { fsw = null; }
                        }
                    }
                }

                if (!m_options.NoBackendverification)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
                    using(new Logging.Timer("PreBackupVerify"))
                    {
                        try
                        {
                            FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                        }
                        catch (Exception ex)
                        {
                            if (m_options.AutoCleanup)
                            {
                                m_result.AddWarning("Backend verification failed, attempting automatic cleanup", ex);
                                m_result.RepairResults = new RepairResults(m_result);
                                new RepairHandler(m_backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                                m_result.AddMessage("Backend cleanup finished, retrying verification");
                                FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                            }
                            else
                                throw;
                        }
                    }
                }

                m_database.BuildLookupTable(m_options);
                m_transaction = m_database.BeginTransaction();

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);
                var filesetvolumeid = m_database.RegisterRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                m_database.CreateFileset(filesetvolumeid, VolumeBase.ParseFilename(m_filesetvolume.RemoteFilename).Time, m_transaction);

                m_blockvolume = new BlockVolumeWriter(m_options);
                m_blockvolume.VolumeID = m_database.RegisterRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, m_transaction);

                if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                {
                    m_indexvolume = new IndexVolumeWriter(m_options);
                    m_indexvolume.VolumeID = m_database.RegisterRemoteVolume(m_indexvolume.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, m_transaction);
                }

                var filterhandler = new FilterHandler(m_snapshot, m_attributeFilter, m_sourceFilter, m_filter, m_symlinkPolicy, m_options.HardlinkPolicy, m_result);

                using(new Logging.Timer("BackupMainOperation"))
                {
                    if (m_options.ChangedFilelist != null && m_options.ChangedFilelist.Length >= 1)
                    {
                        m_result.AddVerboseMessage("Processing supplied change list instead of enumerating filesystem");
                        m_result.OperationProgressUpdater.UpdatefileCount(m_options.ChangedFilelist.Length, 0, true);

                        foreach(var p in m_options.ChangedFilelist)
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                m_result.AddMessage("Stopping backup operation on request");
                                break;
                            }

                            FileAttributes fa = new FileAttributes();
                            try
                            {
                                fa = m_snapshot.GetAttributes(p);
                            }
                            catch (Exception ex)
                            {
                                m_result.AddWarning(string.Format("Failed to read attributes: {0}, message: {1}", p, ex.Message), ex);
                            }

                            if (filterhandler.AttributeFilter(null, p, fa))
                            {
                                try
                                {
                                    this.HandleFilesystemEntry(p, fa);
                                }
                                catch (Exception ex)
                                {
                                    m_result.AddWarning(string.Format("Failed to process element: {0}, message: {1}", p, ex.Message), ex);
                                }
                            }
                        }

                        m_database.AppendFilesFromPreviousSet(m_transaction, m_options.DeletedFilelist);
                    }
                    else
                    {
                        foreach(var path in m_snapshot.EnumerateFilesAndFolders(filterhandler.AttributeFilter))
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                m_result.AddMessage("Stopping backup operation on request");
                                break;
                            }

                            this.HandleFilesystemEntry(path, m_snapshot.GetAttributes(path));
                        }
                    }

                    // If the scanner is still running for some reason, make sure we kill it now
                    if (parallelScanner != null && parallelScanner.IsAlive)
                        parallelScanner.Abort();

                    // We no longer need the snapshot to stay active
                    try { m_snapshot.Dispose(); }
                    finally { m_snapshot = null; }

                    m_result.OperationProgressUpdater.UpdatefileCount(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles, true);
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Finalize);
                using(new Logging.Timer("FinalizeRemoteVolumes"))
                {
                    if (m_blockvolume.SourceSize > 0)
                    {
                        lastVolumeSize = m_blockvolume.SourceSize;

                        if (m_options.Dryrun)
                        {
                            m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));
                            if (m_indexvolume != null)
                            {
                                m_blockvolume.Close();
                                UpdateIndexVolume();
                                m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                                m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));
                            }

                            m_blockvolume.Dispose();
                            m_blockvolume = null;

                            // The index volume only exists if index files are enabled
                            if (m_indexvolume != null)
                            {
                                m_indexvolume.Dispose();
                                m_indexvolume = null;
                            }
                        }
                        else
                        {
                            m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                            m_blockvolume.Close();
                            UpdateIndexVolume();

                            using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                m_transaction.Commit();
                            m_transaction = m_database.BeginTransaction();

                            m_backend.Put(m_blockvolume, m_indexvolume);
                            m_blockvolume = null;
                            m_indexvolume = null;
                        }
                    }
                    else
                    {
                        m_database.RemoveRemoteVolume(m_blockvolume.RemoteFilename, m_transaction);
                        if (m_indexvolume != null)
                            m_database.RemoveRemoteVolume(m_indexvolume.RemoteFilename, m_transaction);
                    }
                }

                using(new Logging.Timer("UpdateChangeStatistics"))
                    m_database.UpdateChangeStatistics(m_result);
                using(new Logging.Timer("VerifyConsistency"))
                    m_database.VerifyConsistency(m_transaction);

                var changeCount =
                    m_result.AddedFiles + m_result.ModifiedFiles + m_result.DeletedFiles +
                    m_result.AddedFolders + m_result.ModifiedFolders + m_result.DeletedFolders +
                    m_result.AddedSymlinks + m_result.ModifiedSymlinks + m_result.DeletedSymlinks;

                // Changes in the file list trigger a file list upload
                if (m_options.UploadUnchangedBackups || changeCount > 0)
                {
                    using(new Logging.Timer("Uploading a new fileset"))
                    {
                        if (!string.IsNullOrEmpty(m_options.ControlFiles))
                            foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                                m_filesetvolume.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

                        m_database.WriteFileset(m_filesetvolume, m_transaction);
                        m_filesetvolume.Close();

                        if (m_options.Dryrun)
                            m_result.AddDryrunMessage(string.Format("Would upload fileset volume: {0}, size: {1}", m_filesetvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_filesetvolume.LocalFilename).Length)));
                        else
                        {
                            m_database.UpdateRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);

                            using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                m_transaction.Commit();
                            m_transaction = m_database.BeginTransaction();

                            m_backend.Put(m_filesetvolume);
                        }
                    }
                }
                else
                {
                    m_result.AddVerboseMessage("removing temp files, as no data needs to be uploaded");
                    m_database.RemoveRemoteVolume(m_filesetvolume.RemoteFilename, m_transaction);
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                using(new Logging.Timer("Async backend wait"))
                    m_backend.WaitForComplete(m_database, m_transaction);

                if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                {
                    if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
                        m_result.DeleteResults = new DeleteResults(m_result);
                        using(var db = new LocalDeleteDatabase(m_database))
                            new DeleteHandler(m_backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, m_transaction, true, lastVolumeSize <= m_options.SmallFileSize);
                    }
                    else if (lastVolumeSize <= m_options.SmallFileSize && !m_options.NoAutoCompact)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
                        m_result.CompactResults = new CompactResults(m_result);
                        using(var db = new LocalDeleteDatabase(m_database))
                            new CompactHandler(m_backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, m_transaction);
                    }
                }

                if (m_options.UploadVerificationFile)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                    FilelistProcessor.UploadVerificationFile(m_backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                }

                if (m_options.Dryrun)
                {
                    m_transaction.Rollback();
                    m_transaction = null;
                }
                else
                {
                    using(new Logging.Timer("CommitFinalizingBackup"))
                        m_transaction.Commit();
                    m_transaction = null;

                    m_database.Vacuum();

                    if (m_result.TaskControlRendevouz() != TaskControlState.Stop && !m_options.NoBackendverification)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
                        using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                        {
                            using(new Logging.Timer("AfterBackupVerify"))
                                FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                            backend.WaitForComplete(m_database, null);
                        }

                        if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                            m_result.TestResults = new TestResults(m_result);

                            using(var testdb = new LocalTestDatabase(m_database))
                            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                                new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                                    .DoRun(m_options.BackupTestSampleCount, testdb, backend);
                        }
                    }
                }
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                m_database.WriteResults();
                return;
            }
        }
        catch (Exception ex)
        {
            m_result.AddError("Fatal error", ex);
            throw;
        }
        finally
        {
            if (parallelScanner != null && parallelScanner.IsAlive)
            {
                parallelScanner.Abort();
                parallelScanner.Join(500);
                if (parallelScanner.IsAlive)
                    m_result.AddWarning("Failed to terminate filecounter thread", null);
            }

            if (m_snapshot != null)
                try { m_snapshot.Dispose(); }
                catch (Exception ex) { m_result.AddError("Failed to dispose snapshot", ex); }
                finally { m_snapshot = null; }

            if (m_transaction != null)
                try { m_transaction.Rollback(); }
                catch (Exception ex) { m_result.AddError(string.Format("Rollback error: {0}", ex.Message), ex); }
        }
    }
}
public void Run(string[] sources, Library.Utility.IFilter filter)
{
    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

    using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
    {
        m_result.SetDatabase(m_database);
        m_result.Dryrun = m_options.Dryrun;

        Utility.UpdateOptionsFromDb(m_database, m_options);
        Utility.VerifyParameters(m_database, m_options);

        if (m_database.RepairInProgress)
            throw new Exception("A repair of the database was attempted but did not complete. This database may be incomplete and the backup process cannot continue. You may delete the local database and attempt to repair it again.");

        m_blocksize = m_options.Blocksize;

        m_blockbuffer = new byte[m_options.Blocksize * Math.Max(1, m_options.FileReadBufferSize / m_options.Blocksize)];
        m_blocklistbuffer = new byte[m_options.Blocksize];

        // Create the hash algorithms up front and verify that they support transform reuse
        m_blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
        m_filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);

        if (m_blockhasher == null)
            throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
        if (m_filehasher == null)
            throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.FileHashAlgorithm));

        if (!m_blockhasher.CanReuseTransform)
            throw new Exception(Strings.Common.InvalidCryptoSystem(m_options.BlockHashAlgorithm));
        if (!m_filehasher.CanReuseTransform)
            throw new Exception(Strings.Common.InvalidCryptoSystem(m_options.FileHashAlgorithm));

        m_database.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);

        // If there is no filter, we set an empty filter to simplify the code
        // If there is a filter, we make sure that the sources are included
        m_filter = filter ?? new Library.Utility.FilterExpression();
        m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

        m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
        System.Threading.Thread parallelScanner = null;

        try
        {
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            using(var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
            {
                using(var snapshot = GetSnapshot(sources, m_options, m_result))
                {
                    // Start parallel scan
                    if (m_options.ChangedFilelist == null || m_options.ChangedFilelist.Length < 1)
                    {
                        parallelScanner = new System.Threading.Thread(CountFilesThread)
                        {
                            Name = "Read ahead file counter",
                            IsBackground = true
                        };
                        parallelScanner.Start(snapshot);
                    }

                    PreBackupVerify(backend);

                    // Verify before uploading a synthetic list
                    m_database.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);
                    UploadSyntheticFilelist(backend);

                    m_database.BuildLookupTable(m_options);
                    m_transaction = m_database.BeginTransaction();

                    // Probe for a fileset filename that is not already registered as a remote volume
                    var repcnt = 0;
                    while(repcnt < 100 && m_database.GetRemoteVolumeID(filesetvolume.RemoteFilename) >= 0)
                        filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++));

                    if (m_database.GetRemoteVolumeID(filesetvolume.RemoteFilename) >= 0)
                        throw new Exception("Unable to generate a unique fileset name");

                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);
                    var filesetvolumeid = m_database.RegisterRemoteVolume(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                    m_database.CreateFileset(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time, m_transaction);

                    RunMainOperation(snapshot, backend);

                    // If the scanner is still running for some reason, make sure we kill it now
                    if (parallelScanner != null && parallelScanner.IsAlive)
                        parallelScanner.Abort();
                }

                var lastVolumeSize = FinalizeRemoteVolumes(backend);

                using(new Logging.Timer("UpdateChangeStatistics"))
                    m_database.UpdateChangeStatistics(m_result);
                using(new Logging.Timer("VerifyConsistency"))
                    m_database.VerifyConsistency(m_transaction, m_options.Blocksize, m_options.BlockhashSize);

                UploadRealFileList(backend, filesetvolume);

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                using(new Logging.Timer("Async backend wait"))
                    backend.WaitForComplete(m_database, m_transaction);

                if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                    CompactIfRequired(backend, lastVolumeSize);

                if (m_options.UploadVerificationFile)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                    FilelistProcessor.UploadVerificationFile(backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                }

                if (m_options.Dryrun)
                {
                    m_transaction.Rollback();
                    m_transaction = null;
                }
                else
                {
                    using(new Logging.Timer("CommitFinalizingBackup"))
                        m_transaction.Commit();
                    m_transaction = null;

                    m_database.Vacuum();

                    if (m_result.TaskControlRendevouz() != TaskControlState.Stop && !m_options.NoBackendverification)
                    {
                        PostBackupVerification();
                    }
                }

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                m_database.WriteResults();
                m_database.PurgeLogData(m_options.LogRetention);
                return;
            }
        }
        catch (Exception ex)
        {
            m_result.AddError("Fatal error", ex);
            throw;
        }
        finally
        {
            if (parallelScanner != null && parallelScanner.IsAlive)
            {
                parallelScanner.Abort();
                parallelScanner.Join(500);
                if (parallelScanner.IsAlive)
                    m_result.AddWarning("Failed to terminate filecounter thread", null);
            }

            if (m_transaction != null)
                try { m_transaction.Rollback(); }
                catch (Exception ex) { m_result.AddError(string.Format("Rollback error: {0}", ex.Message), ex); }
        }
    }
}