/// <summary>
/// Performs the bulk of work by starting all relevant processes
/// </summary>
/// <param name="sources">Source paths to enumerate and back up.</param>
/// <param name="snapshot">Snapshot service used to read files consistently during the run.</param>
/// <param name="journalService">Optional USN change-journal service; when non-null, enables journal-based incremental handling after the run.</param>
/// <param name="database">Async backup database facade used by all worker processes.</param>
/// <param name="stats">Collector for examined-file statistics.</param>
/// <param name="options">Backup options (concurrency levels, filters, policies, changed/deleted file lists).</param>
/// <param name="sourcefilter">Filter applied to the source list.</param>
/// <param name="filter">General exclude/include filter.</param>
/// <param name="result">Result object updated with partial-backup state and progress counts.</param>
/// <param name="taskreader">Task reader workers poll for pause/stop control.</param>
/// <param name="filesetid">Id of the fileset being created by this run.</param>
/// <param name="lastfilesetid">Id of the previous fileset, used when carrying files forward and updating journal data.</param>
/// <param name="token">Cancellation token; when signalled the backup is marked partial instead of full.</param>
private static async Task RunMainOperation(IEnumerable<string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long filesetid, long lastfilesetid, CancellationToken token)
{
    // Time the whole main operation for the log
    using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
    {
        // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
        var unused = options.CompressionHints.Count;

        Task all;
        // NOTE(review): all worker tasks are deliberately created inside this scope —
        // presumably so they bind to the same set of channels; confirm against ChannelScope docs
        using (new ChannelScope())
        {
            all = Task.WhenAll(
                new[]
                    {
                        Backup.DataBlockProcessor.Run(database, options, taskreader),
                        Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader, token),
                        Backup.StreamBlockSplitter.Run(options, database, taskreader),
                        Backup.FileEnumerationProcess.Run(sources, snapshot, journalService, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader, token),
                        Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                        Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid, token),
                        Backup.SpillCollectorProcess.Run(options, database, taskreader),
                        Backup.ProgressHandler.Run(result)
                    }
                    // Spawn additional block hashers
                    // (one splitter is already in the array above, hence the -1)
                    .Union(
                        Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader))
                    )
                    // Spawn additional compressors
                    // (one data block processor is already in the array above, hence the -1)
                    .Union(
                        Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader))
                    )
            );
        }

        // Wait for every worker process to drain and complete before post-processing the fileset
        await all.ConfigureAwait(false);

        if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
        {
            // Caller supplied an explicit changed-file list, so only those files were scanned;
            // carry the rest of the previous fileset forward.
            // NOTE(review): DeletedFilelist is passed here — presumably the entries to exclude
            // from the carried-forward set; confirm against AppendFilesFromPreviousSetAsync
            await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
        }
        else if (journalService != null)
        {
            // append files from previous fileset, unless part of modifiedSources, which we've just scanned
            await database.AppendFilesFromPreviousSetWithPredicateAsync((path, fileSize) =>
            {
                // Already enumerated this run: do not carry the old entry forward
                if (journalService.IsPathEnumerated(path))
                {
                    return(true);
                }
                // Carried-forward file: count it as examined so totals include unscanned files
                if (fileSize >= 0)
                {
                    stats.AddExaminedFile(fileSize);
                }
                return(false);
            });

            // store journal data in database, unless job is being canceled
            if (!token.IsCancellationRequested)
            {
                var data = journalService.VolumeDataList.Where(p => p.JournalData != null).Select(p => p.JournalData).ToList();
                if (data.Any())
                {
                    // always record change journal data for current fileset (entry may be dropped later if nothing is uploaded)
                    await database.CreateChangeJournalDataAsync(data);

                    // update the previous fileset's change journal entry to resume at this point in case nothing was backed up
                    await database.UpdateChangeJournalDataAsync(data, lastfilesetid);
                }
            }
        }

        if (token.IsCancellationRequested)
        {
            // Cancelled: keep what was done but flag the fileset as partial
            result.PartialBackup = true;
            Log.WriteWarningMessage(LOGTAG, "CancellationRequested", null, "Cancellation was requested by user.");
        }
        else
        {
            result.PartialBackup = false;
            await database.UpdateFilesetAndMarkAsFullBackupAsync(filesetid);
        }

        // Final progress update with the definitive examined-file totals
        result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
    }
}