public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader) { return(AutomationExtensions.RunTask(new { UploadChannel = Channels.BackendRequest.ForWrite }, async self => { if (options.IndexfilePolicy != Options.IndexFileStrategy.None) { foreach (var blockfile in await database.GetMissingIndexFilesAsync()) { if (!await taskreader.ProgressAsync) { return; } Logging.Log.WriteInformationMessage(LOGTAG, "RecreateMissingIndexFile", "Re-creating missing index file for {0}", blockfile); var w = await Common.IndexVolumeCreator.CreateIndexVolume(blockfile, options, database); if (!await taskreader.ProgressAsync) { return; } await database.UpdateRemoteVolumeAsync(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null); await self.UploadChannel.WriteAsync(new IndexVolumeUploadRequest(w)); } } })); }
public VolumeUploadRequest(BlockVolumeWriter blockVolume, FileEntryItem blockEntry, TemporaryIndexVolume indexVolume, Options options, BackupDatabase database) { BlockVolume = blockVolume; BlockEntry = blockEntry; IndexVolume = indexVolume; Options = options; Database = database; }
/// <summary> /// Adds metadata to output, and returns the metadataset ID /// </summary> /// <returns>The metadataset ID.</returns> /// <param name="path">The path for which metadata is processed.</param> /// <param name="meta">The metadata entry.</param> /// <param name="database">The database connection.</param> /// <param name="streamblockchannel">The channel to write streams to.</param> internal static async Task <Tuple <bool, long> > AddMetadataToOutputAsync(string path, IMetahash meta, BackupDatabase database, IWriteChannel <StreamBlock> streamblockchannel) { StreamProcessResult res; using (var ms = new MemoryStream(meta.Blob)) res = await StreamBlock.ProcessStream(streamblockchannel, path, ms, true, CompressionHint.Default); return(await database.AddMetadatasetAsync(res.Streamhash, res.Streamlength, res.Blocksetid)); }
/// <summary> /// Processes the metadata for the given path. /// </summary> /// <returns><c>True</c> if the path should be submitted to more analysis, <c>false</c> if there is nothing else to do</returns> private static async Task <bool> ProcessMetadata(string path, FileAttributes attributes, DateTime lastwrite, Options options, Snapshots.ISnapshotService snapshot, IMetahash emptymetadata, BackupDatabase database, IWriteChannel <StreamBlock> streamblockchannel) { if (snapshot.IsSymlink(path, attributes)) { // Not all reparse points are symlinks. // For example, on Windows 10 Fall Creator's Update, the OneDrive folder (and all subfolders) // are reparse points, which allows the folder to hook into the OneDrive service and download things on-demand. // If we can't find a symlink target for the current path, we won't treat it as a symlink. string symlinkTarget = snapshot.GetSymlinkTarget(path); if (!string.IsNullOrWhiteSpace(symlinkTarget)) { if (options.SymlinkPolicy == Options.SymlinkStrategy.Ignore) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "IgnoreSymlink", "Ignoring symlink {0}", path); return(false); } if (options.SymlinkPolicy == Options.SymlinkStrategy.Store) { var metadata = MetadataGenerator.GenerateMetadata(path, attributes, options, snapshot); if (!metadata.ContainsKey("CoreSymlinkTarget")) { metadata["CoreSymlinkTarget"] = symlinkTarget; } var metahash = Utility.WrapMetadata(metadata, options); await AddSymlinkToOutputAsync(path, DateTime.UtcNow, metahash, database, streamblockchannel).ConfigureAwait(false); Logging.Log.WriteVerboseMessage(FILELOGTAG, "StoreSymlink", "Stored symlink {0}", path); // Don't process further return(false); } } else { Logging.Log.WriteVerboseMessage(FILELOGTAG, "FollowingEmptySymlink", "Treating empty symlink as regular path {0}", path); } } if ((attributes & FileAttributes.Directory) == FileAttributes.Directory) { IMetahash metahash; if (!options.SkipMetadata) { metahash = Utility.WrapMetadata(MetadataGenerator.GenerateMetadata(path, attributes, options, snapshot), options); } else { metahash = emptymetadata; } Logging.Log.WriteVerboseMessage(FILELOGTAG, "AddDirectory", "Adding directory {0}", path); await AddFolderToOutputAsync(path, lastwrite, metahash, database, streamblockchannel).ConfigureAwait(false); return(false); } // Regular file, keep going return(true); }
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, long lastfilesetid) { return(AutomationExtensions.RunTask(new { Input = Backup.Channels.SourcePaths.ForRead, StreamBlockChannel = Channels.StreamBlock.ForWrite, Output = Backup.Channels.ProcessedFiles.ForWrite, }, async self => { var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options); var CHECKFILETIMEONLY = options.CheckFiletimeOnly; var DISABLEFILETIMECHECK = options.DisableFiletimeCheck; while (true) { var path = await self.Input.ReadAsync(); var lastwrite = new DateTime(0, DateTimeKind.Utc); var attributes = default(FileAttributes); try { lastwrite = snapshot.GetLastWriteTimeUtc(path); } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path); } try { attributes = snapshot.GetAttributes(path); } catch (Exception ex) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "FailedAttributeRead", "Failed to read attributes from {0}: {1}", path, ex.Message); } // If we only have metadata, stop here if (await ProcessMetadata(path, attributes, lastwrite, options, snapshot, emptymetadata, database, self.StreamBlockChannel).ConfigureAwait(false)) { try { if (CHECKFILETIMEONLY || DISABLEFILETIMECHECK) { var tmp = await database.GetFileLastModifiedAsync(path, lastfilesetid, false); await self.Output.WriteAsync(new FileEntry() { OldId = tmp.Item1, Path = path, Attributes = attributes, LastWrite = lastwrite, OldModified = tmp.Item2, LastFileSize = tmp.Item3, OldMetaHash = null, OldMetaSize = -1 }); } else { var res = await database.GetFileEntryAsync(path, lastfilesetid); await self.Output.WriteAsync(new FileEntry() { OldId = res == null ? -1 : res.id, Path = path, Attributes = attributes, LastWrite = lastwrite, OldModified = res == null ? new DateTime(0) : res.modified, LastFileSize = res == null ? -1 : res.filesize, OldMetaHash = res == null ? null : res.metahash, OldMetaSize = res == null ? -1 : res.metasize }); } } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "ProcessingMetadataFailed", ex, "Failed to process entry, path: {0}", path); } } } })); }
public static Task Run(Options options, BackupDatabase database, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.StreamBlock.ForRead, ProgressChannel = Channels.ProgressEvents.ForWrite, BlockOutput = Channels.OutputBlocks.ForWrite }, async self => { var blocksize = options.Blocksize; var filehasher = Duplicati.Library.Utility.HashAlgorithmHelper.Create(options.FileHashAlgorithm); var blockhasher = Duplicati.Library.Utility.HashAlgorithmHelper.Create(options.BlockHashAlgorithm); var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options); var maxmetadatasize = (options.Blocksize / (long)options.BlockhashSize) * options.Blocksize; if (blockhasher == null) { throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported"); } if (filehasher == null) { throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(options.FileHashAlgorithm), "FileHashAlgorithmNotSupported"); } if (!blockhasher.CanReuseTransform) { throw new UserInformationException(Strings.Common.InvalidCryptoSystem(options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported"); } if (!filehasher.CanReuseTransform) { throw new UserInformationException(Strings.Common.InvalidCryptoSystem(options.FileHashAlgorithm), "FileHashAlgorithmNotSupported"); } using (var empty_metadata_stream = new MemoryStream(emptymetadata.Blob)) while (await taskreader.ProgressAsync) { var send_close = false; var filesize = 0L; var filename = string.Empty; var e = await self.Input.ReadAsync(); var cur = e.Result; try { var stream = e.Stream; using (var blocklisthashes = new Library.Utility.FileBackedStringList()) using (var hashcollector = new Library.Utility.FileBackedStringList()) { var blocklistbuffer = new byte[blocksize]; var blocklistoffset = 0L; long fslen = -1; try { fslen = stream.Length; } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "FileLengthFailure", ex, "Failed to read file length for file {0}", e.Path); } if (e.IsMetadata && fslen > maxmetadatasize) { //TODO: To fix this, the "WriteFileset" method in BackupHandler needs to // be updated such that it can select sets even when there are multiple // blocklist hashes for the metadata. 
// This could be done such that an extra query is made if the metadata // spans multiple blocklist hashes, as it is not expected to be common Logging.Log.WriteWarningMessage(LOGTAG, "TooLargeMetadata", null, "Metadata size is {0}, but the largest accepted size is {1}, recording empty metadata for {2}", fslen, maxmetadatasize, e.Path); empty_metadata_stream.Position = 0; stream = empty_metadata_stream; fslen = stream.Length; } await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = fslen, Type = EventType.FileStarted }); send_close = true; filehasher.Initialize(); var lastread = 0; var buf = new byte[blocksize]; var lastupdate = DateTime.Now; // Core processing loop, read blocks of data and hash individually while (((lastread = await stream.ForceStreamReadAsync(buf, blocksize)) != 0)) { // Run file hashing concurrently to squeeze a little extra concurrency out of it var pftask = Task.Run(() => filehasher.TransformBlock(buf, 0, lastread, buf, 0)); var hashdata = blockhasher.ComputeHash(buf, 0, lastread); var hashkey = Convert.ToBase64String(hashdata); // If we have too many hashes, flush the blocklist if (blocklistbuffer.Length - blocklistoffset < hashdata.Length) { var blkey = Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, (int)blocklistoffset)); blocklisthashes.Add(blkey); await DataBlock.AddBlockToOutputAsync(self.BlockOutput, blkey, blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true); blocklistoffset = 0; blocklistbuffer = new byte[blocksize]; } // Store the current hash in the blocklist Array.Copy(hashdata, 0, blocklistbuffer, blocklistoffset, hashdata.Length); blocklistoffset += hashdata.Length; hashcollector.Add(hashkey); filesize += lastread; // Don't spam updates if ((DateTime.Now - lastupdate).TotalSeconds > 10) { await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filesize, Type = EventType.FileProgressUpdate }); lastupdate = DateTime.Now; } // Make sure the filehasher is done with the buf instance before we pass it on await pftask; await DataBlock.AddBlockToOutputAsync(self.BlockOutput, hashkey, buf, 0, lastread, e.Hint, true); buf = new byte[blocksize]; } // If we have more than a single block of data, output the (trailing) blocklist if (hashcollector.Count > 1) { var blkey = Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, (int)blocklistoffset)); blocklisthashes.Add(blkey); await DataBlock.AddBlockToOutputAsync(self.BlockOutput, blkey, blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true); } filehasher.TransformFinalBlock(new byte[0], 0, 0); var filehash = Convert.ToBase64String(filehasher.Hash); var blocksetid = await database.AddBlocksetAsync(filehash, filesize, blocksize, hashcollector, blocklisthashes); cur.SetResult(new StreamProcessResult() { Streamlength = filesize, Streamhash = filehash, Blocksetid = blocksetid }); cur = null; } } catch (Exception ex) { try { if (cur != null) { cur.TrySetException(ex); } } catch { } // Rethrow if (ex.IsRetiredException()) { throw; } } finally { if (cur != null) { try { cur.TrySetCanceled(); } catch { } cur = null; } if (send_close) { await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filesize, Type = EventType.FileClosed }); } send_close = false; } } })); }
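// Illustrative sketch (hypothetical helper, not part of the pipeline above): the splitter reads the stream
// in fixed-size blocks, hashes each block, and appends the raw block hashes to a "blocklist" buffer that is
// itself hashed and emitted whenever it fills up. The metadata limit above follows from the same layout:
// one blocklist block of Blocksize bytes holds Blocksize / BlockhashSize hashes, each covering Blocksize
// bytes of data. The sketch below uses SHA-256 and returns the hashes directly instead of routing blocks
// through channels and the database; it assumes the usual System, System.IO and System.Collections.Generic usings.
private static (string FileHash, List<string> BlockHashes, List<string> BlocklistHashes) SplitIntoBlocksSketch(Stream stream, int blocksize)
{
    var blockHashes = new List<string>();
    var blocklistHashes = new List<string>();
    var blocklistbuffer = new byte[blocksize];
    var blocklistoffset = 0;

    using (var blockhasher = System.Security.Cryptography.SHA256.Create())
    using (var filehasher = System.Security.Cryptography.SHA256.Create())
    {
        var buf = new byte[blocksize];
        int read;
        while ((read = stream.Read(buf, 0, blocksize)) != 0)
        {
            // Hash the whole file and the individual block
            filehasher.TransformBlock(buf, 0, read, buf, 0);
            var hash = blockhasher.ComputeHash(buf, 0, read);

            // Flush the blocklist when it cannot hold another block hash
            if (blocklistbuffer.Length - blocklistoffset < hash.Length)
            {
                blocklistHashes.Add(Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, blocklistoffset)));
                blocklistoffset = 0;
            }

            Array.Copy(hash, 0, blocklistbuffer, blocklistoffset, hash.Length);
            blocklistoffset += hash.Length;
            blockHashes.Add(Convert.ToBase64String(hash));
        }

        // Only streams spanning more than one block need a trailing blocklist hash
        if (blockHashes.Count > 1)
            blocklistHashes.Add(Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, blocklistoffset)));

        filehasher.TransformFinalBlock(new byte[0], 0, 0);
        return (Convert.ToBase64String(filehasher.Hash), blockHashes, blocklistHashes);
    }
}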
private static async Task UploadVolumeAndIndex(SpillVolumeRequest target, IWriteChannel <IUploadRequest> outputChannel, Options options, BackupDatabase database) { var blockEntry = CreateFileEntryForUpload(target.BlockVolume, options); IndexVolumeWriter indexVolume = null; FileEntryItem indexEntry = null; if (target.IndexVolume != null) { indexVolume = await target.IndexVolume.CreateVolume(target.BlockVolume.RemoteFilename, blockEntry.Hash, blockEntry.Size, options, database).ConfigureAwait(false); indexEntry = CreateFileEntryForUpload(indexVolume, options); } var uploadRequest = new VolumeUploadRequest(target.BlockVolume, blockEntry, indexVolume, indexEntry); await outputChannel.WriteAsync(uploadRequest).ConfigureAwait(false); }
private async Task RunAsync(string[] sources, Library.Utility.IFilter filter) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin); // New isolated scope for each operation using (new IsolatedChannelScope()) using (m_database = new LocalBackupDatabase(m_options.Dbpath, m_options)) { m_result.SetDatabase(m_database); m_result.Dryrun = m_options.Dryrun; // Check the database integrity Utility.UpdateOptionsFromDb(m_database, m_options); Utility.VerifyParameters(m_database, m_options); var probe_path = m_database.GetFirstPath(); if (probe_path != null && Duplicati.Library.Utility.Utility.GuessDirSeparator(probe_path) != System.IO.Path.DirectorySeparatorChar.ToString()) { throw new UserInformationException(string.Format("The backup contains files that belong to another operating system. Proceeding with a backup would cause the database to contain paths from two different operating systems, which is not supported. To proceed without losing remote data, delete all filesets and make sure the --{0} option is set, then run the backup again to re-use the existing data on the remote store.", "no-auto-compact"), "CrossOsDatabaseReuseNotSupported"); } if (m_database.PartiallyRecreated) { throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated"); } if (m_database.RepairInProgress) { throw new UserInformationException("An attempt was made to repair the database, but the repair did not complete. This database may be incomplete and the backup process cannot continue. You may delete the local database and attempt to repair it again.", "DatabaseRepairInProgress"); } // If there is no filter, we set an empty filter to simplify the code // If there is a filter, we make sure that the sources are included m_filter = filter ??
new Library.Utility.FilterExpression(); m_sourceFilter = new Library.Utility.FilterExpression(sources, true); Task parallelScanner = null; Task uploader = null; try { // Setup runners and instances here using (var db = new Backup.BackupDatabase(m_database, m_options)) using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database)) using (var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp)) using (var stats = new Backup.BackupStatsCollector(m_result)) using (var bk = new Common.BackendHandler(m_options, m_backendurl, db, stats, m_result.TaskReader)) // Keep a reference to these channels to avoid shutdown using (var uploadtarget = ChannelManager.GetChannel(Backup.Channels.BackendRequest.ForWrite)) { long filesetid; var counterToken = new CancellationTokenSource(); using (var snapshot = GetSnapshot(sources, m_options)) { try { // Start parallel scan, or use the database if (m_options.DisableFileScanner) { var d = m_database.GetLastBackupFileCountAndSize(); m_result.OperationProgressUpdater.UpdatefileCount(d.Item1, d.Item2, true); } else { parallelScanner = Backup.CountFilesHandler.Run(sources, snapshot, m_result, m_options, m_sourceFilter, m_filter, m_result.TaskReader, counterToken.Token); } // Make sure the database is sane await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, true); // Start the uploader process uploader = Backup.BackendUploader.Run(bk, m_options, db, m_result, m_result.TaskReader, stats); // If we have an interrupted backup, grab the string lasttempfilelist = null; long lasttempfileid = -1; if (!m_options.DisableSyntheticFilelist) { var candidates = (await db.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToArray(); if (candidates.Length > 0) { lasttempfileid = candidates.Last().Key; lasttempfilelist = m_database.GetRemoteVolumeFromID(lasttempfileid).Name; } } // TODO: Rewrite to using the uploader process, or the BackendHandler interface // Do a remote verification, unless disabled PreBackupVerify(backend, lasttempfilelist); // If the previous backup was interrupted, send a synthetic list await Backup.UploadSyntheticFilelist.Run(db, m_options, m_result, m_result.TaskReader, lasttempfilelist, lasttempfileid); // Grab the previous backup ID, if any var prevfileset = m_database.FilesetTimes.FirstOrDefault(); if (prevfileset.Value.ToUniversalTime() > m_database.OperationTimestamp.ToUniversalTime()) { throw new Exception(string.Format("The previous backup has time {0}, but this backup has time {1}. Something is wrong with the clock.", prevfileset.Value.ToLocalTime(), m_database.OperationTimestamp.ToLocalTime())); } var lastfilesetid = prevfileset.Value.Ticks == 0 ? 
-1 : prevfileset.Key; // Rebuild any index files that are missing await Backup.RecreateMissingIndexFiles.Run(db, m_options, m_result, m_result.TaskReader); // This should be removed as the lookups are no longer used m_database.BuildLookupTable(m_options); // Prepare the operation by registering the filelist m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles); var repcnt = 0; while (repcnt < 100 && await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0) { filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++)); } if (await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0) { throw new Exception("Unable to generate a unique fileset name"); } var filesetvolumeid = await db.RegisterRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary); filesetid = await db.CreateFilesetAsync(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time); // create USN-based scanner if enabled var journalService = GetJournalService(sources, snapshot, filter, lastfilesetid); // Run the backup operation if (await m_result.TaskReader.ProgressAsync) { await RunMainOperation(sources, snapshot, journalService, db, stats, m_options, m_sourceFilter, m_filter, m_result, m_result.TaskReader, lastfilesetid).ConfigureAwait(false); } } finally { //If the scanner is still running for some reason, make sure we kill it now counterToken.Cancel(); } } // Ensure the database is in a sane state after adding data using (new Logging.Timer(LOGTAG, "VerifyConsistency", "VerifyConsistency")) await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, false); // Send the actual filelist if (await m_result.TaskReader.ProgressAsync) { await Backup.UploadRealFilelist.Run(m_result, db, m_options, filesetvolume, filesetid, m_result.TaskReader); } // Wait for upload completion m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload); var lastVolumeSize = await FlushBackend(m_result, uploadtarget, uploader).ConfigureAwait(false); // Make sure we have the database up-to-date await db.CommitTransactionAsync("CommitAfterUpload", false); // TODO: Remove this later m_transaction = m_database.BeginTransaction(); if (await m_result.TaskReader.ProgressAsync) { CompactIfRequired(backend, lastVolumeSize); } if (m_options.UploadVerificationFile && await m_result.TaskReader.ProgressAsync) { m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload); FilelistProcessor.UploadVerificationFile(backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction); } if (m_options.Dryrun) { m_transaction.Rollback(); m_transaction = null; } else { using (new Logging.Timer(LOGTAG, "CommitFinalizingBackup", "CommitFinalizingBackup")) m_transaction.Commit(); m_transaction = null; if (m_result.TaskControlRendevouz() != TaskControlState.Stop) { if (m_options.NoBackendverification) { UpdateStorageStatsFromDatabase(); } else { PostBackupVerification(); } } } m_database.WriteResults(); m_database.PurgeLogData(m_options.LogRetention); if (m_options.AutoVacuum) { m_database.Vacuum(); } m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete); return; } } catch (Exception ex) { var aex = BuildException(ex, uploader, parallelScanner); Logging.Log.WriteErrorMessage(LOGTAG, "FatalError", ex, "Fatal error"); if (aex == ex) { throw; } throw aex; } finally { if (parallelScanner != null && !parallelScanner.IsCompleted) 
{ parallelScanner.Wait(500); } // TODO: We want to commit? always? if (m_transaction != null) { try { m_transaction.Rollback(); } catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RollbackError", ex, "Rollback error: {0}", ex.Message); } } } } }
public static Task Run(Options options, BackupDatabase database, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.SpillPickup.ForRead, Output = Channels.BackendRequest.ForWrite, }, async self => { var lst = new List <VolumeUploadRequest>(); while (!await self.Input.IsRetiredAsync) { try { lst.Add((VolumeUploadRequest)await self.Input.ReadAsync()); } catch (Exception ex) { if (ex.IsRetiredException()) { break; } throw; } } while (lst.Count > 1) { // We ignore the stop signal, but not the pause and terminate await taskreader.ProgressAsync; VolumeUploadRequest target = null; var source = lst[0]; // Finalize the current work source.BlockVolume.Close(); // Remove it from the list of active operations lst.RemoveAt(0); var buffer = new byte[options.Blocksize]; using (var rd = new BlockVolumeReader(options.CompressionModule, source.BlockVolume.LocalFilename, options)) { foreach (var file in rd.Blocks) { // Grab a target if (target == null) { if (lst.Count == 0) { // No more targets, make one target = new VolumeUploadRequest(new BlockVolumeWriter(options), source.IndexVolume == null ? null : new TemporaryIndexVolume(options)); target.BlockVolume.VolumeID = await database.RegisterRemoteVolumeAsync(target.BlockVolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary); } else { // Grab the next target target = lst[0]; lst.RemoveAt(0); } // We copy all the blocklisthashes, which may create duplicates // but otherwise we need to query all hashes to see if they are blocklisthashes if (source.IndexVolume != null) { source.IndexVolume.CopyTo(target.IndexVolume, true); } } var len = rd.ReadBlock(file.Key, buffer); target.BlockVolume.AddBlock(file.Key, buffer, 0, len, Duplicati.Library.Interface.CompressionHint.Default); await database.MoveBlockToVolumeAsync(file.Key, len, source.BlockVolume.VolumeID, target.BlockVolume.VolumeID); if (target.IndexVolume != null) { target.IndexVolume.AddBlock(file.Key, len); } if (target.BlockVolume.Filesize > options.VolumeSize - options.Blocksize) { target.BlockVolume.Close(); await self.Output.WriteAsync(target); target = null; } } } // Make sure they are out of the database System.IO.File.Delete(source.BlockVolume.LocalFilename); await database.SafeDeleteRemoteVolumeAsync(source.BlockVolume.RemoteFilename); // Re-inject the target if it has content if (target != null) { lst.Insert(lst.Count == 0 ? 0 : 1, target); } } foreach (var n in lst) { // We ignore the stop signal, but not the pause and terminate await taskreader.ProgressAsync; n.BlockVolume.Close(); await self.Output.WriteAsync(n); } })); }
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupStatsCollector stats, BackupDatabase database) { return(AutomationExtensions.RunTask( new { Input = Channels.ProcessedFiles.ForRead, ProgressChannel = Channels.ProgressEvents.ForWrite, Output = Channels.AcceptedChangedFile.ForWrite }, async self => { var EMPTY_METADATA = Utility.WrapMetadata(new Dictionary <string, string>(), options); // Pre-cache the option variables here to simplify and // speed up repeated option access below var SKIPFILESLARGERTHAN = options.SkipFilesLargerThan; // Zero and max both indicate no size limit if (SKIPFILESLARGERTHAN == long.MaxValue) { SKIPFILESLARGERTHAN = 0; } var DISABLEFILETIMECHECK = options.DisableFiletimeCheck; var CHECKFILETIMEONLY = options.CheckFiletimeOnly; var SKIPMETADATA = options.SkipMetadata; while (true) { var e = await self.Input.ReadAsync(); long filestatsize = -1; try { filestatsize = snapshot.GetFileSize(e.Path); } catch (Exception ex) { Logging.Log.WriteExplicitMessage(FILELOGTAG, "FailedToReadSize", ex, "Failed to read size of file: {0}", e.Path); } await stats.AddExaminedFile(filestatsize); // Stop now if the file is too large var tooLargeFile = SKIPFILESLARGERTHAN != 0 && filestatsize >= 0 && filestatsize > SKIPFILESLARGERTHAN; if (tooLargeFile) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckTooLarge", "Skipped checking file, because the size exceeds limit {0}", e.Path); await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filestatsize, Type = EventType.FileSkipped }); continue; } // Invalid ID indicates a new file var isNewFile = e.OldId < 0; // If we disable the filetime check, we always assume that the file has changed // Otherwise we check that the timestamps are different or if any of them are empty var timestampChanged = DISABLEFILETIMECHECK || e.LastWrite != e.OldModified || e.LastWrite.Ticks == 0 || e.OldModified.Ticks == 0; // Avoid generating a new metadata blob if timestamp has not changed // and we only check for timestamp changes if (CHECKFILETIMEONLY && !timestampChanged && !isNewFile) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckNoTimestampChange", "Skipped checking file, because timestamp was not updated {0}", e.Path); try { await database.AddUnmodifiedAsync(e.OldId, e.LastWrite); } catch (Exception ex) { if (ex.IsRetiredException()) { throw; } Logging.Log.WriteWarningMessage(FILELOGTAG, "FailedToAddFile", ex, "Failed while attempting to add unmodified file to database: {0}", e.Path); } await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filestatsize, Type = EventType.FileSkipped }); continue; } // If we have disabled the filetime check, we do not have the metadata info // but we want to know if the metadata is potentially changed if (!isNewFile && DISABLEFILETIMECHECK) { var tp = await database.GetMetadataHashAndSizeForFileAsync(e.OldId); if (tp != null) { e.OldMetaSize = tp.Item1; e.OldMetaHash = tp.Item2; } } // Compute current metadata e.MetaHashAndSize = SKIPMETADATA ?
EMPTY_METADATA : Utility.WrapMetadata(MetadataGenerator.GenerateMetadata(e.Path, e.Attributes, options, snapshot), options); e.MetadataChanged = !SKIPMETADATA && (e.MetaHashAndSize.Blob.Length != e.OldMetaSize || e.MetaHashAndSize.FileHash != e.OldMetaHash); // Check if the file is new, or something indicates a change var filesizeChanged = filestatsize < 0 || e.LastFileSize < 0 || filestatsize != e.LastFileSize; if (isNewFile || timestampChanged || filesizeChanged || e.MetadataChanged) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "CheckFileForChanges", "Checking file for changes {0}, new: {1}, timestamp changed: {2}, size changed: {3}, metadatachanged: {4}, {5} vs {6}", e.Path, isNewFile, timestampChanged, filesizeChanged, e.MetadataChanged, e.LastWrite, e.OldModified); await self.Output.WriteAsync(e); } else { Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckNoMetadataChange", "Skipped checking file, because no metadata was updated {0}", e.Path); try { await database.AddUnmodifiedAsync(e.OldId, e.LastWrite); } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "FailedToAddFile", ex, "Failed while attempting to add unmodified file to database: {0}", e.Path); } await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filestatsize, Type = EventType.FileSkipped }); } } })); }
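// Illustrative sketch: the filter above reduces to one decision per file. A hypothetical, side-effect-free
// version of that decision is shown below (names are placeholders, not the actual Duplicati API); it mirrors
// the checks above: an invalid old ID means "new file", a disabled filetime check forces a timestamp change,
// and otherwise timestamp, size and metadata hash are compared against the values from the previous fileset.
private static bool ShouldExamineFileSketch(long oldId, DateTime lastWrite, DateTime oldModified, long currentSize, long lastSize, string metaHash, long metaSize, string oldMetaHash, long oldMetaSize, bool disableFiletimeCheck, bool skipMetadata)
{
    var isNewFile = oldId < 0;
    var timestampChanged = disableFiletimeCheck || lastWrite != oldModified || lastWrite.Ticks == 0 || oldModified.Ticks == 0;
    var filesizeChanged = currentSize < 0 || lastSize < 0 || currentSize != lastSize;
    var metadataChanged = !skipMetadata && (metaSize != oldMetaSize || metaHash != oldMetaHash);

    // Any of the four indicators sends the file on for full processing
    return isNewFile || timestampChanged || filesizeChanged || metadataChanged;
}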
public static Task Run(BackupResults result, BackupDatabase db, Options options, FilesetVolumeWriter filesetvolume, long filesetid, Common.ITaskReader taskreader) { return(AutomationExtensions.RunTask(new { Output = Channels.BackendRequest.ForWrite, }, async self => { if (!await taskreader.ProgressAsync) { return; } // Update the reported source and backend changes using (new Logging.Timer(LOGTAG, "UpdateChangeStatistics", "UpdateChangeStatistics")) await db.UpdateChangeStatisticsAsync(result); var changeCount = result.AddedFiles + result.ModifiedFiles + result.DeletedFiles + result.AddedFolders + result.ModifiedFolders + result.DeletedFolders + result.AddedSymlinks + result.ModifiedSymlinks + result.DeletedSymlinks; // Changes in the filelist trigger a filelist upload if (options.UploadUnchangedBackups || changeCount > 0) { using (new Logging.Timer(LOGTAG, "UploadNewFileset", "Uploading a new fileset")) { if (!string.IsNullOrEmpty(options.ControlFiles)) { foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries)) { filesetvolume.AddControlFile(p, options.GetCompressionHintFromFilename(p)); } } if (!await taskreader.ProgressAsync) { return; } await db.WriteFilesetAsync(filesetvolume, filesetid); filesetvolume.Close(); if (!await taskreader.ProgressAsync) { return; } await db.UpdateRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null); await db.CommitTransactionAsync("CommitUpdateRemoteVolume"); await self.Output.WriteAsync(new FilesetUploadRequest(filesetvolume)); } } else { Logging.Log.WriteVerboseMessage(LOGTAG, "RemovingLeftoverTempFile", "Removing temp files, as no data needs to be uploaded"); await db.RemoveRemoteVolumeAsync(filesetvolume.RemoteFilename); } await db.CommitTransactionAsync("CommitUpdateRemoteVolume"); })); }
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, BackupStatsCollector stats, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.AcceptedChangedFile.ForRead, StreamBlockChannel = Channels.StreamBlock.ForWrite, }, async self => { var blocksize = options.Blocksize; while (await taskreader.ProgressAsync) { var e = await self.Input.ReadAsync(); try { var hint = options.GetCompressionHintFromFilename(e.Path); var oldHash = e.OldId < 0 ? null : await database.GetFileHashAsync(e.OldId); StreamProcessResult filestreamdata; // Process metadata and actual data in parallel var metatask = Task.Run(async() => { // If we have determined that metadata has not changed, just grab the ID if (!e.MetadataChanged) { var res = await database.GetMetadataIDAsync(e.MetaHashAndSize.FileHash, e.MetaHashAndSize.Blob.Length); if (res.Item1) { return res.Item2; } Logging.Log.WriteWarningMessage(FILELOGTAG, "UnexpectedMetadataLookup", null, "Metadata was reported as not changed, but still requires being added?\nHash: {0}, Length: {1}, ID: {2}, Path: {3}", e.MetaHashAndSize.FileHash, e.MetaHashAndSize.Blob.Length, res.Item2, e.Path); e.MetadataChanged = true; } return (await MetadataPreProcess.AddMetadataToOutputAsync(e.Path, e.MetaHashAndSize, database, self.StreamBlockChannel)).Item2; }); using (var fs = snapshot.OpenRead(e.Path)) filestreamdata = await StreamBlock.ProcessStream(self.StreamBlockChannel, e.Path, fs, false, hint); await stats.AddOpenedFile(filestreamdata.Streamlength); var metadataid = await metatask; var filekey = filestreamdata.Streamhash; var filesize = filestreamdata.Streamlength; if (oldHash != filekey) { if (oldHash == null) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "NewFile", "New file {0}", e.Path); } else { Logging.Log.WriteVerboseMessage(FILELOGTAG, "ChangedFile", "File has changed {0}", e.Path); } if (e.OldId < 0) { await stats.AddAddedFile(filesize); if (options.Dryrun) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "WouldAddNewFile", "Would add new file {0}, size {1}", e.Path, Library.Utility.Utility.FormatSizeString(filesize)); } } else { await stats.AddModifiedFile(filesize); if (options.Dryrun) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "WouldAddChangedFile", "Would add changed file {0}, size {1}", e.Path, Library.Utility.Utility.FormatSizeString(filesize)); } } await database.AddFileAsync(e.Path, e.LastWrite, filestreamdata.Blocksetid, metadataid); } else if (e.MetadataChanged) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "FileMetadataChanged", "File has only metadata changes {0}", e.Path); await database.AddFileAsync(e.Path, e.LastWrite, filestreamdata.Blocksetid, metadataid); } else /*if (e.OldId >= 0)*/ { // When we write the file to output, update the last modified time Logging.Log.WriteVerboseMessage(FILELOGTAG, "NoFileChanges", "File has not changed {0}", e.Path); try { await database.AddUnmodifiedAsync(e.OldId, e.LastWrite); } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "FailedToAddFile", ex, "Failed while attempting to add unmodified file to database: {0}", e.Path); } } } catch (Exception ex) { if (ex.IsRetiredException()) { return; } else { Logging.Log.WriteWarningMessage(FILELOGTAG, "PathProcessingFailed", ex, "Failed to process path: {0}", e.Path); } } } } )); }
/// <summary> /// Performs the bulk of work by starting all relevant processes /// </summary> private static async Task RunMainOperation(Snapshots.ISnapshotService snapshot, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid) { using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation")) { // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it var tb = options.CompressionHints.Count; Task all; using (new ChannelScope()) { all = Task.WhenAll( new [] { Backup.DataBlockProcessor.Run(database, options, taskreader), Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader), Backup.StreamBlockSplitter.Run(options, database, taskreader), Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader), Backup.FilePreFilterProcess.Run(snapshot, options, stats, database), Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid), Backup.SpillCollectorProcess.Run(options, database, taskreader), Backup.ProgressHandler.Run(result) } // Spawn additional block hashers .Union( Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader)) ) // Spawn additional compressors .Union( Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader)) ) ); } await all; if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1) { await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist); } result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true); } }
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupStatsCollector stats, BackupDatabase database) { return(AutomationExtensions.RunTask( new { Input = Channels.ProcessedFiles.ForRead, Output = Channels.AcceptedChangedFile.ForWrite }, async self => { var EMPTY_METADATA = Utility.WrapMetadata(new Dictionary <string, string>(), options); var blocksize = options.Blocksize; while (true) { var e = await self.Input.ReadAsync(); long filestatsize = -1; try { filestatsize = snapshot.GetFileSize(e.Path); } catch (Exception ex) { Logging.Log.WriteExplicitMessage(FILELOGTAG, "FailedToReadSize", ex, "Failed to read size of file: {0}", e.Path); } await stats.AddExaminedFile(filestatsize); e.MetaHashAndSize = options.StoreMetadata ? Utility.WrapMetadata(await MetadataGenerator.GenerateMetadataAsync(e.Path, e.Attributes, options, snapshot), options) : EMPTY_METADATA; var timestampChanged = e.LastWrite != e.OldModified || e.LastWrite.Ticks == 0 || e.OldModified.Ticks == 0; var filesizeChanged = filestatsize < 0 || e.LastFileSize < 0 || filestatsize != e.LastFileSize; var tooLargeFile = options.SkipFilesLargerThan != long.MaxValue && options.SkipFilesLargerThan != 0 && filestatsize >= 0 && filestatsize > options.SkipFilesLargerThan; e.MetadataChanged = !options.CheckFiletimeOnly && !options.SkipMetadata && (e.MetaHashAndSize.Blob.Length != e.OldMetaSize || e.MetaHashAndSize.FileHash != e.OldMetaHash); if ((e.OldId < 0 || options.DisableFiletimeCheck || timestampChanged || filesizeChanged || e.MetadataChanged) && !tooLargeFile) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "CheckFileForChanges", "Checking file for changes {0}, new: {1}, timestamp changed: {2}, size changed: {3}, metadatachanged: {4}, {5} vs {6}", e.Path, e.OldId <= 0, timestampChanged, filesizeChanged, e.MetadataChanged, e.LastWrite, e.OldModified); await self.Output.WriteAsync(e); } else { if (tooLargeFile) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckTooLarge", "Skipped checking file, because the size exceeds limit {0}", e.Path); } else { Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckNoTimestampChange", "Skipped checking file, because timestamp was not updated {0}", e.Path); } await database.AddUnmodifiedAsync(e.OldId, e.LastWrite); } } })); }
/// <summary> /// Adds a symlink to the output. /// </summary> /// <param name="filename">The name of the symlink to record</param> /// <param name="lastModified">The value of the lastModified timestamp</param> /// <param name="database">The database to use</param> /// <param name="streamblockchannel">The channel to write blocks to</param> /// <param name="meta">The metadata to record</param> private static async Task AddSymlinkToOutputAsync(string filename, DateTime lastModified, IMetahash meta, BackupDatabase database, IWriteChannel <StreamBlock> streamblockchannel) { var metadataid = await AddMetadataToOutputAsync(filename, meta, database, streamblockchannel).ConfigureAwait(false); await database.AddSymlinkEntryAsync(filename, metadataid.Item2, lastModified); }
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, long lastfilesetid, CancellationToken token) { return(AutomationExtensions.RunTask(new { Input = Backup.Channels.SourcePaths.ForRead, StreamBlockChannel = Channels.StreamBlock.ForWrite, Output = Backup.Channels.ProcessedFiles.ForWrite, }, async self => { var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options); var prevprefix = new KeyValuePair <string, long>(null, -1); var CHECKFILETIMEONLY = options.CheckFiletimeOnly; var DISABLEFILETIMECHECK = options.DisableFiletimeCheck; while (true) { var path = await self.Input.ReadAsync(); var lastwrite = new DateTime(0, DateTimeKind.Utc); var attributes = default(FileAttributes); try { lastwrite = snapshot.GetLastWriteTimeUtc(path); } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path); } try { attributes = snapshot.GetAttributes(path); } catch (Exception ex) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "FailedAttributeRead", "Failed to read attributes from {0}: {1}", path, ex.Message); } // If we only have metadata, stop here if (await ProcessMetadata(path, attributes, lastwrite, options, snapshot, emptymetadata, database, self.StreamBlockChannel).ConfigureAwait(false)) { try { var split = Database.LocalDatabase.SplitIntoPrefixAndName(path); long prefixid; if (string.Equals(prevprefix.Key, split.Key, StringComparison.Ordinal)) { prefixid = prevprefix.Value; } else { prefixid = await database.GetOrCreatePathPrefix(split.Key); prevprefix = new KeyValuePair <string, long>(split.Key, prefixid); } if (CHECKFILETIMEONLY || DISABLEFILETIMECHECK) { var tmp = await database.GetFileLastModifiedAsync(prefixid, split.Value, lastfilesetid, false); await self.Output.WriteAsync(new FileEntry { OldId = tmp.Item1, Path = path, PathPrefixID = prefixid, Filename = split.Value, Attributes = attributes, LastWrite = lastwrite, OldModified = tmp.Item2, LastFileSize = tmp.Item3, OldMetaHash = null, OldMetaSize = -1 }); } else { var res = await database.GetFileEntryAsync(prefixid, split.Value, lastfilesetid); await self.Output.WriteAsync(new FileEntry { OldId = res == null ? -1 : res.id, Path = path, PathPrefixID = prefixid, Filename = split.Value, Attributes = attributes, LastWrite = lastwrite, OldModified = res == null ? new DateTime(0) : res.modified, LastFileSize = res == null ? -1 : res.filesize, OldMetaHash = res == null ? null : res.metahash, OldMetaSize = res == null ? -1 : res.metasize }); } } catch (Exception ex) { if (ex.IsRetiredException() || token.IsCancellationRequested) { continue; } Logging.Log.WriteWarningMessage(FILELOGTAG, "ProcessingMetadataFailed", ex, "Failed to process entry, path: {0}", path); } } } })); }
public static Task Run(BackupDatabase database, Options options, BackupResults result, ITaskReader taskreader, string lasttempfilelist, long lasttempfileid) { return(AutomationExtensions.RunTask(new { UploadChannel = Channels.BackendRequest.ForWrite }, async self => { // Check if we should upload a synthetic filelist if (options.DisableSyntheticFilelist || string.IsNullOrWhiteSpace(lasttempfilelist) || lasttempfileid < 0) { return; } // Check that we still need to process this after the cleanup has performed its duties var syntbase = await database.GetRemoteVolumeFromIDAsync(lasttempfileid); // If we do not have a valid entry, warn and quit if (syntbase.Name == null || syntbase.State == RemoteVolumeState.Deleted) { // TODO: If the repair succeeds, this could give a false warning? Logging.Log.WriteWarningMessage(LOGTAG, "MissingTemporaryFilelist", null, "Expected there to be a temporary fileset for synthetic filelist ({0}, {1}), but none was found?", lasttempfileid, lasttempfilelist); return; } // The fileset is missing or has already been uploaded or repaired if (syntbase.Name == null || (syntbase.State != RemoteVolumeState.Uploading && syntbase.State != RemoteVolumeState.Temporary)) { Logging.Log.WriteInformationMessage(LOGTAG, "SkippingSyntheticListUpload", "Skipping synthetic upload because temporary fileset appears to be complete: ({0}, {1}, {2})", lasttempfileid, lasttempfilelist, syntbase.State); return; } // Ready to build and upload the synthetic list await database.CommitTransactionAsync("PreSyntheticFilelist"); var incompleteFilesets = (await database.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToList(); result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize); Logging.Log.WriteInformationMessage(LOGTAG, "PreviousBackupFilelistUpload", "Uploading filelist from previous interrupted backup"); if (!await taskreader.ProgressAsync) { return; } var incompleteSet = incompleteFilesets.Last(); var badIds = from n in incompleteFilesets select n.Key; var prevs = (from n in await database.GetFilesetTimesAsync() where n.Key < incompleteSet.Key && !badIds.Contains(n.Key) orderby n.Key select n.Key).ToArray(); var prevId = prevs.Length == 0 ?
-1 : prevs.Last(); FilesetVolumeWriter fsw = null; try { var s = 1; var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s); var oldFilesetID = incompleteSet.Key; // Probe for an unused filename while (s < 60) { var id = await database.GetRemoteVolumeIDAsync(VolumeBase.GenerateFilename(RemoteVolumeType.Files, options, null, fileTime)); if (id < 0) { break; } fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s); } fsw = new FilesetVolumeWriter(options, fileTime); fsw.VolumeID = await database.RegisterRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary); if (!string.IsNullOrEmpty(options.ControlFiles)) { foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries)) { fsw.AddControlFile(p, options.GetCompressionHintFromFilename(p)); } } var newFilesetID = await database.CreateFilesetAsync(fsw.VolumeID, fileTime); await database.LinkFilesetToVolumeAsync(newFilesetID, fsw.VolumeID); await database.AppendFilesFromPreviousSetAsync(null, newFilesetID, prevId, fileTime); await database.WriteFilesetAsync(fsw, newFilesetID); if (!await taskreader.ProgressAsync) { return; } await database.UpdateRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null); await database.CommitTransactionAsync("CommitUpdateFilelistVolume"); await self.UploadChannel.WriteAsync(new FilesetUploadRequest(fsw)); fsw = null; } catch { await database.RollbackTransactionAsync(); throw; } finally { if (fsw != null) { try { fsw.Dispose(); } catch { fsw = null; } } } } )); }
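// Illustrative sketch of the filename probing above: if the generated remote filename for the synthetic
// fileset already exists, the timestamp is advanced one second at a time (up to a small bound) until an
// unused name is found. The lookup delegate is a hypothetical stand-in for the database.GetRemoteVolumeIDAsync /
// VolumeBase.GenerateFilename pair used above.
private static async Task<DateTime> ProbeUnusedFileTimeSketch(DateTime baseTime, Func<DateTime, Task<long>> lookupVolumeIdByTime)
{
    var fileTime = baseTime + TimeSpan.FromSeconds(1);
    for (var s = 1; s < 60; s++)
    {
        // A negative volume ID means no remote volume is registered for this timestamp
        if (await lookupVolumeIdByTime(fileTime) < 0)
            break;
        fileTime = baseTime + TimeSpan.FromSeconds(s + 1);
    }
    return fileTime;
}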
/// <summary> /// Performs the bulk of work by starting all relevant processes /// </summary> private static async Task RunMainOperation(IEnumerable <string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid) { using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation")) { // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it var tb = options.CompressionHints.Count; Task all; using (new ChannelScope()) { all = Task.WhenAll( new [] { Backup.DataBlockProcessor.Run(database, options, taskreader), Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader), Backup.StreamBlockSplitter.Run(options, database, taskreader), Backup.FileEnumerationProcess.Run(sources, snapshot, journalService, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader), Backup.FilePreFilterProcess.Run(snapshot, options, stats, database), Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid), Backup.SpillCollectorProcess.Run(options, database, taskreader), Backup.ProgressHandler.Run(result) } // Spawn additional block hashers .Union( Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader)) ) // Spawn additional compressors .Union( Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader)) ) ); } await all.ConfigureAwait(false); if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1) { await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist); } else if (journalService != null) { // append files from previous fileset, unless part of modifiedSources, which we've just scanned await database.AppendFilesFromPreviousSetWithPredicateAsync((path, fileSize) => { if (journalService.IsPathEnumerated(path)) { return(true); } if (fileSize >= 0) { stats.AddExaminedFile(fileSize); } return(false); }); // store journal data in database var data = journalService.VolumeDataList.Where(p => p.JournalData != null).Select(p => p.JournalData).ToList(); if (data.Any()) { // always record change journal data for current fileset (entry may be dropped later if nothing is uploaded) await database.CreateChangeJournalDataAsync(data); // update the previous fileset's change journal entry to resume at this point in case nothing was backed up await database.UpdateChangeJournalDataAsync(data, lastfilesetid); } } result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true); } }
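// Illustrative sketch of the fan-out pattern used by RunMainOperation above: a fixed set of pipeline stages
// is combined with a configurable number of extra workers for the CPU-heavy stages, and the whole group is
// awaited as a single task. The worker factories are hypothetical stand-ins for the Backup.*.Run(...) calls;
// the Enumerable.Range / Union / Task.WhenAll shape is the same as above.
private static Task RunWithExtraWorkersSketch(Func<Task> hashWorker, Func<Task> compressWorker, int extraHashers, int extraCompressors)
{
    var tasks = new[] { hashWorker(), compressWorker() }
        .Union(Enumerable.Range(0, Math.Max(0, extraHashers)).Select(_ => hashWorker()))
        .Union(Enumerable.Range(0, Math.Max(0, extraCompressors)).Select(_ => compressWorker()));

    return Task.WhenAll(tasks);
}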
/// <summary> /// Adds a folder to the output. /// </summary> /// <param name="filename">The name of the folder to record</param> /// <param name="lastModified">The value of the lastModified timestamp</param> /// <param name="meta">The metadata to record</param> /// <param name="database">The database to use</param> /// <param name="streamblockchannel">The channel to write blocks to</param> private static async Task AddFolderToOutputAsync(string filename, DateTime lastModified, IMetahash meta, BackupDatabase database, IWriteChannel <StreamBlock> streamblockchannel) { var metadataid = await AddMetadataToOutputAsync(filename, meta, database, streamblockchannel); await database.AddDirectoryEntryAsync(filename, metadataid.Item2, lastModified); }
public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.OutputBlocks.ForRead, Output = Channels.BackendRequest.ForWrite, SpillPickup = Channels.SpillPickup.ForWrite, }, async self => { var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None; var fullIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.Full; BlockVolumeWriter blockvolume = null; TemporaryIndexVolume indexvolume = null; try { while (true) { var b = await self.Input.ReadAsync(); // Lazy-start a new block volume if (blockvolume == null) { // Before we start a new volume, probe to see if it exists // This will delay creation of volumes for differential backups // There can be a race, such that two workers determine that // the block is missing, but this will be solved by the AddBlock call // which runs atomically if (await database.FindBlockIDAsync(b.HashKey, b.Size) >= 0) { b.TaskCompletion.TrySetResult(false); continue; } blockvolume = new BlockVolumeWriter(options); blockvolume.VolumeID = await database.RegisterRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary); indexvolume = noIndexFiles ? null : new TemporaryIndexVolume(options); } var newBlock = await database.AddBlockAsync(b.HashKey, b.Size, blockvolume.VolumeID); b.TaskCompletion.TrySetResult(newBlock); if (newBlock) { blockvolume.AddBlock(b.HashKey, b.Data, b.Offset, (int)b.Size, b.Hint); if (indexvolume != null) { indexvolume.AddBlock(b.HashKey, b.Size); if (b.IsBlocklistHashes && fullIndexFiles) { indexvolume.AddBlockListHash(b.HashKey, b.Size, b.Data); } } // If the volume is full, send to upload if (blockvolume.Filesize > options.VolumeSize - options.Blocksize) { //When uploading a new volume, we register the volumes and then flush the transaction // this ensures that the local database and remote storage are as closely related as possible await database.UpdateRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null); blockvolume.Close(); await database.CommitTransactionAsync("CommitAddBlockToOutputFlush"); FileEntryItem blockEntry = blockvolume.CreateFileEntryForUpload(options); TemporaryIndexVolume indexVolumeCopy = null; if (indexvolume != null) { indexVolumeCopy = new TemporaryIndexVolume(options); indexvolume.CopyTo(indexVolumeCopy, false); } var uploadRequest = new VolumeUploadRequest(blockvolume, blockEntry, indexVolumeCopy, options, database); blockvolume = null; indexvolume = null; // Write to output at the end here to prevent sending a full volume to the SpillCollector await self.Output.WriteAsync(uploadRequest); } } // We ignore the stop signal, but not the pause and terminate await taskreader.ProgressAsync; } } catch (Exception ex) { if (ex.IsRetiredException()) { // If we have collected data, merge all pending volumes into a single volume if (blockvolume != null && blockvolume.SourceSize > 0) { await self.SpillPickup.WriteAsync(new SpillVolumeRequest(blockvolume, indexvolume)); } } throw; } })); }
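// Illustrative sketch of the volume filling logic above, without channels or the database: each previously
// unseen block is appended to the current container, and the container is closed and handed off once another
// block could push it past the volume size limit. The in-memory set is a hypothetical single-threaded stand-in
// for the atomic FindBlockIDAsync / AddBlockAsync pair; all names below are placeholders.
private static void FillVolumesSketch(IEnumerable<(string Hash, byte[] Data)> blocks, long volumeSize, long blocksize, Action<List<(string Hash, byte[] Data)>> uploadVolume)
{
    var seen = new HashSet<string>();
    var current = new List<(string Hash, byte[] Data)>();
    long currentSize = 0;

    foreach (var block in blocks)
    {
        // Deduplicate: a block that is already stored is not written again
        if (!seen.Add(block.Hash))
            continue;

        current.Add(block);
        currentSize += block.Data.Length;

        // Same threshold as above: close the container before it can exceed the volume size
        if (currentSize > volumeSize - blocksize)
        {
            uploadVolume(current);
            current = new List<(string Hash, byte[] Data)>();
            currentSize = 0;
        }
    }

    // A leftover, partially filled container corresponds to what the spill collector receives above
    if (current.Count > 0)
        uploadVolume(current);
}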
private static async Task UploadVolumeAndIndex(SpillVolumeRequest target, IWriteChannel <IUploadRequest> outputChannel, Options options, BackupDatabase database) { var blockEntry = target.BlockVolume.CreateFileEntryForUpload(options); TemporaryIndexVolume indexVolumeCopy = null; if (target.IndexVolume != null) { indexVolumeCopy = new TemporaryIndexVolume(options); target.IndexVolume.CopyTo(indexVolumeCopy, false); } var uploadRequest = new VolumeUploadRequest(target.BlockVolume, blockEntry, indexVolumeCopy, options, database); await outputChannel.WriteAsync(uploadRequest).ConfigureAwait(false); }