public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new { UploadChannel = Channels.BackendRequest.ForWrite },
        async self =>
        {
            if (options.IndexfilePolicy != Options.IndexFileStrategy.None)
            {
                foreach (var blockfile in await database.GetMissingIndexFilesAsync())
                {
                    if (!await taskreader.ProgressAsync)
                        return;

                    Logging.Log.WriteInformationMessage(LOGTAG, "RecreateMissingIndexFile", "Re-creating missing index file for {0}", blockfile);
                    var w = await Common.IndexVolumeCreator.CreateIndexVolume(blockfile, options, database);

                    if (!await taskreader.ProgressAsync)
                        return;

                    await database.UpdateRemoteVolumeAsync(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                    await self.UploadChannel.WriteAsync(new IndexVolumeUploadRequest(w));
                }
            }
        });
}
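// Note on the pattern used by every Run method in this listing:
// AutomationExtensions.RunTask (from the CoCoL library) binds each marker in
// the anonymous object to a named channel, runs the async body, and retires
// the channels when the body completes or fails, which is what signals the
// downstream processes to drain and shut down. A minimal sketch of the shape,
// mirroring the method above (illustrative only, not an extra process):
//
//   return AutomationExtensions.RunTask(
//       new { UploadChannel = Channels.BackendRequest.ForWrite },
//       async self =>
//       {
//           // ... produce work items ...
//           await self.UploadChannel.WriteAsync(request);
//       });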
public TasksController(ITaskWriter taskWriter, ITaskUpdater taskUpdater, ITaskReader taskReader)
{
    _taskWriter = taskWriter;
    _taskUpdater = taskUpdater;
    _taskReader = taskReader;
}
public BackendUploader(Func<IBackend> backendFactory, Options options, DatabaseCommon database, ITaskReader taskReader, StatsCollector stats)
{
    m_backendFactory = backendFactory;
    m_options = options;
    m_taskReader = taskReader;
    m_stats = stats;
    m_database = database;
    m_progressUpdater = new FileProgressThrottler(stats, options.MaxUploadPrSecond);
}
public BackendHandler(Options options, string backendUrl, DatabaseCommon database, StatsCollector stats, ITaskReader taskreader)
    : base()
{
    m_backendurl = backendUrl;
    m_database = database;
    m_options = options;
    m_stats = stats;
    m_taskreader = taskreader;
    m_backend = DynamicLoader.BackendLoader.GetBackend(backendUrl, options.RawOptions);

    // Try not to leak hostnames or other information in the error messages
    var shortname = m_backendurl;
    try { shortname = new Library.Utility.Uri(shortname).Scheme; }
    catch { }

    if (m_backend == null)
        throw new Duplicati.Library.Interface.UserInformationException(string.Format("Backend not supported: {0}", shortname), "BackendNotSupported");
}
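// Example of the sanitization above (URL is hypothetical): an error for the
// backend URL "ftp://user:pass@backup.example.com/path" reports only the
// scheme "ftp", so the message never echoes credentials or hostnames.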
public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new
        {
            Input = Channels.OutputBlocks.ForRead,
            Output = Channels.BackendRequest.ForWrite,
            SpillPickup = Channels.SpillPickup.ForWrite,
        },
        async self =>
        {
            var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None;
            var fullIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.Full;

            BlockVolumeWriter blockvolume = null;
            TemporaryIndexVolume indexvolume = null;

            try
            {
                while (true)
                {
                    var b = await self.Input.ReadAsync();

                    // Lazily start a new block volume
                    if (blockvolume == null)
                    {
                        // Before we start a new volume, probe to see if the block already exists.
                        // This delays creation of volumes for differential backups.
                        // Two workers can race and both decide the block is missing, but this
                        // is resolved by the AddBlock call, which runs atomically.
                        if (await database.FindBlockIDAsync(b.HashKey, b.Size) >= 0)
                        {
                            b.TaskCompletion.TrySetResult(false);
                            continue;
                        }

                        blockvolume = new BlockVolumeWriter(options);
                        blockvolume.VolumeID = await database.RegisterRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary);

                        indexvolume = noIndexFiles ? null : new TemporaryIndexVolume(options);
                    }

                    var newBlock = await database.AddBlockAsync(b.HashKey, b.Size, blockvolume.VolumeID);
                    b.TaskCompletion.TrySetResult(newBlock);

                    if (newBlock)
                    {
                        blockvolume.AddBlock(b.HashKey, b.Data, b.Offset, (int)b.Size, b.Hint);
                        if (indexvolume != null)
                        {
                            indexvolume.AddBlock(b.HashKey, b.Size);
                            if (b.IsBlocklistHashes && fullIndexFiles)
                                indexvolume.AddBlockListHash(b.HashKey, b.Size, b.Data);
                        }

                        // If the volume is full, send it to upload
                        if (blockvolume.Filesize > options.VolumeSize - options.Blocksize)
                        {
                            // When uploading a new volume, we register the volumes and then flush the transaction.
                            // This keeps the local database and remote storage as closely in sync as possible.
                            await database.UpdateRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                            blockvolume.Close();

                            await database.CommitTransactionAsync("CommitAddBlockToOutputFlush");

                            FileEntryItem blockEntry = blockvolume.CreateFileEntryForUpload(options);

                            TemporaryIndexVolume indexVolumeCopy = null;
                            if (indexvolume != null)
                            {
                                indexVolumeCopy = new TemporaryIndexVolume(options);
                                indexvolume.CopyTo(indexVolumeCopy, false);
                            }

                            var uploadRequest = new VolumeUploadRequest(blockvolume, blockEntry, indexVolumeCopy, options, database);

                            blockvolume = null;
                            indexvolume = null;

                            // Write to the output channel last, to avoid sending a full volume to the SpillCollector
                            await self.Output.WriteAsync(uploadRequest);
                        }
                    }

                    // We ignore the stop signal, but not the pause and terminate
                    await taskreader.ProgressAsync;
                }
            }
            catch (Exception ex)
            {
                if (ex.IsRetiredException())
                {
                    // If we have collected data, hand the partial volume to the spill
                    // collector, which merges the pending volumes into fewer full ones
                    if (blockvolume != null && blockvolume.SourceSize > 0)
                        await self.SpillPickup.WriteAsync(new SpillVolumeRequest(blockvolume, indexvolume));
                }

                throw;
            }
        });
}
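// A worked example of the volume-full check above, assuming a 50 MiB dblock
// size and a 100 KiB block size (values are illustrative; the real numbers
// come from the --dblock-size and --blocksize options):
//
//   VolumeSize - Blocksize = 52,428,800 - 102,400 = 52,326,400 bytes
//
// The volume is sealed once its file size exceeds that threshold, so adding
// one more (at most Blocksize-sized) block can never push the finished
// volume past the configured VolumeSize.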
public static Task Run(IEnumerable<string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, FileAttributes fileAttributes, Duplicati.Library.Utility.IFilter sourcefilter, Duplicati.Library.Utility.IFilter emitfilter, Options.SymlinkStrategy symlinkPolicy, Options.HardlinkStrategy hardlinkPolicy, bool excludeemptyfolders, string[] ignorenames, string[] changedfilelist, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new { Output = Backup.Channels.SourcePaths.ForWrite },
        async self =>
        {
            var hardlinkmap = new Dictionary<string, string>();
            var mixinqueue = new Queue<string>();
            Duplicati.Library.Utility.IFilter enumeratefilter = emitfilter;

            bool includes;
            bool excludes;
            Library.Utility.FilterExpression.AnalyzeFilters(emitfilter, out includes, out excludes);

            // If the filter only includes, also include all folders,
            // so the enumeration can descend to the included files
            if (includes && !excludes)
                enumeratefilter = Library.Utility.FilterExpression.Combine(emitfilter, new Duplicati.Library.Utility.FilterExpression("*" + System.IO.Path.DirectorySeparatorChar, true));

            // Simplify checking for an empty list
            if (ignorenames != null && ignorenames.Length == 0)
                ignorenames = null;

            // If we have a specific list, use that instead of enumerating the filesystem
            IEnumerable<string> worklist;
            if (changedfilelist != null && changedfilelist.Length > 0)
            {
                worklist = changedfilelist.Where(x =>
                {
                    var fa = FileAttributes.Normal;
                    try { fa = snapshot.GetAttributes(x); }
                    catch { }

                    return AttributeFilter(x, fa, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, fileAttributes, enumeratefilter, ignorenames, mixinqueue);
                });
            }
            else
            {
                Library.Utility.Utility.EnumerationFilterDelegate attributeFilter = (root, path, attr) =>
                    AttributeFilter(path, attr, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, fileAttributes, enumeratefilter, ignorenames, mixinqueue);

                if (journalService != null)
                {
                    // Filter sources using the USN journal, to obtain a subset of files/folders that may have been modified
                    sources = journalService.GetModifiedSources(attributeFilter);
                }

                worklist = snapshot.EnumerateFilesAndFolders(sources, attributeFilter, (rootpath, errorpath, ex) =>
                {
                    Logging.Log.WriteWarningMessage(FILTER_LOGTAG, "FileAccessError", ex, "Error reported while accessing file: {0}", errorpath);
                });
            }

            var source = ExpandWorkList(worklist, mixinqueue, emitfilter, enumeratefilter);
            if (excludeemptyfolders)
                source = ExcludeEmptyFolders(source);

            // Process each path, and dequeue the mixins with symlinks as we go
            foreach (var s in source)
            {
                if (!await taskreader.ProgressAsync)
                    return;

                await self.Output.WriteAsync(s);
            }
        });
}
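// Example of the filter expansion above (filter is hypothetical): with an
// include-only filter such as "*.txt", no directory would ever match, and the
// enumeration could not descend into subfolders. Combining it with an include
// for "*" + DirectorySeparatorChar makes every folder pass the enumeration
// filter, while the original emitfilter still decides which paths are
// ultimately emitted.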
public static Task Run(Options options, BackupDatabase database, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new
        {
            Input = Channels.SpillPickup.ForRead,
            Output = Channels.BackendRequest.ForWrite,
        },
        async self =>
        {
            var lst = new List<VolumeUploadRequest>();

            // Collect all partially filled volumes from the block processors
            while (!await self.Input.IsRetiredAsync)
            {
                try
                {
                    lst.Add((VolumeUploadRequest)await self.Input.ReadAsync());
                }
                catch (Exception ex)
                {
                    if (ex.IsRetiredException())
                        break;
                    throw;
                }
            }

            // Merge the partial volumes until at most one remains
            while (lst.Count > 1)
            {
                // We ignore the stop signal, but not the pause and terminate
                await taskreader.ProgressAsync;

                VolumeUploadRequest target = null;
                var source = lst[0];

                // Finalize the current work
                source.BlockVolume.Close();

                // Remove it from the list of active operations
                lst.RemoveAt(0);

                var buffer = new byte[options.Blocksize];

                using (var rd = new BlockVolumeReader(options.CompressionModule, source.BlockVolume.LocalFilename, options))
                {
                    foreach (var file in rd.Blocks)
                    {
                        // Grab a target
                        if (target == null)
                        {
                            if (lst.Count == 0)
                            {
                                // No more targets, make one
                                target = new VolumeUploadRequest(new BlockVolumeWriter(options), source.IndexVolume == null ? null : new TemporaryIndexVolume(options));
                                target.BlockVolume.VolumeID = await database.RegisterRemoteVolumeAsync(target.BlockVolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary);
                            }
                            else
                            {
                                // Grab the next target
                                target = lst[0];
                                lst.RemoveAt(0);
                            }

                            // We copy all the blocklisthashes, which may create duplicates,
                            // but otherwise we would need to query all hashes to see if they are blocklisthashes
                            if (source.IndexVolume != null)
                                source.IndexVolume.CopyTo(target.IndexVolume, true);
                        }

                        var len = rd.ReadBlock(file.Key, buffer);
                        target.BlockVolume.AddBlock(file.Key, buffer, 0, len, Duplicati.Library.Interface.CompressionHint.Default);
                        await database.MoveBlockToVolumeAsync(file.Key, len, source.BlockVolume.VolumeID, target.BlockVolume.VolumeID);

                        if (target.IndexVolume != null)
                            target.IndexVolume.AddBlock(file.Key, len);

                        if (target.BlockVolume.Filesize > options.VolumeSize - options.Blocksize)
                        {
                            target.BlockVolume.Close();
                            await self.Output.WriteAsync(target);
                            target = null;
                        }
                    }
                }

                // Make sure the drained volume is removed from disk and from the database
                System.IO.File.Delete(source.BlockVolume.LocalFilename);
                await database.SafeDeleteRemoteVolumeAsync(source.BlockVolume.RemoteFilename);

                // Re-inject the target if it has content
                if (target != null)
                    lst.Insert(lst.Count == 0 ? 0 : 1, target);
            }

            // Upload whatever remains
            foreach (var n in lst)
            {
                // We ignore the stop signal, but not the pause and terminate
                await taskreader.ProgressAsync;

                n.BlockVolume.Close();
                await self.Output.WriteAsync(n);
            }
        });
}
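// Illustration of the merge above (sizes are hypothetical): three partial
// volumes of 10, 15 and 20 MiB with a 50 MiB volume size are repacked by the
// loop into a single 45 MiB volume, so the backend receives one well-filled
// dblock file instead of three small ones.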
public static Task Run(Options options, BackupDatabase database, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new
        {
            Input = Channels.StreamBlock.ForRead,
            ProgressChannel = Channels.ProgressEvents.ForWrite,
            BlockOutput = Channels.OutputBlocks.ForWrite
        },
        async self =>
        {
            var blocksize = options.Blocksize;
            var filehasher = Duplicati.Library.Utility.HashAlgorithmHelper.Create(options.FileHashAlgorithm);
            var blockhasher = Duplicati.Library.Utility.HashAlgorithmHelper.Create(options.BlockHashAlgorithm);
            var emptymetadata = Utility.WrapMetadata(new Dictionary<string, string>(), options);
            var maxmetadatasize = (options.Blocksize / (long)options.BlockhashSize) * options.Blocksize;

            if (blockhasher == null)
                throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
            if (filehasher == null)
                throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(options.FileHashAlgorithm), "FileHashAlgorithmNotSupported");

            if (!blockhasher.CanReuseTransform)
                throw new UserInformationException(Strings.Common.InvalidCryptoSystem(options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
            if (!filehasher.CanReuseTransform)
                throw new UserInformationException(Strings.Common.InvalidCryptoSystem(options.FileHashAlgorithm), "FileHashAlgorithmNotSupported");

            using (var empty_metadata_stream = new MemoryStream(emptymetadata.Blob))
                while (await taskreader.ProgressAsync)
                {
                    var send_close = false;
                    var filesize = 0L;
                    var filename = string.Empty;

                    var e = await self.Input.ReadAsync();
                    var cur = e.Result;

                    try
                    {
                        var stream = e.Stream;

                        using (var blocklisthashes = new Library.Utility.FileBackedStringList())
                        using (var hashcollector = new Library.Utility.FileBackedStringList())
                        {
                            var blocklistbuffer = new byte[blocksize];
                            var blocklistoffset = 0L;

                            long fslen = -1;
                            try { fslen = stream.Length; }
                            catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "FileLengthFailure", ex, "Failed to read file length for file {0}", e.Path); }

                            if (e.IsMetadata && fslen > maxmetadatasize)
                            {
                                // TODO: To fix this, the "WriteFileset" method in BackupHandler needs to
                                // be updated such that it can select sets even when there are multiple
                                // blocklist hashes for the metadata.
                                // This could be done such that an extra query is made if the metadata
                                // spans multiple blocklist hashes, as it is not expected to be common
                                Logging.Log.WriteWarningMessage(LOGTAG, "TooLargeMetadata", null, "Metadata size is {0}, but the largest accepted size is {1}, recording empty metadata for {2}", fslen, maxmetadatasize, e.Path);
                                empty_metadata_stream.Position = 0;
                                stream = empty_metadata_stream;
                                fslen = stream.Length;
                            }

                            await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = fslen, Type = EventType.FileStarted });
                            send_close = true;

                            filehasher.Initialize();
                            var lastread = 0;
                            var buf = new byte[blocksize];
                            var lastupdate = DateTime.Now;

                            // Core processing loop, read blocks of data and hash individually
                            while ((lastread = await stream.ForceStreamReadAsync(buf, blocksize)) != 0)
                            {
                                // Run file hashing concurrently to squeeze a little extra concurrency out of it
                                var pftask = Task.Run(() => filehasher.TransformBlock(buf, 0, lastread, buf, 0));

                                var hashdata = blockhasher.ComputeHash(buf, 0, lastread);
                                var hashkey = Convert.ToBase64String(hashdata);

                                // If the blocklist buffer is full, flush it as a blocklist block
                                if (blocklistbuffer.Length - blocklistoffset < hashdata.Length)
                                {
                                    var blkey = Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, (int)blocklistoffset));
                                    blocklisthashes.Add(blkey);
                                    await DataBlock.AddBlockToOutputAsync(self.BlockOutput, blkey, blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true);
                                    blocklistoffset = 0;
                                    blocklistbuffer = new byte[blocksize];
                                }

                                // Store the current hash in the blocklist
                                Array.Copy(hashdata, 0, blocklistbuffer, blocklistoffset, hashdata.Length);
                                blocklistoffset += hashdata.Length;
                                hashcollector.Add(hashkey);
                                filesize += lastread;

                                // Don't spam updates
                                if ((DateTime.Now - lastupdate).TotalSeconds > 10)
                                {
                                    await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filesize, Type = EventType.FileProgressUpdate });
                                    lastupdate = DateTime.Now;
                                }

                                // Make sure the filehasher is done with the buf instance before we pass it on
                                await pftask;
                                await DataBlock.AddBlockToOutputAsync(self.BlockOutput, hashkey, buf, 0, lastread, e.Hint, true);
                                buf = new byte[blocksize];
                            }

                            // If we have more than a single block of data, output the (trailing) blocklist
                            if (hashcollector.Count > 1)
                            {
                                var blkey = Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, (int)blocklistoffset));
                                blocklisthashes.Add(blkey);
                                await DataBlock.AddBlockToOutputAsync(self.BlockOutput, blkey, blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true);
                            }

                            filehasher.TransformFinalBlock(new byte[0], 0, 0);
                            var filehash = Convert.ToBase64String(filehasher.Hash);
                            var blocksetid = await database.AddBlocksetAsync(filehash, filesize, blocksize, hashcollector, blocklisthashes);
                            cur.SetResult(new StreamProcessResult() { Streamlength = filesize, Streamhash = filehash, Blocksetid = blocksetid });
                            cur = null;
                        }
                    }
                    catch (Exception ex)
                    {
                        try
                        {
                            if (cur != null)
                                cur.TrySetException(ex);
                        }
                        catch { }

                        // Rethrow
                        if (ex.IsRetiredException())
                            throw;
                    }
                    finally
                    {
                        if (cur != null)
                        {
                            try { cur.TrySetCanceled(); }
                            catch { }
                            cur = null;
                        }

                        if (send_close)
                            await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filesize, Type = EventType.FileClosed });
                        send_close = false;
                    }
                }
        });
}
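// A worked example of maxmetadatasize above, assuming a 100 KiB blocksize and
// a SHA-256 block hash (32 bytes, so BlockhashSize = 32; both values are
// illustrative):
//
//   (102,400 / 32) * 102,400 = 3,200 * 102,400 = 327,680,000 bytes
//
// i.e. one full blocklist block can reference 3,200 data blocks, so any
// metadata blob that fits within a single blocklist (roughly 312 MiB here) is
// accepted, and larger metadata is replaced with the empty blob.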
public static Task Run(BackupDatabase database, Options options, BackupResults result, ITaskReader taskreader, string lasttempfilelist, long lasttempfileid)
{
    return AutomationExtensions.RunTask(
        new { UploadChannel = Channels.BackendRequest.ForWrite },
        async self =>
        {
            // Check if we should upload a synthetic filelist
            if (options.DisableSyntheticFilelist || string.IsNullOrWhiteSpace(lasttempfilelist) || lasttempfileid < 0)
                return;

            // Check that we still need to process this after the cleanup has performed its duties
            var syntbase = await database.GetRemoteVolumeFromIDAsync(lasttempfileid);

            // If we do not have a valid entry, warn and quit
            if (syntbase.Name == null || syntbase.State == RemoteVolumeState.Deleting || syntbase.State == RemoteVolumeState.Deleted)
            {
                // TODO: If the repair succeeds, this could give a false warning?
                Logging.Log.WriteWarningMessage(LOGTAG, "MissingTemporaryFilelist", null, "Expected there to be a temporary fileset for synthetic filelist ({0}, {1}), but none was found?", lasttempfileid, lasttempfilelist);
                return;
            }

            // The fileset is already complete or repaired, so no synthetic list is needed
            if (syntbase.Name == null || (syntbase.State != RemoteVolumeState.Uploading && syntbase.State != RemoteVolumeState.Temporary))
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "SkippingSyntheticListUpload", "Skipping synthetic upload because temporary fileset appears to be complete: ({0}, {1}, {2})", lasttempfileid, lasttempfilelist, syntbase.State);
                return;
            }

            // Ready to build and upload the synthetic list
            await database.CommitTransactionAsync("PreSyntheticFilelist");
            var incompleteFilesets = (await database.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToList();

            result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
            Logging.Log.WriteInformationMessage(LOGTAG, "PreviousBackupFilelistUpload", "Uploading filelist from previous interrupted backup");

            if (!await taskreader.ProgressAsync)
                return;

            var incompleteSet = incompleteFilesets.Last();
            var badIds = from n in incompleteFilesets select n.Key;

            var prevs = (from n in await database.GetFilesetTimesAsync()
                         where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                         orderby n.Key
                         select n.Key).ToArray();

            var prevId = prevs.Length == 0 ? -1 : prevs.Last();

            FilesetVolumeWriter fsw = null;
            try
            {
                var s = 1;
                var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                var oldFilesetID = incompleteSet.Key;

                // Probe for an unused filename
                while (s < 60)
                {
                    var id = await database.GetRemoteVolumeIDAsync(VolumeBase.GenerateFilename(RemoteVolumeType.Files, options, null, fileTime));
                    if (id < 0)
                        break;

                    fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                }

                fsw = new FilesetVolumeWriter(options, fileTime);
                fsw.VolumeID = await database.RegisterRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);

                if (!string.IsNullOrEmpty(options.ControlFiles))
                    foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                        fsw.AddControlFile(p, options.GetCompressionHintFromFilename(p));

                var newFilesetID = await database.CreateFilesetAsync(fsw.VolumeID, fileTime);
                await database.LinkFilesetToVolumeAsync(newFilesetID, fsw.VolumeID);
                await database.AppendFilesFromPreviousSetAsync(null, newFilesetID, prevId, fileTime);

                await database.WriteFilesetAsync(fsw, newFilesetID);

                if (!await taskreader.ProgressAsync)
                    return;

                await database.UpdateRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                await database.CommitTransactionAsync("CommitUpdateFilelistVolume");
                await self.UploadChannel.WriteAsync(new FilesetUploadRequest(fsw));
                fsw = null;
            }
            catch
            {
                await database.RollbackTransactionAsync();
                throw;
            }
            finally
            {
                if (fsw != null)
                    try { fsw.Dispose(); }
                    catch { fsw = null; }
            }
        });
}
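// Illustration of the filename probe above (timestamp is hypothetical): if
// the interrupted fileset is stamped 2021-01-01 12:00:00, the loop tries
// 12:00:01, 12:00:02, ... (at most 59 probes) until GenerateFilename produces
// a name with no registered remote volume, so the synthetic filelist sorts
// directly after the interrupted one without a name collision.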
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, BackupStatsCollector stats, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new
        {
            Input = Channels.AcceptedChangedFile.ForRead,
            StreamBlockChannel = Channels.StreamBlock.ForWrite,
        },
        async self =>
        {
            var blocksize = options.Blocksize;

            while (await taskreader.ProgressAsync)
            {
                var e = await self.Input.ReadAsync();

                try
                {
                    var hint = options.GetCompressionHintFromFilename(e.Path);
                    var oldHash = e.OldId < 0 ? null : await database.GetFileHashAsync(e.OldId);

                    StreamProcessResult filestreamdata;

                    // Process metadata and actual data in parallel
                    var metatask = Task.Run(async () =>
                    {
                        // If we have determined that metadata has not changed, just grab the ID
                        if (!e.MetadataChanged)
                        {
                            var res = await database.GetMetadataIDAsync(e.MetaHashAndSize.FileHash, e.MetaHashAndSize.Blob.Length);
                            if (res.Item1)
                                return res.Item2;

                            Logging.Log.WriteWarningMessage(FILELOGTAG, "UnexpectedMetadataLookup", null, "Metadata was reported as not changed, but still requires being added?\nHash: {0}, Length: {1}, ID: {2}, Path: {3}", e.MetaHashAndSize.FileHash, e.MetaHashAndSize.Blob.Length, res.Item2, e.Path);
                            e.MetadataChanged = true;
                        }

                        return (await MetadataPreProcess.AddMetadataToOutputAsync(e.Path, e.MetaHashAndSize, database, self.StreamBlockChannel)).Item2;
                    });

                    using (var fs = snapshot.OpenRead(e.Path))
                        filestreamdata = await StreamBlock.ProcessStream(self.StreamBlockChannel, e.Path, fs, false, hint);

                    await stats.AddOpenedFile(filestreamdata.Streamlength);

                    var metadataid = await metatask;
                    var filekey = filestreamdata.Streamhash;
                    var filesize = filestreamdata.Streamlength;

                    if (oldHash != filekey)
                    {
                        if (oldHash == null)
                            Logging.Log.WriteVerboseMessage(FILELOGTAG, "NewFile", "New file {0}", e.Path);
                        else
                            Logging.Log.WriteVerboseMessage(FILELOGTAG, "ChangedFile", "File has changed {0}", e.Path);

                        if (e.OldId < 0)
                        {
                            await stats.AddAddedFile(filesize);

                            if (options.Dryrun)
                                Logging.Log.WriteVerboseMessage(FILELOGTAG, "WouldAddNewFile", "Would add new file {0}, size {1}", e.Path, Library.Utility.Utility.FormatSizeString(filesize));
                        }
                        else
                        {
                            await stats.AddModifiedFile(filesize);

                            if (options.Dryrun)
                                Logging.Log.WriteVerboseMessage(FILELOGTAG, "WouldAddChangedFile", "Would add changed file {0}, size {1}", e.Path, Library.Utility.Utility.FormatSizeString(filesize));
                        }

                        await database.AddFileAsync(e.Path, e.LastWrite, filestreamdata.Blocksetid, metadataid);
                    }
                    else if (e.MetadataChanged)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "FileMetadataChanged", "File has only metadata changes {0}", e.Path);
                        await database.AddFileAsync(e.Path, e.LastWrite, filestreamdata.Blocksetid, metadataid);
                    }
                    else /*if (e.OldId >= 0)*/
                    {
                        // The file content is unchanged, so only record the new last-modified time
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "NoFileChanges", "File has not changed {0}", e.Path);

                        try
                        {
                            await database.AddUnmodifiedAsync(e.OldId, e.LastWrite);
                        }
                        catch (Exception ex)
                        {
                            Logging.Log.WriteWarningMessage(FILELOGTAG, "FailedToAddFile", ex, "Failed while attempting to add unmodified file to database: {0}", e.Path);
                        }
                    }
                }
                catch (Exception ex)
                {
                    if (ex.IsRetiredException())
                        return;
                    else
                        Logging.Log.WriteWarningMessage(FILELOGTAG, "PathProcessingFailed", ex, "Failed to process path: {0}", e.Path);
                }
            }
        });
}
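// Summary of the branches above: a file is counted as added when it has no
// previous version (OldId < 0), as modified when its content hash differs
// from the stored one, as metadata-only when just the metadata blob changed,
// and otherwise only its last-modified timestamp is recorded.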