/// <summary>
/// Runs the upload process
/// </summary>
/// <returns>A tuple with the completion task and the channel to use</returns>
public static Tuple<Task, IWriteChannel<string>> Run()
{
    var channel = ChannelManager.CreateChannel<string>(
        buffersize: MAX_PENDING_UPLOADS,
        pendingWritersOverflowStrategy: QueueOverflowStrategy.LIFO
    );

    var task = AutomationExtensions.RunTask(
        channel.AsRead(),
        async (chan) =>
        {
            while (true)
            {
                var f = await chan.ReadAsync();
                try
                {
                    if (File.Exists(f))
                    {
                        var req = (HttpWebRequest)WebRequest.Create(UPLOAD_URL);
                        req.Method = "POST";
                        req.ContentType = "application/json; charset=utf-8";

                        int rc;
                        using (var fs = File.OpenRead(f))
                        {
                            if (fs.Length > 0)
                            {
                                req.ContentLength = fs.Length;
                                var areq = new Library.Utility.AsyncHttpRequest(req);

                                using (var rs = areq.GetRequestStream())
                                    Library.Utility.Utility.CopyStream(fs, rs);

                                using (var resp = (HttpWebResponse)areq.GetResponse())
                                    rc = (int)resp.StatusCode;
                            }
                            else
                            {
                                rc = 200;
                            }
                        }

                        if (rc >= 200 && rc <= 299)
                            File.Delete(f);
                    }
                }
                catch (Exception ex)
                {
                    Logging.Log.WriteMessage("UsageReporter failed", Duplicati.Library.Logging.LogMessageType.Error, ex);
                }
            }
        }
    );

    return new Tuple<Task, IWriteChannel<string>>(task, channel);
}
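A minimal caller sketch for the Run() method above (not from the original source; the file path is illustrative, and the retire-to-shutdown behaviour is an assumption based on how the other samples here stop their loops):

// Hypothetical usage of Run(): queue report files, then retire the channel to stop the loop
var uploader = Run();
await uploader.Item2.WriteAsync("/tmp/usage-report-0001.json"); // queue a report file for upload
await uploader.Item2.RetireAsync();                             // signal that no more reports will arrive
await uploader.Item1;                                           // wait for the upload loop to drain and finish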
public static Task RunGranterAsync(IWriteChannel<bool> channel, long count, CancellationToken token)
{
    var canceltask = new TaskCompletionSource<bool>();
    token.Register(() => canceltask.TrySetCanceled());
    var total = count;

    return AutomationExtensions.RunTask(
        new { channel },
        async self =>
        {
            while (count > 0)
            {
                DebugWriteLine($"Emitting task {total - count} of {total}");

                if (await Task.WhenAny(new[] { canceltask.Task, channel.WriteAsync(true) }) == canceltask.Task)
                    throw new TaskCanceledException();

                count--;
                DebugWriteLine($"Emitted task {total - count} of {total}");
            }

            DebugWriteLine("Stopping task granter");
        }
    );
}
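A hedged consumer sketch for RunGranterAsync (the channel construction and the DoWorkAsync call are illustrative assumptions, not from the original source): one unit of work is performed per grant, and the worker stops when the granter finishes and retires the channel.

// Hypothetical pairing of RunGranterAsync with a worker process
var grants = ChannelManager.CreateChannel<bool>();
using (var cts = new CancellationTokenSource())
{
    var granter = RunGranterAsync(grants, count: 100, token: cts.Token);
    var worker = AutomationExtensions.RunTask(
        new { channel = grants.AsRead() },
        async self =>
        {
            // One grant per work item; the loop ends when the granter retires the channel
            while (await self.channel.ReadAsync())
                await DoWorkAsync(); // assumed work method
        });

    await Task.WhenAll(granter, worker);
}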
/// <summary>
/// Reads input and applies the method to each input, and emits the output
/// </summary>
/// <param name="method">The worker method to apply to each element.</param>
/// <typeparam name="TInput">The input type parameter.</typeparam>
/// <typeparam name="TOutput">The output type parameter.</typeparam>
private static Task Worker<TInput, TOutput>(Func<TInput, TOutput> method)
{
    return AutomationExtensions.RunTask(
        new
        {
            input = ChannelMarker.ForRead<TInput>(WORKERINPUT),
            output = ChannelMarker.ForWrite<TOutput>(WORKEROUTPUT)
        },
        async self =>
        {
            try
            {
                while (true)
                    await self.output.WriteAsync(method(await self.input.ReadAsync().ConfigureAwait(false))).ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                if (!(ex is RetiredException))
                    Console.WriteLine("ex: {0}", ex);
                throw;
            }
        }
    );
}
public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new { UploadChannel = Channels.BackendRequest.ForWrite },
        async self =>
        {
            if (options.IndexfilePolicy != Options.IndexFileStrategy.None)
            {
                foreach (var blockfile in await database.GetMissingIndexFilesAsync())
                {
                    if (!await taskreader.ProgressAsync)
                        return;

                    Logging.Log.WriteInformationMessage(LOGTAG, "RecreateMissingIndexFile", "Re-creating missing index file for {0}", blockfile);
                    var w = await Common.IndexVolumeCreator.CreateIndexVolume(blockfile, options, database);

                    if (!await taskreader.ProgressAsync)
                        return;

                    await database.UpdateRemoteVolumeAsync(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                    await self.UploadChannel.WriteAsync(new IndexVolumeUploadRequest(w));
                }
            }
        });
}
/// <summary>
/// Runs the MRU cache
/// </summary>
/// <returns>An awaitable task.</returns>
/// <param name="selfinfo">This peer's information</param>
/// <param name="storesize">The size of the MRU store</param>
/// <param name="maxage">The maximum amount of time items are stored</param>
/// <param name="buffersize">The size of the forwarding buffer.</param>
public static Task RunAsync(PeerInfo selfinfo, int storesize, TimeSpan maxage, int buffersize = 10)
{
    var parent = RunMRUAsync(selfinfo, storesize, maxage, buffersize);

    return Task.WhenAll(
        parent,
        AutomationExtensions.RunTask(
            new { Request = Channels.MRURequests.ForWrite },
            async self =>
            {
                while (true)
                {
                    // Sleep, but quit if the MRU stops
                    if (await Task.WhenAny(parent, Task.Delay(new TimeSpan(maxage.Ticks / 3))) == parent)
                        return;

                    log.Debug("Invoking store expiration");
                    await self.Request.SendExpireAsync();
                    log.Debug("Store expiration completed, waiting ...");
                }
            }
        )
    );
}
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupStatsCollector stats, BackupDatabase database)
{
    return AutomationExtensions.RunTask(
        new
        {
            Input = Channels.ProcessedFiles.ForRead,
            Output = Channels.AcceptedChangedFile.ForWrite
        },
        async self =>
        {
            var EMPTY_METADATA = Utility.WrapMetadata(new Dictionary<string, string>(), options);
            var blocksize = options.Blocksize;

            while (true)
            {
                var e = await self.Input.ReadAsync();

                long filestatsize = -1;
                try
                {
                    filestatsize = snapshot.GetFileSize(e.Path);
                }
                catch (Exception ex)
                {
                    Logging.Log.WriteExplicitMessage(FILELOGTAG, "FailedToReadSize", ex, "Failed to read size of file: {0}", e.Path);
                }

                await stats.AddExaminedFile(filestatsize);

                e.MetaHashAndSize = options.StoreMetadata ? Utility.WrapMetadata(await MetadataGenerator.GenerateMetadataAsync(e.Path, e.Attributes, options, snapshot), options) : EMPTY_METADATA;

                var timestampChanged = e.LastWrite != e.OldModified || e.LastWrite.Ticks == 0 || e.OldModified.Ticks == 0;
                var filesizeChanged = filestatsize < 0 || e.LastFileSize < 0 || filestatsize != e.LastFileSize;
                var tooLargeFile = options.SkipFilesLargerThan != long.MaxValue && options.SkipFilesLargerThan != 0 && filestatsize >= 0 && filestatsize > options.SkipFilesLargerThan;
                e.MetadataChanged = !options.CheckFiletimeOnly && !options.SkipMetadata && (e.MetaHashAndSize.Blob.Length != e.OldMetaSize || e.MetaHashAndSize.FileHash != e.OldMetaHash);

                if ((e.OldId < 0 || options.DisableFiletimeCheck || timestampChanged || filesizeChanged || e.MetadataChanged) && !tooLargeFile)
                {
                    Logging.Log.WriteVerboseMessage(FILELOGTAG, "CheckFileForChanges", "Checking file for changes {0}, new: {1}, timestamp changed: {2}, size changed: {3}, metadatachanged: {4}, {5} vs {6}", e.Path, e.OldId <= 0, timestampChanged, filesizeChanged, e.MetadataChanged, e.LastWrite, e.OldModified);
                    await self.Output.WriteAsync(e);
                }
                else
                {
                    if (tooLargeFile)
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckTooLarge", "Skipped checking file, because the size exceeds limit {0}", e.Path);
                    else
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckNoTimestampChange", "Skipped checking file, because timestamp was not updated {0}", e.Path);

                    await database.AddUnmodifiedAsync(e.OldId, e.LastWrite);
                }
            }
        });
}
public void TestRetireWithoutLoss()
{
    Task[] tasks;
    int count = 0;

    using (new ChannelScope())
    {
        tasks = new Task[] {
            AutomationExtensions.RunTask(
                new { channel = ChannelMarker.ForWrite<int>(CHANNEL_NAME) },
                async self =>
                {
                    await Task.Delay(500);
                    await self.channel.WriteAsync(1);
                }
            ),
            AutomationExtensions.RunTask(
                new { channel = ChannelMarker.ForWrite<int>(CHANNEL_NAME) },
                async self =>
                {
                    await Task.Delay(1000);
                    await self.channel.WriteAsync(1);
                }
            ),
            AutomationExtensions.RunTask(
                new { channel = ChannelMarker.ForRead<int>(CHANNEL_NAME) },
                async self =>
                {
                    while (true)
                    {
                        await self.channel.ReadAsync();
                        count++;
                    }
                }
            )
        };
    }

    var all = Task.WhenAll(tasks).WaitForTask();

    if (count != 2)
        throw new Exception(string.Format("Unexpected count, expected {0} but got {1}", 2, count));
    if (all.IsFaulted || !all.IsCompleted)
        throw new Exception("Unexpected task state");
}
/// <summary>
/// Emits all values from the enumerable into the network
/// </summary>
/// <param name="values">The values to emit into the network.</param>
/// <typeparam name="TInput">The type of the emitted values.</typeparam>
private static Task Generator<TInput>(IEnumerable<TInput> values)
{
    return AutomationExtensions.RunTask(
        new { channel = ChannelMarker.ForWrite<TInput>(WORKERINPUT) },
        async self =>
        {
            foreach (var value in values)
                await self.channel.WriteAsync(value).ConfigureAwait(false);
        }
    );
}
public static Task RunWriter<T>(IChannel<T> channel, IEnumerable<T> values)
{
    return AutomationExtensions.RunTask(
        new { chan = channel.AsWriteOnly() },
        async self =>
        {
            foreach (var v in values)
                await self.chan.WriteAsync(v);
        }
    );
}
private void TestReaderOverflow(QueueOverflowStrategy strategy)
{
    using (new IsolatedChannelScope())
    {
        var readertasks = Enumerable.Range(0, 4).Select(count =>
            AutomationExtensions.RunTask(
                new { Input = ChannelMarker.ForRead<int>("channel", maxPendingReaders: 3, pendingReadersOverflowStrategy: strategy) },
                async x =>
                {
                    //Console.WriteLine("Started {0}", count);
                    while (true)
                        await x.Input.ReadAsync();
                })
        ).ToList();

        using (ChannelManager.GetChannel<int>("channel").AsWriteOnly())
            Task.Delay(500).WaitForTaskOrThrow();

        Task.WhenAny(readertasks.Union(new[] { Task.Delay(1000) })).WaitForTaskOrThrow();
        Task.Delay(500).WaitForTaskOrThrow();

        int discard;
        switch (strategy)
        {
            case QueueOverflowStrategy.FIFO:
                discard = 0;
                break;
            case QueueOverflowStrategy.LIFO:
                discard = readertasks.Count - 2;
                break;
            case QueueOverflowStrategy.Reject:
            default:
                discard = readertasks.Count - 1;
                break;
        }

        Assert.IsTrue(readertasks[discard].IsFaulted);
        TestAssert.IsInstanceOf<ChannelOverflowException>(readertasks[discard].Exception.Flatten().InnerExceptions.First());
        readertasks.RemoveAt(discard);

        Assert.IsTrue(readertasks.All(x => x.IsCompleted && !x.IsFaulted && !x.IsCanceled));
    }
}
/// <summary>
/// Collects input and combines it with the join method
/// </summary>
/// <param name="joinmethod">The method used to join results.</param>
/// <param name="initial">The initial input to the join method, aka. the neutral element.</param>
/// <typeparam name="TOutput">The type parameter for the data to join.</typeparam>
/// <typeparam name="TResult">The type parameter for the aggregated data.</typeparam>
private static async Task<TResult> Collector<TOutput, TResult>(Func<TResult, TOutput, TResult> joinmethod, TResult initial)
{
    var current = initial;

    await AutomationExtensions.RunTask(
        new { channel = ChannelMarker.ForRead<TOutput>(WORKEROUTPUT) },
        async self =>
        {
            while (true)
                current = joinmethod(current, await self.channel.ReadAsync().ConfigureAwait(false));
        }
    ).ConfigureAwait(false);

    return current;
}
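A composition sketch for the Generator, Worker and Collector helpers shown in these samples (assumptions: they live in the same class, share the WORKERINPUT/WORKEROUTPUT channel names, assume the same usings as the surrounding code, and are started inside a single ChannelScope, mirroring the scope pattern used in the test methods above):

// Hypothetical pipeline: emit ints, square them in the worker, sum them in the collector
private static async Task<long> SumOfSquaresAsync(IEnumerable<int> values)
{
    Task generator, worker;
    Task<long> collected;

    using (new ChannelScope())
    {
        generator = Generator(values);
        worker = Worker<int, long>(x => (long)x * x);
        collected = Collector<long, long>((acc, x) => acc + x, 0L);
    }

    // All three stop once the generator completes and the shared channels retire
    await Task.WhenAll(generator, worker, collected);
    return await collected;
}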
public static Task Run(Snapshots.ISnapshotService snapshot, BackupResults result, Options options, IFilter sourcefilter, IFilter filter, Common.ITaskReader taskreader, System.Threading.CancellationToken token)
{
    // Make sure we create the enumeration process in a separate scope,
    // but keep the log channel from the parent scope
    using (Logging.Log.StartIsolatingScope())
    using (new IsolatedChannelScope())
    {
        var enumeratorTask = Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader);

        var counterTask = AutomationExtensions.RunTask(
            new { Input = Backup.Channels.SourcePaths.ForRead },
            async self =>
            {
                var count = 0L;
                var size = 0L;

                try
                {
                    while (await taskreader.ProgressAsync && !token.IsCancellationRequested)
                    {
                        var path = await self.Input.ReadAsync();
                        count++;

                        try
                        {
                            size += snapshot.GetFileSize(path);
                        }
                        catch
                        {
                        }

                        result.OperationProgressUpdater.UpdatefileCount(count, size, false);
                    }
                }
                finally
                {
                    result.OperationProgressUpdater.UpdatefileCount(count, size, true);
                }
            });

        return Task.WhenAll(enumeratorTask, counterTask);
    }
}
public static Task RunReader<T>(IChannel<T> channel, IEnumerable<T> values, CounterShim counter)
{
    return AutomationExtensions.RunTask(
        new { chan = channel.AsReadOnly() },
        async self =>
        {
            foreach (var v in values)
            {
                var r = await self.chan.ReadAsync();
                counter.Increment();

                if (Comparer<T>.Default.Compare(v, r) != 0)
                    throw new Exception(string.Format("Got {0} but expected {1}", r, v));
            }
        }
    );
}
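A hedged round-trip sketch pairing RunWriter and RunReader (the channel construction is an assumption; CounterShim is the counting helper RunReader already uses):

// Hypothetical round-trip check for RunWriter/RunReader on a shared channel
var values = Enumerable.Range(0, 100).ToArray();
var counter = new CounterShim();
var channel = ChannelManager.CreateChannel<int>();

Task.WhenAll(
    RunWriter(channel, values),
    RunReader(channel, values, counter)
).WaitForTaskOrThrow();

if (counter.Count != values.Length)
    throw new Exception(string.Format("Expected {0} reads but got {1}", values.Length, counter.Count));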
public void TestAttributes()
{
    var values = new[] { 0, 1, 2, 3, 4 };
    var counter = new CounterShim();
    var readercount = 10;
    var name = "bcast";

    using (new IsolatedChannelScope())
    {
        var writer = AutomationExtensions.RunTask(
            new Writer(),
            async self =>
            {
                foreach (var v in values)
                    await self.chan.WriteAsync(v);
            }
        );

        var readers = Enumerable.Range(0, readercount).Select(x =>
            AutomationExtensions.RunTask(
                new { chan = ChannelMarker.ForRead<int>(name) },
                async self =>
                {
                    foreach (var v in values)
                    {
                        var r = await self.chan.ReadAsync();
                        counter.Increment();

                        if (Comparer<int>.Default.Compare(v, r) != 0)
                            throw new Exception(string.Format("Got {0} but expected {1}", r, v));
                    }
                }
            )).ToArray();

        Task.WhenAll(readers.Union(new[] { writer })).WaitForTaskOrThrow();

        if (counter.Count != readercount * values.Length)
            throw new Exception(string.Format("The counter said {0} values were read, but {1} was expected", counter.Count, readercount * values.Length));
    }
}
public static Task RunStatPrinterAsync(IWriteChannel<StatRequest> channel, TimeSpan period, long total, CancellationToken token)
{
    return AutomationExtensions.RunTask(
        new { channel },
        async _ =>
        {
            while (true)
            {
                await Task.Delay(period, token);

                var tcs = new TaskCompletionSource<ResultStats>();
                await channel.WriteAsync(new StatRequest() { Result = tcs });
                var res = await tcs.Task;

                var pg = (res.Requests / (double)total) * 100;
                Console.WriteLine($" {pg:0.00}% ({res.Requests} of {total}) {(res.Failures == 0 ? "" : $"{res.Failures} {(res.Failures == 1 ? "failure" : "failures")}")}");
            }
        }
    );
}
private static Task WriteChannel(StreamWriter stream, IReadChannelEnd<string> ch)
{
    stream.AutoFlush = true;

    return AutomationExtensions.RunTask(
        new { Input = ch },
        async self =>
        {
            using (stream)
            {
                while (true)
                {
                    var line = await self.Input.ReadAsync();
                    await stream.WriteLineAsync(line).ConfigureAwait(false);
                    //Console.WriteLine("Wrote {0}", line);
                }
            }
        }
    );
}
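A hedged wiring sketch for WriteChannel above; the channel name "output" and the console-backed StreamWriter are illustrative assumptions, and AsReadOnly() is assumed to yield the IReadChannelEnd the method expects.

// Hypothetical usage: forward everything written to a named channel to standard output
var forwarder = WriteChannel(
    new StreamWriter(Console.OpenStandardOutput()),
    ChannelManager.GetChannel<string>("output").AsReadOnly()
);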
/// <summary>
/// The runner helper method that calls the abstract request method
/// </summary>
/// <returns>An awaitable task</returns>
protected Task RunAsync(IReadChannel<bool> reqchan, IWriteChannel<RequestResult> respchan)
{
    return AutomationExtensions.RunTask(
        new { reqchan, respchan },
        async _ =>
        {
            while (await reqchan.ReadAsync())
            {
                var start = DateTime.Now;
                try
                {
                    var resp = await PeformRequestAsync();
                    await respchan.WriteAsync(new RequestResult()
                    {
                        Started = start,
                        Finished = DateTime.Now,
                        Failed = m_expectedresponse != null && m_expectedresponse != resp
                    });
                }
                catch (System.Exception ex)
                {
                    await respchan.WriteAsync(new RequestResult()
                    {
                        Started = start,
                        Finished = DateTime.Now,
                        Failed = true,
                        Exception = ex
                    });

                    if (m_options.Verbose)
                        Console.WriteLine(ex.Message);
                }
            }
        });
}
public static Task Run(BackupDatabase database, Options options, BackupResults result, ITaskReader taskreader, string lasttempfilelist, long lasttempfileid)
{
    return AutomationExtensions.RunTask(
        new { UploadChannel = Channels.BackendRequest.ForWrite },
        async self =>
        {
            // Check if we should upload a synthetic filelist
            if (options.DisableSyntheticFilelist || string.IsNullOrWhiteSpace(lasttempfilelist) || lasttempfileid < 0)
                return;

            // Check that we still need to process this after the cleanup has performed its duties
            var syntbase = await database.GetRemoteVolumeFromIDAsync(lasttempfileid);

            // If we do not have a valid entry, warn and quit
            if (syntbase.Name == null || syntbase.State != RemoteVolumeState.Uploaded)
            {
                // TODO: If the repair succeeds, this could give a false warning?
                Logging.Log.WriteWarningMessage(LOGTAG, "MissingTemporaryFilelist", null, "Expected there to be a temporary fileset for synthetic filelist ({0}, {1}), but none was found?", lasttempfileid, lasttempfilelist);
                return;
            }

            // Files is missing or repaired
            if (syntbase.Name == null || (syntbase.State != RemoteVolumeState.Uploading && syntbase.State != RemoteVolumeState.Temporary))
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "SkippingSyntheticListUpload", "Skipping synthetic upload because temporary fileset appears to be complete: ({0}, {1}, {2})", lasttempfileid, lasttempfilelist, syntbase.State);
                return;
            }

            // Ready to build and upload the synthetic list
            await database.CommitTransactionAsync("PreSyntheticFilelist");

            var incompleteFilesets = (await database.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToList();

            result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
            Logging.Log.WriteInformationMessage(LOGTAG, "PreviousBackupFilelistUpload", "Uploading filelist from previous interrupted backup");

            if (!await taskreader.ProgressAsync)
                return;

            var incompleteSet = incompleteFilesets.Last();
            var badIds = from n in incompleteFilesets select n.Key;

            var prevs = (from n in await database.GetFilesetTimesAsync()
                         where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                         orderby n.Key
                         select n.Key).ToArray();

            var prevId = prevs.Length == 0 ? -1 : prevs.Last();

            FilesetVolumeWriter fsw = null;
            try
            {
                var s = 1;
                var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                var oldFilesetID = incompleteSet.Key;

                // Probe for an unused filename
                while (s < 60)
                {
                    var id = await database.GetRemoteVolumeIDAsync(VolumeBase.GenerateFilename(RemoteVolumeType.Files, options, null, fileTime));
                    if (id < 0)
                        break;

                    fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                }

                fsw = new FilesetVolumeWriter(options, fileTime);
                fsw.VolumeID = await database.RegisterRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);

                if (!string.IsNullOrEmpty(options.ControlFiles))
                    foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                        fsw.AddControlFile(p, options.GetCompressionHintFromFilename(p));

                var newFilesetID = await database.CreateFilesetAsync(fsw.VolumeID, fileTime);
                await database.LinkFilesetToVolumeAsync(newFilesetID, fsw.VolumeID);
                await database.AppendFilesFromPreviousSetAsync(null, newFilesetID, prevId, fileTime);
                await database.WriteFilesetAsync(fsw, newFilesetID);

                if (!await taskreader.ProgressAsync)
                    return;

                await database.UpdateRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                await database.CommitTransactionAsync("CommitUpdateFilelistVolume");
                await self.UploadChannel.WriteAsync(new FilesetUploadRequest(fsw));
                fsw = null;
            }
            catch
            {
                await database.RollbackTransactionAsync();
                throw;
            }
            finally
            {
                if (fsw != null)
                    try { fsw.Dispose(); }
                    catch { fsw = null; }
            }
        }
    );
}
public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.OutputBlocks.ForRead, Output = Channels.BackendRequest.ForWrite, SpillPickup = Channels.SpillPickup.ForWrite, }, async self => { var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None; var fullIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.Full; BlockVolumeWriter blockvolume = null; TemporaryIndexVolume indexvolume = null; try { while (true) { var b = await self.Input.ReadAsync(); // Lazy-start a new block volume if (blockvolume == null) { // Before we start a new volume, probe to see if it exists // This will delay creation of volumes for differential backups // There can be a race, such that two workers determine that // the block is missing, but this will be solved by the AddBlock call // which runs atomically if (await database.FindBlockIDAsync(b.HashKey, b.Size) >= 0) { b.TaskCompletion.TrySetResult(false); continue; } blockvolume = new BlockVolumeWriter(options); blockvolume.VolumeID = await database.RegisterRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary); indexvolume = noIndexFiles ? null : new TemporaryIndexVolume(options); } var newBlock = await database.AddBlockAsync(b.HashKey, b.Size, blockvolume.VolumeID); b.TaskCompletion.TrySetResult(newBlock); if (newBlock) { blockvolume.AddBlock(b.HashKey, b.Data, b.Offset, (int)b.Size, b.Hint); if (indexvolume != null) { indexvolume.AddBlock(b.HashKey, b.Size); if (b.IsBlocklistHashes && fullIndexFiles) { indexvolume.AddBlockListHash(b.HashKey, b.Size, b.Data); } } // If the volume is full, send to upload if (blockvolume.Filesize > options.VolumeSize - options.Blocksize) { //When uploading a new volume, we register the volumes and then flush the transaction // this ensures that the local database and remote storage are as closely related as possible await database.UpdateRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null); blockvolume.Close(); await database.CommitTransactionAsync("CommitAddBlockToOutputFlush"); FileEntryItem blockEntry = blockvolume.CreateFileEntryForUpload(options); TemporaryIndexVolume indexVolumeCopy = null; if (indexvolume != null) { indexVolumeCopy = new TemporaryIndexVolume(options); indexvolume.CopyTo(indexVolumeCopy, false); } var uploadRequest = new VolumeUploadRequest(blockvolume, blockEntry, indexVolumeCopy, options, database); blockvolume = null; indexvolume = null; // Write to output at the end here to prevent sending a full volume to the SpillCollector await self.Output.WriteAsync(uploadRequest); } } // We ignore the stop signal, but not the pause and terminate await taskreader.ProgressAsync; } } catch (Exception ex) { if (ex.IsRetiredException()) { // If we have collected data, merge all pending volumes into a single volume if (blockvolume != null && blockvolume.SourceSize > 0) { await self.SpillPickup.WriteAsync(new SpillVolumeRequest(blockvolume, indexvolume)); } } throw; } })); }
public static Task Run(Options options, BackupDatabase database, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.SpillPickup.ForRead, Output = Channels.BackendRequest.ForWrite, }, async self => { var lst = new List <VolumeUploadRequest>(); while (!await self.Input.IsRetiredAsync) { try { lst.Add((VolumeUploadRequest)await self.Input.ReadAsync()); } catch (Exception ex) { if (ex.IsRetiredException()) { break; } throw; } } while (lst.Count > 1) { // We ignore the stop signal, but not the pause and terminate await taskreader.ProgressAsync; VolumeUploadRequest target = null; var source = lst[0]; // Finalize the current work source.BlockVolume.Close(); // Remove it from the list of active operations lst.RemoveAt(0); var buffer = new byte[options.Blocksize]; using (var rd = new BlockVolumeReader(options.CompressionModule, source.BlockVolume.LocalFilename, options)) { foreach (var file in rd.Blocks) { // Grab a target if (target == null) { if (lst.Count == 0) { // No more targets, make one target = new VolumeUploadRequest(new BlockVolumeWriter(options), source.IndexVolume == null ? null : new TemporaryIndexVolume(options)); target.BlockVolume.VolumeID = await database.RegisterRemoteVolumeAsync(target.BlockVolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary); } else { // Grab the next target target = lst[0]; lst.RemoveAt(0); } // We copy all the blocklisthashes, which may create duplicates // but otherwise we need to query all hashes to see if they are blocklisthashes if (source.IndexVolume != null) { source.IndexVolume.CopyTo(target.IndexVolume, true); } } var len = rd.ReadBlock(file.Key, buffer); target.BlockVolume.AddBlock(file.Key, buffer, 0, len, Duplicati.Library.Interface.CompressionHint.Default); await database.MoveBlockToVolumeAsync(file.Key, len, source.BlockVolume.VolumeID, target.BlockVolume.VolumeID); if (target.IndexVolume != null) { target.IndexVolume.AddBlock(file.Key, len); } if (target.BlockVolume.Filesize > options.VolumeSize - options.Blocksize) { target.BlockVolume.Close(); await self.Output.WriteAsync(target); target = null; } } } // Make sure they are out of the database System.IO.File.Delete(source.BlockVolume.LocalFilename); await database.SafeDeleteRemoteVolumeAsync(source.BlockVolume.RemoteFilename); // Re-inject the target if it has content if (target != null) { lst.Insert(lst.Count == 0 ? 0 : 1, target); } } foreach (var n in lst) { // We ignore the stop signal, but not the pause and terminate await taskreader.ProgressAsync; n.BlockVolume.Close(); await self.Output.WriteAsync(n); } })); }
public static Task Run(IEnumerable <string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, FileAttributes fileAttributes, Duplicati.Library.Utility.IFilter sourcefilter, Duplicati.Library.Utility.IFilter emitfilter, Options.SymlinkStrategy symlinkPolicy, Options.HardlinkStrategy hardlinkPolicy, bool excludeemptyfolders, string[] ignorenames, string[] changedfilelist, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Output = Backup.Channels.SourcePaths.ForWrite }, async self => { var hardlinkmap = new Dictionary <string, string>(); var mixinqueue = new Queue <string>(); Duplicati.Library.Utility.IFilter enumeratefilter = emitfilter; bool includes; bool excludes; Library.Utility.FilterExpression.AnalyzeFilters(emitfilter, out includes, out excludes); if (includes && !excludes) { enumeratefilter = Library.Utility.FilterExpression.Combine(emitfilter, new Duplicati.Library.Utility.FilterExpression("*" + System.IO.Path.DirectorySeparatorChar, true)); } // Simplify checking for an empty list if (ignorenames != null && ignorenames.Length == 0) { ignorenames = null; } // If we have a specific list, use that instead of enumerating the filesystem IEnumerable <string> worklist; if (changedfilelist != null && changedfilelist.Length > 0) { worklist = changedfilelist.Where(x => { var fa = FileAttributes.Normal; try { fa = snapshot.GetAttributes(x); } catch { } return AttributeFilter(x, fa, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, fileAttributes, enumeratefilter, ignorenames, mixinqueue); }); } else { Library.Utility.Utility.EnumerationFilterDelegate attributeFilter = (root, path, attr) => AttributeFilter(path, attr, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, fileAttributes, enumeratefilter, ignorenames, mixinqueue); if (journalService != null) { // filter sources using USN journal, to obtain a sub-set of files / folders that may have been modified sources = journalService.GetModifiedSources(attributeFilter); } worklist = snapshot.EnumerateFilesAndFolders(sources, attributeFilter, (rootpath, errorpath, ex) => { Logging.Log.WriteWarningMessage(FILTER_LOGTAG, "FileAccessError", ex, "Error reported while accessing file: {0}", errorpath); }); } var source = ExpandWorkList(worklist, mixinqueue, emitfilter, enumeratefilter); if (excludeemptyfolders) { source = ExcludeEmptyFolders(source); } // Process each path, and dequeue the mixins with symlinks as we go foreach (var s in source) { if (!await taskreader.ProgressAsync) { return; } await self.Output.WriteAsync(s); } })); }
/// <summary> /// Runs the report processor /// </summary> /// <param name="forward">The channel accepting filenames with usage reports.</param> internal static Tuple <Task, IWriteChannel <ReportItem> > Run(IWriteChannel <string> forward) { var instanceid = System.Diagnostics.Process.GetCurrentProcess().Id.ToString(); var channel = ChannelManager.CreateChannel <ReportItem>( maxPendingWriters: MAX_QUEUE_SIZE, pendingWritersOverflowStrategy: QueueOverflowStrategy.LIFO ); var task = AutomationExtensions.RunTask( new { Input = channel.AsRead(), Output = forward }, async(self) => { // Wait 20 seconds before we start transmitting for (var i = 0; i < 20; i++) { await Task.Delay(TimeSpan.FromSeconds(1)).ConfigureAwait(false); if (await self.Input.IsRetiredAsync) { return; } } await ProcessAbandonedFiles(self.Output, self.Input, null).ConfigureAwait(false); var rs = new ReportSet(); var tf = GetTempFilename(instanceid); var nextTransmitTarget = new DateTime(0); while (true) { var forceSend = false; try { // We wait until we get an item, or WAIT_TIME from the last event var waittime = rs.Items.Count == 0 ? Timeout.Infinite : new TimeSpan(Math.Max(0, (nextTransmitTarget - DateTime.UtcNow).Ticks)); var item = await self.Input.ReadAsync(waittime); if (item != null) { if (rs.Items.Count == 0) { nextTransmitTarget = DateTime.UtcNow + WAIT_TIME; } forceSend = item.Type == ReportType.Crash; rs.Items.Add(item); File.WriteAllText(tf, JsonConvert.SerializeObject(rs)); } } catch (TimeoutException) { forceSend = true; } if ((forceSend && rs.Items.Count > 0) || (rs.Items.Count > MAX_ITEMS_IN_SET)) { var nextFilename = GetTempFilename(instanceid); self.Output.WriteNoWait(tf); rs = new ReportSet(); await ProcessAbandonedFiles(self.Output, self.Input, null); tf = nextFilename; } } } ); return(new Tuple <Task, IWriteChannel <ReportItem> >(task, channel)); }
public Task Run() { return(AutomationExtensions.RunTask(new { Input = Channels.BackendRequest.ForRead, }, async self => { var workers = new List <Worker>(); m_maxConcurrentUploads = m_options.AsynchronousConcurrentUploadLimit <= 0 ? int.MaxValue : m_options.AsynchronousConcurrentUploadLimit; m_initialUploadThrottleSpeed = m_options.AsynchronousConcurrentUploadLimit <= 0 ? int.MaxValue : m_options.MaxUploadPrSecond / m_maxConcurrentUploads; var lastSize = -1L; var uploadsInProgress = 0; m_cancelTokenSource = new CancellationTokenSource(); m_progressUpdater.Run(m_cancelTokenSource.Token); try { while (!await self.Input.IsRetiredAsync && await m_taskReader.ProgressAsync) { var req = await self.Input.ReadAsync(); if (!await m_taskReader.ProgressAsync) { break; } var worker = workers.FirstOrDefault(w => w.Task.IsCompleted && !w.Task.IsFaulted); if (worker == null) { worker = new Worker(m_backendFactory()); workers.Add(worker); } if (req is VolumeUploadRequest volumeUpload) { if (volumeUpload.IndexVolume == null) { worker.Task = Task.Run(() => UploadFileAsync(volumeUpload.BlockEntry, worker, m_cancelTokenSource.Token)); } else { worker.Task = Task.Run(() => UploadBlockAndIndexAsync(volumeUpload, worker, m_cancelTokenSource.Token)); } lastSize = volumeUpload.BlockVolume.SourceSize; uploadsInProgress++; } else if (req is FilesetUploadRequest filesetUpload) { worker.Task = Task.Run(() => UploadVolumeWriter(filesetUpload.Fileset, worker, m_cancelTokenSource.Token)); uploadsInProgress++; } else if (req is IndexVolumeUploadRequest indexUpload) { worker.Task = Task.Run(() => UploadVolumeWriter(indexUpload.IndexVolume, worker, m_cancelTokenSource.Token)); uploadsInProgress++; } else if (req is FlushRequest flush) { try { while (workers.Any()) { var finishedTask = await Task.WhenAny(workers.Select(w => w.Task)).ConfigureAwait(false); if (finishedTask.IsFaulted) { ExceptionDispatchInfo.Capture(finishedTask.Exception).Throw(); } workers.RemoveAll(w => w.Task == finishedTask); } uploadsInProgress = 0; } finally { flush.SetFlushed(lastSize); } break; } if (uploadsInProgress >= m_maxConcurrentUploads) { await Task.WhenAny(workers.Select(w => w.Task)).ConfigureAwait(false); uploadsInProgress--; var failedUploads = workers.Where(w => w.Task.IsFaulted).Select(w => GetInnerMostException(w.Task.Exception)).ToList(); if (failedUploads.Any()) { if (failedUploads.Count == 1) { ExceptionDispatchInfo.Capture(failedUploads.First()).Throw(); } else { throw new AggregateException(failedUploads); } } } } } catch (Exception ex) when(!ex.IsRetiredException()) { m_cancelTokenSource.Cancel(); try { await Task.WhenAll(workers.Select(w => w.Task)); } catch { /* As we are cancelling all threads we do not need to alert the user to any of these exceptions */ } throw; } try { m_stats.SetBlocking(true); await Task.WhenAll(workers.Select(w => w.Task)); } finally { m_stats.SetBlocking(false); } })); }
/// <summary> /// Runs the console interface /// </summary> /// <returns>An awaitable task.</returns> public static Task RunAsync() { // Set up a console forwarder process var consoleOut = Skeletons.CollectAsync( Channels.ConsoleOutput.ForRead, x => Console.Out.WriteLineAsync(x ?? string.Empty) ); // Set up a channel for sending control messages var inputChannel = Channel.Create <string>(buffersize: 10); // Set up the console reader process var consoleInput = AutomationExtensions.RunTask( new { Control = inputChannel.AsWrite() }, async self => { string line; // TODO: The blocking read prevents clean shutdown, // but direct access to the input stream has issues with the buffer while ((line = await Task.Run(() => Console.ReadLine())) != null) { await self.Control.WriteAsync(line); } } ); // Set up the control logic handler var proc = AutomationExtensions.RunTask(new { Control = inputChannel.AsRead(), Output = Channels.ConsoleOutput.ForWrite }, async self => { var peers = new List <Tuple <PeerInfo, Task, IWriteChannel <PeerRequest> > >(); var rnd = new Random(); var portnr = 15000; await self.Output.WriteAsync(HELPTEXT); while (true) { try { var commandline = await self.Control.ReadAsync() ?? string.Empty; var command = commandline.Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries).FirstOrDefault() ?? string.Empty; if (string.Equals(command, "help", StringComparison.OrdinalIgnoreCase)) { await self.Output.WriteAsync(HELPTEXT); } else if (string.Equals(command, "exit", StringComparison.OrdinalIgnoreCase) || string.Equals(command, "quit", StringComparison.OrdinalIgnoreCase)) { return; } else if (string.Equals(command, "check", StringComparison.OrdinalIgnoreCase)) { for (var i = peers.Count - 1; i >= 0; i--) { if (await peers[i].Item3.IsRetiredAsync) { await self.Output.WriteAsync($"Peer {peers[i].Item1.Key} at {peers[i].Item1.Address} terminated"); peers.RemoveAt(i); } } await self.Output.WriteAsync($"Completed check, found {peers.Count} live peers"); } else if (string.Equals(command, "node", StringComparison.OrdinalIgnoreCase)) { var actions = commandline.Split(new char[] { ' ' }, 4, StringSplitOptions.RemoveEmptyEntries); if (string.Equals(actions[1], "start", StringComparison.OrdinalIgnoreCase)) { var pi = new PeerInfo(Key.CreateRandomKey(), new IPEndPoint(IPAddress.Loopback, portnr)); await self.Output.WriteAsync($"Starting node {pi.Key} on {pi.Address}"); var chan = Channel.Create <PeerRequest>(); var s = Task.Run(() => Peer.RunPeer( pi, 5, 100, TimeSpan.FromDays(1), peers.Count == 0 ? 
new EndPoint[0] : new[] { peers[rnd.Next(0, peers.Count - 1)].Item1.Address }, chan.AsRead() ) .ContinueWith(_ => inputChannel.WriteAsync("check")) ); peers.Add(new Tuple <PeerInfo, Task, IWriteChannel <PeerRequest> >(pi, s, chan)); portnr++; } else if (string.Equals(actions[1], "list", StringComparison.OrdinalIgnoreCase)) { if (actions.Length != 2) { await self.Output.WriteAsync("The list command takes no arguments"); continue; } for (var i = 0; i < peers.Count; i++) { await self.Output.WriteAsync(string.Format("{0}: {1} - {2}", i, peers[i].Item1.Key, peers[i].Item1.Address)); } await self.Output.WriteAsync(string.Empty); } else if (string.Equals(actions[1], "connect", StringComparison.OrdinalIgnoreCase)) { actions = commandline.Split(new char[] { ' ' }, 5, StringSplitOptions.RemoveEmptyEntries); if (actions.Length != 4) { await self.Output.WriteAsync("The connect command needs exactly two arguments, the ip and the port"); continue; } if (!IPAddress.TryParse(actions[2], out var ip)) { await self.Output.WriteAsync($"Failed to parse ip: {actions[2]}"); continue; } if (!int.TryParse(actions[3], out var port)) { await self.Output.WriteAsync($"Failed to parse {actions[3]} as an integer"); continue; } var pi = new PeerInfo(Key.CreateRandomKey(), new IPEndPoint(IPAddress.Loopback, portnr)); await self.Output.WriteAsync($"Starting node {pi.Key} on {pi.Address}"); var chan = Channel.Create <PeerRequest>(); var s = Task.Run(() => Peer.RunPeer( pi, 5, 100, TimeSpan.FromDays(1), new[] { new IPEndPoint(ip, port) }, chan.AsRead() ) .ContinueWith(_ => inputChannel.WriteAsync("check")) ); peers.Add(new Tuple <PeerInfo, Task, IWriteChannel <PeerRequest> >(pi, s, chan)); portnr++; } else if (string.Equals(actions[1], "stop", StringComparison.OrdinalIgnoreCase) || string.Equals(actions[1], "stat", StringComparison.OrdinalIgnoreCase) || string.Equals(actions[1], "refresh", StringComparison.OrdinalIgnoreCase)) { if (actions.Length != 3) { await self.Output.WriteAsync($"The {actions[1]} command takes exactly one argument, the node number"); continue; } if (!int.TryParse(actions[2], out var ix)) { await self.Output.WriteAsync($"Failed to parse {actions[2]} as an integer"); continue; } if (ix < 0 || ix >= peers.Count) { await self.Output.WriteAsync($"The node number must be positive and less than {peers.Count}"); continue; } if (string.Equals(actions[1], "stop", StringComparison.OrdinalIgnoreCase)) { await self.Output.WriteAsync($"Stopping node {ix} ({peers[ix].Item1.Key} at {peers[ix].Item1.Address}) ..."); await peers[ix].Item3.RetireAsync(); await self.Output.WriteAsync($"Stopped node ({peers[ix].Item1.Key} at {peers[ix].Item1.Address}) ..."); //peers.RemoveAt(ix); } else if (string.Equals(actions[1], "stat", StringComparison.OrdinalIgnoreCase)) { await self.Output.WriteAsync($"Requesting stats from node {ix} ({peers[ix].Item1.Key} at {peers[ix].Item1.Address}) ..."); var channel = Channel.Create <PeerResponse>(); await peers[ix].Item3.WriteAsync(new PeerRequest() { Operation = PeerOperation.Stats, Response = channel }); await self.Output.WriteAsync($"Stats requested, waiting for response..."); await self.Output.WriteAsync(System.Text.Encoding.UTF8.GetString((await channel.ReadAsync()).Data)); } else if (string.Equals(actions[1], "refresh", StringComparison.OrdinalIgnoreCase)) { await self.Output.WriteAsync($"Performing refresh on {ix} ({peers[ix].Item1.Key} at {peers[ix].Item1.Address}) ..."); var channel = Channel.Create <PeerResponse>(); await peers[ix].Item3.WriteAsync(new PeerRequest() { Operation = 
PeerOperation.Refresh, Response = channel }); var res = await channel.ReadAsync(); await self.Output.WriteAsync($"Refreshed with {res.SuccessCount} node(s)"); } else { await self.Output.WriteAsync($"Node action not recognized: {actions[1]}"); } } else { await self.Output.WriteAsync($"Node command not recognized: {actions[1]}"); } } else if (string.Equals(command, "add", StringComparison.OrdinalIgnoreCase)) { var actions = commandline.Split(new char[] { ' ' }, 2, StringSplitOptions.RemoveEmptyEntries); if (actions.Length == 1) { await self.Output.WriteAsync("The add command needs the value to add"); continue; } if (peers.Count == 0) { await self.Output.WriteAsync("The add command does not work if no nodes are started"); continue; } var channel = Channel.Create <PeerResponse>(); var data = System.Text.Encoding.UTF8.GetBytes(actions[1]); var key = Key.ComputeKey(data); await self.Output.WriteAsync($"Adding {data.Length} byte(s) with key {key}"); await peers[rnd.Next(0, peers.Count)].Item3.WriteAsync(new PeerRequest() { Operation = PeerOperation.Add, Key = key, Data = data, Response = channel, }); await self.Output.WriteAsync("Send add request, waiting for completion"); var res = await channel.ReadAsync(); await self.Output.WriteAsync($"Add inserted into {res.SuccessCount} node(s)"); } else if (string.Equals(command, "get", StringComparison.OrdinalIgnoreCase)) { var actions = commandline.Split(new char[] { ' ' }, 3, StringSplitOptions.RemoveEmptyEntries); if (actions.Length == 1) { await self.Output.WriteAsync("The get command needs the hash to find"); continue; } if (actions.Length == 3) { await self.Output.WriteAsync("The get command needs only one argument"); continue; } if (peers.Count == 0) { await self.Output.WriteAsync("The get command does not work if no nodes are started"); continue; } Key key; try { key = new Key(actions[1]); } catch (Exception ex) { await self.Output.WriteAsync($"Failed to parse key: {ex.Message}"); continue; } var channel = Channel.Create <PeerResponse>(); await self.Output.WriteAsync($"Locating key"); await peers[rnd.Next(0, peers.Count)].Item3.WriteAsync(new PeerRequest() { Operation = PeerOperation.Find, Key = key, Response = channel, }); var res = await channel.ReadAsync(); if (res.Data == null) { await self.Output.WriteAsync($"Did not find the key ..."); } else { await self.Output.WriteAsync($"Found: {System.Text.Encoding.UTF8.GetString(res.Data)}"); } } else if (string.Equals(command, "hash", StringComparison.OrdinalIgnoreCase)) { var actions = commandline.Split(new char[] { ' ' }, 2, StringSplitOptions.RemoveEmptyEntries); if (actions.Length == 1) { await self.Output.WriteAsync("The add command needs the value to add"); continue; } await self.Output.WriteAsync($"Key: {Key.ComputeKey(actions[1])}"); } else { await self.Output.WriteAsync($"Command not recognized: {command}"); } } catch (Exception ex) { await self.Output.WriteAsync($"Command failed: {ex.Message}"); } } } ); return(Task.WhenAll(consoleOut /*, consoleInput*/, proc)); }
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, long lastfilesetid, CancellationToken token) { return(AutomationExtensions.RunTask(new { Input = Backup.Channels.SourcePaths.ForRead, StreamBlockChannel = Channels.StreamBlock.ForWrite, Output = Backup.Channels.ProcessedFiles.ForWrite, }, async self => { var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options); var prevprefix = new KeyValuePair <string, long>(null, -1); var CHECKFILETIMEONLY = options.CheckFiletimeOnly; var DISABLEFILETIMECHECK = options.DisableFiletimeCheck; while (true) { var path = await self.Input.ReadAsync(); var lastwrite = new DateTime(0, DateTimeKind.Utc); var attributes = default(FileAttributes); try { lastwrite = snapshot.GetLastWriteTimeUtc(path); } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path); } try { attributes = snapshot.GetAttributes(path); } catch (Exception ex) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "FailedAttributeRead", "Failed to read attributes from {0}: {1}", path, ex.Message); } // If we only have metadata, stop here if (await ProcessMetadata(path, attributes, lastwrite, options, snapshot, emptymetadata, database, self.StreamBlockChannel).ConfigureAwait(false)) { try { var split = Database.LocalDatabase.SplitIntoPrefixAndName(path); long prefixid; if (string.Equals(prevprefix.Key, split.Key, StringComparison.Ordinal)) { prefixid = prevprefix.Value; } else { prefixid = await database.GetOrCreatePathPrefix(split.Key); prevprefix = new KeyValuePair <string, long>(split.Key, prefixid); } if (CHECKFILETIMEONLY || DISABLEFILETIMECHECK) { var tmp = await database.GetFileLastModifiedAsync(prefixid, split.Value, lastfilesetid, false); await self.Output.WriteAsync(new FileEntry { OldId = tmp.Item1, Path = path, PathPrefixID = prefixid, Filename = split.Value, Attributes = attributes, LastWrite = lastwrite, OldModified = tmp.Item2, LastFileSize = tmp.Item3, OldMetaHash = null, OldMetaSize = -1 }); } else { var res = await database.GetFileEntryAsync(prefixid, split.Value, lastfilesetid); await self.Output.WriteAsync(new FileEntry { OldId = res == null ? -1 : res.id, Path = path, PathPrefixID = prefixid, Filename = split.Value, Attributes = attributes, LastWrite = lastwrite, OldModified = res == null ? new DateTime(0) : res.modified, LastFileSize = res == null ? -1 : res.filesize, OldMetaHash = res == null ? null : res.metahash, OldMetaSize = res == null ? -1 : res.metasize }); } } catch (Exception ex) { if (ex.IsRetiredException() || token.IsCancellationRequested) { continue; } Logging.Log.WriteWarningMessage(FILELOGTAG, "ProcessingMetadataFailed", ex, "Failed to process entry, path: {0}", path); } } } })); }
/// <summary> /// Runs the broker process. /// </summary> /// <param name="node">This nodes information</param> /// <param name="maxconnections">The maximum number of connections to allow</param> /// <returns>An awaitable task.</returns> public static Task RunAsync(PeerInfo node, int maxconnections = 50) { // The primary table for finding peers var peers = new Dictionary <EndPoint, Tuple <Task, IWriteChannel <ConnectionRequest> > >(); // The peers listed by key var peersbykey = new Dictionary <Key, EndPoint>(); // The MRU cache of peers var mrucache = new MRUCache <EndPoint, Key>(maxconnections, TimeSpan.FromDays(10)); return(AutomationExtensions.RunTask( new { Request = Channels.ConnectionBrokerRequests.ForRead, Registrations = Channels.ConnectionBrokerRegistrations.ForRead, Stats = Channels.ConnectionBrokerStats.ForRead, SelfHandler = Channels.RemoteRequests.ForWrite, Routing = Channels.RoutingTableRequests.ForWrite }, async self => { log.Debug($"Broker is now running"); while (true) { log.Debug($"Broker is waiting for requests ..."); var mreq = await MultiChannelAccess.ReadFromAnyAsync( self.Stats.RequestRead(), self.Registrations.RequestRead(), self.Request.RequestRead() ); if (mreq.Channel == self.Stats) { log.Debug($"Broker got stat request"); var req = (IWriteChannel <ConnectionStatsResponse>)mreq.Value; await req.WriteAsync(new ConnectionStatsResponse() { EndPoints = peers.Count, Keys = peersbykey.Count, Stats = (Channels.ConnectionBrokerRequests.Get() as ProfilingChannel <ConnectionRequest>)?.ReportStats() }); } else if (mreq.Channel == self.Registrations) { var req = (ConnectionRegistrationRequest)mreq.Value; log.Debug($"Broker got {(req.IsTerminate ? "termination" : "registration")} request"); if (req.IsTerminate) { // Make sure we do not have stale stuff in the MRU cache if (req.Peer != null && req.Peer.Address != null) { mrucache.Remove(req.Peer.Address); } if (req.Peer.Address != null && peers.TryGetValue(req.Peer.Address, out var c) && c.Item2 == req.Channel) { peers.Remove(req.Peer.Address); if (req.Peer.Key != null) { peersbykey.Remove(req.Peer.Key); } } if (req.UpdateRouting) { log.Debug($"Removing peer in routing table due to termination of connection {req.Peer.Key} - {req.Peer.Address}"); await self.Routing.RemovePeerAsync(req.Peer.Key); } } else { if (req.Peer.Address != null && peers.TryGetValue(req.Peer.Address, out var c) && (c.Item2 == req.Channel || c == null)) { if (c == null) { peers[req.Peer.Address] = new Tuple <Task, IWriteChannel <ConnectionRequest> >(null, req.Channel); } if (!peersbykey.ContainsKey(req.Peer.Key)) { peersbykey[req.Peer.Key] = req.Peer.Address; } } if (req.UpdateRouting) { log.Debug($"Adding new peer to routing table {req.Peer.Key} - {req.Peer.Address}"); await self.Routing.AddPeerAsync(req.Peer.Key, req.Peer); } } } else { var req = (ConnectionRequest)mreq.Value; log.Debug($"Broker got connection request for {req.EndPoint}"); // Check if we request ourselves if (node.Key.Equals(req.Key) || node.Address.Equals(req.EndPoint)) { log.Debug($"Broker got self-request, forwarding to owner"); await self.SelfHandler.WriteAsync(req); continue; } Tuple <Task, IWriteChannel <ConnectionRequest> > peer = null; try { // Existing connection, update MRU var overflow = mrucache.Add(req.EndPoint, req.Key); // If we have too many connections, kill one now if (overflow != null) { // We could make this also take the closest k peers into account log.Debug($"Broker has too many connections, closing {req.EndPoint}"); await peers[overflow].Item2.RetireAsync(); } if 
(!peers.TryGetValue(req.EndPoint, out peer)) { log.Debug($"Broker is starting a connection to {req.EndPoint}"); mrucache.Add(req.EndPoint, req.Key); peer = peers[req.EndPoint] = PeerConnection.CreatePeer( node, new PeerInfo(req.Key, req.EndPoint), () => ConnectToPeerAsync(req.EndPoint), REQ_BUFFER_SIZE ); if (req.Key != null) { peersbykey[req.Key] = req.EndPoint; } } await peer.Item2.WriteAsync(req); } catch (Exception ex) { log.Warn("Failed to send request to peer", ex); try { await req.Response.WriteAsync(new ConnectionResponse() { Exception = ex }); } catch (Exception ex2) { log.Warn("Failed to write failure response", ex2); } if (peer != null) { try { peer.Item2.AsWriteOnly().Dispose(); } catch (Exception ex2) { log.Warn("Failed to terminate write channel", ex2); } try { await peer.Item1; } catch (Exception ex2) { log.Warn("Peer connection stopped with error", ex2); } } peers.Remove(req.EndPoint); } } } } )); }
private void TestCappedPool(int poolsize, int readers, int writes) { var concurrent = 0; var max_concurrent = 0; var rnd = new Random(); var earlyRetire = new TaskCompletionSource <bool>(); using (new IsolatedChannelScope()) using (new ExecutionScope(poolsize <= 0 ? ThreadPool.DEFAULT_THREADPOOL : new CappedThreadedThreadPool(poolsize))) { var readertasks = Task.WhenAll(Enumerable.Range(0, readers).Select(count => AutomationExtensions.RunTask(new { Input = ChannelMarker.ForRead <int>("channel") }, async x => { //Console.WriteLine("Started {0}", count); while (true) { await x.Input.ReadAsync(); var cur = System.Threading.Interlocked.Increment(ref concurrent); //Console.WriteLine("Active {0}", count); // Dirty access to "concurrent" and "max_concurrent" variables max_concurrent = Math.Max(cur, Math.Max(max_concurrent, concurrent)); if (cur > poolsize && poolsize > 0) { Console.WriteLine("Found {0} concurrent threads", cur); earlyRetire.TrySetException(new Exception(string.Format("Found {0} concurrent threads", cur))); throw new Exception(string.Format("Found {0} concurrent threads", cur)); } // By blocking the actual thread, we provoke the threadpool to start multiple threads System.Threading.Thread.Sleep(rnd.Next(10, 500)); // Dirty access to "concurrent" and "max_concurrent" variables max_concurrent = Math.Max(cur, Math.Max(max_concurrent, concurrent)); System.Threading.Interlocked.Decrement(ref concurrent); //Console.WriteLine("Inactive {0}", count); } }) )); var writetask = AutomationExtensions.RunTask(new { Output = ChannelMarker.ForWrite <int>("channel") }, async x => { foreach (var i in Enumerable.Range(0, writes)) { //Console.WriteLine("Writing {0}", i); await x.Output.WriteAsync(i); } }); var timeout = Task.Delay((writes * 500) + 5000); if (Task.WhenAny(Task.WhenAll(readertasks, writetask), timeout, earlyRetire.Task).WaitForTaskOrThrow() == timeout) { throw new TimeoutException("I've waited for so long ...."); } Console.WriteLine("Threads at shutdown: {0}", concurrent); ExecutionScope.Current.EnsureFinishedAsync(TimeSpan.FromSeconds(5)).WaitForTaskOrThrow(); Console.WriteLine("Max concurrent threads: {0}, should be {1}", max_concurrent, poolsize <= 0 ? "unlimited" : poolsize.ToString()); } }
public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, long lastfilesetid) { return(AutomationExtensions.RunTask(new { Input = Backup.Channels.SourcePaths.ForRead, StreamBlockChannel = Channels.StreamBlock.ForWrite, Output = Backup.Channels.ProcessedFiles.ForWrite, }, async self => { var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options); var CHECKFILETIMEONLY = options.CheckFiletimeOnly; var DISABLEFILETIMECHECK = options.DisableFiletimeCheck; while (true) { var path = await self.Input.ReadAsync(); var lastwrite = new DateTime(0, DateTimeKind.Utc); var attributes = default(FileAttributes); try { lastwrite = snapshot.GetLastWriteTimeUtc(path); } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path); } try { attributes = snapshot.GetAttributes(path); } catch (Exception ex) { Logging.Log.WriteVerboseMessage(FILELOGTAG, "FailedAttributeRead", "Failed to read attributes from {0}: {1}", path, ex.Message); } // If we only have metadata, stop here if (await ProcessMetadata(path, attributes, lastwrite, options, snapshot, emptymetadata, database, self.StreamBlockChannel).ConfigureAwait(false)) { try { if (CHECKFILETIMEONLY || DISABLEFILETIMECHECK) { var tmp = await database.GetFileLastModifiedAsync(path, lastfilesetid, false); await self.Output.WriteAsync(new FileEntry() { OldId = tmp.Item1, Path = path, Attributes = attributes, LastWrite = lastwrite, OldModified = tmp.Item2, LastFileSize = tmp.Item3, OldMetaHash = null, OldMetaSize = -1 }); } else { var res = await database.GetFileEntryAsync(path, lastfilesetid); await self.Output.WriteAsync(new FileEntry() { OldId = res == null ? -1 : res.id, Path = path, Attributes = attributes, LastWrite = lastwrite, OldModified = res == null ? new DateTime(0) : res.modified, LastFileSize = res == null ? -1 : res.filesize, OldMetaHash = res == null ? null : res.metahash, OldMetaSize = res == null ? -1 : res.metasize }); } } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "ProcessingMetadataFailed", ex, "Failed to process entry, path: {0}", path); } } } })); }
public static Task Run(Common.BackendHandler backend, Options options, Common.DatabaseCommon database, BackupResults results, Common.ITaskReader taskreader, StatsCollector stats) { return(AutomationExtensions.RunTask(new { Input = Channels.BackendRequest.ForRead, }, async self => { var inProgress = new Queue <KeyValuePair <int, Task> >(); var max_pending = options.AsynchronousUploadLimit == 0 ? long.MaxValue : options.AsynchronousUploadLimit; var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None; var active = 0; var lastSize = -1L; while (!await self.Input.IsRetiredAsync && await taskreader.ProgressAsync) { try { var req = await self.Input.ReadAsync(); if (!await taskreader.ProgressAsync) { continue; } var task = default(KeyValuePair <int, Task>); if (req is VolumeUploadRequest) { lastSize = ((VolumeUploadRequest)req).BlockVolume.SourceSize; if (noIndexFiles || ((VolumeUploadRequest)req).IndexVolume == null) { task = new KeyValuePair <int, Task>(1, backend.UploadFileAsync(((VolumeUploadRequest)req).BlockVolume, null)); } else { task = new KeyValuePair <int, Task>(2, backend.UploadFileAsync(((VolumeUploadRequest)req).BlockVolume, name => ((VolumeUploadRequest)req).IndexVolume.CreateVolume(name, options, database))); } } else if (req is FilesetUploadRequest) { task = new KeyValuePair <int, Task>(1, backend.UploadFileAsync(((FilesetUploadRequest)req).Fileset)); } else if (req is IndexVolumeUploadRequest) { task = new KeyValuePair <int, Task>(1, backend.UploadFileAsync(((IndexVolumeUploadRequest)req).IndexVolume)); } else if (req is FlushRequest) { try { while (inProgress.Count > 0) { await inProgress.Dequeue().Value; } active = 0; } finally { ((FlushRequest)req).SetFlushed(lastSize); } } if (task.Value != null) { inProgress.Enqueue(task); active += task.Key; } } catch (Exception ex) { if (!ex.IsRetiredException()) { throw; } } while (active >= max_pending) { var top = inProgress.Dequeue(); // See if we are done if (await Task.WhenAny(top.Value, Task.Delay(500)) != top.Value) { try { stats.SetBlocking(true); await top.Value; } finally { stats.SetBlocking(false); } } active -= top.Key; } } results.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload); try { stats.SetBlocking(true); while (inProgress.Count > 0) { await inProgress.Dequeue().Value; } } finally { stats.SetBlocking(false); } })); }
public static Task Run(Options options, BackupDatabase database, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.StreamBlock.ForRead, ProgressChannel = Channels.ProgressEvents.ForWrite, BlockOutput = Channels.OutputBlocks.ForWrite }, async self => { var blocksize = options.Blocksize; var filehasher = Duplicati.Library.Utility.HashAlgorithmHelper.Create(options.FileHashAlgorithm); var blockhasher = Duplicati.Library.Utility.HashAlgorithmHelper.Create(options.BlockHashAlgorithm); var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options); var maxmetadatasize = (options.Blocksize / (long)options.BlockhashSize) * options.Blocksize; if (blockhasher == null) { throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported"); } if (filehasher == null) { throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(options.FileHashAlgorithm), "FileHashAlgorithmNotSupported"); } if (!blockhasher.CanReuseTransform) { throw new UserInformationException(Strings.Common.InvalidCryptoSystem(options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported"); } if (!filehasher.CanReuseTransform) { throw new UserInformationException(Strings.Common.InvalidCryptoSystem(options.FileHashAlgorithm), "FileHashAlgorithmNotSupported"); } using (var empty_metadata_stream = new MemoryStream(emptymetadata.Blob)) while (await taskreader.ProgressAsync) { var send_close = false; var filesize = 0L; var filename = string.Empty; var e = await self.Input.ReadAsync(); var cur = e.Result; try { var stream = e.Stream; using (var blocklisthashes = new Library.Utility.FileBackedStringList()) using (var hashcollector = new Library.Utility.FileBackedStringList()) { var blocklistbuffer = new byte[blocksize]; var blocklistoffset = 0L; long fslen = -1; try { fslen = stream.Length; } catch (Exception ex) { Logging.Log.WriteWarningMessage(FILELOGTAG, "FileLengthFailure", ex, "Failed to read file length for file {0}", e.Path); } if (e.IsMetadata && fslen > maxmetadatasize) { //TODO: To fix this, the "WriteFileset" method in BackupHandler needs to // be updated such that it can select sets even when there are multiple // blocklist hashes for the metadata. 
// This could be done such that an extra query is made if the metadata // spans multiple blocklist hashes, as it is not expected to be common Logging.Log.WriteWarningMessage(LOGTAG, "TooLargeMetadata", null, "Metadata size is {0}, but the largest accepted size is {1}, recording empty metadata for {2}", fslen, maxmetadatasize, e.Path); empty_metadata_stream.Position = 0; stream = empty_metadata_stream; fslen = stream.Length; } await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = fslen, Type = EventType.FileStarted }); send_close = true; filehasher.Initialize(); var lastread = 0; var buf = new byte[blocksize]; var lastupdate = DateTime.Now; // Core processing loop, read blocks of data and hash individually while (((lastread = await stream.ForceStreamReadAsync(buf, blocksize)) != 0)) { // Run file hashing concurrently to squeeze a little extra concurrency out of it var pftask = Task.Run(() => filehasher.TransformBlock(buf, 0, lastread, buf, 0)); var hashdata = blockhasher.ComputeHash(buf, 0, lastread); var hashkey = Convert.ToBase64String(hashdata); // If we have too many hashes, flush the blocklist if (blocklistbuffer.Length - blocklistoffset < hashdata.Length) { var blkey = Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, (int)blocklistoffset)); blocklisthashes.Add(blkey); await DataBlock.AddBlockToOutputAsync(self.BlockOutput, blkey, blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true); blocklistoffset = 0; blocklistbuffer = new byte[blocksize]; } // Store the current hash in the blocklist Array.Copy(hashdata, 0, blocklistbuffer, blocklistoffset, hashdata.Length); blocklistoffset += hashdata.Length; hashcollector.Add(hashkey); filesize += lastread; // Don't spam updates if ((DateTime.Now - lastupdate).TotalSeconds > 10) { await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filesize, Type = EventType.FileProgressUpdate }); lastupdate = DateTime.Now; } // Make sure the filehasher is done with the buf instance before we pass it on await pftask; await DataBlock.AddBlockToOutputAsync(self.BlockOutput, hashkey, buf, 0, lastread, e.Hint, true); buf = new byte[blocksize]; } // If we have more than a single block of data, output the (trailing) blocklist if (hashcollector.Count > 1) { var blkey = Convert.ToBase64String(blockhasher.ComputeHash(blocklistbuffer, 0, (int)blocklistoffset)); blocklisthashes.Add(blkey); await DataBlock.AddBlockToOutputAsync(self.BlockOutput, blkey, blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true); } filehasher.TransformFinalBlock(new byte[0], 0, 0); var filehash = Convert.ToBase64String(filehasher.Hash); var blocksetid = await database.AddBlocksetAsync(filehash, filesize, blocksize, hashcollector, blocklisthashes); cur.SetResult(new StreamProcessResult() { Streamlength = filesize, Streamhash = filehash, Blocksetid = blocksetid }); cur = null; } } catch (Exception ex) { try { if (cur != null) { cur.TrySetException(ex); } } catch { } // Rethrow if (ex.IsRetiredException()) { throw; } } finally { if (cur != null) { try { cur.TrySetCanceled(); } catch { } cur = null; } if (send_close) { await self.ProgressChannel.WriteAsync(new ProgressEvent() { Filepath = e.Path, Length = filesize, Type = EventType.FileClosed }); } send_close = false; } } })); }