public static async Task ValidateBlockAsync(ICoreStorage coreStorage, ICoreRules rules, Chain newChain, ISourceBlock<ValidatableTx> validatableTxes, CancellationToken cancelToken = default(CancellationToken))
{
    // tally transactions
    object finalTally = null;
    var txTallier = new TransformBlock<ValidatableTx, ValidatableTx>(
        validatableTx =>
        {
            var runningTally = finalTally;
            rules.TallyTransaction(newChain, validatableTx, ref runningTally);
            finalTally = runningTally;

            return validatableTx;
        });
    validatableTxes.LinkTo(txTallier, new DataflowLinkOptions { PropagateCompletion = true });

    // validate transactions
    var txValidator = InitTxValidator(rules, newChain, cancelToken);

    // begin feeding the tx validator
    txTallier.LinkTo(txValidator, new DataflowLinkOptions { PropagateCompletion = true });

    // validate scripts
    var scriptValidator = InitScriptValidator(rules, newChain, cancelToken);

    // begin feeding the script validator
    txValidator.LinkTo(scriptValidator, new DataflowLinkOptions { PropagateCompletion = true });

    //TODO
    await PipelineCompletion.Create(
        new Task[] { },
        new IDataflowBlock[] { validatableTxes, txTallier, txValidator, scriptValidator });

    // validate overall block
    rules.PostValidateBlock(newChain, finalTally);
}
public void Consumer(ISourceBlock<string> source)
{
    var ablock = new ActionBlock<string>(
        data => this.ParseRawIRC(data),
        new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = DataflowBlockOptions.Unbounded });

    source.LinkTo(ablock);
}
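Note that the link above does not propagate completion, so ablock.Completion will never finish when the source completes. A minimal sketch of the awaitable variant (same ParseRawIRC handler assumed):

public async Task ConsumeAsync(ISourceBlock<string> source)
{
    var ablock = new ActionBlock<string>(
        data => this.ParseRawIRC(data),
        new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = DataflowBlockOptions.Unbounded });

    // Propagating completion lets the caller await the consumer.
    source.LinkTo(ablock, new DataflowLinkOptions { PropagateCompletion = true });

    await ablock.Completion;
}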
public TelemetryReaderNode(IEnumerable<ITelemetrySourceFactory> telemetrySourceFactories)
{
    var createSource = TelemetrySourceSelector(telemetrySourceFactories);

    var externalGameTelemetrySource = new BufferBlock<V0.IGameTelemetry>();
    GameTelemetrySource = externalGameTelemetrySource;

    RunningGameTarget = new ActionBlock<IRunningGame>(runningGame =>
    {
        _currentGameTelemetrySource?.Complete();
        _currentGameTelemetrySource = createSource(runningGame.Name);
        _currentGameTelemetrySource?.LinkTo(externalGameTelemetrySource, new DataflowLinkOptions());
    });
}
private static async Task<IList<T>> GetBlockOutputsAsync<T>(ISourceBlock<T> block)
{
    var ret = new List<T>();
    var actionBlock = new ActionBlock<T>(item => { ret.Add(item); });

    block.LinkTo(actionBlock);
    var __ = block.Completion.ContinueWith(_ => actionBlock.Complete());

    await actionBlock.Completion;
    return ret;
}
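A usage sketch, assuming a source that the producer completes (the BufferBlock setup here is hypothetical):

var buffer = new BufferBlock<int>();
buffer.Post(1);
buffer.Post(2);
buffer.Complete();

// Completes once the continuation above calls actionBlock.Complete().
IList<int> outputs = await GetBlockOutputsAsync(buffer); // outputs: [1, 2]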
public static IPropagatorBlock<TInput, TOutput> Select<TInput, TOutput>(this ISourceBlock<TInput> source, Func<TInput, Task<TOutput>> transform)
{
    var transformBlock = new TransformBlock<TInput, TOutput>(transform, new ExecutionDataflowBlockOptions()
    {
        MaxDegreeOfParallelism = Environment.ProcessorCount + 1
    });

    source.LinkTo(transformBlock, new DataflowLinkOptions { PropagateCompletion = true });
    return transformBlock;
}
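A usage sketch of this Select extension (the word-length transform is a made-up example):

var words = new BufferBlock<string>();
var lengths = words.Select(async word =>
{
    await Task.Yield(); // stand-in for real async work
    return word.Length;
});

words.Post("dataflow");
words.Complete();

int length = await lengths.ReceiveAsync(); // 8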
/// <summary>
/// Aggregates the results of two blocks
/// </summary>
/// <typeparam name="T">Specifies the type of data accepted by the blocks</typeparam>
/// <param name="target1">target1</param>
/// <param name="target2">target2</param>
/// <returns>A source block producing the joined results</returns>
private static ISourceBlock<T> Aggregate<T>(
    ISourceBlock<T> target1,
    ISourceBlock<T> target2)
{
    var joinBlock = new JoinBlock<T, T>();
    target1.LinkTo(joinBlock.Target1);
    target2.LinkTo(joinBlock.Target2);

    // Note: only Item1 of each joined pair is forwarded.
    var transformBlock = new TransformBlock<Tuple<T, T>, T>(data => data.Item1);
    joinBlock.LinkTo(transformBlock);
    return transformBlock;
}
public static ITargetBlock<TInput> ForEach<TInput>(this ISourceBlock<TInput> source, Func<TInput, Task> action)
{
    var actionBlock = new ActionBlock<TInput>(action, new ExecutionDataflowBlockOptions()
    {
        MaxDegreeOfParallelism = Environment.ProcessorCount + 1
    });

    source.LinkTo(actionBlock, new DataflowLinkOptions { PropagateCompletion = true });
    return actionBlock;
}
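A usage sketch of ForEach (handler body is hypothetical); because the link propagates completion, the returned block can be awaited:

var numbers = new BufferBlock<int>();
var sink = numbers.ForEach(async n =>
{
    await Task.Delay(10); // stand-in for real async work
    Console.WriteLine(n);
});

numbers.Post(1);
numbers.Post(2);
numbers.Complete();

await sink.Completion; // finishes because completion propagates from the source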
Task StoreFileFingerprintsAsync(ISourceBlock<FileFingerprint[]> storeBatchBlock, CancellationToken cancellationToken)
{
    var block = new ActionBlock<FileFingerprint[]>(
        fileFingerprints => WriteBlobsAsync(fileFingerprints, cancellationToken),
        new ExecutionDataflowBlockOptions { CancellationToken = cancellationToken });

    storeBatchBlock.LinkTo(block, new DataflowLinkOptions { PropagateCompletion = true });

    return block.Completion;
}
private async Task BlockKeepsDeclinedMessages(Func<ISourceBlock<int>> BlockFactory, Func<ISourceBlock<int>, int> OutputCount)
{
    ISourceBlock<int> block = BlockFactory();
    ITargetBlock<int> blockT = (ITargetBlock<int>)block;
    TestTargetBlock<int> testTarget = new TestTargetBlock<int>();
    testTarget.ConsumptionMode = DataflowMessageStatus.Declined;
    block.LinkTo(testTarget, PropagateCompletion);

    // Assumption: block will keep incoming messages even when its target is declining them
    Assert.True(blockT.Post(1));
    Assert.True(blockT.Post(2));
    Assert.True(blockT.Post(3));

    // The block has run out of capacity
    Assert.False(blockT.Post(4));

    // This message will be postponed (and, in fact, released when we ask the block to complete)
    blockT.SendAsync(5).Forget();

    // Wait till the block offers a message
    // Assumption: only one message will be offered, the block will not offer more messages if the target declines
    bool oneMessageOffered = await TaskUtils.PollWaitAsync(() => testTarget.MessagesDeclined.Count == 1, MessageArrivalTimeout);
    Assert.True(oneMessageOffered);
    Assert.True(testTarget.MessagesConsumed.Count == 0);
    Assert.True(testTarget.MessagesPostponed.Count == 0);

    // The messages are buffered
    Assert.Equal(3, OutputCount(block));

    // Assumption: the block will try NOT to deliver declined messages again when asked to complete.
    testTarget.ConsumptionMode = DataflowMessageStatus.Accepted;
    block.Complete();
    bool someMessagesDelivered = await TaskUtils.PollWaitAsync(() => testTarget.MessagesConsumed.Count > 0, MessageArrivalTimeout);
    Assert.False(someMessagesDelivered);

    // Completion task should still be running
    await Task.WhenAny(block.Completion, Task.Delay(CompletionTimeout));
    Assert.False(block.Completion.IsCompleted);

    // Assumption: block will not start target's completion until it itself completes
    await Task.WhenAny(testTarget.Completion, Task.Delay(CompletionTimeout));
    Assert.True(testTarget.Completion.IsNotStarted());
}
/// <summary>
/// Create a block which passes through items from a source block, and calls an action before completing.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="source"></param>
/// <param name="onCompleteAction"></param>
/// <param name="cancelToken"></param>
/// <returns></returns>
public static ISourceBlock<T> Create<T>(ISourceBlock<T> source, Action onCompleteAction, CancellationToken cancelToken = default(CancellationToken))
{
    if (source == null)
        throw new ArgumentNullException(nameof(source));
    if (onCompleteAction == null)
        throw new ArgumentNullException(nameof(onCompleteAction));

    var passthrough = new TransformBlock<T, T>(item => item);
    source.LinkTo(passthrough, new DataflowLinkOptions { PropagateCompletion = false });

    source.Completion.ContinueWith(
        task =>
        {
            var passthroughBlock = (IDataflowBlock)passthrough;
            try
            {
                if (task.Status == TaskStatus.RanToCompletion)
                    onCompleteAction();

                if (task.IsCanceled)
                    passthroughBlock.Fault(new OperationCanceledException());
                else if (task.IsFaulted)
                    passthroughBlock.Fault(task.Exception);
                else
                    passthrough.Complete();
            }
            catch (Exception ex)
            {
                passthroughBlock.Fault(ex);
            }
        }, cancelToken);

    return passthrough;
}
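A usage sketch, assuming Create is in scope as a static helper; the callback runs only when the source ran to completion:

var source = new BufferBlock<int>();
var withCallback = Create(source, () => Console.WriteLine("source finished"));

var printer = new ActionBlock<int>(n => Console.WriteLine(n));
withCallback.LinkTo(printer, new DataflowLinkOptions { PropagateCompletion = true });

source.Post(42);
source.Complete();
await printer.Completion;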
/// <summary> /// Links to the specified <see cref="Action{T}" /> to receive a cross-sectional slice of project /// data, including detailed descriptions of what changed between snapshots, as described by /// specified rules. /// </summary> /// <param name="source"> /// The broadcasting block that produces the messages. /// </param> /// <param name="target"> /// The <see cref="Action{T}"/> to receive the broadcasts. /// </param> /// <param name="suppressVersionOnlyUpdates"> /// A value indicating whether to prevent messages from propagating to the target /// block if no project changes are include other than an incremented version number. /// </param> /// <param name="ruleNames"> /// The names of the rules that describe the project data the caller is interested in. /// </param> /// <exception cref="ArgumentNullException"> /// <paramref name="source"/> is <see langword="null"/>. /// <para> /// -or- /// </para> /// <paramref name="target"/> is <see langword="null"/>. /// </exception> public static IDisposable LinkToAction( this ISourceBlock <IProjectVersionedValue <IProjectSubscriptionUpdate> > source, Action <IProjectVersionedValue <IProjectSubscriptionUpdate> > target, bool suppressVersionOnlyUpdates = true, IEnumerable <string>?ruleNames = null) { Requires.NotNull(source, nameof(source)); Requires.NotNull(target, nameof(target)); return(source.LinkTo(DataflowBlockSlim.CreateActionBlock(target), DataflowOption.PropagateCompletion, initialDataAsNew: true, suppressVersionOnlyUpdates: suppressVersionOnlyUpdates, ruleNames: ruleNames)); }
/// <summary>
/// Links source to three targets
/// </summary>
/// <typeparam name="TInput">The input</typeparam>
/// <typeparam name="TOutput">The output</typeparam>
/// <param name="source">The source</param>
/// <param name="first">The first target</param>
/// <param name="second">The second target</param>
/// <param name="third">The third target</param>
/// <returns>ISourceBlock</returns>
public static ISourceBlock<TOutput> Next<TInput, TOutput>(
    this ISourceBlock<TInput> source,
    IPropagatorBlock<TInput, TOutput> first,
    IPropagatorBlock<TInput, TOutput> second,
    IPropagatorBlock<TInput, TOutput> third)
{
    var broadcast = new BroadcastBlock<TInput>(null);

    source.LinkTo(broadcast);
    broadcast.LinkTo(first);
    broadcast.LinkTo(second);
    broadcast.LinkTo(third);

    return Aggregate(first, second, third);
}
/// <summary>
/// Creates a source block that produces a transformed value for each value from original source block,
/// skipping intermediate input and output states, and hence is not suitable for producing or consuming
/// deltas.
/// </summary>
/// <typeparam name="TOut">
/// The type of value produced by <paramref name="transform"/>.
/// </typeparam>
/// <param name="source">
/// The source block whose values are to be transformed.
/// </param>
/// <param name="transform">
/// The function to execute on each value from <paramref name="source"/>.
/// </param>
/// <param name="suppressVersionOnlyUpdates">
/// A value indicating whether to prevent messages from propagating to the target
/// block if no project changes are included other than an incremented version number.
/// </param>
/// <param name="ruleNames">
/// The names of the rules that describe the project data the caller is interested in.
/// </param>
/// <returns>
/// The transformed source block and a disposable value that terminates the link.
/// </returns>
/// <exception cref="ArgumentNullException">
/// <paramref name="source"/> is <see langword="null"/>.
/// <para>
/// -or-
/// </para>
/// <paramref name="transform"/> is <see langword="null"/>.
/// </exception>
public static DisposableValue<ISourceBlock<TOut>> TransformWithNoDelta<TOut>(
    this ISourceBlock<IProjectVersionedValue<IProjectSubscriptionUpdate>> source,
    Func<IProjectVersionedValue<IProjectSubscriptionUpdate>, TOut> transform,
    bool suppressVersionOnlyUpdates,
    IEnumerable<string> ruleNames = null)
{
    Requires.NotNull(source, nameof(source));
    Requires.NotNull(transform, nameof(transform));

    IPropagatorBlock<IProjectVersionedValue<IProjectSubscriptionUpdate>, TOut> transformBlock =
        DataflowBlockSlim.CreateTransformBlock(transform, skipIntermediateInputData: true, skipIntermediateOutputData: true);

    IDisposable link = source.LinkTo(transformBlock, DataflowOption.PropagateCompletion, initialDataAsNew: true, suppressVersionOnlyUpdates: suppressVersionOnlyUpdates, ruleNames: ruleNames);

    return new DisposableValue<ISourceBlock<TOut>>(transformBlock, link);
}
private static void LinkDataflowBlocks<TBlock>(ISourceBlock<TBlock> source, ITargetBlock<TBlock> target)
{
    source.LinkTo(target);
    source.Completion.ContinueWith(t =>
    {
        if (t.IsFaulted)
        {
            target.Fault(t.Exception);
        }
        else
        {
            target.Complete();
        }
    });
}
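For comparison, the built-in PropagateCompletion link option gives the same completion-and-fault forwarding in a single call:

source.LinkTo(target, new DataflowLinkOptions { PropagateCompletion = true });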
public static IDisposable LinkTo<TOutput> (
    this ISourceBlock<TOutput> source, ITargetBlock<TOutput> target,
    DataflowLinkOptions linkOptions, Predicate<TOutput> predicate)
{
    if (source == null)
        throw new ArgumentNullException ("source");
    if (predicate == null)
        throw new ArgumentNullException ("predicate");
    if (target == null)
        throw new ArgumentNullException ("target");

    var predicateBlock = new PredicateBlock<TOutput> (source, target, predicate);

    return source.LinkTo (predicateBlock, linkOptions);
}
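This mirrors the framework's predicate overload of DataflowBlock.LinkTo; a typical use (hypothetical blocks), including a null target to drain non-matching messages so the source does not stall:

var numbers = new BufferBlock<int>();
var evens = new ActionBlock<int>(n => Console.WriteLine($"even: {n}"));

numbers.LinkTo(evens, new DataflowLinkOptions { PropagateCompletion = true }, n => n % 2 == 0);
numbers.LinkTo(DataflowBlock.NullTarget<int>()); // discard odd values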
/// <summary>
/// For the three given blocks, creates a job that joins their
/// input data and returns an aggregated result
/// </summary>
/// <typeparam name="T1">Data type of block <paramref name="source1"/></typeparam>
/// <typeparam name="T2">Data type of block <paramref name="source2"/></typeparam>
/// <typeparam name="T3">Data type of block <paramref name="source3"/></typeparam>
/// <typeparam name="T">Data type of the result</typeparam>
/// <param name="source1">Block serving as a source of data of type <typeparamref name="T1"/></param>
/// <param name="source2">Block serving as a source of data of type <typeparamref name="T2"/></param>
/// <param name="source3">Block serving as a source of data of type <typeparamref name="T3"/></param>
/// <param name="pipeline">Pipeline for which the job is created</param>
/// <param name="transform">Function transforming the aggregated result of the join</param>
/// <returns>A job that transforms the joined input of the three blocks</returns>
public static PropagationJob<Tuple<T1, T2, T3>, T> Join<T1, T2, T3, T>(
    this IPipeline pipeline,
    ISourceBlock<T1> source1,
    ISourceBlock<T2> source2,
    ISourceBlock<T3> source3,
    Func<Tuple<T1, T2, T3>, T> transform)
{
    var joint = new JoinBlock<T1, T2, T3>(pipeline.GroupingOptions);

    source1.LinkTo(joint.Target1, pipeline.LinkOptions);
    source2.LinkTo(joint.Target2, pipeline.LinkOptions);
    source3.LinkTo(joint.Target3, pipeline.LinkOptions);

    return Transform(pipeline, joint, transform);
}
public ISubscriptionTag SubscribeTo(ISourceBlock<IMessage> source)
{
    this.connection = this.connectionFactory.CreateConnection();
    this.model = this.connection.CreateModel();

    var amqpBuilderBlock = new TransformBlock<IMessage, AmqpMessage>(
        (Func<IMessage, AmqpMessage>)amqpMessageBuilder.Serialize);
    var amqpSenderBlock = new ActionBlock<AmqpMessage>(
        (Action<AmqpMessage>)this.Send);

    this.link = source.LinkTo(amqpBuilderBlock);
    amqpBuilderBlock.LinkTo(amqpSenderBlock);

    return new SubscribingTag(Guid.NewGuid().ToString(), this.CancelSending);
}
internal static void InstallComplexPluginSystem(ISourceBlock<IrcMessage> source, ITargetBlock<IrcMessage> destination)
{
    // find all plugins implementing our IComplexPlugin interface, and create instances of them
    var plugins = Assembly.GetAssembly(typeof(IComplexPlugin))
        .GetTypes()
        .Where(t => typeof(IComplexPlugin).IsAssignableFrom(t) && !t.IsInterface)
        .Select(t => (IComplexPlugin)Activator.CreateInstance(t));

    // create a dataflow block for each plugin, and hook them up to the source/destination
    foreach (var plugin in plugins)
    {
        var pluginBlock = new TransformManyBlock<IrcMessage, IrcMessage>(msg => plugin.Respond(msg));
        source.LinkTo(pluginBlock, msg => plugin.Accepts(msg));
        pluginBlock.LinkTo(destination);
    }
}
private void LinkPreProcessing(DataflowLinkOptions overrideOptions = null)
{
    // Link Deserialize to DecryptBlock with predicate if it's not null.
    if (_decryptBlock != null)
    {
        LinkWithFaultRoute(_currentBlock, _decryptBlock, x => x.IsFaulted, overrideOptions ?? _linkStepOptions);
    }

    if (_decompressBlock != null)
    {
        LinkWithFaultRoute(_currentBlock, _decompressBlock, x => x.IsFaulted, overrideOptions ?? _linkStepOptions);
    }

    _currentBlock.LinkTo(_readyBuffer, overrideOptions ?? _linkStepOptions, x => !x.IsFaulted);
    SetCurrentSourceBlock(_readyBuffer); // Not needed
}
public static ISourceBlock<LoadedTx> LoadTxes(ICoreStorage coreStorage, ISourceBlock<LoadingTx> loadingTxes, CancellationToken cancelToken = default(CancellationToken))
{
    // split incoming LoadingTx by its number of inputs
    var createTxInputList = InitCreateTxInputList(cancelToken);

    // link the loading txes to the input splitter
    loadingTxes.LinkTo(createTxInputList, new DataflowLinkOptions { PropagateCompletion = true });

    // load each input, and return the fully loaded txes
    var loadTxInputAndReturnLoadedTx = InitLoadTxInputAndReturnLoadedTx(coreStorage, cancelToken);

    // link the input splitter to the input loader
    createTxInputList.LinkTo(loadTxInputAndReturnLoadedTx, new DataflowLinkOptions { PropagateCompletion = true });

    return loadTxInputAndReturnLoadedTx;
}
public static Task<bool> OutputAvailableAsync<TOutput> (
    this ISourceBlock<TOutput> source, CancellationToken cancellationToken)
{
    if (source == null)
        throw new ArgumentNullException ("source");

    cancellationToken.ThrowIfCancellationRequested ();

    if (source.Completion.IsCompleted || source.Completion.IsCanceled
        || source.Completion.IsFaulted)
        return Task.FromResult (false);

    var block = new OutputAvailableBlock<TOutput> ();
    var bridge = source.LinkTo (block,
        new DataflowLinkOptions { PropagateCompletion = true });
    return block.AsyncGet (bridge, cancellationToken);
}
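A typical consumer loop built on this helper (Process is a hypothetical handler):

while (await source.OutputAvailableAsync(cancellationToken))
{
    var item = source.Receive();
    Process(item);
}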
public SourceBlockAsyncEnumerator(ISourceBlock<T> source, CancellationToken cancellationToken)
{
    this._source = source;
    this._unlinker = source.LinkTo(this, new DataflowLinkOptions() { PropagateCompletion = true });
    this._cancellationToken = cancellationToken;

    // We should not run the continuation synchronously because the methods of ITargetBlock are called in the OutgoingLock of the source.
    this._taskHelper.RunContinuationsAsynchronously = true;

    if (cancellationToken.CanBeCanceled)
    {
        this._cancellationReg = cancellationToken.Register(state => ((IDataflowBlock)state).Complete(), this);
    }
}
/// <summary> /// Links to the specified <see cref="Func{T, TResult}" /> to receive a cross-sectional slice of project /// data, including detailed descriptions of what changed between snapshots, as described by /// specified rules. /// </summary> /// <param name="source"> /// The broadcasting block that produces the messages. /// </param> /// <param name="target"> /// The <see cref="Action{T}"/> to receive the broadcasts. /// </param> /// <param name="project"> /// The project related to the failure, if applicable. /// </param> /// <param name="suppressVersionOnlyUpdates"> /// A value indicating whether to prevent messages from propagating to the target /// block if no project changes are include other than an incremented version number. /// </param> /// <param name="ruleNames"> /// The names of the rules that describe the project data the caller is interested in. /// </param> /// <exception cref="ArgumentNullException"> /// <paramref name="source"/> is <see langword="null"/>. /// <para> /// -or- /// </para> /// <paramref name="target"/> is <see langword="null"/>. /// <para> /// -or- /// </para> /// <paramref name="project"/> is <see langword="null"/>. /// </exception> public static IDisposable LinkToAsyncAction( this ISourceBlock <IProjectVersionedValue <IProjectSubscriptionUpdate> > source, Func <IProjectVersionedValue <IProjectSubscriptionUpdate>, Task> target, UnconfiguredProject project, bool suppressVersionOnlyUpdates = true, params string[] ruleNames) { Requires.NotNull(source, nameof(source)); Requires.NotNull(target, nameof(target)); Requires.NotNull(project, nameof(project)); return(source.LinkTo(DataflowBlockFactory.CreateActionBlock(target, project, ProjectFaultSeverity.Recoverable), DataflowOption.PropagateCompletion, initialDataAsNew: true, suppressVersionOnlyUpdates: suppressVersionOnlyUpdates, ruleNames: ruleNames)); }
private async Task ConsumeQueue(SchemaType type, ISourceBlock<Beta.Group> source)
{
    var edfo = new ExecutionDataflowBlockOptions
    {
        MaxDegreeOfParallelism = MicrosoftTeamsMAConfigSection.Configuration.ImportThreads,
        CancellationToken = this.token,
    };

    ActionBlock<Beta.Group> action = new ActionBlock<Beta.Group>(async group =>
    {
        try
        {
            //if (this.ShouldFilterDelta(group, context))
            //{
            //    return;
            //}

            CSEntryChange c = this.GroupToCSEntryChange(group, type);

            if (c != null)
            {
                await this.GroupMemberToCSEntryChange(c, type);
                await this.TeamToCSEntryChange(c, type).ConfigureAwait(false);
                this.context.ImportItems.Add(c, this.token);
                await this.CreateChannelCSEntryChanges(group.Id, type);
            }
        }
        catch (Exception ex)
        {
            logger.Error(ex);
            CSEntryChange csentry = CSEntryChange.Create();
            csentry.DN = group.Id;
            csentry.ErrorCodeImport = MAImportError.ImportErrorCustomContinueRun;
            csentry.ErrorDetail = ex.StackTrace;
            csentry.ErrorName = ex.Message;
            this.context.ImportItems.Add(csentry, this.token);
        }
    }, edfo);

    source.LinkTo(action, new DataflowLinkOptions() { PropagateCompletion = true });

    await action.Completion;
}
public static Task<TOutput> ReceiveAsync<TOutput> (
    this ISourceBlock<TOutput> source, TimeSpan timeout,
    CancellationToken cancellationToken)
{
    if (source == null)
        throw new ArgumentNullException ("source");
    if (timeout.TotalMilliseconds < -1)
        throw new ArgumentOutOfRangeException ("timeout");
    if (timeout.TotalMilliseconds > int.MaxValue)
        throw new ArgumentOutOfRangeException ("timeout");

    cancellationToken.ThrowIfCancellationRequested ();

    int timeoutMilliseconds = (int)timeout.TotalMilliseconds;
    var block = new ReceiveBlock<TOutput> (cancellationToken, timeoutMilliseconds);
    var bridge = source.LinkTo (block);
    return block.AsyncGet (bridge);
}
public async Task CreateLinksAsync(AwsManager awsManager, ISourceBlock<Tuple<AnnotatedPath, IFileFingerprint>> blobSourceBlock, bool actuallyWrite, CancellationToken cancellationToken)
{
    var collectionBlocks = new Dictionary<string, ITargetBlock<Tuple<AnnotatedPath, IFileFingerprint>>>();
    var tasks = new List<Task>();

    var routeBlock = new ActionBlock<Tuple<AnnotatedPath, IFileFingerprint>>(async blob =>
    {
        var collection = blob.Item1.Collection;

        if (string.IsNullOrEmpty(collection))
            return;

        ITargetBlock<Tuple<AnnotatedPath, IFileFingerprint>> collectionBlock;
        if (!collectionBlocks.TryGetValue(collection, out collectionBlock))
        {
            var bufferBlock = new BufferBlock<Tuple<AnnotatedPath, IFileFingerprint>>();
            collectionBlock = bufferBlock;
            collectionBlocks[collection] = collectionBlock;

            var task = CreateLinksBlockAsync(awsManager, collection, bufferBlock, actuallyWrite, cancellationToken);
            tasks.Add(task);
        }

        await collectionBlock.SendAsync(blob, cancellationToken).ConfigureAwait(false);
    });

    blobSourceBlock.LinkTo(routeBlock, new DataflowLinkOptions { PropagateCompletion = true });

    await routeBlock.Completion.ConfigureAwait(false);

    Debug.WriteLine("S3LinkCreateor.CreateLinkAsync() routeBlock is done");

    foreach (var block in collectionBlocks.Values)
        block.Complete();

    await Task.WhenAll(tasks).ConfigureAwait(false);

    Debug.WriteLine("S3LinkCreateor.CreateLinkAsync() all link blocks are done");
}
/// <summary>
/// Connects source block which publishes list of <see cref="ResourceEvent{TResource}"/> to action block
/// which invokes processing function specified by <paramref name="action"/> for each received item.
/// </summary>
/// <param name="workerQueue">The source action block to attach to</param>
/// <param name="action">The action to invoke for each received batch of <see cref="ResourceEvent{TResource}"/></param>
/// <param name="parallelWorkers">Number of allowed parallel invocations of <paramref name="action"/>. Default is 1</param>
/// <typeparam name="TResource">The resource type</typeparam>
/// <returns>The disposable that disconnects from the <paramref name="workerQueue"/> when disposed of</returns>
public static IDisposable ProcessWith<TResource>(this ISourceBlock<List<ResourceEvent<TResource>>> workerQueue, Func<List<ResourceEvent<TResource>>, Task> action, ILogger logger, int parallelWorkers = 1)
{
    var actionBlock = new ActionBlock<List<ResourceEvent<TResource>>>(action, new ExecutionDataflowBlockOptions
    {
        BoundedCapacity = parallelWorkers, // don't buffer more messages than we are actually able to work on
        MaxDegreeOfParallelism = parallelWorkers
    });

    actionBlock.Completion.ContinueWith(x =>
    {
        if (x.IsFaulted)
        {
            logger.LogCritical(x.Exception.Flatten(), "Controller encountered a critical error");
        }
    });

    return workerQueue.LinkTo(actionBlock, new DataflowLinkOptions { PropagateCompletion = true });
}
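A wiring sketch for ProcessWith; the resource type, source block, and handler below are hypothetical:

IDisposable link = eventSource.ProcessWith<MyResource>(
    async batch =>
    {
        foreach (var resourceEvent in batch)
            await HandleAsync(resourceEvent); // hypothetical per-event handler
    },
    logger,
    parallelWorkers: 2);

// Dispose the link to disconnect the worker from the queue.
link.Dispose();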
public static Task<TOutput> ReceiveAsync<TOutput> (this ISourceBlock<TOutput> source, TimeSpan timeout, CancellationToken cancellationToken)
{
    if (source == null)
    {
        throw new ArgumentNullException("source");
    }
    if (timeout.TotalMilliseconds < -1)
    {
        throw new ArgumentOutOfRangeException("timeout");
    }

    cancellationToken.ThrowIfCancellationRequested();

    long tm = (long)timeout.TotalMilliseconds;
    ReceiveBlock<TOutput> block = new ReceiveBlock<TOutput>();
    var bridge = source.LinkTo(block);
    return block.AsyncGet(bridge, cancellationToken, tm);
}
public ServiceDiscovery(string serviceName,
    ReachabilityChecker reachabilityChecker,
    IRemoteHostPoolFactory remoteHostPoolFactory,
    IDiscoverySourceLoader serviceDiscoveryLoader,
    ISourceBlock<DiscoveryConfig> configListener,
    Func<DiscoveryConfig> discoveryConfigFactory,
    ILog log)
{
    Log = log;
    _serviceName = serviceName;
    _originatingDeployment = new DeploymentIdentifier(serviceName);
    _masterDeployment = new DeploymentIdentifier(serviceName);

    _reachabilityChecker = reachabilityChecker;
    _remoteHostPoolFactory = remoteHostPoolFactory;
    _serviceDiscoveryLoader = serviceDiscoveryLoader;

    _initTask = Task.Run(() => ReloadRemoteHost(discoveryConfigFactory()));
    _configBlockLink = configListener.LinkTo(new ActionBlock<DiscoveryConfig>(ReloadRemoteHost));
}
public ServiceDiscovery(string serviceName,
    ReachabilityChecker reachabilityChecker,
    IRemoteHostPoolFactory remoteHostPoolFactory,
    IDiscoverySourceLoader discoverySourceLoader,
    IEnvironmentVariableProvider environmentVariableProvider,
    ISourceBlock<DiscoveryConfig> configListener,
    Func<DiscoveryConfig> discoveryConfigFactory)
{
    _serviceName = serviceName;
    _originatingDeployment = new ServiceDeployment(serviceName, environmentVariableProvider.DeploymentEnvironment);
    _masterDeployment = new ServiceDeployment(serviceName, MASTER_ENVIRONMENT);

    _reachabilityChecker = reachabilityChecker;
    _remoteHostPoolFactory = remoteHostPoolFactory;
    _discoverySourceLoader = discoverySourceLoader;

    // Must be run in Task.Run() because of incorrect Orleans scheduling
    _initTask = Task.Run(() => ReloadRemoteHost(discoveryConfigFactory()));
    _configBlockLink = configListener.LinkTo(new ActionBlock<DiscoveryConfig>(ReloadRemoteHost));
}
private async Task BlockKeepsPostponedMessages(Func<ISourceBlock<int>> BlockFactory)
{
    // This test requires longer timeout than most of the other tests in this suite, otherwise it occasionally fails on slower machines
    // If it fails nevertheless, switch to custom TaskScheduler.
    TimeSpan longArrivalTimeout = TimeSpan.FromMilliseconds(MessageArrivalTimeout.TotalMilliseconds * 10.0);

    ISourceBlock<int> block = BlockFactory();
    ITargetBlock<int> blockT = (ITargetBlock<int>)block;
    TestTargetBlock<int> testTarget = new TestTargetBlock<int>();
    testTarget.ConsumptionMode = DataflowMessageStatus.Postponed;
    block.LinkTo(testTarget, PropagateCompletion);

    // Assumption: if the target of the block is postponing messages,
    // the block will accept incoming messages until it runs out of capacity.
    Assert.True(blockT.Post(1));
    Assert.True(blockT.Post(2));
    Assert.True(blockT.Post(3));

    // Out of capacity
    Assert.False(blockT.Post(4));

    // However SendAsync() will allow postponing the message, so the message will be eventually delivered
    blockT.SendAsync(5).Forget();

    // Wait till the block offers a message
    // Assumption: only one message will be offered, the block will not offer more messages if the target postpones
    bool messageOffered = await TaskUtils.PollWaitAsync(() => testTarget.MessagesPostponed.Count == 1, longArrivalTimeout);
    Assert.True(messageOffered);

    // Assumption: once the block target stops postponing, the block will keep pushing data to target
    // until it runs out of buffered messages.
    testTarget.ConsumptionMode = DataflowMessageStatus.Accepted;
    testTarget.ConsumePostponedMessages();

    bool gotAllMessages = await TaskUtils.PollWaitAsync(() => testTarget.MessagesConsumed.Count == 4, longArrivalTimeout);
    Assert.True(gotAllMessages, "We should have gotten 4 messages");
    Assert.Equal(testTarget.MessagesConsumed.OrderBy((i) => i), new int[] { 1, 2, 3, 5 });
}
public static async Task ValidateBlockAsync(ICoreStorage coreStorage, IBlockchainRules rules, ChainedHeader chainedHeader, ISourceBlock<LoadedTx> loadedTxes, CancellationToken cancelToken = default(CancellationToken))
{
    // validate merkle root
    var merkleStream = new MerkleStream();
    var merkleValidator = InitMerkleValidator(chainedHeader, merkleStream, cancelToken);

    // begin feeding the merkle validator
    loadedTxes.LinkTo(merkleValidator, new DataflowLinkOptions { PropagateCompletion = true });

    // validate transactions
    var txValidator = InitTxValidator(rules, chainedHeader, cancelToken);

    // begin feeding the tx validator
    merkleValidator.LinkTo(txValidator, new DataflowLinkOptions { PropagateCompletion = true });

    // validate scripts
    var scriptValidator = InitScriptValidator(rules, chainedHeader, cancelToken);

    // begin feeding the script validator
    txValidator.LinkTo(scriptValidator, new DataflowLinkOptions { PropagateCompletion = true });

    await merkleValidator.Completion;
    await txValidator.Completion;
    await scriptValidator.Completion;

    if (!rules.BypassPrevTxLoading)
    {
        try
        {
            merkleStream.FinishPairing();
        }
        //TODO
        catch (InvalidOperationException)
        {
            throw CreateMerkleRootException(chainedHeader);
        }

        if (merkleStream.RootNode.Hash != chainedHeader.MerkleRoot)
            throw CreateMerkleRootException(chainedHeader);
    }
}
async Task TransformAnnotatedPathsToFileFingerprint(ISourceBlock<AnnotatedPath[]> annotatedPathSourceBlock, ITargetBlock<IFileFingerprint> fileFingerprintTargetBlock, CancellationToken cancellationToken)
{
    try
    {
        var targets = new ConcurrentDictionary<string, TransformBlock<AnnotatedPath, IFileFingerprint>>(StringComparer.InvariantCultureIgnoreCase);

        var routeBlock = new ActionBlock<AnnotatedPath[]>(
            async filenames =>
            {
                foreach (var filename in filenames)
                {
                    if (null == filename)
                        continue;

                    var cachedBlob = GetCachedFileFingerprint(filename.FileInfo);

                    if (null != cachedBlob)
                    {
                        await fileFingerprintTargetBlock.SendAsync(cachedBlob, cancellationToken).ConfigureAwait(false);
                        continue;
                    }

                    var host = PathUtil.GetHost(filename.FileInfo.FullName);

                    TransformBlock<AnnotatedPath, IFileFingerprint> target;
                    while (!targets.TryGetValue(host, out target))
                    {
                        target = new TransformBlock<AnnotatedPath, IFileFingerprint>(
                            annotatedPath => ProcessFileAsync(annotatedPath.FileInfo, cancellationToken),
                            new ExecutionDataflowBlockOptions
                            {
                                MaxDegreeOfParallelism = 5,
                                CancellationToken = cancellationToken
                            });

                        if (!targets.TryAdd(host, target))
                            continue;

                        Debug.WriteLine($"FileFingerprintManager.GenerateBlobsAsync() starting reader for host: '{host}'");

                        target.LinkTo(fileFingerprintTargetBlock, blob => null != blob);
                        target.LinkTo(DataflowBlock.NullTarget<IFileFingerprint>());

                        break;
                    }

                    //Debug.WriteLine($"FileFingerprintManager.GenerateFileFingerprintsAsync() Sending {annotatedPath} for host '{host}'");

                    await target.SendAsync(filename, cancellationToken).ConfigureAwait(false);
                }
            },
            new ExecutionDataflowBlockOptions
            {
                MaxDegreeOfParallelism = 16,
                CancellationToken = cancellationToken
            });

        var distinctPaths = new HashSet<string>(StringComparer.InvariantCultureIgnoreCase);

        var distinctBlock = new TransformBlock<AnnotatedPath[], AnnotatedPath[]>(
            annotatedPaths =>
            {
                for (var i = 0; i < annotatedPaths.Length; ++i)
                {
                    if (!distinctPaths.Add(annotatedPaths[i].FileInfo.FullName))
                        annotatedPaths[i] = null;
                }

                return annotatedPaths;
            },
            new ExecutionDataflowBlockOptions
            {
                MaxDegreeOfParallelism = 1,
                CancellationToken = cancellationToken
            });

        distinctBlock.LinkTo(routeBlock, new DataflowLinkOptions { PropagateCompletion = true });
        annotatedPathSourceBlock.LinkTo(distinctBlock, new DataflowLinkOptions { PropagateCompletion = true });

        await routeBlock.Completion.ConfigureAwait(false);

        foreach (var target in targets.Values)
            target.Complete();

        await Task.WhenAll(targets.Values.Select(target => target.Completion));
    }
    catch (Exception ex)
    {
        Console.WriteLine("FileFingerprintManager.GenerateFileFingerprintsAsync() failed: " + ex.Message);
    }
    finally
    {
        Debug.WriteLine("FileFingerprintManager.GenerateFileFingerprintsAsync() is done");

        fileFingerprintTargetBlock.Complete();
    }
}
public Task UploadBlobsAsync(AwsManager awsManager, ISourceBlock<Tuple<IFileFingerprint, AnnotatedPath>> uniqueBlobBlock, IReadOnlyDictionary<string, string> knowObjects, CancellationToken cancellationToken)
{
    var blobCount = 0;
    var blobTotalSize = 0L;

    var builderBlock = new TransformBlock<Tuple<IFileFingerprint, AnnotatedPath>, S3Blobs.IUploadBlobRequest>(
        tuple =>
        {
            string etag;
            var exists = knowObjects.TryGetValue(tuple.Item1.Fingerprint.Key(), out etag);

            //Debug.WriteLine($"{tuple.Item1.FullFilePath} {(exists ? "already exists" : "scheduled for upload")}");

            if (exists)
            {
                // We can't check multipart uploads this way since we don't know the size
                // of the individual parts.
                if (etag.Contains("-"))
                {
                    Debug.WriteLine($"{tuple.Item1.FullFilePath} is a multi-part upload with ETag {etag} {tuple.Item1.Fingerprint.Key().Substring(0, 12)}");

                    return null;
                }

                var expectedETag = tuple.Item1.Fingerprint.S3ETag();

                if (string.Equals(expectedETag, etag, StringComparison.InvariantCultureIgnoreCase))
                    return null;

                Console.WriteLine($"ERROR: {tuple.Item1.FullFilePath} tag mismatch {etag}, expected {expectedETag} {tuple.Item1.Fingerprint.Key().Substring(0, 12)}");
            }

            var request = awsManager.BuildUploadBlobRequest(tuple);

            if (null == request)
                return null;

            Interlocked.Increment(ref blobCount);
            Interlocked.Add(ref blobTotalSize, request.FileFingerprint.Fingerprint.Size);

            return request;
        },
        new ExecutionDataflowBlockOptions
        {
            CancellationToken = cancellationToken,
            MaxDegreeOfParallelism = Environment.ProcessorCount
        });

    var uploader = new ActionBlock<S3Blobs.IUploadBlobRequest>(
        blob => UploadBlobAsync(awsManager, blob, cancellationToken),
        new ExecutionDataflowBlockOptions
        {
            MaxDegreeOfParallelism = 4,
            CancellationToken = cancellationToken
        });

    builderBlock.LinkTo(uploader, new DataflowLinkOptions { PropagateCompletion = true }, r => null != r);
    builderBlock.LinkTo(DataflowBlock.NullTarget<S3Blobs.IUploadBlobRequest>());

    uniqueBlobBlock.LinkTo(builderBlock, new DataflowLinkOptions { PropagateCompletion = true });

    var tasks = new List<Task>();

#if DEBUG
    var uploadDoneTask = uploader.Completion
        .ContinueWith(
            _ => Debug.WriteLine($"Done uploading blobs: {blobCount} items {SizeConversion.BytesToGiB(blobTotalSize):F2}GiB"),
            cancellationToken);

    tasks.Add(uploadDoneTask);
#endif

    tasks.Add(uploader.Completion);

    return Task.WhenAll(tasks);
}
public ISourceBlock<ValidatableTx> CalculateUtxo(IChainStateCursor chainStateCursor, Chain chain, ISourceBlock<DecodedBlockTx> blockTxes, CancellationToken cancelToken = default(CancellationToken))
{
    var chainedHeader = chain.LastBlock;
    var blockSpentTxes = new BlockSpentTxesBuilder();

    var utxoCalculator = new TransformBlock<DecodedBlockTx, ValidatableTx>(
        blockTx =>
        {
            var tx = blockTx.Transaction;
            var txIndex = blockTx.Index;

            var prevTxOutputs = ImmutableArray.CreateBuilder<PrevTxOutput>(!blockTx.IsCoinbase ? tx.Inputs.Length : 0);

            //TODO apply real coinbase rule
            // https://github.com/bitcoin/bitcoin/blob/481d89979457d69da07edd99fba451fd42a47f5c/src/core.h#L219
            if (!blockTx.IsCoinbase)
            {
                // spend each of the transaction's inputs in the utxo
                for (var inputIndex = 0; inputIndex < tx.Inputs.Length; inputIndex++)
                {
                    var input = tx.Inputs[inputIndex];
                    var prevTxOutput = this.Spend(chainStateCursor, txIndex, tx, inputIndex, input, chainedHeader, blockSpentTxes);

                    prevTxOutputs.Add(prevTxOutput);
                }
            }

            // there exist two duplicate coinbases in the blockchain, which the design assumes to be impossible
            // ignore the first occurrences of these duplicates so that they do not need to later be deleted from the utxo, an unsupported operation
            // no other duplicates will occur again, it is now disallowed
            var isDupeCoinbase = IsDupeCoinbase(chainedHeader, tx);

            // add transaction's outputs to utxo, except for the genesis block and the duplicate coinbases
            if (chainedHeader.Height > 0 && !isDupeCoinbase)
            {
                // mint the transaction's outputs in the utxo
                this.Mint(chainStateCursor, tx, txIndex, chainedHeader);

                // increase unspent output count
                chainStateCursor.UnspentOutputCount += tx.Outputs.Length;

                // increment unspent tx count
                chainStateCursor.UnspentTxCount++;

                chainStateCursor.TotalTxCount++;
                chainStateCursor.TotalInputCount += tx.Inputs.Length;
                chainStateCursor.TotalOutputCount += tx.Outputs.Length;
            }

            return new ValidatableTx(blockTx, chainedHeader, prevTxOutputs.MoveToImmutable());
        },
        new ExecutionDataflowBlockOptions { CancellationToken = cancelToken });

    blockTxes.LinkTo(utxoCalculator, new DataflowLinkOptions { PropagateCompletion = true });

    return OnCompleteBlock.Create(utxoCalculator, () =>
    {
        if (!chainStateCursor.TryAddBlockSpentTxes(chainedHeader.Height, blockSpentTxes.ToImmutable()))
            throw new ValidationException(chainedHeader.Hash);
    }, cancelToken);
}
async Task CreateLinksBlockAsync(AwsManager awsManager, string collection, ISourceBlock<Tuple<AnnotatedPath, IFileFingerprint>> collectionBlock, bool actuallyWrite, CancellationToken cancellationToken)
{
    var links = await awsManager.GetLinksAsync(collection, cancellationToken).ConfigureAwait(false);

    Debug.WriteLine($"Link handler for {collection} found {links.Count} existing links");

    var createLinkBlock = new ActionBlock<S3Links.ICreateLinkRequest>(
        link => CreateLinkAsync(awsManager, link, actuallyWrite, cancellationToken),
        new ExecutionDataflowBlockOptions
        {
            MaxDegreeOfParallelism = 512,
            CancellationToken = cancellationToken
        });

    var makeLinkBlock = new TransformBlock<Tuple<AnnotatedPath, IFileFingerprint>, S3Links.ICreateLinkRequest>(
        tuple =>
        {
            var path = tuple.Item1;
            var file = tuple.Item2;

            if (collection != path.Collection)
                throw new InvalidOperationException($"Create link for {path.Collection} on {collection}");

            var relativePath = path.RelativePath;

            if (relativePath.StartsWith(".."))
                throw new InvalidOperationException($"Create link for invalid path {relativePath}");

            if (relativePath.StartsWith("file:", StringComparison.OrdinalIgnoreCase))
                throw new InvalidOperationException($"Create link for invalid path {relativePath}");

            relativePath = relativePath.Replace('\\', '/');

            if (relativePath.StartsWith("/", StringComparison.Ordinal))
                throw new InvalidOperationException($"Create link for invalid path {relativePath}");

            string eTag;
            links.TryGetValue(relativePath, out eTag);

            return awsManager.BuildLinkRequest(collection, relativePath, file, eTag);
        },
        new ExecutionDataflowBlockOptions
        {
            CancellationToken = cancellationToken,
            MaxDegreeOfParallelism = Environment.ProcessorCount
        });

    makeLinkBlock.LinkTo(createLinkBlock, new DataflowLinkOptions { PropagateCompletion = true }, link => null != link);
    makeLinkBlock.LinkTo(DataflowBlock.NullTarget<S3Links.ICreateLinkRequest>());

    collectionBlock.LinkTo(makeLinkBlock, new DataflowLinkOptions { PropagateCompletion = true });

    await createLinkBlock.Completion.ConfigureAwait(false);

    Debug.WriteLine($"Link handler for {collection} is done");
}
private void ExtractData(ISourceBlock<List<Citizen>> source)
{
    var extractData = new ActionBlock<List<Citizen>>(
        (citizens) =>
        {
            var today = DateTime.Today;
            Dictionary<string, int> localNameFrequency = new Dictionary<string, int>();
            Dictionary<int, int> localMonthFrequency = new Dictionary<int, int>();

            foreach (Citizen citizen in citizens)
            {
                var bday = citizen.Birthday;
                int age = today.Year - bday.Year;
                if (bday > today.AddYears(-age))
                {
                    age--;
                }

                Interlocked.Add(ref totalAge, age);
                Interlocked.Increment(ref peopleCount);

                string name = citizen.Firstname;
                if (!localNameFrequency.ContainsKey(name))
                {
                    localNameFrequency[name] = 0;
                }
                localNameFrequency[name]++;

                int month = citizen.Birthday.Month;
                if (!localMonthFrequency.ContainsKey(month))
                {
                    localMonthFrequency[month] = 0;
                }
                localMonthFrequency[month]++;
            }

            mutex.WaitOne();
            foreach (string key in localNameFrequency.Keys)
            {
                if (!globalNameFrequency.ContainsKey(key))
                {
                    globalNameFrequency[key] = 0;
                }
                globalNameFrequency[key] += localNameFrequency[key];
            }
            mutex.ReleaseMutex();

            mutex.WaitOne();
            foreach (int key in localMonthFrequency.Keys)
            {
                if (!globalMonthFrequency.ContainsKey(key))
                {
                    globalMonthFrequency[key] = 0;
                }
                globalMonthFrequency[key] += localMonthFrequency[key];
            }
            mutex.ReleaseMutex();
        },
        new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = Environment.ProcessorCount });

    source.LinkTo(extractData, new DataflowLinkOptions { PropagateCompletion = true });
    extractionCompletion = extractData.Completion;
}
Task StoreFileFingerprintsAsync(ISourceBlock<IFileFingerprint[]> storeBatchBlock, CancellationToken cancellationToken)
{
    var block = new ActionBlock<IFileFingerprint[]>(
        fileFingerprints => WriteBlobsAsync(fileFingerprints, cancellationToken),
        new ExecutionDataflowBlockOptions { CancellationToken = cancellationToken });

    storeBatchBlock.LinkTo(block, new DataflowLinkOptions { PropagateCompletion = true });

    return block.Completion;
}
// Demonstrates the consumption end of the producer and consumer pattern.
public async Task<Parse> AscTransConsumerAsync(ISourceBlock<Parse> source)
{
    // counter to track the number of items that are processed
    Int64 count = 0;
    var parse = new Parse();

    var actionBlock = new ActionBlock<Parse>(
        data =>
        {
            ProcessDataBuffer(data);

            // count has to be accessed in a thread-safe manner;
            // be careful about using Interlocked,
            // for more complicated computations, locking might be more appropriate
            Interlocked.Increment(ref count);
        },
        // some small constant might be better than Unbounded, depending on circumstances
        new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = 5 }); //DataflowBlockOptions.Unbounded});

    source.LinkTo(actionBlock, new DataflowLinkOptions { PropagateCompletion = true });

    // this assumes source will be completed when done,
    // you need to call ascbuffer.Complete() after AscBufferProducer() for this
    await actionBlock.Completion;

    return parse;
}
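The producer side this consumer expects might look like the following sketch; AscBufferProducer is the hypothetical producer mentioned in the comment above:

var ascbuffer = new BufferBlock<Parse>();

// Start the consumer first so it is linked and waiting.
var consumerTask = AscTransConsumerAsync(ascbuffer);

AscBufferProducer(ascbuffer); // posts Parse items into the buffer
ascbuffer.Complete();         // completion propagates to the consumer's ActionBlock

var result = await consumerTask;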