Example #1
		public void BasicUsageTest ()
		{
			Tuple<int, int> tuple = null;
			var evt = new ManualResetEventSlim (false);

			var ablock = new ActionBlock<Tuple<int, int>> (t =>
			{
				tuple = t;
				evt.Set ();
			});
			var block = new JoinBlock<int, int> ();
			block.LinkTo (ablock);

			block.Target1.Post (42);

			evt.Wait (1000);
			Assert.IsNull (tuple);

			block.Target2.Post (24);

			evt.Wait ();
			Assert.IsNotNull (tuple);
			Assert.AreEqual (42, tuple.Item1);
			Assert.AreEqual (24, tuple.Item2);
		}
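A minimal asynchronous variation of the same pairing, as a sketch (it assumes `using System.Threading.Tasks.Dataflow;` and the same NUnit-style asserts; the method name is illustrative): the joined tuple can be awaited directly with SendAsync/ReceiveAsync instead of linking an ActionBlock and signalling an event.
		public async Task BasicUsageAsyncSketch ()
		{
			var block = new JoinBlock<int, int> ();

			// SendAsync completes once the join has taken the value.
			await block.Target1.SendAsync (42);
			await block.Target2.SendAsync (24);

			// ReceiveAsync yields the next joined tuple.
			Tuple<int, int> tuple = await block.ReceiveAsync ();
			Assert.AreEqual (42, tuple.Item1);
			Assert.AreEqual (24, tuple.Item2);
		}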
Example #2
		public void DeadlockTest ()
		{
			Tuple<int, int> tuple = null;
			var evt = new ManualResetEventSlim (false);

			var ablock = new ActionBlock<Tuple<int, int>> (t =>
			{
				tuple = t;
				evt.Set ();
			});
			var block = new JoinBlock<int, int> ();
			block.LinkTo (ablock);

			Task.Factory.StartNew (() => block.Target1.Post (42));
			Task.Factory.StartNew (() => block.Target2.Post (24));

			Assert.IsTrue (evt.Wait (1000));
			Assert.IsNotNull (tuple);
			Assert.AreEqual (42, tuple.Item1);
			Assert.AreEqual (24, tuple.Item2);
		}
Example #3
        static void Main(string[] args)
        {
            // Create three BufferBlock<T> objects. Each object holds a different
            // type of resource.
            var networkResources = new BufferBlock <NetworkResource>();
            var fileResources    = new BufferBlock <FileResource>();
            var memoryResource   = new BufferBlock <MemoryResource>();

            // Create two non-greedy JoinBlock<T1, T2> objects.
            // The first join works with network and memory resources;
            // the second join works with file and memory resources.
            var joinNetworkAndMemoryResources = new JoinBlock <NetworkResource, MemoryResource>(
                new GroupingDataflowBlockOptions {
                Greedy = false
            });
            // A non-greedy join block postpones all incoming messages until one is available from each source, which uses the scarce memory resources efficiently and prevents deadlock.
            var joinFileAndMemoryResources = new JoinBlock <FileResource, MemoryResource>(
                new GroupingDataflowBlockOptions {
                Greedy = false
            });

            // Create two ActionBlock<T> objects.
            // The first block acts on a network resource and a memory resource.
            // The second block acts on a file resource and a memory resource.
            var networkMemoryAction = new ActionBlock <Tuple <NetworkResource, MemoryResource> >(
                data =>
            {
                Console.WriteLine("Networker using resource....");
                Thread.Sleep(new Random().Next(500, 2000));
                Console.WriteLine("Networker finish using resource.");
                // Release the resources back to their respective pools.
                networkResources.Post(data.Item1);
                memoryResource.Post(data.Item2);
            }
                );
            var fileMemoryAction = new ActionBlock <Tuple <FileResource, MemoryResource> >(
                data =>
            {
                Console.WriteLine("Fileworker using resource....");
                Thread.Sleep(new Random().Next(500, 2000));
                Console.WriteLine("Fileworker finish using resource.");
                // Release the resources back to their respective pools.
                fileResources.Post(data.Item1);
                memoryResource.Post(data.Item2);
            }
                );

            // Link the resource pools to the JoinBlock<T1, T2> objects.
            // Because these join blocks operate in non-greedy mode, they do not
            // take a resource from a pool until a resource is available from
            // every pool feeding the join.
            networkResources.LinkTo(joinNetworkAndMemoryResources.Target1);
            memoryResource.LinkTo(joinNetworkAndMemoryResources.Target2);

            fileResources.LinkTo(joinFileAndMemoryResources.Target1);
            memoryResource.LinkTo(joinFileAndMemoryResources.Target2);

            // Link the JoinBlock<T1, T2> objects to the ActionBlock<T> objects.
            joinNetworkAndMemoryResources.LinkTo(networkMemoryAction);
            joinFileAndMemoryResources.LinkTo(fileMemoryAction);

            // Populate the resource pools. In this example, network and
            // file resources are more abundant than memory resources.
            networkResources.Post(new NetworkResource());
            networkResources.Post(new NetworkResource());
            networkResources.Post(new NetworkResource());

            memoryResource.Post(new MemoryResource());
            memoryResource.Post(new MemoryResource());

            fileResources.Post(new FileResource());
            fileResources.Post(new FileResource());
            fileResources.Post(new FileResource());

            Thread.Sleep(10000);
            Console.WriteLine("Press any key to exit.");
            Console.ReadKey();
        }
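The NetworkResource, FileResource, and MemoryResource types referenced above are not part of this snippet. For the pattern shown they only need to be distinct types the pools can hold; a minimal sketch of what the example assumes:
        // Sketch: minimal resource types assumed by the example above.
        // Marker classes are enough; the pools only pass the instances around.
        class NetworkResource { }
        class FileResource { }
        class MemoryResource { }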
Example #4
        public async Task BuffersToNonGreedyJoinToAction()
        {
            var b1 = new BufferBlock<string>();
            var b2 = new BufferBlock<int>();
            var j = new JoinBlock<string, int>(new GroupingDataflowBlockOptions { Greedy = false });
            b1.LinkTo(j.Target1, new DataflowLinkOptions { PropagateCompletion = true });
            b2.LinkTo(j.Target2, new DataflowLinkOptions { PropagateCompletion = true });
            var a = new ActionBlock<Tuple<string, int>>(t => Assert.True((t.Item1 == t.Item2.ToString())));
            j.LinkTo(a, new DataflowLinkOptions { PropagateCompletion = true });

            for (int i = 0; i < Iterations; i++)
            {
                b1.Post(i.ToString());
                b2.Post(i);
            }
            b1.Complete();
            b2.Complete();

            await a.Completion;
        }
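Routing input through BufferBlock<T> instances, as this snippet does, matters for non-greedy joins: a non-greedy JoinBlock target postpones every message offered to it, so a direct Post is declined, while SendAsync (or an upstream buffer the join can reserve from) succeeds. A small sketch of that difference, assuming `using System.Threading.Tasks.Dataflow;` and xunit-style asserts (the method name is illustrative):
        public async Task NonGreedyPostVersusSendAsyncSketch()
        {
            var join = new JoinBlock<string, int>(
                new GroupingDataflowBlockOptions { Greedy = false });

            // A non-greedy target postpones offered messages, and Post cannot be
            // postponed, so the message is declined and Post returns false.
            Assert.False(join.Target1.Post("1"));

            // SendAsync keeps the message available until the join can consume it,
            // so both sends complete with true once a full pair can be formed.
            var send1 = join.Target1.SendAsync("1");
            var send2 = join.Target2.SendAsync(1);

            var pair = await join.ReceiveAsync();
            Assert.True(pair.Item1 == "1" && pair.Item2 == 1);
            await Task.WhenAll(send1, send2);
        }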
Example #5
        public ProductsPipeline()
        {
            var firstBlock = new BufferBlock <GetProductQuery>(
                new ExecutionDataflowBlockOptions
            {
                MaxDegreeOfParallelism    = 20,
                SingleProducerConstrained = true
            });

            var storeBlock = new TransformBlock <GetProductQuery, List <Store> >(
                cmd => new ServiceStores().GetStoresByProductId(cmd.ProductId),
                new ExecutionDataflowBlockOptions
            {
                MaxDegreeOfParallelism = 20
            });

            var priceBlock = new TransformBlock <GetProductQuery, double>(
                cmd => new ServicePrices().GetPriceByProductId(cmd.ProductId),
                new ExecutionDataflowBlockOptions
            {
                MaxDegreeOfParallelism = 20
            });

            var productBlock = new TransformBlock <GetProductQuery, Product>(
                cmd => new ServiceProducts().GetProductAsync(cmd.ProductId),
                new ExecutionDataflowBlockOptions
            {
                MaxDegreeOfParallelism = 20
            });

            var joinBlock = new JoinBlock <Product, List <Store>, double>(new GroupingDataflowBlockOptions
            {
                Greedy = false
            });

            var finalJoinBlock = new JoinBlock <Tuple <Product, List <Store>, double>, GetProductQuery>(new GroupingDataflowBlockOptions
            {
                Greedy = false
            });

            var finalBlock = new ActionBlock <Tuple <Tuple <Product, List <Store>, double>, GetProductQuery> >(tuple =>
            {
                tuple.Item2.SetResult(new ProductQueryResult
                {
                    Product = tuple.Item1.Item1,
                    Stores  = tuple.Item1.Item2,
                    Price   = tuple.Item1.Item3
                });
            },
                                                                                                               new ExecutionDataflowBlockOptions
            {
                EnsureOrdered             = true,
                MaxDegreeOfParallelism    = 20,
                SingleProducerConstrained = false
            });

            firstBlock.LinkTo(storeBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            firstBlock.LinkTo(productBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            firstBlock.LinkTo(priceBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            firstBlock.LinkTo(finalJoinBlock.Target2, new DataflowLinkOptions {
                PropagateCompletion = false
            });

            productBlock.LinkTo(joinBlock.Target1, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            storeBlock.LinkTo(joinBlock.Target2, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            priceBlock.LinkTo(joinBlock.Target3, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            joinBlock.LinkTo(finalJoinBlock.Target1, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            finalJoinBlock.LinkTo(finalBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            this.finalBlock = finalBlock;
            this.firstBlock = firstBlock;
        }
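One detail worth checking in a pipeline shaped like this: a BufferBlock<T> hands each message to the first linked target that accepts it, it does not copy the message to every link, so a query posted to firstBlock reaches only one of the four targets linked to it. Fanning a single GetProductQuery out to several blocks is usually done with a BroadcastBlock<T>; a sketch of that wiring, reusing the block variables above (note that BroadcastBlock keeps only its most recent message, so this assumes the downstream blocks keep up):
            // Sketch: fan-out with a BroadcastBlock so every branch sees each query.
            var fanOut = new BroadcastBlock <GetProductQuery>(query => query);

            fanOut.LinkTo(storeBlock, new DataflowLinkOptions { PropagateCompletion = true });
            fanOut.LinkTo(productBlock, new DataflowLinkOptions { PropagateCompletion = true });
            fanOut.LinkTo(priceBlock, new DataflowLinkOptions { PropagateCompletion = true });
            fanOut.LinkTo(finalJoinBlock.Target2, new DataflowLinkOptions { PropagateCompletion = false });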
Example #6
        // DataFlow baseline. Similar CPU & throughput results, but memory keeps growing.
        public void RunDataFlowPipeline <T>(DeliveryPolicy policy, Func <int, T> create, Func <int, T, T> initialize, Func <T, T, T> increment, Func <T, T, T, T> add, Func <T, int> extract, bool validateNoLoss, bool validateSync)
        {
            int resultCount = 0;
            var dfo         = new DataflowLinkOptions();

            dfo.Append = true;
            List <object> saved = new List <object>();

            // create several parallel branches of components
            var branches = new ISourceBlock <Wrap <T> > [ParallelBranchCount];
            var sources  = new Time.TimerDelegate[SourceCount];

            for (int i = 0; i < SourceCount; i++)
            {
                // make a timer for each source
                var timerSeqId = 0;
                var timer      = new TransformBlock <int, int>(ts => timerSeqId++);
                sources[i] = new Time.TimerDelegate((uint timerID, uint msg, UIntPtr userCtx, UIntPtr dw1, UIntPtr dw2) => timer.Post(i));
                saved.Add(timer);

                // branch and generate data
                for (int k = 0; k < ParallelBranchMultiplier; k++)
                {
                    int b        = (i * ParallelBranchMultiplier) + k;
                    var initInst = new Wrap <T>(create(b), 0);
                    var init     = new TransformBlock <int, Wrap <T> >(seqId => initInst = new Wrap <T>(initialize(seqId, initInst.Inner), seqId).DeepClone());
                    timer.LinkTo(init, dfo);
                    branches[b] = init;
                    saved.Add(init);

                    // apply a sequence of transforms
                    for (int j = 0; j < TransformCount; j++)
                    {
                        var incInst = new Wrap <T>(create(b), 0);
                        var inc     = new TransformBlock <Wrap <T>, Wrap <T> >(src => incInst = new Wrap <T>(increment(incInst.Inner, src.Inner), src.ExpectedResult + 1).DeepClone());
                        branches[b].LinkTo(inc, dfo);
                        branches[b] = inc;
                        saved.Add(inc);
                    }

                    // make sure we didn't lose messages
                    // branches[b] = branches[b].DoT(m => CheckMessageId(m.SequenceId + TransformCount, m.Data.ExpectedResult, validateNoLoss), true, true);
                }
            }

            // join all
            var fullJoin = branches[0];

            for (int i = 1; i < ParallelBranchCount; i++)
            {
                var joinGo = new GroupingDataflowBlockOptions();
                joinGo.Greedy = false;
                var join = new JoinBlock <Wrap <T>, Wrap <T> >(joinGo);
                fullJoin.LinkTo(join.Target1, dfo);
                branches[i].LinkTo(join.Target2, dfo);
                var addInst = new Wrap <T>(create(i), 0);
                var select  = new TransformBlock <Tuple <Wrap <T>, Wrap <T> >, Wrap <T> >(tpl => addInst = new Wrap <T>(add(addInst.Inner, tpl.Item1.Inner, tpl.Item2.Inner), tpl.Item1.ExpectedResult + tpl.Item2.ExpectedResult).DeepClone());
                join.LinkTo(select, dfo);
                fullJoin = select;
                saved.Add(join);
                saved.Add(select);
            }

            // extract final result
            var result = new TransformBlock <Wrap <T>, Wrap <long> >(w => new Wrap <long>(extract(w.Inner), w.ExpectedResult));

            fullJoin.LinkTo(result, dfo);
            saved.Add(result);

            // validate result
            int actionSeqId = 0;
            var final       = new ActionBlock <Wrap <long> >(w =>
            {
                resultCount++;
                this.CheckMessageId(++actionSeqId, resultCount, validateNoLoss);
                if (w.Inner != w.ExpectedResult)
                {
                    throw new Exception("Unexpected computation result.");
                }
            });

            result.LinkTo(final, dfo);
            saved.Add(final);

            // run the pipeline
            for (int i = 0; i < SourceCount; i++)
            {
                Platform.Specific.TimerStart(1000 / this.frequency, sources[i]);
            }

            while (!final.Completion.Wait(1000))
            {
                Console.WriteLine(resultCount);
                if (sources.Length == 0)
                {
                    throw new Exception("This was here just to keeo source alive in release mode, why did it hit?");
                }
            }

            Console.WriteLine("Stopped");
            Assert.AreNotEqual(0, resultCount);
        }
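The transform delegates above mutate captured locals (timerSeqId, initInst, incInst, addInst). That is safe only because ExecutionDataflowBlockOptions.MaxDegreeOfParallelism defaults to 1, so each block runs its delegate serially; making that assumption explicit costs one option object. A sketch (names illustrative):
            // Sketch: pin a state-mutating block to serial execution explicitly.
            var serialOptions = new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = 1 };
            int sequenceId = 0;
            var counter = new TransformBlock <int, int>(_ => sequenceId++, serialOptions);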
Example #7
        //[Fact(Skip = "Outerloop")]
        public void RunJoinBlockConformanceTests()
        {
            // Test Post/Receive single block
            {
                int iter = 2;

                var block2 = new JoinBlock <int, int>();
                for (int i = 0; i < iter; i++)
                {
                    block2.Target1.Post(i);
                    block2.Target2.Post(i);
                    var msg = block2.Receive();

                    Assert.False(msg.Item1 != i || msg.Item2 != i, string.Format("JoinBlock Post/Receive failed expected {0},{1} and actual {2},{3}", i, i, msg.Item1, msg.Item2));
                }

                var block3 = new JoinBlock <int, int, int>();
                for (int i = 0; i < iter; i++)
                {
                    block3.Target1.Post(i);
                    block3.Target2.Post(i);
                    block3.Target3.Post(i);
                    var msg = block3.Receive();
                    Assert.False(msg.Item1 != i || msg.Item2 != i || msg.Item3 != i, string.Format("JoinBlock Post/Receive failed expected {0},{1},{2} and actual {3},{4},{5}", i, i, i, msg.Item1, msg.Item2, msg.Item3));
                }
            }

            // Test PostAll then Receive single block
            {
                int iter = 2;

                var block2 = new JoinBlock <int, int>();
                for (int i = 0; i < iter; i++)
                {
                    block2.Target1.Post(i);
                    block2.Target2.Post(i);
                }
                for (int i = 0; i < iter; i++)
                {
                    var msg = block2.Receive();
                    Assert.False(msg.Item1 != msg.Item2, "JoinBlock PostAll then Receive failed: incorrect msg pair");
                }

                var block3 = new JoinBlock <int, int, int>();
                for (int i = 0; i < iter; i++)
                {
                    block3.Target1.Post(i);
                    block3.Target2.Post(i);
                    block3.Target3.Post(i);
                }
                for (int i = 0; i < iter; i++)
                {
                    var msg = block3.Receive();
                    Assert.False(msg.Item1 != msg.Item2 || msg.Item2 != msg.Item3, "JoinBlock PostAll then Receive failed: incorrect msg triple");
                }
            }

            // Test one target Post with TryReceive
            {
                var block2 = new JoinBlock <int, int>();
                block2.Target1.Post(0);
                Tuple <int, int> result2;
                Assert.False(block2.TryReceive(out result2), "JoinBlock.TryReceive failed, returned true and only one target posted a message");
                Assert.False(block2.OutputCount > 0, "JoinBlock.OutputCount failed, returned count > 0 and only one target posted a message");
                var block3 = new JoinBlock <int, int, int>();
                block3.Target1.Post(0);
                Tuple <int, int, int> result3;
                Assert.False(block3.TryReceive(out result3), "JoinBlock.TryReceive failed, returned true and only one target posted a message");
                Assert.False(block3.OutputCount > 0, "JoinBlock.OutputCount failed, returned count > 0 and only one target posted a message");
            }

            // Test JoinBlock`2 using a precanceled token
            {
                var localPassed = true;
                try
                {
                    var cts = new CancellationTokenSource();
                    cts.Cancel();
                    var dbo = new GroupingDataflowBlockOptions {
                        CancellationToken = cts.Token, MaxNumberOfGroups = 1
                    };
                    var jb = new JoinBlock <int, int>(dbo);

                    Tuple <int, int>          ignoredValue;
                    IList <Tuple <int, int> > ignoredValues;
                    localPassed &= jb.LinkTo(new ActionBlock <Tuple <int, int> >(delegate { })) != null;
                    localPassed &= jb.Target1.Post(42) == false;
                    localPassed &= jb.Target2.Post(42) == false;
                    localPassed &= jb.Target1.SendAsync(42).Result == false;
                    localPassed &= jb.Target2.SendAsync(42).Result == false;
                    localPassed &= jb.TryReceiveAll(out ignoredValues) == false;
                    localPassed &= jb.TryReceive(out ignoredValue) == false;
                    localPassed &= jb.OutputCount == 0;
                    localPassed &= jb.Completion != null;
                    jb.Target1.Complete();
                    jb.Target2.Complete();
                }
                catch (Exception)
                {
                    localPassed = false;
                }

                Assert.True(localPassed, string.Format("Precanceled tokens on JB`2 - {0}", localPassed ? "Passed" : "FAILED"));
            }

            // Test JoinBlock`3 using a precanceled token
            {
                var localPassed = true;
                try
                {
                    var cts = new CancellationTokenSource();
                    cts.Cancel();
                    var dbo = new GroupingDataflowBlockOptions {
                        CancellationToken = cts.Token, MaxNumberOfGroups = 1
                    };
                    var jb = new JoinBlock <int, int, int>(dbo);

                    Tuple <int, int, int>          ignoredValue;
                    IList <Tuple <int, int, int> > ignoredValues;
                    localPassed &= jb.LinkTo(new ActionBlock <Tuple <int, int, int> >(delegate { })) != null;
                    localPassed &= jb.Target1.Post(42) == false;
                    localPassed &= jb.Target2.Post(42) == false;
                    localPassed &= jb.Target3.Post(42) == false;
                    localPassed &= jb.Target1.SendAsync(42).Result == false;
                    localPassed &= jb.Target2.SendAsync(42).Result == false;
                    localPassed &= jb.Target3.SendAsync(42).Result == false;
                    localPassed &= jb.TryReceiveAll(out ignoredValues) == false;
                    localPassed &= jb.TryReceive(out ignoredValue) == false;
                    localPassed &= jb.OutputCount == 0;
                    localPassed &= jb.Completion != null;
                    jb.Target1.Complete();
                    jb.Target2.Complete();
                    jb.Target3.Complete();
                }
                catch (Exception)
                {
                    localPassed = false;
                }

                Assert.True(localPassed, string.Format("Precanceled tokens on JB`3 - {0}", localPassed ? "Passed" : "FAILED"));
            }

            // Test JoinBlock`2 completion through all targets
            {
                var localPassed = true;
                var join        = new JoinBlock <int, int>();
                join.Target1.Post(1);
                join.Target1.Complete();
                join.Target2.Complete();
                localPassed = join.Completion.Wait(2000);

                Assert.True(localPassed, string.Format("JoinBlock`2 completed through targets - {0}", localPassed ? "Passed" : "FAILED"));
            }

            // Test JoinBlock`3 completion through all targets
            {
                var localPassed = true;
                var join        = new JoinBlock <int, int, int>();
                join.Target1.Post(1);
                join.Target1.Complete();
                join.Target2.Complete();
                join.Target3.Complete();
                localPassed = join.Completion.Wait(2000);

                Assert.True(localPassed, string.Format("JoinBlock`3 completed through targets - {0}", localPassed ? "Passed" : "FAILED"));
            }

            // Test JoinBlock`2 completion through block
            {
                var localPassed = true;
                var join        = new JoinBlock <int, int>();
                join.Target1.Post(1);
                join.Complete();
                localPassed = join.Completion.Wait(2000);

                Assert.True(localPassed, string.Format("JoinBlock`2 completed through block - {0}", localPassed ? "Passed" : "FAILED"));
            }

            // Test JoinBlock`3 completion through block
            {
                var localPassed = true;
                var join        = new JoinBlock <int, int, int>();
                join.Target1.Post(1);
                join.Complete();
                localPassed = join.Completion.Wait(2000);

                Assert.True(localPassed, string.Format("JoinBlock`3 completed through block - {0}", localPassed ? "Passed" : "FAILED"));
            }
        }
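The precanceled-token tests above combine CancellationToken with MaxNumberOfGroups = 1. On its own, MaxNumberOfGroups caps how many tuples the JoinBlock will produce; once the cap is reached the block declines further input. A small sketch of that behavior in isolation:
            // Sketch: MaxNumberOfGroups = 1 allows exactly one joined tuple.
            {
                var join = new JoinBlock <int, int>(
                    new GroupingDataflowBlockOptions { MaxNumberOfGroups = 1 });

                join.Target1.Post(1);
                join.Target2.Post(2);
                var first = join.Receive();
                Assert.True(first.Item1 == 1 && first.Item2 == 2, "Join should produce the single allowed group");

                // The group cap has been reached, so further posts are declined.
                Assert.False(join.Target1.Post(3), "Post after MaxNumberOfGroups is reached should be declined");
            }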
Example #8
        //[Fact(Skip = "outerloop")]
        public void TestJoinCancelationWaits()
        {
            bool passed = true;
            bool localPassed;

            var cts = new CancellationTokenSource();
            var nonGreedyOptionsWithCancellation = new GroupingDataflowBlockOptions {
                CancellationToken = cts.Token, Greedy = false
            };

            var badSource  = new BlockOnConsumeSource <int>(1, true);
            var goodSource = new BlockOnConsumeSource <int>(2, false);

            var join = new JoinBlock <int, int>(nonGreedyOptionsWithCancellation);

            // Linking a target behind the Join and feeding messages into the targets
            // is important to trigger the functionality implemented by SourceCore.
            var terminator = new ActionBlock <Tuple <int, int> >(x => { Console.WriteLine("terminator: ({0},{1})", x.Item1, x.Item2); });

            join.LinkTo(terminator);
            var send1 = join.Target1.SendAsync(98);
            var send2 = join.Target2.SendAsync(99);

            // Wait for the sent messages to be consumed
            send1.Wait();
            send2.Wait();

            // Each linking will offer a message
            badSource.LinkTo(join.Target1);
            goodSource.LinkTo(join.Target2);

            // Both messages must be reserved
            badSource.ReservedEvent.WaitOne();
            goodSource.ReservedEvent.WaitOne();

            // The message from badSource must be consumed
            badSource.ConsumedEvent.WaitOne();

            // Cancel the Join
            cts.Cancel();

            // The Join must not complete, because the targets are still working
            try
            {
                localPassed = !join.Completion.Wait(1000);
            }
            catch (AggregateException ae)
            {
                ae.Handle(e => e is TaskCanceledException);
                localPassed = false;
            }
            passed &= localPassed;
            Assert.True(localPassed, string.Format("Join is not complete ({0}) - {1}", join.Completion.Status, localPassed ? "Passed" : "FAILED"));

            // Unblock the blocked operation
            badSource.BlockingEvent.Set();

            // The Join must become Canceled now
            try
            {
                join.Completion.Wait(1000);
                localPassed = false;
            }
            catch (AggregateException ae)
            {
                ae.Handle(e => e is TaskCanceledException);
                localPassed = join.Completion.Status == TaskStatus.Canceled;
            }
            passed &= localPassed;
            Assert.True(localPassed, string.Format("Join is canceled ({0}) - {1}", join.Completion.Status, localPassed ? "Passed" : "FAILED"));
        }
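The test above observes cancellation through Task.Wait, which surfaces it as an AggregateException wrapping a TaskCanceledException. When the Completion task is awaited instead, the same outcome shows up as an OperationCanceledException; a sketch (names illustrative):
        // Sketch: observing a canceled JoinBlock by awaiting Completion.
        public async Task AwaitCanceledJoinSketch()
        {
            var cts = new CancellationTokenSource();
            var join = new JoinBlock <int, int>(
                new GroupingDataflowBlockOptions { CancellationToken = cts.Token });

            cts.Cancel();
            try
            {
                await join.Completion;
            }
            catch (OperationCanceledException)
            {
                // join.Completion.Status is now TaskStatus.Canceled.
            }
        }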
Example #9
        /// <summary>
        /// <see cref="DualParallelDispatcherRemoteNode{TInput1,TInput2,TOutput1,TOutput2}"/>
        /// </summary>
        /// <param name="persistentCache">Persistent cache to avoid dropped data on system crash</param>
        /// <param name="progress">Progress of the current bulk</param>
        /// <param name="host"><see cref="Host"/></param>
        /// <param name="cts"><see cref="CancellationTokenSource"/></param>
        /// <param name="circuitBreakerOptions"><see cref="CircuitBreakerOptions"/></param>
        /// <param name="clusterOptions"><see cref="ClusterOptions"/></param>
        /// <param name="logger"><see cref="ILogger"/></param>
        public DualParallelDispatcherRemoteNode(
            IAppCache persistentCache,
            IProgress <double> progress,
            Host host,
            CancellationTokenSource cts,
            CircuitBreakerOptions circuitBreakerOptions,
            ClusterOptions clusterOptions,
            ILogger logger) : base(
                Policy.Handle <Exception>()
                .AdvancedCircuitBreakerAsync(circuitBreakerOptions.CircuitBreakerFailureThreshold,
                                             circuitBreakerOptions.CircuitBreakerSamplingDuration,
                                             circuitBreakerOptions.CircuitBreakerMinimumThroughput,
                                             circuitBreakerOptions.CircuitBreakerDurationOfBreak,
                                             onBreak: (ex, timespan, context) =>
        {
            logger.LogError(
                $"Batch processor breaker: Breaking the circuit for {timespan.TotalMilliseconds}ms due to {ex.Message}.");
        },
                                             onReset: context =>
        {
            logger.LogInformation(
                "Batch processor breaker: Succeeded, closed the circuit.");
        },
                                             onHalfOpen: () =>
        {
            logger.LogWarning(
                "Batch processor breaker: Half-open, next call is a trial.");
        }), clusterOptions, progress, cts, logger)
        {
            _logger         = logger;
            _clusterOptions = clusterOptions;

            ISubject <LinkedItem <TInput1> > item1DispatcherSubject = new Subject <LinkedItem <TInput1> >();
            _item1SynchronizedDispatcherSubject             = Subject.Synchronize(item1DispatcherSubject);
            _item1SynchronizedDispatcherSubjectSubscription = _item1SynchronizedDispatcherSubject
                                                              .ObserveOn(new EventLoopScheduler(ts => new Thread(ts)))
                                                              .Select(item =>
            {
                return(Observable.FromAsync(() => persistentCache.AddItem1Async(item.Key.ToString(), item.Entity,
                                                                                item.CancellationTokenSource.Token)));
            })
                                                              .Merge()
                                                              .Subscribe();

            ISubject <LinkedItem <TInput2> > item2DispatcherSubject = new Subject <LinkedItem <TInput2> >();
            _item2SynchronizedDispatcherSubject             = Subject.Synchronize(item2DispatcherSubject);
            _item2SynchronizedDispatcherSubjectSubscription = _item2SynchronizedDispatcherSubject
                                                              .ObserveOn(new EventLoopScheduler(ts => new Thread(ts)))
                                                              .Select(item =>
            {
                return(Observable.FromAsync(() => persistentCache.AddItem2Async(item.Key.ToString(), item.Entity,
                                                                                item.CancellationTokenSource.Token)));
            })
                                                              .Merge()
                                                              .Subscribe();

            var channel = new Channel(host.MachineName, host.Port,
                                      ChannelCredentials.Insecure);
            _remoteContract      = MagicOnionClient.Create <IRemoteContract <TOutput1, TOutput2> >(channel);
            _item1RemoteContract = MagicOnionClient.Create <IOutputItem1RemoteContract <TInput1, TOutput1> >(channel);
            _item2RemoteContract = MagicOnionClient.Create <IOutputItem2RemoteContract <TInput2, TOutput2> >(channel);
            IRemoteNodeSubject nodeReceiver = new NodeReceiver(_logger);
            _remoteNodeHealthSubscription =
                nodeReceiver.RemoteNodeHealthSubject.Subscribe(remoteNodeHealth =>
            {
                NodeMetrics.RemoteNodeHealth = remoteNodeHealth;
            });
            _nodeHub = StreamingHubClient.Connect <INodeHub, INodeReceiver>(channel, (INodeReceiver)nodeReceiver);

            NodeMetrics = new NodeMetrics(Guid.NewGuid());

            var item1ProcessSource = new ConcurrentDictionary <Guid, TOutput1>();
            var item2ProcessSource = new ConcurrentDictionary <Guid, TOutput2>();
            var joinBlock          =
                new JoinBlock <KeyValuePair <Guid, CancellationTokenSource>, KeyValuePair <Guid, CancellationTokenSource> >(
                    new GroupingDataflowBlockOptions {
                Greedy = false
            });
            _item1Source =
                new TransformBlock <Tuple <Guid, TOutput1, CancellationTokenSource>,
                                    KeyValuePair <Guid, CancellationTokenSource>
                                    >(source =>
            {
                if (!item1ProcessSource.ContainsKey(source.Item1) &&
                    !item1ProcessSource.TryAdd(source.Item1, source.Item2))
                {
                    _logger.LogError(
                        $"Could not add item of type {source.Item2.GetType()} and key {source.Item1.ToString()} to the buffer.");
                }

                return(new KeyValuePair <Guid, CancellationTokenSource>(source.Item1, source.Item3));
            });
            _item2Source =
                new TransformBlock <Tuple <Guid, TOutput2, CancellationTokenSource>,
                                    KeyValuePair <Guid, CancellationTokenSource>
                                    >(
                    source =>
            {
                if (!item2ProcessSource.ContainsKey(source.Item1) &&
                    !item2ProcessSource.TryAdd(source.Item1, source.Item2))
                {
                    _logger.LogError(
                        $"Could not add item of type {source.Item2.GetType()} and key {source.Item1.ToString()} to the buffer.");
                }

                return(new KeyValuePair <Guid, CancellationTokenSource>(source.Item1, source.Item3));
            });

            var processBlock = new ActionBlock <Tuple <KeyValuePair <Guid, CancellationTokenSource>,
                                                       KeyValuePair <Guid, CancellationTokenSource> > >(
                async combined =>
            {
                var policy = Policy
                             .Handle <Exception>(ex => !(ex is TaskCanceledException || ex is OperationCanceledException))
                             .WaitAndRetryAsync(_clusterOptions.RetryAttempt,
                                                retryAttempt =>
                                                TimeSpan.FromSeconds(Math.Pow(2, retryAttempt)),
                                                (exception, sleepDuration, retry, context) =>
                {
                    if (retry >= _clusterOptions.RetryAttempt)
                    {
                        _logger.LogError(
                            $"Could not process item after {retry} retry times: {exception.Message}");
                    }
                });

                var policyResult = await policy.ExecuteAndCaptureAsync(async ct =>
                {
                    try
                    {
                        if (CpuUsage > _clusterOptions.LimitCpuUsage)
                        {
                            var suspensionTime = (CpuUsage - _clusterOptions.LimitCpuUsage) / CpuUsage * 100;
                            await Task.Delay((int)suspensionTime, ct);
                        }

                        if (item1ProcessSource.ContainsKey(combined.Item1.Key) &&
                            item2ProcessSource.ContainsKey(combined.Item2.Key) &&
                            item1ProcessSource.TryGetValue(combined.Item1.Key, out var item1) &&
                            item2ProcessSource.TryGetValue(combined.Item2.Key, out var item2))
                        {
                            await _remoteContract.ProcessRemotely(item1, item2, NodeMetrics);
                            combined.Item1.Value.Cancel();
                            combined.Item2.Value.Cancel();
                        }
                    }
                    catch (Exception ex) when(ex is TaskCanceledException || ex is OperationCanceledException)
                    {
                        _logger.LogTrace("The item process has been cancelled.");
                    }
                }, cts.Token).ConfigureAwait(false);

                if (policyResult.Outcome == OutcomeType.Failure)
                {
                    _logger.LogCritical(
                        policyResult.FinalException != null
                                ? $"Could not process item: {policyResult.FinalException.Message}."
                                : "An error has occured while processing the item.");
                }

                if (!item1ProcessSource.TryRemove(combined.Item1.Key, out _))
                {
                    _logger.LogWarning(
                        $"Could not remove item of key {combined.Item1.ToString()} from the buffer.");
                }

                if (!item2ProcessSource.TryRemove(combined.Item2.Key, out _))
                {
                    _logger.LogWarning(
                        $"Could not remove item of key {combined.Item2.ToString()} from the buffer.");
                }
            });

            var options = new DataflowLinkOptions
            {
                PropagateCompletion = true
            };

            _item1Source.LinkTo(joinBlock.Target1, options);
            _item2Source.LinkTo(joinBlock.Target2, options);
            joinBlock.LinkTo(processBlock, options);
        }