Example #1
        public async Task BufferBlocksToBatchNonGreedyToAction()
        {
            var inputs         = Enumerable.Range(0, 1).Select(_ => new BufferBlock <int>()).ToList();
            var b              = new BatchBlock <int>(inputs.Count);
            int completedCount = 0;
            var c              = new ActionBlock <int[]>(i => completedCount++);

            foreach (var input in inputs)
            {
                input.LinkTo(b);
            }
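            // When every upstream buffer completes, complete the batch block so the whole pipeline can finish.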
            var ignored = Task.WhenAll(inputs.Select(s => s.Completion)).ContinueWith(
                _ => b.Complete(), CancellationToken.None, TaskContinuationOptions.None, TaskScheduler.Default);

            b.LinkTo(c, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            for (int i = 0; i < Iterations; i++)
            {
                inputs[i % inputs.Count].Post(i);
            }
            foreach (var input in inputs)
            {
                input.Complete();
            }

            await c.Completion;

            Assert.Equal(expected: Iterations / b.BatchSize, actual: completedCount);
        }
Example #2
        static void Main(string[] args)
        {
            Task.Run(async() =>
            {
                bufferBlock    = new BufferBlock <string>();
                var batchBlock = new BatchBlock <string>(7);
                var action     = new ActionBlock <IEnumerable <string> >(async t =>
                {
                    await SendUsers(t);
                });

                batchBlock.LinkTo(action, new DataflowLinkOptions()
                {
                    PropagateCompletion = true
                });
                bufferBlock.LinkTo(batchBlock, new DataflowLinkOptions()
                {
                    PropagateCompletion = true
                });

                await InsertUsers();

                Console.WriteLine("Now the buffer will complete. It's complition will be followed by completing the batch. Press any key to continue...");
                Console.ReadKey();

                bufferBlock.Complete();
                await bufferBlock.Completion.ContinueWith(delegate
                {
                    batchBlock.Complete();
                });

                Console.ReadKey();
            }).GetAwaiter().GetResult();
        }
Example #3
        /// <summary>
        /// Inserts records of the appropriate type to the database
        /// </summary>
        /// <param name="broadcast"></param>
        /// <param name="batchSize"></param>
        /// <param name="context"></param>
        public static void Insert(BatchBlock <TEntity> broadcast, KeepaContext context)
        {
            // Create a BatchBlock<best_sellers> that holds several best_seller objects and
            // then propagates them out as an array.
            //var batchRecs = new BatchBlock<TEntity>(1000);
            //var queue = new BufferBlock<best_sellers>();

            // Create an ActionBlock<best_seller[]> object that adds multiple
            // best_seller entries to the database.
            var insertEmployees = new ActionBlock <TEntity[]>(a =>
                                                              context.Set <TEntity>().AddRange(a)
                                                              );

            //Link broadcast to batch
            broadcast.LinkTo(insertEmployees, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            // Link the batch block to the action block.
            //batchRecs.LinkTo(insertEmployees, new DataflowLinkOptions { PropagateCompletion = true });

            // When the batch block completes, set the action block also to complete.
            broadcast.Completion.ContinueWith(delegate { insertEmployees.Complete(); });

            // Set the batch block to the completed state and wait for
            // all insert operations to complete.
            broadcast.Complete();
            insertEmployees.Completion.Wait();
        }
Example #4
        public void Batch_pipeline_one_input_accumulate_to_one_output()
        {
            var result = new List <string>();

            var head   = new TransformBlock <string, string>(str => str + "A");
            var block2 = new BatchBlock <string>(3);
            var block3 = new TransformBlock <string[], string>(str => string.Join(',', str));
            var block4 = new ActionBlock <string>(str => result.Add(str));

            var linkOptions = new DataflowLinkOptions {
                PropagateCompletion = true
            };

            head.LinkTo(block2, linkOptions);
            block2.LinkTo(block3, linkOptions);
            block3.LinkTo(block4, linkOptions);

            head.Post("C");
            head.Post("C");
            head.Post("C");
            // this one ends up in a partial, undersized batch that is emitted when the pipeline completes
            head.Post("C");
            head.Complete();
            block4.Completion.Wait();

            result.Count.Should().Be(2);
            result[0].Should().Be("CA,CA,CA");
            result[1].Should().Be("CA");
        }
Example #5
 public JournalActor(IJournalPersistor journalPersistor, long batchDelayMs = 10L, int batchSize = 100)
 {
     _journalPersistor = journalPersistor;
     _journalingBlock = new ActionBlock<IJournalable[]>(journalables =>
     {
         var journalStatisticEntry = new JournalStatisticEntry {BatchSize = journalables.Length};
         journalStatisticEntry.OverallElapsed = StopWatchUtil.Measure(() =>
         {
             journalStatisticEntry.WritingOnlyElapsed =
                 StopWatchUtil.Measure(
                     () => { _journalPersistor.WriteAsync(DateTimeOffset.Now, journalables); });
         });
         Statistic.Add(journalStatisticEntry);
     }, new ExecutionDataflowBlockOptions
     {
         MaxDegreeOfParallelism = 1
     });
     _requestBlock = new BatchBlock<IJournalable>(batchSize);
     _requestBlock.LinkTo(_journalingBlock, new DataflowLinkOptions
     {
         PropagateCompletion = true
     });
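     // A timer flushes partial batches every batchDelayMs so journal entries are not held back waiting for a full batch.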
     _timer = new Timer();
     _timer.Elapsed += (sender, args) => { _requestBlock.TriggerBatch(); };
     _timer.Interval = batchDelayMs;
     _timer.Enabled = true;
 }
Example #6
        protected override void OpenCore()
        {
            _vehicleTotal    = 0;
            _bikeTotal       = 0;
            _pedestrainTotal = 0;
            _unknown         = 0;

            _vehicleBatchBlock = new BatchBlock <VideoVehicle>(BatchSize);
            _vehicleDbBlock    = new VehicleDbBlock(ThreadCount, _serviceProvider);
            _vehicleBatchBlock.LinkTo(_vehicleDbBlock.InputBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            _bikeBatchBlock = new BatchBlock <VideoBike>(BatchSize);
            _bikeDbBlock    = new BikeDbBlock(ThreadCount, _serviceProvider);
            _bikeBatchBlock.LinkTo(_bikeDbBlock.InputBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            _pedestrainBatchBlock = new BatchBlock <VideoPedestrain>(BatchSize);
            _pedestrainDbBlock    = new PedestrainDbBlock(ThreadCount, _serviceProvider);
            _pedestrainBatchBlock.LinkTo(_pedestrainDbBlock.InputBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });
        }
Example #7
        private void ConfigureEntity <T>(Func <Message, T> action, Action <T[]> execution, int batchSize, DataflowLinkOptions linkOptions)
            where T : class
        {
            var transformBlock = new TransformBlock <Message, T>(action);
            var batchBlock     = new BatchBlock <T>(batchSize);
            var actionBlock    = new ActionBlock <T[]>(m =>
            {
                var temp   = m.Where(x => x != null).ToArray();
                var entity = temp.GetType().Name;
                _logger.LogDebug($"Bulk insert {entity} - {temp.Length}");
                execution.Invoke(temp);
                _logger.LogInformation($"Process finished for {entity}");
            });

            batchBlock.LinkTo(actionBlock, linkOptions);
            batchBlock.Completion.ContinueWith(delegate { actionBlock.Complete(); });

            transformBlock.LinkTo(batchBlock, linkOptions);
            transformBlock.Completion.ContinueWith(delegate { batchBlock.Complete(); });

            var bufferBlock = new BufferBlock <Message>();

            bufferBlock.LinkTo(transformBlock, linkOptions, m => m != null && m.Type == typeof(T).Name);
            _bufferBlocks.Add(typeof(T).Name, bufferBlock);
        }
Example #8
        public BatchingBlock(TimeSpan timeSpan, ITargetBlock <T[]> processor,
                             CancellationToken cancellation = default(CancellationToken))
        {
            _timeSpan   = timeSpan;
            _batchBlock = new BatchBlock <T>(100, new GroupingDataflowBlockOptions
            {
                CancellationToken = cancellation,
                BoundedCapacity   = DataflowBlockOptions.Unbounded
            });

            _batchBlock.Completion.ContinueWith(x =>
            {
                if (x.IsFaulted)
                {
                    Console.WriteLine(x.Exception);
                }
            });

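            // The timer flushes partial batches via TriggerBatch(); it starts disabled (Timeout.Infinite) and is presumably armed elsewhere using _timeSpan.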
            _trigger = new Timer(o =>
            {
                try
                {
                    _batchBlock.TriggerBatch();
                }
                catch (Exception)
                {
                    // ignored
                }
            }, null, Timeout.Infinite, Timeout.Infinite);


            _batchBlock.LinkTo(processor);
        }
Example #9
        public void Subscribe(Func <IEnumerable <T>, Task> action, CancellationToken token)
        {
            var bufferDataflowBlockOptions = new DataflowBlockOptions {
                CancellationToken = token
            };

            if (BufferBoundedCapacity != default(int))
            {
                bufferDataflowBlockOptions.BoundedCapacity = BufferBoundedCapacity;
            }
            if (BufferMaxMessagesPerTask != default(int))
            {
                bufferDataflowBlockOptions.MaxMessagesPerTask = BufferMaxMessagesPerTask;
            }

            bufferBlock = new BufferBlock <T>(bufferDataflowBlockOptions);

            batchBlock = new BatchBlock <T>(BatchSize);

            var actionBlock = new ActionBlock <IEnumerable <T> >(action);

            batchBlock.LinkTo(actionBlock);
            bufferBlock.LinkTo(batchBlock);

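            // Flush partially filled batches on a schedule so subscribers still receive data from slow producers.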
            _timer = new Timer(state => { batchBlock.TriggerBatch(); }, bufferBlock, TimerDueTime, TimerPeriod);
        }
Example #10
        public IDisposable Start(CancellationToken cancellationToken)
        {
            _Logger.LogInformation("TracingTPLPipelineHander Start");
            List <IDisposable> disposables = new List <IDisposable>();
            var batchSpan    = new BatchBlock <Span>(_TracingTPLPipelineOption.BatchSpanSize);
            var batchProcess = new BatchBlock <SpanServiceOperation>(_TracingTPLPipelineOption.BatchProcessSize);

            disposables.Add(batchSpan.LinkTo(new ActionBlock <IEnumerable <Span> >(CreateSpan, new ExecutionDataflowBlockOptions()
            {
                MaxDegreeOfParallelism = _TracingTPLPipelineOption.MaxHanderParallelism <= 0? DEFAUKT_CONSUMER_PARALLELISM: _TracingTPLPipelineOption.MaxHanderParallelism,
            })));
            disposables.Add(batchProcess.LinkTo(new ActionBlock <IEnumerable <SpanServiceOperation> >(CreateSpanServiceOperation)));
            foreach (var item in _TracingTPLPipelines)
            {
                var disposable = item.SourceBlock.LinkTo(new ActionBlock <TracingBatch>(c =>
                {
                    foreach (var item in c.Spans)
                    {
                        item.Process = c.Process;
                        batchSpan.Post(item);
                        batchProcess.Post(new SpanServiceOperation()
                        {
                            Operation = item.OperationName,
                            Process   = c.Process
                        });
                    }
                }), t => t != null && t.Spans != null);
                disposables.Add(disposable);
            }
            Disposable = new CollectionDisposable(disposables);
            return(Disposable);
        }
Example #11
        /// <summary>
        /// GeneratePipeline_DataFlowSource_to_DataFlowDestination generates a TPL-DataFlowPipeline between two vertices of a graph.
        /// v_source.UserDefinedObjects[0] has to be of type IDataFlowSource
        /// v_dest.UserDefinedObjects[0] has to be of type IDataFlowDestination
        /// </summary>
        /// <param name="v_source"></param>
        /// <param name="v_dest"></param>
        /// <param name="ToCompleteCollection"></param>
        /// <param name="WatingForCompletitionCollection"></param>
        private void GeneratePipeline_DataFlowSource_to_DataFlowDestination(Vertex v_source, Vertex v_dest, ref List <object> ToCompleteCollection, ref List <object> WatingForCompletitionCollection, ref Dictionary <IDataFlowSource <DS>, object> DataFlowReaderCollection)
        {
            IDataFlowSource <DS>      t_b_source = (IDataFlowSource <DS>)v_source.UserDefinedObjects[0];
            IDataFlowDestination <DS> dest       = (IDataFlowDestination <DS>)v_dest.UserDefinedObjects[0];

            TransformBlock <DS, DS> t_b_dummy = new TransformBlock <DS, DS>(DS => { return(DS); }
                                                                            , new ExecutionDataflowBlockOptions {
                MaxDegreeOfParallelism = this.MaxDegreeOfParallelism
            });

            ToCompleteCollection.Add(t_b_dummy);
            v_dest.UserDefinedObjects.Add(t_b_dummy);
            DataFlowReaderCollection.Add(t_b_source, t_b_dummy);


            var batchBlock = new BatchBlock <DS>(BatchSize);
            var DataFlowDestinationBlock = new ActionBlock <DS[]>(outp => dest.WriteBatch(outp));

            t_b_dummy.LinkTo(batchBlock, linkOptions);
            batchBlock.LinkTo(DataFlowDestinationBlock, linkOptions);

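            // Propagate completion manually down the chain: dummy transform -> batch block -> destination block.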
            t_b_dummy.Completion.ContinueWith(t => { batchBlock.Complete(); });
            batchBlock.Completion.ContinueWith(t => { DataFlowDestinationBlock.Complete(); });

            WatingForCompletitionCollection.Add(DataFlowDestinationBlock);
        }
Example #12
        public void HandlingCompletion()
        {
            var batchBlock     = new BatchBlock <int>(10);
            var broadcastBlock = new BroadcastBlock <int[]>(_ => _);
            var xForm1         = new TransformBlock <int[], int[]>(_ => _);
            var xForm2         = new TransformBlock <int[], int[]>(_ => _);

            batchBlock.LinkTo(broadcastBlock, new DataflowLinkOptions()
            {
                PropagateCompletion = true
            });
            broadcastBlock.LinkTo(xForm1);
            broadcastBlock.LinkTo(xForm2);

            broadcastBlock.Completion.ContinueWith(broadcastBlockCompletionTask => {
                if (!broadcastBlockCompletionTask.IsFaulted)
                {
                    xForm1.Complete();
                    xForm2.Complete();
                }
                else
                {
                    ((IDataflowBlock)xForm1).Fault(broadcastBlockCompletionTask.Exception);
                    ((IDataflowBlock)xForm2).Fault(broadcastBlockCompletionTask.Exception);
                }
            });

            xForm1.Completion.ContinueWith(async _ => {
                try {
                    await xForm2.Completion;
                    //continue passing completion / fault on to rest of pipeline
                } catch  {
                }
            });
        }
Example #13
    public override async Task ProcessAsync()
    {
        var settings = new ExecutionDataflowBlockOptions()
        {
            MaxDegreeOfParallelism = 10,
        };

        var listFilesBlock   = new TransformManyBlock <string, string>(_fileSystem.GetFileNames, settings);
        var getVehiclesBlock = new TransformManyBlock <string, IVehicle>(GetVehicles, settings);
        var transformBlock   = new TransformBlock <IVehicle, Truck>(TransformAsync, settings);
        var doubleBlock      = new TransformBlock <Truck, Truck>(DoubleDoorsAsync, settings);
        var batchBlock       = new BatchBlock <Truck>(10);
        var saveBlock        = new ActionBlock <IEnumerable <Truck> >(SaveTrucksAsync, settings);

        DataflowLinkOptions linkOptions = new DataflowLinkOptions()
        {
            PropagateCompletion = true
        };

        listFilesBlock.LinkTo(getVehiclesBlock, linkOptions);
        getVehiclesBlock.LinkTo(transformBlock, linkOptions);
        transformBlock.LinkTo(doubleBlock, linkOptions);
        doubleBlock.LinkTo(batchBlock, linkOptions);
        batchBlock.LinkTo(saveBlock, linkOptions);

        await listFilesBlock.SendAsync(_directory);

        listFilesBlock.Complete();

        await saveBlock.Completion;
    }
Example #14
        public void ActionQueueTestInPipeline()
        {
            var list = new List <List <int> >();

            var option = new DataflowLinkOptions
            {
                PropagateCompletion = true,
            };

            var batchBlock  = new BatchBlock <int>(10);
            var actionBlock = new ActionBlock <int[]>(x => list.Add(new List <int>(x)));

            batchBlock.LinkTo(actionBlock, option);

            var p = new PipelineManager <IWorkContext, int>
            {
                new Pipeline <IWorkContext, int>() + ((c, x) => { batchBlock.Post(x); return(true); }),
            };

            Enumerable.Range(0, 100)
            .ForEach(x => p.Post(null !, x));

            batchBlock.Complete();
            Task.WaitAll(batchBlock.Completion, actionBlock.Completion);

            list.Count.Should().Be(10);
            list.All(x => x.Count == 10).Should().BeTrue();
        }
Example #15
    public void Configure(string collectorName, XElement configElement, ISystemMetricsService systemMetrics)
    {
      _log = SuperCheapIOC.Resolve<ILog>();
      _systemMetrics = systemMetrics;

      var config = new SqlServerConfiguration(configElement.Attribute("connectionString").Value, configElement.ToInt("writeBatchSize"));

      _connectionString = config.ConnectionString;
      _collectorName = collectorName;
      _retries = config.Retries;

      InitialiseRetryHandling();

      _batchBlock = new BatchBlock<GraphiteLine>(config.WriteBatchSize);
      _actionBlock = new ActionBlock<GraphiteLine[]>(p => SendToDB(p), new ExecutionDataflowBlockOptions() { MaxDegreeOfParallelism = 1 });
      _batchBlock.LinkTo(_actionBlock);

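      // Completion is chained manually because the blocks were linked without PropagateCompletion.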
      _batchBlock.Completion.ContinueWith(p => _actionBlock.Complete());
      _actionBlock.Completion.ContinueWith(p => { _isActive = false; });

      _completionTask = new Task(() =>
      {
        _log.Info("SqlServerBackend - Completion has been signaled. Waiting for action block to complete.");
        _batchBlock.Complete();
        _actionBlock.Completion.Wait();
      });

    }
Example #16
        public MediaHashSorter(HashSet <long> NewHash, DBHandler db, int MaxHammingDistance, int ExtraBlock)
        {
            this.NewHash            = NewHash; //if null, every hash is processed
            this.MaxHammingDistance = MaxHammingDistance;
            Combi = new Combinations(MaxHammingDistance + ExtraBlock, ExtraBlock);

            //this block is shared by all MultipleSortUnits
            PairStoreBlock = new ActionBlock <HashPair[]>(
                async(p) =>
            {
                int AddCount;
                do
                {
                    AddCount = await db.StoreMediaPairs(p).ConfigureAwait(false);
                } while (AddCount < 0);                                                                       //retry indefinitely on failure
                if (0 < AddCount)
                {
                    Interlocked.Add(ref DBAddCount, AddCount);
                }
            },
                new ExecutionDataflowBlockOptions()
            {
                SingleProducerConstrained = true,
                MaxDegreeOfParallelism    = Environment.ProcessorCount
            });
            PairBatchBlock.LinkTo(PairStoreBlock, new DataflowLinkOptions()
            {
                PropagateCompletion = true
            });
        }
Example #17
        /// <summary>
        /// Create loader
        /// </summary>
        /// <param name="executor"></param>
        /// <param name="serializer"></param>
        /// <param name="logger"></param>
        /// <param name="addOnly"></param>
        /// <param name="bulkSize"></param>
        internal BulkImporter(IBulkExecutor executor, JsonSerializerSettings serializer,
                              ILogger logger, bool addOnly = false, int bulkSize = 10000)
        {
            _executor   = executor ?? throw new ArgumentNullException(nameof(executor));
            _logger     = logger ?? throw new ArgumentNullException(nameof(logger));
            _serializer = serializer == null?
                          JsonSerializer.CreateDefault() : JsonSerializer.Create(serializer);

            _bulkSize = bulkSize;
            _addOnly  = addOnly;

            // Set up batch blocks
            _batcher = new BatchBlock <object>(_bulkSize,
                                               new GroupingDataflowBlockOptions());
            var importer = new ActionBlock <object[]>(ProcessBatch,
                                                      new ExecutionDataflowBlockOptions {
                BoundedCapacity           = 1,
                MaxDegreeOfParallelism    = 1,
                SingleProducerConstrained = true
            });

            // Connect the output to the action handler
            _batcher.LinkTo(importer, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            // When done, cause end to be called
            _complete = _batcher.Completion
                        .ContinueWith(async t => {
                importer.Complete();
                // Drain
                await importer.Completion;
            });
            _cts = new CancellationTokenSource();
        }
Example #18
        public IDictionary <string, string[]> BatchBlockUsage(int numberOfIteration, int batchsize)
        {
            Console.WriteLine($"Inside {nameof(TplDataflow3GroupingBlocksController)} - {nameof(BatchBlockUsage)}");

            var ouputCollection = new Dictionary <string, string[]>();

            Functions.ClearCounterForBatchBlockUsage();

            // Create the members of the pipeline.
            var batchBlockWithSizeGivenInInput      = new BatchBlock <string>(batchsize);
            var actionBlockPerformActionOnBatchData = new ActionBlock <string[]>(batchedInput =>
                                                                                 Functions.DisplayByGroups(ouputCollection, batchedInput)
                                                                                 );

            // Connect the dataflow blocks to form a pipeline.
            batchBlockWithSizeGivenInInput.LinkTo(actionBlockPerformActionOnBatchData, DataflowOptions.LinkOptions);

            // Start BatchBlockUsage pipeline with the input values.
            for (var i = 0; i < numberOfIteration; i++)
            {
                batchBlockWithSizeGivenInInput.Post($"Value = {i}");
            }

            // Mark the head of the pipeline as complete.
            batchBlockWithSizeGivenInInput.Complete();

            // Wait for the last block in the pipeline to process all messages.
            actionBlockPerformActionOnBatchData.Completion.Wait();

            return(ouputCollection);
        }
Example #19
        private static async Task Synch(CancellationToken cancellationToken)
        {
            BatchBlock <int>    syncBatch  = new BatchBlock <int>(1);
            ActionBlock <int[]> syncAction = new ActionBlock <int[]>(async i =>
            {
                Console.WriteLine("syncAction");
                //exists pairs
                throw null;
            }, new ExecutionDataflowBlockOptions {
                MaxDegreeOfParallelism = 2
            });

            _ = syncBatch.LinkTo(syncAction, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            await foreach (int i in AsyncEnumerable(cancellationToken))
            {
                bool isMsgReceived = await syncBatch.SendAsync(i);

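                // Stop producing if the batch block declined the message or the downstream action block has faulted.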
                if (isMsgReceived is false || syncAction.Completion.Status == TaskStatus.Faulted)
                {
                    break;
                }
            }
            syncBatch.Complete();
            await syncAction.Completion;
        }
Example #20
        static void Main(string[] args)
        {
            var options = new GroupingDataflowBlockOptions {
                Greedy = false
            };
            var sync = new BatchBlock <string>(3, options);

            // the delegate returns value tuple (.NET 4.7 / standard 2.0)
            var chA = new TransformBlock <int, string>(
                (Func <int, string>)ChannelA);
            var chB = new TransformBlock <int, string>(
                (Func <int, string>)ChannelB);
            var chC = new TransformBlock <int, string>(
                (Func <int, Task <string> >)ChannelC);

            var presenter = new ActionBlock <string[]>((Action <string[]>)Present);

            chA.LinkTo(sync);
            chB.LinkTo(sync);
            chC.LinkTo(sync);
            sync.LinkTo(presenter);
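            // Because the batch is non-greedy (Greedy = false), a batch of three is only consumed when each channel has an item available, keeping the channels in step.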


            for (int i = 1; i <= 20; i++)
            {
                chA.Post(i);
                chB.Post(i);
                chC.Post(i);
            }

            Console.ReadKey();
        }
Example #21
        private static void SimpleNetwork()
        {
            var rand = new Random(DateTime.Now.Millisecond);

            var broadcastBlock = new BroadcastBlock <int>(x => x);

            var transformPositive = new TransformBlock <int, int>(x =>
            {
                Thread.Sleep(1000);
                return(x);
            });

            var transformNegative = new TransformBlock <int, int>(x =>
            {
                Thread.Sleep(2000);
                return(x * -1);
            });

            var join = new JoinBlock <int, int>();

            var batchBlock = new BatchBlock <Tuple <int, int> >(5);

            var sumBlock = new ActionBlock <Tuple <int, int>[]>(tuples =>
            {
                foreach (var tuple in tuples)
                {
                    Console.WriteLine($"{tuple.Item1}+({tuple.Item2})={tuple.Item1 + tuple.Item2}");
                }
            });


            broadcastBlock.LinkTo(transformPositive, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            broadcastBlock.LinkTo(transformNegative, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            transformPositive.LinkTo(@join.Target1, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            transformNegative.LinkTo(@join.Target2, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            @join.LinkTo(batchBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            batchBlock.LinkTo(sumBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            for (int i = 0; i < 30; i++)
            {
                broadcastBlock.Post(rand.Next(100));
                Thread.Sleep(1000);
            }

            broadcastBlock.Complete();
        }
Example #22
        // </snippet5>

        // <snippet6>
        // Adds random employee data to the database by using dataflow.
        // This method is similar to AddEmployees except that it uses batching
        // to add multiple employees to the database at a time.
        static void AddEmployeesBatched(string connectionString, int batchSize,
                                        int count)
        {
            // Create a BatchBlock<Employee> that holds several Employee objects and
            // then propagates them out as an array.
            var batchEmployees = new BatchBlock <Employee>(batchSize);

            // Create an ActionBlock<Employee[]> object that adds multiple
            // employee entries to the database.
            var insertEmployees = new ActionBlock <Employee[]>(a =>
                                                               InsertEmployees(a, connectionString));

            // Link the batch block to the action block.
            batchEmployees.LinkTo(insertEmployees);

            // When the batch block completes, set the action block also to complete.
            batchEmployees.Completion.ContinueWith(delegate { insertEmployees.Complete(); });

            // Post several random Employee objects to the batch block.
            PostRandomEmployees(batchEmployees, count);

            // Set the batch block to the completed state and wait for
            // all insert operations to complete.
            batchEmployees.Complete();
            insertEmployees.Completion.Wait();
        }
Example #23
        public void BasicUsageTest()
        {
            int[] array = null;
            var   evt   = new ManualResetEventSlim(false);

            var buffer = new BatchBlock <int> (10);
            var block  = new ActionBlock <int[]> (i => { array = i; evt.Set(); });

            buffer.LinkTo <int[]>(block);

            for (int i = 0; i < 9; i++)
            {
                Assert.IsTrue(buffer.Post(i));
            }

            evt.Wait(1600);

            Assert.IsNull(array);

            Assert.IsTrue(buffer.Post(42));
            evt.Wait();

            Assert.IsNotNull(array);
            CollectionAssert.AreEquivalent(new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 42 }, array);
        }
Example #24
        public async Task BatchBlockWillCompleteTarget()
        {
            BatchBlock <int> bb = new BatchBlock <int>(batchSize: 2,
                                                       dataflowBlockOptions: new GroupingDataflowBlockOptions()
            {
                BoundedCapacity = 3
            });

            TestTargetBlock <int[]> testTarget = new TestTargetBlock <int[]>();

            testTarget.ConsumptionMode = DataflowMessageStatus.Accepted;
            bb.LinkTo(testTarget, PropagateCompletion);

            // Rapidly send 50 messages
            TimeSpan sendTimeout = TimeSpan.FromSeconds(10);

            Task.WaitAll(Enumerable.Range(0, 50).Select((i) => bb.SendAsync(i)).ToArray(), sendTimeout);

            bb.Complete();

            // Completion should run to successful conclusion
            await Task.WhenAny(bb.Completion, Task.Delay(CompletionTimeout));

            Assert.Equal(TaskStatus.RanToCompletion, bb.Completion.Status);

            // Assumption: BatchBlock should also have completed its target
            await Task.WhenAny(testTarget.Completion, Task.Delay(CompletionTimeout));

            Assert.Equal(TaskStatus.RanToCompletion, testTarget.Completion.Status);

            // Assumption: we should have gotten 25 batches
            bool allMessagesReceived = await TaskUtils.PollWaitAsync(() => testTarget.MessagesConsumed.Count == 25, MessageArrivalTimeout);

            Assert.True(allMessagesReceived);
        }
Example #25
        public void TriggerBatchLateBinding()
        {
            int[] array = null;
            var   evt   = new ManualResetEventSlim(false);

            var buffer = new BatchBlock <int> (10);
            var block  = new ActionBlock <int[]> (i =>
            {
                array = i;
                evt.Set();
            });

            for (int i = 0; i < 9; i++)
            {
                Assert.IsTrue(buffer.Post(i));
            }

            buffer.TriggerBatch();
            buffer.LinkTo(block);

            evt.Wait();
            Assert.IsNotNull(array);

            CollectionAssert.AreEquivalent(new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8 },
                                           array);
        }
Example #26
        public ExecutionPipeline(Kernel kernel)
        {
            _kernel = kernel;
            _commandQueue = new BufferBlock<CommandRequest[]>();
            _queryQueue = new BatchBlock<QueryRequest>(MaxConcurrentQueries);

            var transactionHandler = new ActionBlock<object>(t =>
            {
                if (t is QueryRequest[])
                {
                    var queries = t as QueryRequest[];
                    Task[] tasks = queries.Select(q => Task.Factory.StartNew(_ => ExecuteQuery(q), null)).ToArray();
                    Task.WaitAll(tasks);
                }
                else if (t is CommandRequest[])
                {
                    var commands = t as CommandRequest[];
                    foreach (var commandContext in commands)
                    {
                        var result = _kernel.Execute(commandContext.Command);
                        commandContext.Response.Post(result);
                    }
                }

            });
            _commandQueue.LinkTo(transactionHandler);
            _queryQueue.LinkTo(transactionHandler);
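            // Flush queued queries on a timer so they are executed even when fewer than MaxConcurrentQueries have arrived.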
            _timer = new Timer(_ => _queryQueue.TriggerBatch());
            _timer.Change(Interval, Interval);
        }
Example #27
		public void BasicUsageTest ()
		{
			int[] array = null;
			var evt = new ManualResetEventSlim (false);

			var buffer = new BatchBlock<int> (10);
			var block = new ActionBlock<int[]> (i =>
			{
				array = i;
				evt.Set ();
			});
			buffer.LinkTo<int[]> (block);

			for (int i = 0; i < 9; i++)
				Assert.IsTrue (buffer.Post (i));

			Assert.IsFalse (evt.Wait (100));

			Assert.IsNull (array);

			Assert.IsTrue (buffer.Post (42));
			Assert.IsTrue (evt.Wait (1000));

			Assert.IsNotNull (array);
			CollectionAssert.AreEqual (new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 42 }, array);
		}
Example #28
        public SqlServerBackend(string connectionString,
                                string collectorName,
                                ISystemMetricsService systemMetrics,
                                int retries   = 3,
                                int batchSize = 50)
        {
            _log = SuperCheapIOC.Resolve <ILog>();
            _connectionString = connectionString;
            _collectorName    = collectorName;
            _systemMetrics    = systemMetrics;
            _retries          = retries;

            InitialiseRetryHandling();

            _batchBlock  = new BatchBlock <GraphiteLine>(batchSize);
            _actionBlock = new ActionBlock <GraphiteLine[]>(p => SendToDB(p), new ExecutionDataflowBlockOptions()
            {
                MaxDegreeOfParallelism = 1
            });
            _batchBlock.LinkTo(_actionBlock);

            _batchBlock.Completion.ContinueWith(p => _actionBlock.Complete());
            _actionBlock.Completion.ContinueWith(p => { _isActive = false; });

            _completionTask = new Task(() =>
            {
                _log.Info("SqlServerBackend - Completion has been signaled. Waiting for action block to complete.");
                _batchBlock.Complete();
                _actionBlock.Completion.Wait();
            });
        }
Example #29
        private void InitializeFlow(int maxBacklog, TimeSpan maxFlushInterval)
        {
            _buffer = new BufferBlock <FunctionInstanceLogEntry>(
                new ExecutionDataflowBlockOptions()
            {
                BoundedCapacity = maxBacklog
            });

            _batcher = new BatchBlock <FunctionInstanceLogEntry>(maxBacklog,
                                                                 new GroupingDataflowBlockOptions()
            {
                BoundedCapacity = maxBacklog,
                Greedy          = true
            });

            TransformBlock <IEnumerable <FunctionInstanceLogEntry>, IEnumerable <FunctionResultAggregate> > aggregator =
                new TransformBlock <IEnumerable <FunctionInstanceLogEntry>, IEnumerable <FunctionResultAggregate> >(transform: (e) => Aggregate(e));

            ActionBlock <IEnumerable <FunctionResultAggregate> > publisher = new ActionBlock <IEnumerable <FunctionResultAggregate> >(
                (e) => Publish(e),
                new ExecutionDataflowBlockOptions()
            {
                MaxDegreeOfParallelism = 1,
                BoundedCapacity        = 32
            });

            _disposables = new IDisposable[]
            {
                _buffer.LinkTo(_batcher),
                _batcher.LinkTo(aggregator),
                aggregator.LinkTo(publisher)
            };

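            // A timer calls FlushAsync (not shown here) every maxFlushInterval, which presumably triggers the batch so partial aggregation windows still get published.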
            _windowTimer = new Timer(async(o) => await FlushAsync(), null, maxFlushInterval, maxFlushInterval);
        }
Example #30
        public async Task TestLinkToOptions()
        {
            const int Messages = 2;

            foreach (bool append in DataflowTestHelpers.BooleanValues)
            {
                var bb      = new BatchBlock <int>(1);
                var values  = new int[Messages][];
                var targets = new ActionBlock <int[]> [Messages];
                for (int i = 0; i < Messages; i++)
                {
                    int slot = i;
                    targets[i] = new ActionBlock <int[]>(item => values[slot] = item);
                    bb.LinkTo(targets[i], new DataflowLinkOptions {
                        MaxMessages = 1, Append = append
                    });
                }
                bb.PostRange(0, Messages);
                bb.Complete();
                await bb.Completion;

                for (int i = 0; i < Messages; i++)
                {
                    targets[i].Complete();
                    await targets[i].Completion;
                    Assert.Equal(
                        expected: append ? i : Messages - i - 1,
                        actual: values[i][0]);
                }
            }
        }
Example #31
		public void TriggerBatchTest ()
		{
			int[] array = null;
			var evt = new ManualResetEventSlim (false);

			var buffer = new BatchBlock<int> (10);
			var block = new ActionBlock<int[]> (i =>
			{
				array = i;
				evt.Set ();
			});
			buffer.LinkTo (block);

			for (int i = 0; i < 9; i++)
				Assert.IsTrue (buffer.Post (i));

			buffer.TriggerBatch ();
			evt.Wait ();

			Assert.IsNotNull (array);
			Assert.IsTrue (buffer.Post (42));
			evt.Wait (1600);

			CollectionAssert.AreEquivalent (new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8 },
				array);
		}
Example #32
        public static async Task <List <string> > LoadFiles(ConcurrentDictionary <string, MemoryStream> streams)
        {
            List <string> _errors = new List <string>();
            Queue <KeyValuePair <string, MemoryStream> > files = new Queue <KeyValuePair <string, MemoryStream> >(streams);

            var batchBlock = new BatchBlock <KeyValuePair <string, MemoryStream> >(75, new GroupingDataflowBlockOptions {
                BoundedCapacity = 100
            });
            var actionBlock = new ActionBlock <KeyValuePair <string, MemoryStream>[]>(t =>
            {
                for (int i = 0; i < t.Length; i++)
                {
                    var s = files.Dequeue();
                    try
                    {
                        DBReader reader = new DBReader();
                        DBEntry entry   = reader.Read(s.Value, s.Key);
                        if (entry != null)
                        {
                            var current = Entries.FirstOrDefault(x => x.FileName == entry.FileName && x.Build == entry.Build);
                            if (current != null)
                            {
                                Entries.Remove(current);
                            }

                            Entries.Add(entry);

                            if (!string.IsNullOrWhiteSpace(reader.ErrorMessage))
                            {
                                _errors.Add(FormatError(s.Key, ErrorType.Warning, reader.ErrorMessage));
                            }
                        }
                    }
                    catch (ConstraintException ex) { _errors.Add(FormatError(s.Key, ErrorType.Error, "Id column contains duplicates.")); }
                    catch (Exception ex) { _errors.Add(FormatError(s.Key, ErrorType.Error, ex.Message)); }

                    if (i % 100 == 0 && i > 0)
                    {
                        ForceGC();
                    }
                }

                ForceGC();
            });

            batchBlock.LinkTo(actionBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            foreach (KeyValuePair <string, MemoryStream> i in streams)
            {
                await batchBlock.SendAsync(i); // wait asynchronously for the block to accept.
            }
            batchBlock.Complete();
            await actionBlock.Completion;

            ForceGC();

            return(_errors);
        }
Example #33
        protected ITargetBlock <TSource> BuildPipeline(object context, out Task completionTask)
        {
            var readerBlock = new BufferBlock <TSource>(new DataflowBlockOptions {
                BoundedCapacity = Options.ReadChunkSize
            });
            var processorBlock = new TransformBlock <TSource, TTarget>(async item =>
            {
                return(await Processor().Process(item, context));
            },
                                                                       new ExecutionDataflowBlockOptions
            {
                BoundedCapacity           = Options.ReadChunkSize,
                MaxDegreeOfParallelism    = Options.MaxDegreeOfParallelism,
                SingleProducerConstrained = true
            });
            var batchblock  = new BatchBlock <TTarget>(Options.ReadChunkSize);
            var writerBlock = new ActionBlock <ICollection <TTarget> >(async items =>
            {
                var writer = Writer();
                await writer.Write(items, context);
            },
                                                                       new ExecutionDataflowBlockOptions
            {
                MaxDegreeOfParallelism    = Options.MaxDegreeOfParallelism,
                SingleProducerConstrained = true
            });

            readerBlock.LinkTo(processorBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            processorBlock.LinkTo(batchblock, new DataflowLinkOptions {
                PropagateCompletion = true
            });
            batchblock.LinkTo(writerBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            completionTask = new Task(() =>
            {
                try
                {
                    writerBlock.Completion.Wait();
                    Logger.LogDebug("writerBlock.Completion.Wait() returned");
                }
                catch (Exception e)
                {
                    Logger.LogError(e, "ERROR");
                }
                finally
                {
                    readerBlock.Complete();
                }
            });

            completionTask.Start();

            return(readerBlock);
        }
Example #34
        public async Task BatchBlockKeepsDeclinedMessages()
        {
            CancellationTokenSource cts = new CancellationTokenSource();
            BatchBlock <int>        bb  = new BatchBlock <int>(batchSize: 2,
                                                               dataflowBlockOptions: new GroupingDataflowBlockOptions()
            {
                BoundedCapacity = 3, CancellationToken = cts.Token
            });

            TestTargetBlock <int[]> testTarget = new TestTargetBlock <int[]>();

            testTarget.ConsumptionMode = DataflowMessageStatus.Declined;
            bb.LinkTo(testTarget, PropagateCompletion);

            // Assumption: BatchBlock will keep incoming messages even when its target is declining them
            Assert.True(bb.Post(1));
            Assert.True(bb.Post(2));
            Assert.True(bb.Post(3));

            // The block has run out of capacity
            Assert.False(bb.Post(4));
            Assert.False(bb.Post(5));

            // This message will be postponed (and, in fact, released when we ask the block to complete)
            bb.SendAsync(6).Forget();

            // The messages are buffered and there is one batch ready to be consumed (the second one is not full)
            Assert.Equal(1, bb.OutputCount);

            // Wait till the block offers a message
            // Assumption: only one message will be offered, the block will not offer more messages if the target declines
            bool oneMessageOffered = await TaskUtils.PollWaitAsync(() => testTarget.MessagesDeclined.Count == 1, MessageArrivalTimeout);

            Assert.True(oneMessageOffered);
            Assert.True(testTarget.MessagesConsumed.Count == 0);
            Assert.True(testTarget.MessagesPostponed.Count == 0);

            // Assumption: the block will NOT try to deliver declined messages again when asked to complete.
            // The fact that the buffer is not empty will prevent it from completing
            testTarget.ConsumptionMode = DataflowMessageStatus.Accepted;
            bb.Complete();
            bool someMessagesDelivered = await TaskUtils.PollWaitAsync(() => testTarget.MessagesConsumed.Count > 0, MessageArrivalTimeout);

            Assert.False(someMessagesDelivered);

            // Because we asked the BatchBlock for completion, it now formed a second, undersize batch
            Assert.Equal(2, bb.OutputCount);

            // Completion task should still be running
            await Task.WhenAny(bb.Completion, Task.Delay(CompletionTimeout));

            Assert.False(bb.Completion.IsCompleted);

            // Assumption: BatchBlock will not start target's completion until it itself completes
            await Task.WhenAny(testTarget.Completion, Task.Delay(CompletionTimeout));

            Assert.True(testTarget.Completion.IsNotStarted());
        }
Example #35
        public static async Task <List <string> > LoadFiles(IEnumerable <string> filenames)
        {
            ConcurrentBag <string>   _errors = new ConcurrentBag <string>();
            ConcurrentQueue <string> files   = new ConcurrentQueue <string>(filenames.Distinct().OrderBy(x => x).ThenByDescending(x => Path.GetExtension(x)));
            string firstFile = files.First();

            var batchBlock = new BatchBlock <string>(100, new GroupingDataflowBlockOptions {
                BoundedCapacity = 100
            });
            var actionBlock = new ActionBlock <string[]>(t =>
            {
                for (int i = 0; i < t.Length; i++)
                {
                    string file;
                    files.TryDequeue(out file);
                    try
                    {
                        DBReader reader = new DBReader();
                        DBEntry entry   = reader.Read(file);
                        if (entry != null)
                        {
                            var current = Entries.FirstOrDefault(x => x.FileName == entry.FileName && x.Build == entry.Build);
                            if (current != null)
                            {
                                Entries.Remove(current);
                            }

                            Entries.Add(entry);
                            //if (file != firstFile)
                            //    entry.Detach();

                            if (!string.IsNullOrWhiteSpace(reader.ErrorMessage))
                            {
                                _errors.Add(FormatError(file, ErrorType.Warning, reader.ErrorMessage));
                            }
                        }
                    }
                    catch (ConstraintException ex) { _errors.Add(FormatError(file, ErrorType.Error, "Id column contains duplicates.")); }
                    catch (Exception ex) { _errors.Add(FormatError(file, ErrorType.Error, ex.Message)); }
                }

                ForceGC();
            });

            batchBlock.LinkTo(actionBlock, new DataflowLinkOptions {
                PropagateCompletion = true
            });

            foreach (string i in files)
            {
                await batchBlock.SendAsync(i); // wait asynchronously for the block to accept.
            }
            batchBlock.Complete();
            await actionBlock.Completion;

            files = null;
            return(_errors.ToList());
        }
Example #36
        public void AddWriter(IWriteInterface writer)
        {
            var batchBlock = new BatchBlock<ValueList>(int.MaxValue);

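            // The batch size is effectively unbounded (int.MaxValue); the timer below flushes accumulated values every 10 seconds instead.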
            var timer = new Timer(state => batchBlock.TriggerBatch());
            timer.Change(0, 10000);

            broadcast.LinkTo(batchBlock);

            batchBlock.LinkTo(new ActionBlock<ValueList[]>(vl => writer.Write(vl)));
        }
Example #37
        public TplBatchingJournaler(IJournalWriter journalWriter, ExecutionPipeline dispatcher, int batchSize)
        {
            Interval = TimeSpan.FromMilliseconds(16);
            _journalWriter = journalWriter;
            _dispatcher = dispatcher;

            _writerBlock = new ActionBlock<CommandRequest[]>(batch => Go(batch));

            _requestQueue = new BatchBlock<CommandRequest>(batchSize);
            _requestQueue.LinkTo(_writerBlock);
        }
Example #38
        public static Task GenerateAnnotatedPathsAsync(IEnumerable<CollectionPath> paths, Func<FileInfo, bool> filePredicate,
            ITargetBlock<AnnotatedPath[]> filePathTargetBlock, CancellationToken cancellationToken)
        {
            var shuffleBlock = new TransformBlock<AnnotatedPath[], AnnotatedPath[]>(
                filenames =>
                {
                    // Sequential names tend to fall into the same AWS S3 partition, so we
                    // shuffle things around.
                    RandomUtil.Shuffle(filenames);

                    return filenames;
                }, new ExecutionDataflowBlockOptions { CancellationToken = cancellationToken, MaxDegreeOfParallelism = Environment.ProcessorCount });

            shuffleBlock.LinkTo(filePathTargetBlock, new DataflowLinkOptions { PropagateCompletion = true });

            var batcher = new BatchBlock<AnnotatedPath>(2048, new GroupingDataflowBlockOptions { CancellationToken = cancellationToken });

            batcher.LinkTo(shuffleBlock, new DataflowLinkOptions
            {
                PropagateCompletion = true
            });

            return PostAllFilePathsAsync(paths, filePredicate, batcher, cancellationToken);
        }
Example #39
    public void Configure(string collectorName, XElement configElement, ISystemMetricsService systemMetrics)
    {
      _completionTask = new Task(() => IsActive = false);
      _log = SuperCheapIOC.Resolve<ILog>();
      _systemMetrics = systemMetrics;

      var config = new LibratoBackendConfiguration(
          email: configElement.Attribute("email").Value,
          token: configElement.Attribute("token").Value,
          numRetries: configElement.ToInt("numRetries"),
          retryDelay: Utility.ConvertToTimespan(configElement.Attribute("retryDelay").Value),
          postTimeout: Utility.ConvertToTimespan(configElement.Attribute("postTimeout").Value),
          maxBatchSize: configElement.ToInt("maxBatchSize"),
          countersAsGauges: configElement.ToBoolean("countersAsGauges")
        );
      
      _config = config;
      _source = collectorName;
      _serviceVersion = Assembly.GetEntryAssembly().GetName().Version.ToString();

      _preprocessorBlock = new ActionBlock<Bucket>(bucket => ProcessBucket(bucket), Utility.UnboundedExecution());
      _batchBlock = new BatchBlock<LibratoMetric>(_config.MaxBatchSize);
      _outputBlock = new ActionBlock<LibratoMetric[]>(lines => PostToLibrato(lines), Utility.OneAtATimeExecution());
      _batchBlock.LinkTo(_outputBlock);

      _client = new RestClient(LIBRATO_API_URL);
      _client.Authenticator = new HttpBasicAuthenticator(_config.Email, _config.Token);
      _client.Timeout = (int)_config.PostTimeout.TotalMilliseconds;

      _retryPolicy = new RetryPolicy<LibratoErrorDetectionStrategy>(_config.NumRetries);
      _retryPolicy.Retrying += (sender, args) =>
      {
        _log.Warn(String.Format("Retry {0} failed. Trying again. Delay {1}, Error: {2}", args.CurrentRetryCount, args.Delay, args.LastException.Message), args.LastException);
        _systemMetrics.LogCount("backends.librato.retry");
      };
      _retryStrategy = new Incremental(_config.NumRetries, _config.RetryDelay, TimeSpan.FromSeconds(2));
      IsActive = true;
    }
Example #40
        public async Task TestLinkToOptions()
        {
            const int Messages = 2;
            foreach (bool append in DataflowTestHelpers.BooleanValues)
            {
                var bb = new BatchBlock<int>(1);
                var values = new int[Messages][];
                var targets = new ActionBlock<int[]>[Messages];
                for (int i = 0; i < Messages; i++)
                {
                    int slot = i;
                    targets[i] = new ActionBlock<int[]>(item => values[slot] = item);
                    bb.LinkTo(targets[i], new DataflowLinkOptions { MaxMessages = 1, Append = append });
                }
                bb.PostRange(0, Messages);
                bb.Complete();
                await bb.Completion;

                for (int i = 0; i < Messages; i++)
                {
                    targets[i].Complete();
                    await targets[i].Completion;
                    Assert.Equal(
                        expected: append ? i : Messages - i - 1,
                        actual: values[i][0]);
                }
            }
        }
Example #41
        public async Task TestPrecancellation()
        {
            var b = new BatchBlock<int>(42, new GroupingDataflowBlockOptions { 
                CancellationToken = new CancellationToken(canceled: true), MaxNumberOfGroups = 1 
            });

            Assert.Equal(expected: 42, actual: b.BatchSize);
            Assert.NotNull(b.LinkTo(DataflowBlock.NullTarget<int[]>()));
            Assert.False(b.Post(42));
            Task<bool> t = b.SendAsync(42);
            Assert.True(t.IsCompleted);
            Assert.False(t.Result);
            int[] ignoredValue;
            IList<int[]> ignoredValues;
            Assert.False(b.TryReceive(out ignoredValue));
            Assert.False(b.TryReceiveAll(out ignoredValues));
            Assert.Equal(expected: 0, actual: b.OutputCount);
            Assert.NotNull(b.Completion);
            b.Complete(); // verify doesn't throw

            await Assert.ThrowsAnyAsync<OperationCanceledException>(() => b.Completion);
        }
Example #42
        private BatchBlock<AudioChunk> GetAudioBufferBlock(string token)
        {
            return _audioBufferBlocks.GetOrAdd(token, s =>
              {
            var batchBlock = new BatchBlock<AudioChunk>(BATCH_CHUNKS_SIZE, new GroupingDataflowBlockOptions
            {
              MaxMessagesPerTask = 1
            });

            var mergeChunksToWavBufferBlock = GetMergeChunksToWavBufferBlock(token);
            var saveToWaveFileBlock = GetSaveToWaveFileBlock();

            batchBlock.LinkTo(mergeChunksToWavBufferBlock);
            mergeChunksToWavBufferBlock.LinkTo(saveToWaveFileBlock, buffer => true);

            //batchBlock.LinkTo(new ActionBlock<AudioChunk[]>(batchOfChunks =>
            //{
            //  _logger.Debug("Receive batch of chunks in action block - Token: {0} Total length: {1}", token, batchOfChunks.Sum(x => x.Length));

            //  SaveChunksToWaveFile(batchOfChunks);
            //},
            //new ExecutionDataflowBlockOptions
            //{
            //  MaxDegreeOfParallelism = 1
            //}), chunks => true);

            return batchBlock;
              });
        }
Example #43
        private async void StartRefreshStationsAsync()
        {
            IsStationWorkerRunning = true;
            while (refreshingPool.Count > 0)
            {
                await Task.Delay(15000).ConfigureAwait(false);
                if (CrossConnectivity.Current.IsConnected)
                {
                    BatchBlock<Station> batchBlock = new BatchBlock<Station>(5);
                    var actionBlock = new ActionBlock<Station[]>(
                        async stations =>
                        {
                            foreach (var station in stations)
                            {
                                if (await station.Contract.RefreshAsync(station))
                                {
                                    if (station.IsUiRefreshNeeded)
                                    {
                                        StationRefreshed?.Invoke(station, EventArgs.Empty);
                                    }
                                }
                            }
                        },
                        new ExecutionDataflowBlockOptions
                        {
                            MaxDegreeOfParallelism = 5
                        });

                    batchBlock.LinkTo(actionBlock, new DataflowLinkOptions { PropagateCompletion = true });

                    foreach (var station in refreshingPool)
                    {
                        await batchBlock.SendAsync(station); // wait asynchronously for the batch block to accept the item
                    }

                    batchBlock.Complete();
                    // Wait (without blocking the thread) up to 15 seconds for the pipeline to drain before the next pass.
                    await Task.WhenAny(actionBlock.Completion, Task.Delay(15000)).ConfigureAwait(false);

                }

            }
            IsStationWorkerRunning = false;
        }
Example #45
        public async Task BatchGreedyToAction()
        {
            var b = new BatchBlock<int>(1);
            int completedCount = 0;
            var c = new ActionBlock<int[]>(i => completedCount++);
            b.LinkTo(c, new DataflowLinkOptions { PropagateCompletion = true });

            b.PostRange(0, Iterations);
            b.Complete();

            await c.Completion;
            Assert.Equal(expected: Iterations / b.BatchSize, actual: completedCount);
        }
Example #46
        public void RunBatchBlockConformanceTests()
        {
            bool localPassed;
            // Greedy batching
            {
                localPassed = true;
                const int NUM_MESSAGES = 1;
                const int BATCH_SIZE = 1;

                var batch = new BatchBlock<int>(BATCH_SIZE);
                for (int i = 0; i < NUM_MESSAGES * BATCH_SIZE; i++) batch.Post(i);
                for (int i = 0; i < NUM_MESSAGES; i++)
                {
                    int[] result = batch.Receive();
                    localPassed &= result.Length == BATCH_SIZE;
                    for (int j = 0; j < result.Length - 1; j++)
                    {
                        localPassed &= (result[j] + 1 == result[j + 1]);
                    }
                }

                Assert.True(localPassed, string.Format("{0}: Greedy batching", localPassed ? "Success" : "Failure"));
            }

            // Non-greedy batching with BATCH_SIZE sources used repeatedly
            {
                localPassed = true;
                const int NUM_MESSAGES = 1;
                const int BATCH_SIZE = 1;

                var batch = new BatchBlock<int>(BATCH_SIZE, new GroupingDataflowBlockOptions { Greedy = false });
                var buffers = Enumerable.Range(0, BATCH_SIZE).Select(_ => new BufferBlock<int>()).ToList();
                foreach (var buffer in buffers) buffer.LinkTo(batch);

                int prevSum = -1;
                for (int i = 0; i < NUM_MESSAGES; i++)
                {
                    for (int j = 0; j < BATCH_SIZE; j++) buffers[j].Post(i);
                    int sum = batch.Receive().Sum();
                    localPassed &= (sum > prevSum);
                    prevSum = sum;
                }

                Assert.True(localPassed, string.Format("{0}: Non-greedy batching with BATCH_SIZE sources used repeatedly", localPassed ? "Success" : "Failure"));
            }

            // Non-greedy batching with BATCH_SIZE * NUM_MESSAGES sources
            {
                localPassed = true;
                const int NUM_MESSAGES = 1;
                const int BATCH_SIZE = 2;

                var batch = new BatchBlock<int>(BATCH_SIZE, new GroupingDataflowBlockOptions { Greedy = false });
                var buffers = Enumerable.Range(0, BATCH_SIZE * NUM_MESSAGES).Select(_ => new BufferBlock<int>()).ToList();
                foreach (var buffer in buffers)
                {
                    buffer.LinkTo(batch);
                    buffer.Post(1);
                }

                for (int i = 0; i < NUM_MESSAGES; i++)
                {
                    localPassed &= batch.Receive().Sum() == BATCH_SIZE;
                }

                Assert.True(localPassed, string.Format("{0}: Non-greedy batching with N*M sources", localPassed ? "Success" : "Failure"));
            }

            // Non-greedy batching with missed messages
            {
                localPassed = true;
                const int BATCH_SIZE = 2;

                var batch = new BatchBlock<int>(BATCH_SIZE, new GroupingDataflowBlockOptions { Greedy = false });
                var buffers = Enumerable.Range(0, BATCH_SIZE - 1).Select(_ => new BufferBlock<int>()).ToList();
                using (var ce = new CountdownEvent(BATCH_SIZE - 1))
                {
                    foreach (var buffer in buffers)
                    {
                        buffer.LinkTo(batch);
                        buffer.LinkTo(new ActionBlock<int>(i => ce.Signal()));
                        buffer.Post(42);
                    }
                    ce.Wait();
                }

                buffers = Enumerable.Range(0, BATCH_SIZE).Select(_ => new BufferBlock<int>()).ToList();
                foreach (var buffer in buffers)
                {
                    buffer.LinkTo(batch);
                    buffer.Post(42);
                    buffer.Complete();
                }

                localPassed &= Task.WaitAll(buffers.Select(b => b.Completion).ToArray(), 2000);

                Assert.True(localPassed, string.Format("{0}: Non-greedy batching with missed messages", localPassed ? "Success" : "Failure"));
            }

            // Test using a precanceled token
            {
                localPassed = true;
                try
                {
                    var cts = new CancellationTokenSource();
                    cts.Cancel();
                    var dbo = new GroupingDataflowBlockOptions { CancellationToken = cts.Token, MaxNumberOfGroups = 1 };
                    var b = new BatchBlock<int>(42, dbo);

                    int[] ignoredValue;
                    IList<int[]> ignoredValues;
                    localPassed &= b.BatchSize == 42;
                    localPassed &= b.LinkTo(new ActionBlock<int[]>(delegate { })) != null;
                    localPassed &= b.SendAsync(42).Result == false;
                    localPassed &= b.TryReceiveAll(out ignoredValues) == false;
                    localPassed &= b.Post(42) == false;
                    localPassed &= b.OutputCount == 0;
                    localPassed &= b.TryReceive(out ignoredValue) == false;
                    localPassed &= b.Completion != null;
                    b.Complete();
                }
                catch (Exception)
                {
                    localPassed = false;
                }

                Assert.True(localPassed, string.Format("{0}: Precanceled tokens work correctly", localPassed ? "Success" : "Failure"));
            }

            // Test completing block while still items buffered
            {
                localPassed = true;
                var b = new BatchBlock<int>(5);
                b.Post(1);
                b.Post(2);
                b.Post(3);
                b.Complete();
                localPassed &= b.Receive().Length == 3;
                Assert.True(localPassed, string.Format("{0}: Makes batches of remaining items", localPassed ? "Success" : "Failure"));
            }
        }
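        The last case above relies on Complete() flushing the remaining items as a short final batch; TriggerBatch performs the same flush on demand without completing the block. A minimal sketch:

        var batch = new BatchBlock<int>(5);
        batch.Post(1);
        batch.Post(2);
        batch.TriggerBatch();              // emits a short batch of the 2 buffered items immediately
        int[] partial = batch.Receive();   // partial.Length == 2; the block remains usable afterwards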
Example #47
        static async Task<long> skynetTpl( long num, long size, long div )
        {
            BatchBlock<long> source = new BatchBlock<long>( 1024 );

            long sum = 0;
            ActionBlock<long[]> actAggregate = new ActionBlock<long[]>( vals => sum += vals.Sum(),
                new ExecutionDataflowBlockOptions() { MaxDegreeOfParallelism = 1, SingleProducerConstrained = true } );

            source.LinkTo( actAggregate, new DataflowLinkOptions() { PropagateCompletion = true } );

            skynetTplRecursion( source, num, size, div );
            source.Complete();

            await actAggregate.Completion;

            return sum;
        }
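        skynetTplRecursion is not included in this sample. A minimal sketch, assuming the usual skynet-benchmark shape in which a range of length size starting at num is split into div sub-ranges until single values are posted to the source block:

        // Hypothetical recursive producer; the original is not shown in this sample.
        static void skynetTplRecursion( ITargetBlock<long> target, long num, long size, long div )
        {
            if ( size == 1 )
            {
                target.Post( num );   // leaf: post the single value to the BatchBlock
                return;
            }

            long subSize = size / div;
            for ( long i = 0; i < div; i++ )
            {
                skynetTplRecursion( target, num + i * subSize, subSize, div );
            }
        }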
Example #48
        public void Init()
        {
            _massMail = new MassMail(Config.BlockSize, Config.UserAgent, Config.ConnectionString, Config.Mode);
            _templateCache = new ConcurrentDictionary<long, Lazy<Template>>();
            _attachmentCache = new ConcurrentDictionary<long, Lazy<Attach>>();
            _dkimSignerCache = new ConcurrentDictionary<string, DkimSigner>();
            _domailKeySignerCache = new ConcurrentDictionary<string, DomainKeySigner>();

            //Get all private keys
            GetDkimSigners();

            //*** Create pipeline ***
            //Create TransformBlock that gets table of client data and make a list of objects from them.
            _parseXmlDataBlock = new TransformBlock<DataTable, List<Mail>>(sendData => ParseXmlData(sendData),
                new ExecutionDataflowBlockOptions
                {
                    MaxDegreeOfParallelism = Config.ParseXmlMaxdop,
                    BoundedCapacity = Config.ParseXmlBufferSize
                });
            //Create TransformBlock that gets a list of client objects, send them email, and stores result in DataTable.
            _sendEmailsBlock = new TransformBlock<List<Mail>, DataTable>(mails => SendEmails(_massMail, mails),
                new ExecutionDataflowBlockOptions
                {
                    MaxDegreeOfParallelism = Config.SendEmailsMaxdop,
                    BoundedCapacity = Config.SendEmailsMaxdop
                });
            //Create BatchBlock that holds several DataTable and then propagates them out as an array.
            _batchResultBlock = new BatchBlock<DataTable>(Config.BatchSize,
                new GroupingDataflowBlockOptions
                {
                    BoundedCapacity = Config.BatchSize
                });
            //Create ActionBlock that writes result into DB
            _writeResultsBlock = new ActionBlock<DataTable[]>(results => WriteResults(_massMail, results),
                new ExecutionDataflowBlockOptions
                {
                    BoundedCapacity = 1
                });

            //*** Build pipeline ***
            // POST --> _parseXmlDataBlock --> _sendEmailsBlock --> _batchResultBlock --> _writeResultsBlock
            _parseXmlDataBlock.LinkTo(_sendEmailsBlock);
            _sendEmailsBlock.LinkTo(_batchResultBlock);
            _batchResultBlock.LinkTo(_writeResultsBlock);

            _parseXmlDataBlock.Completion.ContinueWith(t =>
            {
                if (t.IsFaulted) ((IDataflowBlock)_sendEmailsBlock).Fault(t.Exception);
                else _sendEmailsBlock.Complete();
            });
            _sendEmailsBlock.Completion.ContinueWith(t =>
            {
                if (t.IsFaulted) ((IDataflowBlock)_batchResultBlock).Fault(t.Exception);
                else _batchResultBlock.Complete();
            });
            _batchResultBlock.Completion.ContinueWith(t =>
            {
                if (t.IsFaulted) ((IDataflowBlock)_writeResultsBlock).Fault(t.Exception);
                else _writeResultsBlock.Complete();
            });
        }
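        The three ContinueWith continuations above hand-roll what DataflowLinkOptions.PropagateCompletion provides. A sketch (not the author's code) of the equivalent wiring, which forwards both Complete() and faults from each block to the next:

        // Alternative wiring: PropagateCompletion makes manual ContinueWith chaining unnecessary.
        var linkOptions = new DataflowLinkOptions { PropagateCompletion = true };
        _parseXmlDataBlock.LinkTo(_sendEmailsBlock, linkOptions);
        _sendEmailsBlock.LinkTo(_batchResultBlock, linkOptions);
        _batchResultBlock.LinkTo(_writeResultsBlock, linkOptions);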
Example #49
        private static bool TestTriggerBatchRacingWithSendAsync(bool greedy)
        {
            bool passed = true;
            const int batchSize = 2;
            const int iterations = 1;
            const int waitTimeout = 100;
            var dbo = new GroupingDataflowBlockOptions { Greedy = greedy };

            for (int iter = 0; iter < iterations; iter++)
            {
                bool localPassed = true;
                var sendAsyncTasks = new Task<bool>[batchSize - 1];
                Task<bool> lastSendAsyncTask = null;
                var racerReady = new ManualResetEventSlim();
                var racerDone = new ManualResetEventSlim();
                int[] output1 = null;
                int[] output2 = null;

                // Blocks
                var batch = new BatchBlock<int>(batchSize, dbo);
                var terminator = new ActionBlock<int[]>(x => { if (output1 == null) output1 = x; else output2 = x; });
                batch.LinkTo(terminator);

                // Queue up batchSize-1 input items
                for (int i = 0; i < batchSize - 1; i++) sendAsyncTasks[i] = batch.SendAsync(i);
                var racer = Task.Factory.StartNew(() =>
                                    {
                                        racerReady.Set();
                                        lastSendAsyncTask = batch.SendAsync(batchSize - 1);
                                        racerDone.Set();
                                    });

                // Wait for the racer to get ready and trigger
                localPassed &= (racerReady.Wait(waitTimeout));
                batch.TriggerBatch();
                Assert.True(localPassed, "The racer task FAILED to start.");

                // Wait for the SendAsync tasks to complete
                localPassed &= Task.WaitAll(sendAsyncTasks, waitTimeout);
                Assert.True(localPassed, "SendAsync tasks FAILED to complete");

                // Wait for a batch to be produced
                if (localPassed)
                {
                    localPassed &= SpinWait.SpinUntil(() => output1 != null, waitTimeout);
                    Assert.True(localPassed, "FAILED to produce a batch");
                }

                if (localPassed && output1.Length < batchSize)
                {
                    // If the produced batch is not full, we'll trigger one more and count the items.
                    // However, we need to make sure the last message has been offered. Otherwise this 
                    // trigger will have no effect.
                    racerDone.Wait(waitTimeout);
                    batch.TriggerBatch();

                    if (localPassed)
                    {
                        // Wait for the last SendAsync task to complete
                        localPassed &= SpinWait.SpinUntil(() => lastSendAsyncTask != null, waitTimeout);
                        localPassed &= lastSendAsyncTask.Wait(waitTimeout);
                        Assert.True(localPassed, "The last SendAsync task FAILED to complete");
                    }

                    // Wait for a second batch to be produced
                    if (localPassed)
                    {
                        localPassed &= SpinWait.SpinUntil(() => output2 != null, waitTimeout);
                        Assert.True(localPassed, "FAILED to produce a second batch");
                    }

                    //Verify the total number of input items propagated
                    if (localPassed)
                    {
                        localPassed &= output1.Length + output2.Length == batchSize;
                        Assert.True(localPassed, string.Format("FAILED to propagate {0} input items. count1={1}, count2={2}",
                                                                            batchSize, output1.Length, output2.Length));
                    }
                }

                passed &= localPassed;
                if (!localPassed)
                {
                    Assert.True(localPassed, string.Format("Iteration={0}", iter));
                    Assert.True(localPassed, string.Format("Count1={0}", output1 == null ? "null" : output1.Length.ToString()));
                    Assert.True(localPassed, string.Format("Count2={0}", output2 == null ? "null" : output2.Length.ToString()));
                    break;
                }
            }

            return passed;
        }
Example #50
        private static bool TestTriggerBatchRacingWithComplete(bool greedy)
        {
            bool passed = true;
            const int batchSize = 2;
            const int iterations = 1;
            const int waitTimeout = 100;
            var dbo = new GroupingDataflowBlockOptions { Greedy = greedy };

            for (int iter = 0; iter < iterations; iter++)
            {
                bool localPassed = true;
                var sendAsyncTasks = new Task<bool>[batchSize - 1];
                var racerReady = new ManualResetEventSlim();
                int[] output1 = null;
                int[] output2 = null;

                // Blocks
                var batch = new BatchBlock<int>(batchSize, dbo);
                var terminator = new ActionBlock<int[]>(x => { if (output1 == null) output1 = x; else output2 = x; });
                batch.LinkTo(terminator);

                // Queue up batchSize-1 input items
                for (int i = 0; i < batchSize - 1; i++) sendAsyncTasks[i] = batch.SendAsync(i);
                var racer = Task.Factory.StartNew(() =>
                {
                    racerReady.Set();
                    batch.Complete();
                });

                // Wait for the racer to get ready and trigger
                localPassed &= racerReady.Wait(waitTimeout);
                batch.TriggerBatch();
                Assert.True(localPassed, "The racer task FAILED to start.");

                if (localPassed)
                {
                    // Wait for the SendAsync tasks to complete
                    localPassed &= Task.WaitAll(sendAsyncTasks, waitTimeout);
                    Assert.True(localPassed, "SendAsync tasks FAILED to complete");
                }

                // Do this verification only in greedy mode, because non-greedy is non-deterministic
                if (greedy)
                {
                    // Wait for a batch to be produced
                    if (localPassed)
                    {
                        localPassed &= SpinWait.SpinUntil(() => output1 != null, waitTimeout);
                        Assert.True(localPassed, "FAILED to produce a batch");
                    }

                    if (localPassed)
                    {
                        //Verify the number of input items propagated
                        localPassed &= output1.Length == batchSize - 1;
                        Assert.True(localPassed, string.Format("FAILED to propagate {0} input items. count1={1}",
                                                                            batchSize - 1, output1.Length));
                    }
                }

                // Wait for the block to complete
                if (localPassed)
                {
                    localPassed &= batch.Completion.Wait(waitTimeout);
                    Assert.True(localPassed, "The block FAILED to complete");
                }

                // There should never be a second batch produced
                if (localPassed)
                {
                    localPassed &= output2 == null;
                    Assert.True(localPassed, "FAILED not to produce a second batch");
                }

                passed &= localPassed;
                if (!localPassed)
                {
                    Assert.True(localPassed, string.Format("Iteration={0}", iter));
                    Assert.True(localPassed, string.Format("Count1={0}", output1 == null ? "null" : output1.Length.ToString()));
                    break;
                }
            }

            return passed;
        }