public async Task StartPipelineAsync(CancellationToken token)
{
    // Runs the Rx.NET-based processing pipeline:
    //   bus reader -> file writer -> decoder -> multicast to four subscribers
    //   (batched DB persistence, 1s stats feed, 30s stats feed, real-time publish).
    // Returns once the token is cancelled and every subscriber has drained.
    _decoder.LoadSensorConfigs();

    // Step 1 - Create our producer as a cold observable
    var source = _dataBusReader.StartConsuming(token, TimeSpan.FromMilliseconds(100));

    // Step 2 - Add file writing and decoding stages to our cold observable pipeline.
    // Concat runs one async file write at a time, preserving message order.
    var writeStream = source.ObserveOn(ThreadPoolScheduler.Instance)
        .Select(x => Observable.FromAsync(async () =>
        {
            await _messageFileWriter.WriteAsync(x);
            return x;
        }))
        .Concat();

    var decodedStream = writeStream
        .Select(x => _decoder.Decode(x).ToObservable())
        .Concat();

    // Step 3 - Create a hot observable that acts as a broadcast
    // and allows multiple subscribers without duplicating the work of the producer
    var multiCastStream = Observable.Publish(decodedStream);

    // Step 4 - Create our subscriptions.
    // Each completion flag is also set in the OnError handler: a subscription that
    // terminates with an error never calls OnCompleted, and without this the
    // shutdown wait loop below would spin forever (fixes a shutdown hang).

    // Buffers in 1 second periods and performs up to 4 concurrent db writes.
    var dbPersistenceComplete = false;
    var dbPersistenceSub = multiCastStream
        .Buffer(TimeSpan.FromSeconds(1))
        .Where(messages => messages.Any())
        .Select(messages => Observable.FromAsync(async () => await _dbPersister.PersistAsync(messages)))
        .Merge(4) // up to 4 concurrent executions of PersistAsync
        .Subscribe(
            (Unit u) => { },
            (Exception ex) =>
            {
                dbPersistenceComplete = true; // terminal event: unblock shutdown
                Console.WriteLine("DB Persistence error: " + ex);
            },
            () =>
            {
                dbPersistenceComplete = true;
                Console.WriteLine("DB Persistence complete!");
            });

    // Buffers in 1 second periods and processes each batch sequentially.
    bool statsFeed1Complete = false;
    var oneSecondStatsFeedSub = multiCastStream
        .Buffer(TimeSpan.FromSeconds(1))
        .Where(messages => messages.Any())
        .Select(messages => Observable.FromAsync(async () => await _statsFeedPublisher.PublishAsync(messages, TimeSpan.FromSeconds(1))))
        .Concat() // one batch at a time
        .Subscribe(
            (Unit u) => { },
            (Exception ex) =>
            {
                statsFeed1Complete = true; // terminal event: unblock shutdown
                Console.WriteLine("1 Second Stats Feed Error: " + ex);
            },
            () =>
            {
                statsFeed1Complete = true;
                Console.WriteLine("1 Second Stats Feed Complete!");
            });

    // Buffers in 30 second periods and processes each batch sequentially.
    bool statsFeed30Complete = false;
    var thirtySecondStatsFeedSub = multiCastStream
        .Buffer(TimeSpan.FromSeconds(30))
        .Where(messages => messages.Any())
        .Select(messages => Observable.FromAsync(async () => await _statsFeedPublisher.PublishAsync(messages, TimeSpan.FromSeconds(30))))
        .Concat() // one batch at a time
        .Subscribe(
            (Unit u) => { },
            (Exception ex) =>
            {
                statsFeed30Complete = true; // terminal event: unblock shutdown
                Console.WriteLine("30 Second Stats Feed Error: " + ex);
            },
            () =>
            {
                statsFeed30Complete = true;
                // BUGFIX: previously logged "30 Second Stats Feed Error Complete!"
                Console.WriteLine("30 Second Stats Feed Complete!");
            });

    // Processes one message at a time, sequentially and in order.
    bool realTimePubComplete = false;
    var realTimePubSub = multiCastStream
        .Select(message => Observable.FromAsync(async () => await _realTimeFeedPublisher.PublishAsync(message)))
        .Concat() // one message at a time
        .Subscribe(
            (Unit u) => { },
            (Exception ex) =>
            {
                realTimePubComplete = true; // terminal event: unblock shutdown
                Console.WriteLine("Real-time Pub Error: " + ex);
            },
            () =>
            {
                realTimePubComplete = true;
                Console.WriteLine("Real-time Pub Complete!");
            });

    // Step 5 - Start the producer. Keep the connection handle so it can be
    // disposed during shutdown (it was previously discarded and leaked).
    var connection = multiCastStream.Connect();

    // Step 6 - Keep things going until the CancellationToken gets cancelled.
    // Deliberately not passing the token to Task.Delay: cancellation here is the
    // normal exit path, not an exceptional one.
    while (!token.IsCancellationRequested)
    {
        await Task.Delay(500);
    }

    // Step 7 - Safe shutdown of the pipeline:
    // wait for all subscriptions to finish their in-flight work.
    while (!realTimePubComplete || !dbPersistenceComplete || !statsFeed1Complete || !statsFeed30Complete)
    {
        await Task.Delay(500);
    }

    Console.WriteLine("All subscribers complete!");

    // dispose of all subscriptions and the multicast connection
    dbPersistenceSub.Dispose();
    oneSecondStatsFeedSub.Dispose();
    thirtySecondStatsFeedSub.Dispose();
    realTimePubSub.Dispose();
    connection.Dispose();

    // safely clean up any other resources, for example, ZeroMQ
}
public async Task StartPipelineAsync(CancellationToken token)
{
    // Runs the TPL Dataflow processing pipeline:
    //   bus reader -> file writer -> decoder -> broadcast to a real-time feed,
    //   a 1s batch branch (stats feed + DB persistence) and a 30s batch branch.
    // Returns once the token is cancelled and all leaf blocks have drained.
    _decoder.LoadSensorConfigs();

    // Step 1 - Configure the pipeline

    // make sure our Complete call gets propagated throughout the whole pipeline
    var linkOptions = new DataflowLinkOptions { PropagateCompletion = true };

    // Block configurations. BoundedCapacity gives each stage a bounded input
    // buffer so backpressure reaches the producer (which runs in LoadShed mode).
    var largeBufferOptions = new ExecutionDataflowBlockOptions() { BoundedCapacity = 600000 };
    var smallBufferOptions = new ExecutionDataflowBlockOptions() { BoundedCapacity = 1000 };
    var realTimeBufferOptions = new ExecutionDataflowBlockOptions() { BoundedCapacity = 6000 };
    var parallelizedOptions = new ExecutionDataflowBlockOptions() { BoundedCapacity = 1000, MaxDegreeOfParallelism = 4 };
    // NOTE(review): a GroupingDataflowBlockOptions local with BoundedCapacity = 1000
    // was declared here but never passed to the BatchBlocks — and could not be used
    // as-is, since a BatchBlock's capacity must cover its batch size (3000 / 90000
    // below). The unused local has been removed; the BatchBlocks stay unbounded.

    // define each block
    var writeRawMessageBlock = new TransformBlock<RawBusMessage, RawBusMessage>(
        async (RawBusMessage msg) =>
        {
            await _messageFileWriter.WriteAsync(msg);
            return msg;
        }, largeBufferOptions);

    var decoderBlock = new TransformManyBlock<RawBusMessage, DecodedMessage>(
        (RawBusMessage msg) => _decoder.Decode(msg), largeBufferOptions);

    var broadcast = new BroadcastBlock<DecodedMessage>(msg => msg);

    var realTimeFeedBlock = new ActionBlock<DecodedMessage>(
        async (DecodedMessage msg) => await _realTimeFeedPublisher.PublishAsync(msg),
        realTimeBufferOptions);

    var oneSecondBatchBlock = new BatchBlock<DecodedMessage>(3000);
    var thirtySecondBatchBlock = new BatchBlock<DecodedMessage>(90000);

    var batchBroadcastBlock = new BroadcastBlock<DecodedMessage[]>(msg => msg);

    var oneSecondStatsFeedBlock = new ActionBlock<DecodedMessage[]>(
        async (DecodedMessage[] messages) => await _statsFeedPublisher.PublishAsync(messages.ToList(), TimeSpan.FromSeconds(1)),
        smallBufferOptions);

    // BUGFIX: parallelizedOptions was declared but never used; apply it here so DB
    // writes run up to 4 at a time, matching the Rx version of this pipeline
    // ("up to 4 concurrent executions of PersistAsync").
    var dbPersistenceBlock = new ActionBlock<DecodedMessage[]>(
        async (DecodedMessage[] messages) => await _dbPersister.PersistAsync(messages.ToList()),
        parallelizedOptions);

    var thirtySecondStatsFeedBlock = new ActionBlock<DecodedMessage[]>(
        async (DecodedMessage[] messages) => await _statsFeedPublisher.PublishAsync(messages.ToList(), TimeSpan.FromSeconds(30)),
        smallBufferOptions);

    // link the blocks together
    writeRawMessageBlock.LinkTo(decoderBlock, linkOptions);
    decoderBlock.LinkTo(broadcast, linkOptions);
    broadcast.LinkTo(realTimeFeedBlock, linkOptions);
    broadcast.LinkTo(oneSecondBatchBlock, linkOptions);
    broadcast.LinkTo(thirtySecondBatchBlock, linkOptions);
    oneSecondBatchBlock.LinkTo(batchBroadcastBlock, linkOptions);
    batchBroadcastBlock.LinkTo(oneSecondStatsFeedBlock, linkOptions);
    batchBroadcastBlock.LinkTo(dbPersistenceBlock, linkOptions);
    thirtySecondBatchBlock.LinkTo(thirtySecondStatsFeedBlock, linkOptions);

    // Step 2 - Start consuming the machine bus interface (the producer)
    var consumerTask = _dataBusReader.StartConsuming(writeRawMessageBlock, token,
        TimeSpan.FromMilliseconds(1000), FlowControlMode.LoadShed);

    // Step 3 - Keep going until the CancellationToken is cancelled or a leaf block
    // reaches the completed state, either due to a fault or completion of the pipeline.
    while (!token.IsCancellationRequested
        && !realTimeFeedBlock.Completion.IsCompleted
        && !oneSecondStatsFeedBlock.Completion.IsCompleted
        && !dbPersistenceBlock.Completion.IsCompleted
        && !thirtySecondStatsFeedBlock.Completion.IsCompleted)
    {
        await Task.Delay(500);
    }

    // Step 4 - the CancellationToken has been cancelled and our producer has stopped
    // producing. Call Complete on the first block; PropagateCompletion on the links
    // pushes completion down the whole pipeline.
    writeRawMessageBlock.Complete();

    // wait for all leaf blocks (and the producer) to finish processing their data;
    // a faulted block's Completion will rethrow its exception here.
    await Task.WhenAll(
        realTimeFeedBlock.Completion,
        oneSecondStatsFeedBlock.Completion,
        dbPersistenceBlock.Completion,
        thirtySecondStatsFeedBlock.Completion,
        consumerTask);

    // clean up any other resources like ZeroMQ for example
}