        // Polls the batch buffer for the next transaction batch until one arrives, the wait period expires or cancellation is requested; returns null if no batch was available in time
        public async Task<TransactionBatch> NextAsync(CancellationToken token, TimeSpan waitPeriod)
        {
            var sw = Stopwatch.StartNew();
            TransactionBatch batch = null;

            while (!_transactionBatchBuffer.TryTake(out batch) && sw.Elapsed <= waitPeriod && !token.IsCancellationRequested)
            {
                await Task.Delay(100);
            }

            return batch;
        }
        private async Task PostBatch(CancellationToken token, BlockingCollection<TransactionBatch> transactionBatchBuffer, TransactionBatch transactionBatch)
        {
            // If the buffer is full then wait until capacity is available
            // this ensures we have back pressure to prevent using up all our memory
            bool added = false;

            while (!added && !token.IsCancellationRequested)
            {
                if (transactionBatchBuffer.TryAdd(transactionBatch))
                {
                    added = true;
                }
                else
                {
                    await Task.Delay(100);
                }
            }
        }
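        // Merges the per-table change buffers into per-transaction batches, always taking the transaction with the lowest available LSN next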
        private async Task GroupTransactionsAsync(CancellationToken token,
                                                  List<TableSchema> tableSchemas,
                                                  Dictionary<string, BlockingCollection<ChangeRecord>> tableChangeBuffers,
                                                  BlockingCollection<TransactionBatch> transactionBatchBuffer,
                                                  int transactionBatchSizeLimit)
        {
            // create a "one item per table" buffer
            var currentBuffer = new Dictionary<string, ChangeRecord>();

            foreach (var tableSchema in tableSchemas)
            {
                currentBuffer.Add(tableSchema.TableName, null);
            }

            // keep grouping changes into per-transaction batches until cancellation is requested
            while (!token.IsCancellationRequested)
            {
                // Pull in the next value for any table we don't yet have a value for, or for any that previously signalled it had no data at that time
                var keys = currentBuffer.Keys.ToList();
                foreach (var tableKey in keys)
                {
                    var bufferedValue = currentBuffer[tableKey];
                    if (bufferedValue == null || bufferedValue.LsnInt == NoDataAvailable)
                    {
                        ChangeRecord change = null;
                        if (tableChangeBuffers[tableKey].TryTake(out change))
                        {
                            currentBuffer[tableKey] = change;
                        }
                    }
                }

                // no table currently has real data available, so back off briefly before polling the table buffers again
                if (currentBuffer.Values.All(x => x == null || x.LsnInt == NoDataAvailable))
                {
                    await Task.Delay(1000);

                    continue;
                }

                // identify the lowest LSN currently available in our current buffer and create a new batch for it
                var currentTransaction = GetEarliestTransactionId(currentBuffer);
                var transactionBatch   = new TransactionBatch();
                transactionBatch.Id   = currentTransaction;
                transactionBatch.Part = 1;

                // Pull data from all table buffers for this LSN
                var matchingTables = GetTablesOfTransaction(currentBuffer, currentTransaction.LsnInt);
                foreach (var tableName in matchingTables)
                {
                    // add the first change of the transaction from our current buffer
                    transactionBatch.Changes.Add(currentBuffer[tableName]);

                    // keep consuming from this table buffer until either the LSN does not match our current LSN
                    // or the table reader signals that we have reached the end of the buffer and no more items will arrive
                    var  tableBuffer = tableChangeBuffers[tableName];
                    bool consume     = true;
                    while (consume && !tableBuffer.IsCompleted)
                    {
                        ChangeRecord next = null;
                        if (tableBuffer.TryTake(out next))
                        {
                            if (next.LsnInt == currentTransaction.LsnInt)
                            {
                                transactionBatch.Changes.Add(next);

                                // if we have reached the maximum batch size then post it and create another
                                if (transactionBatch.Changes.Count > transactionBatchSizeLimit)
                                {
                                    // flag this part as belonging to a multi-part transaction before posting it
                                    transactionBatch.IsMultiPart = true;
                                    await PostBatch(token, transactionBatchBuffer, transactionBatch);

                                    var nextPart = new TransactionBatch();
                                    nextPart.Part        = transactionBatch.Part + 1;
                                    nextPart.Id          = transactionBatch.Id;
                                    nextPart.IsMultiPart = true;

                                    transactionBatch = nextPart;
                                }
                            }
                            else
                            {
                                // store the value in our current buffer for a future transaction batch
                                currentBuffer[tableName] = next;
                                consume = false;
                            }
                        }
                        else
                        {
                            // there is no data in the buffer but the buffer is not completed. This means that the table reader is slower than us. Wait a little.
                            await Task.Delay(100);
                        }
                    }
                }

                // all data has been pulled from the buffers for this transaction
                // now we post it to the TransactionBatch buffer.
                await PostBatch(token, transactionBatchBuffer, transactionBatch);
            }
        }