public void AddWorkItem_ItemsProcessed(int totItems, int totThreads, int maxQueuedItems)
{
    MockWorker mw = new MockWorker();
    CancellationTokenSource ts = new CancellationTokenSource();
    List<MockWorkOut> doneWork = new List<MockWorkOut>();
    var cfg = GetConfig(totThreads, maxQueuedItems);
    using (FIFOWorker<MockWorkIn, MockWorkOut> fifo = new FIFOWorker<MockWorkIn, MockWorkOut>(cfg, mw.DoMockWork_Simple))
    {
        foreach (int inputIx in Enumerable.Range(1, totItems))
            foreach (var outItem in fifo.AddWorkItem(new MockWorkIn(inputIx), ts.Token))
                doneWork.Add(outItem);
        // drain any items still queued or in flight
        foreach (var outItem in fifo.Flush(ts.Token))
            doneWork.Add(outItem);
    }
    // outputs must come back complete and in FIFO order, regardless of thread count
    Assert.AreEqual(Enumerable.Range(1, totItems), doneWork.Select(f => f.originalInputItem.ix));
    Assert.AreEqual(Enumerable.Range(1, totItems), mw.doneWork.Where(f => !f.Item1).OrderBy(f => f.Item2.ix).Select(f => f.Item2.ix));
}
public async Task FlushAsync(CancellationToken token = default)
{
    await CompleteBatch(true, token).ConfigureAwait(false);
    foreach (var ms in fifow.Flush(token))
        await BatchToStreamAsync(ms, token).ConfigureAwait(false);
}
public async IAsyncEnumerable<Frame<T>> DeserializeAsync(Stream stream, [EnumeratorCancellation] CancellationToken token = default)
{
    Interlocked.Increment(ref ParallelGatekeeperSingleton.wrapperDepth);
    try
    {
        BatchWithBufferWriters currentBatch = new BatchWithBufferWriters();
        int currentBatchTotalSize = 0;
        while (TryReadHeader(stream, out int itemLength))
        {
            if (currentBatchTotalSize + itemLength > desiredBatchSize_bytes && currentBatchTotalSize > 0)
            {
                // batch is full: send prev batch to the FIFO worker and yield any completed output
                foreach (Frame<T> t in IterateOutputBatch(fifow.AddWorkItem(currentBatch, token)))
                    yield return t;
                currentBatchTotalSize = 0;
            }
            if (currentBatchTotalSize == 0)
            {
                // start a new batch with pooled buffer writers
                currentBatch.concatenatedBodies = objPoolBufferWriterBodies.Get();
                currentBatch.lengths = objPoolBufferWriterBodyLengths.Get();
            }
            // read element from stream and add to batch
            currentBatch.lengths.GetSpan(1)[0] = itemLength;
            currentBatch.lengths.Advance(1);
            // Stream.ReadAsync may legally return fewer bytes than requested, so loop until
            // the whole element is buffered instead of failing on a short read
            Memory<byte> dest = currentBatch.concatenatedBodies.GetMemory(itemLength).Slice(0, itemLength);
            int totRead = 0;
            while (totRead < itemLength)
            {
                int read = await stream.ReadAsync(dest.Slice(totRead), token).ConfigureAwait(false);
                if (read == 0)
                    throw new StreamSerializationException($"Unexpected end of stream after {totRead} bytes. Expected {itemLength}");
                totRead += read;
            }
            currentBatch.concatenatedBodies.Advance(itemLength);
            currentBatchTotalSize += itemLength;
        }
        if (currentBatchTotalSize > 0) // send unfinished batch
        {
            foreach (Frame<T> t in IterateOutputBatch(fifow.AddWorkItem(currentBatch, token)))
                yield return t;
        }
        foreach (Frame<T> t in IterateOutputBatch(fifow.Flush(token)))
            yield return t;
    }
    finally
    {
        Interlocked.Decrement(ref ParallelGatekeeperSingleton.wrapperDepth);
    }
}
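// --- Usage sketch (illustrative, not part of the original sources) ---
// Consuming the streaming deserializer above with await foreach. Frames come out
// in input order even though batches are decoded in parallel, because the FIFO
// worker releases results in submission order. The helper below assumes only the
// IAsyncEnumerable<Frame<T>> contract shown above; it needs System.Collections.Generic,
// System.Threading and System.Threading.Tasks.
public static async Task<long> CountFramesAsync<T>(IAsyncEnumerable<Frame<T>> frames, CancellationToken token)
{
    long count = 0;
    await foreach (Frame<T> frame in frames.WithCancellation(token))
        count++;
    return count;
}
// e.g.: long n = await CountFramesAsync(deserializer.DeserializeAsync(stream), cts.Token);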
public async Task FlushAsync(CancellationToken token = default)
{
    Interlocked.Increment(ref ParallelGatekeeperSingleton.wrapperDepth);
    try
    {
        await CompleteBatch(true, token).ConfigureAwait(false);
        foreach (var bw in fifow.Flush(token))
            await BatchToStreamAsync(bw, token).ConfigureAwait(false);
    }
    finally
    {
        Interlocked.Decrement(ref ParallelGatekeeperSingleton.wrapperDepth);
    }
}
public async IAsyncEnumerable<T> DeserializeAsync(Stream stream, [EnumeratorCancellation] CancellationToken token = default)
{
    BatchIn currentBatch = new BatchIn();
    int currentBatchTotalSize = 0;
    int currentBatchTotalElements = 0;
    while (TryReadHeader(stream, out int itemLength))
    {
        if (currentBatchTotalSize + itemLength > desiredBatchSize_bytes && currentBatchTotalElements > 0)
        {
            // batch is full: send prev batch to the FIFO worker and yield any completed output
            foreach (T t in IterateOutputBatch(fifow.AddWorkItem(currentBatch, token)))
                yield return t;
            currentBatchTotalSize = 0;
            currentBatchTotalElements = 0;
        }
        if (currentBatchTotalElements == 0)
        {
            // start a new batch with pooled buffers
            currentBatch.concatenatedBodies = objPoolBufferWriterSerializedBatch.Get();
            currentBatch.Lengths = objPoolList.Get();
        }
        // read element from stream and add to batch
        await BufferFromStreamAsync(stream, currentBatch.concatenatedBodies, itemLength, token).ConfigureAwait(false);
        currentBatchTotalSize += itemLength;
        currentBatchTotalElements++;
        currentBatch.Lengths.Add(itemLength);
    }
    if (currentBatchTotalElements > 0) // send unfinished batch
    {
        foreach (T t in IterateOutputBatch(fifow.AddWorkItem(currentBatch, token)))
            yield return t;
    }
    foreach (T t in IterateOutputBatch(fifow.Flush(token)))
        yield return t;
}
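// --- Sketch of the helper referenced above (an assumption, not the repo's actual code) ---
// BufferFromStreamAsync is called above but not shown in this section. A minimal
// implementation must loop, because Stream.ReadAsync may return fewer bytes than
// requested (e.g. on network streams); requires System.Buffers for IBufferWriter<byte>.
private static async Task BufferFromStreamAsync(Stream stream, IBufferWriter<byte> writer, int itemLength, CancellationToken token)
{
    Memory<byte> dest = writer.GetMemory(itemLength).Slice(0, itemLength);
    int totRead = 0;
    while (totRead < itemLength)
    {
        int read = await stream.ReadAsync(dest.Slice(totRead), token).ConfigureAwait(false);
        if (read == 0)
            throw new StreamSerializationException($"Unexpected end of stream after {totRead} bytes. Expected {itemLength}");
        totRead += read;
    }
    writer.Advance(itemLength); // commit the fully-read element to the pooled buffer
}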
public void Serialize(ref MessagePackWriter writer, TFrameList value, MessagePackSerializerOptions options)
{
    if (value == null)
    {
        writer.WriteNil();
        return;
    }
    Interlocked.Increment(ref ParallelGatekeeperSingleton.wrapperDepth);
    try
    {
        FrameFormatterSerializationOptions frameOptions = options.GetOptionParams();
        // fall back to single-threaded serialization when parallelism is disabled,
        // or when this formatter is nested inside another parallel wrapper
        if (frameOptions.FIFOWorkerConfig.MaxConcurrentTasks < 1 || ParallelGatekeeperSingleton.wrapperDepth != 1)
        {
            SerializeSynchronous(ref writer, value, options);
            return;
        }
        int count = value.Count;
        writer.WriteArrayHeader(count);
        BatchSizeEstimator batchEstimator = new BatchSizeEstimator(frameOptions.BatchSizeEstimatorConfig);
        IMessagePackFormatter<T> formatterT = options.Resolver.GetFormatterWithVerify<T>();
        bool isOldSpec = writer.OldSpec;

        // worker body: serializes one batch of frames into pooled buffers on a FIFOWorker thread
        BatchWithBufferWriters ProcessItems(ArraySegment<Frame<T>> batch, CancellationToken token)
        {
            BatchWithBufferWriters batchOut = new BatchWithBufferWriters();
            batchOut.concatenatedBodies = objPoolBufferWriterBodies.Get();
            batchOut.lengths = objPoolBufferWriterBodyLengths.Get();
            MessagePackWriter writerBody = new MessagePackWriter(batchOut.concatenatedBodies)
            {
                OldSpec = isOldSpec,
                CancellationToken = token
            };
            var spanIn = batch.AsSpan();
            int prevWrittenBytesCount = 0;
            int sumLen = 0;
            for (int ix = 0; ix < spanIn.Length; ix++)
            {
                formatterT.Serialize(ref writerBody, spanIn[ix], options);
                writerBody.Flush();
                int currWrittenBytesCount = batchOut.concatenatedBodies.WrittenCount;
                int objLen = currWrittenBytesCount - prevWrittenBytesCount;
                prevWrittenBytesCount = currWrittenBytesCount;
                batchOut.lengths.GetSpan(1)[0] = objLen;
                batchOut.lengths.Advance(1);
                sumLen += objLen;
            }
            if (spanIn.Length > 0)
                batchEstimator.UpdateEstimate((float)sumLen / (float)spanIn.Length); // update with the batch average instead of per item; not exact, but faster
            return batchOut;
        }

        ListFrameWrapper valueWrapper = GetTFrameListWrapper(value);
        Frame<T>[] valueArray = valueWrapper.AsFrameArray();
        using (var fifow = new FIFOWorker<ArraySegment<Frame<T>>, BatchWithBufferWriters>(frameOptions.FIFOWorkerConfig, ProcessItems))
        {
            int i = 0;
            while (i < count)
            {
                int batchSize = Math.Min(count - i, batchEstimator.RecomendedBatchSize);
                if (batchSize <= 0)
                    throw new StreamSerializationException($"Invalid batch sequence length: {batchSize}");
                ArraySegment<Frame<T>> sourceSegment = new ArraySegment<Frame<T>>(valueArray, i, batchSize);
                foreach (BatchWithBufferWriters batchOutput in fifow.AddWorkItem(sourceSegment, writer.CancellationToken))
                    BatchToStream(ref writer, batchOutput);
                i += batchSize;
            }
            // drain remaining batches, still in submission order
            foreach (BatchWithBufferWriters batchOutput in fifow.Flush(writer.CancellationToken))
                BatchToStream(ref writer, batchOutput);
        }
    }
    finally
    {
        Interlocked.Decrement(ref ParallelGatekeeperSingleton.wrapperDepth);
    }
}
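// --- Usage sketch (illustrative, not from the original sources) ---
// Driving this formatter through MessagePack-CSharp. The resolver wiring is an
// assumption: any resolver that returns this formatter for the frame-list type
// will do, and the repo's own options helper should be preferred if one exists.
public static byte[] SerializeFramesExample<T>(List<Frame<T>> frames, IFormatterResolver frameResolver)
{
    MessagePackSerializerOptions options = MessagePackSerializerOptions.Standard.WithResolver(frameResolver);
    // batches are serialized on FIFOWorker threads and written to the output in
    // their original order; nested calls fall back to the synchronous path via
    // the wrapperDepth guard above
    return MessagePackSerializer.Serialize(frames, options);
}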
public void AddWorkItem_OneItemProcessed()
{
    MockWorker mw = new MockWorker();
    CancellationTokenSource ts = new CancellationTokenSource();
    List<MockWorkOut> doneWork = new List<MockWorkOut>();
    int inputIx = 1;
    int totThreads = 1;
    int maxQueuedItems = 1;
    var cfg = GetConfig(totThreads, maxQueuedItems);
    using (FIFOWorker<MockWorkIn, MockWorkOut> fifo = new FIFOWorker<MockWorkIn, MockWorkOut>(cfg, mw.DoMockWork_Simple))
    {
        foreach (var outItem in fifo.AddWorkItem(new MockWorkIn(inputIx), ts.Token).Concat(fifo.Flush(ts.Token)))
            doneWork.Add(outItem);
    }
    // a single item must come out exactly once, unchanged
    Assert.AreEqual(1, doneWork.Count);
    Assert.AreEqual(inputIx, doneWork.First().originalInputItem.ix);
    Assert.AreEqual(1, mw.doneWork.Count);
    Assert.AreEqual(false, mw.doneWork.First().Item1);
    Assert.AreEqual(inputIx, mw.doneWork.First().Item2.ix);
    Assert.AreEqual(inputIx, mw.doneWork.First().Item3.originalInputItem.ix);
}