public AsyncReadContext<QueueItem> Read(string sourceUri, CompressionMode compressionMode)
{
    IProducerConsumerCollection<QueueItem> queue = new ConcurrentQueue<QueueItem>();
    var sourceStreamClosedSrc = new CancellationTokenSource();
    var exceptionSrc = new CancellationTokenSource();
    var emptyInputEvent = new ManualResetEventSlim(false);
    var inputOverflowEvent = new ManualResetEventSlim(true);

    var readerThread = new Thread(() =>
    {
        try
        {
            int order = 0;
            using (var sourceStream = _sourceService.OpenRead(sourceUri))
            {
                while (true)
                {
                    // Block while the consumers signal that the input queue is full.
                    inputOverflowEvent.Wait();

                    var chunkLength = _streamUtilsService.GetChunkLength(sourceStream, compressionMode, _compressionSettings.ChunkSize);
                    if (chunkLength == 0)
                    {
                        sourceStreamClosedSrc.Cancel();
                        return;
                    }

                    var chunk = _streamUtilsService.GetNextChunk(sourceStream, chunkLength);
                    if (chunk.Length == 0)
                    {
                        sourceStreamClosedSrc.Cancel();
                        return;
                    }

                    // The ConcurrentQueue implementation never returns false, so the return value is skipped.
                    queue.TryAdd(new QueueItem(++order, chunk));
                    emptyInputEvent.Set();
                }
            }
        }
        catch (Exception e)
        {
            _statusUpdateService.Error(e.Message);
            exceptionSrc.Cancel();
        }
    });

    readerThread.Start();

    return new AsyncReadContext<QueueItem>(queue, readerThread, sourceStreamClosedSrc, exceptionSrc, emptyInputEvent, inputOverflowEvent);
}
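
// A minimal sketch of the AsyncReadContext<T> container assumed above. The constructor
// signature and the Queue, EmptyInputEvent, and InputOverflowEvent members follow from
// how Read and CompressChunk use the type; the remaining property names are guesses.
public class AsyncReadContext<T>
{
    public AsyncReadContext(
        IProducerConsumerCollection<T> queue,
        Thread readerThread,
        CancellationTokenSource sourceStreamClosedSrc,
        CancellationTokenSource exceptionSrc,
        ManualResetEventSlim emptyInputEvent,
        ManualResetEventSlim inputOverflowEvent)
    {
        Queue = queue;
        ReaderThread = readerThread;
        SourceStreamClosedSrc = sourceStreamClosedSrc;
        ExceptionSrc = exceptionSrc;
        EmptyInputEvent = emptyInputEvent;
        InputOverflowEvent = inputOverflowEvent;
    }

    public IProducerConsumerCollection<T> Queue { get; }
    public Thread ReaderThread { get; }
    public CancellationTokenSource SourceStreamClosedSrc { get; }
    public CancellationTokenSource ExceptionSrc { get; }
    public ManualResetEventSlim EmptyInputEvent { get; }
    public ManualResetEventSlim InputOverflowEvent { get; }
}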
private void CompressChunk(
    CompressionMode compressionMode,
    AsyncReadContext<QueueItem> asyncReadContext,
    ConcurrentDictionary<int, byte[]> resultPieces,
    CancellationTokenSource exceptionSrc,
    CancellationToken[] cancellationTokens,
    ManualResetEventSlim outputOverflowEvent)
{
    try
    {
        // Keep consuming until every stage has signalled completion and the input queue is drained.
        while (!cancellationTokens.Any(ct => ct.IsCancellationRequested) || asyncReadContext.Queue.Count > 0)
        {
            // Block while the writer signals that too many finished pieces are pending.
            outputOverflowEvent.Wait();

            if (!asyncReadContext.Queue.TryTake(out var queueItem))
            {
                // The queue is empty: wait briefly for the reader to produce the next chunk.
                asyncReadContext.EmptyInputEvent.Reset();
                asyncReadContext.EmptyInputEvent.Wait(TimeSpan.FromMilliseconds(10));
                continue;
            }

            byte[] data;
            switch (compressionMode)
            {
                case CompressionMode.Compress:
                    data = _compressionService.Compress(queueItem.Data);
                    break;
                case CompressionMode.Decompress:
                    data = _compressionService.Decompress(queueItem.Data);
                    break;
                default:
                    throw new ApplicationException($"Handling of compression mode '{compressionMode}' is not implemented");
            }

            // Orders are unique, so the update delegate never runs; it only satisfies the API.
            resultPieces.AddOrUpdate(queueItem.Order, data, (order, existing) => existing);

            ControlInputOverflow(asyncReadContext.Queue, asyncReadContext.InputOverflowEvent);
        }
    }
    catch (Exception e)
    {
        _statusUpdateService.Error(e.Message);
        exceptionSrc.Cancel();
    }
}
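
// A possible shape for the ControlInputOverflow helper called above (hypothetical: only
// its name and arguments appear in this code). The idea is backpressure: pause the
// reader thread while the input queue holds more chunks than some configured bound, and
// let it resume once the consumers have drained the backlog. MaxQueueSize is an assumed
// setting, by analogy with _compressionSettings.ChunkSize used in Read.
private void ControlInputOverflow(IProducerConsumerCollection<QueueItem> queue, ManualResetEventSlim inputOverflowEvent)
{
    if (queue.Count > _compressionSettings.MaxQueueSize)
    {
        inputOverflowEvent.Reset(); // the reader blocks on inputOverflowEvent.Wait()
    }
    else
    {
        inputOverflowEvent.Set();   // the reader may continue producing chunks
    }
}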
private void WriteInternal(
    string destinationUri,
    CompressionMode compressionMode,
    ConcurrentDictionary<int, byte[]> resultPieces,
    CancellationToken[] cancellationTokens,
    CancellationTokenSource exceptionSrc,
    ManualResetEventSlim outputOverflowEvent)
{
    try
    {
        var index = 1;
        using (var outputStream = _destinationStreamService.OpenWrite(destinationUri))
        {
            while (true)
            {
                // Stop once every stage has signalled completion (or failure) and nothing is left to write.
                if (cancellationTokens.Any(ct => ct.IsCancellationRequested) && resultPieces.Count == 0)
                {
                    return;
                }

                // Write pieces strictly in order: only the piece with the next expected index is flushed.
                if (resultPieces.TryRemove(index, out var dataBytes))
                {
                    WriteMeta(compressionMode, dataBytes, outputStream);
                    outputStream.Write(dataBytes);
                    index++;
                    continue;
                }

                ControlOutputOverflow(resultPieces, outputOverflowEvent);

                // I don't know why yet, but performance is noticeably better with this short sleep:
                // for enwik9 (10^9 bytes) ~7150 ms with it vs ~8000 ms without,
                // for enwik8 (10^8 bytes) ~850 ms with it vs ~920 ms without.
                // This needs additional investigation.
                Thread.Sleep(1);
            }
        }
    }
    catch (Exception e)
    {
        _statusUpdateService.Error(e.Message);
        exceptionSrc.Cancel();
    }
}
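
// A rough sketch of how the three stages above could be wired together (hypothetical:
// the composing method is not shown in this code, and the name ProcessInternal, the
// worker count, and the exact token set passed to each stage are all assumptions).
// Read starts the producer thread, several worker threads run CompressChunk, and the
// calling thread writes the ordered result.
private void ProcessInternal(string sourceUri, string destinationUri, CompressionMode compressionMode)
{
    var asyncReadContext = Read(sourceUri, compressionMode);
    var resultPieces = new ConcurrentDictionary<int, byte[]>();
    var outputOverflowEvent = new ManualResetEventSlim(true);

    // Stages observe both "source exhausted" and "some stage threw" signals.
    var cancellationTokens = new[]
    {
        asyncReadContext.SourceStreamClosedSrc.Token,
        asyncReadContext.ExceptionSrc.Token
    };

    var workers = Enumerable.Range(0, Environment.ProcessorCount)
        .Select(_ => new Thread(() => CompressChunk(
            compressionMode, asyncReadContext, resultPieces,
            asyncReadContext.ExceptionSrc, cancellationTokens, outputOverflowEvent)))
        .ToList();
    workers.ForEach(w => w.Start());

    WriteInternal(destinationUri, compressionMode, resultPieces,
        cancellationTokens, asyncReadContext.ExceptionSrc, outputOverflowEvent);

    workers.ForEach(w => w.Join());
    asyncReadContext.ReaderThread.Join();
}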