private void CompressionWorker(BlockingCollection<Block> toCompress, BlockingCollection<Block> toWrite, int columns, OrderedWaiter waiter, ExceptionMarshaller exMarshaller)
{
    // Worker loop: consume raw blocks from toCompress, compress each into a pooled
    // stream, and hand the compressed block to toWrite. When deterministic block
    // order is in effect, the waiter serializes the enqueue into toWrite by the
    // key (columns * BlockIndex + ColumnIndex).
    Contracts.AssertValue(exMarshaller);
    try
    {
        _host.AssertValue(toCompress);
        _host.AssertValue(toWrite);
        _host.Assert(columns > 0);
        // A waiter is provided exactly when deterministic ordering is requested.
        _host.Assert(_deterministicBlockOrder == (waiter != null));

        foreach (var block in toCompress.GetConsumingEnumerable(exMarshaller.Token))
        {
            var compressed = _memPool.Get();
            int rawLength;
            using (Stream compressor = _compression.CompressStream(compressed))
            {
                var raw = block.BlockData;
                rawLength = (int)raw.Length;
                bool gotBuffer = raw.TryGetBuffer(out ArraySegment<byte> segment);
                Contracts.Assert(gotBuffer);
                compressor.Write(segment.Array, segment.Offset, segment.Count);
                // The uncompressed stream is done with — return it to the pool for reuse.
                _memPool.Return(ref raw);
            }

            if (_deterministicBlockOrder)
            {
                // Block until it is this (block, column) pair's turn to publish.
                waiter.Wait((long)columns * block.BlockIndex + block.ColumnIndex, exMarshaller.Token);
            }
            toWrite.Add(new Block(compressed, block.ColumnIndex, block.BlockIndex, rawLength), exMarshaller.Token);
            if (_deterministicBlockOrder)
            {
                waiter.Increment();
            }
        }
    }
    catch (Exception ex)
    {
        // Marshal the failure to the coordinating thread instead of letting
        // this worker tear the process down.
        exMarshaller.Set("compressing", ex);
    }
}
private void Parse(int tid)
{
    // Worker loop for parser thread <tid>. Threads cooperate round-robin: thread
    // tid handles global blocks tid, tid + _threads.Length, tid + 2 * _threads.Length, ...
    // For each block:
    //   * When it is our turn, grab a batch of lines (reads are serialized by _waiterReading).
    //   * Parse the lines into rows (unordered, runs in parallel across threads).
    //   * When it is our turn, publish the batch (publishes are serialized by _waiterPublish).
    long blk = tid;   // Global, monotonically increasing block sequence number.
    int iblk = tid;   // Block index within the circular buffer, kept in [0, _blockCount).
    // BUGFIX: was "iblk < _blockCount - 3", which disagrees with the invariant this
    // loop maintains and re-asserts at the bottom (0 <= iblk && iblk < _blockCount)
    // and would fire spuriously for legitimate thread ids.
    Contracts.Assert(iblk < _blockCount);
    var helper = _curs._parser.CreateHelper(_rows.Stats, _curs._srcNeeded);
    while (!_done)
    {
        // Algorithm:
        // * When it is our turn, grab a block of lines.
        // * Parse rows.
        // * When it is our turn, enqueue the batch.

        // When it is our turn, read the lines and signal the next worker that it is ok to read.
        LineBatch lines;
        _waiterReading.Wait(blk);
        if (_done)
        {
            return;
        }
        try
        {
            lines = _reader.GetBatch();
        }
        finally
        {
            // Always release the next reader, even if GetBatch throws.
            _waiterReading.Increment();
        }
        Contracts.Assert(lines.Exception == null);
        if (lines.Infos == null || _done)
        {
            return;
        }

        // Parse the lines into rows.
        Contracts.Assert(lines.Infos.Length <= BlockSize);
        var batch = new RowBatch(iblk * BlockSize, iblk * BlockSize + lines.Infos.Length, lines.Total);
        int irow = batch.IrowMin;
        foreach (var info in lines.Infos)
        {
            Contracts.Assert(info.Line > 0);
            Contracts.AssertNonEmpty(info.Text);
            if (_done)
            {
                return;
            }
            _curs._parser.ParseRow(_rows, irow, helper, _curs._active, lines.Path, info.Line, info.Text);
            irow++;
        }
        Contracts.Assert(irow == batch.IrowLim);
        if (_done)
        {
            return;
        }

        // When it is our turn, publish the rows and signal the next worker that it is ok to publish.
        _waiterPublish.Wait(blk);
        if (_done)
        {
            return;
        }
        // TryAdd with a timeout so a shutdown request (_done) is noticed even
        // when the consumer has stalled and the queue stays full.
        while (!_queue.TryAdd(batch, TimeOut))
        {
            if (_done)
            {
                return;
            }
        }
        _waiterPublish.Increment();

        // Advance to this thread's next block: blk grows without bound while
        // iblk wraps around the circular buffer of _blockCount blocks.
        blk += _threads.Length;
        iblk += _threads.Length;
        if (iblk >= _blockCount)
        {
            iblk -= _blockCount;
        }
        Contracts.Assert(0 <= iblk && iblk < _blockCount);
    }
}