/// <summary>
/// Allocates memory from the pipeline to write into.
/// </summary>
/// <param name="minimumSize">The minimum size buffer to allocate</param>
/// <returns>A <see cref="WritableBuffer"/> that can be written to.</returns>
WritableBuffer IPipeWriter.Alloc(int minimumSize)
{
    // Fail fast outside the lock: a completed writer may never allocate again.
    if (_writerCompletion.IsCompleted)
    {
        PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.NoWritingAllowed, _writerCompletion.Location);
    }

    if (minimumSize < 0)
    {
        PipelinesThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.minimumSize);
    }

    lock (_sync)
    {
        // CompareExchange not required as its setting to current value if test fails
        _writingState.Begin(ExceptionResource.AlreadyWriting);

        if (minimumSize > 0)
        {
            try
            {
                AllocateWriteHeadUnsynchronized(minimumSize);
            }
            catch (Exception)
            {
                // Allocation failed: roll the producing state back before propagating,
                // otherwise the pipe would be stuck in "already writing".
                _writingState.End(ExceptionResource.NoWriteToComplete);
                throw;
            }
        }

        _currentWriteLength = 0;
        return new WritableBuffer(this);
    }
}
// Reading
/// <summary>
/// Advances the reader past <paramref name="consumed"/> data and marks data up to
/// <paramref name="examined"/> as observed: releases writer backpressure when the
/// buffered length drops below the low watermark, resets the reader awaitable when
/// everything produced so far has been examined, and disposes fully-consumed segments.
/// </summary>
void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;

    int consumedBytes = 0;
    if (!consumed.IsDefault)
    {
        // Segments in [returnStart, returnEnd) are fully consumed and will be disposed below.
        returnStart = _readHead;
        consumedBytes = ReadCursor.GetLength(returnStart, returnStart.Start, consumed.Segment, consumed.Index);
        returnEnd = consumed.Segment;
        // Move the read head forward to the first unconsumed byte.
        _readHead = consumed.Segment;
        _readHead.Start = consumed.Index;
    }

    // Reading commit head shared with writer
    Action continuation = null;
    lock (_sync)
    {
        var oldLength = _length;
        _length -= consumedBytes;
        // Crossing _maximumSizeLow from above releases writer backpressure.
        if (oldLength >= _maximumSizeLow &&
            _length < _maximumSizeLow)
        {
            continuation = _writerAwaitable.Complete();
        }

        // We reset the awaitable to not completed if we've examined everything the producer produced so far
        if (examined.Segment == _commitHead &&
            examined.Index == _commitHeadIndex &&
            !_writerCompletion.IsCompleted)
        {
            // If the writer is still parked on backpressure while the reader is about to
            // wait for more data, both sides would wait forever — fail loudly instead.
            if (!_writerAwaitable.IsCompleted)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.BackpressureDeadlock);
            }
            _readerAwaitable.Reset();
        }

        _readingState.End(ExceptionResource.NoReadToComplete);
    }

    // Dispose consumed segments outside the lock; returnEnd (the segment holding the
    // first unconsumed byte) is intentionally not disposed.
    while (returnStart != null && returnStart != returnEnd)
    {
        var returnSegment = returnStart;
        returnStart = returnStart.Next;
        returnSegment.Dispose();
    }

    // Resume the writer (if backpressure was released) on the writer's scheduler.
    TrySchedule(_writerScheduler, continuation);
}
// Reading
/// <summary>
/// Advances the reader past <paramref name="consumed"/> data and marks data up to
/// <paramref name="examined"/> as observed: releases writer backpressure when the
/// buffered length drops below the low watermark, resets the reader awaitable when
/// everything produced so far has been examined, and disposes fully-consumed segments.
/// </summary>
void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;

    int consumedBytes = 0;
    if (!consumed.IsDefault)
    {
        consumedBytes = ReadCursor.GetLength(_readHead, _readHead.Start, consumed.Segment, consumed.Index);

        // Segments in [returnStart, returnEnd) are fully consumed and will be disposed below.
        returnStart = _readHead;
        returnEnd = consumed.Segment;
        // Move the read head forward to the first unconsumed byte.
        _readHead = consumed.Segment;
        _readHead.Start = consumed.Index;
    }

    // Reading commit head shared with writer
    Action continuation = null;
    lock (_sync)
    {
        var oldLength = _length;
        _length -= consumedBytes;
        // Crossing _maximumSizeLow from above releases writer backpressure.
        if (oldLength >= _maximumSizeLow &&
            _length < _maximumSizeLow)
        {
            continuation = _writerAwaitable.Complete();
        }

        // We reset the awaitable to not completed if we've examined everything the producer produced so far
        if (examined.Segment == _commitHead &&
            examined.Index == _commitHeadIndex &&
            !_writerCompletion.IsCompleted)
        {
            // BUGFIX: if the writer is still parked on backpressure while the reader is
            // about to wait for more data, both sides would wait forever — fail loudly.
            if (!_writerAwaitable.IsCompleted)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.BackpressureDeadlock);
            }
            _readerAwaitable.Reset();
        }

        // BUGFIX: end the read while still holding _sync so the state transition cannot
        // race with a concurrent writer observing _readingState.
        _readingState.End(ExceptionResource.NoReadToComplete);
    }

    // Dispose consumed segments outside the lock; returnEnd (the segment holding the
    // first unconsumed byte) is intentionally not disposed.
    while (returnStart != null && returnStart != returnEnd)
    {
        var returnSegment = returnStart;
        returnStart = returnStart.Next;
        returnSegment.Dispose();
    }

    // Resume the writer (if backpressure was released) on the writer's scheduler.
    TrySchedule(_writerScheduler, continuation);
}
// Reading
/// <summary>
/// Advances the reader past <paramref name="consumed"/> data and marks data up to
/// <paramref name="examined"/> as observed: returns fully-consumed segments to the
/// pool, releases writer backpressure when the buffered length drops below the low
/// watermark, and resets the reader awaitable when everything produced so far has
/// been examined.
/// </summary>
void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;

    // Reading commit head shared with writer
    Action continuation = null;
    lock (_sync)
    {
        // Capture this before _commitHead/_commitHeadIndex are potentially mutated below.
        var examinedEverything = examined.Segment == _commitHead && examined.Index == _commitHeadIndex;

        if (!consumed.IsDefault)
        {
            if (_readHead == null)
            {
                // Nothing was ever read; a non-default consumed cursor is invalid here.
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.AdvanceToInvalidCursor);
                return; // unreachable — the throw helper always throws; kept for flow analysis
            }

            var consumedSegment = consumed.GetSegment();

            // Segments in [returnStart, returnEnd) are fully consumed and returned below.
            returnStart = _readHead;
            returnEnd = consumedSegment;

            // Check if we crossed _maximumSizeLow and complete backpressure
            var consumedBytes = ReadCursor.GetLength(returnStart, returnStart.Start, consumedSegment, consumed.Index);
            var oldLength = _length;
            _length -= consumedBytes;
            if (oldLength >= _maximumSizeLow &&
                _length < _maximumSizeLow)
            {
                continuation = _writerAwaitable.Complete();
            }

            // Check if we consumed entire last segment
            // if we are going to return commit head
            // we need to check that there is no writing operation that
            // might be using tailspace
            if (consumed.Index == returnEnd.End && !(_commitHead == returnEnd && _writingState.IsActive))
            {
                var nextBlock = returnEnd.Next;
                if (_commitHead == returnEnd)
                {
                    // Commit head moves along with the read head when its segment is drained.
                    _commitHead = nextBlock;
                    _commitHeadIndex = nextBlock?.Start ?? 0;
                }

                _readHead = nextBlock;
                returnEnd = nextBlock;
            }
            else
            {
                // Partially-consumed segment stays; just advance its start offset.
                _readHead = consumedSegment;
                _readHead.Start = consumed.Index;
            }
        }

        // We reset the awaitable to not completed if we've examined everything the producer produced so far
        // but only if writer is not completed yet
        if (examinedEverything && !_writerCompletion.IsCompleted)
        {
            // Prevent deadlock where reader awaits new data and writer await backpressure
            if (!_writerAwaitable.IsCompleted)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.BackpressureDeadlock);
            }
            _readerAwaitable.Reset();
        }

        _readingState.End(ExceptionResource.NoReadToComplete);

        // Recycle fully-consumed segments (still under _sync, unlike the Dispose-based variants).
        while (returnStart != null && returnStart != returnEnd)
        {
            returnStart.ResetMemory();
            ReturnSegmentUnsynchronized(returnStart);
            // NOTE(review): reads .Next after the segment was returned to the pool —
            // assumes ReturnSegmentUnsynchronized leaves Next intact; confirm.
            returnStart = returnStart.Next;
        }
    }

    // Resume the writer (if backpressure was released) on the writer's scheduler.
    TrySchedule(_writerScheduler, continuation);
}