/// <summary>
/// Populates <paramref name="result"/> with the currently readable region and the
/// completion/cancellation flags, then transitions the reader into the "reading" state.
/// </summary>
/// <param name="result">Receives the result flags and the buffer span (start/end/length).</param>
private void GetResult(ref ReadResult result)
{
    // Report writer completion to the caller; the name suggests IsCompletedOrThrow
    // rethrows a faulted completion rather than returning — TODO confirm.
    if (_writerCompletion.IsCompletedOrThrow())
    {
        result.ResultFlags |= ResultFlags.Completed;
    }

    // Observe (and thereby consume) a pending cancellation so it is reported exactly once.
    if (_readerAwaitable.ObserveCancelation())
    {
        result.ResultFlags |= ResultFlags.Cancelled;
    }

    // No need to read end if there is no head
    var head = _readHead;
    if (head != null)
    {
        // Reading commit head shared with writer
        // NOTE(review): _commitHead/_commitHeadIndex are read here without an explicit
        // lock — presumably the caller guarantees the commit head is stable at this
        // point; confirm against the call sites.
        result.ResultBuffer.BufferEnd.Segment = _commitHead;
        result.ResultBuffer.BufferEnd.Index = _commitHeadIndex;
        result.ResultBuffer.BufferLength = ReadCursor.GetLength(head, head.Start, _commitHead, _commitHeadIndex);
        result.ResultBuffer.BufferStart.Segment = head;
        result.ResultBuffer.BufferStart.Index = head.Start;
    }

    // Enter the reading state; ExceptionResource.AlreadyReading is used for the
    // failure case when a read is already in progress.
    _readingState.Begin(ExceptionResource.AlreadyReading);
}
// Reading
/// <summary>
/// Advances the reader past the consumed region: releases fully-consumed segments,
/// relieves writer backpressure when the buffered length drops below the low watermark,
/// and resets the reader awaitable once everything produced so far has been examined.
/// </summary>
/// <param name="consumed">Cursor up to which data was consumed; segments before it are disposed.</param>
/// <param name="examined">Cursor up to which data was examined; controls the awaitable reset.</param>
void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;
    int consumedBytes = 0;

    if (!consumed.IsDefault)
    {
        // Release the chain from the current read head up to (but not including)
        // the consumed segment; the consumed segment becomes the new read head.
        returnStart = _readHead;
        consumedBytes = ReadCursor.GetLength(returnStart, returnStart.Start, consumed.Segment, consumed.Index);
        returnEnd = consumed.Segment;
        _readHead = consumed.Segment;
        _readHead.Start = consumed.Index;
    }

    // Reading commit head shared with writer
    Action continuation = null;
    lock (_sync)
    {
        var oldLength = _length;
        _length -= consumedBytes;

        // Crossing the low watermark downward resumes a writer blocked on backpressure.
        if (oldLength >= _maximumSizeLow && _length < _maximumSizeLow)
        {
            continuation = _writerAwaitable.Complete();
        }

        // We reset the awaitable to not completed if we've examined everything the producer produced so far
        if (examined.Segment == _commitHead && examined.Index == _commitHeadIndex && !_writerCompletion.IsCompleted)
        {
            // Guard against a deadlock in which the reader would wait for new data
            // while the writer is simultaneously waiting on backpressure.
            if (!_writerAwaitable.IsCompleted)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.BackpressureDeadlock);
            }
            _readerAwaitable.Reset();
        }

        // Leave the reading state while still holding the lock.
        _readingState.End(ExceptionResource.NoReadToComplete);
    }

    // Dispose released segments outside the lock to keep the critical section short.
    while (returnStart != null && returnStart != returnEnd)
    {
        var returnSegment = returnStart;
        returnStart = returnStart.Next;
        returnSegment.Dispose();
    }

    // Schedule the writer continuation (no-op when continuation is null).
    TrySchedule(_writerScheduler, continuation);
}
// Reading
/// <summary>
/// Advances the reader past the consumed region: releases fully-consumed segments,
/// relieves writer backpressure when the buffered length drops below the low watermark,
/// and resets the reader awaitable once everything produced so far has been examined.
/// </summary>
/// <param name="consumed">Cursor up to which data was consumed; segments before it are disposed.</param>
/// <param name="examined">Cursor up to which data was examined; controls the awaitable reset.</param>
void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;
    int consumedBytes = 0;

    if (!consumed.IsDefault)
    {
        // Measure the consumed region before re-pointing the read head at the
        // consumed segment; everything before it will be disposed below.
        consumedBytes = ReadCursor.GetLength(_readHead, _readHead.Start, consumed.Segment, consumed.Index);
        returnStart = _readHead;
        returnEnd = consumed.Segment;
        _readHead = consumed.Segment;
        _readHead.Start = consumed.Index;
    }

    // Reading commit head shared with writer
    Action continuation = null;
    lock (_sync)
    {
        var oldLength = _length;
        _length -= consumedBytes;

        // Crossing the low watermark downward resumes a writer blocked on backpressure.
        if (oldLength >= _maximumSizeLow && _length < _maximumSizeLow)
        {
            continuation = _writerAwaitable.Complete();
        }

        // We reset the awaitable to not completed if we've consumed everything the producer produced so far
        if (examined.Segment == _commitHead && examined.Index == _commitHeadIndex && !_writerCompletion.IsCompleted)
        {
            _readerAwaitable.Reset();
        }
    }

    // Dispose released segments outside the lock to keep the critical section short.
    while (returnStart != null && returnStart != returnEnd)
    {
        var returnSegment = returnStart;
        returnStart = returnStart.Next;
        returnSegment.Dispose();
    }

    // CompareExchange not required as its setting to current value if test fails
    // NOTE(review): the comment above looks like a leftover from an Interlocked-based
    // version of this method, and _readingState.End runs outside the _sync lock here —
    // confirm both are intentional.
    _readingState.End(ExceptionResource.NoReadToComplete);

    // Schedule the writer continuation (no-op when continuation is null).
    TrySchedule(_writerScheduler, continuation);
}
/// <summary>
/// Initializes a readable buffer spanning the region between the two cursors,
/// deriving the buffered length from them.
/// </summary>
/// <param name="start">Cursor at the beginning of the readable region.</param>
/// <param name="end">Cursor at the end of the readable region.</param>
internal ReadableBuffer(ReadCursor start, ReadCursor end)
{
    // Compute the span length up front, then capture the boundary cursors.
    BufferLength = start.GetLength(end);
    BufferStart = start;
    BufferEnd = end;
}
// Reading
/// <summary>
/// Advances the reader past the consumed region under the pipe lock: validates the
/// cursor, relieves writer backpressure when the length drops below the low watermark,
/// recycles fully-consumed segments back to the pool, and resets the reader awaitable
/// once everything produced so far has been examined.
/// </summary>
/// <param name="consumed">Cursor up to which data was consumed; segments before it are recycled.</param>
/// <param name="examined">Cursor up to which data was examined; controls the awaitable reset.</param>
void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;

    // Reading commit head shared with writer
    Action continuation = null;
    lock (_sync)
    {
        // Snapshot this before any mutation below can move the commit head.
        var examinedEverything = examined.Segment == _commitHead && examined.Index == _commitHeadIndex;

        if (!consumed.IsDefault)
        {
            // A non-default consumed cursor with no read head means the caller is
            // advancing past data this pipe never handed out.
            if (_readHead == null)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.AdvanceToInvalidCursor);
                return;
            }

            var consumedSegment = consumed.GetSegment();

            returnStart = _readHead;
            returnEnd = consumedSegment;

            // Check if we crossed _maximumSizeLow and complete backpressure
            var consumedBytes = ReadCursor.GetLength(returnStart, returnStart.Start, consumedSegment, consumed.Index);
            var oldLength = _length;
            _length -= consumedBytes;
            if (oldLength >= _maximumSizeLow && _length < _maximumSizeLow)
            {
                continuation = _writerAwaitable.Complete();
            }

            // Check if we consumed entire last segment
            // if we are going to return commit head
            // we need to check that there is no writing operation that
            // might be using tailspace
            if (consumed.Index == returnEnd.End && !(_commitHead == returnEnd && _writingState.IsActive))
            {
                var nextBlock = returnEnd.Next;
                if (_commitHead == returnEnd)
                {
                    // The commit head itself is being released; move it (and its
                    // index) forward to the next segment, if any.
                    _commitHead = nextBlock;
                    _commitHeadIndex = nextBlock?.Start ?? 0;
                }

                _readHead = nextBlock;
                returnEnd = nextBlock;
            }
            else
            {
                // Partially-consumed segment: keep it as the read head, advanced
                // to the consumed index.
                _readHead = consumedSegment;
                _readHead.Start = consumed.Index;
            }
        }

        // We reset the awaitable to not completed if we've examined everything the producer produced so far
        // but only if writer is not completed yet
        if (examinedEverything && !_writerCompletion.IsCompleted)
        {
            // Prevent deadlock where reader awaits new data and writer await backpressure
            if (!_writerAwaitable.IsCompleted)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.BackpressureDeadlock);
            }
            _readerAwaitable.Reset();
        }

        _readingState.End(ExceptionResource.NoReadToComplete);

        // Recycle the released segments while still holding the lock.
        // NOTE(review): returnStart.Next is read AFTER ResetMemory/ReturnSegmentUnsynchronized —
        // confirm neither clears the Next link, otherwise this traversal is broken.
        while (returnStart != null && returnStart != returnEnd)
        {
            returnStart.ResetMemory();
            ReturnSegmentUnsynchronized(returnStart);
            returnStart = returnStart.Next;
        }
    }

    // Schedule the writer continuation (no-op when continuation is null).
    TrySchedule(_writerScheduler, continuation);
}
// Reading
/// <summary>
/// Advances the reader past the consumed region: disposes fully-consumed segments,
/// clears an observed cancellation, resets the reader callback when everything
/// produced so far has been consumed, leaves the consuming state, and resumes the
/// writer when the buffered length is below the low watermark.
/// </summary>
/// <param name="consumed">Cursor up to which data was consumed; segments before it are disposed.</param>
/// <param name="examined">Cursor up to which data was examined; controls the callback reset.</param>
void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;
    int consumedBytes = 0;

    if (!consumed.IsDefault)
    {
        // Measure the consumed region before re-pointing the read head at the
        // consumed segment; everything before it is disposed below.
        consumedBytes = ReadCursor.GetLength(_readHead, _readHead.Start, consumed.Segment, consumed.Index);
        returnStart = _readHead;
        returnEnd = consumed.Segment;
        _readHead = consumed.Segment;
        _readHead.Start = consumed.Index;
    }

    bool resumeWriter;

    // Reading commit head shared with writer
    lock (_sync)
    {
        _length -= consumedBytes;
        // Decide under the lock whether the writer should be resumed; the actual
        // resume happens after the lock is released.
        resumeWriter = _length < _maximumSizeLow;

        // Change the state from observed -> not cancelled. We only want to reset the cancelled state if it was observed
        Interlocked.CompareExchange(ref _cancelledState, CancelledState.NotCancelled, CancelledState.CancellationObserved);

        var consumedEverything = examined.Segment == _commitHead &&
            examined.Index == _commitHeadIndex &&
            Reading.Status == TaskStatus.WaitingForActivation;

        // We reset the awaitable to not completed if
        // 1. We've consumed everything the producer produced so far
        // 2. Cancellation wasn't requested
        if (consumedEverything && _cancelledState != CancelledState.CancellationRequested)
        {
            Reset(ref _readerCallback);
        }
    }

    // Dispose released segments outside the lock to keep the critical section short.
    while (returnStart != null && returnStart != returnEnd)
    {
        var returnSegment = returnStart;
        returnStart = returnStart.Next;
        returnSegment.Dispose();
    }

#if CONSUMING_LOCATION_TRACKING
    _consumingLocation = null;
#endif
    // CompareExchange not required as its setting to current value if test fails
    // Leave the consuming state; throws if no consume was in progress.
    if (Interlocked.Exchange(ref _consumingState, State.NotActive) != State.Active)
    {
        ThrowHelper.ThrowInvalidOperationException(ExceptionResource.NotConsumingToComplete);
    }

    if (resumeWriter)
    {
        Resume(_writerScheduler, ref _writerCallback);
    }
}
/// <summary>
/// Initializes a readable buffer over the region between the two cursors,
/// deriving the buffered length from them.
/// </summary>
/// <param name="start">Cursor at the beginning of the readable region.</param>
/// <param name="end">Cursor at the end of the readable region.</param>
internal ReadableBuffer(ReadCursor start, ReadCursor end)
{
    // Compute the span length up front, then capture the boundary cursors.
    _length = start.GetLength(end);
    _start = start;
    _end = end;
}