/// <summary>
/// Attempts to synchronously produce a <see cref="ReadResult"/> without awaiting.
/// Returns <c>true</c> and fills <paramref name="result"/> when data (or a completed
/// awaitable) is available; returns <c>false</c> when the caller must await instead.
/// </summary>
internal bool TryRead(out ReadResult result)
{
    lock (_sync)
    {
        // Reading after the reader side has completed is a usage error.
        if (_readerCompletion.IsCompleted)
        {
            ThrowHelper.ThrowInvalidOperationException_NoReadingAllowed();
        }

        result = new ReadResult();

        bool dataAvailable = _length > 0 || _readerAwaitable.IsCompleted;
        if (dataAvailable)
        {
            GetResult(ref result);
            return true;
        }

        // A pending ReadAsync already owns the awaitable's continuation slot;
        // a second concurrent read is not allowed.
        if (_readerAwaitable.HasContinuation)
        {
            ThrowHelper.ThrowInvalidOperationException_AlreadyReading();
        }

        return false;
    }
}
/// <summary>
/// Initializes the <see cref="Pipe"/> with the specified <see cref="PipeOptions"/>.
/// </summary>
/// <param name="options">Configuration for the pipe; must not be null.</param>
public Pipe(PipeOptions options)
{
    if (options == null)
    {
        ThrowHelper.ThrowArgumentNullException(ExceptionArgument.options);
    }

    // Pooling and sizing configuration copied from the options.
    _pool = options.Pool;
    _minimumSegmentSize = options.MinimumSegmentSize;
    _pauseWriterThreshold = options.PauseWriterThreshold;
    _resumeWriterThreshold = options.ResumeWriterThreshold;

    // Fall back to inline scheduling when the caller supplied no scheduler.
    _readerScheduler = options.ReaderScheduler ?? PipeScheduler.Inline;
    _writerScheduler = options.WriterScheduler ?? PipeScheduler.Inline;

    _bufferSegmentPool = new BufferSegment[SegmentPoolSize];
    _readingState = default;
    _readerCompletion = default;
    _writerCompletion = default;

    // The reader starts out waiting for data; the writer starts out free to write.
    _readerAwaitable = new PipeAwaitable(completed: false);
    _writerAwaitable = new PipeAwaitable(completed: true);

    _reader = new DefaultPipeReader(this);
    _writer = new DefaultPipeWriter(this);
}
/// <summary>
/// Produces the <see cref="FlushResult"/> for a completed flush awaitable, folding in
/// cancellation (observed exactly once) and reader-completion state.
/// </summary>
internal FlushResult GetFlushAsyncResult()
{
    FlushResult result;
    lock (_sync)
    {
        // GetResult may only be called once the awaitable has completed.
        if (!_writerAwaitable.IsCompleted)
        {
            ThrowHelper.ThrowInvalidOperationException_GetResultNotCompleted();
        }

        result = new FlushResult();

        // Transition a canceled awaitable from "canceled" to "observed".
        if (_writerAwaitable.ObserveCancelation())
        {
            result._resultFlags |= ResultFlags.Canceled;
        }

        // A completed reader means further flushes cannot make progress.
        if (_readerCompletion.IsCompletedOrThrow())
        {
            result._resultFlags |= ResultFlags.Completed;
        }
    }
    return result;
}
/// <summary>Not supported on this stream wrapper.</summary>
/// <exception cref="NotSupportedException">Always thrown.</exception>
public override void Write(byte[] buffer, int offset, int count)
{
    ThrowHelper.ThrowNotSupportedException();
}
/// <summary>Not supported on this stream wrapper.</summary>
/// <exception cref="NotSupportedException">Always thrown.</exception>
public override void SetLength(long value)
{
    ThrowHelper.ThrowNotSupportedException();
}
/// <summary>Not supported on this stream wrapper.</summary>
/// <exception cref="NotSupportedException">Always thrown.</exception>
public override long Seek(long offset, SeekOrigin origin)
{
    ThrowHelper.ThrowNotSupportedException();
    // Unreachable: the throw helper always throws, but the compiler cannot see
    // that across the method call, so a return value is required here.
    return 0;
}
/// <summary>Not supported on this stream wrapper.</summary>
/// <exception cref="NotSupportedException">Always thrown.</exception>
public override int Read(byte[] buffer, int offset, int count)
{
    ThrowHelper.ThrowNotSupportedException();
    // Unreachable: the throw helper always throws; the return only satisfies
    // the compiler's definite-return analysis.
    return 0;
}
/// <summary>Not supported on this stream wrapper.</summary>
/// <exception cref="NotSupportedException">Always thrown.</exception>
public override void Flush()
{
    ThrowHelper.ThrowNotSupportedException();
}
/// <summary>
/// Moves the read head past the consumed portion of the buffered data, records whether the
/// caller examined everything, and returns fully-consumed segments to the segment pool.
/// </summary>
/// <param name="consumedSegment">Segment containing the consumed position; null makes this a no-op.</param>
/// <param name="consumedIndex">Index of the consumed position within <paramref name="consumedSegment"/>.</param>
/// <param name="examinedSegment">Segment containing the examined position; null makes this a no-op.</param>
/// <param name="examinedIndex">Index of the examined position within <paramref name="examinedSegment"/>.</param>
private void AdvanceTo(BufferSegment? consumedSegment, int consumedIndex, BufferSegment? examinedSegment, int examinedIndex)
{
    if (consumedSegment == null || examinedSegment == null)
    {
        return;
    }

    // Advancing with no buffered data at all means the cursor cannot belong to this reader.
    if (_readHead == null)
    {
        ThrowHelper.ThrowInvalidOperationException_AdvanceToInvalidCursor();
    }

    BufferSegment returnStart = _readHead;
    BufferSegment? returnEnd = consumedSegment;

    long consumedBytes = BufferSegment.GetLength(returnStart, _readIndex, consumedSegment, consumedIndex);

    _bufferedBytes -= consumedBytes;

    Debug.Assert(_bufferedBytes >= 0);

    _examinedEverything = false;

    if (examinedSegment == _readTail)
    {
        // If we examined everything, we force ReadAsync to actually read from the underlying stream
        // instead of returning a ReadResult from TryRead.
        _examinedEverything = examinedIndex == _readTail.End;
    }

    // Three cases here:
    // 1. All data is consumed: clear everything so we don't hold onto any excess memory.
    // 2. A segment is entirely consumed but more data remains in later segments: we may
    //    release one extra segment by setting returnEnd to the next block.
    // 3. We are in the middle of a segment: just move _readHead/_readIndex to the
    //    consumed position.
    if (_bufferedBytes == 0)
    {
        returnEnd = null;
        _readHead = null;
        _readTail = null;
        _readIndex = 0;
    }
    else if (consumedIndex == returnEnd.Length)
    {
        BufferSegment? nextBlock = returnEnd.NextSegment;
        _readHead = nextBlock;
        _readIndex = 0;
        returnEnd = nextBlock;
    }
    else
    {
        _readHead = consumedSegment;
        _readIndex = consumedIndex;
    }

    // Remove all blocks that are freed (except the last one).
    while (returnStart != returnEnd)
    {
        // Capture the link before ResetMemory, which may clear it.
        BufferSegment next = returnStart.NextSegment!;
        returnStart.ResetMemory();
        ReturnSegmentUnsynchronized(returnStart);
        returnStart = next;
    }
}
/// <summary>
/// Advances the reader to the <paramref name="consumed"/>/<paramref name="examined"/> positions:
/// releases consumed segments back to the pool, resumes a writer paused on backpressure when
/// enough bytes were freed, and re-arms the reader awaitable once everything produced so far
/// has been examined. The writer continuation, if any, is scheduled outside the lock.
/// </summary>
/// <param name="consumed">Position up to which data is consumed and may be released.</param>
/// <param name="examined">Position up to which data has been examined (but not necessarily consumed).</param>
internal void AdvanceReader(SequencePosition consumed, SequencePosition examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;

    // Writer continuation to run after the lock is released.
    Action continuation = null;

    lock (_sync)
    {
        // Examined everything == the examined position coincides with the commit head.
        var examinedEverything = false;
        if (examined.Segment == _commitHead)
        {
            examinedEverything = _commitHead != null ? examined.Index == _commitHeadIndex - _commitHead.Start : examined.Index == 0;
        }

        if (consumed.Segment != null)
        {
            if (_readHead == null)
            {
                ThrowHelper.ThrowInvalidOperationException_AdvanceToInvalidCursor();
                return; // unreachable: the throw helper always throws; keeps flow analysis happy
            }

            var consumedSegment = (BufferSegment)consumed.Segment;

            returnStart = _readHead;
            returnEnd = consumedSegment;

            // Check if we crossed the resume threshold and should complete backpressure.
            long consumedBytes = new ReadOnlySequence<byte>(returnStart, _readHeadIndex, consumedSegment, consumed.Index).Length;
            long oldLength = _length;
            _length -= consumedBytes;
            if (oldLength >= _resumeWriterThreshold && _length < _resumeWriterThreshold)
            {
                continuation = _writerAwaitable.Complete();
            }

            // Check if we consumed the entire last segment.
            // If we are going to return the commit head we need to check that there is no
            // writing operation that might be using its tail space.
            if (consumed.Index == returnEnd.Length && _writingHead != returnEnd)
            {
                BufferSegment nextBlock = returnEnd.NextSegment;
                if (_commitHead == returnEnd)
                {
                    _commitHead = nextBlock;
                    _commitHeadIndex = 0;
                }
                _readHead = nextBlock;
                _readHeadIndex = 0;
                returnEnd = nextBlock;
            }
            else
            {
                _readHead = consumedSegment;
                _readHeadIndex = consumed.Index;
            }
        }

        // Reset the awaitable to not-completed if we've examined everything the producer
        // produced so far — but only if the writer is not completed yet.
        if (examinedEverything && !_writerCompletion.IsCompleted)
        {
            // Prevent deadlock where the reader awaits new data while the writer awaits backpressure.
            if (!_writerAwaitable.IsCompleted)
            {
                ThrowHelper.ThrowInvalidOperationException_BackpressureDeadlock();
            }
            _readerAwaitable.Reset();
        }

        // NOTE(review): NextSegment is read AFTER ResetMemory()/ReturnSegmentUnsynchronized().
        // The AdvanceTo loop elsewhere in this file captures NextSegment BEFORE resetting;
        // confirm ResetMemory's contract leaves the segment link intact here.
        while (returnStart != null && returnStart != returnEnd)
        {
            returnStart.ResetMemory();
            ReturnSegmentUnsynchronized(returnStart);
            returnStart = returnStart.NextSegment;
        }

        _readingState.End();
    }

    // Run the resumed writer continuation outside the lock.
    TrySchedule(_writerScheduler, continuation);
}