/// <inheritdoc />
public override void Complete(Exception exception = null)
{
    if (_isCompleted)
    {
        return;
    }

    _isCompleted = true;

    _internalTokenSource?.Dispose();

    // Walk the segment chain and release the memory held by every segment.
    for (BufferSegment current = _head; current != null;)
    {
        BufferSegment next = current.NextSegment;
        current.ResetMemory();
        current = next;
    }

    _head = null;
    _tail = null;

    // REVIEW: Do we need a leaveOpen to avoid this?
    InnerStream.Dispose();
}
private void CompletePipe()
{
    lock (_sync)
    {
        if (_disposed)
        {
            return;
        }

        _disposed = true;

        // Release every allocated segment. When _readHead is null there may
        // still be a block allocated for writing, so fall back to _commitHead.
        for (BufferSegment current = _readHead ?? _commitHead; current != null;)
        {
            BufferSegment next = current.NextSegment;
            current.ResetMemory();
            current = next;
        }

        _writingHead = null;
        _readHead = null;
        _commitHead = null;
    }
}
private void FlushInternal(bool writeToStream)
{
    // Commit any bytes buffered in the tail segment before walking the chain.
    if (_tailBytesBuffered > 0)
    {
        Debug.Assert(_tail != null);

        _tail.End += _tailBytesBuffered;
        _tailBytesBuffered = 0;
    }

    BufferSegment? current = _head;
    while (current != null)
    {
        BufferSegment completed = current;
        current = current.NextSegment;

        if (writeToStream && completed.Length > 0)
        {
#if (!NETSTANDARD2_0 && !NETFRAMEWORK)
            InnerStream.Write(completed.Memory.Span);
#else
            InnerStream.Write(completed.Memory);
#endif
        }

        completed.ResetMemory();
        ReturnSegmentUnsynchronized(completed);

        // Advance the head past the segment we just returned.
        _head = current;
    }

    if (_bytesBuffered > 0 && writeToStream)
    {
        InnerStream.Flush();
    }

    // Reset the write state only after the flush has completed.
    _head = null;
    _tail = null;
    _tailMemory = default;
    _bytesBuffered = 0;
}
private void FlushInternal()
{
    // Commit any bytes buffered in the tail segment before writing.
    if (_tailBytesBuffered > 0)
    {
        _tail.End += _tailBytesBuffered;
        _tailBytesBuffered = 0;
    }

    for (BufferSegment current = _head; current != null;)
    {
        BufferSegment completed = current;
        current = current.NextSegment;

        if (completed.Length > 0)
        {
#if netcoreapp
            InnerStream.Write(completed.Memory.Span);
#else
            InnerStream.Write(completed.Memory);
#endif
        }

        completed.ResetMemory();
        ReturnSegmentUnsynchronized(completed);

        // Advance the head past the segment we just returned.
        _head = current;
    }

    if (_bytesBuffered > 0)
    {
        InnerStream.Flush();
    }

    // Reset the write state only after the flush has completed.
    _head = null;
    _tail = null;
    _bytesBuffered = 0;
}
private bool CompleteAndGetNeedsDispose()
{
    if (_isReaderCompleted)
    {
        return false;
    }

    _isReaderCompleted = true;

    // Release the memory held by every segment in the read chain.
    for (BufferSegment? current = _readHead; current != null;)
    {
        BufferSegment? next = current.NextSegment;
        current.ResetMemory();
        current = next;
    }

    // The caller disposes the inner stream unless the user asked to keep it open.
    return !LeaveOpen;
}
/// <inheritdoc />
public override void Complete(Exception exception = null)
{
    if (_isReaderCompleted)
    {
        return;
    }

    _isReaderCompleted = true;

    // Release the memory held by every segment in the read chain.
    for (BufferSegment current = _readHead; current != null;)
    {
        BufferSegment next = current.NextSegment;
        current.ResetMemory();
        current = next;
    }

    // REVIEW: Do we need a way to avoid this (leaveOpen?)
    InnerStream.Dispose();
}
/// <inheritdoc />
public override void Complete(Exception? exception = null)
{
    if (_isReaderCompleted)
    {
        return;
    }

    _isReaderCompleted = true;

    // Release the memory held by every segment in the read chain.
    for (BufferSegment? current = _readHead; current != null;)
    {
        BufferSegment? next = current.NextSegment;
        current.ResetMemory();
        current = next;
    }

    // Only dispose the inner stream when the user did not ask to keep it open.
    if (!LeaveOpen)
    {
        InnerStream.Dispose();
    }
}
// Advances the read cursor past the consumed data and records how far the
// caller examined, returning fully-consumed segments to the pool.
private void AdvanceTo(BufferSegment? consumedSegment, int consumedIndex, BufferSegment? examinedSegment, int examinedIndex)
{
    if (consumedSegment == null || examinedSegment == null)
    {
        return;
    }

    if (_readHead == null)
    {
        ThrowHelper.ThrowInvalidOperationException_AdvanceToInvalidCursor();
    }

    BufferSegment returnStart = _readHead;
    BufferSegment? returnEnd = consumedSegment;

    long consumedBytes = BufferSegment.GetLength(returnStart, _readIndex, consumedSegment, consumedIndex);

    _bufferedBytes -= consumedBytes;

    Debug.Assert(_bufferedBytes >= 0);

    _examinedEverything = false;

    if (examinedSegment == _readTail)
    {
        // If we examined everything, we force ReadAsync to actually read from the underlying stream
        // instead of returning a ReadResult from TryRead.
        _examinedEverything = examinedIndex == _readTail.End;
    }

    // Three cases here:
    // 1. All data is consumed. If so, we clear everything so we don't hold onto any
    //    excess memory.
    // 2. A segment is entirely consumed but there is still more data in nextSegments.
    //    We are allowed to remove an extra segment by setting returnEnd to be the next block.
    // 3. We are in the middle of a segment.
    //    Move _readHead and _readIndex to consumedSegment and index.
    if (_bufferedBytes == 0)
    {
        returnEnd = null;
        _readHead = null;
        _readTail = null;
        _readIndex = 0;
    }
    else if (consumedIndex == returnEnd.Length)
    {
        BufferSegment? nextBlock = returnEnd.NextSegment;
        _readHead = nextBlock;
        _readIndex = 0;
        returnEnd = nextBlock;
    }
    else
    {
        _readHead = consumedSegment;
        _readIndex = consumedIndex;
    }

    // Remove all blocks that are freed (except the last one)
    while (returnStart != returnEnd)
    {
        // Capture the link before ResetMemory, then hand the segment back to the pool.
        BufferSegment next = returnStart.NextSegment!;
        returnStart.ResetMemory();
        ReturnSegmentUnsynchronized(returnStart);
        returnStart = next;
    }
}
// Writes all buffered segments (and optional trailing data) to the inner
// stream asynchronously. Cancellation of the internal token is translated
// into a FlushResult with isCanceled = true rather than an exception.
private async ValueTask<FlushResult> FlushAsyncInternal(bool writeToStream, ReadOnlyMemory<byte> data, CancellationToken cancellationToken = default)
{
    // Write all completed segments and whatever remains in the current segment
    // and flush the result.
    CancellationTokenRegistration reg = default;
    if (cancellationToken.CanBeCanceled)
    {
        // Propagate external cancellation into this writer's internal token source.
        reg = cancellationToken.UnsafeRegister(state => ((StreamPipeWriter)state!).Cancel(), this);
    }

    if (_tailBytesBuffered > 0)
    {
        Debug.Assert(_tail != null);

        // Update any buffered data
        _tail.End += _tailBytesBuffered;
        _tailBytesBuffered = 0;
    }

    using (reg)
    {
        CancellationToken localToken = InternalTokenSource.Token;
        try
        {
            // Walk the chain, writing each non-empty segment then returning it.
            BufferSegment? segment = _head;
            while (segment != null)
            {
                BufferSegment returnSegment = segment;
                segment = segment.NextSegment;

                if (returnSegment.Length > 0 && writeToStream)
                {
                    await InnerStream.WriteAsync(returnSegment.Memory, localToken).ConfigureAwait(false);
                }

                returnSegment.ResetMemory();
                ReturnSegmentUnsynchronized(returnSegment);

                // Update the head segment after we return the current segment
                _head = segment;
            }

            if (writeToStream)
            {
                // Write data after the buffered data
                if (data.Length > 0)
                {
                    await InnerStream.WriteAsync(data, localToken).ConfigureAwait(false);
                }

                if (_bytesBuffered > 0 || data.Length > 0)
                {
                    await InnerStream.FlushAsync(localToken).ConfigureAwait(false);
                }
            }

            // Mark bytes as written *after* flushing
            _head = null;
            _tail = null;
            _tailMemory = default;
            _bytesBuffered = 0;

            return (new FlushResult(isCanceled: false, isCompleted: false));
        }
        catch (OperationCanceledException)
        {
            // Remove the cancellation token such that the next time Flush is called
            // A new CTS is created.
            lock (_lockObject)
            {
                _internalTokenSource = null;
            }

            // Only report a soft cancellation when it came from Cancel() on the
            // internal source, not from the caller's token.
            if (localToken.IsCancellationRequested && !cancellationToken.IsCancellationRequested)
            {
                // Catch cancellation and translate it into setting isCanceled = true
                return (new FlushResult(isCanceled: true, isCompleted: false));
            }

            throw;
        }
    }
}
// Advances the reader past the consumed data and records how far it examined.
// Fully-consumed segments are returned to the pool, and a writer blocked on
// backpressure is resumed when the buffered length drops below _maximumSizeLow.
internal void Advance(Position consumed, Position examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;

    // Reading commit head shared with writer
    Action continuation = null;
    lock (_sync)
    {
        bool examinedEverything = false;
        if (examined.Segment == _commitHead)
        {
            examinedEverything = _commitHead != null ? examined.Index == _commitHeadIndex - _commitHead.Start : examined.Index == 0;
        }

        if (consumed.Segment != null)
        {
            if (_readHead == null)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.AdvanceToInvalidCursor);
                return;
            }

            var consumedSegment = (BufferSegment)consumed.Segment;

            returnStart = _readHead;
            returnEnd = consumedSegment;

            // Check if we crossed _maximumSizeLow and complete backpressure
            var consumedBytes = new ReadOnlyBuffer<byte>(returnStart, _readHeadIndex, consumedSegment, consumed.Index).Length;
            var oldLength = _length;
            _length -= consumedBytes;
            if (oldLength >= _maximumSizeLow && _length < _maximumSizeLow)
            {
                continuation = _writerAwaitable.Complete();
            }

            // Check if we consumed entire last segment
            // if we are going to return commit head
            // we need to check that there is no writing operation that
            // might be using tailspace
            if (consumed.Index == returnEnd.Length && !(_commitHead == returnEnd && _writingState.IsStarted))
            {
                var nextBlock = returnEnd.NextSegment;
                if (_commitHead == returnEnd)
                {
                    _commitHead = nextBlock;
                    _commitHeadIndex = 0;
                }

                _readHead = nextBlock;
                _readHeadIndex = 0;
                returnEnd = nextBlock;
            }
            else
            {
                _readHead = consumedSegment;
                _readHeadIndex = consumed.Index;
            }
        }

        // We reset the awaitable to not completed if we've examined everything the producer produced so far
        // but only if writer is not completed yet
        if (examinedEverything && !_writerCompletion.IsCompleted)
        {
            // Prevent deadlock where reader awaits new data and writer await backpressure
            if (!_writerAwaitable.IsCompleted)
            {
                PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.BackpressureDeadlock);
            }
            _readerAwaitable.Reset();
        }

        // End the current read operation (throws if no read was in progress).
        _readingState.End(ExceptionResource.NoReadToComplete);

        // Return freed segments to the pool, stopping before returnEnd, which
        // may still hold unread data.
        // NOTE(review): NextSegment is read after ResetMemory/Return — assumes
        // ResetMemory leaves the link intact in this version; confirm.
        while (returnStart != null && returnStart != returnEnd)
        {
            returnStart.ResetMemory();
            ReturnSegmentUnsynchronized(returnStart);
            returnStart = returnStart.NextSegment;
        }
    }

    // Resume the writer (if backpressure was released) outside the lock.
    TrySchedule(_writerScheduler, continuation);
}
// Advances the reader past the consumed data and records how far it examined.
// Fully-consumed segments are returned to the pool, and a writer blocked on
// backpressure is resumed when the buffered length drops below
// _resumeWriterThreshold.
internal void AdvanceReader(SequencePosition consumed, SequencePosition examined)
{
    BufferSegment returnStart = null;
    BufferSegment returnEnd = null;

    Action continuation = null;
    lock (_sync)
    {
        var examinedEverything = false;
        if (examined.GetObject() == _commitHead)
        {
            examinedEverything = _commitHead != null ? examined.GetInteger() == _commitHeadIndex - _commitHead.Start : examined.GetInteger() == 0;
        }

        if (consumed.GetObject() != null)
        {
            if (_readHead == null)
            {
                ThrowHelper.ThrowInvalidOperationException_AdvanceToInvalidCursor();
                return;
            }

            var consumedSegment = (BufferSegment)consumed.GetObject();

            returnStart = _readHead;
            returnEnd = consumedSegment;

            // Check if we crossed _maximumSizeLow and complete backpressure
            long consumedBytes = new ReadOnlySequence<byte>(returnStart, _readHeadIndex, consumedSegment, consumed.GetInteger()).Length;
            long oldLength = _length;
            _length -= consumedBytes;
            if (oldLength >= _resumeWriterThreshold && _length < _resumeWriterThreshold)
            {
                continuation = _writerAwaitable.Complete();
            }

            // Check if we consumed entire last segment
            // if we are going to return commit head we need to check that there is no writing operation that
            // might be using tailspace
            if (consumed.GetInteger() == returnEnd.Length && _writingHead != returnEnd)
            {
                BufferSegment nextBlock = returnEnd.NextSegment;
                if (_commitHead == returnEnd)
                {
                    _commitHead = nextBlock;
                    _commitHeadIndex = 0;
                }

                _readHead = nextBlock;
                _readHeadIndex = 0;
                returnEnd = nextBlock;
            }
            else
            {
                _readHead = consumedSegment;
                _readHeadIndex = consumed.GetInteger();
            }
        }

        // We reset the awaitable to not completed if we've examined everything the producer produced so far
        // but only if writer is not completed yet
        if (examinedEverything && !_writerCompletion.IsCompleted)
        {
            // Prevent deadlock where reader awaits new data and writer await backpressure
            if (!_writerAwaitable.IsCompleted)
            {
                ThrowHelper.ThrowInvalidOperationException_BackpressureDeadlock();
            }
            _readerAwaitable.Reset();
        }

        // Return freed segments to the pool, stopping before returnEnd, which
        // may still hold unread data.
        // NOTE(review): NextSegment is read after ResetMemory/Return — assumes
        // ResetMemory leaves the link intact in this version; confirm.
        while (returnStart != null && returnStart != returnEnd)
        {
            returnStart.ResetMemory();
            ReturnSegmentUnsynchronized(returnStart);
            returnStart = returnStart.NextSegment;
        }

        // Mark the read operation as completed.
        _readingState.End();
    }

    // Resume the writer (if backpressure was released) outside the lock.
    TrySchedule(_writerScheduler, continuation);
}