Example No. 1
        internal ValueTask<FlushResult> FlushAsync(CancellationToken cancellationToken)
        {
            CompletionData         completionData;
            ValueTask<FlushResult> result;

            lock (_sync)
            {
                var wasEmpty = CommitUnsynchronized();

                // Begin the operation (attach the token) before completing the reader awaiter, in case cancellationToken is already canceled
                _writerAwaitable.BeginOperation(cancellationToken, s_signalWriterAwaitable, this);

                // If the writer is completed (which it will be most of the time) then return a completed ValueTask
                if (_writerAwaitable.IsCompleted)
                {
                    var flushResult = new FlushResult();
                    GetFlushResult(ref flushResult);
                    result = new ValueTask<FlushResult>(flushResult);
                }
                else
                {
                    // Otherwise it's async
                    result = new ValueTask<FlushResult>(_writer, token: 0);
                }

                // Complete reader only if new data was pushed into the pipe
                // Avoid throwing in between completing the reader and scheduling the callback
                // if the intent is to allow pipe to continue reading the data
                if (!wasEmpty)
                {
                    _readerAwaitable.Complete(out completionData);
                }
                else
                {
                    completionData = default;
                }

                // I couldn't find a way for flush to induce a backpressure deadlock
                // if it always adds new data to the pipe and wakes up the reader, but assert anyway
                Debug.Assert(_writerAwaitable.IsCompleted || _readerAwaitable.IsCompleted);
            }

            TrySchedule(_readerScheduler, completionData);

            return result;
        }
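
For context, here is a minimal caller-side sketch of how this internal method is reached through the public PipeWriter API (GetMemory, Advance, FlushAsync). The payload and buffer size are illustrative assumptions, not taken from the example above.

using System;
using System.IO.Pipelines;
using System.Text;
using System.Threading.Tasks;

class FlushUsageSketch
{
    // Writes one payload into the pipe and flushes it; awaiting PipeWriter.FlushAsync is what
    // ultimately drives the internal FlushAsync shown above.
    static async Task WriteOnceAsync(PipeWriter writer)
    {
        byte[] payload = Encoding.UTF8.GetBytes("hello");      // illustrative payload

        // Reserve space at the pipe's writing head, copy the data in, and record how much was written.
        Memory<byte> memory = writer.GetMemory(payload.Length);
        payload.CopyTo(memory);
        writer.Advance(payload.Length);

        // FlushAsync commits the data and may pause (backpressure) until the reader catches up.
        FlushResult result = await writer.FlushAsync();
        if (result.IsCompleted)
        {
            // The reading side has completed; stop producing.
        }
    }
}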
Example No. 2
        internal WritableBufferAwaitable FlushAsync()
        {
            if (_writingState.IsActive)
            {
                // Commit the data, as it has not been committed yet
                Commit();
            }

            Action awaitable;

            lock (_sync)
            {
                awaitable = _readerAwaitable.Complete();
            }

            TrySchedule(_readerScheduler, awaitable);

            return new WritableBufferAwaitable(this);
        }
Example No. 3
        internal WritableBufferAwaitable FlushAsync(CancellationToken cancellationToken = default)
        {
            Action awaitable;
            CancellationTokenRegistration cancellationTokenRegistration;

            lock (_sync)
            {
                if (_writingState.IsActive)
                {
                    // Commit the data, as it has not been committed yet
                    CommitUnsynchronized();
                }

                awaitable = _readerAwaitable.Complete();

                cancellationTokenRegistration = _writerAwaitable.AttachToken(cancellationToken, _signalWriterAwaitable, this);
            }

            cancellationTokenRegistration.Dispose();

            TrySchedule(_readerScheduler, awaitable);

            return new WritableBufferAwaitable(this);
        }
Example No. 4
        internal PipeAwaiter<FlushResult> FlushAsync(CancellationToken cancellationToken)
        {
            Action awaitable;
            CancellationTokenRegistration cancellationTokenRegistration;

            lock (_sync)
            {
                if (_writingHead != null)
                {
                    // Commit the data, as it has not been committed yet
                    CommitUnsynchronized();
                }

                awaitable = _readerAwaitable.Complete();

                cancellationTokenRegistration = _writerAwaitable.AttachToken(cancellationToken, s_signalWriterAwaitable, this);
            }

            cancellationTokenRegistration.Dispose();

            TrySchedule(_readerScheduler, awaitable);

            return new PipeAwaiter<FlushResult>(_writer);
        }
Example No. 5
        internal ValueTask<FlushResult> FlushAsync(CancellationToken cancellationToken)
        {
            CompletionData completionData;
            CancellationTokenRegistration cancellationTokenRegistration;

            lock (_sync)
            {
                if (_writingHead != null)
                {
                    // Commit the data, as it has not been committed yet
                    CommitUnsynchronized();
                }

                _readerAwaitable.Complete(out completionData);

                cancellationTokenRegistration = _writerAwaitable.AttachToken(cancellationToken, s_signalWriterAwaitable, this);
            }

            cancellationTokenRegistration.Dispose();

            TrySchedule(_readerScheduler, completionData);

            return new ValueTask<FlushResult>(_writer, token: 0);
        }
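
The variants above that take a CancellationToken register it against the writer awaitable so that a paused flush can be interrupted. A hedged sketch of what that looks like from the caller's side, assuming a timeout chosen purely for illustration:

using System;
using System.IO.Pipelines;
using System.Threading;
using System.Threading.Tasks;

class FlushCancellationSketch
{
    // Attempts to flush within the given timeout. If backpressure keeps the flush pending past the
    // timeout, the awaited FlushAsync observes the token and throws OperationCanceledException.
    static async Task<bool> TryFlushAsync(PipeWriter writer, TimeSpan timeout)
    {
        using var cts = new CancellationTokenSource(timeout);
        try
        {
            FlushResult result = await writer.FlushAsync(cts.Token);
            return !result.IsCompleted;
        }
        catch (OperationCanceledException)
        {
            // Canceled while waiting for the reader to relieve backpressure.
            return false;
        }
    }
}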
Example No. 6
        internal void AdvanceReader(SequencePosition consumed, SequencePosition examined)
        {
            BufferSegment returnStart = null;
            BufferSegment returnEnd   = null;

            Action continuation = null;

            lock (_sync)
            {
                var examinedEverything = false;
                if (examined.Segment == _commitHead)
                {
                    examinedEverything = _commitHead != null ? examined.Index == _commitHeadIndex - _commitHead.Start : examined.Index == 0;
                }

                if (consumed.Segment != null)
                {
                    if (_readHead == null)
                    {
                        ThrowHelper.ThrowInvalidOperationException_AdvanceToInvalidCursor();
                        return;
                    }

                    var consumedSegment = (BufferSegment)consumed.Segment;

                    returnStart = _readHead;
                    returnEnd   = consumedSegment;

                    // Check if we crossed _maximumSizeLow and complete backpressure
                    long consumedBytes = new ReadOnlySequence<byte>(returnStart, _readHeadIndex, consumedSegment, consumed.Index).Length;
                    long oldLength     = _length;
                    _length -= consumedBytes;

                    if (oldLength >= _resumeWriterThreshold &&
                        _length < _resumeWriterThreshold)
                    {
                        continuation = _writerAwaitable.Complete();
                    }

                    // Check if we consumed the entire last segment.
                    // If we are going to return the commit head, we need to check that there is no
                    // writing operation that might be using the tail space.
                    if (consumed.Index == returnEnd.Length && _writingHead != returnEnd)
                    {
                        BufferSegment nextBlock = returnEnd.NextSegment;
                        if (_commitHead == returnEnd)
                        {
                            _commitHead      = nextBlock;
                            _commitHeadIndex = 0;
                        }

                        _readHead      = nextBlock;
                        _readHeadIndex = 0;
                        returnEnd      = nextBlock;
                    }
                    else
                    {
                        _readHead      = consumedSegment;
                        _readHeadIndex = consumed.Index;
                    }
                }

                // We reset the awaitable to not completed if we've examined everything the producer produced so far
                // but only if writer is not completed yet
                if (examinedEverything && !_writerCompletion.IsCompleted)
                {
                    // Prevent a deadlock where the reader awaits new data and the writer awaits backpressure
                    if (!_writerAwaitable.IsCompleted)
                    {
                        ThrowHelper.ThrowInvalidOperationException_BackpressureDeadlock();
                    }
                    _readerAwaitable.Reset();
                }

                while (returnStart != null && returnStart != returnEnd)
                {
                    returnStart.ResetMemory();
                    ReturnSegmentUnsynchronized(returnStart);
                    returnStart = returnStart.NextSegment;
                }

                _readingState.End();
            }

            TrySchedule(_writerScheduler, continuation);
        }
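
For the reader side, a minimal sketch of how the consumed and examined positions reach AdvanceReader through the public PipeReader.AdvanceTo call. The newline-delimited framing is an illustrative assumption; the key point is that passing buffer.End as examined while consuming less is what drives the examinedEverything / _readerAwaitable.Reset() path above.

using System;
using System.Buffers;
using System.IO.Pipelines;
using System.Threading.Tasks;

class AdvanceToSketch
{
    // Reads newline-delimited chunks until the writer completes.
    static async Task ConsumeAsync(PipeReader reader)
    {
        while (true)
        {
            ReadResult result = await reader.ReadAsync();
            ReadOnlySequence<byte> buffer = result.Buffer;

            SequencePosition? newline = buffer.PositionOf((byte)'\n');
            SequencePosition consumed = newline == null
                ? buffer.Start                              // no complete chunk yet: consume nothing
                : buffer.GetPosition(1, newline.Value);     // consume through the delimiter

            // (process the bytes up to 'consumed' here)

            // consumed: segments the pipe may recycle; examined: how far we have looked.
            reader.AdvanceTo(consumed, buffer.End);

            if (result.IsCompleted)
            {
                break;
            }
        }

        reader.Complete();
    }
}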
Example No. 7
        // Reading

        void IPipeReader.Advance(ReadCursor consumed, ReadCursor examined)
        {
            BufferSegment returnStart = null;
            BufferSegment returnEnd   = null;

            // Reading commit head shared with writer
            Action continuation = null;

            lock (_sync)
            {
                var examinedEverything = examined.Segment == _commitHead && examined.Index == _commitHeadIndex;

                if (!consumed.IsDefault)
                {
                    if (_readHead == null)
                    {
                        PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.AdvanceToInvalidCursor);
                        return;
                    }

                    var consumedSegment = consumed.GetSegment();

                    returnStart = _readHead;
                    returnEnd   = consumedSegment;

                    // Check if we crossed _maximumSizeLow and complete backpressure
                    var consumedBytes = ReadCursor.GetLength(returnStart, returnStart.Start, consumedSegment, consumed.Index);
                    var oldLength     = _length;
                    _length -= consumedBytes;

                    if (oldLength >= _maximumSizeLow &&
                        _length < _maximumSizeLow)
                    {
                        continuation = _writerAwaitable.Complete();
                    }

                    // Check if we consumed the entire last segment.
                    // If we are going to return the commit head,
                    // we need to check that there is no writing operation that
                    // might be using the tail space.
                    if (consumed.Index == returnEnd.End &&
                        !(_commitHead == returnEnd && _writingState.IsActive))
                    {
                        var nextBlock = returnEnd.Next;
                        if (_commitHead == returnEnd)
                        {
                            _commitHead      = nextBlock;
                            _commitHeadIndex = nextBlock?.Start ?? 0;
                        }

                        _readHead = nextBlock;
                        returnEnd = nextBlock;
                    }
                    else
                    {
                        _readHead       = consumedSegment;
                        _readHead.Start = consumed.Index;
                    }
                }

                // We reset the awaitable to not completed if we've examined everything the producer produced so far
                // but only if writer is not completed yet
                if (examinedEverything && !_writerCompletion.IsCompleted)
                {
                    // Prevent a deadlock where the reader awaits new data and the writer awaits backpressure
                    if (!_writerAwaitable.IsCompleted)
                    {
                        PipelinesThrowHelper.ThrowInvalidOperationException(ExceptionResource.BackpressureDeadlock);
                    }
                    _readerAwaitable.Reset();
                }

                _readingState.End(ExceptionResource.NoReadToComplete);

                while (returnStart != null && returnStart != returnEnd)
                {
                    returnStart.ResetMemory();
                    ReturnSegmentUnsynchronized(returnStart);
                    returnStart = returnStart.Next;
                }
            }

            TrySchedule(_writerScheduler, continuation);
        }
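
Finally, the _resumeWriterThreshold / _maximumSizeLow checks in the last two examples are what release writer backpressure once enough data has been consumed. A minimal sketch of configuring those limits from the outside via PipeOptions; the byte values are illustrative assumptions:

using System.IO.Pipelines;

class BackpressureOptionsSketch
{
    static Pipe CreateBoundedPipe()
    {
        // pauseWriterThreshold: FlushAsync stops completing synchronously once this many unconsumed
        // bytes are buffered; resumeWriterThreshold: AdvanceReader/Advance (above) completes the
        // writer awaitable again once the buffered length drops below this value.
        var options = new PipeOptions(
            pauseWriterThreshold: 64 * 1024,     // illustrative values
            resumeWriterThreshold: 32 * 1024);

        return new Pipe(options);
    }
}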