A ManualResetEventSlim that also remembers a value that was stored at the last Set().
Inheritance: ManualResetEventSlim
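The IntValueEvent type itself is internal to System.Linq.Parallel, so the examples below can only show it in use. As a rough sketch, a minimal equivalent could look like the following (the field and constructor details are illustrative assumptions, not the actual implementation):

        internal sealed class IntValueEvent : ManualResetEventSlim
        {
            internal int Value;                  // remembered from the last Set(int)

            internal IntValueEvent() : base(false)
            {
                Value = -1;                      // illustrative default: no value stored yet
            }

            // Overload that stores the value before signaling, so a waiting consumer
            // can read which producer woke it.
            internal void Set(int index)
            {
                Value = index;
                base.Set();
            }
        }

This fits the way the merge code below uses the event: each channel is constructed with its partition index and the shared consumer event, so a producer can signal "channel i has data" without the consumer polling every channel.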
Example #1
        internal AsynchronousChannel(int index, int capacity, int chunkSize, CancellationToken cancellationToken, IntValueEvent consumerEvent)
        {
            if (chunkSize == 0)
            {
                chunkSize = Scheduling.GetDefaultChunkSize<T>();
            }

            Debug.Assert(chunkSize > 0, "chunk size must be greater than 0");
            Debug.Assert(capacity > 1, "this impl doesn't support capacity of 1 or 0");

            // Initialize a buffer with enough space to hold 'capacity' elements.
            // We need one extra unused element as a sentinel to detect a full buffer,
            // thus we add one to the capacity requested.
            _index  = index;
            _buffer = new T[capacity + 1][];
            _producerBufferIndex = 0;
            _consumerBufferIndex = 0;

            _producerEvent      = new ManualResetEventSlim();
            _consumerEvent      = consumerEvent;
            _chunkSize          = chunkSize;
            _producerChunk      = new T[chunkSize];
            _producerChunkIndex = 0;
            _cancellationToken  = cancellationToken;
        }
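The extra sentinel slot mentioned in the comment is the standard trick for telling a full ring buffer apart from an empty one. A minimal sketch of the two predicates, assuming the producer/consumer index fields above (the helper names are hypothetical, not part of the actual class):

        // Empty: producer and consumer point at the same slot.
        private bool IsBufferEmpty()
        {
            return _producerBufferIndex == _consumerBufferIndex;
        }

        // Full: advancing the producer index (with wrap-around) would collide with the
        // consumer's slot. The one never-used element keeps this state distinguishable
        // from the empty state above, which is why the constructor allocates capacity + 1.
        private bool IsBufferFull()
        {
            return (_producerBufferIndex + 1) % _buffer.Length == _consumerBufferIndex;
        }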
Example #2
        public override void Dispose()
        {
            if (_consumerEvent != null)
            {
                // MergeEnumerator.Dispose() will wait until all producers complete.
                // So, we can be sure that no producer will attempt to signal the consumer event, and
                // we can dispose it.
                base.Dispose();

                _consumerEvent.Dispose();
                _consumerEvent = null;
            }
        }
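Nulling out _consumerEvent also makes Dispose() idempotent: a second call finds null and returns without touching the already-disposed event. The safety argument in the comment is the key point, though: only because MergeEnumerator.Dispose() waits for all producers to complete can the shared event be disposed without racing a late signal.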
Example #3
        private readonly bool _ignoreOutput;                                       // Whether we're enumerating "for effect".

        //-----------------------------------------------------------------------------------
        // Instantiates a new merge helper.
        //
        // Arguments:
        //     partitions   - the source partitions from which to consume data.
        //     ignoreOutput - whether we're enumerating "for effect" or for output.
        //     pipeline     - whether to use a pipelined merge.
        //

        internal DefaultMergeHelper(PartitionedStream<TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                                    TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
        {
            Debug.Assert(partitions != null);

            _taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
            _partitions     = partitions;
            _taskScheduler  = taskScheduler;
            _ignoreOutput   = ignoreOutput;
            IntValueEvent consumerEvent = new IntValueEvent();

            TraceHelpers.TraceInfo("DefaultMergeHelper::.ctor(..): creating a default merge helper");

            // If output won't be ignored, we need to manufacture a set of channels for the consumer.
            // Otherwise, when the merge is executed, we'll just invoke the activities themselves.
            if (!ignoreOutput)
            {
                // Create the asynchronous or synchronous channels, based on whether we're pipelining.
                if (options != ParallelMergeOptions.FullyBuffered)
                {
                    if (partitions.PartitionCount > 1)
                    {
                        Debug.Assert(!ParallelEnumerable.SinglePartitionMode);
                        _asyncChannels =
                            MergeExecutor<TInputOutput>.MakeAsynchronousChannels(partitions.PartitionCount, options, consumerEvent, cancellationState.MergedCancellationToken);

                        _channelEnumerator = new AsynchronousChannelMergeEnumerator<TInputOutput>(_taskGroupState, _asyncChannels, consumerEvent);
                    }
                    else
                    {
                        // If there is only one partition, we don't need to create channels. The only producer enumerator
                        // will be used as the result enumerator.
                        _channelEnumerator = ExceptionAggregator.WrapQueryEnumerator(partitions[0], _taskGroupState.CancellationState).GetEnumerator();
                    }
                }
                else
                {
                    _syncChannels =
                        MergeExecutor<TInputOutput>.MakeSynchronousChannels(partitions.PartitionCount);

                    _channelEnumerator = new SynchronousChannelMergeEnumerator<TInputOutput>(_taskGroupState, _syncChannels);
                }

                Debug.Assert(_asyncChannels == null || _asyncChannels.Length == partitions.PartitionCount);
                Debug.Assert(_syncChannels == null || _syncChannels.Length == partitions.PartitionCount);
                Debug.Assert(_channelEnumerator != null, "enumerator can't be null if we're not ignoring output");
            }
        }
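Taken together, the constructor picks one of three merge strategies: FullyBuffered queries get synchronous channels, pipelined queries (NotBuffered or AutoBuffered) over more than one partition get the asynchronous channels plus the shared consumer event, and a pipelined query with a single partition needs no channels at all, since the lone producer enumerator can be handed to the consumer directly.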
Example #4
        private T _currentElement;                            // The remembered element from the previous MoveNext.

        //-----------------------------------------------------------------------------------
        // Allocates a new enumerator over a set of one-to-one channels.
        //

        internal AsynchronousChannelMergeEnumerator(
            QueryTaskGroupState taskGroupState, AsynchronousChannel<T>[] channels, IntValueEvent consumerEvent)
            : base(taskGroupState)
        {
            Debug.Assert(channels != null);
#if DEBUG
            foreach (AsynchronousChannel<T> c in channels)
            {
                Debug.Assert(c != null);
            }
#endif

            _channels      = channels;
            _channelIndex  = -1;                         // To catch calls to Current before MoveNext.
            _done          = new bool[_channels.Length]; // Initialized to { false }, i.e. no channels done.
            _consumerEvent = consumerEvent;
        }
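The -1 sentinel stored in _channelIndex lets the enumerator reject reads of Current before the first MoveNext. The property itself is not shown in this excerpt, but a sketch of the guard it implies might look like:

        public override T Current
        {
            get
            {
                // -1 means MoveNext has never been called; _channels.Length would mean
                // enumeration has already completed. Either way there is no current element.
                if (_channelIndex == -1 || _channelIndex == _channels.Length)
                {
                    throw new InvalidOperationException("Enumeration has not started or has already finished.");
                }

                return _currentElement;
            }
        }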
Example #5
        //-----------------------------------------------------------------------------------
        // Closes Win32 events possibly allocated during execution.
        //

        public void Dispose()
        {
            // We need to take a lock to deal with consumer threads racing to call Dispose
            // and producer threads racing inside of SetDone.
            //
            // Update 8/2/2011: Dispose() should never be called with SetDone() concurrently,
            // but in order to reduce churn late in the product cycle, we decided not to
            // remove the lock.
            lock (this)
            {
                Debug.Assert(_done, "Expected channel to be done before disposing");
                Debug.Assert(_producerEvent != null);
                Debug.Assert(_consumerEvent != null);
                _producerEvent.Dispose();
                _producerEvent = null;
                _consumerEvent = null;
            }
        }
Example #6
        //-----------------------------------------------------------------------------------
        // Initializes a new channel with the default capacity and the specified chunk size.
        //
        // Arguments:
        //     index             - the index of the producer partition this channel serves
        //     chunkSize         - the granularity of chunking on enqueue/dequeue. 0 means default size.
        //     cancellationToken - the token used to cancel a blocked producer or consumer
        //     consumerEvent     - the shared event signaled when the channel becomes non-empty
        //
        // Notes:
        //     The capacity represents the maximum number of chunks a channel can hold. That
        //     means producers will actually block after enqueueing capacity*chunkSize
        //     individual elements.
        //

        internal AsynchronousChannel(int index, int chunkSize, CancellationToken cancellationToken, IntValueEvent consumerEvent) :
            this(index, Scheduling.DEFAULT_BOUNDED_BUFFER_CAPACITY, chunkSize, cancellationToken, consumerEvent)
        {
        }
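As a concrete illustration of the note above: if the delegated-to constructor used a default capacity of, say, 512 chunks and the chunk size were 128 elements, a producer would block only after 512 * 128 = 65,536 elements had been enqueued without being consumed.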
Example #7
        //-----------------------------------------------------------------------------------
        // This internal helper method is used to generate a set of asynchronous channels.
        // The algorithm used by each channel contains the necessary synchronization to
        // ensure it is suitable for pipelined consumption.
        //
        // Arguments:
        //     partitionCount - the number of partitions for which to create new channels.
        //
        // Return Value:
        //     An array of asynchronous channels, one for each partition.
        //

        internal static AsynchronousChannel<TInputOutput>[] MakeAsynchronousChannels(int partitionCount, ParallelMergeOptions options, IntValueEvent consumerEvent, CancellationToken cancellationToken)
        {
            AsynchronousChannel<TInputOutput>[] channels = new AsynchronousChannel<TInputOutput>[partitionCount];

            Debug.Assert(options == ParallelMergeOptions.NotBuffered || options == ParallelMergeOptions.AutoBuffered);
            TraceHelpers.TraceInfo("MergeExecutor::MakeChannels: setting up {0} async channels in prep for pipeline", partitionCount);

            // If we are pipelining, we need a channel that contains the necessary synchronization
            // in it. We choose a bounded/blocking channel data structure: bounded so that we can
            // limit the amount of memory overhead used by the query by putting a cap on the
            // buffer size into which producers place data, and blocking so that the consumer can
            // wait for additional data to arrive in the case that it's found to be empty.

            int chunkSize = 0; // 0 means automatic chunk size
            if (options == ParallelMergeOptions.NotBuffered)
            {
                chunkSize = 1;
            }

            for (int i = 0; i < channels.Length; i++)
            {
                channels[i] = new AsynchronousChannel<TInputOutput>(i, chunkSize, cancellationToken, consumerEvent);
            }

            return channels;
        }
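The chunk size chosen here is what distinguishes the two pipelined merge options: NotBuffered forces a chunk size of 1, so each element becomes visible to the consumer as soon as it is produced, while AutoBuffered passes 0 and lets the channel constructor substitute Scheduling.GetDefaultChunkSize<T>(), trading a little latency for fewer synchronization operations per element.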