internal DefaultMergeHelper(PartitionedStream<TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                            TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
{
    this.m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    this.m_partitions = partitions;
    this.m_taskScheduler = taskScheduler;
    this.m_ignoreOutput = ignoreOutput;

    if (!ignoreOutput)
    {
        if (options != ParallelMergeOptions.FullyBuffered)
        {
            if (partitions.PartitionCount > 1)
            {
                this.m_asyncChannels =
                    MergeExecutor<TInputOutput>.MakeAsynchronousChannels(partitions.PartitionCount, options, cancellationState.MergedCancellationToken);
                this.m_channelEnumerator = new AsynchronousChannelMergeEnumerator<TInputOutput>(this.m_taskGroupState, this.m_asyncChannels);
            }
            else
            {
                this.m_channelEnumerator =
                    ExceptionAggregator.WrapQueryEnumerator<TInputOutput, TIgnoreKey>(partitions[0], this.m_taskGroupState.CancellationState).GetEnumerator();
            }
        }
        else
        {
            this.m_syncChannels =
                MergeExecutor<TInputOutput>.MakeSynchronousChannels(partitions.PartitionCount);
            this.m_channelEnumerator = new SynchronousChannelMergeEnumerator<TInputOutput>(this.m_taskGroupState, this.m_syncChannels);
        }
    }
}
private readonly bool _ignoreOutput; // Whether we're enumerating "for effect".

//-----------------------------------------------------------------------------------
// Instantiates a new merge helper.
//
// Arguments:
//     partitions   - the source partitions from which to consume data.
//     ignoreOutput - whether we're enumerating "for effect" or for output.
//     pipeline     - whether to use a pipelined merge.
//

internal DefaultMergeHelper(PartitionedStream<TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                            TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
{
    Debug.Assert(partitions != null);

    _taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    _partitions = partitions;
    _taskScheduler = taskScheduler;
    _ignoreOutput = ignoreOutput;
    IntValueEvent consumerEvent = new IntValueEvent();

    TraceHelpers.TraceInfo("DefaultMergeHelper::.ctor(..): creating a default merge helper");

    // If output won't be ignored, we need to manufacture a set of channels for the consumer.
    // Otherwise, when the merge is executed, we'll just invoke the activities themselves.
    if (!ignoreOutput)
    {
        // Create the asynchronous or synchronous channels, based on whether we're pipelining.
        if (options != ParallelMergeOptions.FullyBuffered)
        {
            if (partitions.PartitionCount > 1)
            {
                Debug.Assert(!ParallelEnumerable.SinglePartitionMode);
                _asyncChannels =
                    MergeExecutor<TInputOutput>.MakeAsynchronousChannels(partitions.PartitionCount, options, consumerEvent, cancellationState.MergedCancellationToken);
                _channelEnumerator = new AsynchronousChannelMergeEnumerator<TInputOutput>(_taskGroupState, _asyncChannels, consumerEvent);
            }
            else
            {
                // If there is only one partition, we don't need to create channels. The only producer enumerator
                // will be used as the result enumerator.
                _channelEnumerator = ExceptionAggregator.WrapQueryEnumerator(partitions[0], _taskGroupState.CancellationState).GetEnumerator();
            }
        }
        else
        {
            _syncChannels =
                MergeExecutor<TInputOutput>.MakeSynchronousChannels(partitions.PartitionCount);
            _channelEnumerator = new SynchronousChannelMergeEnumerator<TInputOutput>(_taskGroupState, _syncChannels);
        }

        Debug.Assert(_asyncChannels == null || _asyncChannels.Length == partitions.PartitionCount);
        Debug.Assert(_syncChannels == null || _syncChannels.Length == partitions.PartitionCount);
        Debug.Assert(_channelEnumerator != null, "enumerator can't be null if we're not ignoring output");
    }
}
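// A minimal sketch (not part of the original source) of how the merge strategy this
// constructor reacts to is chosen from the public PLINQ surface: AsParallel,
// WithMergeOptions, and ParallelMergeOptions are real public APIs, while the sample
// data and query shape below are assumptions made up purely for illustration.

using System;
using System.Linq;

internal static class MergeOptionsSketch
{
    internal static void Main()
    {
        int[] data = Enumerable.Range(0, 1000).ToArray();

        // NotBuffered (or AutoBuffered): a pipelined merge, where results flow to the
        // consumer through asynchronous channels while the partitions are still producing.
        var pipelined = data.AsParallel()
                            .WithMergeOptions(ParallelMergeOptions.NotBuffered)
                            .Select(x => x * 2);

        // FullyBuffered: each partition fills a synchronous channel, and enumeration on
        // the consumer side begins only after every partition has completed.
        var buffered = data.AsParallel()
                           .WithMergeOptions(ParallelMergeOptions.FullyBuffered)
                           .Select(x => x * 2);

        foreach (int value in pipelined)
        {
            Console.WriteLine(value);
        }
    }
}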