//-----------------------------------------------------------------------------------
// Creates and begins execution of a new spooling task. Runs asynchronously.
//
// Arguments:
//     groupState    - values for inter-task communication
//     partitions    - the producer enumerators
//     channels      - the producer-consumer channels
//     taskScheduler - the task manager on which to execute
//

internal static void SpoolPipeline<TInputOutput, TIgnoreKey>(
    QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions,
    AsynchronousChannel<TInputOutput>[] channels, TaskScheduler taskScheduler)
{
    Contract.Requires(partitions.PartitionCount == channels.Length);
    Contract.Requires(groupState != null);

    // Ensure all tasks in this query are parented under a common root. Because this
    // is a pipelined query, we detach it from the parent (to avoid blocking the calling
    // thread), and run the query on a separate thread.
    Task rootTask = new Task(
        () =>
        {
            // Create tasks that will enumerate the partitions in parallel. Because we're pipelining,
            // we will begin running these tasks in parallel and then return.
            for (int i = 0; i < partitions.PartitionCount; i++)
            {
                TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);
                QueryTask asyncTask = new PipelineSpoolingTask<TInputOutput, TIgnoreKey>(i, groupState, partitions[i], channels[i]);
                asyncTask.RunAsynchronously(taskScheduler);
            }
        });

    // Begin the query on the calling thread.
    groupState.QueryBegin(rootTask);

    // And schedule it for execution. This is done after beginning to ensure no thread tries to
    // end the query before its root task has been recorded properly.
    rootTask.Start(taskScheduler);

    // We don't call QueryEnd here; when we return, the query is still executing, and the
    // last enumerator to be disposed of will call QueryEnd for us.
}
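// Illustrative sketch (not part of the original source): the pipelined spooling above
// is what backs streaming consumption through the public PLINQ surface. A minimal,
// runnable example using only public System.Linq APIs; with a NotBuffered merge, the
// consumer loop runs while the spooling tasks are still producing.

using System;
using System.Linq;

static class PipelinedMergeExample
{
    static void Main()
    {
        var results = Enumerable.Range(0, 1_000)
            .AsParallel()
            .WithMergeOptions(ParallelMergeOptions.NotBuffered) // selects the pipelined merge path
            .Select(x => x * x);

        // Elements arrive through producer-consumer channels rather than a final buffer,
        // so this foreach overlaps with the producers.
        foreach (int r in results)
        {
            Console.WriteLine(r);
        }
    }
}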
//-----------------------------------------------------------------------------------
// Instantiates a new merge helper.
//
// Arguments:
//     partitions - the source partitions from which to consume data.
//

internal OrderPreservingPipeliningMergeHelper(
    PartitionedStream<TOutput, TKey> partitions,
    TaskScheduler taskScheduler,
    CancellationState cancellationState,
    bool autoBuffered,
    int queryId,
    IComparer<TKey> keyComparer)
{
    Debug.Assert(partitions != null);

    TraceHelpers.TraceInfo("KeyOrderPreservingMergeHelper::.ctor(..): creating an order preserving merge helper");

    _taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    _partitions = partitions;
    _taskScheduler = taskScheduler;
    _autoBuffered = autoBuffered;

    int partitionCount = _partitions.PartitionCount;
    _buffers = new Queue<Pair<TKey, TOutput>>[partitionCount];
    _producerDone = new bool[partitionCount];
    _consumerWaiting = new bool[partitionCount];
    _producerWaiting = new bool[partitionCount];
    _bufferLocks = new object[partitionCount];

    if (keyComparer == Util.GetDefaultComparer<int>())
    {
        Debug.Assert(typeof(TKey) == typeof(int));
        _producerComparer = (IComparer<Producer<TKey>>)new ProducerComparerInt();
    }
    else
    {
        _producerComparer = new ProducerComparer(keyComparer);
    }
}
internal OrderPreservingMergeHelper(PartitionedStream<TInputOutput, TKey> partitions, TaskScheduler taskScheduler,
                                    CancellationState cancellationState, int queryId)
{
    this.m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    this.m_partitions = partitions;
    this.m_results = new System.Linq.Parallel.Shared<TInputOutput[]>(null);
    this.m_taskScheduler = taskScheduler;
}
/// <summary>
/// Constructor
/// </summary>
internal OrderPreservingPipeliningSpoolingTask(
    QueryOperatorEnumerator<TOutput, TKey> partition,
    QueryTaskGroupState taskGroupState,
    bool[] consumerWaiting,
    bool[] producerWaiting,
    bool[] producerDone,
    int partitionIndex,
    Queue<Pair<TKey, TOutput>>[] buffers,
    object bufferLock,
    TaskScheduler taskScheduler,
    bool autoBuffered)
    : base(partitionIndex, taskGroupState)
{
    Contract.Requires(partition != null);
    Contract.Requires(taskGroupState != null);
    Contract.Requires(consumerWaiting != null);
    Contract.Requires(producerWaiting != null && producerWaiting.Length == consumerWaiting.Length);
    Contract.Requires(producerDone != null && producerDone.Length == consumerWaiting.Length);
    Contract.Requires(buffers != null && buffers.Length == consumerWaiting.Length);
    Contract.Requires(partitionIndex >= 0 && partitionIndex < consumerWaiting.Length);

    _partition = partition;
    _taskGroupState = taskGroupState;
    _producerDone = producerDone;
    _consumerWaiting = consumerWaiting;
    _producerWaiting = producerWaiting;
    _partitionIndex = partitionIndex;
    _buffers = buffers;
    _bufferLock = bufferLock;
    _taskScheduler = taskScheduler;
    _autoBuffered = autoBuffered;
}
internal DefaultMergeHelper(PartitionedStream<TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                            TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
{
    this.m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    this.m_partitions = partitions;
    this.m_taskScheduler = taskScheduler;
    this.m_ignoreOutput = ignoreOutput;
    if (!ignoreOutput)
    {
        if (options != ParallelMergeOptions.FullyBuffered)
        {
            if (partitions.PartitionCount > 1)
            {
                this.m_asyncChannels =
                    MergeExecutor<TInputOutput>.MakeAsynchronousChannels(partitions.PartitionCount, options, cancellationState.MergedCancellationToken);
                this.m_channelEnumerator = new AsynchronousChannelMergeEnumerator<TInputOutput>(this.m_taskGroupState, this.m_asyncChannels);
            }
            else
            {
                this.m_channelEnumerator =
                    ExceptionAggregator.WrapQueryEnumerator<TInputOutput, TIgnoreKey>(partitions[0], this.m_taskGroupState.CancellationState).GetEnumerator();
            }
        }
        else
        {
            this.m_syncChannels = MergeExecutor<TInputOutput>.MakeSynchronousChannels(partitions.PartitionCount);
            this.m_channelEnumerator = new SynchronousChannelMergeEnumerator<TInputOutput>(this.m_taskGroupState, this.m_syncChannels);
        }
    }
}
//-----------------------------------------------------------------------------------
// Creates, but does not execute, a new spooling task.
//
// Arguments:
//     taskIndex  - the unique index of this task
//     groupState - state shared among the tasks
//     source     - the producer enumerator
//
// Assumptions:
//     Source cannot be null, although the other arguments may be.
//

internal ForAllSpoolingTask(
    int taskIndex, QueryTaskGroupState groupState,
    QueryOperatorEnumerator<TInputOutput, TIgnoreKey> source)
    : base(taskIndex, groupState)
{
    Contract.Assert(source != null);
    _source = source;
}
//-----------------------------------------------------------------------------------
// Creates, but does not execute, a new spooling task.
//
// Arguments:
//     taskIndex   - the unique index of this task
//     source      - the producer enumerator
//     destination - the destination channel into which to spool elements
//
// Assumptions:
//     Source cannot be null, although the other arguments may be.
//

internal StopAndGoSpoolingTask(
    int taskIndex, QueryTaskGroupState groupState,
    QueryOperatorEnumerator<TInputOutput, TIgnoreKey> source,
    SynchronousChannel<TInputOutput> destination)
    : base(taskIndex, groupState)
{
    Contract.Requires(source != null);
    _source = source;
    _destination = destination;
}
//-----------------------------------------------------------------------------------
// Creates, but does not execute, a new spooling task.
//
// Arguments:
//     taskIndex   - the unique index of this task
//     source      - the producer enumerator
//     destination - the destination channel into which to spool elements
//
// Assumptions:
//     Source cannot be null, although the other arguments may be.
//

internal PipelineSpoolingTask(
    int taskIndex, QueryTaskGroupState groupState,
    QueryOperatorEnumerator<TInputOutput, TIgnoreKey> source,
    AsynchronousChannel<TInputOutput> destination)
    : base(taskIndex, groupState)
{
    Contract.Assert(source != null);
    _source = source;
    _destination = destination;
}
private readonly SortHelper<TInputOutput> _sortHelper; // A helper that performs the sorting.

//-----------------------------------------------------------------------------------
// Creates, but does not execute, a new spooling task.
//
// Arguments:
//     taskIndex  - the unique index of this task
//     groupState - state shared among the tasks
//     results    - the shared array into which sorted results are placed
//     sortHelper - a helper that performs the sorting
//
// Assumptions:
//     None of the arguments other than taskIndex may be null.
//

private OrderPreservingSpoolingTask(
    int taskIndex,
    QueryTaskGroupState groupState,
    Shared<TInputOutput[]> results,
    SortHelper<TInputOutput> sortHelper)
    : base(taskIndex, groupState)
{
    Debug.Assert(groupState != null);
    Debug.Assert(results != null);
    Debug.Assert(sortHelper != null);

    _results = results;
    _sortHelper = sortHelper;
}
private readonly TaskScheduler _taskScheduler; // The task manager to execute the query.

//-----------------------------------------------------------------------------------
// Instantiates a new merge helper.
//
// Arguments:
//     partitions - the source partitions from which to consume data.
//

internal OrderPreservingMergeHelper(PartitionedStream<TInputOutput, TKey> partitions, TaskScheduler taskScheduler,
                                    CancellationState cancellationState, int queryId)
{
    Debug.Assert(partitions != null);

    TraceHelpers.TraceInfo("KeyOrderPreservingMergeHelper::.ctor(..): creating an order preserving merge helper");

    _taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    _partitions = partitions;
    _results = new Shared<TInputOutput[]>(null);
    _taskScheduler = taskScheduler;
}
private SortHelper<TInputOutput> _sortHelper; // A helper that performs the sorting.

//-----------------------------------------------------------------------------------
// Creates, but does not execute, a new spooling task.
//
// Arguments:
//     taskIndex  - the unique index of this task
//     groupState - state shared among the tasks
//     results    - the shared array into which sorted results are placed
//     sortHelper - a helper that performs the sorting
//
// Assumptions:
//     None of the arguments other than taskIndex may be null.
//

private OrderPreservingSpoolingTask(
    int taskIndex,
    QueryTaskGroupState groupState,
    Shared<TInputOutput[]> results,
    SortHelper<TInputOutput> sortHelper)
    : base(taskIndex, groupState)
{
    Contract.Requires(groupState != null);
    Contract.Requires(results != null);
    Contract.Requires(sortHelper != null);

    _results = results;
    _sortHelper = sortHelper;
}
internal static void SpoolPipeline<TInputOutput, TIgnoreKey>(QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions,
                                                             AsynchronousChannel<TInputOutput>[] channels, TaskScheduler taskScheduler)
{
    Task rootTask = new Task(delegate
    {
        for (int j = 0; j < partitions.PartitionCount; j++)
        {
            new PipelineSpoolingTask<TInputOutput, TIgnoreKey>(j, groupState, partitions[j], channels[j]).RunAsynchronously(taskScheduler);
        }
    });
    groupState.QueryBegin(rootTask);
    rootTask.Start(taskScheduler);
}
//-----------------------------------------------------------------------------------
// Creates and begins execution of a new set of spooling tasks. Executes
// synchronously: sort merges cannot pipeline, so by the time this API has
// returned, all of the results have been produced.
//
// Arguments:
//     groupState    - values for inter-task communication
//     partitions    - the producer enumerators
//     results       - the shared array into which sorted results are placed
//     taskScheduler - the task manager on which to execute
//
// Assumptions:
//     GroupState, partitions, and results cannot be null, and results.Value
//     must be null (it is populated during spooling).
//

internal static void Spool(
    QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TKey> partitions,
    Shared<TInputOutput[]> results, TaskScheduler taskScheduler)
{
    Contract.Requires(groupState != null);
    Contract.Requires(partitions != null);
    Contract.Requires(results != null);
    Contract.Requires(results.Value == null);

    // Determine how many async tasks to create.
    int maxToRunInParallel = partitions.PartitionCount - 1;

    // Generate a set of sort helpers.
    SortHelper<TInputOutput, TKey>[] sortHelpers =
        SortHelper<TInputOutput, TKey>.GenerateSortHelpers(partitions, groupState);

    // Ensure all tasks in this query are parented under a common root.
    Task rootTask = new Task(
        () =>
        {
            // Create tasks that will enumerate the partitions in parallel. We'll use the current
            // thread for one task and then block before returning to the caller, until all results
            // have been accumulated. Pipelining is not supported by sort merges.
            for (int i = 0; i < maxToRunInParallel; i++)
            {
                TraceHelpers.TraceInfo("OrderPreservingSpoolingTask::Spool: Running partition[{0}] asynchronously", i);
                QueryTask asyncTask = new OrderPreservingSpoolingTask<TInputOutput, TKey>(
                    i, groupState, results, sortHelpers[i]);
                asyncTask.RunAsynchronously(taskScheduler);
            }

            // Run one task synchronously on the current thread.
            TraceHelpers.TraceInfo("OrderPreservingSpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);
            QueryTask syncTask = new OrderPreservingSpoolingTask<TInputOutput, TKey>(
                maxToRunInParallel, groupState, results, sortHelpers[maxToRunInParallel]);
            syncTask.RunSynchronously(taskScheduler);
        });

    // Begin the query on the calling thread.
    groupState.QueryBegin(rootTask);

    // We don't want to return until the task is finished. Run it on the calling thread.
    rootTask.RunSynchronously(taskScheduler);

    // Destroy the state associated with our sort helpers.
    for (int i = 0; i < sortHelpers.Length; i++)
    {
        sortHelpers[i].Dispose();
    }

    // End the query, which has the effect of propagating any unhandled exceptions.
    groupState.QueryEnd(false);
}
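// Illustrative sketch (not part of the original source): the synchronous sort spool
// above is reached when a query must produce sorted, order-preserved output. Since a
// sort merge cannot pipeline, every result exists before enumeration begins. A minimal
// public-API example that exercises this path:

using System;
using System.Linq;

static class SortMergeExample
{
    static void Main()
    {
        int[] sorted = Enumerable.Range(0, 100)
            .AsParallel()
            .OrderBy(x => -x) // forces the order-preserving (sort) merge
            .ToArray();       // by the time ToArray returns, spooling is complete

        Console.WriteLine(sorted[0]); // 99
    }
}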
private SortHelper(QueryOperatorEnumerator<TInputOutput, TKey> source, int partitionCount, int partitionIndex,
                   QueryTaskGroupState groupState, int[][] sharedIndices, OrdinalIndexState indexState,
                   IComparer<TKey> keyComparer, GrowingArray<TKey>[] sharedkeys, TInputOutput[][] sharedValues,
                   Barrier[,] sharedBarriers)
{
    this.m_source = source;
    this.m_partitionCount = partitionCount;
    this.m_partitionIndex = partitionIndex;
    this.m_groupState = groupState;
    this.m_sharedIndices = sharedIndices;
    this.m_indexState = indexState;
    this.m_keyComparer = keyComparer;
    this.m_sharedKeys = sharedkeys;
    this.m_sharedValues = sharedValues;
    this.m_sharedBarriers = sharedBarriers;
}
internal OrderPreservingPipeliningSpoolingTask(QueryOperatorEnumerator<TOutput, int> partition, QueryTaskGroupState taskGroupState,
                                               bool[] consumerWaiting, bool[] producerWaiting, bool[] producerDone, int partitionIndex,
                                               Queue<Pair<int, TOutput>>[] buffers, object bufferLock, TaskScheduler taskScheduler,
                                               bool autoBuffered)
    : base(partitionIndex, taskGroupState)
{
    this.m_partition = partition;
    this.m_taskGroupState = taskGroupState;
    this.m_producerDone = producerDone;
    this.m_consumerWaiting = consumerWaiting;
    this.m_producerWaiting = producerWaiting;
    this.m_partitionIndex = partitionIndex;
    this.m_buffers = buffers;
    this.m_bufferLock = bufferLock;
    this.m_taskScheduler = taskScheduler;
    this.m_autoBuffered = autoBuffered;
}
internal OrderPreservingPipeliningMergeHelper(PartitionedStream<TOutput, int> partitions, TaskScheduler taskScheduler,
                                              CancellationState cancellationState, bool autoBuffered, int queryId)
{
    this.m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    this.m_partitions = partitions;
    this.m_taskScheduler = taskScheduler;
    this.m_autoBuffered = autoBuffered;
    int partitionCount = this.m_partitions.PartitionCount;
    this.m_buffers = new Queue<Pair<int, TOutput>>[partitionCount];
    this.m_producerDone = new bool[partitionCount];
    this.m_consumerWaiting = new bool[partitionCount];
    this.m_producerWaiting = new bool[partitionCount];
    this.m_bufferLocks = new object[partitionCount];
}
private T _currentElement = default!; // The last element remembered during enumeration.

//-----------------------------------------------------------------------------------
// Instantiates a new enumerator for a set of channels.
//

internal SynchronousChannelMergeEnumerator(
    QueryTaskGroupState taskGroupState, SynchronousChannel<T>[] channels)
    : base(taskGroupState)
{
    Debug.Assert(channels != null);
#if DEBUG
    foreach (SynchronousChannel<T> c in channels)
    {
        Debug.Assert(c != null);
    }
#endif

    _channels = channels;
    _channelIndex = -1;
}
private T m_currentElement; // The remembered element from the previous MoveNext.

//-----------------------------------------------------------------------------------
// Allocates a new enumerator over a set of one-to-one channels.
//

internal AsynchronousChannelMergeEnumerator(
    QueryTaskGroupState taskGroupState, AsynchronousChannel<T>[] channels)
    : base(taskGroupState)
{
    Contract.Assert(channels != null);
#if DEBUG
    foreach (AsynchronousChannel<T> c in channels)
    {
        Contract.Assert(c != null);
    }
#endif

    m_channels = channels;
    m_channelIndex = -1;                  // To catch calls to Current before MoveNext.
    m_done = new bool[m_channels.Length]; // Initialized to { false }, i.e. no channels done.
}
internal static void SpoolForAll<TInputOutput, TIgnoreKey>(QueryTaskGroupState groupState,
                                                           PartitionedStream<TInputOutput, TIgnoreKey> partitions, TaskScheduler taskScheduler)
{
    Task rootTask = new Task(delegate
    {
        int taskIndex = partitions.PartitionCount - 1;
        for (int j = 0; j < taskIndex; j++)
        {
            new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(j, groupState, partitions[j]).RunAsynchronously(taskScheduler);
        }
        new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(taskIndex, groupState, partitions[taskIndex]).RunSynchronously(taskScheduler);
    });
    groupState.QueryBegin(rootTask);
    rootTask.RunSynchronously(taskScheduler);
    groupState.QueryEnd(false);
}
private readonly bool _ignoreOutput; // Whether we're enumerating "for effect".

//-----------------------------------------------------------------------------------
// Instantiates a new merge helper.
//
// Arguments:
//     partitions   - the source partitions from which to consume data.
//     ignoreOutput - whether we're enumerating "for effect" or for output.
//     options      - the merge options, which determine whether the merge is pipelined.
//

internal DefaultMergeHelper(PartitionedStream<TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                            TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
{
    Debug.Assert(partitions != null);

    _taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    _partitions = partitions;
    _taskScheduler = taskScheduler;
    _ignoreOutput = ignoreOutput;
    IntValueEvent consumerEvent = new IntValueEvent();

    TraceHelpers.TraceInfo("DefaultMergeHelper::.ctor(..): creating a default merge helper");

    // If output won't be ignored, we need to manufacture a set of channels for the consumer.
    // Otherwise, when the merge is executed, we'll just invoke the activities themselves.
    if (!ignoreOutput)
    {
        // Create the asynchronous or synchronous channels, based on whether we're pipelining.
        if (options != ParallelMergeOptions.FullyBuffered)
        {
            if (partitions.PartitionCount > 1)
            {
                Debug.Assert(!ParallelEnumerable.SinglePartitionMode);
                _asyncChannels =
                    MergeExecutor<TInputOutput>.MakeAsynchronousChannels(partitions.PartitionCount, options, consumerEvent, cancellationState.MergedCancellationToken);
                _channelEnumerator = new AsynchronousChannelMergeEnumerator<TInputOutput>(_taskGroupState, _asyncChannels, consumerEvent);
            }
            else
            {
                // If there is only one partition, we don't need to create channels. The only producer enumerator
                // will be used as the result enumerator.
                _channelEnumerator = ExceptionAggregator.WrapQueryEnumerator(partitions[0], _taskGroupState.CancellationState).GetEnumerator();
            }
        }
        else
        {
            _syncChannels =
                MergeExecutor<TInputOutput>.MakeSynchronousChannels(partitions.PartitionCount);
            _channelEnumerator = new SynchronousChannelMergeEnumerator<TInputOutput>(_taskGroupState, _syncChannels);
        }

        Debug.Assert(_asyncChannels == null || _asyncChannels.Length == partitions.PartitionCount);
        Debug.Assert(_syncChannels == null || _syncChannels.Length == partitions.PartitionCount);
        Debug.Assert(_channelEnumerator != null, "enumerator can't be null if we're not ignoring output");
    }
}
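// Illustrative sketch (not part of the original source): the constructor above picks a
// channel strategy from (ignoreOutput, options, partition count). The hypothetical helper
// below restates that decision table in isolation; the enum names are invented for clarity.

enum ChannelStrategy { None, Asynchronous, DirectEnumerator, Synchronous }

static class MergeStrategyExample
{
    static ChannelStrategy Choose(bool ignoreOutput, System.Linq.ParallelMergeOptions options, int partitionCount)
    {
        if (ignoreOutput)
            return ChannelStrategy.None;             // "for effect": no channels at all

        if (options == System.Linq.ParallelMergeOptions.FullyBuffered)
            return ChannelStrategy.Synchronous;      // stop-and-go: accumulate everything first

        return partitionCount > 1
            ? ChannelStrategy.Asynchronous           // pipelined producer-consumer channels
            : ChannelStrategy.DirectEnumerator;      // a single partition needs no channel
    }
}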
[MaybeNull, AllowNull]
private T _currentElement = default; // The remembered element from the previous MoveNext. TODO-NULLABLE: https://github.com/dotnet/roslyn/issues/37511

//-----------------------------------------------------------------------------------
// Allocates a new enumerator over a set of one-to-one channels.
//

internal AsynchronousChannelMergeEnumerator(
    QueryTaskGroupState taskGroupState, AsynchronousChannel<T>[] channels, IntValueEvent? consumerEvent)
    : base(taskGroupState)
{
    Debug.Assert(channels != null);
#if DEBUG
    foreach (AsynchronousChannel<T> c in channels)
    {
        Debug.Assert(c != null);
    }
#endif

    _channels = channels;
    _channelIndex = -1;                 // To catch calls to Current before MoveNext.
    _done = new bool[_channels.Length]; // Initialized to { false }, i.e. no channels done.
    _consumerEvent = consumerEvent;
}
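// Illustrative sketch (not part of the original source): a merge enumerator of this
// shape cycles across per-producer channels, remembering which ones are done. The
// simplified, hypothetical round-robin version below uses ConcurrentQueue and omits
// the blocking/wakeup logic the real enumerator performs when all queues are empty.

using System.Collections.Concurrent;

sealed class RoundRobinMergeSketch<T>
{
    private readonly ConcurrentQueue<T>[] _queues;
    private readonly bool[] _done; // producer i has finished and its queue is drained
    private int _index = -1;       // -1 catches reads before the first move

    public RoundRobinMergeSketch(ConcurrentQueue<T>[] queues)
    {
        _queues = queues;
        _done = new bool[queues.Length];
    }

    // Called once a producer has completed and its queue has been fully consumed.
    public void MarkDone(int producerIndex) => _done[producerIndex] = true;

    // Visits each channel at most once per call; the real enumerator blocks instead
    // of returning false when producers are still running.
    public bool TryMoveNext(out T current)
    {
        for (int visited = 0; visited < _queues.Length; visited++)
        {
            _index = (_index + 1) % _queues.Length;
            if (!_done[_index] && _queues[_index].TryDequeue(out current))
                return true;
        }
        current = default!;
        return false;
    }
}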
/// <summary>
/// Creates and begins execution of a new set of spooling tasks.
/// </summary>
public static void Spool(
    QueryTaskGroupState groupState, PartitionedStream<TOutput, TKey> partitions,
    bool[] consumerWaiting, bool[] producerWaiting, bool[] producerDone,
    Queue<Pair<TKey, TOutput>>[] buffers, object[] bufferLocks,
    TaskScheduler taskScheduler, bool autoBuffered)
{
    Contract.Requires(groupState != null);
    Contract.Requires(partitions != null);
    Contract.Requires(producerDone != null && producerDone.Length == partitions.PartitionCount);
    Contract.Requires(buffers != null && buffers.Length == partitions.PartitionCount);
    Contract.Requires(bufferLocks != null);

    int degreeOfParallelism = partitions.PartitionCount;

    // Initialize the buffers and buffer locks.
    for (int i = 0; i < degreeOfParallelism; i++)
    {
        buffers[i] = new Queue<Pair<TKey, TOutput>>(OrderPreservingPipeliningMergeHelper<TOutput, TKey>.INITIAL_BUFFER_SIZE);
        bufferLocks[i] = new object();
    }

    // Ensure all tasks in this query are parented under a common root. Because this
    // is a pipelined query, we detach it from the parent (to avoid blocking the calling
    // thread), and run the query on a separate thread.
    Task rootTask = new Task(
        () =>
        {
            for (int i = 0; i < degreeOfParallelism; i++)
            {
                QueryTask asyncTask = new OrderPreservingPipeliningSpoolingTask<TOutput, TKey>(
                    partitions[i], groupState, consumerWaiting, producerWaiting,
                    producerDone, i, buffers, bufferLocks[i], taskScheduler, autoBuffered);
                asyncTask.RunAsynchronously(taskScheduler);
            }
        });

    // Begin the query on the calling thread.
    groupState.QueryBegin(rootTask);

    // And schedule it for execution. This is done after beginning to ensure no thread tries to
    // end the query before its root task has been recorded properly.
    rootTask.Start(taskScheduler);

    // We don't call QueryEnd here; when we return, the query is still executing, and the
    // last enumerator to be disposed of will call QueryEnd for us.
}
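// Illustrative sketch (not part of the original source): each spooling task above shares
// a per-partition Queue and lock object with the consumer, plus consumerWaiting flags. A
// minimal version of the producer-side handshake, with hypothetical names, might look
// like this; the real task also batches elements and waits when the buffer is full.

using System.Collections.Generic;
using System.Threading;

static class ProducerHandshakeSketch
{
    // Appends one element and wakes the consumer if it is blocked on this buffer.
    static void Publish<T>(Queue<T> buffer, object bufferLock,
                           bool[] consumerWaiting, int partitionIndex, T element)
    {
        lock (bufferLock)
        {
            buffer.Enqueue(element);
            if (consumerWaiting[partitionIndex])
            {
                consumerWaiting[partitionIndex] = false;
                Monitor.Pulse(bufferLock); // the consumer Waits on the same lock object
            }
        }
    }
}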
public static void Spool(QueryTaskGroupState groupState, PartitionedStream<TOutput, int> partitions, bool[] consumerWaiting,
                         bool[] producerWaiting, bool[] producerDone, Queue<Pair<int, TOutput>>[] buffers, object[] bufferLocks,
                         TaskScheduler taskScheduler, bool autoBuffered)
{
    int degreeOfParallelism = partitions.PartitionCount;
    for (int j = 0; j < degreeOfParallelism; j++)
    {
        buffers[j] = new Queue<Pair<int, TOutput>>(0x80); // 0x80 == 128, the initial buffer size
        bufferLocks[j] = new object();
    }
    Task rootTask = new Task(delegate
    {
        for (int k = 0; k < degreeOfParallelism; k++)
        {
            new OrderPreservingPipeliningSpoolingTask<TOutput>(partitions[k], groupState, consumerWaiting, producerWaiting,
                producerDone, k, buffers, bufferLocks[k], taskScheduler, autoBuffered).RunAsynchronously(taskScheduler);
        }
    });
    groupState.QueryBegin(rootTask);
    rootTask.Start(taskScheduler);
}
//-----------------------------------------------------------------------------------
// Creates and begins execution of a new spooling task. Executes synchronously,
// and by the time this API has returned all of the results have been produced.
//
// Arguments:
//     groupState    - values for inter-task communication
//     partitions    - the producer enumerators
//     channels      - the producer-consumer channels
//     taskScheduler - the task manager on which to execute
//

internal static void SpoolStopAndGo<TInputOutput, TIgnoreKey>(
    QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions,
    SynchronousChannel<TInputOutput>[] channels, TaskScheduler taskScheduler)
{
    Contract.Requires(partitions.PartitionCount == channels.Length);
    Contract.Requires(groupState != null);

    // Ensure all tasks in this query are parented under a common root.
    Task rootTask = new Task(
        () =>
        {
            int maxToRunInParallel = partitions.PartitionCount - 1;

            // A stop-and-go merge uses the current thread for one task and then blocks before
            // returning to the caller, until all results have been accumulated. We do this by
            // running the last partition on the calling thread.
            for (int i = 0; i < maxToRunInParallel; i++)
            {
                TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);
                QueryTask asyncTask = new StopAndGoSpoolingTask<TInputOutput, TIgnoreKey>(i, groupState, partitions[i], channels[i]);
                asyncTask.RunAsynchronously(taskScheduler);
            }

            TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);

            // Run one task synchronously on the current thread.
            QueryTask syncTask = new StopAndGoSpoolingTask<TInputOutput, TIgnoreKey>(
                maxToRunInParallel, groupState, partitions[maxToRunInParallel], channels[maxToRunInParallel]);
            syncTask.RunSynchronously(taskScheduler);
        });

    // Begin the query on the calling thread.
    groupState.QueryBegin(rootTask);

    // We don't want to return until the task is finished. Run it on the calling thread.
    rootTask.RunSynchronously(taskScheduler);

    // Wait for the query to complete, propagate exceptions, and so on.
    // For pipelined queries, this step happens in the async enumerator.
    groupState.QueryEnd(false);
}
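// Illustrative sketch (not part of the original source): the stop-and-go spool above
// runs N-1 partitions on worker tasks and the last partition inline, so the calling
// thread both contributes work and blocks until everything is done. A self-contained
// restatement of the pattern, with hypothetical names:

using System;
using System.Threading;
using System.Threading.Tasks;

static class StopAndGoPatternSketch
{
    static void RunAll(Action<int> work, int partitionCount, TaskScheduler scheduler)
    {
        var pending = new Task[partitionCount - 1];
        for (int i = 0; i < partitionCount - 1; i++)
        {
            int index = i; // avoid capturing the loop variable
            pending[i] = Task.Factory.StartNew(() => work(index),
                CancellationToken.None, TaskCreationOptions.None, scheduler);
        }

        work(partitionCount - 1); // run the last partition on the calling thread
        Task.WaitAll(pending);    // then block until the workers finish too
    }
}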
internal static void Spool(QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TKey> partitions,
                           System.Linq.Parallel.Shared<TInputOutput[]> results, TaskScheduler taskScheduler)
{
    int maxToRunInParallel = partitions.PartitionCount - 1;
    SortHelper<TInputOutput, TKey>[] sortHelpers = SortHelper<TInputOutput, TKey>.GenerateSortHelpers(partitions, groupState);
    Task rootTask = new Task(delegate
    {
        for (int k = 0; k < maxToRunInParallel; k++)
        {
            new OrderPreservingSpoolingTask<TInputOutput, TKey>(k, groupState, results, sortHelpers[k]).RunAsynchronously(taskScheduler);
        }
        new OrderPreservingSpoolingTask<TInputOutput, TKey>(maxToRunInParallel, groupState, results,
            sortHelpers[maxToRunInParallel]).RunSynchronously(taskScheduler);
    });
    groupState.QueryBegin(rootTask);
    rootTask.RunSynchronously(taskScheduler);
    for (int j = 0; j < sortHelpers.Length; j++)
    {
        sortHelpers[j].Dispose();
    }
    groupState.QueryEnd(false);
}
//-----------------------------------------------------------------------------------
// Creates and begins execution of a new spooling task. This is a for-all style
// execution, meaning that the query will be run fully (for effect) before returning
// and that there are no channels into which data will be queued.
//
// Arguments:
//     groupState    - values for inter-task communication
//     partitions    - the producer enumerators
//     taskScheduler - the task manager on which to execute
//

internal static void SpoolForAll<TInputOutput, TIgnoreKey>(
    QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions, TaskScheduler taskScheduler)
{
    Contract.Requires(groupState != null);

    // Ensure all tasks in this query are parented under a common root.
    Task rootTask = new Task(
        () =>
        {
            int maxToRunInParallel = partitions.PartitionCount - 1;

            // Create tasks that will enumerate the partitions in parallel "for effect"; in other words,
            // no data will be placed into any kind of producer-consumer channel.
            for (int i = 0; i < maxToRunInParallel; i++)
            {
                TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);
                QueryTask asyncTask = new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(i, groupState, partitions[i]);
                asyncTask.RunAsynchronously(taskScheduler);
            }

            TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);

            // Run one task synchronously on the current thread.
            QueryTask syncTask = new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(maxToRunInParallel, groupState, partitions[maxToRunInParallel]);
            syncTask.RunSynchronously(taskScheduler);
        });

    // Begin the query on the calling thread.
    groupState.QueryBegin(rootTask);

    // We don't want to return until the task is finished. Run it on the calling thread.
    rootTask.RunSynchronously(taskScheduler);

    // Wait for the query to complete, propagate exceptions, and so on.
    // For pipelined queries, this step happens in the async enumerator.
    groupState.QueryEnd(false);
}
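// Illustrative sketch (not part of the original source): the for-all spool above is the
// engine behind the public ForAll operator, where the query runs purely for side effects
// and no merged output stream exists. A minimal public-API example:

using System;
using System.Linq;

static class ForAllExample
{
    static void Main()
    {
        Enumerable.Range(0, 10)
            .AsParallel()
            .ForAll(x => Console.WriteLine(x)); // returns only after every partition finishes
    }
}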
//-----------------------------------------------------------------------------------
// Instantiates a new merge helper.
//
// Arguments:
//     partitions - the source partitions from which to consume data.
//

internal OrderPreservingPipeliningMergeHelper(
    PartitionedStream<TOutput, int> partitions,
    TaskScheduler taskScheduler,
    CancellationState cancellationState,
    bool autoBuffered,
    int queryId)
{
    Contract.Assert(partitions != null);

    TraceHelpers.TraceInfo("KeyOrderPreservingMergeHelper::.ctor(..): creating an order preserving merge helper");

    m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
    m_partitions = partitions;
    m_taskScheduler = taskScheduler;
    m_autoBuffered = autoBuffered;

    int partitionCount = m_partitions.PartitionCount;
    m_buffers = new Queue<Pair<int, TOutput>>[partitionCount];
    m_producerDone = new bool[partitionCount];
    m_consumerWaiting = new bool[partitionCount];
    m_producerWaiting = new bool[partitionCount];
    m_bufferLocks = new object[partitionCount];
}
protected QueryTaskGroupState _groupState; // State shared among the tasks.

//-----------------------------------------------------------------------------------
// Constructs a new task with the specified shared state.
//

protected QueryTask(int taskIndex, QueryTaskGroupState groupState)
{
    Debug.Assert(groupState != null);
    _taskIndex = taskIndex;
    _groupState = groupState;
}
protected QueryTaskGroupState _groupState; // State shared among the tasks.

//-----------------------------------------------------------------------------------
// Constructs a new task with the specified shared state.
//

protected QueryTask(int taskIndex, QueryTaskGroupState groupState)
{
    Contract.Assert(groupState != null);
    _taskIndex = taskIndex;
    _groupState = groupState;
}
//-----------------------------------------------------------------------------------
// Constructs a new spooling task.
//
// Arguments:
//     taskIndex - the unique index of this task
//

protected SpoolingTaskBase(int taskIndex, QueryTaskGroupState groupState)
    : base(taskIndex, groupState)
{
}
internal StopAndGoSpoolingTask(int taskIndex, QueryTaskGroupState groupState,
                               QueryOperatorEnumerator<TInputOutput, TIgnoreKey> source, SynchronousChannel<TInputOutput> destination)
    : base(taskIndex, groupState)
{
    this.m_source = source;
    this.m_destination = destination;
}
protected QueryTask(int taskIndex, QueryTaskGroupState groupState)
{
    this.m_taskIndex = taskIndex;
    this.m_groupState = groupState;
}
//-----------------------------------------------------------------------------------
// Constructs a new spooling task.
//
// Arguments:
//     taskIndex - the unique index of this task
//

protected SpoolingTaskBase(int taskIndex, QueryTaskGroupState groupState)
    : base(taskIndex, groupState)
{
}
protected QueryTaskGroupState m_groupState; // State shared among the tasks.

//-----------------------------------------------------------------------------------
// Constructs a new task with the specified shared state.
//

protected QueryTask(int taskIndex, QueryTaskGroupState groupState)
{
    Contract.Assert(groupState != null);
    m_taskIndex = taskIndex;
    m_groupState = groupState;
}