//-----------------------------------------------------------------------------------
// Creates and begins execution of a new order-preserving spooling task. Spooling
// is always performed synchronously: by the time this API has returned, all of the
// results have been produced and sorted. Pipelining is not supported by sort merges.
//
// Arguments:
//     groupState    - shared state for the group of tasks in this query
//     partitions    - the partitioned data source to spool from
//     results       - shared location into which the sorted output array is placed
//     taskScheduler - the scheduler on which to run the spooling tasks
//
// Assumptions:
//     groupState, partitions, and results must be non-null, and results.Value
//     must not have been assigned yet.
//

internal static void Spool(
    QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TKey> partitions,
    Shared<TInputOutput[]> results, TaskScheduler taskScheduler)
{
    Contract.Requires(groupState != null);
    Contract.Requires(partitions != null);
    Contract.Requires(results != null);
    Contract.Requires(results.Value == null);

    // Determine how many async tasks to create.
    int maxToRunInParallel = partitions.PartitionCount - 1;

    // Generate a set of sort helpers.
    SortHelper<TInputOutput, TKey>[] sortHelpers =
        SortHelper<TInputOutput, TKey>.GenerateSortHelpers(partitions, groupState);

    // Ensure all tasks in this query are parented under a common root.
    Task rootTask = new Task(
        () =>
        {
            // Create tasks that will enumerate the partitions in parallel. We'll use the current
            // thread for one task and then block before returning to the caller, until all results
            // have been accumulated. Pipelining is not supported by sort merges.
            for (int i = 0; i < maxToRunInParallel; i++)
            {
                TraceHelpers.TraceInfo("OrderPreservingSpoolingTask::Spool: Running partition[{0}] asynchronously", i);
                QueryTask asyncTask = new OrderPreservingSpoolingTask<TInputOutput, TKey>(
                    i, groupState, results, sortHelpers[i]);
                asyncTask.RunAsynchronously(taskScheduler);
            }

            // Run one task synchronously on the current thread.
            TraceHelpers.TraceInfo("OrderPreservingSpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);
            QueryTask syncTask = new OrderPreservingSpoolingTask<TInputOutput, TKey>(
                maxToRunInParallel, groupState, results, sortHelpers[maxToRunInParallel]);
            syncTask.RunSynchronously(taskScheduler);
        });

    // Begin the query on the calling thread.
    groupState.QueryBegin(rootTask);

    // We don't want to return until the task is finished. Run it on the calling thread.
    rootTask.RunSynchronously(taskScheduler);

    // Destroy the state associated with our sort helpers.
    for (int i = 0; i < sortHelpers.Length; i++)
    {
        sortHelpers[i].Dispose();
    }

    // End the query, which has the effect of propagating any unhandled exceptions.
    groupState.QueryEnd(false);
}
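
//-----------------------------------------------------------------------------------
// Illustrative sketch only (not part of the original source): the scheduling pattern
// used by Spool above, shown in isolation. Given P partitions, P - 1 workers are
// attached to a common root task and queued to the supplied scheduler, while the
// final partition runs inline on the calling thread. The method name FanOutAndWait
// and the Action<int> worker delegate are hypothetical names used only for this
// example; the real Spool wraps the equivalent logic in QueryBegin/QueryEnd.

private static void FanOutAndWait(int partitionCount, Action<int> worker, TaskScheduler taskScheduler)
{
    Task rootTask = new Task(() =>
    {
        // Queue partitions 0 .. P-2 to run concurrently on the scheduler. Attaching
        // the children to the root means the root is not considered complete until
        // every child has finished.
        for (int i = 0; i < partitionCount - 1; i++)
        {
            int partitionIndex = i; // capture a per-iteration copy for the closure
            Task.Factory.StartNew(
                () => worker(partitionIndex),
                CancellationToken.None, TaskCreationOptions.AttachedToParent, taskScheduler);
        }

        // Run the final partition inline, reusing the calling thread.
        worker(partitionCount - 1);
    });

    // Execute the root delegate on the calling thread, then wait for the root (and
    // therefore all attached children) to finish. Wait also rethrows any child
    // exceptions as an AggregateException.
    rootTask.RunSynchronously(taskScheduler);
    rootTask.Wait();
}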