QueryBegin() private method

private void QueryBegin(Task rootTask)
rootTask: Task
Returns: void
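
None of the examples below shows QueryBegin's body; they only call it to register the query's root task before that task is started or run. The following is a minimal sketch of what the method plausibly does, inferred from that usage; the field name, the assertions, and the surrounding type are assumptions, not the actual implementation.

    using System.Diagnostics;
    using System.Threading.Tasks;

    // Hypothetical sketch of the surrounding type; everything except the QueryBegin
    // signature is an assumption inferred from how the examples below use it.
    internal sealed class QueryTaskGroupStateSketch
    {
        private Task _rootTask;   // assumed field that remembers the query's root task

        // Record the root task before it is started or run, so that the end-of-query
        // path (QueryEnd, or the last enumerator to be disposed) can find it, wait on
        // it, and propagate any exceptions it captured.
        private void QueryBegin(Task rootTask)
        {
            Debug.Assert(rootTask != null, "expected a non-null root task");
            Debug.Assert(_rootTask == null, "a query group should only be begun once");
            _rootTask = rootTask;
        }
    }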
Example No. 1
        //-----------------------------------------------------------------------------------
        // Creates and begins execution of a new spooling task. Runs asynchronously.
        //
        // Arguments:
        //     groupState      - values for inter-task communication
        //     partitions      - the producer enumerators
        //     channels        - the producer-consumer channels
        //     taskScheduler   - the task manager on which to execute
        //

        internal static void SpoolPipeline<TInputOutput, TIgnoreKey>(
            QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions,
            AsynchronousChannel<TInputOutput>[] channels, TaskScheduler taskScheduler)
        {
            Contract.Requires(partitions.PartitionCount == channels.Length);
            Contract.Requires(groupState != null);

            // Ensure all tasks in this query are parented under a common root. Because this
            // is a pipelined query, we detach it from the parent (to avoid blocking the calling
            // thread), and run the query on a separate thread.
            Task rootTask = new Task(
                () =>
            {
                // Create tasks that will enumerate the partitions in parallel. Because we're pipelining,
                // we will begin running these tasks in parallel and then return.
                for (int i = 0; i < partitions.PartitionCount; i++)
                {
                    TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);

                    QueryTask asyncTask = new PipelineSpoolingTask<TInputOutput, TIgnoreKey>(i, groupState, partitions[i], channels[i]);
                    asyncTask.RunAsynchronously(taskScheduler);
                }
            });

            // Begin the query on the calling thread.
            groupState.QueryBegin(rootTask);

            // And schedule it for execution.  This is done after beginning to ensure no thread tries to
            // end the query before its root task has been recorded properly.
            rootTask.Start(taskScheduler);
            // We don't call QueryEnd here; when we return, the query is still executing, and the
            // last enumerator to be disposed of will call QueryEnd for us.
        }
        //-----------------------------------------------------------------------------------
        // Creates and begins execution of a new order-preserving spooling task. Executes
        // synchronously: by the time this API has returned, all of the results have been
        // produced, sorted, and stored into the shared results array.
        //
        // Arguments:
        //     groupState      - values for inter-task communication
        //     partitions      - the producer enumerators
        //     results         - the shared array that receives the sorted output
        //     taskScheduler   - the task manager on which to execute
        //
        // Assumptions:
        //     results.Value must be null on entry; it is populated before this method returns.
        //

        internal static void Spool(
            QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TKey> partitions,
            Shared<TInputOutput[]> results, TaskScheduler taskScheduler)
        {
            Contract.Requires(groupState != null);
            Contract.Requires(partitions != null);
            Contract.Requires(results != null);
            Contract.Requires(results.Value == null);

            // Determine how many async tasks to create.
            int maxToRunInParallel = partitions.PartitionCount - 1;

            // Generate a set of sort helpers.
            SortHelper<TInputOutput, TKey>[] sortHelpers =
                SortHelper<TInputOutput, TKey>.GenerateSortHelpers(partitions, groupState);

            // Ensure all tasks in this query are parented under a common root.
            Task rootTask = new Task(
                () =>
            {
                // Create tasks that will enumerate the partitions in parallel.  We'll use the current
                // thread for one task and then block before returning to the caller, until all results
                // have been accumulated. Pipelining is not supported by sort merges.
                for (int i = 0; i < maxToRunInParallel; i++)
                {
                    TraceHelpers.TraceInfo("OrderPreservingSpoolingTask::Spool: Running partition[{0}] asynchronously", i);
                    QueryTask asyncTask = new OrderPreservingSpoolingTask<TInputOutput, TKey>(
                        i, groupState, results, sortHelpers[i]);
                    asyncTask.RunAsynchronously(taskScheduler);
                }

                // Run one task synchronously on the current thread.
                TraceHelpers.TraceInfo("OrderPreservingSpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);
                QueryTask syncTask = new OrderPreservingSpoolingTask<TInputOutput, TKey>(
                    maxToRunInParallel, groupState, results, sortHelpers[maxToRunInParallel]);
                syncTask.RunSynchronously(taskScheduler);
            });

            // Begin the query on the calling thread.
            groupState.QueryBegin(rootTask);

            // We don't want to return until the task is finished.  Run it on the calling thread.
            rootTask.RunSynchronously(taskScheduler);

            // Destroy the state associated with our sort helpers.
            for (int i = 0; i < sortHelpers.Length; i++)
            {
                sortHelpers[i].Dispose();
            }

            // End the query, which has the effect of propagating any unhandled exceptions.
            groupState.QueryEnd(false);
        }
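
The order-preserving Spool above cannot pipeline, so it runs all but one partition asynchronously, runs the last partition synchronously inside the root task, executes the root on the calling thread, and only returns once every result has been produced. The sketch below reproduces just that fan-out shape with plain TPL types; the work items are placeholders, not PLINQ's SortHelper or OrderPreservingSpoolingTask.

    using System;
    using System.Threading.Tasks;

    class StopAndGoFanOutDemo
    {
        static void Main()
        {
            int partitionCount = 4;
            int maxToRunInParallel = partitionCount - 1;
            var results = new string[partitionCount];

            Task rootTask = new Task(() =>
            {
                // N - 1 partitions run asynchronously...
                var asyncTasks = new Task[maxToRunInParallel];
                for (int i = 0; i < maxToRunInParallel; i++)
                {
                    int p = i;
                    asyncTasks[p] = Task.Factory.StartNew(() => results[p] = $"partition {p} done");
                }

                // ...while the last partition runs synchronously on the root task's thread.
                results[maxToRunInParallel] = $"partition {maxToRunInParallel} done";

                // The real spooling tasks coordinate completion through groupState; a plain
                // WaitAll stands in for that here.
                Task.WaitAll(asyncTasks);
            });

            // Run the root on the calling thread so that this method does not return
            // until every result slot has been filled.
            rootTask.RunSynchronously(TaskScheduler.Default);

            foreach (string r in results)
                Console.WriteLine(r);
        }
    }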
Example No. 3
        internal static void SpoolPipeline<TInputOutput, TIgnoreKey>(QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions, AsynchronousChannel<TInputOutput>[] channels, TaskScheduler taskScheduler)
        {
            Task rootTask = new Task(delegate {
                for (int j = 0; j < partitions.PartitionCount; j++)
                {
                    new PipelineSpoolingTask<TInputOutput, TIgnoreKey>(j, groupState, partitions[j], channels[j]).RunAsynchronously(taskScheduler);
                }
            });

            groupState.QueryBegin(rootTask);
            rootTask.Start(taskScheduler);
        }
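
Both SpoolPipeline listings rely on the same ordering: the root task is registered through QueryBegin before it is started, so no other thread can try to end the query before the root has been recorded, and the caller returns immediately while the producers keep running. Below is a stripped-down sketch of that ordering using plain TPL types only; all names are illustrative stand-ins rather than PLINQ's.

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    class PipelinedRootDemo
    {
        // Stands in for the root task recorded by groupState.QueryBegin.
        private static Task s_rootTask;

        static void Main()
        {
            // Build the root task first, but do not start it yet.
            Task rootTask = new Task(() =>
            {
                for (int i = 0; i < 4; i++)
                {
                    int partition = i;
                    // Each "partition" producer runs asynchronously, as in a pipelined merge.
                    Task.Factory.StartNew(() =>
                        Console.WriteLine($"partition[{partition}] spooling"));
                }
            });

            // "QueryBegin": record the root task before starting it, so any thread that
            // later wants to end the query can already observe it.
            Interlocked.Exchange(ref s_rootTask, rootTask);

            // Only now is the root scheduled; the calling thread returns without waiting.
            rootTask.Start(TaskScheduler.Default);

            // The real code defers QueryEnd to the last enumerator to be disposed; here we
            // simply wait so the demo prints its output before the process exits.
            rootTask.Wait();
            Thread.Sleep(100);
        }
    }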
Example No. 4
        internal static void SpoolForAll<TInputOutput, TIgnoreKey>(QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions, TaskScheduler taskScheduler)
        {
            Task rootTask = new Task(delegate {
                int taskIndex = partitions.PartitionCount - 1;
                for (int j = 0; j < taskIndex; j++)
                {
                    new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(j, groupState, partitions[j]).RunAsynchronously(taskScheduler);
                }
                new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(taskIndex, groupState, partitions[taskIndex]).RunSynchronously(taskScheduler);
            });

            groupState.QueryBegin(rootTask);
            rootTask.RunSynchronously(taskScheduler);
            groupState.QueryEnd(false);
        }
        /// <summary>
        /// Creates and begins execution of a new set of spooling tasks.
        /// </summary>
        public static void Spool(
            QueryTaskGroupState groupState, PartitionedStream<TOutput, TKey> partitions,
            bool[] consumerWaiting, bool[] producerWaiting, bool[] producerDone,
            Queue<Pair<TKey, TOutput>>[] buffers, object[] bufferLocks,
            TaskScheduler taskScheduler, bool autoBuffered)
        {
            Contract.Requires(groupState != null);
            Contract.Requires(partitions != null);
            Contract.Requires(producerDone != null && producerDone.Length == partitions.PartitionCount);
            Contract.Requires(buffers != null && buffers.Length == partitions.PartitionCount);
            Contract.Requires(bufferLocks != null);

            int degreeOfParallelism = partitions.PartitionCount;

            // Initialize the buffers and buffer locks.
            for (int i = 0; i < degreeOfParallelism; i++)
            {
                buffers[i]     = new Queue<Pair<TKey, TOutput>>(OrderPreservingPipeliningMergeHelper<TOutput, TKey>.INITIAL_BUFFER_SIZE);
                bufferLocks[i] = new object();
            }

            // Ensure all tasks in this query are parented under a common root. Because this
            // is a pipelined query, we detach it from the parent (to avoid blocking the calling
            // thread), and run the query on a separate thread.
            Task rootTask = new Task(
                () =>
            {
                for (int i = 0; i < degreeOfParallelism; i++)
                {
                    QueryTask asyncTask = new OrderPreservingPipeliningSpoolingTask<TOutput, TKey>(
                        partitions[i], groupState, consumerWaiting, producerWaiting,
                        producerDone, i, buffers, bufferLocks[i], taskScheduler, autoBuffered);
                    asyncTask.RunAsynchronously(taskScheduler);
                }
            });

            // Begin the query on the calling thread.
            groupState.QueryBegin(rootTask);

            // And schedule it for execution.  This is done after beginning to ensure no thread tries to
            // end the query before its root task has been recorded properly.
            rootTask.Start(taskScheduler);

            // We don't call QueryEnd here; when we return, the query is still executing, and the
            // last enumerator to be disposed of will call QueryEnd for us.
        }
        public static void Spool(QueryTaskGroupState groupState, PartitionedStream<TOutput, int> partitions, bool[] consumerWaiting, bool[] producerWaiting, bool[] producerDone, Queue<Pair<int, TOutput>>[] buffers, object[] bufferLocks, TaskScheduler taskScheduler, bool autoBuffered)
        {
            int degreeOfParallelism = partitions.PartitionCount;

            for (int j = 0; j < degreeOfParallelism; j++)
            {
                buffers[j]     = new Queue<Pair<int, TOutput>>(0x80);
                bufferLocks[j] = new object();
            }
            Task rootTask = new Task(delegate {
                for (int k = 0; k < degreeOfParallelism; k++)
                {
                    new OrderPreservingPipeliningSpoolingTask<TOutput>(partitions[k], groupState, consumerWaiting, producerWaiting, producerDone, k, buffers, bufferLocks[k], taskScheduler, autoBuffered).RunAsynchronously(taskScheduler);
                }
            });

            groupState.QueryBegin(rootTask);
            rootTask.Start(taskScheduler);
        }
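
A detail worth noting in the two pipelining Spool overloads above is that the per-partition buffers and their lock objects are created before the root task exists, so every producer and the merging consumer are guaranteed to synchronize on the same objects. The sketch below shows only that buffer-and-lock handshake; the pair type, the 128-element initial capacity (0x80 in the decompiled overload), and all names are stand-ins rather than PLINQ's own types.

    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;

    class OrderPreservingBufferDemo
    {
        // Assumed to correspond to INITIAL_BUFFER_SIZE (0x80 = 128 in the decompiled code).
        private const int InitialBufferSize = 128;

        static void Main()
        {
            int degreeOfParallelism = 4;
            var buffers = new Queue<KeyValuePair<int, string>>[degreeOfParallelism];
            var bufferLocks = new object[degreeOfParallelism];

            // Initialize every buffer and lock *before* any producer can run, exactly as
            // Spool does, so no producer ever observes a null buffer or lock.
            for (int i = 0; i < degreeOfParallelism; i++)
            {
                buffers[i] = new Queue<KeyValuePair<int, string>>(InitialBufferSize);
                bufferLocks[i] = new object();
            }

            // Each producer appends (key, element) pairs to its own buffer under its own
            // lock; a consumer on another thread would drain these buffers in key order.
            var producers = new Task[degreeOfParallelism];
            for (int i = 0; i < degreeOfParallelism; i++)
            {
                int p = i;
                producers[p] = Task.Run(() =>
                {
                    lock (bufferLocks[p])
                    {
                        buffers[p].Enqueue(new KeyValuePair<int, string>(p, $"element from partition {p}"));
                    }
                });
            }
            Task.WaitAll(producers);

            for (int i = 0; i < degreeOfParallelism; i++)
                Console.WriteLine($"buffer[{i}] holds {buffers[i].Count} element(s)");
        }
    }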
Example No. 7
        //-----------------------------------------------------------------------------------
        // Creates and begins execution of a new spooling task. Executes synchronously,
        // and by the time this API has returned all of the results have been produced.
        //
        // Arguments:
        //     groupState      - values for inter-task communication
        //     partitions      - the producer enumerators
        //     channels        - the producer-consumer channels
        //     taskScheduler   - the task manager on which to execute
        //

        internal static void SpoolStopAndGo<TInputOutput, TIgnoreKey>(
            QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions,
            SynchronousChannel<TInputOutput>[] channels, TaskScheduler taskScheduler)
        {
            Contract.Requires(partitions.PartitionCount == channels.Length);
            Contract.Requires(groupState != null);

            // Ensure all tasks in this query are parented under a common root.
            Task rootTask = new Task(
                () =>
            {
                int maxToRunInParallel = partitions.PartitionCount - 1;

                // A stop-and-go merge uses the current thread for one task and then blocks before
                // returning to the caller, until all results have been accumulated. We do this by
                // running the last partition on the calling thread.
                for (int i = 0; i < maxToRunInParallel; i++)
                {
                    TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);

                    QueryTask asyncTask = new StopAndGoSpoolingTask<TInputOutput, TIgnoreKey>(i, groupState, partitions[i], channels[i]);
                    asyncTask.RunAsynchronously(taskScheduler);
                }

                TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);

                // Run one task synchronously on the current thread.
                QueryTask syncTask = new StopAndGoSpoolingTask<TInputOutput, TIgnoreKey>(
                    maxToRunInParallel, groupState, partitions[maxToRunInParallel], channels[maxToRunInParallel]);
                syncTask.RunSynchronously(taskScheduler);
            });

            // Begin the query on the calling thread.
            groupState.QueryBegin(rootTask);

            // We don't want to return until the task is finished.  Run it on the calling thread.
            rootTask.RunSynchronously(taskScheduler);

            // Wait for the query to complete, propagate exceptions, and so on.
            // For pipelined queries, this step happens in the async enumerator.
            groupState.QueryEnd(false);
        }
        internal static void Spool(QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TKey> partitions, System.Linq.Parallel.Shared<TInputOutput[]> results, TaskScheduler taskScheduler)
        {
            int maxToRunInParallel = partitions.PartitionCount - 1;

            SortHelper<TInputOutput, TKey>[] sortHelpers = SortHelper<TInputOutput, TKey>.GenerateSortHelpers(partitions, groupState);

            Task rootTask = new Task(delegate {
                for (int k = 0; k < maxToRunInParallel; k++)
                {
                    new OrderPreservingSpoolingTask<TInputOutput, TKey>(k, groupState, results, sortHelpers[k]).RunAsynchronously(taskScheduler);
                }
                new OrderPreservingSpoolingTask<TInputOutput, TKey>(maxToRunInParallel, groupState, results, sortHelpers[maxToRunInParallel]).RunSynchronously(taskScheduler);
            });

            groupState.QueryBegin(rootTask);
            rootTask.RunSynchronously(taskScheduler);
            for (int j = 0; j < sortHelpers.Length; j++)
            {
                sortHelpers[j].Dispose();
            }
            groupState.QueryEnd(false);
        }
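
SpoolStopAndGo above fills synchronous channels: by the time it returns (and QueryEnd has propagated any exceptions), every channel is fully populated, so the consumer can merge them without further producer synchronization. A compact sketch of that contract follows, with a List<int> standing in for SynchronousChannel<T> and plain tasks in place of the spooling tasks.

    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;

    class StopAndGoChannelDemo
    {
        static void Main()
        {
            int partitionCount = 3;
            var channels = new List<int>[partitionCount];
            for (int i = 0; i < partitionCount; i++)
                channels[i] = new List<int>();

            Task rootTask = new Task(() =>
            {
                var producers = new Task[partitionCount - 1];
                for (int i = 0; i < partitionCount - 1; i++)
                {
                    int p = i;
                    producers[p] = Task.Factory.StartNew(() =>
                    {
                        for (int value = 0; value < 5; value++)
                            channels[p].Add(p * 100 + value);
                    });
                }

                // Fill the last channel on the current thread, then wait for the rest
                // (in the real code, groupState.QueryEnd performs the waiting and the
                // exception propagation).
                for (int value = 0; value < 5; value++)
                    channels[partitionCount - 1].Add((partitionCount - 1) * 100 + value);
                Task.WaitAll(producers);
            });

            // Run the root on the calling thread: when this returns, every channel is
            // fully populated and can be read without blocking.
            rootTask.RunSynchronously();

            for (int i = 0; i < partitionCount; i++)
                Console.WriteLine($"channel[{i}]: {string.Join(", ", channels[i])}");
        }
    }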
Example No. 9
        //-----------------------------------------------------------------------------------
        // Creates and begins execution of a new spooling task. This is a for-all style
        // execution, meaning that the query will be run fully (for effect) before returning
        // and that there are no channels into which data will be queued.
        //
        // Arguments:
        //     groupState      - values for inter-task communication
        //     partitions      - the producer enumerators
        //     taskScheduler   - the task manager on which to execute
        //

        internal static void SpoolForAll<TInputOutput, TIgnoreKey>(
            QueryTaskGroupState groupState, PartitionedStream<TInputOutput, TIgnoreKey> partitions, TaskScheduler taskScheduler)
        {
            Contract.Requires(groupState != null);

            // Ensure all tasks in this query are parented under a common root.
            Task rootTask = new Task(
                () =>
            {
                int maxToRunInParallel = partitions.PartitionCount - 1;

                // Create tasks that will enumerate the partitions in parallel "for effect"; in other words,
                // no data will be placed into any kind of producer-consumer channel.
                for (int i = 0; i < maxToRunInParallel; i++)
                {
                    TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);

                    QueryTask asyncTask = new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(i, groupState, partitions[i]);
                    asyncTask.RunAsynchronously(taskScheduler);
                }

                TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);

                // Run one task synchronously on the current thread.
                QueryTask syncTask = new ForAllSpoolingTask<TInputOutput, TIgnoreKey>(maxToRunInParallel, groupState, partitions[maxToRunInParallel]);
                syncTask.RunSynchronously(taskScheduler);
            });

            // Begin the query on the calling thread.
            groupState.QueryBegin(rootTask);

            // We don't want to return until the task is finished.  Run it on the calling thread.
            rootTask.RunSynchronously(taskScheduler);

            // Wait for the query to complete, propagate exceptions, and so on.
            // For pipelined queries, this step happens in the async enumerator.
            groupState.QueryEnd(false);
        }
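
ForAll spooling enumerates every partition purely for its side effects: nothing is queued into a channel, and the caller blocks until the whole query has run. Below is a minimal sketch of that enumerate-for-effect shape, with plain tasks striding over an array in place of the PLINQ partitioning machinery.

    using System;
    using System.Linq;
    using System.Threading.Tasks;

    class ForAllSpoolDemo
    {
        static void Main()
        {
            int[] data = Enumerable.Range(0, 8).ToArray();
            int partitionCount = 2;

            Task rootTask = new Task(() =>
            {
                var asyncTasks = new Task[partitionCount - 1];
                for (int i = 0; i < partitionCount - 1; i++)
                {
                    int p = i;
                    asyncTasks[p] = Task.Factory.StartNew(() => Enumerate(data, p, partitionCount));
                }

                // The last partition runs synchronously on the root task's thread, then we
                // wait for the rest (QueryEnd performs that wait in the real code).
                Enumerate(data, partitionCount - 1, partitionCount);
                Task.WaitAll(asyncTasks);
            });

            // Run the root on the calling thread; when this returns, the whole query has
            // executed for effect and there are no buffered results to hand back.
            rootTask.RunSynchronously();
        }

        // "Enumeration for effect": each element is consumed (printed) but never stored.
        static void Enumerate(int[] data, int partition, int partitionCount)
        {
            for (int i = partition; i < data.Length; i += partitionCount)
                Console.WriteLine($"partition {partition} visited element {data[i]}");
        }
    }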