Example #1
        internal override void WrapPartitionedStream <TKey>(
            PartitionedStream <TInput, TKey> inputStream, IPartitionedStreamRecipient <bool> recipient, bool preferStriping, QuerySettings settings)
        {
            // Create a shared result-found flag, then give the recipient a stream of wrapping enumerators.
            Shared <bool> resultFoundFlag = new Shared <bool>(false);

            int partitionCount = inputStream.PartitionCount;
            PartitionedStream <bool, int> outputStream = new PartitionedStream <bool, int>(
                partitionCount, Util.GetDefaultComparer <int>(), OrdinalIndexState.Correct);

            for (int i = 0; i < partitionCount; i++)
            {
                outputStream[i] = new AnyAllSearchOperatorEnumerator <TKey>(inputStream[i], _qualification, _predicate, i, resultFoundFlag,
                                                                            settings.CancellationState.MergedCancellationToken);
            }

            recipient.Receive(outputStream);
        }
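Every partition above receives the same Shared<bool> instance; that single flag is what lets one partition's match stop the whole Any/All search early. The sketch below is a self-contained, assumption-level illustration of that cooperation; SharedResultFlag and AnySearchWorker are invented stand-ins, not the internal Shared<T> or AnyAllSearchOperatorEnumerator types.

using System;
using System.Collections.Generic;

// Invented stand-in for the internal Shared<T> holder; a volatile bool is enough for the sketch.
internal sealed class SharedResultFlag
{
    public volatile bool Value;
}

// Invented stand-in for AnyAllSearchOperatorEnumerator: scans one partition and stops early
// once any partition has published a result through the shared flag.
internal sealed class AnySearchWorker<T>
{
    private readonly IEnumerable<T> _partition;
    private readonly Func<T, bool> _predicate;
    private readonly SharedResultFlag _resultFound;

    public AnySearchWorker(IEnumerable<T> partition, Func<T, bool> predicate, SharedResultFlag resultFound)
    {
        _partition = partition;
        _predicate = predicate;
        _resultFound = resultFound;
    }

    public void Run()
    {
        foreach (T element in _partition)
        {
            if (_resultFound.Value)
            {
                return;                      // another partition already produced the answer
            }
            if (_predicate(element))
            {
                _resultFound.Value = true;   // publish the result so the other partitions can stop
                return;
            }
        }
    }
}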
Example #2
 public override void WrapPartitionedStream <TLeftKey, TRightKey>(
     PartitionedStream <TSource, TLeftKey> leftStream, PartitionedStream <TSource, TRightKey> rightStream,
     IPartitionedStreamRecipient <TSource> outputRecipient, bool preferStriping, QuerySettings settings)
 {
     // Prematurely merge the left results, if necessary
     if (_prematureMergeLeft)
     {
         ListQueryResults <TSource> leftStreamResults =
             ExecuteAndCollectResults(leftStream, leftStream.PartitionCount, LeftChild.OutputOrdered, preferStriping, settings);
         PartitionedStream <TSource, int> leftStreamInc = leftStreamResults.GetPartitionedStream();
         WrapHelper <int, TRightKey>(leftStreamInc, rightStream, outputRecipient, settings, preferStriping);
     }
     else
     {
         Contract.Assert(!ExchangeUtilities.IsWorseThan(leftStream.OrdinalIndexState, OrdinalIndexState.Increasing));
         WrapHelper <TLeftKey, TRightKey>(leftStream, rightStream, outputRecipient, settings, preferStriping);
     }
 }
Example #3
        //---------------------------------------------------------------------------------------
        // This is a helper method. WrapPartitionedStream decides what type TLeftKey is going
        // to be, and then calls this method with that key type as a generic parameter.
        //

        private void WrapPartitionedStreamHelper <TLeftKey, TRightKey>(
            PartitionedStream <Pair <TLeftInput, TKey>, TLeftKey> leftHashStream, PartitionedStream <TRightInput, TRightKey> rightPartitionedStream,
            IPartitionedStreamRecipient <TOutput> outputRecipient, int partitionCount, CancellationToken cancellationToken)
        {
            PartitionedStream <Pair <TRightInput, TKey>, int> rightHashStream = ExchangeUtilities.HashRepartition(
                rightPartitionedStream, m_rightKeySelector, m_keyComparer, null, cancellationToken);

            PartitionedStream <TOutput, TLeftKey> outputStream = new PartitionedStream <TOutput, TLeftKey>(
                partitionCount, leftHashStream.KeyComparer, OrdinalIndexState);

            for (int i = 0; i < partitionCount; i++)
            {
                outputStream[i] = new HashJoinQueryOperatorEnumerator <TLeftInput, TLeftKey, TRightInput, TKey, TOutput>(
                    leftHashStream[i], rightHashStream[i], null, m_resultSelector, m_keyComparer, cancellationToken);
            }

            outputRecipient.Receive(outputStream);
        }
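HashRepartition appears throughout these operators; conceptually it routes every element to the partition selected by its key's hash code, which guarantees that equal keys (here, join keys) end up in the same partition. The sketch below illustrates that routing with ordinary collections; it is only an assumption-level picture, not ExchangeUtilities.HashRepartition itself, which streams elements rather than materializing lists.

using System;
using System.Collections.Generic;

// Conceptual sketch of hash repartitioning: route each element to the partition chosen by the
// hash of its key, so that equal keys always meet in the same output partition.
internal static class HashRepartitionSketch
{
    public static List<TElement>[] Repartition<TElement, TKey>(
        IEnumerable<TElement> source, Func<TElement, TKey> keySelector,
        IEqualityComparer<TKey> comparer, int partitionCount)
    {
        var partitions = new List<TElement>[partitionCount];
        for (int i = 0; i < partitionCount; i++)
        {
            partitions[i] = new List<TElement>();
        }

        foreach (TElement element in source)
        {
            int hash = comparer.GetHashCode(keySelector(element)) & int.MaxValue; // strip the sign bit
            partitions[hash % partitionCount].Add(element);
        }
        return partitions;
    }
}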
Example #4
        private void WrapHelper <TKey>(PartitionedStream <TResult, TKey> inputStream, IPartitionedStreamRecipient <TResult> recipient, QuerySettings settings)
        {
            int partitionCount = inputStream.PartitionCount;
            FixedMaxHeap <TKey> sharedIndices = new FixedMaxHeap <TKey>(_count, inputStream.KeyComparer); // a bounded max-heap tracking the smallest _count indices, i.e. those leading up to the Nth element
            CountdownEvent      sharedBarrier = new CountdownEvent(partitionCount);                       // a barrier to synchronize before yielding

            PartitionedStream <TResult, TKey> outputStream =
                new PartitionedStream <TResult, TKey>(partitionCount, inputStream.KeyComparer, OrdinalIndexState);

            for (int i = 0; i < partitionCount; i++)
            {
                outputStream[i] = new TakeOrSkipQueryOperatorEnumerator <TKey>(
                    inputStream[i], _take, sharedIndices, sharedBarrier,
                    settings.CancellationState.MergedCancellationToken, inputStream.KeyComparer);
            }

            recipient.Receive(outputStream);
        }
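The shared FixedMaxHeap and CountdownEvent above are the whole trick behind Take/Skip: every partition contributes its candidate indices, and no partition yields until all of them have agreed on the cutoff. The sketch below captures that protocol with ordinary collections; TakeCutoffCoordinator is a hypothetical illustration, not the real operator state.

using System.Collections.Generic;
using System.Threading;

// Illustrative coordinator playing the roles of the shared FixedMaxHeap<TKey> and CountdownEvent
// above (invented name, not a PLINQ type): each partition contributes the indices it owns, waits
// at the barrier, and afterwards reads the cutoff that separates Take(count) from the rest.
internal sealed class TakeCutoffCoordinator
{
    private readonly List<int> _smallestIndices = new List<int>();
    private readonly int _count;
    private readonly CountdownEvent _barrier;

    public TakeCutoffCoordinator(int count, int partitionCount)
    {
        _count = count;
        _barrier = new CountdownEvent(partitionCount);
    }

    // Called once per partition with the ordinal indices that partition has seen.
    public void Contribute(IEnumerable<int> indices)
    {
        lock (_smallestIndices)
        {
            _smallestIndices.AddRange(indices);
            _smallestIndices.Sort();
            if (_smallestIndices.Count > _count)
            {
                _smallestIndices.RemoveRange(_count, _smallestIndices.Count - _count);
            }
        }
        _barrier.Signal();   // this partition is done contributing
        _barrier.Wait();     // block until every partition has contributed
    }

    // Largest index that still belongs to Take(count); only meaningful after the barrier opens.
    public int Cutoff => _smallestIndices.Count == 0 ? -1 : _smallestIndices[_smallestIndices.Count - 1];
}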
        private void WrapPartitionedStreamHelper <TKey>(PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, TKey> hashStream, IPartitionedStreamRecipient <TInputOutput> recipient, CancellationToken cancellationToken)
        {
            int partitionCount = hashStream.PartitionCount;
            PartitionedStream <TInputOutput, TKey> partitionedStream = new PartitionedStream <TInputOutput, TKey>(partitionCount, hashStream.KeyComparer, OrdinalIndexState.Shuffled);

            for (int i = 0; i < partitionCount; i++)
            {
                if (base.OutputOrdered)
                {
                    partitionedStream[i] = new OrderedDistinctQueryOperatorEnumerator <TInputOutput, TKey>(hashStream[i], this.m_comparer, hashStream.KeyComparer, cancellationToken);
                }
                else
                {
                    partitionedStream[i] = (QueryOperatorEnumerator <TInputOutput, TKey>) new DistinctQueryOperatorEnumerator <TInputOutput, TKey>(hashStream[i], this.m_comparer, cancellationToken);
                }
            }
            recipient.Receive <TKey>(partitionedStream);
        }
Example #6
 private void WrapHelper <TLeftKey, TRightKey>(
     PartitionedStream <TSource, TLeftKey> leftStreamInc, PartitionedStream <TSource, TRightKey> rightStream,
     IPartitionedStreamRecipient <TSource> outputRecipient, QuerySettings settings, bool preferStriping)
 {
     // Prematurely merge the right results, if necessary
     if (_prematureMergeRight)
     {
         ListQueryResults <TSource> rightStreamResults =
             ExecuteAndCollectResults(rightStream, leftStreamInc.PartitionCount, LeftChild.OutputOrdered, preferStriping, settings);
         PartitionedStream <TSource, int> rightStreamInc = rightStreamResults.GetPartitionedStream();
         WrapHelper2 <TLeftKey, int>(leftStreamInc, rightStreamInc, outputRecipient);
     }
     else
     {
         Debug.Assert(!ExchangeUtilities.IsWorseThan(rightStream.OrdinalIndexState, OrdinalIndexState.Increasing));
         WrapHelper2 <TLeftKey, TRightKey>(leftStreamInc, rightStream, outputRecipient);
     }
 }
 internal override void GivePartitionedStream(IPartitionedStreamRecipient <TOutput> recipient)
 {
     if ((((ParallelExecutionMode)this.m_settings.ExecutionMode.Value) == ParallelExecutionMode.Default) && this.m_op.LimitsParallelism)
     {
         IEnumerable <TOutput>            source            = this.m_op.AsSequentialQuery(this.m_settings.CancellationState.ExternalCancellationToken);
         PartitionedStream <TOutput, int> partitionedStream = ExchangeUtilities.PartitionDataSource <TOutput>(source, this.m_settings.DegreeOfParallelism.Value, this.m_preferStriping);
         recipient.Receive <int>(partitionedStream);
     }
     else if (this.IsIndexible)
     {
         PartitionedStream <TOutput, int> stream2 = ExchangeUtilities.PartitionDataSource <TOutput>(this, this.m_settings.DegreeOfParallelism.Value, this.m_preferStriping);
         recipient.Receive <int>(stream2);
     }
     else
     {
         this.m_childQueryResults.GivePartitionedStream(new ChildResultsRecipient <TInput, TOutput>(recipient, this.m_op, this.m_preferStriping, this.m_settings));
     }
 }
Example #8
        private void WrapHelper2 <TLeftKey, TRightKey>(
            PartitionedStream <TSource, TLeftKey> leftStreamInc, PartitionedStream <TSource, TRightKey> rightStreamInc,
            IPartitionedStreamRecipient <TSource> outputRecipient)
        {
            int partitionCount = leftStreamInc.PartitionCount;

            // Generate the shared data.
            IComparer <ConcatKey> comparer = ConcatKey.MakeComparer(
                leftStreamInc.KeyComparer, rightStreamInc.KeyComparer);
            var outputStream = new PartitionedStream <TSource, ConcatKey>(partitionCount, comparer, OrdinalIndexState);

            for (int i = 0; i < partitionCount; i++)
            {
                outputStream[i] = new ConcatQueryOperatorEnumerator <TLeftKey, TRightKey>(leftStreamInc[i], rightStreamInc[i]);
            }

            outputRecipient.Receive(outputStream);
        }
Example #9
        internal override void WrapPartitionedStream <TKey>(
            PartitionedStream <TResult, TKey> inputStream, IPartitionedStreamRecipient <TResult> recipient, bool preferStriping, QuerySettings settings)
        {
            Debug.Assert(Child.OrdinalIndexState != OrdinalIndexState.Indexible, "Don't take this code path if the child is indexible.");

            // If the index is not at least increasing, we need to reindex.
            if (_prematureMerge)
            {
                ListQueryResults <TResult> results = ExecuteAndCollectResults(
                    inputStream, inputStream.PartitionCount, Child.OutputOrdered, preferStriping, settings);
                PartitionedStream <TResult, int> inputIntStream = results.GetPartitionedStream();
                WrapHelper <int>(inputIntStream, recipient, settings);
            }
            else
            {
                WrapHelper <TKey>(inputStream, recipient, settings);
            }
        }
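The "premature merge" branch above reappears in several of these operators: when the incoming ordinal keys are unusable (for example, not increasing), the child query is run to completion and the plain list position becomes the new int key. The sketch below shows that idea in isolation; it is only a conceptual stand-in for ExecuteAndCollectResults, which additionally respects output ordering and re-partitions the collected list.

using System.Collections.Generic;
using System.Linq;

// Conceptual stand-in for the "premature merge" / reindex step: materialize the partitions into
// one list so that an element's position in that list can serve as its new int ordinal key.
internal static class ReindexSketch
{
    public static List<T> MergeAndReindex<T>(IEnumerable<IEnumerable<T>> partitions)
    {
        return partitions.SelectMany(partition => partition).ToList();
    }
}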
        //-----------------------------------------------------------------------------------
        // A factory method to construct a partitioned stream over a data source.
        //
        // Arguments:
        //    source         - the data source to be partitioned
        //    partitionCount - the number of partitions desired
        //    useStriping    - whether striped partitioning should be used instead of range partitioning
        //

        internal static PartitionedStream <T, int> PartitionDataSource <T>(IEnumerable <T> source, int partitionCount, bool useStriping)
        {
            // The partitioned stream to return.
            PartitionedStream <T, int> returnValue;

            if (source is IParallelPartitionable <T> sourceAsPartitionable)
            {
                // The type overrides the partitioning algorithm, so we will use it instead of the default.
                // The returned array of enumerators must contain exactly the number of partitions we requested; otherwise we throw.
                QueryOperatorEnumerator <T, int>[] enumerators = sourceAsPartitionable.GetPartitions(partitionCount);
                if (enumerators == null)
                {
                    throw new InvalidOperationException(SR.ParallelPartitionable_NullReturn);
                }
                else if (enumerators.Length != partitionCount)
                {
                    throw new InvalidOperationException(SR.ParallelPartitionable_IncorretElementCount);
                }

                // Now just copy the enumerators into the stream, validating that each element is non-null.
                PartitionedStream <T, int> stream =
                    new PartitionedStream <T, int>(partitionCount, Util.GetDefaultComparer <int>(), OrdinalIndexState.Correct);
                for (int i = 0; i < partitionCount; i++)
                {
                    QueryOperatorEnumerator <T, int> currentEnumerator = enumerators[i];
                    if (currentEnumerator == null)
                    {
                        throw new InvalidOperationException(SR.ParallelPartitionable_NullElement);
                    }
                    stream[i] = currentEnumerator;
                }

                returnValue = stream;
            }
            else
            {
                returnValue = new PartitionedDataSource <T>(source, partitionCount, useStriping);
            }

            Debug.Assert(returnValue.PartitionCount == partitionCount);

            return returnValue;
        }
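The useStriping flag selects between the two partitioning shapes that PartitionedDataSource can produce for an indexible source. The sketch below contrasts them for a plain int array; it only illustrates the shapes (the real code works chunk-wise and lazily rather than element-wise over lists).

using System;
using System.Collections.Generic;

// Illustration of the two array-partitioning shapes the factory chooses between (not PLINQ's code):
// range partitioning hands each partition one contiguous chunk; striped partitioning deals elements
// out round-robin, which keeps the early elements spread across all partitions.
internal static class PartitioningSketch
{
    public static List<int>[] Range(int[] source, int partitionCount)
    {
        var result = new List<int>[partitionCount];
        int chunk = (source.Length + partitionCount - 1) / partitionCount;
        for (int p = 0; p < partitionCount; p++)
        {
            result[p] = new List<int>();
            for (int i = p * chunk; i < Math.Min(source.Length, (p + 1) * chunk); i++)
            {
                result[p].Add(source[i]);
            }
        }
        return result;
    }

    public static List<int>[] Striped(int[] source, int partitionCount)
    {
        var result = new List<int>[partitionCount];
        for (int p = 0; p < partitionCount; p++)
        {
            result[p] = new List<int>();
        }
        for (int i = 0; i < source.Length; i++)
        {
            result[i % partitionCount].Add(source[i]);   // element i goes to partition i mod N
        }
        return result;
    }
}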
Example #11
        private void WrapPartitionedStreamHelper <TIgnoreKey, TKey>(PartitionedStream <Pair <TSource, TGroupKey>, TKey> hashStream, IPartitionedStreamRecipient <IGrouping <TGroupKey, TElement> > recipient, CancellationToken cancellationToken)
        {
            int partitionCount = hashStream.PartitionCount;
            PartitionedStream <IGrouping <TGroupKey, TElement>, TKey> partitionedStream = new PartitionedStream <IGrouping <TGroupKey, TElement>, TKey>(partitionCount, hashStream.KeyComparer, OrdinalIndexState.Shuffled);

            for (int i = 0; i < partitionCount; i++)
            {
                if (this.m_elementSelector == null)
                {
                    GroupByIdentityQueryOperatorEnumerator <TSource, TGroupKey, TKey> enumerator = new GroupByIdentityQueryOperatorEnumerator <TSource, TGroupKey, TKey>(hashStream[i], this.m_keyComparer, cancellationToken);
                    partitionedStream[i] = (QueryOperatorEnumerator <IGrouping <TGroupKey, TElement>, TKey>)enumerator;
                }
                else
                {
                    partitionedStream[i] = new GroupByElementSelectorQueryOperatorEnumerator <TSource, TGroupKey, TElement, TKey>(hashStream[i], this.m_keyComparer, this.m_elementSelector, cancellationToken);
                }
            }
            recipient.Receive <TKey>(partitionedStream);
        }
Example #12
        private void WrapHelper <TKey>(PartitionedStream <TSource, TKey> inputStream, IPartitionedStreamRecipient <TSource> recipient, QuerySettings settings)
        {
            int partitionCount = inputStream.PartitionCount;

            // Generate the shared data.
            LastQueryOperatorState <TKey> operatorState = new LastQueryOperatorState <TKey>();
            CountdownEvent sharedBarrier = new CountdownEvent(partitionCount);

            PartitionedStream <TSource, int> outputStream =
                new PartitionedStream <TSource, int>(partitionCount, Util.GetDefaultComparer <int>(), OrdinalIndexState.Shuffled);

            for (int i = 0; i < partitionCount; i++)
            {
                outputStream[i] = new LastQueryOperatorEnumerator <TKey>(
                    inputStream[i], _predicate, operatorState, sharedBarrier, settings.CancellationState.MergedCancellationToken,
                    inputStream.KeyComparer, i);
            }
            recipient.Receive(outputStream);
        }
        public static void Spool(QueryTaskGroupState groupState, PartitionedStream <TOutput, int> partitions, bool[] consumerWaiting, bool[] producerWaiting, bool[] producerDone, Queue <Pair <int, TOutput> >[] buffers, object[] bufferLocks, TaskScheduler taskScheduler, bool autoBuffered)
        {
            int degreeOfParallelism = partitions.PartitionCount;

            for (int j = 0; j < degreeOfParallelism; j++)
            {
                buffers[j]     = new Queue <Pair <int, TOutput> >(0x80);
                bufferLocks[j] = new object();
            }
            Task rootTask = new Task(delegate {
                for (int k = 0; k < degreeOfParallelism; k++)
                {
                    new OrderPreservingPipeliningSpoolingTask <TOutput>(partitions[k], groupState, consumerWaiting, producerWaiting, producerDone, k, buffers, bufferLocks[k], taskScheduler, autoBuffered).RunAsynchronously(taskScheduler);
                }
            });

            groupState.QueryBegin(rootTask);
            rootTask.Start(taskScheduler);
        }
        private void WrapPartitionedStreamHelper <TLeftKey, TRightKey>(PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, TLeftKey> leftHashStream, PartitionedStream <TInputOutput, TRightKey> rightPartitionedStream, IPartitionedStreamRecipient <TInputOutput> outputRecipient, CancellationToken cancellationToken)
        {
            int partitionCount = leftHashStream.PartitionCount;
            PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, int> stream = ExchangeUtilities.HashRepartition <TInputOutput, NoKeyMemoizationRequired, TRightKey>(rightPartitionedStream, null, null, this.m_comparer, cancellationToken);
            PartitionedStream <TInputOutput, TLeftKey> partitionedStream = new PartitionedStream <TInputOutput, TLeftKey>(partitionCount, leftHashStream.KeyComparer, OrdinalIndexState.Shuffled);

            for (int i = 0; i < partitionCount; i++)
            {
                if (base.OutputOrdered)
                {
                    partitionedStream[i] = new OrderedExceptQueryOperatorEnumerator <TInputOutput, TLeftKey>(leftHashStream[i], stream[i], this.m_comparer, leftHashStream.KeyComparer, cancellationToken);
                }
                else
                {
                    partitionedStream[i] = (QueryOperatorEnumerator <TInputOutput, TLeftKey>) new ExceptQueryOperatorEnumerator <TInputOutput, TLeftKey>(leftHashStream[i], stream[i], this.m_comparer, cancellationToken);
                }
            }
            outputRecipient.Receive <TLeftKey>(partitionedStream);
        }
Example #15
        public override void WrapPartitionedStream <TLeftKey, TRightKey>(
            PartitionedStream <TLeftInput, TLeftKey> leftStream, PartitionedStream <TRightInput, TRightKey> rightStream,
            IPartitionedStreamRecipient <TOutput> outputRecipient, bool preferStriping, QuerySettings settings)
        {
            Debug.Assert(rightStream.PartitionCount == leftStream.PartitionCount);

            if (LeftChild.OutputOrdered)
            {
                WrapPartitionedStreamHelper <TLeftKey, TRightKey>(
                    ExchangeUtilities.HashRepartitionOrdered(leftStream, _leftKeySelector, _keyComparer, null, settings.CancellationState.MergedCancellationToken),
                    rightStream, outputRecipient, settings.CancellationState.MergedCancellationToken);
            }
            else
            {
                WrapPartitionedStreamHelper <int, TRightKey>(
                    ExchangeUtilities.HashRepartition(leftStream, _leftKeySelector, _keyComparer, null, settings.CancellationState.MergedCancellationToken),
                    rightStream, outputRecipient, settings.CancellationState.MergedCancellationToken);
            }
        }
Example #16
 internal override void WrapPartitionedStream <TKey>(
     PartitionedStream <TInputOutput, TKey> inputStream, IPartitionedStreamRecipient <TInputOutput> recipient, bool preferStriping, QuerySettings settings)
 {
     // Hash-repartition the source stream
     if (OutputOrdered)
     {
         WrapPartitionedStreamHelper <TKey>(
             ExchangeUtilities.HashRepartitionOrdered <TInputOutput, NoKeyMemoizationRequired, TKey>(
                 inputStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken),
             recipient, settings.CancellationState.MergedCancellationToken);
     }
     else
     {
         WrapPartitionedStreamHelper <int>(
             ExchangeUtilities.HashRepartition <TInputOutput, NoKeyMemoizationRequired, TKey>(
                 inputStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken),
             recipient, settings.CancellationState.MergedCancellationToken);
     }
 }
Example #17
        //-----------------------------------------------------------------------------------
        // Creates and begins execution of a new spooling task. Executes synchronously,
        // and by the time this API has returned all of the results have been produced.
        //
        // Arguments:
        //     groupState      - values for inter-task communication
        //     partitions      - the producer enumerators
        //     channels        - the producer-consumer channels
        //     taskScheduler   - the task manager on which to execute
        //

        internal static void SpoolStopAndGo <TInputOutput, TIgnoreKey>(
            QueryTaskGroupState groupState, PartitionedStream <TInputOutput, TIgnoreKey> partitions,
            SynchronousChannel <TInputOutput>[] channels, TaskScheduler taskScheduler)
        {
            Contract.Requires(partitions.PartitionCount == channels.Length);
            Contract.Requires(groupState != null);

            // Ensure all tasks in this query are parented under a common root.
            Task rootTask = new Task(
                () =>
            {
                int maxToRunInParallel = partitions.PartitionCount - 1;

                // A stop-and-go merge uses the current thread for one task and then blocks before
                // returning to the caller, until all results have been accumulated. We do this by
                // running the last partition on the calling thread.
                for (int i = 0; i < maxToRunInParallel; i++)
                {
                    TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);

                    QueryTask asyncTask = new StopAndGoSpoolingTask <TInputOutput, TIgnoreKey>(i, groupState, partitions[i], channels[i]);
                    asyncTask.RunAsynchronously(taskScheduler);
                }

                TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);

                // Run one task synchronously on the current thread.
                QueryTask syncTask = new StopAndGoSpoolingTask <TInputOutput, TIgnoreKey>(
                    maxToRunInParallel, groupState, partitions[maxToRunInParallel], channels[maxToRunInParallel]);
                syncTask.RunSynchronously(taskScheduler);
            });

            // Begin the query on the calling thread.
            groupState.QueryBegin(rootTask);

            // We don't want to return until the task is finished.  Run it on the calling thread.
            rootTask.RunSynchronously(taskScheduler);

            // Wait for the query to complete, propagate exceptions, and so on.
            // For pipelined queries, this step happens in the async enumerator.
            groupState.QueryEnd(false);
        }
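The scheduling shape above (run every partition but the last asynchronously, run the last one on the calling thread, then block until all of them finish) is easy to see with plain tasks. The sketch below is an assumption-level illustration of that shape, not the QueryTask/TaskScheduler machinery itself.

using System;
using System.Threading.Tasks;

// Illustrative "stop-and-go" scheduling: N - 1 producers run asynchronously, the last producer
// runs inline on the calling thread, and the call does not return until every producer is done.
// Assumes at least one producer.
internal static class StopAndGoSketch
{
    public static void RunAll(Action<int>[] producers)
    {
        int last = producers.Length - 1;
        var pending = new Task[last];

        for (int i = 0; i < last; i++)
        {
            int partition = i;                         // capture a stable copy for the lambda
            pending[i] = Task.Run(() => producers[partition](partition));
        }

        producers[last](last);                         // the calling thread runs one partition itself
        Task.WaitAll(pending);                         // wait for the asynchronous partitions
    }
}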
        internal override void WrapPartitionedStream <TKey>(
            PartitionedStream <TSource, TKey> inputStream, IPartitionedStreamRecipient <TSource> recipient, bool preferStriping, QuerySettings settings)
        {
            int partitionCount = inputStream.PartitionCount;

            // Generate the shared data.
            Shared <int>   sharedEmptyCount = new Shared <int>(0);
            CountdownEvent sharedLatch      = new CountdownEvent(partitionCount - 1);

            PartitionedStream <TSource, TKey> outputStream =
                new PartitionedStream <TSource, TKey>(partitionCount, inputStream.KeyComparer, OrdinalIndexState);

            for (int i = 0; i < partitionCount; i++)
            {
                outputStream[i] = new DefaultIfEmptyQueryOperatorEnumerator <TKey>(
                    inputStream[i], _defaultValue, i, partitionCount, sharedEmptyCount, sharedLatch, settings.CancellationState.MergedCancellationToken);
            }

            recipient.Receive(outputStream);
        }
        private void WrapPartitionedStreamHelper <TLeftKey, TRightKey, TOutputKey>(
            PartitionedStream <Pair <TLeftInput, TKey>, TLeftKey> leftHashStream, PartitionedStream <Pair <TRightInput, TKey>, TRightKey> rightHashStream,
            HashJoinOutputKeyBuilder <TLeftKey, TRightKey, TOutputKey> outputKeyBuilder, IComparer <TOutputKey> outputKeyComparer,
            IPartitionedStreamRecipient <TOutput> outputRecipient, CancellationToken cancellationToken)
        {
            int partitionCount = leftHashStream.PartitionCount;

            PartitionedStream <TOutput, TOutputKey> outputStream =
                new PartitionedStream <TOutput, TOutputKey>(partitionCount, outputKeyComparer, OrdinalIndexState);

            for (int i = 0; i < partitionCount; i++)
            {
                JoinHashLookupBuilder <TRightInput, TRightKey, TKey> rightLookupBuilder =
                    new JoinHashLookupBuilder <TRightInput, TRightKey, TKey>(rightHashStream[i], _keyComparer);
                outputStream[i] = new HashJoinQueryOperatorEnumerator <TLeftInput, TLeftKey, TRightInput, TRightKey, TKey, TOutput, TOutputKey>(
                    leftHashStream[i], rightLookupBuilder, _resultSelector, outputKeyBuilder, cancellationToken);
            }

            outputRecipient.Receive(outputStream);
        }
        internal override void WrapPartitionedStream <TKey>(PartitionedStream <TInput, TKey> inputStream, IPartitionedStreamRecipient <TOutput> recipient, bool preferStriping, QuerySettings settings)
        {
            PartitionedStream <TInput, int> stream;
            int partitionCount = inputStream.PartitionCount;

            if (this.m_prematureMerge)
            {
                stream = QueryOperator <TInput> .ExecuteAndCollectResults <TKey>(inputStream, partitionCount, base.Child.OutputOrdered, preferStriping, settings).GetPartitionedStream();
            }
            else
            {
                stream = (PartitionedStream <TInput, int>)(object) inputStream;
            }
            PartitionedStream <TOutput, int> partitionedStream = new PartitionedStream <TOutput, int>(partitionCount, Util.GetDefaultComparer <int>(), this.OrdinalIndexState);

            for (int i = 0; i < partitionCount; i++)
            {
                partitionedStream[i] = new IndexedSelectQueryOperatorEnumerator <TInput, TOutput>(stream[i], this.m_selector);
            }
            recipient.Receive <int>(partitionedStream);
        }
Example #21
        public override void WrapPartitionedStream <TLeftKey, TRightKey>(
            PartitionedStream <TInputOutput, TLeftKey> leftPartitionedStream, PartitionedStream <TInputOutput, TRightKey> rightPartitionedStream,
            IPartitionedStreamRecipient <TInputOutput> outputRecipient, bool preferStriping, QuerySettings settings)
        {
            Debug.Assert(leftPartitionedStream.PartitionCount == rightPartitionedStream.PartitionCount);

            if (OutputOrdered)
            {
                WrapPartitionedStreamHelper <TLeftKey, TRightKey>(
                    ExchangeUtilities.HashRepartitionOrdered <TInputOutput, NoKeyMemoizationRequired, TLeftKey>(
                        leftPartitionedStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken),
                    rightPartitionedStream, outputRecipient, settings.CancellationState.MergedCancellationToken);
            }
            else
            {
                WrapPartitionedStreamHelper <int, TRightKey>(
                    ExchangeUtilities.HashRepartition <TInputOutput, NoKeyMemoizationRequired, TLeftKey>(
                        leftPartitionedStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken),
                    rightPartitionedStream, outputRecipient, settings.CancellationState.MergedCancellationToken);
            }
        }
        internal override void WrapPartitionedStream <TLeftKey>(
            PartitionedStream <TLeftInput, TLeftKey> inputStream, IPartitionedStreamRecipient <TOutput> recipient, bool preferStriping, QuerySettings settings)
        {
            int partitionCount = inputStream.PartitionCount;

            if (_indexedRightChildSelector != null)
            {
                PartitionedStream <TLeftInput, int> inputStreamInt;

                // If the index is not correct, we need to reindex.
                if (_prematureMerge)
                {
                    ListQueryResults <TLeftInput> listResults =
                        QueryOperator <TLeftInput> .ExecuteAndCollectResults(inputStream, partitionCount, OutputOrdered, preferStriping, settings);

                    inputStreamInt = listResults.GetPartitionedStream();
                }
                else
                {
                    inputStreamInt = (PartitionedStream <TLeftInput, int>)(object) inputStream;
                }
                WrapPartitionedStreamIndexed(inputStreamInt, recipient, settings);
                return;
            }

            if (_prematureMerge)
            {
                PartitionedStream <TLeftInput, int> inputStreamInt =
                    QueryOperator <TLeftInput> .ExecuteAndCollectResults(inputStream, partitionCount, OutputOrdered, preferStriping, settings)
                    .GetPartitionedStream();

                WrapPartitionedStreamNotIndexed(inputStreamInt, recipient, settings);
            }
            else
            {
                WrapPartitionedStreamNotIndexed(inputStream, recipient, settings);
            }
        }
Example #23
        internal override void WrapPartitionedStream <TKey>(PartitionedStream <TSource, TKey> inputStream, IPartitionedStreamRecipient <TSource> recipient, bool preferStriping, QuerySettings settings)
        {
            PartitionedStream <TSource, int> stream;
            int partitionCount = inputStream.PartitionCount;

            if (this.m_prematureMerge)
            {
                stream = QueryOperator <TSource> .ExecuteAndCollectResults <TKey>(inputStream, partitionCount, base.Child.OutputOrdered, preferStriping, settings).GetPartitionedStream();
            }
            else
            {
                stream = (PartitionedStream <TSource, int>)(object) inputStream;
            }
            Shared <bool> resultFoundFlag = new Shared <bool>(false);
            PartitionedStream <TSource, int> partitionedStream = new PartitionedStream <TSource, int>(partitionCount, Util.GetDefaultComparer <int>(), OrdinalIndexState.Correct);

            for (int i = 0; i < partitionCount; i++)
            {
                partitionedStream[i] = new ElementAtQueryOperatorEnumerator <TSource>(stream[i], this.m_index, resultFoundFlag, settings.CancellationState.MergedCancellationToken);
            }
            recipient.Receive <int>(partitionedStream);
        }
        internal static void Spool(QueryTaskGroupState groupState, PartitionedStream <TInputOutput, TKey> partitions, System.Linq.Parallel.Shared <TInputOutput[]> results, TaskScheduler taskScheduler)
        {
            int maxToRunInParallel = partitions.PartitionCount - 1;

            SortHelper <TInputOutput, TKey>[] sortHelpers = SortHelper <TInputOutput, TKey> .GenerateSortHelpers(partitions, groupState);

            Task rootTask = new Task(delegate {
                for (int k = 0; k < maxToRunInParallel; k++)
                {
                    new OrderPreservingSpoolingTask <TInputOutput, TKey>(k, groupState, results, sortHelpers[k]).RunAsynchronously(taskScheduler);
                }
                new OrderPreservingSpoolingTask <TInputOutput, TKey>(maxToRunInParallel, groupState, results, sortHelpers[maxToRunInParallel]).RunSynchronously(taskScheduler);
            });

            groupState.QueryBegin(rootTask);
            rootTask.RunSynchronously(taskScheduler);
            for (int j = 0; j < sortHelpers.Length; j++)
            {
                sortHelpers[j].Dispose();
            }
            groupState.QueryEnd(false);
        }
        private void WrapPartitionedStreamHelper <TLeftKey, TRightKey>(
            PartitionedStream <Pair <TLeftInput, TKey>, TLeftKey> leftHashStream,
            HashLookupBuilder <IEnumerable <TRightInput>, TRightKey, TKey>[] rightLookupBuilders,
            IComparer <TRightKey>? rightKeyComparer, IPartitionedStreamRecipient <TOutput> outputRecipient,
            int partitionCount, CancellationToken cancellationToken)
        {
            if (RightChild.OutputOrdered && LeftChild.OutputOrdered)
            {
                PairOutputKeyBuilder <TLeftKey, TRightKey> outputKeyBuilder  = new PairOutputKeyBuilder <TLeftKey, TRightKey>();
                IComparer <Pair <TLeftKey, TRightKey> >    outputKeyComparer = new PairComparer <TLeftKey, TRightKey>(leftHashStream.KeyComparer, rightKeyComparer);

                WrapPartitionedStreamHelper <TLeftKey, TRightKey, Pair <TLeftKey, TRightKey> >(leftHashStream, rightLookupBuilders,
                                                                                               outputKeyBuilder, outputKeyComparer, outputRecipient, partitionCount, cancellationToken);
            }
            else
            {
                LeftKeyOutputKeyBuilder <TLeftKey, TRightKey> outputKeyBuilder = new LeftKeyOutputKeyBuilder <TLeftKey, TRightKey>();

                WrapPartitionedStreamHelper <TLeftKey, TRightKey, TLeftKey>(leftHashStream, rightLookupBuilders,
                                                                            outputKeyBuilder, leftHashStream.KeyComparer, outputRecipient, partitionCount, cancellationToken);
            }
        }
Example #26
        //-----------------------------------------------------------------------------------
        // Creates and begins execution of a new spooling task. This is a for-all style
        // execution, meaning that the query will be run fully (for effect) before returning
        // and that there are no channels into which data will be queued.
        //
        // Arguments:
        //     groupState      - values for inter-task communication
        //     partitions      - the producer enumerators
        //     taskScheduler   - the task manager on which to execute
        //

        internal static void SpoolForAll <TInputOutput, TIgnoreKey>(
            QueryTaskGroupState groupState, PartitionedStream <TInputOutput, TIgnoreKey> partitions, TaskScheduler taskScheduler)
        {
            Contract.Requires(groupState != null);

            // Ensure all tasks in this query are parented under a common root.
            Task rootTask = new Task(
                () =>
            {
                int maxToRunInParallel = partitions.PartitionCount - 1;

                // Create tasks that will enumerate the partitions in parallel "for effect"; in other words,
                // no data will be placed into any kind of producer-consumer channel.
                for (int i = 0; i < maxToRunInParallel; i++)
                {
                    TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] asynchronously", i);

                    QueryTask asyncTask = new ForAllSpoolingTask <TInputOutput, TIgnoreKey>(i, groupState, partitions[i]);
                    asyncTask.RunAsynchronously(taskScheduler);
                }

                TraceHelpers.TraceInfo("SpoolingTask::Spool: Running partition[{0}] synchronously", maxToRunInParallel);

                // Run one task synchronously on the current thread.
                QueryTask syncTask = new ForAllSpoolingTask <TInputOutput, TIgnoreKey>(maxToRunInParallel, groupState, partitions[maxToRunInParallel]);
                syncTask.RunSynchronously(taskScheduler);
            });

            // Begin the query on the calling thread.
            groupState.QueryBegin(rootTask);

            // We don't want to return until the task is finished.  Run it on the calling thread.
            rootTask.RunSynchronously(taskScheduler);

            // Wait for the query to complete, propagate exceptions, and so on.
            // For pipelined queries, this step happens in the async enumerator.
            groupState.QueryEnd(false);
        }
        internal static SortHelper <TInputOutput, TKey>[] GenerateSortHelpers(PartitionedStream <TInputOutput, TKey> partitions, QueryTaskGroupState groupState)
        {
            int partitionCount = partitions.PartitionCount;

            SortHelper <TInputOutput, TKey>[] helperArray = new SortHelper <TInputOutput, TKey> [partitionCount];
            // num2 grows to the smallest power of two >= partitionCount; num3 counts the doublings,
            // i.e. the number of pairwise-merge rounds (ceil(log2(partitionCount))).
            int num2 = 1;
            int num3 = 0;

            while (num2 < partitionCount)
            {
                num3++;
                num2 = num2 << 1;
            }
            int[][] sharedIndices              = new int[partitionCount][];
            GrowingArray <TKey>[] sharedkeys   = new GrowingArray <TKey> [partitionCount];
            TInputOutput[][]      sharedValues = new TInputOutput[partitionCount][];
            Barrier[,] sharedBarriers = new Barrier[num3, partitionCount];
            // In round j, every partition whose index is a multiple of 2^j shares a two-participant
            // barrier with its merge partner.
            if (partitionCount > 1)
            {
                int num4 = 1;
                for (int j = 0; j < sharedBarriers.GetLength(0); j++)
                {
                    for (int k = 0; k < sharedBarriers.GetLength(1); k++)
                    {
                        if ((k % num4) == 0)
                        {
                            sharedBarriers[j, k] = new Barrier(2);
                        }
                    }
                    num4 *= 2;
                }
            }
            for (int i = 0; i < partitionCount; i++)
            {
                helperArray[i] = new SortHelper <TInputOutput, TKey>(partitions[i], partitionCount, i, groupState, sharedIndices, partitions.OrdinalIndexState, partitions.KeyComparer, sharedkeys, sharedValues, sharedBarriers);
            }
            return helperArray;
        }
Example #28
        //---------------------------------------------------------------------------------------
        // A helper method that allows WrapPartitionedStream to fix the TLeftKey type parameter.
        //

        private void WrapPartitionedStreamFixedLeftType <TLeftKey, TRightKey>(
            PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, TLeftKey> leftHashStream, PartitionedStream <TInputOutput, TRightKey> rightStream,
            IPartitionedStreamRecipient <TInputOutput> outputRecipient, int partitionCount, CancellationToken cancellationToken)
        {
            if (RightChild.OutputOrdered)
            {
                PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, TRightKey> rightHashStream =
                    ExchangeUtilities.HashRepartitionOrdered <TInputOutput, NoKeyMemoizationRequired, TRightKey>(
                        rightStream, null, null, _comparer, cancellationToken);

                WrapPartitionedStreamFixedBothTypes <TLeftKey, TRightKey>(
                    leftHashStream, rightHashStream, outputRecipient, partitionCount, cancellationToken);
            }
            else
            {
                PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, int> rightHashStream =
                    ExchangeUtilities.HashRepartition <TInputOutput, NoKeyMemoizationRequired, TRightKey>(
                        rightStream, null, null, _comparer, cancellationToken);

                WrapPartitionedStreamFixedBothTypes <TLeftKey, int>(
                    leftHashStream, rightHashStream, outputRecipient, partitionCount, cancellationToken);
            }
        }
Example #29
        private void WrapHelper <TKey>(PartitionedStream <TResult, TKey> inputStream, IPartitionedStreamRecipient <TResult> recipient, QuerySettings settings)
        {
            int partitionCount = inputStream.PartitionCount;

            // Create shared data.
            OperatorState <TKey> operatorState = new OperatorState <TKey>();
            CountdownEvent       sharedBarrier = new CountdownEvent(partitionCount);

            Debug.Assert(_indexedPredicate == null || typeof(TKey) == typeof(int));
            Func <TResult, TKey, bool>? convertedIndexedPredicate = (Func <TResult, TKey, bool>?)(object?) _indexedPredicate;

            PartitionedStream <TResult, TKey> partitionedStream =
                new PartitionedStream <TResult, TKey>(partitionCount, inputStream.KeyComparer, OrdinalIndexState);

            for (int i = 0; i < partitionCount; i++)
            {
                partitionedStream[i] = new TakeOrSkipWhileQueryOperatorEnumerator <TKey>(
                    inputStream[i], _predicate, convertedIndexedPredicate, _take, operatorState, sharedBarrier,
                    settings.CancellationState.MergedCancellationToken, inputStream.KeyComparer);
            }

            recipient.Receive(partitionedStream);
        }
        private void WrapPartitionedStreamFixedBothTypes <TLeftKey, TRightKey>(PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, TLeftKey> leftHashStream, PartitionedStream <Pair <TInputOutput, NoKeyMemoizationRequired>, TRightKey> rightHashStream, IPartitionedStreamRecipient <TInputOutput> outputRecipient, int partitionCount, CancellationToken cancellationToken)
        {
            if (base.LeftChild.OutputOrdered || base.RightChild.OutputOrdered)
            {
                IComparer <ConcatKey <TLeftKey, TRightKey> > keyComparer = ConcatKey <TLeftKey, TRightKey> .MakeComparer(leftHashStream.KeyComparer, rightHashStream.KeyComparer);

                PartitionedStream <TInputOutput, ConcatKey <TLeftKey, TRightKey> > partitionedStream = new PartitionedStream <TInputOutput, ConcatKey <TLeftKey, TRightKey> >(partitionCount, keyComparer, OrdinalIndexState.Shuffled);
                for (int i = 0; i < partitionCount; i++)
                {
                    partitionedStream[i] = new OrderedUnionQueryOperatorEnumerator <TInputOutput, TLeftKey, TRightKey>(leftHashStream[i], rightHashStream[i], base.LeftChild.OutputOrdered, base.RightChild.OutputOrdered, this.m_comparer, keyComparer, cancellationToken);
                }
                outputRecipient.Receive <ConcatKey <TLeftKey, TRightKey> >(partitionedStream);
            }
            else
            {
                PartitionedStream <TInputOutput, int> stream2 = new PartitionedStream <TInputOutput, int>(partitionCount, Util.GetDefaultComparer <int>(), OrdinalIndexState.Shuffled);
                for (int j = 0; j < partitionCount; j++)
                {
                    stream2[j] = new UnionQueryOperatorEnumerator <TInputOutput, TLeftKey, TRightKey>(leftHashStream[j], rightHashStream[j], j, this.m_comparer, cancellationToken);
                }
                outputRecipient.Receive <int>(stream2);
            }
        }