internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TInputOutput, TKey> inputStream, IPartitionedStreamRecipient<TInputOutput> recipient,
    bool preferStriping, QuerySettings settings)
{
    // Hash-repartition the source stream
    if (OutputOrdered)
    {
        WrapPartitionedStreamHelper<TKey>(
            ExchangeUtilities.HashRepartitionOrdered<TInputOutput, NoKeyMemoizationRequired, TKey>(
                inputStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken),
            recipient, settings.CancellationState.MergedCancellationToken);
    }
    else
    {
        WrapPartitionedStreamHelper<int>(
            ExchangeUtilities.HashRepartition<TInputOutput, NoKeyMemoizationRequired, TKey>(
                inputStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken),
            recipient, settings.CancellationState.MergedCancellationToken);
    }
}
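//---------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source): the ordered branch above is
// taken when the query preserves ordering, e.g. via AsOrdered(). A minimal query that
// exercises both Distinct paths through the public PLINQ surface:
//
using System;
using System.Linq;

internal static class DistinctUsageSketch
{
    static void Main()
    {
        int[] source = { 3, 1, 3, 2, 2, 1 };

        // AsOrdered() makes OutputOrdered true, so the ordered hash-repartition path runs.
        int[] ordered = source.AsParallel().AsOrdered().Distinct().ToArray();   // { 3, 1, 2 }

        // Without AsOrdered(), the unordered path runs and the result order is unspecified.
        int[] unordered = source.AsParallel().Distinct().ToArray();

        Console.WriteLine(string.Join(", ", ordered));
        Console.WriteLine(string.Join(", ", unordered));
    }
}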
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TResult, TKey> inputStream, IPartitionedStreamRecipient<TResult> recipient,
    bool preferStriping, QuerySettings settings)
{
    if (_prematureMerge)
    {
        ListQueryResults<TResult> results = ExecuteAndCollectResults(
            inputStream, inputStream.PartitionCount, Child.OutputOrdered, preferStriping, settings);
        PartitionedStream<TResult, int> listInputStream = results.GetPartitionedStream();
        WrapHelper<int>(listInputStream, recipient, settings);
    }
    else
    {
        WrapHelper<TKey>(inputStream, recipient, settings);
    }
}
//---------------------------------------------------------------------------------------
// Just opens the current operator, including opening the child and wrapping it with
// partitions as needed.
//
internal override QueryResults<TResult> Open(QuerySettings settings, bool preferStriping)
{
    QueryResults<TResult> childQueryResults = Child.Open(settings, true);
    return new UnaryQueryOperatorResults(childQueryResults, this, settings, preferStriping);
}
internal QueryOperator(bool isOrdered, QuerySettings settings)
    : base(settings)
{
    _outputOrdered = isOrdered;
}
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TSource, TKey> inputStream, IPartitionedStreamRecipient<TSource> recipient,
    bool preferStriping, QuerySettings settings)
{
    Debug.Assert(Child.OrdinalIndexState != OrdinalIndexState.Indexable, "Don't take this code path if the child is indexable.");

    int partitionCount = inputStream.PartitionCount;

    PartitionedStream<TSource, TKey> outputStream = new PartitionedStream<TSource, TKey>(
        partitionCount, new ReverseComparer<TKey>(inputStream.KeyComparer), OrdinalIndexState.Shuffled);
    for (int i = 0; i < partitionCount; i++)
    {
        outputStream[i] = new ReverseQueryOperatorEnumerator<TKey>(inputStream[i], settings.CancellationState.MergedCancellationToken);
    }

    recipient.Receive(outputStream);
}
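//---------------------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the operator above reverses the
// sequence simply by flipping the order keys, i.e. by wrapping the input key comparer in a
// comparer that inverts every comparison. A minimal stand-in version of that idea
// (ReverseComparerSketch is a hypothetical name, not the real PLINQ type):
//
using System.Collections.Generic;

internal sealed class ReverseComparerSketch<T> : IComparer<T>
{
    private readonly IComparer<T> _inner;

    internal ReverseComparerSketch(IComparer<T> inner)
    {
        _inner = inner;
    }

    // Swapping the arguments inverts the ordering defined by the wrapped comparer.
    public int Compare(T x, T y)
    {
        return _inner.Compare(y, x);
    }
}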
//---------------------------------------------------------------------------------------
// Accessor for the key selector.
//

//---------------------------------------------------------------------------------------
// Accessor for the key comparer.
//

//---------------------------------------------------------------------------------------
// Opens the current operator. This involves opening the child operator tree, enumerating
// the results, sorting them, and then returning an enumerator that walks the result.
//
internal override QueryResults<TInputOutput> Open(QuerySettings settings, bool preferStriping)
{
    QueryResults<TInputOutput> childQueryResults = Child.Open(settings, false);
    return new SortQueryOperatorResults<TInputOutput, TSortKey>(childQueryResults, this, settings);
}
//---------------------------------------------------------------------------------------
// Executes the query and returns the results in an array.
//
internal TOutput[] ExecuteAndGetResultsAsArray()
{
    QuerySettings querySettings = SpecifiedQuerySettings
        .WithPerExecutionSettings()
        .WithDefaults();

    QueryLifecycle.LogicalQueryExecutionBegin(querySettings.QueryId);
    try
    {
        if (querySettings.ExecutionMode.Value == ParallelExecutionMode.Default && LimitsParallelism)
        {
            IEnumerable<TOutput> opSequential = AsSequentialQuery(querySettings.CancellationState.ExternalCancellationToken);
            IEnumerable<TOutput> opSequentialWithCancelChecks = CancellableEnumerable.Wrap(opSequential, querySettings.CancellationState.ExternalCancellationToken);
            return ExceptionAggregator.WrapEnumerable(opSequentialWithCancelChecks, querySettings.CancellationState).ToArray();
        }

        QueryResults<TOutput> results = GetQueryResults(querySettings);

        // Top-level preemptive cancellation test.
        // This handles situations where cancellation has occurred before execution commences.
        // The handling for in-execution cancellation occurs in QueryTaskGroupState.QueryEnd().
        if (querySettings.CancellationState.MergedCancellationToken.IsCancellationRequested)
        {
            if (querySettings.CancellationState.ExternalCancellationToken.IsCancellationRequested)
            {
                throw new OperationCanceledException(querySettings.CancellationState.ExternalCancellationToken);
            }
            else
            {
                throw new OperationCanceledException();
            }
        }

        if (results.IsIndexible && OutputOrdered)
        {
            // The special array-based merge performs better if the output is ordered, because
            // it does not have to pay for ordering. In the unordered case, the stop-and-go
            // merge appears to perform a little better.
            ArrayMergeHelper<TOutput> merger = new ArrayMergeHelper<TOutput>(SpecifiedQuerySettings, results);
            merger.Execute();
            TOutput[] output = merger.GetResultsAsArray();
            querySettings.CleanStateAtQueryEnd();
            return output;
        }
        else
        {
            PartitionedStreamMerger<TOutput> merger = new PartitionedStreamMerger<TOutput>(
                false, ParallelMergeOptions.FullyBuffered, querySettings.TaskScheduler,
                OutputOrdered, querySettings.CancellationState, querySettings.QueryId);
            results.GivePartitionedStream(merger);
            TOutput[] output = merger.MergeExecutor.GetResultsAsArray();
            querySettings.CleanStateAtQueryEnd();
            return output;
        }
    }
    finally
    {
        QueryLifecycle.LogicalQueryExecutionEnd(querySettings.QueryId);
    }
}
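//---------------------------------------------------------------------------------------
// Illustrative usage sketch (not from the original source): ExecuteAndGetResultsAsArray is
// the engine behind calls such as ToArray() on a parallel query, and the preemptive
// cancellation test above surfaces through the public WithCancellation operator:
//
using System;
using System.Linq;
using System.Threading;

internal static class ToArrayCancellationSketch
{
    static void Main()
    {
        using var cts = new CancellationTokenSource();
        cts.Cancel();   // cancel before execution commences, hitting the preemptive test above

        try
        {
            int[] results = Enumerable.Range(0, 1000)
                .AsParallel()
                .WithCancellation(cts.Token)
                .Select(x => x * 2)
                .ToArray();
            Console.WriteLine(results.Length);   // not reached: the token was already canceled
        }
        catch (OperationCanceledException)
        {
            Console.WriteLine("Query was canceled before it produced an array.");
        }
    }
}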
private QuerySettings _settings; // Settings collected from the query

internal ScanEnumerableQueryOperatorResults(IEnumerable<TElement> data, QuerySettings settings)
{
    _data = data;
    _settings = settings;
}
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TInput, TKey> inputStream, IPartitionedStreamRecipient<TInput> recipient,
    bool preferStriping, QuerySettings settings)
{
    int partitionCount = inputStream.PartitionCount;
    PartitionedStream<TInput, int> outputStream = new PartitionedStream<TInput, int>(
        partitionCount, Util.GetDefaultComparer<int>(), OrdinalIndexState.Correct);
    for (int i = 0; i < partitionCount; i++)
    {
        outputStream[i] = new ForAllEnumerator<TKey>(
            inputStream[i], _elementAction, settings.CancellationState.MergedCancellationToken);
    }

    recipient.Receive(outputStream);
}
private readonly bool _preferStriping; // If the results are indexable, should we use striping when partitioning them

internal UnaryQueryOperatorResults(QueryResults<TInput> childQueryResults, UnaryQueryOperator<TInput, TOutput> op, QuerySettings settings, bool preferStriping)
{
    _childQueryResults = childQueryResults;
    _op = op;
    _settings = settings;
    _preferStriping = preferStriping;
}
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TSource, TKey> inputStream, IPartitionedStreamRecipient<TSource> recipient,
    bool preferStriping, QuerySettings settings)
{
    int partitionCount = inputStream.PartitionCount;

    // Generate the shared data.
    Shared<int> sharedEmptyCount = new Shared<int>(0);
    CountdownEvent sharedLatch = new CountdownEvent(partitionCount - 1);

    PartitionedStream<TSource, TKey> outputStream =
        new PartitionedStream<TSource, TKey>(partitionCount, inputStream.KeyComparer, OrdinalIndexState);
    for (int i = 0; i < partitionCount; i++)
    {
        outputStream[i] = new DefaultIfEmptyQueryOperatorEnumerator<TKey>(
            inputStream[i], m_defaultValue, i, partitionCount, sharedEmptyCount, sharedLatch, settings.CancellationState.MergedCancellationToken);
    }

    recipient.Receive(outputStream);
}
//---------------------------------------------------------------------------------------
// This method wraps each enumerator in inputStream with an enumerator performing this
// operator's transformation. However, instead of returning the transformed partitioned
// stream, we pass it to a recipient object by calling recipient.Receive<TNewKey>(..). That
// way, we can "return" a partitioned stream that potentially uses a different order key
// from the order key of the input stream.
//
internal abstract void WrapPartitionedStream<TKey>(
    PartitionedStream<TInput, TKey> inputStream, IPartitionedStreamRecipient<TOutput> recipient,
    bool preferStriping, QuerySettings settings);
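//---------------------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the recipient pattern exists because
// the key type of the output stream is only known inside the method body, so it cannot be
// expressed in the return type. The types below (Stream<T, TKey>, IRecipient<T>) are
// simplified stand-ins for illustration, not the real PLINQ types.
//
using System.Collections.Generic;

internal sealed class Stream<T, TKey>
{
    public List<(TKey Key, T Value)> Elements { get; } = new List<(TKey, T)>();
}

internal interface IRecipient<T>
{
    // The generic method lets the producer hand back a stream keyed by any TNewKey.
    void Receive<TNewKey>(Stream<T, TNewKey> stream);
}

internal static class RecipientPatternSketch
{
    // The caller cannot name the output key type; the callee picks it and "returns" the
    // stream through the recipient callback instead of through the return value.
    internal static void ReKeyByPosition<T, TKey>(Stream<T, TKey> input, IRecipient<T> recipient)
    {
        // The output is keyed by int position, a key type chosen here, inside the method.
        var output = new Stream<T, int>();
        for (int i = 0; i < input.Elements.Count; i++)
        {
            output.Elements.Add((i, input.Elements[i].Value));
        }
        recipient.Receive(output);
    }
}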
private UnaryQueryOperator(QueryOperator<TInput> child, bool outputOrdered, QuerySettings settings)
    : base(outputOrdered, settings)
{
    _child = child;
}
internal ChildResultsRecipient(
    IPartitionedStreamRecipient<TOutput> outputRecipient, UnaryQueryOperator<TInput, TOutput> op, bool preferStriping, QuerySettings settings)
{
    _outputRecipient = outputRecipient;
    _op = op;
    _preferStriping = preferStriping;
    _settings = settings;
}
internal QuerySettings WithPerExecutionSettings(CancellationTokenSource topLevelCancellationTokenSource, Shared<bool> topLevelDisposedFlag)
{
    // Initialize a new QuerySettings structure and copy in the current settings.
    // Note: this has the very important effect of creating a fresh CancellationSettings,
    //       and _not_ copying in the current internalCancellationSource or topLevelDisposedFlag, which should not be
    //       propagated to internal query executions. (This affects SelectMany execution.)
    //       The fresh top-level parameters are used instead.
    QuerySettings settings = new QuerySettings(TaskScheduler, DegreeOfParallelism, CancellationState.ExternalCancellationToken, ExecutionMode, MergeOptions);

    Contract.Assert(topLevelCancellationTokenSource != null, "There should always be a top-level cancellation signal specified.");
    settings.CancellationState.InternalCancellationTokenSource = topLevelCancellationTokenSource;

    // Merge internal and external tokens to form the combined token.
    settings.CancellationState.MergedCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(
        settings.CancellationState.InternalCancellationTokenSource.Token, settings.CancellationState.ExternalCancellationToken);

    // ...and copy in the topLevelDisposedFlag.
    settings.CancellationState.TopLevelDisposedFlag = topLevelDisposedFlag;

    Contract.Assert(settings.CancellationState.InternalCancellationTokenSource != null);
    Contract.Assert(settings.CancellationState.MergedCancellationToken.CanBeCanceled);
    Contract.Assert(settings.CancellationState.TopLevelDisposedFlag != null);

    // Finally, assign a query Id to the settings.
    settings._queryId = PlinqEtwProvider.NextQueryId();

    return settings;
}
public override void WrapPartitionedStream<TLeftKey, TRightKey>(
    PartitionedStream<TInputOutput, TLeftKey> leftStream, PartitionedStream<TInputOutput, TRightKey> rightStream,
    IPartitionedStreamRecipient<TInputOutput> outputRecipient, bool preferStriping, QuerySettings settings)
{
    Debug.Assert(leftStream.PartitionCount == rightStream.PartitionCount);
    int partitionCount = leftStream.PartitionCount;

    // Wrap both child streams with hash repartition
    if (LeftChild.OutputOrdered)
    {
        PartitionedStream<Pair, TLeftKey> leftHashStream =
            ExchangeUtilities.HashRepartitionOrdered<TInputOutput, NoKeyMemoizationRequired, TLeftKey>(
                leftStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken);

        WrapPartitionedStreamFixedLeftType<TLeftKey, TRightKey>(
            leftHashStream, rightStream, outputRecipient, partitionCount, settings.CancellationState.MergedCancellationToken);
    }
    else
    {
        PartitionedStream<Pair, int> leftHashStream =
            ExchangeUtilities.HashRepartition<TInputOutput, NoKeyMemoizationRequired, TLeftKey>(
                leftStream, null, null, _comparer, settings.CancellationState.MergedCancellationToken);

        WrapPartitionedStreamFixedLeftType<int, TRightKey>(
            leftHashStream, rightStream, outputRecipient, partitionCount, settings.CancellationState.MergedCancellationToken);
    }
}
internal QuerySettings Merge(QuerySettings settings2)
{
    if ((this.TaskScheduler != null) && (settings2.TaskScheduler != null))
    {
        throw new InvalidOperationException(System.Linq.SR.GetString("ParallelQuery_DuplicateTaskScheduler"));
    }

    if (this.DegreeOfParallelism.HasValue && settings2.DegreeOfParallelism.HasValue)
    {
        throw new InvalidOperationException(System.Linq.SR.GetString("ParallelQuery_DuplicateDOP"));
    }

    if (this.CancellationState.ExternalCancellationToken.CanBeCanceled && settings2.CancellationState.ExternalCancellationToken.CanBeCanceled)
    {
        throw new InvalidOperationException(System.Linq.SR.GetString("ParallelQuery_DuplicateWithCancellation"));
    }

    if (this.ExecutionMode.HasValue && settings2.ExecutionMode.HasValue)
    {
        throw new InvalidOperationException(System.Linq.SR.GetString("ParallelQuery_DuplicateExecutionMode"));
    }

    if (this.MergeOptions.HasValue && settings2.MergeOptions.HasValue)
    {
        throw new InvalidOperationException(System.Linq.SR.GetString("ParallelQuery_DuplicateMergeOptions"));
    }

    System.Threading.Tasks.TaskScheduler taskScheduler = (this.TaskScheduler == null) ? settings2.TaskScheduler : this.TaskScheduler;
    int? degreeOfParallelism = this.DegreeOfParallelism.HasValue ? this.DegreeOfParallelism : settings2.DegreeOfParallelism;
    CancellationToken externalCancellationToken = this.CancellationState.ExternalCancellationToken.CanBeCanceled
        ? this.CancellationState.ExternalCancellationToken
        : settings2.CancellationState.ExternalCancellationToken;
    ParallelExecutionMode? executionMode = this.ExecutionMode.HasValue ? this.ExecutionMode : settings2.ExecutionMode;

    return new QuerySettings(taskScheduler, degreeOfParallelism, externalCancellationToken, executionMode,
        this.MergeOptions.HasValue ? this.MergeOptions : settings2.MergeOptions);
}
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TSource, TKey> inputStream, IPartitionedStreamRecipient<TSource> recipient,
    bool preferStriping, QuerySettings settings)
{
    int partitionCount = inputStream.PartitionCount;
    PartitionedStream<TSource, int> outputStream = new PartitionedStream<TSource, int>(
        partitionCount, Util.GetDefaultComparer<int>(), OrdinalIndexState.Shuffled);

    Shared<int> totalElementCount = new Shared<int>(0);
    for (int i = 0; i < partitionCount; i++)
    {
        outputStream[i] = new SingleQueryOperatorEnumerator<TKey>(inputStream[i], m_predicate, totalElementCount);
    }

    recipient.Receive(outputStream);
}
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TInputOutput, TKey> inputStream, IPartitionedStreamRecipient<TInputOutput> recipient,
    bool preferStriping, QuerySettings settings)
{
    PartitionedStream<TInputOutput, TSortKey> outputStream =
        new PartitionedStream<TInputOutput, TSortKey>(inputStream.PartitionCount, this._comparer, OrdinalIndexState);
    for (int i = 0; i < outputStream.PartitionCount; i++)
    {
        outputStream[i] = new SortQueryOperatorEnumerator<TInputOutput, TKey, TSortKey>(
            inputStream[i], _keySelector);
    }

    recipient.Receive<TSortKey>(outputStream);
}
internal override void WrapPartitionedStream<TLeftKey>(
    PartitionedStream<TLeftInput, TLeftKey> inputStream, IPartitionedStreamRecipient<TOutput> recipient,
    bool preferStriping, QuerySettings settings)
{
    int partitionCount = inputStream.PartitionCount;
    if (_indexedRightChildSelector != null)
    {
        PartitionedStream<TLeftInput, int> inputStreamInt;

        // If the index is not correct, we need to reindex.
        if (_prematureMerge)
        {
            ListQueryResults<TLeftInput> listResults =
                QueryOperator<TLeftInput>.ExecuteAndCollectResults(inputStream, partitionCount, OutputOrdered, preferStriping, settings);
            inputStreamInt = listResults.GetPartitionedStream();
        }
        else
        {
            inputStreamInt = (PartitionedStream<TLeftInput, int>)(object)inputStream;
        }

        WrapPartitionedStreamIndexed(inputStreamInt, recipient, settings);
        return;
    }

    if (_prematureMerge)
    {
        PartitionedStream<TLeftInput, int> inputStreamInt =
            QueryOperator<TLeftInput>.ExecuteAndCollectResults(inputStream, partitionCount, OutputOrdered, preferStriping, settings)
            .GetPartitionedStream();
        WrapPartitionedStreamNotIndexed(inputStreamInt, recipient, settings);
    }
    else
    {
        WrapPartitionedStreamNotIndexed(inputStream, recipient, settings);
    }
}
internal QueryOperator(QuerySettings settings)
    : this(false, settings)
{
}
/// <summary>
/// Similar helper method to WrapPartitionedStreamNotIndexed, except that this one is for the indexed variant
/// of SelectMany (i.e., the SelectMany that passes indices into the user sequence-generating delegate).
/// </summary>
private void WrapPartitionedStreamIndexed(
    PartitionedStream<TLeftInput, int> inputStream, IPartitionedStreamRecipient<TOutput> recipient, QuerySettings settings)
{
    var keyComparer = new PairComparer<int, int>(inputStream.KeyComparer, Util.GetDefaultComparer<int>());

    var outputStream = new PartitionedStream<TOutput, Pair<int, int>>(inputStream.PartitionCount, keyComparer, OrdinalIndexState);

    for (int i = 0; i < inputStream.PartitionCount; i++)
    {
        outputStream[i] = new IndexedSelectManyQueryOperatorEnumerator(inputStream[i], this, settings.CancellationState.MergedCancellationToken);
    }

    recipient.Receive(outputStream);
}
//---------------------------------------------------------------------------------------
// Opening the query operator will do whatever is necessary to begin enumerating its
// results. This includes in some cases actually introducing parallelism, enumerating
// other query operators, and so on. This is abstract and left to the specific concrete
// operator classes to implement.
//
// Arguments:
//     settings       - various flags and settings to control query execution
//     preferStriping - flag representing whether the caller prefers striped partitioning
//                      over range partitioning
//
// Return Values:
//     Either a single enumerator, or a partition (for partition parallelism).
//
internal abstract QueryResults<TOutput> Open(QuerySettings settings, bool preferStriping);
private QuerySettings _settings; // Settings collected from the query

internal PartitionerQueryOperatorResults(Partitioner<TElement> partitioner, QuerySettings settings)
{
    _partitioner = partitioner;
    _settings = settings;
}
//---------------------------------------------------------------------------------------
// Just opens the current operator, including opening the child and wrapping it with
// partitions as needed.
//
internal override QueryResults<TSource> Open(QuerySettings settings, bool preferStriping)
{
    QueryResults<TSource> childQueryResults = Child.Open(settings, false);
    return ReverseQueryOperatorResults.NewResults(childQueryResults, this, settings, preferStriping);
}
//---------------------------------------------------------------------------------------
// Just opens the current operator, including opening the child and wrapping it with
// partitions as needed.
//
internal override QueryResults<TOutput> Open(QuerySettings settings, bool preferStriping)
{
    QueryResults<TInput> childQueryResults = Child.Open(settings, preferStriping);
    return SelectQueryOperatorResults.NewResults(childQueryResults, this, settings, preferStriping);
}
private void WrapHelper<TKey>(PartitionedStream<TResult, TKey> inputStream, IPartitionedStreamRecipient<TResult> recipient, QuerySettings settings)
{
    int partitionCount = inputStream.PartitionCount;
    if (ParallelEnumerable.SinglePartitionMode)
    {
        Debug.Assert(partitionCount == 1);
    }

    // Create shared data.
    OperatorState<TKey> operatorState = new OperatorState<TKey>();
    CountdownEvent sharedBarrier = new CountdownEvent(partitionCount);

    Debug.Assert(_indexedPredicate == null || typeof(TKey) == typeof(int));
    Func<TResult, TKey, bool>? convertedIndexedPredicate = (Func<TResult, TKey, bool>?)(object?)_indexedPredicate;

    PartitionedStream<TResult, TKey> partitionedStream =
        new PartitionedStream<TResult, TKey>(partitionCount, inputStream.KeyComparer, OrdinalIndexState);

    for (int i = 0; i < partitionCount; i++)
    {
        partitionedStream[i] = new TakeOrSkipWhileQueryOperatorEnumerator<TKey>(
            inputStream[i], _predicate, convertedIndexedPredicate, _take, operatorState, sharedBarrier,
            settings.CancellationState.MergedCancellationToken, inputStream.KeyComparer);
    }

    recipient.Receive(partitionedStream);
}
public override void WrapPartitionedStream<TLeftKey, TRightKey>(
    PartitionedStream<TSource, TLeftKey> leftStream, PartitionedStream<TSource, TRightKey> rightStream,
    IPartitionedStreamRecipient<TSource> outputRecipient, bool preferStriping, QuerySettings settings)
{
    // Prematurely merge the left results, if necessary
    if (_prematureMergeLeft)
    {
        ListQueryResults<TSource> leftStreamResults =
            ExecuteAndCollectResults(leftStream, leftStream.PartitionCount, LeftChild.OutputOrdered, preferStriping, settings);
        PartitionedStream<TSource, int> leftStreamInc = leftStreamResults.GetPartitionedStream();
        WrapHelper<int, TRightKey>(leftStreamInc, rightStream, outputRecipient, settings, preferStriping);
    }
    else
    {
        Debug.Assert(!ExchangeUtilities.IsWorseThan(leftStream.OrdinalIndexState, OrdinalIndexState.Increasing));
        WrapHelper<TLeftKey, TRightKey>(leftStream, rightStream, outputRecipient, settings, preferStriping);
    }
}
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TInput, TKey> inputStream, IPartitionedStreamRecipient<bool> recipient,
    bool preferStriping, QuerySettings settings)
{
    // Create a shared cancellation variable and then return a possibly wrapped new enumerator.
    Shared<bool> resultFoundFlag = new Shared<bool>(false);

    int partitionCount = inputStream.PartitionCount;
    PartitionedStream<bool, int> outputStream = new PartitionedStream<bool, int>(
        partitionCount, Util.GetDefaultComparer<int>(), OrdinalIndexState.Correct);
    for (int i = 0; i < partitionCount; i++)
    {
        outputStream[i] = new AnyAllSearchOperatorEnumerator<TKey>(
            inputStream[i], _qualification, _predicate, i, resultFoundFlag, settings.CancellationState.MergedCancellationToken);
    }

    recipient.Receive(outputStream);
}
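//---------------------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the shared resultFoundFlag above lets
// one partition's discovery short-circuit the others. A simplified stand-alone version of
// that early-exit pattern, using Parallel.For in place of the real partition enumerators:
//
using System;
using System.Threading;
using System.Threading.Tasks;

internal static class SharedFlagEarlyExitSketch
{
    internal static bool Any(int[] source, Func<int, bool> predicate, int partitionCount)
    {
        bool resultFound = false;   // shared across all partitions, only ever flipped from false to true

        Parallel.For(0, partitionCount, p =>
        {
            // Each partition scans a strided slice of the source.
            for (int i = p; i < source.Length; i += partitionCount)
            {
                if (Volatile.Read(ref resultFound))
                {
                    return;   // another partition already found a match
                }
                if (predicate(source[i]))
                {
                    Volatile.Write(ref resultFound, true);
                    return;
                }
            }
        });

        return resultFound;
    }
}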
internal override void WrapPartitionedStream<TKey>(
    PartitionedStream<TSource, TKey> inputStream, IPartitionedStreamRecipient<TSource> recipient,
    bool preferStriping, QuerySettings settings)
{
    // If the index is not at least increasing, we need to reindex.
    if (_prematureMergeNeeded)
    {
        PartitionedStream<TSource, int> intKeyStream =
            ExecuteAndCollectResults(inputStream, inputStream.PartitionCount, Child.OutputOrdered, preferStriping, settings).GetPartitionedStream();
        WrapHelper<int>(intKeyStream, recipient, settings);
    }
    else
    {
        WrapHelper<TKey>(inputStream, recipient, settings);
    }
}
internal ChildResultsRecipient(IPartitionedStreamRecipient<TInputOutput> outputRecipient, SortQueryOperator<TInputOutput, TSortKey> op, QuerySettings settings)
{
    _outputRecipient = outputRecipient;
    _op = op;
    _settings = settings;
}
private void WrapHelper<TKey>(PartitionedStream<TSource, TKey> inputStream, IPartitionedStreamRecipient<TSource> recipient, QuerySettings settings)
{
    int partitionCount = inputStream.PartitionCount;

    // Generate the shared data.
    LastQueryOperatorState<TKey> operatorState = new LastQueryOperatorState<TKey>();
    CountdownEvent sharedBarrier = new CountdownEvent(partitionCount);

    PartitionedStream<TSource, int> outputStream =
        new PartitionedStream<TSource, int>(partitionCount, Util.GetDefaultComparer<int>(), OrdinalIndexState.Shuffled);

    for (int i = 0; i < partitionCount; i++)
    {
        outputStream[i] = new LastQueryOperatorEnumerator<TKey>(
            inputStream[i], _predicate, operatorState, sharedBarrier, settings.CancellationState.MergedCancellationToken,
            inputStream.KeyComparer, i);
    }

    recipient.Receive(outputStream);
}
//-----------------------------------------------------------------------------------
// Combines two sets of options.
//
internal QuerySettings Merge(QuerySettings settings2)
{
    if (this.TaskScheduler != null && settings2.TaskScheduler != null)
    {
        throw new InvalidOperationException(SR.ParallelQuery_DuplicateTaskScheduler);
    }

    if (this.DegreeOfParallelism != null && settings2.DegreeOfParallelism != null)
    {
        throw new InvalidOperationException(SR.ParallelQuery_DuplicateDOP);
    }

    if (this.CancellationState.ExternalCancellationToken.CanBeCanceled && settings2.CancellationState.ExternalCancellationToken.CanBeCanceled)
    {
        throw new InvalidOperationException(SR.ParallelQuery_DuplicateWithCancellation);
    }

    if (this.ExecutionMode != null && settings2.ExecutionMode != null)
    {
        throw new InvalidOperationException(SR.ParallelQuery_DuplicateExecutionMode);
    }

    if (this.MergeOptions != null && settings2.MergeOptions != null)
    {
        throw new InvalidOperationException(SR.ParallelQuery_DuplicateMergeOptions);
    }

    TaskScheduler tm = (this.TaskScheduler == null) ? settings2.TaskScheduler : this.TaskScheduler;
    int? dop = this.DegreeOfParallelism.HasValue ? this.DegreeOfParallelism : settings2.DegreeOfParallelism;
    CancellationToken externalCancellationToken = this.CancellationState.ExternalCancellationToken.CanBeCanceled
        ? this.CancellationState.ExternalCancellationToken
        : settings2.CancellationState.ExternalCancellationToken;
    ParallelExecutionMode? executionMode = this.ExecutionMode.HasValue ? this.ExecutionMode : settings2.ExecutionMode;
    ParallelMergeOptions? mergeOptions = this.MergeOptions.HasValue ? this.MergeOptions : settings2.MergeOptions;

    return new QuerySettings(tm, dop, externalCancellationToken, executionMode, mergeOptions);
}
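//---------------------------------------------------------------------------------------
// Illustrative usage sketch (not from the original source): the duplicate-setting checks in
// Merge surface through the public PLINQ operators. Specifying the same setting twice in
// one query, e.g. WithDegreeOfParallelism, is rejected when the settings are merged:
//
using System;
using System.Linq;

internal static class DuplicateSettingSketch
{
    static void Main()
    {
        try
        {
            var query = Enumerable.Range(0, 100)
                .AsParallel()
                .WithDegreeOfParallelism(4)
                .WithDegreeOfParallelism(2)   // duplicate DOP: merging the settings throws
                .Select(x => x * x);

            query.ToArray();
        }
        catch (InvalidOperationException ex)
        {
            Console.WriteLine(ex.Message);
        }
    }
}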
internal QuerySettings WithPerExecutionSettings(CancellationTokenSource topLevelCancellationTokenSource, System.Linq.Parallel.Shared<bool> topLevelDisposedFlag)
{
    QuerySettings settings = new QuerySettings(
        this.TaskScheduler, this.DegreeOfParallelism, this.CancellationState.ExternalCancellationToken, this.ExecutionMode, this.MergeOptions);

    settings.CancellationState.InternalCancellationTokenSource = topLevelCancellationTokenSource;
    settings.CancellationState.MergedCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(
        settings.CancellationState.InternalCancellationTokenSource.Token, settings.CancellationState.ExternalCancellationToken);
    settings.CancellationState.TopLevelDisposedFlag = topLevelDisposedFlag;

    settings.m_queryId = PlinqEtwProvider.NextQueryId();
    return settings;
}
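//---------------------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the merged token above is built with
// CancellationTokenSource.CreateLinkedTokenSource, so cancelling either the internal or the
// external source cancels the merged token. A minimal stand-alone demonstration:
//
using System;
using System.Threading;

internal static class LinkedTokenSketch
{
    static void Main()
    {
        using var internalSource = new CancellationTokenSource();
        using var externalSource = new CancellationTokenSource();

        // The linked source observes both of its constituents.
        using var merged = CancellationTokenSource.CreateLinkedTokenSource(internalSource.Token, externalSource.Token);

        externalSource.Cancel();   // cancelling either source trips the merged token

        Console.WriteLine(merged.Token.IsCancellationRequested);           // True
        Console.WriteLine(internalSource.Token.IsCancellationRequested);   // False: linking is one-way
    }
}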