/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to its corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="filterCallback">The callback used to filter each partition.</param>
/// <param name="token">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task InitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    Dictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<ItemProducerTree, Task> filterCallback,
    CancellationToken token)
{
    // NOTE(review): the returned cache is unused; the call is presumably kept for its
    // side effect of populating the collection cache — confirm before removing entirely.
    await this.queryContext.QueryClient.GetCollectionCacheAsync();

    this.TraceInformation(string.Format(
        CultureInfo.InvariantCulture,
        "parallel~contextbase.initializeasync, queryspec {0}, maxbuffereditemcount: {1}, target partitionkeyrange count: {2}, maximumconcurrencylevel: {3}, documentproducer initial page size {4}",
        JsonConvert.SerializeObject(querySpecForInit, DefaultJsonSerializationSettings.Value),
        this.actualMaxBufferedItemCount,
        partitionKeyRanges.Count,
        this.comparableTaskScheduler.MaximumConcurrencyLevel,
        initialPageSize));

    List<ItemProducerTree> itemProducerTrees = new List<ItemProducerTree>();
    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        // Single TryGetValue lookup instead of ContainsKey + indexer (avoids a double hash lookup).
        // TryGetValue leaves the out value as null when the key is absent, matching the old behavior.
        string initialContinuationToken = null;
        if (targetRangeToContinuationMap != null)
        {
            targetRangeToContinuationMap.TryGetValue(partitionKeyRange.Id, out initialContinuationToken);
        }

        ItemProducerTree itemProducerTree = new ItemProducerTree(
            this.queryContext,
            querySpecForInit,
            partitionKeyRange,
            this.OnItemProducerTreeCompleteFetching,
            this.itemProducerForest.Comparer as IComparer<ItemProducerTree>,
            this.equalityComparer,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            initialContinuationToken)
        {
            Filter = filter
        };

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(itemProducerTree);
        }

        itemProducerTrees.Add(itemProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel.
    foreach (ItemProducerTree itemProducerTree in itemProducerTrees)
    {
        if (!deferFirstPage)
        {
            (bool successfullyMovedNext, QueryResponse failureResponse) response = await itemProducerTree.MoveNextIfNotSplitAsync(token);
            if (response.failureResponse != null)
            {
                // Set the failure so on drain it can be returned.
                this.FailureResponse = response.failureResponse;

                // No reason to enqueue the rest of the itemProducerTrees since there is a failure.
                break;
            }
        }

        if (filterCallback != null)
        {
            await filterCallback(itemProducerTree);
        }

        if (itemProducerTree.HasMoreResults)
        {
            this.itemProducerForest.Enqueue(itemProducerTree);
        }
    }
}
/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to its corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="filterCallback">The callback used to filter each partition.</param>
/// <param name="token">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task InitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    Dictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<ItemProducerTree, Task> filterCallback,
    CancellationToken token)
{
    List<ItemProducerTree> itemProducerTrees = new List<ItemProducerTree>();
    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        // Single TryGetValue lookup instead of ContainsKey + indexer (avoids a double hash lookup).
        // TryGetValue leaves the out value as null when the key is absent, matching the old behavior.
        string initialContinuationToken = null;
        if (targetRangeToContinuationMap != null)
        {
            targetRangeToContinuationMap.TryGetValue(partitionKeyRange.Id, out initialContinuationToken);
        }

        ItemProducerTree itemProducerTree = new ItemProducerTree(
            this.queryContext,
            querySpecForInit,
            partitionKeyRange,
            this.OnItemProducerTreeCompleteFetching,
            this.itemProducerForest.Comparer as IComparer<ItemProducerTree>,
            this.equalityComparer,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            initialContinuationToken)
        {
            Filter = filter
        };

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(itemProducerTree);
        }

        itemProducerTrees.Add(itemProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel.
    foreach (ItemProducerTree itemProducerTree in itemProducerTrees)
    {
        if (!deferFirstPage)
        {
            (bool successfullyMovedNext, QueryResponseCore? failureResponse) response = await itemProducerTree.MoveNextIfNotSplitAsync(token);
            if (response.failureResponse != null)
            {
                // Set the failure so on drain it can be returned.
                this.FailureResponse = response.failureResponse;

                // No reason to enqueue the rest of the itemProducerTrees since there is a failure.
                break;
            }
        }

        if (filterCallback != null)
        {
            await filterCallback(itemProducerTree);
        }

        if (itemProducerTree.HasMoreResults)
        {
            this.itemProducerForest.Enqueue(itemProducerTree);
        }
    }
}
/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to its corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="tryFilterAsync">The callback used to filter each partition.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task<TryCatch<bool>> TryInitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    IReadOnlyDictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<ItemProducerTree, Task<TryCatch<bool>>> tryFilterAsync,
    CancellationToken cancellationToken)
{
    cancellationToken.ThrowIfCancellationRequested();

    // First pass: build one producer per target partition.
    List<ItemProducerTree> producers = new List<ItemProducerTree>();
    foreach (PartitionKeyRange range in partitionKeyRanges)
    {
        // Resolve the continuation token for this partition, if one was supplied.
        // TryGetValue leaves the out value as null when the key is absent.
        string continuationToken = null;
        if (targetRangeToContinuationMap != null)
        {
            targetRangeToContinuationMap.TryGetValue(range.Id, out continuationToken);
        }

        ItemProducerTree producer = new ItemProducerTree(
            this.queryContext,
            querySpecForInit,
            range,
            this.OnItemProducerTreeCompleteFetching,
            this.itemProducerForest.Comparer as IComparer<ItemProducerTree>,
            this.equalityComparer,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            continuationToken)
        {
            Filter = filter
        };

        // Kick off a background fetch eagerly when prefetching is allowed.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(producer);
        }

        producers.Add(producer);
    }

    // Second pass (loop fission) so the producers created above can load in parallel.
    foreach (ItemProducerTree producer in producers)
    {
        if (!deferFirstPage)
        {
            (bool movedNext, QueryResponseCore? failure) = await producer.MoveNextIfNotSplitAsync(cancellationToken);
            if (failure != null)
            {
                // Record the failure so it can be returned on drain; the remaining
                // producers are not enqueued since there is already a failure.
                this.FailureResponse = failure;
                break;
            }
        }

        if (tryFilterAsync != null)
        {
            TryCatch<bool> filterResult = await tryFilterAsync(producer);
            if (!filterResult.Succeeded)
            {
                return filterResult;
            }
        }

        if (producer.HasMoreResults)
        {
            this.itemProducerForest.Enqueue(producer);
        }
    }

    return TryCatch<bool>.FromResult(true);
}