/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to it's corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="filterCallback">The callback used to filter each partition.</param>
/// <param name="token">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task InitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    Dictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<DocumentProducerTree, Task> filterCallback,
    CancellationToken token)
{
    CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync();
    INameValueCollection requestHeaders = await this.CreateCommonHeadersAsync(this.GetFeedOptions(null));
    this.TraceInformation(string.Format(
        CultureInfo.InvariantCulture,
        "parallel~contextbase.initializeasync, queryspec {0}, maxbuffereditemcount: {1}, target partitionkeyrange count: {2}, maximumconcurrencylevel: {3}, documentproducer initial page size {4}",
        JsonConvert.SerializeObject(this.querySpec, DefaultJsonSerializationSettings.Value),
        this.actualMaxBufferedItemCount,
        partitionKeyRanges.Count,
        this.comparableTaskScheduler.MaximumConcurrencyLevel,
        initialPageSize));

    // Presize to the known partition count to avoid list growth reallocations.
    List<DocumentProducerTree> documentProducerTrees = new List<DocumentProducerTree>(partitionKeyRanges.Count);
    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        // Single TryGetValue lookup instead of ContainsKey + indexer (avoids a double hash lookup).
        string initialContinuationToken = null;
        if (targetRangeToContinuationMap != null)
        {
            targetRangeToContinuationMap.TryGetValue(partitionKeyRange.Id, out initialContinuationToken);
        }

        DocumentProducerTree documentProducerTree = new DocumentProducerTree(
            partitionKeyRange,
            //// Create Document Service Request callback
            (pkRange, continuationToken, pageSize) =>
            {
                INameValueCollection headers = requestHeaders.Clone();
                headers[HttpConstants.HttpHeaders.Continuation] = continuationToken;
                headers[HttpConstants.HttpHeaders.PageSize] = pageSize.ToString(CultureInfo.InvariantCulture);
                return this.CreateDocumentServiceRequest(
                    headers,
                    querySpecForInit,
                    pkRange,
                    collectionRid);
            },
            this.ExecuteRequestLazyAsync,
            //// Retry policy callback
            () => new NonRetriableInvalidPartitionExceptionRetryPolicy(
                collectionCache,
                this.Client.ResetSessionTokenRetryPolicy.GetRequestPolicy()),
            this.OnDocumentProducerTreeCompleteFetching,
            this.documentProducerForest.Comparer as IComparer<DocumentProducerTree>,
            this.equalityComparer,
            this.Client,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            initialContinuationToken);
        documentProducerTree.Filter = filter;

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(documentProducerTree);
        }

        documentProducerTrees.Add(documentProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel
    foreach (DocumentProducerTree documentProducerTree in documentProducerTrees)
    {
        if (!deferFirstPage)
        {
            await documentProducerTree.MoveNextIfNotSplit(token);
        }

        if (filterCallback != null)
        {
            await filterCallback(documentProducerTree);
        }

        // Only producers that still have results participate in the priority queue drain.
        if (documentProducerTree.HasMoreResults)
        {
            this.documentProducerForest.Enqueue(documentProducerTree);
        }
    }
}