/// <summary>
/// Fetches the next page(s) of documents from the backend partition, driving the
/// retry policy, continuation-token bookkeeping, and scheduling/query metrics.
/// Loops until either at least one document is received or the partition is drained,
/// then hands the final response to <see cref="CompleteFetchAsync"/> and notifies the
/// producer callback. On a retryable failure it schedules a re-fetch and returns early;
/// on a terminal failure it enqueues the captured exception into the item buffer so the
/// consumer observes it in order.
/// </summary>
/// <param name="retryPolicyInstance">Retry policy carried over from a previous (retried)
/// fetch, or null to create a fresh one via <c>createRetryPolicyFunc</c>.</param>
/// <param name="cancellationToken">Token checked before each request attempt and before
/// each retry decision.</param>
/// <returns>This producer instance (allows callers to chain/continue scheduling).</returns>
private async Task<DocumentProducer<T>> FetchAsync(IDocumentClientRetryPolicy retryPolicyInstance, CancellationToken cancellationToken)
{
    // TODO: This workflow could be simplified.
    FetchResult exceptionFetchResult = null;
    try
    {
        this.fetchSchedulingMetrics.Start();
        this.fetchExecutionRangeAccumulator.BeginFetchRange();
        FeedResponse<T> feedResponse = null;
        double requestCharge = 0;
        long responseLengthBytes = 0;
        QueryMetrics queryMetrics = QueryMetrics.Zero;
        do
        {
            // PageSize is a long; clamp to int.MaxValue before narrowing for the request.
            int pageSize = (int)Math.Min(this.PageSize, (long)int.MaxValue);
            Debug.Assert(
                pageSize >= 0,
                string.Format(CultureInfo.InvariantCulture, "pageSize was negative ... this.PageSize: {0}", this.PageSize));
            using (DocumentServiceRequest request = this.createRequestFunc(this.CurrentBackendContinuationToken, pageSize))
            {
                // Reuse the policy across attempts within one logical fetch; a fresh one is
                // created only on the first attempt (or after a successful page, see below).
                retryPolicyInstance = retryPolicyInstance ?? this.createRetryPolicyFunc();
                retryPolicyInstance.OnBeforeSendRequest(request);

                // Custom backoff and retry
                ExceptionDispatchInfo exception = null;
                try
                {
                    cancellationToken.ThrowIfCancellationRequested();
                    feedResponse = await this.executeRequestFunc(request, cancellationToken);
                    this.fetchExecutionRangeAccumulator.EndFetchRange(
                        feedResponse.Count,
                        Interlocked.Read(ref this.retries));
                    this.ActivityId = Guid.Parse(feedResponse.ActivityId);
                }
                catch (Exception ex)
                {
                    // Capture (rather than rethrow) so the retry policy can inspect it with
                    // the original stack trace preserved.
                    exception = ExceptionDispatchInfo.Capture(ex);
                }

                if (exception != null)
                {
                    cancellationToken.ThrowIfCancellationRequested();
                    ShouldRetryResult shouldRetryResult = await retryPolicyInstance.ShouldRetryAsync(exception.SourceException, cancellationToken);
                    // Rethrows the captured exception when the policy says to stop retrying.
                    shouldRetryResult.ThrowIfDoneTrying(exception);

                    // Retryable: schedule a follow-up fetch (same policy instance) after the
                    // backoff and bail out of this invocation.
                    this.ScheduleFetch(retryPolicyInstance, shouldRetryResult.BackoffTime);
                    Interlocked.Increment(ref this.retries);
                    return this;
                }

                requestCharge += feedResponse.RequestCharge;
                responseLengthBytes += feedResponse.ResponseLengthBytes;
                if (feedResponse.Headers[HttpConstants.HttpHeaders.QueryMetrics] != null)
                {
                    // NOTE(review): this.activityId (field) is passed here while the property
                    // this.ActivityId is updated above from the response — presumably the field
                    // is the producer's original/correlation id; confirm against the class.
                    queryMetrics = QueryMetrics.CreateFromDelimitedStringAndClientSideMetrics(
                        feedResponse.Headers[HttpConstants.HttpHeaders.QueryMetrics],
                        new ClientSideMetrics(
                            // Interlocked.Read for an atomic read of the long counter,
                            // consistent with the other read sites in this method
                            // (a plain read of a long can tear on 32-bit runtimes).
                            Interlocked.Read(ref this.retries),
                            requestCharge,
                            this.fetchExecutionRangeAccumulator.GetExecutionRanges(),
                            new List<Tuple<string, SchedulingTimeSpan>>()),
                        this.activityId);

                    // Reset the counters.
                    Interlocked.Exchange(ref this.retries, 0);
                }

                this.UpdateRequestContinuationToken(feedResponse.ResponseContinuation);
                // A page succeeded: drop the policy so the next iteration starts fresh.
                retryPolicyInstance = null;
                this.numDocumentsFetched += feedResponse.Count;
            }
        }
        // Keep fetching while the backend returned an empty page and there is more to read.
        while (!this.FetchedAll && feedResponse.Count <= 0);

        await this.CompleteFetchAsync(feedResponse, cancellationToken);
        this.produceAsyncCompleteCallback(this, feedResponse.Count, requestCharge, queryMetrics, responseLengthBytes, cancellationToken);
    }
    catch (Exception ex)
    {
        DefaultTrace.TraceWarning(string.Format(
            CultureInfo.InvariantCulture,
            "{0}, CorrelatedActivityId: {1}, ActivityId {2} | DocumentProducer Id: {3}, Exception in FetchAsync: {4}",
            DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture),
            this.correlatedActivityId,
            this.ActivityId,
            this.targetRange.Id,
            ex.Message));
        // Defer surfacing the exception: it is enqueued into the item buffer below so the
        // consumer sees it in stream order rather than out of band.
        exceptionFetchResult = new FetchResult(ExceptionDispatchInfo.Capture(ex));
    }
    finally
    {
        this.fetchSchedulingMetrics.Stop();
        if (this.FetchedAll)
        {
            // One more callback to send the scheduling metrics
            this.produceAsyncCompleteCallback(
                producer: this,
                size: 0,
                resourceUnitUsage: 0,
                queryMetrics: QueryMetrics.CreateFromDelimitedStringAndClientSideMetrics(
                    QueryMetrics.Zero.ToDelimitedString(),
                    new ClientSideMetrics(
                        retries: 0,
                        requestCharge: 0,
                        fetchExecutionRanges: new List<FetchExecutionRange>(),
                        partitionSchedulingTimeSpans: new List<Tuple<string, SchedulingTimeSpan>>
                        {
                            new Tuple<string, SchedulingTimeSpan>(this.targetRange.Id, this.fetchSchedulingMetrics.Elapsed)
                        }),
                    Guid.Empty),
                responseLengthBytes: 0,
                token: cancellationToken);
        }
    }

    if (exceptionFetchResult != null)
    {
        // Roll the continuation token back to the last known-good backend token before
        // surfacing the failure to the consumer.
        this.UpdateRequestContinuationToken(this.CurrentBackendContinuationToken);
        await this.itemBuffer.AddAsync(exceptionFetchResult, cancellationToken);
    }

    return this;
}