public async Task PartitionKeyRangeGoneRetryPolicyWithNextRetryPolicy()
{
    Mock<IDocumentClientRetryPolicy> nextRetryPolicyMock = new Mock<IDocumentClientRetryPolicy>();

    nextRetryPolicyMock
        .Setup(m => m.ShouldRetryAsync(It.IsAny<ResponseMessage>(), It.IsAny<CancellationToken>()))
        .Returns(() => Task.FromResult<ShouldRetryResult>(ShouldRetryResult.RetryAfter(TimeSpan.FromDays(1))))
        .Verifiable();

    nextRetryPolicyMock
        .Setup(m => m.ShouldRetryAsync(It.IsAny<Exception>(), It.IsAny<CancellationToken>()))
        .Returns(() => Task.FromResult<ShouldRetryResult>(ShouldRetryResult.RetryAfter(TimeSpan.FromDays(1))))
        .Verifiable();

    PartitionKeyRangeGoneRetryPolicy retryPolicy = new PartitionKeyRangeGoneRetryPolicy(null, null, null, nextRetryPolicyMock.Object);

    ShouldRetryResult exceptionResult = await retryPolicy.ShouldRetryAsync(new Exception("", null), CancellationToken.None);
    Assert.IsNotNull(exceptionResult);
    Assert.IsTrue(exceptionResult.ShouldRetry);
    Assert.AreEqual(TimeSpan.FromDays(1), exceptionResult.BackoffTime);

    ShouldRetryResult messageResult = await retryPolicy.ShouldRetryAsync(new ResponseMessage(), CancellationToken.None);
    Assert.IsNotNull(messageResult);
    Assert.IsTrue(messageResult.ShouldRetry);
    Assert.AreEqual(TimeSpan.FromDays(1), messageResult.BackoffTime);
}
private Task<ShouldRetryResult> ShouldRetryAsyncInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    string resourceIdOrFullName,
    Func<Task<ShouldRetryResult>> continueIfNotHandled)
{
    if (statusCode.HasValue
        && subStatusCode.HasValue
        && statusCode == HttpStatusCode.Gone
        && subStatusCode == SubStatusCodes.NameCacheIsStale)
    {
        if (!this.retried)
        {
            if (!string.IsNullOrEmpty(resourceIdOrFullName))
            {
                this.clientCollectionCache.Refresh(resourceIdOrFullName);
            }

            this.retried = true;
            return Task.FromResult(ShouldRetryResult.RetryAfter(TimeSpan.Zero));
        }
        else
        {
            return Task.FromResult(ShouldRetryResult.NoRetry());
        }
    }

    return continueIfNotHandled != null
        ? continueIfNotHandled()
        : Task.FromResult(ShouldRetryResult.NoRetry());
}
private Task<ShouldRetryResult> ShouldRetryAsyncInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    string resourceIdOrFullName,
    Func<Task<ShouldRetryResult>> continueIfNotHandled)
{
    if (!statusCode.HasValue && !subStatusCode.HasValue)
    {
        return continueIfNotHandled();
    }

    if (statusCode == HttpStatusCode.BadRequest
        && subStatusCode == SubStatusCodes.PartitionKeyMismatch
        && this.retriesAttempted < MaxRetries)
    {
        Debug.Assert(resourceIdOrFullName != null);
        if (!string.IsNullOrEmpty(resourceIdOrFullName))
        {
            this.clientCollectionCache.Refresh(resourceIdOrFullName);
        }

        this.retriesAttempted++;
        return Task.FromResult(ShouldRetryResult.RetryAfter(TimeSpan.Zero));
    }

    return continueIfNotHandled();
}
public Task<ShouldRetryResult> ShouldRetryAsync(
    Exception exception,
    CancellationToken cancellationToken)
{
    TimeSpan backoffTime = TimeSpan.FromSeconds(0);

    if (!WebExceptionUtility.IsWebExceptionRetriable(exception))
    {
        // Have caller propagate original exception.
        this.durationTimer.Stop();
        return Task.FromResult(ShouldRetryResult.NoRetry());
    }

    // Don't penalise first retry with delay.
    if (this.attemptCount++ > 1)
    {
        int remainingSeconds = WebExceptionRetryPolicy.waitTimeInSeconds - this.durationTimer.Elapsed.Seconds;
        if (remainingSeconds <= 0)
        {
            this.durationTimer.Stop();
            return Task.FromResult(ShouldRetryResult.NoRetry());
        }

        backoffTime = TimeSpan.FromSeconds(Math.Min(this.currentBackoffSeconds, remainingSeconds));
        this.currentBackoffSeconds *= WebExceptionRetryPolicy.backoffMultiplier;
    }

    DefaultTrace.TraceWarning("Received retriable web exception, will retry, {0}", exception);
    return Task.FromResult(ShouldRetryResult.RetryAfter(backoffTime));
}
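The method above implements a capped exponential backoff: each delay grows by backoffMultiplier but is clamped to whatever remains of the overall waitTimeInSeconds budget. A minimal standalone sketch of that arithmetic, with hypothetical constant values (the policy's real constants are defined elsewhere in the SDK):

using System;

static class BackoffSketch
{
    private const int WaitTimeInSeconds = 30; // hypothetical overall retry budget
    private const int BackoffMultiplier = 2;  // hypothetical growth factor

    // Returns the next delay, or null once the overall budget is exhausted.
    public static TimeSpan? NextBackoff(ref int currentBackoffSeconds, TimeSpan elapsed)
    {
        int remainingSeconds = WaitTimeInSeconds - (int)elapsed.TotalSeconds;
        if (remainingSeconds <= 0)
        {
            return null; // Budget spent: the caller should stop retrying.
        }

        // The delay is the exponential backoff, capped at the remaining budget.
        TimeSpan backoff = TimeSpan.FromSeconds(Math.Min(currentBackoffSeconds, remainingSeconds));
        currentBackoffSeconds *= BackoffMultiplier;
        return backoff;
    }
}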
private async Task<ShouldRetryResult> ShouldRetryInternalAsync(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    CancellationToken cancellationToken)
{
    if (statusCode == HttpStatusCode.Gone)
    {
        if (subStatusCode == SubStatusCodes.PartitionKeyRangeGone
            || subStatusCode == SubStatusCodes.CompletingSplit
            || subStatusCode == SubStatusCodes.CompletingPartitionMigration)
        {
            PartitionKeyRangeCache partitionKeyRangeCache = await this.container.ClientContext.DocumentClient.GetPartitionKeyRangeCacheAsync();
            string containerRid = await this.container.GetCachedRIDAsync(
                forceRefresh: false,
                cancellationToken: cancellationToken);
            await partitionKeyRangeCache.TryGetOverlappingRangesAsync(
                containerRid,
                FeedRangeEpk.FullRange.Range,
                forceRefresh: true);

            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }

        if (subStatusCode == SubStatusCodes.NameCacheIsStale)
        {
            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }
    }

    return null;
}
private ShouldRetryResult ShouldRetryInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    string resourceIdOrFullName)
{
    if (!statusCode.HasValue
        && (!subStatusCode.HasValue || subStatusCode.Value == SubStatusCodes.Unknown))
    {
        return null;
    }

    if (statusCode == HttpStatusCode.Gone && subStatusCode == SubStatusCodes.NameCacheIsStale)
    {
        if (!this.retried)
        {
            if (!string.IsNullOrEmpty(resourceIdOrFullName))
            {
                this.clientCollectionCache.Refresh(resourceIdOrFullName);
            }

            this.retried = true;
            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }
        else
        {
            return ShouldRetryResult.NoRetry();
        }
    }

    return null;
}
private async Task<ShouldRetryResult> ShouldRetryInternalAsync(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    ShouldRetryResult shouldRetryResult,
    CancellationToken cancellationToken)
{
    if (this.request == null)
    {
        // Someone didn't call OnBeforeSendRequest - nothing we can do.
        DefaultTrace.TraceCritical("Cannot apply RenameCollectionAwareClientRetryPolicy as OnBeforeSendRequest has not been called and there is no DocumentServiceRequest context.");
        return shouldRetryResult;
    }

    Debug.Assert(shouldRetryResult != null);
    if (!shouldRetryResult.ShouldRetry && !this.hasTriggered && statusCode.HasValue && subStatusCode.HasValue)
    {
        if (this.request.IsNameBased
            && statusCode.Value == HttpStatusCode.NotFound
            && subStatusCode.Value == SubStatusCodes.ReadSessionNotAvailable)
        {
            // Clear the session token, because the collection name might be reused.
            DefaultTrace.TraceWarning("Clear the token for name-based request {0}", this.request.ResourceAddress);
            this.sessionContainer.ClearTokenByCollectionFullname(this.request.ResourceAddress);
            this.hasTriggered = true;

            string oldCollectionRid = this.request.RequestContext.ResolvedCollectionRid;
            this.request.ForceNameCacheRefresh = true;
            this.request.RequestContext.ResolvedCollectionRid = null;

            try
            {
                CosmosContainerProperties collectionInfo = await this.collectionCache.ResolveCollectionAsync(this.request, cancellationToken);
                if (collectionInfo == null)
                {
                    DefaultTrace.TraceCritical("Can't recover from session unavailable exception because resolving collection name {0} returned null", this.request.ResourceAddress);
                }
                else if (!string.IsNullOrEmpty(oldCollectionRid) && !string.IsNullOrEmpty(collectionInfo.ResourceId))
                {
                    if (!oldCollectionRid.Equals(collectionInfo.ResourceId))
                    {
                        return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
                    }
                }
            }
            catch (Exception e)
            {
                // When ResolveCollectionAsync throws an exception, ignore it because it's an attempt to recover from an existing
                // error. When the recovery fails we return ShouldRetryResult.NoRetry and propagate the original exception to the client.
                DefaultTrace.TraceCritical("Can't recover from session unavailable exception because resolving collection name {0} failed with {1}", this.request.ResourceAddress, e.ToString());
            }
        }
    }

    return shouldRetryResult;
}
private ShouldRetryResult ShouldRetryInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    string resourceIdOrFullName)
{
    if (!statusCode.HasValue
        && (!subStatusCode.HasValue || subStatusCode.Value == SubStatusCodes.Unknown))
    {
        return null;
    }

    if (statusCode == HttpStatusCode.BadRequest
        && subStatusCode == SubStatusCodes.PartitionKeyMismatch
        && this.retriesAttempted < MaxRetries)
    {
        Debug.Assert(resourceIdOrFullName != null);
        if (!string.IsNullOrEmpty(resourceIdOrFullName))
        {
            this.clientCollectionCache.Refresh(resourceIdOrFullName, HttpConstants.Versions.CurrentVersion);
        }

        this.retriesAttempted++;
        return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
    }

    return null;
}
private async Task<ShouldRetryResult> ShouldRetryOnEndpointFailureAsync(
    bool isReadRequest,
    bool forceRefresh,
    bool retryOnPreferredLocations)
{
    if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount)
    {
        DefaultTrace.TraceInformation("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {0}", this.failoverRetryCount);
        return ShouldRetryResult.NoRetry();
    }

    this.failoverRetryCount++;

    if (this.locationEndpoint != null)
    {
        if (isReadRequest)
        {
            this.globalEndpointManager.MarkEndpointUnavailableForRead(this.locationEndpoint);
        }
        else
        {
            this.globalEndpointManager.MarkEndpointUnavailableForWrite(this.locationEndpoint);
        }
    }

    TimeSpan retryDelay = TimeSpan.Zero;
    if (!isReadRequest)
    {
        DefaultTrace.TraceInformation("Failover happening. retryCount {0}", this.failoverRetryCount);
        if (this.failoverRetryCount > 1)
        {
            // If both endpoints have been retried, follow the regular retry interval.
            retryDelay = TimeSpan.FromMilliseconds(ClientRetryPolicy.RetryIntervalInMS);
        }
    }
    else
    {
        retryDelay = TimeSpan.FromMilliseconds(ClientRetryPolicy.RetryIntervalInMS);
    }

    await this.globalEndpointManager.RefreshLocationAsync(null, forceRefresh);

    int retryLocationIndex = this.failoverRetryCount; // Used to generate a round-robin effect
    if (retryOnPreferredLocations)
    {
        retryLocationIndex = 0; // When the endpoint is marked as unavailable, it is moved to the bottom of the preference list
    }

    this.retryContext = new RetryContext
    {
        RetryLocationIndex = retryLocationIndex,
        RetryRequestOnPreferredLocations = retryOnPreferredLocations,
    };

    return ShouldRetryResult.RetryAfter(retryDelay);
}
private async Task<ShouldRetryResult> ShouldRetryInternalAsync(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode)
{
    if (!statusCode.HasValue
        && (!subStatusCode.HasValue || subStatusCode.Value == SubStatusCodes.Unknown))
    {
        return null;
    }

    // Received 403.3 on write region, initiate the endpoint rediscovery
    if (statusCode == HttpStatusCode.Forbidden && subStatusCode == SubStatusCodes.WriteForbidden)
    {
        if (this.partitionKeyRangeLocationCache.TryMarkEndpointUnavailableForPartitionKeyRange(
                this.documentServiceRequest))
        {
            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }

        DefaultTrace.TraceWarning("Endpoint not writable. Refresh cache and retry");
        return await this.ShouldRetryOnEndpointFailureAsync(
            isReadRequest: false,
            forceRefresh: true,
            retryOnPreferredLocations: false);
    }

    // Regional endpoint is not available yet for reads (e.g. add/online of a region is in progress)
    if (statusCode == HttpStatusCode.Forbidden
        && subStatusCode == SubStatusCodes.DatabaseAccountNotFound
        && (this.isReadRequest || this.canUseMultipleWriteLocations))
    {
        DefaultTrace.TraceWarning("Endpoint not available for reads. Refresh cache and retry");
        return await this.ShouldRetryOnEndpointFailureAsync(
            isReadRequest: this.isReadRequest,
            forceRefresh: false,
            retryOnPreferredLocations: false);
    }

    if (statusCode == HttpStatusCode.NotFound && subStatusCode == SubStatusCodes.ReadSessionNotAvailable)
    {
        return this.ShouldRetryOnSessionNotAvailable();
    }

    // Received 503.0 due to client connect timeout or Gateway
    if (statusCode == HttpStatusCode.ServiceUnavailable && subStatusCode == SubStatusCodes.Unknown)
    {
        this.partitionKeyRangeLocationCache.TryMarkEndpointUnavailableForPartitionKeyRange(
            this.documentServiceRequest);
        return this.ShouldRetryOnServiceUnavailable();
    }

    return null;
}
private ShouldRetryResult ShouldRetryOnSessionNotAvailable()
{
    this.sessionTokenRetryCount++;

    if (!this.enableEndpointDiscovery)
    {
        // If endpoint discovery is disabled, the request cannot be retried anywhere else.
        return ShouldRetryResult.NoRetry();
    }

    if (this.canUseMultipleWriteLocations)
    {
        ReadOnlyCollection<Uri> endpoints = this.isReadRequest
            ? this.globalEndpointManager.ReadEndpoints
            : this.globalEndpointManager.WriteEndpoints;

        if (this.sessionTokenRetryCount > endpoints.Count)
        {
            // When use multiple write locations is true and the request has been tried
            // on all locations, don't retry the request.
            return ShouldRetryResult.NoRetry();
        }

        this.retryContext = new RetryContext()
        {
            RetryCount = this.sessionTokenRetryCount - 1,
            RetryRequestOnPreferredLocations = this.sessionTokenRetryCount > 1,
            ClearSessionTokenOnSessionNotAvailable = this.sessionTokenRetryCount == endpoints.Count // Clear on last attempt
        };

        return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
    }

    if (this.sessionTokenRetryCount > 1)
    {
        // When multiple write locations cannot be used, don't retry the request if
        // it has already been tried on the write location.
        return ShouldRetryResult.NoRetry();
    }

    this.retryContext = new RetryContext
    {
        RetryCount = this.sessionTokenRetryCount - 1,
        RetryRequestOnPreferredLocations = false,
        ClearSessionTokenOnSessionNotAvailable = true,
    };

    return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
}
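Assuming endpoint discovery is enabled, the branching above reduces to a single comparison: retry while the attempt count has not exceeded the number of candidate endpoints, or a single attempt when multiple write locations cannot be used. A condensed restatement for clarity (a hypothetical helper, not SDK code; it omits the retryContext side effects):

// Hypothetical restatement of the session-not-available retry decision.
static bool ShouldRetrySessionNotAvailable(
    int sessionTokenRetryCount,
    bool canUseMultipleWriteLocations,
    int endpointCount)
{
    int maxAttempts = canUseMultipleWriteLocations ? endpointCount : 1;
    return sessionTokenRetryCount <= maxAttempts;
}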
private async Task<ShouldRetryResult> ShouldRetryInternalAsync(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    CancellationToken cancellationToken)
{
    if (!statusCode.HasValue
        && (!subStatusCode.HasValue || subStatusCode.Value == SubStatusCodes.Unknown))
    {
        return null;
    }

    if (statusCode == HttpStatusCode.Gone && subStatusCode == SubStatusCodes.PartitionKeyRangeGone)
    {
        if (this.retried)
        {
            return ShouldRetryResult.NoRetry();
        }

        using (DocumentServiceRequest request = DocumentServiceRequest.Create(
            OperationType.Read,
            ResourceType.Collection,
            this.collectionLink,
            null,
            AuthorizationTokenType.PrimaryMasterKey))
        {
            ContainerProperties collection = await this.collectionCache.ResolveCollectionAsync(request, cancellationToken, this.trace);
            CollectionRoutingMap routingMap = await this.partitionKeyRangeCache.TryLookupAsync(
                collectionRid: collection.ResourceId,
                previousValue: null,
                request: request,
                trace: this.trace);
            if (routingMap != null)
            {
                // Force refresh.
                await this.partitionKeyRangeCache.TryLookupAsync(
                    collectionRid: collection.ResourceId,
                    previousValue: routingMap,
                    request: request,
                    trace: this.trace);
            }
        }

        this.retried = true;
        return ShouldRetryResult.RetryAfter(TimeSpan.FromSeconds(0));
    }

    return null;
}
private async Task<ShouldRetryResult> ShouldRetryAsyncInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    CancellationToken cancellationToken,
    Func<Task<ShouldRetryResult>> continueIfNotHandled)
{
    if (statusCode.HasValue
        && subStatusCode.HasValue
        && statusCode == HttpStatusCode.Gone
        && subStatusCode == SubStatusCodes.PartitionKeyRangeGone)
    {
        if (this.retried)
        {
            return ShouldRetryResult.NoRetry();
        }

        using (DocumentServiceRequest request = DocumentServiceRequest.Create(
            OperationType.Read,
            ResourceType.Collection,
            this.collectionLink,
            null,
            AuthorizationTokenType.PrimaryMasterKey))
        {
            CosmosContainerSettings collection = await this.collectionCache.ResolveCollectionAsync(request, cancellationToken);
            CollectionRoutingMap routingMap = await this.partitionKeyRangeCache.TryLookupAsync(collection.ResourceId, null, request, false, cancellationToken);
            if (routingMap != null)
            {
                // Force refresh.
                await this.partitionKeyRangeCache.TryLookupAsync(
                    collection.ResourceId,
                    routingMap,
                    request,
                    false,
                    cancellationToken);
            }
        }

        this.retried = true;
        return ShouldRetryResult.RetryAfter(TimeSpan.FromSeconds(0));
    }

    if (continueIfNotHandled != null)
    {
        return await continueIfNotHandled() ?? ShouldRetryResult.NoRetry();
    }

    return ShouldRetryResult.NoRetry();
}
private async Task<ShouldRetryResult> ShouldRetryInternalAsync(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    CancellationToken cancellationToken)
{
    if (statusCode == HttpStatusCode.Gone)
    {
        this.retriesOn410++;

        if (this.retriesOn410 > MaxRetryOn410)
        {
            return ShouldRetryResult.NoRetry();
        }

        if (subStatusCode == SubStatusCodes.PartitionKeyRangeGone
            || subStatusCode == SubStatusCodes.CompletingSplit
            || subStatusCode == SubStatusCodes.CompletingPartitionMigration)
        {
            PartitionKeyRangeCache partitionKeyRangeCache = await this.container.ClientContext.DocumentClient.GetPartitionKeyRangeCacheAsync(NoOpTrace.Singleton);
            string containerRid = await this.container.GetCachedRIDAsync(
                forceRefresh: false,
                NoOpTrace.Singleton,
                cancellationToken: cancellationToken);
            await partitionKeyRangeCache.TryGetOverlappingRangesAsync(
                containerRid,
                FeedRangeEpk.FullRange.Range,
                NoOpTrace.Singleton,
                forceRefresh: true);

            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }

        if (subStatusCode == SubStatusCodes.NameCacheIsStale)
        {
            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }
    }

    // Batch API can return 413, which means the response is bigger than 4MB.
    // Operations that exceed the 4MB limit are returned as 413/3402, while operations within the limit return 200.
    if (statusCode == HttpStatusCode.RequestEntityTooLarge
        && (int)subStatusCode == SubstatusCodeBatchResponseSizeExceeded)
    {
        return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
    }

    return null;
}
private async Task<ShouldRetryResult> ShouldRetryOnEndpointFailureAsync(bool isReadRequest, bool forceRefresh)
{
    if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount)
    {
        DefaultTrace.TraceInformation("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {0}", this.failoverRetryCount);
        return ShouldRetryResult.NoRetry();
    }

    this.failoverRetryCount++;

    if (this.locationEndpoint != null)
    {
        if (isReadRequest)
        {
            this.globalEndpointManager.MarkEndpointUnavailableForRead(this.locationEndpoint);
        }
        else
        {
            this.globalEndpointManager.MarkEndpointUnavailableForWrite(this.locationEndpoint);
        }
    }

    TimeSpan retryDelay = TimeSpan.Zero;
    if (!isReadRequest)
    {
        DefaultTrace.TraceInformation("Failover happening. retryCount {0}", this.failoverRetryCount);
        if (this.failoverRetryCount > 1)
        {
            // If both endpoints have been retried, follow the regular retry interval.
            retryDelay = TimeSpan.FromMilliseconds(ClientRetryPolicy.RetryIntervalInMS);
        }
    }
    else
    {
        retryDelay = TimeSpan.FromMilliseconds(ClientRetryPolicy.RetryIntervalInMS);
    }

    await this.globalEndpointManager.RefreshLocationAsync(null, forceRefresh);

    this.retryContext = new RetryContext
    {
        RetryCount = this.failoverRetryCount,
        RetryRequestOnPreferredLocations = false
    };

    return ShouldRetryResult.RetryAfter(retryDelay);
}
private ShouldRetryResult ShouldRetryInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    string resourceIdOrFullName)
{
    if (statusCode == HttpStatusCode.Gone
        && subStatusCode == SubStatusCodes.PartitionKeyRangeGone
        && this.retriesAttempted < MaxRetries)
    {
        this.retriesAttempted++;
        return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
    }

    return null;
}
public Task<ShouldRetryResult> ShouldRetryAsync(
    Exception exception,
    CancellationToken cancellationToken)
{
    HttpRequestMessage httpRequestMessage = this.getHttpRequestMessage();
    this.diagnosticsContext.AddDiagnosticsInternal(
        new PointOperationStatistics(
            activityId: Trace.CorrelationManager.ActivityId.ToString(),
            statusCode: HttpStatusCode.InternalServerError,
            subStatusCode: SubStatusCodes.Unknown,
            responseTimeUtc: DateTime.UtcNow,
            requestCharge: 0,
            errorMessage: exception.ToString(),
            method: httpRequestMessage.Method,
            requestUri: httpRequestMessage.RequestUri.OriginalString,
            requestSessionToken: null,
            responseSessionToken: null));

    if (!this.IsExceptionTransientRetriable(
            httpRequestMessage.Method,
            exception,
            cancellationToken))
    {
        // Have caller propagate original exception.
        return Task.FromResult(ShouldRetryResult.NoRetry());
    }

    TimeSpan backOffTime = TimeSpan.FromSeconds(0);

    // Don't penalize first retry with delay.
    if (this.attemptCount++ > 1)
    {
        int remainingSeconds = TransientHttpClientRetryPolicy.WaitTimeInSeconds
            - (int)(DateTime.UtcNow - this.startDateTimeUtc).TotalSeconds;
        if (remainingSeconds <= 0)
        {
            ShouldRetryResult shouldRetry = this.GetShouldRetryFromException(exception);
            return Task.FromResult(shouldRetry);
        }

        backOffTime = TimeSpan.FromSeconds(Math.Min(this.currentBackoffSeconds, remainingSeconds));
        this.currentBackoffSeconds *= TransientHttpClientRetryPolicy.BackoffMultiplier;
    }

    DefaultTrace.TraceWarning("Received retriable web exception, will retry, {0}", exception);
    return Task.FromResult(ShouldRetryResult.RetryAfter(backOffTime));
}
private ShouldRetryResult ShouldRetryInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode)
{
    if (statusCode == HttpStatusCode.Gone
        && (subStatusCode == SubStatusCodes.PartitionKeyRangeGone
            || subStatusCode == SubStatusCodes.NameCacheIsStale
            || subStatusCode == SubStatusCodes.CompletingSplit
            || subStatusCode == SubStatusCodes.CompletingPartitionMigration)
        && this.retriesAttempted < MaxRetries)
    {
        this.retriesAttempted++;
        return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
    }

    return null;
}
public async Task RetryHandlerHttpClientExceptionRefreshesLocations()
{
    DocumentClient dc = new MockDocumentClient(RetryHandlerTests.TestUri, "test");
    CosmosClient client = new CosmosClient(
        RetryHandlerTests.TestUri.OriginalString,
        Guid.NewGuid().ToString(),
        new CosmosClientOptions(),
        dc);

    Mock<IDocumentClientRetryPolicy> mockClientRetryPolicy = new Mock<IDocumentClientRetryPolicy>();
    mockClientRetryPolicy.Setup(m => m.ShouldRetryAsync(It.IsAny<Exception>(), It.IsAny<CancellationToken>()))
        .Returns<Exception, CancellationToken>((ex, token) => Task.FromResult(ShouldRetryResult.RetryAfter(TimeSpan.FromMilliseconds(1))));

    Mock<IRetryPolicyFactory> mockRetryPolicy = new Mock<IRetryPolicyFactory>();
    mockRetryPolicy.Setup(m => m.GetRequestPolicy())
        .Returns(() => mockClientRetryPolicy.Object);

    RetryHandler retryHandler = new RetryHandler(client);

    int handlerCalls = 0;
    int expectedHandlerCalls = 2;
    TestHandler testHandler = new TestHandler((request, response) =>
    {
        handlerCalls++;
        if (handlerCalls == expectedHandlerCalls)
        {
            return TestHandler.ReturnSuccess();
        }

        throw new HttpRequestException("DNS or some other network issue");
    });

    retryHandler.InnerHandler = testHandler;

    RequestInvokerHandler invoker = new RequestInvokerHandler(client, requestedClientConsistencyLevel: null);
    invoker.InnerHandler = retryHandler;

    RequestMessage requestMessage = new RequestMessage(HttpMethod.Get, new System.Uri("https://dummy.documents.azure.com:443/dbs"));
    requestMessage.Headers.Add(HttpConstants.HttpHeaders.PartitionKey, "[]");
    requestMessage.ResourceType = ResourceType.Document;
    requestMessage.OperationType = OperationType.Read;

    await invoker.SendAsync(requestMessage, new CancellationToken());
    Assert.AreEqual(expectedHandlerCalls, handlerCalls);
}
private Task<ShouldRetryResult> ShouldRetryInternalAsync(TimeSpan? retryAfter)
{
    TimeSpan retryDelay = TimeSpan.Zero;
    if (this.currentAttemptCount < this.maxAttemptCount
        && this.CheckIfRetryNeeded(retryAfter, out retryDelay))
    {
        this.currentAttemptCount++;
        DefaultTrace.TraceWarning(
            "Operation will be retried after {0} milliseconds. Current attempt {1}, Cumulative delay {2}",
            retryDelay.TotalMilliseconds,
            this.currentAttemptCount,
            this.cumulativeRetryDelay);
        return Task.FromResult(ShouldRetryResult.RetryAfter(retryDelay));
    }
    else
    {
        DefaultTrace.TraceError(
            "Operation will NOT be retried. Current attempt {0}",
            this.currentAttemptCount);
        return Task.FromResult(ShouldRetryResult.NoRetry());
    }
}
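Every policy in this section plugs into the same driver pattern: run the operation, ask the policy whether to retry, wait out the backoff, and repeat. A minimal sketch of such a loop, using only the ShouldRetryAsync, ShouldRetry, and BackoffTime members visible in these snippets (the helper itself is hypothetical, not the SDK's actual executor):

using System;
using System.Threading;
using System.Threading.Tasks;

static class RetryDriverSketch
{
    // Hypothetical driver loop over an IDocumentClientRetryPolicy.
    public static async Task<T> ExecuteWithRetriesAsync<T>(
        Func<Task<T>> operation,
        IDocumentClientRetryPolicy retryPolicy,
        CancellationToken cancellationToken)
    {
        while (true)
        {
            cancellationToken.ThrowIfCancellationRequested();
            try
            {
                return await operation();
            }
            catch (Exception exception)
            {
                ShouldRetryResult result = await retryPolicy.ShouldRetryAsync(exception, cancellationToken);
                if (!result.ShouldRetry)
                {
                    throw; // The policy gave up: surface the original exception.
                }

                await Task.Delay(result.BackoffTime, cancellationToken);
            }
        }
    }
}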
/// <summary>
/// For a ServiceUnavailable (503.0), the client could be hitting a local Direct/TCP timeout,
/// or a Gateway request could have failed with a similar response because an endpoint is not yet available.
/// The request is retried only if other regions are available.
/// </summary>
private ShouldRetryResult ShouldRetryOnServiceUnavailable()
{
    if (this.serviceUnavailableRetryCount++ >= ClientRetryPolicy.MaxServiceUnavailableRetryCount)
    {
        DefaultTrace.TraceInformation($"ShouldRetryOnServiceUnavailable() Not retrying. Retry count = {this.serviceUnavailableRetryCount}.");
        return ShouldRetryResult.NoRetry();
    }

    if (!this.canUseMultipleWriteLocations && !this.isReadRequest)
    {
        // Write requests on single master cannot be retried; no other regions are available.
        return ShouldRetryResult.NoRetry();
    }

    int availablePreferredLocations = this.globalEndpointManager.PreferredLocationCount;
    if (availablePreferredLocations <= 1)
    {
        // No other regions to retry on.
        DefaultTrace.TraceInformation($"ShouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. AvailablePreferredLocations = {availablePreferredLocations}.");
        return ShouldRetryResult.NoRetry();
    }

    DefaultTrace.TraceInformation($"ShouldRetryOnServiceUnavailable() Retrying. Received on endpoint {this.locationEndpoint}, IsReadRequest = {this.isReadRequest}.");

    // Retry on the next preferred location; RetryLocationIndex is used as a zero-based index.
    this.retryContext = new RetryContext()
    {
        RetryLocationIndex = this.serviceUnavailableRetryCount,
        RetryRequestOnPreferredLocations = true
    };

    return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
}
private ShouldRetryResult ShouldRetryInternal(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode,
    string resourceIdOrFullName)
{
    if (!statusCode.HasValue
        && (!subStatusCode.HasValue || subStatusCode.Value == SubStatusCodes.Unknown))
    {
        return null;
    }

    if (statusCode == HttpStatusCode.Gone && subStatusCode == SubStatusCodes.NameCacheIsStale)
    {
        if (!this.retried)
        {
            if (this.documentServiceRequest == null)
            {
                throw new InvalidOperationException("OnBeforeSendRequest was never called");
            }

            this.documentServiceRequest.ForceNameCacheRefresh = true;
            this.documentServiceRequest.ClearRoutingHints();
            this.retried = true;
            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }
        else
        {
            return ShouldRetryResult.NoRetry();
        }
    }

    return null;
}
public Task<ShouldRetryResult> ShouldRetryAsync(Exception exception, CancellationToken cancellationToken)
{
    return Task.FromResult(ShouldRetryResult.RetryAfter(TimeSpan.FromTicks(this.rand.Next(25))));
}
public Task<ShouldRetryResult> ShouldRetryAsync(CosmosResponseMessage httpResponseMessage, CancellationToken cancellationToken)
{
    return Task.FromResult(ShouldRetryResult.RetryAfter(TimeSpan.FromTicks(this.rand.Next(25))));
}
private async Task<ShouldRetryResult> ShouldRetryInternalAsync(
    HttpStatusCode? statusCode,
    SubStatusCodes? subStatusCode)
{
    if (!statusCode.HasValue
        && (!subStatusCode.HasValue || subStatusCode.Value == SubStatusCodes.Unknown))
    {
        return null;
    }

    // Received request timeout
    if (statusCode == HttpStatusCode.RequestTimeout)
    {
        DefaultTrace.TraceWarning("ClientRetryPolicy: RequestTimeout. Failed Location: {0}; ResourceAddress: {1}",
            this.documentServiceRequest?.RequestContext?.LocationEndpointToRoute?.ToString() ?? string.Empty,
            this.documentServiceRequest?.ResourceAddress ?? string.Empty);

        // Mark the partition key range as unavailable to retry future requests in a new region.
        this.partitionKeyRangeLocationCache.TryMarkEndpointUnavailableForPartitionKeyRange(
            this.documentServiceRequest);
    }

    // Received 403.3 on write region, initiate the endpoint rediscovery
    if (statusCode == HttpStatusCode.Forbidden && subStatusCode == SubStatusCodes.WriteForbidden)
    {
        // It's a write forbidden, so it's safe to retry
        if (this.partitionKeyRangeLocationCache.TryMarkEndpointUnavailableForPartitionKeyRange(
                this.documentServiceRequest))
        {
            return ShouldRetryResult.RetryAfter(TimeSpan.Zero);
        }

        DefaultTrace.TraceWarning("ClientRetryPolicy: Endpoint not writable. Refresh cache and retry. Failed Location: {0}; ResourceAddress: {1}",
            this.documentServiceRequest?.RequestContext?.LocationEndpointToRoute?.ToString() ?? string.Empty,
            this.documentServiceRequest?.ResourceAddress ?? string.Empty);

        return await this.ShouldRetryOnEndpointFailureAsync(
            isReadRequest: false,
            markBothReadAndWriteAsUnavailable: false,
            forceRefresh: true,
            retryOnPreferredLocations: false);
    }

    // Regional endpoint is not available yet for reads (e.g. add/online of a region is in progress)
    if (statusCode == HttpStatusCode.Forbidden
        && subStatusCode == SubStatusCodes.DatabaseAccountNotFound
        && (this.isReadRequest || this.canUseMultipleWriteLocations))
    {
        DefaultTrace.TraceWarning("ClientRetryPolicy: Endpoint not available for reads. Refresh cache and retry. Failed Location: {0}; ResourceAddress: {1}",
            this.documentServiceRequest?.RequestContext?.LocationEndpointToRoute?.ToString() ?? string.Empty,
            this.documentServiceRequest?.ResourceAddress ?? string.Empty);

        return await this.ShouldRetryOnEndpointFailureAsync(
            isReadRequest: this.isReadRequest,
            markBothReadAndWriteAsUnavailable: false,
            forceRefresh: false,
            retryOnPreferredLocations: false);
    }

    if (statusCode == HttpStatusCode.NotFound && subStatusCode == SubStatusCodes.ReadSessionNotAvailable)
    {
        return this.ShouldRetryOnSessionNotAvailable();
    }

    // Received 503.0 due to client connect timeout or Gateway
    if (statusCode == HttpStatusCode.ServiceUnavailable && subStatusCode == SubStatusCodes.Unknown)
    {
        DefaultTrace.TraceWarning("ClientRetryPolicy: ServiceUnavailable. Refresh cache and retry. Failed Location: {0}; ResourceAddress: {1}",
            this.documentServiceRequest?.RequestContext?.LocationEndpointToRoute?.ToString() ?? string.Empty,
            this.documentServiceRequest?.ResourceAddress ?? string.Empty);

        // Mark the partition as unavailable.
        // Let the ClientRetry logic decide if the request should be retried.
        this.partitionKeyRangeLocationCache.TryMarkEndpointUnavailableForPartitionKeyRange(
            this.documentServiceRequest);

        return this.ShouldRetryOnServiceUnavailable();
    }

    return null;
}