public void MaxRetryDelay()
{
    var options = WriteOptions.CreateNew()
        .RetryInterval(2_000)
        .ExponentialBase(2)
        .MaxRetryDelay(50_000)
        .Build();

    var retry = new RetryAttempt(new HttpException("", 429), 1, options);
    Assert.AreEqual(2_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 2, options);
    Assert.AreEqual(4_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 3, options);
    Assert.AreEqual(8_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 4, options);
    Assert.AreEqual(16_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 5, options);
    Assert.AreEqual(32_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 6, options);
    Assert.AreEqual(50_000, retry.GetRetryInterval());
}
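The intervals asserted above follow a capped exponential: the configured retry interval multiplied by the exponential base raised to (attempt - 1), clamped to MaxRetryDelay. A minimal sketch of that arithmetic, assuming no jitter is configured (ComputeBackoffMs is a hypothetical helper, not the client's actual method):

// Hypothetical helper illustrating the capped exponential backoff the assertions imply.
static long ComputeBackoffMs(long retryIntervalMs, int exponentialBase, int attempt, long maxDelayMs)
{
    var delay = retryIntervalMs * (long)Math.Pow(exponentialBase, attempt - 1);
    return Math.Min(delay, maxDelayMs);
}

// ComputeBackoffMs(2_000, 2, 6, 50_000) == 50_000: 2_000 * 2^5 = 64_000, capped to 50_000,
// matching the final assertion above.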
internal static async Task<TResponse> Retry<TRequest, TResponse>(
    Func<TRequest, CallSettings, Task<TResponse>> fn,
    TRequest request,
    CallSettings callSettings,
    IClock clock,
    IScheduler scheduler)
{
    RetrySettings retrySettings = callSettings.Retry;
    if (retrySettings == null)
    {
        return await fn(request, callSettings).ConfigureAwait(false);
    }
    DateTime? overallDeadline = callSettings.Expiration.CalculateDeadline(clock);
    // Every attempt should use the same deadline, calculated from the start of the call.
    if (callSettings.Expiration?.Type == ExpirationType.Timeout)
    {
        callSettings = callSettings.WithDeadline(overallDeadline.Value);
    }
    // Remove retry from the call settings we pass into the function, so that the settings
    // can be used even for a streaming call.
    callSettings = callSettings.WithRetry(null);
    foreach (var attempt in RetryAttempt.CreateRetrySequence(retrySettings, scheduler, overallDeadline, clock))
    {
        try
        {
            return await fn(request, callSettings).ConfigureAwait(false);
        }
        catch (RpcException e) when (attempt.ShouldRetry(e))
        {
            await attempt.BackoffAsync(callSettings.CancellationToken.GetValueOrDefault()).ConfigureAwait(false);
        }
    }
    throw new InvalidOperationException("Bug in GAX retry handling: finished sequence of attempts");
}
public void ExponentialBase()
{
    var options = WriteOptions.CreateNew()
        .RetryInterval(5_000)
        .ExponentialBase(5)
        .MaxRetryDelay(int.MaxValue)
        .Build();

    var retry = new RetryAttempt(new HttpException("", 429), 1, options);
    Assert.AreEqual(5_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 2, options);
    Assert.AreEqual(25_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 3, options);
    Assert.AreEqual(125_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 4, options);
    Assert.AreEqual(625_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 5, options);
    Assert.AreEqual(3_125_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 6, options);
    Assert.AreEqual(15_625_000, retry.GetRetryInterval());

    retry = new RetryAttempt(CreateException(3), 7, options);
    Assert.AreEqual(3_000, retry.GetRetryInterval());
}
private void UpdateRetryInfo(RetryInfo retryInfo, DelegateResult<HttpResponseMessage> result, TimeSpan timeSpan,
    int retryAttempt, string retryReason, string retryMessage, string contentType, string responseBody)
{
    retryInfo.RetryCount = retryAttempt;
    RetryAttempt retry = new RetryAttempt
    {
        RetryAttemptNumber = retryAttempt,
        RetryDelay = timeSpan,
        RetryMessage = retryMessage,
        RequestFailure = new RequestFailure
        {
            Reason = retryReason,
            StatusCode = (int?)result?.Result?.StatusCode,
            ContentType = contentType,
            ResponseBody = responseBody
        }
    };
    if (result.Exception != null)
    {
        retry.RequestFailure.RequestException = new RequestException
        {
            Message = result.Exception.Message,
            Type = result.Exception.GetType().ToString(),
            Source = result.Exception.Source
        };
    }
    retryInfo.RetryAttempts.Add(retry);
}
/// <summary>
/// Resets the state, in terms of delays and error counts. This should
/// be called after each successful call.
/// </summary>
internal void Reset()
{
    _consecutiveErrors = 0;
    _retrySettingsBackoffs = RetryAttempt.CreateRetrySequence(_retrySettings, _scheduler)
        .Select(attempt => attempt.JitteredBackoff)
        .GetEnumerator();
    // Make sure "Current" is already valid.
    _retrySettingsBackoffs.MoveNext();
}
/// <summary>
/// Resets the state, in terms of delays and error counts. This should
/// be called after each successful call.
/// </summary>
internal void Reset()
{
    _currentDeadline = _callSettings.Expiration.CalculateDeadline(_clock)
        ?? _clock.GetCurrentDateTimeUtc().Add(DefaultRpcTimeout);
    _retrySettingsBackoffs = RetryAttempt.CreateRetrySequence(_retrySettings, _scheduler)
        .Select(attempt => attempt.JitteredBackoff)
        .GetEnumerator();
    // Make sure "Current" is already valid.
    _retrySettingsBackoffs.MoveNext();
}
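Both Reset variants share a pattern worth noting: the backoff enumerator is advanced once immediately, so Current already holds a valid delay before the first failure arrives. A hedged sketch of how a caller typically consumes such an enumerator (the method below is illustrative, not the library's code; IScheduler.Delay is GAX's scheduler API):

// Illustrative consumer: on each transient failure, wait for the current jittered
// backoff, then advance the enumerator so the next failure waits longer.
private async Task BackoffOnErrorAsync(CancellationToken cancellationToken)
{
    await _scheduler.Delay(_retrySettingsBackoffs.Current, cancellationToken).ConfigureAwait(false);
    _retrySettingsBackoffs.MoveNext();
}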
public Task InitialBackoffOverrideRespected()
{
    var settings = RetrySettings.FromConstantBackoff(
        maxAttempts: 4,
        backoff: FiveSeconds,
        retryFilter: ex => true,
        backoffJitter: RetrySettings.NoJitter);
    var scheduler = new FakeScheduler();
    var sequence = RetryAttempt.CreateRetrySequence(settings, scheduler, initialBackoffOverride: OneSecond);
    // Should attempt at T=0, T=1, T=6, T=11.
    return AssertAttemptsAsync(sequence, scheduler, () => new Exception(), 0, 1, 6, 11);
}
public void HeaderHasPriority()
{
    var exception = CreateException();

    var retry = new RetryAttempt(exception, 1, _default);
    Assert.AreEqual(10_000, retry.GetRetryInterval());

    retry = new RetryAttempt(new HttpException("", 429), 1, _default);
    Assert.AreEqual(5_000, retry.GetRetryInterval());
}
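CreateException() evidently builds an HttpException carrying a Retry-After header worth 10 seconds, which outranks the 5-second interval computed from the options. A sketch of that precedence rule, assuming a nullable RetryAfter property holding the header value in seconds (the property name and helper are assumptions for illustration):

// Illustrative precedence: a server-supplied Retry-After header wins over the
// client-computed exponential interval; otherwise fall back to the computed value.
static long GetRetryIntervalMs(HttpException error, long computedBackoffMs)
{
    return error.RetryAfter.HasValue
        ? error.RetryAfter.Value * 1000L  // header is expressed in seconds
        : computedBackoffMs;
}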
public Task MaxAttemptsRespected()
{
    var settings = RetrySettings.FromConstantBackoff(
        maxAttempts: 4,
        backoff: OneSecond,
        retryFilter: ex => true,
        backoffJitter: RetrySettings.NoJitter);
    var scheduler = new FakeScheduler();
    var sequence = RetryAttempt.CreateRetrySequence(settings, scheduler);
    // Should attempt at T=0, T=1, T=2, T=3, then stop because we're only allowed four attempts.
    return AssertAttemptsAsync(sequence, scheduler, () => new Exception(), 0, 1, 2, 3);
}
public void RetryableHttpErrorCodes()
{
    var retry = new RetryAttempt(new HttpException("", 428), 1, _default);
    Assert.IsFalse(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 429), 1, _default);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 504), 1, _default);
    Assert.IsTrue(retry.IsRetry());
}
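Consistent with these assertions (428 is not retried; 429 and 504 are), one plausible predicate treats 429 and all 5xx responses as transient. This is a sketch inferred from the test, not the client's documented rule:

// Hypothetical classification consistent with the assertions above.
static bool IsRetryableStatus(int statusCode) => statusCode == 429 || statusCode >= 500;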
public Task DeadlineRespected()
{
    var settings = RetrySettings.FromExponentialBackoff(
        maxAttempts: 10,
        initialBackoff: OneSecond,
        maxBackoff: FiveSeconds,
        backoffMultiplier: 2,
        retryFilter: ex => true,
        backoffJitter: RetrySettings.NoJitter);
    var scheduler = new FakeScheduler();
    var deadline = scheduler.Clock.GetCurrentDateTimeUtc() + FiveSeconds;
    var sequence = RetryAttempt.CreateRetrySequence(settings, scheduler, deadline, scheduler.Clock);
    // Should attempt at T=0, T=1, T=3, then stop because the next attempt would be after the deadline.
    return AssertAttemptsAsync(sequence, scheduler, () => new Exception(), 0, 1, 3);
}
public Task PredicateRespected()
{
    int count = 0;
    // The third call produces a plain Exception, which the retryFilter rejects,
    // so the sequence stops after attempts at T=0, 1, 2.
    Func<Exception> exceptionProvider = () =>
        ++count == 3 ? new Exception() : new RpcException(Status.DefaultCancelled);
    var settings = RetrySettings.FromExponentialBackoff(
        maxAttempts: 4,
        initialBackoff: OneSecond,
        maxBackoff: FiveSeconds,
        backoffMultiplier: 1,
        retryFilter: ex => ex is RpcException,
        backoffJitter: RetrySettings.NoJitter);
    var scheduler = new FakeScheduler();
    var sequence = RetryAttempt.CreateRetrySequence(settings, scheduler);
    return AssertAttemptsAsync(sequence, scheduler, exceptionProvider, 0, 1, 2);
}
public Task JitterRespected()
{
    var settings = RetrySettings.FromExponentialBackoff(
        maxAttempts: 6,
        initialBackoff: TimeSpan.FromSeconds(2),
        maxBackoff: TimeSpan.FromSeconds(10),
        backoffMultiplier: 2,
        retryFilter: ex => true,
        backoffJitter: new HalvingJitter());
    var scheduler = new FakeScheduler();
    var sequence = RetryAttempt.CreateRetrySequence(settings, scheduler);
    // Sequence of theoretical backoffs is 2, 4, 8, 10, 10, 10
    // Sequence of jittered backoffs is 1, 2, 4, 5, 5.
    return AssertAttemptsAsync(sequence, scheduler, () => new Exception(), 0, 1, 3, 7, 12, 17);
}
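HalvingJitter is a test helper rather than a shipped jitter strategy; an implementation consistent with the commented sequences (each theoretical backoff deterministically halved), assuming GAX's RetrySettings.IJitter contract, might look like:

// Deterministic test jitter: halves every backoff, turning the theoretical
// sequence 2, 4, 8, 10, 10 into the jittered sequence 1, 2, 4, 5, 5.
private class HalvingJitter : RetrySettings.IJitter
{
    public TimeSpan GetDelay(TimeSpan maxDelay) => TimeSpan.FromTicks(maxDelay.Ticks / 2);
}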
public void ErrorType()
{
    var retry = new RetryAttempt(new ArgumentException(""), 1, _default);
    Assert.IsFalse(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 429), 1, _default);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new WebException("", WebExceptionStatus.Timeout), 1, _default);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 0, new WebException("", WebExceptionStatus.Timeout)), 1, _default);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new WebException("", WebExceptionStatus.ProtocolError), 1, _default);
    Assert.IsFalse(retry.IsRetry());
}
internal static async Task RetryOperationUntilCompleted(
    Func<CallSettings, Task<bool>> fn,
    IClock clock,
    IScheduler scheduler,
    CallSettings callSettings,
    RetrySettings retrySettings)
{
    DateTime? overallDeadline = callSettings.Expiration.CalculateDeadline(clock);
    // Every attempt should use the same deadline, calculated from the start of the call.
    if (callSettings.Expiration?.Type == ExpirationType.Timeout)
    {
        callSettings = callSettings.WithDeadline(overallDeadline.Value);
    }
    var deadlineException = new RpcException(new Status(StatusCode.DeadlineExceeded, "Deadline Exceeded"));
    foreach (var attempt in RetryAttempt.CreateRetrySequence(retrySettings, scheduler, overallDeadline, clock))
    {
        try
        {
            bool isResponseOk = await fn(callSettings).ConfigureAwait(false);
            if (isResponseOk)
            {
                return;
            }
            if (!attempt.ShouldRetry(deadlineException))
            {
                throw deadlineException;
            }
        }
        catch (RpcException e) when (attempt.ShouldRetry(e))
        {
            // We back off below...
        }
        await attempt.BackoffAsync(callSettings.CancellationToken.GetValueOrDefault()).ConfigureAwait(false);
    }
    throw new InvalidOperationException("Bug in GAX retry handling: finished sequence of attempts");
}
public void MaxRetries()
{
    var options = WriteOptions.CreateNew().MaxRetries(5).Build();

    var retry = new RetryAttempt(new HttpException("", 429), 1, options);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 429), 2, options);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 429), 3, options);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 429), 4, options);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 429), 5, options);
    Assert.IsTrue(retry.IsRetry());

    retry = new RetryAttempt(new HttpException("", 429), 6, options);
    Assert.IsFalse(retry.IsRetry());
}
protected internal WriteApi(
    InfluxDBClientOptions options,
    WriteService service,
    WriteOptions writeOptions,
    IDomainObjectMapper mapper,
    InfluxDBClient influxDbClient,
    IObservable<Unit> disposeCommand)
{
    Arguments.CheckNotNull(service, nameof(service));
    Arguments.CheckNotNull(writeOptions, nameof(writeOptions));
    Arguments.CheckNotNull(mapper, nameof(mapper));
    Arguments.CheckNotNull(influxDbClient, nameof(influxDbClient));
    Arguments.CheckNotNull(disposeCommand, nameof(disposeCommand));

    _options = options;
    _mapper = mapper;
    _influxDbClient = influxDbClient;

    _unsubscribeDisposeCommand = disposeCommand.Subscribe(_ => Dispose());

    // backpressure - is not implemented in C#
    //
    // => use unbounded buffer
    //
    // https://github.com/dotnet/reactive/issues/19

    IObservable<IObservable<BatchWriteRecord>> batches = _subject
        //
        // Batching
        //
        .Publish(connectedSource =>
        {
            var trigger = Observable.Merge(
                // triggered by time & count
                connectedSource.Window(TimeSpan.FromMilliseconds(writeOptions.FlushInterval),
                    writeOptions.BatchSize, writeOptions.WriteScheduler),
                // flush trigger
                _flush
            );
            return connectedSource.Window(trigger);
        })
        //
        // Group by key - same bucket, same org
        //
        .SelectMany(it => it.GroupBy(batchWrite => batchWrite.Options))
        //
        // Create Write Point = bucket, org, ... + data
        //
        .Select(grouped =>
        {
            var aggregate = grouped
                .Aggregate(_stringBuilderPool.Get(), (builder, batchWrite) =>
                {
                    var data = batchWrite.ToLineProtocol();
                    if (string.IsNullOrEmpty(data))
                    {
                        return builder;
                    }

                    if (builder.Length > 0)
                    {
                        builder.Append("\n");
                    }

                    return builder.Append(data);
                })
                .Select(builder =>
                {
                    var result = builder.ToString();
                    builder.Clear();
                    _stringBuilderPool.Return(builder);
                    return result;
                });

            return aggregate.Select(records => new BatchWriteRecord(grouped.Key, records))
                .Where(batchWriteItem => !string.IsNullOrEmpty(batchWriteItem.ToLineProtocol()));
        });

    if (writeOptions.JitterInterval > 0)
    {
        batches = batches
            //
            // Jitter
            //
            .Select(source =>
            {
                return source.Delay(_ => Observable.Timer(
                    TimeSpan.FromMilliseconds(RetryAttempt.JitterDelay(writeOptions)),
                    writeOptions.WriteScheduler));
            });
    }

    var query = batches
        .Concat()
        //
        // Map to Async request
        //
        .Select(batchWriteItem =>
        {
            var org = batchWriteItem.Options.OrganizationId;
            var bucket = batchWriteItem.Options.Bucket;
            var lineProtocol = batchWriteItem.ToLineProtocol();
            var precision = batchWriteItem.Options.Precision;

            return Observable
                .Defer(() => service
                    .PostWriteAsyncWithIRestResponse(org, bucket,
                        Encoding.UTF8.GetBytes(lineProtocol), null,
                        "identity", "text/plain; charset=utf-8", null, "application/json",
                        null, precision)
                    .ToObservable())
                .RetryWhen(f => f
                    .Zip(Observable.Range(1, writeOptions.MaxRetries + 1),
                        (exception, count) => new RetryAttempt(exception, count, writeOptions))
                    .SelectMany(attempt =>
                    {
                        if (attempt.IsRetry())
                        {
                            var retryInterval = attempt.GetRetryInterval();

                            var retryable = new WriteRetriableErrorEvent(org, bucket, precision, lineProtocol,
                                attempt.Error, retryInterval);

                            Publish(retryable);

                            return Observable.Timer(TimeSpan.FromMilliseconds(retryInterval),
                                writeOptions.WriteScheduler);
                        }

                        throw attempt.Error;
                    }))
                .Select(result =>
                {
                    // ReSharper disable once ConvertIfStatementToReturnStatement
                    if (result.IsSuccessful)
                    {
                        return Notification.CreateOnNext(result);
                    }

                    return Notification.CreateOnError<IRestResponse>(HttpException.Create(result, result.Content));
                })
                .Catch<Notification<IRestResponse>, Exception>(ex =>
                {
                    var error = new WriteErrorEvent(org, bucket, precision, lineProtocol, ex);
                    Publish(error);

                    return Observable.Return(Notification.CreateOnError<IRestResponse>(ex));
                })
                .Do(res =>
                {
                    if (res.Kind == NotificationKind.OnNext)
                    {
                        var success = new WriteSuccessEvent(org, bucket, precision, lineProtocol);
                        Publish(success);
                    }
                });
        })
        .Concat()
        .Subscribe(
            notification =>
            {
                switch (notification.Kind)
                {
                    case NotificationKind.OnNext:
                        Trace.WriteLine($"The batch item: {notification} was processed successfully.");
                        break;
                    case NotificationKind.OnError:
                        Trace.WriteLine(
                            $"The batch item wasn't processed successfully because: {notification.Exception}");
                        break;
                    default:
                        Trace.WriteLine($"The batch item: {notification} was processed");
                        break;
                }
            },
            exception =>
            {
                _disposed = true;
                Trace.WriteLine($"The unhandled exception occurs: {exception}");
            },
            () =>
            {
                _disposed = true;
                Trace.WriteLine("The WriteApi was disposed.");
            });
}
internal async Task StartAsync()
{
    // State used within the method. This is modified by local methods too.
    StreamInitializationCause cause = StreamInitializationCause.WatchStarting;
    FirestoreClient.ListenStream underlyingStream = null;
    IEnumerator<RetryAttempt> retryAttempts = CreateRetryAttemptSequence();

    try
    {
        // This won't actually run forever. Calling Stop will cancel the cancellation token, and we'll end up with
        // an exception which may or may not be caught.
        while (true)
        {
            var serverResponse = await GetNextResponse().ConfigureAwait(false);
            _callbackCancellationTokenSource.Token.ThrowIfCancellationRequested();
            var result = await _state.HandleResponseAsync(serverResponse, _callbackCancellationTokenSource.Token).ConfigureAwait(false);
            switch (result)
            {
                case WatchResponseResult.Continue:
                    break;
                case WatchResponseResult.ResetStream:
                    await CloseStreamAsync().ConfigureAwait(false);
                    cause = StreamInitializationCause.ResetRequested;
                    break;
                case WatchResponseResult.StreamHealthy:
                    // Reset the retry backoff to zero.
                    retryAttempts = CreateRetryAttemptSequence();
                    break;
                default:
                    throw new InvalidOperationException($"Unknown result type: {result}");
            }
            // What about other exception types?
        }
    }
    // Swallow cancellation exceptions unless one of the user-provided cancellation tokens has been
    // cancelled, in which case it's fine to let it through.
    catch (OperationCanceledException) when (!_callbackCancellationTokenSource.Token.IsCancellationRequested)
    {
        // We really do just swallow the exception. No need for logging.
    }
    finally
    {
        lock (_stateLock)
        {
            _networkCancellationTokenSource.Dispose();
            _callbackCancellationTokenSource.Dispose();
            _stopCancellationTokenRegistration.Dispose();
            _finished = true;
        }
        // Make sure we clean up even if we get an exception we don't handle explicitly.
        await CloseStreamAsync().ConfigureAwait(false);
    }

    // Local method responsible for fetching the next response from the server stream, including
    // stream initialization and error handling.
    async Task<ListenResponse> GetNextResponse()
    {
        while (true)
        {
            try
            {
                // If we're just starting, or we've closed the stream or it broke, restart.
                if (underlyingStream == null)
                {
                    await retryAttempts.Current.BackoffAsync(_networkCancellationTokenSource.Token).ConfigureAwait(false);
                    retryAttempts.MoveNext();
                    underlyingStream = _db.Client.Listen(_listenCallSettings);
                    await underlyingStream.TryWriteAsync(CreateRequest(_state.ResumeToken)).ConfigureAwait(false);
                    _state.OnStreamInitialization(cause);
                }
                // Wait for a response or end-of-stream
                var next = await underlyingStream.GrpcCall.ResponseStream.MoveNext(_networkCancellationTokenSource.Token).ConfigureAwait(false);
                // If the server provided a response, return it
                if (next)
                {
                    return underlyingStream.GrpcCall.ResponseStream.Current;
                }
                // Otherwise, close the current stream and restart.
                await CloseStreamAsync().ConfigureAwait(false);
                cause = StreamInitializationCause.StreamCompleted;
            }
            catch (RpcException e) when (s_transientErrorStatusCodes.Contains(e.Status.StatusCode))
            {
                // Close the current stream, ready to create a new one.
                await CloseStreamAsync().ConfigureAwait(false);
                // Extend the back-off if necessary.
                if (e.Status.StatusCode == StatusCode.ResourceExhausted)
                {
                    retryAttempts.MoveNext();
                }
                cause = StreamInitializationCause.RpcError;
            }
        }
    }

    async Task CloseStreamAsync()
    {
        if (underlyingStream != null)
        {
            try
            {
                var completeTask = underlyingStream.TryWriteCompleteAsync();
                if (completeTask != null)
                {
                    await completeTask.ConfigureAwait(false);
                }
            }
            catch (RpcException)
            {
                // Swallow gRPC errors when trying to "complete" the stream. This may be in response to the network connection
                // being dropped, at which point completing the stream will fail; we don't want the listener to stop at that
                // point. Instead, it will reconnect.
            }
            underlyingStream.GrpcCall.Dispose();
        }
        underlyingStream = null;
    }

    // Create a new enumerator for the retry attempt sequence, starting with a backoff of zero.
    IEnumerator<RetryAttempt> CreateRetryAttemptSequence()
    {
        var iterator = RetryAttempt
            .CreateRetrySequence(_backoffSettings, _scheduler, initialBackoffOverride: TimeSpan.Zero)
            .GetEnumerator();
        // Make sure Current is already valid
        iterator.MoveNext();
        return iterator;
    }
}