public async Task RecordErrorAndWait_BackoffSettingsObeyed()
{
    var settings = new BackoffSettings(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(5), 2.0);
    var mock = new Mock<IScheduler>(MockBehavior.Strict);
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(1), default)).Returns(Task.FromResult(0));
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(2), default)).Returns(Task.FromResult(0));
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(4), default)).Returns(Task.FromResult(0));
    // Retry maxes out at 5 seconds
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(5), default)).Returns(Task.FromResult(0));
    // After reset
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(1), default)).Returns(Task.FromResult(0));
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"));
    var state = new RetryState(mock.Object, settings, RetrySettings.NoJitter, maxConsecutiveErrors: 5);
    await state.RecordErrorAndWaitAsync(exception, default);
    await state.RecordErrorAndWaitAsync(exception, default);
    await state.RecordErrorAndWaitAsync(exception, default);
    await state.RecordErrorAndWaitAsync(exception, default);
    state.Reset();
    await state.RecordErrorAndWaitAsync(exception, default);
}
public async Task RecordErrorAndWait_RetryInfo()
{
    var mock = new Mock<IScheduler>(MockBehavior.Strict);
    // Delay taken from retry info
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(3), default)).Returns(Task.FromResult(0));
    // Delay taken from backoff settings (which weren't affected by the first exception, because it contained backoff information)
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(1), default)).Returns(Task.FromResult(0));
    // The first exception contains retry info, so we don't use the backoff settings
    var retryInfo = new Rpc.RetryInfo { RetryDelay = Duration.FromTimeSpan(TimeSpan.FromSeconds(3)) };
    Metadata trailers = new Metadata { { RetryState.RetryInfoKey, retryInfo.ToByteArray() } };
    var exception1 = new RpcException(new Status(StatusCode.Unavailable, "Bang"), trailers);
    var exception2 = new RpcException(new Status(StatusCode.Unavailable, "Bang"));
    RetryState state = new RetryState(new FakeClock(), mock.Object, s_retrySettings, s_callSettings);
    Assert.True(state.CanRetry(exception1));
    await state.WaitAsync(exception1, default);
    Assert.True(state.CanRetry(exception2));
    await state.WaitAsync(exception2, default);
}
public async Task ErrorWithBackoffAfterDeadline_FailsRetry()
{
    // Create a clock that starts at zero ticks.
    var clock = new FakeClock(0);
    var scheduler = new AdvanceFakeClockScheduler(clock);
    var callSettings = CallSettings.FromExpiration(
        Expiration.FromDeadline(new DateTime(TimeSpan.FromSeconds(10).Ticks, DateTimeKind.Utc)));
    var state = new RetryState(clock, scheduler, s_retrySettings, callSettings);
    // The retry info contains a wait time that is past the deadline of the call.
    // The retry state will throw a DeadlineExceeded error without waiting.
    var retryInfo = new Rpc.RetryInfo { RetryDelay = Duration.FromTimeSpan(TimeSpan.FromSeconds(20)) };
    Metadata trailers = new Metadata { { RetryState.RetryInfoKey, retryInfo.ToByteArray() } };
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"), trailers);
    Assert.True(state.CanRetry(exception));
    await Assert.ThrowsAsync<RpcException>(() => state.WaitAsync(exception, default));
    // Check that the clock has not been advanced, to verify that the retry state did not wait
    // 20 seconds before throwing an exception.
    Assert.Equal(0, clock.GetCurrentDateTimeUtc().Ticks);
}
public async Task ResetDeadline()
{
    var clock = new FakeClock(0);
    var scheduler = new AdvanceFakeClockScheduler(clock);
    var callSettings = CallSettings.FromExpiration(
        Expiration.FromDeadline(new DateTime(TimeSpan.FromSeconds(7).Ticks, DateTimeKind.Utc)));
    var state = new RetryState(clock, scheduler, s_retrySettings, callSettings);
    var retryInfo = new Rpc.RetryInfo { RetryDelay = Duration.FromTimeSpan(TimeSpan.FromSeconds(3)) };
    Metadata trailers = new Metadata { { RetryState.RetryInfoKey, retryInfo.ToByteArray() } };
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"), trailers);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    // Reset does not change the absolute deadline of the call.
    // The next retry attempt will therefore fail.
    state.Reset();
    Assert.True(state.CanRetry(exception));
    await Assert.ThrowsAsync<RpcException>(() => state.WaitAsync(exception, default));
    Assert.Equal(TimeSpan.FromSeconds(6).Ticks, clock.GetCurrentDateTimeUtc().Ticks);
}
public async Task ConsecutiveErrors_FailsRetryWhenDeadlineExceeded()
{
    var clock = new FakeClock(0);
    var scheduler = new AdvanceFakeClockScheduler(clock);
    var callSettings = CallSettings.FromExpiration(
        Expiration.FromDeadline(new DateTime(TimeSpan.FromSeconds(7).Ticks, DateTimeKind.Utc)));
    var state = new RetryState(clock, scheduler, s_retrySettings, callSettings);
    var retryInfo = new Rpc.RetryInfo { RetryDelay = Duration.FromTimeSpan(TimeSpan.FromSeconds(3)) };
    Metadata trailers = new Metadata { { RetryState.RetryInfoKey, retryInfo.ToByteArray() } };
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"), trailers);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await Assert.ThrowsAsync<RpcException>(() => state.WaitAsync(exception, default));
    // Verify that the clock has been advanced 6 seconds.
    Assert.Equal(TimeSpan.FromSeconds(6).Ticks, clock.GetCurrentDateTimeUtc().Ticks);
}
public void CanRetry_SimpleStatusCodes(StatusCode code, bool expectedRetriable)
{
    var exception = new RpcException(new Status(code, "Bang"));
    RetryState state = CreateSimpleRetryState();
    Assert.Equal(expectedRetriable, state.CanRetry(exception));
}
public async Task RecordErrorAndWait_RetrySettingsObeyed()
{
    RetrySettings retrySettings = RetrySettings.FromExponentialBackoff(
        maxAttempts: int.MaxValue, // Ignored in SqlResultStream
        initialBackoff: TimeSpan.FromSeconds(1),
        maxBackoff: TimeSpan.FromSeconds(5),
        backoffMultiplier: 2.0,
        ignored => false, // Ignored in SqlResultStream
        RetrySettings.NoJitter);
    var mock = new Mock<IScheduler>(MockBehavior.Strict);
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(1), default)).Returns(Task.FromResult(0));
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(2), default)).Returns(Task.FromResult(0));
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(4), default)).Returns(Task.FromResult(0));
    // Retry maxes out at 5 seconds
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(5), default)).Returns(Task.FromResult(0));
    // After reset
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(1), default)).Returns(Task.FromResult(0));
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"));
    var state = new RetryState(new FakeClock(), mock.Object, retrySettings, s_callSettings);
    await state.WaitAsync(exception, default);
    await state.WaitAsync(exception, default);
    await state.WaitAsync(exception, default);
    await state.WaitAsync(exception, default);
    state.Reset();
    await state.WaitAsync(exception, default);
}
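// A brief worked example of the delays the strict mock above expects. This is a sketch of the
// exponential-backoff recurrence the test relies on, not the exact implementation inside RetryState:
//   nextDelay = Min(previousDelay * backoffMultiplier, maxBackoff)
//   => 1s, 2s, 4s, then capped at the 5s maxBackoff for the fourth error.
// After Reset(), the next delay starts again from the 1s initialBackoff.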
public async Task RecordErrorAndWait_RetryInfo()
{
    var settings = new BackoffSettings(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(5), 2.0);
    var mock = new Mock<IScheduler>(MockBehavior.Strict);
    // Delay taken from retry info
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(3), default)).Returns(Task.FromResult(0));
    // Delay taken from backoff settings (which have still doubled, even when the first value wasn't used)
    mock.Setup(s => s.Delay(TimeSpan.FromSeconds(2), default)).Returns(Task.FromResult(0));
    // The first exception contains retry info, so we don't use the backoff settings
    var retryInfo = new Rpc.RetryInfo { RetryDelay = Duration.FromTimeSpan(TimeSpan.FromSeconds(3)) };
    Metadata trailers = new Metadata { { RetryState.RetryInfoKey, retryInfo.ToByteArray() } };
    var exception1 = new RpcException(new Status(StatusCode.Unavailable, "Bang"), trailers);
    var exception2 = new RpcException(new Status(StatusCode.Unavailable, "Bang"));
    RetryState state = new RetryState(mock.Object, settings, RetrySettings.NoJitter, maxConsecutiveErrors: 5);
    Assert.True(state.CanRetry(exception1));
    await state.RecordErrorAndWaitAsync(exception1, default);
    Assert.True(state.CanRetry(exception2));
    await state.RecordErrorAndWaitAsync(exception2, default);
}
/// <summary>Builds an <see cref="ActionPolicy"/> that will keep retrying forever.</summary>
/// <param name="syntax">The syntax to extend.</param>
/// <param name="onRetry">The action to perform when the exception could be retried.</param>
/// <returns>A reusable instance of the policy.</returns>
public static ActionPolicy RetryForever(this Syntax<ExceptionHandler> syntax, Action<Exception> onRetry)
{
    Enforce.Arguments(() => syntax, () => onRetry);
    var state = new RetryState(onRetry);
    return new ActionPolicy(action => RetryPolicy.Implementation(action, syntax.Target, () => state));
}
public RetryReader(TextReader reader)
{
    this.reader = reader;
    this.retry = new Stack<char>();
    this.readState = new ReaderState(this);
    this.retryState = new RetryState(this);
    this.state = readState;
}
public void CanRetry_ResourceExhausted_NoRetryInfo()
{
    Metadata trailers = new Metadata { { "otherinfo", "value" } };
    var exception = new RpcException(new Status(StatusCode.ResourceExhausted, "Bang"), trailers);
    RetryState state = CreateSimpleRetryState();
    Assert.False(state.CanRetry(exception));
}
public Task Forwarding()
{
    RetryState = RetryState.Forwarding;
    return domainEvents.Raise(new RetryOperationForwarding
    {
        RequestId = requestId,
        RetryType = retryType,
        TotalNumberOfMessages = TotalNumberOfMessages,
        Progress = GetProgress(),
        IsFailed = Failed,
        StartTime = Started
    });
}
public void CanRetry_ResourceExhausted_WithRetryInfo()
{
    var retryInfo = new Rpc.RetryInfo { RetryDelay = Duration.FromTimeSpan(TimeSpan.FromSeconds(2)) };
    Metadata trailers = new Metadata { { RetryState.RetryInfoKey, retryInfo.ToByteArray() } };
    var exception = new RpcException(new Status(StatusCode.ResourceExhausted, "Bang"), trailers);
    RetryState state = CreateSimpleRetryState();
    Assert.True(state.CanRetry(exception));
}
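// A minimal helper sketch (hypothetical, for illustration only; not necessarily how RetryState
// decodes the trailer internally) showing how the binary RetryInfo entry built in the tests
// above can be read back from gRPC trailers:
private static Rpc.RetryInfo GetRetryInfoOrNull(Metadata trailers)
{
    foreach (Metadata.Entry entry in trailers)
    {
        if (entry.Key == RetryState.RetryInfoKey)
        {
            // Binary trailer values are exposed as raw bytes; parse them as a google.rpc.RetryInfo message.
            return Rpc.RetryInfo.Parser.ParseFrom(entry.ValueBytes);
        }
    }
    return null;
}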
public async Task CanRetry_MaxConsecutiveRetries_NoReset()
{
    var state = new RetryState(new NoOpScheduler(), s_retrySettings, maxConsecutiveErrors: 2);
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"));
    Assert.True(state.CanRetry(exception));
    await state.RecordErrorAndWaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await state.RecordErrorAndWaitAsync(exception, default);
    Assert.False(state.CanRetry(exception));
}
public Task Prepare(int totalNumberOfMessages)
{
    RetryState = RetryState.Preparing;
    TotalNumberOfMessages = totalNumberOfMessages;
    NumberOfMessagesForwarded = 0;
    NumberOfMessagesPrepared = 0;
    return domainEvents.Raise(new RetryOperationPreparing
    {
        RequestId = requestId,
        RetryType = retryType,
        TotalNumberOfMessages = TotalNumberOfMessages,
        Progress = GetProgress(),
        IsFailed = Failed,
        StartTime = Started
    });
}
public async Task CanRetry_NoReset()
{
    var state = new RetryState(new FakeClock(), new NoOpScheduler(), s_retrySettings, s_callSettings);
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"));
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
}
public async Task CanRetry_MaxConsecutiveRetries_NoReset()
{
    var state = new RetryState(
        new NoOpScheduler(),
        new BackoffSettings(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(15), 2.0),
        RetrySettings.NoJitter,
        maxConsecutiveErrors: 2);
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"));
    Assert.True(state.CanRetry(exception));
    await state.RecordErrorAndWaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await state.RecordErrorAndWaitAsync(exception, default);
    Assert.False(state.CanRetry(exception));
}
public Task Wait(DateTime started, string originator = null, string classifier = null, DateTime? last = null)
{
    RetryState = RetryState.Waiting;
    NumberOfMessagesPrepared = 0;
    NumberOfMessagesForwarded = 0;
    TotalNumberOfMessages = 0;
    NumberOfMessagesSkipped = 0;
    CompletionTime = null;
    Originator = originator;
    Started = started;
    Failed = false;
    Last = last;
    Classifier = classifier;
    return domainEvents.Raise(new RetryOperationWaiting
    {
        RequestId = requestId,
        RetryType = retryType,
        Progress = GetProgress(),
        StartTime = Started
    });
}
public async Task ResetTimeout()
{
    var clock = new FakeClock(0);
    var scheduler = new AdvanceFakeClockScheduler(clock);
    var callSettings = CallSettings.FromExpiration(Expiration.FromTimeout(TimeSpan.FromSeconds(7)));
    var state = new RetryState(clock, scheduler, s_retrySettings, callSettings);
    var retryInfo = new Rpc.RetryInfo { RetryDelay = Duration.FromTimeSpan(TimeSpan.FromSeconds(3)) };
    Metadata trailers = new Metadata { { RetryState.RetryInfoKey, retryInfo.ToByteArray() } };
    var exception = new RpcException(new Status(StatusCode.Unavailable, "Bang"), trailers);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    // Reset should set the deadline of the call to CurrentTime + Timeout.
    // That means that we can do two new retries without a timeout exception.
    state.Reset();
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await state.WaitAsync(exception, default);
    Assert.True(state.CanRetry(exception));
    await Assert.ThrowsAsync<RpcException>(() => state.WaitAsync(exception, default));
    // Verify that the clock has been advanced 12 seconds.
    Assert.Equal(TimeSpan.FromSeconds(12).Ticks, clock.GetCurrentDateTimeUtc().Ticks);
}
private async Task CheckForCompletion()
{
    if (NumberOfMessagesForwarded + NumberOfMessagesSkipped != TotalNumberOfMessages)
    {
        return;
    }
    RetryState = RetryState.Completed;
    CompletionTime = DateTime.UtcNow;
    await domainEvents.Raise(new RetryOperationCompleted
    {
        RequestId = requestId,
        RetryType = retryType,
        Failed = Failed,
        Progress = GetProgress(),
        StartTime = Started,
        CompletionTime = CompletionTime.Value,
        Originator = Originator,
        NumberOfMessagesProcessed = NumberOfMessagesForwarded,
        Last = Last ?? DateTime.MaxValue,
        Classifier = Classifier
    }).ConfigureAwait(false);
    if (retryType == RetryType.FailureGroup)
    {
        await domainEvents.Raise(new MessagesSubmittedForRetry
        {
            FailedMessageIds = new string[0],
            NumberOfFailedMessages = NumberOfMessagesForwarded,
            Context = Originator
        }).ConfigureAwait(false);
    }
    Log.Info($"Retry operation {requestId} completed. {NumberOfMessagesSkipped} messages skipped, {NumberOfMessagesForwarded} forwarded. Total {TotalNumberOfMessages}.");
}
// Retry loading the stream source for the specified amount of time.
internal void Retry(TimeSpan retryDuration)
{
    // Adjust time so it retries for the time specified; the retry duration is an override,
    // and the RetryDuration property should not be updated with the new value.
    RetryState = RetryState.Retrying;
    startTime = DateTime.Now.Subtract(RetryDuration - retryDuration);
    if (Retrying != null)
    {
        Retrying(this, null);
    }
    ResetSource();
}
private void OnAutoRetryStart(Exception exception)
{
    // Store the current time so we can tell when the retry time expires.
    startTime = DateTime.Now;
    RetryState = RetryState.Retrying;
    if (Retrying != null)
    {
        // TODO: jack
        Retrying(this, new SimpleEventArgs<Exception>(exception));
    }
}
private void OnRetryFailed(ExceptionRoutedEventArgs e)
{
    // Gave up on auto retry.
    ResetAutoRetry();
    RetryState = RetryState.RetriesFailed;
    if (RetryFailed != null)
    {
        RetryFailed(this, e);
    }
}
/// <summary>
/// Always returns false.
/// </summary>
/// <param name="ex">The retry state to evaluate.</param>
/// <returns>Always false.</returns>
public bool IsTransient(RetryState ex)
{
    return false;
}
// See https://github.com/googleapis/google-cloud-java/blob/master/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java#L2674
private async Task<PartialResultSet> ComputeNextAsync(CancellationToken cancellationToken)
{
    // The retry state is local to the method as we're not trying to handle callers retrying.
    RetryState retryState = new RetryState(
        _client.Settings.Clock ?? SystemClock.Instance,
        _client.Settings.Scheduler ?? SystemScheduler.Instance,
        _retrySettings,
        _callSettings);
    while (true)
    {
        // If we've successfully read to the end of the stream and emptied the buffer, we've read all the responses.
        if (_finished && _buffer.Count == 0)
        {
            return null;
        }
        // Buffer contains items up to a resume token or has reached capacity: flush.
        if (_buffer.Count > 0 && (_finished || !_safeToRetry || !_buffer.Last.Value.ResumeToken.IsEmpty))
        {
            var firstResult = _buffer.First.Value;
            _buffer.RemoveFirst();
            return firstResult;
        }
        try
        {
            if (_grpcCall == null)
            {
                // Note: no cancellation token here; if we've been given a short cancellation token,
                // it ought to apply to just the MoveNext call, not the original request.
                _grpcCall = _request.ExecuteStreaming(_client, _callSettings);
            }
            bool hasNext = await _grpcCall.ResponseStream
                .MoveNext(cancellationToken)
                .WithSessionExpiryChecking(_session)
                .ConfigureAwait(false);
            retryState.Reset();
            if (hasNext)
            {
                var next = _grpcCall.ResponseStream.Current;
                var hasResumeToken = !next.ResumeToken.IsEmpty;
                if (hasResumeToken)
                {
                    _request.ResumeToken = next.ResumeToken;
                    _safeToRetry = true;
                }
                // If the buffer is empty and this result has a resume token or we cannot resume safely
                // anyway, we can yield it immediately rather than placing it in the buffer to be
                // returned on the next iteration.
                if ((hasResumeToken || !_safeToRetry) && _buffer.Count == 0)
                {
                    return next;
                }
                _buffer.AddLast(next);
                if (_buffer.Count > _maxBufferSize && !hasResumeToken)
                {
                    // We need to flush without a restart token. Errors encountered until we see
                    // such a token will fail the read.
                    _safeToRetry = false;
                }
            }
            else
            {
                _finished = true; // Let the next iteration of the loop return null or buffered data.
            }
        }
        catch (RpcException e) when (e.StatusCode == StatusCode.Cancelled && cancellationToken.IsCancellationRequested)
        {
            // gRPC throws RpcException, but it's more idiomatic to see an OperationCanceledException.
            cancellationToken.ThrowIfCancellationRequested();
        }
        catch (RpcException e) when (_safeToRetry && retryState.CanRetry(e))
        {
            _client.Settings.Logger.Warn("Exception when reading from result stream. Retrying.", e);
            await retryState.WaitAsync(e, cancellationToken).ConfigureAwait(false);
            // Clear anything we've received since the previous response that contained a resume token.
            _buffer.Clear();
            _grpcCall.Dispose();
            _grpcCall = null;
        }
    }
}
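// A minimal sketch (hypothetical; the Current/MoveNextAsync names are assumed, not taken from
// the source) of how ComputeNextAsync above could be consumed by an async-enumerator style
// member of the same class: the caller just pulls PartialResultSet items until ComputeNextAsync
// signals end-of-stream by returning null, with buffering, resume-token bookkeeping and
// retrying all handled inside it.
public PartialResultSet Current { get; private set; }

public async Task<bool> MoveNextAsync(CancellationToken cancellationToken)
{
    Current = await ComputeNextAsync(cancellationToken).ConfigureAwait(false);
    return Current != null;
}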
// Stop and reset auto retries.
internal void ResetAutoRetry()
{
    RetryState = RetryState.None;
    retryTimer.Stop();
}
public bool InvokeShouldRetryImpl(RetryState retryStateObj)
{
    return ShouldRetryImpl(retryStateObj);
}
public void DoOnIgnoreErrorOccurred(RetryState retryState)
{
    OnIgnoreErrorOccurred(retryState);
}
public static double CalculateProgress(int totalNumberOfMessages, int numberOfMessagesPrepared, int numberOfMessagesForwarded, int numberOfMessagesSkipped, RetryState state)
{
    double total = totalNumberOfMessages;
    switch (state)
    {
        case RetryState.Preparing:
            return numberOfMessagesPrepared / total;
        case RetryState.Forwarding:
            return (numberOfMessagesForwarded + numberOfMessagesSkipped) / total;
        case RetryState.Completed:
            return 1.0;
        default:
            return 0.0;
    }
}
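// A short worked example of CalculateProgress above, written xunit-style like the tests earlier
// in this section (the values are illustrative only and not an existing test in the source):
// with 10 messages total, 5 prepared, 6 forwarded and 1 skipped,
//   Preparing reports 5 / 10, Forwarding reports (6 + 1) / 10, Completed is always 1.0,
//   and any other state (for example Waiting) reports 0.0.
Assert.Equal(0.5, CalculateProgress(10, 5, 0, 0, RetryState.Preparing), 3);
Assert.Equal(0.7, CalculateProgress(10, 5, 6, 1, RetryState.Forwarding), 3);
Assert.Equal(1.0, CalculateProgress(10, 5, 6, 1, RetryState.Completed), 3);
Assert.Equal(0.0, CalculateProgress(10, 0, 0, 0, RetryState.Waiting), 3);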
/// <summary>
/// Always returns true.
/// </summary>
/// <param name="ex">The retry state to evaluate.</param>
/// <returns>Always true.</returns>
public bool IsTransient(RetryState ex)
{
    return true;
}