public Task ExecuteAsync(Func<Task> operation, CancellationToken cancellationToken)
{
    ExecutionCount = 0;

    return defaultRetryPolicy.ExecuteAsync(
        () =>
        {
            doBefore(++ExecutionCount);
            return operation();
        },
        cancellationToken);
}
private async Task buildConnectionAsync(CancellationToken token)
{
    if (_connection == null)
    {
        _connection = _factory is null
            ? new TransactionState(_mode, _isolationLevel, _commandTimeout, _externalConnection, _ownsConnection)
            : new TransactionState(_factory, _mode, _isolationLevel, _commandTimeout, _ownsConnection);

        await _retryPolicy.ExecuteAsync(() => _connection.OpenAsync(token), token).ConfigureAwait(false);
    }
}
public static Task<T> ExecuteAsync<T>(
    this IRetryPolicy policy,
    Context context,
    Func<Task<T>> func,
    CancellationToken token,
    string? databaseName,
    [CallerMemberName] string? caller = null)
{
    int attempt = 0;

    Func<Task<T>> outerFunc = async () =>
    {
        string databaseText = string.IsNullOrEmpty(databaseName) ? string.Empty : $" against '{databaseName}'";

        try
        {
            attempt++;
            var result = await func();
            return result;
        }
        catch (Exception e)
        {
            // Intentionally tracing only the message: if the issue is transient, it's not very important to see the full
            // stack trace (we have never needed one), and if the issue is not transient, then the client of this class is
            // responsible for properly tracing the full stack trace.
            context.Debug(
                $"RetryPolicy.ExecuteAsync: attempt #{attempt}, Redis operation '{caller}'{databaseText} failed with: {e.Message}.",
                component: nameof(RetryPolicyExtensions));
            ExceptionDispatchInfo.Capture(e).Throw();
            throw; // unreachable
        }
    };

    return policy.ExecuteAsync(outerFunc, token);
}
protected override async Task<BoolResult> SendEventsCoreAsync(
    OperationContext context,
    ContentLocationEventData[] events,
    CounterCollection<ContentLocationEventStoreCounters> counters)
{
    IReadOnlyList<EventData> eventDatas;
    using (counters[Serialization].Start())
    {
        eventDatas = SerializeEventData(context, events);
    }

    var operationId = context.TracingContext.TraceId;

    for (var eventNumber = 0; eventNumber < eventDatas.Count; eventNumber++)
    {
        var eventData = eventDatas[eventNumber];
        eventData.Properties[EpochEventKey] = _configuration.Epoch;
        eventData.Properties[SenderMachineEventKey] = _localMachineName;
        counters[SentEventBatchCount].Increment();

        Tracer.Info(
            context,
            $"{Tracer.Name}: Sending {eventNumber}/{events.Length} event. OpId={operationId}, Epoch='{_configuration.Epoch}', Size={eventData.Body.Count}.");

        counters[SentMessagesTotalSize].Add(eventData.Body.Count);
        eventData.Properties[OperationIdEventKey] = operationId.ToString();

        // Even though the event hub client has its own built-in retry strategy, we have to wrap all the calls into a
        // separate one to cover a few more important cases that the default strategy misses.
        await _extraEventHubClientRetryPolicy.ExecuteAsync(
            async () =>
            {
                try
                {
                    await _eventHubClient.SendAsync(context, eventData);
                }
                catch (ServerBusyException exception)
                {
                    // TODO: Verify that the HResult is 50002. Documentation shows that this should be the error code for throttling,
                    // but the documentation covers Microsoft.ServiceBus.Messaging.ServerBusyException and not Microsoft.Azure.EventHubs.ServerBusyException.
                    // https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-messaging-exceptions#serverbusyexception
                    Tracer.Debug(context, $"{Tracer.Name}: OpId={operationId} was throttled by EventHub. HResult={exception.HResult}");
                    Tracer.TrackMetric(context, "EventHubThrottle", 1);
                    throw;
                }
                catch (Exception e)
                {
                    // If the error is not retryable, then the entire operation will fail and we don't need to double trace the error.
                    if (TransientEventHubErrorDetectionStrategy.IsRetryable(e))
                    {
                        Tracer.Debug(context, $"{Tracer.Name}.{nameof(SendEventsCoreAsync)} failed with retryable error=[{e}]");
                    }

                    throw;
                }
            },
            CancellationToken.None);
    }

    return BoolResult.Success;
}
public Task<ElasticResponse> SearchAsync(SearchRequest searchRequest, CancellationToken cancellationToken)
{
    var formatter = new SearchRequestFormatter(connection, mapping, searchRequest);
    log.Debug(null, null, "Request: POST {0}", formatter.Uri);
    log.Debug(null, null, "Body:\n{0}", formatter.Body);

    return retryPolicy.ExecuteAsync(
        async token =>
        {
            using (var requestMessage = new HttpRequestMessage(HttpMethod.Post, formatter.Uri) { Content = new StringContent(formatter.Body) })
            using (var response = await SendRequestAsync(connection.HttpClient, requestMessage, token))
            using (var responseStream = await response.Content.ReadAsStreamAsync())
                return ParseResponse(responseStream, log);
        },
        (response, exception) => !cancellationToken.IsCancellationRequested && exception != null,
        (response, additionalInfo) =>
        {
            additionalInfo["index"] = connection.Index;
            additionalInfo["uri"] = formatter.Uri;
            additionalInfo["query"] = formatter.Body;
        },
        cancellationToken);
}
public Task<ElasticResponse> SearchAsync(ElasticSearchRequest searchRequest)
{
    var formatter = new PostBodyRequestFormatter(connection, mapping, searchRequest);
    log.Debug(null, null, "Request: POST {0}", formatter.Uri);
    log.Debug(null, null, "Body: {0}", formatter.Body);

    return retryPolicy.ExecuteAsync(
        async () =>
        {
            using (var requestMessage = new HttpRequestMessage(HttpMethod.Post, formatter.Uri) { Content = new StringContent(formatter.Body) })
            using (var response = await SendRequestAsync(connection.HttpClient, requestMessage))
            using (var responseStream = await response.Content.ReadAsStreamAsync())
                return ParseResponse(responseStream, log);
        },
        (response, exception) => exception is TaskCanceledException,
        (response, additionalInfo) =>
        {
            additionalInfo["index"] = connection.Index;
            additionalInfo["query"] = formatter.Body;
        });
}
private async Task buildConnectionAsync(CancellationToken token)
{
    if (_connection == null)
    {
        _connection = new TransactionState(_factory, _mode, _isolationLevel, _commandTimeout, _ownsConnection);
        await _retryPolicy.ExecuteAsync(async () => await _connection.OpenAsync(token), token);
    }
}
/// <inheritdoc />
protected override async Task<BoolResult> StartupCoreAsync(OperationContext operationContext)
{
    BoolResult result;
    try
    {
        result = await RetryPolicy.ExecuteAsync(
            () => RpcClient.CreateSessionAsync(operationContext, Name, Configuration.CacheName, ImplicitPin),
            CancellationToken.None);
    }
    catch (Exception ex)
    {
        result = new BoolResult(ex);
    }

    if (!result)
    {
        await RetryPolicy.ExecuteAsync(() => RpcClient.ShutdownAsync(operationContext), CancellationToken.None).ThrowIfFailure();
    }

    return result;
}
public Task<ElasticResponse> SearchAsync(SearchRequest searchRequest, CancellationToken cancellationToken)
{
    var formatter = new SearchRequestFormatter(connection, mapping, searchRequest);

    return retryPolicy.ExecuteAsync(
        async token => await connection.SearchAsync(formatter.Body, searchRequest, token, log),
        (response, exception) => !cancellationToken.IsCancellationRequested && exception != null,
        (response, additionalInfo) =>
        {
            additionalInfo["index"] = connection.Index;
            additionalInfo["uri"] = connection.GetSearchUri(searchRequest);
            additionalInfo["query"] = formatter.Body;
        },
        cancellationToken);
}
public static Task ExecuteAsync(
    this IRetryPolicy policy,
    Context context,
    Func<Task> func,
    CancellationToken token,
    string? databaseName,
    [CallerMemberName] string? caller = null)
{
    return policy.ExecuteAsync(
        context,
        async () =>
        {
            await func();
            return true;
        },
        token,
        databaseName: databaseName,
        caller);
}
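A minimal caller sketch for the two extension overloads above (hypothetical: `_redisRetryPolicy`, `GetValueAsync`, and the StackExchange.Redis call are assumptions, not part of the snippets). `[CallerMemberName]` fills in `caller`, so the trace line in the catch block names the Redis operation automatically.

private Task<RedisValue> GetValueAsync(Context context, IDatabase database, RedisKey key, CancellationToken token)
{
    // Generic overload: the value produced by the lambda is returned to the caller,
    // and 'caller' is implicitly "GetValueAsync".
    return _redisRetryPolicy.ExecuteAsync(
        context,
        () => database.StringGetAsync(key),
        token,
        databaseName: "primary");
}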
private static async Task<int> CallExecuteWithFaultingActionAsync<TException>(IRetryPolicy policy, int numberOfTimesToThrowException)
    where TException : Exception, new()
{
    var attempts = 0;

    await policy.ExecuteAsync(() =>
    {
        if (attempts >= numberOfTimesToThrowException)
        {
            return Task.FromResult(attempts);
        }

        attempts++;
        return Task.FromException(new TException());
    });

    return attempts;
}
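A sketch of how a test might drive the helper above (hypothetical, xUnit-style; the policy construction is an assumption and presumes the policy retries at least twice). The action throws twice, so the helper should report exactly two faulting attempts.

[Fact]
public async Task ExecuteAsync_RetriesUntilActionStopsThrowing()
{
    // Treat every exception as transient for the purposes of the test (assumed predicate shape).
    IRetryPolicy policy = RetryPolicyFactory.GetExponentialPolicy(_ => true);

    var attempts = await CallExecuteWithFaultingActionAsync<InvalidOperationException>(policy, numberOfTimesToThrowException: 2);

    Assert.Equal(2, attempts);
}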
/// <summary>
/// Checks for any recordings that are still being saved to disk.
/// </summary>
/// <param name="recordingSavePath"></param>
/// <returns>True if any of the files are still being processed.</returns>
public async Task<bool> CheckForUnprocessedFilesAsync(string recordingSavePath)
{
    try
    {
        if (string.IsNullOrEmpty(recordingSavePath))
        {
            throw new UnprocessedFilesMonitorException($"Argument '{nameof(recordingSavePath)}' not provided.");
        }

        Process leagueOfLegendsProcess = Process.GetProcessesByName(LeagueClientProcess).First();

        var recordings = await _retryPolicy.ExecuteAsync(
            async () =>
            {
                return await Task.Run(() =>
                {
                    if (!leagueOfLegendsProcess.Responding)
                    {
                        throw new LeagueOfLegendsProcessException("LeagueClient is not responding.");
                    }

                    var filesFound = Directory.GetFiles(recordingSavePath, TemporaryFilesPattern);
                    _logger.Debug($"Found {filesFound.Length} temporary files.");
                    return filesFound;
                });
            },
            records => records.Length == 0,
            delayBetweenRequest: 1000,
            maxRetries: 20);

        return recordings.Length == 0;
    }
    catch (RetryPolicyException retryPolicyException)
    {
        throw new UnprocessedFilesMonitorException(
            "There was an error trying to execute the unprocessed files check. See the inner exception for details.",
            retryPolicyException);
    }
    catch (LeagueOfLegendsProcessException exception)
    {
        throw new LeagueOfLegendsProcessException("League of Legends client exception.", exception);
    }
    catch (Exception exception)
    {
        throw new UnprocessedFilesMonitorException(
            "There was an unexpected error in the unprocessed files processor. See the inner exception for details.",
            exception);
    }
}
/// <inheritdoc />
public async Task<BoolResult> StartupAsync(Context context)
{
    StartupStarted = true;

    IRetryPolicy retryPolicy = RetryPolicyFactory.GetExponentialPolicy(AuthorizationErrorDetectionStrategy.IsTransient);

    try
    {
        var creds = await retryPolicy.ExecuteAsync(
            () => _vssCredentialsFactory.GetOrCreateVssCredentialsAsync(_backingStoreBaseUri, _useAad, PatType.CacheReadWrite),
            CancellationToken.None).ConfigureAwait(false);

        _httpClientFactory = new ArtifactHttpClientFactory(
            creds,
            _httpSendTimeout,
            tracer: new AppTraceSourceContextAdapter(context, nameof(BackingContentStoreHttpClientFactory), SourceLevels.All),
            verifyConnectionCancellationToken: CancellationToken.None); // TODO: Pipe down cancellation support (bug 1365340)

        StartupCompleted = true;
        return BoolResult.Success;
    }
    catch (Exception ex)
    {
        return new BoolResult(ex);
    }
}
/// <summary>
/// Creates an http client that can communicate with a VSTS Build Cache Service.
/// </summary>
public async Task<IBlobBuildCacheHttpClient> CreateBlobBuildCacheHttpClientAsync(Context context)
{
    IRetryPolicy authRetryPolicy = RetryPolicyFactory.GetExponentialPolicy(AuthorizationErrorDetectionStrategy.IsTransient);
    var creds = await authRetryPolicy.ExecuteAsync(
        () => _vssCredentialsFactory.CreateVssCredentialsAsync(_buildCacheBaseUri, _useAad),
        CancellationToken.None).ConfigureAwait(false);

    var httpClientFactory = new ArtifactHttpClientFactory(
        creds,
        _httpSendTimeout,
        tracer: new AppTraceSourceContextAdapter(context, "BuildCacheHttpClientFactory", SourceLevels.All),
        verifyConnectionCancellationToken: CancellationToken.None); // TODO: Pipe down cancellation support (bug 1365340)

    IBlobBuildCacheHttpClient client = httpClientFactory.CreateVssHttpClient<IArtifactBlobBuildCacheHttpClient, BlobBuildCacheHttpClient>(_buildCacheBaseUri);

    await ArtifactHttpClientErrorDetectionStrategy.ExecuteAsync(
        context,
        "VerifyBlobBuildCacheHttpClientConnection",
        () => httpClientFactory.VerifyConnectionAsync(client as IArtifactHttpClient),
        CancellationToken.None).ConfigureAwait(false);

    _tracer.Debug(context, $"Verified connection to {_buildCacheBaseUri} with SessionId=[{httpClientFactory.ClientSettings.SessionId}]");
    return client;
}
private async Task<Result<TResult>> PerformRedisOperationAsync<TResult>(
    OperationContext context,
    Func<OperationContext, IDatabase, Task<TResult>> operation,
    string operationName,
    Func<Task>? onSuccess = null,
    Action<Exception>? onFailure = null,
    Action? onCancel = null,
    string? extraEndMessage = null)
{
    // The cancellation logic in this method is quite complicated.
    // We have the following "forces" that can cancel the operation:
    // 1. A token provided to this method is triggered
    //    (if the current operation is no longer needed because we already got the result from another redis instance).
    // 2. The operation exceeds a timeout.
    // 3. A multiplexer is closed and we need to retry with a newly created connection multiplexer.
    bool operationIsCanceled = false;

    // The cancellation token can be changed in this method, so we need another local to avoid re-assigning an argument.
    CancellationToken token;

    var result = await context.PerformOperationWithTimeoutAsync(
        _tracer,
        async (withTimeoutContext) =>
        {
            string getCancellationReason(bool multiplexerIsClosed)
            {
                bool externalTokenIsCancelled = context.Token.IsCancellationRequested;
                bool timeoutTokenIsCancelled = withTimeoutContext.Token.IsCancellationRequested;
                Contract.Assert(externalTokenIsCancelled || timeoutTokenIsCancelled || multiplexerIsClosed);

                operationIsCanceled = true;

                // It's possible for more than one token to be triggered; in that case we report based on the check order.
                // Have to put '!' at the end of each return statement due to this bug: https://github.com/dotnet/roslyn/issues/42396
                // Should be removed once moved to a newer C# compiler version.
                if (externalTokenIsCancelled) { return "a given cancellation token is cancelled"!; }

                if (timeoutTokenIsCancelled) { return $"Operation timed out after {_configuration.OperationTimeout}"!; }

                if (multiplexerIsClosed) { return "the multiplexer is closed"!; }

                return "The operation is not cancelled"!;
            }

            // Now the token is a combination of the "external token" and the "timeout token".
            token = withTimeoutContext.Token;

            using (Counters[RedisOperation.All].Start())
            {
                try
                {
                    // Need to register the cancellation here and not inside the ExecuteAsync callback,
                    // because the cancellation can happen before the execution of the given callback.
                    // And we still need to cancel the batch operations to finish all the tasks associated with them.
                    using (token.Register(() => { cancelTheBatch(getCancellationReason(multiplexerIsClosed: false)); }))
                    {
                        var r = await _redisRetryStrategy.ExecuteAsync(
                            withTimeoutContext,
                            async () =>
                            {
                                var (database, databaseClosedCancellationToken) = await GetDatabaseAsync(withTimeoutContext);

                                CancellationTokenSource? linkedCts = null;
                                if (_configuration.CancelBatchWhenMultiplexerIsClosed)
                                {
                                    // The database may be closed during a redis call.
                                    // Link the two tokens together and cancel the batch if either cancellation was requested.
                                    // We want to make sure that both the task returned by this call and the tasks for each
                                    // individual operation within a batch are cancelled.
                                    // To do that, we need to "notify" all the batches about the cancellation inside the Register
                                    // callback, and ExecuteBatchOperationAndGetCompletion should respect the cancellation token
                                    // and throw an exception if the token is set.
                                    linkedCts = CancellationTokenSource.CreateLinkedTokenSource(databaseClosedCancellationToken, withTimeoutContext.Token);
                                    linkedCts.Token.Register(
                                        () => { cancelTheBatch(getCancellationReason(multiplexerIsClosed: databaseClosedCancellationToken.IsCancellationRequested)); });

                                    // Now the token is a combination of the "external token", the "timeout token" and the "database is closed token".
                                    token = linkedCts.Token;

                                    // It is fine that the second cancellation token is not passed to the retry strategy.
                                    // The retry strategy only retries on redis exceptions, and everything else, like
                                    // TaskCanceledException or OperationCanceledException, is ignored.
                                }

                                // We need to dispose the token source to unlink it from the tokens the source was created from.
                                // This is important, because the database cancellation token can live a long time,
                                // referencing a lot of token sources created here.
                                using (linkedCts)
                                {
                                    return await operation(new OperationContext(withTimeoutContext, token), database);
                                }
                            },
                            token,
                            databaseName: DatabaseName);

                        if (onSuccess != null)
                        {
                            await onSuccess();
                        }

                        return new Result<TResult>(r, isNullAllowed: true);
                    }
                }
                catch (TaskCanceledException e)
                {
                    // Don't have to cancel the batch here, because we track the cancellation already and call 'cancelTheBatch' if needed.
                    return new Result<TResult>(e) { IsCancelled = true };
                }
                catch (OperationCanceledException e)
                {
                    // The same applies to OperationCanceledException as to TaskCanceledException.
                    return new Result<TResult>(e) { IsCancelled = true };
                }
                catch (Exception ex)
                {
                    onFailure?.Invoke(ex);
                    return new Result<TResult>(ex) { IsCancelled = operationIsCanceled };
                }
            }
        },
        // Tracing errors all the time. They don't happen too frequently and it's useful to know about all of them.
        traceErrorsOnly: true,
        traceOperationStarted: false,
        extraEndMessage: r => $"{extraEndMessage}Database={_configuration.DatabaseName}, ConnectionErrors={_connectionErrorCount}, IsCancelled={operationIsCanceled}",
        timeout: _configuration.OperationTimeout);

    HandleOperationResult(context, result);
    return result;

    void cancelTheBatch(string reason)
    {
        _tracer.Debug(context, $"Cancelling {operationName} against {DatabaseName} because {reason}.");
        onCancel?.Invoke();
    }
}
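The essential cancellation wiring in the method above, reduced to a standalone sketch (the helper name and parameters are assumptions, not part of the source): link the caller's token with a "connection closed" token, register a callback that cancels pending batch work with a reason, and dispose the linked source so it does not stay attached to the long-lived connection token.

static async Task<T> RunWithLinkedCancellationAsync<T>(
    Func<CancellationToken, Task<T>> operation,
    CancellationToken callerToken,
    CancellationToken connectionClosedToken,
    Action<string> cancelPendingWork)
{
    // Dispose the linked source and the registration so nothing stays rooted by the long-lived connection token.
    using var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(callerToken, connectionClosedToken);
    using var registration = linkedCts.Token.Register(
        () => cancelPendingWork(connectionClosedToken.IsCancellationRequested ? "the connection is closed" : "the caller cancelled"));

    // The operation observes a token that fires for either reason.
    return await operation(linkedCts.Token);
}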
private async Task<TResult> ExecuteAsync<TResult>(
    OperationContext originalContext,
    Func<OperationContext, CallOptions, IGlobalCacheService, Task<TResult>> executeAsync,
    Func<TResult, string?> extraEndMessage,
    string? extraStartMessage = null,
    [CallerMemberName] string caller = null!)
    where TResult : ResultBase
{
    var attempt = -1;
    using var contextWithShutdown = TrackShutdown(originalContext);
    var context = contextWithShutdown.Context;
    var callerAttempt = $"{caller}_Attempt";

    return await context.PerformOperationWithTimeoutAsync(
        Tracer,
        context =>
        {
            var callOptions = new CallOptions(
                headers: new Metadata() { MetadataServiceSerializer.CreateContextIdHeaderEntry(context.TracingContext.TraceId) },
                deadline: _clock.UtcNow + _configuration.OperationTimeout,
                cancellationToken: context.Token);

            return _retryPolicy.ExecuteAsync(
                async () =>
                {
                    await Task.Yield();
                    attempt++;

                    var stopwatch = StopwatchSlim.Start();
                    var clientCreationTime = TimeSpan.Zero;

                    var result = await context.PerformOperationAsync(
                        Tracer,
                        () =>
                        {
                            return _serviceClientFactory.UseAsync(context, service =>
                            {
                                clientCreationTime = stopwatch.Elapsed;
                                return executeAsync(context, callOptions, service);
                            });
                        },
                        extraStartMessage: extraStartMessage,
                        extraEndMessage: r => $"Attempt=[{attempt}] ClientCreationTimeMs=[{clientCreationTime.TotalMilliseconds}] {extraEndMessage(r)}",
                        caller: callerAttempt,
                        traceErrorsOnly: true);

                    await Task.Yield();

                    // Because we capture exceptions inside the PerformOperation, we need to make sure that they
                    // get propagated for the retry policy to kick in.
                    if (result.Exception != null)
                    {
                        result.ReThrow();
                    }

                    return result;
                },
                context.Token);
        },
        caller: caller,
        traceErrorsOnly: true,
        extraStartMessage: extraStartMessage,
        extraEndMessage: r => $"Attempts=[{attempt + 1}] {extraEndMessage(r)}",
        timeout: _configuration.OperationTimeout);
}
public override Task OpenAsync(CancellationToken cancellationToken)
{
    return _policy.ExecuteAsync(ct => _connection.OpenAsync(ct), cancellationToken);
}
/// <inheritdoc />
public async Task ExecuteAsync(Func<Task> action)
{
    await _decorated.ExecuteAsync(action);
    _afterExecute();
}
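A plausible shape for the class around the method above (hypothetical; only ExecuteAsync itself appears in the source, and the class name is an assumption): a decorator that forwards to an inner retry policy and runs a caller-supplied callback after each successful execution, for example to bump a counter or refresh a lease.

public sealed class AfterExecuteRetryPolicyDecorator
{
    private readonly IRetryPolicy _decorated;
    private readonly Action _afterExecute;

    public AfterExecuteRetryPolicyDecorator(IRetryPolicy decorated, Action afterExecute)
    {
        _decorated = decorated;
        _afterExecute = afterExecute;
    }

    // Runs the action through the inner policy, then invokes the callback only if no exception escaped.
    public async Task ExecuteAsync(Func<Task> action)
    {
        await _decorated.ExecuteAsync(action);
        _afterExecute();
    }
}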
/// <inheritdoc />
protected override async Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellation)
{
    return await _retryPolicy.ExecuteAsync(async ct => await base.SendAsync(request, ct), cancellation);
}
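Hypothetical wiring for a retrying handler like the one above (the RetryingHttpMessageHandler type name and constructor are assumptions): placed in front of HttpClientHandler, every request sent through the HttpClient passes through the retry policy.

var httpClient = new HttpClient(
    new RetryingHttpMessageHandler(retryPolicy)
    {
        InnerHandler = new HttpClientHandler(),
    });

// Each call is retried according to the policy before the caller sees a failure.
var response = await httpClient.GetAsync("https://example.com/health", cancellationToken);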
public async Task<ReplayRecording> GetReplayRecordingAsync()
{
    return await _retryPolicy.ExecuteAsync(
        async () => await _restRepostory.GetAsync<ReplayRecording>(ReplayRecordingURL),
        replayRecordingTask => IsRecordingStarted(replayRecordingTask));
}
protected override Task<DbDataReader> ExecuteDbDataReaderAsync(CommandBehavior behavior, CancellationToken cancellationToken)
{
    return _policy.ExecuteAsync(ct => _command.ExecuteReaderAsync(behavior, ct), cancellationToken);
}
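An end-to-end usage sketch for the resilient ADO.NET wrappers above (the ReliableSqlConnection name and constructor are assumptions; it presumes the wrapper derives from DbConnection and hands out the retrying command from CreateCommand): both the connection open and the reader execution then flow through the same retry policy.

using var connection = new ReliableSqlConnection(connectionString, retryPolicy);
await connection.OpenAsync(cancellationToken); // retried via the OpenAsync override shown earlier

using var command = connection.CreateCommand();
command.CommandText = "SELECT Id, Name FROM Widgets";

// DbCommand.ExecuteReaderAsync funnels into ExecuteDbDataReaderAsync, so this call is retried as well.
using var reader = await command.ExecuteReaderAsync(cancellationToken);
while (await reader.ReadAsync(cancellationToken))
{
    var name = reader.GetString(1);
}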