public DynamoDbDataStore(DynamoDbConfiguration configuration, ILogger<DynamoDbDataStore> logger, IRangeFilterer<TimeRange> timeFilterer, TripCounterFactory tripCounterFactory)
{
    _configuration = configuration;
    _logger = logger;
    _timeFilterer = timeFilterer;
    _tripCounterFactory = tripCounterFactory;

    var credentials = new BasicAWSCredentials(configuration.AccessKey, configuration.SecretKey);
    _client = new AmazonDynamoDBClient(credentials, RegionEndpoint.GetBySystemName(configuration.RegionSystemName));
    _context = new DynamoDBContext(_client);

    // Retry immediately and indefinitely whenever DynamoDB throttles us.
    _retryPolicy = Policy
        .Handle<ProvisionedThroughputExceededException>()
        .WaitAndRetryForeverAsync(i => TimeSpan.Zero, (Action<Exception, TimeSpan>)OnRetry);

    // Fire-and-forget loop that logs save throughput once per second.
    Task.Run(async () =>
    {
        while (true)
        {
            var timeRanges = Interlocked.Exchange(ref _timeRangesSavedSinceLastCheck, 0);
            var dataPoints = Interlocked.Exchange(ref _dataPointsSavedSinceLastCheck, 0);
            _logger.LogDebug($"{timeRanges} timeRanges {dataPoints} dataPoints");
            await Task.Delay(1000);
        }
    });
}
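// OnRetry is referenced above but not shown in this snippet. A minimal sketch,
// assuming it only logs the throttling (the body is hypothetical, not the original code):
private void OnRetry(Exception exception, TimeSpan timeSpan)
{
    _logger.LogWarning(exception, "DynamoDB provisioned throughput exceeded; retrying");
}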
protected T ExecuteQuery<T>(string uri, QueryHandler<T> handler)
{
    // Back off on HTTP 429 (Too Many Requests), waiting 30s, 60s, then 90s.
    Polly.Retry.RetryPolicy<HttpResponseMessage> httpRetryPolicy = Policy
        .HandleResult<HttpResponseMessage>(r => r.StatusCode == (HttpStatusCode)429)
        .WaitAndRetry(new[]
        {
            TimeSpan.FromSeconds(30),
            TimeSpan.FromSeconds(60),
            TimeSpan.FromSeconds(90)
        },
        onRetry: (outcome, timespan, retryAttempt, context) =>
        {
            // Included for debugging, to inspect the response on each retry.
        });

    // Note: .Result blocks the calling thread (sync-over-async).
    var response = httpRetryPolicy.ExecuteAndCapture(() => _httpClient.GetAsync(uri).Result);
    if (response.Outcome == OutcomeType.Successful)
    {
        if (response.Result.IsSuccessStatusCode)
        {
            return handler(response.Result);
        }

        throw new HttpRequestException($"There was an error while executing the HTTP query. Reason: {response.Result.ReasonPhrase}");
    }
    else
    {
        var reason = response.FinalHandledResult != null
            ? response.FinalHandledResult.ReasonPhrase
            : response.FinalException.Message;
        throw new HttpRequestException($"There was an error while executing the HTTP query. Reason: {reason}");
    }
}
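// A fully asynchronous variant would avoid blocking on .Result. A minimal sketch,
// assuming the caller can switch to a Task-returning handler (ExecuteQueryAsync
// and the Func-based handler are hypothetical, not part of the original class):
protected async Task<T> ExecuteQueryAsync<T>(string uri, Func<HttpResponseMessage, Task<T>> handler)
{
    var httpRetryPolicy = Policy
        .HandleResult<HttpResponseMessage>(r => (int)r.StatusCode == 429)
        .WaitAndRetryAsync(new[] { TimeSpan.FromSeconds(30), TimeSpan.FromSeconds(60), TimeSpan.FromSeconds(90) });

    var response = await httpRetryPolicy.ExecuteAsync(() => _httpClient.GetAsync(uri));
    if (!response.IsSuccessStatusCode)
    {
        throw new HttpRequestException($"There was an error while executing the HTTP query. Reason: {response.ReasonPhrase}");
    }

    return await handler(response);
}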
public AzureMessageLogWriter(CloudStorageAccount account, string tableName)
{
    if (account == null)
    {
        throw new ArgumentNullException("account");
    }

    if (tableName == null)
    {
        throw new ArgumentNullException("tableName");
    }

    if (string.IsNullOrWhiteSpace(tableName))
    {
        throw new ArgumentException("tableName");
    }

    this.account = account;
    this.tableName = tableName;
    tableClient = account.CreateCloudTableClient();

    // Disable the storage client's built-in retries; Polly owns retry behaviour here.
    tableClient.DefaultRequestOptions = new TableRequestOptions
    {
        RetryPolicy = new Microsoft.WindowsAzure.Storage.RetryPolicies.NoRetry()
    };

    // Exponential backoff: 2s, 4s, 8s.
    retryPolicy = Policy.Handle<Exception>().WaitAndRetry(3, retry => TimeSpan.FromSeconds(Math.Pow(2, retry)));
}
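// The policy above is presumably applied around table operations elsewhere in the
// class. A hypothetical usage sketch (GetTableReference/CreateIfNotExists are
// standard WindowsAzure.Storage calls; the surrounding method is an assumption):
public void EnsureTableExists()
{
    retryPolicy.Execute(() =>
    {
        var table = tableClient.GetTableReference(tableName);
        table.CreateIfNotExists();
    });
}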
public RefreshPolicy()
{
    _totalTries = 0;
    var policyProvider = new PolicyProvider(_mockUtility.Object);
    _policy = (policyProvider.WrappedPolicy.Inner as PolicyWrap)?.Outer as Polly.Retry.RetryPolicy;
}
public SharePointRepository(ILogger logger)
{
    this.logger = logger;

    var waitAndRetryPolicy = Policy.Handle<Exception>().WaitAndRetry(
        retryCount: 3, // Retry up to 3 times - should be enough that we eventually succeed.
        sleepDurationProvider: attempt => TimeSpan.FromMinutes(1), // Wait 1 minute between each try.
        onRetry: (exception, calculatedWaitDuration, retryCount, context) => // Capture some info for logging.
        {
            //var methodThatRaisedException = context["methodName"];
            logger.Log(LogLevel.Info, $"Retrying Attempt: {retryCount}");
            logger.Log(LogLevel.Error, exception, context.ExecutionKey);
        });

    this.retryPolicy = waitAndRetryPolicy;

    /*
     * var circuitBreakerPolicy = Policy
     *     .Handle<Exception>()
     *     .CircuitBreaker(
     *         exceptionsAllowedBeforeBreaking: 3,
     *         durationOfBreak: TimeSpan.FromSeconds(3),
     *         onBreak: (ex, breakDelay) =>
     *         {
     *             logger.Log(LogLevel.Info, ".Breaker logging: Breaking the circuit for " + breakDelay.TotalMilliseconds + "ms!", ConsoleColor.Magenta);
     *             logger.Log(LogLevel.Info, "..due to: " + ex.Message, ConsoleColor.Magenta);
     *         },
     *         onReset: () => logger.Log(LogLevel.Info, ".Breaker logging: Call ok! Closed the circuit again!", ConsoleColor.Magenta),
     *         onHalfOpen: () => logger.Log(LogLevel.Info, ".Breaker logging: Half-open: Next call is a trial!", ConsoleColor.Magenta));
     *
     * this.retryPolicy = Policy.Wrap(new Policy[] { waitAndRetryPolicy, circuitBreakerPolicy });
     */
}
public MongoDbRepository(string connectionString, string collectionName)
{
    this.collectionName = collectionName;
    policyAsync = ConfigureRetryPolicy(connectionString, true);
    policy = ConfigureRetryPolicy(connectionString, false);
    policy.Execute(() => InitiateMongoClient(connectionString, collectionName));
}
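// ConfigureRetryPolicy is referenced above but not shown. A minimal sketch,
// assuming a pre-v7 Polly where sync and async retry policies share the Policy
// base class; the handled MongoException type and backoff values are assumptions:
private static Policy ConfigureRetryPolicy(string connectionString, bool isAsync)
{
    if (isAsync)
    {
        return Policy.Handle<MongoException>()
            .WaitAndRetryAsync(3, attempt => TimeSpan.FromSeconds(Math.Pow(2, attempt)));
    }

    return Policy.Handle<MongoException>()
        .WaitAndRetry(3, attempt => TimeSpan.FromSeconds(Math.Pow(2, attempt)));
}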
private void RetryAssertUntilTelemetryShouldBeAvailable(System.Action assertion, TimeSpan timeout)
{
    RetryPolicy retryPolicy = Policy.Handle<Exception>(exception =>
        {
            _logger.LogError(exception, "Failed assertion. Reason: {Message}", exception.Message);
            return true;
        })
        .WaitAndRetryForever(index => TimeSpan.FromSeconds(3));

    // The timeout policy wraps the retry, so retrying stops once the overall timeout elapses.
    Policy.Timeout(timeout)
        .Wrap(retryPolicy)
        .Execute(assertion);
}
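// Example call, assuming an xUnit test; GetReceivedTelemetry is a hypothetical
// helper that queries the telemetry backend:
RetryAssertUntilTelemetryShouldBeAvailable(
    () => Assert.NotEmpty(GetReceivedTelemetry()),
    timeout: TimeSpan.FromMinutes(2));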
public RetryPolicy()
{
    _totalTimeSlept = 0;
    _totalRetries = 0;

    // Intercept Polly's sleeps so the test runs instantly while still counting retries.
    SystemClock.SleepAsync = (span, token) =>
    {
        _totalTimeSlept += span.Seconds;
        _totalRetries++;
        return TaskHelper.FromResult(true);
    };

    _mockUtility.Setup(u => u.GetRandomJitterLength(It.IsAny<int>())).Returns(1000);
    var policyProvider = new PolicyProvider(_mockUtility.Object);
    _policy = (policyProvider.WrappedPolicy.Inner as PolicyWrap)?.Inner as Polly.Retry.RetryPolicy;
}
/// <summary>
/// Initializes a new instance of the <see cref="TopicSender"/> class,
/// automatically creating the given topic if it does not exist.
/// </summary>
public TopicSender(ServiceBusSettings settings, string topic, ILogger<TopicSender> logger)
{
    this.settings = settings;
    this.topic = topic;
    this.logger = logger;

    // TODO: verify how this works; it was changed for the newest version of the ServiceBus library.
    retryPolicy = Policy.Handle<Exception>(e =>
        {
            logger.LogWarning(
                "An error occurred while sending message to the topic {Topic}: {ErrorMessage}",
                topic,
                e.Message);
            return true;
        })
        .WaitAndRetryAsync(4, retry => TimeSpan.FromSeconds(Math.Pow(2, retry)));

    topicClient = new TopicClient(settings.ConnectionString, topic);
}
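// The class presumably exposes a send method elsewhere. A minimal sketch of how
// the policy above would guard the client (the method itself is an assumption):
public Task SendAsync(Message message) =>
    retryPolicy.ExecuteAsync(() => topicClient.SendAsync(message));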
public static async Task Test()
{
    Polly.Retry.RetryPolicy<int> politicaWaitAndRetry = Polly.Policy
        .HandleResult<int>(i => true) // Treat every result as a failure, so every attempt retries.
        .WaitAndRetryAsync(new[]
        {
            TimeSpan.FromSeconds(1),
            TimeSpan.FromSeconds(3),
            TimeSpan.FromSeconds(5),
            TimeSpan.FromSeconds(7)
        },
        (e, t, i, c) => ReportPollyError(e, t, i, c));

    await politicaWaitAndRetry.ExecuteAsync(() =>
    {
        Logger.LogInformation("test running");
        return Task.FromResult(1);
    });
}
internal static async Task SafeMessagingActionAsync(Task task, Message message, Action<bool> callback, string actionErrorDescription, string messageId, string subscription, ILogger logger, /*long processingElapsedMilliseconds, long schedulingElapsedMilliseconds,*/ Stopwatch roundtripStopwatch)
{
    Polly.Retry.RetryPolicy retryPolicy = Policy.Handle<Exception>()
        .WaitAndRetryAsync(3, retry => TimeSpan.FromSeconds(2), (ex, ts, attempt, context) =>
        {
            logger.LogWarning($"An error occurred in attempt number {attempt} to release message {message.MessageId}" +
                $" in subscription \"{subscription}\": {ex.GetType().Name + " - " + ex.Message}");
        });

    long messagingActionStart = 0;

    await retryPolicy.ExecuteAsync(async () =>
    {
        try
        {
            messagingActionStart = roundtripStopwatch.ElapsedMilliseconds;
            await task;
            roundtripStopwatch.Stop();
            callback(true);
        }
        catch (Exception e)
        {
            roundtripStopwatch.Stop();

            if (e is MessageLockLostException || /*ex is MessagingException ||*/ e is TimeoutException)
            {
                logger.LogWarning(actionErrorDescription, messageId, subscription, e.GetType().Name + " - " + e.Message, messagingActionStart, roundtripStopwatch.ElapsedMilliseconds);
            }
            else
            {
                logger.LogError($"Unexpected error releasing message in subscription \"{subscription}\": {e.GetType().Name + " - " + e.Message}");
            }

            callback(false);
        }
    });
}
public AggregationWorker(string sourceName, int aggregationSeconds, int sourceDelaySeconds, ISourceDataProvider provider, Func<AggregatedDataRange, Task> onPoint)
{
    _sourceName = sourceName;
    _aggregationSeconds = aggregationSeconds;
    _sourceDelaySeconds = sourceDelaySeconds;
    _provider = provider;
    _onPoint = onPoint;

    // Retry forever, using a function to calculate the duration to wait between
    // retries based on the current retry attempt (allows for exponential backoff).
    // In this case it will wait for
    //   2 ^ 1 = 2 seconds, then
    //   2 ^ 2 = 4 seconds, then
    //   2 ^ 3 = 8 seconds, then
    //   2 ^ 4 = 16 seconds, then
    //   2 ^ 5 = 32 seconds, which caps every subsequent wait.
    _retryPolicy = Policy
        .Handle<Exception>(exception => exception.GetType() != typeof(TaskCanceledException))
        .WaitAndRetryForeverAsync(retryAttempt => TimeSpan.FromSeconds(Math.Pow(2, Math.Min(retryAttempt, 5))));

    Task.Run(TimerLoop);
}
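// TimerLoop is referenced above but not shown. A minimal sketch of how the policy
// is typically applied in such a loop; GetDataAsync is a hypothetical provider
// method, not necessarily the real ISourceDataProvider API:
private async Task TimerLoop()
{
    while (true)
    {
        // Any exception other than TaskCanceledException is retried with the capped backoff.
        await _retryPolicy.ExecuteAsync(async () =>
        {
            var dataRange = await _provider.GetDataAsync(_sourceName);
            await _onPoint(dataRange);
        });

        await Task.Delay(TimeSpan.FromSeconds(_aggregationSeconds));
    }
}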
/// <summary>
/// Applies any pending EF Core migrations for <typeparamref name="TContext"/> and runs the supplied seeder.
/// </summary>
/// <typeparam name="TContext">The DbContext type to migrate.</typeparam>
/// <param name="app">The application builder.</param>
/// <param name="seeder">A callback used to seed the database after migration.</param>
public static IApplicationBuilder MigrateDbContext<TContext>(this IApplicationBuilder app, Action<TContext, IServiceProvider> seeder) where TContext : DbContext
{
    using (var scope = app.ApplicationServices.GetRequiredService<IServiceScopeFactory>().CreateScope())
    {
        IServiceProvider services = scope.ServiceProvider;
        ILogger<TContext> logger = services.GetRequiredService<ILogger<TContext>>();

        using (TContext context = services.GetService<TContext>())
        {
            try
            {
                logger.LogInformation($"Migrating database associated with context {typeof(TContext).Name}");

                Polly.Retry.RetryPolicy retry = Policy.Handle<SqlException>()
                    .WaitAndRetry(new TimeSpan[]
                    {
                        TimeSpan.FromSeconds(5),
                        TimeSpan.FromSeconds(10),
                        TimeSpan.FromSeconds(15),
                    });

                retry.Execute(() =>
                {
                    // Retrying covers the case where the SQL Server container started by
                    // docker-compose is not ready yet; the DbContext's own retry options
                    // only apply to transient exceptions, not to this startup migration.
                    context.Database.Migrate();
                    seeder(context, services);
                });

                logger.LogInformation($"Migrated database associated with context {typeof(TContext).Name}");
            }
            catch (Exception ex)
            {
                logger.LogError(ex, $"An error occurred while migrating the database used on context {typeof(TContext).Name}");
            }
        }
    }

    return app;
}
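// Typical call site in Program/Startup; the context type and seed body are
// hypothetical illustrations, not taken from the original code:
app.MigrateDbContext<OrderingContext>((context, services) =>
{
    var logger = services.GetRequiredService<ILogger<OrderingContext>>();
    logger.LogInformation("Seeding {Context}", nameof(OrderingContext));
    // seed initial data here
});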
public async Task<RemoteCallBackData> InvokeAsync(List<ServerAddress> service, string serviceIdOrPath, IDictionary<string, object> paras, string token)
{
    ServerAddress desc = await _addressSelector.GetAddressAsync(service);
    if (paras == null)
    {
        paras = new ConcurrentDictionary<string, object>();
    }

    if (_retryTimes < 0)
    {
        _retryTimes = service.Count;
    }

    RemoteCallBackData result = null;

    // On each failure, pick a (possibly different) address and retry.
    Polly.Retry.RetryPolicy retryPolicy = Policy.Handle<Exception>()
        .RetryAsync(_retryTimes, async (ex, count) =>
        {
            desc = await _addressSelector.GetAddressAsync(service);
            _logger.Debug(
                $"FaultHandling, retry times: {count}, serviceId: {serviceIdOrPath}, Address: {desc.Code}, RemoteServiceCaller executes retry by Polly for exception {ex.Message}");
        });

    // If every retry fails, fall back to a canned 500 response.
    Polly.Wrap.PolicyWrap<RemoteCallBackData> fallbackPolicy = Policy<RemoteCallBackData>.Handle<Exception>()
        .FallbackAsync(new RemoteCallBackData
        {
            ErrorCode = "500",
            ErrorMsg = "Error occurred when communicating with the server. The server may be down."
        })
        .WrapAsync(retryPolicy);

    return await fallbackPolicy.ExecuteAsync(async () =>
    {
        ITransportClient client = _transportClientFactory.CreateClient(desc);
        if (client == null)
        {
            return new RemoteCallBackData { ErrorCode = "400", ErrorMsg = "Service unavailable" };
        }

        _logger.Debug($"invoke: serviceId: {serviceIdOrPath}, parameters count: {paras.Count()}, token: {token}");

        Payload payload = new Payload();
        if (!string.IsNullOrEmpty(token) && _authorizationHandler != null && desc.EnableAuthorization)
        {
            var authorizationContext = _authorizationHandler.GetAuthorizationContext(token, desc.Roles);
            if (authorizationContext != null)
            {
                payload.Items = authorizationContext;
            }
            else
            {
                return new RemoteCallBackData { ErrorCode = "401", ErrorMsg = "Unauthorized" };
            }
        }

        result = await client.SendAsync(new RemoteCallData
        {
            Payload = payload,
            Parameters = paras,
            ServiceId = serviceIdOrPath,
            Token = token,
        });

        return result;
    });
}