internal OverflowQueue(SharedRingBufferMailbox parent)
{
    backoff = new Backoff();
    messages = new Queue<IMessage>();
    open = false;
    this.parent = parent;
}
// TODO: move to constructor
private IActorRef SpawnRunner()
{
    Props runnerProps;
    try
    {
        runnerProps = Context.DI().Props<TJobRunner>().WithDispatcher(Context.Props.Dispatcher);
    }
    catch (Exception ex)
    {
        Context.GetLogger().Error(ex, "No DI available at the moment, falling back to default props creation.");
        runnerProps = Props.Create<TJobRunner>().WithDispatcher(Context.Props.Dispatcher);
    }

    var runnerSupervisorProps = BackoffSupervisor.Props(
        Backoff.OnFailure(
            runnerProps,
            $"{Name}-runner",
            TimeSpan.FromSeconds(10),
            TimeSpan.FromSeconds(60),
            0.2,
            3)).WithDispatcher(Context.Props.Dispatcher);

    var runner = Context.ActorOf(runnerSupervisorProps, $"{Name}-runner-supervisor");
    return runner;
}
private static AsyncPolicyWrap<HttpResponseMessage> CreatePollyPolicy(PollyOptions pollyOptions)
{
    var retries = Backoff.ConstantBackoff(
        TimeSpan.FromMilliseconds(pollyOptions.RetryBackOffMs),
        pollyOptions.RetryCount,
        true);

    // Timeout policy
    var timeoutPolicy = Policy.TimeoutAsync(TimeSpan.FromMilliseconds(pollyOptions.TimeoutMs));

    // Wrap timeout policy with retry
    var retryWithTimeout = Policy
        .Handle<TimeoutRejectedException>()
        .Or<TimeoutException>()
        .OrResult<HttpResponseMessage>(IsTransientError)
        .WaitAndRetryAsync(retries)
        .WrapAsync(timeoutPolicy);

    // Wrap retry with circuit breaker
    var circuitBreaker = Policy
        .Handle<HttpRequestException>()
        .Or<TimeoutRejectedException>()
        .Or<TimeoutException>()
        .OrResult<HttpResponseMessage>(IsTransientError)
        .CircuitBreakerAsync(
            handledEventsAllowedBeforeBreaking: pollyOptions.ExceptionsBeforeBreak,
            durationOfBreak: TimeSpan.FromMilliseconds(pollyOptions.BreakDurationMs))
        .WrapAsync(retryWithTimeout);

    return circuitBreaker;
}
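A minimal usage sketch for the policy above; the option values are hypothetical (the field names are the ones the method reads). ExecuteAsync runs the HTTP call inside the circuit-breaker, retry, and timeout layers, forwarding the cancellation token so the optimistic timeout can cancel the call:

// Hypothetical usage of CreatePollyPolicy; not part of the original source.
var policy = CreatePollyPolicy(new PollyOptions
{
    RetryBackOffMs = 200,
    RetryCount = 3,
    TimeoutMs = 1_000,
    ExceptionsBeforeBreak = 5,
    BreakDurationMs = 30_000
});

using var http = new HttpClient();
HttpResponseMessage response = await policy.ExecuteAsync(
    ct => http.GetAsync("https://example.com/health", ct),
    CancellationToken.None);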
public static AsyncRetryPolicy GetWaitAndRetryPolicy<TException>(
    ILoggerService loggerService, int medianFirstRetryDelaySeconds = WaitFactor)
    where TException : Exception =>
    Policy
        .Handle<TException>()
        .WaitAndRetryAsync(
            Backoff.DecorrelatedJitterBackoffV2(TimeSpan.FromSeconds(medianFirstRetryDelaySeconds), MaxRetries),
            GetOnRetryDelegate(MaxRetries, loggerService));
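The same pattern stripped of the project-specific logger types, as a minimal standalone sketch (Backoff.DecorrelatedJitterBackoffV2 comes from Polly.Contrib.WaitAndRetry; the target URL is a placeholder):

// Standalone sketch: retry an HTTP call with decorrelated-jitter delays.
using var client = new HttpClient();
var retry = Policy
    .Handle<HttpRequestException>()
    .WaitAndRetryAsync(Backoff.DecorrelatedJitterBackoffV2(
        medianFirstRetryDelay: TimeSpan.FromSeconds(1),
        retryCount: 5));
var response = await retry.ExecuteAsync(() => client.GetAsync("https://example.com"));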
public void BackoffSupervisorOnFailure(ApplicationEnvironment applicationEnvironment)
{
    ActorSystem actorSystem = ActorSystem.Create("app");

    // Props represents a configuration object used in creating an ActorBase actor
    Props childProps = Props.Create<EchoActor>();

    TimeSpan minBackoff = TimeSpan.FromSeconds(3);
    TimeSpan maxBackoff = TimeSpan.FromSeconds(30);
    double randomFactor = 0.2;
    int maxNrOfRetries = 2;

    // Builds back-off options for creating a back-off supervisor.
    BackoffOptions backoffOptions = Backoff.OnFailure(childProps, "myEcho", minBackoff, maxBackoff, randomFactor, maxNrOfRetries);
    Props supervisor = BackoffSupervisor.Props(backoffOptions);
    IActorRef supervisorActor = actorSystem.ActorOf(supervisor, "echoSupervisor");

    supervisorActor.Tell("EchoMessage1");
    supervisorActor.Tell(new Exception("File not found exception"));
    TestUtilities.ThreadSleepSeconds(5);
    supervisorActor.Tell("EchoMessage2");
    TestUtilities.ThreadSleepSeconds(5);
    supervisorActor.Tell("EchoMessage3");
    TestUtilities.MethodEnds();
}
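EchoActor itself is not shown; a minimal sketch of a compatible child, assuming it echoes strings back and crashes on Exception messages so Backoff.OnFailure has something to restart:

// Hypothetical child actor: echoes strings, fails on Exception messages.
public class EchoActor : ReceiveActor
{
    public EchoActor()
    {
        Receive<string>(msg => Sender.Tell(msg, Self));
        Receive<Exception>(Fail);
    }

    // Rethrowing makes the actor crash, which triggers the back-off restart.
    private void Fail(Exception ex) => throw ex;
}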
public void Backoff_WithFastFirstEqualToTrue_ResultIsZero()
{
    // Arrange
    var initialDelay = TimeSpan.FromMilliseconds(1);
    const int retryCount = 3;
    const double factor = 0;
    const bool fastFirst = true;

    // Act
    IEnumerable<TimeSpan> result = Backoff.LinearBackoff(initialDelay, retryCount, factor, fastFirst);

    // Assert
    result.Should().NotBeNull();
    result.Should().HaveCount(retryCount);

    bool first = true;
    foreach (TimeSpan timeSpan in result)
    {
        if (first)
        {
            timeSpan.Should().Be(TimeSpan.Zero);
            first = false;
        }
        else
        {
            timeSpan.Should().Be(initialDelay);
        }
    }
}
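To make the expected sequence concrete: with factor 0 and fastFirst true, LinearBackoff yields TimeSpan.Zero for the first try and the unscaled initial delay for the rest. A quick sketch:

// Prints 00:00:00 once, then 00:00:00.0010000 twice.
foreach (var delay in Backoff.LinearBackoff(
    TimeSpan.FromMilliseconds(1), retryCount: 3, factor: 0, fastFirst: true))
{
    Console.WriteLine(delay);
}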
private static AsyncPolicy Create(string description, DataMoverSettings settings, ILogger logger)
{
    var breaker = Policy
        .Handle<Exception>()
        .CircuitBreakerAsync(
            exceptionsAllowedBeforeBreaking: 5,
            durationOfBreak: TimeSpan.FromSeconds(3));

    var retry = Policy
        .Handle<Exception>()
        .WaitAndRetryAsync(
            Backoff.LinearBackoff(
                TimeSpan.FromSeconds(settings.RetryInitialDelaySeconds),
                settings.RetryCount),
            (exception, span, retryCount, context) =>
            {
                logger.LogError(
                    exception,
                    "{@EventType} {@Realm} {@TimeSpan} {@retryCount}",
                    "Retry",
                    description,
                    span,
                    retryCount);
            });

    return Policy.WrapAsync(retry, breaker);
}
public ReplicatorResiliencySpec(ITestOutputHelper helper) : base(SpecConfig, helper)
{
    _sys1 = Sys;
    _sys3 = ActorSystem.Create(Sys.Name, Sys.Settings.Config);
    _sys2 = ActorSystem.Create(Sys.Name, Sys.Settings.Config);

    var settings = ReplicatorSettings.Create(Sys)
        .WithGossipInterval(TimeSpan.FromSeconds(1.0))
        .WithMaxDeltaElements(10)
        .WithRestartReplicatorOnFailure(true);

    var props = BackoffSupervisor.Props(
        Backoff.OnStop(
            childProps: Replicator.Props(settings),
            childName: "replicator",
            minBackoff: TimeSpan.FromSeconds(3),
            maxBackoff: TimeSpan.FromSeconds(300),
            randomFactor: 0.2,
            maxNrOfRetries: -1)
        .WithFinalStopMessage(m => m is Terminate))
        .WithDeploy(Deploy.Local)
        .WithDispatcher(settings.Dispatcher);

    _replicator1 = _sys1.ActorOf(props, "replicatorSuper");
    _replicator2 = _sys2.ActorOf(props, "replicatorSuper");
    _replicator3 = _sys3.ActorOf(props, "replicatorSuper");
}
public static void UseDefaultHttpRetryLogic()
{
    ILogger logger = Logger.Factory.CreateLogger(typeof(PhilomenaClientRetryLogic));

    // Use a jittered exponential backoff
    var delay = Backoff.DecorrelatedJitterBackoffV2(
        medianFirstRetryDelay: TimeSpan.FromSeconds(1),
        retryCount: 4);

    // Retry on transient http errors
    var defaultRetryPolicy = HttpPolicyExtensions
        .HandleTransientHttpError()
        .Or<TimeoutRejectedException>()
        .WaitAndRetryAsync(delay, (result, timeout, attempt, context) =>
        {
            logger.LogWarning(result.Exception, "Request #{Attempt} failed. Retrying in {Timeout}", attempt, timeout);
        });

    // Timeout requests
    var defaultTimeoutPolicy = Policy
        .TimeoutAsync<HttpResponseMessage>(TimeSpan.FromSeconds(2));

    // Wrap the default policies
    var defaultPolicy = Policy.WrapAsync(defaultRetryPolicy, defaultTimeoutPolicy);

    // Configure Flurl to use the backoff policy by overriding the HttpClientFactory
    FlurlHttp.Configure(settings =>
    {
        settings.HttpClientFactory = new PollyHttpClientFactory(defaultPolicy);
    });
}
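With FlurlHttp configured this way, every subsequent Flurl call flows through the retry and timeout pipeline; a hypothetical request for illustration (placeholder URL; requires `using Flurl.Http;`):

// Any Flurl call now retries transient failures with jittered delays.
var body = await "https://example.com/api/images".GetStringAsync();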
public void Queue_Processing_CorruptedEvent()
{
    // Enqueue events for further handling
    bus.Subscribe(testService.Enque<TestEvent>());

    // Expect the test to surface an Exception event
    Exception exception = null;
    bus.Subscribe<Exception>(e => exception = e);

    int counter = 0;
    int backoffIntervalMs = 50;

    // Subscribe a handler which always throws; retries are spaced 50 ms apart
    Delegate<TestEvent> handler = (e) =>
    {
        counter++;
        throw new NotImplementedException();
    };
    bus.Subscribe(handler.WhenQueued().RetryQueued(3,
        Backoff.Fixed(TimeSpan.FromMilliseconds(backoffIntervalMs))));

    bus.Publish(new TestEvent(), this);

    // There is an event to handle
    Assert.AreEqual(1, repository.Queue.Where(e => e.DeclaringEventType == typeof(TestEvent).AssemblyQualifiedName).Count());

    testService.ProcessEvents();

    Assert.AreEqual(0, repository.Queue.Where(e => e.DeclaringEventType == typeof(TestEvent).ToString()).Count());
    Assert.IsNotNull(exception);
    Assert.IsInstanceOfType(exception, typeof(FormatException));
}
private static void Demo6BackSupervision()
{
    Console.WriteLine("Creating SimpleActor Props with BackOff Supervisor");
    var childProps = SimpleActor.CreateProps();
    var supervisor = BackoffSupervisor.Props(
        Backoff.OnFailure(
            childProps: childProps,
            childName: "SimpleActor",
            minBackoff: TimeSpan.FromSeconds(3),
            maxBackoff: TimeSpan.FromSeconds(60),
            randomFactor: 0.2)
        .WithAutoReset(TimeSpan.FromSeconds(160)));

    var simple = ActorsSystem.Instance.ActorOf(supervisor, "SimpleActor:Supervisor");

    Console.WriteLine("Press enter to raise an exception");
    Console.ReadLine();
    simple.Tell(new ExceptionMessage());
    Console.WriteLine("Exception raised");
    Console.ReadLine();
    simple.Tell(new ExceptionMessage());
    Console.WriteLine("Exception raised");
    Console.ReadLine();
    simple.Tell(new ExceptionMessage());
    Console.WriteLine("Exception raised");
}
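SimpleActor and ExceptionMessage are not shown in the demo; one plausible minimal shape, assuming the actor simply throws when it receives an ExceptionMessage. WithAutoReset(160s) then resets the restart count once the child survives that long without failing:

// Hypothetical shapes for the types used above.
public class ExceptionMessage { }

public class SimpleActor : ReceiveActor
{
    public static Props CreateProps() => Props.Create<SimpleActor>();

    public SimpleActor()
    {
        Receive<ExceptionMessage>(_ => Fail());
    }

    // Crashing here makes the BackoffSupervisor schedule a delayed restart.
    private void Fail() => throw new InvalidOperationException("boom");
}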
public void Backoff_WithFastFirstEqualToTrue_ResultIsZero()
{
    // Arrange
    var medianFirstDelay = TimeSpan.FromSeconds(2);
    const int retryCount = 10;
    const bool fastFirst = true;
    const int seed = 1;

    // Act
    IEnumerable<TimeSpan> result = Backoff.DecorrelatedJitterBackoffV2(medianFirstDelay, retryCount, seed, fastFirst);

    // Assert
    result.Should().NotBeNull();
    result = result.ToList();
    result.Should().HaveCount(retryCount);

    bool first = true;
    int t = 0;
    foreach (TimeSpan timeSpan in result)
    {
        if (first)
        {
            timeSpan.Should().Be(TimeSpan.FromMilliseconds(0));
            first = false;
        }
        else
        {
            t++;
            AssertOnRetryDelayForTry(t, timeSpan, medianFirstDelay);
        }
    }
}
private static IAsyncPolicy BuildRetryPolicy(IDbConnectionFactory connectionFactory, QueryLoggingContext loggingContext)
{
    return connectionFactory.RetryPolicy.WaitAndRetryAsync(
        Backoff.DecorrelatedJitterBackoffV2(TimeSpan.FromMilliseconds(100), MaxRetryAttempts),
        (ex, delay, retryAttempt, ctx) => loggingContext.Retry(retryAttempt, delay));
}
public void Backoff_WithFastFirstEqualToTrue_ResultIsZero()
{
    // Arrange
    var minDelay = TimeSpan.FromMilliseconds(1);
    var maxDelay = TimeSpan.FromMilliseconds(2);
    const int retryCount = 3;
    const bool fastFirst = true;
    const int seed = 1;

    // Act
    IEnumerable<TimeSpan> result = Backoff.AwsDecorrelatedJitterBackoff(minDelay, maxDelay, retryCount, seed, fastFirst);

    // Assert
    result.Should().NotBeNull();
    result = result.ToList();
    result.Should().HaveCount(retryCount);

    bool first = true;
    foreach (TimeSpan timeSpan in result)
    {
        if (first)
        {
            timeSpan.Should().Be(TimeSpan.FromMilliseconds(0));
            first = false;
        }
        else
        {
            timeSpan.Should().BeGreaterOrEqualTo(minDelay);
            timeSpan.Should().BeLessOrEqualTo(maxDelay);
        }
    }
}
private Policy<T> BuildPolicy<T>()
{
    var delay = Backoff.ExponentialBackoff(
        TimeSpan.FromMilliseconds(_backOffInitialInterval),
        _maxAttempts - 1,
        _backOffMultiplier,
        true);

    var retryPolicy = Policy<T>
        .HandleInner<Exception>(e => _retryableExceptions.Classify(e))
        .WaitAndRetry(delay, (delegateResult, time, count, context) => OnRetry(delegateResult, time, count, context));

    var fallbackPolicy = Policy<T>
        .Handle<Exception>()
        .Fallback<T>(
            (delegateResult, context, token) =>
            {
                var retryContext = GetRetryContext(context);
                retryContext.LastException = delegateResult.Exception;

                var callback = retryContext.GetAttribute(RECOVERY_CALLBACK_KEY) as IRecoveryCallback;
                var result = default(T);
                if (callback != null)
                {
                    result = (T)callback.Recover(retryContext);
                    retryContext.SetAttribute(RECOVERED, true);
                    retryContext.SetAttribute(RECOVERED_RESULT, result);
                }
                else if (delegateResult.Exception != null)
                {
                    throw delegateResult.Exception;
                }

                return result;
            },
            (ex, context) => { });

    return fallbackPolicy.Wrap(retryPolicy);
}
public void BackoffSupervisor_must_not_reply_to_sender_if_replyWhileStopped_is_not_specified()
{
    EventFilter.Exception<TestException>().Expect(1, () =>
    {
        var supervisor = Create(Backoff.OnFailure(Child.Props(TestActor), "c1",
            TimeSpan.FromSeconds(100), TimeSpan.FromSeconds(300), 0.2, -1));

        supervisor.Tell(BackoffSupervisor.GetCurrentChild.Instance);
        var c1 = ExpectMsg<BackoffSupervisor.CurrentChild>().Ref;
        Watch(c1);

        supervisor.Tell(BackoffSupervisor.GetRestartCount.Instance);
        ExpectMsg<BackoffSupervisor.RestartCount>().Count.Should().Be(0);

        c1.Tell("boom");
        ExpectTerminated(c1);
        AwaitAssert(() =>
        {
            supervisor.Tell(BackoffSupervisor.GetRestartCount.Instance);
            ExpectMsg<BackoffSupervisor.RestartCount>().Count.Should().Be(1);
        });

        supervisor.Tell("boom"); // this will be sent to deadLetters
        ExpectNoMsg(500);
    });
}
private static void AddClients(this IServiceCollection services)
{
    var delay = Backoff.DecorrelatedJitterBackoffV2(
        medianFirstRetryDelay: TimeSpan.FromMilliseconds(100), retryCount: 10, fastFirst: true);

    services.AddHttpClient<IMarketDataClient, MarketDataClient>("Trakx.Shrimpy.ApiClient.MarketDataClient")
        .AddPolicyHandler((s, request) => Policy<HttpResponseMessage>
            .Handle<ApiException>()
            .Or<HttpRequestException>()
            .OrTransientHttpStatusCode()
            .WaitAndRetryAsync(delay, onRetry: (result, timeSpan, retryCount, context) =>
            {
                var logger = Log.Logger.ForContext<MarketDataClient>();
                logger.LogApiFailure(result, timeSpan, retryCount, context);
            })
            .WithPolicyKey("Trakx.Shrimpy.ApiClient.MarketDataClient"));

    services.AddHttpClient<IAccountsClient, AccountsClient>("Trakx.Shrimpy.ApiClient.AccountsClient")
        .AddPolicyHandler((s, request) => Policy<HttpResponseMessage>
            .Handle<ApiException>()
            .Or<HttpRequestException>()
            .OrTransientHttpStatusCode()
            .WaitAndRetryAsync(delay, onRetry: (result, timeSpan, retryCount, context) =>
            {
                var logger = Log.Logger.ForContext<AccountsClient>();
                logger.LogApiFailure(result, timeSpan, retryCount, context);
            })
            .WithPolicyKey("Trakx.Shrimpy.ApiClient.AccountsClient"));
}
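Once AddClients has run during service registration, the typed clients resolve from the container as usual; a hypothetical consumption sketch:

// Hypothetical: resolve the typed client registered above from the container.
using var provider = services.BuildServiceProvider();
var marketData = provider.GetRequiredService<IMarketDataClient>();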
public void BackoffOnRestartSupervisor_must_respect_withinTimeRange_property_of_OneForOneStrategy()
{
    var probe = CreateTestProbe();

    // withinTimeRange indicates the time range in which maxNrOfRetries will cause the child to
    // stop. I.e. if we restart more than maxNrOfRetries in a time range longer than withinTimeRange,
    // that is acceptable.
    var options = Backoff.OnFailure(TestActor.Props(probe.Ref), "someChildName", 300.Milliseconds(), 10.Seconds(), 0.0, -1)
        .WithSupervisorStrategy(new OneForOneStrategy(3, 1.Seconds(), ex =>
            ex is StoppingException
                ? Directive.Stop
                : SupervisorStrategy.DefaultStrategy.Decider.Decide(ex)));
    var supervisor = Sys.ActorOf(BackoffSupervisor.Props(options));

    supervisor.Tell(BackoffSupervisor.GetCurrentChild.Instance);
    probe.ExpectMsg("STARTED");
    probe.Watch(supervisor);

    // Throw three times rapidly
    for (int i = 1; i <= 3; i++)
    {
        supervisor.Tell("THROW");
        probe.ExpectMsg("STARTED");
    }

    // Now wait the length of our window, and throw again. We should still restart.
    Thread.Sleep(1000);
    supervisor.Tell("THROW");
    probe.ExpectMsg("STARTED");

    // Now we'll issue three more requests, and should be terminated.
    supervisor.Tell("THROW");
    probe.ExpectMsg("STARTED");
    supervisor.Tell("THROW");
    probe.ExpectMsg("STARTED");
    supervisor.Tell("THROW");
    probe.ExpectTerminated(supervisor);
}
// Called when the web socket establishes a connection
private void OnConnect(object sender, EventArgs args)
{
    if (_cts.IsCancellationRequested)
    {
        Log.To.ChangeTracker.I(Tag, "{0} Cancellation requested, aborting in OnConnect", this);
        return;
    }

    Misc.SafeDispose(ref _responseLogic);
    _responseLogic = new WebSocketLogic();
    _responseLogic.OnCaughtUp = () => Client?.ChangeTrackerCaughtUp(this);
    _responseLogic.OnChangeFound = (change) =>
    {
        if (!ReceivedChange(change))
        {
            Log.To.ChangeTracker.W(Tag, "change is not parseable");
        }
    };

    Backoff.ResetBackoff();
    Log.To.ChangeTracker.V(Tag, "{0} websocket opened", this);

    // Now that the WebSocket is open, send the changes-feed options (the ones that would have
    // gone in the POST body if this were HTTP-based).
    var bytes = GetChangesFeedPostBody().ToArray();
    _client?.SendAsync(bytes, null);
}
public SqlServerTransientFaultRetryPolicyFactory(
    SqlServerDataStoreConfiguration sqlServerDataStoreConfiguration,
    IPollyRetryLoggerFactory pollyRetryLoggerFactory)
{
    EnsureArg.IsNotNull(sqlServerDataStoreConfiguration, nameof(sqlServerDataStoreConfiguration));
    EnsureArg.IsNotNull(pollyRetryLoggerFactory, nameof(pollyRetryLoggerFactory));

    SqlServerTransientFaultRetryPolicyConfiguration transientFaultRetryPolicyConfiguration =
        sqlServerDataStoreConfiguration.TransientFaultRetryPolicy;

    IEnumerable<TimeSpan> sleepDurations = Backoff.ExponentialBackoff(
        transientFaultRetryPolicyConfiguration.InitialDelay,
        transientFaultRetryPolicyConfiguration.RetryCount,
        transientFaultRetryPolicyConfiguration.Factor,
        transientFaultRetryPolicyConfiguration.FastFirst);

    PolicyBuilder policyBuilder = Policy
        .Handle<SqlException>(sqlException => sqlException.IsTransient())
        .Or<TimeoutException>();

    Action<Exception, TimeSpan, int, Context> onRetryLogger = pollyRetryLoggerFactory.Create();

    _retryPolicy = policyBuilder.WaitAndRetryAsync(
        sleepDurations,
        onRetry: onRetryLogger);
}
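A hypothetical caller for context; the snippet does not show how _retryPolicy is exposed, so the policy variable, connection string, and query below are illustrative only:

// Illustrative only: run a SQL operation under the transient-fault retry policy.
await retryPolicy.ExecuteAsync(async () =>
{
    using var connection = new SqlConnection(connectionString);
    await connection.OpenAsync();

    using var command = connection.CreateCommand();
    command.CommandText = "SELECT 1";
    await command.ExecuteScalarAsync();
});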
// Called when the web socket connection is closed
private void OnClose(object sender, CloseEventArgs args)
{
    if (_client != null)
    {
        if (args.Code == (ushort)CloseStatusCode.ProtocolError)
        {
            // This is not a valid web socket connection, need to fall back to regular HTTP
            CanConnect = false;
            Stopped(ErrorResolution.RetryNow);
        }
        else
        {
            if (Backoff.CanContinue)
            {
                Log.To.ChangeTracker.I(Tag, "{0} remote {1} closed connection ({2} {3})",
                    this, args.WasClean ? "cleanly" : "forcibly", args.Code, args.Reason);
                Backoff.DelayAppropriateAmountOfTime().ContinueWith(t => _client?.ConnectAsync());
            }
            else
            {
                Stopped(ErrorResolution.RetryLater);
            }
        }
    }
    else
    {
        Log.To.ChangeTracker.I(Tag, "{0} is closed", this);
        Stopped(ErrorResolution.Stop);
    }
}
public static void Configure(IConfiguration configuration, IServiceCollection serviceCollection)
{
    serviceCollection.AddOptions<ShikiOptions>().Bind(configuration.GetSection(Constants.NAME));

    var policy = HttpPolicyExtensions.HandleTransientHttpError()
        .OrResult(message => message.StatusCode == HttpStatusCode.TooManyRequests)
        .WaitAndRetryAsync(Backoff.DecorrelatedJitterBackoffV2(TimeSpan.FromSeconds(10), 5));

    serviceCollection.AddHttpClient(Constants.NAME).AddPolicyHandler(policy).AddHttpMessageHandler(provider =>
    {
        var logger = provider.GetRequiredService<ILogger<IRateLimiter<ShikiClient>>>();
        var rl = new RateLimit(90, TimeSpan.FromMinutes(1.05d)); // 90 rpm with .05 as inaccuracy
        return RateLimiterFactory.Create(rl, logger).ToHttpMessageHandler();
    }).AddHttpMessageHandler(provider =>
    {
        var logger = provider.GetRequiredService<ILogger<IRateLimiter<ShikiClient>>>();
        var rl = new RateLimit(5, TimeSpan.FromSeconds(1.05d)); // 5 rps with .05 as inaccuracy
        return RateLimiterFactory.Create(rl, logger).ToHttpMessageHandler();
    }).ConfigureHttpClient((provider, client) =>
    {
        client.DefaultRequestHeaders.UserAgent.Clear();
        client.DefaultRequestHeaders.UserAgent.ParseAdd($"{provider.GetRequiredService<IOptions<ShikiOptions>>().Value.ShikimoriAppName}");
        client.BaseAddress = new(Wrapper.Constants.BASE_URL);
    });

    serviceCollection.AddSingleton<ShikiClient>(provider =>
    {
        var factory = provider.GetRequiredService<IHttpClientFactory>();
        var logger = provider.GetRequiredService<ILogger<ShikiClient>>();
        return new(factory.CreateClient(Constants.NAME), logger);
    });
    serviceCollection.AddSingleton<IExecuteOnStartupService, ShikiExecuteOnStartupService>();
    serviceCollection.AddSingleton<IUserFeaturesService<ShikiUserFeatures>, ShikiUserFeaturesService>();
    serviceCollection.AddSingleton<ShikiUserService>();
    serviceCollection.AddSingleton<IUpdateProvider, ShikiUpdateProvider>();
}
private void RetryOrStopIfNecessary(HttpStatusCode statusCode)
{
    if (!IsRunning || ((int)statusCode >= 200 && (int)statusCode <= 299))
    {
        return;
    }

    if (!Continuous)
    {
        _workExecutor.StartNew(Stop);
        return;
    }

    if (!Misc.IsTransientError(statusCode))
    {
        // Log.To.ChangeTracker.I(Tag, String.Format("{0} got a non-transient error ({1}), stopping NOW...", this, statusCode));
        _workExecutor.StartNew(Stop);
        return;
    }

    Log.To.ChangeTracker.I(Tag, "{0} transient error ({1}) detected, sleeping for {2}ms...",
        this, statusCode, Backoff.GetSleepTime().TotalMilliseconds);
    Backoff.DelayAppropriateAmountOfTime().ContinueWith(t => PerformRetry(true));
}
private void RetryOrStopIfNecessary(Exception e)
{
    if (!IsRunning)
    {
        return;
    }

    if (e == null)
    {
        // No error occurred, keep going if continuous
        if (Continuous)
        {
            PerformRetry(false);
        }
        else
        {
            _workExecutor.StartNew(Stop);
        }

        return;
    }

    Error = Misc.Flatten(e).First();

    string statusCode;
    if (Misc.IsTransientNetworkError(e, out statusCode))
    {
        // Transient error occurred in a replication -> RETRY or STOP
        if (!Continuous && !Backoff.CanContinue)
        {
            // Give up for non-continuous
            Log.To.ChangeTracker.I(Tag, "{0} transient error ({1}) detected, giving up NOW...", this, statusCode);
            _workExecutor.StartNew(Stop);
            return;
        }

        // Keep retrying for continuous
        Log.To.ChangeTracker.I(Tag, "{0} transient error ({1}) detected, sleeping for {2}ms...",
            this, statusCode, Backoff.GetSleepTime().TotalMilliseconds);
        Backoff.DelayAppropriateAmountOfTime().ContinueWith(t => PerformRetry(true));
        return;
    }

    if (String.IsNullOrEmpty(statusCode))
    {
        Log.To.ChangeTracker.I(Tag, String.Format("{0} got an exception, stopping NOW...", this), e);
    }
    else
    {
        Log.To.ChangeTracker.I(Tag, String.Format("{0} got a non-transient error ({1}), stopping NOW...", this, statusCode));
    }

    // Non-transient error occurred -> STOP
    _workExecutor.StartNew(Stop);
}
public static RetryHandler<E> WaitAndRetry<E>(this Handler @this, Func<E, bool> predicate, int? attempts = null, Backoff backoff = null)
    where E : Exception
{
    return new RetryHandler<E>(@this, predicate, attempts ?? Three, backoff ?? Exponential);
}
/// <summary>
/// Builds a <see cref="SqlStrategy"/> with a policy for retrying
/// actions on transaction failures.
/// </summary>
/// <param name="sqlStrategy">The SQL strategy.</param>
/// <param name="exceptionHandlingStrategy">
/// The exception handling strategy used to determine which exceptions
/// should be retried.
/// </param>
/// <param name="sqlStrategyConfiguration">
/// An <see cref="SqlStrategyOptions"/> containing configuration parameters.
/// </param>
/// <returns>The strategy builder instance.</returns>
public static SqlStrategyBuilder Retry(this SqlStrategyBuilder sqlStrategy,
    IExceptionHandlingStrategy exceptionHandlingStrategy, SqlStrategyOptions sqlStrategyConfiguration)
{
    var backoff = Backoff.ExponentialBackoff(TimeSpan.FromSeconds(2), sqlStrategyConfiguration.RetryCount());

    sqlStrategy.Policies.Add(Policy
        .Handle<SqlException>(exceptionHandlingStrategy.ShouldHandle)
        .WaitAndRetry(backoff, SqlStrategyLoggingDelegates.OnRetry)
        .WithPolicyKey(SqlServerPolicyKeys.TransactionPolicy));
    sqlStrategy.Policies.Add(Policy
        .Handle<SqlException>(exceptionHandlingStrategy.ShouldHandle)
        .WaitAndRetryAsync(backoff, SqlStrategyLoggingDelegates.OnRetryAsync)
        .WithPolicyKey(SqlServerPolicyKeys.TransactionPolicyAsync));

    return sqlStrategy;
}
public GetTopicsOfNamespaceRetry(NamespaceName @namespace, Backoff backoff, long remainingTime, Mode mode, IActorRef replyTo)
{
    Namespace = @namespace;
    Backoff = backoff;
    RemainingTime = remainingTime;
    Mode = mode;
    ReplyTo = replyTo;
}
internal RingBufferDispatcher(int mailboxSize, long fixedBackoff, int throttlingCount)
{
    closed = new AtomicBoolean(false);
    backoff = fixedBackoff == 0L ? new Backoff() : new Backoff(fixedBackoff);
    RequiresExecutionNotification = fixedBackoff == 0L;
    Mailbox = new SharedRingBufferMailbox(this, mailboxSize);
    this.throttlingCount = throttlingCount;
}
public void C3Test()
{
    for (int i = 0; i < TestIterations; i++)
    {
        long backOff = Backoff.GetBackoff(2, 1);
        Assert.IsTrue(backOff == 0 || backOff == 1 || backOff == 7, "Backoff = " + backOff);
    }
}
private static IAsyncPolicy<HttpResponseMessage> GetRetryPolicy()
{
    var delay = Backoff.DecorrelatedJitterBackoffV2(medianFirstRetryDelay: TimeSpan.FromSeconds(1), retryCount: 5);

    return HttpPolicyExtensions
        .HandleTransientHttpError()
        .WaitAndRetryAsync(delay);
}
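A policy factory like this is typically attached when registering a client; a minimal wiring sketch (the client name is a placeholder; AddPolicyHandler comes from Microsoft.Extensions.Http.Polly):

// Hypothetical registration: attach the retry policy to a named HttpClient.
services.AddHttpClient("backend")
    .AddPolicyHandler(GetRetryPolicy());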