/// <summary>
/// Verifies that the recorded operation attempt times are spaced consistently
/// with the retry interval configured on the policy.
/// </summary>
/// <param name="times">The UTC times at which each attempt was made.</param>
/// <param name="policy">The policy whose <c>RetryInterval</c> is expected between consecutive attempts.</param>
private void VerifyIntervals(List<DateTime> times, LinearRetryPolicy policy)
{
    var lastIndex = times.Count - 1;

    // Check each adjacent pair of attempt times against the policy interval.
    for (int index = 0; index < lastIndex; index++)
    {
        var intervalOk = VerifyInterval(times[index], times[index + 1], policy.RetryInterval);

        Assert.True(intervalOk);
    }
}
/// <summary>
/// Get issue count from the server. This operation may be retried internally and take a while to complete.
/// </summary>
/// <remarks>Uses the REST API <a href="https://www.jetbrains.com/help/youtrack/standalone/Get-a-Number-of-Issues.html">Get a Number of Issues</a>.</remarks>
/// <param name="filter">Apply a filter to issues.</param>
/// <returns>The number of <see cref="Issue" /> that match the specified filter.</returns>
/// <exception cref="T:System.Net.HttpRequestException">When the call to the remote YouTrack server instance failed.</exception>
public async Task<long> GetIssueCount(string filter = null)
{
    // Build the query string only when a filter was supplied.
    var query = string.IsNullOrEmpty(filter)
        ? string.Empty
        : $"filter={WebUtility.UrlEncode(filter)}";

    var client = await _connection.GetAuthenticatedHttpClient();

    // The server returns a negative count while it is still computing the
    // result, so the retry policy polls (once per second, up to 30 times)
    // until a non-negative value comes back.
    var retryPolicy = new LinearRetryPolicy<long>(
        async () =>
        {
            var response = await client.GetAsync($"rest/issue/count?{query}");

            if (response.StatusCode == HttpStatusCode.NotFound)
            {
                return default(long);
            }

            response.EnsureSuccessStatusCode();

            var content = await response.Content.ReadAsStringAsync();

            return JsonConvert.DeserializeObject<SubValue<long>>(content).Value;
        },
        result => Task.FromResult(result < 0),
        TimeSpan.FromSeconds(1),
        30);

    return await retryPolicy.Execute();
}
/// <summary>
/// Used to start the fixture within a <see cref="ComposedFixture"/>.
/// </summary>
/// <param name="image">
/// Optionally specifies the NATS container image.  This defaults to
/// <b>nkubeio/nats:latest</b> or <b>nkubedev/nats:latest</b> depending
/// on whether the assembly was built from a git release branch or not.
/// </param>
/// <param name="name">Optionally specifies the container name (defaults to <c>nats-test</c>).</param>
/// <param name="args">Optional NATS server command line arguments.</param>
public void StartAsComposed(
    string   image = null,
    string   name  = "nats-test",
    string[] args  = null)
{
    image = image ?? $"{KubeConst.NeonBranchRegistry}/nats:latest";

    base.CheckWithinAction();

    // Publish the standard NATS client, monitoring, and cluster routing ports.
    var dockerArgs = new string[]
    {
        "--detach",
        "-p", "4222:4222",
        "-p", "8222:8222",
        "-p", "6222:6222"
    };

    if (!IsRunning)
    {
        StartAsComposed(name, image, dockerArgs, args);
    }

    var factory = new ConnectionFactory();
    var retry   = new LinearRetryPolicy(exception => true, 20, TimeSpan.FromSeconds(0.5));

    // The server may need a moment before it accepts connections, so retry
    // for up to ~10 seconds (20 x 0.5s).
    //
    // BUGFIX: use the synchronous [Invoke()] rather than the original
    // [InvokeAsync(...).Wait()] — [CreateConnection()] is synchronous, so
    // blocking on an async wrapper only risks deadlocks and wastes a pool
    // thread.  This also matches the sibling fixtures that call [Invoke()].
    retry.Invoke(
        () =>
        {
            Connection = factory.CreateConnection();
        });
}
/// <summary>
/// Signals the Docker orchestrator to begin scheduling service tasks on a node.
/// </summary>
/// <param name="nodeName">Identifies the target node.</param>
/// <exception cref="KeyNotFoundException">Thrown if the named node does not exist.</exception>
/// <exception cref="InvalidOperationException">Thrown if the node is not part of the swarm.</exception>
public void ActivateNode(string nodeName)
{
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(nodeName));

    var node = hive.GetNode(nodeName);

    if (!node.Metadata.InSwarm)
    {
        throw new InvalidOperationException($"Node [{nodeName}] is not part of the swarm.");
    }

    // I've seen transient errors, so we'll retry a few times.

    var manager = hive.GetReachableManager();
    var retry   = new LinearRetryPolicy(typeof(Exception), maxAttempts: 5, retryInterval: TimeSpan.FromSeconds(5));

    // BUGFIX: use the synchronous [Invoke()] rather than the original
    // [InvokeAsync(...).Wait()] — [SudoCommand()] is synchronous, so blocking
    // on an async wrapper only risks deadlocks and wastes a pool thread.
    retry.Invoke(
        () =>
        {
            var response = manager.SudoCommand($"docker node update --availability active {nodeName}");

            if (response.ExitCode != 0)
            {
                throw new Exception(response.ErrorSummary);
            }
        });
}
public void Defaults()
{
    // A policy constructed with only a transient detector must expose the
    // documented defaults: 5 attempts spaced 1 second apart.
    var policy = new LinearRetryPolicy(TransientDetector);

    Assert.Equal(5, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(1), policy.RetryInterval);
}
public void FailDelayed()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    // The first attempt fails transiently (retried); the second fails with a
    // non-transient exception which must propagate out of [Invoke()].
    Assert.Throws<NotImplementedException>(
        () =>
        {
            policy.Invoke(
                () =>
                {
                    attempts.Add(DateTime.UtcNow);

                    if (attempts.Count >= 2)
                    {
                        throw new NotImplementedException();
                    }

                    throw new TransientException();
                });
        });

    // Exactly two attempts, spaced per the policy's retry interval.
    Assert.Equal(2, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task FailDelayed_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    // The first attempt fails transiently (retried); the second fails with a
    // non-transient exception which must propagate out of [InvokeAsync()].
    await Assert.ThrowsAsync<NotImplementedException>(
        async () =>
        {
            await policy.InvokeAsync<string>(
                async () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    await Task.Delay(0);

                    if (attempts.Count >= 2)
                    {
                        throw new NotImplementedException();
                    }

                    throw new TransientException();
                });
        });

    // Exactly two attempts, spaced per the policy's retry interval.
    Assert.Equal(2, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task SuccessDelayedAggregateArray()
{
    // The policy is constructed with an array of two transient exception types;
    // this test verifies that both are honored when wrapped in an AggregateException.
    var policy  = new LinearRetryPolicy(new Type[] { typeof(NotReadyException), typeof(KeyNotFoundException) });
    var times   = new List<DateTime>();
    var success = false;

    await policy.InvokeAsync(
        async () =>
        {
            times.Add(DateTime.UtcNow);
            await Task.Delay(0);

            if (times.Count < policy.MaxAttempts)
            {
                // Alternate between the two registered transient exception types
                // so both entries of the policy's type array are exercised.
                //
                // BUGFIX: the original tested [times.Count % 1 == 0], which is
                // always true, so the KeyNotFoundException branch was unreachable
                // and the second array entry was never actually tested.
                if (times.Count % 2 == 0)
                {
                    throw new AggregateException(new NotReadyException());
                }
                else
                {
                    throw new AggregateException(new KeyNotFoundException());
                }
            }

            success = true;
        });

    Assert.True(success);
    Assert.Equal(policy.MaxAttempts, times.Count);
    VerifyIntervals(times, policy);
}
/// <summary>
/// Used to start the fixture within a <see cref="ComposedFixture"/>.
/// </summary>
/// <param name="image">
/// Optionally specifies the NATS container image.  This defaults to
/// <b>ghcr.io/neonrelease/nats:latest</b> or <b>ghcr.io/ghcr.io/neonrelease-dev/nats:latest</b> depending
/// on whether the assembly was built from a git release branch or not.
/// </param>
/// <param name="name">Optionally specifies the container name (defaults to <c>nats-test</c>).</param>
/// <param name="args">Optional NATS server command line arguments.</param>
/// <param name="hostInterface">
/// Optionally specifies the host interface where the container public ports will be
/// published.  This defaults to <see cref="ContainerFixture.DefaultHostInterface"/>
/// but may be customized.  This needs to be an IPv4 address.
/// </param>
public void StartAsComposed(
    string   image         = null,
    string   name          = "nats-test",
    string[] args          = null,
    string   hostInterface = null)
{
    image = image ?? $"{NeonHelper.NeonLibraryBranchRegistry}/nats:latest";

    this.hostInterface = hostInterface;

    base.CheckWithinAction();

    // Publish the standard NATS client, monitoring, and cluster routing
    // ports on the requested host interface.
    var dockerArgs = new string[]
    {
        "--detach",
        "-p", $"{GetHostInterface(hostInterface)}:4222:4222",
        "-p", $"{GetHostInterface(hostInterface)}:8222:8222",
        "-p", $"{GetHostInterface(hostInterface)}:6222:6222"
    };

    if (!IsRunning)
    {
        StartAsComposed(name, image, dockerArgs, args);
    }

    var factory = new ConnectionFactory();
    var retry   = new LinearRetryPolicy(exception => true, 20, TimeSpan.FromSeconds(0.5));

    // The server may need a moment before it accepts connections, so retry
    // for up to ~10 seconds (20 x 0.5s).
    retry.Invoke(
        () =>
        {
            Connection = factory.CreateConnection($"nats://{GetHostInterface(hostInterface, forConnection: true)}:4222");
        });
}
public async Task SuccessCustom_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector, maxAttempts: 4, retryInterval: TimeSpan.FromSeconds(2));
    var attempts = new List<DateTime>();

    // Custom construction settings must be reflected by the policy properties.
    Assert.Equal(4, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(2), policy.RetryInterval);

    var result = await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);

            // Keep failing transiently until the final allowed attempt.
            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            return "WOOHOO!";
        });

    Assert.Equal("WOOHOO!", result);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task SuccessCustom()
{
    var policy    = new LinearRetryPolicy(TransientDetector, maxAttempts: 4, retryInterval: TimeSpan.FromSeconds(2));
    var attempts  = new List<DateTime>();
    var completed = false;

    // Custom construction settings must be reflected by the policy properties.
    Assert.Equal(4, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(2), policy.RetryInterval);

    await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.CompletedTask;

            // Keep failing transiently until the final allowed attempt.
            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            completed = true;
        });

    Assert.True(completed);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
/// <summary>
/// Establishes the server connection.
/// </summary>
private void Connect()
{
    var factory = new StanConnectionFactory();
    var retry   = new LinearRetryPolicy(exception => true, 20, TimeSpan.FromSeconds(0.5));

    // The server may need a moment before it accepts connections, so retry
    // for up to ~10 seconds (20 x 0.5s).
    retry.Invoke(
        () =>
        {
            Connection = factory.CreateConnection("test-cluster", nameof(NatsStreamingFixture));
        });
}
public async Task Timeout()
{
    var policy = new LinearRetryPolicy(TransientDetector, retryInterval: TimeSpan.FromSeconds(0.5), timeout: TimeSpan.FromSeconds(1.5));
    var times  = new List<DateTime>();

    // Runs the always-transiently-failing operation and verifies that the
    // policy settings survive and that the timeout limits us to 4 attempts.
    async Task RunAndVerifyAsync()
    {
        Assert.Equal(TimeSpan.FromSeconds(0.5), policy.RetryInterval);
        Assert.Equal(TimeSpan.FromSeconds(1.5), policy.Timeout);

        await Assert.ThrowsAsync<TransientException>(
            async () =>
            {
                await policy.InvokeAsync(
                    async () =>
                    {
                        times.Add(DateTime.UtcNow);
                        await Task.CompletedTask;
                        throw new TransientException();
                    });
            });

        Assert.Equal(4, times.Count);
    }

    // With a timeout configured, the attempt count is effectively unbounded;
    // the timeout is what stops the retries.
    Assert.Equal(int.MaxValue, policy.MaxAttempts);

    await RunAndVerifyAsync();

    // Additional test to verify this serious problem is fixed:
    //
    //      https://github.com/nforgeio/neonKUBE/issues/762
    //
    // We'll wait a bit longer to ensure that any (incorrect) deadline computed
    // by the policy when constructed above does not impact a subsequent run.

    await Task.Delay(TimeSpan.FromSeconds(4));
    times.Clear();

    await RunAndVerifyAsync();
}
/// <summary>
/// Writes a file as text, retrying if the file is already open.
/// </summary>
/// <param name="path">The file path.</param>
/// <param name="text">The text to be written.</param>
/// <returns>The <paramref name="text"/> that was written.</returns>
/// <remarks>
/// It's possible for the configuration file to be temporarily opened
/// by another process (e.g. the neonKUBE Desktop application or a
/// command line tool).  Rather than throw an exception, we're going
/// to retry the operation a few times.
/// </remarks>
internal static string WriteFileTextWithRetry(string path, string text)
{
    // Retry only on [IOException] (e.g. sharing violations); up to
    // 10 attempts spaced 200ms apart (~2 seconds total).
    var retry = new LinearRetryPolicy(typeof(IOException), maxAttempts: 10, retryInterval: TimeSpan.FromMilliseconds(200));

    // BUGFIX: use the synchronous [Invoke()] rather than the original
    // [InvokeAsync(...).Wait()] — [File.WriteAllText()] is synchronous, so
    // blocking on an async wrapper only risks deadlocks and wastes a pool thread.
    retry.Invoke(() => File.WriteAllText(path, text));

    return text;
}
/// <summary>
/// Constructs an instance from a <see cref="LinearRetryPolicy"/>.
/// </summary>
/// <param name="policy">The policy.</param>
public RetryOptions(LinearRetryPolicy policy)
{
    Covenant.Requires<ArgumentNullException>(policy != null);

    // Linear policies retry at a constant interval, which maps to a
    // backoff coefficient of 1.0.
    this.InitialInterval    = CadenceHelper.Normalize(policy.RetryInterval);
    this.BackoffCoefficient = 1.0;
    this.MaximumAttempts    = policy.MaxAttempts;

    // Only map the expiration when the source policy actually has a timeout.
    if (policy.Timeout.HasValue)
    {
        this.ExpirationInterval = CadenceHelper.Normalize(policy.Timeout.Value);
    }
}
/// <summary>
/// Initializes the database and establishes the connections.
/// </summary>
private void Initialize()
{
    // Establish the database connections.  Note that rather than using a fragile
    // warmup delay, we'll just retry establishing the connections for up to 15 seconds.
    //
    // Note that we're going to delete the Cassandra keyspace and Postgres database
    // before recreating them so they'll start out empty for each unit test.

    // BUGFIX: the original passed [new TimeSpan(15)] for the timeout, which is
    // 15 *ticks* (1.5µs), not the 15 seconds the comment above describes.
    var retry = new LinearRetryPolicy(e => true, int.MaxValue, retryInterval: TimeSpan.FromSeconds(1), timeout: TimeSpan.FromSeconds(15));

    // Establish the Cassandra session, recreating the keyspace.

    var cluster = Cluster.Builder()
        .AddContactPoint("localhost")
        .WithPort(ycqlPort)
        .Build();

    retry.Invoke(
        () =>
        {
            CassandraSession = cluster.Connect();
        });

    CassandraSession.Execute($"DROP KEYSPACE IF EXISTS \"{cassandraKeyspace}\"");
    CassandraSession.Execute($"CREATE KEYSPACE \"{cassandraKeyspace}\"");

    CassandraSession = cluster.Connect(cassandraKeyspace);

    // Establish the Postgres connection, recreating the database.
    //
    // NOTE(review): the original source was mangled here (a secret-redaction
    // pass swallowed the code between the connection string's [password=] and
    // the DROP DATABASE command).  The reconstruction below mirrors the
    // retry/open pattern used for Cassandra above — confirm against the
    // original repository.

    PostgresConnection = new NpgsqlConnection($"host=localhost;port={ysqlPort};user id=yugabyte;password=");

    retry.Invoke(
        () =>
        {
            PostgresConnection.Open();
        });

    var command = new NpgsqlCommand($"DROP DATABASE IF EXISTS \"{postgresDatabase}\"", PostgresConnection);

    command.ExecuteNonQuery();

    command = new NpgsqlCommand($"CREATE DATABASE \"{postgresDatabase}\"", PostgresConnection);

    command.ExecuteNonQuery();

    // Reconnect to the newly created database.

    PostgresConnection = new NpgsqlConnection($"host=localhost;database={postgresDatabase};port={ysqlPort};user id=yugabyte;password=");
    PostgresConnection.Open();
}
public void SuccessImmediate_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    var result = policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);
            return "WOOHOO!";
        });

    // A first-try success must not trigger any retries and must return
    // the operation's result unchanged.
    Assert.Single(attempts);
    Assert.Equal("WOOHOO!", result);
}
public void SuccessImmediate()
{
    var policy    = new LinearRetryPolicy(TransientDetector);
    var attempts  = new List<DateTime>();
    var completed = false;

    policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);
            completed = true;
        });

    // A first-try success must not trigger any retries.
    Assert.Single(attempts);
    Assert.True(completed);
}
public async Task SuccessImmediate_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    var result = await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);
            return "WOOHOO!";
        });

    // A first-try success must not trigger any retries and must return
    // the operation's result unchanged.
    Assert.Single(attempts);
    Assert.Equal("WOOHOO!", result);
}
/// <summary>
/// Signals the Docker orchestrator to drain all service tasks from a node.
/// </summary>
/// <param name="nodeName">Identifies the target node.</param>
/// <exception cref="KeyNotFoundException">Thrown if the named node does not exist.</exception>
/// <exception cref="InvalidOperationException">Thrown if the node is not part of the swarm.</exception>
public void DrainNode(string nodeName)
{
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(nodeName));

    var node = hive.GetNode(nodeName);

    if (!node.Metadata.InSwarm)
    {
        throw new InvalidOperationException($"Node [{nodeName}] is not part of the swarm.");
    }

    // I've seen transient errors, so we'll retry a few times.

    var manager = hive.GetReachableManager();
    var retry   = new LinearRetryPolicy(typeof(Exception), maxAttempts: 5, retryInterval: TimeSpan.FromSeconds(5));

    // BUGFIX: use the synchronous [Invoke()] rather than the original
    // [InvokeAsync(...).Wait()] — [SudoCommand()] is synchronous, so blocking
    // on an async wrapper only risks deadlocks and wastes a pool thread.
    retry.Invoke(
        () =>
        {
            var response = manager.SudoCommand($"docker node update --availability drain {nodeName}");

            if (response.ExitCode != 0)
            {
                throw new Exception(response.ErrorSummary);
            }
        });

    // $todo(jeff.lill):
    //
    // Ideally, we'd wait for all of the service tasks to stop but it
    // appears that there's no easy way to check for this other than
    // listing all of the hive services and then doing a
    //
    //      docker service ps SERVICE
    //
    // for each until none report running on this node.
    //
    // A hacky alternative would be to list local containers and try
    // to determine which ones look like service tasks by examining
    // the container name.

    Thread.Sleep(TimeSpan.FromSeconds(30));
}
public async Task SuccessImmediate()
{
    var policy    = new LinearRetryPolicy(TransientDetector);
    var attempts  = new List<DateTime>();
    var completed = false;

    await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);
            completed = true;
        });

    // A first-try success must not trigger any retries.
    Assert.Single(attempts);
    Assert.True(completed);
}
public void FailImmediate_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    // A non-transient exception must propagate immediately without retries.
    Assert.Throws<NotImplementedException>(
        () =>
        {
            policy.Invoke<string>(
                () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    throw new NotImplementedException();
                });
        });

    Assert.Single(attempts);
}
public void FailAll_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    // When every attempt fails transiently the policy must give up after
    // [MaxAttempts] and rethrow the transient exception.
    Assert.Throws<TransientException>(
        () =>
        {
            policy.Invoke<string>(
                () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    throw new TransientException();
                });
        });

    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task FailImmediate_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    // A non-transient exception must propagate immediately without retries.
    await Assert.ThrowsAsync<NotImplementedException>(
        async () =>
        {
            await policy.InvokeAsync<string>(
                async () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    await Task.Delay(0);
                    throw new NotImplementedException();
                });
        });

    Assert.Single(attempts);
}
public async Task FailAll_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    // When every attempt fails transiently the policy must give up after
    // [MaxAttempts] and rethrow the transient exception.
    await Assert.ThrowsAsync<TransientException>(
        async () =>
        {
            await policy.InvokeAsync<string>(
                async () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    await Task.Delay(0);
                    throw new TransientException();
                });
        });

    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
/// <summary>
/// Restarts the NATS container to clear any previous state and returns the
/// new client connection.
/// </summary>
/// <returns>The new connection.</returns>
public new IConnection Restart()
{
    base.Restart();

    // Dispose any stale connection to the old container instance.
    if (Connection != null)
    {
        Connection.Dispose();
        Connection = null;
    }

    var factory = new ConnectionFactory();
    var retry   = new LinearRetryPolicy(exception => true, 20, TimeSpan.FromSeconds(0.5));

    // The restarted server may need a moment before it accepts connections,
    // so retry for up to ~10 seconds (20 x 0.5s).
    retry.Invoke(
        () =>
        {
            Connection = factory.CreateConnection($"nats://{GetHostInterface(hostInterface, forConnection: true)}:4222");
        });

    return Connection;
}
public void SuccessDelayed_Result()
{
    var policy   = new LinearRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    var result = policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);

            // Keep failing transiently until the final allowed attempt.
            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            return "WOOHOO!";
        });

    Assert.Equal("WOOHOO!", result);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
/// <summary>
/// Restarts the NATS container to clear any previous state and returns the
/// new client connection.
/// </summary>
/// <returns>The new connection.</returns>
public new IConnection Restart()
{
    base.Restart();

    // Dispose any stale connection to the old container instance.
    if (Connection != null)
    {
        Connection.Dispose();
        Connection = null;
    }

    var factory = new ConnectionFactory();
    var retry   = new LinearRetryPolicy(exception => true, 20, TimeSpan.FromSeconds(0.5));

    // The restarted server may need a moment before it accepts connections,
    // so retry for up to ~10 seconds (20 x 0.5s).
    //
    // BUGFIX: use the synchronous [Invoke()] rather than the original
    // [InvokeAsync(...).Wait()] — [CreateConnection()] is synchronous, so
    // blocking on an async wrapper only risks deadlocks and wastes a pool
    // thread.  This also matches the sibling [Restart()] implementation.
    retry.Invoke(
        () =>
        {
            Connection = factory.CreateConnection();
        });

    return Connection;
}
public void SuccessDelayed()
{
    var policy    = new LinearRetryPolicy(TransientDetector);
    var attempts  = new List<DateTime>();
    var completed = false;

    policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);

            // Keep failing transiently until the final allowed attempt.
            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            completed = true;
        });

    Assert.True(completed);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public void SuccessDelayedAggregateSingle()
{
    // Policy constructed with a single transient exception type; transient
    // failures are delivered wrapped in an AggregateException.
    var policy    = new LinearRetryPolicy(typeof(NotReadyException));
    var attempts  = new List<DateTime>();
    var completed = false;

    policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);

            // Keep failing transiently until the final allowed attempt.
            if (attempts.Count < policy.MaxAttempts)
            {
                throw new AggregateException(new NotReadyException());
            }

            completed = true;
        });

    Assert.True(completed);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}