public async Task Timeout()
{
    // Policy configured with both a max attempt count and an overall timeout:
    // the 1.5 second timeout should cut retries short before all 6 attempts run.
    var policy = new ExponentialRetryPolicy(TransientDetector, maxAttempts: 6, initialRetryInterval: TimeSpan.FromSeconds(0.5), maxRetryInterval: TimeSpan.FromSeconds(4), timeout: TimeSpan.FromSeconds(1.5));
    var attempts = new List<DateTime>();

    // Sanity-check that the constructor captured the settings.
    Assert.Equal(6, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(0.5), policy.InitialRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(4), policy.MaxRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(1.5), policy.Timeout);

    await Assert.ThrowsAsync<TransientException>(
        async () =>
        {
            await policy.InvokeAsync(
                async () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    await Task.CompletedTask;
                    throw new TransientException();
                });
        });

    // The timeout must have stopped retrying before attempt #6.
    Assert.True(attempts.Count < 6);
}
public void SuccessDelayedAggregateArray()
{
    // Verifies that a policy constructed with an ARRAY of transient exception
    // types retries when either of those exceptions arrives wrapped inside
    // an AggregateException.
    var policy = new ExponentialRetryPolicy(new Type[] { typeof(NotReadyException), typeof(KeyNotFoundException) });
    var times = new List<DateTime>();
    var success = false;

    policy.Invoke(
        () =>
        {
            times.Add(DateTime.UtcNow);

            if (times.Count < policy.MaxAttempts)
            {
                // BUGFIX: the original tested [times.Count % 1 == 0], which is
                // always true, so the KeyNotFoundException branch was unreachable
                // and only NotReadyException was ever exercised.  Use % 2 so the
                // two registered transient types actually alternate per attempt.
                if (times.Count % 2 == 0)
                {
                    throw new AggregateException(new NotReadyException());
                }
                else
                {
                    throw new AggregateException(new KeyNotFoundException());
                }
            }

            success = true;
        });

    Assert.True(success);
    Assert.Equal(policy.MaxAttempts, times.Count);
    VerifyIntervals(times, policy);
}
public async Task FailDelayed()
{
    // One transient failure followed by a non-transient one: the policy should
    // retry once and then surface the non-transient exception unchanged.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    await Assert.ThrowsAsync<NotImplementedException>(
        async () =>
        {
            await policy.InvokeAsync(
                async () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    await Task.Delay(0);

                    if (attempts.Count < 2)
                    {
                        throw new TransientException();
                    }

                    throw new NotImplementedException();
                });
        });

    Assert.Equal(2, attempts.Count);
    VerifyIntervals(attempts, policy);
}
/// <summary>
/// Implements the service as a <see cref="Task"/>.
/// </summary>
/// <returns>The <see cref="Task"/>.</returns>
private static async Task RunAsync()
{
    try
    {
        var settings = new CouchbaseSettings()
        {
            Servers = new List<Uri>()
            {
                new Uri("couchbase://10.0.0.90"),
                new Uri("couchbase://10.0.0.91"),
                new Uri("couchbase://10.0.0.92")
            },
            Bucket = "stoke"
        };

        var credentials = new Credentials()
        {
            Username = Environment.GetEnvironmentVariable("TS_COUCHBASE_USERNAME"),
            Password = Environment.GetEnvironmentVariable("TS_COUCHBASE_PASSWORD")
        };

        using (var bucket = settings.OpenBucket(credentials))
        {
            // Retry transient Couchbase failures for each operation below.
            var retry = new ExponentialRetryPolicy(CouchbaseTransientDetector.IsTransient);

            for (int iteration = 0; iteration < 500000; iteration++)
            {
                var key = bucket.GenKey();

                await retry.InvokeAsync(async () => await bucket.InsertSafeAsync(key, new Document<Item>() { Id = key, Content = new Item() { Name = "Jeff", Age = 56 } }));

                var exists     = await bucket.ExistsAsync(key);
                var readResult = await bucket.GetAsync<Document<Item>>(key);

                readResult.EnsureSuccess();
            }
        }
    }
    catch (OperationCanceledException)
    {
        // Normal shutdown signal: exit quietly.
        return;
    }
    catch (Exception e)
    {
        log.LogError(e);
    }
    finally
    {
        terminator.ReadyToExit();
    }
}
// Performs the chunk concatenation, retrying up to [UploadRetryTime] times.  After a failed
// attempt we first check whether the HttpStatusCode/exception indicates a retryable failure.
// If so, we then probe for the intermediate state: when the destination GUID file already
// exists and the chunk source folder is gone, the concat actually completed server-side and
// we report success; when both still exist there is no way to recover, so we fail.
private bool PerformConcatWithRetries(out AdlsException excep)
{
    var retryPolicy = new ExponentialRetryPolicy();
    string destGuid = ChunkSegmentFolder + FileUploader.DestTempGuidForConcat;
    var chunkList = new List<string>((int)_totalChunks);

    for (int i = 0; i < _totalChunks; i++)
    {
        chunkList.Add(ChunkSegmentFolder + "/" + i);
    }

    int retries = 0;

    do
    {
        excep = PerformConcatSingle(chunkList, destGuid);

        if (excep == null)
        {
            return true;
        }

        if (!retryPolicy.ShouldRetryBasedOnHttpOutput((int)excep.HttpStatus, excep.Ex))
        {
            return false;
        }

        if (VerifyAdlExists(destGuid))
        {
            // Destination exists; the concat may have completed server-side.
            if (Client.CheckExists(ChunkSegmentFolder))
            {
                // Both destination and source folder exist: unrecoverable.
                return false;
            }

            return true;
        }
    } while (retries++ < UploadRetryTime);

    return false;
}
public void SuccessCustom_Result()
{
    // Custom settings plus a result-returning action: the value produced by the
    // final, successful attempt must be handed back by Invoke().
    var policy = new ExponentialRetryPolicy(TransientDetector, maxAttempts: 6, initialRetryInterval: TimeSpan.FromSeconds(0.5), maxRetryInterval: TimeSpan.FromSeconds(4));
    var attempts = new List<DateTime>();

    Assert.Equal(6, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(0.5), policy.InitialRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(4), policy.MaxRetryInterval);

    var result = policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);

            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            return "WOOHOO!";
        });

    Assert.Equal("WOOHOO!", result);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task SuccessCustom()
{
    // Custom settings: fail transiently until the final attempt, which succeeds.
    var policy = new ExponentialRetryPolicy(TransientDetector, maxAttempts: 6, initialRetryInterval: TimeSpan.FromSeconds(0.5), maxRetryInterval: TimeSpan.FromSeconds(4));
    var attempts = new List<DateTime>();
    var completed = false;

    Assert.Equal(6, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(0.5), policy.InitialRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(4), policy.MaxRetryInterval);

    await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);

            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            completed = true;
        });

    Assert.True(completed);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public void FailDelayed_Result()
{
    // One transient failure, then a non-transient one: Invoke<T>() should stop
    // retrying and rethrow the non-transient exception after exactly 2 attempts.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    Assert.Throws<NotImplementedException>(
        () =>
        {
            policy.Invoke<string>(
                () =>
                {
                    attempts.Add(DateTime.UtcNow);

                    if (attempts.Count < 2)
                    {
                        throw new TransientException();
                    }

                    throw new NotImplementedException();
                });
        });

    Assert.Equal(2, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public void Defaults()
{
    // Verify the default settings of a policy constructed with only a
    // transient detector and a source module name.
    var policy = new ExponentialRetryPolicy(TransientDetector, sourceModule: "test");

    Assert.Equal(5, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(1), policy.InitialRetryInterval);
    Assert.Equal(TimeSpan.FromHours(24), policy.MaxRetryInterval);
}
public SecretClientOptions()
{
    // Default pipeline policies: shared logging plus exponential retry
    // (800ms base delay, at most 3 retries).
    LoggingPolicy = Core.Pipeline.Policies.LoggingPolicy.Shared;

    RetryPolicy = new ExponentialRetryPolicy()
    {
        Delay = TimeSpan.FromMilliseconds(800),
        MaxRetries = 3
    };
}
public IdentityClientOptions()
{
    // Start from the library defaults, then install the standard exponential
    // retry policy (800ms base delay, at most 3 retries).
    AuthorityHost = DefaultAuthorityHost;
    RefreshBuffer = DefaultRefreshBuffer;

    RetryPolicy = new ExponentialRetryPolicy()
    {
        Delay = TimeSpan.FromMilliseconds(800),
        MaxRetries = 3
    };
}
public async Task Timeout()
{
    // No explicit maxAttempts here: MaxAttempts defaults to int.MaxValue and
    // the 1.5 second timeout alone limits how many attempts run.
    var policy = new ExponentialRetryPolicy(TransientDetector, initialRetryInterval: TimeSpan.FromSeconds(0.5), maxRetryInterval: TimeSpan.FromSeconds(4), timeout: TimeSpan.FromSeconds(1.5));
    var attempts = new List<DateTime>();

    // Runs the policy against an always-failing operation, recording attempt times.
    async Task InvokeAlwaysFailingAsync()
    {
        await policy.InvokeAsync(
            async () =>
            {
                attempts.Add(DateTime.UtcNow);
                await Task.CompletedTask;
                throw new TransientException();
            });
    }

    Assert.Equal(int.MaxValue, policy.MaxAttempts);
    Assert.Equal(TimeSpan.FromSeconds(0.5), policy.InitialRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(4), policy.MaxRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(1.5), policy.Timeout);

    await Assert.ThrowsAsync<TransientException>(InvokeAlwaysFailingAsync);
    Assert.Equal(3, attempts.Count);

    // Additional test to verify this serious problem is fixed:
    //
    //      https://github.com/nforgeio/neonKUBE/issues/762
    //
    // We'll wait a bit longer to ensure that any (incorrect) deadline computed
    // by the policy when constructed above does not impact a subsequent run.

    await Task.Delay(TimeSpan.FromSeconds(4));
    attempts.Clear();

    Assert.Equal(TimeSpan.FromSeconds(0.5), policy.InitialRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(4), policy.MaxRetryInterval);
    Assert.Equal(TimeSpan.FromSeconds(1.5), policy.Timeout);

    await Assert.ThrowsAsync<TransientException>(InvokeAlwaysFailingAsync);
    Assert.Equal(3, attempts.Count);
}
private static async Task Main()
{
    Console.WriteLine("Starting program execution...");

    var retryPolicy = ExponentialRetryPolicy.GetPolicy();

    try
    {
        // Let the policy drive the (possibly repeated) API call.
        await retryPolicy.ExecuteAsync(CallApi);
    }
    catch (HttpRequestException)
    {
        // All retries were exhausted.
        Console.WriteLine("Oh no! The call failed.");
    }
}
/// <summary>
/// Constructs an instance from a <see cref="ExponentialRetryPolicy"/>.
/// </summary>
/// <param name="policy">The policy.</param>
public RetryOptions(ExponentialRetryPolicy policy)
{
    Covenant.Requires<ArgumentNullException>(policy != null);

    // Exponential policies always double the interval between attempts.
    this.InitialInterval    = CadenceHelper.Normalize(policy.InitialRetryInterval);
    this.BackoffCoefficient = 2.0;
    this.MaximumInterval    = CadenceHelper.Normalize(policy.MaxRetryInterval);
    this.MaximumAttempts    = policy.MaxAttempts;

    // Timeout is optional; map it to the expiration interval only when set.
    if (policy.Timeout.HasValue)
    {
        this.ExpirationInterval = CadenceHelper.Normalize(policy.Timeout.Value);
    }
}
/// <summary>
/// Verify that operation retry times are consistent with the retry policy.
/// </summary>
/// <param name="times">The recorded UTC timestamps for each attempt, in order.</param>
/// <param name="policy">The policy whose backoff intervals are being verified.</param>
private void VerifyIntervals(List<DateTime> times, ExponentialRetryPolicy policy)
{
    var expectedInterval = policy.InitialRetryInterval;

    for (int attempt = 0; attempt < times.Count - 1; attempt++)
    {
        Assert.True(VerifyInterval(times[attempt], times[attempt + 1], expectedInterval));

        // Each retry doubles the wait, capped at the policy maximum.
        expectedInterval = TimeSpan.FromTicks(expectedInterval.Ticks * 2);

        if (expectedInterval > policy.MaxRetryInterval)
        {
            expectedInterval = policy.MaxRetryInterval;
        }
    }
}
public void SuccessImmediate_Result()
{
    // A first-try success with a result: exactly one attempt and the value returned.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    var result = policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);
            return "WOOHOO!";
        });

    Assert.Single(attempts);
    Assert.Equal("WOOHOO!", result);
}
public void SuccessImmediate()
{
    // A first-try success: the action runs exactly once.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();
    var completed = false;

    policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);
            completed = true;
        });

    Assert.Single(attempts);
    Assert.True(completed);
}
public async Task SuccessImmediate_Result()
{
    // A first-try async success with a result: one attempt, value passed through.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    var result = await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);
            return "WOOHOO!";
        });

    Assert.Single(attempts);
    Assert.Equal("WOOHOO!", result);
}
public void FailImmediate_Result()
{
    // A non-transient exception on the first attempt: no retries at all.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    Assert.Throws<NotImplementedException>(
        () =>
        {
            policy.Invoke<string>(
                () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    throw new NotImplementedException();
                });
        });

    Assert.Single(attempts);
}
public async Task SuccessImmediate()
{
    // A first-try async success: the action runs exactly once.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();
    var completed = false;

    await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);
            completed = true;
        });

    Assert.Single(attempts);
    Assert.True(completed);
}
public async Task FailImmediate_Result()
{
    // A non-transient exception on the first async attempt: no retries at all.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    await Assert.ThrowsAsync<NotImplementedException>(
        async () =>
        {
            await policy.InvokeAsync<string>(
                async () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    await Task.Delay(0);
                    throw new NotImplementedException();
                });
        });

    Assert.Single(attempts);
}
public void FailAll()
{
    // Every attempt fails transiently: the policy retries up to MaxAttempts
    // and then rethrows the transient exception.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    Assert.Throws<TransientException>(
        () =>
        {
            policy.Invoke(
                () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    throw new TransientException();
                });
        });

    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task FailAll_Result()
{
    // Every async attempt fails transiently: retries up to MaxAttempts then rethrows.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    await Assert.ThrowsAsync<TransientException>(
        async () =>
        {
            await policy.InvokeAsync<string>(
                async () =>
                {
                    attempts.Add(DateTime.UtcNow);
                    await Task.Delay(0);
                    throw new TransientException();
                });
        });

    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public void SuccessDelayed_Result()
{
    // Transient failures up to the final attempt, which returns a value.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();

    var result = policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);

            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            return "WOOHOO!";
        });

    Assert.Equal("WOOHOO!", result);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public void SuccessDelayedByType()
{
    // Policy constructed from an exception TYPE (rather than a detector delegate):
    // NotReadyException is treated as transient and retried until success.
    var policy = new ExponentialRetryPolicy(typeof(NotReadyException));
    var attempts = new List<DateTime>();
    var completed = false;

    policy.Invoke(
        () =>
        {
            attempts.Add(DateTime.UtcNow);

            if (attempts.Count < policy.MaxAttempts)
            {
                throw new NotReadyException();
            }

            completed = true;
        });

    Assert.True(completed);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task SuccessDelayed()
{
    // Transient async failures up to the final attempt, which succeeds.
    var policy = new ExponentialRetryPolicy(TransientDetector);
    var attempts = new List<DateTime>();
    var completed = false;

    await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);

            if (attempts.Count < policy.MaxAttempts)
            {
                throw new TransientException();
            }

            completed = true;
        });

    Assert.True(completed);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
public async Task SuccessDelayedAggregateSingle()
{
    // The transient exception arrives wrapped in an AggregateException; the
    // policy should unwrap it, recognize NotReadyException, and keep retrying.
    var policy = new ExponentialRetryPolicy(typeof(NotReadyException));
    var attempts = new List<DateTime>();
    var completed = false;

    await policy.InvokeAsync(
        async () =>
        {
            attempts.Add(DateTime.UtcNow);
            await Task.Delay(0);

            if (attempts.Count < policy.MaxAttempts)
            {
                throw new AggregateException(new NotReadyException());
            }

            completed = true;
        });

    Assert.True(completed);
    Assert.Equal(policy.MaxAttempts, attempts.Count);
    VerifyIntervals(attempts, policy);
}
/// <summary>
/// GET request wrapped in the retry strategy.
/// </summary>
/// <param name="endpoint">The endpoint (api/...).</param>
/// <returns>The HTTP response message.</returns>
public virtual async Task<HttpResponseMessage> Get(string endpoint)
{
    return await ExponentialRetryPolicy.ExecuteAsync(() => SendGet(endpoint));
}
/// <summary>
/// <para>
/// Deletes workflow runs from a GitHub repo.
/// </para>
/// <note>
/// Only completed runs will be deleted.
/// </note>
/// </summary>
/// <param name="repo">Identifies the target repository.</param>
/// <param name="workflowName">
/// Optionally specifies the workflow whose runs are to be deleted otherwise
/// runs from all workflows in the repo will be deleted.
/// </param>
/// <param name="maxAge">
/// Optionally specifies the maximum age for retained workflow runs.  This
/// defaults to <see cref="TimeSpan.Zero"/> which deletes all runs.
/// </param>
/// <returns>The number of runs deleted.</returns>
public async Task<int> DeleteRunsAsync(string repo, string workflowName = null, TimeSpan maxAge = default)
{
    await SyncContext.Clear;
    GitHub.GetCredentials();

    var repoPath    = GitHubRepoPath.Parse(repo);
    var deleteCount = 0;

    using (var client = new HttpClient())
    {
        var retry = new ExponentialRetryPolicy(TransientDetector.NetworkOrHttp, 5);

        client.BaseAddress = new Uri("https://api.github.com");
        client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", GitHub.AccessToken);
        client.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue("neonforge.com", "0"));
        client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/vnd.github.v3+json"));

        // List all of the workflow runs for the repo, paging to get all of them.
        //
        //      https://docs.github.com/en/rest/reference/actions#list-workflow-runs-for-a-repository

        var runs = new List<RunInfo>();
        var page = 1;

        while (true)
        {
            var response = await retry.InvokeAsync(
                async () =>
                {
                    var request = new HttpRequestMessage(HttpMethod.Get, $"/repos/{repoPath.Owner}/{repoPath.Repo}/actions/runs?page={page}");

                    // We're seeing some 502 Bad Gateway responses from GHCR.io.  We're going to
                    // treat these as transients.

                    var listResponse = await client.SendAsync(request);

                    if (listResponse.StatusCode == HttpStatusCode.BadGateway)
                    {
                        // BUGFIX: [HttpStatusCode.BadGateway] is status 502; the original
                        // message incorrectly said "503" (Service Unavailable).
                        throw new TransientException("502 (Bad Gateway)");
                    }

                    return listResponse;
                });

            response.EnsureSuccessStatusCode();

            // BUGFIX: await the content read instead of blocking on [.Result] inside
            // an async method (sync-over-async risks deadlocks and thread starvation).
            var json         = await response.Content.ReadAsStringAsync();
            var result       = JsonConvert.DeserializeObject<dynamic>(json);
            var workflowRuns = result.workflow_runs;

            if (workflowRuns.Count == 0)
            {
                // We've seen all of the runs.

                break;
            }

            foreach (var run in workflowRuns)
            {
                runs.Add(
                    new RunInfo()
                    {
                        Id           = run.id,
                        Name         = run.name,
                        Status       = run.status,
                        UpdatedAtUtc = run.updated_at
                    });
            }

            page++;
        }

        // Here's the reference for deleting runs:
        //
        //      https://docs.github.com/en/rest/reference/actions#delete-a-workflow-run

        var minDate      = DateTime.UtcNow - maxAge;
        var selectedRuns = runs.Where(run => run.UpdatedAtUtc < minDate && run.Status == "completed");

        if (!string.IsNullOrEmpty(workflowName))
        {
            selectedRuns = selectedRuns.Where(run => run.Name.Equals(workflowName, StringComparison.InvariantCultureIgnoreCase));
        }

        foreach (var run in selectedRuns)
        {
            var response = await retry.InvokeAsync(
                async () =>
                {
                    var request = new HttpRequestMessage(HttpMethod.Delete, $"/repos/{repoPath.Owner}/{repoPath.Repo}/actions/runs/{run.Id}");

                    return await client.SendAsync(request);
                });

            // We're also seeing some 500s but I'm not sure why.  We'll ignore these
            // for now.

            if (response.StatusCode == HttpStatusCode.InternalServerError)
            {
                // BUGFIX: await the delay rather than blocking the thread with
                // [WaitWithoutAggregate()] inside an async method.
                await Task.Delay(TimeSpan.FromSeconds(2));  // Pause in case this is a rate-limit thing
                continue;
            }

            // We're seeing 403s for some runs, so we'll ignore those too.
            //
            // NOTE(review): 403 responses still fall through and increment [deleteCount]
            // below even though the run wasn't actually deleted — confirm whether
            // that's intended before changing the count semantics.

            if (response.StatusCode != HttpStatusCode.Forbidden)
            {
                response.EnsureSuccessStatusCode();
            }

            deleteCount++;
        }
    }

    return deleteCount;
}
/// <summary>
/// Used for temporarily uploading an ISO disk to a XenServer such that it can be mounted
/// to a VM, typically for one-time initialization purposes.  neonKUBE uses this as a very
/// simple poor man's alternative to <b>cloud-init</b> for initializing a VM on first boot.
/// </summary>
/// <param name="isoPath">Path to the source ISO file on the local workstation.</param>
/// <param name="srName">Optionally specifies the storage repository name.  <b>neon-UUID</b> with a generated UUID will be used by default.</param>
/// <returns>A <see cref="XenTempIso"/> with information about the new storage repository and its contents.</returns>
/// <remarks>
/// <para>
/// During cluster setup on virtualization platforms like XenServer and Hyper-V, neonKUBE
/// needs to configure new VMs with IP addresses, hostnames, etc.  Relying on DHCP plus
/// address discovery is problematic in controlled environments, and <b>cloud-init</b>
/// requires additional local infrastructure for non-cloud deployments, so our node
/// templates instead ship a <b>neon-init</b> service that runs before networking starts,
/// checks for an inserted DVD (ISO), and runs the <b>neon-init.sh</b> script there once,
/// if present, to initialize the node's IP address (and potentially other settings).
/// </para>
/// <note>
/// neonKUBE doesn't use this technique for true cloud deployments (AWS, Azure, Google,...)
/// because VM networking can be configured directly via the cloud APIs there.
/// </note>
/// <para>
/// XenServer makes the temporary ISO implementation a bit odd: these ISOs are created
/// directly on the XenServer host by building a local ISO storage repository from a host
/// folder.  All files must exist when the repository is created; files cannot be added,
/// modified, or removed afterwards.
/// </para>
/// <note>
/// XenServer hosts have only 4GB of free space at the root Linux level, so take care
/// not to create large ISOs or to allow these to accumulate.
/// </note>
/// <para>
/// This method uploads the ISO file <paramref name="isoPath"/> from the local workstation
/// to the XenServer host into a new UUID-named folder, creates a storage repository from
/// that folder, and returns a <see cref="XenTempIso"/> describing it so setup code can
/// insert the ISO into a VM.  When done, eject the ISO from the VM and call
/// <see cref="RemoveTempIso(XenTempIso)"/> to remove the storage repository.
/// </para>
/// </remarks>
public XenTempIso CreateTempIso(string isoPath, string srName = null)
{
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(isoPath), nameof(isoPath));

    if (string.IsNullOrEmpty(srName))
    {
        srName = "neon-" + Guid.NewGuid().ToString("d");
    }

    var tempIso = new XenTempIso();

    // Create the temporary SR subfolder and upload the ISO file.

    var srMountPath = "/var/run/sr-mount";

    tempIso.SrPath  = LinuxPath.Combine(srMountPath, Guid.NewGuid().ToString("d"));
    tempIso.IsoName = $"neon-dvd-{Guid.NewGuid().ToString("d")}.iso";

    if (!sftpClient.PathExists(srMountPath))
    {
        sftpClient.CreateDirectory(srMountPath);
    }

    if (!sftpClient.PathExists(tempIso.SrPath))
    {
        sftpClient.CreateDirectory(tempIso.SrPath);
        sftpClient.ChangePermissions(tempIso.SrPath, Convert.ToInt16("751", 8));
    }

    var xenIsoPath = LinuxPath.Combine(tempIso.SrPath, tempIso.IsoName);

    using (var isoInput = File.OpenRead(isoPath))
    {
        sftpClient.UploadFile(isoInput, xenIsoPath);
        sftpClient.ChangePermissions(xenIsoPath, Convert.ToInt16("751", 8));
    }

    // Create the new storage repository.  This command returns the [sr-uuid].

    var createResponse = SafeInvoke("sr-create",
        $"name-label={tempIso.IsoName}",
        $"type=iso",
        $"device-config:location={tempIso.SrPath}",
        $"device-config:legacy_mode=true",
        $"content-type=iso");

    tempIso.SrUuid = createResponse.OutputText.Trim();

    // XenServer created a PBD behind the scenes for the new SR.  We're going
    // to need its UUID so we can completely remove the SR later.  Note that
    // doesn't seem to appear immediately so, we'll retry a few times.

    var retry = new ExponentialRetryPolicy(typeof(InvalidOperationException), maxAttempts: 5, initialRetryInterval: TimeSpan.FromSeconds(0.5), maxRetryInterval: TimeSpan.FromSeconds(5));

    retry.Invoke(
        () =>
        {
            var pbdItems = SafeInvokeItems("pbd-list", $"sr-uuid={tempIso.SrUuid}");

            tempIso.PdbUuid = pbdItems.Items.Single()["uuid"];

            // Obtain the UUID for the ISO's VDI within the SR.

            var vdiItems = SafeInvokeItems("vdi-list", $"sr-uuid={tempIso.SrUuid}");

            tempIso.VdiUuid = vdiItems.Items.Single()["uuid"];
        });

    return tempIso;
}