/// <summary>
/// Checks whether the cluster is ready for chaos faults by running an
/// <see cref="IsClusterUpgradingAction"/> through the action executor.
/// </summary>
/// <param name="token">Cancellation token propagated to the upgrade check and the retry delay.</param>
/// <returns>
/// <c>true</c> when the upgrade check succeeded and no cluster upgrade is in flight;
/// <c>false</c> when the check failed (caller should retry) or an upgrade is ongoing
/// (this method waits 30 seconds before returning so the caller's retry is paced).
/// </returns>
private async Task<bool> IsClusterReadyForFaultsAsync(CancellationToken token)
{
    IsClusterUpgradingAction isUpgradingAction = new IsClusterUpgradingAction
    {
        ActionTimeout = this.chaosScenarioParameters.OperationTimeout,
        RequestTimeout = this.chaosScenarioParameters.RequestTimeout
    };

    var clusterUpgradeCheckTask = this.TestContext.ActionExecutor.RunAsync(isUpgradingAction, token);

    bool checkFailed = false;

    // BUGFIX: pass the cancellation token to the continuation and avoid capturing the
    // sync context (ConfigureAwait(false)) — matches how the newer variant of this
    // method in this file awaits the same continuation.
    await clusterUpgradeCheckTask.ContinueWith(
        t =>
        {
            this.HandleTaskComplete(t, "IsClusterUpgrading", "IsClusterUpgrading");
            if (t.Exception != null)
            {
                checkFailed = true;
            }
        },
        token).ConfigureAwait(false);

    if (checkFailed)
    {
        // The upgrade check itself faulted; signal the caller to retry.
        return false;
    }

    if (isUpgradingAction.Result)
    {
        // Cluster upgrade is in flight; pace the caller's retry by waiting here.
        this.ReportProgress("Cluster is under going an upgrade waiting 30 seconds before next check");

        // BUGFIX: honor cancellation during the 30-second wait — the original
        // Task.Delay call ignored the token, so a cancelled run still blocked here.
        await Task.Delay(TimeSpan.FromSeconds(30), token).ConfigureAwait(false);
        return false;
    }

    return true;
}
/// <summary>
/// Determines whether the cluster is quiescent enough for Chaos to induce faults:
/// both the cluster-upgrade and infrastructure-upgrade checks must succeed, and
/// neither kind of upgrade may be in flight. When not ready, a
/// <see cref="WaitingEvent"/> is recorded so the reported Chaos status explains
/// why faulting is paused.
/// </summary>
/// <param name="token">Cancellation token passed to the two upgrade-check actions.</param>
/// <returns><c>true</c> when faults may proceed; <c>false</c> when the caller should wait and retry.</returns>
private async Task<bool> IsClusterReadyForFaultsAsync(CancellationToken token)
{
    string localType = TraceType + "ClusterReady";
    TestabilityTrace.TraceSource.WriteInfo(localType, "Entered ");

    // Lazily bind the status dictionary, then mark Chaos as Running before checking.
    this.StatusDictionary = this.StatusDictionary
        ?? await this.StateManager.GetOrAddAsync<IReliableDictionary<string, byte[]>>(FASConstants.ChaosStatusDictionaryName).ConfigureAwait(false);

    // NOTE(review): registrations below use the stored this.cancellationToken while the
    // executor calls use the 'token' parameter — presumably intentional; confirm with callers.
    await this.StateManager.RegisterCurrentStatusAsync(ChaosStatus.Running, this.partition, this.cancellationToken).ConfigureAwait(false);

    IsClusterUpgradingAction isUpgradingAction = new IsClusterUpgradingAction
    {
        ActionTimeout = this.ChaosParameters.OperationTimeout,
        RequestTimeout = this.ChaosParameters.RequestTimeout
    };

    var clusterUpgradeCheckTask = this.TestContext.ActionExecutor.RunAsync(isUpgradingAction, token);

    bool checkFailed = false;
    await clusterUpgradeCheckTask.ContinueWith(
        t =>
        {
            this.HandleTaskComplete(t, "IsClusterUpgrading", "IsClusterUpgrading");
            if (t.Exception != null)
            {
                checkFailed = true;
            }
        },
        token).ConfigureAwait(false);

    IsInfrastructureUpgradingAction infraUpgradingAction = new IsInfrastructureUpgradingAction
    {
        ActionTimeout = this.ChaosParameters.OperationTimeout,
        RequestTimeout = this.ChaosParameters.RequestTimeout
    };

    var infraUpgradeCheckTask = this.TestContext.ActionExecutor.RunAsync(infraUpgradingAction, token);

    bool infraCheckFailed = false;
    await infraUpgradeCheckTask.ContinueWith(
        t =>
        {
            this.HandleTaskComplete(t, "IsInfrastructureUpgrading", "IsInfrastructureUpgrading");
            if (t.Exception != null)
            {
                infraCheckFailed = true;
            }
        },
        token).ConfigureAwait(false);

    // Not ready if either check faulted or either kind of upgrade is in flight.
    // The original duplicated an identical WaitingEvent registration + return in two
    // separate branches; consolidated here with identical observable behavior.
    if (checkFailed || infraCheckFailed || isUpgradingAction.Result || infraUpgradingAction.Result)
    {
        await this.StateManager.RegisterChaosEventAndUpdateChaosStatusAsync(
            new WaitingEvent(DateTime.UtcNow, StringResources.ChaosInfo_WaitingForQuiescentCluster),
            ChaosStatus.Running,
            this.partition,
            this.cancellationToken,
            () =>
            {
                // BUGFIX: the original passed the message in the trace-type slot with a
                // malformed "[0]" placeholder; use localType + "{0}" like the call at the
                // top of this method so the timestamp is actually formatted in.
                TestabilityTrace.TraceSource.WriteInfo(localType, "{0}: Upgrade going on, so going to wait.", DateTime.UtcNow.ToString());
            }).ConfigureAwait(false);

        return false;
    }

    return true;
}