/// <inheritdoc/>
protected override void ProcessRecordInternal()
{
    // Build the cluster-level health policy from the cmdlet parameters.
    var healthPolicy = new ClusterHealthPolicy(
        considerWarningAsError: this.ConsiderWarningAsError,
        maxPercentUnhealthyNodes: this.MaxPercentUnhealthyNodes,
        maxPercentUnhealthyApplications: this.MaxPercentUnhealthyApplications,
        applicationTypeHealthPolicyMap: this.ApplicationTypeHealthPolicyMap);

    // Bundle it with the per-application policy map for the query.
    var healthPolicies = new ClusterHealthPolicies(
        applicationHealthPolicyMap: this.ApplicationHealthPolicyMap,
        clusterHealthPolicy: healthPolicy);

    // Cmdlets execute synchronously on the pipeline thread, so block on the async call.
    var health = this.ServiceFabricClient.Cluster.GetClusterHealthUsingPolicyAsync(
        nodesHealthStateFilter: this.NodesHealthStateFilter,
        applicationsHealthStateFilter: this.ApplicationsHealthStateFilter,
        eventsHealthStateFilter: this.EventsHealthStateFilter,
        excludeHealthStatistics: this.ExcludeHealthStatistics,
        includeSystemApplicationHealthStatistics: this.IncludeSystemApplicationHealthStatistics,
        clusterHealthPolicies: healthPolicies,
        serverTimeout: this.ServerTimeout,
        cancellationToken: this.CancellationToken).GetAwaiter().GetResult();

    if (health == null)
    {
        return;
    }

    this.WriteObject(this.FormatOutput(health));
}
/// <summary>
/// Polls aggregated cluster health until it reaches an acceptable state, or the
/// stabilization timeout configured on <paramref name="action"/> expires, in which
/// case a <c>FabricValidationException</c> is thrown with the collected health events.
/// </summary>
/// <param name="testContext">Test context supplying the FabricClient used for the health queries.</param>
/// <param name="action">Carries the stabilization/request timeouts and the validation check flags.</param>
/// <param name="token">Cancellation token flowed into every query and wait.</param>
private async Task ValidateClusterHealth(FabricTestContext testContext, ValidateClusterAction action, CancellationToken token)
{
    TimeoutHelper timer = new TimeoutHelper(action.MaximumStabilizationTimeout);
    bool success = false;
    StringBuilder healthinfo = new StringBuilder();

    while (!success && timer.GetRemainingTime() > TimeSpan.Zero)
    {
        // Reset the report so it only reflects the most recent query.
        healthinfo.Clear();

        // NOTE(review): default-constructed policy — presumably falls back to the
        // cluster's configured health policy settings; confirm against ClusterHealthPolicy docs.
        ClusterHealthPolicy healthPolicy = new ClusterHealthPolicy();

        // Retry transient query failures within the remaining stabilization window.
        var clusterHealthResult = await FabricClientRetryHelper.ExecuteFabricActionWithRetryAsync(
            () => testContext.FabricClient.HealthManager.GetClusterHealthAsync(
                healthPolicy,
                action.RequestTimeout,
                token),
            FabricClientRetryErrors.GetEntityHealthFabricErrors.Value,
            timer.GetRemainingTime(),
            token).ConfigureAwait(false);

        bool checkError = (action.CheckFlag & ValidationCheckFlag.CheckError) != 0;
        bool checkWarning = (action.CheckFlag & ValidationCheckFlag.CheckWarning) != 0;

        // Invalid/Unknown always force another attempt; Error/Warning only count as
        // failures when the corresponding check flag is requested.
        if ((checkError && clusterHealthResult.AggregatedHealthState == HealthState.Error) ||
            (checkWarning && clusterHealthResult.AggregatedHealthState == HealthState.Warning) ||
            clusterHealthResult.AggregatedHealthState == HealthState.Invalid ||
            clusterHealthResult.AggregatedHealthState == HealthState.Unknown)
        {
            AppTrace.TraceSource.WriteInfo(TraceSource, "Cluster health state is {0}. Will Retry check", clusterHealthResult.AggregatedHealthState);

            // Record every health event so the final exception explains the failure.
            foreach (HealthEvent healthEvent in clusterHealthResult.HealthEvents)
            {
                healthinfo.AppendLine(string.Format(
                    "Cluster health state is '{0}' with property '{1}', sourceId '{2}' and description '{3}'",
                    healthEvent.HealthInformation.HealthState,
                    healthEvent.HealthInformation.Property,
                    healthEvent.HealthInformation.SourceId,
                    healthEvent.HealthInformation.Description));
            }

            AppTrace.TraceSource.WriteInfo(TraceSource, healthinfo.ToString());
        }
        else
        {
            success = true;
        }

        if (!success)
        {
            // Delay before querying again so we allow some time for state to change - don't spam the node
            await AsyncWaiter.WaitAsync(RetryWaitTimeout, token).ConfigureAwait(false);
        }
    }

    if (!success)
    {
        throw new FabricValidationException(StringHelper.Format(StringResources.Error_ServiceNotHealthy, "Cluster", action.MaximumStabilizationTimeout, healthinfo));
    }
}
/// <inheritdoc/>
protected override void ProcessRecordInternal()
{
    // Cluster health gate that Chaos validates between fault iterations.
    var healthPolicy = new ClusterHealthPolicy(
        considerWarningAsError: this.ConsiderWarningAsError,
        maxPercentUnhealthyNodes: this.MaxPercentUnhealthyNodes,
        maxPercentUnhealthyApplications: this.MaxPercentUnhealthyApplications,
        applicationTypeHealthPolicyMap: this.ApplicationTypeHealthPolicyMap);

    // Optional caller-supplied context bag recorded with the Chaos run.
    var context = new ChaosContext(
        map: this.Map?.ToDictionary<string, string>());

    // Restrict faults to the requested node types and applications.
    var targetFilter = new ChaosTargetFilter(
        nodeTypeInclusionList: this.NodeTypeInclusionList,
        applicationInclusionList: this.ApplicationInclusionList);

    var parameters = new ChaosParameters(
        timeToRunInSeconds: this.TimeToRunInSeconds,
        maxClusterStabilizationTimeoutInSeconds: this.MaxClusterStabilizationTimeoutInSeconds,
        maxConcurrentFaults: this.MaxConcurrentFaults,
        enableMoveReplicaFaults: this.EnableMoveReplicaFaults,
        waitTimeBetweenFaultsInSeconds: this.WaitTimeBetweenFaultsInSeconds,
        waitTimeBetweenIterationsInSeconds: this.WaitTimeBetweenIterationsInSeconds,
        clusterHealthPolicy: healthPolicy,
        context: context,
        chaosTargetFilter: targetFilter);

    // Fire-and-confirm: StartChaos has no result payload.
    this.ServiceFabricClient.ChaosClient.StartChaosAsync(
        chaosParameters: parameters,
        serverTimeout: this.ServerTimeout,
        cancellationToken: this.CancellationToken).GetAwaiter().GetResult();

    Console.WriteLine("Success!");
}
/// <summary>
/// Serializes the object to JSON.
/// </summary>
/// <param name="writer">The <see cref="T: Newtonsoft.Json.JsonWriter" /> to write to.</param>
/// <param name="obj">The object to serialize to JSON.</param>
internal static void Serialize(JsonWriter writer, ClusterHealthPolicy obj)
{
    // Required properties are always serialized, optional properties are serialized when not null.
    // Property emission order below defines the wire format; do not reorder.
    writer.WriteStartObject();

    if (obj.ConsiderWarningAsError != null)
    {
        writer.WriteProperty(obj.ConsiderWarningAsError, "ConsiderWarningAsError", JsonWriterExtensions.WriteBoolValue);
    }

    if (obj.MaxPercentUnhealthyNodes != null)
    {
        writer.WriteProperty(obj.MaxPercentUnhealthyNodes, "MaxPercentUnhealthyNodes", JsonWriterExtensions.WriteIntValue);
    }

    if (obj.MaxPercentUnhealthyApplications != null)
    {
        writer.WriteProperty(obj.MaxPercentUnhealthyApplications, "MaxPercentUnhealthyApplications", JsonWriterExtensions.WriteIntValue);
    }

    if (obj.ApplicationTypeHealthPolicyMap != null)
    {
        // Each map entry is serialized by its dedicated item converter.
        writer.WriteEnumerableProperty(obj.ApplicationTypeHealthPolicyMap, "ApplicationTypeHealthPolicyMap", ApplicationTypeHealthPolicyMapItemConverter.Serialize);
    }

    writer.WriteEndObject();
}
/// <inheritdoc/>
protected override void ProcessRecordInternal()
{
    // Health policy used to evaluate the chunk query, including node-type overrides.
    var healthPolicy = new ClusterHealthPolicy(
        considerWarningAsError: this.ConsiderWarningAsError,
        maxPercentUnhealthyNodes: this.MaxPercentUnhealthyNodes,
        maxPercentUnhealthyApplications: this.MaxPercentUnhealthyApplications,
        applicationTypeHealthPolicyMap: this.ApplicationTypeHealthPolicyMap,
        nodeTypeHealthPolicyMap: this.NodeTypeHealthPolicyMap);

    var appHealthPolicies = new ApplicationHealthPolicies(
        applicationHealthPolicyMap: this.ApplicationHealthPolicyMap);

    // Advanced chunk query: node/application filters plus both policy sets.
    var queryDescription = new ClusterHealthChunkQueryDescription(
        nodeFilters: this.NodeFilters,
        applicationFilters: this.ApplicationFilters,
        clusterHealthPolicy: healthPolicy,
        applicationHealthPolicies: appHealthPolicies);

    var chunk = this.ServiceFabricClient.Cluster.GetClusterHealthChunkUsingPolicyAndAdvancedFiltersAsync(
        clusterHealthChunkQueryDescription: queryDescription,
        serverTimeout: this.ServerTimeout,
        cancellationToken: this.CancellationToken).GetAwaiter().GetResult();

    if (chunk == null)
    {
        return;
    }

    this.WriteObject(this.FormatOutput(chunk));
}
public void ClusterHealthPolicySerializationTest()
{
    // Round-trip a randomly populated policy through the serializer.
    var policy = this.random.CreateRandom<ClusterHealthPolicy>();
    TestUsingSerializer(this.Serializer, policy);

    // Round-trip again with an empty application-type map to cover that edge.
    policy.ApplicationTypeHealthPolicyMap.Clear();
    TestUsingSerializer(this.Serializer, policy);
}
// Binary-encodes a ClusterHealthPolicy: scalar fields first, then the
// application-type map as a count-prefixed sequence of (key, value) pairs.
// Must stay in sync with ReadClusterHealthPolicy.
private void WriteClusterHealthPolicy(BinaryWriter bw, ClusterHealthPolicy policy)
{
    bw.Write(policy.ConsiderWarningAsError);
    bw.Write(policy.MaxPercentUnhealthyNodes);
    bw.Write(policy.MaxPercentUnhealthyApplications);

    var map = policy.ApplicationTypeHealthPolicyMap;
    bw.Write(map.Count);
    foreach (var entry in map)
    {
        bw.Write(entry.Key);
        bw.Write(entry.Value);
    }
}
/// <summary>
/// Reconstructs a <see cref="MonitoredRollingFabricUpgradePolicyDescription"/> from its
/// native FABRIC_ROLLING_UPGRADE_POLICY_DESCRIPTION form, walking the chained
/// Reserved extension structs (EX1 -> EX2 -> EX3) that carry the newer optional fields.
/// Returns null when <paramref name="policyPtr"/> is null.
/// </summary>
new internal static unsafe MonitoredRollingFabricUpgradePolicyDescription FromNative(IntPtr policyPtr)
{
    if (policyPtr == IntPtr.Zero)
    {
        return(null);
    }

    var castedPtr = (NativeTypes.FABRIC_ROLLING_UPGRADE_POLICY_DESCRIPTION *)policyPtr;
    var monitoringPolicy = new MonitoredRollingFabricUpgradePolicyDescription();

    // EX1: monitoring policy + cluster health policy.
    if (castedPtr->Reserved != IntPtr.Zero)
    {
        var castedEx1Ptr = (NativeTypes.FABRIC_ROLLING_UPGRADE_POLICY_DESCRIPTION_EX1 *)castedPtr->Reserved;
        if (castedEx1Ptr->MonitoringPolicy != IntPtr.Zero)
        {
            monitoringPolicy.MonitoringPolicy = RollingUpgradeMonitoringPolicy.FromNative(castedEx1Ptr->MonitoringPolicy);
        }

        if (castedEx1Ptr->HealthPolicy != IntPtr.Zero)
        {
            monitoringPolicy.HealthPolicy = ClusterHealthPolicy.FromNative(castedEx1Ptr->HealthPolicy);
        }

        // EX2: delta-health evaluation flag + upgrade health policy.
        if (castedEx1Ptr->Reserved != IntPtr.Zero)
        {
            var castedEx2Ptr = (NativeTypes.FABRIC_ROLLING_UPGRADE_POLICY_DESCRIPTION_EX2 *)castedEx1Ptr->Reserved;
            monitoringPolicy.EnableDeltaHealthEvaluation = NativeTypes.FromBOOLEAN(castedEx2Ptr->EnableDeltaHealthEvaluation);
            if (castedEx2Ptr->UpgradeHealthPolicy != IntPtr.Zero)
            {
                monitoringPolicy.UpgradeHealthPolicy = ClusterUpgradeHealthPolicy.FromNative(castedEx2Ptr->UpgradeHealthPolicy);
            }

            // EX3: per-application health policy map.
            if (castedEx2Ptr->Reserved != IntPtr.Zero)
            {
                var castedEx3Ptr = (NativeTypes.FABRIC_ROLLING_UPGRADE_POLICY_DESCRIPTION_EX3 *)castedEx2Ptr->Reserved;
                if (castedEx3Ptr->ApplicationHealthPolicyMap != IntPtr.Zero)
                {
                    monitoringPolicy.ApplicationHealthPolicyMap.FromNative(castedEx3Ptr->ApplicationHealthPolicyMap);
                }
            }
        }
    }

    // Populate the base-class fields shared with the plain rolling policy.
    monitoringPolicy.FromNativeHelper(policyPtr);
    return(monitoringPolicy);
}
/// <inheritdoc/>
protected override void ProcessRecordInternal()
{
    // Monitoring policy governing the monitored-rolling-upgrade health checks.
    var monitoringPolicy = new MonitoringPolicyDescription(
        failureAction: this.FailureAction,
        healthCheckWaitDurationInMilliseconds: this.HealthCheckWaitDurationInMilliseconds,
        healthCheckStableDurationInMilliseconds: this.HealthCheckStableDurationInMilliseconds,
        healthCheckRetryTimeoutInMilliseconds: this.HealthCheckRetryTimeoutInMilliseconds,
        upgradeTimeoutInMilliseconds: this.UpgradeTimeoutInMilliseconds,
        upgradeDomainTimeoutInMilliseconds: this.UpgradeDomainTimeoutInMilliseconds);

    var healthPolicy = new ClusterHealthPolicy(
        considerWarningAsError: this.ConsiderWarningAsError,
        maxPercentUnhealthyNodes: this.MaxPercentUnhealthyNodes,
        maxPercentUnhealthyApplications: this.MaxPercentUnhealthyApplications,
        applicationTypeHealthPolicyMap: this.ApplicationTypeHealthPolicyMap,
        nodeTypeHealthPolicyMap: this.NodeTypeHealthPolicyMap);

    // Delta thresholds used when delta health evaluation is enabled.
    var upgradeHealthPolicy = new ClusterUpgradeHealthPolicyObject(
        maxPercentDeltaUnhealthyNodes: this.MaxPercentDeltaUnhealthyNodes,
        maxPercentUpgradeDomainDeltaUnhealthyNodes: this.MaxPercentUpgradeDomainDeltaUnhealthyNodes);

    var appHealthPolicies = new ApplicationHealthPolicies(
        applicationHealthPolicyMap: this.ApplicationHealthPolicyMap);

    var upgradeDescription = new StartClusterUpgradeDescription(
        codeVersion: this.CodeVersion,
        configVersion: this.ConfigVersion,
        upgradeKind: this.UpgradeKind,
        rollingUpgradeMode: this.RollingUpgradeMode,
        upgradeReplicaSetCheckTimeoutInSeconds: this.UpgradeReplicaSetCheckTimeoutInSeconds,
        forceRestart: this.ForceRestart,
        sortOrder: this.SortOrder,
        monitoringPolicy: monitoringPolicy,
        clusterHealthPolicy: healthPolicy,
        enableDeltaHealthEvaluation: this.EnableDeltaHealthEvaluation,
        clusterUpgradeHealthPolicy: upgradeHealthPolicy,
        applicationHealthPolicyMap: appHealthPolicies,
        instanceCloseDelayDurationInSeconds: this.InstanceCloseDelayDurationInSeconds);

    // Validate only — this call does not start the upgrade.
    var validation = this.ServiceFabricClient.Cluster.ValidateClusterUpgradeAsync(
        startClusterUpgradeDescription: upgradeDescription,
        serverTimeout: this.ServerTimeout,
        cancellationToken: this.CancellationToken).GetAwaiter().GetResult();

    if (validation == null)
    {
        return;
    }

    this.WriteObject(this.FormatOutput(validation));
}
/// <inheritdoc />
public Task <NodeHealth> GetNodeHealthUsingPolicyAsync(
    NodeName nodeName,
    int?eventsHealthStateFilter = 0,
    ClusterHealthPolicy clusterHealthPolicy = default(ClusterHealthPolicy),
    long?serverTimeout = 60,
    CancellationToken cancellationToken = default(CancellationToken))
{
    nodeName.ThrowIfNull(nameof(nodeName));
    serverTimeout?.ThrowIfOutOfInclusiveRange("serverTimeout", 1, 4294967295);

    // Correlation id attached to the request for tracing.
    var requestId = Guid.NewGuid().ToString();

    // Build "Nodes/{name}/$/GetHealth?...&api-version=6.0".
    var url = "Nodes/{nodeName}/$/GetHealth".Replace("{nodeName}", Uri.EscapeDataString(nodeName.ToString()));

    var query = new List <string>();
    // Append to queryParams if not null.
    eventsHealthStateFilter?.AddToQueryParameters(query, $"EventsHealthStateFilter={eventsHealthStateFilter}");
    serverTimeout?.AddToQueryParameters(query, $"timeout={serverTimeout}");
    query.Add("api-version=6.0");
    url = url + "?" + string.Join("&", query);

    // Serialize the optional health policy into the POST body; empty body when omitted.
    string body;
    using (var textWriter = new StringWriter())
    {
        if (clusterHealthPolicy != default(ClusterHealthPolicy))
        {
            ClusterHealthPolicyConverter.Serialize(new JsonTextWriter(textWriter), clusterHealthPolicy);
        }

        body = textWriter.ToString();
    }

    // Factory rather than a single message so the transport can retry safely.
    HttpRequestMessage RequestFunc()
    {
        var message = new HttpRequestMessage()
        {
            Method = HttpMethod.Post,
            Content = new StringContent(body, Encoding.UTF8),
        };
        message.Content.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json; charset=utf-8");
        return message;
    }

    return this.httpClient.SendAsyncGetResponse(RequestFunc, url, NodeHealthConverter.Deserialize, requestId, cancellationToken);
}
/// <summary>
/// Round-trips a fully populated ChaosParameters instance — including health
/// policy, context map and target filter — through the serializer.
/// </summary>
public void ChaosParametersSerializationTest()
{
    TimeSpan maxClusterStabilizationTimeout = TimeSpan.FromSeconds(997);
    long maxConcurrentFaults = 7;
    TimeSpan waitTimeBetweenIterations = TimeSpan.FromSeconds(131);
    TimeSpan waitTimeBetweenFaults = TimeSpan.FromSeconds(19);
    TimeSpan timeToRun = TimeSpan.FromSeconds(104729);
    bool enableMoveReplicaFaults = true;

    var healthPolicy = new ClusterHealthPolicy
    {
        ConsiderWarningAsError = false,
        MaxPercentUnhealthyNodes = 10,
        MaxPercentUnhealthyApplications = 15
    };

    healthPolicy.ApplicationTypeHealthPolicyMap["TestApplicationTypePolicy"] = 11;

    var context = new Dictionary <string, string> { { "key1", "value1" }, { "key2", "value2" } };

    var chaosParameters = new ChaosParameters(
        maxClusterStabilizationTimeout,
        maxConcurrentFaults,
        enableMoveReplicaFaults,
        timeToRun,
        context,
        waitTimeBetweenIterations,
        waitTimeBetweenFaults,
        healthPolicy);

    // BUG FIX: the target filter was built and populated but never attached to
    // chaosParameters, so serialization of ChaosTargetFilter was never exercised.
    var chaosTargetFilter = new ChaosTargetFilter();
    var nodeTypeInclusionList = new List <string> { "NodeType1", "NodeType2" };
    var applicationInclusionList = new List <string> { "fabric:/app1", "fabric:/app2" };
    chaosTargetFilter.ApplicationInclusionList = applicationInclusionList;
    chaosTargetFilter.NodeTypeInclusionList = nodeTypeInclusionList;
    chaosParameters.ChaosTargetFilter = chaosTargetFilter;

    this.TestUsingSerializer(this.Serializer, chaosParameters);
}
/// <inheritdoc/>
protected override void ProcessRecordInternal()
{
    // Rolling-upgrade parameter overrides for the in-flight upgrade.
    var rollingUpdate = new RollingUpgradeUpdateDescription(
        rollingUpgradeMode: this.RollingUpgradeMode,
        forceRestart: this.ForceRestart,
        replicaSetCheckTimeoutInMilliseconds: this.ReplicaSetCheckTimeoutInMilliseconds,
        failureAction: this.FailureAction,
        healthCheckWaitDurationInMilliseconds: this.HealthCheckWaitDurationInMilliseconds,
        healthCheckStableDurationInMilliseconds: this.HealthCheckStableDurationInMilliseconds,
        healthCheckRetryTimeoutInMilliseconds: this.HealthCheckRetryTimeoutInMilliseconds,
        upgradeTimeoutInMilliseconds: this.UpgradeTimeoutInMilliseconds,
        upgradeDomainTimeoutInMilliseconds: this.UpgradeDomainTimeoutInMilliseconds,
        instanceCloseDelayDurationInSeconds: this.InstanceCloseDelayDurationInSeconds);

    var healthPolicy = new ClusterHealthPolicy(
        considerWarningAsError: this.ConsiderWarningAsError,
        maxPercentUnhealthyNodes: this.MaxPercentUnhealthyNodes,
        maxPercentUnhealthyApplications: this.MaxPercentUnhealthyApplications,
        applicationTypeHealthPolicyMap: this.ApplicationTypeHealthPolicyMap,
        nodeTypeHealthPolicyMap: this.NodeTypeHealthPolicyMap);

    // Delta thresholds applied when delta health evaluation is enabled.
    var upgradeHealthPolicy = new ClusterUpgradeHealthPolicyObject(
        maxPercentDeltaUnhealthyNodes: this.MaxPercentDeltaUnhealthyNodes,
        maxPercentUpgradeDomainDeltaUnhealthyNodes: this.MaxPercentUpgradeDomainDeltaUnhealthyNodes);

    var appHealthPolicies = new ApplicationHealthPolicies(
        applicationHealthPolicyMap: this.ApplicationHealthPolicyMap);

    var updateDescription = new UpdateClusterUpgradeDescription(
        upgradeKind: this.UpgradeKind,
        updateDescription: rollingUpdate,
        clusterHealthPolicy: healthPolicy,
        enableDeltaHealthEvaluation: this.EnableDeltaHealthEvaluation,
        clusterUpgradeHealthPolicy: upgradeHealthPolicy,
        applicationHealthPolicyMap: appHealthPolicies);

    // No result payload; block until the server accepts the update.
    this.ServiceFabricClient.Cluster.UpdateClusterUpgradeAsync(
        updateClusterUpgradeDescription: updateDescription,
        serverTimeout: this.ServerTimeout,
        cancellationToken: this.CancellationToken).GetAwaiter().GetResult();

    Console.WriteLine("Success!");
}
// Decodes a ClusterHealthPolicy previously written by WriteClusterHealthPolicy:
// scalar fields first, then a count-prefixed sequence of (key, value) map entries.
private void ReadClusterHealthPolicy(BinaryReader br, ClusterHealthPolicy policy)
{
    policy.ConsiderWarningAsError = br.ReadBoolean();
    policy.MaxPercentUnhealthyNodes = br.ReadByte();
    policy.MaxPercentUnhealthyApplications = br.ReadByte();

    // Read application health policy map
    int entryCount = br.ReadInt32();
    for (int i = 0; i < entryCount; ++i)
    {
        string key = br.ReadString();
        byte value = br.ReadByte();
        policy.ApplicationTypeHealthPolicyMap[key] = value;
    }
}
/// <summary>
/// <para>Initializes a new instance of the <see cref="System.Fabric.Chaos.DataStructures.ChaosParameters" /> class.</para>
/// </summary>
/// <param name="maxClusterStabilizationTimeout">The maximum amount of time to wait for the entire cluster to stabilize after a fault iteration; cannot exceed TimeSpan.FromSeconds(uint.MaxValue)</param>
/// <param name="maxConcurrentFaults">Maximum number of concurrent faults induced per iteration with the lowest being 1. The higher the concurrency the more aggressive the failovers
/// thus inducing more complex series of failures to uncover bugs. using 2 or 3 for this is recommended.</param>
/// <param name="enableMoveReplicaFaults">Enables or disables the MovePrimary and MoveSecondary faults.</param>
/// <param name="timeToRun">After running for this much time, Chaos will stop; cannot exceed TimeSpan.FromSeconds(uint.MaxValue)</param>
/// <param name="context">This is a bag of (key, value) pairs. This can be used to record detailed context about why Chaos is being started for example.</param>
/// <param name="waitTimeBetweenIterations">This is the amount of pause between two consecutive iterations of fault inducing. The more the pause, the less is the rate of faults over time; cannot exceed TimeSpan.FromSeconds(uint.MaxValue)</param>
/// <param name="waitTimeBetweenFaults">This is the amount of pause between two consecutive faults in a single iteration -- the more the pause, the less the concurrency of faults; cannot exceed TimeSpan.FromSeconds(uint.MaxValue)</param>
/// <param name="clusterHealthPolicy">The cluster health policy that determines how healthy a cluster must be in order for Chaos to go on inducing faults.</param>
public ChaosParameters(
    TimeSpan maxClusterStabilizationTimeout,
    long maxConcurrentFaults,
    bool enableMoveReplicaFaults,
    TimeSpan timeToRun,
    Dictionary <string, string> context,
    TimeSpan waitTimeBetweenIterations,
    TimeSpan waitTimeBetweenFaults,
    ClusterHealthPolicy clusterHealthPolicy)
    : this(
        maxClusterStabilizationTimeout,
        maxConcurrentFaults,
        enableMoveReplicaFaults,
        timeToRun,
        context,
        // NOTE(review): positional 'true' forwarded to the fuller overload — not
        // visible here which parameter it binds to; confirm against that constructor.
        true,
        waitTimeBetweenIterations,
        waitTimeBetweenFaults,
        clusterHealthPolicy)
{
}
// Copies the fields of a ClusterHealthPolicy onto a PSObject as note properties.
// No-op when the policy is null.
protected void AddToPSObject(PSObject itemPSObj, ClusterHealthPolicy healthPolicy)
{
    if (healthPolicy == null)
    {
        return;
    }

    // Scalar policy settings.
    itemPSObj.Properties.Add(new PSNoteProperty(Constants.ConsiderWarningAsErrorPropertyName, healthPolicy.ConsiderWarningAsError));
    itemPSObj.Properties.Add(new PSNoteProperty(Constants.MaxPercentUnhealthyApplicationsPropertyName, healthPolicy.MaxPercentUnhealthyApplications));
    itemPSObj.Properties.Add(new PSNoteProperty(Constants.MaxPercentUnhealthyNodesPropertyName, healthPolicy.MaxPercentUnhealthyNodes));

    var map = healthPolicy.ApplicationTypeHealthPolicyMap;
    if (map == null)
    {
        return;
    }

    // Wrap the map and attach a ToString code method so PowerShell renders it readably.
    var mapPSObj = new PSObject(map);
    mapPSObj.Members.Add(
        new PSCodeMethod(
            Constants.ToStringMethodName,
            typeof(OutputFormatter).GetMethod(Constants.FormatObjectMethodName)));
    itemPSObj.Properties.Add(new PSNoteProperty(Constants.ApplicationTypeHealthPolicyMapPropertyName, mapPSObj));
}
/// <summary>
/// Sample entry point: connects to a Service Fabric cluster with X.509 credentials,
/// starts a Chaos run scoped by a target filter, then polls the Chaos report and
/// prints new events until a StoppedEvent is observed.
/// </summary>
static void Main(string[] args)
{
    // README:
    //
    // Please ensure your cluster certificate is installed in
    // the 'CurrentUser' certificate store.
    //
    // REQUIRED STEPS:
    // - Paste your Service Fabric certificate's thumbprint below (line 52,53)
    // - Update the cluster domain name to match your SF cluster (line 54)
    // - Add your cluster node types to the inclusion list (line 102)
    string clientCertThumb = "D6426E96E0169B60ED030E53FCD05EAC12AAA1E0";
    string serverCertThumb = "D6426E96E0169B60ED030E53FCD05EAC12AAA1E0";
    string clusterDomainName = "dotjson.westeurope.cloudapp.azure.com";
    string commonName = $"www.{clusterDomainName}";
    string clusterEndpoint = $"{clusterDomainName}:19000";

    var creds = GetCredentials(clientCertThumb, serverCertThumb, commonName);

    Console.WriteLine($"Connecting to cluster {clusterEndpoint} using certificate '{clientCertThumb}'.");

    using (var client = new FabricClient(creds, clusterEndpoint))
    {
        // Events are filtered from this point onward when fetching the report.
        var startTimeUtc = DateTime.UtcNow;

        // The maximum amount of time to wait for all cluster entities to become stable and healthy.
        // Chaos executes in iterations and at the start of each iteration it validates the health of cluster entities.
        // During validation if a cluster entity is not stable and healthy within MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation failed event.
        var maxClusterStabilizationTimeout = TimeSpan.FromSeconds(30.0);

        var timeToRun = TimeSpan.FromMinutes(60.0);

        // MaxConcurrentFaults is the maximum number of concurrent faults induced per iteration.
        // Chaos executes in iterations and two consecutive iterations are separated by a validation phase.
        // The higher the concurrency, the more aggressive the injection of faults -- inducing more complex series of states to uncover bugs.
        // The recommendation is to start with a value of 2 or 3 and to exercise caution while moving up.
        var maxConcurrentFaults = 3;

        // Describes a map, which is a collection of (string, string) type key-value pairs. The map can be used to record information about
        // the Chaos run. There cannot be more than 100 such pairs and each string (key or value) can be at most 4095 characters long.
        // This map is set by the starter of the Chaos run to optionally store the context about the specific run.
        var startContext = new Dictionary <string, string> { { "ReasonForStart", "Testing" } };

        // Time-separation (in seconds) between two consecutive iterations of Chaos. The larger the value, the lower the fault injection rate.
        var waitTimeBetweenIterations = TimeSpan.FromSeconds(10);

        // Wait time (in seconds) between consecutive faults within a single iteration.
        // The larger the value, the lower the overlapping between faults and the simpler the sequence of state transitions that the cluster goes through.
        // The recommendation is to start with a value between 1 and 5 and exercise caution while moving up.
        var waitTimeBetweenFaults = TimeSpan.Zero;

        // Passed-in cluster health policy is used to validate health of the cluster in between Chaos iterations.
        var clusterHealthPolicy = new ClusterHealthPolicy
        {
            ConsiderWarningAsError = false,
            MaxPercentUnhealthyApplications = 100,
            MaxPercentUnhealthyNodes = 100
        };

        // All types of faults, restart node, restart code package, restart replica, move primary replica, and move secondary replica will happen
        // for nodes of type 'FrontEndType'
        var nodetypeInclusionList = new List <string> { "nodetype0" };

        // In addition to the faults included by nodetypeInclusionList,
        // restart code package, restart replica, move primary replica, move secondary replica faults will happen for 'fabric:/TestApp2'
        // even if a replica or code package from 'fabric:/TestApp2' is residing on a node which is not of type included in nodeypeInclusionList.
        var applicationInclusionList = new List <string> { "fabric:/Exchange" };

        // List of cluster entities to target for Chaos faults.
        var chaosTargetFilter = new ChaosTargetFilter
        {
            NodeTypeInclusionList = nodetypeInclusionList,
            ApplicationInclusionList = applicationInclusionList
        };

        var parameters = new ChaosParameters(
            maxClusterStabilizationTimeout,
            maxConcurrentFaults,
            true, /* EnableMoveReplicaFault */
            timeToRun,
            startContext,
            waitTimeBetweenIterations,
            waitTimeBetweenFaults,
            clusterHealthPolicy)
        {
            ChaosTargetFilter = chaosTargetFilter
        };

        try
        {
            client.TestManager.StartChaosAsync(parameters).GetAwaiter().GetResult();
            System.Threading.Thread.Sleep(TimeSpan.FromSeconds(30)); // Allow enough time for Chaos engine to start
        }
        catch (FabricChaosAlreadyRunningException)
        {
            // A concurrent Chaos run is fine for this sample; just report and continue polling.
            Console.WriteLine("An instance of Chaos is already running in the cluster");
        }

        var filter = new ChaosReportFilter(startTimeUtc, DateTime.MaxValue);

        // De-duplicate events across paged report fetches.
        var eventSet = new HashSet <ChaosEvent>(new ChaosEventComparer());
        string continuationToken = null;

        while (true)
        {
            ChaosReport report;
            try
            {
                // First page uses the time filter; subsequent pages use the continuation token.
                report = string.IsNullOrEmpty(continuationToken)
                    ? client.TestManager.GetChaosReportAsync(filter).GetAwaiter().GetResult()
                    : client.TestManager.GetChaosReportAsync(continuationToken).GetAwaiter().GetResult();
            }
            catch (Exception e)
            {
                // Transient/timeout failures are logged and retried after a short delay.
                if (e is FabricTransientException)
                {
                    Console.WriteLine("A transient exception happened: '{0}'", e);
                }
                else if (e is TimeoutException)
                {
                    Console.WriteLine("A timeout exception happened: '{0}'", e);
                }
                else
                {
                    throw;
                }

                Task.Delay(TimeSpan.FromSeconds(1.0)).GetAwaiter().GetResult();
                continue;
            }

            continuationToken = report.ContinuationToken;

            foreach (var chaosEvent in report.History)
            {
                if (eventSet.Add(chaosEvent))
                {
                    Console.WriteLine(chaosEvent);
                }
            }

            // When Chaos stops, a StoppedEvent is created.
            // If a StoppedEvent is found, exit the loop.
            var lastEvent = report.History.LastOrDefault();

            if (lastEvent is StoppedEvent)
            {
                break;
            }

            Task.Delay(TimeSpan.FromSeconds(1.0)).GetAwaiter().GetResult();
        }
    }
}
/// <summary>
/// Marshals a native FABRIC_CHAOS_PARAMETERS struct into a managed
/// <see cref="ChaosParameters"/>, including the optional EX1 (cluster health
/// policy) and EX2 (target filter) extensions chained via the Reserved pointers.
/// </summary>
internal static unsafe ChaosParameters CreateFromNative(IntPtr nativeRaw)
{
    NativeTypes.FABRIC_CHAOS_PARAMETERS native = *(NativeTypes.FABRIC_CHAOS_PARAMETERS *)nativeRaw;

    TimeSpan maxClusterStabilizationTimeout = TimeSpan.FromSeconds(native.MaxClusterStabilizationTimeoutInSeconds);
    var maxConcurrentFaults = native.MaxConcurrentFaults;
    TimeSpan waitTimeBetweenIterations = TimeSpan.FromSeconds(native.WaitTimeBetweenIterationsInSeconds);
    TimeSpan waitTimeBetweenFaults = TimeSpan.FromSeconds(native.WaitTimeBetweenFaultsInSeconds);
    TimeSpan timeToRun = TimeSpan.FromSeconds(native.TimeToRunInSeconds);
    var enabledMoveReplicaFaults = NativeTypes.FromBOOLEAN(native.EnableMoveReplicaFaults);

    // Copy the native context map (if any) into a managed dictionary,
    // walking the items array by raw byte offset.
    var contextMap = new Dictionary <string, string>();
    if (native.Context != IntPtr.Zero)
    {
        var nativeMapPtr = (NativeTypes.FABRIC_EVENT_CONTEXT_MAP *)native.Context;
        var bytesPerItem = Marshal.SizeOf(typeof(NativeTypes.FABRIC_EVENT_CONTEXT_MAP_ITEM));
        for (int i = 0; i < nativeMapPtr->Count; ++i)
        {
            var nativeItemPtr = nativeMapPtr->Items + (i * bytesPerItem);
            var nativeMapItemPtr = (NativeTypes.FABRIC_EVENT_CONTEXT_MAP_ITEM *)nativeItemPtr;
            contextMap.Add(
                NativeTypes.FromNativeString(nativeMapItemPtr->Key),
                NativeTypes.FromNativeString(nativeMapItemPtr->Value));
        }
    }

    var parameters = new ChaosParameters(
        maxClusterStabilizationTimeout,
        maxConcurrentFaults,
        enabledMoveReplicaFaults,
        timeToRun,
        contextMap,
        waitTimeBetweenIterations,
        waitTimeBetweenFaults);

    // EX1 carries the optional cluster health policy; EX2 the optional target filter.
    if (native.Reserved != IntPtr.Zero)
    {
        var ex1 = *((NativeTypes.FABRIC_CHAOS_PARAMETERS_EX1 *)native.Reserved);
        if (ex1.ClusterHealthPolicy != IntPtr.Zero)
        {
            parameters.ClusterHealthPolicy = ClusterHealthPolicy.FromNative(ex1.ClusterHealthPolicy);
        }

        if (ex1.Reserved != IntPtr.Zero)
        {
            var ex2 = *(NativeTypes.FABRIC_CHAOS_PARAMETERS_EX2 *)ex1.Reserved;
            parameters.ChaosTargetFilter = ChaosTargetFilter.FromNative(ex2.ChaosTargetFilter);
        }
    }

    return(parameters);
}
/// <summary>
/// Builds a FabricUpgradeDescription from the cmdlet parameters (monitored,
/// unmonitored-manual, or unmonitored-auto rolling upgrade) and starts the cluster
/// upgrade. Prompts for confirmation before applying a partial health-policy
/// update unless -Force is given or the supplied policy is complete.
/// </summary>
protected override void ProcessRecord()
{
    try
    {
        // ReplicaQuorumTimeoutSec is deprecated; fold it into UpgradeReplicaSetCheckTimeoutSec when that is unset.
        if (this.ReplicaQuorumTimeoutSec.HasValue)
        {
            this.WriteWarning(StringResources.PowerShell_ReplicaQuorumTimeoutSec_Deprecated);
            if (!this.UpgradeReplicaSetCheckTimeoutSec.HasValue)
            {
                this.UpgradeReplicaSetCheckTimeoutSec = this.ReplicaQuorumTimeoutSec.Value;
            }
        }

        // RestartProcess is deprecated; fold it into ForceRestart when that is not already set.
        if (this.RestartProcess)
        {
            this.WriteWarning(StringResources.PowerShell_RestartProcess_Deprecated);
            if (!this.ForceRestart)
            {
                this.ForceRestart = this.RestartProcess;
            }
        }

        RollingUpgradePolicyDescription upgradePolicyDescription;

        if (this.Monitored)
        {
            // Only explicitly supplied monitoring parameters override the defaults.
            var monitoringPolicy = new RollingUpgradeMonitoringPolicy();

            if (this.FailureAction != UpgradeFailureAction.Invalid)
            {
                monitoringPolicy.FailureAction = this.FailureAction;
            }

            if (this.HealthCheckRetryTimeoutSec.HasValue)
            {
                monitoringPolicy.HealthCheckRetryTimeout = TimeSpan.FromSeconds(this.HealthCheckRetryTimeoutSec.Value);
            }

            if (this.HealthCheckWaitDurationSec.HasValue)
            {
                monitoringPolicy.HealthCheckWaitDuration = TimeSpan.FromSeconds(this.HealthCheckWaitDurationSec.Value);
            }

            if (this.HealthCheckStableDurationSec.HasValue)
            {
                monitoringPolicy.HealthCheckStableDuration = TimeSpan.FromSeconds(this.HealthCheckStableDurationSec.Value);
            }

            if (this.UpgradeDomainTimeoutSec.HasValue)
            {
                monitoringPolicy.UpgradeDomainTimeout = TimeSpan.FromSeconds(this.UpgradeDomainTimeoutSec.Value);
            }

            if (this.UpgradeTimeoutSec.HasValue)
            {
                monitoringPolicy.UpgradeTimeout = TimeSpan.FromSeconds(this.UpgradeTimeoutSec.Value);
            }

            var monitoredPolicyDescription = new MonitoredRollingFabricUpgradePolicyDescription
            {
                UpgradeMode = RollingUpgradeMode.Monitored,
                ForceRestart = this.ForceRestart,
                MonitoringPolicy = monitoringPolicy
            };

            upgradePolicyDescription = monitoredPolicyDescription;

            if (this.IsUpdatingHealthPolicy())
            {
                // An incomplete health policy replaces unspecified fields with defaults,
                // so require confirmation unless -Force or a complete policy was given.
                if (!this.Force &&
                    !this.IsHealthPolicyComplete() &&
                    !this.ShouldProcess(
                        //// description shows up for "-WhatIf"
                        string.Format(
                            CultureInfo.InvariantCulture,
                            "{0} {1}",
                            StringResources.PowerShell_HealthPolicyUpgradeCaption,
                            StringResources.PowerShell_ClusterHealthPolicyUpdateWarning),
                        //// warning and caption show up when prompting for confirmation
                        StringResources.PowerShell_ClusterHealthPolicyUpdateWarning,
                        StringResources.PowerShell_HealthPolicyUpgradeCaption))
                {
                    return;
                }

                var healthPolicy = new ClusterHealthPolicy();

                if (this.ConsiderWarningAsError.HasValue)
                {
                    healthPolicy.ConsiderWarningAsError = this.ConsiderWarningAsError.Value;
                }

                if (this.MaxPercentUnhealthyApplications.HasValue)
                {
                    healthPolicy.MaxPercentUnhealthyApplications = this.MaxPercentUnhealthyApplications.Value;
                }

                if (this.MaxPercentUnhealthyNodes.HasValue)
                {
                    healthPolicy.MaxPercentUnhealthyNodes = this.MaxPercentUnhealthyNodes.Value;
                }

                if (this.ApplicationTypeHealthPolicyMap != null)
                {
                    foreach (var entry in this.ApplicationTypeHealthPolicyMap)
                    {
                        healthPolicy.ApplicationTypeHealthPolicyMap.Add(entry.Key, entry.Value);
                    }
                }

                monitoredPolicyDescription.HealthPolicy = healthPolicy;
            }

            monitoredPolicyDescription.EnableDeltaHealthEvaluation = this.EnableDeltaHealthEvaluation;

            if (this.IsUpdatingUpgradeHealthPolicy())
            {
                // Same confirmation pattern for the delta (upgrade) health policy.
                if (!this.Force &&
                    !this.IsUpgradeHealthPolicyComplete() &&
                    !this.ShouldProcess(
                        //// description shows up for "-WhatIf"
                        string.Format(
                            CultureInfo.InvariantCulture,
                            "{0} {1}",
                            StringResources.PowerShell_HealthPolicyUpgradeCaption,
                            StringResources.PowerShell_ClusterUpgradeHealthPolicyUpdateWarning),
                        //// warning and caption show up when prompting for confirmation
                        StringResources.PowerShell_ClusterUpgradeHealthPolicyUpdateWarning,
                        StringResources.PowerShell_HealthPolicyUpgradeCaption))
                {
                    return;
                }

                var upgradeHealthPolicy = new ClusterUpgradeHealthPolicy();

                if (this.MaxPercentDeltaUnhealthyNodes.HasValue)
                {
                    upgradeHealthPolicy.MaxPercentDeltaUnhealthyNodes = this.MaxPercentDeltaUnhealthyNodes.Value;
                }

                if (this.MaxPercentUpgradeDomainDeltaUnhealthyNodes.HasValue)
                {
                    upgradeHealthPolicy.MaxPercentUpgradeDomainDeltaUnhealthyNodes = this.MaxPercentUpgradeDomainDeltaUnhealthyNodes.Value;
                }

                monitoredPolicyDescription.UpgradeHealthPolicy = upgradeHealthPolicy;
            }

            if (this.ApplicationHealthPolicyMap != null)
            {
                foreach (var entry in this.ApplicationHealthPolicyMap)
                {
                    monitoredPolicyDescription.ApplicationHealthPolicyMap.Add(entry.Key, entry.Value);
                }
            }
        }
        else if (this.UnmonitoredManual)
        {
            upgradePolicyDescription = new RollingUpgradePolicyDescription
            {
                UpgradeMode = RollingUpgradeMode.UnmonitoredManual,
                ForceRestart = this.ForceRestart,
            };
        }
        else
        {
            upgradePolicyDescription = new RollingUpgradePolicyDescription
            {
                UpgradeMode = RollingUpgradeMode.UnmonitoredAuto,
                ForceRestart = this.ForceRestart,
            };
        }

        if (this.UpgradeReplicaSetCheckTimeoutSec.HasValue)
        {
            upgradePolicyDescription.UpgradeReplicaSetCheckTimeout = TimeSpan.FromSeconds(this.UpgradeReplicaSetCheckTimeoutSec.Value);
        }

        var upgradeDescription = new FabricUpgradeDescription
        {
            UpgradePolicyDescription = upgradePolicyDescription,
            TargetCodeVersion = this.CodePackageVersion,
            TargetConfigVersion = this.ClusterManifestVersion
        };

        this.UpgradeCluster(upgradeDescription);
    }
    catch (Exception exception)
    {
        // Surface any failure as a terminating cmdlet error.
        this.ThrowTerminatingError(
            exception,
            Constants.UpgradeClusterErrorId,
            null);
    }
}
/// <summary>
/// Builds a <c>FabricUpgradeUpdateDescription</c> from the cmdlet's optional
/// parameters and applies it via <c>UpdateClusterUpgrade</c>. Only parameters
/// the caller actually supplied (nullable <c>HasValue</c> / non-null) are
/// copied onto the description, so unset parameters leave the existing
/// upgrade settings untouched.
/// </summary>
protected override void ProcessRecord()
{
    try
    {
        var updateDescription = new FabricUpgradeUpdateDescription();

        // Copy each optional rolling-upgrade setting only when the caller supplied it.
        if (this.ForceRestart.HasValue)
        {
            updateDescription.ForceRestart = this.ForceRestart.Value;
        }

        if (this.UpgradeReplicaSetCheckTimeoutSec.HasValue)
        {
            updateDescription.UpgradeReplicaSetCheckTimeout = TimeSpan.FromSeconds(this.UpgradeReplicaSetCheckTimeoutSec.Value);
        }

        if (this.UpgradeMode.HasValue)
        {
            updateDescription.UpgradeMode = this.UpgradeMode.Value;
        }

        if (this.FailureAction.HasValue)
        {
            updateDescription.FailureAction = this.FailureAction.Value;
        }

        // Health-check timing parameters are exposed to PowerShell as seconds
        // and converted to TimeSpan here.
        if (this.HealthCheckWaitDurationSec.HasValue)
        {
            updateDescription.HealthCheckWaitDuration = TimeSpan.FromSeconds(this.HealthCheckWaitDurationSec.Value);
        }

        if (this.HealthCheckStableDurationSec.HasValue)
        {
            updateDescription.HealthCheckStableDuration = TimeSpan.FromSeconds(this.HealthCheckStableDurationSec.Value);
        }

        if (this.HealthCheckRetryTimeoutSec.HasValue)
        {
            updateDescription.HealthCheckRetryTimeout = TimeSpan.FromSeconds(this.HealthCheckRetryTimeoutSec.Value);
        }

        if (this.UpgradeTimeoutSec.HasValue)
        {
            updateDescription.UpgradeTimeout = TimeSpan.FromSeconds(this.UpgradeTimeoutSec.Value);
        }

        if (this.UpgradeDomainTimeoutSec.HasValue)
        {
            updateDescription.UpgradeDomainTimeout = TimeSpan.FromSeconds(this.UpgradeDomainTimeoutSec.Value);
        }

        if (this.IsUpdatingHealthPolicy())
        {
            // Updating an incomplete health policy silently resets the
            // unspecified fields to defaults, so unless -Force was given or
            // the supplied policy is complete, prompt for confirmation.
            // ShouldProcess also handles -WhatIf; returning here aborts the
            // whole update without calling the cluster.
            if (!this.Force &&
                !this.IsHealthPolicyComplete() &&
                !this.ShouldProcess(
                    //// description shows up for "-WhatIf"
                    string.Format(
                        CultureInfo.InvariantCulture,
                        "{0} {1}",
                        StringResources.PowerShell_HealthPolicyUpdateCaption,
                        StringResources.PowerShell_ClusterHealthPolicyUpdateWarning),
                    //// warning and caption show up when prompting for confirmation
                    StringResources.PowerShell_ClusterHealthPolicyUpdateWarning,
                    StringResources.PowerShell_HealthPolicyUpdateCaption))
            {
                return;
            }

            // Build the cluster health policy from whichever fields were supplied.
            var healthPolicy = new ClusterHealthPolicy();
            if (this.ConsiderWarningAsError.HasValue)
            {
                healthPolicy.ConsiderWarningAsError = this.ConsiderWarningAsError.Value;
            }

            if (this.MaxPercentUnhealthyApplications.HasValue)
            {
                healthPolicy.MaxPercentUnhealthyApplications = this.MaxPercentUnhealthyApplications.Value;
            }

            if (this.MaxPercentUnhealthyNodes.HasValue)
            {
                healthPolicy.MaxPercentUnhealthyNodes = this.MaxPercentUnhealthyNodes.Value;
            }

            if (this.ApplicationTypeHealthPolicyMap != null)
            {
                foreach (var entry in this.ApplicationTypeHealthPolicyMap)
                {
                    healthPolicy.ApplicationTypeHealthPolicyMap.Add(entry.Key, entry.Value);
                }
            }

            updateDescription.HealthPolicy = healthPolicy;
        }

        if (this.EnableDeltaHealthEvaluation.HasValue)
        {
            updateDescription.EnableDeltaHealthEvaluation = this.EnableDeltaHealthEvaluation.Value;
        }

        if (this.IsUpdatingUpgradeHealthPolicy())
        {
            // Same confirmation gate as above, but for the upgrade (delta)
            // health policy.
            if (!this.Force &&
                !this.IsUpgradeHealthPolicyComplete() &&
                !this.ShouldProcess(
                    //// description shows up for "-WhatIf"
                    string.Format(
                        CultureInfo.InvariantCulture,
                        "{0} {1}",
                        StringResources.PowerShell_HealthPolicyUpdateCaption,
                        StringResources.PowerShell_ClusterUpgradeHealthPolicyUpdateWarning),
                    //// warning and caption show up when prompting for confirmation
                    StringResources.PowerShell_ClusterUpgradeHealthPolicyUpdateWarning,
                    StringResources.PowerShell_HealthPolicyUpdateCaption))
            {
                return;
            }

            var upgradeHealthPolicy = new ClusterUpgradeHealthPolicy();
            if (this.MaxPercentDeltaUnhealthyNodes.HasValue)
            {
                upgradeHealthPolicy.MaxPercentDeltaUnhealthyNodes = this.MaxPercentDeltaUnhealthyNodes.Value;
            }

            if (this.MaxPercentUpgradeDomainDeltaUnhealthyNodes.HasValue)
            {
                upgradeHealthPolicy.MaxPercentUpgradeDomainDeltaUnhealthyNodes = this.MaxPercentUpgradeDomainDeltaUnhealthyNodes.Value;
            }

            updateDescription.UpgradeHealthPolicy = upgradeHealthPolicy;
        }

        // NOTE(review): assigned unconditionally (may be null), unlike other
        // fields which are only set when supplied — confirm this is intended.
        updateDescription.ApplicationHealthPolicyMap = this.ApplicationHealthPolicyMap;

        this.UpdateClusterUpgrade(updateDescription);
    }
    catch (Exception exception)
    {
        // Surface any failure as a terminating cmdlet error with the
        // update-cluster-upgrade error id.
        this.ThrowTerminatingError(
            exception,
            Constants.UpdateClusterUpgradeErrorId,
            null);
    }
}
/// <summary>
/// Initializes Chaos run parameters, seeding the action generator with
/// default fault weights and then applying any overrides carried in
/// <paramref name="context"/> via <c>ApplyUpdatesFromContextIfAvailable</c>.
/// </summary>
/// <param name="maxClusterStabilizationTimeout">Maximum time to wait for the cluster to stabilize.</param>
/// <param name="maxConcurrentFaults">Maximum number of faults injected concurrently.</param>
/// <param name="enableMoveReplicaFaults">Whether move-primary/move-secondary faults are generated (weight 100 when true, 0 when false).</param>
/// <param name="timeToRun">Total time Chaos is allowed to run.</param>
/// <param name="context">Optional key/value bag stored on the instance; may carry overrides applied at the end.</param>
/// <param name="disableStartStopNodeFaults">When true, node faults are restricted to restart-node (start/stop-node weight set to 0).</param>
/// <param name="waitTimeBetweenIterations">Pause between Chaos iterations.</param>
/// <param name="waitTimeBetweenFaults">Pause between consecutive faults within one iteration.</param>
/// <param name="clusterHealthPolicy">Health policy used between iterations; falls back to ChaosConstants.ClusterHealthPolicyDefault when null.</param>
internal ChaosParameters(
    TimeSpan maxClusterStabilizationTimeout,
    long maxConcurrentFaults,
    bool enableMoveReplicaFaults,
    TimeSpan timeToRun,
    Dictionary<string, string> context,
    bool disableStartStopNodeFaults,
    TimeSpan waitTimeBetweenIterations,
    TimeSpan waitTimeBetweenFaults,
    ClusterHealthPolicy clusterHealthPolicy)
{
    this.ValidateArguments(maxConcurrentFaults, context, maxClusterStabilizationTimeout, timeToRun, waitTimeBetweenIterations, waitTimeBetweenFaults);

    this.MaxClusterStabilizationTimeout = maxClusterStabilizationTimeout;

    // BUGFIX: removed dead store of ChaosConstants.WaitTimeBetweenIterationsDefault
    // to WaitTimeBetweenIterations here — it was unconditionally overwritten by
    // the caller-supplied value below.

    this.ActionGeneratorParameters = new ActionGeneratorParameters { MaxConcurrentFaults = maxConcurrentFaults };

    // Set default values for action generator parameters.
    this.EnableMoveReplicaFaults = enableMoveReplicaFaults;

    if (disableStartStopNodeFaults)
    {
        // Disable start/stop-node faults: shift all node-fault weight onto restart-node.
        this.ActionGeneratorParameters.NodeFaultActionsParameters.RestartNodeFaultWeight = 1.0;
        this.ActionGeneratorParameters.NodeFaultActionsParameters.StartStopNodeFaultWeight = 0;
    }

    // Default 40/60 split between node-level and service-level faults.
    this.ActionGeneratorParameters.NodeFaultActionWeight = 40.0;
    this.ActionGeneratorParameters.ServiceFaultActionWeight = 60.0;

    this.ActionGeneratorParameters.ServiceFaultActionsParameters.RemoveReplicaFaultWeight = 100.0;
    this.ActionGeneratorParameters.ServiceFaultActionsParameters.RestartReplicaFaultWeight = 100.0;
    this.ActionGeneratorParameters.ServiceFaultActionsParameters.RestartCodePackageFaultWeight = 100.0;

    if (enableMoveReplicaFaults)
    {
        this.ActionGeneratorParameters.ServiceFaultActionsParameters.MovePrimaryFaultWeight = 100.0;
        this.ActionGeneratorParameters.ServiceFaultActionsParameters.MoveSecondaryFaultWeight = 100.0;
    }
    else
    {
        this.ActionGeneratorParameters.ServiceFaultActionsParameters.MovePrimaryFaultWeight = 0.0;
        this.ActionGeneratorParameters.ServiceFaultActionsParameters.MoveSecondaryFaultWeight = 0.0;
    }

    this.MaxConcurrentFaults = maxConcurrentFaults;
    this.TimeToRun = timeToRun;
    this.WaitTimeBetweenIterations = waitTimeBetweenIterations;
    this.WaitTimeBetweenFaults = waitTimeBetweenFaults;
    this.requestTimeoutFactor = ChaosConstants.RequestTimeoutFactorDefault;
    this.OperationTimeout = ChaosConstants.OperationTimeoutDefault;
    this.populated = true;
    this.Context = context;
    this.ClusterHealthPolicy = clusterHealthPolicy ?? ChaosConstants.ClusterHealthPolicyDefault;

    // Context entries may override the defaults set above.
    this.ApplyUpdatesFromContextIfAvailable(context);
}
/// <summary>
/// Starts Chaos against the hard-coded test cluster, lets it run for
/// <paramref name="minsTorun"/> minutes, and polls the Chaos report until a
/// StoppedEvent appears, returning that final report.
/// </summary>
/// <param name="minsTorun">How long, in minutes, Chaos is allowed to run.</param>
/// <returns>The ChaosReport whose history ends with a StoppedEvent.</returns>
public static async Task<ChaosReport> RunTest(int minsTorun)
{
    // NOTE(review): thumbprints/endpoint are hard-coded test-cluster values;
    // parameterize or move to configuration before reuse.
    string clientCertThumb = "87b906f84a251c015d44ea188e2eff322d1c16f8";
    string serverCertThumb = "87b906f84a251c015d44ea188e2eff322d1c16f8";
    string commonName = "memoryleak";
    string connection = "sf-memoryleak.eastus.cloudapp.azure.com:19000";

    var xc = GetCredentials(clientCertThumb, serverCertThumb, commonName);

    using (var client = new FabricClient(xc, connection))
    {
        var startTimeUtc = DateTime.UtcNow;
        var maxClusterStabilizationTimeout = TimeSpan.FromSeconds(30.0);
        var timeToRun = TimeSpan.FromMinutes(minsTorun);

        // The recommendation is to start with a value of 2 or 3 and to exercise caution while moving up.
        var maxConcurrentFaults = 3;

        var startContext = new Dictionary<string, string> { { "ReasonForStart", "Testing" } };

        // Time-separation (in seconds) between two consecutive iterations of Chaos.
        // The larger the value, the lower the fault injection rate.
        var waitTimeBetweenIterations = TimeSpan.FromSeconds(1);

        // Wait time (in seconds) between consecutive faults within a single iteration.
        // The larger the value, the lower the overlapping between faults and the simpler
        // the sequence of state transitions that the cluster goes through.
        var waitTimeBetweenFaults = TimeSpan.FromSeconds(1);

        // Passed-in cluster health policy is used to validate health of the cluster
        // in between Chaos iterations.
        var clusterHealthPolicy = new ClusterHealthPolicy
        {
            ConsiderWarningAsError = false,
            MaxPercentUnhealthyApplications = 100,
            MaxPercentUnhealthyNodes = 100
        };

        var nodetypeInclusionList = new List<string> { "nt2vm", "nt3vm" };

        // List of cluster entities to target for Chaos faults.
        var chaosTargetFilter = new ChaosTargetFilter
        {
            NodeTypeInclusionList = nodetypeInclusionList,
        };

        var parameters = new ChaosParameters(
            maxClusterStabilizationTimeout,
            maxConcurrentFaults,
            true, /* EnableMoveReplicaFault */
            timeToRun,
            startContext,
            waitTimeBetweenIterations,
            waitTimeBetweenFaults,
            clusterHealthPolicy)
        {
            ChaosTargetFilter = chaosTargetFilter
        };

        try
        {
            await client.TestManager.StartChaosAsync(parameters);
        }
        catch (FabricChaosAlreadyRunningException)
        {
            Console.WriteLine("An instance of Chaos is already running in the cluster.");
            await client.TestManager.StopChaosAsync();
            throw new Exception("Chaos test already running");
        }

        var filter = new ChaosReportFilter(startTimeUtc, DateTime.MaxValue);
        var eventSet = new HashSet<ChaosEvent>(new ChaosEventComparer());
        string continuationToken = null;

        while (true)
        {
            ChaosReport report;
            try
            {
                // Page through the report using the continuation token from
                // the previous response.
                report = string.IsNullOrEmpty(continuationToken)
                    ? await client.TestManager.GetChaosReportAsync(filter)
                    : await client.TestManager.GetChaosReportAsync(continuationToken);
            }
            catch (Exception e)
            {
                if (e is FabricTransientException)
                {
                    Console.WriteLine("A transient exception happened: '{0}'", e);
                }
                else if (e is TimeoutException)
                {
                    Console.WriteLine("A timeout exception happened: '{0}'", e);
                }
                else
                {
                    throw;
                }

                // BUGFIX: await the delay instead of blocking the thread with
                // Task.Delay(...).GetAwaiter().GetResult() inside an async method.
                await Task.Delay(TimeSpan.FromSeconds(1.0));
                continue;
            }

            continuationToken = report.ContinuationToken;

            // Deduplicate events across pages (ChaosEventComparer defines identity).
            foreach (var chaosEvent in report.History)
            {
                eventSet.Add(chaosEvent);
            }

            // When Chaos stops, a StoppedEvent is created.
            // If a StoppedEvent is found, exit the loop.
            var lastEvent = report.History.LastOrDefault();
            if (lastEvent is StoppedEvent)
            {
                return report;
            }

            // BUGFIX: await instead of synchronously blocking between polls.
            await Task.Delay(TimeSpan.FromSeconds(1.0));
        }
    }
}
/// <inheritdoc />
/// <summary>
/// Deserializes a ChaosParameters JSON object. Every field starts at its
/// documented default and is overwritten only when the corresponding JSON
/// property is present in the payload.
/// </summary>
public override object ReadJson(
    JsonReader reader,
    Type objectType,
    object existingValue,
    JsonSerializer serializer)
{
    ThrowIf.Null(reader, "reader");

    if (reader.TokenType == JsonToken.Null)
    {
        return null;
    }

    // Defaults used whenever the payload omits a property.
    var stabilizationTimeout = ChaosConstants.DefaultClusterStabilizationTimeout;
    var iterationDelay = ChaosConstants.WaitTimeBetweenIterationsDefault;
    var faultDelay = ChaosConstants.WaitTimeBetweenFaultsDefault;
    var runDuration = TimeSpan.FromSeconds(uint.MaxValue);
    long concurrentFaults = ChaosConstants.MaxConcurrentFaultsDefault;
    bool moveFaultsEnabled = false;
    var clusterPolicy = new ClusterHealthPolicy();
    Dictionary<string, string> contextBag = null;
    ChaosTargetFilter targetFilter = null;

    var root = JObject.Load(reader);

    this.ReadTimePeriod(root, JsonSerializerImplConstants.MaxClusterStabilizationTimeoutInSeconds, ref stabilizationTimeout);

    var concurrentFaultsToken = root[JsonSerializerImplConstants.MaxConcurrentFaults];
    if (concurrentFaultsToken != null)
    {
        concurrentFaults = concurrentFaultsToken.Value<long>();
    }

    this.ReadTimePeriod(root, JsonSerializerImplConstants.WaitTimeBetweenIterationsInSeconds, ref iterationDelay);
    this.ReadTimePeriod(root, JsonSerializerImplConstants.WaitTimeBetweenFaultsInSeconds, ref faultDelay);
    this.ReadTimePeriod(root, JsonSerializerImplConstants.TimeToRunInSeconds, ref runDuration);

    var moveFaultsToken = root[JsonSerializerImplConstants.EnableMoveReplicaFaults];
    if (moveFaultsToken != null)
    {
        moveFaultsEnabled = moveFaultsToken.Value<bool>();
    }

    var policyToken = root[JsonSerializerImplConstants.ClusterHealthPolicy];
    if (policyToken != null)
    {
        clusterPolicy = policyToken.ToObject<ClusterHealthPolicy>(serializer);
    }

    // Context is nested: the dictionary lives under the "Map" property.
    var contextToken = root[JsonSerializerImplConstants.Context];
    if (contextToken != null)
    {
        var mapToken = contextToken[JsonSerializerImplConstants.Map];
        if (mapToken != null)
        {
            contextBag = mapToken.ToObject<Dictionary<string, string>>(serializer);
        }
    }

    // The target filter uses a dedicated serializer that ignores null values.
    var filterToken = root[JsonSerializerImplConstants.ChaosTargetFilter];
    if (filterToken != null)
    {
        targetFilter = filterToken.ToObject<ChaosTargetFilter>(new JsonSerializer { NullValueHandling = NullValueHandling.Ignore });
    }

    return new ChaosParameters(
        stabilizationTimeout,
        concurrentFaults,
        moveFaultsEnabled,
        runDuration,
        contextBag,
        iterationDelay,
        faultDelay,
        clusterPolicy)
    {
        ChaosTargetFilter = targetFilter
    };
}