/// <summary>
/// Verifies that each manually specified (from, to) size pair is classified as a downscale.
/// </summary>
public void DownscaleManualExamples(string fromString, string toString)
{
    // ThrowIfFailure surfaces malformed test data immediately instead of yielding a null size.
    var source = RedisClusterSize.TryParse(fromString).ThrowIfFailure();
    var target = RedisClusterSize.TryParse(toString).ThrowIfFailure();

    Assert.True(RedisScalingUtilities.IsDownScale(source, target));
}
/// <summary>
/// Decides whether a scaling move is allowed. At this point, we don't know if Azure Cache for Redis
/// business rules allow scaling from the current to the target size. We just decide whether it is
/// reasonable based on our knowledge of our production workload.
///
/// The autoscaler will figure out how to reach the desired plan.
/// </summary>
private bool IsScalingAllowed(
    RedisClusterSize currentClusterSize,
    RedisClusterSize targetClusterSize,
    ModelContext modelContext)
{
    // WARNING: order matters in the following if statements. Please be careful.

    // Cluster must be able to handle the amount of data we'll give it, with some overhead in case of
    // production issues. Notice we don't introduce a per-shard restriction; reason for this is that
    // the shards distribute keys evenly.
    if (targetClusterSize.ClusterMemorySizeMb < modelContext.MinimumAllowedClusterMemoryMb)
    {
        return false;
    }

    // Cluster must be able to handle the amount of operations needed. Notice we don't introduce a
    // per-shard restriction; reason for this is that the shards distribute keys evenly.
    if (targetClusterSize.EstimatedRequestsPerSecond < modelContext.MinimumAllowedClusterRps)
    {
        return false;
    }

    // Disallow going over the maximum allowed cluster memory.
    // NOTE: we only constrain on the target not being over the allowed size, rather than all nodes in
    // the path. The reason for this is that our ability to reach all nodes is based on being able to
    // scale above any specific memory threshold.
    var maximumMemoryMb = modelContext.MaximumAllowedClusterMemoryMb;
    if (maximumMemoryMb != null && targetClusterSize.ClusterMemorySizeMb > maximumMemoryMb.Value)
    {
        return false;
    }

    // Always allow not doing anything if it's available.
    // NOTE: this is here because in downscale situations we always want to ensure we have the
    // "status quo" action available.
    if (currentClusterSize.Equals(targetClusterSize))
    {
        return true;
    }

    // Disallow downscales that don't improve cost significantly.
    if (_configuration.MinimumCostSavingForDownScaling != null)
    {
        // Negative delta means the target is cheaper; the saving is -costDelta.
        var costDelta = targetClusterSize.MonthlyCostUsd - currentClusterSize.MonthlyCostUsd;
        var isDownScale = RedisScalingUtilities.IsDownScale(currentClusterSize, targetClusterSize);
        if (isDownScale && costDelta <= 0 && -costDelta < _configuration.MinimumCostSavingForDownScaling)
        {
            return false;
        }
    }

    return true;
}
/// <summary>
/// For every known cluster size, any scale-eligible size with the same shard count but a lower tier
/// must be classified as a downscale.
/// </summary>
public void LoweringTierIsDownscaling()
{
    foreach (var source in RedisClusterSize.Instances)
    {
        // Same shard count, strictly lower tier.
        var lowerTierSameShards = source.ScaleEligibleSizes
            .Where(candidate => candidate.Shards == source.Shards
                && RedisScalingUtilities.IsDownScale(source.Tier, candidate.Tier));

        foreach (var target in lowerTierSameShards)
        {
            RedisScalingUtilities.IsDownScale(source, target).Should().BeTrue();
        }
    }
}
/// <summary>
/// For every known cluster size, any scale-eligible size on the same tier with fewer shards
/// must be classified as a downscale.
/// </summary>
public void RemovingShardsIsDownscaling()
{
    foreach (var source in RedisClusterSize.Instances)
    {
        // Same tier, strictly fewer shards.
        var fewerShardsSameTier = source.ScaleEligibleSizes
            .Where(candidate => candidate.Tier.Equals(source.Tier)
                && candidate.Shards < source.Shards);

        foreach (var target in fewerShardsSameTier)
        {
            RedisScalingUtilities.IsDownScale(source, target).Should().BeTrue();
        }
    }
}