void Start()
{
    m_Time = 0;
    m_ScaleStatus = ScaleStatus.Revert;
    m_ScaleFormer = transform.GetComponent<RectTransform>().localScale;
    //LogManager.Log(m_ScaleStatus);
}
void Update()
{
    // Rotate m_RotationSpeed degrees each frame.
    transform.eulerAngles += new Vector3(0, 0, m_RotationSpeed);

    if (m_ScaleSize != 1)
    {
        m_Time += Time.deltaTime;

        // Toggle between expanding to m_ScaleSize and reverting to the original scale
        // once per m_ScaleTimeLength interval.
        if (m_Time > m_ScaleTimeLength && m_ScaleStatus == ScaleStatus.Revert)
        {
            transform.DOScale(m_ScaleSize * m_ScaleFormer, m_ScaleTimeLength);
            m_Time -= m_ScaleTimeLength;
            m_ScaleStatus = ScaleStatus.Expansion;
        }
        else if (m_Time > m_ScaleTimeLength && m_ScaleStatus == ScaleStatus.Expansion)
        {
            transform.DOScale(m_ScaleFormer, m_ScaleTimeLength);
            m_Time -= m_ScaleTimeLength;
            m_ScaleStatus = ScaleStatus.Revert;
        }
    }
}
/// <summary>
/// Parses a raw HID scale report into a weight (in pounds) and a scale status.
/// </summary>
/// <param name="data">Raw HID input report from the scale.</param>
/// <returns>The decoded weight and status.</returns>
public WeightResult HelpGetWeight(byte[] data)
{
    double weight = 0;
    ScaleStatus status = ScaleStatus.UNKNOWN;

    // Byte 0 == Report ID
    // Byte 1 == Scale Status (1 == Fault, 2 == Stable @ 0, 3 == In Motion, 4 == Stable,
    //           5 == Under 0, 6 == Over Weight, 7 == Requires Calibration, 8 == Requires Re-Zeroing)
    // Byte 2 == Weight Unit
    // Byte 3 == Data Scaling (signed decimal-placement exponent, 10^-127 to 10^127)
    // Byte 4 == Weight LSB
    // Byte 5 == Weight MSB

    // Byte 3: don't use Convert, as it would throw an exception for values above 127.
    sbyte signedByte = (sbyte)data[3];
    double scale = Math.Pow(10, signedByte);

    weight = scale * (data[4] + (256 * data[5]));

    // Normalize to pounds based on the unit code in byte 2.
    switch (data[2])
    {
        case 3: // Kilograms
            weight *= 2.2;
            break;
        case 11: // Ounces
            weight *= 0.0625; // 1 oz == 1/16 lb
            break;
        case 12: // Pounds
            // Already in pounds, do nothing.
            break;
    }

    status = (ScaleStatus)data[1];
    if (data[1] > 8)
    {
        status = ScaleStatus.UNKNOWN;
    }

    return new WeightResult(weight, status);
}
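The method above casts byte 1 directly to a ScaleStatus value, so the enum's numeric values have to line up with the status codes listed in the comments. The actual enum is not included in these snippets; a minimal sketch of what it might look like, with hypothetical member names taken from those comments, is:

// Hypothetical enum layout: values 1-8 mirror the HID status codes above,
// plus an UNKNOWN member for anything out of range. Member names are illustrative.
public enum ScaleStatus
{
    UNKNOWN = 0,
    Fault = 1,
    StableAtZero = 2,
    InMotion = 3,
    Stable = 4,
    UnderZero = 5,
    OverWeight = 6,
    RequiresCalibration = 7,
    RequiresReZeroing = 8
}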
private ScaleStatus GetScaleStatusCore(int workerCount, KafkaTriggerMetrics[] metrics)
{
    var status = new ScaleStatus
    {
        Vote = ScaleVote.None,
    };

    const int NumberOfSamplesToConsider = 5;

    // At least 5 samples are required to make a scale decision for the rest of the checks.
    if (metrics == null || metrics.Length < NumberOfSamplesToConsider)
    {
        return status;
    }

    var lastMetrics = metrics.Last();
    long totalLag = lastMetrics.TotalLag;
    long partitionCount = lastMetrics.PartitionCount;
    long lagThreshold = this.lagThreshold;

    // We shouldn't assign more workers than there are partitions.
    // This check is first, because it is independent of load or number of samples.
    if (partitionCount > 0 && partitionCount < workerCount)
    {
        status.Vote = ScaleVote.ScaleIn;

        if (this.logger.IsEnabled(LogLevel.Information))
        {
            this.logger.LogInformation("WorkerCount ({workerCount}) > PartitionCount ({partitionCount}). For topic {topicName}, for consumer group {consumerGroup}.", workerCount, partitionCount, this.topicName, this.consumerGroup);
            this.logger.LogInformation("Number of instances ({workerCount}) is too high relative to number of partitions ({partitionCount}). For topic {topicName}, for consumer group {consumerGroup}.", workerCount, partitionCount, this.topicName, this.consumerGroup);
        }

        return status;
    }

    // Check to see if the Kafka consumer has been empty for a while. Only if all metrics samples are empty do we scale in.
    bool partitionIsIdle = metrics.All(p => p.TotalLag == 0);
    if (partitionIsIdle)
    {
        status.Vote = ScaleVote.ScaleIn;
        if (this.logger.IsEnabled(LogLevel.Information))
        {
            this.logger.LogInformation("Topic '{topicName}', for consumer group '{consumerGroup}', is idle.", this.topicName, this.consumerGroup);
        }

        return status;
    }

    // Maintain a minimum ratio of one worker per lagThreshold (default 1,000) unprocessed messages.
    if (totalLag > workerCount * lagThreshold)
    {
        if (workerCount < partitionCount)
        {
            status.Vote = ScaleVote.ScaleOut;

            if (this.logger.IsEnabled(LogLevel.Information))
            {
                this.logger.LogInformation("Total lag ({totalLag}) exceeds the number of instances ({workerCount}) times the lag threshold. Scale out, for topic {topicName}, for consumer group {consumerGroup}.", totalLag, workerCount, topicName, consumerGroup);
            }
        }

        return status;
    }

    // Samples are in chronological order. Check for a continuous increase in unprocessed message count.
    // If detected, this results in an automatic scale out for the site container.
    if (metrics[0].TotalLag > 0)
    {
        if (workerCount < partitionCount)
        {
            bool queueLengthIncreasing = IsTrueForLast(
                metrics,
                NumberOfSamplesToConsider,
                (prev, next) => prev.TotalLag < next.TotalLag) && metrics[0].TotalLag > 0;

            if (queueLengthIncreasing)
            {
                status.Vote = ScaleVote.ScaleOut;

                if (this.logger.IsEnabled(LogLevel.Information))
                {
                    this.logger.LogInformation("Total lag ({totalLag}) is increasing with {workerCount} instances. Scale out, for topic {topicName}, for consumer group {consumerGroup}.", totalLag, workerCount, topicName, consumerGroup);
                }

                return status;
            }
        }
    }

    if (workerCount > 1)
    {
        bool queueLengthDecreasing = IsTrueForLast(
            metrics,
            NumberOfSamplesToConsider,
            (prev, next) => prev.TotalLag > next.TotalLag);

        if (queueLengthDecreasing)
        {
            // Only vote to scale in if the lag per remaining worker stays under the threshold.
            // Example: with 4 workers and a threshold of 1,000, scale in only if totalLag <= 2999 (2999 / 3 < 1000).
            var proposedWorkerCount = workerCount - 1;
            var proposedLagPerWorker = totalLag / proposedWorkerCount;

            if (proposedLagPerWorker < lagThreshold)
            {
                status.Vote = ScaleVote.ScaleIn;

                if (this.logger.IsEnabled(LogLevel.Information))
                {
                    this.logger.LogInformation("Total lag is decreasing for topic {topicName}, for consumer group {consumerGroup}.", this.topicName, this.consumerGroup);
                }
            }
        }
    }

    return status;
}
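Most of the scale monitors in this section rely on an IsTrueForLast / IsTrueForLastN helper whose implementation is not shown. Based on how it is called (metrics array, sample count, pairwise predicate), a minimal sketch of the assumed behavior looks like the following; the generic signature is an assumption, not the library's actual declaration:

// Assumed helper shape: applies a pairwise predicate to the last 'count' samples
// and returns true only if it holds for every adjacent pair.
private static bool IsTrueForLast<T>(IList<T> samples, int count, Func<T, T, bool> predicate)
{
    // Walk the last 'count' samples in chronological order, comparing neighbors.
    for (int i = samples.Count - count; i < samples.Count - 1; i++)
    {
        if (!predicate(samples[i], samples[i + 1]))
        {
            return false;
        }
    }

    return true;
}

With this shape, a call such as IsTrueForLast(metrics, 5, (prev, next) => prev.TotalLag < next.TotalLag) returns true only when lag has increased across all of the last five samples.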
private ScaleStatus GetScaleStatusCore(int workerCount, QueueTriggerMetrics[] metrics)
{
    ScaleStatus status = new ScaleStatus
    {
        Vote = ScaleVote.None
    };

    // Verify we have enough samples to make a scale decision.
    if (metrics == null || (metrics.Length < NumberOfSamplesToConsider))
    {
        return status;
    }

    // Maintain a minimum ratio of 1 worker per 1,000 queue messages.
    long latestQueueLength = metrics.Last().QueueLength;
    if (latestQueueLength > workerCount * 1000)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"QueueLength ({latestQueueLength}) > workerCount ({workerCount}) * 1,000");
        _logger.LogInformation($"Length of queue ({_queue.Name}, {latestQueueLength}) is too high relative to the number of instances ({workerCount}).");
        return status;
    }

    // Check to see if the queue has been empty for a while.
    bool queueIsIdle = metrics.All(p => p.QueueLength == 0);
    if (queueIsIdle)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Queue '{_queue.Name}' is idle");
        return status;
    }

    // Samples are in chronological order. Check for a continuous increase in time or length.
    // If detected, this results in an automatic scale out.
    if (metrics[0].QueueLength > 0)
    {
        bool queueLengthIncreasing = IsTrueForLastN(
            metrics,
            NumberOfSamplesToConsider,
            (prev, next) => prev.QueueLength < next.QueueLength);
        if (queueLengthIncreasing)
        {
            status.Vote = ScaleVote.ScaleOut;
            _logger.LogInformation($"Queue length is increasing for '{_queue.Name}'");
            return status;
        }
    }

    if (metrics[0].QueueTime > TimeSpan.Zero && metrics[0].QueueTime < metrics[NumberOfSamplesToConsider - 1].QueueTime)
    {
        bool queueTimeIncreasing = IsTrueForLastN(
            metrics,
            NumberOfSamplesToConsider,
            (prev, next) => prev.QueueTime <= next.QueueTime);
        if (queueTimeIncreasing)
        {
            status.Vote = ScaleVote.ScaleOut;
            _logger.LogInformation($"Queue time is increasing for '{_queue.Name}'");
            return status;
        }
    }

    bool queueLengthDecreasing = IsTrueForLastN(
        metrics,
        NumberOfSamplesToConsider,
        (prev, next) => prev.QueueLength > next.QueueLength);
    if (queueLengthDecreasing)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Queue length is decreasing for '{_queue.Name}'");
        return status;
    }

    bool queueTimeDecreasing = IsTrueForLastN(
        metrics,
        NumberOfSamplesToConsider,
        (prev, next) => prev.QueueTime > next.QueueTime);
    if (queueTimeDecreasing)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Queue time is decreasing for '{_queue.Name}'");
        return status;
    }

    _logger.LogInformation($"Queue '{_queue.Name}' is steady");
    return status;
}
private ScaleStatus GetScaleStatusCore(int workerCount, ServiceBusTriggerMetrics[] metrics)
{
    ScaleStatus status = new ScaleStatus
    {
        Vote = ScaleVote.None
    };

    const int NumberOfSamplesToConsider = 5;

    // Unable to determine the correct vote with no metrics.
    if (metrics == null || metrics.Length == 0)
    {
        return status;
    }

    // We shouldn't assign more workers than there are partitions.
    // This check is first, because it is independent of load or number of samples.
    int partitionCount = metrics.Last().PartitionCount;
    if (partitionCount > 0 && partitionCount < workerCount)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"WorkerCount ({workerCount}) > PartitionCount ({partitionCount}).");
        _logger.LogInformation($"Number of instances ({workerCount}) is too high relative to number " +
            $"of partitions for Service Bus entity ({_entityPath}, {partitionCount}).");
        return status;
    }

    // At least 5 samples are required to make a scale decision for the rest of the checks.
    if (metrics.Length < NumberOfSamplesToConsider)
    {
        return status;
    }

    // Maintain a minimum ratio of 1 worker per 1,000 messages.
    long latestMessageCount = metrics.Last().MessageCount;
    if (latestMessageCount > workerCount * 1000)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"MessageCount ({latestMessageCount}) > WorkerCount ({workerCount}) * 1,000.");
        _logger.LogInformation($"Message count for Service Bus Entity ({_entityPath}, {latestMessageCount}) " +
            $"is too high relative to the number of instances ({workerCount}).");
        return status;
    }

    // Check to see if the queue/topic has been empty for a while. Only if all metrics samples are empty do we scale down.
    bool isIdle = metrics.All(m => m.MessageCount == 0);
    if (isIdle)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"'{_entityPath}' is idle.");
        return status;
    }

    // Samples are in chronological order. Check for a continuous increase in message count.
    // If detected, this results in an automatic scale out for the site container.
    if (metrics[0].MessageCount > 0)
    {
        bool messageCountIncreasing = IsTrueForLastN(
            metrics,
            NumberOfSamplesToConsider,
            (prev, next) => prev.MessageCount < next.MessageCount) && metrics[0].MessageCount > 0;
        if (messageCountIncreasing)
        {
            status.Vote = ScaleVote.ScaleOut;
            _logger.LogInformation($"Message count is increasing for '{_entityPath}'.");
            return status;
        }
    }

    if (metrics[0].QueueTime > TimeSpan.Zero && metrics[0].QueueTime < metrics[NumberOfSamplesToConsider - 1].QueueTime)
    {
        bool queueTimeIncreasing = IsTrueForLastN(
            metrics,
            NumberOfSamplesToConsider,
            (prev, next) => prev.QueueTime <= next.QueueTime);
        if (queueTimeIncreasing)
        {
            status.Vote = ScaleVote.ScaleOut;
            _logger.LogInformation($"Queue time is increasing for '{_entityPath}'.");
            return status;
        }
    }

    bool messageCountDecreasing = IsTrueForLastN(
        metrics,
        NumberOfSamplesToConsider,
        (prev, next) => prev.MessageCount > next.MessageCount);
    if (messageCountDecreasing)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Message count is decreasing for '{_entityPath}'.");
        return status;
    }

    bool queueTimeDecreasing = IsTrueForLastN(
        metrics,
        NumberOfSamplesToConsider,
        (prev, next) => prev.QueueTime > next.QueueTime);
    if (queueTimeDecreasing)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Queue time is decreasing for '{_entityPath}'.");
        return status;
    }

    _logger.LogInformation($"Service Bus entity '{_entityPath}' is steady.");
    return status;
}
private ScaleStatus GetScaleStatusCore(int workerCount, RabbitMQTriggerMetrics[] metrics)
{
    ScaleStatus status = new ScaleStatus
    {
        Vote = ScaleVote.None,
    };

    // TODO: Make the below two ints configurable.
    int numberOfSamplesToConsider = 5;
    int targetQueueLength = 1000;

    if (metrics == null || metrics.Length < numberOfSamplesToConsider)
    {
        return status;
    }

    long latestQueueLength = metrics.Last().QueueLength;

    if (latestQueueLength > workerCount * targetQueueLength)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"QueueLength ({latestQueueLength}) > workerCount ({workerCount}) * 1000");
        _logger.LogInformation($"Length of queue ({_queueName}, {latestQueueLength}) is too high relative to the number of instances ({workerCount}).");
        return status;
    }

    bool queueIsIdle = metrics.All(p => p.QueueLength == 0);

    if (queueIsIdle)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Queue '{_queueName}' is idle");
        return status;
    }

    bool queueLengthIncreasing = IsTrueForLast(
        metrics,
        numberOfSamplesToConsider,
        (prev, next) => prev.QueueLength < next.QueueLength) && metrics[0].QueueLength > 0;

    if (queueLengthIncreasing)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"Queue length is increasing for '{_queueName}'");
        return status;
    }

    bool queueLengthDecreasing = IsTrueForLast(
        metrics,
        numberOfSamplesToConsider,
        (prev, next) => prev.QueueLength > next.QueueLength);

    if (queueLengthDecreasing)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Queue length is decreasing for '{_queueName}'");
        // Return here so the "steady" message below isn't also logged for a scale-in vote.
        return status;
    }

    _logger.LogInformation($"Queue '{_queueName}' is steady");
    return status;
}
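The TODO in the RabbitMQ monitor notes that the sample count and target queue length are hard-coded. One way this could be wired up, sketched here against Microsoft.Extensions.Configuration with hypothetical setting names and a hypothetical constructor (neither is part of the actual extension), is to bind the two values once when the monitor is created:

// Illustrative constructor wiring only; "RabbitMQ:SamplesToConsider" and
// "RabbitMQ:TargetQueueLength" are made-up keys, and the defaults match the hard-coded values.
public RabbitMQScaleMonitor(IConfiguration configuration, ILogger logger, string queueName)
{
    _logger = logger;
    _queueName = queueName;

    _numberOfSamplesToConsider = configuration.GetValue<int>("RabbitMQ:SamplesToConsider", 5);
    _targetQueueLength = configuration.GetValue<int>("RabbitMQ:TargetQueueLength", 1000);
}

GetScaleStatusCore would then read the two fields instead of declaring local constants.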
private ScaleStatus GetScaleStatusCore(int workerCount, EventHubsTriggerMetrics[] metrics)
{
    ScaleStatus status = new ScaleStatus
    {
        Vote = ScaleVote.None
    };

    const int NumberOfSamplesToConsider = 5;

    // Unable to determine the correct vote with no metrics.
    if (metrics == null || metrics.Length == 0)
    {
        return status;
    }

    // We shouldn't assign more workers than there are partitions.
    // This check is first, because it is independent of load or number of samples.
    int partitionCount = metrics.Last().PartitionCount;
    if (partitionCount > 0 && partitionCount < workerCount)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"WorkerCount ({workerCount}) > PartitionCount ({partitionCount}).");
        _logger.LogInformation($"Number of instances ({workerCount}) is too high relative to number " +
            $"of partitions ({partitionCount}) for EventHubs entity ({_client.EventHubName}, {_client.ConsumerGroup}).");
        return status;
    }

    // At least 5 samples are required to make a scale decision for the rest of the checks.
    if (metrics.Length < NumberOfSamplesToConsider)
    {
        return status;
    }

    // Maintain a minimum ratio of 1 worker per 1,000 unprocessed events.
    long latestEventCount = metrics.Last().EventCount;
    if (latestEventCount > workerCount * 1000)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"EventCount ({latestEventCount}) > WorkerCount ({workerCount}) * 1,000.");
        _logger.LogInformation($"Event count ({latestEventCount}) for EventHubs entity ({_client.EventHubName}, {_client.ConsumerGroup}) " +
            $"is too high relative to the number of instances ({workerCount}).");
        return status;
    }

    // Check to see if the EventHub has been empty for a while. Only if all metrics samples are empty do we scale down.
    bool isIdle = metrics.All(m => m.EventCount == 0);
    if (isIdle)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"'{_client.EventHubName}' is idle.");
        return status;
    }

    // Samples are in chronological order. Check for a continuous increase in unprocessed event count.
    // If detected, this results in an automatic scale out for the site container.
    if (metrics[0].EventCount > 0)
    {
        bool eventCountIncreasing = IsTrueForLastN(
            metrics,
            NumberOfSamplesToConsider,
            (prev, next) => prev.EventCount < next.EventCount);
        if (eventCountIncreasing)
        {
            status.Vote = ScaleVote.ScaleOut;
            _logger.LogInformation($"Event count is increasing for '{_client.EventHubName}'.");
            return status;
        }
    }

    bool eventCountDecreasing = IsTrueForLastN(
        metrics,
        NumberOfSamplesToConsider,
        (prev, next) => prev.EventCount > next.EventCount);
    if (eventCountDecreasing)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Event count is decreasing for '{_client.EventHubName}'.");
        return status;
    }

    _logger.LogInformation($"EventHubs entity '{_client.EventHubName}' is steady.");
    return status;
}
private ScaleStatus GetScaleStatusCore(int workerCount, DurableTaskTriggerMetrics[] metrics)
{
    var scaleStatus = new ScaleStatus() { Vote = ScaleVote.None };

    if (metrics == null)
    {
        return scaleStatus;
    }

    // Rehydrate the serialized trigger metrics into PerformanceHeartbeat objects.
    var heartbeats = new PerformanceHeartbeat[metrics.Length];
    for (int i = 0; i < metrics.Length; ++i)
    {
        TimeSpan workItemQueueLatency;
        bool parseResult = TimeSpan.TryParse(metrics[i].WorkItemQueueLatency, out workItemQueueLatency);

        heartbeats[i] = new PerformanceHeartbeat()
        {
            PartitionCount = metrics[i].PartitionCount,
            WorkItemQueueLatency = parseResult ? workItemQueueLatency : TimeSpan.FromMilliseconds(0),
            WorkItemQueueLength = metrics[i].WorkItemQueueLength,
        };

        if (metrics[i].ControlQueueLengths == null)
        {
            heartbeats[i].ControlQueueLengths = new List<int>();
        }
        else
        {
            heartbeats[i].ControlQueueLengths = JsonConvert.DeserializeObject<IReadOnlyList<int>>(metrics[i].ControlQueueLengths);
        }

        if (metrics[i].ControlQueueLatencies == null)
        {
            heartbeats[i].ControlQueueLatencies = new List<TimeSpan>();
        }
        else
        {
            heartbeats[i].ControlQueueLatencies = JsonConvert.DeserializeObject<IReadOnlyList<TimeSpan>>(metrics[i].ControlQueueLatencies);
        }
    }

    DisconnectedPerformanceMonitor performanceMonitor = this.GetPerformanceMonitor();
    var scaleRecommendation = performanceMonitor.MakeScaleRecommendation(workerCount, heartbeats.ToArray());

    bool writeToUserLogs = false;
    switch (scaleRecommendation?.Action)
    {
        case ScaleAction.AddWorker:
            scaleStatus.Vote = ScaleVote.ScaleOut;
            writeToUserLogs = true;
            break;
        case ScaleAction.RemoveWorker:
            scaleStatus.Vote = ScaleVote.ScaleIn;
            writeToUserLogs = true;
            break;
        default:
            scaleStatus.Vote = ScaleVote.None;
            break;
    }

    this.traceHelper.ExtensionInformationalEvent(
        this.hubName,
        string.Empty,
        this.functionName.Name,
        $"Durable Functions Trigger Scale Decision: {scaleStatus.Vote}, Reason: {scaleRecommendation?.Reason}",
        writeToUserLogs: writeToUserLogs);

    return scaleStatus;
}
private ScaleStatus GetScaleStatusCore(int workerCount, CosmosDBTriggerMetrics[] metrics)
{
    ScaleStatus status = new ScaleStatus
    {
        Vote = ScaleVote.None
    };

    const int NumberOfSamplesToConsider = 5;

    // Unable to determine the correct vote with no metrics.
    if (metrics == null)
    {
        return status;
    }

    // We shouldn't assign more workers than there are partitions (Cosmos DB, Event Hub, Service Bus Queue/Topic).
    // This check is first, because it is independent of load or number of samples.
    int partitionCount = metrics.Length > 0 ? metrics.Last().PartitionCount : 0;
    if (partitionCount > 0 && partitionCount < workerCount)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"WorkerCount ({workerCount}) > PartitionCount ({partitionCount}).");
        _logger.LogInformation($"Number of instances ({workerCount}) is too high relative to number " +
            $"of partitions for collection ({this._monitorCollection.CollectionName}, {partitionCount}).");
        return status;
    }

    // At least 5 samples are required to make a scale decision for the rest of the checks.
    if (metrics.Length < NumberOfSamplesToConsider)
    {
        return status;
    }

    // Maintain a minimum ratio of 1 worker per 1,000 items of remaining work.
    long latestRemainingWork = metrics.Last().RemainingWork;
    if (latestRemainingWork > workerCount * 1000)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"RemainingWork ({latestRemainingWork}) > WorkerCount ({workerCount}) * 1,000.");
        _logger.LogInformation($"Remaining work for collection ({this._monitorCollection.CollectionName}, {latestRemainingWork}) " +
            $"is too high relative to the number of instances ({workerCount}).");
        return status;
    }

    bool documentsWaiting = metrics.All(m => m.RemainingWork > 0);
    if (documentsWaiting && partitionCount > 0 && partitionCount > workerCount)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"CosmosDB collection '{this._monitorCollection.CollectionName}' has documents waiting to be processed.");
        _logger.LogInformation($"There are {workerCount} instances relative to {partitionCount} partitions.");
        return status;
    }

    // Check to see if the trigger source has been empty for a while. Only if all trigger sources are empty do we scale down.
    bool isIdle = metrics.All(m => m.RemainingWork == 0);
    if (isIdle)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"'{this._monitorCollection.CollectionName}' is idle.");
        return status;
    }

    // Samples are in chronological order. Check for a continuous increase in work remaining.
    // If detected, this results in an automatic scale out for the site container.
    bool remainingWorkIncreasing = IsTrueForLast(
        metrics,
        NumberOfSamplesToConsider,
        (prev, next) => prev.RemainingWork < next.RemainingWork) && metrics[0].RemainingWork > 0;
    if (remainingWorkIncreasing)
    {
        status.Vote = ScaleVote.ScaleOut;
        _logger.LogInformation($"Remaining work is increasing for '{this._monitorCollection.CollectionName}'.");
        return status;
    }

    bool remainingWorkDecreasing = IsTrueForLast(
        metrics,
        NumberOfSamplesToConsider,
        (prev, next) => prev.RemainingWork > next.RemainingWork);
    if (remainingWorkDecreasing)
    {
        status.Vote = ScaleVote.ScaleIn;
        _logger.LogInformation($"Remaining work is decreasing for '{this._monitorCollection.CollectionName}'.");
        return status;
    }

    _logger.LogInformation($"CosmosDB collection '{this._monitorCollection.CollectionName}' is steady.");
    return status;
}
/// <summary>
/// Constructor
/// </summary>
/// <param name="weight">The decoded weight.</param>
/// <param name="status">The scale's reported status.</param>
public WeightResult(double weight, ScaleStatus status) : this()
{
    Weight = weight;
    Status = status;
}
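Tying the scale-reading pieces together, here is an illustrative usage sketch. It assumes a raw HID input report has already been read from the scale elsewhere (the byte layout and the Weight/Status property names come from the snippets above; the sample bytes are made up for the example):

// 'report' stands in for a raw HID input report read from the device.
// Byte layout per HelpGetWeight: report ID, status 4 (Stable), unit 12 (pounds),
// scaling exponent -1 (0xFF as sbyte), weight bytes 0xF5 0x00 = 245 -> 24.5 lb.
byte[] report = { 0x03, 0x04, 0x0C, 0xFF, 0xF5, 0x00 };

WeightResult result = HelpGetWeight(report);
Console.WriteLine($"Weight: {result.Weight} lb, Status: {result.Status}");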