public void CheckMetricsAggregationLogic()
{
    MetricInfo metrics = new MetricInfo("metricsName", "unitName");

    LongConcurrentHistogram histogram = new LongConcurrentHistogram(1, Int64.MaxValue, 5);
    histogram.RecordValue((long)10);
    histogram.RecordValue((long)20);
    histogram.RecordValue((long)30);
    histogram.RecordValue((long)40);

    metrics.SetAggregators(histogram);

    Assert.AreEqual(40, metrics.Max);
    Assert.AreEqual(10, metrics.Min);
    Assert.AreEqual(4, metrics.Count);
    Assert.AreEqual(25, metrics.Mean);
    Assert.AreEqual(20, metrics.Percentiles[ClientTelemetryOptions.Percentile50]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile90]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile95]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile99]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile999]);
}
public void CheckMetricsAggregationLogicWithAdjustment()
{
    MetricInfo metrics = new MetricInfo("metricsName", "unitName");
    long adjustmentFactor = 1000;

    LongConcurrentHistogram histogram = new LongConcurrentHistogram(1, Int64.MaxValue, 5);
    histogram.RecordValue((long)(10 * adjustmentFactor));
    histogram.RecordValue((long)(20 * adjustmentFactor));
    histogram.RecordValue((long)(30 * adjustmentFactor));
    histogram.RecordValue((long)(40 * adjustmentFactor));

    metrics.SetAggregators(histogram, adjustmentFactor);

    Assert.AreEqual(40, metrics.Max);
    Assert.AreEqual(10, metrics.Min);
    Assert.AreEqual(4, metrics.Count);
    Assert.AreEqual(25, metrics.Mean);
    Assert.AreEqual(20, metrics.Percentiles[ClientTelemetryOptions.Percentile50]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile90]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile95]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile99]);
    Assert.AreEqual(40, metrics.Percentiles[ClientTelemetryOptions.Percentile999]);
}
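// The two tests above exercise SetAggregators, which evidently copies the histogram's summary
// statistics onto the metric and divides them by an optional adjustment factor, so values can be
// recorded at a finer scale and reported back in the original unit. A minimal sketch of that
// pattern, assuming only the HdrHistogram.NET API (RecordValue, GetValueAtPercentile, GetMean,
// TotalCount); the Summary type below is illustrative and is not the SDK's MetricInfo.
using HdrHistogram;

internal sealed class Summary
{
    public long Count;
    public double Mean;
    public double P50;
    public double P99;

    public static Summary FromHistogram(LongConcurrentHistogram histogram, long adjustment = 1)
    {
        return new Summary
        {
            Count = histogram.TotalCount,
            Mean = histogram.GetMean() / adjustment,
            P50 = (double)histogram.GetValueAtPercentile(50) / adjustment,
            P99 = (double)histogram.GetValueAtPercentile(99) / adjustment,
        };
    }
}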
/// <summary>
/// Collecting CPU usage information and aggregating that data using Histogram
/// </summary>
/// <param name="systemUsageCollection"></param>
/// <returns>SystemInfo</returns>
public static SystemInfo GetCpuInfo(IReadOnlyCollection<SystemUsageLoad> systemUsageCollection)
{
    LongConcurrentHistogram histogram = new LongConcurrentHistogram(ClientTelemetryOptions.CpuMin,
                                                                    ClientTelemetryOptions.CpuMax,
                                                                    ClientTelemetryOptions.CpuPrecision);
    SystemInfo systemInfo = new SystemInfo(ClientTelemetryOptions.CpuName, ClientTelemetryOptions.CpuUnit);

    foreach (SystemUsageLoad load in systemUsageCollection)
    {
        if (float.IsNaN(load.CpuUsage.Value))
        {
            continue;
        }

        long? infoToRecord = (long?)load.CpuUsage * ClientTelemetryOptions.HistogramPrecisionFactor;
        if (infoToRecord.HasValue)
        {
            histogram.RecordValue(infoToRecord.Value);
        }
    }

    if (histogram.TotalCount > 0)
    {
        systemInfo.SetAggregators(histogram, ClientTelemetryOptions.HistogramPrecisionFactor);
    }

    return systemInfo;
}
/// <summary>
/// Collecting TCP Connection Count and aggregating using Histogram
/// </summary>
/// <param name="systemUsageCollection"></param>
/// <returns>SystemInfo</returns>
public static SystemInfo GetTcpConnectionCount(IReadOnlyCollection<SystemUsageLoad> systemUsageCollection)
{
    LongConcurrentHistogram histogram = new LongConcurrentHistogram(ClientTelemetryOptions.NumberOfTcpConnectionMin,
                                                                    ClientTelemetryOptions.NumberOfTcpConnectionMax,
                                                                    ClientTelemetryOptions.NumberOfTcpConnectionPrecision);
    SystemInfo systemInfo = new SystemInfo(ClientTelemetryOptions.NumberOfTcpConnectionName, ClientTelemetryOptions.NumberOfTcpConnectionUnit);

    foreach (SystemUsageLoad load in systemUsageCollection)
    {
        int? infoToRecord = load.NumberOfOpenTcpConnections;

        // If there are somehow more than 70,000 open TCP connections, clamp to 69,999 so the
        // value stays within the histogram's trackable range.
        if (infoToRecord.HasValue && infoToRecord.Value >= ClientTelemetryOptions.NumberOfTcpConnectionMax)
        {
            infoToRecord = (int)(ClientTelemetryOptions.NumberOfTcpConnectionMax - 1);
        }

        if (infoToRecord.HasValue)
        {
            histogram.RecordValue(infoToRecord.Value);
        }
    }

    if (histogram.TotalCount > 0)
    {
        systemInfo.SetAggregators(histogram);
    }

    return systemInfo;
}
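// The clamp in GetTcpConnectionCount matters because an HdrHistogram created without
// auto-resizing can only record values up to its configured highest trackable value; recording
// anything larger typically throws. A small hypothetical helper expressing the same guard:
using System;
using HdrHistogram;

internal static class HistogramGuard
{
    // Clamp to the last trackable bucket rather than dropping the sample or throwing.
    public static void RecordClamped(LongConcurrentHistogram histogram, long value, long highestTrackable)
    {
        histogram.RecordValue(Math.Min(value, highestTrackable - 1));
    }
}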
public long LongConcurrentHistogramRecording()
{
    long counter = 0L;
    for (int i = 0; i < _testValues.Length; i++)
    {
        var value = _testValues[i];
        _longConcurrentHistogram.RecordValue(value);
        counter += value;
    }
    return counter;
}
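// LongConcurrentHistogramRecording reads like a benchmark body that measures the cost of
// RecordValue over a pre-generated set of samples (returning the running sum keeps the loop
// from being optimized away). A hypothetical BenchmarkDotNet harness around it, assuming the
// fields are initialized once per run; the sample count and value range here are arbitrary:
using BenchmarkDotNet.Attributes;
using HdrHistogram;

public class HistogramRecordingBenchmark
{
    private long[] _testValues;
    private LongConcurrentHistogram _longConcurrentHistogram;

    [GlobalSetup]
    public void Setup()
    {
        var random = new System.Random(42);
        _testValues = new long[10_000];
        for (int i = 0; i < _testValues.Length; i++)
        {
            _testValues[i] = random.Next(1, 1_000_000);
        }
        _longConcurrentHistogram = new LongConcurrentHistogram(1, 1_000_000, 3);
    }

    [Benchmark]
    public long LongConcurrentHistogramRecording()
    {
        long counter = 0L;
        foreach (long value in _testValues)
        {
            _longConcurrentHistogram.RecordValue(value);
            counter += value;
        }
        return counter;
    }
}

// Typical entry point: BenchmarkDotNet.Running.BenchmarkRunner.Run<HistogramRecordingBenchmark>();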
/// <summary>
/// Collecting Available Thread information and aggregating that data using Histogram
/// </summary>
/// <param name="systemUsageCollection"></param>
/// <returns>SystemInfo</returns>
public static SystemInfo GetAvailableThreadsInfo(IReadOnlyCollection<SystemUsageLoad> systemUsageCollection)
{
    LongConcurrentHistogram histogram = new LongConcurrentHistogram(ClientTelemetryOptions.AvailableThreadsMin,
                                                                    ClientTelemetryOptions.AvailableThreadsMax,
                                                                    ClientTelemetryOptions.AvailableThreadsPrecision);
    SystemInfo systemInfo = new SystemInfo(ClientTelemetryOptions.AvailableThreadsName, ClientTelemetryOptions.AvailableThreadsUnit);

    foreach (SystemUsageLoad load in systemUsageCollection)
    {
        long? infoToRecord = (long?)load.ThreadInfo?.AvailableThreads;
        if (infoToRecord.HasValue)
        {
            histogram.RecordValue(infoToRecord.Value);
        }
    }

    if (histogram.TotalCount > 0)
    {
        systemInfo.SetAggregators(histogram);
    }

    return systemInfo;
}
/// <summary>
/// Collecting Memory Remaining information and aggregating that data using Histogram
/// </summary>
/// <param name="systemUsageCollection"></param>
/// <returns>SystemInfo</returns>
public static SystemInfo GetMemoryRemainingInfo(IReadOnlyCollection<SystemUsageLoad> systemUsageCollection)
{
    LongConcurrentHistogram histogram = new LongConcurrentHistogram(ClientTelemetryOptions.MemoryMin,
                                                                    ClientTelemetryOptions.MemoryMax,
                                                                    ClientTelemetryOptions.MemoryPrecision);
    SystemInfo systemInfo = new SystemInfo(ClientTelemetryOptions.MemoryName, ClientTelemetryOptions.MemoryUnit);

    foreach (SystemUsageLoad load in systemUsageCollection)
    {
        long? infoToRecord = (long?)load.MemoryAvailable;
        if (infoToRecord.HasValue)
        {
            histogram.RecordValue(infoToRecord.Value);
        }
    }

    if (histogram.TotalCount > 0)
    {
        systemInfo.SetAggregators(histogram, ClientTelemetryOptions.KbToMbFactor);
    }

    return systemInfo;
}
/// <summary>
/// Collecting Thread Wait Interval in Millisecond and aggregating using Histogram
/// </summary>
/// <param name="systemUsageCollection"></param>
/// <returns>SystemInfo</returns>
public static SystemInfo GetThreadWaitIntervalInMs(IReadOnlyCollection<SystemUsageLoad> systemUsageCollection)
{
    LongConcurrentHistogram histogram = new LongConcurrentHistogram(ClientTelemetryOptions.ThreadWaitIntervalInMsMin,
                                                                    ClientTelemetryOptions.ThreadWaitIntervalInMsMax,
                                                                    ClientTelemetryOptions.ThreadWaitIntervalInMsPrecision);
    SystemInfo systemInfo = new SystemInfo(ClientTelemetryOptions.ThreadWaitIntervalInMsName, ClientTelemetryOptions.ThreadWaitIntervalInMsUnit);

    foreach (SystemUsageLoad load in systemUsageCollection)
    {
        double? infoToRecord = load.ThreadInfo?.ThreadWaitIntervalInMs;
        if (infoToRecord.HasValue)
        {
            histogram.RecordValue(TimeSpan.FromMilliseconds(infoToRecord.Value).Ticks);
        }
    }

    if (histogram.TotalCount > 0)
    {
        systemInfo.SetAggregators(histogram, ClientTelemetryOptions.TicksToMsFactor);
    }

    return systemInfo;
}
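// GetThreadWaitIntervalInMs records the wait interval as ticks (TimeSpan.FromMilliseconds(x).Ticks),
// which keeps the value an integer for the histogram, and then passes TicksToMsFactor to
// SetAggregators, presumably to scale the aggregated values back to milliseconds
// (TimeSpan.TicksPerMillisecond is 10,000 in .NET). A quick sanity check of that round trip:
using System;

internal static class TicksDemo
{
    private static void Main()
    {
        double waitMs = 12;
        long ticks = TimeSpan.FromMilliseconds(waitMs).Ticks;            // 120,000
        double backToMs = (double)ticks / TimeSpan.TicksPerMillisecond;  // 12
        Console.WriteLine($"{ticks} ticks == {backToMs} ms");
    }
}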
private static LongConcurrentHistogram RunTest(RunInfo run)
{
    var config = run.Configuration;
    var pool = new RoundRobinPool(config.PoolSize, config.ProjectId);
    var histogram = new LongConcurrentHistogram(1L, TimeStamp.Minutes(10), 4);
    long documentCounter = 0;

    var data = new byte[config.DataSize];
    RandomNumberGenerator.Create().GetBytes(data);

    // Warm the pool up with no throttling
    SemaphoreSlim qpsThrottle = new SemaphoreSlim(config.PoolSize);
    for (int i = 0; i < config.PoolSize; i++)
    {
        WriteBatchAsync().GetAwaiter().GetResult();
    }
    // Discard the warm-up results
    histogram.Reset();

    long batchesLeft = config.Batches;
    long completed = 0;

    var tasks = Enumerable
        .Range(0, config.TaskCount)
        .Select(_ => Task.Run(WriteBatchesAsync))
        .ToArray();

    // Start tasks that we don't need to wait for at the end. (They will complete,
    // but possibly not immediately, and that's fine.)
    Task.Run(PrintDiagnosticsAsync);
    Task.Run(ReleaseThrottleAsync);

    Task.WaitAll(tasks);
    Interlocked.Increment(ref completed);

    return histogram;

    async Task WriteBatchesAsync()
    {
        while (Interlocked.Decrement(ref batchesLeft) >= 0)
        {
            await WriteBatchAsync();
        }
    }

    async Task WriteBatchAsync()
    {
        var db = pool.GetFirestoreDb();
        var batch = db.StartBatch();
        for (int i = 0; i < config.BatchSize; i++)
        {
            var documentRef = db.Collection(run.Collection).Document();
            var document = new SampleDocument
            {
                Id = Interlocked.Increment(ref documentCounter),
                Data = data
            };
            batch.Create(documentRef, document);
        }
        await qpsThrottle.WaitAsync();
        long start = Stopwatch.GetTimestamp();
        await batch.CommitAsync();
        long end = Stopwatch.GetTimestamp();
        histogram.RecordValue(Math.Max(1L, end - start));
    }

    async Task ReleaseThrottleAsync()
    {
        Stopwatch stopwatch = Stopwatch.StartNew();
        long released = 0;
        // Every ~50ms, see how many more batches we should release
        while (Interlocked.Read(ref completed) == 0)
        {
            await Task.Delay(50);
            long targetRelease = (stopwatch.ElapsedTicks * config.TargetQps) / Stopwatch.Frequency;
            qpsThrottle.Release((int)(targetRelease - released));
            released = targetRelease;
        }
    }

    async Task PrintDiagnosticsAsync()
    {
        while (Interlocked.Read(ref completed) == 0)
        {
            Log($"Batches left to start: {Interlocked.Read(ref batchesLeft)}");
            await Task.Delay(1000);
        }
    }
}
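// RunTest records each batch commit's latency as a Stopwatch timestamp delta (Stopwatch.Frequency
// ticks per second) and returns the histogram. A hypothetical way to report that result in
// milliseconds; ReportLatency is illustrative and not part of the benchmark above:
using System;
using System.Diagnostics;
using HdrHistogram;

internal static class LatencyReport
{
    public static void ReportLatency(LongConcurrentHistogram histogram)
    {
        double ticksPerMs = Stopwatch.Frequency / 1000.0;
        Console.WriteLine($"Commits: {histogram.TotalCount}");
        Console.WriteLine($"p50:   {histogram.GetValueAtPercentile(50) / ticksPerMs:F1} ms");
        Console.WriteLine($"p99:   {histogram.GetValueAtPercentile(99) / ticksPerMs:F1} ms");
        Console.WriteLine($"p99.9: {histogram.GetValueAtPercentile(99.9) / ticksPerMs:F1} ms");
    }
}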