// TODO: Try with other metric types, like memory, since this might not actually be
// generic/accurate for all AggregateMetrics but only for ElapsedTime.
/// <summary>
/// Asserts that <paramref name="first"/> is statistically faster (lower elapsed time)
/// than <paramref name="second"/>, delegating to the Mann-Whitney-Wilcoxon based
/// <c>RealTest</c>.
/// </summary>
/// <param name="first">Metrics expected to be the faster of the two.</param>
/// <param name="second">Metrics expected to be the slower of the two.</param>
public static void FirstIsFaster(AggregateMetrics first, AggregateMetrics second)
{
    // The previous /*/ ... //*/ comment-toggle switched between
    // DummyNotStatisticalTest and RealTest; only RealTest was active,
    // so call it directly (use DummyNotStatisticalTest manually when debugging).
    RealTest(first, second);
}
/// <summary>
/// Naive comparison of mean elapsed times with no statistical significance check.
/// Kept only as a quick debugging alternative to the real Mann-Whitney-Wilcoxon test.
/// </summary>
private static void DummyNotStatisticalTest(AggregateMetrics first, AggregateMetrics second)
{
    // TODO: Dummy NOT statistical test
    var meanOfFirst = first.Runs.Average(run => run.ElapsedNanos);
    var meanOfSecond = second.Runs.Average(run => run.ElapsedNanos);

    // NOTE(review): "AssertIsTrue2" looks like a second overload taking a message --
    // confirm it is not a typo for AssertIsTrue.
    PerfAssertContext.AssertIsTrue2(
        meanOfFirst < meanOfSecond,
        "Expected first to be faster (less time) than second");
}
/// <summary>
/// Asserts that the source benchmark's runtime is not statistically worse than the
/// target benchmark's: neither slower nor higher-variance.
/// TODO: Would prefer NotWorseThan(X, InTermsOf.Runtime), but leaving for later.
/// </summary>
/// <param name="source">Fluent assertion state holding the source benchmark name and run cache.</param>
/// <param name="perfBenchmark">Expression identifying the target benchmark to compare against.</param>
/// <returns>The fluent assertion object for further chaining.</returns>
public static PerfAssertObjectWithSourceAndTarget<T> NotWorseRuntimeThan<T>(
    this PerfAssertObjectWithSource<T> source,
    Expression<Action<T>> perfBenchmark)
{
    // TODO: COPY PASTE!!! Shared with FasterThan -- extract a common helper that
    // resolves both benchmarks' timing metrics.
    string targetName = ExpressionUtils.GetPerfBenchmarkName(perfBenchmark);

    var perfAssert = new PerfAssertObjectWithSourceAndTarget<T>(
        source.SourceName,
        targetName: targetName,
        benchmarkRunCache: source.BenchmarkRunCache);

    var results = source.BenchmarkRunCache.GetResults(perfAssert);

    // For each benchmark, pick out its single timing metric
    // (as opposed to Counter, Gc, or Memory metrics).
    var speedMetricsByBenchmark = results.ToDictionary(
        result => result.BenchmarkName,
        result => result.StatsByMetric
            // TODO: Error handling to help developers with usage
            .Single(kvp => kvp.Key is TimingMetricName)
            .Value);

    var speedMetricForSource = speedMetricsByBenchmark[source.SourceName]; // TODO: Error handling
    var speedMetricForTarget = speedMetricsByBenchmark[targetName]; // TODO: Error handling

    // Run the validations.
    // TODO: Wrap failures (e.g. via AssertionFailedExceptionWrapper) with a meaningful
    // message naming the source/target benchmarks, as the removed commented-out
    // scaffolding intended.
    AggregateMetricsAssertions.FirstIsNotSlower(
        first: speedMetricForSource,
        second: speedMetricForTarget);

    AggregateMetricsAssertions.FirstDoesNotHaveHigherVariance(
        first: speedMetricForSource,
        second: speedMetricForTarget);

    return perfAssert;
}
/// <summary>
/// Statistically asserts that first is faster than second using the rank-based
/// Mann-Whitney-Wilcoxon test, which does not assume normally distributed samples.
/// Fails unless the "first &lt; second" hypothesis is significant.
/// </summary>
private static void RealTest(AggregateMetrics first, AggregateMetrics second)
{
    // Wilcoxon test to not assume normal distributions
    var wilcoxon = new MannWhitneyWilcoxonTest(
        first.Runs.Select(run => run.ElapsedNanos).ToArray(),
        second.Runs.Select(run => run.ElapsedNanos).ToArray(),
        TwoSampleHypothesis.FirstValueIsSmallerThanSecond);

    Trace.WriteLine(
        "MannWWT s1 < s2: " + wilcoxon.PValue
        + " Stat1: " + wilcoxon.Statistic1
        + "; Stat2: " + wilcoxon.Statistic2
        + " - Significant: " + wilcoxon.Significant
        + " - Hyp: " + wilcoxon.Hypothesis);

    PerfAssertContext.AssertIsTrue(wilcoxon.Significant);
}
/// <summary>
/// Asserts that first's run-to-run variance is not statistically higher than
/// second's, using Fisher's F-test on the elapsed-time samples. Fails only when the
/// "Var(first) &gt; Var(second)" hypothesis is significant.
/// </summary>
public static void FirstDoesNotHaveHigherVariance(AggregateMetrics first, AggregateMetrics second)
{
    double[] samplesOfFirst = first.Runs.Select(run => run.ElapsedNanos).ToArray();
    double[] samplesOfSecond = second.Runs.Select(run => run.ElapsedNanos).ToArray();

    // Fisher's F-test (also known as Snedecor)
    double varianceOfFirst = Measures.Variance(samplesOfFirst);
    double varianceOfSecond = Measures.Variance(samplesOfSecond);

    var fTest = new FTest(
        varianceOfFirst,
        varianceOfSecond,
        samplesOfFirst.Length - 1,
        samplesOfSecond.Length - 1,
        TwoSampleHypothesis.FirstValueIsGreaterThanSecond);

    Trace.WriteLine(
        "FTest Var(s1) > Var(s2): " + fTest.PValue
        + " - Significant: " + fTest.Significant
        + " - Hyp: " + fTest.Hypothesis);

    // Pass unless the variance difference is statistically significant.
    PerfAssertContext.AssertIsFalse(fTest.Significant);
}
// TODO: COPY PASTE!!!
#region Different types of assertions (memory/speed/etc)

// TODO: COPY PASTE!!!
/// <summary>
/// Asserts that the source benchmark is statistically faster than the target
/// benchmark identified by <paramref name="perfBenchmark"/>.
/// </summary>
/// <param name="source">Fluent assertion state holding the source benchmark name and run cache.</param>
/// <param name="perfBenchmark">Expression identifying the target benchmark to compare against.</param>
/// <returns>The fluent assertion object for further chaining.</returns>
public static PerfAssertObjectWithSourceAndTarget<T> FasterThan<T>(
    this PerfAssertObjectWithSource<T> source,
    Expression<Action<T>> perfBenchmark)
{
    // TODO: COPY PASTE!!! Shared with NotWorseRuntimeThan -- extract a common helper
    // that resolves both benchmarks' timing metrics.
    string targetName = ExpressionUtils.GetPerfBenchmarkName(perfBenchmark);

    var perfAssert = new PerfAssertObjectWithSourceAndTarget<T>(
        source.SourceName,
        targetName: targetName,
        benchmarkRunCache: source.BenchmarkRunCache);

    var results = source.BenchmarkRunCache.GetResults(perfAssert);

    // For each benchmark, pick out its single timing metric
    // (as opposed to Counter, Gc, or Memory metrics).
    var speedMetricsByBenchmark = results.ToDictionary(
        result => result.BenchmarkName,
        result => result.StatsByMetric
            // TODO: Error handling to help developers with usage
            .Single(kvp => kvp.Key is TimingMetricName)
            .Value);

    var speedMetricForSource = speedMetricsByBenchmark[source.SourceName]; // TODO: Error handling
    var speedMetricForTarget = speedMetricsByBenchmark[targetName]; // TODO: Error handling

    // Run the validation.
    // TODO: Pass a 'meaningful message' down / wrap the failure so the user sees
    // which benchmarks were compared (as the removed commented-out scaffolding intended).
    AggregateMetricsAssertions.FirstIsFaster(
        first: speedMetricForSource,
        second: speedMetricForTarget);

    return perfAssert;
}
/// <summary>
/// Asserts that we CANNOT statistically conclude first is slower than second.
/// First may be faster, equal, or even slightly slower -- it only fails when the
/// "first &gt; second" hypothesis is statistically significant
/// (Mann-Whitney-Wilcoxon, no normality assumption).
/// </summary>
public static void FirstIsNotSlower(AggregateMetrics first, AggregateMetrics second)
{
    double[] firstSamples = first.Runs.Select(run => run.ElapsedNanos).ToArray();
    double[] secondSamples = second.Runs.Select(run => run.ElapsedNanos).ToArray();

    // Wilcoxon test to not assume normal distributions
    var wilcoxon = new MannWhitneyWilcoxonTest(
        firstSamples,
        secondSamples,
        TwoSampleHypothesis.FirstValueIsGreaterThanSecond);

    Trace.WriteLine($"Sample 1 mean: {firstSamples.Average()} Sample 2 mean: {secondSamples.Average()}");
    Trace.WriteLine(
        "MannWWT s1 > s2: " + wilcoxon.PValue
        + " Stat1: " + wilcoxon.Statistic1
        + "; Stat2: " + wilcoxon.Statistic2
        + " - Significant: " + wilcoxon.Significant
        + " - Hyp: " + wilcoxon.Hypothesis);

    // We want to assert that we CANNOT statistically say that first > second (it can
    // be less than, equal, or it might be greater than but not with statistical
    // significance -- the only thing it CAN'T be, is KNOWN to be slower [>]).
    PerfAssertContext.AssertIsFalse(wilcoxon.Significant);
}
public void CardinalityLimitsPersistAcrossFlushes()
{
    var client = new TelemetryClient();

    // Track 7 distinct property values; the asserts below expect only 6 aggregations
    // to survive (presumably the cardinality cap, with overflow folded into "other").
    for (int i = 1; i <= 7; i++)
    {
        client.TrackAggregateMetric("Test1", 123, "Prop" + i);
    }

    Assert.AreEqual(6, AggregateMetrics.aggregationSets.Values.First().aggregations.Count);

    AggregateMetrics.FlushImpl();

    // After the flush the limit should persist: three NEW property values must all
    // collapse into the single "other" bucket rather than starting fresh.
    for (int i = 8; i <= 10; i++)
    {
        client.TrackAggregateMetric("Test1", 123, "Prop" + i);
    }

    var aggregations = AggregateMetrics.aggregationSets.Values.First().aggregations;
    Assert.AreEqual(1, aggregations.Count);
    Assert.AreEqual("other", aggregations.First().Value.Property1);
}
// Test setup: disable the module's timer-driven flush so tests control flushing
// explicitly, then reset any aggregate state left over from a previous test.
public void Initialize()
{
    AggregateMetricsTelemetryModule.IsTimerFlushEnabled = false;
    AggregateMetrics.Clear();
}
// Test teardown: clear static aggregate state so it cannot leak into the next test.
public void Cleanup()
{
    AggregateMetrics.Clear();
}
// Test setup: shorten the flush interval (5s) and ENABLE the timer-driven flush
// (unlike the other fixture's Initialize, which disables it), then reset any
// aggregate state left over from a previous test.
public void Initialize()
{
    AggregateMetricsTelemetryModule.FlushInterval = TimeSpan.FromSeconds(5);
    AggregateMetricsTelemetryModule.IsTimerFlushEnabled = true;
    AggregateMetrics.Clear();
}