        public void PercentileValuesTest()
        {
            var summary = new Statistics(Enumerable.Range(1, 30));
            Print(summary);
            Assert.Equal(1, summary.Percentiles.P0);
            Assert.Equal(8.25, summary.Percentiles.P25);
            Assert.Equal(15.5, summary.Percentiles.P50);
            Assert.Equal(20.43, summary.Percentiles.P67, 4);
            Assert.Equal(24.2, summary.Percentiles.P80, 4);
            Assert.Equal(25.65, summary.Percentiles.P85);
            Assert.Equal(27.1, summary.Percentiles.P90);
            Assert.Equal(28.55, summary.Percentiles.P95, 4);
            Assert.Equal(30, summary.Percentiles.P100);

            var a = Enumerable.Range(1, 30);
            var b = Enumerable.Concat(Enumerable.Repeat(0, 30), a);
            var c = Enumerable.Concat(b, Enumerable.Repeat(31, 30));
            summary = new Statistics(c);
            Print(summary);
            Assert.Equal(0, summary.Percentiles.P0);
            Assert.Equal(0, summary.Percentiles.P25);
            Assert.Equal(15.5, summary.Percentiles.P50);
            Assert.Equal(30.63, summary.Percentiles.P67, 4);
            Assert.Equal(31, summary.Percentiles.P80);
            Assert.Equal(31, summary.Percentiles.P85);
            Assert.Equal(31, summary.Percentiles.P90);
            Assert.Equal(31, summary.Percentiles.P95);
            Assert.Equal(31, summary.Percentiles.P100);
        }
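
The asserted values match linear interpolation between the two nearest order statistics (the "type 7" percentile estimator). A minimal sketch that reproduces them, assuming sorted input; the library's actual implementation may differ in detail:

    // Type-7 estimator: 0-based fractional rank (n - 1) * p,
    // interpolated linearly between the neighboring order statistics.
    public static double Percentile(double[] sorted, double p)
    {
        double rank = (sorted.Length - 1) * p;
        int lower = (int)Math.Floor(rank);
        int upper = (int)Math.Ceiling(rank);
        return sorted[lower] + (rank - lower) * (sorted[upper] - sorted[lower]);
    }
    // For 1..30 and p = 0.67: rank = 19.43, so 20 + 0.43 * (21 - 20) = 20.43.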
 // 'calc' and 'isTimeColumn' are fields of the enclosing column class (not shown in this snippet).
 private string Format(Statistics statistics, TimeUnit timeUnit)
 {
     if (statistics == null)
         return "NA";
     var value = calc(statistics);
     return isTimeColumn ? value.ToTimeStr(timeUnit) : value.ToStr();
 }
 public void ConfidenceIntervalTest()
 {
     var summary = new Statistics(Enumerable.Range(1, 30));
     Print(summary);
     Assert.Equal(95, summary.ConfidenceInterval.Level.ToPercent());
     Assert.Equal(15.5, summary.ConfidenceInterval.Mean);
     Assert.Equal(summary.StandardError, summary.ConfidenceInterval.Error);
     Assert.Equal(12.34974, summary.ConfidenceInterval.Lower, 4);
     Assert.Equal(18.65026, summary.ConfidenceInterval.Upper, 4);
 }
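
For reference, the asserted bounds follow from mean +/- z * SE with z = 1.96 at the 95% level. A hedged sketch of the arithmetic (the library may use a different critical value for small samples):

    double[] values = Enumerable.Range(1, 30).Select(i => (double)i).ToArray();
    double mean = values.Average();                                      // 15.5
    double variance = values.Sum(v => (v - mean) * (v - mean)) / (values.Length - 1);
    double standardError = Math.Sqrt(variance / values.Length);          // ~1.60728
    double margin = 1.96 * standardError;                                // ~3.15026
    // mean - margin ~ 12.34974 and mean + margin ~ 18.65026, as asserted.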
 public void RankTest()
 {
     var s1 = new Statistics(100, 101, 100, 101);
     var s2 = new Statistics(300, 301, 300, 301);
     var s3 = new Statistics(200.3279, 200.3178, 200.4046);
     var s4 = new Statistics(200.2298, 200.5738, 200.3582);
     var s5 = new Statistics(195, 196, 195, 196);
     var actualRanks = RankHelper.GetRanks(s1, s2, s3, s4, s5);
      var expectedRanks = new[] { 1, 4, 3, 3, 2 }; // s3 and s4 look the same within noise, so they share rank 3
     Assert.Equal(expectedRanks, actualRanks);
 }
Example No. 5
 // Assumes the input is sorted; adjacent statistics that look the same share a place.
 private static int[] GetPlaces(Statistics[] s)
 {
     var n = s.Length;
     int[] places = new int[n];
     places[0] = 1;
     for (int i = 1; i < n; i++)
         if (LookSame(s[i - 1], s[i]))
             places[i] = places[i - 1];
         else
             places[i] = places[i - 1] + 1;
     return places;
 }
Example No. 6
        /// <summary>
        /// Welch's Two Sample t-test
        /// </summary>
        public static WelchTTest Calc(Statistics x, Statistics y)
        {
            int n1 = x.N, n2 = y.N;
            double v1 = x.Variance, v2 = y.Variance, m1 = x.Mean, m2 = y.Mean;

            var se = Sqrt((v1 / n1) + (v2 / n2));
            var t = (m1 - m2) / se;
            // Welch-Satterthwaite approximation of the degrees of freedom
            var df = (v1 / n1 + v2 / n2).Sqr() / ((v1 / n1).Sqr() / (n1 - 1) + (v2 / n2).Sqr() / (n2 - 1));
            var pValue = MathHelper.Student(t, df);

            return new WelchTTest(t, df, pValue);
        }
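
A hypothetical call site, assuming Calc is declared on WelchTTest; the property names below (T, Df, PValue) are assumptions for illustration:

    var x = new Statistics(10.1, 10.2, 10.3, 10.2);
    var y = new Statistics(12.0, 12.1, 11.9, 12.2);
    var test = WelchTTest.Calc(x, y);
    // A small p-value suggests the two means genuinely differ.
    Console.WriteLine($"t = {test.T}, df = {test.Df}, p-value = {test.PValue}");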
Example No. 7
 private void Print(Statistics summary)
 {
     output.WriteLine("Min = " + summary.Min);
     output.WriteLine("LowerFence = " + summary.LowerFence);
     output.WriteLine("Q1 = " + summary.Q1);
     output.WriteLine("Median = " + summary.Median);
     output.WriteLine("Mean = " + summary.Mean);
     output.WriteLine("Q3 = " + summary.Q3);
     output.WriteLine("UpperFence = " + summary.UpperFence);
     output.WriteLine("Max = " + summary.Max);
     output.WriteLine("InterquartileRange = " + summary.InterquartileRange);
     output.WriteLine("StandardDeviation = " + summary.StandardDeviation);
     output.WriteLine("Outlier = [" + string.Join("; ", summary.Outliers) + "]");
     output.WriteLine("CI = " + summary.ConfidenceInterval.ToStr());
 }
Example No. 8
 public void Test2()
 {
     var summary = new Statistics(1, 2);
     Print(summary);
     Assert.Equal(1, summary.Min);
     Assert.Equal(-0.5, summary.LowerFence);
     Assert.Equal(1, summary.Q1);
     Assert.Equal(1.5, summary.Median);
     Assert.Equal(1.5, summary.Mean);
     Assert.Equal(2, summary.Q3);
     Assert.Equal(3.5, summary.UpperFence);
     Assert.Equal(2, summary.Max);
     Assert.Equal(1, summary.InterquartileRange);
     Assert.Equal(0.70711, summary.StandardDeviation, 4);
     Assert.Equal(new double[0], summary.Outliers);
 }
Example No. 9
 public void Test3()
 {
     var summary = new Statistics(1, 2, 4);
     Print(summary);
     Assert.Equal(1, summary.Min);
     Assert.Equal(-3.5, summary.LowerFence);
     Assert.Equal(1, summary.Q1);
     Assert.Equal(2, summary.Median);
     Assert.Equal(2.333333, summary.Mean, 5);
     Assert.Equal(4, summary.Q3);
     Assert.Equal(8.5, summary.UpperFence);
     Assert.Equal(4, summary.Max);
     Assert.Equal(3, summary.InterquartileRange);
     Assert.Equal(1.52753, summary.StandardDeviation, 4);
     Assert.Equal(new double[0], summary.Outliers);
 }
        /// <summary>
        /// Welch's Two Sample t-test
        /// </summary>
        public static WelchTTest Calc(Statistics x, Statistics y)
        {
            int n1 = x.N, n2 = y.N;
            if (x.N < 2)
                throw new ArgumentException("x should contain at least 2 elements", nameof(x));
            if (y.N < 2)
                throw new ArgumentException("y should contain at least 2 elements", nameof(y));

            double v1 = x.Variance, v2 = y.Variance, m1 = x.Mean, m2 = y.Mean;

            double se = Sqrt((v1 / n1) + (v2 / n2));
            double t = (m1 - m2) / se;
            // Welch-Satterthwaite approximation of the degrees of freedom
            double df = (v1 / n1 + v2 / n2).Sqr() /
                        ((v1 / n1).Sqr() / (n1 - 1) + (v2 / n2).Sqr() / (n2 - 1));
            double pValue = MathHelper.Student(t, df);

            return new WelchTTest(t, df, pValue);
        }
        // TODO: rewrite without allocations
        public IEnumerable<Measurement> GetMeasurements()
        {
            double overhead = Idle == null ? 0.0 : new Statistics(Idle.Select(m => m.Nanoseconds)).Mean;
            var mainStats = new Statistics(Main.Select(m => m.Nanoseconds));
            int resultIndex = 0;
            foreach (var measurement in Main)
            {
                if (removeOutliers && mainStats.IsOutlier(measurement.Nanoseconds))
                    continue;

                double value = Math.Max(0, measurement.Nanoseconds - overhead);
                if (IsSuspiciouslySmall(value))
                    value = 0;

                yield return new Measurement(
                    measurement.LaunchIndex,
                    IterationMode.Result,
                    ++resultIndex,
                    measurement.Operations,
                    value);
            }
        }
 public void Test1()
 {
     var summary = new Statistics(1);
     Print(summary);
     Assert.Equal(1, summary.Min);
     Assert.Equal(1, summary.LowerFence);
     Assert.Equal(1, summary.Q1);
     Assert.Equal(1, summary.Median);
     Assert.Equal(1, summary.Mean);
     Assert.Equal(1, summary.Q3);
     Assert.Equal(1, summary.UpperFence);
     Assert.Equal(1, summary.Max);
     Assert.Equal(0, summary.InterquartileRange);
     Assert.Equal(0, summary.StandardDeviation);
     Assert.Equal(new double[0], summary.Outliers);
     Assert.Equal(1, summary.Percentiles.P0);
     Assert.Equal(1, summary.Percentiles.P25);
     Assert.Equal(1, summary.Percentiles.P50);
     Assert.Equal(1, summary.Percentiles.P85);
     Assert.Equal(1, summary.Percentiles.P95);
     Assert.Equal(1, summary.Percentiles.P100);
 }
Example No. 13
 /// <summary>
  /// Mean of the product X*Y (valid when X and Y are independent).
 /// </summary>
 public static double MulMean(Statistics x, Statistics y) => x.Mean * y.Mean;
Example No. 14
 public void Test7()
 {
     var summary = new Statistics(1, 2, 4, 8, 16, 32, 64);
     Print(summary);
     Assert.Equal(1, summary.Min);
     Assert.Equal(-43, summary.LowerFence);
     Assert.Equal(2, summary.Q1);
     Assert.Equal(8, summary.Median);
     Assert.Equal(18.1428571429, summary.Mean, 5);
     Assert.Equal(32, summary.Q3);
     Assert.Equal(77, summary.UpperFence);
     Assert.Equal(64, summary.Max);
     Assert.Equal(30, summary.InterquartileRange);
     Assert.Equal(22.9378, summary.StandardDeviation, 4);
     Assert.Equal(new double[0], summary.Outliers);
 }
Example No. 15
 // Two statistics "look the same" when each mean lies within three
 // standard deviations of the other.
 private static bool LookSame(Statistics s1, Statistics s2) =>
     s1.Mean + 3 * s1.StandardDeviation > s2.Mean &&
     s2.Mean - 3 * s2.StandardDeviation < s1.Mean;
 /// <summary>
  /// Mean of the product X*Y (valid when X and Y are independent).
  /// </summary>
 public static double MulMean(Statistics x, Statistics y) => x.Mean * y.Mean;
 /// <summary>
  /// Variance of the product X*Y (valid when X and Y are independent).
  /// </summary>
 public static double MulVariance(Statistics x, Statistics y)
 {
     return x.Sqr().Mean * y.Sqr().Mean - x.Mean.Sqr() * y.Mean.Sqr();
 }
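
The identity behind MulVariance, for independent X and Y, is Var(XY) = E[X^2] E[Y^2] - (E[X] E[Y])^2. A quick numerical check against the population variance of all pairwise products, assuming Sqr() squares each sample value (test values chosen arbitrarily):

    double[] xs = { 1, 2, 3 };
    double[] ys = { 4, 5 };
    var products = xs.SelectMany(xi => ys.Select(yi => xi * yi)).ToArray();
    double mean = products.Average();                                    // 9
    double populationVariance =
        products.Sum(p => (p - mean) * (p - mean)) / products.Length;    // 14.666...
    // This equals MulVariance(new Statistics(xs), new Statistics(ys)).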
Example No. 18
 public void OutlierTest()
 {
     var summary = new Statistics(1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 10, 10.1);
     Print(summary);
     Assert.Equal(new[] { 10, 10.1 }, summary.Outliers);
 }
 /// <summary>
  /// Variance of the quotient X/Y (valid when X and Y are independent).
  /// </summary>
 public static double DivVariance(Statistics x, Statistics y)
 {
     var yInvert = y.Invert();
     if (yInvert == null)
         throw new DivideByZeroException();
     return MulVariance(x, yInvert);
 }
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver)
        {
            var executeResults = new List<ExecuteResult>();

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int defaultValue = analyzeRunToRunVariance ? 2 : 1;
            int launchCount = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            for (int launchIndex = 0; launchIndex < launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance &&
                    autoLaunchCount &&
                    launchIndex < 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex + 1}{printedLaunchCount}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                if (executeResult.ExitCode != 0)
                    logger.WriteLineError("ExitCode != 0");
                executeResults.Add(executeResult);

                var measurements = executeResults
                        .SelectMany(r => r.Data)
                        .Select(line => Measurement.Parse(logger, line, 0))
                        .Where(r => r.IterationMode != IterationMode.Unknown)
                        .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError($"No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (autoLaunchCount && launchIndex == 1 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Any())
            {
                logger.WriteLineInfo("// Run, Diagnostic");
                var compositeDiagnoser = config.GetCompositeDiagnoser();

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver, compositeDiagnoser);

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns);
                compositeDiagnoser.ProcessResults(benchmark, report);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                logger.WriteLine();
            }

            return executeResults;
        }
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config)
        {
            var executeResults = new List<ExecuteResult>();

            logger.WriteLineInfo("// *** Execute ***");
            var launchCount = Math.Max(1, benchmark.Job.LaunchCount.IsAuto ? 2 : benchmark.Job.LaunchCount.Value);

            for (int processNumber = 0; processNumber < launchCount; processNumber++)
            {
                var printedProcessNumber = (benchmark.Job.LaunchCount.IsAuto && processNumber < 2) ? "" : " / " + launchCount.ToString();
                logger.WriteLineInfo($"// Launch: {processNumber + 1}{printedProcessNumber}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                executeResults.Add(executeResult);

                var measurements = executeResults
                        .SelectMany(r => r.Data)
                        .Select(line => Measurement.Parse(logger, line, 0))
                        .Where(r => r.IterationMode != IterationMode.Unknown)
                        .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError($"No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (benchmark.Job.LaunchCount.IsAuto && processNumber == 1)
                {
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Any())
            {
                logger.WriteLineInfo("// Run, Diagnostic");
                config.GetCompositeDiagnoser().Start(benchmark);
                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, config.GetCompositeDiagnoser());
                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns);
                config.GetCompositeDiagnoser().Stop(benchmark, report);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                logger.WriteLine();
            }

            return executeResults;
        }
Example No. 22
 /// <summary>
 /// Variance of the product X*Y (valid when X and Y are independent).
 /// </summary>
 public static double MulVariance(Statistics x, Statistics y)
 {
     return x.Sqr().Mean * y.Sqr().Mean - x.Mean.Sqr() * y.Mean.Sqr();
 }