Example #1
 public bool IsDefault(Summary summary, BenchmarkDotNet.Running.Benchmark benchmark)
 {
     return true;
 }
Example #2
 public string GetValue(Summary summary, BenchmarkDotNet.Running.Benchmark benchmark, ISummaryStyle style)
 {
     return _value;
 }
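The two members above belong to a constant-valued summary column. Below is a minimal sketch of how they could sit inside a full column type; everything except IsDefault and GetValue (the class name, constructor, and ColumnName property) is an assumption for illustration, since the exact IColumn member set varies across BenchmarkDotNet versions.

 // A sketch of a constant-valued column; members other than IsDefault and
 // GetValue are illustrative and may differ from the real IColumn surface.
 public class TagColumnSketch // hypothetical name
 {
     private readonly string _value;

     public TagColumnSketch(string columnName, string value)
     {
         ColumnName = columnName;
         _value     = value;
     }

     public string ColumnName { get; }

     // Matches Example #1: this column never replaces a default column.
     public bool IsDefault(Summary summary, BenchmarkDotNet.Running.Benchmark benchmark) => true;

     // Matches Example #2: the same fixed value is reported for every benchmark.
     public string GetValue(Summary summary, BenchmarkDotNet.Running.Benchmark benchmark, ISummaryStyle style) => _value;
 }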
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config)
        {
            var executeResults = new List<ExecuteResult>();

            logger.WriteLineInfo("// *** Execute ***");
            var launchCount = Math.Max(1, benchmark.Job.LaunchCount.IsAuto ? 2 : benchmark.Job.LaunchCount.Value);

            for (int processNumber = 0; processNumber < launchCount; processNumber++)
            {
                var printedProcessNumber = (benchmark.Job.LaunchCount.IsAuto && processNumber < 2) ? "" : " / " + launchCount.ToString();
                logger.WriteLineInfo($"// Launch: {processNumber + 1}{printedProcessNumber}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                executeResults.Add(executeResult);

                var measurements = executeResults
                                   .SelectMany(r => r.Data)
                                   .Select(line => Measurement.Parse(logger, line, 0))
                                   .Where(r => r.IterationMode != IterationMode.Unknown)
                                   .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError($"No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (benchmark.Job.LaunchCount.IsAuto && processNumber == 1)
                {
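                    // Compare the median idle (overhead) time against the median main time;
                    // the larger the overhead's share, the more launches are scheduled.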
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent    = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Any())
            {
                logger.WriteLineInfo("// Run, Diagnostic");
                config.GetCompositeDiagnoser().Start(benchmark);
                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, config.GetCompositeDiagnoser());
                var allRuns       = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                var report        = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns);
                config.GetCompositeDiagnoser().Stop(benchmark, report);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                logger.WriteLine();
            }

            return executeResults;
        }
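To make the empirical launch-count formula above concrete, here is a standalone sketch of the arithmetic; the helper name and the input medians are invented for illustration, not taken from BenchmarkDotNet.

        // A standalone sketch of the launch-count heuristic used above.
        private static int EstimateLaunchCount(double idleMedianNs, double mainMedianNs)
        {
            var percent = idleMedianNs / mainMedianNs * 100;
            return (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // the same empirical formula
        }

        // E.g. idle = 10 ns, main = 100 ns => percent = 10 => round(2 + 9/3) = 5 launches;
        //      idle = 1 ns,  main = 100 ns => percent = 1  => max(2, 2)      = 2 launches.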
        private static BuildResult Build(ILogger logger, IToolchain toolchain, GenerateResult generateResult, Benchmark benchmark)
        {
            logger.WriteLineInfo("// *** Build ***");
            var buildResult = toolchain.Builder.Build(generateResult, logger, benchmark);

            if (buildResult.IsBuildSuccess)
            {
                logger.WriteLineInfo("// Result = Success");
            }
            else
            {
                logger.WriteLineError("// Result = Failure");
                if (buildResult.BuildException != null)
                {
                    logger.WriteLineError($"// Exception: {buildResult.BuildException.Message}");
                }
            }
            logger.WriteLine();
            return buildResult;
        }
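For orientation, Build sits between generation and execution in the runner pipeline. The call order below is a hedged sketch in comments only, since the generator's exact signature is version-specific:

        // Illustrative call order only; the generator's exact signature varies by version.
        // var generateResult = toolchain.Generator.GenerateProject(...);                      // 1. generate sources/project
        // var buildResult    = Build(logger, toolchain, generateResult, benchmark);           // 2. compile (method above)
        // var executeResults = Execute(logger, benchmark, toolchain, buildResult, config);    // 3. run (method above)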
        private static (List<ExecuteResult> executeResults, GcStats gcStats) Execute(
            ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver)
        {
            var executeResults = new List<ExecuteResult>();
            var gcStats        = default(GcStats);

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount         = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int  defaultValue            = analyzeRunToRunVariance ? 2 : 1;
            int  launchCount             = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            var noOverheadCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.NoOverhead);

            for (int launchIndex = 1; launchIndex <= launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance && autoLaunchCount && launchIndex <= 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex}{printedLaunchCount}");

                // Use the diagnoser only for the last launch (we need a single result, not many)
                bool useDiagnoser = launchIndex == launchCount && noOverheadCompositeDiagnoser != null;

                var executeResult = toolchain.Executor.Execute(
                    new ExecuteParameters(
                        buildResult,
                        benchmark,
                        logger,
                        resolver,
                        config,
                        useDiagnoser ? noOverheadCompositeDiagnoser : null));

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError($"Executable {buildResult.ArtifactsPaths.ExecutablePath} not found");
                }
                if (executeResult.ExitCode != 0)
                {
                    logger.WriteLineError("ExitCode != 0");
                }

                executeResults.Add(executeResult);

                var measurements = executeResults
                                   .SelectMany(r => r.Data)
                                   .Select(line => Measurement.Parse(logger, line, 0))
                                   .Where(r => r.IterationMode != IterationMode.Unknown)
                                   .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (useDiagnoser)
                {
                    if (config.HasMemoryDiagnoser())
                    {
                        gcStats = GcStats.Parse(executeResult.Data.Last());
                    }

                    noOverheadCompositeDiagnoser.ProcessResults(
                        new DiagnoserResults(benchmark, measurements.Where(measurement => !measurement.IterationMode.IsIdle()).Sum(m => m.Operations), gcStats));
                }

                if (autoLaunchCount && launchIndex == 2 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent    = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            var extraRunCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.ExtraRun);

            if (extraRunCompositeDiagnoser != null)
            {
                logger.WriteLineInfo("// Run, Diagnostic");

                var executeResult = toolchain.Executor.Execute(
                    new ExecuteParameters(buildResult, benchmark, logger, resolver, config, extraRunCompositeDiagnoser));

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();

                extraRunCompositeDiagnoser.ProcessResults(
                    new DiagnoserResults(benchmark, allRuns.Where(measurement => !measurement.IterationMode.IsIdle()).Sum(m => m.Operations), gcStats));

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                logger.WriteLine();
            }

            var separateLogicCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.SeparateLogic);

            if (separateLogicCompositeDiagnoser != null)
            {
                logger.WriteLineInfo("// Run, Diagnostic [SeparateLogic]");

                separateLogicCompositeDiagnoser.Handle(HostSignal.SeparateLogic, new DiagnoserActionParameters(null, benchmark, config));
            }

            return (executeResults, gcStats);
        }
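Because this overload returns a named tuple, callers can deconstruct the result directly. A usage sketch, assuming logger, benchmark, toolchain, buildResult, config, and resolver are in scope:

        // Deconstruct the named tuple returned by the overload above.
        var (executeResults, gcStats) = Execute(logger, benchmark, toolchain, buildResult, config, resolver);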
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver, out GcStats gcStats)
        {
            var executeResults = new List<ExecuteResult>();

            gcStats = default(GcStats);

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount         = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int  defaultValue            = analyzeRunToRunVariance ? 2 : 1;
            int  launchCount             = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            for (int launchIndex = 0; launchIndex < launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance &&
                                             autoLaunchCount &&
                                             launchIndex < 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex + 1}{printedLaunchCount}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                if (executeResult.ExitCode != 0)
                {
                    logger.WriteLineError("ExitCode != 0");
                }
                executeResults.Add(executeResult);

                var measurements = executeResults
                                   .SelectMany(r => r.Data)
                                   .Select(line => Measurement.Parse(logger, line, 0))
                                   .Where(r => r.IterationMode != IterationMode.Unknown)
                                   .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (autoLaunchCount && launchIndex == 1 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent    = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Any())
            {
                logger.WriteLineInfo("// Run, Diagnostic");
                var compositeDiagnoser = config.GetCompositeDiagnoser();

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver, compositeDiagnoser);

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                gcStats = GcStats.Parse(executeResult.Data.Last());
                var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns, gcStats);
                compositeDiagnoser.ProcessResults(benchmark, report);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                logger.WriteLine();
            }

            return executeResults;
        }
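This final overload is the out-parameter counterpart of the tuple-returning version above: the measurements come back in the return value and GcStats arrives through the out argument. A call-site sketch under the same scope assumptions:

        // The out-parameter variant surfaces GcStats without a tuple (C# 7 inline out variable).
        var executeResults = Execute(logger, benchmark, toolchain, buildResult, config, resolver, out GcStats gcStats);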