private static BenchmarkReport CreateReport(BenchmarkCase benchmarkCase, int n, double nanoseconds)
{
    // Fabricate a minimal successful report for the given case:
    // an empty-artifacts build, one successful execution, and n workload measurements.
    var build = BuildResult.Success(GenerateResult.Success(ArtifactsPaths.Empty, Array.Empty<string>()));

    // Single run whose captured output contains one extra "// Runtime=" line.
    var execution = new ExecuteResult(true, 0, 0, Array.Empty<string>(), new[] { "// Runtime=extra output line" });

    // n workload-result measurements of 1 op each, with durations
    // nanoseconds, nanoseconds + 1, ..., nanoseconds + n - 1.
    var results = new List<Measurement>(n);
    for (int i = 0; i < n; i++)
    {
        results.Add(new Measurement(1, IterationMode.Workload, IterationStage.Result, i + 1, 1, nanoseconds + i));
    }

    // Fixed, pre-baked GC stats line; values are arbitrary test data.
    var gc = GcStats.Parse("GC: 100 10 1 666666 888");

    return new BenchmarkReport(
        true,
        benchmarkCase,
        build,
        build,
        new List<ExecuteResult> { execution },
        results,
        gc,
        Array.Empty<Metric>());
}
/// <summary>
/// Extracts the GC stats line from the intercepted executor output, caches the parsed
/// stats per benchmark case, and returns the default <see cref="MemoryDiagnoser"/>
/// metrics re-labelled with an "X" prefix so they can be told apart from the built-in ones.
/// </summary>
/// <param name="diagnoserResults">Results for the benchmark case being processed.</param>
/// <returns>Re-labelled memory metrics computed from the intercepted GC stats.</returns>
/// <exception cref="InvalidOperationException">
/// Thrown when the intercepted output contains no "GC"-prefixed line.
/// </exception>
public IEnumerable<Metric> ProcessResults(DiagnoserResults diagnoserResults)
{
    // The GC stats line is the last "GC"-prefixed line of the intercepted process output.
    // Ordinal comparison: this is a machine-emitted marker, not linguistic text (CA1310).
    var gcStatsLine = InterceptingExecutor.LastExecResult.Data
        .LastOrDefault(line => line.StartsWith("GC", StringComparison.Ordinal));

    if (gcStatsLine is null)
    {
        // Dump the full captured output for diagnosis, then fail explicitly.
        // (Previously the code fell through and passed null to GcStats.Parse,
        // producing an opaque exception far from the actual cause.)
        Console.WriteLine(string.Join("\n", InterceptingExecutor.LastExecResult.Data));
        throw new InvalidOperationException("No GC stats line (prefixed with \"GC\") found in the intercepted benchmark output.");
    }

    var gcStats = GcStats.Parse(gcStatsLine);
    savedStats[diagnoserResults.BenchmarkCase] = gcStats;

    // Delegate the actual metric computation to the default MemoryDiagnoser,
    // substituting our intercepted GC stats, then prefix every descriptor with "X".
    return MemoryDiagnoser.Default
        .ProcessResults(new DiagnoserResults(
            diagnoserResults.BenchmarkCase,
            diagnoserResults.TotalOperations,
            gcStats,
            diagnoserResults.ThreadingStats,
            diagnoserResults.BuildResult))
        .Select(m => new Metric(
            new GenericMetricDescriptor(
                "X" + m.Descriptor.Id,
                "X" + m.Descriptor.DisplayName,
                m.Descriptor.UnitType,
                m.Descriptor.Legend,
                m.Descriptor.Unit,
                m.Descriptor.TheGreaterTheBetter,
                m.Descriptor.NumberFormat),
            m.Value));
}
/// <summary>
/// Runs the benchmark one or more times (auto-scaling the launch count when
/// run-to-run variance analysis is enabled), collecting execute results and —
/// when a memory diagnoser is configured — GC stats from the final launch.
/// An optional extra diagnostic run is performed afterwards and its timing
/// results are discarded so diagnoser overhead does not skew the measurements.
/// </summary>
private static (List<ExecuteResult> executeResults, GcStats gcStats) Execute(
    ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver)
{
    var results = new List<ExecuteResult>();
    var gcStats = default(GcStats);

    logger.WriteLineInfo("// *** Execute ***");

    bool analyzeVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
    bool autoLaunchCount = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
    int variancelessDefault = analyzeVariance ? 2 : 1;
    int launchCount = Math.Max(1, autoLaunchCount ? variancelessDefault : benchmark.Job.Run.LaunchCount);

    var noOverheadDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.NoOverhead);

    for (int launch = 1; launch <= launchCount; launch++)
    {
        // While auto-sizing is still undecided (first two launches), don't print a total.
        bool hideTotal = analyzeVariance && autoLaunchCount && launch <= 2;
        logger.WriteLineInfo($"// Launch: {launch}{(hideTotal ? "" : " / " + launchCount)}");

        // Attach the no-overhead diagnoser only on the last launch: it needs a single result.
        bool useDiagnoser = launch == launchCount && noOverheadDiagnoser != null;

        var result = toolchain.Executor.Execute(
            new ExecuteParameters(buildResult, benchmark, logger, resolver, config, useDiagnoser ? noOverheadDiagnoser : null));

        if (!result.FoundExecutable)
            logger.WriteLineError($"Executable {buildResult.ArtifactsPaths.ExecutablePath} not found");
        if (result.ExitCode != 0)
            logger.WriteLineError("ExitCode != 0");

        results.Add(result);

        // Parse every measurement collected so far, across all launches.
        var measurements = results
            .SelectMany(r => r.Data)
            .Select(line => Measurement.Parse(logger, line, 0))
            .Where(m => m.IterationMode != IterationMode.Unknown)
            .ToArray();

        if (!measurements.Any())
        {
            // Something went wrong during the benchmark, don't bother doing more runs
            logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
            break;
        }

        if (useDiagnoser)
        {
            if (config.HasMemoryDiagnoser())
                gcStats = GcStats.Parse(result.Data.Last());

            long totalOperations = measurements
                .Where(m => !m.IterationMode.IsIdle())
                .Sum(m => m.Operations);
            noOverheadDiagnoser.ProcessResults(new DiagnoserResults(benchmark, totalOperations, gcStats));
        }

        if (autoLaunchCount && launch == 2 && analyzeVariance)
        {
            // TODO: improve this logic
            var idleMedian = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
            var mainMedian = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
            var idlePercent = idleMedian / mainMedian * 100;
            launchCount = (int)Math.Round(Math.Max(2, 2 + (idlePercent - 1) / 3)); // an empirical formula
        }
    }

    logger.WriteLine();

    // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
    var extraRunDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.ExtraRun);
    if (extraRunDiagnoser != null)
    {
        logger.WriteLineInfo("// Run, Diagnostic");

        var diagnosticResult = toolchain.Executor.Execute(
            new ExecuteParameters(buildResult, benchmark, logger, resolver, config, extraRunDiagnoser));

        var diagnosticRuns = diagnosticResult.Data
            .Select(line => Measurement.Parse(logger, line, 0))
            .Where(m => m.IterationMode != IterationMode.Unknown)
            .ToList();

        long diagnosticOperations = diagnosticRuns
            .Where(m => !m.IterationMode.IsIdle())
            .Sum(m => m.Operations);
        extraRunDiagnoser.ProcessResults(new DiagnoserResults(benchmark, diagnosticOperations, gcStats));

        if (!diagnosticResult.FoundExecutable)
            logger.WriteLineError("Executable not found");

        logger.WriteLine();
    }

    // Diagnosers that drive their own process (no benchmark execution needed here).
    var separateLogicDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.SeparateLogic);
    if (separateLogicDiagnoser != null)
    {
        logger.WriteLineInfo("// Run, Diagnostic [SeparateLogic]");
        separateLogicDiagnoser.Handle(HostSignal.SeparateLogic, new DiagnoserActionParameters(null, benchmark, config));
    }

    return (results, gcStats);
}
/// <summary>
/// Runs the benchmark one or more times, auto-scaling the launch count when
/// run-to-run variance analysis is enabled. If any diagnosers are configured,
/// performs one additional diagnostic run whose timing results are discarded
/// (only its GC stats, returned via <paramref name="gcStats"/>, are kept).
/// </summary>
private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver, out GcStats gcStats)
{
    var results = new List<ExecuteResult>();
    gcStats = default(GcStats);

    logger.WriteLineInfo("// *** Execute ***");

    bool analyzeVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
    bool autoLaunchCount = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
    int variancelessDefault = analyzeVariance ? 2 : 1;
    int launchCount = Math.Max(1, autoLaunchCount ? variancelessDefault : benchmark.Job.Run.LaunchCount);

    for (int launch = 0; launch < launchCount; launch++)
    {
        // While auto-sizing is still undecided (first two launches), don't print a total.
        bool hideTotal = analyzeVariance && autoLaunchCount && launch < 2;
        logger.WriteLineInfo($"// Launch: {launch + 1}{(hideTotal ? "" : " / " + launchCount)}");

        var result = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver);

        if (!result.FoundExecutable)
            logger.WriteLineError($"Executable {buildResult.ArtifactsPaths.ExecutablePath} not found");
        if (result.ExitCode != 0)
            logger.WriteLineError("ExitCode != 0");

        results.Add(result);

        // Parse every measurement collected so far, across all launches.
        var measurements = results
            .SelectMany(r => r.Data)
            .Select(line => Measurement.Parse(logger, line, 0))
            .Where(m => m.IterationMode != IterationMode.Unknown)
            .ToArray();

        if (!measurements.Any())
        {
            // Something went wrong during the benchmark, don't bother doing more runs
            logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
            break;
        }

        if (autoLaunchCount && launch == 1 && analyzeVariance)
        {
            // TODO: improve this logic
            var idleMedian = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
            var mainMedian = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
            var idlePercent = idleMedian / mainMedian * 100;
            launchCount = (int)Math.Round(Math.Max(2, 2 + (idlePercent - 1) / 3)); // an empirical formula
        }
    }

    logger.WriteLine();

    // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
    if (config.GetDiagnosers().Any())
    {
        logger.WriteLineInfo("// Run, Diagnostic");

        var compositeDiagnoser = config.GetCompositeDiagnoser();
        var diagnosticResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver, compositeDiagnoser);

        var diagnosticRuns = diagnosticResult.Data
            .Select(line => Measurement.Parse(logger, line, 0))
            .Where(m => m.IterationMode != IterationMode.Unknown)
            .ToList();

        gcStats = GcStats.Parse(diagnosticResult.Data.Last());

        var report = new BenchmarkReport(benchmark, null, null, new[] { diagnosticResult }, diagnosticRuns, gcStats);
        compositeDiagnoser.ProcessResults(benchmark, report);

        if (!diagnosticResult.FoundExecutable)
            logger.WriteLineError("Executable not found");

        logger.WriteLine();
    }

    return results;
}