Example #1
 public void AddToolchain(IToolchain toolchain)
 {
     Toolchains.Add(toolchain);
 }
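A minimal usage sketch for a registry method like this; ToolchainRegistry and GetMyToolchain are assumed names for illustration, not taken from the source:

 // Hypothetical usage: ToolchainRegistry stands for the class hosting
 // AddToolchain above; customToolchain is any IToolchain implementation.
 var registry = new ToolchainRegistry();
 IToolchain customToolchain = GetMyToolchain(); // assumed helper
 registry.AddToolchain(customToolchain);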
Example #2
 public ToolchainReference(IToolchain toolchain)
 {
     Id = toolchain.UniqueId;
 }
Example #3
 public bool Match(IToolchain toolchain)
 {
     return (string.Compare(toolchain.UniqueId, Id, true) == 0);
 }
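Taken together with Example #2, a short sketch of the round trip (the local toolchain variable is assumed):

 // A reference built from a toolchain later matches that same toolchain,
 // because Match compares UniqueId case-insensitively (string.Compare(..., true)).
 var reference = new ToolchainReference(toolchain);
 bool matches = reference.Match(toolchain); // true: same UniqueId, any casing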
Example #4
 public void ArgumentsFromSourceArePassedToBenchmarks(IToolchain toolchain) => CanExecute<WithArgumentsSource>(toolchain);
Example #5
 // Infrastructure
 public static Job With(this Job job, IToolchain toolchain) => job.WithCore(j => j.Infrastructure.Toolchain = toolchain);
Example #6
 private static BuildResult Build(ILogger logger, IToolchain toolchain, GenerateResult generateResult, Benchmark benchmark)
 {
     logger.WriteLineInfo("// *** Build ***");
     var buildResult = toolchain.Builder.Build(generateResult, logger, benchmark);
     if (buildResult.IsBuildSuccess)
     {
         logger.WriteLineInfo("// Result = Success");
     }
     else
     {
         logger.WriteLineError("// Result = Failure");
         if (buildResult.BuildException != null)
             logger.WriteLineError($"// Exception: {buildResult.BuildException.Message}");
     }
     logger.WriteLine();
     return buildResult;
 }
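Build sits between the Generate and Execute stages shown elsewhere in this listing; here is a condensed, hedged sketch of the whole pipeline using only the helper names visible in these snippets (the wiring is simplified, not the exact BenchmarkDotNet signatures):

 // Sketch of the Generate -> Build -> Execute flow; Generate, Build and
 // Execute are the helpers defined in this listing, simplified for illustration.
 var generateResult = Generate(logger, toolchain, benchmark, rootArtifactsFolderPath, config);
 if (generateResult.IsGenerateSuccess)
 {
     var buildResult = Build(logger, toolchain, generateResult, benchmark);
     if (buildResult.IsBuildSuccess)
         Execute(logger, benchmark, toolchain, buildResult, config, resolver);
 }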
Example #7
 public void ArgumentsCanBePassedByReferenceToBenchmark(IToolchain toolchain) => CanExecute<WithRefArguments>(toolchain);
Example #8
 public void MemoryDiagnoserDoesNotIncludeAllocationsFromGlobalSetupAndCleanup(IToolchain toolchain)
 {
     AssertAllocations(toolchain, typeof(AllocatingGlobalSetupAndCleanup), new Dictionary<string, long>
     {
         { nameof(AllocatingGlobalSetupAndCleanup.AllocateNothing), 0 }
     });
 }
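An illustrative shape for the benchmark type referenced above (a hypothetical reconstruction, not the exact test class): allocations made in [GlobalSetup]/[GlobalCleanup] must not be attributed to the benchmark itself, so AllocateNothing is expected to report zero bytes.

 public class AllocatingGlobalSetupAndCleanup
 {
     private List<object> buffer;

     [GlobalSetup]
     public void Setup() => buffer = new List<object>(capacity: 100); // allocates, but outside the benchmark

     [GlobalCleanup]
     public void Cleanup() => buffer = null;

     [Benchmark]
     public void AllocateNothing() { } // MemoryDiagnoser should report 0 allocated bytes
 }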
Example #9
 // Infrastructure
 public static Job With(this Job job, IToolchain toolchain) => job.WithCore(j => j.Infrastructure.Toolchain = toolchain);
Example #10
 private InfrastructureMode(IToolchain toolchain)
 {
     Toolchain = toolchain;
 }
Example #11
        private static (List<ExecuteResult> executeResults, GcStats gcStats) Execute(
            ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver)
        {
            var executeResults = new List<ExecuteResult>();
            var gcStats        = default(GcStats);

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount         = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int  defaultValue            = analyzeRunToRunVariance ? 2 : 1;
            int  launchCount             = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            var noOverheadCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.NoOverhead);

            for (int launchIndex = 1; launchIndex <= launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance && autoLaunchCount && launchIndex <= 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex}{printedLaunchCount}");

                // use diagnoser only for the last run (we need single result, not many)
                bool useDiagnoser = launchIndex == launchCount && noOverheadCompositeDiagnoser != null;

                var executeResult = toolchain.Executor.Execute(
                    new ExecuteParameters(
                        buildResult,
                        benchmark,
                        logger,
                        resolver,
                        config,
                        useDiagnoser ? noOverheadCompositeDiagnoser : null));

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError($"Executable {buildResult.ArtifactsPaths.ExecutablePath} not found");
                }
                if (executeResult.ExitCode != 0)
                {
                    logger.WriteLineError("ExitCode != 0");
                }

                executeResults.Add(executeResult);

                var measurements = executeResults
                                   .SelectMany(r => r.Data)
                                   .Select(line => Measurement.Parse(logger, line, 0))
                                   .Where(r => r.IterationMode != IterationMode.Unknown)
                                   .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (useDiagnoser)
                {
                    if (config.HasMemoryDiagnoser())
                    {
                        gcStats = GcStats.Parse(executeResult.Data.Last());
                    }

                    noOverheadCompositeDiagnoser.ProcessResults(
                        new DiagnoserResults(benchmark, measurements.Where(measurement => !measurement.IterationMode.IsIdle()).Sum(m => m.Operations), gcStats));
                }

                if (autoLaunchCount && launchIndex == 2 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent    = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            var extraRunCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.ExtraRun);

            if (extraRunCompositeDiagnoser != null)
            {
                logger.WriteLineInfo("// Run, Diagnostic");

                var executeResult = toolchain.Executor.Execute(
                    new ExecuteParameters(buildResult, benchmark, logger, resolver, config, extraRunCompositeDiagnoser));

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();

                extraRunCompositeDiagnoser.ProcessResults(
                    new DiagnoserResults(benchmark, allRuns.Where(measurement => !measurement.IterationMode.IsIdle()).Sum(m => m.Operations), gcStats));

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                logger.WriteLine();
            }

            var separateLogicCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.SeparateLogic);

            if (separateLogicCompositeDiagnoser != null)
            {
                logger.WriteLineInfo("// Run, Diagnostic [SeparateLogic]");

                separateLogicCompositeDiagnoser.Handle(HostSignal.SeparateLogic, new DiagnoserActionParameters(null, benchmark, config));
            }

            return (executeResults, gcStats);
        }
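The "empirical formula" comment above is worth unpacking; a quick worked example (values chosen purely for illustration):

 // If the idle (overhead) median is 20% of the main median, percent = 20:
 //   launchCount = round(max(2, 2 + (20 - 1) / 3)) = round(8.33) = 8
 // so noisier benchmarks trigger more launches; percent = 1 yields the minimum of 2.
 double percent = 20;
 int launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3));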
Example #12
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver)
        {
            var executeResults = new List<ExecuteResult>();

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int defaultValue = analyzeRunToRunVariance ? 2 : 1;
            int launchCount = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            for (int launchIndex = 0; launchIndex < launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance &&
                    autoLaunchCount &&
                    launchIndex < 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex + 1}{printedLaunchCount}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                if (executeResult.ExitCode != 0)
                    logger.WriteLineError("ExitCode != 0");
                executeResults.Add(executeResult);

                var measurements = executeResults
                        .SelectMany(r => r.Data)
                        .Select(line => Measurement.Parse(logger, line, 0))
                        .Where(r => r.IterationMode != IterationMode.Unknown)
                        .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError($"No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (autoLaunchCount && launchIndex == 1 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Any())
            {
                logger.WriteLineInfo("// Run, Diagnostic");
                var compositeDiagnoser = config.GetCompositeDiagnoser();

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver, compositeDiagnoser);

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns);
                compositeDiagnoser.ProcessResults(benchmark, report);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                logger.WriteLine();
            }

            return executeResults;
        }
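Compared with Example #11, this older revision drives diagnosers through a full BenchmarkReport instead of the lighter DiagnoserResults; a hedged side-by-side of just that call (totalOperations is an assumed local):

 // Newer style (Example #11): per-RunMode composite, fed a DiagnoserResults.
 noOverheadCompositeDiagnoser.ProcessResults(new DiagnoserResults(benchmark, totalOperations, gcStats));
 // Older style (this example): a single composite, fed a full BenchmarkReport.
 compositeDiagnoser.ProcessResults(benchmark, report);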
Example #13
 public static IJob With(this IJob job, IToolchain toolchain) => job.With(j => j.Toolchain = toolchain);
Example #14
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver, out GcStats gcStats)
        {
            var executeResults = new List<ExecuteResult>();

            gcStats = default(GcStats);

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount         = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int  defaultValue            = analyzeRunToRunVariance ? 2 : 1;
            int  launchCount             = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            for (int launchIndex = 0; launchIndex < launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance &&
                                             autoLaunchCount &&
                                             launchIndex < 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex + 1}{printedLaunchCount}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                if (executeResult.ExitCode != 0)
                {
                    logger.WriteLineError("ExitCode != 0");
                }
                executeResults.Add(executeResult);

                var measurements = executeResults
                                   .SelectMany(r => r.Data)
                                   .Select(line => Measurement.Parse(logger, line, 0))
                                   .Where(r => r.IterationMode != IterationMode.Unknown)
                                   .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (autoLaunchCount && launchIndex == 1 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent    = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Any())
            {
                logger.WriteLineInfo("// Run, Diagnostic");
                var compositeDiagnoser = config.GetCompositeDiagnoser();

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver, compositeDiagnoser);

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                gcStats = GcStats.Parse(executeResult.Data.Last());
                var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns, gcStats);
                compositeDiagnoser.ProcessResults(benchmark, report);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                logger.WriteLine();
            }

            return executeResults;
        }
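This variant returns GC statistics through an out parameter, where Example #11 used a value tuple; a hedged call-site comparison (the locals are assumed):

 // Tuple-returning variant (Example #11):
 var (executeResults, gcStats) = Execute(logger, benchmark, toolchain, buildResult, config, resolver);
 // out-parameter variant (this example):
 List<ExecuteResult> results = Execute(logger, benchmark, toolchain, buildResult, config, resolver, out GcStats stats);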
Example #15
 public static IJob With(this IJob job, IToolchain toolchain) => job.With(j => j.Toolchain = toolchain);
Example #16
 public void NonCompileTimeConstantsCanBeReturnedFromSource(IToolchain toolchain) => CanExecute<WithComplexTypesReturnedFromSources>(toolchain);
Example #17
 private static GenerateResult Generate(ILogger logger, IToolchain toolchain, Benchmark benchmark, string rootArtifactsFolderPath, IConfig config)
 {
     logger.WriteLineInfo("// *** Generate *** ");
     var generateResult = toolchain.Generator.GenerateProject(benchmark, logger, rootArtifactsFolderPath, config);
     if (generateResult.IsGenerateSuccess)
     {
         logger.WriteLineInfo("// Result = Success");
         logger.WriteLineInfo($"// {nameof(generateResult.ArtifactsPaths.BinariesDirectoryPath)} = {generateResult.ArtifactsPaths?.BinariesDirectoryPath}");
     }
     else
     {
         logger.WriteLineError("// Result = Failure");
         if (generateResult.GenerateException != null)
             logger.WriteLineError($"// Exception: {generateResult.GenerateException.Message}");
     }
     logger.WriteLine();
     return generateResult;
 }
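Note the null-conditional on ArtifactsPaths in the log line above; a short hedged sketch of consuming the result defensively:

 // ArtifactsPaths can be null when generation fails, hence the ?. above.
 var generateResult = Generate(logger, toolchain, benchmark, rootArtifactsFolderPath, config);
 string binariesDir = generateResult.ArtifactsPaths?.BinariesDirectoryPath ?? "<not generated>";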
Example #18
 public void ArgumentsArePassedToBenchmarks(IToolchain toolchain) => CanExecute <WithArguments>(toolchain);
Example #19
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config)
        {
            var executeResults = new List<ExecuteResult>();

            logger.WriteLineInfo("// *** Execute ***");
            var launchCount = Math.Max(1, benchmark.Job.LaunchCount.IsAuto ? 2 : benchmark.Job.LaunchCount.Value);

            for (int processNumber = 0; processNumber < launchCount; processNumber++)
            {
                var printedProcessNumber = (benchmark.Job.LaunchCount.IsAuto && processNumber < 2) ? "" : " / " + launchCount.ToString();
                logger.WriteLineInfo($"// Launch: {processNumber + 1}{printedProcessNumber}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                executeResults.Add(executeResult);

                var measurements = executeResults
                        .SelectMany(r => r.Data)
                        .Select(line => Measurement.Parse(logger, line, 0))
                        .Where(r => r.IterationMode != IterationMode.Unknown)
                        .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError($"No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (benchmark.Job.LaunchCount.IsAuto && processNumber == 1)
                {
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Count() > 0)
            {
                logger.WriteLineInfo($"// Run, Diagnostic");
                config.GetCompositeDiagnoser().Start(benchmark);
                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, config.GetCompositeDiagnoser());
                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns);
                config.GetCompositeDiagnoser().Stop(benchmark, report);

                if (!executeResult.FoundExecutable)
                    logger.WriteLineError("Executable not found");
                logger.WriteLine();
            }

            return executeResults;
        }
Example #20
 public static Job With(this Job job, IToolchain toolchain) => job.WithToolchain(toolchain);
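A hedged usage sketch for these With/WithToolchain extensions; InProcessToolchain.Instance is a built-in BenchmarkDotNet toolchain, but treat the exact config wiring as an assumption:

 // Hypothetical usage: attach a toolchain to a job via the extension above,
 // then run benchmarks with that job through a manual config.
 var job = Job.Default.With(InProcessToolchain.Instance);
 var config = ManualConfig.Create(DefaultConfig.Instance).With(job);
 BenchmarkRunner.Run<MyBenchmarks>(config); // MyBenchmarks is an assumed class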