Example #1
        // Converted from Java; the lowercase helpers (nanoTime, NANOSECONDS,
        // max) and _listener.accept come from the converter's Java-compat shims.
        internal virtual void Monitor()
        {
            GcStats lastGcStats    = GetGcStats();
            long    nextCheckPoint = nanoTime() + _measurementDurationNs;

            while (!Stopped)
            {
                // Sleep for one measurement window; any extra delay past the
                // expected wake-up time is treated as a VM pause.
                NANOSECONDS.sleep(_measurementDurationNs);
                long now     = nanoTime();
                long pauseNs = max(0L, now - nextCheckPoint);
                nextCheckPoint = now + _measurementDurationNs;

                GcStats gcStats = GetGcStats();
                if (pauseNs >= _stallAlertThresholdNs)
                {
                    VmPauseInfo pauseInfo = new VmPauseInfo(
                        NANOSECONDS.toMillis(pauseNs),
                        gcStats.Time - lastGcStats.Time,
                        gcStats.Count - lastGcStats.Count);
                    _listener.accept(pauseInfo);
                }
                lastGcStats = gcStats;
            }
        }
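The loop works by deliberate oversleeping: it sleeps for one measurement window and treats any extra delay past the expected wake-up time as a VM pause (a stop-the-world GC, an OS scheduling stall, and so on). A minimal self-contained C# sketch of just that technique, with illustrative names that are not taken from the snippet above:

        using System;
        using System.Diagnostics;
        using System.Threading;

        static class PauseSketch
        {
            // Reports every stall longer than `threshold` via `onPause`.
            // Sketch only: no stop flag and no GC-stat correlation.
            public static void Run(TimeSpan window, TimeSpan threshold, Action<TimeSpan> onPause)
            {
                var clock = Stopwatch.StartNew();
                TimeSpan nextCheckPoint = clock.Elapsed + window;

                while (true)
                {
                    Thread.Sleep(window);
                    TimeSpan now = clock.Elapsed;
                    // If the process was frozen, we wake up well past the checkpoint.
                    TimeSpan pause = now - nextCheckPoint;
                    nextCheckPoint = now + window;

                    if (pause >= threshold)
                    {
                        onPause(pause);
                    }
                }
            }
        }

The original additionally samples GC statistics around each window, so a reported pause carries the GC time and collection count accumulated while the process was frozen.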
Example #2
 public DiagnoserResults(BenchmarkCase benchmarkCase, long totalOperations, GcStats gcStats, ThreadingStats threadingStats)
 {
     BenchmarkCase   = benchmarkCase;
     TotalOperations = totalOperations;
     GcStats         = gcStats;
     ThreadingStats  = threadingStats;
 }
Example #3
 public BenchmarkGcStats(GcStats statistics)
 {
     Gen0Collections            = statistics.Gen0Collections;
     Gen1Collections            = statistics.Gen1Collections;
     Gen2Collections            = statistics.Gen2Collections;
     TotalOperations            = statistics.TotalOperations;
     BytesAllocatedPerOperation = statistics.BytesAllocatedPerOperation;
 }
Example #4
 internal ExecuteResult(List<Measurement> measurements, GcStats gcStats, ThreadingStats threadingStats)
 {
     FoundExecutable   = true;
     ExitCode          = 0;
     errors            = new List<string>();
     ExtraOutput       = Array.Empty<string>();
     this.measurements = measurements;
     GcStats           = gcStats;
     ThreadingStats    = threadingStats;
 }
Example #5
        private static BenchmarkReport CreateReport(BenchmarkCase benchmarkCase, int n, double nanoseconds)
        {
            var buildResult   = BuildResult.Success(GenerateResult.Success(ArtifactsPaths.Empty, Array.Empty<string>()));
            var executeResult = new ExecuteResult(true, 0, 0, Array.Empty<string>(), new[] { "// Runtime=extra output line" });
            var measurements  = Enumerable.Range(0, n)
                                .Select(index => new Measurement(1, IterationMode.Workload, IterationStage.Result, index + 1, 1, nanoseconds + index))
                                .ToList();
            var gcStats = GcStats.Parse("GC: 100 10 1 666666 888");

            return new BenchmarkReport(true, benchmarkCase, buildResult, buildResult,
                                       new List<ExecuteResult> { executeResult }, measurements, gcStats, Array.Empty<Metric>());
        }
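The hard-coded line handed to GcStats.Parse mimics the stats line the benchmark process prints for the host to read back. Judging from the property names used across these examples (an inference, not something the snippet states), the fields after the GC: prefix are the gen0/gen1/gen2 collection counts, total allocated bytes, and total operation count. A throwaway decoder for that assumed shape:

        // Field order assumed from context: "GC: gen0 gen1 gen2 allocatedBytes totalOperations".
        string line = "GC: 100 10 1 666666 888";
        string[] f = line.Split(' ');
        int  gen0       = int.Parse(f[1]);   // 100 gen0 collections
        int  gen1       = int.Parse(f[2]);   // 10 gen1 collections
        int  gen2       = int.Parse(f[3]);   // 1 gen2 collection
        long allocated  = long.Parse(f[4]);  // 666666 bytes allocated in total
        long operations = long.Parse(f[5]);  // 888 measured operations
        double bytesPerOp = (double)allocated / operations;  // ≈ 750.75 B/op
        Console.WriteLine($"{gen0}/{gen1}/{gen2} collections, {bytesPerOp:F2} B/op");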
Example #6
 public BenchmarkReport(
     BenchmarkCase benchmarkCase,
     GenerateResult generateResult,
     BuildResult buildResult,
     IReadOnlyList<ExecuteResult> executeResults,
     IReadOnlyList<Measurement> allMeasurements,
     GcStats gcStats)
 {
     BenchmarkCase   = benchmarkCase;
     GenerateResult  = generateResult;
     BuildResult     = buildResult;
     ExecuteResults  = executeResults ?? Array.Empty<ExecuteResult>();
     AllMeasurements = allMeasurements ?? Array.Empty<Measurement>();
     GcStats         = gcStats;
 }
Example #7
 public BenchmarkReport(
     Benchmark benchmark,
     GenerateResult generateResult,
     BuildResult buildResult,
     IReadOnlyList<ExecuteResult> executeResults,
     IReadOnlyList<Measurement> allMeasurements,
     GcStats gcStats)
 {
     Benchmark       = benchmark;
     GenerateResult  = generateResult;
     BuildResult     = buildResult;
     ExecuteResults  = executeResults ?? new ExecuteResult[0];
     AllMeasurements = allMeasurements ?? new Measurement[0];
     GcStats         = gcStats;
 }
Example #8
 public BenchmarkReport(
     BenchmarkCase benchmarkCase,
     GenerateResult generateResult,
     BuildResult buildResult,
     IReadOnlyList<ExecuteResult> executeResults,
     IReadOnlyList<Measurement> allMeasurements,
     GcStats gcStats,
     IReadOnlyList<Metric> metrics)
 {
     BenchmarkCase   = benchmarkCase;
     GenerateResult  = generateResult;
     BuildResult     = buildResult;
     ExecuteResults  = executeResults ?? Array.Empty<ExecuteResult>();
     AllMeasurements = allMeasurements ?? Array.Empty<Measurement>();
     GcStats         = gcStats;
     Metrics         = metrics?.ToDictionary(metric => metric.Descriptor.Id);
 }
Example #9
 public BenchmarkReport(
     bool success,
     BenchmarkCase benchmarkCase,
     GenerateResult generateResult,
     BuildResult buildResult,
     IReadOnlyList<ExecuteResult> executeResults,
     IReadOnlyList<Measurement> allMeasurements,
     GcStats gcStats,
     IReadOnlyList<Metric> metrics)
 {
     Success         = success;
     BenchmarkCase   = benchmarkCase;
     GenerateResult  = generateResult;
     BuildResult     = buildResult;
     ExecuteResults  = executeResults ?? Array.Empty<ExecuteResult>();
     AllMeasurements = allMeasurements ?? Array.Empty<Measurement>();
     GcStats         = gcStats;
     Metrics         = metrics?.ToDictionary(metric => metric.Descriptor.Id)
                       ?? (IReadOnlyDictionary<string, Metric>)ImmutableDictionary<string, Metric>.Empty;
 }
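Across these overloads the constructor's job is identical: store the inputs, substitute empty collections for null arguments, and index the metrics by descriptor id. The metrics-indexing pattern in isolation, using a stand-in record rather than the real Metric type:

 using System;
 using System.Collections.Generic;
 using System.Collections.Immutable;
 using System.Linq;

 // Stand-in for the real Metric/descriptor types.
 sealed record FakeMetric(string Id, double Value);

 static class MetricsIndex
 {
     // Null-safe: a missing list yields an empty immutable dictionary,
     // so lookups never have to null-check the map itself.
     public static IReadOnlyDictionary<string, FakeMetric> Build(IReadOnlyList<FakeMetric> metrics) =>
         metrics?.ToDictionary(m => m.Id)
         ?? (IReadOnlyDictionary<string, FakeMetric>)ImmutableDictionary<string, FakeMetric>.Empty;
 }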
Example #10
        private double GetGcValuesFromReport(GcStats gcStats, GcMetricSource metricSource)
        {
            switch (metricSource)
            {
                case GcMetricSource.BytesAllocatedPerOperation:
                    return gcStats.BytesAllocatedPerOperation;

                case GcMetricSource.BytesAllocatedPerOperationRaw:
                    return gcStats.GetTotalAllocatedBytes(false) * 1.0 / gcStats.TotalOperations;

                case GcMetricSource.Gen0CollectionsPer1000:
                    return 1000.0 * gcStats.Gen0Collections / gcStats.TotalOperations;

                case GcMetricSource.Gen1CollectionsPer1000:
                    return 1000.0 * gcStats.Gen1Collections / gcStats.TotalOperations;

                case GcMetricSource.Gen2CollectionsPer1000:
                    return 1000.0 * gcStats.Gen2Collections / gcStats.TotalOperations;

                default:
                    throw CodeExceptions.UnexpectedArgumentValue(nameof(metricSource), metricSource);
            }
        }
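The per-1000 metrics normalize collection counts by the operation count so runs of different lengths are directly comparable; the 1000.0 literal forces floating-point division. With the sample stats from earlier (100/10/1 collections over 888 operations):

        double gen0Per1000 = 1000.0 * 100 / 888;  // ≈ 112.6 gen0 collections per 1000 ops
        double gen1Per1000 = 1000.0 * 10 / 888;   // ≈ 11.3
        double gen2Per1000 = 1000.0 * 1 / 888;    // ≈ 1.1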
Example #11
        public IEnumerable<Metric> ProcessResults(DiagnoserResults diagnoserResults)
        {
            var gcStatsLine = InterceptingExecutor.LastExecResult.Data.LastOrDefault(line => line.StartsWith("GC"));

            if (gcStatsLine is null)
            {
                Console.WriteLine(string.Join("\n", InterceptingExecutor.LastExecResult.Data));
            }
            var gcStats = GcStats.Parse(gcStatsLine);

            savedStats[diagnoserResults.BenchmarkCase] = gcStats;
            return MemoryDiagnoser.Default
                   .ProcessResults(new DiagnoserResults(diagnoserResults.BenchmarkCase, diagnoserResults.TotalOperations, gcStats, diagnoserResults.ThreadingStats, diagnoserResults.BuildResult))
                   .Select(m => new Metric(
                               new GenericMetricDescriptor("X" + m.Descriptor.Id, "X" + m.Descriptor.DisplayName, m.Descriptor.UnitType, m.Descriptor.Legend, m.Descriptor.Unit, m.Descriptor.TheGreaterTheBetter, m.Descriptor.NumberFormat),
                               m.Value));
            // diagnoserResults.
            // yield return new Metric(GarbageCollectionsMetricDescriptor.Gen0, diagnoserResults.GcStats.Gen0Collections / (double)diagnoserResults.GcStats.TotalOperations * 1000);
            // yield return new Metric(GarbageCollectionsMetricDescriptor.Gen1, diagnoserResults.GcStats.Gen1Collections / (double)diagnoserResults.GcStats.TotalOperations * 1000);
            // yield return new Metric(GarbageCollectionsMetricDescriptor.Gen2, diagnoserResults.GcStats.Gen2Collections / (double)diagnoserResults.GcStats.TotalOperations * 1000);
            // yield return new Metric(AllocatedMemoryMetricDescriptor.Instance, diagnoserResults.GcStats.BytesAllocatedPerOperation);
        }
Example #12
        private static (List<ExecuteResult> executeResults, GcStats gcStats) Execute(
            ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver)
        {
            var executeResults = new List<ExecuteResult>();
            var gcStats        = default(GcStats);

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount         = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int  defaultValue            = analyzeRunToRunVariance ? 2 : 1;
            int  launchCount             = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            var noOverheadCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.NoOverhead);

            for (int launchIndex = 1; launchIndex <= launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance && autoLaunchCount && launchIndex <= 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex}{printedLaunchCount}");

                // use diagnoser only for the last run (we need single result, not many)
                bool useDiagnoser = launchIndex == launchCount && noOverheadCompositeDiagnoser != null;

                var executeResult = toolchain.Executor.Execute(
                    new ExecuteParameters(
                        buildResult,
                        benchmark,
                        logger,
                        resolver,
                        config,
                        useDiagnoser ? noOverheadCompositeDiagnoser : null));

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError($"Executable {buildResult.ArtifactsPaths.ExecutablePath} not found");
                }
                if (executeResult.ExitCode != 0)
                {
                    logger.WriteLineError("ExitCode != 0");
                }

                executeResults.Add(executeResult);

                var measurements = executeResults
                                   .SelectMany(r => r.Data)
                                   .Select(line => Measurement.Parse(logger, line, 0))
                                   .Where(r => r.IterationMode != IterationMode.Unknown)
                                   .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (useDiagnoser)
                {
                    if (config.HasMemoryDiagnoser())
                    {
                        gcStats = GcStats.Parse(executeResult.Data.Last());
                    }

                    noOverheadCompositeDiagnoser.ProcessResults(
                        new DiagnoserResults(benchmark, measurements.Where(measurement => !measurement.IterationMode.IsIdle()).Sum(m => m.Operations), gcStats));
                }

                if (autoLaunchCount && launchIndex == 2 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent    = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            var extraRunCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.ExtraRun);

            if (extraRunCompositeDiagnoser != null)
            {
                logger.WriteLineInfo("// Run, Diagnostic");

                var executeResult = toolchain.Executor.Execute(
                    new ExecuteParameters(buildResult, benchmark, logger, resolver, config, extraRunCompositeDiagnoser));

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();

                extraRunCompositeDiagnoser.ProcessResults(
                    new DiagnoserResults(benchmark, allRuns.Where(measurement => !measurement.IterationMode.IsIdle()).Sum(m => m.Operations), gcStats));

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                logger.WriteLine();
            }

            var separateLogicCompositeDiagnoser = config.GetCompositeDiagnoser(benchmark, Diagnosers.RunMode.SeparateLogic);

            if (separateLogicCompositeDiagnoser != null)
            {
                logger.WriteLineInfo("// Run, Diagnostic [SeparateLogic]");

                separateLogicCompositeDiagnoser.Handle(HostSignal.SeparateLogic, new DiagnoserActionParameters(null, benchmark, config));
            }

            return (executeResults, gcStats);
        }
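Both Execute variants share the same empirical launch-count heuristic: after two launches, percent is the overhead (idle) median expressed as a percentage of the workload (main) median, and the launch count becomes round(max(2, 2 + (percent - 1) / 3)). Isolated as a helper, with a few worked values:

        using System;

        static class LaunchHeuristic
        {
            // percent = overhead median as a percentage of the workload median.
            public static int AutoLaunchCount(double percent) =>
                (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3));
        }

        // AutoLaunchCount(1)  == 2   (overhead negligible: stay at the minimum)
        // AutoLaunchCount(10) == 5
        // AutoLaunchCount(40) == 15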
Example #13
        private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver, out GcStats gcStats)
        {
            var executeResults = new List <ExecuteResult>();

            gcStats = default(GcStats);

            logger.WriteLineInfo("// *** Execute ***");
            bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
            bool autoLaunchCount         = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
            int  defaultValue            = analyzeRunToRunVariance ? 2 : 1;
            int  launchCount             = Math.Max(
                1,
                autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);

            for (int launchIndex = 0; launchIndex < launchCount; launchIndex++)
            {
                string printedLaunchCount = (analyzeRunToRunVariance &&
                                             autoLaunchCount &&
                                             launchIndex < 2)
                    ? ""
                    : " / " + launchCount;
                logger.WriteLineInfo($"// Launch: {launchIndex + 1}{printedLaunchCount}");

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError($"Executable {buildResult.ArtifactsPaths.ExecutablePath} not found");
                }
                if (executeResult.ExitCode != 0)
                {
                    logger.WriteLineError("ExitCode != 0");
                }
                executeResults.Add(executeResult);

                var measurements = executeResults
                                   .SelectMany(r => r.Data)
                                   .Select(line => Measurement.Parse(logger, line, 0))
                                   .Where(r => r.IterationMode != IterationMode.Unknown)
                                   .ToArray();

                if (!measurements.Any())
                {
                    // Something went wrong during the benchmark, don't bother doing more runs
                    logger.WriteLineError("No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
                    break;
                }

                if (autoLaunchCount && launchIndex == 1 && analyzeRunToRunVariance)
                {
                    // TODO: improve this logic
                    var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
                    var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
                    var percent    = idleApprox / mainApprox * 100;
                    launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
                }
            }
            logger.WriteLine();

            // Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
            if (config.GetDiagnosers().Any())
            {
                logger.WriteLineInfo("// Run, Diagnostic");
                var compositeDiagnoser = config.GetCompositeDiagnoser();

                var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver, compositeDiagnoser);

                var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
                gcStats = GcStats.Parse(executeResult.Data.Last());
                var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns, gcStats);
                compositeDiagnoser.ProcessResults(benchmark, report);

                if (!executeResult.FoundExecutable)
                {
                    logger.WriteLineError("Executable not found");
                }
                logger.WriteLine();
            }

            return executeResults;
        }
Example #14
 public DiagnoserResults(Benchmark benchmark, long totalOperations, GcStats gcStats)
 {
     Benchmark       = benchmark;
     TotalOperations = totalOperations;
     GcStats         = gcStats;
 }