/// <summary>
/// Extracts performance-monitor-counter (PMC) measurements from the ETW profile data of a completed
/// scenario run and records them on <paramref name="iteration"/>. Only the process whose id matches
/// the scenario's exit info is considered; per-module counters are recorded only for a fixed set of
/// modules of interest. Parse failures are logged to <paramref name="output"/> rather than thrown.
/// </summary>
/// <param name="iteration">Iteration whose <c>Measurements</c> dictionary receives the PMC metrics.</param>
/// <param name="scenarioExecutionResult">Result carrying the ETW log and the target process id.</param>
/// <param name="output">Test output sink used to report ETW parse errors.</param>
protected static void AddEtwData(IterationResult iteration, ScenarioExecutionResult scenarioExecutionResult, ITestOutputHelper output)
{
    // Modules whose per-module PMC data we surface as metrics; everything else is ignored.
    // HashSet with an ordinal-ignore-case comparer gives O(1) case-insensitive lookups instead of
    // a linear Any(...Equals...) scan per module.
    var modulesOfInterest = new HashSet<string>(StringComparer.OrdinalIgnoreCase)
    {
        "Anonymously Hosted DynamicMethods Assembly",
        "clrjit.dll",
        "coreclr.dll",
        "dotnet.exe",
        "MusicStore.dll",
        "AllReady.dll",
        "Word2VecScenario.dll",
        "ntoskrnl.exe",
        "System.Private.CoreLib.dll",
        "Unknown",
    };

    // Get the list of processes of interest.
    try
    {
        var processes = new SimpleTraceEventParser().GetProfileData(scenarioExecutionResult);

        // Extract the Pmc data for each one of the processes.
        foreach (var process in processes)
        {
            // Only the process launched by the scenario matters; skip everything else in the trace.
            if (process.Id != scenarioExecutionResult.ProcessExitInfo.ProcessId)
            {
                continue;
            }

            iteration.Measurements.Add(
                new Metric($"PMC/{process.Name}/Duration", "ms"),
                process.LifeSpan.Duration.TotalMilliseconds);

            // Add process-wide PMC values.
            foreach (var pmcData in process.PerformanceMonitorCounterData)
            {
                iteration.Measurements.Add(
                    new Metric($"PMC/{process.Name}/{pmcData.Key.Name}", pmcData.Key.Unit),
                    pmcData.Value);
            }

            // Add per-module PMC values for the modules we care about.
            foreach (var module in process.Modules)
            {
                var moduleName = Path.GetFileName(module.FullName);
                if (modulesOfInterest.Contains(moduleName))
                {
                    foreach (var pmcData in module.PerformanceMonitorCounterData)
                    {
                        var metric = new Metric($"PMC/{process.Name}!{moduleName}/{pmcData.Key.Name}", pmcData.Key.Unit);

                        // Sometimes the etw parser gives duplicate module entries which leads to duplicate keys
                        // but I haven't hunted down the reason. For now it is first one wins.
                        if (!iteration.Measurements.ContainsKey(metric))
                        {
                            iteration.Measurements.Add(metric, pmcData.Value);
                        }
                    }
                }
            }
        }
    }
    catch (InvalidOperationException e)
    {
        // The ETW log can be malformed or incomplete; report and continue rather than failing the run.
        output.WriteLine("Error while processing ETW log: " + scenarioExecutionResult.EventLogFileName);
        output.WriteLine(e.ToString());
    }
}
/// <summary>
/// When serializing the result data to benchview this is called to determine if any of the metrics should be reported differently
/// than they were collected. We use this to collect several measurements in each iteration, then present those measurements
/// to benchview as if each was a distinct test model with its own set of iterations of a single measurement.
/// </summary>
/// <param name="originalMetric">The metric as it was collected.</param>
/// <param name="newMetric">The metric to report under the remapped scenario model; <c>default</c> when no remapping applies.</param>
/// <param name="newScenarioModelName">The scenario model the metric is reported under; <c>null</c> when no remapping applies.</param>
/// <returns><c>true</c> when the metric was remapped; otherwise <c>false</c>.</returns>
public virtual bool TryGetBenchviewCustomMetricReporting(Metric originalMetric, out Metric newMetric, out string newScenarioModelName)
{
    const string prefix = "PMC/";

    // Metric names are machine-generated identifiers, so compare ordinally (CA1310).
    if (originalMetric.Name.StartsWith(prefix, StringComparison.Ordinal))
    {
        int secondSlash = originalMetric.Name.IndexOf('/', prefix.Length);

        // Guard against a name like "PMC/foo" with no second separator; the original code would
        // have thrown ArgumentOutOfRangeException from Substring with a negative length.
        if (secondSlash >= 0)
        {
            // "PMC/<model>/<metric>" -> scenario model "<model>", metric "<metric>".
            newScenarioModelName = originalMetric.Name.Substring(prefix.Length, secondSlash - prefix.Length);
            string newMetricName = originalMetric.Name.Substring(secondSlash + 1);
            newMetric = new Metric(newMetricName, originalMetric.Unit);
            return true;
        }
    }

    newMetric = default(Metric);
    newScenarioModelName = null;
    return false;
}
/// <summary>
/// When serializing the result data to benchview this is called to determine if any of the metrics should be reported differently
/// than they were collected. Both web apps use this to collect several measurements in each iteration, then present those measurements
/// to benchview as if each was the Duration metric of a distinct scenario test with its own set of iterations.
/// </summary>
public override bool TryGetBenchviewCustomMetricReporting(Metric originalMetric, out Metric newMetric, out string newScenarioModelName)
{
    // Map each of the well-known metrics to the scenario model it is reported under.
    string remappedScenarioName =
        originalMetric.Equals(TrainingMetric) ? "Training" :
        originalMetric.Equals(FirstSearchMetric) ? "First Search" :
        originalMetric.Equals(MedianSearchMetric) ? "Median Search" :
        null;

    // Anything we don't recognize is delegated to the base implementation.
    if (remappedScenarioName == null)
    {
        return base.TryGetBenchviewCustomMetricReporting(originalMetric, out newMetric, out newScenarioModelName);
    }

    newScenarioModelName = remappedScenarioName;
    newMetric = Metric.ElapsedTimeMilliseconds;
    return true;
}