/// <summary>
/// Loads the assembly at <paramref name="path"/> into the default <see cref="AssemblyLoadContext"/>
/// and wires up dependency resolution for its referenced assemblies.
/// </summary>
/// <param name="path">Full path on disk to the benchmark assembly to load.</param>
/// <param name="trace">Output sink used for error reporting during load.</param>
public NetCoreAssemblyRuntimeLoader(string path, IBenchmarkOutput trace)
{
    _trace = trace;
    if (!File.Exists(path))
    {
        trace.Error($"[NetCoreAssemblyRuntimeLoader] Unable to find requested assembly [{path}]");
        return; // leaves the loader partially initialized; presumably callers check Assembly for null — confirm
    }
    Assembly = AssemblyLoadContext.Default.LoadFromAssemblyPath(path);
    if (Assembly == null) // NOTE(review): LoadFromAssemblyPath throws on failure rather than returning null, so this branch looks unreachable — confirm
    {
        trace.Error($"[NetCoreAssemblyRuntimeLoader] Found assembly [{path}], but was unable to load it.");
        return;
    }
    _dependencyContext = DependencyContext.Load(Assembly);
    _loadContext = AssemblyLoadContext.GetLoadContext(Assembly);
    // Resolver chain is consulted in order: the assembly's own directory,
    // framework reference assemblies, then NuGet package locations.
    _resolver = new CompositeCompilationAssemblyResolver(new ICompilationAssemblyResolver[]
    {
        new AppBaseCompilationAssemblyResolver(Path.GetDirectoryName(path)),
        new ReferenceAssemblyPathResolver(),
        new PackageCompilationAssemblyResolver()
    });
    // Hook load-failure resolution AFTER the resolver chain above exists,
    // since the handler relies on it.
    _loadContext.Resolving += LoadContextOnResolving;
    // References are resolved lazily, on first access.
    _referencedAssemblies = new Lazy<Assembly[]>(LoadReferencedAssemblies);
}
/// <summary>
/// Finds the single method on <paramref name="classWithBenchmarks"/> decorated with
/// <see cref="PerfSetupAttribute"/> (inherited attributes included).
/// </summary>
/// <param name="classWithBenchmarks">The benchmark class to inspect. Must not be null.</param>
/// <returns>
/// Metadata for the setup method, or <see cref="BenchmarkMethodMetadata.Empty"/> when no setup
/// method is declared or the type is invalid for benchmarks.
/// </returns>
/// <exception cref="NBenchException">
/// Thrown when more than one setup method is declared on the class.
/// </exception>
public static BenchmarkMethodMetadata GetSetupMethod(TypeInfo classWithBenchmarks)
{
    Contract.Requires(classWithBenchmarks != null);
    var setupMethods = classWithBenchmarks.GetMethods()
        .Where(y => y.IsDefined(typeof(PerfSetupAttribute), true))
        .ToList();
    if (!setupMethods.Any() || IsTypeInvalidForBenchmarks(classWithBenchmarks))
    {
        return BenchmarkMethodMetadata.Empty;
    }

    // Log and throw for benchmarks that have multiple setups declared — only one is allowed per class.
    // (Fixed the message grammar: was "has a declared {N} PerfSetupAttributes".)
    if (setupMethods.Count > 1)
    {
        var ex = new NBenchException(
            $"{classWithBenchmarks.Name} has declared {setupMethods.Count} PerfSetupAttributes. A maximum of 1 is allowed per class. Failing...");
        ReflectionOutput.Error(ex.Message);
        throw ex;
    }

    var matchingMethod = setupMethods.Single();
    var takesContext = MethodTakesBenchmarkContext(matchingMethod);
    // Third argument mirrors the original call; presumably a "skip" flag — confirm against BenchmarkMethodMetadata.
    return new BenchmarkMethodMetadata(matchingMethod, takesContext, false);
}
/// <summary>
/// Loads the target assembly plus every assembly it references into the current load context.
/// Individual reference load failures are logged in DEBUG builds and otherwise ignored
/// (deliberate best-effort: not all references are needed to run benchmarks).
/// </summary>
/// <returns>The target assembly followed by all references that loaded successfully.</returns>
private Assembly[] LoadReferencedAssemblies()
{
    // Seed the list with the primary assembly itself.
    var assemblies = new List<Assembly>() { Assembly };
#if DEBUG
    _trace.WriteLine($"[NetCoreAssemblyRuntimeLoader][LoadReferencedAssemblies] Loading references for [{Assembly}]");
#endif
    foreach (var assemblyName in Assembly.GetReferencedAssemblies())
    {
        try
        {
#if DEBUG
            _trace.WriteLine($"[NetCoreAssemblyRuntimeLoader][LoadReferencedAssemblies] Attempting to load [{assemblyName}]");
#endif
            assemblies.Add(_loadContext.LoadFromAssemblyName(assemblyName));
        }
        catch (Exception ex)
        {
            // exception occurred, but we don't care — the reference is simply skipped
#if DEBUG
            _trace.Error(ex, $"[NetCoreAssemblyRuntimeLoader][LoadReferencedAssemblies] Failed to load [{assemblyName}]");
#endif
        }
    }
    return assemblies.ToArray();
}
/// <summary>
/// Loads the assembly at <paramref name="path"/> via <see cref="System.Reflection.Assembly.LoadFile"/>
/// and hooks <see cref="AppDomain.AssemblyResolve"/> so references can be found next to it.
/// </summary>
/// <param name="path">Full path on disk to the benchmark assembly to load.</param>
/// <param name="trace">Output sink used for error reporting during load.</param>
public AssemblyRuntimeLoader(string path, IBenchmarkOutput trace)
{
    _trace = trace;
    if (!File.Exists(path))
    {
        trace.Error($"[NetFrameworkAssemblyRuntimeLoader] Unable to find requested assembly [{path}]");
        return; // leaves the loader partially initialized; presumably callers check Assembly for null — confirm
    }
    Assembly = System.Reflection.Assembly.LoadFile(path);
    if (Assembly == null) // NOTE(review): Assembly.LoadFile throws on failure rather than returning null, so this branch looks unreachable — confirm
    {
        trace.Error($"[NetFrameworkAssemblyRuntimeLoader] Found assembly [{path}], but was unable to load it.");
        return;
    }
    // Remember the assembly's directory so the resolve handler can probe it for references.
    _binaryDirectory = Path.GetDirectoryName(path);
    AppDomain.CurrentDomain.AssemblyResolve += CurrentDomainOnAssemblyResolve;
    // References are resolved lazily, on first access.
    _referencedAssemblies = new Lazy<Assembly[]>(LoadReferencedAssemblies);
}
/// <summary>
/// Executes the tests
/// </summary>
/// <returns>True if all tests passed.</returns>
public TestRunnerResult Execute()
{
    // Perform core / thread optimizations if we're running in single-threaded mode,
    // but not if the user has specified multi-threaded benchmarks.
    SetProcessPriority(_package.Concurrent);

    IBenchmarkOutput output = CreateOutput();
    var discovery = new ReflectionDiscovery(output);
    var testResult = new TestRunnerResult() { AllTestsPassed = true };

    try
    {
        foreach (var file in _package.Files)
        {
            var loadedAssembly = AssemblyRuntimeLoader.LoadAssembly(file);
            foreach (var benchmark in discovery.FindBenchmarks(loadedAssembly))
            {
                // Honor the package's include/exclude filters.
                if (!_package.ShouldRunBenchmark(benchmark.BenchmarkName))
                {
                    output.WriteLine($"------------ NOTRUN {benchmark.BenchmarkName} ---------- ");
                    testResult.IgnoredTestsCount += 1;
                    continue;
                }

                output.WriteLine($"------------ STARTING {benchmark.BenchmarkName} ---------- ");
                benchmark.Run();
                benchmark.Finish();

                // A single failed assert fails the whole run.
                testResult.AllTestsPassed = testResult.AllTestsPassed && benchmark.AllAssertsPassed;
                output.WriteLine($"------------ FINISHED {benchmark.BenchmarkName} ---------- ");
                testResult.ExecutedTestsCount += 1;
            }
        }
    }
    catch (Exception ex)
    {
        output.Error(ex, "Error while executing the tests.");
        testResult.AllTestsPassed = false;
    }

    return testResult;
}
/// <summary>
/// Executes the tests
/// </summary>
/// <returns>True if all tests passed.</returns>
public TestRunnerResult Execute()
{
    // Perform core / thread optimizations if we're running in single-threaded mode,
    // but not if the user has specified multi-threaded benchmarks.
    SetProcessPriority(_package.Concurrent);

    // Carry the runner settings into benchmark reports and toggle tracing on or off.
    var runnerSettings = new RunnerSettings()
    {
        ConcurrentModeEnabled = _package.Concurrent,
        TracingEnabled = _package.Tracing
    };

    IBenchmarkOutput output = CreateOutput();
    // One day we might be able to pass in custom assertion runners, hence why this is here.
    var discovery = new ReflectionDiscovery(output, DefaultBenchmarkAssertionRunner.Instance, runnerSettings);
    var testResult = new TestRunnerResult() { AllTestsPassed = true };

    try
    {
        foreach (var file in _package.Files)
        {
            var loadedAssembly = AssemblyRuntimeLoader.LoadAssembly(file);
            foreach (var benchmark in discovery.FindBenchmarks(loadedAssembly))
            {
                // Honor the package's include/exclude filters.
                if (!_package.ShouldRunBenchmark(benchmark.BenchmarkName))
                {
                    output.SkipBenchmark(benchmark.BenchmarkName);
                    testResult.IgnoredTestsCount += 1;
                    continue;
                }

                output.StartBenchmark(benchmark.BenchmarkName);
                benchmark.Run();
                benchmark.Finish();

                // A single failed assert fails the whole run.
                testResult.AllTestsPassed = testResult.AllTestsPassed && benchmark.AllAssertsPassed;
                output.FinishBenchmark(benchmark.BenchmarkName);
                testResult.ExecutedTestsCount += 1;
            }
        }
    }
    catch (Exception ex)
    {
        output.Error(ex, "Error while executing the tests.");
        testResult.AllTestsPassed = false;
    }

    return testResult;
}
/// <summary>
/// Reports an error to the underlying benchmark output, first wrapping the exception
/// and message in an <see cref="Error"/> and formatting it via its ToString().
/// </summary>
/// <param name="ex">The exception being reported.</param>
/// <param name="message">The human-readable error message to accompany the exception.</param>
public void Error(Exception ex, string message)
{
    var formattedError = new Error(ex, message).ToString();
    _benchmarkOutput.Error(ex, formattedError);
}