/// <summary>
/// Reads test results from the headless browser's output stream, enforcing an
/// inactivity timeout that resets each time a test event arrives.
/// </summary>
/// <param name="processStream">Wrapper around the running test process and its stdout.</param>
/// <param name="testOptions">Global options (supplies the default per-file timeout).</param>
/// <param name="testContext">Context describing the file(s) under test.</param>
/// <param name="callback">Receives progress notifications as events stream in.</param>
/// <param name="debugEnabled">True when debug tracing of the stream is enabled.</param>
/// <returns>Summaries read from the stream, or empty summaries (one per file under test) on timeout.</returns>
public IList<TestFileSummary> Read(ProcessStream processStream, TestOptions testOptions, TestContext testContext, ITestMethodRunnerCallback callback, bool debugEnabled)
{
    if (processStream == null) throw new ArgumentNullException("processStream");
    if (testOptions == null) throw new ArgumentNullException("testOptions");
    if (testContext == null) throw new ArgumentNullException("testContext");

    // lastTestEvent is advanced elsewhere as events arrive; the wait loop below
    // measures *inactivity* against it rather than total elapsed run time.
    lastTestEvent = DateTime.Now;
    var timeout = (testContext.TestFileSettings.TestFileTimeout ?? testOptions.TestFileTimeoutMilliseconds) + 500; // Add buffer to timeout to account for serialization
    var readerTask = Task<IList<TestFileSummary>>.Factory.StartNew(() => ReadFromStream(processStream.StreamReader, testContext, testOptions, callback, debugEnabled));

    // Poll until the reader completes or has been quiet longer than the timeout.
    while (readerTask.Status == TaskStatus.WaitingToRun
           || (readerTask.Status == TaskStatus.Running && (DateTime.Now - lastTestEvent).TotalMilliseconds < timeout))
    {
        Thread.Sleep(100);
    }

    if (readerTask.IsCompleted)
    {
        ChutzpahTracer.TraceInformation("Finished reading stream from test file '{0}'", testContext.FirstInputTestFile);
        return readerTask.Result;
    }
    else
    {
        // We timed out so kill the process and return an empty test file summary
        ChutzpahTracer.TraceError("Test file '{0}' timed out after running for {1} milliseconds", testContext.FirstInputTestFile, (DateTime.Now - lastTestEvent).TotalMilliseconds);

        processStream.TimedOut = true;
        processStream.KillProcess();
        return testContext.ReferencedFiles.Where(x => x.IsFileUnderTest).Select(file => new TestFileSummary(file.Path)).ToList();
    }
}
/// <summary>
/// Builds a test context for each candidate script file in parallel, honoring
/// cancellation and the configured file-search limit.
/// </summary>
/// <param name="options">Test options, including FileSearchLimit.</param>
/// <param name="scriptPaths">Candidate script files to build contexts for.</param>
/// <param name="parallelOptions">Degree-of-parallelism settings for the scan.</param>
/// <param name="cancellationSource">Cancelled when the search limit is hit.</param>
/// <param name="resultCount">Number of files already scanned before this call.</param>
/// <param name="testContexts">Receives each successfully built context.</param>
/// <param name="callback">Notified of per-file errors.</param>
/// <param name="overallSummary">Accumulates errors for the whole run.</param>
private void BuildTestContexts(
    TestOptions options,
    IEnumerable<PathInfo> scriptPaths,
    ParallelOptions parallelOptions,
    CancellationTokenSource cancellationSource,
    int resultCount,
    ConcurrentBag<TestContext> testContexts,
    ITestMethodRunnerCallback callback,
    TestCaseSummary overallSummary)
{
    // BUG FIX: the lambda below runs concurrently on many threads and the
    // original code did a non-atomic ++ on the captured resultCount, which is
    // a data race and made the file-search limit unreliable. Count with
    // Interlocked on a captured local instead (closures capture locals by
    // reference, so all threads share this one counter).
    var scannedCount = resultCount;

    Parallel.ForEach(scriptPaths, parallelOptions, testFile =>
    {
        ChutzpahTracer.TraceInformation("Building test context for {0}", testFile.FullPath);

        try
        {
            if (cancellationSource.IsCancellationRequested)
            {
                return;
            }

            TestContext testContext;
            var currentCount = System.Threading.Interlocked.Increment(ref scannedCount);

            if (testContextBuilder.TryBuildContext(testFile, options, out testContext))
            {
                testContexts.Add(testContext);
            }
            else
            {
                ChutzpahTracer.TraceWarning("Unable to build test context for {0}", testFile.FullPath);
            }

            // Limit the number of files we can scan to attempt to build a context for
            // This is important in the case of folder scanning where many JS files may not be
            // test files.
            if (currentCount >= options.FileSearchLimit)
            {
                ChutzpahTracer.TraceError("File search limit hit!!!");
                cancellationSource.Cancel();
            }
        }
        catch (Exception e)
        {
            // Record the failure and continue scanning the remaining files.
            var error = new TestError
            {
                InputTestFile = testFile.FullPath,
                Message = e.ToString()
            };

            overallSummary.Errors.Add(error);
            callback.FileError(error);
            ChutzpahTracer.TraceError(e, "Error during building test context for {0}", testFile.FullPath);
        }
        finally
        {
            ChutzpahTracer.TraceInformation("Finished building test context for {0}", testFile.FullPath);
        }
    });
}
/// <summary>
/// Test spy override: captures the arguments passed in and returns the canned
/// result configured on this fake.
/// </summary>
protected override string RunTests(List<string> methods, ITestMethodRunnerCallback callback)
{
    // Record what the caller handed us so assertions can inspect it later.
    RunTests_Methods = methods;
    RunTests_Callback = callback;

    return RunTests_ReturnValue;
}
/// <summary>
/// Publishes a file-level error reported by the runner: notifies the callback,
/// records it on the file summary, and optionally synthesizes a failing test
/// case so the error shows up in test results.
/// </summary>
private void FireErrorOutput(TestContext testContext, ITestMethodRunnerCallback callback, StreamingTestFileContext testFileContext, JsRunnerOutput jsRunnerOutput)
{
    // NOTE(review): assumes jsRunnerOutput is always a JsError here; a wrong
    // type would surface as a NullReferenceException below — confirm callers.
    var error = jsRunnerOutput as JsError;

    error.Error.InputTestFile = testFileContext.ReferencedFile.Path;
    error.Error.PathFromTestSettingsDirectory = testFileContext.ReferencedFile.PathFromTestSettingsDirectory;
    callback.FileError(testContext, error.Error);
    testFileContext.TestFileSummary.Errors.Add(error.Error);

    if (testFileContext.TestContext.TestFileSettings.CreateFailedTestForFileError.GetValueOrDefault())
    {
        // Represent the file error as a failed test case so runners that only
        // surface test results still display it.
        var fileErrorTest = new TestCase();
        fileErrorTest.InputTestFile = testFileContext.ReferencedFile.Path;
        fileErrorTest.PathFromTestSettingsDirectory = testFileContext.ReferencedFile.PathFromTestSettingsDirectory;
        fileErrorTest.TestName = string.Format("!! File Error #{0} - Error encountered outside of test case execution !!", testFileContext.TestFileSummary.Errors.Count);
        fileErrorTest.TestResults.Add(new TestResult { Passed = false, StackTrace = error.Error.StackAsString ?? error.Error.FormatStackObject(), Message = error.Error.Message });

        callback.TestStarted(testContext, fileErrorTest);
        callback.TestFinished(testContext, fileErrorTest);
        testFileContext.TestFileSummary.AddTestCase(fileErrorTest);
    }

    ChutzpahTracer.TraceError("Error received from Phantom {0}", error.Error.Message);
}
/// <summary>
/// Initialization of the package; this method is called right after the package is sited, so this is the place
/// where you can put all the initialization code that relies on services provided by Visual Studio.
/// </summary>
protected override void Initialize()
{
    Trace.WriteLine(string.Format(CultureInfo.CurrentCulture, "Entering Initialize() of: {0}", ToString()));
    base.Initialize();

    dte = (DTE2)GetService(typeof(DTE));
    if (dte == null)
    {
        // If dte is null then we throw an exception; this is a fatal error.
        // NOTE(review): ArgumentNullException is unusual for a failed service
        // lookup (InvalidOperationException is more conventional) — confirm
        // before changing, since callers may catch this exact type.
        throw new ArgumentNullException("dte");
    }

    testRunner = TestRunner.Create();
    processHelper = new ProcessHelper();
    Logger = new Logger(this);
    Settings = GetDialogPage(typeof(ChutzpahSettings)) as ChutzpahSettings;

    statusBar = GetService(typeof(SVsStatusbar)) as IVsStatusbar;
    runnerCallback = new ParallelRunnerCallbackAdapter(new VisualStudioRunnerCallback(dte, statusBar));

    // Add our command handlers for menu (commands must exist in the .vsct file)
    var mcs = GetService(typeof(IMenuCommandService)) as OleMenuCommandService;
    if (null != mcs)
    {
        // Command - Run JS Tests
        var runJsTestsCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidRunJSTests);
        var runJsTestMenuCmd = new OleMenuCommand(RunJSTestCmdCallback, runJsTestsCmd);
        runJsTestMenuCmd.BeforeQueryStatus += RunJSTestsCmdQueryStatus;
        mcs.AddCommand(runJsTestMenuCmd);

        // Command - Run JS tests in browser
        var runJsTestsInBrowserCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidRunInBrowser);
        var runJsTestInBrowserMenuCmd = new OleMenuCommand(RunJSTestInBrowserCmdCallback, runJsTestsInBrowserCmd);
        runJsTestInBrowserMenuCmd.BeforeQueryStatus += RunJSTestsInBrowserCmdQueryStatus;
        mcs.AddCommand(runJsTestInBrowserMenuCmd);

        // Command - Run Code Coverage
        var runJsTestCodeCoverageCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidRunCodeCoverage);
        var runJsTestCodeCoverageMenuCmd = new OleMenuCommand(RunCodeCoverageCmdCallback, runJsTestCodeCoverageCmd);
        runJsTestCodeCoverageMenuCmd.BeforeQueryStatus += RunCodeCoverageCmdQueryStatus;
        mcs.AddCommand(runJsTestCodeCoverageMenuCmd);

        // Command - Debug JS tests
        var runJsTestDebuggerCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidDebugTests);
        var runJsTestDebuggerMenuCmd = new OleMenuCommand(RunDebuggerCmdCallback, runJsTestDebuggerCmd);
        runJsTestDebuggerMenuCmd.BeforeQueryStatus += RunDebuggerCmdQueryStatus;
        mcs.AddCommand(runJsTestDebuggerMenuCmd);
    }

    // Track solution lifecycle so handlers can react to unload/project changes.
    this.solutionListener = new SolutionEventsListener(this);
    this.solutionListener.SolutionUnloaded += OnSolutionUnloaded;
    this.solutionListener.SolutionProjectChanged += OnSolutionProjectChanged;
    this.solutionListener.StartListeningForChanges();
}
/// <summary>
/// Publishes a completed test case: tags it with its source file, resolves its
/// line number, notifies the callback, and records it on the file summary.
/// </summary>
private void FireTestFinished(ITestMethodRunnerCallback callback, TestFileContext testFileContext, JsRunnerOutput jsRunnerOutput, int testIndex)
{
    var testCaseMessage = jsRunnerOutput as JsTestCase;

    testCaseMessage.TestCase.InputTestFile = testFileContext.ReferencedFile.Path;
    AddLineNumber(testFileContext.ReferencedFile, testIndex, testCaseMessage);

    callback.TestFinished(testCaseMessage.TestCase);
    testFileContext.TestFileSummary.AddTestCase(testCaseMessage.TestCase);
}
/// <summary>
/// Publishes a file-level error from the runner: notifies the callback and
/// records the error on the file summary.
/// </summary>
private void FireErrorOutput(ITestMethodRunnerCallback callback, TestFileContext testFileContext, JsRunnerOutput jsRunnerOutput)
{
    var error = jsRunnerOutput as JsError;

    error.Error.InputTestFile = testFileContext.ReferencedFile.Path;
    callback.FileError(error.Error);
    testFileContext.TestFileSummary.Errors.Add(error.Error);

    // Fixed typo in the trace message ("Eror recieved" -> "Error received") so
    // it matches the sibling streaming-reader message and log searches find both.
    ChutzpahTracer.TraceError("Error received from Phantom {0}", error.Error.Message);
}
/// <summary>
/// Publishes a completed test case (streaming variant): tags it with its file,
/// resolves its line number, notifies the callback, records it on the summary,
/// and traces completion.
/// </summary>
private void FireTestFinished(ITestMethodRunnerCallback callback, StreamingTestFileContext testFileContext, JsRunnerOutput jsRunnerOutput, int testIndex)
{
    var testCaseMessage = jsRunnerOutput as JsTestCase;

    testCaseMessage.TestCase.InputTestFile = testFileContext.ReferencedFile.Path;
    AddLineNumber(testFileContext.ReferencedFile, testIndex, testCaseMessage);

    callback.TestFinished(testCaseMessage.TestCase);
    testFileContext.TestFileSummary.AddTestCase(testCaseMessage.TestCase);

    ChutzpahTracer.TraceInformation("Test Case Finished:'{0}'", testCaseMessage.TestCase.GetDisplayName());
}
/// <summary>
/// Runs the tests found at the given paths, wrapping the run in suite
/// started/finished notifications.
/// </summary>
public TestCaseSummary RunTests(IEnumerable<string> testPaths, TestOptions options, ITestMethodRunnerCallback callback = null)
{
    // When opening in a browser (or when no callback was supplied) suppress
    // notifications by substituting the no-op callback.
    if (options.OpenInBrowser || callback == null)
    {
        callback = RunnerCallback.Empty;
    }

    callback.TestSuiteStarted();
    var summary = ProcessTestPaths(testPaths, options, TestExecutionMode.Execution, callback);
    callback.TestSuiteFinished(summary);

    return summary;
}
/// <summary>
/// Runs the tests found at the given paths, wrapping the run in suite
/// started/finished notifications scoped to the optional test context.
/// </summary>
public TestCaseSummary RunTests(IEnumerable<string> testPaths, TestOptions options, ITestMethodRunnerCallback callback = null, TestContext testContext = null)
{
    // In full-browser launch mode (or with no callback supplied) substitute
    // the no-op callback so nothing is reported.
    if (options.TestLaunchMode == TestLaunchMode.FullBrowser || callback == null)
    {
        callback = RunnerCallback.Empty;
    }

    callback.TestSuiteStarted(testContext);
    var testCaseSummary = ProcessTestPaths(testPaths, options, TestExecutionMode.Execution, callback);
    callback.TestSuiteFinished(testContext, testCaseSummary);

    return testCaseSummary;
}
/// <summary>
/// Routes a log line from the runner: internal diagnostics go to the tracer,
/// user logs go to the callback and the file summary.
/// </summary>
private void FireLogOutput(ITestMethodRunnerCallback callback, TestFileContext testFileContext, JsRunnerOutput jsRunnerOutput)
{
    var logMessage = jsRunnerOutput as JsLog;

    // Messages carrying the internal prefix are Chutzpah diagnostics, not user logs.
    if (logMessage.Log.Message.StartsWith(internalLogPrefix))
    {
        var diagnosticText = logMessage.Log.Message.Substring(internalLogPrefix.Length).Trim();
        ChutzpahTracer.TraceInformation("Phantom Log - {0}", diagnosticText);
        return;
    }

    logMessage.Log.InputTestFile = testFileContext.ReferencedFile.Path;
    callback.FileLog(logMessage.Log);
    testFileContext.TestFileSummary.Logs.Add(logMessage.Log);
}
/// <summary>
/// Runs the specified tests in the given type, calling the callback as appropriate.
/// This override point exists primarily for unit testing purposes.
/// </summary>
/// <param name="methods">The test methods to run</param>
/// <param name="callback">The run status information callback.</param>
/// <returns>The raw XML for the class node, or an empty string when the
/// executor throws.</returns>
protected virtual string RunTests(List<string> methods, ITestMethodRunnerCallback callback)
{
    // Route executor XML nodes to the callback through the logger adapter.
    IRunnerLogger logger = new TestClassCallbackDispatcher(this, callback);
    IExecutorWrapper wrapper = TestAssembly.ExecutorWrapper;

    try
    {
        XmlNode classNode = wrapper.RunTests(TypeName, methods, node => XmlLoggerAdapter.LogNode(node, logger));
        return (classNode.OuterXml);
    }
    catch (Exception ex)
    {
        // Surface the failure through the logger rather than letting it escape.
        logger.ExceptionThrown(wrapper.AssemblyFilename, ex);
        return (String.Empty);
    }
}
/// <summary>
/// Aggregates every context's test cases into one combined summary, stamps all
/// summaries with the batch's elapsed time, and notifies the callback.
/// </summary>
private void FireFileFinished(ITestMethodRunnerCallback callback, string testFilesString, IEnumerable<TestFileContext> testFileContexts, JsRunnerOutput jsRunnerOutput)
{
    var fileDoneMessage = jsRunnerOutput as JsFileDone;

    var combinedSummary = new TestFileSummary(testFilesString);
    combinedSummary.TimeTaken = fileDoneMessage.TimeTaken;

    foreach (var fileContext in testFileContexts)
    {
        fileContext.TestFileSummary.TimeTaken = fileDoneMessage.TimeTaken;
        combinedSummary.AddTestCases(fileContext.TestFileSummary.Tests);
    }

    callback.FileFinished(testFilesString, combinedSummary);
}
/// <summary>
/// Compiles all test contexts as one batch. Returns false (after reporting the
/// exception to the callback) when compilation fails.
/// </summary>
private bool PerformBatchCompile(ITestMethodRunnerCallback callback, IEnumerable<TestContext> testContexts)
{
    try
    {
        batchCompilerService.Compile(testContexts);
        return true;
    }
    catch (ChutzpahCompilationFailedException e)
    {
        // Report and trace the failure; the caller decides how to proceed.
        callback.ExceptionThrown(e, e.SettingsFile);
        ChutzpahTracer.TraceError(e, "Error during batch compile from {0}", e.SettingsFile);
        return false;
    }
}
/// <summary>
/// Reads test results from the headless browser's output stream with an
/// inactivity timeout. On timeout, deferred events are replayed so errors are
/// not lost, the process is killed, and empty summaries are returned.
/// </summary>
/// <param name="processStream">Wrapper around the running test process and its stdout.</param>
/// <param name="testOptions">Global options (default timeout, coverage option).</param>
/// <param name="testContext">Context describing the file(s) under test.</param>
/// <param name="callback">Receives progress notifications as events stream in.</param>
/// <param name="debugEnabled">True when debug tracing of the stream is enabled.</param>
/// <returns>Summaries read from the stream, or empty summaries (one per file under test) on timeout.</returns>
public IList<TestFileSummary> Read(ProcessStream processStream, TestOptions testOptions, TestContext testContext, ITestMethodRunnerCallback callback, bool debugEnabled)
{
    if (processStream == null) throw new ArgumentNullException("processStream");
    if (testOptions == null) throw new ArgumentNullException("testOptions");
    if (testContext == null) throw new ArgumentNullException("testContext");

    // lastTestEvent is refreshed as events stream in; the poll loop below
    // measures inactivity against it, not total elapsed time.
    lastTestEvent = DateTime.Now;
    var timeout = (testContext.TestFileSettings.TestFileTimeout ?? testOptions.TestFileTimeoutMilliseconds) + 500; // Add buffer to timeout to account for serialization

    // Coverage is on when the per-file setting says so, or when it is unset
    // and the global option enables it.
    var codeCoverageEnabled = (!testContext.TestFileSettings.EnableCodeCoverage.HasValue && testOptions.CoverageOptions.Enabled)
                              || (testContext.TestFileSettings.EnableCodeCoverage.HasValue && testContext.TestFileSettings.EnableCodeCoverage.Value);

    // One streaming context per file under test.
    var streamingTestFileContexts = testContext.ReferencedFiles
                                               .Where(x => x.IsFileUnderTest)
                                               .Select(x => new StreamingTestFileContext(x, testContext, codeCoverageEnabled))
                                               .ToList();

    // Events that arrive before we know which file context they belong to.
    var deferredEvents = new List<Action<StreamingTestFileContext>>();

    var readerTask = Task<IList<TestFileSummary>>.Factory.StartNew(() => ReadFromStream(processStream.StreamReader, testContext, testOptions, streamingTestFileContexts, deferredEvents, callback, debugEnabled));

    // Poll until the reader completes or has been quiet longer than the timeout.
    while (readerTask.Status == TaskStatus.WaitingToRun
           || (readerTask.Status == TaskStatus.Running && (DateTime.Now - lastTestEvent).TotalMilliseconds < timeout))
    {
        Thread.Sleep(100);
    }

    if (readerTask.IsCompleted)
    {
        ChutzpahTracer.TraceInformation("Finished reading stream from test file '{0}'", testContext.FirstInputTestFile);
        return readerTask.Result;
    }
    else
    {
        // Since we timed out make sure we play the deferred events so we do not lose errors
        // We will just attach these events to the first test context at this point since we do
        // not know where they belong
        PlayDeferredEvents(streamingTestFileContexts.FirstOrDefault(), deferredEvents);

        // We timed out so kill the process and return an empty test file summary
        ChutzpahTracer.TraceError("Test file '{0}' timed out after running for {1} milliseconds", testContext.FirstInputTestFile, (DateTime.Now - lastTestEvent).TotalMilliseconds);

        processStream.TimedOut = true;
        processStream.KillProcess();
        return testContext.ReferencedFiles.Where(x => x.IsFileUnderTest).Select(file => new TestFileSummary(file.Path)).ToList();
    }
}
/// <summary>
/// Resolves the execution provider for the configured engine and runs the
/// tests through it.
/// </summary>
private IList<TestFileSummary> InvokeTestRunner(TestOptions options, TestContext testContext, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    // Options-level engine choice wins over the test file's settings.
    var engine = (options.Engine ?? testContext.TestFileSettings.Engine).GetValueOrDefault();

    var executionProvider = testExecutionProviders.FirstOrDefault(p => p.CanHandleBrowser(engine));
    if (executionProvider == null)
    {
        throw new ArgumentException("Could not find browser");
    }

    executionProvider.SetupEnvironment(options, testContext);
    return executionProvider.Execute(options, testContext, testExecutionMode, callback);
}
/// <summary>
/// Runs the specified test methods.
/// </summary>
/// <param name="testMethods">The test methods to run.</param>
/// <param name="callback">The run status information callback.</param>
/// <returns>Returns the result as XML.</returns>
public virtual string Run(IEnumerable<TestMethod> testMethods, ITestMethodRunnerCallback callback)
{
    Guard.ArgumentNotNullOrEmpty("testMethods", testMethods);
    Guard.ArgumentNotNull("callback", callback);

    var methodNames = new List<string>();

    foreach (TestMethod testMethod in testMethods)
    {
        // Every requested method must belong to this class; reject foreign ones.
        if (testMethod.TestClass != this)
        {
            throw new ArgumentException("All test methods must belong to this test class");
        }

        methodNames.Add(testMethod.MethodName);

        // Discard results left over from any previous run of this method.
        testMethod.RunResults.Clear();
    }

    return RunTests(methodNames, callback);
}
/// <summary>
/// Launches the headless browser against the generated harness and processes
/// its output stream into a single test file summary.
/// </summary>
private TestFileSummary InvokeTestRunner(string headlessBrowserPath, TestOptions options, TestContext testContext, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    var runnerPath = fileProbe.FindFilePath(testContext.TestRunner);
    var harnessUrl = BuildHarnessUrl(testContext.TestHarnessPath, testContext.IsRemoteHarness);
    var runnerArgs = BuildRunnerArgs(options, testContext, harnessUrl, runnerPath, testExecutionMode);

    // A fresh reader is created per invocation via the factory.
    Func<ProcessStream, TestFileSummary> streamProcessor =
        stream => testCaseStreamReaderFactory.Create().Read(stream, options, testContext, callback, m_debugEnabled);

    var processResult = process.RunExecutableAndProcessOutput(headlessBrowserPath, runnerArgs, streamProcessor);

    HandleTestProcessExitCode(processResult.ExitCode, testContext.InputTestFile, processResult.Model.Errors, callback);

    return processResult.Model;
}
/// <summary>
/// Launches the headless browser against the harness, reads the result stream
/// with a padded timeout, and returns the per-file summaries.
/// </summary>
public IList<TestFileSummary> Execute(TestOptions testOptions, TestContext testContext, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    var runnerPath = fileProbe.FindFilePath(testContext.TestRunner);
    var harnessUrl = BuildHarnessUrl(testContext);
    var runnerArgs = BuildRunnerArgs(testOptions, testContext, harnessUrl, runnerPath, testExecutionMode);

    // Per-file timeout wins over the global one; pad it so serialization of
    // the result stream has time to finish before we give up.
    var fileTimeout = testContext.TestFileSettings.TestFileTimeout ?? testOptions.TestFileTimeoutMilliseconds;
    var streamTimeout = (fileTimeout + 500).GetValueOrDefault();

    Func<ProcessStreamStringSource, TestCaseStreamReadResult> streamProcessor =
        source => readerFactory.Create().Read(source, testOptions, testContext, callback);

    var processResult = processTools.RunExecutableAndProcessOutput(headlessBrowserPath, runnerArgs, streamProcessor, streamTimeout, null);

    HandleTestProcessExitCode(processResult.ExitCode, testContext.FirstInputTestFile, processResult.Model.TestFileSummaries.Select(x => x.Errors).FirstOrDefault(), callback);

    return processResult.Model.TestFileSummaries;
}
/// <summary>
/// Runs the specified test methods.
/// </summary>
/// <param name="testMethods">The test methods to run.</param>
/// <param name="callback">The run status information callback.</param>
/// <returns>Returns the result as XML.</returns>
public virtual string Run(IEnumerable<TestMethod> testMethods, ITestMethodRunnerCallback callback)
{
    Guard.ArgumentNotNullOrEmpty("testMethods", testMethods);
    Guard.ArgumentNotNull("callback", callback);

    var names = new List<string>();
    foreach (TestMethod method in testMethods)
    {
        // Reject any method that is not owned by this test class.
        if (method.TestClass != this)
        {
            throw new ArgumentException("All test methods must belong to this test class");
        }

        names.Add(method.MethodName);

        // Reset results left over from a previous run of this method.
        method.RunResults.Clear();
    }

    return RunTests(names, callback);
}
/// <summary>
/// Launches the headless browser (optionally elevated) against the harness,
/// reads the result stream with a padded timeout, and returns per-file summaries.
/// </summary>
public IList<TestFileSummary> Execute(TestOptions testOptions, TestContext testContext, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    var runnerPath = fileProbe.FindFilePath(testContext.TestRunner);
    var harnessUrl = BuildHarnessUrl(testContext);

    // NOTE: the settings property name carries a typo ("Depenedencies") that
    // comes from the settings model and must be used as-is.
    var engineOptions = testContext.TestFileSettings.EngineOptions;
    var preventEngineDependencyDownload = engineOptions != null && engineOptions.PreventDownloadOfEngineDepenedencies;

    var runnerArgs = BuildRunnerArgs(testOptions, testContext, harnessUrl, runnerPath, testExecutionMode, isRunningElevated, preventEngineDependencyDownload);

    // Pad the per-file timeout so result serialization can complete.
    var streamTimeout = ((testContext.TestFileSettings.TestFileTimeout ?? testOptions.TestFileTimeoutMilliseconds) + 500).GetValueOrDefault();

    Func<ProcessStreamStringSource, TestCaseStreamReadResult> streamProcessor =
        source => readerFactory.Create().Read(source, testOptions, testContext, callback);

    var environmentVariables = BuildEnvironmentVariables();
    var processResult = processTools.RunExecutableAndProcessOutput(headlessBrowserPath, runnerArgs, streamProcessor, streamTimeout, environmentVariables);

    HandleTestProcessExitCode(processResult.ExitCode, testContext.FirstInputTestFile, processResult.Model.TestFileSummaries.Select(x => x.Errors).FirstOrDefault(), callback);

    return processResult.Model.TestFileSummaries;
}
/// <summary>
/// Runs the specified test methods.
/// </summary>
/// <param name="testMethods">The test methods to run.</param>
/// <param name="callback">The run status information callback.</param>
/// <returns>Returns the result as XML.</returns>
public string Run(IEnumerable<TestMethod> testMethods, ITestMethodRunnerCallback callback)
{
    Guard.ArgumentNotNullOrEmpty("testMethods", testMethods);
    Guard.ArgumentNotNull("callback", callback);

    // Bucket the requested methods by their owning assembly so each assembly
    // runs its own methods as one batch.
    var sortedMethods = new Dictionary<TestAssembly, List<TestMethod>>();
    foreach (TestAssembly testAssembly in testAssemblies)
    {
        sortedMethods[testAssembly] = new List<TestMethod>();
    }

    foreach (TestMethod testMethod in testMethods)
    {
        List<TestMethod> methodList = null;

        // A method whose assembly is not registered here is a caller error.
        if (!sortedMethods.TryGetValue(testMethod.TestClass.TestAssembly, out methodList))
        {
            throw new ArgumentException("Test method " + testMethod.MethodName + " on test class " + testMethod.TestClass.TypeName + " in test assembly " + testMethod.TestClass.TestAssembly.AssemblyFilename + " is not in this test environment", "testMethods");
        }

        methodList.Add(testMethod);
    }

    // Concatenate per-assembly XML fragments under a single root element.
    string result = "";
    foreach (var kvp in sortedMethods)
    {
        if (kvp.Value.Count > 0)
        {
            result += kvp.Key.Run(kvp.Value, callback);
        }
    }

    return ("<assemblies>" + result + "</assemblies>");
}
/// <summary>
/// Launches the headless browser against the generated harness and processes
/// its output stream into per-file summaries.
/// </summary>
private IList<TestFileSummary> InvokeTestRunner(string headlessBrowserPath, TestOptions options, TestContext testContext, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    var runnerPath = fileProbe.FindFilePath(testContext.TestRunner);
    var harnessUrl = BuildHarnessUrl(testContext.TestHarnessPath, testContext.IsRemoteHarness);
    var runnerArgs = BuildRunnerArgs(options, testContext, harnessUrl, runnerPath, testExecutionMode);

    Func<ProcessStream, IList<TestFileSummary>> streamProcessor =
        stream => testCaseStreamReaderFactory.Create().Read(stream, options, testContext, callback, m_debugEnabled);

    var result = process.RunExecutableAndProcessOutput(headlessBrowserPath, runnerArgs, streamProcessor);

    // Report the exit code against the first summary's error list, if any.
    HandleTestProcessExitCode(result.ExitCode, testContext.FirstInputTestFile, result.Model.Select(x => x.Errors).FirstOrDefault(), callback);

    return result.Model;
}
/// <summary>
/// Convenience overload: runs the given test paths with default options.
/// </summary>
public TestCaseSummary RunTests(IEnumerable<string> testPaths, ITestMethodRunnerCallback callback = null)
{
    var defaultOptions = new TestOptions();
    return RunTests(testPaths, defaultOptions, callback);
}
/// <summary>
/// Deserializes the raw coverage payload from the runner and attaches it to
/// the file summary.
/// </summary>
private void FireCoverageObject(ITestMethodRunnerCallback callback, StreamingTestFileContext testFileContext, JsRunnerOutput jsRunnerOutput)
{
    var coverageMessage = jsRunnerOutput as JsCoverage;
    var context = testFileContext.TestContext;

    testFileContext.TestFileSummary.CoverageObject = context.CoverageEngine.DeserializeCoverageObject(coverageMessage.Object, context);
}
/// <summary>
/// Initializes a new instance of the <see cref="TestClassCallbackDispatcher"/> class,
/// binding the dispatcher to the test class whose events it forwards and the
/// callback that receives them.
/// </summary>
/// <param name="testClass">The test class.</param>
/// <param name="callback">The run status information callback.</param>
public TestClassCallbackDispatcher(TestClass testClass, ITestMethodRunnerCallback callback)
{
    this.testClass = testClass;
    this.callback = callback;
}
/// <summary>
/// Convenience overload: runs a single test path with default options.
/// </summary>
public TestCaseSummary RunTests(string testPath, ITestMethodRunnerCallback callback = null)
{
    var defaultOptions = new TestOptions();
    return RunTests(testPath, defaultOptions, callback);
}
/// <summary>
/// Routes a log line from the runner (streaming variant): internal diagnostics
/// go to the tracer, user logs go to the callback and the file summary.
/// </summary>
private void FireLogOutput(ITestMethodRunnerCallback callback, StreamingTestFileContext testFileContext, JsRunnerOutput jsRunnerOutput)
{
    var logMessage = jsRunnerOutput as JsLog;

    // The internal prefix marks Chutzpah's own diagnostics rather than user logs.
    if (logMessage.Log.Message.StartsWith(internalLogPrefix))
    {
        var diagnosticText = logMessage.Log.Message.Substring(internalLogPrefix.Length).Trim();
        ChutzpahTracer.TraceInformation("Phantom Log - {0}", diagnosticText);
        return;
    }

    logMessage.Log.InputTestFile = testFileContext.ReferencedFile.Path;
    callback.FileLog(logMessage.Log);
    testFileContext.TestFileSummary.Logs.Add(logMessage.Log);
}
/// <summary>
/// Deserializes the raw coverage payload from the runner into the summary's
/// coverage object.
/// </summary>
private void FireCoverageObject(ITestMethodRunnerCallback callback, TestFileContext testFileContext, JsRunnerOutput jsRunnerOutput)
{
    var coverageMessage = jsRunnerOutput as JsCoverage;

    testFileContext.TestFileSummary.CoverageObject = coverageEngine.DeserializeCoverageObject(coverageMessage.Object, testFileContext.TestContext);
}
/// <summary>
/// Runs every built test context in parallel (launching a browser or the
/// headless runner per context), collects summaries and errors, then cleans up
/// each context unless debugging or opening in a browser.
/// </summary>
private void ExecuteTestContexts(
    TestOptions options,
    TestExecutionMode testExecutionMode,
    ITestMethodRunnerCallback callback,
    ConcurrentBag<TestContext> testContexts,
    ParallelOptions parallelOptions,
    string headlessBrowserPath,
    ConcurrentQueue<TestFileSummary> testFileSummaries,
    TestCaseSummary overallSummary)
{
    Parallel.ForEach(
        testContexts,
        parallelOptions,
        testContext =>
        {
            ChutzpahTracer.TraceInformation("Start test run for {0} in {1} mode", testContext.InputTestFile, testExecutionMode);

            try
            {
                testHarnessBuilder.CreateTestHarness(testContext, options);

                if (options.OpenInBrowser)
                {
                    // Browser mode: just open the harness; no summary is collected.
                    ChutzpahTracer.TraceInformation(
                        "Launching test harness '{0}' for file '{1}' in a browser",
                        testContext.TestHarnessPath,
                        testContext.InputTestFile);
                    process.LaunchFileInBrowser(testContext.TestHarnessPath);
                }
                else
                {
                    ChutzpahTracer.TraceInformation(
                        "Invoking headless browser on test harness '{0}' for file '{1}'",
                        testContext.TestHarnessPath,
                        testContext.InputTestFile);

                    var testSummary = InvokeTestRunner(
                        headlessBrowserPath,
                        options,
                        testContext,
                        testExecutionMode,
                        callback);

                    ChutzpahTracer.TraceInformation(
                        "Test harness '{0}' for file '{1}' finished with {2} passed, {3} failed and {4} errors",
                        testContext.TestHarnessPath,
                        testContext.InputTestFile,
                        testSummary.PassedCount,
                        testSummary.FailedCount,
                        testSummary.Errors.Count);

                    ChutzpahTracer.TraceInformation(
                        "Finished running headless browser on test harness '{0}' for file '{1}'",
                        testContext.TestHarnessPath,
                        testContext.InputTestFile);

                    testFileSummaries.Enqueue(testSummary);
                }
            }
            catch (Exception e)
            {
                // Record the failure on the overall summary and keep going with
                // the other contexts.
                var error = new TestError
                {
                    InputTestFile = testContext.InputTestFile,
                    Message = e.ToString()
                };

                overallSummary.Errors.Add(error);
                callback.FileError(error);

                ChutzpahTracer.TraceError(e, "Error during test execution of {0}", testContext.InputTestFile);
            }
            finally
            {
                ChutzpahTracer.TraceInformation("Finished test run for {0} in {1} mode", testContext.InputTestFile, testExecutionMode);
            }
        });

    // Clean up test context
    foreach (var testContext in testContexts)
    {
        // Don't clean up context if in debug mode
        if (!m_debugEnabled && !options.OpenInBrowser)
        {
            try
            {
                ChutzpahTracer.TraceInformation("Cleaning up test context for {0}", testContext.InputTestFile);
                testContextBuilder.CleanupContext(testContext);
            }
            catch (Exception e)
            {
                // Cleanup failure is logged but never fails the run.
                ChutzpahTracer.TraceError(e, "Error cleaning up test context for {0}", testContext.InputTestFile);
            }
        }
    }
}
/// <summary>
/// Rolls every streaming context's test cases into one combined summary, stamps
/// all summaries with the batch's elapsed time, and notifies the callback.
/// </summary>
private void FireFileFinished(ITestMethodRunnerCallback callback, string testFilesString, IEnumerable<StreamingTestFileContext> testFileContexts, JsRunnerOutput jsRunnerOutput)
{
    var fileDoneMessage = jsRunnerOutput as JsFileDone;

    var combined = new TestFileSummary(testFilesString);
    combined.TimeTaken = fileDoneMessage.TimeTaken;

    foreach (var streamingContext in testFileContexts)
    {
        streamingContext.TestFileSummary.TimeTaken = fileDoneMessage.TimeTaken;
        combined.AddTestCases(streamingContext.TestFileSummary.Tests);
    }

    callback.FileFinished(testFilesString, combined);
}
/// <summary>
/// Wraps the given callback so this instance can intercept and tally events
/// before forwarding them.
/// </summary>
/// <param name="innerCallback">The callback to forward events to.</param>
public TestMethodRunnerCallbackWrapper(ITestMethodRunnerCallback innerCallback)
{
    this.innerCallback = innerCallback;
}
/// <inheritdoc/>
public void Run(IEnumerable<TestMethod> testMethods, ITestMethodRunnerCallback callback)
{
    Guard.ArgumentNotNullOrEmpty("testMethods", testMethods);
    Guard.ArgumentNotNull("callback", callback);

    // Bucket methods by their owning assembly, then run each assembly's batch.
    var sortedMethods = new Dictionary<TestAssembly, List<TestMethod>>();

    foreach (TestAssembly testAssembly in testAssemblies)
        sortedMethods[testAssembly] = new List<TestMethod>();

    foreach (TestMethod testMethod in testMethods)
    {
        List<TestMethod> methodList = null;

        // A method whose assembly is not registered here is a caller error.
        if (!sortedMethods.TryGetValue(testMethod.TestClass.TestAssembly, out methodList))
            throw new ArgumentException("Test method " + testMethod.MethodName + " on test class " + testMethod.TestClass.TypeName + " in test assembly " + testMethod.TestClass.TestAssembly.AssemblyFilename + " is not in this test environment", "testMethods");

        methodList.Add(testMethod);
    }

    foreach (var kvp in sortedMethods)
        if (kvp.Value.Count > 0)
            kvp.Key.Run(kvp.Value, callback);
}
/// <summary>
/// Runs the specified test methods.
/// </summary>
/// <param name="testMethods">The test methods to run.</param>
/// <param name="callback">The run status information callback.</param>
/// <returns>Returns the result as XML.</returns>
public virtual string Run(IEnumerable<TestMethod> testMethods, ITestMethodRunnerCallback callback)
{
    Guard.ArgumentNotNullOrEmpty("testMethods", testMethods);
    Guard.ArgumentNotNull("callback", callback);

    // Bucket the requested methods by their owning test class.
    var sortedMethods = new Dictionary<TestClass, List<TestMethod>>();

    foreach (TestClass testClass in testClasses)
        sortedMethods[testClass] = new List<TestMethod>();

    foreach (TestMethod testMethod in testMethods)
    {
        List<TestMethod> methodList = null;

        if (!sortedMethods.TryGetValue(testMethod.TestClass, out methodList))
            throw new ArgumentException("Test method " + testMethod.MethodName + " on test class " + testMethod.TestClass.TypeName + " is not in this assembly", "testMethods");

        methodList.Add(testMethod);
    }

    // Build the <assembly> root node carrying run metadata attributes.
    XmlDocument doc = new XmlDocument();
    doc.LoadXml("<assembly/>");
    XmlNode assemblyNode = doc.ChildNodes[0];
    AddAttribute(assemblyNode, "name", ExecutorWrapper.AssemblyFilename);
    AddAttribute(assemblyNode, "run-date", DateTime.Now.ToString("yyyy-MM-dd"));
    AddAttribute(assemblyNode, "run-time", DateTime.Now.ToString("HH:mm:ss"));

    if (ExecutorWrapper.ConfigFilename != null)
        AddAttribute(assemblyNode, "configFile", ExecutorWrapper.ConfigFilename);

    callback.AssemblyStart(this);

    // The wrapper tallies totals across classes for the AssemblyFinished call.
    var callbackWrapper = new TestMethodRunnerCallbackWrapper(callback);

    int passed = 0;
    int failed = 0;
    int skipped = 0;
    double duration = 0.0;

    string result = "";
    foreach (var kvp in sortedMethods)
        if (kvp.Value.Count > 0)
        {
            result += kvp.Key.Run(kvp.Value, callbackWrapper);

            // Tally pass/fail/skip counts and total duration from run results.
            foreach (TestMethod testMethod in kvp.Value)
                foreach (TestResult runResult in testMethod.RunResults)
                {
                    duration += runResult.Duration;

                    if (runResult is TestPassedResult)
                        passed++;
                    else if (runResult is TestFailedResult)
                        failed++;
                    else
                        skipped++;
                }
        }

    callback.AssemblyFinished(this, callbackWrapper.Total, callbackWrapper.Failed, callbackWrapper.Skipped, callbackWrapper.Time);

    AddAttribute(assemblyNode, "time", duration.ToString("0.000", CultureInfo.InvariantCulture));
    AddAttribute(assemblyNode, "total", passed + failed + skipped);
    AddAttribute(assemblyNode, "passed", passed);
    AddAttribute(assemblyNode, "failed", failed);
    AddAttribute(assemblyNode, "skipped", skipped);
    AddAttribute(assemblyNode, "environment", String.Format("{0}-bit .NET {1}", IntPtr.Size * 8, Environment.Version));
    AddAttribute(assemblyNode, "test-framework", String.Format("xUnit.net {0}", ExecutorWrapper.XunitVersion));

    // Re-open the root tag so per-class XML fragments can be appended inside it.
    return assemblyNode.OuterXml.Replace(" />", ">") + result + "</assembly>";
}
/// <inheritdoc/>
public virtual void Run(IEnumerable<TestMethod> testMethods, ITestMethodRunnerCallback callback)
{
    Guard.ArgumentNotNullOrEmpty("testMethods", testMethods);
    Guard.ArgumentNotNull("callback", callback);

    // Bucket methods by their owning test class before running.
    var sortedMethods = new Dictionary<TestClass, List<TestMethod>>();

    foreach (TestClass testClass in testClasses)
        sortedMethods[testClass] = new List<TestMethod>();

    foreach (TestMethod testMethod in testMethods)
    {
        List<TestMethod> methodList = null;

        if (!sortedMethods.TryGetValue(testMethod.TestClass, out methodList))
            throw new ArgumentException("Test method " + testMethod.MethodName + " on test class " + testMethod.TestClass.TypeName + " is not in this assembly", "testMethods");

        methodList.Add(testMethod);
    }

    callback.AssemblyStart(this);

    // The wrapper accumulates the totals reported to AssemblyFinished below.
    var callbackWrapper = new TestMethodRunnerCallbackWrapper(callback);

    foreach (var kvp in sortedMethods)
        if (kvp.Value.Count > 0)
            kvp.Key.Run(kvp.Value, callbackWrapper);

    callback.AssemblyFinished(this, callbackWrapper.Total, callbackWrapper.Failed, callbackWrapper.Skipped, callbackWrapper.Time);
}
/// <summary>
/// Notifies the callback that this context's combined test-file run has begun.
/// </summary>
private void FireFileStarted(ITestMethodRunnerCallback callback, TestContext testContext)
{
    var startedFiles = testContext.InputTestFilesString;
    callback.FileStarted(startedFiles);
}
/// <summary>
/// Translates a non-success process exit code into a TestError, records it,
/// notifies the callback, and traces it. Normal completions are ignored.
/// </summary>
private static void HandleTestProcessExitCode(int exitCode, string inputTestFile, IList<TestError> errors, ITestMethodRunnerCallback callback)
{
    string errorMessage;
    switch ((TestProcessExitCode)exitCode)
    {
        case TestProcessExitCode.AllPassed:
        case TestProcessExitCode.SomeFailed:
            // Both are normal completions — nothing to report.
            return;
        case TestProcessExitCode.Timeout:
            errorMessage = "Timeout occurred when executing test file";
            break;
        default:
            errorMessage = "Unknown error occurred when executing test file. Received exit code of " + exitCode;
            break;
    }

    if (string.IsNullOrEmpty(errorMessage))
    {
        return;
    }

    var error = new TestError
    {
        InputTestFile = inputTestFile,
        Message = errorMessage
    };

    errors.Add(error);
    callback.FileError(error);
    ChutzpahTracer.TraceError("Headless browser returned with an error: {0}", errorMessage);
}
/// <summary>
/// Initialization of the package; this method is called right after the package is sited, so this is the place
/// where you can put all the initialization code that rely on services provided by Visual Studio.
/// </summary>
protected override void Initialize()
{
    Trace.WriteLine(string.Format(CultureInfo.CurrentCulture, "Entering Initialize() of: {0}", ToString()));
    base.Initialize();

    dte = (DTE2)GetService(typeof(DTE));
    if (dte == null)
    {
        // Everything below depends on the DTE automation object, so failing to
        // acquire it is a fatal initialization error. Use InvalidOperationException:
        // ArgumentNullException is reserved for null *arguments* and "dte" is not a
        // parameter of this method (CA2208).
        throw new InvalidOperationException("Unable to acquire the DTE service; Chutzpah cannot initialize.");
    }

    testRunner = TestRunner.Create();
    processHelper = new ProcessHelper();
    Logger = new Logger(this);
    Settings = GetDialogPage(typeof(ChutzpahSettings)) as ChutzpahSettings;

    statusBar = GetService(typeof(SVsStatusbar)) as IVsStatusbar;
    runnerCallback = new ParallelRunnerCallbackAdapter(new VisualStudioRunnerCallback(dte, statusBar));

    // Add our command handlers for menu (commands must exist in the .vsct file)
    var mcs = GetService(typeof(IMenuCommandService)) as OleMenuCommandService;
    if (null != mcs)
    {
        // Command - Run JS Tests
        var runJsTestsCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidRunJSTests);
        var runJsTestMenuCmd = new OleMenuCommand(RunJSTestCmdCallback, runJsTestsCmd);
        runJsTestMenuCmd.BeforeQueryStatus += RunJSTestsCmdQueryStatus;
        mcs.AddCommand(runJsTestMenuCmd);

        // Command - Run JS tests in browser
        var runJsTestsInBrowserCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidRunInBrowser);
        var runJsTestInBrowserMenuCmd = new OleMenuCommand(RunJSTestInBrowserCmdCallback, runJsTestsInBrowserCmd);
        runJsTestInBrowserMenuCmd.BeforeQueryStatus += RunJSTestsInBrowserCmdQueryStatus;
        mcs.AddCommand(runJsTestInBrowserMenuCmd);

        // Command - Run Code Coverage
        var runJsTestCodeCoverageCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidRunCodeCoverage);
        var runJsTestCodeCoverageMenuCmd = new OleMenuCommand(RunCodeCoverageCmdCallback, runJsTestCodeCoverageCmd);
        runJsTestCodeCoverageMenuCmd.BeforeQueryStatus += RunCodeCoverageCmdQueryStatus;
        mcs.AddCommand(runJsTestCodeCoverageMenuCmd);

        // Command - Debug JS Tests
        var runJsTestDebuggerCmd = new CommandID(GuidList.guidChutzpahCmdSet, (int)PkgCmdIDList.cmdidDebugTests);
        var runJsTestDebuggerMenuCmd = new OleMenuCommand(RunDebuggerCmdCallback, runJsTestDebuggerCmd);
        runJsTestDebuggerMenuCmd.BeforeQueryStatus += RunDebuggerCmdQueryStatus;
        mcs.AddCommand(runJsTestDebuggerMenuCmd);
    }

    // Track solution lifecycle so state can be refreshed when projects change.
    this.solutionListener = new SolutionEventsListener(this);
    this.solutionListener.SolutionUnloaded += OnSolutionUnloaded;
    this.solutionListener.SolutionProjectChanged += OnSolutionProjectChanged;
    this.solutionListener.StartListeningForChanges();
}
/// <summary>
/// Records a finished test case on its file context, notifies the callback,
/// and traces the completion.
/// </summary>
/// <param name="callback">Receives the TestFinished notification.</param>
/// <param name="testFileContext">Context of the file the test belongs to.</param>
/// <param name="jsRunnerOutput">Runner output; must be a JsTestCase.</param>
/// <param name="testIndex">Zero-based index of the test within its file, used for line-number lookup.</param>
private void FireTestFinished(ITestMethodRunnerCallback callback, StreamingTestFileContext testFileContext, JsRunnerOutput jsRunnerOutput, int testIndex)
{
    // Direct cast instead of "as": if the runner ever hands us something other
    // than a JsTestCase this fails here with a descriptive InvalidCastException
    // rather than a NullReferenceException on the next line.
    var jsTestCase = (JsTestCase)jsRunnerOutput;
    jsTestCase.TestCase.InputTestFile = testFileContext.ReferencedFile.Path;
    AddLineNumber(testFileContext.ReferencedFile, testIndex, jsTestCase);
    callback.TestFinished(jsTestCase.TestCase);
    testFileContext.TestFileSummary.AddTestCase(jsTestCase.TestCase);

    ChutzpahTracer.TraceInformation("Test Case Finished:'{0}'", jsTestCase.TestCase.GetDisplayName());
}
/// <summary>
/// Reads the test process's output line by line and translates the JSON-prefixed
/// protocol messages (FileStart, CoverageObject, FileDone, TestStart, TestDone,
/// Log, Error) into callback invocations and per-file summaries. Because several
/// files under test share one output stream, each TestStart is matched to the
/// file context it most likely belongs to.
/// </summary>
/// <param name="stream">Stdout of the test process.</param>
/// <param name="testContext">Context describing the referenced/under-test files.</param>
/// <param name="testOptions">Run-wide options; supplies the coverage default.</param>
/// <param name="callback">Receives test progress events.</param>
/// <param name="debugEnabled">When true, echoes every raw line to the console.</param>
/// <returns>One TestFileSummary per file under test.</returns>
private IList<TestFileSummary> ReadFromStream(StreamReader stream, TestContext testContext, TestOptions testOptions, ITestMethodRunnerCallback callback, bool debugEnabled)
{
    // Coverage is on when the settings file explicitly enables it, or when the
    // settings file is silent and the run-wide option enables it.
    var codeCoverageEnabled = (!testContext.TestFileSettings.EnableCodeCoverage.HasValue && testOptions.CoverageOptions.Enabled)
                              || (testContext.TestFileSettings.EnableCodeCoverage.HasValue && testContext.TestFileSettings.EnableCodeCoverage.Value);

    var testFileContexts = testContext.ReferencedFiles
        .Where(x => x.IsFileUnderTest)
        .Select(x => new TestFileContext(x, testContext, codeCoverageEnabled))
        .ToList();

    var testIndex = 0;
    string line;
    TestFileContext currentTestFileContext = null;

    // With a single file under test there is no ambiguity about which file the
    // events belong to, so pick it up front.
    if (testFileContexts.Count == 1)
    {
        currentTestFileContext = testFileContexts.First();
    }

    // Events that arrive before any file context is chosen are queued and
    // replayed once the first TestStart resolves a context (PlayDeferredEvents).
    var deferredEvents = new List<Action<TestFileContext>>();

    while ((line = stream.ReadLine()) != null)
    {
        if (debugEnabled)
        {
            Console.WriteLine(line);
        }

        var match = prefixRegex.Match(line);
        if (!match.Success)
        {
            continue;
        }

        var type = match.Groups["type"].Value;
        var json = match.Groups["json"].Value;

        // Only update last event timestamp if it is an important event.
        // Log and error could happen even though no test progress is made
        if (!type.Equals("Log") && !type.Equals("Error"))
        {
            lastTestEvent = DateTime.Now;
        }

        try
        {
            switch (type)
            {
                case "FileStart":
                    FireFileStarted(callback, testContext);
                    break;

                case "CoverageObject":
                    var jsCov = jsonSerializer.Deserialize<JsCoverage>(json);
                    if (currentTestFileContext == null)
                    {
                        deferredEvents.Add((fileContext) => FireCoverageObject(callback, fileContext, jsCov));
                    }
                    else
                    {
                        FireCoverageObject(callback, currentTestFileContext, jsCov);
                    }
                    break;

                case "FileDone":
                    var jsFileDone = jsonSerializer.Deserialize<JsFileDone>(json);
                    FireFileFinished(callback, testContext.InputTestFilesString, testFileContexts, jsFileDone);
                    break;

                case "TestStart":
                    var jsTestCaseStart = jsonSerializer.Deserialize<JsTestCase>(json);
                    TestFileContext newContext = null;
                    var testName = jsTestCaseStart.TestCase.TestName.Trim();
                    var moduleName = (jsTestCaseStart.TestCase.ModuleName ?? "").Trim();

                    var fileContexts = GetFileMatches(testName, testFileContexts);
                    if (fileContexts.Count == 0 && currentTestFileContext == null)
                    {
                        // If there are no matches and no file context has been used yet
                        // then just choose the first context
                        newContext = testFileContexts[0];
                    }
                    else if (fileContexts.Count == 0)
                    {
                        // If there is already a current context and no matches we just keep using that context
                        // unless this test name has been used already in the current context. In that case
                        // move to the next one that hasn't seen this test yet
                        var testAlreadySeenInCurrentContext = currentTestFileContext.HasTestBeenSeen(moduleName, testName);
                        if (testAlreadySeenInCurrentContext)
                        {
                            newContext = testFileContexts.FirstOrDefault(x => !x.HasTestBeenSeen(moduleName, testName)) ?? currentTestFileContext;
                        }
                    }
                    else if (fileContexts.Count > 1)
                    {
                        // If we found the test has more than one file match
                        // try to choose the best match, otherwise just choose the first one

                        // If we have no file context yet take the first one
                        if (currentTestFileContext == null)
                        {
                            newContext = fileContexts.First();
                        }
                        else
                        {
                            // In this case we have an existing file context so we need to
                            // 1. Check to see if this test has been seen already on that context
                            //    if so we need to try the next file context that matches it
                            // 2. If it is not seen yet in the current context and the current context
                            //    is one of the matches then keep using it
                            var testAlreadySeenInCurrentContext = currentTestFileContext.HasTestBeenSeen(moduleName, testName);
                            var currentContextInFileMatches = fileContexts.Any(x => x == currentTestFileContext);
                            if (!testAlreadySeenInCurrentContext && currentContextInFileMatches)
                            {
                                // Keep the current context
                                newContext = currentTestFileContext;
                            }
                            else
                            {
                                // Either take first not used context OR the first one
                                newContext = fileContexts.Where(x => !x.IsUsed).FirstOrDefault() ?? fileContexts.First();
                            }
                        }
                    }
                    else if (fileContexts.Count == 1)
                    {
                        // We found a unique match
                        newContext = fileContexts[0];
                    }

                    // Switching to a different file context resets the per-file
                    // test index used for line-number lookup.
                    if (newContext != null && newContext != currentTestFileContext)
                    {
                        currentTestFileContext = newContext;
                        testIndex = 0;
                    }

                    currentTestFileContext.IsUsed = true;
                    currentTestFileContext.MarkTestSeen(moduleName, testName);

                    PlayDeferredEvents(currentTestFileContext, deferredEvents);

                    jsTestCaseStart.TestCase.InputTestFile = currentTestFileContext.ReferencedFile.Path;
                    callback.TestStarted(jsTestCaseStart.TestCase);
                    break;

                case "TestDone":
                    var jsTestCaseDone = jsonSerializer.Deserialize<JsTestCase>(json);
                    var currentTestIndex = testIndex;
                    FireTestFinished(callback, currentTestFileContext, jsTestCaseDone, currentTestIndex);
                    testIndex++;
                    break;

                case "Log":
                    var log = jsonSerializer.Deserialize<JsLog>(json);
                    if (currentTestFileContext != null)
                    {
                        FireLogOutput(callback, currentTestFileContext, log);
                    }
                    else
                    {
                        deferredEvents.Add((fileContext) => FireLogOutput(callback, fileContext, log));
                    }
                    break;

                case "Error":
                    var error = jsonSerializer.Deserialize<JsError>(json);
                    if (currentTestFileContext != null)
                    {
                        FireErrorOutput(callback, currentTestFileContext, error);
                    }
                    else
                    {
                        deferredEvents.Add((fileContext) => FireErrorOutput(callback, fileContext, error));
                    }
                    break;
            }
        }
        catch (SerializationException e)
        {
            // Ignore malformed json and move on
            ChutzpahTracer.TraceError(e, "Recieved malformed json from Phantom in this line: '{0}'", line);
        }
    }

    return (testFileContexts.Select(x => x.TestFileSummary).ToList());
}
/// <summary>
/// Records a finished test case on its file context and notifies the callback.
/// </summary>
/// <param name="callback">Receives the TestFinished notification.</param>
/// <param name="testFileContext">Context of the file the test belongs to.</param>
/// <param name="jsRunnerOutput">Runner output; must be a JsTestCase.</param>
/// <param name="testIndex">Zero-based index of the test within its file, used for line-number lookup.</param>
private void FireTestFinished(ITestMethodRunnerCallback callback, StreamingTestFileContext testFileContext, JsRunnerOutput jsRunnerOutput, int testIndex)
{
    // Direct cast instead of "as": an unexpected payload type fails here with a
    // descriptive InvalidCastException rather than a NullReferenceException on
    // the dereference below.
    var jsTestCase = (JsTestCase)jsRunnerOutput;
    jsTestCase.TestCase.InputTestFile = testFileContext.ReferencedFile.Path;
    AddLineNumber(testFileContext.ReferencedFile, testIndex, jsTestCase);
    callback.TestFinished(jsTestCase.TestCase);
    testFileContext.TestFileSummary.AddTestCase(jsTestCase.TestCase);
}
/// <summary>
/// Core pipeline for a test run: discover files, group them into batched run
/// configurations, build contexts in parallel, batch-compile, execute, and
/// aggregate the per-file summaries into a single TestCaseSummary.
/// </summary>
/// <param name="testPaths">Files or folders to scan; must not be null.</param>
/// <param name="options">Run options (parallelism, search limit, launch mode).</param>
/// <param name="testExecutionMode">Execution vs. discovery; stored on options.</param>
/// <param name="callback">Receives progress and error notifications.</param>
/// <returns>Aggregated summary including total run time and transform results.</returns>
/// <exception cref="ArgumentNullException">When testPaths is null.</exception>
/// <exception cref="FileNotFoundException">When the headless browser or runner js file cannot be found.</exception>
private TestCaseSummary ProcessTestPaths(IEnumerable<string> testPaths, TestOptions options, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    options.TestExecutionMode = testExecutionMode;
    stopWatch.Start();
    string headlessBrowserPath = fileProbe.FindFilePath(HeadlessBrowserName);
    if (testPaths == null)
    {
        throw new ArgumentNullException("testPaths");
    }
    if (headlessBrowserPath == null)
    {
        throw new FileNotFoundException("Unable to find headless browser: " + HeadlessBrowserName);
    }
    if (fileProbe.FindFilePath(TestRunnerJsName) == null)
    {
        throw new FileNotFoundException("Unable to find test runner base js file: " + TestRunnerJsName);
    }

    var overallSummary = new TestCaseSummary();

    // Concurrent list to collect test contexts
    var testContexts = new ConcurrentBag<TestContext>();

    // Concurrent collection used to gather the parallel results from
    var testFileSummaries = new ConcurrentQueue<TestFileSummary>();
    // NOTE(review): resultCount is passed by value to BuildTestContexts, so
    // increments made there never propagate back to this local — confirm intent.
    var resultCount = 0;
    var cancellationSource = new CancellationTokenSource();

    // Given the input paths discover the potential test files
    var scriptPaths = FindTestFiles(testPaths, options);

    // Group the test files by their chutzpah.json files. Then check if those settings file have batching mode enabled.
    // If so, we keep those tests in a group together to be used in one context
    // Otherwise, we put each file in its own test group so each get their own context
    var testRunConfiguration = BuildTestRunConfiguration(scriptPaths, options);

    ConfigureTracing(testRunConfiguration);

    // A settings-file parallelism cap can only lower the option's value.
    var parallelism = testRunConfiguration.MaxDegreeOfParallelism.HasValue
        ? Math.Min(options.MaxDegreeOfParallelism, testRunConfiguration.MaxDegreeOfParallelism.Value)
        : options.MaxDegreeOfParallelism;

    var parallelOptions = new ParallelOptions { MaxDegreeOfParallelism = parallelism, CancellationToken = cancellationSource.Token };

    ChutzpahTracer.TraceInformation("Chutzpah run started in mode {0} with parallelism set to {1}", testExecutionMode, parallelOptions.MaxDegreeOfParallelism);

    // Build test contexts in parallel given a list of files each
    BuildTestContexts(options, testRunConfiguration.TestGroups, parallelOptions, cancellationSource, resultCount, testContexts, callback, overallSummary);

    // Compile the test contexts
    if (!PerformBatchCompile(callback, testContexts))
    {
        return (overallSummary);
    }

    // Build test harness for each context and execute it in parallel
    ExecuteTestContexts(options, testExecutionMode, callback, testContexts, parallelOptions, headlessBrowserPath, testFileSummaries, overallSummary);

    // Gather TestFileSummaries into TestCaseSummary
    foreach (var fileSummary in testFileSummaries)
    {
        overallSummary.Append(fileSummary);
    }
    stopWatch.Stop();
    overallSummary.SetTotalRunTime((int)stopWatch.Elapsed.TotalMilliseconds);

    overallSummary.TransformResult = transformProcessor.ProcessTransforms(testContexts, overallSummary);

    // Clear the settings file cache since in VS Chutzpah is not unloaded from memory.
    // If we don't clear then the user can never update the file.
    testSettingsService.ClearCache();

    ChutzpahTracer.TraceInformation(
        "Chutzpah run finished with {0} passed, {1} failed and {2} errors",
        overallSummary.PassedCount,
        overallSummary.FailedCount,
        overallSummary.Errors.Count);

    return (overallSummary);
}
/// <summary>
/// Runs tests for the given paths using default test options.
/// </summary>
public TestCaseSummary RunTests(IEnumerable<string> testPaths, ITestMethodRunnerCallback callback = null)
{
    var defaultOptions = new TestOptions();
    return RunTests(testPaths, defaultOptions, callback);
}
// Test double: records the arguments it was invoked with and returns the canned value.
protected override string RunTests(List<string> methods, ITestMethodRunnerCallback callback)
{
    RunTests_Callback = callback;
    RunTests_Methods = methods;
    return RunTests_ReturnValue;
}
/// <summary>
/// Discovers test files under the given paths, builds a context for each in
/// parallel, and either opens the harness in a browser or runs it headlessly,
/// aggregating per-file summaries into one TestCaseSummary.
/// </summary>
/// <param name="testPaths">Files or folders to scan; must not be null.</param>
/// <param name="options">Run options (parallelism, browser launch, search limit).</param>
/// <param name="testRunnerMode">Forwarded to the test runner invocation.</param>
/// <param name="callback">Receives exceptions raised while processing each file.</param>
/// <returns>Aggregated summary including total run time.</returns>
/// <exception cref="ArgumentNullException">When testPaths is null.</exception>
/// <exception cref="FileNotFoundException">When the headless browser or runner js file cannot be found.</exception>
private TestCaseSummary ProcessTestPaths(IEnumerable<string> testPaths, TestOptions options, TestRunnerMode testRunnerMode, ITestMethodRunnerCallback callback)
{
    stopWatch.Start();
    string headlessBrowserPath = fileProbe.FindFilePath(HeadlessBrowserName);
    if (testPaths == null)
        throw new ArgumentNullException("testPaths");
    if (headlessBrowserPath == null)
        throw new FileNotFoundException("Unable to find headless browser: " + HeadlessBrowserName);
    if (fileProbe.FindFilePath(TestRunnerJsName) == null)
        throw new FileNotFoundException("Unable to find test runner base js file: " + TestRunnerJsName);

    var overallSummary = new TestCaseSummary();

    // Concurrent collection used to gather the parallel results from
    var testFileSummaries = new ConcurrentQueue<TestFileSummary>();
    var resultCount = 0;
    var cancellationSource = new CancellationTokenSource();
    var parallelOptions = new ParallelOptions { MaxDegreeOfParallelism = options.MaxDegreeOfParallelism, CancellationToken = cancellationSource.Token };

    Parallel.ForEach(fileProbe.FindScriptFiles(testPaths, options.TestingMode), parallelOptions, testFile =>
    {
        try
        {
            if (cancellationSource.IsCancellationRequested) return;
            TestContext testContext;

            // NOTE(review): resultCount++ is a racy read-modify-write across the
            // parallel bodies, so the FileSearchLimit check below is approximate —
            // confirm whether that is acceptable.
            resultCount++;
            if (testContextBuilder.TryBuildContext(testFile, options, out testContext))
            {
                if (options.OpenInBrowser)
                {
                    ChutzpahTracer.TraceInformation("Launching test harness '{0}' for file '{1}' in a browser", testContext.TestHarnessPath, testContext.InputTestFile);
                    process.LaunchFileInBrowser(testContext.TestHarnessPath);
                }
                else
                {
                    ChutzpahTracer.TraceInformation("Invoking test runner on test harness '{0}' for file '{1}'", testContext.TestHarnessPath, testContext.InputTestFile);
                    var testSummary = InvokeTestRunner(headlessBrowserPath, options, testContext, testRunnerMode, callback);
                    testFileSummaries.Enqueue(testSummary);
                }

                // Don't clean up context if in debug mode or when the harness was
                // opened in a browser (the browser still needs the harness files).
                if (!m_debugEnabled && !options.OpenInBrowser)
                {
                    ChutzpahTracer.TraceInformation("Cleaning up test context artifacts");
                    testContextBuilder.CleanupContext(testContext);
                }
            }

            // Limit the number of files we can scan to attempt to build a context for
            // This is important in the case of folder scanning where many JS files may not be
            // test files.
            if (resultCount >= options.FileSearchLimit)
            {
                cancellationSource.Cancel();
            }
        }
        catch (Exception e)
        {
            callback.ExceptionThrown(e, testFile.FullPath);
        }
    });

    // Gather TestFileSummaries into TestCaseSummary
    foreach (var fileSummary in testFileSummaries)
    {
        overallSummary.Append(fileSummary);
    }
    stopWatch.Stop();
    overallSummary.SetTotalRunTime((int)stopWatch.Elapsed.TotalMilliseconds);

    compilerCache.Save();
    return overallSummary;
}
/// <summary>
/// Runs the tests found under the given paths and reports suite-level progress.
/// When launching in a full browser, or when no callback was supplied, a no-op
/// callback is substituted.
/// </summary>
public TestCaseSummary RunTests(IEnumerable<string> testPaths, TestOptions options, ITestMethodRunnerCallback callback = null)
{
    var suppressCallback = callback == null || options.TestLaunchMode == TestLaunchMode.FullBrowser;
    var effectiveCallback = suppressCallback ? RunnerCallback.Empty : callback;

    effectiveCallback.TestSuiteStarted();
    var summary = ProcessTestPaths(testPaths, options, TestExecutionMode.Execution, effectiveCallback);
    effectiveCallback.TestSuiteFinished(summary);
    return summary;
}
/// <summary>
/// Starts a background task that parses the test process's output stream and
/// polls it for completion. If no important test event arrives within the
/// configured timeout, the process is killed and empty summaries are returned.
/// </summary>
/// <param name="processStream">The process whose stdout is read; must not be null.</param>
/// <param name="testOptions">Supplies the default test file timeout; must not be null.</param>
/// <param name="testContext">Its settings may override the timeout; must not be null.</param>
/// <param name="callback">Receives test progress events.</param>
/// <param name="debugEnabled">Passed through to the stream reader for console echoing.</param>
/// <returns>One summary per file under test; empty summaries when timed out.</returns>
public IList<TestFileSummary> Read(ProcessStream processStream, TestOptions testOptions, TestContext testContext, ITestMethodRunnerCallback callback, bool debugEnabled)
{
    if (processStream == null)
    {
        throw new ArgumentNullException("processStream");
    }
    if (testOptions == null)
    {
        throw new ArgumentNullException("testOptions");
    }
    if (testContext == null)
    {
        throw new ArgumentNullException("testContext");
    }

    lastTestEvent = DateTime.Now;
    // Settings-file timeout takes precedence over the run-wide option.
    var timeout = (testContext.TestFileSettings.TestFileTimeout ?? testOptions.TestFileTimeoutMilliseconds) + 500; // Add buffer to timeout to account for serialization

    var readerTask = Task<IList<TestFileSummary>>.Factory.StartNew(() => ReadFromStream(processStream.StreamReader, testContext, testOptions, callback, debugEnabled));

    // Poll until the reader finishes or goes quiet for longer than the timeout.
    // The timeout is measured from the most recent important event, since
    // lastTestEvent is refreshed by the reader task as events arrive.
    // NOTE(review): lastTestEvent is written by the reader task and read here
    // without synchronization — presumably benign for this polling loop; confirm.
    while (readerTask.Status == TaskStatus.WaitingToRun
           || (readerTask.Status == TaskStatus.Running && (DateTime.Now - lastTestEvent).TotalMilliseconds < timeout))
    {
        Thread.Sleep(100);
    }

    if (readerTask.IsCompleted)
    {
        ChutzpahTracer.TraceInformation("Finished reading stream from test file '{0}'", testContext.FirstInputTestFile);
        return (readerTask.Result);
    }
    else
    {
        // We timed out so kill the process and return an empty test file summary
        ChutzpahTracer.TraceError("Test file '{0}' timed out after running for {1} milliseconds", testContext.FirstInputTestFile, (DateTime.Now - lastTestEvent).TotalMilliseconds);

        processStream.TimedOut = true;
        processStream.KillProcess();
        return (testContext.ReferencedFiles.Where(x => x.IsFileUnderTest).Select(file => new TestFileSummary(file.Path)).ToList());
    }
}
/// <summary>
/// Core pipeline for a test run: discover files, group them into batched run
/// configurations, build contexts in parallel, batch-compile, execute, and
/// aggregate the per-file summaries into a single TestCaseSummary.
/// </summary>
/// <param name="testPaths">Files or folders to scan; must not be null.</param>
/// <param name="options">Run options (parallelism, search limit, launch mode).</param>
/// <param name="testExecutionMode">Execution vs. discovery; stored on options.</param>
/// <param name="callback">Receives progress and error notifications.</param>
/// <returns>Aggregated summary including total run time and transform results.</returns>
/// <exception cref="ArgumentNullException">When testPaths is null.</exception>
/// <exception cref="FileNotFoundException">When the headless browser or runner js file cannot be found.</exception>
private TestCaseSummary ProcessTestPaths(IEnumerable<string> testPaths, TestOptions options, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    options.TestExecutionMode = testExecutionMode;
    stopWatch.Start();
    string headlessBrowserPath = fileProbe.FindFilePath(HeadlessBrowserName);
    if (testPaths == null)
        throw new ArgumentNullException("testPaths");
    if (headlessBrowserPath == null)
        throw new FileNotFoundException("Unable to find headless browser: " + HeadlessBrowserName);
    if (fileProbe.FindFilePath(TestRunnerJsName) == null)
        throw new FileNotFoundException("Unable to find test runner base js file: " + TestRunnerJsName);

    var overallSummary = new TestCaseSummary();

    // Concurrent list to collect test contexts
    var testContexts = new ConcurrentBag<TestContext>();

    // Concurrent collection used to gather the parallel results from
    var testFileSummaries = new ConcurrentQueue<TestFileSummary>();
    // NOTE(review): resultCount is passed by value to BuildTestContexts, so
    // increments made there never propagate back to this local — confirm intent.
    var resultCount = 0;
    var cancellationSource = new CancellationTokenSource();

    // Given the input paths discover the potential test files
    var scriptPaths = FindTestFiles(testPaths, options);

    // Group the test files by their chutzpah.json files. Then check if those settings file have batching mode enabled.
    // If so, we keep those tests in a group together to be used in one context
    // Otherwise, we put each file in its own test group so each get their own context
    var testRunConfiguration = BuildTestRunConfiguration(scriptPaths, options);

    ConfigureTracing(testRunConfiguration);

    // A settings-file parallelism cap can only lower the option's value.
    var parallelism = testRunConfiguration.MaxDegreeOfParallelism.HasValue
        ? Math.Min(options.MaxDegreeOfParallelism, testRunConfiguration.MaxDegreeOfParallelism.Value)
        : options.MaxDegreeOfParallelism;

    var parallelOptions = new ParallelOptions { MaxDegreeOfParallelism = parallelism, CancellationToken = cancellationSource.Token };

    ChutzpahTracer.TraceInformation("Chutzpah run started in mode {0} with parallelism set to {1}", testExecutionMode, parallelOptions.MaxDegreeOfParallelism);

    // Build test contexts in parallel given a list of files each
    BuildTestContexts(options, testRunConfiguration.TestGroups, parallelOptions, cancellationSource, resultCount, testContexts, callback, overallSummary);

    // Compile the test contexts
    if (!PerformBatchCompile(callback, testContexts))
    {
        return overallSummary;
    }

    // Build test harness for each context and execute it in parallel
    ExecuteTestContexts(options, testExecutionMode, callback, testContexts, parallelOptions, headlessBrowserPath, testFileSummaries, overallSummary);

    // Gather TestFileSummaries into TestCaseSummary
    foreach (var fileSummary in testFileSummaries)
    {
        overallSummary.Append(fileSummary);
    }
    stopWatch.Stop();
    overallSummary.SetTotalRunTime((int)stopWatch.Elapsed.TotalMilliseconds);

    overallSummary.TransformResult = transformProcessor.ProcessTransforms(testContexts, overallSummary);

    // Clear the settings file cache since in VS Chutzpah is not unloaded from memory.
    // If we don't clear then the user can never update the file.
    testSettingsService.ClearCache();

    ChutzpahTracer.TraceInformation(
        "Chutzpah run finished with {0} passed, {1} failed and {2} errors",
        overallSummary.PassedCount,
        overallSummary.FailedCount,
        overallSummary.Errors.Count);

    return overallSummary;
}
/// <summary>
/// Creates a test harness for each context and runs it in parallel according to
/// the launch mode (full browser, headless browser, or custom launcher).
/// Summaries are enqueued on <paramref name="testFileSummaries"/> and failures
/// recorded on <paramref name="overallSummary"/>. After all runs complete,
/// harness artifacts are cleaned up unless debugging, a browser launch, or a
/// custom launcher still needs them.
/// </summary>
private void ExecuteTestContexts(
    TestOptions options,
    TestExecutionMode testExecutionMode,
    ITestMethodRunnerCallback callback,
    ConcurrentBag<TestContext> testContexts,
    ParallelOptions parallelOptions,
    string headlessBrowserPath,
    ConcurrentQueue<TestFileSummary> testFileSummaries,
    TestCaseSummary overallSummary)
{
    Parallel.ForEach(
        testContexts,
        parallelOptions,
        testContext =>
        {
            ChutzpahTracer.TraceInformation("Start test run for {0} in {1} mode", testContext.FirstInputTestFile, testExecutionMode);

            try
            {
                try
                {
                    testHarnessBuilder.CreateTestHarness(testContext, options);
                }
                catch (IOException)
                {
                    // Mark this creation failed so we do not try to clean it up later
                    // This is to work around a bug in TestExplorer that runs chutzpah in parallel on
                    // the same files
                    // TODO(mmanela): Re-evalute if this is needed once they fix that bug
                    testContext.TestHarnessCreationFailed = true;
                    ChutzpahTracer.TraceWarning("Marking test harness creation failed for harness {0} and test file {1}", testContext.TestHarnessPath, testContext.FirstInputTestFile);
                    throw;
                }

                if (options.TestLaunchMode == TestLaunchMode.FullBrowser)
                {
                    ChutzpahTracer.TraceInformation(
                        "Launching test harness '{0}' for file '{1}' in a browser",
                        testContext.TestHarnessPath,
                        testContext.FirstInputTestFile);

                    // Allow override from command line.
                    var browserArgs = testContext.TestFileSettings.BrowserArguments;
                    if (!string.IsNullOrWhiteSpace(options.BrowserArgs))
                    {
                        var path = BrowserPathHelper.GetBrowserPath(options.BrowserName);
                        browserArgs = new Dictionary<string, string>
                        {
                            { Path.GetFileNameWithoutExtension(path), options.BrowserArgs }
                        };
                    }

                    process.LaunchFileInBrowser(testContext.TestHarnessPath, options.BrowserName, browserArgs);
                }
                else if (options.TestLaunchMode == TestLaunchMode.HeadlessBrowser)
                {
                    ChutzpahTracer.TraceInformation(
                        "Invoking headless browser on test harness '{0}' for file '{1}'",
                        testContext.TestHarnessPath,
                        testContext.FirstInputTestFile);

                    var testSummaries = InvokeTestRunner(
                        headlessBrowserPath,
                        options,
                        testContext,
                        testExecutionMode,
                        callback);

                    foreach (var testSummary in testSummaries)
                    {
                        ChutzpahTracer.TraceInformation(
                            "Test harness '{0}' for file '{1}' finished with {2} passed, {3} failed and {4} errors",
                            testContext.TestHarnessPath,
                            testSummary.Path,
                            testSummary.PassedCount,
                            testSummary.FailedCount,
                            testSummary.Errors.Count);

                        ChutzpahTracer.TraceInformation(
                            "Finished running headless browser on test harness '{0}' for file '{1}'",
                            testContext.TestHarnessPath,
                            testSummary.Path);

                        testFileSummaries.Enqueue(testSummary);
                    }
                }
                else if (options.TestLaunchMode == TestLaunchMode.Custom)
                {
                    if (options.CustomTestLauncher == null)
                    {
                        throw new ArgumentNullException("TestOptions.CustomTestLauncher");
                    }

                    ChutzpahTracer.TraceInformation(
                        "Launching custom test on test harness '{0}' for file '{1}'",
                        testContext.TestHarnessPath,
                        testContext.FirstInputTestFile);

                    options.CustomTestLauncher.LaunchTest(testContext);
                }
                else
                {
                    // Unreachable unless a new TestLaunchMode value is added.
                    Debug.Assert(false);
                }
            }
            catch (Exception e)
            {
                var error = new TestError
                {
                    InputTestFile = testContext.InputTestFiles.FirstOrDefault(),
                    Message = e.ToString()
                };

                overallSummary.Errors.Add(error);
                callback.FileError(error);

                ChutzpahTracer.TraceError(e, "Error during test execution of {0}", testContext.FirstInputTestFile);
            }
            finally
            {
                ChutzpahTracer.TraceInformation("Finished test run for {0} in {1} mode", testContext.FirstInputTestFile, testExecutionMode);
            }
        });

    // Clean up test context
    foreach (var testContext in testContexts)
    {
        // Don't clean up context if in debug mode
        if (!m_debugEnabled
            && !testContext.TestHarnessCreationFailed
            && options.TestLaunchMode != TestLaunchMode.FullBrowser
            && options.TestLaunchMode != TestLaunchMode.Custom)
        {
            try
            {
                ChutzpahTracer.TraceInformation("Cleaning up test context for {0}", testContext.FirstInputTestFile);
                testContextBuilder.CleanupContext(testContext);
            }
            catch (Exception e)
            {
                ChutzpahTracer.TraceError(e, "Error cleaning up test context for {0}", testContext.FirstInputTestFile);
            }
        }
    }
}
/// <summary>
/// Pipeline for a test run: discover files, build a context per file in
/// parallel, batch-compile, execute, aggregate the per-file summaries, persist
/// the compiler cache, and clear the settings cache.
/// </summary>
/// <param name="testPaths">Files or folders to scan; must not be null.</param>
/// <param name="options">Run options (parallelism, search limit, launch mode).</param>
/// <param name="testExecutionMode">Execution vs. discovery; stored on options.</param>
/// <param name="callback">Receives progress and error notifications.</param>
/// <returns>Aggregated summary including total run time.</returns>
/// <exception cref="ArgumentNullException">When testPaths is null.</exception>
/// <exception cref="FileNotFoundException">When the headless browser or runner js file cannot be found.</exception>
private TestCaseSummary ProcessTestPaths(IEnumerable<string> testPaths, TestOptions options, TestExecutionMode testExecutionMode, ITestMethodRunnerCallback callback)
{
    ChutzpahTracer.TraceInformation("Chutzpah run started in mode {0} with parallelism set to {1}", testExecutionMode, options.MaxDegreeOfParallelism);

    options.TestExecutionMode = testExecutionMode;
    stopWatch.Start();
    string headlessBrowserPath = fileProbe.FindFilePath(HeadlessBrowserName);
    if (testPaths == null)
    {
        throw new ArgumentNullException("testPaths");
    }
    if (headlessBrowserPath == null)
    {
        throw new FileNotFoundException("Unable to find headless browser: " + HeadlessBrowserName);
    }
    if (fileProbe.FindFilePath(TestRunnerJsName) == null)
    {
        throw new FileNotFoundException("Unable to find test runner base js file: " + TestRunnerJsName);
    }

    var overallSummary = new TestCaseSummary();

    // Concurrent list to collect test contexts
    var testContexts = new ConcurrentBag<TestContext>();

    // Concurrent collection used to gather the parallel results from
    var testFileSummaries = new ConcurrentQueue<TestFileSummary>();
    // NOTE(review): resultCount is passed by value to BuildTestContexts, so
    // increments made there never propagate back to this local — confirm intent.
    var resultCount = 0;
    var cancellationSource = new CancellationTokenSource();
    var parallelOptions = new ParallelOptions { MaxDegreeOfParallelism = options.MaxDegreeOfParallelism, CancellationToken = cancellationSource.Token };

    var scriptPaths = FindTestFiles(testPaths, options);

    // Build test contexts in parallel
    BuildTestContexts(options, scriptPaths, parallelOptions, cancellationSource, resultCount, testContexts, callback, overallSummary);

    // Compile the test contexts
    if (!PerformBatchCompile(callback, testContexts))
    {
        return (overallSummary);
    }

    // Build test harness for each context and execute it in parallel
    ExecuteTestContexts(options, testExecutionMode, callback, testContexts, parallelOptions, headlessBrowserPath, testFileSummaries, overallSummary);

    // Gather TestFileSummaries into TestCaseSummary
    foreach (var fileSummary in testFileSummaries)
    {
        overallSummary.Append(fileSummary);
    }

    stopWatch.Stop();
    overallSummary.SetTotalRunTime((int)stopWatch.Elapsed.TotalMilliseconds);
    compilerCache.Save();

    // Clear the settings file cache since in VS Chutzpah is not unloaded from memory.
    // If we don't clear then the user can never update the file.
    testSettingsService.ClearCache();

    ChutzpahTracer.TraceInformation(
        "Chutzpah run finished with {0} passed, {1} failed and {2} errors",
        overallSummary.PassedCount,
        overallSummary.FailedCount,
        overallSummary.Errors.Count);

    return (overallSummary);
}
/// <summary>
/// Runs tests for a single path by delegating to the multi-path overload.
/// </summary>
public TestCaseSummary RunTests(string testPath, TestOptions options, ITestMethodRunnerCallback callback = null)
{
    var paths = new[] { testPath };
    return RunTests(paths, options, callback);
}
/// <summary>
/// Streaming variant of the output-stream reader: translates JSON-prefixed
/// protocol messages into callback invocations against the caller-supplied
/// file contexts. Events arriving before a file context can be chosen are
/// appended to <paramref name="deferredEvents"/> and replayed when the first
/// TestStart resolves a context.
/// </summary>
/// <param name="stream">Stdout of the test process.</param>
/// <param name="testContext">Context describing the files being tested.</param>
/// <param name="testOptions">Run-wide options (unused directly here; kept for signature parity).</param>
/// <param name="streamingTestFileContexts">One context per file under test; owned by the caller.</param>
/// <param name="deferredEvents">Caller-owned queue of events awaiting a resolved context.</param>
/// <param name="callback">Receives test progress events.</param>
/// <param name="debugEnabled">When true, echoes every raw line to the console.</param>
/// <returns>The summaries of the supplied file contexts.</returns>
private IList<TestFileSummary> ReadFromStream(StreamReader stream, TestContext testContext, TestOptions testOptions, IList<StreamingTestFileContext> streamingTestFileContexts, IList<Action<StreamingTestFileContext>> deferredEvents, ITestMethodRunnerCallback callback, bool debugEnabled)
{
    var testIndex = 0;
    string line;
    StreamingTestFileContext currentTestFileContext = null;

    // With a single file under test there is no ambiguity about which file the
    // events belong to, so pick it up front.
    if (streamingTestFileContexts.Count == 1)
    {
        currentTestFileContext = streamingTestFileContexts.First();
    }

    while ((line = stream.ReadLine()) != null)
    {
        if (debugEnabled) Console.WriteLine(line);

        var match = prefixRegex.Match(line);
        if (!match.Success) continue;
        var type = match.Groups["type"].Value;
        var json = match.Groups["json"].Value;

        // Only update last event timestamp if it is an important event.
        // Log and error could happen even though no test progress is made
        if (!type.Equals("Log") && !type.Equals("Error"))
        {
            lastTestEvent = DateTime.Now;
        }

        try
        {
            switch (type)
            {
                case "FileStart":
                    FireFileStarted(callback, testContext);
                    break;

                case "CoverageObject":
                    var jsCov = jsonSerializer.Deserialize<JsCoverage>(json);
                    if (currentTestFileContext == null)
                    {
                        AddDeferredEvent((fileContext) => FireCoverageObject(callback, fileContext, jsCov), deferredEvents);
                    }
                    else
                    {
                        FireCoverageObject(callback, currentTestFileContext, jsCov);
                    }
                    break;

                case "FileDone":
                    var jsFileDone = jsonSerializer.Deserialize<JsFileDone>(json);
                    FireFileFinished(callback, testContext.InputTestFilesString, streamingTestFileContexts, jsFileDone);
                    break;

                case "TestStart":
                    var jsTestCaseStart = jsonSerializer.Deserialize<JsTestCase>(json);
                    StreamingTestFileContext newContext = null;
                    var testName = jsTestCaseStart.TestCase.TestName.Trim();
                    var moduleName = (jsTestCaseStart.TestCase.ModuleName ?? "").Trim();

                    var fileContexts = GetFileMatches(testName, streamingTestFileContexts);
                    if (fileContexts.Count == 0 && currentTestFileContext == null)
                    {
                        // If there are no matches and no file context has been used yet
                        // then just choose the first context
                        newContext = streamingTestFileContexts[0];
                    }
                    else if (fileContexts.Count == 0)
                    {
                        // If there is already a current context and no matches we just keep using that context
                        // unless this test name has been used already in the current context. In that case
                        // move to the next one that hasn't seen this test yet
                        var testAlreadySeenInCurrentContext = currentTestFileContext.HasTestBeenSeen(moduleName, testName);
                        if (testAlreadySeenInCurrentContext)
                        {
                            newContext = streamingTestFileContexts.FirstOrDefault(x => !x.HasTestBeenSeen(moduleName, testName)) ?? currentTestFileContext;
                        }
                    }
                    else if (fileContexts.Count > 1)
                    {
                        // If we found the test has more than one file match
                        // try to choose the best match, otherwise just choose the first one

                        // If we have no file context yet take the first one
                        if (currentTestFileContext == null)
                        {
                            newContext = fileContexts.First();
                        }
                        else
                        {
                            // In this case we have an existing file context so we need to
                            // 1. Check to see if this test has been seen already on that context
                            //    if so we need to try the next file context that matches it
                            // 2. If it is not seen yet in the current context and the current context
                            //    is one of the matches then keep using it
                            var testAlreadySeenInCurrentContext = currentTestFileContext.HasTestBeenSeen(moduleName, testName);
                            var currentContextInFileMatches = fileContexts.Any(x => x == currentTestFileContext);
                            if (!testAlreadySeenInCurrentContext && currentContextInFileMatches)
                            {
                                // Keep the current context
                                newContext = currentTestFileContext;
                            }
                            else
                            {
                                // Either take first not used context OR the first one
                                newContext = fileContexts.Where(x => !x.IsUsed).FirstOrDefault() ?? fileContexts.First();
                            }
                        }
                    }
                    else if (fileContexts.Count == 1)
                    {
                        // We found a unique match
                        newContext = fileContexts[0];
                    }

                    // Switching to a different file context resets the per-file
                    // test index used for line-number lookup.
                    if (newContext != null && newContext != currentTestFileContext)
                    {
                        currentTestFileContext = newContext;
                        testIndex = 0;
                    }

                    currentTestFileContext.IsUsed = true;
                    currentTestFileContext.MarkTestSeen(moduleName, testName);

                    PlayDeferredEvents(currentTestFileContext, deferredEvents);

                    jsTestCaseStart.TestCase.InputTestFile = currentTestFileContext.ReferencedFile.Path;
                    callback.TestStarted(jsTestCaseStart.TestCase);

                    ChutzpahTracer.TraceInformation("Test Case Started:'{0}'", jsTestCaseStart.TestCase.GetDisplayName());
                    break;

                case "TestDone":
                    var jsTestCaseDone = jsonSerializer.Deserialize<JsTestCase>(json);
                    var currentTestIndex = testIndex;
                    FireTestFinished(callback, currentTestFileContext, jsTestCaseDone, currentTestIndex);
                    testIndex++;
                    break;

                case "Log":
                    var log = jsonSerializer.Deserialize<JsLog>(json);
                    if (currentTestFileContext != null)
                    {
                        FireLogOutput(callback, currentTestFileContext, log);
                    }
                    else
                    {
                        AddDeferredEvent((fileContext) => FireLogOutput(callback, fileContext, log), deferredEvents);
                    }
                    break;

                case "Error":
                    var error = jsonSerializer.Deserialize<JsError>(json);
                    if (currentTestFileContext != null)
                    {
                        FireErrorOutput(callback, currentTestFileContext, error);
                    }
                    else
                    {
                        AddDeferredEvent((fileContext) => FireErrorOutput(callback, fileContext, error), deferredEvents);
                    }
                    break;
            }
        }
        catch (SerializationException e)
        {
            // Ignore malformed json and move on
            ChutzpahTracer.TraceError(e, "Recieved malformed json from Phantom in this line: '{0}'", line);
        }
    }

    return streamingTestFileContexts.Select(x => x.TestFileSummary).ToList();
}
/// <summary>
/// Runs the batch compiler over the supplied test contexts.
/// </summary>
/// <param name="callback">Callback notified if compilation throws.</param>
/// <param name="testContexts">Contexts whose files should be batch compiled.</param>
/// <returns>True when compilation succeeds; false when a compilation failure was reported.</returns>
private bool PerformBatchCompile(ITestMethodRunnerCallback callback, IEnumerable<TestContext> testContexts)
{
    try
    {
        batchCompilerService.Compile(testContexts);
        return true;
    }
    catch (ChutzpahCompilationFailedException e)
    {
        // Surface the failure to the caller's callback and trace it, then signal failure.
        callback.ExceptionThrown(e, e.SettingsFile);
        ChutzpahTracer.TraceError(e, "Error during batch compile from {0}", e.SettingsFile);
        return false;
    }
}
/// <summary>
/// Builds a test context for each group of test files, in parallel.
/// Successful contexts are added to <paramref name="testContexts"/>; failures are traced
/// and reported through <paramref name="callback"/> and <paramref name="overallSummary"/>.
/// </summary>
/// <param name="options">Test options, including the file search limit.</param>
/// <param name="scriptPathGroups">Groups of test file paths; one context is built per group.</param>
/// <param name="parallelOptions">Degree-of-parallelism settings for the loop.</param>
/// <param name="cancellationSource">Cancelled when the file search limit is reached.</param>
/// <param name="resultCount">Running count of groups processed so far.</param>
/// <param name="testContexts">Receives each successfully built context.</param>
/// <param name="callback">Notified of per-file errors.</param>
/// <param name="overallSummary">Accumulates errors across the run.</param>
private void BuildTestContexts(
    TestOptions options,
    List<List<PathInfo>> scriptPathGroups,
    ParallelOptions parallelOptions,
    CancellationTokenSource cancellationSource,
    int resultCount,
    ConcurrentBag<TestContext> testContexts,
    ITestMethodRunnerCallback callback,
    TestCaseSummary overallSummary)
{
    Parallel.ForEach(scriptPathGroups, parallelOptions, testFiles =>
    {
        var pathString = string.Join(",", testFiles.Select(x => x.FullPath));
        ChutzpahTracer.TraceInformation("Building test context for {0}", pathString);

        try
        {
            if (cancellationSource.IsCancellationRequested) return;

            TestContext testContext;

            // BUG FIX: the captured counter is shared by all Parallel.ForEach workers, so a
            // plain resultCount++ races and can lose increments, letting the FileSearchLimit
            // check below be missed. Interlocked makes the increment atomic and returns the
            // post-increment value for a consistent comparison.
            var currentCount = Interlocked.Increment(ref resultCount);

            if (testContextBuilder.TryBuildContext(testFiles, options, out testContext))
            {
                testContexts.Add(testContext);
            }
            else
            {
                ChutzpahTracer.TraceWarning("Unable to build test context for {0}", pathString);
            }

            // Limit the number of files we can scan to attempt to build a context for
            // This is important in the case of folder scanning where many JS files may not be
            // test files.
            if (currentCount >= options.FileSearchLimit)
            {
                ChutzpahTracer.TraceError("File search limit hit!!!");
                cancellationSource.Cancel();
            }
        }
        catch (Exception e)
        {
            var error = new TestError
            {
                InputTestFile = testFiles.Select(x => x.FullPath).FirstOrDefault(),
                Message = e.ToString()
            };

            overallSummary.Errors.Add(error);
            callback.FileError(error);

            ChutzpahTracer.TraceError(e, "Error during building test context for {0}", pathString);
        }
        finally
        {
            ChutzpahTracer.TraceInformation("Finished building test context for {0}", pathString);
        }
    });
}
/// <summary>
/// Executes every built test context in parallel. For each context a test harness is
/// generated and then launched according to <c>options.TestLaunchMode</c>: opened in a full
/// browser, run in a headless browser (collecting summaries), or handed to a custom
/// launcher. After the parallel run, generated harness files are cleaned up unless debug
/// mode or a browser/custom launch (which needs the files to stay on disk) is active.
/// </summary>
/// <param name="options">Run options including launch mode and browser settings.</param>
/// <param name="testExecutionMode">Execution mode, used for tracing only in this method.</param>
/// <param name="callback">Notified of per-file errors.</param>
/// <param name="testContexts">Contexts to execute.</param>
/// <param name="parallelOptions">Degree-of-parallelism settings for the loop.</param>
/// <param name="headlessBrowserPath">Path to the headless browser executable.</param>
/// <param name="testFileSummaries">Receives a summary per test file in headless mode.</param>
/// <param name="overallSummary">Accumulates errors across the run.</param>
private void ExecuteTestContexts(
    TestOptions options,
    TestExecutionMode testExecutionMode,
    ITestMethodRunnerCallback callback,
    ConcurrentBag<TestContext> testContexts,
    ParallelOptions parallelOptions,
    string headlessBrowserPath,
    ConcurrentQueue<TestFileSummary> testFileSummaries,
    TestCaseSummary overallSummary)
{
    Parallel.ForEach(
        testContexts,
        parallelOptions,
        testContext =>
        {
            ChutzpahTracer.TraceInformation("Start test run for {0} in {1} mode", testContext.FirstInputTestFile, testExecutionMode);

            try
            {
                // Generate the HTML harness file the browser will load.
                testHarnessBuilder.CreateTestHarness(testContext, options);

                if (options.TestLaunchMode == TestLaunchMode.FullBrowser)
                {
                    ChutzpahTracer.TraceInformation(
                        "Launching test harness '{0}' for file '{1}' in a browser",
                        testContext.TestHarnessPath,
                        testContext.FirstInputTestFile);

                    // Allow override from command line.
                    var browserArgs = testContext.TestFileSettings.BrowserArguments;
                    if (!string.IsNullOrWhiteSpace(options.BrowserArgs))
                    {
                        // Keyed by the browser executable name so the right arguments reach the right browser.
                        var path = BrowserPathHelper.GetBrowserPath(options.BrowserName);
                        browserArgs = new Dictionary<string, string>
                        {
                            { Path.GetFileNameWithoutExtension(path), options.BrowserArgs }
                        };
                    }

                    process.LaunchFileInBrowser(testContext.TestHarnessPath, options.BrowserName, browserArgs);
                }
                else if (options.TestLaunchMode == TestLaunchMode.HeadlessBrowser)
                {
                    ChutzpahTracer.TraceInformation(
                        "Invoking headless browser on test harness '{0}' for file '{1}'",
                        testContext.TestHarnessPath,
                        testContext.FirstInputTestFile);

                    // Run the harness headlessly; one summary is produced per test file in the context.
                    var testSummaries = InvokeTestRunner(
                        headlessBrowserPath,
                        options,
                        testContext,
                        testExecutionMode,
                        callback);

                    foreach (var testSummary in testSummaries)
                    {
                        ChutzpahTracer.TraceInformation(
                            "Test harness '{0}' for file '{1}' finished with {2} passed, {3} failed and {4} errors",
                            testContext.TestHarnessPath,
                            testSummary.Path,
                            testSummary.PassedCount,
                            testSummary.FailedCount,
                            testSummary.Errors.Count);

                        ChutzpahTracer.TraceInformation(
                            "Finished running headless browser on test harness '{0}' for file '{1}'",
                            testContext.TestHarnessPath,
                            testSummary.Path);

                        testFileSummaries.Enqueue(testSummary);
                    }
                }
                else if (options.TestLaunchMode == TestLaunchMode.Custom)
                {
                    if (options.CustomTestLauncher == null)
                    {
                        throw new ArgumentNullException("TestOptions.CustomTestLauncher");
                    }

                    ChutzpahTracer.TraceInformation(
                        "Launching custom test on test harness '{0}' for file '{1}'",
                        testContext.TestHarnessPath,
                        testContext.FirstInputTestFile);

                    options.CustomTestLauncher.LaunchTest(testContext);
                }
                else
                {
                    // All launch modes should be handled above; reaching here is a programming error.
                    Debug.Assert(false);
                }
            }
            catch (Exception e)
            {
                var error = new TestError
                {
                    InputTestFile = testContext.InputTestFiles.FirstOrDefault(),
                    Message = e.ToString()
                };

                overallSummary.Errors.Add(error);
                callback.FileError(error);

                ChutzpahTracer.TraceError(e, "Error during test execution of {0}", testContext.FirstInputTestFile);
            }
            finally
            {
                ChutzpahTracer.TraceInformation("Finished test run for {0} in {1} mode", testContext.FirstInputTestFile, testExecutionMode);
            }
        });

    // Clean up test context
    foreach (var testContext in testContexts)
    {
        // Don't clean up context if in debug mode
        if (!m_debugEnabled
            && options.TestLaunchMode != TestLaunchMode.FullBrowser
            && options.TestLaunchMode != TestLaunchMode.Custom)
        {
            try
            {
                ChutzpahTracer.TraceInformation("Cleaning up test context for {0}", testContext.FirstInputTestFile);
                testContextBuilder.CleanupContext(testContext);
            }
            catch (Exception e)
            {
                // Cleanup failures are traced but never fail the run.
                ChutzpahTracer.TraceError(e, "Error cleaning up test context for {0}", testContext.FirstInputTestFile);
            }
        }
    }
}
/// <summary>
/// Forwards an error event received from the test runner to the callback and records it
/// on the file's test summary.
/// </summary>
/// <param name="callback">Callback notified of the file error.</param>
/// <param name="testFileContext">Context whose referenced file path is stamped onto the error.</param>
/// <param name="jsRunnerOutput">The runner output; must be a <see cref="JsError"/>.</param>
private void FireErrorOutput(ITestMethodRunnerCallback callback, StreamingTestFileContext testFileContext, JsRunnerOutput jsRunnerOutput)
{
    // BUG FIX: the original used "as" with no null check, so a non-JsError argument would
    // surface as a confusing NullReferenceException. A direct cast fails fast with a clear
    // InvalidCastException at the point of the mistake.
    var error = (JsError)jsRunnerOutput;
    error.Error.InputTestFile = testFileContext.ReferencedFile.Path;

    callback.FileError(error.Error);
    testFileContext.TestFileSummary.Errors.Add(error.Error);

    // BUG FIX: corrected the misspelled log message ("Eror recieved").
    ChutzpahTracer.TraceError("Error received from Phantom {0}", error.Error.Message);
}
/// <summary>
/// Runs the tests found at the given path using default test options.
/// </summary>
/// <param name="testPath">Path of the test file or folder to run.</param>
/// <param name="callback">Optional callback for run status notifications.</param>
/// <returns>The summary of the test run.</returns>
public TestCaseSummary RunTests(string testPath, ITestMethodRunnerCallback callback = null)
{
    var defaultOptions = new TestOptions();
    return RunTests(testPath, defaultOptions, callback);
}
/// <summary>
/// Runs the specified tests in the given type, calling the callback as appropriate.
/// This override point exists primarily for unit testing purposes.
/// </summary>
/// <param name="methods">The test methods to run</param>
/// <param name="callback">The run status information callback.</param>
/// <returns>The resulting class XML node as a string, or an empty string on failure.</returns>
protected virtual string RunTests(List<string> methods, ITestMethodRunnerCallback callback)
{
    IRunnerLogger dispatcher = new TestClassCallbackDispatcher(this, callback);
    IExecutorWrapper executor = TestAssembly.ExecutorWrapper;

    try
    {
        XmlNode resultNode = executor.RunTests(TypeName, methods, node => XmlLoggerAdapter.LogNode(node, dispatcher));
        return resultNode.OuterXml;
    }
    catch (Exception ex)
    {
        // Report the failure through the logger and fall back to an empty result.
        dispatcher.ExceptionThrown(executor.AssemblyFilename, ex);
        return String.Empty;
    }
}
/// <summary>
/// Inspects the exit code of the headless test process and, for abnormal exits, records
/// a <see cref="TestError"/> and notifies the callback. Normal exits (all passed or some
/// failed) produce no error.
/// </summary>
/// <param name="exitCode">Raw process exit code, interpreted as <see cref="TestProcessExitCode"/>.</param>
/// <param name="inputTestFile">Test file the process was running, stamped onto any error.</param>
/// <param name="errors">Collection that receives any generated error.</param>
/// <param name="callback">Callback notified of any generated error.</param>
private static void HandleTestProcessExitCode(int exitCode, string inputTestFile, IList<TestError> errors, ITestMethodRunnerCallback callback)
{
    var code = (TestProcessExitCode)exitCode;

    // Both of these represent a normally-completed run; nothing to report.
    if (code == TestProcessExitCode.AllPassed || code == TestProcessExitCode.SomeFailed)
    {
        return;
    }

    var errorMessage = code == TestProcessExitCode.Timeout
        ? "Timeout occurred when executing test file"
        : "Unknown error occurred when executing test file. Received exit code of " + exitCode;

    if (!string.IsNullOrEmpty(errorMessage))
    {
        var error = new TestError
        {
            InputTestFile = inputTestFile,
            Message = errorMessage
        };

        errors.Add(error);
        callback.FileError(error);

        ChutzpahTracer.TraceError("Headless browser returned with an error: {0}", errorMessage);
    }
}
/// <summary>
/// Runs the tests found at the given path with the supplied options.
/// </summary>
/// <param name="testPath">Path of the test file or folder to run.</param>
/// <param name="options">Options controlling the test run.</param>
/// <param name="callback">Optional callback for run status notifications.</param>
/// <returns>The summary of the test run.</returns>
public TestCaseSummary RunTests(string testPath, TestOptions options, ITestMethodRunnerCallback callback = null)
{
    var testPaths = new[] { testPath };
    return RunTests(testPaths, options, callback);
}