// Handles events received from the test-runner connection and translates them
// into VSTest framework notifications, accumulating stdout/stderr between a
// test's StartEvent and its ResultEvent.
private void ConnectionReceivedEvent(object sender, EventReceivedEventArgs e) {
    switch (e.Name) {
        case TP.ResultEvent.Name:
            var result = (TP.ResultEvent)e.Event;
            // Map the runner's string outcome onto the VSTest enum; anything
            // unrecognized stays TestOutcome.None.
            TestOutcome outcome = TestOutcome.None;
            switch (result.outcome) {
                case "passed": outcome = TestOutcome.Passed; break;
                case "failed": outcome = TestOutcome.Failed; break;
                case "skipped": outcome = TestOutcome.Skipped; break;
            }
            // NOTE(review): _curTestResult may still be null here if the
            // preceding StartEvent did not match a discovered test — confirm
            // RecordEnd tolerates a null result.
            RecordEnd(
                _frameworkHandle,
                _curTestResult,
                _stdOut.ToString(),
                _stdErr.ToString(),
                outcome,
                result
            );
            // Reset captured output so it does not bleed into the next test.
            _stdOut.Clear();
            _stdErr.Clear();
            break;
        case TP.StartEvent.Name:
            var start = (TP.StartEvent)e.Event;
            // Create the TestResult object right away, so that
            // StartTime is initialized correctly.
            _curTestResult = null;
            foreach (var test in GetTestCases()) {
                if (test.Key == start.test) {
                    _curTestResult = new TestResult(test.Value);
                    break;
                }
            }
            if (_curTestResult != null) {
                _frameworkHandle.RecordStart(_curTestResult.TestCase);
            } else {
                // The runner reported a test that discovery never produced.
                Warning(Strings.Test_UnexpectedResult.FormatUI(start.classname, start.method));
            }
            break;
        case TP.StdErrEvent.Name:
            var err = (TP.StdErrEvent)e.Event;
            _stdErr.Append(err.content);
            break;
        case TP.StdOutEvent.Name:
            var outp = (TP.StdOutEvent)e.Event;
            _stdOut.Append(outp.content);
            break;
        case TP.DoneEvent.Name:
            // Signal the waiting executor that the run has finished.
            _done.Set();
            break;
    }
}
/// <summary>
/// Reports a passing test case to the framework: start, end (Passed), result.
/// </summary>
/// <param name="result">The passing result whose case name identifies the test.</param>
public void CasePassed(PassResult result) {
    var vsTestCase = new TestCase(result.Case.Name, new Uri(Constants.EXECUTOR_URI_STRING), source);
    frameworkHandle.RecordStart(vsTestCase);

    var vsResult = new TestResult(vsTestCase) { Outcome = TestOutcome.Passed };
    frameworkHandle.RecordEnd(vsTestCase, TestOutcome.Passed);
    frameworkHandle.RecordResult(vsResult);
}
/// <summary>
/// Runs the given test cases through the QML test runner and parses its XML
/// output into framework results.
/// </summary>
/// <param name="qmlTestRunner">Runner used to execute the tests.</param>
/// <param name="source">Path to the QML test source passed via -input.</param>
/// <param name="testCases">Tests to execute.</param>
/// <param name="frameworkHandle">Handle used to record starts/results and messages.</param>
/// <param name="context">Discovery context forwarded to the runner.</param>
internal static void RunTests(
    IQmlTestRunner qmlTestRunner,
    string source,
    IEnumerable<TestCase> testCases,
    IFrameworkHandle frameworkHandle,
    IDiscoveryContext context)
{
    try {
        // FIX: materialize once — the incoming IEnumerable was previously
        // enumerated three times (RecordStart loop, ToDictionary, Select),
        // which re-runs any deferred query each time.
        var testCaseList = testCases.ToList();
        foreach (TestCase testCase in testCaseList) {
            frameworkHandle.RecordStart(testCase);
        }
        Dictionary<string, TestCase> dict = testCaseList.ToDictionary(tc => tc.FullyQualifiedName);
        // Quote each function name so names with spaces survive the command line.
        string functions = String.Join(" ", testCaseList.Select(tc => "\"" + tc.FullyQualifiedName + "\""));
        string arguments = "-xml -input " + source + " " + functions;
        QmlTestRunnerResult result = qmlTestRunner.Execute(arguments, context);
        ParseQmlTestRunnerXmlOutput(frameworkHandle, dict, result.StandardOutput);
    } catch (Exception ex) {
        frameworkHandle.SendMessage(TestMessageLevel.Error, ex.StackTrace);
    }
}
// Parses one line emitted by the JS test runner and dispatches it as a
// start/result/suite-end notification. Non-JSON lines are expected and ignored.
private void ProcessTestRunnerEmit(string line) {
    try {
        TestEvent testEvent = JsonConvert.DeserializeObject<TestEvent>(line);
        // FIX: single lookup. The original used Where(...) then .Count() > 0
        // and repeated .First() calls, enumerating the deferred query several
        // times per line.
        var test = _currentTests.FirstOrDefault(n => n.DisplayName == testEvent.title);
        if (test != null) {
            if (testEvent.type == "test start") {
                _currentResult = new TestResult(test);
                _currentResult.StartTime = DateTimeOffset.Now;
                _frameworkHandle.RecordStart(test);
            } else if (testEvent.type == "result") {
                RecordEnd(_frameworkHandle, test, _currentResult, testEvent.result);
            }
        } else if (testEvent.type == "suite end") {
            _currentResultObject = testEvent.result;
        }
    } catch (JsonReaderException) {
        // Often lines emitted while running tests are not test results, and thus will fail to parse above
    }
}
/// <summary>
/// Runs the given test cases, honoring a FullyQualifiedName test-case filter
/// from the run context, and reports each result to the framework.
/// </summary>
/// <param name="tests">Tests to be run.</param>
/// <param name="runContext">Context supplying run settings and the filter.</param>
/// <param name="frameworkHandle">Handle used to record starts/ends/results.</param>
public void RunTests(IEnumerable<TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    // more on filtering
    // https://github.com/nunit/nunit3-vs-adapter/blob/master/src/NUnitTestAdapter/VsTestFilter.cs
    List<string> supportedProperties = new List<string>();
    supportedProperties.Add("FullyQualifiedName");
    ITestCaseFilterExpression fe = runContext.GetTestCaseFilter(supportedProperties, PropertyProvider);
    log.Debug("Run settings:\n" + runContext.RunSettings.SettingsXml);
    log.Debug("RunTests from Test Cases");
    foreach (TestCase tc in tests) {
        // A null filter expression means "run everything".
        if (fe == null || fe.MatchTestCase(tc, p => PropertyValueProvider(tc, p))) {
            log.Debug("Run test case: " + tc.FullyQualifiedName + " / " + tc.Id);
            frameworkHandle.RecordStart(tc);
            // FIX: use a monotonic Stopwatch for the duration. DateTime.Now
            // can jump (DST, NTP sync) and produce negative/garbage durations.
            var stopwatch = System.Diagnostics.Stopwatch.StartNew();
            TestResult tr = runner.RunTest(tc, runContext);
            stopwatch.Stop();
            tr.Duration = stopwatch.Elapsed;
            frameworkHandle.RecordEnd(tc, tr.Outcome);
            frameworkHandle.RecordResult(tr);
        } else {
            log.Debug("Test case filtered out: " + tc.FullyQualifiedName + " / " + tc.Id);
        }
    }
}
/// <summary>
/// Executes each test through a fresh kernel engine instance and reports the
/// outcome (plus any engine output messages) to the framework.
/// </summary>
/// <param name="tests">Tests to be run.</param>
/// <param name="runContext">Context used to configure each engine.</param>
/// <param name="frameworkHandle">Handle used to record starts/ends/results.</param>
public void RunTests(
    IEnumerable<TestCase> tests,
    IRunContext runContext,
    IFrameworkHandle frameworkHandle)
{
    _cancellationTokenSource = new CancellationTokenSource();
    foreach (var test in tests)
    {
        var configuration = new EngineConfiguration(new string[] { test.Source }, runContext);
        var testEngine = new Engine(configuration);
        var outputHandler = new TestAdapterOutputHandler(frameworkHandle);
        testEngine.SetOutputHandler(outputHandler);
        var testResult = new TestResult(test);
        frameworkHandle.RecordStart(test);
        var kernelTestResult = testEngine.Execute(_cancellationTokenSource.Token).KernelTestResults[0];
        testResult.Outcome = kernelTestResult.Result ? TestOutcome.Passed : TestOutcome.Failed;
        // FIX: the original built a local Collection<TestResultMessage> that
        // was never attached to the result, so engine output was silently
        // dropped. Attach the messages directly to the TestResult instead.
        foreach (var message in outputHandler.Messages)
        {
            testResult.Messages.Add(new TestResultMessage(String.Empty, message));
        }
        frameworkHandle.RecordEnd(test, testResult.Outcome);
        frameworkHandle.RecordResult(testResult);
    }
}
/// <summary>Notifies the framework that the given test has begun executing.</summary>
/// <param name="test">The test that is starting.</param>
public override void TestStarted(TestCase test) {
    // Convert to the VSTest representation before recording the start.
    frameworkHandle.RecordStart(test.ToVsTestCase());
}
/// <summary>Records the start of every test case in the batch.</summary>
/// <param name="testCases">The tests about to run.</param>
public void ReportTestsStarted(IEnumerable<TestCase> testCases) {
    foreach (var tc in testCases) {
        _frameworkHandle.RecordStart(tc.ToVsTestCase());
    }
}
/// <summary>
/// Records the start of every test case carried by the argument.
/// </summary>
/// <param name="argument">Argument holding the test cases to start.</param>
/// <param name="frameworkHandle">Framework handle that receives the notifications.</param>
internal static void RecordStartTests(TestCaseArgument argument, IFrameworkHandle frameworkHandle) {
    foreach (var test in argument.TestCases) {
        frameworkHandle.RecordStart(test);
    }
}
// Executes one test case under the concurrency semaphore: records start,
// runs the steps, then records end immediately followed by the result.
private async Task Run(TestCase testCase, DiscoveredTestData testData, TestRunContext testRunContext, StepBinder stepBinder, IFrameworkHandle frameworkHandle, SemaphoreSlim simultaneousTestCasesSemaphore) {
    // Throttle: only a limited number of test cases may run concurrently.
    await simultaneousTestCasesSemaphore.WaitAsync().ConfigureAwait(false);
    try {
        frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Starting test \"{testCase.DisplayName}\"");
        frameworkHandle.RecordStart(testCase);

        var stepsExecutor = stepsExecutorFactory(stepBinder);
        var result = await stepsExecutor
            .Execute(testCase, testData, testRunContext, frameworkHandle)
            .ConfigureAwait(false);

        // https://github.com/Microsoft/vstest/blob/master/src/Microsoft.TestPlatform.CrossPlatEngine/Adapter/TestExecutionRecorder.cs <- comments here seem to suggest that we need to call RecordEnd just before RecordResult
        frameworkHandle.RecordEnd(testCase, result.Outcome);
        frameworkHandle.RecordResult(result);

        frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Finished test \"{testCase.DisplayName}\"");
    } finally {
        simultaneousTestCasesSemaphore.Release();
    }
}
/// <summary>
/// Runs the tests, loading adapter settings from the run-settings XML first.
/// </summary>
/// <param name="tests">Tests to be run.</param>
/// <param name="runContext">Context to use when executing the tests.</param>
/// <param name="frameworkHandle">Handle to the framework to record results and to do framework operations.</param>
public void RunTests(IEnumerable<TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    //if (Debugger.IsAttached) Debugger.Break();
    //else Debugger.Launch();
    try {
        var parsed = XElement.Parse(runContext.RunSettings.SettingsXml);
        runContext.RunSettings.GetSettings(AppConfig.Name).Load(parsed.Element(AppConfig.Name).CreateReader());
    } catch (Exception ex) {
        // Settings are optional; log and continue with defaults.
        Console.WriteLine($"Framework: Error while loading SettingsXml - {ex.Message} {ex.Data}");
    }
    m_cancelled = false;
    try {
        foreach (TestCase test in tests) {
            if (m_cancelled) {
                break;
            }
            frameworkHandle.RecordStart(test);
            frameworkHandle.SendMessage(TestMessageLevel.Informational, "Framework: Starting external test for " + test.DisplayName);
            var testOutcome = RunExternalTest(test, runContext, frameworkHandle, test);
            // FIX: RecordEnd must be called just before RecordResult (per the
            // vstest TestExecutionRecorder contract); the original recorded the
            // result first and ended the test afterwards.
            frameworkHandle.RecordEnd(test, testOutcome.Outcome);
            frameworkHandle.RecordResult(testOutcome);
            frameworkHandle.SendMessage(TestMessageLevel.Informational, "Framework: Test result:" + testOutcome.Outcome.ToString());
        }
    } catch (Exception e) {
        frameworkHandle.SendMessage(TestMessageLevel.Error, "Framework: Exception during test execution: " + e.Message);
        frameworkHandle.SendMessage(TestMessageLevel.Error, "Framework: " + e.StackTrace);
    }
}
/// <summary>
/// Runs the tests.
/// </summary>
/// <param name="tests">Tests to be run.</param>
/// <param name="runContext">Context to use when executing the tests.</param>
/// <param name="frameworkHandle">Handle to the framework to record results and to do framework operations.</param>
public void RunTests(IEnumerable<TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    m_cancelled = false;
    try {
        foreach (TestCase test in tests) {
            if (m_cancelled) {
                break;
            }
            frameworkHandle.RecordStart(test);
            frameworkHandle.SendMessage(TestMessageLevel.Informational, "Starting external test for " + test.DisplayName);
            var testOutcome = RunExternalTest(test, runContext, frameworkHandle, test);
            // FIX: the original never called RecordEnd, leaving every started
            // test "open" from the framework's perspective. RecordEnd must be
            // called just before RecordResult (vstest TestExecutionRecorder).
            frameworkHandle.RecordEnd(test, testOutcome.Outcome);
            frameworkHandle.RecordResult(testOutcome);
            frameworkHandle.SendMessage(TestMessageLevel.Informational, "Test result:" + testOutcome.Outcome.ToString());
        }
    } catch (Exception e) {
        frameworkHandle.SendMessage(TestMessageLevel.Error, "Exception during test execution: " + e.Message);
    }
}
// Runs all tests discovered in the given source via the external executor and
// reports each converted result to the framework.
private void RunTests(string source, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    foreach (var result in ExternalTestExecutor.GetTestResults(source, null).Select(c => CreateTestResult(source, c))) {
        frameworkHandle.RecordStart(result.TestCase);
        // FIX: RecordEnd must be called just before RecordResult (vstest
        // TestExecutionRecorder contract); the original recorded the result
        // first and the end afterwards.
        frameworkHandle.RecordEnd(result.TestCase, result.Outcome);
        frameworkHandle.RecordResult(result);
    }
}
// Runs a single test method synchronously and converts the runner's result
// (or a thrown exception) into a framework TestResult.
private static void RunTestMethod(IFrameworkHandle frameworkHandle, TestCase testCase, TestRunner runner, TestMethod method) {
    frameworkHandle.RecordStart(testCase);
    try {
        // NOTE(review): sync-over-async (.GetAwaiter().GetResult()) — the
        // adapter entry point is synchronous, so this is presumably deliberate;
        // confirm the runner has no sync-context deadlock risk.
        var result = runner.Run(method.Owner, method).GetAwaiter().GetResult();
        if (result == null) {
            frameworkHandle.SendMessage(TestMessageLevel.Warning, "Got no result");
            return;
        }
        var msResult = new TestResult(testCase) {
            StartTime = result.StartedAtUtc,
            EndTime = result.EndedAtUtc,
            // Underscore test names read as sentences in the UI.
            DisplayName = method.Name.Replace("_", " "),
            Duration = result.Elapsed,
            ErrorMessage = result.Exception?.Message,
            ErrorStackTrace = result.Exception?.StackTrace
        };
        // FIX: removed the redundant `Outcome = TestOutcome.Passed` from the
        // initializer — it was unconditionally overwritten below.
        if (result.IsIgnored) {
            msResult.Outcome = TestOutcome.Skipped;
        } else if (result.IsSuccess) {
            msResult.Outcome = TestOutcome.Passed;
        } else {
            msResult.Outcome = TestOutcome.Failed;
        }
        frameworkHandle.RecordEnd(testCase, msResult.Outcome);
        frameworkHandle.RecordResult(msResult);
    } catch (Exception ex) {
        // Surface runner crashes as a failed result instead of aborting the run.
        frameworkHandle.RecordEnd(testCase, TestOutcome.Failed);
        frameworkHandle.RecordResult(new TestResult(testCase) {
            DisplayName = method.Name.Replace("_", " "),
            Outcome = TestOutcome.Failed,
            ErrorMessage = ex.Message,
            ErrorStackTrace = ex.StackTrace
        });
    }
}
// Initializes the executor once, then runs each test in order until cancelled,
// recording a start and a result per test.
private void RunTests(IEnumerable<TestCase> tests) {
    _executor.InitTestRuns();
    foreach (var testCase in tests) {
        if (_cancelled) {
            break;
        }
        _frameworkHandle.RecordStart(testCase);
        var testResult = RunTest(testCase);
        _frameworkHandle.RecordResult(testResult);
    }
}
// Runs the given tests through Karma, consolidates per-browser results, and
// reports one framework result per browser run plus an overall end per test.
private void RunTests(string source, IEnumerable<TestCase> tests, KarmaTestResults.Karma karma, IFrameworkHandle frameworkHandle, IKarmaLogger logger) {
    var vsConfig = CreateVsConfig(tests, karma);
    var runKarma = Run(source, vsConfig, logger);
    if (runKarma == null) {
        logger.Error("No karma");
        return;
    }
    var consolidatedResults = runKarma.ConsolidateResults(logger);
    // Union of names from both sides so we see tests with no result and
    // results with no matching test.
    var testNames = tests.Select(t => t.DisplayName).Union(consolidatedResults.Select(r => r.Test.DisplayName));
    // Full outer join on display name. NOTE(review): DefaultIfEmpty can yield
    // a null `test` and/or null `result` — presumably TestCaseResult fills in
    // the missing side from `source`; confirm, otherwise result.Test (used in
    // RecordStart below) or result.Result.Results can throw NullReference.
    var results = from displayName in testNames
                  join test in tests on displayName equals test.DisplayName into test_
                  from test in test_.DefaultIfEmpty()
                  join result in consolidatedResults on displayName equals result.Test.DisplayName into result_
                  from result in result_.DefaultIfEmpty()
                  select new TestCaseResult(test, result, source);
    foreach (var result in results) {
        frameworkHandle.RecordStart(result.Test);
        // One framework result per browser the test ran in.
        foreach (var res in result.Result.Results) {
            frameworkHandle.RecordResult(new TestResult(result.Test) {
                ComputerName = Environment.MachineName,
                DisplayName = res.Browser.Name,
                Outcome = res.Outcome,
                Duration = res.Time,
                ErrorMessage = res.Message
            });
        }
        frameworkHandle.RecordEnd(result.Test, result.Result.Outcome);
    }
}
// Runs a single mapped test case: announces it, executes its steps, then
// records end immediately followed by the result.
private async Task RunMappedTest(TestCase testCase, DiscoveredTestData testData, TestRunContext testRunContext, StepBinder stepBinder, IFrameworkHandle frameworkHandle) {
    frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Starting test \"{testCase.DisplayName}\"");
    frameworkHandle.RecordStart(testCase);

    var stepsExecutor = new StepsExecutor(stepBinder);

    // Deliberately resume on same context to try to avoid Visual Studio Test Explorer "bug" (?) that doesn't
    // always detect the end of the test run when multiple tests are run in parallel.
    var result = await stepsExecutor
        .Execute(testCase, testData, testRunContext, frameworkHandle)
        .ConfigureAwait(true);

    // https://github.com/Microsoft/vstest/blob/master/src/Microsoft.TestPlatform.CrossPlatEngine/Adapter/TestExecutionRecorder.cs <- comments here seem to suggest that we need to call RecordEnd just before RecordResult
    frameworkHandle.RecordEnd(testCase, result.Outcome);
    frameworkHandle.RecordResult(result);

    frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Finished test \"{testCase.DisplayName}\"");
}
// Prepares a Gauge execution: orders tests per spec file, records each test as
// started, and builds the scenario list handed to the gauge process.
public GaugeRunner(IEnumerable<TestCase> tests, bool isBeingDebugged, bool isParallelRun, IFrameworkHandle frameworkHandle) {
    // Group by spec file, then run in line-number order within each spec.
    _tests = tests
        .GroupBy(t => t.Source)
        .SelectMany(spec => spec.OrderBy(t => t.LineNumber))
        .ToList();
    _pendingTests = new List<TestCase>(_tests);
    _isBeingDebugged = isBeingDebugged;
    _frameworkHandle = frameworkHandle;

    var firstTest = _tests.First();
    var projectRoot = firstTest.GetPropertyValue(TestDiscoverer.GaugeProjectRoot, string.Empty);
    var gaugeCustomBuildPath = firstTest.GetPropertyValue(TestDiscoverer.GaugeCustomBuildPath, string.Empty);

    var scenarios = new List<string>();
    foreach (var test in _tests) {
        _frameworkHandle.RecordStart(test);
        _frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Executing Test: {test}");
        scenarios.Add($"\"{test.Source}:{test.LineNumber}\"");
    }

    _gaugeProcess = GaugeProcess.ForExecution(projectRoot, scenarios, gaugeCustomBuildPath, _isBeingDebugged, isParallelRun);
    _gaugeProcess.OutputDataReceived += OnOutputDataReceived;
}
/// <summary>
/// Runs the tests.
/// </summary>
/// <param name="tests">Tests to be run.</param>
/// <param name="runContext">Context to use when executing the tests.</param>
/// <param name="frameworkHandle">Handle to the framework to record results and to do framework operations.</param>
public void RunTests(IEnumerable<TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    m_cancelled = false;
    try {
        foreach (TestCase test in tests) {
            if (m_cancelled) {
                break;
            }
            frameworkHandle.RecordStart(test);
            frameworkHandle.SendMessage(TestMessageLevel.Informational, "Starting external test for " + test.DisplayName);
            var testOutcome = RunExternalTest(test, runContext, frameworkHandle, test);
            // FIX: the original never called RecordEnd for a started test.
            // RecordEnd must be called just before RecordResult (vstest
            // TestExecutionRecorder contract).
            frameworkHandle.RecordEnd(test, testOutcome.Outcome);
            frameworkHandle.RecordResult(testOutcome);
            frameworkHandle.SendMessage(TestMessageLevel.Informational, "Test result:" + testOutcome.Outcome.ToString());
        }
    } catch (Exception e) {
        frameworkHandle.SendMessage(TestMessageLevel.Error, "Exception during test execution: " + e.Message);
    }
}
// Runs the attribute-driven lifecycle: [BeforeAll] once per test-class
// instance, [BeforeEach]/[AfterEach] around every test, [AfterAll] at the end.
public void RunTests(IEnumerable<TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    var testExecutor = new TestRunner();
    var testCases = tests as IList<TestCase> ?? tests.ToList();

    var allInstances = testCases.Select(tc => tc.LocalExtensionData).ToList();
    // One representative instance per distinct test-class type.
    var uniqueInstances = allInstances.DistinctBy(o => o.GetType().FullName).ToList();

    RunMethodsWithAttribute<BeforeAllAttribute>(uniqueInstances);
    foreach (var testCase in testCases) {
        var instance = testCase.LocalExtensionData;
        var singleInstance = new[] { instance };

        RunMethodsWithAttribute<BeforeEachAttribute>(singleInstance);
        var methodInfo = instance.GetType().GetMethod(testCase.FullyQualifiedName);
        frameworkHandle.RecordStart(testCase);
        var runnerContext = new TestRunnerContext(instance, methodInfo, testCase, frameworkHandle, _testingStopped);
        testExecutor.Run(runnerContext);
        RunMethodsWithAttribute<AfterEachAttribute>(singleInstance);
    }
    RunMethodsWithAttribute<AfterAllAttribute>(uniqueInstances);
}
/// <summary>
/// Runs each test via the in-process session, converting session messages and
/// final state into a framework TestResult.
/// </summary>
/// <param name="tests">Tests to be run.</param>
/// <param name="runContext">Run context (unused beyond the adapter contract).</param>
/// <param name="frameworkHandle">Handle used to record starts/ends/results.</param>
public void RunTests(IEnumerable<TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    foreach (var testCase in tests) {
        frameworkHandle.RecordStart(testCase);
        var startTime = DateTimeOffset.Now;
        var result = _session.Run(testCase.Id);
        var endTime = DateTimeOffset.Now;
        var testResult = new TestResult(testCase) {
            DisplayName = testCase.DisplayName,
            StartTime = startTime,
            Duration = endTime - startTime,
            EndTime = endTime,
            ComputerName = Environment.MachineName
        };
        var stackTrace = new StringBuilder();
        var errorMessage = new StringBuilder();
        // Route each session message to the matching VSTest message category;
        // exception text is aggregated into the result's error message.
        foreach (var message in result.Messages) {
            if (message.StackTrace != null) {
                stackTrace.AppendLine(message.StackTrace);
            }
            switch (message.Type) {
                case MessageType.State:
                    testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, message.Text));
                    break;
                case MessageType.StdOutput:
                    testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, message.Text));
                    break;
                case MessageType.StdError:
                    testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardErrorCategory, message.Text));
                    break;
                case MessageType.Exception:
                    errorMessage.AppendLine(message.Text);
                    break;
                case MessageType.Trace:
                    testResult.Messages.Add(new TestResultMessage(TestResultMessage.DebugTraceCategory, message.Text));
                    break;
                default:
                    testResult.Messages.Add(new TestResultMessage(TestResultMessage.AdditionalInfoCategory, message.Text));
                    break;
            }
        }
        testResult.ErrorStackTrace = stackTrace.ToString();
        testResult.ErrorMessage = errorMessage.ToString();
        // "Skiped" is the spelling declared by the external State enum.
        switch (result.State) {
            case State.Passed:
                testResult.Outcome = TestOutcome.Passed;
                break;
            case State.Failed:
                testResult.Outcome = TestOutcome.Failed;
                break;
            case State.Skiped:
                testResult.Outcome = TestOutcome.Skipped;
                break;
            case State.NotFound:
                testResult.Outcome = TestOutcome.NotFound;
                break;
            default:
                testResult.Outcome = TestOutcome.None;
                break;
        }
        // FIX: RecordEnd must be called just before RecordResult (vstest
        // TestExecutionRecorder contract); the original recorded the result
        // first and then ended the test.
        frameworkHandle.RecordEnd(testCase, testResult.Outcome);
        frameworkHandle.RecordResult(testResult);
        if (_canceled) {
            _canceled = false;
            break;
        }
    }
}
/// <summary>
/// Runs the tests.
/// </summary>
/// <param name="tests">The tests.</param>
/// <param name="runContext">The run context.</param>
/// <param name="frameworkHandle">The framework handle.</param>
public void RunTests(IEnumerable<TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
    foreach (var test in tests) {
        if (this.canceled) {
            return;
        }
        var testResult = new TestResult(test);
        // cucumber-js runs against a .feature copy of the source file; the
        // copy is deleted in the finally block below.
        var target = Path.ChangeExtension(test.Source, ".feature");
        try {
            frameworkHandle.RecordStart(test);
            File.Copy(test.Source, target);
            string projectDirectory = GetProjectDirectory(test.Source);
            string cucumberJsFilePath = this.GetCucumberJsFilePath(projectDirectory);
            string nodeCommandLineArguments = runContext.IsBeingDebugged
                ? $"--debug=5858 \"{cucumberJsFilePath}\" \"{target}:{test.LineNumber}\" -f json"
                : $"\"{cucumberJsFilePath}\" \"{target}:{test.LineNumber}\" -f json";
            Process process = this.StartProcess("node", nodeCommandLineArguments, workingDirectory: projectDirectory);
            if (runContext.IsBeingDebugged) {
                // NOTE(review): node is started with --debug=5858 but the
                // debugger attaches to port 5678 — confirm the mismatch is
                // intentional (e.g. DebugAttachToNode remaps internally).
                DteHelpers.DebugAttachToNode(process.Id, 5678);
            }
            string jsonResult = WaitForProcessToExitAndVerifyOutputIsValid(process, testResult);
            var results = JsonConvert.DeserializeObject<List<CucumberJsResult>>(jsonResult);
            var duration = 0L;
            List<string> testResultOutputMessages = new List<string>();
            List<string> testResultErrorMessages = new List<string>();
            List<string> testResultErrorStackTrace = new List<string>();
            TestOutcome testOutcome = TestOutcome.Passed;
            // Walk the cucumber JSON hierarchy: feature -> elements -> steps.
            // The first non-passing step decides the outcome and stops the walk.
            foreach (var feature in results) {
                testResultOutputMessages.Add($"{feature.Keyword}: {feature.Name}");
                foreach (var element in feature.Elements) {
                    testResultOutputMessages.Add($"{element.Keyword}: {element.Name}");
                    string description = element.Description;
                    if (!string.IsNullOrEmpty(description)) {
                        testResultOutputMessages.Add(description);
                    }
                    foreach (var step in element.Steps) {
                        string keyword = step.Keyword;
                        string name = step.Name;
                        var message = $"{keyword}{name}";
                        testResultOutputMessages.Add(message);
                        var stepResult = step.Result;
                        duration += stepResult.Duration;
                        string status = stepResult.Status;
                        if (status == "failed") {
                            testOutcome = TestOutcome.Failed;
                            string errorMessage = stepResult.ErrorMessage;
                            testResultErrorMessages.Add(errorMessage);
                            break;
                        } else if (((status == "undefined") || (status == "skipped")) && (testOutcome == TestOutcome.Passed)) {
                            // step was not found
                            testOutcome = TestOutcome.Skipped;
                            string errorMessage = $"Step definition '{keyword}{name}' not found.";
                            testResultErrorMessages.Add(errorMessage);
                            testResultErrorStackTrace.Add($"{feature.Uri}:{step.LineNumber}");
                            break;
                        }
                    }
                    if (testOutcome != TestOutcome.Passed) {
                        break;
                    }
                }
                if (testOutcome != TestOutcome.Passed) {
                    break;
                }
            }
            testResult.Duration = TimeSpan.FromTicks(duration);
            testResult.Outcome = testOutcome;
            if (testResultErrorMessages.Count > 0) {
                testResult.ErrorMessage = string.Join(Environment.NewLine, testResultErrorMessages);
            }
            if (testResultErrorStackTrace.Count > 0) {
                testResult.ErrorStackTrace = string.Join(Environment.NewLine, testResultErrorStackTrace);
            }
            if (testResultOutputMessages.Count > 0) {
                testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, string.Join(Environment.NewLine, testResultOutputMessages)));
                testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, Environment.NewLine));
            }
            // Errors are echoed to both the stdout and stderr categories.
            if (testResultErrorMessages.Count > 0) {
                testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, string.Join(Environment.NewLine, testResultErrorMessages)));
                testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardErrorCategory, string.Join(Environment.NewLine, testResultErrorMessages)));
            }
        } catch (Exception ex) {
            // Any failure (process start, JSON parse, IO) fails the test with
            // the exception details.
            testResult.Outcome = TestOutcome.Failed;
            testResult.ErrorMessage = ex.ToString();
            testResult.ErrorStackTrace = ex.StackTrace;
        } finally {
            File.Delete(target);
        }
        frameworkHandle.RecordResult(testResult);
    }
}
// Runs all tests sharing the first test's executable (source) as one combined
// Catch2 invocation. Tests that cannot be combined are run one-by-one, and
// tests from other executables are handled by a recursive call at the end.
private void RunTests_Combine(IEnumerable<TestCase> tests) {
    if (!tests.Any()) {
        return; // Sanity check
    }
    List<TestCase> groupedtests = new List<TestCase>();
    List<TestCase> singledtests = new List<TestCase>();
    List<TestCase> remainingtests = new List<TestCase>();
    Catch2Interface.TestCaseGroup testcasegroup = new Catch2Interface.TestCaseGroup();
    testcasegroup.Source = tests.First().Source;
    LogDebug(TestMessageLevel.Informational, $"Start Grouping tests for {testcasegroup.Source}");
    // Select tests with the same source
    foreach (var test in tests) {
        if (testcasegroup.Source != test.Source) {
            remainingtests.Add(test);
            continue;
        }
        if (Catch2Interface.Executor.CanExecuteCombined(test.DisplayName, SharedUtils.GetTags(test))) {
            LogDebug(TestMessageLevel.Informational, $"Add to group: {test.DisplayName}");
            testcasegroup.Names.Add(test.DisplayName);
            _frameworkHandle.RecordStart(test); // Indicate in the GUI test is running
            groupedtests.Add(test);
        } else {
            singledtests.Add(test);
        }
    }
    // Log sort result
    LogDebug(TestMessageLevel.Informational, $"Grouped/Singled/Remaining testcase count: {groupedtests.Count}/{singledtests.Count}/{remainingtests.Count}");
    // Check if source actually exists
    if (!File.Exists(testcasegroup.Source)) {
        LogVerbose(TestMessageLevel.Informational, $"Test executable not found: {testcasegroup.Source}");
        SkipTests(groupedtests);
        // NOTE(review): execution falls through and still runs the (missing)
        // executable below — confirm whether an early return/continue to the
        // singled/remaining handling was intended here.
    }
    // Run tests
    LogVerbose(TestMessageLevel.Informational, $"Run {testcasegroup.Names.Count} grouped testcases.");
    var testresults = _executor.Run(testcasegroup);
    if (!string.IsNullOrEmpty(_executor.Log)) {
        LogNormal(TestMessageLevel.Informational, $"Executor log:{Environment.NewLine}{_executor.Log}");
    }
    // Process results
    LogDebug(TestMessageLevel.Informational, $"Testcase result count: {testresults.TestResults.Count}");
    foreach (var test in groupedtests) {
        var testresult = testresults.FindTestResult(test.DisplayName);
        LogDebug(TestMessageLevel.Informational, $"Processed testcase: {test.DisplayName}");
        TestResult result = new TestResult(test);
        if (testresult == null) {
            // No combined result for this test: report None and retry it
            // individually via the singled list below.
            LogDebug(TestMessageLevel.Informational, $"Combined testcase result not found for: {test.DisplayName}");
            result.Outcome = TestOutcome.None;
            _frameworkHandle.RecordResult(result);
            singledtests.Add(test);
        } else {
            RecordTestResult(result, testresult);
        }
    }
    if (singledtests.Count > 0) {
        LogDebug(TestMessageLevel.Informational, $"Process singled tests (count: {singledtests.Count})");
        RunTests_Single(singledtests);
    }
    if (remainingtests.Count > 0) {
        // Recurse for tests belonging to a different executable.
        LogDebug(TestMessageLevel.Informational, $"Process remaining tests (count: {remainingtests.Count})");
        RunTests_Combine(remainingtests);
    }
}
// Runs one Python test case in an external interpreter process, optionally
// attaching the VS debugger, and records the outcome from the process exit
// code. NOTE(review): the method's closing brace is not visible in this chunk;
// code below is kept token-identical to the visible span.
private void RunTestCase(
    IFrameworkHandle frameworkHandle,
    IRunContext runContext,
    TestCase test,
    Dictionary<string, PythonProjectSettings> sourceToSettings
) {
    var testResult = new TestResult(test);
    frameworkHandle.RecordStart(test);
    testResult.StartTime = DateTimeOffset.Now;
    // Project settings are cached per source file (null is cached too, so a
    // failed load is not retried for every test in the same source).
    PythonProjectSettings settings;
    if (!sourceToSettings.TryGetValue(test.Source, out settings)) {
        sourceToSettings[test.Source] = settings = LoadProjectSettings(test.Source, _interpreterService);
    }
    if (settings == null) {
        frameworkHandle.SendMessage(
            TestMessageLevel.Error,
            "Unable to determine interpreter to use for " + test.Source);
        RecordEnd(
            frameworkHandle,
            test,
            testResult,
            null,
            "Unable to determine interpreter to use for " + test.Source,
            TestOutcome.Failed);
        return;
    }
    // Pick the debug engine combination; native debugging requires _app (VS).
    var debugMode = PythonDebugMode.None;
    if (runContext.IsBeingDebugged && _app != null) {
        debugMode = settings.EnableNativeCodeDebugging ? PythonDebugMode.PythonAndNative : PythonDebugMode.PythonOnly;
    }
    var testCase = new PythonTestCase(settings, test, debugMode);
    var dte = _app != null ? _app.GetDTE() : null;
    if (dte != null && debugMode != PythonDebugMode.None) {
        // Detach any existing debug sessions before attaching to the new process.
        dte.Debugger.DetachAll();
    }
    if (!File.Exists(settings.Factory.Configuration.InterpreterPath)) {
        frameworkHandle.SendMessage(TestMessageLevel.Error,
            "Interpreter path does not exist: " + settings.Factory.Configuration.InterpreterPath);
        return;
    }
    // Build the child-process environment: PYTHONPATH (honoring the user's
    // "clear global python path" VS option) plus the test case's own variables.
    var env = new Dictionary<string, string>();
    var pythonPathVar = settings.Factory.Configuration.PathEnvironmentVariable;
    var pythonPath = testCase.SearchPaths;
    if (!string.IsNullOrWhiteSpace(pythonPathVar)) {
        if (_app != null) {
            var settingsManager = SettingsManagerCreator.GetSettingsManager(dte);
            if (settingsManager != null) {
                var store = settingsManager.GetReadOnlySettingsStore(SettingsScope.UserSettings);
                if (store != null && store.CollectionExists(@"PythonTools\Options\General")) {
                    var settingStr = store.GetString(@"PythonTools\Options\General", "ClearGlobalPythonPath", "True");
                    bool settingBool;
                    if (bool.TryParse(settingStr, out settingBool) && !settingBool) {
                        // User opted to keep the machine-wide PYTHONPATH.
                        pythonPath += ";" + Environment.GetEnvironmentVariable(pythonPathVar);
                    }
                }
            }
        }
        env[pythonPathVar] = pythonPath;
    }
    foreach (var envVar in testCase.Environment) {
        env[envVar.Key] = envVar.Value;
    }
    using (var proc = ProcessOutput.Run(
        !settings.IsWindowsApplication ?
            settings.Factory.Configuration.InterpreterPath :
            settings.Factory.Configuration.WindowsInterpreterPath,
        testCase.Arguments,
        testCase.WorkingDirectory,
        env,
        false,
        null
    )) {
        bool killed = false;
#if DEBUG
        frameworkHandle.SendMessage(TestMessageLevel.Informational, "cd " + testCase.WorkingDirectory);
        frameworkHandle.SendMessage(TestMessageLevel.Informational, "set " + (pythonPathVar ?? "") + "=" + (pythonPath ?? ""));
        frameworkHandle.SendMessage(TestMessageLevel.Informational, proc.Arguments);
#endif
        // Give the process a moment to start before attempting to attach.
        proc.Wait(TimeSpan.FromMilliseconds(500));
        if (debugMode != PythonDebugMode.None) {
            if (proc.ExitCode.HasValue) {
                // Process has already exited
                frameworkHandle.SendMessage(TestMessageLevel.Error, "Failed to attach debugger because the process has already exited.");
                if (proc.StandardErrorLines.Any()) {
                    frameworkHandle.SendMessage(TestMessageLevel.Error, "Standard error from Python:");
                    foreach (var line in proc.StandardErrorLines) {
                        frameworkHandle.SendMessage(TestMessageLevel.Error, line);
                    }
                }
            }
            try {
                if (debugMode == PythonDebugMode.PythonOnly) {
                    // Python-only: attach via the remote debug port supplier,
                    // retrying until attach succeeds or the process exits.
                    string qualifierUri = string.Format("tcp://{0}@localhost:{1}", testCase.DebugSecret, testCase.DebugPort);
                    while (!_app.AttachToProcess(proc, PythonRemoteDebugPortSupplierUnsecuredId, qualifierUri)) {
                        if (proc.Wait(TimeSpan.FromMilliseconds(500))) {
                            break;
                        }
                    }
                } else {
                    // Mixed-mode: attach both the Python and native engines.
                    var engines = new[] { PythonDebugEngineGuid, VSConstants.DebugEnginesGuids.NativeOnly_guid };
                    while (!_app.AttachToProcess(proc, engines)) {
                        if (proc.Wait(TimeSpan.FromMilliseconds(500))) {
                            break;
                        }
                    }
                }
#if DEBUG
            } catch (COMException ex) {
                frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee.");
                frameworkHandle.SendMessage(TestMessageLevel.Error, ex.ToString());
                try {
                    proc.Kill();
                } catch (InvalidOperationException) {
                    // Process has already exited
                }
                killed = true;
            }
#else
            } catch (COMException) {
                frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee.");
                try {
                    proc.Kill();
                } catch (InvalidOperationException) {
                    // Process has already exited
                }
                killed = true;
            }
#endif
        }
        // https://pytools.codeplex.com/workitem/2290
        // Check that proc.WaitHandle was not null to avoid crashing if
        // a test fails to start running. We will report failure and
        // send the error message from stdout/stderr.
        var handles = new WaitHandle[] { _cancelRequested, proc.WaitHandle };
        if (handles[1] == null) {
            killed = true;
        }
        // Wait for either cancellation (index 0) or process exit (index 1).
        if (!killed && WaitHandle.WaitAny(handles) == 0) {
            try {
                proc.Kill();
            } catch (InvalidOperationException) {
                // Process has already exited
            }
            killed = true;
        } else {
            // Outcome is derived purely from the exit code.
            RecordEnd(frameworkHandle, test, testResult,
                string.Join(Environment.NewLine, proc.StandardOutputLines),
                string.Join(Environment.NewLine, proc.StandardErrorLines),
                (proc.ExitCode == 0 && !killed) ? TestOutcome.Passed : TestOutcome.Failed);
        }
    }
/// <summary>
/// Executes Catch2 test cases in combined (grouped) mode: tests sharing the
/// first test's source are run in a single test-executable invocation; the
/// rest are deferred to follow-up passes. Tests whose names cannot be passed
/// combined are run individually via RunTests_Single. Recurses for retry
/// tests (partial output after a crash) and for tests from other sources.
/// </summary>
/// <param name="tests">Tests to run; may span multiple source executables.</param>
private void RunTests_Combine(IEnumerable <TestCase> tests)
{
    if (!tests.Any())
    {
        return; // Sanity check
    }

    // Buckets for this pass:
    //   groupedtests   - same source, runnable combined (already RecordStart-ed)
    //   singledtests   - same source, must run in isolation
    //   remainingtests - different source, handled by a recursive call at the end
    //   retrytests     - combined results missing due to partial output (crash); re-run combined
    List <TestCase> groupedtests = new List <TestCase>();
    List <TestCase> singledtests = new List <TestCase>();
    List <TestCase> remainingtests = new List <TestCase>();
    List <TestCase> retrytests = new List <TestCase>();

    Catch2Interface.TestCaseGroup testcasegroup = new Catch2Interface.TestCaseGroup();
    testcasegroup.Source = tests.First().Source;

    LogDebug(TestMessageLevel.Informational, $"Start Grouping tests for {testcasegroup.Source}");

    // Select tests with the same source
    foreach (var test in tests)
    {
        if (testcasegroup.Source != test.Source)
        {
            remainingtests.Add(test);
            continue;
        }
        if (_executor.CanExecuteCombined(test.DisplayName, SharedUtils.GetTags(test)))
        {
            LogDebug(TestMessageLevel.Informational, $"Add to group: {test.DisplayName}");
            testcasegroup.Names.Add(test.DisplayName);
            _frameworkHandle.RecordStart(test); // Indicate in the GUI test is running
            groupedtests.Add(test);
        }
        else
        {
            singledtests.Add(test);
        }
    }

    // Log sort result
    LogDebug(TestMessageLevel.Informational, $"Grouped/Singled/Remaining testcase count: {groupedtests.Count}/{singledtests.Count}/{remainingtests.Count}");

    // Check if source actually exists
    // NOTE(review): this branch does not return after SkipTests, so execution
    // falls through to the run logic below even when the executable is
    // missing — confirm this is intentional (the executor run will then fail).
    if (!File.Exists(testcasegroup.Source))
    {
        LogVerbose(TestMessageLevel.Informational, $"Test executable not found: {testcasegroup.Source}");
        SkipTests(groupedtests);
    }

    // Run tests
    if (_runContext.IsBeingDebugged)
    {
        string caselistfilename = _executor.MakeCaselistFilename(testcasegroup.Source);

        // Prepare testcase list file
        _executor.CreateTestcaseListFile(testcasegroup, caselistfilename);

        LogVerbose(TestMessageLevel.Informational, "Start debug run.");
        _frameworkHandle.LaunchProcessWithDebuggerAttached( testcasegroup.Source
                                                          , _executor.WorkingDirectory(testcasegroup.Source)
                                                          , _executor.GenerateCommandlineArguments_Combined_Dbg(caselistfilename)
                                                          , _settings.GetEnviromentVariablesForDebug());

        // Do not process output in Debug mode; record an indeterminate outcome
        // for each grouped test and stop (singled/remaining tests are not run
        // in this debug pass).
        foreach (var test in groupedtests)
        {
            TestResult result = new TestResult(test);
            result.Outcome = TestOutcome.None;
            _frameworkHandle.RecordResult(result);
        }
        return;
    }

    LogVerbose(TestMessageLevel.Informational, $"Run {testcasegroup.Names.Count} grouped testcases.");
    var testresults = _executor.Run(testcasegroup);

    if (!string.IsNullOrEmpty(_executor.Log))
    {
        LogNormal(TestMessageLevel.Informational, $"Executor log:{Environment.NewLine}{_executor.Log}");
    }

    // Process results: match each grouped test to its entry in the combined
    // output. A missing entry is classified as timeout-skip, crash-retry, or
    // name-mismatch (fall back to isolated execution).
    LogDebug(TestMessageLevel.Informational, $"Testcase result count: {testresults.TestResults.Count}");
    foreach (var test in groupedtests)
    {
        var testresult = testresults.FindTestResult(test.DisplayName);
        LogDebug(TestMessageLevel.Informational, $"Processed testcase: {test.DisplayName}");
        TestResult result = new TestResult(test);
        if (testresult == null)
        {
            if (testresults.TimedOut)
            {
                LogDebug(TestMessageLevel.Informational, $"Combined testcase result not found for: {test.DisplayName}");
                result.Outcome = TestOutcome.Skipped; // When test result not found, probably a timeout occured and the test was skipped as a result.
                result.ErrorMessage = "Timeout of combined testcase execution.";
                _frameworkHandle.RecordResult(result);
            }
            else if (testresults.IsPartialOutput)
            {
                LogDebug(TestMessageLevel.Informational, $"Combined testcase result not found for: {test.DisplayName}{Environment.NewLine}Looks like it was caused by a previous test crashing the test executable. Adding it to the retry list for another combined test execution run.");
                retrytests.Add(test);
            }
            else
            {
                LogNormal(TestMessageLevel.Warning, $"Combined testcase result not found for: {test.DisplayName}{Environment.NewLine}Trying again by running it in isolation, i.e., not combined with other test cases. To prevent this try updating to a later version of Catch2 or changing the test case name.");
                singledtests.Add(test);
            }
        }
        else
        {
            RecordTestResult(result, testresult);
        }
    }

    // Follow-up passes. Each recursion/secondary run gets a disjoint subset,
    // so these terminate: retrytests shrink per crash, remainingtests drop a
    // source per pass.
    if (retrytests.Count > 0)
    {
        LogDebug(TestMessageLevel.Informational, $"Process retry tests (count: {retrytests.Count})");
        RunTests_Combine(retrytests);
    }
    if (singledtests.Count > 0)
    {
        LogDebug(TestMessageLevel.Informational, $"Process singled tests (count: {singledtests.Count})");
        RunTests_Single(singledtests);
    }
    if (remainingtests.Count > 0)
    {
        LogDebug(TestMessageLevel.Informational, $"Process remaining tests (count: {remainingtests.Count})");
        RunTests_Combine(remainingtests);
    }
}
private void RunTestCase( IFrameworkHandle frameworkHandle, IRunContext runContext, TestCase test, Dictionary <string, PythonProjectSettings> sourceToSettings ) { var testResult = new TestResult(test); frameworkHandle.RecordStart(test); testResult.StartTime = DateTimeOffset.Now; PythonProjectSettings settings; if (!sourceToSettings.TryGetValue(test.Source, out settings)) { sourceToSettings[test.Source] = settings = LoadProjectSettings(test.Source); } if (settings == null) { frameworkHandle.SendMessage( TestMessageLevel.Error, "Unable to determine interpreter to use for " + test.Source); RecordEnd( frameworkHandle, test, testResult, null, "Unable to determine interpreter to use for " + test.Source, TestOutcome.Failed); return; } var debugMode = PythonDebugMode.None; if (runContext.IsBeingDebugged && _app != null) { debugMode = settings.EnableNativeCodeDebugging ? PythonDebugMode.PythonAndNative : PythonDebugMode.PythonOnly; } var testCase = new PythonTestCase(settings, test, debugMode); var dte = _app != null?_app.GetDTE() : null; if (dte != null && debugMode != PythonDebugMode.None) { dte.Debugger.DetachAll(); } if (!File.Exists(settings.Factory.Configuration.InterpreterPath)) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Interpreter path does not exist: " + settings.Factory.Configuration.InterpreterPath); return; } var env = new Dictionary <string, string>(); var pythonPathVar = settings.Factory.Configuration.PathEnvironmentVariable; var pythonPath = testCase.SearchPaths; if (!string.IsNullOrWhiteSpace(pythonPathVar)) { if (_app != null) { var settingsManager = SettingsManagerCreator.GetSettingsManager(dte); if (settingsManager != null) { var store = settingsManager.GetReadOnlySettingsStore(SettingsScope.UserSettings); if (store != null && store.CollectionExists(@"PythonTools\Options\General")) { var settingStr = store.GetString(@"PythonTools\Options\General", "ClearGlobalPythonPath", "True"); bool settingBool; if (bool.TryParse(settingStr, out 
settingBool) && !settingBool) { pythonPath += ";" + Environment.GetEnvironmentVariable(pythonPathVar); } } } } env[pythonPathVar] = pythonPath; } foreach (var envVar in testCase.Environment) { env[envVar.Key] = envVar.Value; } using (var proc = ProcessOutput.Run( !settings.IsWindowsApplication ? settings.Factory.Configuration.InterpreterPath : settings.Factory.Configuration.WindowsInterpreterPath, testCase.Arguments, testCase.WorkingDirectory, env, false, null )) { bool killed = false; #if DEBUG frameworkHandle.SendMessage(TestMessageLevel.Informational, "cd " + testCase.WorkingDirectory); frameworkHandle.SendMessage(TestMessageLevel.Informational, "set " + (pythonPathVar ?? "") + "=" + (pythonPath ?? "")); frameworkHandle.SendMessage(TestMessageLevel.Informational, proc.Arguments); #endif proc.Wait(TimeSpan.FromMilliseconds(500)); if (debugMode != PythonDebugMode.None) { if (proc.ExitCode.HasValue) { // Process has already exited frameworkHandle.SendMessage(TestMessageLevel.Error, "Failed to attach debugger because the process has already exited."); if (proc.StandardErrorLines.Any()) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Standard error from Python:"); foreach (var line in proc.StandardErrorLines) { frameworkHandle.SendMessage(TestMessageLevel.Error, line); } } } try { if (debugMode == PythonDebugMode.PythonOnly) { string qualifierUri = string.Format("tcp://{0}@localhost:{1}", testCase.DebugSecret, testCase.DebugPort); while (!_app.AttachToProcess(proc, PythonRemoteDebugPortSupplierUnsecuredId, qualifierUri)) { if (proc.Wait(TimeSpan.FromMilliseconds(500))) { break; } } } else { var engines = new[] { PythonDebugEngineGuid, VSConstants.DebugEnginesGuids.NativeOnly_guid }; while (!_app.AttachToProcess(proc, engines)) { if (proc.Wait(TimeSpan.FromMilliseconds(500))) { break; } } } #if DEBUG } catch (COMException ex) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee."); 
frameworkHandle.SendMessage(TestMessageLevel.Error, ex.ToString()); try { proc.Kill(); } catch (InvalidOperationException) { // Process has already exited } killed = true; } #else } catch (COMException) {
private void RunTestCase(VisualStudioApp app, IFrameworkHandle frameworkHandle, IRunContext runContext, TestCase test, Dictionary <string, NodejsProjectSettings> sourceToSettings) { var testResult = new TestResult(test); frameworkHandle.RecordStart(test); testResult.StartTime = DateTimeOffset.Now; NodejsProjectSettings settings; if (!sourceToSettings.TryGetValue(test.Source, out settings)) { sourceToSettings[test.Source] = settings = LoadProjectSettings(test.Source); } if (settings == null) { frameworkHandle.SendMessage( TestMessageLevel.Error, "Unable to determine interpreter to use for " + test.Source); RecordEnd( frameworkHandle, test, testResult, null, "Unable to determine interpreter to use for " + test.Source, TestOutcome.Failed); return; } #if DEV15 // VS 2017 doesn't install some assemblies to the GAC that are needed to work with the // debugger, and as the tests don't execute in the devenv.exe process, those assemblies // fail to load - so load them manually from PublicAssemblies. // Use the executable name, as this is only needed for the out of proc test execution // that may interact with the debugger (vstest.executionengine.x86.exe). 
string currentProc = Process.GetCurrentProcess().MainModule.FileName; if (Path.GetFileName(currentProc).ToLowerInvariant().Equals("vstest.executionengine.x86.exe")) { string baseDir = Path.GetDirectoryName(currentProc); string publicAssemblies = Path.Combine(baseDir, "..\\..\\..\\PublicAssemblies"); Assembly.LoadFrom(Path.Combine(publicAssemblies, "Microsoft.VisualStudio.OLE.Interop.dll")); Assembly.LoadFrom(Path.Combine(publicAssemblies, "envdte90.dll")); Assembly.LoadFrom(Path.Combine(publicAssemblies, "envdte80.dll")); Assembly.LoadFrom(Path.Combine(publicAssemblies, "envdte.dll")); } #endif NodejsTestInfo testInfo = new NodejsTestInfo(test.FullyQualifiedName); List <string> args = new List <string>(); int port = 0; if (runContext.IsBeingDebugged && app != null) { app.GetDTE().Debugger.DetachAll(); args.AddRange(GetDebugArgs(settings, out port)); } var workingDir = Path.GetDirectoryName(CommonUtils.GetAbsoluteFilePath(settings.WorkingDir, testInfo.ModulePath)); args.AddRange(GetInterpreterArgs(test, workingDir, settings.ProjectRootDir)); //Debug.Fail("attach debugger"); if (!File.Exists(settings.NodeExePath)) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Interpreter path does not exist: " + settings.NodeExePath); return; } lock (_syncObject) { _nodeProcess = ProcessOutput.Run( settings.NodeExePath, args, workingDir, null, false, null, false); #if DEBUG frameworkHandle.SendMessage(TestMessageLevel.Informational, "cd " + workingDir); frameworkHandle.SendMessage(TestMessageLevel.Informational, _nodeProcess.Arguments); #endif _nodeProcess.Wait(TimeSpan.FromMilliseconds(500)); if (runContext.IsBeingDebugged && app != null) { try { //the '#ping=0' is a special flag to tell VS node debugger not to connect to the port, //because a connection carries the consequence of setting off --debug-brk, and breakpoints will be missed. 
string qualifierUri = string.Format(CultureInfo.InvariantCulture, "tcp://localhost:{0}#ping=0", port); while (!app.AttachToProcess(_nodeProcess, NodejsRemoteDebugPortSupplierUnsecuredId, qualifierUri)) { if (_nodeProcess.Wait(TimeSpan.FromMilliseconds(500))) { break; } } #if DEBUG } catch (COMException ex) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee."); frameworkHandle.SendMessage(TestMessageLevel.Error, ex.ToString()); KillNodeProcess(); } #else } catch (COMException) {
/// <summary>
/// Notifies the test platform that the given test has begun executing.
/// Resolves the platform <c>TestCase</c> by its fully qualified name and
/// records the start on the framework handle.
/// </summary>
/// <param name="tc">The test that just started.</param>
public void Started(TestCaseToRun tc)
{
    _frameworkHandle.RecordStart(FindTestCase(tc.FullyQualifiedName));
}
private void RunTestCase(VisualStudioApp app, IFrameworkHandle frameworkHandle, IRunContext runContext, TestCase test, Dictionary<string, NodejsProjectSettings> sourceToSettings) { var testResult = new TestResult(test); frameworkHandle.RecordStart(test); testResult.StartTime = DateTimeOffset.Now; NodejsProjectSettings settings; if (!sourceToSettings.TryGetValue(test.Source, out settings)) { sourceToSettings[test.Source] = settings = LoadProjectSettings(test.Source); } if (settings == null) { frameworkHandle.SendMessage( TestMessageLevel.Error, "Unable to determine interpreter to use for " + test.Source); RecordEnd( frameworkHandle, test, testResult, null, "Unable to determine interpreter to use for " + test.Source, TestOutcome.Failed); return; } NodejsTestInfo testInfo = new NodejsTestInfo(test.FullyQualifiedName); List<string> args = new List<string>(); int port = 0; if (runContext.IsBeingDebugged && app != null) { app.GetDTE().Debugger.DetachAll(); args.AddRange(GetDebugArgs(settings, out port)); } var workingDir = Path.GetDirectoryName(CommonUtils.GetAbsoluteFilePath(settings.WorkingDir, testInfo.ModulePath)); args.AddRange(GetInterpreterArgs(test, workingDir, settings.ProjectRootDir)); //Debug.Fail("attach debugger"); if (!File.Exists(settings.NodeExePath)) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Interpreter path does not exist: " + settings.NodeExePath); return; } lock (_syncObject) { _nodeProcess = ProcessOutput.Run( settings.NodeExePath, args, workingDir, null, false, null, false); #if DEBUG frameworkHandle.SendMessage(TestMessageLevel.Informational, "cd " + workingDir); frameworkHandle.SendMessage(TestMessageLevel.Informational, _nodeProcess.Arguments); #endif _nodeProcess.Wait(TimeSpan.FromMilliseconds(500)); if (runContext.IsBeingDebugged && app != null) { try { //the '#ping=0' is a special flag to tell VS node debugger not to connect to the port, //because a connection carries the consequence of setting off --debug-brk, and breakpoints 
will be missed. string qualifierUri = string.Format("tcp://localhost:{0}#ping=0", port); while (!app.AttachToProcess(_nodeProcess, NodejsRemoteDebugPortSupplierUnsecuredId, qualifierUri)) { if (_nodeProcess.Wait(TimeSpan.FromMilliseconds(500))) { break; } } #if DEBUG } catch (COMException ex) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee."); frameworkHandle.SendMessage(TestMessageLevel.Error, ex.ToString()); KillNodeProcess(); } #else } catch (COMException) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee."); KillNodeProcess(); } #endif } }
private void RunTestCase(VisualStudioApp app, IFrameworkHandle frameworkHandle, IRunContext runContext, TestCase test, Dictionary <string, NodejsProjectSettings> sourceToSettings) { var testResult = new TestResult(test); frameworkHandle.RecordStart(test); testResult.StartTime = DateTimeOffset.Now; NodejsProjectSettings settings; if (!sourceToSettings.TryGetValue(test.Source, out settings)) { sourceToSettings[test.Source] = settings = LoadProjectSettings(test.Source); } if (settings == null) { frameworkHandle.SendMessage( TestMessageLevel.Error, "Unable to determine interpreter to use for " + test.Source); RecordEnd( frameworkHandle, test, testResult, null, "Unable to determine interpreter to use for " + test.Source, TestOutcome.Failed); return; } NodejsTestInfo testInfo = new NodejsTestInfo(test.FullyQualifiedName); List <string> args = new List <string>(); int port = 0; if (runContext.IsBeingDebugged && app != null) { app.GetDTE().Debugger.DetachAll(); args.AddRange(GetDebugArgs(settings, out port)); } var workingDir = Path.GetDirectoryName(CommonUtils.GetAbsoluteFilePath(settings.WorkingDir, testInfo.ModulePath)); args.AddRange(GetInterpreterArgs(test, workingDir, settings.ProjectRootDir)); //Debug.Fail("attach debugger"); if (!File.Exists(settings.NodeExePath)) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Interpreter path does not exist: " + settings.NodeExePath); return; } lock (_syncObject) { _nodeProcess = ProcessOutput.Run( settings.NodeExePath, args, workingDir, null, false, null, false); #if DEBUG frameworkHandle.SendMessage(TestMessageLevel.Informational, "cd " + workingDir); frameworkHandle.SendMessage(TestMessageLevel.Informational, _nodeProcess.Arguments); #endif _nodeProcess.Wait(TimeSpan.FromMilliseconds(500)); if (runContext.IsBeingDebugged && app != null) { try { //the '#ping=0' is a special flag to tell VS node debugger not to connect to the port, //because a connection carries the consequence of setting off --debug-brk, and breakpoints 
will be missed. string qualifierUri = string.Format("tcp://localhost:{0}#ping=0", port); while (!app.AttachToProcess(_nodeProcess, NodejsRemoteDebugPortSupplierUnsecuredId, qualifierUri)) { if (_nodeProcess.Wait(TimeSpan.FromMilliseconds(500))) { break; } } #if DEBUG } catch (COMException ex) { frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee."); frameworkHandle.SendMessage(TestMessageLevel.Error, ex.ToString()); KillNodeProcess(); } #else } catch (COMException) {
/// <summary>
/// Signals the framework handle that the test registered in <c>_tests</c>
/// under <paramref name="testFullName"/> has started running.
/// </summary>
/// <param name="testFullName">Fully qualified test name used as the lookup key.</param>
public void NotifyStart(string testFullName)
{
    var testCase = _tests[testFullName];
    _frameworkHandle.RecordStart(testCase);
}
/// <summary>
/// Runs the given performance tests, recording results with the test platform
/// and appending per-iteration timings to a CSV file under
/// &lt;cwd&gt;/TestResults/&lt;user&gt;_&lt;machine&gt;_&lt;timestamp&gt;.csv.
/// Lifecycle callbacks (before-all / after-all) are invoked around the run.
/// </summary>
/// <param name="tests">Tests selected by the platform.</param>
/// <param name="runContext">Run context supplying the .runsettings XML.</param>
/// <param name="frameworkHandle">Recorder for starts/results and messages.</param>
public void RunTests(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle)
{
    var dir = Directory.GetCurrentDirectory();
    var folder = Path.Combine(dir, "TestResults");
    Directory.CreateDirectory(folder);
    var file = Path.Combine(folder, $"{Environment.UserName}_{Environment.MachineName}_{DateTime.Now:yyyy-MM-dd_HH_mm_ss}.csv");
    using (var fs = File.OpenWrite(file))
    using (var tw = new StreamWriter(fs))
    {
        // CSV header: every data row below must supply exactly these six columns.
        tw.WriteCsvLine("Test Name", "Iteration", "Is Warmup", "Duration", "Iteration Status", "Run Status");
        var logger = new TestLogger(frameworkHandle);
        var settings = new AdapterSettings(logger);
        settings.Load(runContext.RunSettings.SettingsXml);
        logger.InitSettings(settings);
        frameworkHandle.EnableShutdownAfterTestRun = true;

        var toRun = Convert(tests);
        // Any platform TestCase we could not map to a perf test is reported NotFound.
        var missing = tests.Except(toRun.Select(x => x.testCase));
        foreach (var m in missing)
        {
            frameworkHandle.RecordEnd(m, TestOutcome.NotFound);
        }

        var lifecycleEvents = TestLifeCyclesCallbacks(tests.Select(x => x.Source).Distinct().ToList());
        var beforeAll = lifecycleEvents.OfType <ITestLifecycleBeforeAllTests>().ToArray();
        var afterAll = lifecycleEvents.OfType <ITestLifecycleAfterAllTests>().ToArray();

        // Run before-all callbacks in their own (immediately disposed) context.
        using (var globalCtx = TestContext.Start(frameworkHandle, settings))
        {
            foreach (var evnt in beforeAll)
            {
                evnt.BeforeAllTests(globalCtx).GetAwaiter().GetResult();
            }
        }

        foreach (var t in toRun)
        {
            var testResult = new TestResult(t.testCase);
            if (t.perfTest.Skipped)
            {
                testResult.Outcome = TestOutcome.Skipped;
                frameworkHandle.RecordResult(testResult);
                // FIX: was 5 values against the 6-column header, which shifted
                // "Skipped" into the "Iteration Status" column. Pad the
                // iteration-status column so "Skipped" lands under "Run Status".
                tw.WriteCsvLine(t.perfTest.Name, "-", "-", "-", "-", "Skipped");
                continue;
            }
            frameworkHandle.RecordStart(t.testCase);
            using (var context = TestContext.Start(t.perfTest, settings))
            {
                var sw = Stopwatch.StartNew();
                var task = t.perfTest.ExecuteAsync(context);
                // NOTE(review): blocks the adapter thread on the async run;
                // acceptable here since tests execute sequentially.
                Task.WaitAll(task);
                sw.Stop();
                var result = task.Result;

                // The whole test fails if any iteration produced an error.
                var errors = result.Select(x => x.Error).Where(x => x != null).ToList();
                if (errors.Any())
                {
                    testResult.ErrorStackTrace = string.Join("\n\n-------\n\n", errors.Select(x => x.StackTrace));
                    testResult.ErrorMessage = string.Join("\n\n-------\n\n", errors.Select(x => x.Message));
                    testResult.Outcome = TestOutcome.Failed;
                }
                else
                {
                    testResult.Outcome = TestOutcome.Passed;
                }

                // One CSV row per iteration: warmups first, then measured runs,
                // each group numbered from 1.
                int counter = 0;
                foreach (var r in result.Where(x => x.IsWarmup))
                {
                    tw.WriteCsvLine(t.perfTest.Name, ++counter, r.IsWarmup, r.Duration.TotalSeconds, r.Error == null ? TestOutcome.Passed : TestOutcome.Failed, testResult.Outcome);
                }
                counter = 0;
                foreach (var r in result.Where(x => !x.IsWarmup))
                {
                    tw.WriteCsvLine(t.perfTest.Name, ++counter, r.IsWarmup, r.Duration.TotalSeconds, r.Error == null ? TestOutcome.Passed : TestOutcome.Failed, testResult.Outcome);
                }

                // Summarize timing statistics over the measured (non-warmup) runs.
                testResult.Duration = sw.Elapsed;
                var runs = result.Where(x => !x.IsWarmup).Select(x => x.Duration);
                var warmups = result.Where(x => x.IsWarmup).Select(x => x.Duration);
                var mean = TimeSpanStatistics.Mean(runs);
                var standardDeviation = TimeSpanStatistics.StandardDeviation(runs);
                var msg = $@"Warm up Count : {warmups.Count()}
Warm up Duration : {new TimeSpan(warmups.Sum(x => x.Ticks))}
Executed : {runs.Count()}
Mean Duration: {mean}
Standard Deviation Duration: {standardDeviation}
";
                testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, msg));
                testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, context.Output));
                foreach (var r in result.Where(x => !string.IsNullOrWhiteSpace(x.Output)))
                {
                    testResult.Messages.Add(new TestResultMessage(TestResultMessage.AdditionalInfoCategory, r.Output));
                }
                frameworkHandle.RecordResult(testResult);
            }
        }

        // Run after-all callbacks, mirroring the before-all context handling.
        using (var globalCtx = TestContext.Start(frameworkHandle, settings))
        {
            foreach (var evnt in afterAll)
            {
                evnt.AfterAllTests(globalCtx).GetAwaiter().GetResult();
            }
        }
    }
}
/// <summary>
/// Reports one spec's execution to the test platform: records the start,
/// publishes a result per spec run, then ends with an aggregate outcome.
/// Aggregate precedence is Failed over Skipped over Passed over None.
/// </summary>
/// <param name="settings">Source settings used to build the platform test case.</param>
/// <param name="logger">Adapter logger (unused here, kept for signature parity).</param>
/// <param name="runContext">Current run context (unused here).</param>
/// <param name="frameworkHandle">Recorder for start/result/end notifications.</param>
/// <param name="spec">The spec whose individual results are reported.</param>
private void RunTest(TestSourceSettings settings, ITestLogger logger, IRunContext runContext, IFrameworkHandle frameworkHandle, Spec spec)
{
    var vsTestCase = CreateTestCase(settings, spec);
    var aggregate = TestOutcome.None;
    frameworkHandle.RecordStart(vsTestCase);
    foreach (var specResult in spec.Results)
    {
        // A hard failure always wins; a skip wins over a pass; a pass only
        // registers while no outcome has been decided yet.
        if (!specResult.Success && !specResult.Skipped)
        {
            aggregate = TestOutcome.Failed;
        }
        else if (specResult.Skipped && aggregate != TestOutcome.Failed)
        {
            aggregate = TestOutcome.Skipped;
        }
        else if (specResult.Success && aggregate == TestOutcome.None)
        {
            aggregate = TestOutcome.Passed;
        }
        frameworkHandle.RecordResult(GetResult(vsTestCase, specResult, frameworkHandle));
    }
    frameworkHandle.RecordEnd(vsTestCase, aggregate);
}
/// <summary>
/// Handles events received over the test-runner connection: maps a result
/// event onto the currently running test, tracks test start, accumulates
/// stdout/stderr, and signals completion on the done event.
/// NOTE(review): the method name is misspelled ("Conection") but is kept as-is
/// because external subscribers reference it.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Envelope carrying the event name and payload.</param>
private void ConectionReceivedEvent(object sender, EventReceivedEventArgs e)
{
    switch (e.Name)
    {
        case TP.ResultEvent.Name:
            var result = (TP.ResultEvent)e.Event;
            // Map the runner's textual outcome onto the platform enum;
            // unknown strings fall through as TestOutcome.None.
            TestOutcome outcome = TestOutcome.None;
            switch (result.outcome)
            {
                case "passed": outcome = TestOutcome.Passed; break;
                case "failed": outcome = TestOutcome.Failed; break;
                case "skipped": outcome = TestOutcome.Skipped; break;
            }
            // NOTE(review): _curTest may still be null if the preceding start
            // event did not match any known test — confirm RecordEnd tolerates that.
            var testResult = new TestResult(_curTest);
            RecordEnd(
                _frameworkHandle,
                _curTest,
                testResult,
                _stdOut.ToString(),
                _stdErr.ToString(),
                outcome,
                result
            );
            // Captured output belongs to the finished test; reset for the next one.
            _stdOut.Clear();
            _stdErr.Clear();
            break;
        case TP.StartEvent.Name:
            var start = (TP.StartEvent)e.Event;
            // Resolve the runner's "module.Class.method" identifier back to
            // one of the discovered TestCases.
            _curTest = null;
            foreach (var test in _tests)
            {
                string testFile, testClass, testMethod;
                TestDiscoverer.ParseFullyQualifiedTestName(
                    test.FullyQualifiedName,
                    out testFile,
                    out testClass,
                    out testMethod
                );
                string testFilePath = CommonUtils.GetAbsoluteFilePath(_settings.ProjectHome, test.CodeFilePath);
                var modulePath = ModulePath.FromFullPath(testFilePath);
                if (start.test == modulePath.ModuleName + "." + testClass + "." + testMethod)
                {
                    _curTest = test;
                    break;
                }
            }
            if (_curTest != null)
            {
                _frameworkHandle.RecordStart(_curTest);
            }
            else
            {
                // FIX: the format string had three placeholders ({0} {1} {2})
                // but only two arguments, so string.Format threw a
                // FormatException on this path instead of logging the warning.
                Warning(
                    string.Format(
                        "Unexpected test result: {0} {1}",
                        start.classname,
                        start.method
                    )
                );
            }
            break;
        case TP.StdErrEvent.Name:
            var err = (TP.StdErrEvent)e.Event;
            _stdErr.Append(err.content);
            break;
        case TP.StdOutEvent.Name:
            var outp = (TP.StdOutEvent)e.Event;
            _stdOut.Append(outp.content);
            break;
        case TP.DoneEvent.Name:
            // Unblock whoever is waiting for the run to finish.
            _done.Set();
            break;
    }
}