Example #1
        public void Run()
        {
            try
            {
                _frameworkHandle.SendMessage(TestMessageLevel.Informational,
                                             $"Invoking : {_gaugeProcess}");
                _gaugeProcess.Start();
                _gaugeProcess.BeginOutputReadLine();

                if (_isBeingDebugged)
                {
                    DTEHelper.AttachToProcess(_gaugeProcess.Id);
                    _frameworkHandle.SendMessage(TestMessageLevel.Informational,
                                                 $"Attaching to ProcessID {_gaugeProcess.Id}");
                }
                _gaugeProcess.WaitForExit();
            }
            catch (Exception ex)
            {
                _frameworkHandle.SendMessage(TestMessageLevel.Error, ex.Message);
                foreach (var testCase in _tests)
                {
                    var result = new TestResult(testCase)
                    {
                        Outcome      = TestOutcome.None,
                        ErrorMessage = $"{ex.Message}\n{ex.StackTrace}"
                    };
                    _frameworkHandle.RecordResult(result);
                    _frameworkHandle.RecordEnd(testCase, result.Outcome);
                    _pendingTests.Remove(testCase);
                }
            }
        }
Example #2
        private static void RunTestMethod(IFrameworkHandle frameworkHandle, TestCase testCase, TestRunner runner,
                                          TestMethod method)
        {
            frameworkHandle.RecordStart(testCase);
            try
            {
                var result = runner.Run(method.Owner, method).GetAwaiter().GetResult();
                if (result == null)
                {
                    frameworkHandle.SendMessage(TestMessageLevel.Warning, "Got no result");
                    return;
                }

                var msResult = new TestResult(testCase)
                {
                    StartTime       = result.StartedAtUtc,
                    EndTime         = result.EndedAtUtc,
                    DisplayName     = method.Name.Replace("_", " "),
                    Outcome         = TestOutcome.Passed,
                    Duration        = result.Elapsed,
                    ErrorMessage    = result.Exception?.Message,
                    ErrorStackTrace = result.Exception?.StackTrace
                };

                if (result.IsIgnored)
                {
                    msResult.Outcome = TestOutcome.Skipped;
                }
                else if (result.IsSuccess)
                {
                    msResult.Outcome = TestOutcome.Passed;
                }
                else
                {
                    msResult.Outcome = TestOutcome.Failed;
                }

                frameworkHandle.RecordEnd(testCase, msResult.Outcome);
                frameworkHandle.RecordResult(msResult);
            }
            catch (Exception ex)
            {
                frameworkHandle.RecordEnd(testCase, TestOutcome.Failed);
                frameworkHandle.RecordResult(new TestResult(testCase)
                {
                    DisplayName     = method.Name.Replace("_", " "),
                    Outcome         = TestOutcome.Failed,
                    ErrorMessage    = ex.Message,
                    ErrorStackTrace = ex.StackTrace
                });
            }
        }
Example #3
        public void CasePassed(PassResult result)
        {
            var testCase = new TestCase(result.Case.Name, new Uri(Constants.EXECUTOR_URI_STRING), source);

            frameworkHandle.RecordStart(testCase);
            var testResult = new TestResult(testCase)
            {
                Outcome = TestOutcome.Passed
            };

            frameworkHandle.RecordEnd(testCase, TestOutcome.Passed);
            frameworkHandle.RecordResult(testResult);
        }
Example #4
        public void RunTests(
            IEnumerable <TestCase> tests,
            IRunContext runContext,
            IFrameworkHandle frameworkHandle)
        {
            _cancellationTokenSource = new CancellationTokenSource();

            foreach (var test in tests)
            {
                var configuration = new EngineConfiguration(new string[] { test.Source }, runContext);
                var testEngine    = new Engine(configuration);

                var outputHandler = new TestAdapterOutputHandler(frameworkHandle);
                testEngine.SetOutputHandler(outputHandler);

                var testResult = new TestResult(test);

                frameworkHandle.RecordStart(test);

                var kernelTestResult = testEngine.Execute(_cancellationTokenSource.Token).KernelTestResults[0];

                testResult.Outcome = kernelTestResult.Result ? TestOutcome.Passed : TestOutcome.Failed;

                // Attach the captured output to the result so it is reported with the test.
                foreach (var message in outputHandler.Messages)
                {
                    testResult.Messages.Add(new TestResultMessage(String.Empty, message));
                }

                frameworkHandle.RecordEnd(test, testResult.Outcome);
                frameworkHandle.RecordResult(testResult);
            }
        }
Example #5
        public void NotifyEnd(string testFullName, TestResultShim testResult)
        {
            var test = _tests[testFullName];

            var result = new TestResult(test)
            {
                Outcome     = MapToOutcome(testResult),
                DisplayName = testFullName
            };

            if (result.Outcome == TestOutcome.Failed)
            {
                result.ErrorMessage    = testResult.FailureReason;
                result.ErrorStackTrace = testResult.FailureStackTrace;
            }
            else if (result.Outcome == TestOutcome.Skipped)
            {
                // TODO: can we include the reason skipped in VS output somehow?
                result.Messages.Add(
                    new TestResultMessage("ReasonSkipped", testResult.ReasonSkipped));
            }

            _frameworkHandle.RecordEnd(test, result.Outcome);
            _frameworkHandle.RecordResult(result);
        }
Example #6
        private async Task Run(TestCase testCase, DiscoveredTestData testData, TestRunContext testRunContext, StepBinder stepBinder, IFrameworkHandle frameworkHandle, SemaphoreSlim simultaneousTestCasesSemaphore)
        {
            await simultaneousTestCasesSemaphore
            .WaitAsync()
            .ConfigureAwait(false);

            try
            {
                frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Starting test \"{testCase.DisplayName}\"");

                frameworkHandle.RecordStart(testCase);

                var executor = stepsExecutorFactory(stepBinder);

                var testResult = await executor
                                 .Execute(testCase, testData, testRunContext, frameworkHandle)
                                 .ConfigureAwait(false);

                // https://github.com/Microsoft/vstest/blob/master/src/Microsoft.TestPlatform.CrossPlatEngine/Adapter/TestExecutionRecorder.cs <- comments here seem to suggest that we need to call RecordEnd just before RecordResult
                frameworkHandle.RecordEnd(testCase, testResult.Outcome);
                frameworkHandle.RecordResult(testResult);

                frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Finished test \"{testCase.DisplayName}\"");
            }
            finally
            {
                simultaneousTestCasesSemaphore.Release();
            }
        }
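The comment in the example above points at the TestExecutionRecorder source, which suggests calling RecordEnd immediately before RecordResult; several other examples here do it the other way round. A minimal sketch of the suggested sequence, with a runTest delegate standing in for the actual execution (not taken from any adapter above):

        private static void RecordSingleTest(IFrameworkHandle frameworkHandle, TestCase testCase, Func<TestOutcome> runTest)
        {
            // Announce the start before executing.
            frameworkHandle.RecordStart(testCase);

            var result = new TestResult(testCase)
            {
                Outcome = runTest()   // the caller supplies the actual test execution
            };

            // Per the TestExecutionRecorder comments: end first, then report the result.
            frameworkHandle.RecordEnd(testCase, result.Outcome);
            frameworkHandle.RecordResult(result);
        }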
Example #7
        public void RunTests(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle)
        {
            // more on filtering
            // https://github.com/nunit/nunit3-vs-adapter/blob/master/src/NUnitTestAdapter/VsTestFilter.cs
            List <string> supportedProperties = new List <string>();

            supportedProperties.Add("FullyQualifiedName");
            ITestCaseFilterExpression fe = runContext.GetTestCaseFilter(supportedProperties, PropertyProvider);

            log.Debug("Run settings:\n" + runContext.RunSettings.SettingsXml);

            log.Debug("RunTests from Test Cases");
            foreach (TestCase tc in tests)
            {
                if (fe == null || fe.MatchTestCase(tc, p => PropertyValueProvider(tc, p)))
                {
                    log.Debug("Run test case: " + tc.FullyQualifiedName + " / " + tc.Id);
                    frameworkHandle.RecordStart(tc);
                    DateTime   startTime = DateTime.Now;
                    TestResult tr        = runner.RunTest(tc, runContext);
                    DateTime   endTime   = DateTime.Now;
                    tr.Duration = endTime - startTime;
                    frameworkHandle.RecordEnd(tc, tr.Outcome);
                    frameworkHandle.RecordResult(tr);
                }
                else
                {
                    log.Debug("Test case filtered out: " + tc.FullyQualifiedName + " / " + tc.Id);
                }
            }
        }
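The example above hands PropertyProvider and PropertyValueProvider to the filter expression without showing them. A rough sketch of what such helpers could look like for the single supported property, using the standard TestCaseProperties from the test platform object model (illustrative only, not the adapter's actual code):

        private static TestProperty PropertyProvider(string propertyName)
        {
            // Only FullyQualifiedName was declared as a supported filter property above.
            return string.Equals(propertyName, "FullyQualifiedName", StringComparison.OrdinalIgnoreCase)
                ? TestCaseProperties.FullyQualifiedName
                : null;
        }

        private static object PropertyValueProvider(TestCase tc, string propertyName)
        {
            // Resolve the property and read its value from the test case, or null if unsupported.
            var property = PropertyProvider(propertyName);
            return property != null ? tc.GetPropertyValue(property) : null;
        }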
Example #8
        private static void RecordEnd(IFrameworkHandle frameworkHandle, TestResult result, string stdout, string stderr, TestOutcome outcome, TP.ResultEvent resultInfo)
        {
            result.EndTime  = DateTimeOffset.Now;
            result.Duration = TimeSpan.FromSeconds(resultInfo.durationInSecs);
            result.Outcome  = outcome;

            // Replace \n with \r\n to be more friendly when copying output...
            stdout = stdout.Replace("\r\n", "\n").Replace("\n", "\r\n");
            stderr = stderr.Replace("\r\n", "\n").Replace("\n", "\r\n");

            result.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, stdout));
            result.Messages.Add(new TestResultMessage(TestResultMessage.StandardErrorCategory, stderr));
            result.Messages.Add(new TestResultMessage(TestResultMessage.AdditionalInfoCategory, stderr));
            if (resultInfo.traceback != null)
            {
                result.ErrorStackTrace = resultInfo.traceback;
                result.Messages.Add(new TestResultMessage(TestResultMessage.DebugTraceCategory, resultInfo.traceback));
            }
            if (resultInfo.message != null)
            {
                result.ErrorMessage = resultInfo.message;
            }

            frameworkHandle.RecordResult(result);
            frameworkHandle.RecordEnd(result.TestCase, outcome);
        }
Example #9
        /// <summary>
        /// Runs the tests.
        /// </summary>
        /// <param name="tests">Tests to be run.</param>
        /// <param name="runContext">Context to use when executing the tests.</param>
        /// <param name="frameworkHandle">Handle to the framework to record results and to do framework operations.</param>
        public void RunTests(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle)
        {
            //if (Debugger.IsAttached) Debugger.Break();
            //else Debugger.Launch();

            try
            {
                var parsed = XElement.Parse(runContext.RunSettings.SettingsXml);
                runContext.RunSettings.GetSettings(AppConfig.Name).Load(parsed.Element(AppConfig.Name).CreateReader());
            }
            catch (Exception ex) { Console.WriteLine($"Framework: Error while loading SettingsXml - {ex.Message} {ex.Data}"); }
            m_cancelled = false;
            try
            {
                foreach (TestCase test in tests)
                {
                    if (m_cancelled)
                    {
                        break;
                    }
                    frameworkHandle.RecordStart(test);
                    frameworkHandle.SendMessage(TestMessageLevel.Informational, "Framework: Starting external test for " + test.DisplayName);
                    var testOutcome = RunExternalTest(test, runContext, frameworkHandle, test);
                    frameworkHandle.RecordResult(testOutcome);
                    frameworkHandle.SendMessage(TestMessageLevel.Informational, "Framework: Test result:" + testOutcome.Outcome.ToString());
                    frameworkHandle.RecordEnd(test, testOutcome.Outcome);
                }
            }
            catch (Exception e)
            {
                frameworkHandle.SendMessage(TestMessageLevel.Error, "Framework: Exception during test execution: " + e.Message);
                frameworkHandle.SendMessage(TestMessageLevel.Error, "Framework: " + e.StackTrace);
            }
        }
Example #10
 private void RunTests(string source, IRunContext runContext, IFrameworkHandle frameworkHandle)
 {
     foreach (var result in ExternalTestExecutor.GetTestResults(source, null).Select(c => CreateTestResult(source, c)))
     {
         frameworkHandle.RecordStart(result.TestCase);
         frameworkHandle.RecordResult(result);
         frameworkHandle.RecordEnd(result.TestCase, result.Outcome);
     }
 }
Example #12
        private void ReportTestResult(TestResult testResult)
        {
            VsTestResult result = testResult.ToVsTestResult();

            _throttle.Execute(delegate
            {
                // This is part of a workaround for a Visual Studio bug. See above.
                _frameworkHandle.RecordResult(result);
            });
            _frameworkHandle.RecordEnd(result.TestCase, result.Outcome);
        }
Example #13
        public override void TestFinished(TestCase test)
        {
            var testCase = test.ToVsTestCase();
            var result   = test.ToVsTestResult();
            var outcome  = ChutzpahExtensionMethods.ToVsTestOutcome(test.Passed);


            frameworkHandle.RecordResult(result);

            // The test case is done
            frameworkHandle.RecordEnd(testCase, outcome);
        }
Example #14
        /// <summary>
        /// Processes the results.
        /// </summary>
        /// <param name="tests">The tests.</param>
        /// <param name="argument">The argument.</param>
        /// <param name="frameworkHandle">The framework handle.</param>
        internal void ProcessResults(IEnumerable <TestCase> tests, TestCaseArgument argument, IFrameworkHandle frameworkHandle)
        {
            TrxSchemaReader reader  = new TrxSchemaReader(this.logger, tests);
            TestRunType     testRun = reader.Read(argument.TestRunOptions.ReportOutputPath);

            if (testRun == null)
            {
                return;
            }

            foreach (TrxResult result in reader.ProcessStatLightResult(testRun))
            {
                TestResult testResult = result.GetTestResult(this.logger);
                frameworkHandle.RecordResult(testResult);
                frameworkHandle.RecordEnd(result.TestCase, testResult.Outcome);
            }
        }
Example #15
        public void RunTests(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle)
        {
            // "Run tests in parallel" option from the UI sets MaxCpuCount in RunConfiguration
            // Ref: https://blogs.msdn.microsoft.com/devops/2016/10/10/parallel-test-execution/
            var isParallelRun = runContext.RunSettings.SettingsXml.Contains("<MaxCpuCount>");

            if (isParallelRun && runContext.IsBeingDebugged)
            {
                frameworkHandle.SendMessage(TestMessageLevel.Error, "Cannot debug specs in parallel, disable parallel run to debug specs.");
                foreach (var testCase in tests)
                {
                    frameworkHandle.RecordEnd(testCase, TestOutcome.None);
                }
                return;
            }
            _gaugeRunner = new GaugeRunner(tests, runContext.IsBeingDebugged, isParallelRun, frameworkHandle);
            _gaugeRunner.Run();
        }
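The substring check against SettingsXml above works, but it is brittle. A hypothetical alternative (not part of the adapter above) that parses the runsettings and reads RunConfiguration/MaxCpuCount directly, where 1 means sequential and 0 means all available cores:

        private static bool IsParallelRun(IRunContext runContext)
        {
            // Parse the runsettings XML and look for <RunConfiguration><MaxCpuCount>.
            var root        = System.Xml.Linq.XDocument.Parse(runContext.RunSettings.SettingsXml).Root;
            var maxCpuCount = root?.Element("RunConfiguration")?.Element("MaxCpuCount");

            // Absent or "1" means sequential; any other value (including 0 = all cores) is parallel.
            return maxCpuCount != null && maxCpuCount.Value.Trim() != "1";
        }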
Example #16
        private void RunTests(string source, IEnumerable <TestCase> tests, KarmaTestResults.Karma karma, IFrameworkHandle frameworkHandle, IKarmaLogger logger)
        {
            var vsConfig = CreateVsConfig(tests, karma);
            var runKarma = Run(source, vsConfig, logger);

            if (runKarma == null)
            {
                logger.Error("No karma");
                return;
            }

            var consolidatedResults = runKarma.ConsolidateResults(logger);
            var testNames           = tests.Select(t => t.DisplayName).Union(consolidatedResults.Select(r => r.Test.DisplayName));

            var results = from displayName in testNames
                          join test in tests
                          on displayName equals test.DisplayName
                          into test_
                          from test in test_.DefaultIfEmpty()
                          join result in consolidatedResults
                          on displayName equals result.Test.DisplayName
                          into result_
                          from result in result_.DefaultIfEmpty()
                          select new TestCaseResult(test, result, source);

            foreach (var result in results)
            {
                frameworkHandle.RecordStart(result.Test);
                foreach (var res in result.Result.Results)
                {
                    frameworkHandle.RecordResult(new TestResult(result.Test)
                    {
                        ComputerName = Environment.MachineName,
                        DisplayName  = res.Browser.Name,
                        Outcome      = res.Outcome,
                        Duration     = res.Time,
                        ErrorMessage = res.Message
                    });
                }
                frameworkHandle.RecordEnd(result.Test, result.Result.Outcome);
            }
        }
Example #17
        private async Task RunMappedTest(TestCase testCase, DiscoveredTestData testData, TestRunContext testRunContext, StepBinder stepBinder, IFrameworkHandle frameworkHandle)
        {
            frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Starting test \"{testCase.DisplayName}\"");

            frameworkHandle.RecordStart(testCase);

            var executor = new StepsExecutor(stepBinder);

            // Deliberately resume on same context to try to avoid Visual Studio Test Explorer "bug" (?) that doesn't
            // always detect the end of the test run when multiple tests are run in parallel.
            var testResult = await executor
                             .Execute(testCase, testData, testRunContext, frameworkHandle)
                             .ConfigureAwait(true);

            // https://github.com/Microsoft/vstest/blob/master/src/Microsoft.TestPlatform.CrossPlatEngine/Adapter/TestExecutionRecorder.cs <- comments here seem to suggest that we need to call RecordEnd just before RecordResult
            frameworkHandle.RecordEnd(testCase, testResult.Outcome);
            frameworkHandle.RecordResult(testResult);

            frameworkHandle.SendMessage(TestMessageLevel.Informational, $"Finished test \"{testCase.DisplayName}\"");
        }
Example #18
        public void Run(TestRunnerContext testRunnerContext)
        {
            var testCase   = testRunnerContext.TestCase;
            var testResult = new TestResult(testCase);

            IFrameworkHandle frameworkHandle = testRunnerContext.FrameworkHandle;

            if (testRunnerContext.TestCancelled)
            {
                testResult.Outcome = TestOutcome.Skipped;
                frameworkHandle.RecordResult(testResult);

                return;
            }

            var stopwatch = Stopwatch.StartNew();

            try
            {
                testRunnerContext.Method.Invoke(testRunnerContext.Instance, null);

                testResult.Outcome = TestOutcome.Passed;
            }
            catch (TargetInvocationException e)
            {
                Console.WriteLine("OOOPS");
                testResult.ErrorMessage = e.InnerException?.Message;
                testResult.Outcome      = TestOutcome.Failed;
            }
            finally
            {
                stopwatch.Stop();
                testResult.Duration = stopwatch.Elapsed;

                frameworkHandle.RecordResult(testResult);
                frameworkHandle.RecordEnd(testCase, testResult.Outcome);
            }
        }
Example #19
        public void RunTests(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle)
        {
            dynamic runConfiguration =
                runContext.RunSettings.GetSettings(Constants.RunConfigurationSettingsName);

            // "Run tests in parallel" option from the UI sets MaxCpuCount in RunConfiguration
            // By default the MaxCpuCount=0, which is ambiguous with default(int)
            // Use MaxCpuCountSet property to determine if user initiated the test run in parallel.
            // Ref: https://blogs.msdn.microsoft.com/devops/2016/10/10/parallel-test-execution/
            bool isParallelRun = runConfiguration != null && runConfiguration.Settings.MaxCpuCountSet;

            if (isParallelRun && runContext.IsBeingDebugged)
            {
                frameworkHandle.SendMessage(TestMessageLevel.Error, "Cannot debug specs in parallel, disable parallel run to debug specs.");
                foreach (var testCase in tests)
                {
                    frameworkHandle.RecordEnd(testCase, TestOutcome.None);
                }
                return;
            }
            _gaugeRunner = new GaugeRunner(tests, runContext.IsBeingDebugged, isParallelRun, frameworkHandle);
            _gaugeRunner.Run();
        }
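The comment above explains why the dynamic lookup checks MaxCpuCountSet rather than MaxCpuCount itself: 0 is both the default of int and a legal value meaning "use all cores". A hypothetical shape for the object behind runConfiguration.Settings that resolves that ambiguity with a separate flag (an assumption, not the adapter's actual type):

        public class RunConfigurationSettings
        {
            private int _maxCpuCount;

            // True only if <MaxCpuCount> was actually present in the runsettings.
            public bool MaxCpuCountSet { get; private set; }

            public int MaxCpuCount
            {
                get { return _maxCpuCount; }
                set { _maxCpuCount = value; MaxCpuCountSet = true; }
            }
        }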
Example #20
        internal static void ParseQmlTestRunnerXmlOutput(IFrameworkHandle frameworkHandle, Dictionary <string, TestCase> dict, string stdout)
        {
            using (StringReader reader = new StringReader(stdout))
            {
                XPathDocument xml = new XPathDocument(reader);
                foreach (XPathNavigator testFunction in xml.CreateNavigator().Select("/TestCase/TestFunction"))
                {
                    string name = testFunction.GetAttribute("name", "");
                    if (!dict.ContainsKey(name))
                    {
                        continue;
                    }

                    TestResult testResult = new TestResult(dict[name]);
                    testResult.Duration = TimeSpan.FromMilliseconds(testFunction.SelectSingleNode("Duration/@msecs").ValueAsDouble);
                    testResult.Outcome  = TestOutcome.Passed;
                    foreach (XPathNavigator incident in testFunction.Select("Incident"))
                    {
                        string outcome = incident.GetAttribute("type", "");
                        testResult.TestCase.LineNumber = int.Parse(incident.GetAttribute("line", ""));
                        if (!outcome.Equals("pass", StringComparison.InvariantCulture))
                        {
                            testResult.Outcome = TestOutcome.Failed;
                            XPathNavigator description = incident.SelectSingleNode("Description/text()");
                            if (description != null)
                            {
                                testResult.ErrorMessage += description.ToString() + Environment.NewLine;
                            }
                        }
                    }

                    frameworkHandle.RecordResult(testResult);
                    frameworkHandle.RecordEnd(testResult.TestCase, testResult.Outcome);
                }
            }
        }
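For orientation, this is the general shape of the qmltestrunner -xml output that the XPath expressions above expect; the element and attribute names come from the code, the values are illustrative only:

        // <TestCase name="tst_example">
        //   <TestFunction name="test_addition">
        //     <Incident type="pass" file="tst_example.qml" line="12" />
        //     <Duration msecs="0.052" />
        //   </TestFunction>
        //   <TestFunction name="test_failure">
        //     <Incident type="fail" file="tst_example.qml" line="20">
        //       <Description>Compared values are not the same</Description>
        //     </Incident>
        //     <Duration msecs="0.031" />
        //   </TestFunction>
        // </TestCase>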
Example #21
        public void Ended(TestCaseToRun tc, TestOutcome outcome)
        {
            var testCase = FindTestCase(tc.FullyQualifiedName);

            _frameworkHandle.RecordEnd(testCase, ConvertOutcome(outcome));
        }
Example #22
        private void RunTest(TestSourceSettings settings, ITestLogger logger, IRunContext runContext, IFrameworkHandle frameworkHandle, Spec spec)
        {
            var testCase = CreateTestCase(settings, spec);
            var outcome = TestOutcome.None;

            frameworkHandle.RecordStart(testCase);
            foreach (var result in spec.Results)
            {
                if (result.Skipped && outcome != TestOutcome.Failed)
                {
                    outcome = TestOutcome.Skipped;
                }

                if (result.Success && outcome == TestOutcome.None)
                {
                    outcome = TestOutcome.Passed;
                }

                if (!result.Success && !result.Skipped)
                {
                    outcome = TestOutcome.Failed;
                }

                frameworkHandle.RecordResult(GetResult(testCase, result, frameworkHandle));
            }
            frameworkHandle.RecordEnd(testCase, outcome);
        }
Example #23
        public void RunTests(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle)
        {
            foreach (var testCase in tests)
            {
                frameworkHandle.RecordStart(testCase);
                var startTime  = DateTimeOffset.Now;
                var result     = _session.Run(testCase.Id);
                var endTime    = DateTimeOffset.Now;
                var testResult = new TestResult(testCase)
                {
                    DisplayName  = testCase.DisplayName,
                    StartTime    = startTime,
                    Duration     = endTime - startTime,
                    EndTime      = endTime,
                    ComputerName = Environment.MachineName
                };

                var stackTrace   = new StringBuilder();
                var errorMessage = new StringBuilder();
                foreach (var message in result.Messages)
                {
                    if (message.StackTrace != null)
                    {
                        stackTrace.AppendLine(message.StackTrace);
                    }

                    switch (message.Type)
                    {
                    case MessageType.State:
                        testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, message.Text));
                        break;

                    case MessageType.StdOutput:
                        testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, message.Text));
                        break;

                    case MessageType.StdError:
                        testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardErrorCategory, message.Text));
                        break;

                    case MessageType.Exception:
                        errorMessage.AppendLine(message.Text);
                        break;

                    case MessageType.Trace:
                        testResult.Messages.Add(new TestResultMessage(TestResultMessage.DebugTraceCategory, message.Text));
                        break;

                    default:
                        testResult.Messages.Add(new TestResultMessage(TestResultMessage.AdditionalInfoCategory, message.Text));
                        break;
                    }
                }

                testResult.ErrorStackTrace = stackTrace.ToString();
                testResult.ErrorMessage    = errorMessage.ToString();
                switch (result.State)
                {
                case State.Passed:
                    testResult.Outcome = TestOutcome.Passed;
                    break;

                case State.Failed:
                    testResult.Outcome = TestOutcome.Failed;
                    break;

                case State.Skiped:
                    testResult.Outcome = TestOutcome.Skipped;
                    break;

                case State.NotFound:
                    testResult.Outcome = TestOutcome.NotFound;
                    break;

                default:
                    testResult.Outcome = TestOutcome.None;
                    break;
                }

                frameworkHandle.RecordResult(testResult);
                frameworkHandle.RecordEnd(testCase, testResult.Outcome);
                if (_canceled)
                {
                    _canceled = false;
                    break;
                }
            }
        }
Example #24
        private void RunTestCases(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle, NodejsProjectSettings settings)
        {
            // May be null, but this is handled by RunTestCase if it matters.
            // No VS instance just means no debugging, but everything else is
            // okay.
            if (tests.Count() == 0)
            {
                return;
            }

            using (var app = VisualStudioApp.FromEnvironmentVariable(NodejsConstants.NodeToolsProcessIdEnvironmentVariable))
            {
                var port     = 0;
                var nodeArgs = new List <string>();
                // .njsproj file path -> project settings
                var sourceToSettings = new Dictionary <string, NodejsProjectSettings>();
                var testObjects      = new List <TestCaseObject>();

                if (!File.Exists(settings.NodeExePath))
                {
                    frameworkHandle.SendMessage(TestMessageLevel.Error, "Interpreter path does not exist: " + settings.NodeExePath);
                    return;
                }

                // All tests being run are for the same test file, so just use the first test listed to get the working dir
                var testInfo   = new NodejsTestInfo(tests.First().FullyQualifiedName);
                var workingDir = Path.GetDirectoryName(CommonUtils.GetAbsoluteFilePath(settings.WorkingDir, testInfo.ModulePath));

                var nodeVersion = Nodejs.GetNodeVersion(settings.NodeExePath);

                // We can only log telemetry when we're running in VS.
                // Since the required assemblies are not on disk if we're not running in VS, we have to reference them in a separate method
                // this way the .NET framework only tries to load the assemblies when we actually need them.
                if (app != null)
                {
                    LogTelemetry(tests.Count(), nodeVersion, runContext.IsBeingDebugged);
                }

                foreach (var test in tests)
                {
                    if (_cancelRequested.WaitOne(0))
                    {
                        break;
                    }

                    if (settings == null)
                    {
                        frameworkHandle.SendMessage(
                            TestMessageLevel.Error,
                            $"Unable to determine interpreter to use for {test.Source}.");
                        frameworkHandle.RecordEnd(test, TestOutcome.Failed);
                    }

                    var args = new List <string>();
                    args.AddRange(GetInterpreterArgs(test, workingDir, settings.ProjectRootDir));

                    // Fetch the run_tests argument for starting node.exe if not specified yet
                    if (nodeArgs.Count == 0 && args.Count > 0)
                    {
                        nodeArgs.Add(args[0]);
                    }

                    testObjects.Add(new TestCaseObject(args[1], args[2], args[3], args[4], args[5]));
                }

                if (runContext.IsBeingDebugged && app != null)
                {
                    app.GetDTE().Debugger.DetachAll();
                    // Ensure that --debug-brk is the first argument
                    nodeArgs.InsertRange(0, GetDebugArgs(out port));
                }

                _nodeProcess = ProcessOutput.Run(
                    settings.NodeExePath,
                    nodeArgs,
                    settings.WorkingDir,
                    /* env */ null,
                    /* visible */ false,
                    /* redirector */ new TestExecutionRedirector(this.ProcessTestRunnerEmit),
                    /* quote args */ false);

                if (runContext.IsBeingDebugged && app != null)
                {
                    try
                    {
                        //the '#ping=0' is a special flag to tell VS node debugger not to connect to the port,
                        //because a connection carries the consequence of setting off --debug-brk, and breakpoints will be missed.
                        var qualifierUri = string.Format("tcp://localhost:{0}#ping=0", port);
                        while (!app.AttachToProcess(_nodeProcess, NodejsRemoteDebugPortSupplierUnsecuredId, qualifierUri))
                        {
                            if (_nodeProcess.Wait(TimeSpan.FromMilliseconds(500)))
                            {
                                break;
                            }
                        }
#if DEBUG
                    }
                    catch (COMException ex)
                    {
                        frameworkHandle.SendMessage(TestMessageLevel.Error, "Error occurred connecting to debuggee.");
                        frameworkHandle.SendMessage(TestMessageLevel.Error, ex.ToString());
                        KillNodeProcess();
                    }
#else
                    } catch (COMException) {
Example #25
        public void RunTests(IEnumerable <TestCase> tests, IRunContext runContext, IFrameworkHandle frameworkHandle)
        {
            var dir = Directory.GetCurrentDirectory();

            var folder = Path.Combine(dir, "TestResults");

            Directory.CreateDirectory(folder);
            var file = Path.Combine(folder, $"{Environment.UserName}_{Environment.MachineName}_{DateTime.Now:yyyy-MM-dd_HH_mm_ss}.csv");

            using (var fs = File.OpenWrite(file))
                using (var tw = new StreamWriter(fs))
                {
                    tw.WriteCsvLine("Test Name", "Iteration", "Is Warmup", "Duration", "Iteration Status", "Run Status");

                    var logger   = new TestLogger(frameworkHandle);
                    var settings = new AdapterSettings(logger);
                    settings.Load(runContext.RunSettings.SettingsXml);
                    logger.InitSettings(settings);

                    frameworkHandle.EnableShutdownAfterTestRun = true;
                    var toRun = Convert(tests);

                    var missing = tests.Except(toRun.Select(x => x.testCase));
                    foreach (var m in missing)
                    {
                        frameworkHandle.RecordEnd(m, TestOutcome.NotFound);
                    }

                    var lifecycleEvents = TestLifeCyclesCallbacks(tests.Select(x => x.Source).Distinct().ToList());
                    var beforeAll       = lifecycleEvents.OfType <ITestLifecycleBeforeAllTests>().ToArray();
                    var afterAll        = lifecycleEvents.OfType <ITestLifecycleAfterAllTests>().ToArray();

                    // generate report details for all runs etc of all calls

                    // parallel etc in here
                    using (var globalCtx = TestContext.Start(frameworkHandle, settings))
                    {
                        foreach (var evnt in beforeAll)
                        {
                            evnt.BeforeAllTests(globalCtx).GetAwaiter().GetResult();
                        }
                    }

                    foreach (var t in toRun)
                    {
                        var testResult = new TestResult(t.testCase);
                        if (t.perfTest.Skipped)
                        {
                            testResult.Outcome = TestOutcome.Skipped;
                            frameworkHandle.RecordResult(testResult);

                            tw.WriteCsvLine(t.perfTest.Name, "-", "-", "-", "-", "Skipped");
                            continue;
                        }
                        frameworkHandle.RecordStart(t.testCase);
                        using (var context = TestContext.Start(t.perfTest, settings))
                        {
                            var sw   = Stopwatch.StartNew();
                            var task = t.perfTest.ExecuteAsync(context);

                            Task.WaitAll(task);
                            sw.Stop();
                            var result = task.Result;

                            var errors = result.Select(x => x.Error).Where(x => x != null).ToList();
                            if (errors.Any())
                            {
                                testResult.ErrorStackTrace = string.Join("\n\n-------\n\n", errors.Select(x => x.StackTrace));
                                testResult.ErrorMessage    = string.Join("\n\n-------\n\n", errors.Select(x => x.Message));

                                testResult.Outcome = TestOutcome.Failed;
                            }
                            else
                            {
                                testResult.Outcome = TestOutcome.Passed;
                            }

                            int counter = 0;
                            foreach (var r in result.Where(x => x.IsWarmup))
                            {
                                tw.WriteCsvLine(t.perfTest.Name, ++counter, r.IsWarmup, r.Duration.TotalSeconds, r.Error == null ? TestOutcome.Passed : TestOutcome.Failed, testResult.Outcome);
                            }
                            counter = 0;
                            foreach (var r in result.Where(x => !x.IsWarmup))
                            {
                                tw.WriteCsvLine(t.perfTest.Name, ++counter, r.IsWarmup, r.Duration.TotalSeconds, r.Error == null ? TestOutcome.Passed : TestOutcome.Failed, testResult.Outcome);
                            }

                            // process the results here
                            testResult.Duration = sw.Elapsed;


                            var runs    = result.Where(x => x.IsWarmup == false).Select(x => x.Duration);
                            var warmups = result.Where(x => x.IsWarmup == true).Select(x => x.Duration);

                            var mean = TimeSpanStatistics.Mean(runs);
                            var standardDeviation = TimeSpanStatistics.StandardDeviation(runs);

                            // format a table of output results here
                            var msg = $@"Warm up Count : {warmups.Count()}
Warm up Duration : {new TimeSpan(warmups.Sum(x => x.Ticks))}
Executed : {runs.Count()}
Mean Duration: {mean}
Standard Deviation Duration: {standardDeviation}
";

                            testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, msg));
                            testResult.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, context.Output));

                            foreach (var r in result.Where(x => !string.IsNullOrWhiteSpace(x.Output)))
                            {
                                testResult.Messages.Add(new TestResultMessage(TestResultMessage.AdditionalInfoCategory, r.Output));
                            }


                            frameworkHandle.RecordResult(testResult);
                        }
                    }

                    using (var globalCtx = TestContext.Start(frameworkHandle, settings))
                    {
                        foreach (var evnt in afterAll)
                        {
                            evnt.AfterAllTests(globalCtx).GetAwaiter().GetResult();
                        }
                    }
                }
        }
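The TimeSpanStatistics helper used above is not shown. A minimal sketch (an assumption, not the adapter's implementation) of Mean and population StandardDeviation over TimeSpan samples, computed on ticks:

        internal static class TimeSpanStatistics
        {
            public static TimeSpan Mean(IEnumerable<TimeSpan> samples)
            {
                var ticks = samples.Select(s => s.Ticks).ToList();
                return ticks.Count == 0 ? TimeSpan.Zero : TimeSpan.FromTicks((long)ticks.Average());
            }

            public static TimeSpan StandardDeviation(IEnumerable<TimeSpan> samples)
            {
                var ticks = samples.Select(s => (double)s.Ticks).ToList();
                if (ticks.Count == 0)
                {
                    return TimeSpan.Zero;
                }

                var mean     = ticks.Average();
                var variance = ticks.Sum(t => (t - mean) * (t - mean)) / ticks.Count;
                return TimeSpan.FromTicks((long)Math.Sqrt(variance));
            }
        }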