Example #1
        /// <summary>
        /// Attempts to match the line with each regex specified by the current state
        /// </summary>
        /// <param name="state">Current state</param>
        /// <param name="logData">Input line</param>
        /// <returns>true if a match occurs</returns>
        private bool AttemptMatch(ITestResultParserState state, LogData logData)
        {
            foreach (var regexActionPair in state.RegexesToMatch)
            {
                try
                {
                    var match = regexActionPair.Regex.Match(logData.Line);
                    if (match.Success)
                    {
                        // Reset this value on a match
                        _stateContext.LinesWithinWhichMatchIsExpected = -1;

                        _currentState = (JestParserStates)regexActionPair.MatchAction(match, _stateContext);
                        return true;
                    }
                }
                catch (RegexMatchTimeoutException)
                {
                    Logger.Warning($"JestTestResultParser : AttemptMatch : failed due to timeout while trying to match" +
                                   $" { regexActionPair.Regex.ToString() } at line {logData.LineNumber}");
                    Telemetry.AddAndAggregate("RegexTimeout",
                                              new List <string> {
                        regexActionPair.Regex.ToString()
                    }, JestTelemetryConstants.EventArea);
                }
            }

            state.PeformNoPatternMatchedAction(logData.Line, _stateContext);

            return false;
        }
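
The AttemptMatch loop above relies on each state exposing an ordered list of regex/action pairs: the first successful match wins, and its action returns the next state. Below is a minimal self-contained sketch of the shapes it consumes; the delegate signature and class layout are assumptions inferred from the usage, not the library's real definitions.

using System;
using System.Text.RegularExpressions;

// Sketch only: the real MatchAction takes an AbstractParserStateContext,
// simplified to object here to keep the sketch self-contained.
public delegate Enum MatchAction(Match match, object stateContext);

public class RegexActionPair
{
    public RegexActionPair(Regex regex, MatchAction matchAction)
    {
        Regex = regex;
        MatchAction = matchAction;
    }

    public Regex Regex { get; }
    public MatchAction MatchAction { get; }
}
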
        private Enum PassedTestsSummaryMatched(Match match, AbstractParserStateContext stateContext)
        {
            var mochaStateContext = stateContext as MochaParserStateContext;

            Logger.Info($"{ParserName} : {StateName} : Passed test summary encountered at line {mochaStateContext.CurrentLineNumber}.");

            mochaStateContext.LinesWithinWhichMatchIsExpected = 1;
            mochaStateContext.NextExpectedMatch        = "failed/pending tests summary";
            mochaStateContext.LastFailedTestCaseNumber = 0;

            // Handling parse errors is unnecessary
            var totalPassed = int.Parse(match.Groups[RegexCaptureGroups.PassedTests].Value);

            mochaStateContext.TestRun.TestRunSummary.TotalPassed = totalPassed;

            // Fire telemetry if summary does not agree with parsed tests count
            if (mochaStateContext.TestRun.TestRunSummary.TotalPassed != mochaStateContext.TestRun.PassedTests.Count)
            {
                Logger.Error($"{ParserName} : {StateName} : Passed tests count does not match passed summary" +
                             $" at line {mochaStateContext.CurrentLineNumber}");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.PassedSummaryMismatch,
                                          new List<int> { mochaStateContext.TestRun.TestRunId },
                                          MochaTelemetryConstants.EventArea);
            }

            // Extract the test run time from the passed tests summary
            ExtractTestRunTime(match, mochaStateContext);

            Logger.Info($"{ParserName} : {StateName} : Transitioned to state ExpectingTestRunSummary" +
                        $" at line {mochaStateContext.CurrentLineNumber}.");
            return MochaParserStates.ExpectingTestRunSummary;
        }
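
For reference, Mocha's spec reporter closes a run with a passed summary line such as "3 passing (25ms)". Below is a hedged sketch of a pattern with the named capture groups the handler above reads; the library's actual regex may differ.

using System;
using System.Text.RegularExpressions;

class PassedSummarySketch
{
    // Assumed shape of Mocha's passed summary line, e.g. "  3 passing (25ms)".
    // The group names mirror the RegexCaptureGroups usage in the handler above.
    private static readonly Regex PassedTestsSummary = new Regex(
        @"^\s*(?<PassedTests>\d+) passing \((?<TestRunTime>\d+)(?<TestRunTimeUnit>ms|s|m|h)\)",
        RegexOptions.Compiled, TimeSpan.FromMilliseconds(100));

    static void Main()
    {
        var match = PassedTestsSummary.Match("  3 passing (25ms)");
        if (match.Success)
        {
            // int.Parse cannot throw FormatException here: the group only matches digits.
            Console.WriteLine(int.Parse(match.Groups["PassedTests"].Value)); // prints 3
        }
    }
}
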
Example #3
        private Enum FailedTestCaseMatched(Match match, AbstractParserStateContext stateContext)
        {
            var mochaStateContext = stateContext as MochaParserStateContext;

            // If a failed test case is encountered while in the summary state it indicates either completion
            // or corruption of the summary. Since the summary is gospel to us, we ignore the latter and publish
            // the run regardless.
            AttemptPublishAndResetParser();

            // Handling parse errors is unnecessary
            var testCaseNumber = int.Parse(match.Groups[RegexCaptureGroups.FailedTestCaseNumber].Value);

            // If it was not 1 there's a good chance we read some random line as a failed test case, hence treat it
            // as a match but do not add it to our list of test cases
            if (testCaseNumber != 1)
            {
                Logger.Error($"{ParserName} : {StateName} : Expecting failed test case with" +
                             $" number {mochaStateContext.LastFailedTestCaseNumber + 1} but found {testCaseNumber} instead");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.UnexpectedFailedTestCaseNumber,
                                          new List<int> { mochaStateContext.TestRun.TestRunId },
                                          MochaTelemetryConstants.EventArea);

                return MochaParserStates.ExpectingTestResults;
            }

            // Increment either way: whether it was expected or the context was reset and the encountered number was 1
            mochaStateContext.LastFailedTestCaseNumber++;

            var testResult = PrepareTestResult(TestOutcome.Failed, match);

            mochaStateContext.TestRun.FailedTests.Add(testResult);

            return MochaParserStates.ExpectingTestResults;
        }
Example #4
        private Enum PassedTestCaseMatched(Match match, AbstractParserStateContext stateContext)
        {
            var mochaStateContext = stateContext as MochaParserStateContext;

            // If a passed test case is encountered while in the stack traces state it indicates corruption
            // or incomplete stack trace data.
            // This is a safety check for when we try to parse stack trace contents; as of now it will always evaluate to true
            if (mochaStateContext.StackTracesToExpectPostSummary != 0)
            {
                Logger.Error($"{ParserName} : {StateName} : Expecting stack traces but found passed test case instead at line {mochaStateContext.CurrentLineNumber}.");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.ExpectingStackTracesButFoundPassedTest,
                                          new List<int> { mochaStateContext.TestRun.TestRunId },
                                          MochaTelemetryConstants.EventArea);
            }

            AttemptPublishAndResetParser();

            var testResult = PrepareTestResult(TestOutcome.Passed, match);

            mochaStateContext.TestRun.PassedTests.Add(testResult);

            Logger.Info($"{ParserName} : {StateName} : Transitioned to state ExpectingTestResults " +
                        $"at line {mochaStateContext.CurrentLineNumber}.");

            return MochaParserStates.ExpectingTestResults;
        }
        /// <summary>
        /// Validate the input data
        /// </summary>
        /// <param name="data">Log line that was passed to the parser</param>
        /// <returns>True if valid</returns>
        private bool IsValidInput(string data)
        {
            if (data == null)
            {
                Logger.Error("PythonTestResultParser : IsValidInput : Received null data");
                Telemetry.AddAndAggregate(PythonTelemetryConstants.InvalidInput,
                                          new List<int> { _currentTestRunId },
                                          PythonTelemetryConstants.EventArea);
            }

            return data != null;
        }
        private bool TryParseSummaryOutcome(LogData logData)
        {
            if (_currentTestRun.TestRunSummary == null)
            {
                // This is a safety check; it must always hold because the parser only tries to parse the outcome after the test and time summary have been parsed.
                Logger.Error($"PythonTestResultParser : TryParseSummaryOutcome : TestRunSummary is null at line {logData.LineNumber}");
                Telemetry.AddAndAggregate(PythonTelemetryConstants.TestRunSummaryCorrupted,
                                          new List<int> { _currentTestRunId },
                                          PythonTelemetryConstants.EventArea);
                return false;
            }

            var resultSummaryMatch = PythonRegexes.TestOutcomeSummary.Match(logData.Line);

            if (resultSummaryMatch.Success)
            {
                var resultIdentifier = resultSummaryMatch.Groups[RegexCaptureGroups.TestOutcome].Value;

                var failureCountPatternMatch = PythonRegexes.SummaryFailure.Match(resultIdentifier);
                if (failureCountPatternMatch.Success)
                {
                    _currentTestRun.TestRunSummary.TotalFailed = int.Parse(failureCountPatternMatch.Groups[RegexCaptureGroups.FailedTests].Value);
                }

                // TODO: We should have a separate bucket for errors
                var errorCountPatternMatch = PythonRegexes.SummaryErrors.Match(resultIdentifier);
                if (errorCountPatternMatch.Success)
                {
                    _currentTestRun.TestRunSummary.TotalFailed += int.Parse(errorCountPatternMatch.Groups[RegexCaptureGroups.Errors].Value);
                }

                var skippedCountPatternMatch = PythonRegexes.SummarySkipped.Match(resultIdentifier);
                if (skippedCountPatternMatch.Success)
                {
                    _currentTestRun.TestRunSummary.TotalSkipped = int.Parse(skippedCountPatternMatch.Groups[RegexCaptureGroups.SkippedTests].Value);
                }

                // Since total passed count is not available, calculate the count based on available statistics.
                _currentTestRun.TestRunSummary.TotalPassed = _currentTestRun.TestRunSummary.TotalTests - (_currentTestRun.TestRunSummary.TotalFailed + _currentTestRun.TestRunSummary.TotalSkipped);
                return true;
            }

            Logger.Error($"PythonTestResultParser : TryParseSummaryOutcome : Expected match for SummaryTestOutcome was not found at line {logData.LineNumber}");
            Telemetry.AddAndAggregate(PythonTelemetryConstants.TestOutcomeSummaryNotFound,
                                      new List<int> { _currentTestRunId },
                                      PythonTelemetryConstants.EventArea);
            return false;
        }
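
Python's unittest prints a final verdict such as "OK", "OK (skipped=2)", or "FAILED (failures=1, errors=2, skipped=3)". Below is a self-contained sketch of pulling the per-outcome counts the way TryParseSummaryOutcome does; the patterns are assumptions, not the library's actual regexes.

using System;
using System.Text.RegularExpressions;

class OutcomeSummarySketch
{
    static void Main()
    {
        // Typical unittest verdict line; the parenthesised counters are optional.
        const string line = "FAILED (failures=1, errors=2, skipped=3)";

        int failed  = Capture(line, @"failures=(?<FailedTests>\d+)", "FailedTests");
        int errors  = Capture(line, @"errors=(?<Errors>\d+)", "Errors");
        int skipped = Capture(line, @"skipped=(?<SkippedTests>\d+)", "SkippedTests");

        // Errors are folded into the failed bucket, as TryParseSummaryOutcome does above.
        Console.WriteLine($"failed={failed + errors}, skipped={skipped}"); // failed=3, skipped=3
    }

    private static int Capture(string input, string pattern, string group)
    {
        var match = Regex.Match(input, pattern);
        return match.Success ? int.Parse(match.Groups[group].Value) : 0;
    }
}
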
Example #7
        private Enum TestRunStartMatched(Match match, AbstractParserStateContext stateContext)
        {
            var jestStateContext = stateContext as JestParserStateContext;

            Logger.Error($"{ParserName} : {StateName} : Transitioned to state ExpectingTestResults" +
                         $" at line {jestStateContext.CurrentLineNumber} as test run start indicator was encountered before encountering" +
                         $" the full summary.");
            Telemetry.AddAndAggregate(JestTelemetryConstants.UnexpectedTestRunStart,
                                      new List<int> { jestStateContext.TestRun.TestRunId },
                                      JestTelemetryConstants.EventArea);

            AttemptPublishAndResetParser();

            return JestParserStates.ExpectingTestResults;
        }
        private Enum FailedTestCaseMatched(Match match, AbstractParserStateContext stateContext)
        {
            var mochaStateContext = stateContext as MochaParserStateContext;

            // Handling parse errors is unnecessary
            var testCaseNumber = int.Parse(match.Groups[RegexCaptureGroups.FailedTestCaseNumber].Value);

            // In the event the failed test case number does not match the expected test case number, log an error
            if (testCaseNumber != mochaStateContext.LastFailedTestCaseNumber + 1)
            {
                Logger.Error($"{ParserName} : {StateName} : Expecting failed test case with" +
                             $" number {mochaStateContext.LastFailedTestCaseNumber + 1} but found {testCaseNumber} instead");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.UnexpectedFailedTestCaseNumber,
                                          new List<int> { mochaStateContext.TestRun.TestRunId },
                                          MochaTelemetryConstants.EventArea);

                // If it was not 1 there's a good chance we read some random line as a failed test case, hence treat it
                // as a match but do not add it to our list of test cases
                if (testCaseNumber != 1)
                {
                    return MochaParserStates.ExpectingTestResults;
                }

                // If the number was 1 then there's a good chance this is the beginning of the next test run, hence reset and start over
                // This is something we might choose to change if we realize there is a chance we can get such false detections often in the middle of a run
                AttemptPublishAndResetParser();
            }

            // Increment either way: whether it was expected or the context was reset and the encountered number was 1
            mochaStateContext.LastFailedTestCaseNumber++;

            var testResult = PrepareTestResult(TestOutcome.Failed, match);

            mochaStateContext.TestRun.FailedTests.Add(testResult);

            return MochaParserStates.ExpectingTestResults;
        }
        /// <summary>
        /// Parses input data to detect python test result.
        /// </summary>
        /// <param name="logData">Data to be parsed.</param>
        public override void Parse(LogData logData)
        {
            // Validate data input
            if (!IsValidInput(logData.Line))
            {
                return;
            }

            // TODO: Fix an appropriate threshold based on performance on hosted machine with load
            using (var timer = new SimpleTimer("PythonParserParseOperation", PythonTelemetryConstants.EventArea,
                                               PythonTelemetryConstants.PythonParserTotalTime, logData.LineNumber, Logger, Telemetry, ParseOperationPermissibleThreshold))
            {
                try
                {
                    Telemetry.AddOrUpdate(PythonTelemetryConstants.TotalLinesParsed,
                                          logData.LineNumber, PythonTelemetryConstants.EventArea);

                    switch (_state)
                    {
                    case ParserState.ExpectingSummary:

                        if (string.IsNullOrWhiteSpace(logData.Line))
                        {
                            return;
                        }

                        // Summary test count and total time should have already been parsed
                        // Try to parse the test outcome and the number of tests for each outcome
                        if (TryParseSummaryOutcome(logData))
                        {
                            PublishAndReset(logData);
                            return;
                        }

                        // Summary was not parsed, reset the parser and try to parse again.
                        Reset(logData);
                        Parse(logData);
                        break;

                    case ParserState.ExpectingFailedResults:

                        // Try to parse for failed results and summary
                        // If summary is parsed, change the state
                        if (TryParseForFailedResult(logData))
                        {
                            _stackTraceLinesAllowedToParse = 50;
                            return;
                        }

                        if (TryParseSummaryTestAndTime(logData))
                        {
                            _state = ParserState.ExpectingSummary;
                            Logger.Info($"PythonTestResultParser : ExpectingFailedResults: transitioned to state ExpectingSummary at line {logData.LineNumber}");
                            return;
                        }

                        // Not expected, as Summary has not been encountered yet
                        // If a new TestResult is found, reset the parser and Parse again
                        if (TryParseTestResult(logData))
                        {
                            Logger.Error($"PythonTestResultParser : Parse : Expecting failed result or summary but found new test result at line {logData.LineNumber}.");
                            Telemetry.AddAndAggregate(PythonTelemetryConstants.SummaryOrFailedTestsNotFound,
                                                      new List<int> { _currentTestRunId },
                                                      PythonTelemetryConstants.EventArea);
                            Reset(logData);
                            Parse(logData);
                        }

                        TryParseStackTrace(logData);

                        break;

                    case ParserState.ExpectingTestResults:

                    default:

                        if (TryParseTestResult(logData))
                        {
                            return;
                        }

                        // Change the state and clear the partial result if failed result or summary is found
                        if (TryParseForFailedResult(logData))
                        {
                            _partialTestResult             = null;
                            _state                         = ParserState.ExpectingFailedResults;
                            _stackTraceLinesAllowedToParse = 50;
                            Logger.Info($"PythonTestResultParser : ExpectingTestResults: transitioned to state ExpectingFailedResults at line {logData.LineNumber}");

                            return;
                        }

                        if (TryParseSummaryTestAndTime(logData))
                        {
                            _partialTestResult = null;
                            _state             = ParserState.ExpectingSummary;
                            Logger.Info($"PythonTestResultParser : ExpectingTestResults: transitioned to state ExpectingSummary at line {logData.LineNumber}");
                            return;
                        }

                        break;
                    }
                }
                catch (RegexMatchTimeoutException regexMatchTimeoutException)
                {
                    Logger.Warning($"PythonTestResultParser : Parse : failed due to timeout with exception { regexMatchTimeoutException } at line {logData.LineNumber}");
                    Telemetry.AddAndAggregate(PythonTelemetryConstants.RegexTimeout,
                                              new List <string> {
                        "UnknownRegex"
                    }, PythonTelemetryConstants.EventArea);
                }
                catch (Exception ex)
                {
                    Logger.Error($"PythonTestResultParser : Parse : Unable to parse the log line {logData.Line} with exception {ex.ToString()} at line {logData.LineNumber}");
                    Telemetry.AddAndAggregate(PythonTelemetryConstants.ParseException,
                                              new List <string> {
                        ex.Message
                    }, PythonTelemetryConstants.EventArea);

                    Reset(logData);

                    // Rethrow the exception so that the invoker of Parser is notified of a failure
                    throw;
                }
            }
        }
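
The SimpleTimer wrapping each Parse call above appears to be a disposable stopwatch that flags parse operations exceeding a permissible threshold. Below is a hedged, self-contained sketch of that pattern; the real SimpleTimer takes more collaborators (telemetry, logger, line number) and is not this implementation.

using System;
using System.Diagnostics;

// Sketch only: an IDisposable stopwatch that warns when the timed
// operation runs past a permissible threshold.
public sealed class SimpleTimerSketch : IDisposable
{
    private readonly Stopwatch _stopwatch = Stopwatch.StartNew();
    private readonly string _name;
    private readonly TimeSpan _threshold;

    public SimpleTimerSketch(string name, TimeSpan threshold)
    {
        _name = name;
        _threshold = threshold;
    }

    public void Dispose()
    {
        _stopwatch.Stop();
        if (_stopwatch.Elapsed > _threshold)
        {
            Console.WriteLine($"{_name} exceeded the permissible threshold: took {_stopwatch.Elapsed}.");
        }
    }
}
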
Example #10
        private Enum FailedTestCaseMatched(Match match, AbstractParserStateContext stateContext)
        {
            var mochaStateContext = stateContext as MochaParserStateContext;

            // Handling parse errors is unnecessary
            var testCaseNumber = int.Parse(match.Groups[RegexCaptureGroups.FailedTestCaseNumber].Value);

            // In the event the failed test case number does not match the expected test case number, log an error
            if (testCaseNumber != mochaStateContext.LastFailedTestCaseNumber + 1)
            {
                Logger.Error($"{ParserName} : {StateName} : Expecting stack trace with" +
                             $" number {mochaStateContext.LastFailedTestCaseNumber + 1} but found {testCaseNumber} instead");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.UnexpectedFailedStackTraceNumber,
                                          new List<int> { mochaStateContext.TestRun.TestRunId },
                                          MochaTelemetryConstants.EventArea);

                // If it was not 1 there's a good chance we read some random line as a failed test case, hence treat it
                // as a match but do not consider it a valid stack trace
                if (testCaseNumber != 1)
                {
                    // If we are parsing stack traces then we should not return this as
                    // a successful match. If we do so then stack trace addition will not
                    // happen for the current line
                    return MochaParserStates.ExpectingStackTraces;
                }

                Telemetry.AddAndAggregate(MochaTelemetryConstants.AttemptPublishAndResetParser,
                                          new List<string> { $"Expecting stack trace with number {mochaStateContext.LastFailedTestCaseNumber} but found {testCaseNumber} instead" },
                                          MochaTelemetryConstants.EventArea);

                // If the number was 1 then there's a good chance this is the beginning of the next test run, hence reset and start over
                AttemptPublishAndResetParser();

                mochaStateContext.LastFailedTestCaseNumber++;

                var testResult = PrepareTestResult(TestOutcome.Failed, match);
                mochaStateContext.TestRun.FailedTests.Add(testResult);

                Logger.Info($"{ParserName} : {StateName} : Transitioned to state ExpectingTestResults " +
                            $"at line {mochaStateContext.CurrentLineNumber}.");

                return MochaParserStates.ExpectingTestResults;
            }

            mochaStateContext.LastFailedTestCaseNumber++;

            // Only add the stack trace if a failed test had been encountered
            if (mochaStateContext.CurrentStackTraceIndex < mochaStateContext.TestRun.FailedTests.Count)
            {
                // Consider matching the name of the test in the stack trace with what was parsed earlier
                // Suite name is also available. Should we use it for reporting?
                mochaStateContext.TestRun.FailedTests[mochaStateContext.CurrentStackTraceIndex].StackTrace = match.Value;
            }

            // Expect the stack trace to not be more than 50 lines long
            // This is to ensure we don't skip publishing the run if the stack traces appear corrupted
            mochaStateContext.LinesWithinWhichMatchIsExpected = 50;

            mochaStateContext.StackTracesToExpectPostSummary--;

            if (mochaStateContext.StackTracesToExpectPostSummary == 0)
            {
                AttemptPublishAndResetParser();
                return MochaParserStates.ExpectingTestResults;
            }

            return MochaParserStates.ExpectingStackTraces;
        }
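
LinesWithinWhichMatchIsExpected acts as a dead-man's switch: handlers arm it (50 lines here, 1 line in the passed-summary handler earlier) and the parser presumably counts it down once per log line. Below is a hypothetical sketch of that enforcement, assuming a per-line hook in the parser; the real logic lives in the library's shared parsing loop and may differ.

        // Hypothetical sketch: called once per line before attempting matches.
        // -1 means no deadline is armed; 50 means a match must occur within 50 lines.
        private bool MatchDeadlineExpired()
        {
            if (_stateContext.LinesWithinWhichMatchIsExpected < 0)
            {
                return false; // nothing armed
            }

            if (--_stateContext.LinesWithinWhichMatchIsExpected > 0)
            {
                return false; // still within the window
            }

            // Deadline hit: publish whatever was gathered and start over instead of
            // waiting indefinitely for a match that is not coming.
            Logger.Error($"MochaTestResultParser : Expected {_stateContext.NextExpectedMatch} but did not find it within the allowed window.");
            AttemptPublishAndResetParser();
            return true;
        }
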
Example #11
        /// <inheritdoc/>
        public override void Parse(LogData logData)
        {
            if (logData == null || logData.Line == null)
            {
                Logger.Error("JestTestResultParser : Parse : Input line was null.");
                return;
            }

            // TODO: Fix an appropriate threshold based on performance on hosted machine with load
            using (var timer = new SimpleTimer("JestParserParseOperation", JestTelemetryConstants.EventArea,
                                               JestTelemetryConstants.JestParserTotalTime, logData.LineNumber, Logger, Telemetry, ParseOperationPermissibleThreshold))
            {
                try
                {
                    _stateContext.CurrentLineNumber = logData.LineNumber;
                    Telemetry.AddOrUpdate(JestTelemetryConstants.TotalLinesParsed, logData.LineNumber, JestTelemetryConstants.EventArea);

                    // State model for the jest parser that defines the regexes to match against in each state
                    // Each state re-orders the regexes based on the frequency of expected matches
                    switch (_currentState)
                    {
                    // This state primarily looks for test run start indicator and
                    // transitions to the next one after encountering one
                    case JestParserStates.ExpectingTestRunStart:

                        if (AttemptMatch(TestRunStart, logData))
                        {
                            return;
                        }
                        break;

                    // This state primarily looks for test results and transitions
                    // to the next one after a stack trace or summary is encountered
                    case JestParserStates.ExpectingTestResults:

                        if (AttemptMatch(ExpectingTestResults, logData))
                        {
                            return;
                        }
                        break;

                    // This state primarily looks for stack traces/failed test cases
                    // and transitions on encountering summary
                    case JestParserStates.ExpectingStackTraces:

                        if (AttemptMatch(ExpectingStackTraces, logData))
                        {
                            return;
                        }
                        break;

                    // This state primarily looks for the test run summary
                    // and transitions back to the test results state on encountering
                    // another test run start marker, indicating tests being run from
                    // more than one file
                    case JestParserStates.ExpectingTestRunSummary:

                        if (AttemptMatch(ExpectingTestRunSummary, logData))
                        {
                            return;
                        }
                        break;
                    }
                }
                catch (Exception e)
                {
                    Logger.Error($"JestTestResultParser : Parse : Failed with exception {e}.");

                    // This might start taking a lot of space if each and every parse operation starts throwing
                    // But if that happens then there's a lot more stuff broken.
                    Telemetry.AddAndAggregate(JestTelemetryConstants.Exceptions,
                                              new List<string> { e.Message },
                                              JestTelemetryConstants.EventArea);

                    // Rethrowing this so that the plugin is aware that the parser is erroring out
                    // Ideally this should never happen
                    throw;
                }
            }
        }
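
Below is a hedged sketch of how a consumer might drive one of these parsers. AbstractTestResultParser as a common base type and LogData's settable Line/LineNumber properties are assumptions inferred from the snippets above, not confirmed API.

        // Hypothetical driver: stream build output through a parser line by line.
        // Assumes System.Collections.Generic is imported for IEnumerable<string>.
        public static void FeedLogToParser(AbstractTestResultParser parser, IEnumerable<string> lines)
        {
            var lineNumber = 0;
            foreach (var line in lines)
            {
                parser.Parse(new LogData
                {
                    Line = line,
                    LineNumber = ++lineNumber
                });
            }
        }
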
Example #12
        /// <summary>
        /// Publishes the run and resets the parser by resetting the state context and current state
        /// </summary>
        private void AttemptPublishAndResetParser()
        {
            Logger.Info($"JestTestResultParser : Resetting the parser and attempting to publish the test run at line {_stateContext.CurrentLineNumber}.");
            var testRunToPublish = _stateContext.TestRun;

            // We have encountered passed test cases but no passed summary was encountered
            if (testRunToPublish.PassedTests.Count != 0 && testRunToPublish.TestRunSummary.TotalPassed == 0)
            {
                Logger.Error("JestTestResultParser : Passed tests were encountered but no passed summary was encountered.");
                Telemetry.AddAndAggregate(JestTelemetryConstants.PassedTestCasesFoundButNoPassedSummary,
                                          new List<int> { _stateContext.TestRun.TestRunId },
                                          JestTelemetryConstants.EventArea);
            }
            else if (_stateContext.VerboseOptionEnabled && testRunToPublish.TestRunSummary.TotalPassed != testRunToPublish.PassedTests.Count)
            {
                // If the encountered passed tests count does not match the summary, fire telemetry
                Logger.Error($"JestTestResultParser : Passed tests count does not match passed summary" +
                             $" at line {_stateContext.CurrentLineNumber}");
                Telemetry.AddAndAggregate(JestTelemetryConstants.PassedSummaryMismatch,
                                          new List<int> { testRunToPublish.TestRunId },
                                          JestTelemetryConstants.EventArea);
            }

            // We have encountered failed test cases but no failed summary was encountered
            if (testRunToPublish.FailedTests.Count != 0 && testRunToPublish.TestRunSummary.TotalFailed == 0)
            {
                Logger.Error("JestTestResultParser : Failed tests were encountered but no failed summary was encountered.");
                Telemetry.AddAndAggregate(JestTelemetryConstants.FailedTestCasesFoundButNoFailedSummary,
                                          new List<int> { _stateContext.TestRun.TestRunId },
                                          JestTelemetryConstants.EventArea);
            }
            else if (testRunToPublish.TestRunSummary.TotalFailed != testRunToPublish.FailedTests.Count)
            {
                // If the encountered failed tests count does not match the summary, fire telemetry
                Logger.Error($"JestTestResultParser : Failed tests count does not match failed summary" +
                             $" at line {_stateContext.CurrentLineNumber}");
                Telemetry.AddAndAggregate(JestTelemetryConstants.FailedSummaryMismatch,
                                          new List<int> { testRunToPublish.TestRunId },
                                          JestTelemetryConstants.EventArea);
            }

            // Ensure some summary data was detected before attempting a publish, i.e. check that the state is not the test results state
            switch (_currentState)
            {
            case JestParserStates.ExpectingTestRunStart:

                Logger.Error("JestTestResultParser : Skipping publish as no test cases or summary has been encountered.");

                break;

            case JestParserStates.ExpectingTestResults:

            case JestParserStates.ExpectingStackTraces:

                if (testRunToPublish.PassedTests.Count != 0 ||
                    testRunToPublish.FailedTests.Count != 0 ||
                    testRunToPublish.SkippedTests.Count != 0)
                {
                    Logger.Error("JestTestResultParser : Skipping publish as testcases were encountered but no summary was encountered.");
                    Telemetry.AddAndAggregate(JestTelemetryConstants.TestCasesFoundButNoSummary,
                                              new List <int> {
                        _stateContext.TestRun.TestRunId
                    }, JestTelemetryConstants.EventArea);
                }

                break;

            case JestParserStates.ExpectingTestRunSummary:

                if (testRunToPublish.TestRunSummary.TotalTests == 0)
                {
                    Logger.Error("JestTestResultParser : Skipping publish as total tests was 0.");
                    Telemetry.AddAndAggregate(JestTelemetryConstants.TotalTestsZero,
                                              new List<int> { _stateContext.TestRun.TestRunId },
                                              JestTelemetryConstants.EventArea);
                    break;
                }

                if (testRunToPublish.TestRunSummary.TotalExecutionTime.TotalMilliseconds == 0)
                {
                    Logger.Error("JestTestResultParser : Total test run time was 0 or not encountered.");
                    Telemetry.AddAndAggregate(JestTelemetryConstants.TotalTestRunTimeZero,
                                              new List<int> { _stateContext.TestRun.TestRunId },
                                              JestTelemetryConstants.EventArea);
                }

                // Trim the stack traces of extra newlines etc.
                foreach (var failedTest in testRunToPublish.FailedTests)
                {
                    if (failedTest.StackTrace != null)
                    {
                        failedTest.StackTrace = failedTest.StackTrace.TrimEnd();
                    }
                }

                // Only publish if total tests was not zero
                TestRunManager.PublishAsync(testRunToPublish);

                break;
            }

            ResetParser();
        }
Example #13
        /// <inheritdoc/>
        public override void Parse(LogData logData)
        {
            if (logData == null || logData.Line == null)
            {
                Logger.Error("MochaTestResultParser : Parse : Input line was null.");
                return;
            }

            // TODO: Fix an appropriate threshold based on performance on hosted machine with load
            using (var timer = new SimpleTimer("MochaParserParseOperation", MochaTelemetryConstants.EventArea,
                                               MochaTelemetryConstants.MochaParserTotalTime, logData.LineNumber, Logger, Telemetry, ParseOperationPermissibleThreshold))
            {
                try
                {
                    _stateContext.CurrentLineNumber = logData.LineNumber;
                    Telemetry.AddOrUpdate(MochaTelemetryConstants.TotalLinesParsed, logData.LineNumber, MochaTelemetryConstants.EventArea);

                    // State model for the mocha parser that defines the regexes to match against in each state
                    // Each state re-orders the regexes based on the frequency of expected matches
                    switch (_currentState)
                    {
                    // This state primarily looks for test results
                    // and transitions to the next one after a line of summary is encountered
                    case MochaParserStates.ExpectingTestResults:

                        if (AttemptMatch(ExpectingTestResults, logData))
                        {
                            return;
                        }
                        break;

                    // This state primarily looks for test run summary
                    // If failed tests were found to be present transitions to the next one to look for stack traces
                    // else goes back to the first state after publishing the run
                    case MochaParserStates.ExpectingTestRunSummary:

                        if (AttemptMatch(ExpectingTestRunSummary, logData))
                        {
                            return;
                        }
                        break;

                    // This state primarily looks for stack traces
                    // If any other match occurs before all the expected stack traces are found,
                    // it fires telemetry for unexpected behavior but moves on to the next test run
                    case MochaParserStates.ExpectingStackTraces:

                        if (AttemptMatch(ExpectingStackTraces, logData))
                        {
                            return;
                        }
                        break;
                    }
                }
                catch (Exception e)
                {
                    Logger.Error($"MochaTestResultParser : Parse : Failed with exception {e}.");

                    // This might start taking a lot of space if each and every parse operation starts throwing
                    // But if that happens then there's a lot more stuff broken.
                    Telemetry.AddAndAggregate("Exceptions", new List <string> {
                        e.Message
                    }, MochaTelemetryConstants.EventArea);

                    // Rethrowing this so that the plugin is aware that the parser is erroring out
                    // Ideally this should never happen
                    throw;
                }
            }
        }
Example #14
        /// <summary>
        /// Publishes the run and resets the parser by resetting the state context and current state
        /// </summary>
        private void AttemptPublishAndResetParser()
        {
            Logger.Info($"MochaTestResultParser : Resetting the parser and attempting to publish the test run at line {_stateContext.CurrentLineNumber}.");
            var testRunToPublish = _stateContext.TestRun;

            // We have encountered failed test cases but no failed summary was encountered
            if (testRunToPublish.FailedTests.Count != 0 && testRunToPublish.TestRunSummary.TotalFailed == 0)
            {
                Logger.Error("MochaTestResultParser : Failed tests were encountered but no failed summary was encountered.");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.FailedTestCasesFoundButNoFailedSummary,
                                          new List<int> { _stateContext.TestRun.TestRunId },
                                          MochaTelemetryConstants.EventArea);
            }
            else if (testRunToPublish.TestRunSummary.TotalFailed != testRunToPublish.FailedTests.Count)
            {
                // If the encountered failed tests count does not match the summary, fire telemetry
                Logger.Error($"MochaTestResultParser : Failed tests count does not match failed summary" +
                             $" at line {_stateContext.CurrentLineNumber}");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.FailedSummaryMismatch,
                                          new List<int> { testRunToPublish.TestRunId },
                                          MochaTelemetryConstants.EventArea);
            }

            // We have encountered pending test cases but no pending summary was encountered
            if (testRunToPublish.SkippedTests.Count != 0 && testRunToPublish.TestRunSummary.TotalSkipped == 0)
            {
                Logger.Error("MochaTestResultParser : Skipped tests were encountered but no skipped summary was encountered.");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.PendingTestCasesFoundButNoFailedSummary,
                                          new List<int> { _stateContext.TestRun.TestRunId },
                                          MochaTelemetryConstants.EventArea);
            }
            else if (testRunToPublish.TestRunSummary.TotalSkipped != testRunToPublish.SkippedTests.Count)
            {
                // If the encountered skipped tests count does not match the summary, fire telemetry
                Logger.Error($"MochaTestResultParser : Pending tests count does not match pending summary" +
                             $" at line {_stateContext.CurrentLineNumber}");
                Telemetry.AddAndAggregate(MochaTelemetryConstants.PendingSummaryMismatch,
                                          new List<int> { testRunToPublish.TestRunId },
                                          MochaTelemetryConstants.EventArea);
            }

            // Ensure some summary data was detected before attempting a publish, i.e. check that the state is not the test results state
            switch (_currentState)
            {
            case MochaParserStates.ExpectingTestResults:
                if (testRunToPublish.PassedTests.Count != 0 ||
                    testRunToPublish.FailedTests.Count != 0 ||
                    testRunToPublish.SkippedTests.Count != 0)
                {
                    Logger.Error("MochaTestResultParser : Skipping publish as testcases were encountered but no summary was encountered.");
                    Telemetry.AddAndAggregate(MochaTelemetryConstants.PassedTestCasesFoundButNoPassedSummary,
                                              new List <int> {
                        _stateContext.TestRun.TestRunId
                    }, MochaTelemetryConstants.EventArea);
                }
                break;

            default:
                // Publish the test run if reset and publish was called from any state other than the test results state

                // Calculate total tests
                testRunToPublish.TestRunSummary.TotalTests =
                    testRunToPublish.TestRunSummary.TotalPassed +
                    testRunToPublish.TestRunSummary.TotalFailed +
                    testRunToPublish.TestRunSummary.TotalSkipped;

                // Trim the stack traces of extra newlines etc.
                foreach (var failedTest in testRunToPublish.FailedTests)
                {
                    if (failedTest.StackTrace != null)
                    {
                        failedTest.StackTrace = failedTest.StackTrace.TrimEnd();
                    }
                }

                TestRunManager.PublishAsync(testRunToPublish);
                break;
            }

            ResetParser();
        }
Example #15
        private Enum FailedOrPendingTestCaseMatched(Match match, AbstractParserStateContext stateContext)
        {
            var jasmineStateContext = stateContext as JasmineParserStateContext;
            var testCaseNumber      = int.Parse(match.Groups[RegexCaptureGroups.FailedTestCaseNumber].Value);

            // Set this to -1 by default; if a genuine stack trace is encountered, the actual index will be set.
            jasmineStateContext.CurrentStackTraceIndex = -1;

            // If it is a failed test case, FailureStarterMatched is true
            if (jasmineStateContext.FailureStarterMatched)
            {
                if (testCaseNumber != jasmineStateContext.LastFailedTestCaseNumber + 1)
                {
                    // There's a good chance we read some random line as a failed test case, hence treat it
                    // as a match but do not add it to our list of test cases

                    Logger.Error($"{ParserName} : {StateName} : Expecting failed test case with" +
                                 $" number {jasmineStateContext.LastFailedTestCaseNumber + 1} but found {testCaseNumber} instead");
                    Telemetry.AddAndAggregate(JasmineTelemetryConstants.UnexpectedFailedTestCaseNumber,
                                              new List<int> { jasmineStateContext.TestRun.TestRunId },
                                              JasmineTelemetryConstants.EventArea);

                    return JasmineParserStates.ExpectingTestResults;
                }

                // Increment
                jasmineStateContext.LastFailedTestCaseNumber++;

                var failedTestResult = PrepareTestResult(TestOutcome.Failed, match);
                jasmineStateContext.TestRun.FailedTests.Add(failedTestResult);

                jasmineStateContext.CurrentStackTraceIndex = jasmineStateContext.TestRun.FailedTests.Count - 1;

                // Expect the stack trace to not be more than 50 lines long
                // This is to ensure we don't skip publishing the run if the stack traces appear corrupted
                jasmineStateContext.LinesWithinWhichMatchIsExpected = 50;
                jasmineStateContext.NextExpectedMatch = "next failed test case or pending test cases start or test run summary";
                jasmineStateContext.TestRun.FailedTests[jasmineStateContext.CurrentStackTraceIndex].StackTrace = match.Value;

                return JasmineParserStates.ExpectingTestResults;
            }

            // If it is a pending test case, PendingStarterMatched is true
            if (jasmineStateContext.PendingStarterMatched)
            {
                if (testCaseNumber != jasmineStateContext.LastPendingTestCaseNumber + 1)
                {
                    // There's a good chance we read some random line as a pending test case, hence treat it
                    // as a match but do not add it to our list of test cases

                    Logger.Error($"{ParserName} : {StateName} : Expecting pending test case with" +
                                 $" number {jasmineStateContext.LastPendingTestCaseNumber + 1} but found {testCaseNumber} instead");
                    Telemetry.AddAndAggregate(JasmineTelemetryConstants.UnexpectedPendingTestCaseNumber,
                                              new List<int> { jasmineStateContext.TestRun.TestRunId },
                                              JasmineTelemetryConstants.EventArea);

                    return JasmineParserStates.ExpectingTestResults;
                }

                // Increment
                jasmineStateContext.LastPendingTestCaseNumber++;

                var skippedTestResult = PrepareTestResult(TestOutcome.NotExecuted, match);
                jasmineStateContext.TestRun.SkippedTests.Add(skippedTestResult);

                return JasmineParserStates.ExpectingTestResults;
            }

            // If neither starter matched, this must be a random line; log an error and fire telemetry
            Logger.Error($"{ParserName} : {StateName} : Expecting failed/pending test case" +
                         $" but encountered test case number {testCaseNumber} without encountering a failed/pending starter.");
            Telemetry.AddAndAggregate(JasmineTelemetryConstants.FailedPendingTestCaseWithoutStarterMatch,
                                      new List<int> { jasmineStateContext.TestRun.TestRunId },
                                      JasmineTelemetryConstants.EventArea);

            return JasmineParserStates.ExpectingTestResults;
        }
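
For context, the failed and pending starters this handler depends on correspond to the "Failures:" and "Pending:" sections of Jasmine's default console reporter, whose numbered entries look roughly like this (layout from memory, trimmed):

Failures:
1) Calculator add returns the sum of its arguments
  Message:
    Expected 3 to equal 4.

Pending:
1) Calculator divide is not implemented yet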