Example #1
0
        /// <summary>
        /// Parses the given test result files and publishes the resulting test run data
        /// (run level attachments and build level attachments in parallel), then computes
        /// whether the run failed, optionally re-evaluating failures via the flaky-aware service.
        /// </summary>
        /// <param name="runContext">Context of the test run being published.</param>
        /// <param name="testResultFiles">Paths of result files to parse and publish.</param>
        /// <param name="publishOptions">Options controlling how results are published.</param>
        /// <param name="cancellationToken">Token used to cancel the publish operation.</param>
        /// <returns>
        /// True when the published run contains failures (after any flaky-aware re-evaluation);
        /// false otherwise, and false on any publish error (which is logged as a warning).
        /// </returns>
        public async Task<bool> PublishAsync(TestRunContext runContext, List<string> testResultFiles, PublishOptions publishOptions, CancellationToken cancellationToken = default(CancellationToken))
        {
            try
            {
                TestDataProvider testDataProvider = ParseTestResultsFile(runContext, testResultFiles);

                if (testDataProvider != null)
                {
                    var testRunData = testDataProvider.GetTestRunData();

                    // Publish run level and build level attachments concurrently.
                    Task<IList<TestRun>> publishTestRunDataTask = Task.Run(() => _testRunPublisher.PublishTestRunDataAsync(runContext, _projectName, testRunData, publishOptions, cancellationToken));
                    Task uploadBuildDataAttachmentTask = Task.Run(() => UploadBuildDataAttachment(runContext, testDataProvider.GetBuildData(), cancellationToken));

                    await Task.WhenAll(publishTestRunDataTask, uploadBuildDataAttachmentTask);

                    // Completed by the WhenAll above; await is the idiomatic way to unwrap the result.
                    IList<TestRun> publishedRuns = await publishTestRunDataTask;

                    _calculateTestRunSummary = _featureFlagService.GetFeatureFlagState(TestResultsConstants.CalculateTestRunSummaryFeatureFlag, TestResultsConstants.TFSServiceInstanceGuid);

                    var isTestRunOutcomeFailed = GetTestRunOutcome(_executionContext, testRunData, out TestRunSummary testRunSummary);

                    // Storing testrun summary in environment variable, which will be read by PublishPipelineMetadataTask and publish to evidence store.
                    if (_calculateTestRunSummary)
                    {
                        TestResultUtils.StoreTestRunSummaryInEnvVar(_executionContext, testRunSummary, _testRunner, "PublishTestResults");
                    }

                    // Check failed results for flaky aware.
                    // Fallback to flaky aware if there are any failures.
                    bool isFlakyCheckEnabled = _featureFlagService.GetFeatureFlagState(TestResultsConstants.EnableFlakyCheckInAgentFeatureFlag, TestResultsConstants.TCMServiceInstanceGuid);

                    if (isTestRunOutcomeFailed && isFlakyCheckEnabled)
                    {
                        var runOutcome = _testRunPublisherHelper.CheckRunsForFlaky(publishedRuns, _projectName);
                        // HasValue already implies non-null for a Nullable<bool>; the extra
                        // `!= null` comparison was redundant.
                        if (runOutcome.HasValue)
                        {
                            isTestRunOutcomeFailed = runOutcome.Value;
                        }
                    }

                    return isTestRunOutcomeFailed;
                }

                return false;
            }
            catch (Exception ex)
            {
                // Publishing results must never fail the task; surface the failure as a warning.
                _executionContext.Warning("Failed to publish test run data: " + ex.ToString());
            }
            return false;
        }
Example #2
0
 /// <summary>
 /// Wires up the state required before results can be published: captures the execution
 /// context, project and runner, resolves the matching result reader, initializes the
 /// test run publisher against the target connection, and caches the run-summary flag.
 /// </summary>
 public void InitializePublisher(IExecutionContext context, string projectName, VssConnection connection, string testRunner, bool publishRunLevelAttachments)
 {
     Trace.Entering();

     _executionContext = context;
     _projectName = projectName;
     _testRunner = testRunner;

     // The reader depends on the runner name captured just above.
     _resultReader = GetTestResultReader(_testRunner, publishRunLevelAttachments);

     _testRunPublisher = HostContext.GetService<ITestRunPublisher>();
     _featureFlagService = HostContext.GetService<IFeatureFlagService>();
     _testRunPublisher.InitializePublisher(_executionContext, connection, projectName, _resultReader);

     _calculateTestRunSummary = _featureFlagService.GetFeatureFlagState(TestResultsConstants.CalculateTestRunSummaryFeatureFlag, TestResultsConstants.TFSServiceInstanceGuid);

     Trace.Leaving();
 }
        /// <summary>
        /// Parses the supplied result files and publishes the run data and build level
        /// attachments concurrently, storing the run summary in an environment variable
        /// when the corresponding feature flag is enabled.
        /// </summary>
        /// <param name="runContext">Context of the test run being published.</param>
        /// <param name="testResultFiles">Paths of result files to parse and publish.</param>
        /// <param name="publishOptions">Options controlling how results are published.</param>
        /// <param name="cancellationToken">Token used to cancel the publish operation.</param>
        /// <returns>The failed/passed outcome of the run; false when nothing was published or an error occurred.</returns>
        public async Task<bool> PublishAsync(TestRunContext runContext, List<string> testResultFiles, PublishOptions publishOptions, CancellationToken cancellationToken = default(CancellationToken))
        {
            try
            {
                TestDataProvider dataProvider = ParseTestResultsFile(runContext, testResultFiles);

                // Nothing parsed — nothing to publish.
                if (dataProvider == null)
                {
                    return false;
                }

                var testRunData = dataProvider.GetTestRunData();

                // Run level and build level attachments are published concurrently.
                Task publishRunDataTask = Task.Run(() => _testRunPublisher.PublishTestRunDataAsync(runContext, _projectName, testRunData, publishOptions, cancellationToken));
                Task uploadBuildDataTask = Task.Run(() => UploadBuildDataAttachment(runContext, dataProvider.GetBuildData(), cancellationToken));
                await Task.WhenAll(publishRunDataTask, uploadBuildDataTask);

                _calculateTestRunSummary = _featureFlagService.GetFeatureFlagState(TestResultsConstants.CalculateTestRunSummaryFeatureFlag, TestResultsConstants.TFSServiceInstanceGuid);

                var hasFailedOutcome = GetTestRunOutcome(_executionContext, testRunData, out TestRunSummary testRunSummary);

                // Storing testrun summary in environment variable, which will be read by PublishPipelineMetadataTask and publish to evidence store.
                if (_calculateTestRunSummary)
                {
                    TestResultUtils.StoreTestRunSummaryInEnvVar(_executionContext, testRunSummary, _testRunner, "PublishTestResults");
                }

                return hasFailedOutcome;
            }
            catch (Exception ex)
            {
                _executionContext.Warning("Failed to publish test run data: " + ex.ToString());
            }
            return false;
        }
Example #4
0
        /// <summary>
        /// Publish single test run: reads every file in <paramref name="resultFiles"/>, merges all
        /// results, attachments and timing data into one run, publishes it, and — when the flaky
        /// check feature flag is on and failures exist — re-evaluates the outcome via the
        /// flaky-aware service.
        /// </summary>
        /// <param name="resultFiles">Result files to read and merge into a single run.</param>
        /// <param name="publisher">Publisher used to start, populate and end the run.</param>
        /// <param name="runContext">Build/release context for the run; treated as possibly null.</param>
        /// <param name="resultReader">Reader name, used only in the invalid-file warning.</param>
        /// <param name="runTitle">Optional run title; a default name is generated when blank.</param>
        /// <param name="buildId">Build id used in the generated default run name.</param>
        /// <param name="cancellationToken">Token honored while reading result files.</param>
        /// <returns>True when any merged result indicates a failed outcome.</returns>
        private async Task<bool> PublishAllTestResultsToSingleTestRunAsync(List<string> resultFiles, ITestRunPublisher publisher, TestRunContext runContext, string resultReader, string runTitle, int? buildId, CancellationToken cancellationToken)
        {
            bool isTestRunOutcomeFailed = false;

            try
            {
                //use local time since TestRunData defaults to local times
                // NOTE(review): presentTime is UTC while parsed file dates may be local — confirm
                // the intended mix; "o" round-trips whatever kind was parsed.
                DateTime minStartDate = DateTime.MaxValue;
                DateTime maxCompleteDate = DateTime.MinValue;
                DateTime presentTime = DateTime.UtcNow;
                bool dateFormatError = false;
                TimeSpan totalTestCaseDuration = TimeSpan.Zero;
                List<string> runAttachments = new List<string>();
                List<TestCaseResultData> runResults = new List<TestCaseResultData>();
                TestRunSummary testRunSummary = new TestRunSummary();

                //read results from each file
                foreach (string resultFile in resultFiles)
                {
                    cancellationToken.ThrowIfCancellationRequested();
                    //test case results
                    _executionContext.Debug(StringUtil.Format("Reading test results from file '{0}'", resultFile));
                    TestRunData resultFileRunData = publisher.ReadResultsFromFile(runContext, resultFile);
                    // A single failed file marks the whole merged run as failed.
                    isTestRunOutcomeFailed = isTestRunOutcomeFailed || GetTestRunOutcome(resultFileRunData, testRunSummary);

                    if (resultFileRunData != null)
                    {
                        if (resultFileRunData.Results != null && resultFileRunData.Results.Length > 0)
                        {
                            try
                            {
                                if (string.IsNullOrEmpty(resultFileRunData.StartDate) || string.IsNullOrEmpty(resultFileRunData.CompleteDate))
                                {
                                    dateFormatError = true;
                                }

                                //As per discussion with Manoj(refer bug 565487): Test Run duration time should be minimum Start Time to maximum Completed Time when merging
                                if (!string.IsNullOrEmpty(resultFileRunData.StartDate))
                                {
                                    DateTime startDate = DateTime.Parse(resultFileRunData.StartDate, null, DateTimeStyles.RoundtripKind);
                                    minStartDate = minStartDate > startDate ? startDate : minStartDate;

                                    if (!string.IsNullOrEmpty(resultFileRunData.CompleteDate))
                                    {
                                        DateTime endDate = DateTime.Parse(resultFileRunData.CompleteDate, null, DateTimeStyles.RoundtripKind);
                                        maxCompleteDate = maxCompleteDate < endDate ? endDate : maxCompleteDate;
                                    }
                                }
                            }
                            catch (FormatException)
                            {
                                _executionContext.Warning(StringUtil.Loc("InvalidDateFormat", resultFile, resultFileRunData.StartDate, resultFileRunData.CompleteDate));
                                dateFormatError = true;
                            }

                            //continue to calculate duration as a fallback for case: if there is issue with format or dates are null or empty
                            foreach (TestCaseResultData tcResult in resultFileRunData.Results)
                            {
                                int durationInMs = Convert.ToInt32(tcResult.DurationInMs);
                                totalTestCaseDuration = totalTestCaseDuration.Add(TimeSpan.FromMilliseconds(durationInMs));
                            }

                            runResults.AddRange(resultFileRunData.Results);

                            //run attachments
                            if (resultFileRunData.Attachments != null)
                            {
                                runAttachments.AddRange(resultFileRunData.Attachments);
                            }
                        }
                        else
                        {
                            _executionContext.Output(StringUtil.Loc("NoResultFound", resultFile));
                        }
                    }
                    else
                    {
                        _executionContext.Warning(StringUtil.Loc("InvalidResultFiles", resultFile, resultReader));
                    }
                }

                //publish run if there are results.
                if (runResults.Count > 0)
                {
                    string runName = string.IsNullOrWhiteSpace(runTitle)
                    ? StringUtil.Format("{0}_TestResults_{1}", _resultReader.Name, buildId)
                    : runTitle;

                    if (DateTime.Compare(minStartDate, maxCompleteDate) > 0)
                    {
                        _executionContext.Warning(StringUtil.Loc("InvalidCompletedDate", maxCompleteDate, minStartDate));
                        dateFormatError = true;
                    }

                    // Fall back to "now" / duration-derived end when dates were missing or invalid.
                    minStartDate = DateTime.Equals(minStartDate, DateTime.MaxValue) ? presentTime : minStartDate;
                    maxCompleteDate = dateFormatError || DateTime.Equals(maxCompleteDate, DateTime.MinValue) ? minStartDate.Add(totalTestCaseDuration) : maxCompleteDate;

                    // create test run
                    TestRunData testRunData = new TestRunData(
                        name: runName,
                        startedDate: minStartDate.ToString("o"),
                        completedDate: maxCompleteDate.ToString("o"),
                        state: "InProgress",
                        isAutomated: true,
                        buildId: runContext != null ? runContext.BuildId : 0,
                        buildFlavor: runContext != null ? runContext.Configuration : string.Empty,
                        buildPlatform: runContext != null ? runContext.Platform : string.Empty,
                        releaseUri: runContext != null ? runContext.ReleaseUri : null,
                        releaseEnvironmentUri: runContext != null ? runContext.ReleaseEnvironmentUri : null
                        );
                    // runContext is null-guarded above, so be consistent here instead of risking an
                    // NRE that the catch below would silently turn into a warning.
                    testRunData.PipelineReference = runContext?.PipelineReference;
                    testRunData.Attachments = runAttachments.ToArray();
                    testRunData.AddCustomField(_testRunSystemCustomFieldName, runContext?.TestRunSystem);
                    AddTargetBranchInfoToRunCreateModel(testRunData, runContext?.TargetBranchName);

                    TestRun testRun = await publisher.StartTestRunAsync(testRunData, _executionContext.CancellationToken);

                    await publisher.AddResultsAsync(testRun, runResults.ToArray(), _executionContext.CancellationToken);

                    TestRun updatedRun = await publisher.EndTestRunAsync(testRunData, testRun.Id, true, _executionContext.CancellationToken);

                    // Check failed results for flaky aware
                    // Fallback to flaky aware if there are any failures.
                    bool isFlakyCheckEnabled = _featureFlagService.GetFeatureFlagState(TestResultsConstants.EnableFlakyCheckInAgentFeatureFlag, TestResultsConstants.TCMServiceInstanceGuid);

                    if (isTestRunOutcomeFailed && isFlakyCheckEnabled)
                    {
                        IList<TestRun> publishedRuns = new List<TestRun> { updatedRun };
                        var runOutcome = _testRunPublisherHelper.CheckRunsForFlaky(publishedRuns, _projectName);
                        // HasValue already implies non-null for a Nullable<bool>; the extra
                        // `!= null` comparison was redundant.
                        if (runOutcome.HasValue)
                        {
                            isTestRunOutcomeFailed = runOutcome.Value;
                        }
                    }

                    StoreTestRunSummaryInEnvVar(testRunSummary);
                }
            }
            catch (Exception ex) when (!(ex is OperationCanceledException && _executionContext.CancellationToken.IsCancellationRequested))
            {
                // Not catching all the operationcancelled exceptions, as the pipeline cancellation should cancel the command as well.
                // Do not fail the task.
                LogPublishTestResultsFailureWarning(ex);
            }
            return isTestRunOutcomeFailed;
        }