/// <summary>
/// Publishes a separate test run for each result file that has results.
/// </summary>
private async Task<bool> PublishToNewTestRunPerTestResultFileAsync(
    List<string> resultFiles,
    ITestRunPublisher publisher,
    TestRunContext runContext,
    string resultReader,
    string runTitle,
    int batchSize,
    CancellationToken cancellationToken)
{
    bool isTestRunOutcomeFailed = false;

    try
    {
        IList<TestRun> publishedRuns = new List<TestRun>();

        // Split the result files into batches of 'batchSize'; each batch is published in parallel.
        var groupedFiles = resultFiles
            .Select((resultFile, index) => new { Index = index, File = resultFile })
            .GroupBy(pair => pair.Index / batchSize)
            .Select(bucket => bucket.Select(pair => pair.File).ToList())
            .ToList();

        // Make run titles unique when more than one run will be created.
        bool changeTestRunTitle = resultFiles.Count > 1;
        TestRunSummary testRunSummary = new TestRunSummary();

        foreach (var files in groupedFiles)
        {
            // Publish a separate test run for each result file that has results.
            // Note: the tasks in a batch run concurrently and share publishedRuns,
            // testRunSummary, and isTestRunOutcomeFailed.
            var publishTasks = files.Select(async resultFile =>
            {
                cancellationToken.ThrowIfCancellationRequested();
                string runName = runTitle;

                if (!string.IsNullOrWhiteSpace(runTitle) && changeTestRunTitle)
                {
                    runName = GetRunName(runTitle);
                }

                _executionContext.Debug(StringUtil.Format("Reading test results from file '{0}'", resultFile));
                TestRunData testRunData = publisher.ReadResultsFromFile(runContext, resultFile, runName);
                cancellationToken.ThrowIfCancellationRequested();

                if (testRunData != null)
                {
                    // Dereference testRunData only after the null check above.
                    testRunData.PipelineReference = runContext.PipelineReference;
                    isTestRunOutcomeFailed = isTestRunOutcomeFailed || GetTestRunOutcome(testRunData, testRunSummary);

                    if (testRunData.Results != null && testRunData.Results.Length > 0)
                    {
                        testRunData.AddCustomField(_testRunSystemCustomFieldName, runContext.TestRunSystem);
                        AddTargetBranchInfoToRunCreateModel(testRunData, runContext.TargetBranchName);
                        TestRun testRun = await publisher.StartTestRunAsync(testRunData, _executionContext.CancellationToken);
                        await publisher.AddResultsAsync(testRun, testRunData.Results, _executionContext.CancellationToken);
                        TestRun updatedRun = await publisher.EndTestRunAsync(testRunData, testRun.Id, cancellationToken: _executionContext.CancellationToken);
                        publishedRuns.Add(updatedRun);
                    }
                    else
                    {
                        _executionContext.Output(StringUtil.Loc("NoResultFound", resultFile));
                    }
                }
                else
                {
                    _executionContext.Warning(StringUtil.Loc("InvalidResultFiles", resultFile, resultReader));
                }
            });

            await Task.WhenAll(publishTasks);
        }

        // Check failed results for flaky awareness: fall back to flaky analysis if there are any failures.
        bool isFlakyCheckEnabled = _featureFlagService.GetFeatureFlagState(TestResultsConstants.EnableFlakyCheckInAgentFeatureFlag, TestResultsConstants.TCMServiceInstanceGuid);

        if (isTestRunOutcomeFailed && isFlakyCheckEnabled)
        {
            var runOutcome = _testRunPublisherHelper.CheckRunsForFlaky(publishedRuns, _projectName);
            if (runOutcome.HasValue)
            {
                isTestRunOutcomeFailed = runOutcome.Value;
            }
        }

        StoreTestRunSummaryInEnvVar(testRunSummary);
    }
    catch (Exception ex) when (!(ex is OperationCanceledException))
    {
        // Do not fail the task on publish errors; log a warning instead.
        LogPublishTestResultsFailureWarning(ex);
    }

    return isTestRunOutcomeFailed;
}
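// Illustration only (not part of the agent source): the LINQ pipeline above chunks the
// input into fixed-size batches using integer division on the element index. The file
// names and batch size here are hypothetical.
using System;
using System.Collections.Generic;
using System.Linq;

static class BatchingSketch
{
    static void Main()
    {
        var resultFiles = new List<string> { "a.trx", "b.trx", "c.trx", "d.trx", "e.trx" };
        int batchSize = 2;

        var groupedFiles = resultFiles
            .Select((file, index) => new { Index = index, File = file })
            .GroupBy(pair => pair.Index / batchSize)   // indices 0,1,2,3,4 -> keys 0,0,1,1,2
            .Select(bucket => bucket.Select(pair => pair.File).ToList())
            .ToList();

        // Prints: [a.trx, b.trx] / [c.trx, d.trx] / [e.trx]
        foreach (var batch in groupedFiles)
        {
            Console.WriteLine("[" + string.Join(", ", batch) + "]");
        }
    }
}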
private bool GetTestRunOutcome(IExecutionContext executionContext, IList<TestRunData> testRunDataList, out TestRunSummary testRunSummary)
{
    bool anyFailedTests = false;
    testRunSummary = new TestRunSummary();

    foreach (var testRunData in testRunDataList)
    {
        foreach (var testCaseResult in testRunData.TestResults)
        {
            testRunSummary.Total += 1;
            Enum.TryParse(testCaseResult.Outcome, out TestOutcome outcome);

            switch (outcome)
            {
                case TestOutcome.Failed:
                case TestOutcome.Aborted:
                    testRunSummary.Failed += 1;
                    anyFailedTests = true;
                    break;
                case TestOutcome.Passed:
                    testRunSummary.Passed += 1;
                    break;
                case TestOutcome.Inconclusive:
                    testRunSummary.Skipped += 1;
                    break;
                default:
                    break;
            }

            // When the full summary is not needed, stop scanning as soon as a failure is found.
            if (!_calculateTestRunSummary && anyFailedTests)
            {
                return anyFailedTests;
            }
        }
    }

    return anyFailedTests;
}
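// Illustration only: Enum.TryParse on an outcome string, as used above. If the string
// matches no member, 'outcome' keeps the enum's default value, so the switch falls into
// the 'default' branch and the result is not counted. The 'TestOutcome' enum below is a
// hypothetical stand-in with just the members referenced above.
using System;

enum TestOutcome { None, Passed, Failed, Aborted, Inconclusive }

static class OutcomeParseSketch
{
    static void Main()
    {
        foreach (var raw in new[] { "Passed", "Failed", "NotExecuted" })
        {
            Enum.TryParse(raw, out TestOutcome outcome);
            Console.WriteLine($"{raw} -> {outcome}");   // NotExecuted -> None (parse failed)
        }
    }
}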
/// <summary>
/// Publishes all test results to a single test run.
/// </summary>
private async Task<bool> PublishAllTestResultsToSingleTestRunAsync(
    List<string> resultFiles,
    ITestRunPublisher publisher,
    TestRunContext runContext,
    string resultReader,
    string runTitle,
    int? buildId,
    CancellationToken cancellationToken)
{
    bool isTestRunOutcomeFailed = false;

    try
    {
        // Use local time, since TestRunData defaults to local times.
        DateTime minStartDate = DateTime.MaxValue;
        DateTime maxCompleteDate = DateTime.MinValue;
        DateTime presentTime = DateTime.UtcNow;
        bool dateFormatError = false;
        TimeSpan totalTestCaseDuration = TimeSpan.Zero;
        List<string> runAttachments = new List<string>();
        List<TestCaseResultData> runResults = new List<TestCaseResultData>();
        TestRunSummary testRunSummary = new TestRunSummary();

        // Read results from each file.
        foreach (string resultFile in resultFiles)
        {
            cancellationToken.ThrowIfCancellationRequested();

            // Test case results.
            _executionContext.Debug(StringUtil.Format("Reading test results from file '{0}'", resultFile));
            TestRunData resultFileRunData = publisher.ReadResultsFromFile(runContext, resultFile);

            if (resultFileRunData != null)
            {
                // Dereference resultFileRunData only after the null check above.
                isTestRunOutcomeFailed = isTestRunOutcomeFailed || GetTestRunOutcome(resultFileRunData, testRunSummary);

                if (resultFileRunData.Results != null && resultFileRunData.Results.Length > 0)
                {
                    try
                    {
                        if (string.IsNullOrEmpty(resultFileRunData.StartDate) || string.IsNullOrEmpty(resultFileRunData.CompleteDate))
                        {
                            dateFormatError = true;
                        }

                        // Per discussion with Manoj (see bug 565487): when merging, the test run
                        // duration should span from the minimum start time to the maximum completed time.
                        if (!string.IsNullOrEmpty(resultFileRunData.StartDate))
                        {
                            DateTime startDate = DateTime.Parse(resultFileRunData.StartDate, null, DateTimeStyles.RoundtripKind);
                            minStartDate = minStartDate > startDate ? startDate : minStartDate;

                            if (!string.IsNullOrEmpty(resultFileRunData.CompleteDate))
                            {
                                DateTime endDate = DateTime.Parse(resultFileRunData.CompleteDate, null, DateTimeStyles.RoundtripKind);
                                maxCompleteDate = maxCompleteDate < endDate ? endDate : maxCompleteDate;
                            }
                        }
                    }
                    catch (FormatException)
                    {
                        _executionContext.Warning(StringUtil.Loc("InvalidDateFormat", resultFile, resultFileRunData.StartDate, resultFileRunData.CompleteDate));
                        dateFormatError = true;
                    }

                    // Keep accumulating duration as a fallback for when the dates are malformed, null, or empty.
                    foreach (TestCaseResultData tcResult in resultFileRunData.Results)
                    {
                        int durationInMs = Convert.ToInt32(tcResult.DurationInMs);
                        totalTestCaseDuration = totalTestCaseDuration.Add(TimeSpan.FromMilliseconds(durationInMs));
                    }

                    runResults.AddRange(resultFileRunData.Results);

                    // Run attachments.
                    if (resultFileRunData.Attachments != null)
                    {
                        runAttachments.AddRange(resultFileRunData.Attachments);
                    }
                }
                else
                {
                    _executionContext.Output(StringUtil.Loc("NoResultFound", resultFile));
                }
            }
            else
            {
                _executionContext.Warning(StringUtil.Loc("InvalidResultFiles", resultFile, resultReader));
            }
        }

        // Publish the run only if there are results.
        if (runResults.Count > 0)
        {
            string runName = string.IsNullOrWhiteSpace(runTitle)
                ? StringUtil.Format("{0}_TestResults_{1}", _resultReader.Name, buildId)
                : runTitle;

            if (DateTime.Compare(minStartDate, maxCompleteDate) > 0)
            {
                _executionContext.Warning(StringUtil.Loc("InvalidCompletedDate", maxCompleteDate, minStartDate));
                dateFormatError = true;
            }

            minStartDate = DateTime.Equals(minStartDate, DateTime.MaxValue) ? presentTime : minStartDate;
            maxCompleteDate = dateFormatError || DateTime.Equals(maxCompleteDate, DateTime.MinValue)
                ? minStartDate.Add(totalTestCaseDuration)
                : maxCompleteDate;

            // Create the test run.
            TestRunData testRunData = new TestRunData(
                name: runName,
                startedDate: minStartDate.ToString("o"),
                completedDate: maxCompleteDate.ToString("o"),
                state: "InProgress",
                isAutomated: true,
                buildId: runContext != null ? runContext.BuildId : 0,
                buildFlavor: runContext != null ? runContext.Configuration : string.Empty,
                buildPlatform: runContext != null ? runContext.Platform : string.Empty,
                releaseUri: runContext != null ? runContext.ReleaseUri : null,
                releaseEnvironmentUri: runContext != null ? runContext.ReleaseEnvironmentUri : null);

            testRunData.PipelineReference = runContext.PipelineReference;
            testRunData.Attachments = runAttachments.ToArray();
            testRunData.AddCustomField(_testRunSystemCustomFieldName, runContext.TestRunSystem);
            AddTargetBranchInfoToRunCreateModel(testRunData, runContext.TargetBranchName);

            TestRun testRun = await publisher.StartTestRunAsync(testRunData, _executionContext.CancellationToken);
            await publisher.AddResultsAsync(testRun, runResults.ToArray(), _executionContext.CancellationToken);
            await publisher.EndTestRunAsync(testRunData, testRun.Id, true, _executionContext.CancellationToken);
        }

        StoreTestRunSummaryInEnvVar(testRunSummary);
    }
    catch (Exception ex) when (!(ex is OperationCanceledException))
    {
        // Do not fail the task on publish errors; log a warning instead.
        LogPublishTestResultsFailureWarning(ex);
    }

    return isTestRunOutcomeFailed;
}
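// Illustration only: merging per-file time ranges into one run window, as done above.
// Dates parse with DateTimeStyles.RoundtripKind so "o"-format strings keep their Kind.
// The timestamps below are hypothetical.
using System;
using System.Globalization;

static class RunWindowSketch
{
    static void Main()
    {
        var starts = new[] { "2023-05-01T10:00:00Z", "2023-05-01T09:30:00Z" };
        var ends = new[] { "2023-05-01T10:05:00Z", "2023-05-01T10:20:00Z" };

        DateTime minStart = DateTime.MaxValue;
        DateTime maxComplete = DateTime.MinValue;

        for (int i = 0; i < starts.Length; i++)
        {
            DateTime s = DateTime.Parse(starts[i], null, DateTimeStyles.RoundtripKind);
            DateTime e = DateTime.Parse(ends[i], null, DateTimeStyles.RoundtripKind);
            minStart = s < minStart ? s : minStart;
            maxComplete = e > maxComplete ? e : maxComplete;
        }

        // The merged run spans 09:30:00Z to 10:20:00Z.
        Console.WriteLine($"{minStart:o} -> {maxComplete:o}");
    }
}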
/// <summary>
/// Called when a test run is completed.
/// </summary>
/// <param name="sender">The sender.</param>
/// <param name="e">Test run complete event arguments.</param>
internal void TestRunCompleteHandler(object sender, TestRunCompleteEventArgs e)
{
    // Create the test run.
    // If an abort occurs, TestResultHandler is never called, so the test run is not created.
    // This happens when a test aborts in the first batch of execution.
    if (this.testRun == null)
    {
        CreateTestRun();
    }

    XmlPersistence helper = new XmlPersistence();
    XmlTestStoreParameters parameters = XmlTestStoreParameters.GetParameters();
    XmlElement rootElement = helper.CreateRootElement("TestRun");

    // Save runId, username, creation time, etc.
    this.testRun.Finished = DateTime.UtcNow;
    helper.SaveSingleFields(rootElement, this.testRun, parameters);

    // Save test settings.
    helper.SaveObject(this.testRun.RunConfiguration, rootElement, "TestSettings", parameters);

    // Save test results.
    helper.SaveIEnumerable(this.results.Values, rootElement, "Results", ".", null, parameters);

    // Save test definitions.
    helper.SaveIEnumerable(this.testElements.Values, rootElement, "TestDefinitions", ".", null, parameters);

    // Save test entries.
    helper.SaveIEnumerable(this.entries.Values, rootElement, "TestEntries", ".", "TestEntry", parameters);

    // Save default categories.
    List<TestListCategory> categories = new List<TestListCategory>();
    categories.Add(TestListCategory.UncategorizedResults);
    categories.Add(TestListCategory.AllResults);
    helper.SaveList<TestListCategory>(categories, rootElement, "TestLists", ".", "TestList", parameters);

    // Save the summary.
    if (this.testRunOutcome == TrxLoggerObjectModel.TestOutcome.Passed)
    {
        this.testRunOutcome = TrxLoggerObjectModel.TestOutcome.Completed;
    }

    List<string> errorMessages = new List<string>();
    List<CollectorDataEntry> collectorEntries = this.converter.ToCollectionEntries(e.AttachmentSets, this.testRun, this.testResultsDirPath);
    IList<string> resultFiles = this.converter.ToResultFiles(e.AttachmentSets, this.testRun, this.testResultsDirPath, errorMessages);

    if (errorMessages.Count > 0)
    {
        // Errors occurred while attaching files; report them and set the run outcome to Error.
        this.testRunOutcome = TrxLoggerObjectModel.TestOutcome.Error;
        foreach (string msg in errorMessages)
        {
            RunInfo runMessage = new RunInfo(msg, null, Environment.MachineName, TrxLoggerObjectModel.TestOutcome.Error);
            this.runLevelErrorsAndWarnings.Add(runMessage);
        }
    }

    TestRunSummary runSummary = new TestRunSummary(
        this.totalTests,
        this.passTests + this.failTests,
        this.passTests,
        this.failTests,
        this.testRunOutcome,
        this.runLevelErrorsAndWarnings,
        this.runLevelStdOut.ToString(),
        resultFiles,
        collectorEntries);

    helper.SaveObject(runSummary, rootElement, "ResultSummary", parameters);

    // Save results to the TRX file.
    this.DeriveTrxFilePath();
    this.PopulateTrxFile(this.trxFilePath, rootElement);
}
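// Illustration only: a skeletal TRX document with the sections the handler above writes
// under the "TestRun" root (element names taken from its SaveObject/SaveIEnumerable calls).
// This sketch uses plain System.Xml rather than the logger's XmlPersistence helper.
using System;
using System.Xml;

static class TrxSkeletonSketch
{
    static void Main()
    {
        var doc = new XmlDocument();
        XmlElement root = doc.CreateElement("TestRun");
        doc.AppendChild(root);

        foreach (var section in new[] { "TestSettings", "Results", "TestDefinitions", "TestEntries", "TestLists", "ResultSummary" })
        {
            root.AppendChild(doc.CreateElement(section));
        }

        // Prints the skeleton: <TestRun><TestSettings />...<ResultSummary /></TestRun>
        Console.WriteLine(doc.OuterXml);
    }
}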
public Task StoreTestRunRecord(TestRunSummary testRunSummary)
{
    return _testRunSummariesContainer
        .GetBlockBlobClient(testRunSummary.RecordId)
        .UploadAsync(testRunSummary.AsStream());
}
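// Illustration only: one way a caller might wire up the blob container and store a
// record, mirroring the method above. The connection-string variable, container name,
// record id, and payload are all hypothetical; the Azure.Storage.Blobs calls
// (BlobContainerClient, GetBlockBlobClient, UploadAsync) are real SDK APIs.
using System;
using System.IO;
using System.Text;
using System.Threading.Tasks;
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Specialized;

static class StoreRecordSketch
{
    static async Task Main()
    {
        var container = new BlobContainerClient(
            Environment.GetEnvironmentVariable("STORAGE_CONNECTION_STRING"),
            "test-run-summaries");
        await container.CreateIfNotExistsAsync();

        // Equivalent to StoreTestRunRecord: upload the serialized summary under its record id.
        string recordId = Guid.NewGuid().ToString();
        using var payload = new MemoryStream(Encoding.UTF8.GetBytes("{\"total\":10,\"failed\":0}"));
        await container.GetBlockBlobClient(recordId).UploadAsync(payload);
    }
}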