/// <summary>
/// Used by the Run function to execute the tests and print the run summary.
/// </summary>
/// <param name="runner">asset runner that executes the tests; always disposed on exit</param>
/// <param name="resultsFile">path of the JUnit XML results file to write</param>
private void RunTests(IAssetRunner runner, string resultsFile)
{
    try
    {
        // In a CI run, prepare a JUnit XML builder targeting the requested results file.
        // NOTE(review): _xmlBuilder is used unconditionally below, so when _ciRun is false
        // a builder must already have been assigned elsewhere - confirm.
        if (_ciRun)
        {
            _xmlBuilder = new JunitXmlBuilder();
            _xmlBuilder.XmlName = resultsFile;
        }

        TestSuiteRunResults results = runner.Run();

        // No results object at all means the runner failed outright; terminate the process.
        if (results == null)
        {
            Environment.Exit((int)Launcher.ExitCodeEnum.Failed);
        }

        _xmlBuilder.CreateXmlFromRunResults(results);

        // Any failed or errored test marks the whole job as failed.
        if (results.TestRuns.Any(tr => tr.TestState == TestState.Failed || tr.TestState == TestState.Error))
        {
            Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        }

        // This is the total run summary; detach the console writer from any single test first.
        ConsoleWriter.ActiveTestRun = null;
        string runStatus = (Launcher.ExitCode == ExitCodeEnum.Passed || Launcher.ExitCode == ExitCodeEnum.Unstable) ? "Job succeeded" : "Job failed";
        int numFailures = results.TestRuns.Count(t => t.TestState == TestState.Failed);
        int numSuccess = results.TestRuns.Count(t => t.TestState == TestState.Passed);
        int numErrors = results.TestRuns.Count(t => t.TestState == TestState.Error);
        ConsoleWriter.WriteLine(Resources.LauncherDoubleSeperator);
        ConsoleWriter.WriteLine(string.Format(Resources.LauncherDisplayStatistics, runStatus, results.TestRuns.Count, numSuccess, numFailures, numErrors));

        if (!runner.RunWasCancelled)
        {
            // Per-test status line; a test with warnings shows "Warning" instead of its state.
            results.TestRuns.ForEach(tr => ConsoleWriter.WriteLine(((tr.HasWarnings) ? "Warning".PadLeft(7) : tr.TestState.ToString().PadRight(7)) + ": " + tr.TestPath));
            ConsoleWriter.WriteLine(Resources.LauncherDoubleSeperator);
            if (ConsoleWriter.ErrorSummaryLines != null && ConsoleWriter.ErrorSummaryLines.Count > 0)
            {
                ConsoleWriter.WriteLine("Job Errors summary:");
                ConsoleWriter.ErrorSummaryLines.ForEach(line => ConsoleWriter.WriteLine(line));
            }
        }
    }
    finally
    {
        // Always dispose the runner; a dispose failure is logged but must not mask the run outcome.
        try
        {
            runner.Dispose();
        }
        catch (Exception ex)
        {
            ConsoleWriter.WriteLine(string.Format(Resources.LauncherRunnerDisposeError, ex.Message));
        } // fixed: removed stray empty statement (";") that followed the catch block
    }
}
/// <summary>
/// Converts all data from the test results into the JUnit XML format and writes the XML file to disk.
/// </summary>
/// <param name="results">aggregated results of the suite run to serialize</param>
public void CreateXmlFromRunResults(TestSuiteRunResults results)
{
    _testSuites = new testsuites();

    // One suite holds every test case; counters come straight from the run results.
    testsuite suite = new testsuite
    {
        errors = results.NumErrors.ToString(),
        tests = results.NumTests.ToString(),
        failures = results.NumFailures.ToString(),
        name = results.SuiteName,
        package = ClassName
    };

    string loadRunnerType = TestType.LoadRunner.ToString();
    foreach (TestRunResults run in results.TestRuns)
    {
        // LoadRunner results and UFT results are converted by different builders.
        testcase converted = (run.TestType == loadRunnerType)
            ? CreateXmlFromLRRunResults(run)
            : CreateXmlFromUFTRunResults(run);
        suite.AddTestCase(converted);
    }
    _testSuites.AddTestsuite(suite);

    // Replace any previous results file, then serialize the whole suite tree.
    if (File.Exists(XmlName))
    {
        File.Delete(XmlName);
    }
    using (Stream output = File.OpenWrite(XmlName))
    {
        _serializer.Serialize(output, _testSuites);
    }
}
/// <summary>
/// Merges another suite's results into this one: appends its test runs
/// and accumulates its runtime and counters.
/// </summary>
/// <param name="desc">the suite results to fold into this instance</param>
internal void AppendResults(TestSuiteRunResults desc)
{
    TestRuns.AddRange(desc.TestRuns);
    NumTests += desc.NumTests;
    NumErrors += desc.NumErrors;
    NumFailures += desc.NumFailures;
    TotalRunTime += desc.TotalRunTime;
}
/// <summary>
/// Converts all data from the test results into the JUnit XML format and writes the XML file to disk.
/// </summary>
/// <param name="results">aggregated results of the suite run to serialize</param>
/// <param name="error">empty on success; the exception message if writing the file failed</param>
/// <returns>true when the XML file exists on disk after serialization, false on failure</returns>
public bool CreateXmlFromRunResults(TestSuiteRunResults results, out string error)
{
    error = string.Empty;
    _testSuites = new testsuites();

    // Aggregate suite collecting every non-LoadRunner (UFT) test case.
    testsuite uftSuite = new testsuite
    {
        errors = IntToString(results.NumErrors),
        tests = IntToString(results.NumTests),
        failures = IntToString(results.NumFailures),
        name = results.SuiteName,
        package = ClassName,
        time = DoubleToString(results.TotalRunTime.TotalSeconds)
    };

    string loadRunnerType = TestType.LoadRunner.ToString();
    foreach (TestRunResults run in results.TestRuns)
    {
        if (run.TestType == loadRunnerType)
        {
            // Each LoadRunner result becomes its own test suite.
            _testSuites.AddTestsuite(CreateXmlFromLRRunResults(run));
        }
        else
        {
            uftSuite.AddTestCase(CreateXmlFromUFTRunResults(run));
        }
    }

    // Only emit the UFT suite when it actually gathered test cases.
    if (uftSuite.testcase.Length > 0)
    {
        _testSuites.AddTestsuite(uftSuite);
    }

    try
    {
        // Replace any previous results file before writing the new one.
        if (File.Exists(XmlName))
        {
            File.Delete(XmlName);
        }
        using (Stream output = File.OpenWrite(XmlName))
        {
            _serializer.Serialize(output, _testSuites);
        }
        return File.Exists(XmlName);
    }
    catch (Exception ex)
    {
        error = ex.Message;
        return false;
    }
}
/// <summary>
/// Gets the index of a test run in the suite results, given the test's name.
/// </summary>
/// <param name="strName">the test name to look for</param>
/// <param name="results">the suite results whose TestRuns list is searched</param>
/// <returns>zero-based index of the first matching run, or -1 when no run has that name</returns>
public int GetIdxByTestName(string strName, TestSuiteRunResults results)
{
    // List<T>.FindIndex returns -1 on no match, matching the original manual scan;
    // this also avoids re-evaluating the LINQ Count() extension on every iteration.
    return results.TestRuns.FindIndex(t => t != null && t.TestName == strName);
}
/// <summary>
/// Folds one test's final state into the suite counters: every settled test
/// (not Running/Waiting/Unknown) counts toward NumTests, and Failed/Error
/// states additionally bump their dedicated counters.
/// </summary>
/// <param name="test">the test whose state is being counted</param>
/// <param name="testSuite">the suite results whose counters are updated</param>
private void UpdateCounters(TestRunResults test, TestSuiteRunResults testSuite)
{
    switch (test.TestState)
    {
        case TestState.Running:
        case TestState.Waiting:
        case TestState.Unknown:
            // Not settled yet - nothing to count.
            return;
        case TestState.Failed:
            ++testSuite.NumFailures;
            break;
        case TestState.Error:
            ++testSuite.NumErrors;
            break;
    }
    // Every settled test contributes to the total.
    ++testSuite.NumTests;
}
/// <summary>
/// Converts all data from the test results into the JUnit XML format and writes the XML file to disk.
/// </summary>
/// <param name="results">aggregated results of the suite run to serialize</param>
public void CreateXmlFromRunResults(TestSuiteRunResults results)
{
    _testSuites = new testsuites();

    // Aggregate suite collecting every non-LoadRunner (UFT) test case.
    testsuite uftSuite = new testsuite
    {
        errors = results.NumErrors.ToString(),
        tests = results.NumTests.ToString(),
        failures = results.NumFailures.ToString(),
        name = results.SuiteName,
        package = ClassName
    };

    string loadRunnerType = TestType.LoadRunner.ToString();
    foreach (TestRunResults run in results.TestRuns)
    {
        if (run.TestType == loadRunnerType)
        {
            // Each LoadRunner result becomes its own test suite.
            _testSuites.AddTestsuite(CreateXmlFromLRRunResults(run));
        }
        else
        {
            uftSuite.AddTestCase(CreateXmlFromUFTRunResults(run));
        }
    }

    // Only emit the UFT suite when it actually gathered test cases.
    if (uftSuite.testcase.Length > 0)
    {
        _testSuites.AddTestsuite(uftSuite);
    }

    // Replace any previous results file, then serialize the whole suite tree.
    if (File.Exists(XmlName))
    {
        File.Delete(XmlName);
    }
    using (Stream output = File.OpenWrite(XmlName))
    {
        _serializer.Serialize(output, _testSuites);
    }
}
/// <summary>
/// Runs the test sets given to the object against the ALM/QC server and
/// aggregates the per-set results into one suite result.
/// </summary>
/// <returns>the combined run results, or null when not connected or the folder lookup failed</returns>
public override TestSuiteRunResults Run()
{
    // No ALM connection - nothing can run.
    if (!Connected)
    {
        return(null);
    }
    TestSuiteRunResults activeRunDesc = new TestSuiteRunResults();

    // Expand any folder entries in our list into the testSets they contain.
    try
    {
        FindAllTestSetsUnderFolders();
    }
    catch (Exception ex)
    {
        // A failure here typically indicates a broken QC client installation.
        ConsoleWriter.WriteErrLine(string.Format(Resources.AlmRunnerErrorBadQcInstallation, ex.Message, ex.StackTrace));
        return(null);
    }

    // Run all the TestSets; each entry is a backslash path "folder\...\setName".
    foreach (string testset in TestSets)
    {
        string testset1 = testset.TrimEnd("\\".ToCharArray());
        int pos = testset1.LastIndexOf('\\');
        // No separator means the whole string is the set name with an empty folder.
        string tsDir = "";
        string tsName = testset1;
        if (pos != -1)
        {
            // Split into folder path and set name, stripping stray backslashes.
            tsDir = testset1.Substring(0, pos).Trim("\\".ToCharArray());
            tsName = testset1.Substring(pos, testset1.Length - pos).Trim("\\".ToCharArray());
        }
        TestSuiteRunResults desc = RunTestSet(tsDir, tsName, Timeout, RunMode, RunHost);
        // A null result (set not found / error) is skipped; RunTestSet already set the exit code.
        if (desc != null)
        {
            activeRunDesc.AppendResults(desc);
        }
    }
    return(activeRunDesc);
}
/// <summary>
/// Updates the status of one test in our list of tests from an ALM execution-status entry.
/// </summary>
/// <param name="runResults">suite results holding the test-run list to update</param>
/// <param name="targetTestSet">the ALM test set the status entry belongs to</param>
/// <param name="testExecStatusObj">the ALM status object for a single test instance</param>
/// <param name="onlyUpdateState">when true, only the state transition is recorded; duration,
/// failure details and console reporting are skipped</param>
/// <returns>the updated test run entry, or null when the test could not be resolved</returns>
private TestRunResults UpdateTestStatus(TestSuiteRunResults runResults, ITestSet targetTestSet, TestExecStatus testExecStatusObj, bool onlyUpdateState)
{
    TestRunResults qTest = null;
    ITSTest currentTest = null;
    try
    {
        // Find the ALM test instance for the given status object.
        currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
        if (currentTest == null)
        {
            return qTest;
        }
        // Find the matching entry in our own run list by test name.
        int testIndex = GetIdxByTestName(currentTest.Name, runResults);
        qTest = runResults.TestRuns[testIndex];
        if (qTest.TestType == null)
        {
            qTest.TestType = GetTestType(currentTest);
        }
        // Record the state transition (previous -> current).
        qTest.PrevTestState = qTest.TestState;
        qTest.TestState = GetTsStateFromQcState(testExecStatusObj.Status);
        if (!onlyUpdateState)
        {
            try
            {
                // Duration is taken from the last run's RN_DURATION field.
                qTest.Runtime = TimeSpan.FromSeconds(currentTest.LastRun.Field("RN_DURATION"));
            }
            catch
            {
                // A problem getting duration - maybe the test isn't done yet; don't stop the flow.
            }
            switch (qTest.TestState)
            {
                case TestState.Failed:
                    qTest.FailureDesc = GenerateFailedLog(currentTest.LastRun);
                    // Fall back to the raw ALM status/message when no log was produced.
                    if (string.IsNullOrWhiteSpace(qTest.FailureDesc))
                        qTest.FailureDesc = testExecStatusObj.Status + " : " + testExecStatusObj.Message;
                    break;
                case TestState.Error:
                    qTest.ErrorDesc = testExecStatusObj.Status + " : " + testExecStatusObj.Message;
                    break;
                default:
                    break;
            }
            // Check QC version - the link format differs between old and new servers.
            bool oldQc = CheckIsOldQc();
            // Strip scheme/trailing slash from the server URL.
            // NOTE(review): serverURl and oldQc are computed but not used in the visible code;
            // presumably consumed by GetTestRunLink or a later revision - confirm.
            string serverURl = m_qcServer.TrimEnd("/".ToCharArray());
            if (serverURl.ToLower().StartsWith("http://"))
                serverURl = serverURl.Substring(7);
            // Report the final status plus a link to the run in ALM.
            int runid = GetTestRunId(currentTest);
            string linkStr = GetTestRunLink(currentTest, runid);
            string statusString = GetTsStateFromQcState(testExecStatusObj.Status as string).ToString();
            ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerTestStat, currentTest.Name, statusString, testExecStatusObj.Message, linkStr));
            runResults.TestRuns[testIndex] = qTest;
        }
    }
    catch (Exception ex)
    {
        // NOTE(review): if currentTest is still null here, this logging line itself throws.
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerErrorGettingStat, currentTest.Name, ex.Message));
    }
    return qTest;
}
/// <summary>
/// Used by the Run function to execute the tests, derive the job exit code
/// (Passed / Unstable / Failed / Aborted) and print the run summary.
/// </summary>
/// <param name="runner">asset runner that executes the tests; always disposed on exit</param>
/// <param name="resultsFile">path of the JUnit XML results file to write</param>
private void RunTests(IAssetRunner runner, string resultsFile)
{
    try
    {
        // In a CI run, prepare a JUnit XML builder targeting the requested results file.
        if (_ciRun)
        {
            _xmlBuilder = new JunitXmlBuilder();
            _xmlBuilder.XmlName = resultsFile;
        }
        TestSuiteRunResults results = runner.Run();
        // No results object at all means the runner failed outright; terminate the process.
        if (results == null)
        {
            Environment.Exit((int)Launcher.ExitCodeEnum.Failed);
        }
        _xmlBuilder.CreateXmlFromRunResults(results);
        // Any failed or errored test marks the job as failed (may be softened to Unstable below).
        if (results.TestRuns.Any(tr => tr.TestState == TestState.Failed || tr.TestState == TestState.Error))
        {
            Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        }
        int numFailures = results.TestRuns.Count(t => t.TestState == TestState.Failed);
        int numSuccess = results.TestRuns.Count(t => t.TestState == TestState.Passed);
        int numErrors = results.TestRuns.Count(t => t.TestState == TestState.Error);
        // TODO: Temporary fix - Jenkins doesn't retrieve results from jobs marked as failed,
        // and Unstable marks jobs with only failed tests (no errors).
        // NOTE(review): this sets Unstable even when every test failed; the three-parameter
        // RunTests overload additionally requires numSuccess > 0 - confirm which is intended.
        if ((numErrors <= 0) && (numFailures > 0))
        {
            Launcher.ExitCode = Launcher.ExitCodeEnum.Unstable;
        }
        // A fatal error in any test with a real path forces the job back to Failed.
        foreach (var testRun in results.TestRuns)
        {
            if (testRun.FatalErrors > 0 && !testRun.TestPath.Equals(""))
            {
                Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
                break;
            }
        }
        // This is the total run summary; detach the console writer from any single test first.
        ConsoleWriter.ActiveTestRun = null;
        string runStatus = "";
        switch (Launcher.ExitCode)
        {
            case ExitCodeEnum.Passed:
                runStatus = "Job succeeded";
                break;
            case ExitCodeEnum.Unstable:
                runStatus = "Job unstable (Passed with failed tests)";
                break;
            case ExitCodeEnum.Aborted:
                runStatus = "Job failed due to being Aborted";
                break;
            case ExitCodeEnum.Failed:
                runStatus = "Job failed";
                break;
            default:
                runStatus = "Error: Job status is Undefined";
                break;
        }
        ConsoleWriter.WriteLine(Resources.LauncherDoubleSeperator);
        ConsoleWriter.WriteLine(string.Format(Resources.LauncherDisplayStatistics, runStatus, results.TestRuns.Count, numSuccess, numFailures, numErrors));
        if (!runner.RunWasCancelled)
        {
            // Per-test status line; a test with warnings shows "Warning" instead of its state.
            results.TestRuns.ForEach(tr => ConsoleWriter.WriteLine(((tr.HasWarnings) ? "Warning".PadLeft(7) : tr.TestState.ToString().PadRight(7)) + ": " + tr.TestPath));
            ConsoleWriter.WriteLine(Resources.LauncherDoubleSeperator);
            if (ConsoleWriter.ErrorSummaryLines != null && ConsoleWriter.ErrorSummaryLines.Count > 0)
            {
                ConsoleWriter.WriteLine("Job Errors summary:");
                ConsoleWriter.ErrorSummaryLines.ForEach(line => ConsoleWriter.WriteLine(line));
            }
        }
    }
    finally
    {
        // Always dispose the runner; a dispose failure is logged but must not mask the run outcome.
        try
        {
            runner.Dispose();
        }
        catch (Exception ex)
        {
            ConsoleWriter.WriteLine(string.Format(Resources.LauncherRunnerDisposeError, ex.Message));
        };
    }
}
/// <summary>
/// Runs all tests given to this runner and returns a suite of run results.
/// </summary>
/// <returns>the test run results for each test; counters are filled in even when a test throws</returns>
public override TestSuiteRunResults Run()
{
    // Create a new run results object.
    TestSuiteRunResults activeRunDesc = new TestSuiteRunResults();
    double totalTime = 0;
    try
    {
        var start = DateTime.Now;
        foreach (var test in _tests)
        {
            // Stop launching further tests once the run has been cancelled.
            if (RunCancelled())
            {
                break;
            }
            string errorReason = string.Empty;
            TestRunResults runResult = null;
            try
            {
                runResult = RunHPToolsTest(test, ref errorReason);
            }
            catch (Exception ex)
            {
                // An unexpected tool failure is recorded as an Error result for this test.
                runResult = new TestRunResults();
                runResult.TestState = TestState.Error;
                runResult.ErrorDesc = ex.Message;
                runResult.TestName = test.TestName;
            }
            // Keep the original source for this test, for grouping tests under test classes.
            runResult.TestGroup = test.TestGroup;
            activeRunDesc.TestRuns.Add(runResult);
            // If the run wasn't terminated as Failed before this step, settle its final state.
            if (runResult.TestState != TestState.Failed)
            {
                if (runResult.TestState != TestState.Error)
                {
                    // Derive pass/fail from the generated report.
                    Helper.GetTestStateFromReport(runResult);
                }
                else
                {
                    // Error with no description: attribute it to cancellation or the external process.
                    if (string.IsNullOrEmpty(runResult.ErrorDesc))
                    {
                        if (RunCancelled())
                        {
                            runResult.ErrorDesc = HpToolsLauncher.Properties.Resources.ExceptionUserCancelled;
                        }
                        else
                        {
                            runResult.ErrorDesc = HpToolsLauncher.Properties.Resources.ExceptionExternalProcess;
                        }
                    }
                    runResult.ReportLocation = null;
                    runResult.TestState = TestState.Error;
                }
            }
            // Passed-with-warnings is downgraded to the Warning state.
            if (runResult.TestState == TestState.Passed && runResult.HasWarnings)
            {
                runResult.TestState = TestState.Warning;
                ConsoleWriter.WriteLine(Resources.FsRunnerTestDoneWarnings);
            }
            else
            {
                ConsoleWriter.WriteLine(string.Format(Resources.FsRunnerTestDone, runResult.TestState));
            }
            ConsoleWriter.WriteLine(DateTime.Now.ToString(Launcher.DateFormat) + " Test complete: " + runResult.TestPath + "\n-------------------------------------------------------------------------------------------------------");
            UpdateCounters(runResult.TestState);
            // fixed: removed the unused per-test timing locals (testStart/testTotalTime),
            // which were computed but never read.
        }
        totalTime = (DateTime.Now - start).TotalSeconds;
    }
    finally
    {
        // Fill in the suite-level counters and always give each tool runner a chance to clean up.
        activeRunDesc.NumTests = _tests.Count;
        activeRunDesc.NumErrors = _errors;
        activeRunDesc.TotalRunTime = TimeSpan.FromSeconds(totalTime);
        activeRunDesc.NumFailures = _fail;
        foreach (IFileSysTestRunner cleanupRunner in _colRunnersForCleanup.Values)
        {
            cleanupRunner.CleanUp();
        }
    }
    return(activeRunDesc);
}
/// <summary>
/// Analyzes and runs the tests given in the param file, optionally rerunning
/// failed tests for file-system runs, and exits the process with the final code.
/// </summary>
public void Run()
{
    _ciRun = true;
    // Resolve the run type from the CI parameters when not already known.
    if (_runType == TestStorageType.Unknown)
    {
        Enum.TryParse<TestStorageType>(_ciParams["runType"], true, out _runType);
    }
    if (_runType == TestStorageType.Unknown)
    {
        WriteToConsole(Resources.LauncherNoRuntype);
        return;
    }
    if (!_ciParams.ContainsKey("resultsFilename"))
    {
        WriteToConsole(Resources.LauncherNoResFilenameFound);
        return;
    }
    string resultsFilename = _ciParams["resultsFilename"];
    // Fall back to deriving a timestamp token from the results file name.
    UniqueTimeStamp = _ciParams.ContainsKey("uniqueTimeStamp") ? _ciParams["uniqueTimeStamp"] : resultsFilename.ToLower().Replace("results", "").Replace(".xml", "");
    // Run the entire set of tests once: create the runner according to type.
    IAssetRunner runner = CreateRunner(_runType, _ciParams, true);
    // Runner instantiation failed (no tests to run or other problem) - terminate.
    if (runner == null)
    {
        Environment.Exit((int)Launcher.ExitCodeEnum.Failed);
    }
    TestSuiteRunResults results = runner.Run();
    RunTests(runner, resultsFilename, results);
    if (_runType.Equals(TestStorageType.FileSystem))
    {
        // "On failure" rerun option from the CI parameters (boolean as string).
        string onCheckFailedTests = (_ciParams.ContainsKey("onCheckFailedTest") ? _ciParams["onCheckFailedTest"] : "");
        _rerunFailedTests = !string.IsNullOrEmpty(onCheckFailedTests) && Convert.ToBoolean(onCheckFailedTests.ToLower());
        // The "On failure" option is selected and the run build contains failed tests.
        if (_rerunFailedTests.Equals(true) && Launcher.ExitCode != ExitCodeEnum.Passed)
        {
            ConsoleWriter.WriteLine("There are failed tests. Rerun the selected tests.");
            // Rerun the selected tests (either the entire set or just the selected ones):
            // create the runner according to type.
            runner = CreateRunner(_runType, _ciParams, false);
            // Runner instantiation failed (no tests to run or other problem) - terminate.
            if (runner == null)
            {
                Environment.Exit((int)Launcher.ExitCodeEnum.Failed);
            }
            TestSuiteRunResults rerunResults = runner.Run();
            // Merge the rerun outcomes into the original results before reporting.
            results.AppendResults(rerunResults);
            RunTests(runner, resultsFilename, results);
        }
    }
    ConsoleQuickEdit.Enable();
    // Propagate any non-Passed outcome as the process exit code.
    if (Launcher.ExitCode != ExitCodeEnum.Passed)
    {
        Environment.Exit((int)Launcher.ExitCode);
    }
}
/// <summary>
/// Writes the final status for each test instance in the execution status
/// collection, updating the suite counters and the test path as it goes.
/// Stops early if the abort file appears.
/// </summary>
/// <param name="currentTest">scratch reference for the ALM test being processed</param>
/// <param name="executionStatus">ALM execution status collection (1-based)</param>
/// <param name="targetTestSet">the ALM test set the statuses belong to</param>
/// <param name="activeTestDesc">scratch reference for the run entry being updated</param>
/// <param name="runDesc">suite results receiving counter updates</param>
/// <param name="testPath">path recorded on every updated test run</param>
/// <param name="abortFilename">marker file whose existence aborts the loop</param>
private void SetTestResults(ITSTest currentTest, IExecutionStatus executionStatus, ITestSet targetTestSet, TestRunResults activeTestDesc, TestSuiteRunResults runDesc, string testPath, string abortFilename)
{
    for (int index = 1; index <= executionStatus.Count; ++index)
    {
        // A stop-request file ends processing immediately.
        if (System.IO.File.Exists(abortFilename))
        {
            break;
        }

        TestExecStatus status = executionStatus[index];
        currentTest = targetTestSet.TSTestFactory[status.TSTestId];
        if (currentTest == null)
        {
            ConsoleWriter.WriteLine(string.Format("currentTest is null for test.{0} after whole execution", index));
            continue;
        }

        // Full update (not state-only): pulls duration, failure details, and logs the result.
        activeTestDesc = UpdateTestStatus(runDesc, targetTestSet, status, false);
        UpdateCounters(activeTestDesc, runDesc);
        activeTestDesc.TestPath = testPath;
    }
}
/// <summary>
/// Runs all tests given to this runner and returns a suite of run results.
/// This variant also maintains per-test rerun bookkeeping and renames each
/// test's "Report" folder to a numbered "ReportN" folder after the run.
/// </summary>
/// <returns>the test run results for each test; counters are filled in even when a test throws</returns>
public override TestSuiteRunResults Run()
{
    // Create a new run results object.
    TestSuiteRunResults activeRunDesc = new TestSuiteRunResults();
    double totalTime = 0;
    try
    {
        var start = DateTime.Now;
        // indexList: next report-folder number per test path; rerunList: remaining reruns per path.
        Dictionary<string, int> indexList = new Dictionary<string, int>();
        foreach (var test in _tests)
        {
            indexList[test.TestPath] = 0;
        }
        Dictionary<string, int> rerunList = createDictionary(_tests);
        foreach (var test in _tests)
        {
            // First encounter of a path starts its report numbering at 1.
            if (indexList[test.TestPath] == 0)
            {
                indexList[test.TestPath] = 1;
            }
            // Stop launching further tests once the run has been cancelled.
            if (RunCancelled())
            {
                break;
            }
            var testStart = DateTime.Now;
            string errorReason = string.Empty;
            TestRunResults runResult = null;
            try
            {
                runResult = RunHpToolsTest(test, ref errorReason);
            }
            catch (Exception ex)
            {
                // An unexpected tool failure is recorded as an Error result for this test.
                runResult = new TestRunResults
                {
                    TestState = TestState.Error,
                    ErrorDesc = ex.Message,
                    TestName = test.TestName,
                    TestPath = test.TestPath
                };
            }
            // Keep the original source for this test, for grouping tests under test classes.
            runResult.TestGroup = test.TestGroup;
            activeRunDesc.TestRuns.Add(runResult);
            // If the run wasn't terminated as Failed before this step, settle its final state.
            if (runResult.TestState != TestState.Failed)
            {
                if (runResult.TestState != TestState.Error)
                {
                    // Derive pass/fail from the generated report.
                    Helper.GetTestStateFromReport(runResult);
                }
                else
                {
                    // Error with no description: attribute it to cancellation or the external process.
                    if (string.IsNullOrEmpty(runResult.ErrorDesc))
                    {
                        runResult.ErrorDesc = RunCancelled() ? HpToolsLauncher.Properties.Resources.ExceptionUserCancelled : HpToolsLauncher.Properties.Resources.ExceptionExternalProcess;
                    }
                    runResult.ReportLocation = null;
                    runResult.TestState = TestState.Error;
                }
            }
            // Passed-with-warnings is downgraded to the Warning state.
            if (runResult.TestState == TestState.Passed && runResult.HasWarnings)
            {
                runResult.TestState = TestState.Warning;
                ConsoleWriter.WriteLine(Resources.FsRunnerTestDoneWarnings);
            }
            else
            {
                ConsoleWriter.WriteLine(string.Format(Resources.FsRunnerTestDone, runResult.TestState));
            }
            ConsoleWriter.WriteLine(DateTime.Now.ToString(Launcher.DateFormat) + " Test complete: " + runResult.TestPath + "\n-------------------------------------------------------------------------------------------------------");
            UpdateCounters(runResult.TestState);
            // NOTE(review): testTotalTime is computed but never used - candidate for removal.
            var testTotalTime = (DateTime.Now - testStart).TotalSeconds;
            // Maintain rerun bookkeeping: only bump the report index when Report1 already exists.
            if (rerunList[test.TestPath] > 0)
            {
                if (!Directory.Exists(Path.Combine(test.TestPath, "Report1")))
                {
                    rerunList[test.TestPath]--;
                }
                else
                {
                    indexList[test.TestPath]++;
                    rerunList[test.TestPath]--;
                }
            }
            // Move the freshly written "Report" folder to its numbered slot ("ReportN"),
            // replacing any stale folder of the same name.
            String uftReportDir = Path.Combine(test.TestPath, "Report");
            String uftReportDirNew = Path.Combine(test.TestPath, "Report" + indexList[test.TestPath]);
            try
            {
                if (Directory.Exists(uftReportDir))
                {
                    if (Directory.Exists(uftReportDirNew))
                    {
                        Helper.DeleteDirectory(uftReportDirNew);
                    }
                    Directory.Move(uftReportDir, uftReportDirNew);
                }
            }
            catch
            {
                // The folder may still be locked by the tool; wait briefly and retry once.
                System.Threading.Thread.Sleep(1000);
                Directory.Move(uftReportDir, uftReportDirNew);
            }
        }
        totalTime = (DateTime.Now - start).TotalSeconds;
    }
    finally
    {
        // Fill in the suite-level counters and always give each tool runner a chance to clean up.
        activeRunDesc.NumTests = _tests.Count;
        activeRunDesc.NumErrors = _errors;
        activeRunDesc.TotalRunTime = TimeSpan.FromSeconds(totalTime);
        activeRunDesc.NumFailures = _fail;
        foreach (IFileSysTestRunner cleanupRunner in _colRunnersForCleanup.Values)
        {
            cleanupRunner.CleanUp();
        }
    }
    return(activeRunDesc);
}
/// <summary>
/// Reports the given run results: writes the JUnit XML file, derives the job
/// exit code (Passed / Unstable / Failed / Aborted), prints the summary, and
/// exits the process unless a failed-test rerun is configured.
/// </summary>
/// <param name="runner">asset runner that produced the results; always disposed on exit</param>
/// <param name="resultsFile">path of the JUnit XML results file to write</param>
/// <param name="results">results to report; null terminates the process as Failed</param>
private void RunTests(IAssetRunner runner, string resultsFile, TestSuiteRunResults results)
{
    try
    {
        // In a CI run, prepare a JUnit XML builder targeting the requested results file.
        if (_ciRun)
        {
            _xmlBuilder = new JunitXmlBuilder();
            _xmlBuilder.XmlName = resultsFile;
        }
        // No results object at all means the run failed outright; terminate the process.
        if (results == null)
        {
            Environment.Exit((int)Launcher.ExitCodeEnum.Failed);
        }
        _xmlBuilder.CreateXmlFromRunResults(results);
        // An empty run is treated as a failure.
        if (results.TestRuns.Count == 0)
        {
            Console.WriteLine("No tests were run");
            Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
            Environment.Exit((int)Launcher.ExitCode);
        }
        // Any failed or errored test marks the job as failed (may be softened to Unstable below).
        if (results.TestRuns.Any(tr => tr.TestState == TestState.Failed || tr.TestState == TestState.Error))
        {
            Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        }
        int numFailures = results.TestRuns.Count(t => t.TestState == TestState.Failed);
        int numSuccess = results.TestRuns.Count(t => t.TestState == TestState.Passed);
        int numErrors = results.TestRuns.Count(t => t.TestState == TestState.Error);
        int numWarnings = results.TestRuns.Count(t => t.TestState == TestState.Warning);
        // NOTE(review): the next two conditions overlap - failures with no errors first set
        // Failed, then (when at least one test passed) override to Unstable; confirm intended.
        if ((numErrors <= 0) && (numFailures > 0))
        {
            Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        }
        if ((numErrors <= 0) && (numFailures > 0) && (numSuccess > 0))
        {
            Launcher.ExitCode = Launcher.ExitCodeEnum.Unstable;
        }
        // A fatal error in any test with a real path forces the job back to Failed.
        foreach (var testRun in results.TestRuns)
        {
            if (testRun.FatalErrors > 0 && !testRun.TestPath.Equals(""))
            {
                Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
                break;
            }
        }
        // This is the total run summary; detach the console writer from any single test first.
        ConsoleWriter.ActiveTestRun = null;
        string runStatus = "";
        switch (Launcher.ExitCode)
        {
            case ExitCodeEnum.Passed:
                runStatus = "Job succeeded";
                break;
            case ExitCodeEnum.Unstable:
                runStatus = "Job unstable (Passed with failed tests)";
                break;
            case ExitCodeEnum.Aborted:
                runStatus = "Job failed due to being Aborted";
                break;
            case ExitCodeEnum.Failed:
                runStatus = "Job failed";
                break;
            default:
                runStatus = "Error: Job status is Undefined";
                break;
        }
        ConsoleWriter.WriteLine(Resources.LauncherDoubleSeperator);
        ConsoleWriter.WriteLine(string.Format(Resources.LauncherDisplayStatistics, runStatus, results.TestRuns.Count, numSuccess, numFailures, numErrors, numWarnings));
        int testIndex = 1;
        if (!runner.RunWasCancelled)
        {
            // Per-test status line with a 1-based index; warnings show "Warning" instead of the state.
            results.TestRuns.ForEach(tr =>
            {
                ConsoleWriter.WriteLine(((tr.HasWarnings) ? "Warning".PadLeft(7) : tr.TestState.ToString().PadRight(7)) + ": " + tr.TestPath + "[" + testIndex + "]");
                testIndex++;
            });
            ConsoleWriter.WriteLine(Resources.LauncherDoubleSeperator);
            if (ConsoleWriter.ErrorSummaryLines != null && ConsoleWriter.ErrorSummaryLines.Count > 0)
            {
                ConsoleWriter.WriteLine("Job Errors summary:");
                ConsoleWriter.ErrorSummaryLines.ForEach(line => ConsoleWriter.WriteLine(line));
            }
            // When no failed-test rerun is configured, the process ends here with the final code.
            string onCheckFailedTests = (_ciParams.ContainsKey("onCheckFailedTest") ? _ciParams["onCheckFailedTest"] : "");
            _rerunFailedTests = !string.IsNullOrEmpty(onCheckFailedTests) && Convert.ToBoolean(onCheckFailedTests.ToLower());
            if (!_rerunFailedTests)
            {
                Environment.Exit((int)Launcher.ExitCode);
            }
        }
    }
    finally
    {
        // Always dispose the runner; a dispose failure is logged but must not mask the run outcome.
        try
        {
            runner.Dispose();
        }
        catch (Exception ex)
        {
            ConsoleWriter.WriteLine(string.Format(Resources.LauncherRunnerDisposeError, ex.Message));
        };
    }
}
/// <summary>
/// Folds one test's final state into the suite counters: every settled test
/// (not Running/Waiting/Unknown) counts toward NumTests, and Failed/Error
/// states additionally bump their dedicated counters.
/// </summary>
/// <param name="test">the test whose state is being counted</param>
/// <param name="testSuite">the suite results whose counters are updated</param>
private void UpdateCounters(TestRunResults test, TestSuiteRunResults testSuite)
{
    // Braces added for consistency with the braced variant of this method elsewhere in the file.
    if (test.TestState != TestState.Running && test.TestState != TestState.Waiting && test.TestState != TestState.Unknown)
    {
        ++testSuite.NumTests;
    }
    if (test.TestState == TestState.Failed)
    {
        ++testSuite.NumFailures;
    }
    if (test.TestState == TestState.Error)
    {
        ++testSuite.NumErrors;
    }
}
/// <summary>
/// Runs a test set with given parameters (and a valid connection to the QC server).
/// </summary>
/// <param name="tsFolderName">testSet folder name</param>
/// <param name="tsName">testSet name</param>
/// <param name="timeout">-1 for unlimited, or number of seconds (compared against elapsed seconds)</param>
/// <param name="runMode">run on LocalMachine or remote</param>
/// <param name="runHost">if run on remote machine - remote machine name</param>
/// <returns>the results of this test set's run, or null when the folder/set could not be found</returns>
public TestSuiteRunResults RunTestSet(string tsFolderName, string tsName, double timeout, QcRunMode runMode, string runHost)
{
    string currentTestSetInstances = "";
    TestSuiteRunResults runDesc = new TestSuiteRunResults();
    TestRunResults activeTestDesc = null;
    var tsFactory = tdConnection.TestSetFactory;
    var tsTreeManager = (ITestSetTreeManager)tdConnection.TestSetTreeManager;
    List tsList = null;
    string tsPath = "Root\\" + tsFolderName;
    ITestSetFolder tsFolder = null;
    // Resolve the test-set folder node; a COM failure means "not found".
    try
    {
        tsFolder = (ITestSetFolder)tsTreeManager.get_NodeByPath(tsPath);
    }
    catch (COMException ex)
    {
        tsFolder = null;
    }
    if (tsFolder == null)
    {
        // Node wasn't found. NOTE(review): the format argument is the (null) folder object,
        // not tsPath - the message likely meant to include the path; confirm.
        ConsoleWriter.WriteErrLine(string.Format(Resources.AlmRunnerNoSuchFolder, tsFolder));
        // This will make sure the run fails at the end (since there was an error).
        Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        return(null);
    }
    else
    {
        tsList = tsFolder.FindTestSets(tsName);
    }
    if (tsList == null)
    {
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerCantFindTest, tsName));
        // This will make sure the run fails at the end (since there was an error).
        Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        return(null);
    }
    // Pick the set whose name matches exactly (case-insensitive).
    ITestSet targetTestSet = null;
    foreach (ITestSet ts in tsList)
    {
        if (ts.Name.Equals(tsName, StringComparison.InvariantCultureIgnoreCase))
        {
            targetTestSet = ts;
            break;
        }
    }
    if (targetTestSet == null)
    {
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerCantFindTest, tsName));
        // This will make sure the run fails at the end (since there was an error).
        Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        return(null);
    }
    ConsoleWriter.WriteLine(Resources.GeneralDoubleSeperator);
    ConsoleWriter.WriteLine(Resources.AlmRunnerStartingExecution);
    ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerDisplayTest, tsName, targetTestSet.ID));
    ITSScheduler Scheduler = null;
    try
    {
        // Need to run this to install everything needed: http://AlmServer:8080/qcbin/start_a.jsp?common=true
        // Start the scheduler.
        Scheduler = targetTestSet.StartExecution("");
    }
    catch (Exception ex)
    {
        Scheduler = null;
    }
    try
    {
        currentTestSetInstances = GetTestInstancesString(targetTestSet);
    }
    catch (Exception ex)
    {
        // Best-effort: instance list is only needed for Stop() on abort.
    }
    if (Scheduler == null)
    {
        Console.WriteLine(GetAlmNotInstalledError());
        // Proceeding with program execution is pointless, since nothing will run without a properly installed QC.
        Environment.Exit((int)Launcher.ExitCodeEnum.Failed);
    }
    // Filter the set's test instances by this test set's cycle id.
    TSTestFactory tsTestFactory = targetTestSet.TSTestFactory;
    ITDFilter2 tdFilter = tsTestFactory.Filter;
    tdFilter["TC_CYCLE_ID"] = targetTestSet.ID.ToString();
    IList tList = tsTestFactory.NewList(tdFilter.Text);
    try
    {
        // Set up for the run depending on where the test instances are to execute.
        switch (runMode)
        {
            case QcRunMode.RUN_LOCAL:
                // Run all tests on the local machine.
                Scheduler.RunAllLocally = true;
                break;
            case QcRunMode.RUN_REMOTE:
                // Run tests on a specified remote machine.
                Scheduler.TdHostName = runHost;
                break;
            // RunAllLocally must not be set for remote invocation of tests.
            // As such, do not do this: Scheduler.RunAllLocally = False
            case QcRunMode.RUN_PLANNED_HOST:
                // Run on the hosts as planned in the test set.
                Scheduler.RunAllLocally = false;
                break;
        }
    }
    catch (Exception ex)
    {
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerProblemWithHost, ex.Message));
    }
    ConsoleWriter.WriteLine(Resources.AlmRunnerNumTests + tList.Count);
    // Assign each test instance its execution host and pre-create its run-result entry.
    int i = 1;
    foreach (ITSTest3 test in tList)
    {
        string runOnHost = runHost;
        if (runMode == QcRunMode.RUN_PLANNED_HOST)
        {
            runOnHost = test.HostName;
        }
        // If host isn't taken from QC (PLANNED) and not from the test definition (REMOTE), take it from LOCAL (machineName).
        string hostName = runOnHost;
        if (runMode == QcRunMode.RUN_LOCAL)
        {
            hostName = Environment.MachineName;
        }
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerDisplayTestRunOnHost, i, test.Name, hostName));
        Scheduler.RunOnHost[test.ID] = runOnHost;
        var testResults = new TestRunResults();
        testResults.TestName = test.Name;
        runDesc.TestRuns.Add(testResults);
        i = i + 1;
    }
    Stopwatch sw = Stopwatch.StartNew();
    // NOTE(review): testSw is declared but never used in the visible code.
    Stopwatch testSw = null;
    try
    {
        // Tests are actually run here.
        Scheduler.Run();
    }
    catch (Exception ex)
    {
        ConsoleWriter.WriteLine(Resources.AlmRunnerRunError + ex.Message);
    }
    ConsoleWriter.WriteLine(Resources.AlmRunnerSchedStarted + DateTime.Now.ToString(Launcher.DateFormat));
    ConsoleWriter.WriteLine(Resources.SingleSeperator);
    IExecutionStatus executionStatus = Scheduler.ExecutionStatus;
    bool tsExecutionFinished = false;
    ITSTest prevTest = null;
    ITSTest currentTest = null;
    // Presence of this file signals a user-requested stop.
    string abortFilename = System.IO.Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) + "\\stop" + Launcher.UniqueTimeStamp + ".txt";
    // Poll until the tests end ("normally" or because of the timeout).
    while ((tsExecutionFinished == false) && (timeout == -1 || sw.Elapsed.TotalSeconds < timeout))
    {
        executionStatus.RefreshExecStatusInfo("all", true);
        tsExecutionFinished = executionStatus.Finished;
        if (System.IO.File.Exists(abortFilename))
        {
            break;
        }
        // Report state transitions for each test instance (1-based collection).
        for (int j = 1; j <= executionStatus.Count; ++j)
        {
            TestExecStatus testExecStatusObj = executionStatus[j];
            // State-only update during polling; full details are collected after the run.
            activeTestDesc = UpdateTestStatus(runDesc, targetTestSet, testExecStatusObj, true);
            if (activeTestDesc.PrevTestState != activeTestDesc.TestState)
            {
                TestState tstate = activeTestDesc.TestState;
                if (tstate == TestState.Running)
                {
                    currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
                    int testIndex = GetIdxByTestName(currentTest.Name, runDesc);
                    int prevRunId = GetTestRunId(currentTest);
                    runDesc.TestRuns[testIndex].PrevRunId = prevRunId;
                    // Closing previous test.
                    if (prevTest != null)
                    {
                        WriteTestRunSummary(prevTest);
                    }
                    // Starting new test.
                    prevTest = currentTest;
                    // Assign the new test the console writer so it will gather the output.
                    ConsoleWriter.ActiveTestRun = runDesc.TestRuns[testIndex];
                    ConsoleWriter.WriteLine(DateTime.Now.ToString(Launcher.DateFormat) + " Running: " + currentTest.Name);
                    // Tell the user that the test is running.
                    ConsoleWriter.WriteLine(DateTime.Now.ToString(Launcher.DateFormat) + " Running test: " + activeTestDesc.TestName + ", Test id: " + testExecStatusObj.TestId + ", Test instance id: " + testExecStatusObj.TSTestId);
                    // Start timing the new test run.
                    string foldername = "";
                    ITestSetFolder folder = targetTestSet.TestSetFolder as ITestSetFolder;
                    if (folder != null)
                    {
                        foldername = folder.Name.Replace(".", "_");
                    }
                    // The test group is its test set (dots are problematic since
                    // Jenkins parses them as separators between package and class).
                    activeTestDesc.TestGroup = foldername + "\\" + targetTestSet.Name;
                    activeTestDesc.TestGroup = activeTestDesc.TestGroup.Replace(".", "_");
                }
                TestState enmState = GetTsStateFromQcState(testExecStatusObj.Status as string);
                string statusString = enmState.ToString();
                if (enmState == TestState.Running)
                {
                    ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerStat, activeTestDesc.TestName, testExecStatusObj.TSTestId, statusString));
                }
                else if (enmState != TestState.Waiting)
                {
                    ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerStatWithMessage, activeTestDesc.TestName, testExecStatusObj.TSTestId, statusString, testExecStatusObj.Message));
                }
                if (System.IO.File.Exists(abortFilename))
                {
                    break;
                }
            }
        }
        // Wait 0.2 seconds between polls.
        Thread.Sleep(200);
        // Check for abortion.
        if (System.IO.File.Exists(abortFilename))
        {
            _blnRunCancelled = true;
            ConsoleWriter.WriteLine(Resources.GeneralStopAborted);
            // Stop all test instances in this testSet.
            Scheduler.Stop(currentTestSetInstances);
            ConsoleWriter.WriteLine(Resources.GeneralAbortedByUser);
            // Stop working.
            Environment.Exit((int)Launcher.ExitCodeEnum.Aborted);
        }
    }
    // Post-run: either collect final statuses, or report a timeout.
    if (timeout == -1 || sw.Elapsed.TotalSeconds < timeout)
    {
        // Close last test.
        if (prevTest != null)
        {
            WriteTestRunSummary(prevTest);
        }
        // Done with all tests; stop collecting output in the testRun object.
        ConsoleWriter.ActiveTestRun = null;
        // Full status update for each test (duration, failure details, link).
        for (int k = 1; k <= executionStatus.Count; ++k)
        {
            if (System.IO.File.Exists(abortFilename))
            {
                break;
            }
            TestExecStatus testExecStatusObj = executionStatus[k];
            activeTestDesc = UpdateTestStatus(runDesc, targetTestSet, testExecStatusObj, false);
            UpdateCounters(activeTestDesc, runDesc);
            currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
            string testPath = "Root\\" + tsFolderName + "\\" + tsName + "\\" + activeTestDesc.TestName;
            activeTestDesc.TestPath = testPath;
        }
        // Update the total runtime.
        runDesc.TotalRunTime = sw.Elapsed;
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerTestsetDone, tsName, DateTime.Now.ToString(Launcher.DateFormat)));
    }
    else
    {
        // Timed out: mark the run as cancelled/aborted.
        _blnRunCancelled = true;
        ConsoleWriter.WriteLine(Resources.GeneralTimedOut);
        Launcher.ExitCode = Launcher.ExitCodeEnum.Aborted;
    }
    return(runDesc);
}
/// <summary>
/// Gets the index of a test in the suite results, given its name.
/// </summary>
/// <param name="strName">the test name to look for</param>
/// <param name="results">the suite run results to search</param>
/// <returns>the zero-based index of the first test whose name matches, or -1 if none matches</returns>
public int GetIdxByTestName(string strName, TestSuiteRunResults results)
{
    // Use the list's Count property rather than the LINQ Count() extension:
    // TestRuns is indexed below, so it is a list and the property is direct.
    for (int i = 0; i < results.TestRuns.Count; ++i)
    {
        TestRunResults res = results.TestRuns[i];
        if (res != null && res.TestName == strName)
        {
            return i;
        }
    }
    // no match found
    return -1;
}
/// <summary>
/// Updates the status of a single test in our list of tests, based on a status object
/// reported by the ALM scheduler.
/// </summary>
/// <param name="runResults">the suite results holding the test entry to update</param>
/// <param name="targetTestSet">the ALM test set the status object belongs to</param>
/// <param name="testExecStatusObj">the execution-status object reported by ALM</param>
/// <param name="onlyUpdateState">when true, only the state is refreshed; when false, duration,
/// failure/error description and the run link are also collected and reported</param>
/// <returns>the updated test entry, or null if the lookup failed before assignment</returns>
private TestRunResults UpdateTestStatus(TestSuiteRunResults runResults, ITestSet targetTestSet, TestExecStatus testExecStatusObj, bool onlyUpdateState)
{
    TestRunResults qTest = null;
    ITSTest currentTest = null;
    try
    {
        //find the test for the given status object
        currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
        //find the test in our list
        int testIndex = GetIdxByTestName(currentTest.Name, runResults);
        qTest = runResults.TestRuns[testIndex];
        if (qTest.TestType == null)
        {
            qTest.TestType = GetTestType(currentTest);
        }
        //update the state, remembering the previous one so callers can detect transitions
        qTest.PrevTestState = qTest.TestState;
        qTest.TestState = GetTsStateFromQcState(testExecStatusObj.Status);
        if (!onlyUpdateState)
        {
            try
            {
                //duration and status are updated according to the run
                qTest.Runtime = TimeSpan.FromSeconds(currentTest.LastRun.Field("RN_DURATION"));
            }
            catch
            {
                //a problem getting duration, maybe the test isn't done yet - don't stop the flow..
            }
            switch (qTest.TestState)
            {
                case TestState.Failed:
                    qTest.FailureDesc = GenerateFailedLog(currentTest.LastRun);
                    //fall back to the raw ALM status/message when no failure log could be built
                    if (string.IsNullOrWhiteSpace(qTest.FailureDesc))
                    {
                        qTest.FailureDesc = testExecStatusObj.Status + " : " + testExecStatusObj.Message;
                    }
                    break;
                case TestState.Error:
                    qTest.ErrorDesc = testExecStatusObj.Status + " : " + testExecStatusObj.Message;
                    break;
                default:
                    break;
            }
            //check qc version for link type
            //NOTE(review): oldQc is currently unused; presumably GetTestRunLink handles the version - confirm
            bool oldQc = CheckIsOldQc();
            //string testLink = "<a href=\"testdirector:mydtqc01.isr.hp.com:8080/qcbin," + m_qcProject + "," + m_qcDomain + "," + targetTestSet.Name+ ";test-instance:" + testExecStatusObj.TestInstance + "\"> Alm link</a>";
            //strip the scheme so the server can be embedded in a link
            string serverURl = m_qcServer.TrimEnd("/".ToCharArray());
            if (serverURl.ToLower().StartsWith("http://"))
            {
                serverURl = serverURl.Substring(7);
            }
            //string testLinkInLabQc10 = "td://" + m_qcProject + "." + m_qcDomain + "." + m_qcServer.Replace("http://", "") + "/Test%20Lab?Action=FindTestInstance&TestSetID=" + targetTestSet.ID + "&TestInstanceID=" + testExecStatusObj.TSTestId;
            //string testLinkInLab = "td://" + m_qcProject + "." + m_qcDomain + "." + m_qcServer.Replace("http://", "") + "/TestLabModule-000000003649890581?EntityType=ITestInstance&EntityID=" + testExecStatusObj.TSTestId;
            int runid = GetTestRunId(currentTest);
            string linkStr = GetTestRunLink(currentTest, runid);
            string statusString = GetTsStateFromQcState(testExecStatusObj.Status as string).ToString();
            //report the final per-test status line, including a link to the run in ALM
            ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerTestStat, currentTest.Name, statusString, testExecStatusObj.Message, linkStr));
            runResults.TestRuns[testIndex] = qTest;
        }
    }
    catch (Exception ex)
    {
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerErrorGettingStat, currentTest.Name, ex.Message));
    }
    return (qTest);
}
/// <summary>
/// Runs every test set assigned to this runner and aggregates the individual results.
/// </summary>
/// <returns>the combined run results of all test sets, or null when not connected or when
/// the ALM installation could not enumerate the test-set folders</returns>
public override TestSuiteRunResults Run()
{
    // Nothing can run without a live ALM connection.
    if (!Connected)
        return null;

    TestSuiteRunResults activeRunDesc = new TestSuiteRunResults();

    // Expand any folders in the configured list into the concrete test sets they contain.
    try
    {
        FindAllTestSetsUnderFolders();
    }
    catch (Exception ex)
    {
        ConsoleWriter.WriteErrLine(string.Format(Resources.AlmRunnerErrorBadQcInstallation, ex.Message, ex.StackTrace));
        return null;
    }

    // Each entry is a backslash-separated path: split it into its folder and its set name,
    // run the set, and merge its results into the aggregate.
    foreach (string testset in TestSets)
    {
        string trimmedPath = testset.TrimEnd('\\');
        int lastSep = trimmedPath.LastIndexOf('\\');

        string setDir = "";
        string setName = trimmedPath;
        if (lastSep != -1)
        {
            setDir = trimmedPath.Substring(0, lastSep).Trim('\\');
            setName = trimmedPath.Substring(lastSep).Trim('\\');
        }

        TestSuiteRunResults desc = RunTestSet(setDir, setName, Timeout, RunMode, RunHost);
        if (desc != null)
            activeRunDesc.AppendResults(desc);
    }

    return activeRunDesc;
}
/// <summary>
/// Runs all tests given to this runner and returns a suite of run results.
/// </summary>
/// <returns>The test run results for each test</returns>
public override TestSuiteRunResults Run()
{
    //create a new Run Results object
    TestSuiteRunResults activeRunDesc = new TestSuiteRunResults();
    double totalTime = 0;
    try
    {
        var start = DateTime.Now;
        foreach (var test in _tests)
        {
            if (RunCancelled())
                break;

            string errorReason = string.Empty;
            TestRunResults runResult = null;
            try
            {
                runResult = RunHPToolsTest(test, ref errorReason);
            }
            catch (Exception ex)
            {
                //a crash in the tool adapter is reported as an errored test instead of aborting the suite
                runResult = new TestRunResults();
                runResult.TestState = TestState.Error;
                runResult.ErrorDesc = ex.Message;
                runResult.TestName = test.TestName;
            }

            //get the original source for this test, for grouping tests under test classes
            runResult.TestGroup = test.TestGroup;
            activeRunDesc.TestRuns.Add(runResult);

            //if fail was terminated before this step, continue
            if (runResult.TestState != TestState.Failed)
            {
                if (runResult.TestState != TestState.Error)
                {
                    //refine the state from the generated report
                    Helper.GetTestStateFromReport(runResult);
                }
                else
                {
                    //fill in a meaningful error description when the adapter did not provide one
                    if (string.IsNullOrEmpty(runResult.ErrorDesc))
                    {
                        runResult.ErrorDesc = RunCancelled()
                            ? HpToolsLauncher.Properties.Resources.ExceptionUserCancelled
                            : HpToolsLauncher.Properties.Resources.ExceptionExternalProcess;
                    }
                    runResult.ReportLocation = null;
                    runResult.TestState = TestState.Error;
                }
            }

            //a pass with warnings is downgraded to Warning
            if (runResult.TestState == TestState.Passed && runResult.HasWarnings)
            {
                runResult.TestState = TestState.Warning;
                ConsoleWriter.WriteLine(Resources.FsRunnerTestDoneWarnings);
            }
            else
            {
                ConsoleWriter.WriteLine(string.Format(Resources.FsRunnerTestDone, runResult.TestState));
            }

            ConsoleWriter.WriteLine(DateTime.Now.ToString(Launcher.DateFormat) + " Test complete: " + runResult.TestPath + "\n-------------------------------------------------------------------------------------------------------");

            UpdateCounters(runResult.TestState);
            //NOTE: the previous per-test timing locals (testStart/testTotalTime) were dead code and have been removed
        }
        totalTime = (DateTime.Now - start).TotalSeconds;
    }
    finally
    {
        //always publish the suite-level counters, even when the loop was interrupted
        activeRunDesc.NumTests = _tests.Count;
        activeRunDesc.NumErrors = _errors;
        activeRunDesc.TotalRunTime = TimeSpan.FromSeconds(totalTime);
        activeRunDesc.NumFailures = _fail;

        //give every tool-specific runner a chance to clean up after itself
        foreach (IFileSysTestRunner cleanupRunner in _colRunnersForCleanup.Values)
        {
            cleanupRunner.CleanUp();
        }
    }
    return activeRunDesc;
}
/// <summary>
/// Runs a test set with the given parameters (requires a valid connection to the QC/ALM server).
/// </summary>
/// <param name="tsFolderName">testSet folder name (path below "Root")</param>
/// <param name="tsName">testSet name</param>
/// <param name="timeout">-1 for unlimited, or number of milliseconds</param>
/// <param name="runMode">run on LocalMachine, on a remote machine, or on the hosts planned in the test set</param>
/// <param name="runHost">if run on remote machine - remote machine name</param>
/// <returns>the run results of the test set, or null when the folder or the test set cannot be found</returns>
public TestSuiteRunResults RunTestSet(string tsFolderName, string tsName, double timeout, QcRunMode runMode, string runHost)
{
    string currentTestSetInstances = "";
    TestSuiteRunResults runDesc = new TestSuiteRunResults();
    TestRunResults activeTestDesc = null;
    var tsFactory = tdConnection.TestSetFactory;
    var tsTreeManager = (ITestSetTreeManager)tdConnection.TestSetTreeManager;
    List tsList = null;
    string tsPath = "Root\\" + tsFolderName;
    ITestSetFolder tsFolder = null;
    try
    {
        tsFolder = (ITestSetFolder)tsTreeManager.get_NodeByPath(tsPath);
    }
    catch (COMException ex)
    {
        //not found
        tsFolder = null;
    }
    if (tsFolder == null)
    {
        //node wasn't found, folder = null
        //NOTE(review): tsFolder is always null here, so the format argument is empty - the message likely meant tsPath; confirm
        ConsoleWriter.WriteErrLine(string.Format(Resources.AlmRunnerNoSuchFolder, tsFolder));
        //this will make sure run will fail at the end. (since there was an error)
        Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        return null;
    }
    else
    {
        tsList = tsFolder.FindTestSets(tsName);
    }
    if (tsList == null)
    {
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerCantFindTest, tsName));
        //this will make sure run will fail at the end. (since there was an error)
        Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        return null;
    }
    //pick the test set whose name matches exactly (FindTestSets may also return partial matches)
    ITestSet targetTestSet = null;
    foreach (ITestSet ts in tsList)
    {
        if (ts.Name.Equals(tsName, StringComparison.InvariantCultureIgnoreCase))
        {
            targetTestSet = ts;
            break;
        }
    }
    if (targetTestSet == null)
    {
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerCantFindTest, tsName));
        //this will make sure run will fail at the end. (since there was an error)
        Launcher.ExitCode = Launcher.ExitCodeEnum.Failed;
        return null;
    }
    ConsoleWriter.WriteLine(Resources.GeneralDoubleSeperator);
    ConsoleWriter.WriteLine(Resources.AlmRunnerStartingExecution);
    ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerDisplayTest, tsName, targetTestSet.ID));
    ITSScheduler Scheduler = null;
    try
    {
        //need to run this to install everything needed http://AlmServer:8080/qcbin/start_a.jsp?common=true
        //start the scheduler
        Scheduler = targetTestSet.StartExecution("");
    }
    catch (Exception ex)
    {
        Scheduler = null;
    }
    try
    {
        currentTestSetInstances = GetTestInstancesString(targetTestSet);
    }
    catch (Exception ex)
    {
        //best effort - the instances string is only needed for a possible Stop() later
    }
    if (Scheduler == null)
    {
        Console.WriteLine(GetAlmNotInstalledError());
        //proceeding with program execution is tasteless, since nothing will run without a properly installed QC.
        Environment.Exit((int)Launcher.ExitCodeEnum.Failed);
    }
    //build the list of test instances belonging to this test set
    TSTestFactory tsTestFactory = targetTestSet.TSTestFactory;
    ITDFilter2 tdFilter = tsTestFactory.Filter;
    tdFilter["TC_CYCLE_ID"] = targetTestSet.ID.ToString();
    IList tList = tsTestFactory.NewList(tdFilter.Text);
    try
    {
        //set up for the run depending on where the test instances are to execute
        switch (runMode)
        {
            case QcRunMode.RUN_LOCAL:
                // run all tests on the local machine
                Scheduler.RunAllLocally = true;
                break;
            case QcRunMode.RUN_REMOTE:
                // run tests on a specified remote machine
                Scheduler.TdHostName = runHost;
                break;
            // RunAllLocally must not be set for remote invocation of tests. As such, do not do this: Scheduler.RunAllLocally = False
            case QcRunMode.RUN_PLANNED_HOST:
                // run on the hosts as planned in the test set
                Scheduler.RunAllLocally = false;
                break;
        }
    }
    catch (Exception ex)
    {
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerProblemWithHost, ex.Message));
    }
    ConsoleWriter.WriteLine(Resources.AlmRunnerNumTests + tList.Count);
    //assign a host to each test instance and create its (still empty) result placeholder
    int i = 1;
    foreach (ITSTest3 test in tList)
    {
        string runOnHost = runHost;
        if (runMode == QcRunMode.RUN_PLANNED_HOST)
            runOnHost = test.HostName;
        //if host isn't taken from QC (PLANNED) and not from the test definition (REMOTE), take it from LOCAL (machineName)
        string hostName = runOnHost;
        if (runMode == QcRunMode.RUN_LOCAL)
        {
            hostName = Environment.MachineName;
        }
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerDisplayTestRunOnHost, i, test.Name, hostName));
        Scheduler.RunOnHost[test.ID] = runOnHost;
        var testResults = new TestRunResults();
        testResults.TestName = test.Name;
        runDesc.TestRuns.Add(testResults);
        i = i + 1;
    }
    Stopwatch sw = Stopwatch.StartNew();
    //NOTE(review): testSw is never used - dead local kept for byte-compatibility
    Stopwatch testSw = null;
    try
    {
        //tests are actually run
        Scheduler.Run();
    }
    catch (Exception ex)
    {
        ConsoleWriter.WriteLine(Resources.AlmRunnerRunError + ex.Message);
    }
    ConsoleWriter.WriteLine(Resources.AlmRunnerSchedStarted + DateTime.Now.ToString(Launcher.DateFormat));
    ConsoleWriter.WriteLine(Resources.SingleSeperator);
    IExecutionStatus executionStatus = Scheduler.ExecutionStatus;
    bool tsExecutionFinished = false;
    ITSTest prevTest = null;
    ITSTest currentTest = null;
    //the presence of this file signals a user-requested stop (checked repeatedly below)
    string abortFilename = System.IO.Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) + "\\stop" + Launcher.UniqueTimeStamp + ".txt";
    //wait for the tests to end ("normally" or because of the timeout)
    while ((tsExecutionFinished == false) && (timeout == -1 || sw.Elapsed.TotalSeconds < timeout))
    {
        executionStatus.RefreshExecStatusInfo("all", true);
        tsExecutionFinished = executionStatus.Finished;
        if (System.IO.File.Exists(abortFilename))
        {
            break;
        }
        //poll the status of every test instance in the set
        for (int j = 1; j <= executionStatus.Count; ++j)
        {
            TestExecStatus testExecStatusObj = executionStatus[j];
            currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
            if (currentTest == null)
            {
                ConsoleWriter.WriteLine(string.Format("currentTest is null for test.{0} during execution", j));
                continue;
            }
            activeTestDesc = UpdateTestStatus(runDesc, targetTestSet, testExecStatusObj, true);
            //only report when the test changed state since the previous poll
            if (activeTestDesc.PrevTestState != activeTestDesc.TestState)
            {
                TestState tstate = activeTestDesc.TestState;
                if (tstate == TestState.Running)
                {
                    //currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
                    int testIndex = GetIdxByTestName(currentTest.Name, runDesc);
                    int prevRunId = GetTestRunId(currentTest);
                    runDesc.TestRuns[testIndex].PrevRunId = prevRunId;
                    //closing previous test
                    if (prevTest != null)
                    {
                        WriteTestRunSummary(prevTest);
                    }
                    //starting new test
                    prevTest = currentTest;
                    //assign the new test the console writer so it will gather the output
                    ConsoleWriter.ActiveTestRun = runDesc.TestRuns[testIndex];
                    ConsoleWriter.WriteLine(DateTime.Now.ToString(Launcher.DateFormat) + " Running: " + currentTest.Name);
                    //tell user that the test is running
                    ConsoleWriter.WriteLine(DateTime.Now.ToString(Launcher.DateFormat) + " Running test: " + activeTestDesc.TestName + ", Test id: " + testExecStatusObj.TestId + ", Test instance id: " + testExecStatusObj.TSTestId);
                    //start timing the new test run
                    string foldername = "";
                    ITestSetFolder folder = targetTestSet.TestSetFolder as ITestSetFolder;
                    if (folder != null)
                        foldername = folder.Name.Replace(".", "_");
                    //the test group is its test set (dots are problematic since Jenkins parses them as separators between package and class)
                    activeTestDesc.TestGroup = foldername + "\\" + targetTestSet.Name;
                    activeTestDesc.TestGroup = activeTestDesc.TestGroup.Replace(".", "_");
                }
                TestState enmState = GetTsStateFromQcState(testExecStatusObj.Status as string);
                string statusString = enmState.ToString();
                if (enmState == TestState.Running)
                {
                    ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerStat, activeTestDesc.TestName, testExecStatusObj.TSTestId, statusString));
                }
                else if (enmState != TestState.Waiting)
                {
                    ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerStatWithMessage, activeTestDesc.TestName, testExecStatusObj.TSTestId, statusString, testExecStatusObj.Message));
                }
                if (System.IO.File.Exists(abortFilename))
                {
                    break;
                }
            }
        }
        //wait 0.2 seconds
        Thread.Sleep(200);
        //check for abortion
        if (System.IO.File.Exists(abortFilename))
        {
            _blnRunCancelled = true;
            ConsoleWriter.WriteLine(Resources.GeneralStopAborted);
            //stop all test instances in this testSet.
            Scheduler.Stop(currentTestSetInstances);
            ConsoleWriter.WriteLine(Resources.GeneralAbortedByUser);
            //stop working
            Environment.Exit((int)Launcher.ExitCodeEnum.Aborted);
        }
    }
    //check status for each test
    if (timeout == -1 || sw.Elapsed.TotalSeconds < timeout)
    {
        //close last test
        if (prevTest != null)
        {
            WriteTestRunSummary(prevTest);
        }
        //done with all tests, stop collecting output in the testRun object.
        ConsoleWriter.ActiveTestRun = null;
        //final pass: collect the definitive result of each test instance
        for (int k = 1; k <= executionStatus.Count; ++k)
        {
            if (System.IO.File.Exists(abortFilename))
            {
                break;
            }
            TestExecStatus testExecStatusObj = executionStatus[k];
            currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
            if (currentTest == null)
            {
                ConsoleWriter.WriteLine(string.Format("currentTest is null for test.{0} after whole execution", k));
                continue;
            }
            activeTestDesc = UpdateTestStatus(runDesc, targetTestSet, testExecStatusObj, false);
            UpdateCounters(activeTestDesc, runDesc);
            //currentTest = targetTestSet.TSTestFactory[testExecStatusObj.TSTestId];
            string testPath = "Root\\" + tsFolderName + "\\" + tsName + "\\" + activeTestDesc.TestName;
            activeTestDesc.TestPath = testPath;
        }
        //update the total runtime
        runDesc.TotalRunTime = sw.Elapsed;
        ConsoleWriter.WriteLine(string.Format(Resources.AlmRunnerTestsetDone, tsName, DateTime.Now.ToString(Launcher.DateFormat)));
    }
    else
    {
        //timeout elapsed before the scheduler finished
        _blnRunCancelled = true;
        ConsoleWriter.WriteLine(Resources.GeneralTimedOut);
        Launcher.ExitCode = Launcher.ExitCodeEnum.Aborted;
    }
    return runDesc;
}
/// <summary>
/// Runs all tests given to this runner and returns a suite of run results.
/// </summary>
/// <returns>The test run results for each test</returns>
public override TestSuiteRunResults Run()
{
    //create a new Run Results object
    TestSuiteRunResults activeRunDesc = new TestSuiteRunResults();
    double totalTime = 0;
    var start = DateTime.Now;

    //make sure no stale aborter is running, then start a fresh one for this run
    KillAllAborterProcesses();
    Process aborter = StartHPToolsAborter();
    DateTime robotAlive = DateTime.Now;
    try
    {
        LogCleanupTestInfo();
        foreach (var test in _tests)
        {
            //periodically recycle the robot so long runs don't degrade it
            if (IsRobotTooLongAlive(robotAlive))
            {
                RunRobotCleanup();
                robotAlive = DateTime.Now;
            }
            if (IsTestPlaceholder(test))
            {
                continue;
            }
            if (RunCancelled())
            {
                break;
            }

            TestRunResults runResult = ExecuteTest(test);
            if (RunCancelled())
            {
                break;
            }

            //on failure: clean the environment, run the cleanup test, then retry once
            if (IsTestFailed(runResult) && IsCleanupTestDefined() && !IsCleanupTest(test))
            {
                Console.WriteLine("Test Failed: CLEANUP AND RE-RUN");
                RunRobotCleanup();
                ExecuteTest(GetCleanupTest());
                if (RunCancelled())
                {
                    break;
                }
                runResult = ExecuteTest(test);
            }
            activeRunDesc.TestRuns.Add(runResult);
            AnalyzeRunResult(runResult);
        }
    }
    catch (Exception)
    {
        //deliberate best-effort: a crash in one test must not prevent the
        //aborter shutdown and cleanup below. NOTE(review): consider logging here.
    }
    finally
    {
        if (!aborter.HasExited)
        {
            aborter.Kill();
        }
        //always publish the suite-level counters, even when the loop was interrupted
        totalTime = (DateTime.Now - start).TotalSeconds;
        activeRunDesc.NumTests = _tests.Count;
        activeRunDesc.NumErrors = _errors;
        activeRunDesc.TotalRunTime = TimeSpan.FromSeconds(totalTime);
        activeRunDesc.NumFailures = _fail;
        RunRobotCleanup();
    }
    return activeRunDesc;
}
/// <summary>
/// Converts all data from the test results into the JUnit xml format and writes the xml file to disk.
/// LoadRunner tests each become their own testsuite; all UFT tests are grouped into one shared testsuite.
/// </summary>
/// <param name="results">the aggregated results of the whole run</param>
public void CreateXmlFromRunResults(TestSuiteRunResults results)
{
    _testSuites = new testsuites();

    //suite that collects all UFT test cases; LoadRunner results are added as separate suites
    testsuite uftts = new testsuite
    {
        errors = results.NumErrors.ToString(),
        tests = results.NumTests.ToString(),
        failures = results.NumFailures.ToString(),
        name = results.SuiteName,
        package = ClassName
    };

    foreach (TestRunResults testRes in results.TestRuns)
    {
        if (testRes.TestType == TestType.LoadRunner.ToString())
        {
            testsuite lrts = CreateXmlFromLRRunResults(testRes);
            _testSuites.AddTestsuite(lrts);
        }
        else
        {
            testcase ufttc = CreateXmlFromUFTRunResults(testRes);
            uftts.AddTestCase(ufttc);
        }
    }

    //only emit the UFT suite when it actually holds test cases
    if (uftts.testcase.Length > 0)
    {
        _testSuites.AddTestsuite(uftts);
    }

    //overwrite any results file left over from a previous run
    if (File.Exists(XmlName))
    {
        File.Delete(XmlName);
    }

    using (Stream s = File.OpenWrite(XmlName))
    {
        _serializer.Serialize(s, _testSuites);
    }
}