/// <summary>
/// Executes the specified test case
/// </summary>
/// <param name="vm">Test explorer view model</param>
/// <param name="testToRun">Test that hosts the test case</param>
/// <param name="caseToRun">Test case to run</param>
/// <param name="token">Token to cancel tests</param>
/// <remarks>
/// The timeout and the Act/Arrange/Assert plans come from the hosting test;
/// only the machine context and the state belong to the individual case.
/// </remarks>
private async Task ExecuteCaseAsync(TestExplorerToolWindowViewModel vm, TestItem testToRun,
    TestCaseItem caseToRun, CancellationToken token)
{
    if (token.IsCancellationRequested)
    {
        return;
    }

    // --- The timeout is defined by the hosting test's plan
    var timeout = testToRun.Plan.TimeoutValue;

    // --- FIX: the message previously read "Test set execution started" — a copy-paste
    // --- from the test set runner; this method logs for a single test case.
    caseToRun.Log("Test case execution started"
        + (timeout == 0 ? "" : $" with {timeout}ms timeout"));
    var watch = new Stopwatch();
    watch.Start();
    try
    {
        // --- Set the test case machine context
        caseToRun.Plan.MachineContext = this;

        // --- Execute arrange
        ExecuteArrange(caseToRun.Plan, testToRun.Plan.ArrangeAssignments);
        ReportTimeDetail("Arrange:", caseToRun, watch);

        // --- Execute the test code
        var success = await InvokeCodeAsync(caseToRun, testToRun.Plan.Act, timeout, token, watch);
        ReportTimeDetail("Act:", caseToRun, watch);
        if (success)
        {
            // --- Execute assertions; the reported index is the first failing assertion
            if (ExecuteAssert(caseToRun.Plan, testToRun.Plan.Assertions, out var stopIndex))
            {
                caseToRun.State = TestState.Success;
            }
            else
            {
                caseToRun.State = TestState.Failed;
                caseToRun.Log($"Assertion #{stopIndex} failed.", LogEntryType.Fail);
            }
            ReportTimeDetail("Assert:", caseToRun, watch);
        }
    }
    catch (Exception ex)
    {
        HandleException(caseToRun, ex);
    }
    finally
    {
        // --- FIX: stop the stopwatch before reading Elapsed; the sibling runners
        // --- (test/set/file/root) all stop their watches in the finally block.
        watch.Stop();
        vm.UpdateCounters();
        caseToRun.Log($"Test execution completed in {watch.Elapsed.TotalSeconds:####0.####} seconds");
        ReportTestResult(caseToRun);
    }
}
/// <summary>
/// Execute the tests within the specified test file
/// </summary>
/// <param name="vm">Test explorer view model</param>
/// <param name="fileToRun">Test file to run</param>
/// <param name="token">Token to cancel tests</param>
private async Task ExecuteFileTests(TestExplorerToolWindowViewModel vm, TestFileItem fileToRun,
    CancellationToken token)
{
    if (token.IsCancellationRequested)
    {
        return;
    }

    // --- Prepare this file for testing
    fileToRun.Log("Test file execution started");
    var stopwatch = Stopwatch.StartNew();
    try
    {
        // --- Run every test set of the file, refreshing the file state around each
        foreach (var testSet in fileToRun.TestSetsToRun)
        {
            if (token.IsCancellationRequested)
            {
                break;
            }
            testSet.State = TestState.Running;
            SetTestFileState(fileToRun);
            await ExecuteSetTestsAsync(vm, testSet, token);
            SetTestFileState(fileToRun);
            vm.UpdateCounters();
        }
    }
    catch (Exception ex)
    {
        HandleException(fileToRun, ex);
    }
    finally
    {
        stopwatch.Stop();

        // --- Any set that never got a chance to run is inconclusive
        foreach (var testSet in fileToRun.TestSetsToRun)
        {
            if (testSet.State == TestState.NotRun)
            {
                SetSubTreeState(testSet, TestState.Inconclusive);
            }
        }
        SetTestFileState(fileToRun);

        // --- Report outcome
        vm.UpdateCounters();
        ReportEllapsedTime("Test file", fileToRun, stopwatch);
    }
}
/// <summary>
/// Initializes the tree node with the specified parent
/// </summary>
/// <param name="vm">Parent view model; must not be null</param>
/// <param name="parent">Parent node; null for a root node</param>
/// <exception cref="ArgumentNullException">Thrown when vm is null</exception>
protected TestItemBase(TestExplorerToolWindowViewModel vm, TestItemBase parent)
{
    if (vm == null)
    {
        throw new ArgumentNullException(nameof(vm));
    }
    Vm = vm;
    Parent = parent;
}
/// <summary>
/// Executes the test within a test set
/// </summary>
/// <param name="vm">Test explorer view model</param>
/// <param name="testToRun">The test to run</param>
/// <param name="token">Token to cancel tests</param>
/// <remarks>
/// A test with no explicit test cases runs a single implicit case inline
/// (Arrange/Act/Assert); otherwise each case is delegated to ExecuteCaseAsync.
/// Optional Setup runs before, optional Cleanup after either path.
/// </remarks>
private async Task ExecuteTestsAsync(TestExplorerToolWindowViewModel vm, TestItem testToRun,
    CancellationToken token)
{
    if (token.IsCancellationRequested)
    {
        return;
    }

    // --- Prepare a test for testing
    var timeout = testToRun.Plan.TimeoutValue;

    // --- FIX: the message previously read "Test set execution started" — a copy-paste
    // --- from the test set runner; this method runs a single test.
    testToRun.Log("Test execution started"
        + (timeout == 0 ? "" : $" with {timeout}ms timeout"));
    var watch = new Stopwatch();
    watch.Start();
    try
    {
        // --- Set the test machine context
        testToRun.Plan.MachineContext = this;
        var cpu = Package.MachineViewModel.SpectrumVm.Cpu as IZ80CpuTestSupport;
        var plan = testToRun.Plan;

        // --- Execute setup code, if any
        if (plan.Setup != null)
        {
            cpu?.SetIffValues(!testToRun.Plan.DisableInterrupt);
            var success = await InvokeCodeAsync(testToRun, plan.Setup, plan.TimeoutValue, token, watch);
            ReportTimeDetail("Setup:", testToRun, watch);
            if (!success)
            {
                // --- A failing setup aborts the whole test; the finally block still reports
                testToRun.Log("Test setup code invocation failed.", LogEntryType.Fail);
                return;
            }
        }

        if (testToRun.TestCasesToRun.Count == 0)
        {
            // --- This test has a single default test case

            // --- Execute arrange
            ExecuteArrange(testToRun.Plan, testToRun.Plan.ArrangeAssignments);
            ReportTimeDetail("Arrange:", testToRun, watch);

            // --- Set interrupt mode
            cpu?.SetIffValues(!testToRun.Plan.DisableInterrupt);

            // --- Execute the test code
            var success = await InvokeCodeAsync(testToRun, testToRun.Plan.Act, timeout, token, watch);
            ReportTimeDetail("Act:", testToRun, watch);
            if (success)
            {
                // --- Execute assertions; the reported index is the first failing assertion
                if (ExecuteAssert(testToRun.Plan, testToRun.Plan.Assertions, out var stopIndex))
                {
                    testToRun.State = TestState.Success;
                }
                else
                {
                    testToRun.State = TestState.Failed;
                    testToRun.Log($"Assertion #{stopIndex} failed.", LogEntryType.Fail);
                }
                ReportTimeDetail("Assert:", testToRun, watch);
            }
        }
        else
        {
            // --- This test has individual test cases; iterate through them
            testToRun.Plan.CurrentTestCaseIndex = -1;
            foreach (var caseToRun in testToRun.TestCasesToRun)
            {
                // NOTE(review): returning here skips the Cleanup block below
                // (the finally block still runs) — confirm this is intended on cancellation.
                if (token.IsCancellationRequested)
                {
                    return;
                }
                caseToRun.State = TestState.Running;
                testToRun.Plan.CurrentTestCaseIndex++;
                await ExecuteCaseAsync(vm, testToRun, caseToRun, token);
                vm.UpdateCounters();
            }
        }

        if (plan.Cleanup != null)
        {
            // --- Execute cleanup code
            cpu?.SetIffValues(!testToRun.Plan.DisableInterrupt);
            var success = await InvokeCodeAsync(testToRun, plan.Cleanup, plan.TimeoutValue, token, watch);
            ReportTimeDetail("Cleanup:", testToRun, watch);
            if (!success)
            {
                testToRun.Log("Test cleanup code invocation failed.", LogEntryType.Fail);
            }
        }
    }
    catch (Exception ex)
    {
        HandleException(testToRun, ex);
    }
    finally
    {
        watch.Stop();

        // --- Mark inconclusive tests
        testToRun.TestCasesToRun.ForEach(i =>
        {
            if (i.State == TestState.NotRun)
            {
                SetSubTreeState(i, TestState.Inconclusive);
            }
        });
        SetTestState(testToRun);

        // --- Report outcome; a test with a single default case reports its own result
        vm.UpdateCounters();
        ReportEllapsedTime("Test", testToRun, watch);
        if (testToRun.TestCasesToRun.Count == 0)
        {
            ReportTestResult(testToRun);
        }
    }
}
/// <summary>
/// Execute the tests within the specified test set
/// </summary>
/// <param name="vm">Test explorer view model</param>
/// <param name="setToRun">Test set to run</param>
/// <param name="token">Token to cancel tests</param>
/// <remarks>
/// Boots the Spectrum VM into the set's startup state, injects the compiled
/// code, applies the initial assignments, then runs each collected test.
/// </remarks>
private async Task ExecuteSetTestsAsync(TestExplorerToolWindowViewModel vm, TestSetItem setToRun, CancellationToken token)
{
    if (token.IsCancellationRequested)
    {
        return;
    }
    // --- Prepare test set for testing
    setToRun.Log("Test set execution started");
    var watch = new Stopwatch();
    watch.Start();
    try
    {
        // --- Set the test set machine context
        setToRun.Plan.MachineContext = this;

        // --- Set the startup state of the Spectrum VM; a failed startup is
        // --- surfaced as a cancellation and handled by the catch block below
        var startup = await Package.StateFileManager.SetProjectMachineStartupState(setToRun.Plan.Sp48Mode);
        if (!startup)
        {
            throw new TaskCanceledException();
        }

        // --- Inject the source code into the vm
        var plan = setToRun.Plan;
        Package.CodeManager.InjectCodeIntoVm(plan.CodeOutput);

        // --- Set up registers with default values
        ExecuteAssignment(plan.InitAssignments);

        // --- Iterate through individual test cases
        foreach (var testToRun in setToRun.TestsToRun)
        {
            // NOTE(review): returning here skips the Stop() call below, so on
            // cancellation (or an exception) the Spectrum VM is left running —
            // confirm a later stage shuts it down.
            if (token.IsCancellationRequested)
            {
                return;
            }
            testToRun.State = TestState.Running;
            SetTestSetState(setToRun);
            await ExecuteTestsAsync(vm, testToRun, token);
            SetTestSetState(setToRun);
            vm.UpdateCounters();
        }

        // --- Stop the Spectrum VM
        await Package.MachineViewModel.Stop();
    }
    catch (Exception ex)
    {
        HandleException(setToRun, ex);
    }
    finally
    {
        watch.Stop();

        // --- Mark inconclusive tests (those that never got a chance to run)
        setToRun.TestsToRun.ForEach(i =>
        {
            if (i.State == TestState.NotRun)
            {
                SetSubTreeState(i, TestState.Inconclusive);
            }
        });
        SetTestSetState(setToRun);

        // --- Report outcome
        vm.UpdateCounters();
        ReportEllapsedTime("Test set", setToRun, watch);
    }
}
/// <summary>
/// Execute all test held by the specified root node
/// </summary>
/// <param name="vm">Test explorer view model</param>
/// <param name="rootToRun">Root node instance</param>
/// <param name="token">Token to cancel tests</param>
private async Task ExecuteTestTreeAsync(TestExplorerToolWindowViewModel vm, TestRootItem rootToRun,
    CancellationToken token)
{
    if (token.IsCancellationRequested)
    {
        return;
    }

    // --- Clear every node's log before running, then announce the run
    vm.TestRoot.SubTreeForEach(item => item.LogItems.Clear());
    vm.TestRoot.Log("Test execution started");

    // --- Start running tests
    var stopwatch = Stopwatch.StartNew();
    rootToRun.State = TestState.Running;
    try
    {
        // --- Run the files one after the other, refreshing the root state around each
        foreach (var file in rootToRun.TestFilesToRun)
        {
            if (token.IsCancellationRequested)
            {
                return;
            }
            file.State = TestState.Running;
            SetTestRootState(rootToRun);
            await ExecuteFileTests(vm, file, token);
            SetTestRootState(rootToRun);
            vm.UpdateCounters();
        }
    }
    catch (Exception ex)
    {
        HandleException(rootToRun, ex);
    }
    finally
    {
        stopwatch.Stop();

        // --- Any file that never got a chance to run is inconclusive
        foreach (var file in rootToRun.TestFilesToRun)
        {
            if (file.State == TestState.NotRun)
            {
                SetSubTreeState(file, TestState.Inconclusive);
            }
        }
        SetTestRootState(rootToRun);

        // --- Summarize the outcome in the root log
        vm.UpdateCounters();
        ReportEllapsedTime("Tests", vm.TestRoot, stopwatch);
        if (token.IsCancellationRequested)
        {
            vm.TestRoot.Log("Test run has been cancelled by the user.", LogEntryType.Fail);
        }

        // --- Logs the singular message for a count of one, the plural one otherwise;
        // --- zero counts stay silent
        void LogCount(int count, string singular, string plural, LogEntryType type)
        {
            if (count == 1)
            {
                vm.TestRoot.Log(singular, type);
            }
            else if (count > 1)
            {
                vm.TestRoot.Log(plural, type);
            }
        }

        LogCount(vm.Counters.Success, "1 test successfully ran.",
            $"{vm.Counters.Success} tests successfully ran.", LogEntryType.Success);
        LogCount(vm.Counters.Failed, "1 test failed.",
            $"{vm.Counters.Failed} tests failed.", LogEntryType.Fail);
        if (vm.Counters.Aborted > 0 || vm.Counters.Inconclusive > 0)
        {
            vm.TestRoot.Log("The test result is inconclusive.", LogEntryType.Fail);
        }
    }
}
/// <summary>
/// Executes all tests that start with the specified node
/// </summary>
/// <param name="vm">Test explorer view model</param>
/// <param name="node">Root node of the subtree to run the tests for</param>
/// <param name="token">Token to stop tests</param>
/// <returns>
/// The task that runs the selected subtree, or a completed task when there is nothing to run
/// </returns>
/// <remarks>
/// Rebuilds the "to run" collections along the path from the root down to the selected
/// node, then kicks off the tree execution. Parent links are assumed to form a
/// well-shaped tree (case -> test -> set -> file -> root); a malformed tree faults here.
/// </remarks>
public Task RunTestsFromNodeAsync(TestExplorerToolWindowViewModel vm, TestItemBase node,
    CancellationToken token)
{
    TestRootItem rootToRun = null;
    switch (node)
    {
        case TestRootItem rootNode:
            // --- Prepare all file nodes to run
            rootNode.TestFilesToRun.Clear();
            foreach (var child in rootNode.ChildItems)
            {
                if (!(child is TestFileItem fileItem))
                {
                    continue;
                }
                rootNode.TestFilesToRun.Add(fileItem);
                fileItem.CollectAllToRun();
            }
            rootToRun = rootNode;
            break;

        case TestSetItem setNode:
        {
            // --- Prepare this test set to run
            setNode.TestsToRun.Clear();
            setNode.CollectAllToRun();
            var fileItem = setNode.Parent as TestFileItem;
            var root = rootToRun = fileItem.Parent as TestRootItem;
            root.TestFilesToRun.Clear();
            root.TestFilesToRun.Add(fileItem);
            fileItem.TestSetsToRun.Clear();
            fileItem.TestSetsToRun.Add(setNode);
            break;
        }

        case TestItem testNode:
        {
            // --- Prepare this test to run
            testNode.TestCasesToRun.Clear();
            testNode.CollectAllToRun();
            var setItem = testNode.Parent as TestSetItem;
            var fileItem = setItem.Parent as TestFileItem;
            var root = rootToRun = fileItem.Parent as TestRootItem;
            root.TestFilesToRun.Clear();
            root.TestFilesToRun.Add(fileItem);
            fileItem.TestSetsToRun.Clear();
            fileItem.TestSetsToRun.Add(setItem);
            setItem.TestsToRun.Clear();
            setItem.TestsToRun.Add(testNode);
            break;
        }

        case TestCaseItem caseNode:
        {
            // --- Prepare this test case to run
            var testItem = caseNode.Parent as TestItem;
            var setItem = testItem.Parent as TestSetItem;
            // --- FIX: the original read "setItem?.Parent as TestFileItem"; that stray
            // --- null-conditional only postponed the NullReferenceException to the
            // --- unconditional "fileItem.Parent" dereference below. All branches now
            // --- rely on the tree shape uniformly, like the other cases do.
            var fileItem = setItem.Parent as TestFileItem;
            var root = rootToRun = fileItem.Parent as TestRootItem;
            root.TestFilesToRun.Clear();
            root.TestFilesToRun.Add(fileItem);
            fileItem.TestSetsToRun.Clear();
            fileItem.TestSetsToRun.Add(setItem);
            setItem.TestsToRun.Clear();
            setItem.TestsToRun.Add(testItem);
            testItem.TestCasesToRun.Clear();
            testItem.TestCasesToRun.Add(caseNode);
            break;
        }
    }

    // --- FIX: Task.CompletedTask instead of Task.FromResult(0) — identical semantics
    // --- for awaiting callers, and the idiomatic way to return a no-op Task.
    return rootToRun != null
        ? ExecuteTestTreeAsync(vm, rootToRun, token)
        : Task.CompletedTask;
}