/// <summary>
/// <para>
/// Deploys a new test cluster as specified by the cluster definition passed or connects
/// to a cluster previously deployed by this method when the cluster definition of the
/// existing cluster and the definition passed here are the same.
/// </para>
/// </summary>
/// <param name="clusterDefinition">The cluster definition model.</param>
/// <param name="options">
/// Optionally specifies the options that <see cref="ClusterFixture"/> will use to
/// manage the test cluster.
/// </param>
/// <returns>
/// <para>
/// The <see cref="TestFixtureStatus"/>:
/// </para>
/// <list type="table">
/// <item>
/// <term><see cref="TestFixtureStatus.Disabled"/></term>
/// <description>
/// Returned when cluster unit testing is disabled due to the <c>NEON_CLUSTER_TESTING</c> environment
/// variable not being present on the current machine which means that <see cref="TestHelper.IsClusterTestingEnabled"/>
/// returns <c>false</c>.
/// </description>
/// </item>
/// <item>
/// <term><see cref="TestFixtureStatus.Started"/></term>
/// <description>
/// Returned when one of the <c>Start()</c> methods is called for the first time for the fixture
/// instance, indicating that an existing cluster has been connected or a new cluster has been deployed.
/// </description>
/// </item>
/// <item>
/// <term><see cref="TestFixtureStatus.AlreadyRunning"/></term>
/// <description>
/// Returned when one of the <c>Start()</c> methods has already been called by your test
/// class instance.
/// </description>
/// </item>
/// </list>
/// </returns>
/// <exception cref="NeonKubeException">Thrown when the test cluster could not be deployed.</exception>
/// <remarks>
/// <para>
/// <b>IMPORTANT:</b> Only one <see cref="ClusterFixture"/> can be run at a time on
/// any one computer.  This is due to the fact that cluster state like the kubeconfig,
/// neonKUBE logins, logs and other files will be written to <b>~/.neonkube/spaces/$fixture/*</b>
/// so multiple fixture instances will be confused when trying to manage these same files.
/// </para>
/// <para>
/// This means that not only will running <see cref="ClusterFixture"/> based tests in parallel
/// within the same instance of Visual Studio fail, but running these tests in different
/// Visual Studio instances will also fail.
/// </para>
/// </remarks>
public TestFixtureStatus StartWithClusterDefinition(ClusterDefinition clusterDefinition, ClusterFixtureOptions options = null)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    // Locked clusters cannot be reset/removed, so they can't be used for testing.

    if (clusterDefinition.IsLocked)
    {
        throw new NeonKubeException("Test clusters need to be unlocked. Please set [isLocked: false] in your cluster definition.");
    }

    // Cluster testing is opt-in via the NEON_CLUSTER_TESTING environment variable.

    if (!TestHelper.IsClusterTestingEnabled)
    {
        return(TestFixtureStatus.Disabled);
    }

    if (started)
    {
        return(TestFixtureStatus.AlreadyRunning);
    }

    // Clone the options so that later mutations by the caller can't affect the fixture.

    options ??= new ClusterFixtureOptions();
    this.options = options.Clone();

    if (this.Cluster != null)
    {
        return(TestFixtureStatus.AlreadyRunning);
    }

    // Set the clusterspace mode, using any previously downloaded node image unless
    // the user specifies a custom image.  We're going to host the fixture state
    // files in this fixed folder:
    //
    //      ~/.neonkube/spaces/$fixture/*

    clusterspaceFolder = KubeHelper.SetClusterSpaceMode(string.IsNullOrEmpty(options.ImageUriOrPath) ? KubeClusterspaceMode.EnabledWithSharedCache : KubeClusterspaceMode.Enabled, KubeHelper.ClusterspacePrefix("fixture"));

    // Figure out whether the user passed an image URI or file path to override
    // the default node image.  Anything starting with http:// or https:// is
    // treated as a URI; everything else is treated as a local file path.

    var imageUriOrPath = options.ImageUriOrPath;
    var imageUri       = (string)null;
    var imagePath      = (string)null;

    if (string.IsNullOrEmpty(imageUriOrPath))
    {
        imageUriOrPath = KubeDownloads.GetDefaultNodeImageUri(clusterDefinition.Hosting.Environment);
    }

    if (imageUriOrPath.StartsWith("http://", StringComparison.InvariantCultureIgnoreCase) || imageUriOrPath.StartsWith("https://", StringComparison.InvariantCultureIgnoreCase))
    {
        imageUri = imageUriOrPath;
    }
    else
    {
        imagePath = imageUriOrPath;
    }

    //-------------------------------------------------------------
    // We need to deal with some scenarios here:
    //
    //  1. No cluster context or login exists for the target cluster.
    //
    //     A conflicting cluster may still exist though, having been deployed
    //     by another computer or perhaps the kubecontext/logins on the current
    //     machine may have been modified.  We need to be sure to remove any
    //     conflicting resources in this case.
    //
    //  2. Cluster context and login exist on the current machine for the target
    //     cluster but the cluster is unhealthy or locked.  We'll abort for locked
    //     clusters and remove and redeploy for unhealthy clusters.
    //
    //  3. Cluster context and login exist and the cluster is healthy.  In this case,
    //     we need to compare the deployed cluster version against the current version
    //     and remove/redeploy when the versions don't match.
    //
    //  4. Cluster context and login exist and the cluster is healthy and cluster versions
    //     match.  In this case, we'll compare the existing cluster definition with that for
    //     the new cluster and also compare the cluster versions and if they match and
    //     [RemoveClusterOnStart=false] we'll just use the existing cluster.
    //
    //  5. The current cluster matches the target but [RemoveClusterOnStart=true].
    //     We need to remove the current cluster in this case so we'll deploy a
    //     fresh one.

    // Determine whether a test cluster with the same name exists and if
    // its cluster definition matches the test cluster's definition.

    ;   // NOTE(review): stray empty statement in the original; harmless but should be removed.

    var clusterExists      = false;
    var clusterContextName = KubeContextName.Parse($"root@{clusterDefinition.Name}");
    var clusterContext     = KubeHelper.Config.GetContext(clusterContextName);
    var clusterLogin       = KubeHelper.GetClusterLogin(clusterContextName);

    // The cluster only "exists" for our purposes when both the kubecontext and the
    // login are present, setup completed, and the definitions are similar.

    if (clusterContext != null && clusterLogin != null && !clusterLogin.SetupDetails.SetupPending)
    {
        clusterExists = ClusterDefinition.AreSimilar(clusterDefinition, clusterLogin.ClusterDefinition);
    }

    if (clusterExists && !options.RemoveClusterOnStart)
    {
        // It looks like the test cluster may already exist.  We'll verify
        // that it's running, healthy, unlocked and the cluster versions match.
        // When all of these conditions are true, we'll use the existing cluster,
        // otherwise we'll remove the cluster as well as its context/login,
        // and deploy a new cluster below.

        using (var cluster = new ClusterProxy(clusterLogin.ClusterDefinition, new HostingManagerFactory()))
        {
            KubeHelper.SetCurrentContext(clusterContextName);

            // These block synchronously on async operations; this method is intentionally synchronous.

            var isLocked      = cluster.IsLockedAsync().ResultWithoutAggregate();
            var clusterInfo   = cluster.GetClusterInfoAsync().ResultWithoutAggregate();
            var clusterHealth = cluster.GetClusterHealthAsync().ResultWithoutAggregate();

            // NOTE(review): when the lock status is unknown (null) we proceed as if
            // unlocked; confirm that's intended (the CLI reset command treats unknown
            // lock status as an error).

            if (isLocked.HasValue && isLocked.Value)
            {
                throw new NeonKubeException($"Cluster is locked: {cluster.Name}");
            }

            if (clusterHealth.State == ClusterState.Healthy && clusterInfo.ClusterVersion == KubeVersions.NeonKube)
            {
                // We need to reset an existing cluster to ensure it's in a known state.

                cluster.ResetAsync().WaitWithoutAggregate();

                started   = true;
                IsRunning = true;
                Cluster   = new ClusterProxy(KubeHelper.CurrentContext, new HostingManagerFactory());

                return(TestFixtureStatus.Started);
            }

            // Unhealthy or version mismatch: remove the cluster and fall through to redeploy.

            cluster.RemoveAsync(removeOrphans: true).WaitWithoutAggregate();
        }
    }
    else
    {
        // There is no known existing cluster but there still might be a cluster
        // deployed by another machine or fragments of a partially deployed cluster,
        // so we need to do a preemptive cluster remove.

        using (var cluster = new ClusterProxy(clusterDefinition, new HostingManagerFactory()))
        {
            cluster.RemoveAsync(removeOrphans: true).WaitWithoutAggregate();
        }
    }

    // Provision the new cluster.

    WriteTestOutputLine($"PREPARE CLUSTER: {clusterDefinition.Name}");

    try
    {
        var controller = KubeSetup.CreateClusterPrepareController(
            clusterDefinition:   clusterDefinition,
            nodeImageUri:        imageUri,
            nodeImagePath:       imagePath,
            maxParallel:         options.MaxParallel,
            unredacted:          options.Unredacted,
            neonCloudHeadendUri: options.NeonCloudHeadendUri);

        switch (controller.RunAsync().ResultWithoutAggregate())
        {
            case SetupDisposition.Succeeded:

                WriteTestOutputLine("CLUSTER PREPARE: SUCCESS");
                break;

            case SetupDisposition.Failed:

                WriteTestOutputLine("CLUSTER PREPARE: FAIL");
                throw new NeonKubeException("Cluster prepare failed.");

            case SetupDisposition.Cancelled:
            default:

                // Cancellation isn't expected for fixture-driven deployments.

                throw new NotImplementedException();
        }
    }
    finally
    {
        // Capture the prepare logs even when prepare failed, when enabled.

        if (options.CaptureDeploymentLogs)
        {
            CaptureDeploymentLogs();
        }
    }

    // Setup the cluster.

    WriteTestOutputLine($"SETUP CLUSTER: {clusterDefinition.Name}");

    try
    {
        var controller = KubeSetup.CreateClusterSetupController(
            clusterDefinition: clusterDefinition,
            maxParallel:       options.MaxParallel,
            unredacted:        options.Unredacted);

        switch (controller.RunAsync().ResultWithoutAggregate())
        {
            case SetupDisposition.Succeeded:

                WriteTestOutputLine("CLUSTER SETUP: SUCCESS");
                break;

            case SetupDisposition.Failed:

                WriteTestOutputLine("CLUSTER SETUP: FAILED");
                throw new NeonKubeException("Cluster setup failed.");

            case SetupDisposition.Cancelled:
            default:

                throw new NotImplementedException();
        }
    }
    finally
    {
        if (options.CaptureDeploymentLogs)
        {
            CaptureDeploymentLogs();
        }
    }

    // NOTE: We just deployed brand new cluster so there's no need to reset it.

    started   = true;
    IsRunning = true;
    Cluster   = new ClusterProxy(KubeHelper.CurrentContext, new HostingManagerFactory());

    return(TestFixtureStatus.Started);
}
/// <inheritdoc/>
/// <remarks>
/// Resets the current cluster, optionally limiting the reset to specific components
/// (<c>--crio</c>, <c>--auth</c>, <c>--harbor</c>, <c>--minio</c>, <c>--monitoring</c>).
/// When no component options are passed, ALL components are reset.  Namespaces listed
/// via <c>--keep-namespaces</c> are retained.  Prompts for confirmation unless
/// <c>--force</c> is specified and refuses to reset locked clusters.
/// </remarks>
public override async Task RunAsync(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Help();
        Program.Exit(0);
    }

    Console.WriteLine();

    var context = KubeHelper.CurrentContext;

    if (context == null)
    {
        Console.Error.WriteLine($"*** ERROR: There is no current cluster.");
        Program.Exit(1);
    }

    var force = commandLine.HasOption("--force");

    // Determine the individual components to be reset.  We'll default to
    // resetting all of them when none of these options are specified.

    var crio       = commandLine.HasOption("--crio");
    var auth       = commandLine.HasOption("--auth");
    var harbor     = commandLine.HasOption("--harbor");
    var minio      = commandLine.HasOption("--minio");
    var monitoring = commandLine.HasOption("--monitoring");

    // BUGFIX: the original condition tested [monitoring] instead of [!monitoring],
    // which meant that running the command with no component options reset NOTHING
    // and passing [--monitoring] alone reset everything.

    if (!crio && !auth && !harbor && !minio && !monitoring)
    {
        crio       = true;
        auth       = true;
        harbor     = true;
        minio      = true;
        monitoring = true;
    }

    // Obtain the namespaces to be retained.

    var keep           = commandLine.GetOption("--keep-namespaces");
    var keepNamespaces = new List<string>();

    if (!string.IsNullOrEmpty(keep))
    {
        foreach (var @namespace in keep.Split(',', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries))
        {
            keepNamespaces.Add(@namespace);
        }
    }

    using (var cluster = new ClusterProxy(context, new HostingManagerFactory()))
    {
        var status = await cluster.GetClusterHealthAsync();

        switch (status.State)
        {
            case ClusterState.Healthy:
            case ClusterState.Unhealthy:

                if (!force)
                {
                    // Refuse to reset a cluster whose lock status is locked or unknown
                    // and confirm the reset with the user.

                    var isLocked = await cluster.IsLockedAsync();

                    if (!isLocked.HasValue)
                    {
                        Console.Error.WriteLine($"*** ERROR: [{cluster.Name}] lock status is unknown.");
                        Program.Exit(1);
                    }

                    if (isLocked.Value)
                    {
                        Console.Error.WriteLine($"*** ERROR: [{cluster.Name}] is locked.");
                        Program.Exit(1);
                    }

                    if (!Program.PromptYesNo($"Are you sure you want to reset: {cluster.Name}?"))
                    {
                        Program.Exit(0);
                    }
                }

                Console.WriteLine("Resetting cluster (this may take a while)...");

                await cluster.ResetAsync(
                    new ClusterResetOptions()
                    {
                        KeepNamespaces  = keepNamespaces,
                        ResetCrio       = crio,
                        ResetAuth       = auth,
                        ResetHarbor     = harbor,
                        ResetMinio      = minio,
                        ResetMonitoring = monitoring
                    },
                    progressMessage => Console.WriteLine(progressMessage));

                Console.WriteLine($"RESET: {cluster.Name}");
                break;

            default:

                Console.Error.WriteLine($"*** ERROR: Cluster is not running.");
                Program.Exit(1);
                break;
        }
    }
}