/// <summary>
/// Installs the hypervisor guest integration services on the node.
/// </summary>
/// <param name="controller">The setup controller.</param>
public void BaseInstallGuestIntegrationServices(ISetupController controller)
{
    Covenant.Requires<ArgumentException>(controller != null, nameof(controller));

    var environment = controller.Get<HostingEnvironment>(KubeSetupProperty.HostingEnvironment);

    // Guest integration services make sense only for on-premise hypervisors;
    // skip everything else (clouds, bare metal, ...).

    if (!KubeHelper.IsOnPremiseHypervisorEnvironment(environment))
    {
        return;
    }

    InvokeIdempotent("base/guest-integration",
        () =>
        {
            controller.LogProgress(this, verb: "setup", message: "guest integration services");

            // Register the Hyper-V kernel modules with initramfs, install the
            // virtual kernel/tools packages and then rebuild the initramfs.

            var script =
$@"#!/bin/bash
set -euo pipefail

cat <<EOF >> /etc/initramfs-tools/modules
hv_vmbus
hv_storvsc
hv_blkvsc
hv_netvsc
EOF

{KubeNodeFolder.Bin}/safe-apt-get install -yq linux-virtual linux-cloud-tools-virtual linux-tools-virtual
update-initramfs -u
";
            SudoCommand(CommandBundle.FromScript(script), RunOptions.Defaults | RunOptions.FaultOnError);
        });
}
/// <summary>
/// Ensures that sensitive folders and files on the local workstation are encrypted at rest
/// for security purposes.  These include the user's <b>.kube</b> and <b>.neonkube</b>
/// folders as well as <b>OpenVPN</b> when it exists.
/// </summary>
public static void EncryptSensitiveFiles()
{
    if (!NeonHelper.IsWindows)
    {
        // $todo(jeff.lill): Implement this for OS/X

        throw new NotImplementedException();
    }

    var userFolderPath = Environment.GetFolderPath(Environment.SpecialFolder.UserProfile);

    // Encrypt each sensitive folder that actually exists under the user profile.

    foreach (var folderName in new[] { ".kube", ".neonkube", "OpenVPN" })
    {
        var folderPath = Path.Combine(userFolderPath, folderName);

        if (Directory.Exists(folderPath))
        {
            KubeHelper.EncryptFile(folderPath);
        }
    }
}
/// <summary>
/// Disables <b>cloud-init</b>.
/// </summary>
/// <param name="controller">The setup controller.</param>
public void BaseDisableCloudInit(ISetupController controller)
{
    Covenant.Requires<ArgumentException>(controller != null, nameof(controller));

    var hostingEnvironment = controller.Get<HostingEnvironment>(KubeSetupProperty.HostingEnvironment);

    // Do this only for non-cloud environments: cloud hosting depends on
    // [cloud-init] to configure VMs on first boot.

    if (KubeHelper.IsCloudEnvironment(hostingEnvironment))
    {
        return;
    }

    InvokeIdempotent("base/cloud-init",
        () =>
        {
            controller.LogProgress(this, verb: "disable", message: "cloud-init");

            // NOTE: We added the [#!/bin/bash] shebang for consistency with our other
            // node scripts because [set -o pipefail] is a bash extension that is not
            // guaranteed to work under plain [sh] (dash on Ubuntu).

            var disableCloudInitScript =
$@"#!/bin/bash
set -euo pipefail

mkdir -p /etc/cloud
touch /etc/cloud/cloud-init.disabled
";
            SudoCommand(CommandBundle.FromScript(disableCloudInitScript), RunOptions.Defaults | RunOptions.FaultOnError);
        });
}
/// <summary>
/// Reads and returns information loaded from the current <b>~/.kube/config</b> file.
/// </summary>
/// <returns>The parsed <see cref="KubeConfig"/> or an empty config if the file doesn't exist.</returns>
/// <exception cref="NeonKubeException">Thrown when the current config is invalid.</exception>
public static KubeConfig Load()
{
    var configPath = KubeHelper.KubeConfigPath;

    if (!File.Exists(configPath))
    {
        // No config file: return an empty configuration.

        return new KubeConfig();
    }

    var config = NeonHelper.YamlDeserialize<KubeConfig>(KubeHelper.ReadFileTextWithRetry(configPath));

    config.Validate();

    // Attach any related neonKUBE cluster logins to their contexts, falling
    // back to an empty login when no login file exists for a context.

    foreach (var context in config.Contexts)
    {
        var extensionPath = Path.Combine(KubeHelper.LoginsFolder, $"{context.Name}.login.yaml");

        context.Extension = File.Exists(extensionPath)
            ? NeonHelper.YamlDeserialize<ClusterLogin>(KubeHelper.ReadFileTextWithRetry(extensionPath))
            : new ClusterLogin();
    }

    return config;
}
/// <summary>
/// Validates the options.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <exception cref="ClusterDefinitionException">Thrown if the definition is not valid.</exception>
internal void Validate(ClusterDefinition clusterDefinition)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    ContainerOptions.ValidateRegistryPrefix(Prefix, allowWildcard: true, propertyPath: $"{nameof(ClusterDefinition.Container)}.{nameof(ContainerOptions.Registries)}.{nameof(Prefix)}");

    try
    {
        KubeHelper.CheckName(this.Name);
    }
    catch (Exception e)
    {
        throw new ClusterDefinitionException(e.Message, e);
    }

    // BUGFIX: The original check was inverted; it threw when [Location] WAS
    // specified for a non-wildcard prefix, contradicting the error message.
    // [Location] is required when the prefix doesn't include a wildcard.

    if (!Prefix.StartsWith("*.") && string.IsNullOrEmpty(Location))
    {
        throw new ClusterDefinitionException($"[{nameof(ClusterDefinition.Container)}.{nameof(Prefix)}={Prefix}]: [{nameof(Location)}] required when the prefix doesn't include a wildcard like: *.example.com ");
    }

    if (!string.IsNullOrEmpty(Location))
    {
        ContainerOptions.ValidateRegistryPrefix(Location, allowWildcard: true, propertyPath: $"{nameof(ClusterDefinition.Container)}.{nameof(ContainerOptions.Registries)}.{nameof(Location)}");
    }
}
/// <summary>
/// Constructs an instance with the specified name and <typeparamref name="TConfig"/> value.
/// </summary>
/// <param name="name">Specifies the configmap name.</param>
/// <param name="namespace">Specifies the namespace.</param>
/// <param name="config">
/// Optionally specifies the initial config value.  A default instance will be created
/// when this is <c>null</c>.
/// </param>
public TypeSafeConfigMap(string name, string @namespace, TConfig config = null)
{
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(name), nameof(name));
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(@namespace), nameof(@namespace));

    // NOTE: We removed the contract check that rejected NULL [config] values
    // because the parameter is documented as optional and we default it below.

    this.Config    = config ?? new TConfig();
    this.ConfigMap = KubeHelper.CreateKubeObject<V1ConfigMap>(name);

    // Persist the serialized config under the well-known data property.

    this.ConfigMap.Data                   = new Dictionary<string, string>();
    this.ConfigMap.Data[dataPropertyName] = NeonHelper.JsonSerialize(this.Config);
}
/// <summary>
/// Removes a neonKUBE related kubecontext if it exists.
/// </summary>
/// <param name="name">The context name.</param>
/// <param name="noSave">Optionally prevent context save after the change.</param>
public void RemoveContext(KubeContextName name, bool noSave = false)
{
    // NOTE(review): [noSave] is never used below.  The [RemoveContext(context)]
    // overload presumably saves the config afterwards, so this parameter probably
    // needs to be forwarded to it -- TODO confirm against the overload's signature.

    var context = GetContext(name);

    if (context != null)
    {
        RemoveContext(context);
    }
    else
    {
        // No context is registered under this name, but an orphaned cluster
        // login file may still exist on disk; remove it when present.

        NeonHelper.DeleteFile(KubeHelper.GetClusterLoginPath(name));
    }
}
/// <summary>
/// Determines whether the hosting environment supports <b>zeroing</b>
/// block devices.
/// </summary>
/// <param name="environment">Specifies the hosting environment.</param>
/// <returns><c>true</c> if block device <b>zeroing</b> is supported.</returns>
public static bool SupportsFsZero(HostingEnvironment environment)
{
    // AWS EC2 backed block devices shouldn't be zeroed because that will
    // actually make snapshots and thus AMIs created from the snapshots
    // bigger and initially slower to boot.
    //
    //      https://aws.amazon.com/blogs/apn/how-to-build-sparse-ebs-volumes-for-fun-and-easy-snapshotting/
    //
    // The same thing will happen on other cloud environments with sparse
    // page blobs, so we'll disable this for all clouds.

    return(!KubeHelper.IsCloudEnvironment(environment));
}
/// <summary>
/// Constructs a reverse proxy that forwards a local port to a port on a
/// cluster service via <b>kubectl</b>.
/// </summary>
/// <param name="serviceName">The target service name.</param>
/// <param name="localPort">The local port.</param>
/// <param name="remotePort">The remote port.</param>
/// <param name="namespace">Optionally specifies the service namespace.  This defaults to <b>"default"</b>.</param>
/// <exception cref="NotSupportedException">Thrown when not running on Windows.</exception>
public PortForward(
    string serviceName,
    int    localPort,
    int    remotePort,
    string @namespace = "default")
{
    // NOTE: The original XML docs referenced [remoteTls], [serverCertificate],
    // [clientCertificate], [requestHandler] and [responseHandler] parameters that
    // don't exist on this overload; the docs above describe the actual signature.

    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(serviceName), nameof(serviceName));
    Covenant.Requires<ArgumentException>(NetHelper.IsValidPort(localPort));
    Covenant.Requires<ArgumentException>(NetHelper.IsValidPort(remotePort));

    if (!NeonHelper.IsWindows)
    {
        throw new NotSupportedException($"[{nameof(PortForward)}] is supported only on Windows.");
    }

    this.serviceName         = serviceName;
    this.localPort           = localPort;
    this.remotePort          = remotePort;
    this.@namespace          = @namespace;
    this.kubectlProxyProcess = new Process();

    // Establish the port forwarding connection.

    KubeHelper.PortForward(serviceName, remotePort, localPort, @namespace, kubectlProxyProcess);
}
/// <summary>
/// <para>
/// Ensures that the <b>helm</b> tool whose version is at least as great as the requested
/// cluster version is installed to the <b>neonKUBE</b> programs folder by copying the
/// tool from the cache if necessary.
/// </para>
/// <note>
/// This will probably require elevated privileges.
/// </note>
/// <note>
/// This assumes that <b>Helm</b> has already been downloaded and cached and also that
/// more recent <b>Helm</b> releases are backwards compatible with older deployed versions
/// of Tiller.
/// </note>
/// </summary>
/// <param name="setupInfo">The Kubernetes setup information.</param>
public static void InstallHelm(KubeSetupInfo setupInfo)
{
    Covenant.Requires<ArgumentNullException>(setupInfo != null, nameof(setupInfo));

    var hostPlatform   = KubeHelper.HostPlatform;
    var cachedHelmPath = KubeHelper.GetCachedComponentPath(hostPlatform, "helm", setupInfo.Versions.Helm);
    var targetPath     = Path.Combine(KubeHelper.ProgramFolder);

    switch (hostPlatform)
    {
        case KubeHostPlatform.Windows:

            targetPath = Path.Combine(targetPath, "helm.exe");

            if (!File.Exists(targetPath))
            {
                File.Copy(cachedHelmPath, targetPath);
            }
            else
            {
                // Execute the existing target to obtain its version and update it
                // to the cached copy if the cluster installed a more recent version
                // of Kubernetes.

                // $hack(jeff.lill): Simple client version extraction

                var pattern  = "SemVer:\"v";
                var response = NeonHelper.ExecuteCapture(targetPath, "version");
                var pStart   = response.OutputText.IndexOf(pattern);
                var error    = "Cannot identify existing [helm] version.";

                if (pStart == -1)
                {
                    throw new KubeException(error);
                }

                pStart += pattern.Length;

                var pEnd = response.OutputText.IndexOf("\"", pStart);

                if (pEnd == -1)
                {
                    throw new KubeException(error);
                }

                var currentVersionString = response.OutputText.Substring(pStart, pEnd - pStart);

                if (!Version.TryParse(currentVersionString, out var currentVersion))
                {
                    throw new KubeException(error);
                }

                if (Version.Parse(setupInfo.Versions.Helm) > currentVersion)
                {
                    // We need to copy the latest version.
                    //
                    // BUGFIX: [File.Copy()] throws an [IOException] when the target
                    // already exists (and it does in this branch), so we need to
                    // allow overwriting, matching what [InstallKubeCtl()] does.

                    File.Copy(cachedHelmPath, targetPath, overwrite: true);
                }
            }
            break;

        case KubeHostPlatform.Linux:
        case KubeHostPlatform.Osx:
        default:

            throw new NotImplementedException($"[{hostPlatform}] support is not implemented.");
    }
}
/// <summary>
/// <para>
/// Ensures that the <b>kubectl</b> tool whose version is at least as great as the Kubernetes
/// cluster version is installed to the <b>neonKUBE</b> programs folder by copying the
/// tool from the cache if necessary.
/// </para>
/// <note>
/// This will probably require elevated privileges.
/// </note>
/// <note>
/// This assumes that <b>kubectl</b> has already been downloaded and cached and also that
/// more recent <b>kubectl</b> releases are backwards compatible with older deployed versions
/// of Kubernetes.
/// </note>
/// </summary>
/// <param name="setupInfo">The Kubernetes setup information.</param>
public static void InstallKubeCtl(KubeSetupInfo setupInfo)
{
    // NOTE: Added [nameof(setupInfo)] to the contract for consistency with the
    // other public entry points in this file.

    Covenant.Requires<ArgumentNullException>(setupInfo != null, nameof(setupInfo));

    var hostPlatform      = KubeHelper.HostPlatform;
    var cachedKubeCtlPath = KubeHelper.GetCachedComponentPath(hostPlatform, "kubectl", setupInfo.Versions.Kubernetes);
    var targetPath        = Path.Combine(KubeHelper.ProgramFolder);

    switch (hostPlatform)
    {
        case KubeHostPlatform.Windows:

            targetPath = Path.Combine(targetPath, "kubectl.exe");

            // Ensure that the KUBECONFIG environment variable exists and includes
            // the path to the user's [.neonkube] configuration.

            var kubeConfigVar = Environment.GetEnvironmentVariable("KUBECONFIG");

            if (string.IsNullOrEmpty(kubeConfigVar))
            {
                // The [KUBECONFIG] environment variable doesn't exist so we'll set it.

                Registry.SetValue(@"HKEY_CURRENT_USER\Environment", "KUBECONFIG", KubeConfigPath, RegistryValueKind.ExpandString);
                Environment.SetEnvironmentVariable("KUBECONFIG", KubeConfigPath);
            }
            else
            {
                // The [KUBECONFIG] environment variable exists but we still need to
                // ensure that the path to our [USER/.neonkube] config is present.

                var sb    = new StringBuilder();
                var found = false;

                foreach (var path in kubeConfigVar.Split(new char[] { ';' }, StringSplitOptions.RemoveEmptyEntries))
                {
                    if (path == KubeConfigPath)
                    {
                        found = true;
                    }

                    sb.AppendWithSeparator(path, ";");
                }

                if (!found)
                {
                    sb.AppendWithSeparator(KubeConfigPath, ";");
                }

                var newKubeConfigVar = sb.ToString();

                if (newKubeConfigVar != kubeConfigVar)
                {
                    Registry.SetValue(@"HKEY_CURRENT_USER\Environment", "KUBECONFIG", newKubeConfigVar, RegistryValueKind.ExpandString);
                    Environment.SetEnvironmentVariable("KUBECONFIG", newKubeConfigVar);
                }
            }

            if (!File.Exists(targetPath))
            {
                File.Copy(cachedKubeCtlPath, targetPath);
            }
            else
            {
                // Execute the existing target to obtain its version and update it
                // to the cached copy if the cluster installed a more recent version
                // of Kubernetes.

                // $hack(jeff.lill): Simple client version extraction

                var pattern  = "GitVersion:\"v";
                var response = NeonHelper.ExecuteCapture(targetPath, "version");
                var pStart   = response.OutputText.IndexOf(pattern);
                var error    = "Cannot identify existing [kubectl] version.";

                if (pStart == -1)
                {
                    throw new KubeException(error);
                }

                pStart += pattern.Length;

                var pEnd = response.OutputText.IndexOf("\"", pStart);

                if (pEnd == -1)
                {
                    throw new KubeException(error);
                }

                var currentVersionString = response.OutputText.Substring(pStart, pEnd - pStart);

                if (!Version.TryParse(currentVersionString, out var currentVersion))
                {
                    throw new KubeException(error);
                }

                if (Version.Parse(setupInfo.Versions.Kubernetes) > currentVersion)
                {
                    // We need to copy the latest version.

                    if (File.Exists(targetPath))
                    {
                        File.Delete(targetPath);
                    }

                    File.Copy(cachedKubeCtlPath, targetPath);
                }
            }
            break;

        case KubeHostPlatform.Linux:
        case KubeHostPlatform.Osx:
        default:

            throw new NotImplementedException($"[{hostPlatform}] support is not implemented.");
    }
}
/// <summary>
/// Constructs the <see cref="ISetupController"/> to be used for preparing a cluster.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <param name="nodeImageUri">
/// <para>
/// Optionally specifies the node image URI.
/// </para>
/// <note>
/// One of <paramref name="nodeImageUri"/> or <paramref name="nodeImagePath"/> must be specified for
/// on-premise hypervisor based environments.  This is ignored for cloud hosting.
/// </note>
/// </param>
/// <param name="nodeImagePath">
/// <para>
/// Optionally specifies the node image path.
/// </para>
/// <note>
/// One of <paramref name="nodeImageUri"/> or <paramref name="nodeImagePath"/> must be specified for
/// on-premise hypervisor based environments.  This is ignored for cloud hosting.
/// </note>
/// </param>
/// <param name="maxParallel">
/// Optionally specifies the maximum number of node operations to be performed in parallel.
/// This <b>defaults to 500</b> which is effectively infinite.
/// </param>
/// <param name="packageCacheEndpoints">
/// <para>
/// Optionally specifies the IP endpoints for the APT package caches to be used by
/// the cluster, overriding the cluster definition settings.  This is useful when
/// package caches are already deployed in an environment.
/// </para>
/// <note>
/// Package cache servers are deployed to the control-plane nodes by default.
/// </note>
/// </param>
/// <param name="unredacted">
/// Optionally indicates that sensitive information <b>won't be redacted</b> from the setup logs
/// (typically used when debugging).
/// </param>
/// <param name="debugMode">Optionally indicates that the cluster will be prepared in debug mode.</param>
/// <param name="baseImageName">Optionally specifies the base image name to use for debug mode.</param>
/// <param name="clusterspace">Optionally specifies the clusterspace for the operation.</param>
/// <param name="neonCloudHeadendUri">Optionally overrides the headend service URI.  This defaults to <see cref="KubeConst.NeonCloudHeadendUri"/>.</param>
/// <param name="removeExisting">Optionally remove any existing cluster with the same name in the target environment.</param>
/// <param name="disableConsoleOutput">
/// Optionally disables status output to the console.  This is typically
/// enabled for non-console applications.
/// </param>
/// <returns>The <see cref="ISetupController"/>.</returns>
/// <exception cref="NeonKubeException">Thrown when there's a problem.</exception>
public static ISetupController CreateClusterPrepareController(
    ClusterDefinition        clusterDefinition,
    string                   nodeImageUri          = null,
    string                   nodeImagePath         = null,
    int                      maxParallel           = 500,
    IEnumerable<IPEndPoint>  packageCacheEndpoints = null,
    bool                     unredacted            = false,
    bool                     debugMode             = false,
    string                   baseImageName         = null,
    string                   clusterspace          = null,
    string                   neonCloudHeadendUri   = null,
    bool                     removeExisting        = false,
    bool                     disableConsoleOutput  = false)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    if (KubeHelper.IsOnPremiseHypervisorEnvironment(clusterDefinition.Hosting.Environment))
    {
        Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(nodeImageUri) || !string.IsNullOrEmpty(nodeImagePath), $"{nameof(nodeImageUri)}/{nameof(nodeImagePath)}");
    }

    Covenant.Requires<ArgumentException>(maxParallel > 0, nameof(maxParallel));
    Covenant.Requires<ArgumentNullException>(!debugMode || !string.IsNullOrEmpty(baseImageName), nameof(baseImageName));

    neonCloudHeadendUri ??= KubeConst.NeonCloudHeadendUri;

    clusterDefinition.Validate();

    if (!string.IsNullOrEmpty(nodeImagePath))
    {
        if (!File.Exists(nodeImagePath))
        {
            throw new NeonKubeException($"No node image file exists at: {nodeImagePath}");
        }
    }

    // Determine where the log files should go.

    var logFolder = KubeHelper.LogFolder;

    // Remove any log files left over from a previous prepare/setup operation.

    foreach (var file in Directory.GetFiles(logFolder, "*.*", SearchOption.AllDirectories))
    {
        File.Delete(file);
    }

    // Initialize the cluster proxy.

    var cluster = new ClusterProxy(
        clusterDefinition:     clusterDefinition,
        hostingManagerFactory: new HostingManagerFactory(() => HostingLoader.Initialize()),
        operation:             ClusterProxy.Operation.Prepare,
        nodeImageUri:          nodeImageUri,
        nodeImagePath:         nodeImagePath,
        nodeProxyCreator:      (nodeName, nodeAddress) =>
        {
            var logStream      = new FileStream(Path.Combine(logFolder, $"{nodeName}.log"), FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite);
            var logWriter      = new StreamWriter(logStream);
            var sshCredentials = SshCredentials.FromUserPassword(KubeConst.SysAdminUser, KubeConst.SysAdminPassword);

            return new NodeSshProxy<NodeDefinition>(nodeName, nodeAddress, sshCredentials, logWriter: logWriter);
        });

    if (unredacted)
    {
        cluster.SecureRunOptions = RunOptions.None;
    }

    var hostingManager = cluster.HostingManager;

    // Ensure that the nodes have valid IP addresses.

    cluster.Definition.ValidatePrivateNodeAddresses();

    // Override the cluster definition package caches when requested.

    if (packageCacheEndpoints != null && packageCacheEndpoints.Count() > 0)
    {
        var sb = new StringBuilder();

        foreach (var endpoint in packageCacheEndpoints)
        {
            sb.AppendWithSeparator($"{endpoint.Address}:{endpoint.Port}");
        }

        clusterDefinition.PackageProxy = sb.ToString();
    }

    // Configure the setup controller.

    var controller = new SetupController<NodeDefinition>($"Preparing [{cluster.Definition.Name}] cluster infrastructure", cluster.Nodes, KubeHelper.LogFolder, disableConsoleOutput: disableConsoleOutput)
    {
        MaxParallel     = maxParallel,
        LogBeginMarker  = "# CLUSTER-BEGIN-PREPARE #######################################################",
        LogEndMarker    = "# CLUSTER-END-PREPARE-SUCCESS #################################################",
        LogFailedMarker = "# CLUSTER-END-PREPARE-FAILED ##################################################"
    };

    // Load any existing cluster login.  When there's no login, or the existing
    // login is no longer marked as setup-pending, we create and save a fresh
    // login; otherwise we fail so we don't clobber the login for a prepare
    // that appears to already be underway.
    //
    // NOTE(review): The original comment here claimed the opposite behavior
    // (reuse pending logins, fail otherwise).  The code's actual behavior is
    // described above -- confirm which is intended.

    var contextName      = $"{KubeConst.RootUser}@{clusterDefinition.Name}";
    var clusterLoginPath = KubeHelper.GetClusterLoginPath((KubeContextName)contextName);
    var clusterLogin     = ClusterLogin.Load(clusterLoginPath);

    if (clusterLogin == null || !clusterLogin.SetupDetails.SetupPending)
    {
        clusterLogin = new ClusterLogin(clusterLoginPath)
        {
            ClusterDefinition = clusterDefinition,
            SshUsername       = KubeConst.SysAdminUser,
            SetupDetails      = new KubeSetupDetails() { SetupPending = true }
        };

        clusterLogin.Save();
    }
    else
    {
        throw new InvalidOperationException($"Cannot overwrite existing cluster login [{KubeConst.RootUser}@{clusterDefinition.Name}]. Remove the login first when you're VERY SURE IT'S NOT IMPORTANT!");
    }

    // Configure the setup controller state.

    controller.Add(KubeSetupProperty.Preparing, true);
    controller.Add(KubeSetupProperty.ReleaseMode, KubeHelper.IsRelease);
    controller.Add(KubeSetupProperty.DebugMode, debugMode);
    controller.Add(KubeSetupProperty.BaseImageName, baseImageName);
    controller.Add(KubeSetupProperty.MaintainerMode, !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("NC_ROOT")));
    controller.Add(KubeSetupProperty.ClusterProxy, cluster);
    controller.Add(KubeSetupProperty.ClusterLogin, clusterLogin);
    controller.Add(KubeSetupProperty.HostingManager, cluster.HostingManager);
    controller.Add(KubeSetupProperty.HostingEnvironment, cluster.HostingManager.HostingEnvironment);
    controller.Add(KubeSetupProperty.ClusterspaceFolder, clusterspace);
    controller.Add(KubeSetupProperty.NeonCloudHeadendUri, neonCloudHeadendUri);
    controller.Add(KubeSetupProperty.DisableImageDownload, !string.IsNullOrEmpty(nodeImagePath));
    controller.Add(KubeSetupProperty.Redact, !unredacted);

    // Configure the cluster preparation steps.

    controller.AddGlobalStep("configure hosting manager",
        controller =>
        {
            controller.SetGlobalStepStatus("configure: hosting manager");

            hostingManager.MaxParallel = maxParallel;
            hostingManager.WaitSeconds = 60;
        });

    // Delete any existing cluster in the environment when requested.

    if (removeExisting)
    {
        controller.AddGlobalStep("remove existing cluster",
            async controller =>
            {
                await hostingManager.RemoveClusterAsync(removeOrphans: true);
            });
    }

    controller.AddGlobalStep("generate credentials",
        controller =>
        {
            // We're going to generate a secure random password and we're going to append
            // an extra 4-character string to ensure that the password meets Azure (and probably
            // other cloud) minimum requirements:
            //
            // The supplied password must be between 6-72 characters long and must
            // satisfy at least 3 of password complexity requirements from the following:
            //
            //      1. Contains an uppercase character
            //      2. Contains a lowercase character
            //      3. Contains a numeric digit
            //      4. Contains a special character
            //      5. Control characters are not allowed
            //
            // We're going to use the cloud API to configure this secure password
            // when creating the VMs.  For on-premise hypervisor environments such
            // as Hyper-V and XenServer, we're going use the [neon-init]
            // service to mount a virtual DVD that will change the password before
            // configuring the network on first boot.

            var hostingManager = controller.Get<IHostingManager>(KubeSetupProperty.HostingManager);
            var clusterLogin   = controller.Get<ClusterLogin>(KubeSetupProperty.ClusterLogin);

            controller.SetGlobalStepStatus("generate: SSH password");

            // Generate a secure SSH password and append a string that guarantees that
            // the generated password meets minimum cloud requirements.

            clusterLogin.SshPassword  = NeonHelper.GetCryptoRandomPassword(clusterDefinition.Security.PasswordLength);
            clusterLogin.SshPassword += ".Aa0";

            // We're also going to generate the server's SSH key here and pass that to the hosting
            // manager's provisioner.  We need to do this up front because some hosting environments
            // like Azure don't allow SSH password authentication by default, so we'll need the SSH key
            // to initialize the nodes after they've been provisioned for those environments.

            if (clusterLogin.SshKey == null)
            {
                // Generate a 2048 bit SSH key pair.

                controller.SetGlobalStepStatus("generate: SSH client key pair");

                clusterLogin.SshKey = KubeHelper.GenerateSshKey(cluster.Name, KubeConst.SysAdminUser);
            }

            // We also need to generate the root SSO password when necessary and add this
            // to the cluster login.

            controller.SetGlobalStepStatus("generate: SSO password");

            clusterLogin.SsoUsername = "******";
            clusterLogin.SsoPassword = cluster.Definition.RootPassword ?? NeonHelper.GetCryptoRandomPassword(cluster.Definition.Security.PasswordLength);

            clusterLogin.Save();

            // Update node proxies with the generated SSH credentials.

            foreach (var node in cluster.Nodes)
            {
                node.UpdateCredentials(clusterLogin.SshCredentials);
            }
        });

    // Have the hosting manager add any custom provisioning steps.

    hostingManager.AddProvisioningSteps(controller);

    // Add the provisioning steps.

    controller.AddWaitUntilOnlineStep(timeout: TimeSpan.FromMinutes(15));
    controller.AddNodeStep("check node OS", (controller, node) => node.VerifyNodeOS());

    controller.AddNodeStep("delete boot script",
        (controller, node) =>
        {
            // Hosting managers may use [cloud-init] to execute custom scripts
            // when node virtual machine first boots to configure networking and
            // also to set a secure SSH password.
            //
            // We need to delete this script file since it includes the SSH password.
            // If present, the script writes the path to itself to:
            //
            //      /etc/neonkube/cloud-init/boot-script-path
            //
            // We're going to read this file if it exists and delete the script.

            var scriptPath = "/etc/neonkube/cloud-init/boot-script-path";

            if (node.FileExists(scriptPath))
            {
                scriptPath = node.DownloadText(scriptPath);

                if (!string.IsNullOrEmpty(scriptPath))
                {
                    node.SudoCommand("rm", "-f", scriptPath.Trim());
                }
            }
        });

    controller.AddNodeStep("check image version",
        (controller, node) =>
        {
            // Ensure that the node image version matches the current neonKUBE version.

            var imageVersion = node.ImageVersion;

            if (imageVersion == null)
            {
                throw new Exception($"Node image is not stamped with the image version file: {KubeConst.ImageVersionPath}");
            }

            if (imageVersion != SemanticVersion.Parse(KubeVersions.NeonKube))
            {
                throw new Exception($"Node image version [{imageVersion}] does not match the neonKUBE version [{KubeVersions.NeonKube}] implemented by the current build.");
            }
        });

    controller.AddNodeStep("node credentials",
        (controller, node) =>
        {
            node.ConfigureSshKey(controller);
        });

    controller.AddNodeStep("prepare nodes",
        (controller, node) =>
        {
            node.PrepareNode(controller);
        });

    controller.AddGlobalStep("neoncluster.io domain",
        async controller =>
        {
            controller.SetGlobalStepStatus("create: *.neoncluster.io domain (for TLS)");

            if (string.IsNullOrEmpty(cluster.Definition.Domain))
            {
                var hostingEnvironment = controller.Get<HostingEnvironment>(KubeSetupProperty.HostingEnvironment);
                var clusterAddresses   = cluster.HostingManager.GetClusterAddresses();

                using (var jsonClient = new JsonClient())
                {
                    jsonClient.BaseAddress = new Uri(controller.Get<string>(KubeSetupProperty.NeonCloudHeadendUri));

                    clusterLogin.ClusterDefinition.Domain = await jsonClient.PostAsync<string>($"/cluster/domain?addresses={string.Join(',', clusterAddresses)}");

                    // $hack(jefflill):
                    //
                    // I'm going to parse the cluster ID from the domain returned.
                    //
                    //      https://github.com/nforgeio/neonKUBE/issues/16407
                    //
                    // $todo(marcusbooyah):
                    //
                    // You need to modify the headend API to return an object with the cluster ID
                    // and domain being returned as separate properties and then replace this code.
                    //
                    // Note that the ID should include some dashes to make it easier to read, like:
                    //
                    //      4FCA-0F7A-F7F3-4FC0
                    //
                    // Cluster IDs are probably going to end up being important for customer support
                    // and I suspect that we'll be asking users for this ID when they call in.

                    var fields = clusterLogin.ClusterDefinition.Domain.Split('.');

                    clusterLogin.ClusterDefinition.Id = fields[1];
                }
            }

            clusterLogin.Save();
        });

    // Some hosting managers may have to some additional work after
    // the cluster has been otherwise prepared.

    hostingManager.AddPostProvisioningSteps(controller);

    // Indicate that cluster prepare succeeded by creating [prepare-ok] file to
    // the log folder.  Cluster setup will verify that this file exists before
    // proceeding.

    controller.AddGlobalStep("finish",
        controller =>
        {
            // BUGFIX: [File.Create()] returns an open [FileStream] that was never
            // disposed, leaking the handle and keeping the file locked until the
            // stream was finalized.  We only need the file to exist, so dispose
            // the stream immediately.

            File.Create(Path.Combine(logFolder, "prepare-ok")).Dispose();
        },
        quiet: true);

    // We need to dispose this after the setup controller runs.

    controller.AddDisposable(cluster);

    return controller;
}
/// <summary> /// Constructs the <see cref="ISetupController"/> to be used for setting up a cluster. /// </summary> /// <param name="clusterDefinition">The cluster definition.</param> /// <param name="maxParallel"> /// Optionally specifies the maximum number of node operations to be performed in parallel. /// This <b>defaults to 500</b> which is effectively infinite. /// </param> /// <param name="unredacted"> /// Optionally indicates that sensitive information <b>won't be redacted</b> from the setup logs /// (typically used when debugging). /// </param> /// <param name="debugMode">Optionally indicates that the cluster will be prepared in debug mode.</param> /// <param name="uploadCharts"> /// <para> /// Optionally specifies that the current Helm charts should be uploaded to replace the charts in the base image. /// </para> /// <note> /// This will be treated as <c>true</c> when <paramref name="debugMode"/> is passed as <c>true</c>. /// </note> /// </param> /// <param name = "clusterspace" > Optionally specifies the clusterspace for the operation.</param> /// <param name="neonCloudHeadendUri">Optionally overrides the neonCLOUD headend service URI. This defaults to <see cref="KubeConst.NeonCloudHeadendUri"/>.</param> /// <param name="disableConsoleOutput"> /// Optionally disables status output to the console. This is typically /// enabled for non-console applications. 
/// </param>
/// <returns>The <see cref="ISetupController"/>.</returns>
/// <exception cref="NeonKubeException">Thrown when there's a problem.</exception>
public static ISetupController CreateClusterSetupController(
    ClusterDefinition clusterDefinition,
    int maxParallel = 500,
    bool unredacted = false,
    bool debugMode = false,
    bool uploadCharts = false,
    string clusterspace = null,
    string neonCloudHeadendUri = null,
    bool disableConsoleOutput = false)
{
    Covenant.Requires <ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));
    Covenant.Requires <ArgumentException>(maxParallel > 0, nameof(maxParallel));

    // Fall back to the default headend service URI when the caller didn't specify one.

    neonCloudHeadendUri ??= KubeConst.NeonCloudHeadendUri;

    clusterDefinition.Validate();

    // Determine where the log files should go.

    var logFolder = KubeHelper.LogFolder;

    // Ensure that the [prepare-ok] file in the log folder exists, indicating that
    // the last prepare operation succeeded.  Setup cannot proceed without a
    // successful prepare.

    var prepareOkPath = Path.Combine(logFolder, "prepare-ok");

    if (!File.Exists(prepareOkPath))
    {
        throw new NeonKubeException($"Cannot locate the [{prepareOkPath}] file. Cluster prepare must have failed.");
    }

    // Clear the log folder except for the [prepare-ok] file so this setup run
    // starts with fresh per-node logs.

    if (Directory.Exists(logFolder))
    {
        foreach (var file in Directory.GetFiles(logFolder, "*", SearchOption.TopDirectoryOnly))
        {
            if (Path.GetFileName(file) != "prepare-ok")
            {
                NeonHelper.DeleteFile(file);
            }
        }
    }
    else
    {
        throw new DirectoryNotFoundException(logFolder);
    }

    // Reload any KubeConfig file to ensure we're up-to-date.

    KubeHelper.LoadConfig();

    // Do some quick checks to ensure that component versions look reasonable.
    //
    // NOTE(review): This version cross-check is disabled -- presumably intentionally.
    // Confirm whether it should be re-enabled or removed.

    //var kubernetesVersion = new Version(KubeVersions.Kubernetes);
    //var crioVersion = new Version(KubeVersions.Crio);

    //if (crioVersion.Major != kubernetesVersion.Major || crioVersion.Minor != kubernetesVersion.Minor)
    //{
    //    throw new NeonKubeException($"[{nameof(KubeConst)}.{nameof(KubeVersions.Crio)}={KubeVersions.Crio}] major and minor versions don't match [{nameof(KubeConst)}.{nameof(KubeVersions.Kubernetes)}={KubeVersions.Kubernetes}].");
    //}

    // Initialize the cluster proxy.

    var contextName = KubeContextName.Parse($"root@{clusterDefinition.Name}");
    var kubeContext = new KubeConfigContext(contextName);

    KubeHelper.InitContext(kubeContext);

    ClusterProxy cluster = null;

    cluster = new ClusterProxy(
        hostingManagerFactory: new HostingManagerFactory(() => HostingLoader.Initialize()),
        operation: ClusterProxy.Operation.Setup,
        clusterDefinition: clusterDefinition,
        nodeProxyCreator: (nodeName, nodeAddress) =>
        {
            // Each node logs to its own [NODE.log] file in the log folder.  The
            // stream is opened with [FileShare.ReadWrite] sharing.

            var logStream = new FileStream(Path.Combine(logFolder, $"{nodeName}.log"), FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite);
            var logWriter = new StreamWriter(logStream);
            var context = KubeHelper.CurrentContext;

            // Prefer any SSH credentials persisted in the current context extension,
            // falling back to the default [sysadmin] username/password.

            var sshCredentials = context.Extension.SshCredentials ?? SshCredentials.FromUserPassword(KubeConst.SysAdminUser, KubeConst.SysAdminPassword);

            return(new NodeSshProxy <NodeDefinition>(nodeName, nodeAddress, sshCredentials, logWriter: logWriter));
        });

    if (unredacted)
    {
        // Run commands without the secure run options so sensitive values
        // will appear in the logs (intended for debugging only).

        cluster.SecureRunOptions = RunOptions.None;
    }

    // Configure the setup controller.

    var controller = new SetupController <NodeDefinition>($"Setup [{cluster.Definition.Name}] cluster", cluster.Nodes, KubeHelper.LogFolder, disableConsoleOutput: disableConsoleOutput)
    {
        MaxParallel = maxParallel,
        LogBeginMarker = "# CLUSTER-BEGIN-SETUP #########################################################",
        LogEndMarker = "# CLUSTER-END-SETUP-SUCCESS ###################################################",
        LogFailedMarker = "# CLUSTER-END-SETUP-FAILED ####################################################"
    };

    // Load the cluster login information if it exists and when it indicates that
    // setup is still pending, we'll use that information (especially the generated
    // secure SSH password).
    //
    // Otherwise, we'll write (or overwrite) the context file with a fresh context.

    var clusterLoginPath = KubeHelper.GetClusterLoginPath((KubeContextName)$"{KubeConst.RootUser}@{clusterDefinition.Name}");
    var clusterLogin = ClusterLogin.Load(clusterLoginPath);

    if (clusterLogin == null || !clusterLogin.SetupDetails.SetupPending)
    {
        clusterLogin = new ClusterLogin(clusterLoginPath)
        {
            ClusterDefinition = clusterDefinition,
            SshUsername = KubeConst.SysAdminUser,
            SetupDetails = new KubeSetupDetails() { SetupPending = true }
        };

        clusterLogin.Save();
    }

    // Update the cluster node SSH credentials to use the secure password.

    var sshCredentials = SshCredentials.FromUserPassword(KubeConst.SysAdminUser, clusterLogin.SshPassword);

    foreach (var node in cluster.Nodes)
    {
        node.UpdateCredentials(sshCredentials);
    }

    // Configure the setup controller state.  These properties are available to
    // all subsequent setup steps via the controller.

    controller.Add(KubeSetupProperty.Preparing, false);
    controller.Add(KubeSetupProperty.ReleaseMode, KubeHelper.IsRelease);
    controller.Add(KubeSetupProperty.DebugMode, debugMode);
    controller.Add(KubeSetupProperty.MaintainerMode, !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("NC_ROOT")));
    controller.Add(KubeSetupProperty.ClusterProxy, cluster);
    controller.Add(KubeSetupProperty.ClusterLogin, clusterLogin);
    controller.Add(KubeSetupProperty.HostingManager, cluster.HostingManager);
    controller.Add(KubeSetupProperty.HostingEnvironment, cluster.HostingManager.HostingEnvironment);
    controller.Add(KubeSetupProperty.ClusterspaceFolder, clusterspace);
    controller.Add(KubeSetupProperty.NeonCloudHeadendUri, neonCloudHeadendUri);
    controller.Add(KubeSetupProperty.Redact, !unredacted);

    // Configure the setup steps.  NOTE: step registration order matters; steps
    // execute in the order they're added here.

    controller.AddGlobalStep("resource requirements", KubeSetup.CalculateResourceRequirements);
    cluster.HostingManager.AddSetupSteps(controller);
    controller.AddWaitUntilOnlineStep("connect nodes");
    controller.AddNodeStep("check node OS", (controller, node) => node.VerifyNodeOS());
    controller.AddNodeStep("check image version",
        (controller, node) =>
        {
            // Ensure that the node image version matches the current neonKUBE (build) version.

            var imageVersion = node.ImageVersion;

            if (imageVersion == null)
            {
                throw new Exception($"Node image is not stamped with the image version file: {KubeConst.ImageVersionPath}");
            }

            if (imageVersion != SemanticVersion.Parse(KubeVersions.NeonKube))
            {
                throw new Exception($"Node image version [{imageVersion}] does not match the neonKUBE version [{KubeVersions.NeonKube}] implemented by the current build.");
            }
        });

    // NOTE(review): Unlike [BaseDisableCloudInit()], this step doesn't create
    // [/etc/cloud] before touching the disable file -- confirm that the folder
    // always exists on node images.

    controller.AddNodeStep("disable cloud-init", (controller, node) => node.SudoCommand("touch /etc/cloud/cloud-init.disabled"));
    controller.AddNodeStep("node basics", (controller, node) => node.BaseInitialize(controller, upgradeLinux: false)); // $todo(jefflill): We don't support Linux distribution upgrades yet.
    controller.AddNodeStep("root certificates", (controller, node) => node.UpdateRootCertificates());
    controller.AddNodeStep("setup ntp", (controller, node) => node.SetupConfigureNtp(controller));
    controller.AddNodeStep("cluster metadata", ConfigureMetadataAsync);

    // Perform common configuration for the bootstrap node first.
    // We need to do this so that the package cache will be running
    // when the remaining nodes are configured.

    var configureControlPlaneStepLabel = cluster.Definition.ControlNodes.Count() > 1 ? "setup first control-plane node" : "setup control-plane node";

    controller.AddNodeStep(configureControlPlaneStepLabel,
        (controller, node) =>
        {
            node.SetupNode(controller, KubeSetup.ClusterManifest);
        },
        (controller, node) => node == cluster.FirstControlNode);

    // Perform common configuration for the remaining nodes (if any).

    if (cluster.Definition.Nodes.Count() > 1)
    {
        controller.AddNodeStep("setup other nodes",
            (controller, node) =>
            {
                node.SetupNode(controller, KubeSetup.ClusterManifest);

                // The reboot is wrapped in an idempotent operation so it won't
                // be repeated when setup is re-run after a failure.

                node.InvokeIdempotent("setup/setup-node-restart", () => node.Reboot(wait: true));
            },
            (controller, node) => node != cluster.FirstControlNode);
    }

    if (debugMode)
    {
        controller.AddNodeStep("load images", (controller, node) => node.NodeLoadImagesAsync(controller, downloadParallel: 5, loadParallel: 3));
    }

    controller.AddNodeStep("install helm",
        (controller, node) =>
        {
            node.NodeInstallHelm(controller);
        });

    controller.AddNodeStep("install kustomize",
        (controller, node) =>
        {
            node.NodeInstallKustomize(controller);
        });

    if (uploadCharts || debugMode)
    {
        // Replace the Helm charts baked into the node image with the charts
        // from the current build (first control-plane node only).

        controller.AddNodeStep("upload helm charts",
            (controller, node) =>
            {
                cluster.FirstControlNode.SudoCommand($"rm -rf {KubeNodeFolder.Helm}/*");
                cluster.FirstControlNode.NodeInstallHelmArchive(controller);

                var zipPath = LinuxPath.Combine(KubeNodeFolder.Helm, "charts.zip");

                cluster.FirstControlNode.SudoCommand($"unzip {zipPath} -d {KubeNodeFolder.Helm}");
                cluster.FirstControlNode.SudoCommand($"rm -f {zipPath}");
            },
            (controller, node) => node == cluster.FirstControlNode);
    }

    //-----------------------------------------------------------------
    // Cluster setup.

    controller.AddGlobalStep("setup cluster", controller => KubeSetup.SetupClusterAsync(controller));
    controller.AddGlobalStep("persist state",
        controller =>
        {
            // Indicate that setup is complete.

            clusterLogin.ClusterDefinition.ClearSetupState();
            clusterLogin.SetupDetails.SetupPending = false;
            clusterLogin.Save();
        });

    //-----------------------------------------------------------------
    // Verify the cluster.

    controller.AddNodeStep("check control-plane nodes",
        (controller, node) =>
        {
            KubeDiagnostics.CheckControlNode(node, cluster.Definition);
        },
        (controller, node) => node.Metadata.IsControlPane);

    if (cluster.Workers.Count() > 0)
    {
        controller.AddNodeStep("check workers",
            (controller, node) =>
            {
                KubeDiagnostics.CheckWorker(node, cluster.Definition);
            },
            (controller, node) => node.Metadata.IsWorker);
    }

    cluster.HostingManager.AddPostSetupSteps(controller);

    // We need to dispose this after the setup controller runs.

    controller.AddDisposable(cluster);

    return(controller);
}
/// <summary>
/// Used by cloud and potentially other hosting manager implementations to verify the
/// node address assignments and/or to automatically assign these addresses.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <exception cref="ClusterDefinitionException">
/// Thrown when addresses are duplicated, out of range, reserved, missing for
/// on-premise environments, or when the subnet has no free addresses left.
/// </exception>
/// <remarks>
/// <note>
/// This method verifies that node addresses for on-premise environments are located
/// within the premise subnet. The method will not attempt to assign node addresses
/// for on-premise node and requires all nodes have explicit addresses.
/// </note>
/// </remarks>
protected void AssignNodeAddresses(ClusterDefinition clusterDefinition)
{
    Covenant.Requires <ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    // Verify that explicit address assignments are not duplicated
    // across any nodes.

    var addressToNode = new Dictionary <IPAddress, NodeDefinition>();

    foreach (var node in clusterDefinition.SortedNodes)
    {
        if (string.IsNullOrEmpty(node.Address))
        {
            continue;
        }

        var address = NetHelper.ParseIPv4Address(node.Address);

        if (addressToNode.TryGetValue(address, out var conflictNode))
        {
            throw new ClusterDefinitionException($"Nodes [{conflictNode.Name}] and [{node.Name}] both specify the same address [{address}]. Node addresses must be unique.");
        }

        addressToNode.Add(address, node);
    }

    if (KubeHelper.IsOnPremiseEnvironment(clusterDefinition.Hosting.Environment))
    {
        // Verify that all nodes have explicit addresses for on-premise environments.
        // We never auto-assign addresses for these.

        foreach (var node in clusterDefinition.SortedNodes)
        {
            if (string.IsNullOrEmpty(node.Address))
            {
                throw new ClusterDefinitionException($"Node [{node.Name}] is not assigned an address. All nodes must have explicit IP addresses for on-premise hosting environments.");
            }
        }

        return;
    }

    // Ensure that any explicit node IP address assignments are located
    // within the subnet where the nodes will be provisioned and do not
    // conflict with any of the addresses reserved by the cloud provider
    // or neonKUBE.

    var nodeSubnetInfo = clusterDefinition.NodeSubnet;
    var nodeSubnet = NetworkCidr.Parse(nodeSubnetInfo.Subnet);

    if (clusterDefinition.Nodes.Count() > nodeSubnet.AddressCount - nodeSubnetInfo.ReservedAddresses)
    {
        throw new ClusterDefinitionException($"The cluster includes [{clusterDefinition.Nodes.Count()}] nodes which will not fit within the [{nodeSubnet}] target subnet after accounting for [{nodeSubnetInfo.ReservedAddresses}] reserved addresses.");
    }

    // Compute the range of addresses nodes may actually use after excluding
    // the reserved addresses at the start and end of the subnet.

    var firstValidAddressUint = NetHelper.AddressToUint(nodeSubnet.FirstAddress) + KubeConst.CloudSubnetStartReservedIPs;
    var firstValidAddress = NetHelper.UintToAddress(firstValidAddressUint);
    var lastValidAddressUint = NetHelper.AddressToUint(nodeSubnet.LastAddress) - KubeConst.CloudSubnetEndReservedIPs;
    var lastValidAddress = NetHelper.UintToAddress(lastValidAddressUint);

    foreach (var node in clusterDefinition.SortedNodes.OrderBy(node => node.Name))
    {
        if (string.IsNullOrEmpty(node.Address))
        {
            // Ignore nodes with unassigned addresses.

            continue;
        }

        var address = NetHelper.ParseIPv4Address(node.Address);

        if (!nodeSubnet.Contains(address))
        {
            throw new ClusterDefinitionException($"Node [{node.Name}] is assigned [{nameof(node.Address)}={node.Address}] which is outside of the [{nodeSubnet}].");
        }

        var addressUint = NetHelper.AddressToUint(address);

        if (addressUint < firstValidAddressUint)
        {
            throw new ClusterDefinitionException($"Node [{node.Name}] defines IP address [{nameof(node.Address)}={node.Address}] which is reserved. The first valid node address for subnet [{nodeSubnet}] is [{firstValidAddress}].");
        }

        if (addressUint > lastValidAddressUint)
        {
            throw new ClusterDefinitionException($"Node [{node.Name}] defines IP address [{nameof(node.Address)}={node.Address}] which is reserved. The last valid node address for subnet [{nodeSubnet}] is [{lastValidAddress}].");
        }
    }

    //-----------------------------------------------------------------
    // Automatically assign unused IP addresses within the subnet to nodes that
    // were not explicitly assigned an address in the cluster definition.

    // Collect the addresses already claimed by explicit assignments.

    var assignedAddresses = new HashSet <uint>();

    foreach (var node in clusterDefinition.SortedNodes)
    {
        if (!string.IsNullOrEmpty(node.Address))
        {
            assignedAddresses.Add(NetHelper.AddressToUint(NetHelper.ParseIPv4Address(node.Address)));
        }
    }

    // Assign the lowest unused valid address to each remaining node.  The scan
    // cursor only ever moves forward because [assignedAddresses] only grows,
    // making the overall scan O(subnet size) rather than O(nodes * subnet size).

    var nextAddressUint = firstValidAddressUint;

    foreach (var node in clusterDefinition.SortedControlThenWorkerNodes)
    {
        if (!string.IsNullOrEmpty(node.Address))
        {
            continue;
        }

        while (nextAddressUint <= lastValidAddressUint && assignedAddresses.Contains(nextAddressUint))
        {
            nextAddressUint++;
        }

        if (nextAddressUint > lastValidAddressUint)
        {
            // Fail explicitly rather than silently leaving the node without an
            // address (which would surface as a confusing error downstream).

            throw new ClusterDefinitionException($"Cannot assign an address to node [{node.Name}] because there are no unused addresses remaining in the [{nodeSubnet}] subnet.");
        }

        node.Address = NetHelper.UintToAddress(nextAddressUint).ToString();
        assignedAddresses.Add(nextAddressUint);
        nextAddressUint++;
    }
}
//---------------------------------------------------------------------
// Instance members

/// <inheritdoc/>
public bool IsCloudEnvironment(HostingEnvironment environment) => KubeHelper.IsCloudEnvironment(environment);