/// <summary>
/// Constructor.
/// </summary>
/// <param name="cluster">The cluster being managed.</param>
/// <param name="logFolder">
/// The folder where log files are to be written, or <c>null</c> or
/// empty if logging is disabled.
/// </param>
public XenServerHostingManager(ClusterProxy cluster, string logFolder = null)
{
    this.cluster                = cluster;
    this.cluster.HostingManager = this;
    this.logFolder              = logFolder;
    this.maxVmNameWidth         = cluster.Definition.Nodes.Max(n => n.Name.Length) + cluster.Definition.Hosting.GetVmNamePrefix(cluster.Definition).Length;
}
/// <inheritdoc/>
public HostingManager GetManager(ClusterProxy cluster, string logFolder = null)
{
    Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));

    CheckInitialized();

    return(Loader.GetManager(cluster, logFolder));
}
/// <inheritdoc/>
public HostingManager GetManagerWithNodeImageFile(ClusterProxy cluster, string nodeImagePath, string logFolder = null)
{
    Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(nodeImagePath), nameof(nodeImagePath));

    CheckInitialized();

    return(Loader.GetManagerWithNodeImageFile(cluster, nodeImagePath, logFolder));
}
/// <summary>
/// Creates an instance that is capable of provisioning a cluster on manually provisioned
/// servers or virtual machines.
/// </summary>
/// <param name="cluster">The cluster being managed.</param>
/// <param name="nodeImageUri">Optionally specifies the node image URI (one of <paramref name="nodeImageUri"/> or <paramref name="nodeImagePath"/> must be passed).</param>
/// <param name="nodeImagePath">Optionally specifies the path to the local node image file (one of <paramref name="nodeImageUri"/> or <paramref name="nodeImagePath"/> must be passed).</param>
/// <param name="logFolder">
/// The folder where log files are to be written, or <c>null</c> or
/// empty if logging is disabled.
/// </param>
/// <remarks>
/// <note>
/// One of <paramref name="nodeImageUri"/> or <paramref name="nodeImagePath"/> must be specified.
/// </note>
/// </remarks>
public BareMetalHostingManager(ClusterProxy cluster, string nodeImageUri = null, string nodeImagePath = null, string logFolder = null)
{
    Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(nodeImageUri) || !string.IsNullOrEmpty(nodeImagePath), $"{nameof(nodeImageUri)}/{nameof(nodeImagePath)}");

    cluster.HostingManager = this;

    this.cluster       = cluster;
    this.nodeImageUri  = nodeImageUri;
    this.nodeImagePath = nodeImagePath;
}
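The constructor enforces that at least one image source is supplied. A minimal construction sketch follows, assuming a caller-supplied clusterDefinition and an illustrative (hypothetical) image path; in practice the manager is usually created indirectly through the hosting loader shown later in this section.

// A minimal sketch, assuming [clusterDefinition] was already loaded by the
// caller; the image path below is purely illustrative. Passing neither
// [nodeImageUri] nor [nodeImagePath] trips the Covenant requirement above.
var cluster = new ClusterProxy(clusterDefinition);
var manager = new BareMetalHostingManager(
    cluster,
    nodeImagePath: "/tmp/neonkube-node.qcow2",  // hypothetical local image file
    logFolder:     KubeHelper.LogFolder);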
/// <summary>
/// Returns the <see cref="HostingManager"/> for a specific environment.
/// </summary>
/// <param name="cluster">The cluster being managed.</param>
/// <param name="logFolder">
/// The folder where log files are to be written, or <c>null</c> or
/// empty if logging is disabled.
/// </param>
/// <returns>
/// The <see cref="HostingManager"/> or <c>null</c> if no hosting manager
/// could be located for the specified cluster environment.
/// </returns>
public HostingManager GetManager(ClusterProxy cluster, string logFolder = null)
{
    Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));
    Covenant.Assert(environmentToHostingManager != null, $"[{nameof(HostingLoader)}] is not initialized. You must call [{nameof(HostingLoader)}.{nameof(HostingLoader.Initialize)}()] first.");

    if (!environmentToHostingManager.TryGetValue(cluster.Definition.Hosting.Environment, out var managerType))
    {
        return(null);
    }

    return((HostingManager)Activator.CreateInstance(managerType, cluster, logFolder));
}
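Because the loader resolves the concrete manager type from the cluster's hosting environment and instantiates it via reflection, callers only need a cluster proxy. The sketch below is a hedged usage example, assuming the GetManager wrapper shown above is exposed through the HostingManagerFactory constructed elsewhere in this section; the null check mirrors the documented "returns null" contract.

// A minimal usage sketch (assumption: GetManager() is reachable via the
// HostingManagerFactory wrapper used elsewhere in this section).
var cluster = new ClusterProxy(clusterDefinition);
var factory = new HostingManagerFactory(() => HostingLoader.Initialize());
var manager = factory.GetManager(cluster, logFolder: KubeHelper.LogFolder);

if (manager == null)
{
    // No hosting manager is registered for the cluster's environment.
    throw new NotSupportedException($"No hosting manager is available for [{clusterDefinition.Hosting.Environment}].");
}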
/// <inheritdoc/>
public HostingManager GetManagerWithNodeImageUri(ClusterProxy cluster, string nodeImageUri, string logFolder = null)
{
    Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));
    Covenant.Assert(environmentToHostingManager != null, $"[{nameof(HostingLoader)}] is not initialized. You must call [{nameof(HostingLoader)}.{nameof(HostingLoader.Initialize)}()] first.");

    if (environmentToHostingManager.TryGetValue(cluster.Definition.Hosting.Environment, out var managerType))
    {
        return((HostingManager)Activator.CreateInstance(managerType, cluster, nodeImageUri, (string)null, logFolder));
    }

    throw new NotImplementedException($"[{nameof(HostingEnvironment)}={cluster.Definition.Hosting.Environment}]");
}
/// <inheritdoc/>
public override void Run(ClusterProxy cluster)
{
    Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));

    var node = cluster.GetNode(nodeName);

    if (operationName != null)
    {
        node.InvokeIdempotentAction(operationName, () => action(node));
    }
    else
    {
        action(node);
    }
}
/// <inheritdoc/>
public override void Run(ClusterProxy cluster)
{
    Covenant.Requires<ArgumentNullException>(cluster != null);

    foreach (var node in cluster.Nodes)
    {
        node.Status = $"pause {delay}";
    }

    Thread.Sleep(delay);

    foreach (var node in cluster.Nodes)
    {
        node.Status = string.Empty;
    }
}
/// <inheritdoc/>
public void Validate(ClusterDefinition clusterDefinition)
{
    CheckInitialized();
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    var cluster = new ClusterProxy(clusterDefinition);
    var master  = GetMaster(cluster);

    if (master == null)
    {
        throw new KubeException($"Cannot locate a [{nameof(IHostingManager)}] implementation for the [{clusterDefinition.Hosting.Environment}] hosting environment.");
    }

    master.Validate(clusterDefinition);
}
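A hedged sketch of validating a definition up front, assuming Validate() is exposed by the same HostingManagerFactory used elsewhere in this section and that clusterDefinition was already loaded by the caller.

// A minimal sketch, assuming Validate() lives on HostingManagerFactory
// (the containing type isn't shown in this snippet).
var factory = new HostingManagerFactory(() => HostingLoader.Initialize());

try
{
    factory.Validate(clusterDefinition);
}
catch (KubeException e)
{
    // Hosting-specific validation failed (e.g. an unsupported environment).
    Console.Error.WriteLine(e.Message);
}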
/// <inheritdoc/>
public override void Run(ClusterProxy cluster)
{
    Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));

    var node   = cluster.GetNode(nodeName);
    var status = this.ToString();

    node.UploadText(path, text, tabStop, outputEncoding);

    if (!string.IsNullOrEmpty(permissions))
    {
        node.SudoCommand("chmod", permissions, path);
    }

    StatusPause();

    node.Status = string.Empty;
}
/// <summary>
/// Constructor.
/// </summary>
/// <param name="controller">The setup controller.</param>
internal SetupClusterStatus(ISetupController controller)
{
    Covenant.Requires<ArgumentNullException>(controller != null, nameof(controller));

    this.isClone      = false;
    this.controller   = controller;
    this.cluster      = controller.Get<ClusterProxy>(KubeSetupProperty.ClusterProxy);
    this.GlobalStatus = controller.GlobalStatus;
    this.globalStatus = this.GlobalStatus;

    // Initialize the cluster node/host status instances.

    this.Nodes = new List<SetupNodeStatus>();

    foreach (var node in cluster.Nodes)
    {
        Nodes.Add(new SetupNodeStatus(node, node.NodeDefinition));
    }

    this.Hosts = new List<SetupNodeStatus>();

    foreach (var host in cluster.Hosts)
    {
        Hosts.Add(new SetupNodeStatus(host, new object()));
    }

    // Initialize the setup steps.

    this.Steps = new List<SetupStepStatus>();

    foreach (var step in controller.GetStepStatus().Where(step => !step.IsQuiet))
    {
        Steps.Add(step);
    }

    this.CurrentStep = Steps.SingleOrDefault(step => step.Number == controller.CurrentStepNumber);
}
/// <summary>
/// Constructs the <see cref="ISetupController"/> to be used for preparing a cluster.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <param name="nodeImageUri">
/// <para>
/// Optionally specifies the node image URI.
/// </para>
/// <note>
/// One of <paramref name="nodeImageUri"/> or <paramref name="nodeImagePath"/> must be specified for
/// on-premise hypervisor based environments. This is ignored for cloud hosting.
/// </note>
/// </param>
/// <param name="nodeImagePath">
/// <para>
/// Optionally specifies the node image path.
/// </para>
/// <note>
/// One of <paramref name="nodeImageUri"/> or <paramref name="nodeImagePath"/> must be specified for
/// on-premise hypervisor based environments. This is ignored for cloud hosting.
/// </note>
/// </param>
/// <param name="maxParallel">
/// Optionally specifies the maximum number of node operations to be performed in parallel.
/// This <b>defaults to 500</b> which is effectively infinite.
/// </param>
/// <param name="packageCacheEndpoints">
/// <para>
/// Optionally specifies the IP endpoints for the APT package caches to be used by
/// the cluster, overriding the cluster definition settings. This is useful when
/// package caches are already deployed in an environment.
/// </para>
/// <note>
/// Package cache servers are deployed to the control-plane nodes by default.
/// </note>
/// </param>
/// <param name="unredacted">
/// Optionally indicates that sensitive information <b>won't be redacted</b> from the setup logs
/// (typically used when debugging).
/// </param>
/// <param name="debugMode">Optionally indicates that the cluster will be prepared in debug mode.</param>
/// <param name="baseImageName">Optionally specifies the base image name to use for debug mode.</param>
/// <param name="clusterspace">Optionally specifies the clusterspace for the operation.</param>
/// <param name="neonCloudHeadendUri">Optionally overrides the headend service URI. This defaults to <see cref="KubeConst.NeonCloudHeadendUri"/>.</param>
/// <param name="removeExisting">Optionally remove any existing cluster with the same name in the target environment.</param>
/// <param name="disableConsoleOutput">
/// Optionally disables status output to the console. This is typically
/// enabled for non-console applications.
/// </param>
/// <returns>The <see cref="ISetupController"/>.</returns>
/// <exception cref="NeonKubeException">Thrown when there's a problem.</exception>
public static ISetupController CreateClusterPrepareController(
    ClusterDefinition           clusterDefinition,
    string                      nodeImageUri          = null,
    string                      nodeImagePath         = null,
    int                         maxParallel           = 500,
    IEnumerable<IPEndPoint>     packageCacheEndpoints = null,
    bool                        unredacted            = false,
    bool                        debugMode             = false,
    string                      baseImageName         = null,
    string                      clusterspace          = null,
    string                      neonCloudHeadendUri   = null,
    bool                        removeExisting        = false,
    bool                        disableConsoleOutput  = false)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    if (KubeHelper.IsOnPremiseHypervisorEnvironment(clusterDefinition.Hosting.Environment))
    {
        Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(nodeImageUri) || !string.IsNullOrEmpty(nodeImagePath), $"{nameof(nodeImageUri)}/{nameof(nodeImagePath)}");
    }

    Covenant.Requires<ArgumentException>(maxParallel > 0, nameof(maxParallel));
    Covenant.Requires<ArgumentNullException>(!debugMode || !string.IsNullOrEmpty(baseImageName), nameof(baseImageName));

    neonCloudHeadendUri ??= KubeConst.NeonCloudHeadendUri;

    clusterDefinition.Validate();

    if (!string.IsNullOrEmpty(nodeImagePath))
    {
        if (!File.Exists(nodeImagePath))
        {
            throw new NeonKubeException($"No node image file exists at: {nodeImagePath}");
        }
    }

    // Determine where the log files should go.

    var logFolder = KubeHelper.LogFolder;

    // Remove any log files left over from a previous prepare/setup operation.

    foreach (var file in Directory.GetFiles(logFolder, "*.*", SearchOption.AllDirectories))
    {
        File.Delete(file);
    }

    // Initialize the cluster proxy.

    var cluster = new ClusterProxy(
        clusterDefinition:     clusterDefinition,
        hostingManagerFactory: new HostingManagerFactory(() => HostingLoader.Initialize()),
        operation:             ClusterProxy.Operation.Prepare,
        nodeImageUri:          nodeImageUri,
        nodeImagePath:         nodeImagePath,
        nodeProxyCreator:      (nodeName, nodeAddress) =>
        {
            var logStream      = new FileStream(Path.Combine(logFolder, $"{nodeName}.log"), FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite);
            var logWriter      = new StreamWriter(logStream);
            var sshCredentials = SshCredentials.FromUserPassword(KubeConst.SysAdminUser, KubeConst.SysAdminPassword);

            return(new NodeSshProxy<NodeDefinition>(nodeName, nodeAddress, sshCredentials, logWriter: logWriter));
        });

    if (unredacted)
    {
        cluster.SecureRunOptions = RunOptions.None;
    }

    var hostingManager = cluster.HostingManager;

    // Ensure that the nodes have valid IP addresses.

    cluster.Definition.ValidatePrivateNodeAddresses();

    // Override the cluster definition package caches when requested.

    if (packageCacheEndpoints != null && packageCacheEndpoints.Count() > 0)
    {
        var sb = new StringBuilder();

        foreach (var endpoint in packageCacheEndpoints)
        {
            sb.AppendWithSeparator($"{endpoint.Address}:{endpoint.Port}");
        }

        clusterDefinition.PackageProxy = sb.ToString();
    }

    // Configure the setup controller.
    var controller = new SetupController<NodeDefinition>($"Preparing [{cluster.Definition.Name}] cluster infrastructure", cluster.Nodes, KubeHelper.LogFolder, disableConsoleOutput: disableConsoleOutput)
    {
        MaxParallel     = maxParallel,
        LogBeginMarker  = "# CLUSTER-BEGIN-PREPARE #######################################################",
        LogEndMarker    = "# CLUSTER-END-PREPARE-SUCCESS #################################################",
        LogFailedMarker = "# CLUSTER-END-PREPARE-FAILED ##################################################"
    };

    // Load the cluster login information if it exists and when it indicates that
    // setup is still pending, we'll use that information (especially the generated
    // secure SSH password).
    //
    // Otherwise, we'll fail the cluster prepare to avoid the possibility of overwriting
    // the login for an active cluster.

    var contextName      = $"{KubeConst.RootUser}@{clusterDefinition.Name}";
    var clusterLoginPath = KubeHelper.GetClusterLoginPath((KubeContextName)contextName);
    var clusterLogin     = ClusterLogin.Load(clusterLoginPath);

    if (clusterLogin == null || !clusterLogin.SetupDetails.SetupPending)
    {
        clusterLogin = new ClusterLogin(clusterLoginPath)
        {
            ClusterDefinition = clusterDefinition,
            SshUsername       = KubeConst.SysAdminUser,
            SetupDetails      = new KubeSetupDetails() { SetupPending = true }
        };

        clusterLogin.Save();
    }
    else
    {
        throw new InvalidOperationException($"Cannot overwrite existing cluster login [{KubeConst.RootUser}@{clusterDefinition.Name}]. Remove the login first when you're VERY SURE IT'S NOT IMPORTANT!");
    }

    // Configure the setup controller state.

    controller.Add(KubeSetupProperty.Preparing, true);
    controller.Add(KubeSetupProperty.ReleaseMode, KubeHelper.IsRelease);
    controller.Add(KubeSetupProperty.DebugMode, debugMode);
    controller.Add(KubeSetupProperty.BaseImageName, baseImageName);
    controller.Add(KubeSetupProperty.MaintainerMode, !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("NC_ROOT")));
    controller.Add(KubeSetupProperty.ClusterProxy, cluster);
    controller.Add(KubeSetupProperty.ClusterLogin, clusterLogin);
    controller.Add(KubeSetupProperty.HostingManager, cluster.HostingManager);
    controller.Add(KubeSetupProperty.HostingEnvironment, cluster.HostingManager.HostingEnvironment);
    controller.Add(KubeSetupProperty.ClusterspaceFolder, clusterspace);
    controller.Add(KubeSetupProperty.NeonCloudHeadendUri, neonCloudHeadendUri);
    controller.Add(KubeSetupProperty.DisableImageDownload, !string.IsNullOrEmpty(nodeImagePath));
    controller.Add(KubeSetupProperty.Redact, !unredacted);

    // Configure the cluster preparation steps.

    controller.AddGlobalStep("configure hosting manager",
        controller =>
        {
            controller.SetGlobalStepStatus("configure: hosting manager");

            hostingManager.MaxParallel = maxParallel;
            hostingManager.WaitSeconds = 60;
        });

    // Delete any existing cluster in the environment when requested.

    if (removeExisting)
    {
        controller.AddGlobalStep("remove existing cluster",
            async controller =>
            {
                await hostingManager.RemoveClusterAsync(removeOrphans: true);
            });
    }

    controller.AddGlobalStep("generate credentials",
        controller =>
        {
            // We're going to generate a secure random password and we're going to append
            // an extra 4-character string to ensure that the password meets Azure (and probably
            // other cloud) minimum requirements:
            //
            // The supplied password must be between 6-72 characters long and must
            // satisfy at least 3 of the password complexity requirements from the following:
            //
            //      1. Contains an uppercase character
            //      2. Contains a lowercase character
            //      3. Contains a numeric digit
            //      4. Contains a special character
            //      5. Control characters are not allowed
            //
            // We're going to use the cloud API to configure this secure password
            // when creating the VMs. For on-premise hypervisor environments such
            // as Hyper-V and XenServer, we're going to use the [neon-init]
            // service to mount a virtual DVD that will change the password before
            // configuring the network on first boot.

            var hostingManager = controller.Get<IHostingManager>(KubeSetupProperty.HostingManager);
            var clusterLogin   = controller.Get<ClusterLogin>(KubeSetupProperty.ClusterLogin);

            controller.SetGlobalStepStatus("generate: SSH password");

            // Generate a secure SSH password and append a string that guarantees that
            // the generated password meets minimum cloud requirements.

            clusterLogin.SshPassword  = NeonHelper.GetCryptoRandomPassword(clusterDefinition.Security.PasswordLength);
            clusterLogin.SshPassword += ".Aa0";

            // We're also going to generate the server's SSH key here and pass that to the hosting
            // manager's provisioner. We need to do this up front because some hosting environments
            // like Azure don't allow SSH password authentication by default, so we'll need the SSH key
            // to initialize the nodes after they've been provisioned for those environments.

            if (clusterLogin.SshKey == null)
            {
                // Generate a 2048 bit SSH key pair.

                controller.SetGlobalStepStatus("generate: SSH client key pair");

                clusterLogin.SshKey = KubeHelper.GenerateSshKey(cluster.Name, KubeConst.SysAdminUser);
            }

            // We also need to generate the root SSO password when necessary and add this
            // to the cluster login.

            controller.SetGlobalStepStatus("generate: SSO password");

            clusterLogin.SsoUsername = "******";
            clusterLogin.SsoPassword = cluster.Definition.RootPassword ?? NeonHelper.GetCryptoRandomPassword(cluster.Definition.Security.PasswordLength);

            clusterLogin.Save();

            // Update node proxies with the generated SSH credentials.

            foreach (var node in cluster.Nodes)
            {
                node.UpdateCredentials(clusterLogin.SshCredentials);
            }
        });

    // Have the hosting manager add any custom provisioning steps.

    hostingManager.AddProvisioningSteps(controller);

    // Add the provisioning steps.

    controller.AddWaitUntilOnlineStep(timeout: TimeSpan.FromMinutes(15));
    controller.AddNodeStep("check node OS", (controller, node) => node.VerifyNodeOS());

    controller.AddNodeStep("delete boot script",
        (controller, node) =>
        {
            // Hosting managers may use [cloud-init] to execute custom scripts
            // when a node virtual machine first boots to configure networking and
            // also to set a secure SSH password.
            //
            // We need to delete this script file since it includes the SSH password.
            // If present, the script writes the path to itself to:
            //
            //      /etc/neonkube/cloud-init/boot-script-path
            //
            // We're going to read this file if it exists and delete the script.

            var scriptPath = "/etc/neonkube/cloud-init/boot-script-path";

            if (node.FileExists(scriptPath))
            {
                scriptPath = node.DownloadText(scriptPath);

                if (!string.IsNullOrEmpty(scriptPath))
                {
                    node.SudoCommand("rm", "-f", scriptPath.Trim());
                }
            }
        });

    controller.AddNodeStep("check image version",
        (controller, node) =>
        {
            // Ensure that the node image version matches the current neonKUBE version.
            var imageVersion = node.ImageVersion;

            if (imageVersion == null)
            {
                throw new Exception($"Node image is not stamped with the image version file: {KubeConst.ImageVersionPath}");
            }

            if (imageVersion != SemanticVersion.Parse(KubeVersions.NeonKube))
            {
                throw new Exception($"Node image version [{imageVersion}] does not match the neonKUBE version [{KubeVersions.NeonKube}] implemented by the current build.");
            }
        });

    controller.AddNodeStep("node credentials",
        (controller, node) =>
        {
            node.ConfigureSshKey(controller);
        });

    controller.AddNodeStep("prepare nodes",
        (controller, node) =>
        {
            node.PrepareNode(controller);
        });

    controller.AddGlobalStep("neoncluster.io domain",
        async controller =>
        {
            controller.SetGlobalStepStatus("create: *.neoncluster.io domain (for TLS)");

            if (string.IsNullOrEmpty(cluster.Definition.Domain))
            {
                var hostingEnvironment = controller.Get<HostingEnvironment>(KubeSetupProperty.HostingEnvironment);
                var clusterAddresses   = cluster.HostingManager.GetClusterAddresses();

                using (var jsonClient = new JsonClient())
                {
                    jsonClient.BaseAddress = new Uri(controller.Get<string>(KubeSetupProperty.NeonCloudHeadendUri));

                    clusterLogin.ClusterDefinition.Domain = await jsonClient.PostAsync<string>($"/cluster/domain?addresses={string.Join(',', clusterAddresses)}");

                    // $hack(jefflill):
                    //
                    // I'm going to parse the cluster ID from the domain returned.
                    //
                    //      https://github.com/nforgeio/neonKUBE/issues/16407
                    //
                    // $todo(marcusbooyah):
                    //
                    // You need to modify the headend API to return an object with the cluster ID
                    // and domain being returned as separate properties and then replace this code.
                    //
                    // Note that the ID should include some dashes to make it easier to read, like:
                    //
                    //      4FCA-0F7A-F7F3-4FC0
                    //
                    // Cluster IDs are probably going to end up being important for customer support
                    // and I suspect that we'll be asking users for this ID when they call in.

                    var fields = clusterLogin.ClusterDefinition.Domain.Split('.');

                    clusterLogin.ClusterDefinition.Id = fields[1];
                }
            }

            clusterLogin.Save();
        });

    // Some hosting managers may have to do some additional work after
    // the cluster has been otherwise prepared.

    hostingManager.AddPostProvisioningSteps(controller);

    // Indicate that cluster prepare succeeded by creating a [prepare-ok] file in
    // the log folder. Cluster setup will verify that this file exists before
    // proceeding.

    controller.AddGlobalStep("finish",
        controller =>
        {
            File.Create(Path.Combine(logFolder, "prepare-ok"));
        },
        quiet: true);

    // We need to dispose this after the setup controller runs.

    controller.AddDisposable(cluster);

    return(controller);
}
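A hedged calling sketch for the prepare controller; it assumes the static factory methods live on KubeSetup and that ISetupController exposes a RunAsync() method to execute the configured steps (neither is shown in these snippets).

// A minimal calling sketch, assuming [clusterDefinition] was loaded and
// validated by the caller. [KubeSetup] as the containing class and the
// RunAsync() execution API are assumptions.
var prepareController = KubeSetup.CreateClusterPrepareController(
    clusterDefinition: clusterDefinition,
    nodeImagePath:     nodeImagePath,    // or nodeImageUri for on-premise hypervisor environments
    maxParallel:       10,
    removeExisting:    true);

// Execute the prepare steps; on success a [prepare-ok] file is written to
// the log folder for cluster setup to verify later.
await prepareController.RunAsync();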
/// <summary>
/// Constructs the <see cref="ISetupController"/> to be used for setting up a cluster.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <param name="maxParallel">
/// Optionally specifies the maximum number of node operations to be performed in parallel.
/// This <b>defaults to 500</b> which is effectively infinite.
/// </param>
/// <param name="unredacted">
/// Optionally indicates that sensitive information <b>won't be redacted</b> from the setup logs
/// (typically used when debugging).
/// </param>
/// <param name="debugMode">Optionally indicates that the cluster will be prepared in debug mode.</param>
/// <param name="uploadCharts">
/// <para>
/// Optionally specifies that the current Helm charts should be uploaded to replace the charts in the base image.
/// </para>
/// <note>
/// This will be treated as <c>true</c> when <paramref name="debugMode"/> is passed as <c>true</c>.
/// </note>
/// </param>
/// <param name="clusterspace">Optionally specifies the clusterspace for the operation.</param>
/// <param name="neonCloudHeadendUri">Optionally overrides the neonCLOUD headend service URI. This defaults to <see cref="KubeConst.NeonCloudHeadendUri"/>.</param>
/// <param name="disableConsoleOutput">
/// Optionally disables status output to the console. This is typically
/// enabled for non-console applications.
/// </param>
/// <returns>The <see cref="ISetupController"/>.</returns>
/// <exception cref="NeonKubeException">Thrown when there's a problem.</exception>
public static ISetupController CreateClusterSetupController(
    ClusterDefinition   clusterDefinition,
    int                 maxParallel          = 500,
    bool                unredacted           = false,
    bool                debugMode            = false,
    bool                uploadCharts         = false,
    string              clusterspace         = null,
    string              neonCloudHeadendUri  = null,
    bool                disableConsoleOutput = false)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));
    Covenant.Requires<ArgumentException>(maxParallel > 0, nameof(maxParallel));

    neonCloudHeadendUri ??= KubeConst.NeonCloudHeadendUri;

    clusterDefinition.Validate();

    // Determine where the log files should go.

    var logFolder = KubeHelper.LogFolder;

    // Ensure that the [prepare-ok] file in the log folder exists, indicating that
    // the last prepare operation succeeded.

    var prepareOkPath = Path.Combine(logFolder, "prepare-ok");

    if (!File.Exists(prepareOkPath))
    {
        throw new NeonKubeException($"Cannot locate the [{prepareOkPath}] file. Cluster prepare must have failed.");
    }

    // Clear the log folder except for the [prepare-ok] file.

    if (Directory.Exists(logFolder))
    {
        foreach (var file in Directory.GetFiles(logFolder, "*", SearchOption.TopDirectoryOnly))
        {
            if (Path.GetFileName(file) != "prepare-ok")
            {
                NeonHelper.DeleteFile(file);
            }
        }
    }
    else
    {
        throw new DirectoryNotFoundException(logFolder);
    }

    // Reload the KubeConfig file to ensure we're up-to-date.

    KubeHelper.LoadConfig();

    // Do some quick checks to ensure that component versions look reasonable.

    //var kubernetesVersion = new Version(KubeVersions.Kubernetes);
    //var crioVersion       = new Version(KubeVersions.Crio);

    //if (crioVersion.Major != kubernetesVersion.Major || crioVersion.Minor != kubernetesVersion.Minor)
    //{
    //    throw new NeonKubeException($"[{nameof(KubeConst)}.{nameof(KubeVersions.Crio)}={KubeVersions.Crio}] major and minor versions don't match [{nameof(KubeConst)}.{nameof(KubeVersions.Kubernetes)}={KubeVersions.Kubernetes}].");
    //}

    // Initialize the cluster proxy.
    var contextName = KubeContextName.Parse($"root@{clusterDefinition.Name}");
    var kubeContext = new KubeConfigContext(contextName);

    KubeHelper.InitContext(kubeContext);

    ClusterProxy cluster = null;

    cluster = new ClusterProxy(
        hostingManagerFactory: new HostingManagerFactory(() => HostingLoader.Initialize()),
        operation:             ClusterProxy.Operation.Setup,
        clusterDefinition:     clusterDefinition,
        nodeProxyCreator:      (nodeName, nodeAddress) =>
        {
            var logStream      = new FileStream(Path.Combine(logFolder, $"{nodeName}.log"), FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite);
            var logWriter      = new StreamWriter(logStream);
            var context        = KubeHelper.CurrentContext;
            var sshCredentials = context.Extension.SshCredentials ?? SshCredentials.FromUserPassword(KubeConst.SysAdminUser, KubeConst.SysAdminPassword);

            return(new NodeSshProxy<NodeDefinition>(nodeName, nodeAddress, sshCredentials, logWriter: logWriter));
        });

    if (unredacted)
    {
        cluster.SecureRunOptions = RunOptions.None;
    }

    // Configure the setup controller.

    var controller = new SetupController<NodeDefinition>($"Setup [{cluster.Definition.Name}] cluster", cluster.Nodes, KubeHelper.LogFolder, disableConsoleOutput: disableConsoleOutput)
    {
        MaxParallel     = maxParallel,
        LogBeginMarker  = "# CLUSTER-BEGIN-SETUP #########################################################",
        LogEndMarker    = "# CLUSTER-END-SETUP-SUCCESS ###################################################",
        LogFailedMarker = "# CLUSTER-END-SETUP-FAILED ####################################################"
    };

    // Load the cluster login information if it exists and when it indicates that
    // setup is still pending, we'll use that information (especially the generated
    // secure SSH password).
    //
    // Otherwise, we'll write (or overwrite) the context file with a fresh context.

    var clusterLoginPath = KubeHelper.GetClusterLoginPath((KubeContextName)$"{KubeConst.RootUser}@{clusterDefinition.Name}");
    var clusterLogin     = ClusterLogin.Load(clusterLoginPath);

    if (clusterLogin == null || !clusterLogin.SetupDetails.SetupPending)
    {
        clusterLogin = new ClusterLogin(clusterLoginPath)
        {
            ClusterDefinition = clusterDefinition,
            SshUsername       = KubeConst.SysAdminUser,
            SetupDetails      = new KubeSetupDetails() { SetupPending = true }
        };

        clusterLogin.Save();
    }

    // Update the cluster node SSH credentials to use the secure password.

    var sshCredentials = SshCredentials.FromUserPassword(KubeConst.SysAdminUser, clusterLogin.SshPassword);

    foreach (var node in cluster.Nodes)
    {
        node.UpdateCredentials(sshCredentials);
    }

    // Configure the setup controller state.

    controller.Add(KubeSetupProperty.Preparing, false);
    controller.Add(KubeSetupProperty.ReleaseMode, KubeHelper.IsRelease);
    controller.Add(KubeSetupProperty.DebugMode, debugMode);
    controller.Add(KubeSetupProperty.MaintainerMode, !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("NC_ROOT")));
    controller.Add(KubeSetupProperty.ClusterProxy, cluster);
    controller.Add(KubeSetupProperty.ClusterLogin, clusterLogin);
    controller.Add(KubeSetupProperty.HostingManager, cluster.HostingManager);
    controller.Add(KubeSetupProperty.HostingEnvironment, cluster.HostingManager.HostingEnvironment);
    controller.Add(KubeSetupProperty.ClusterspaceFolder, clusterspace);
    controller.Add(KubeSetupProperty.NeonCloudHeadendUri, neonCloudHeadendUri);
    controller.Add(KubeSetupProperty.Redact, !unredacted);

    // Configure the setup steps.
controller.AddGlobalStep("resource requirements", KubeSetup.CalculateResourceRequirements); cluster.HostingManager.AddSetupSteps(controller); controller.AddWaitUntilOnlineStep("connect nodes"); controller.AddNodeStep("check node OS", (controller, node) => node.VerifyNodeOS()); controller.AddNodeStep("check image version", (controller, node) => { // Ensure that the node image version matches the current neonKUBE (build) version. var imageVersion = node.ImageVersion; if (imageVersion == null) { throw new Exception($"Node image is not stamped with the image version file: {KubeConst.ImageVersionPath}"); } if (imageVersion != SemanticVersion.Parse(KubeVersions.NeonKube)) { throw new Exception($"Node image version [{imageVersion}] does not match the neonKUBE version [{KubeVersions.NeonKube}] implemented by the current build."); } }); controller.AddNodeStep("disable cloud-init", (controller, node) => node.SudoCommand("touch /etc/cloud/cloud-init.disabled")); controller.AddNodeStep("node basics", (controller, node) => node.BaseInitialize(controller, upgradeLinux: false)); // $todo(jefflill): We don't support Linux distribution upgrades yet. controller.AddNodeStep("root certificates", (controller, node) => node.UpdateRootCertificates()); controller.AddNodeStep("setup ntp", (controller, node) => node.SetupConfigureNtp(controller)); controller.AddNodeStep("cluster metadata", ConfigureMetadataAsync); // Perform common configuration for the bootstrap node first. // We need to do this so the the package cache will be running // when the remaining nodes are configured. var configureControlPlaneStepLabel = cluster.Definition.ControlNodes.Count() > 1 ? "setup first control-plane node" : "setup control-plane node"; controller.AddNodeStep(configureControlPlaneStepLabel, (controller, node) => { node.SetupNode(controller, KubeSetup.ClusterManifest); }, (controller, node) => node == cluster.FirstControlNode); // Perform common configuration for the remaining nodes (if any). if (cluster.Definition.Nodes.Count() > 1) { controller.AddNodeStep("setup other nodes", (controller, node) => { node.SetupNode(controller, KubeSetup.ClusterManifest); node.InvokeIdempotent("setup/setup-node-restart", () => node.Reboot(wait: true)); }, (controller, node) => node != cluster.FirstControlNode); } if (debugMode) { controller.AddNodeStep("load images", (controller, node) => node.NodeLoadImagesAsync(controller, downloadParallel: 5, loadParallel: 3)); } controller.AddNodeStep("install helm", (controller, node) => { node.NodeInstallHelm(controller); }); controller.AddNodeStep("install kustomize", (controller, node) => { node.NodeInstallKustomize(controller); }); if (uploadCharts || debugMode) { controller.AddNodeStep("upload helm charts", (controller, node) => { cluster.FirstControlNode.SudoCommand($"rm -rf {KubeNodeFolder.Helm}/*"); cluster.FirstControlNode.NodeInstallHelmArchive(controller); var zipPath = LinuxPath.Combine(KubeNodeFolder.Helm, "charts.zip"); cluster.FirstControlNode.SudoCommand($"unzip {zipPath} -d {KubeNodeFolder.Helm}"); cluster.FirstControlNode.SudoCommand($"rm -f {zipPath}"); }, (controller, node) => node == cluster.FirstControlNode); } //----------------------------------------------------------------- // Cluster setup. controller.AddGlobalStep("setup cluster", controller => KubeSetup.SetupClusterAsync(controller)); controller.AddGlobalStep("persist state", controller => { // Indicate that setup is complete. 
            clusterLogin.ClusterDefinition.ClearSetupState();
            clusterLogin.SetupDetails.SetupPending = false;
            clusterLogin.Save();
        });

    //-----------------------------------------------------------------
    // Verify the cluster.

    controller.AddNodeStep("check control-plane nodes",
        (controller, node) =>
        {
            KubeDiagnostics.CheckControlNode(node, cluster.Definition);
        },
        (controller, node) => node.Metadata.IsControlPane);

    if (cluster.Workers.Count() > 0)
    {
        controller.AddNodeStep("check workers",
            (controller, node) =>
            {
                KubeDiagnostics.CheckWorker(node, cluster.Definition);
            },
            (controller, node) => node.Metadata.IsWorker);
    }

    cluster.HostingManager.AddPostSetupSteps(controller);

    // We need to dispose this after the setup controller runs.

    controller.AddDisposable(cluster);

    return(controller);
}
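A hedged calling sketch for the setup controller; setup only proceeds when a prior prepare run left the [prepare-ok] marker in the log folder. The same KubeSetup/RunAsync() assumptions as in the prepare sketch above apply.

// A minimal calling sketch for setup, assuming cluster prepare already
// succeeded (so [prepare-ok] exists) and that ISetupController exposes
// RunAsync() (an assumption, as above).
var setupController = KubeSetup.CreateClusterSetupController(
    clusterDefinition: clusterDefinition,
    maxParallel:       10,
    uploadCharts:      false);

await setupController.RunAsync();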
/// <inheritdoc/>
public HostingManager GetMaster(ClusterProxy cluster, string logFolder = null)
{
    CheckInitialized();

    return(Loader.GetManager(cluster, logFolder));
}
/// <summary>
/// Implements the configuration step.
/// </summary>
/// <param name="cluster">The cluster proxy instance.</param>
public abstract void Run(ClusterProxy cluster);
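The pause and upload steps earlier in this section override this method. The sketch below is a hypothetical minimal subclass, assuming the abstract base type is named ConfigStep (its name isn't shown in these snippets) and reusing only node calls that appear above.

// A hypothetical minimal step, assuming the abstract base type is named
// [ConfigStep] and reusing node APIs shown elsewhere in this section
// (GetNode, SudoCommand, Status).
public class TouchFileStep : ConfigStep
{
    private readonly string nodeName;
    private readonly string path;

    public TouchFileStep(string nodeName, string path)
    {
        this.nodeName = nodeName;
        this.path     = path;
    }

    /// <inheritdoc/>
    public override void Run(ClusterProxy cluster)
    {
        Covenant.Requires<ArgumentNullException>(cluster != null, nameof(cluster));

        var node = cluster.GetNode(nodeName);

        node.Status = $"touch {path}";
        node.SudoCommand("touch", path);
        node.Status = string.Empty;
    }
}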
/// <summary>
/// Constructor.
/// </summary>
/// <param name="cluster">The cluster being managed.</param>
/// <param name="logFolder">
/// The folder where log files are to be written, or <c>null</c> or
/// empty if logging is disabled.
/// </param>
public MachineHostingManager(ClusterProxy cluster, string logFolder = null)
{
    cluster.HostingManager = this;

    this.cluster = cluster;
}
/// <summary>
/// Constructor.
/// </summary>
/// <param name="cluster">The cluster being managed.</param>
/// <param name="logFolder">
/// The folder where log files are to be written, or <c>null</c> or
/// empty if logging is disabled.
/// </param>
public HyperVLocalHostingManager(ClusterProxy cluster, string logFolder = null)
{
    cluster.HostingManager = this;

    this.cluster = cluster;
}