/// <summary>
/// Updates the hive services and containers.
/// </summary>
/// <param name="force"><c>true</c> to disable the update prompt.</param>
/// <param name="maxParallel">Maximum number of parallel operations.</param>
/// <param name="imageTag">Optionally overrides the default image tag.</param>
private void UpdateServices(bool force, int maxParallel, string imageTag = null)
{
    EnsureRootPivileges();

    if (!force && !Program.PromptYesNo($"*** Are you sure you want to UPDATE HIVE services on [{hive.Name}]?"))
    {
        Program.Exit(0);
    }

    var controller = new SetupController<NodeDefinition>("hive update images", hive.Nodes)
    {
        MaxParallel = maxParallel,
        ShowStatus  = true
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);

    HiveUpdateManager.AddHiveUpdateSteps(hive, controller, out var restartRequired, servicesOnly: true, serviceUpdateParallism: Program.MaxParallel, imageTag: imageTag);

    if (controller.StepCount == 0)
    {
        Console.WriteLine("The hive is already up-to-date.");
        Program.Exit(0);
    }

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: One or more UPDATE steps failed.");
        Program.Exit(1);
    }

    Console.WriteLine();
    Console.WriteLine("*** Hive services and containers were updated successfully.");
}
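// NOTE: Every command in this file follows the same [SetupController] pattern:
// build a controller over the target nodes, configure parallelism and status
// display, register one or more steps, then call [Run()] and examine the result.
// A minimal sketch of the pattern, using only APIs that appear elsewhere in
// this file (the step name and command below are illustrative, not real steps):
//
//      var controller = new SetupController<NodeDefinition>("example", hive.Nodes)
//      {
//          MaxParallel = maxParallel,
//          ShowStatus  = true
//      };
//
//      controller.SetDefaultRunOptions(RunOptions.FaultOnError);
//
//      // Step actions receive the target node proxy and a delay used to
//      // stagger execution across the nodes.
//
//      controller.AddStep("say hello",
//          (node, stepDelay) =>
//          {
//              Thread.Sleep(stepDelay);
//              node.Status = "run: echo hello";
//              node.SudoCommand("echo hello");
//          });
//
//      if (!controller.Run())
//      {
//          Console.Error.WriteLine("*** ERROR: One or more steps failed.");
//          Program.Exit(1);
//      }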
/// <summary>
/// Updates the Docker engine on all hive nodes and then restarts them
/// one at a time, giving each of them some time to stabilize before
/// updating the next node.
/// </summary>
/// <param name="force"><c>true</c> to disable the update prompt.</param>
/// <param name="version">The Docker version to install.</param>
/// <param name="maxParallel">Maximum number of parallel operations.</param>
private void UpdateDocker(bool force, string version, int maxParallel)
{
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(version));

    EnsureRootPivileges();

    if (!hive.Headend.IsDockerCompatible(hive.Globals.Version, version, out var message))
    {
        Console.Error.WriteLine($"*** ERROR: {message}");
        Program.Exit(1);
    }

    if (!force && !Program.PromptYesNo($"*** Are you sure you want to UPDATE DOCKER on [{hive.Name}] hive nodes?"))
    {
        Program.Exit(0);
    }

    this.version          = version;
    this.dockerPackageUri = hive.Headend.GetDockerPackageUri(version, out message);

    var controller = new SetupController<NodeDefinition>($"hive update docker: {version}", hive.Nodes)
    {
        MaxParallel = maxParallel,
        ShowStatus  = !Program.Quiet
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);
    controller.AddStep("update managers", UpdateDocker, n => n.Metadata.IsManager, parallelLimit: 1);

    if (hive.Workers.Count() > 0)
    {
        controller.AddStep("update workers", UpdateDocker, n => n.Metadata.IsWorker, parallelLimit: 1);
    }

    if (hive.Pets.Count() > 0)
    {
        controller.AddStep("update pets", UpdateDocker, n => n.Metadata.IsPet, parallelLimit: 1);
    }

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: One or more DOCKER UPDATE steps failed.");
        Program.Exit(1);
    }

    Console.WriteLine();
    Console.WriteLine("*** Docker Engine was updated successfully.");
}
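// NOTE: The per-node [UpdateDocker(node, stepDelay)] step handler registered
// above is not part of this listing.  A plausible sketch, assuming the
// [dockerPackageUri] field set above and only the node APIs used elsewhere in
// this file (the download/install commands are assumptions, not the actual
// implementation):
//
//      private void UpdateDocker(SshProxy<NodeDefinition> node, TimeSpan stepDelay)
//      {
//          Thread.Sleep(stepDelay);
//
//          node.Status = "download: docker package";
//          node.SudoCommand($"curl -fsSL {dockerPackageUri} -o /tmp/docker.deb");
//
//          node.Status = "install: docker package";
//          node.SudoCommand("dpkg --install /tmp/docker.deb");
//
//          node.Status = "restarting";
//          node.Reboot(wait: true);
//
//          node.Status = $"stabilizing ({Program.WaitSeconds}s)";
//          Thread.Sleep(TimeSpan.FromSeconds(Program.WaitSeconds));
//      }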
/// <summary>
/// Updates the Linux distribution on all hive nodes and then reboots them
/// one at a time, giving each of them some time to stabilize before rebooting
/// the next node.
/// </summary>
/// <param name="force"><c>true</c> to disable the update prompt.</param>
/// <param name="maxParallel">Maximum number of parallel operations.</param>
private void UpdateLinux(bool force, int maxParallel)
{
    EnsureRootPivileges();

    if (!force && !Program.PromptYesNo($"*** Are you sure you want to UPDATE LINUX on [{hive.Name}] hive nodes?"))
    {
        Program.Exit(0);
    }

    var firstManager = hive.FirstManager;

    var controller = new SetupController<NodeDefinition>("hive update linux", hive.Nodes)
    {
        MaxParallel = maxParallel,
        ShowStatus  = !Program.Quiet
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);

    controller.AddStep("fetch updates",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);
            node.Status = "run: safe-apt-get update";
            node.SudoCommand("safe-apt-get update");
        });

    controller.AddStep("update managers", UpdateLinux, n => n.Metadata.IsManager, parallelLimit: 1);

    if (hive.Workers.Count() > 0)
    {
        controller.AddStep("update workers", UpdateLinux, n => n.Metadata.IsWorker, parallelLimit: 1);
    }

    if (hive.Pets.Count() > 0)
    {
        controller.AddStep("update pets", UpdateLinux, n => n.Metadata.IsPet, parallelLimit: 1);
    }

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: One or more UPDATE steps failed.");
        Program.Exit(1);
    }

    Console.WriteLine();
    Console.WriteLine("*** Linux packages were updated successfully.");
}
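// NOTE: As with the Docker update, the per-node [UpdateLinux(node, stepDelay)]
// step handler is not included in this listing.  A minimal sketch matching the
// summary above (upgrade, reboot, stabilize); the exact apt command is an
// assumption:
//
//      private void UpdateLinux(SshProxy<NodeDefinition> node, TimeSpan stepDelay)
//      {
//          Thread.Sleep(stepDelay);
//
//          node.Status = "run: safe-apt-get dist-upgrade -yq";
//          node.SudoCommand("safe-apt-get dist-upgrade -yq");
//
//          node.Status = "rebooting";
//          node.Reboot(wait: true);
//
//          node.Status = $"stabilizing ({Program.WaitSeconds}s)";
//          Thread.Sleep(TimeSpan.FromSeconds(Program.WaitSeconds));
//      }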
/// <summary>
/// Updates the hive configuration, services, and containers.
/// </summary>
/// <param name="force"><c>true</c> to disable the update prompt.</param>
/// <param name="maxParallel">Maximum number of parallel operations.</param>
/// <param name="imageTag">Optionally overrides the default image tag.</param>
private void UpdateHive(bool force, int maxParallel, string imageTag = null)
{
    EnsureRootPivileges();

    if (!force && !Program.PromptYesNo($"*** Are you sure you want to UPDATE HIVE components and services on [{hive.Name}]?"))
    {
        Program.Exit(0);
    }

    var controller = new SetupController<NodeDefinition>("hive update", hive.Nodes)
    {
        MaxParallel = maxParallel,
        ShowStatus  = !Program.Quiet
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);

    var hiveUpdateCount = HiveUpdateManager.AddHiveUpdateSteps(hive, controller, out var restartRequired, serviceUpdateParallism: Program.MaxParallel, imageTag: imageTag);

    if (controller.StepCount == 0)
    {
        Console.WriteLine("The hive is already up-to-date.");
        Program.Exit(0);
    }

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: One or more UPDATE steps failed.");
        Program.Exit(1);
    }

    Console.WriteLine();
    Console.WriteLine("*** Hive components, services, and containers were updated successfully.");

    // Warn if the operator has disabled Vault auto-unseal.

    if (hive.Globals.TryGetBool(HiveGlobals.UserDisableAutoUnseal, out var disableAutoUnseal) && disableAutoUnseal)
    {
        Console.WriteLine();
        Console.WriteLine("*** WARNING: The hive Vault is probably sealed now because auto-unseal is disabled.");
        Console.WriteLine();
        Console.WriteLine("Use these commands to check Vault status and manually unseal if necessary:");
        Console.WriteLine();
        Console.WriteLine("    neon vault -- status");
        Console.WriteLine("    neon vault -- unseal");
        Console.WriteLine();
    }
}
/// <summary>
/// Checks the hive for pending updates.
/// </summary>
/// <param name="maxParallel">Maximum number of parallel operations.</param>
private void CheckHive(int maxParallel)
{
    EnsureRootPivileges();

    // Use a temporary controller to determine how many hive
    // updates are pending.

    var controller = new SetupController<NodeDefinition>("hive update check", hive.Nodes)
    {
        MaxParallel = maxParallel,
        ShowStatus  = !Program.Quiet
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);

    var hiveUpdateCount = HiveUpdateManager.AddHiveUpdateSteps(hive, controller, out var restartRequired, serviceUpdateParallism: Program.MaxParallel);

    // Create another controller to actually scan the hive nodes to
    // count the pending Linux updates as well as the system containers
    // and services that need to be updated.

    // $todo(jeff.lill):
    //
    // We need to query a new image lookup service to get the images
    // compatible with the hive and then determine whether any of
    // these need updating on any node.  Right now, we're just checking
    // the Linux package updates.
    //
    // We should do something similar for the host services like:
    // consul, docker, powerdns, and vault.

    controller = new SetupController<NodeDefinition>("hive update check", hive.Nodes)
    {
        MaxParallel = maxParallel,
        ShowStatus  = !Program.Quiet
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);

    var syncLock           = new object();
    var maxUpdates         = 0;
    var maxSecurityUpdates = 0;
    var componentInfo      = hive.Headend.GetComponentInfo(hive.Globals.Version, ThisAssembly.Git.Branch);
    var dockerVersions     = new Dictionary<SemanticVersion, int>();    // Counts the number of versions
    var consulVersions     = new Dictionary<SemanticVersion, int>();    // installed on hive nodes.
    var vaultVersions      = new Dictionary<SemanticVersion, int>();

    controller.AddStep("scan hive",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);

            //---------------------------------------------------------
            // Look for Linux package updates.

            node.Status = "run: safe-apt-get update";
            node.SudoCommand("safe-apt-get update");

            node.Status = "run: apt-check";

            var response = node.SudoCommand("/usr/lib/update-notifier/apt-check");

            // This command returns the total number of updates and
            // the security updates like: TOTAL;SECURITY.

            var fields = response.ErrorText.Trim().Split(';');

            if (fields.Length < 2 ||
                !int.TryParse(fields[0], out var updates) ||
                !int.TryParse(fields[1], out var securityUpdates))
            {
                node.Fault($"Unexpected update response: {response.ErrorText}");
                return;
            }

            lock (syncLock)
            {
                maxUpdates         = Math.Max(maxUpdates, updates);
                maxSecurityUpdates = Math.Max(maxSecurityUpdates, securityUpdates);
            }

            //---------------------------------------------------------
            // Determine the versions of Docker, Consul, and Vault installed
            // on this node and tally the versions for the hive.  Note that
            // it's possible for multiple versions of a component to be
            // installed on different nodes if a previous update did not
            // run until completion.
node.Status = "docker version"; var dockerVersion = node.GetDockerVersion(faultIfNotInstalled: true); node.Status = "consul version"; var consulVersion = node.GetConsulVersion(faultIfNotInstalled: true); node.Status = "vault version"; var vaultVersion = node.GetVaultVersion(faultIfNotInstalled: true); if (!node.IsFaulted) { lock (syncLock) { int count; if (!dockerVersions.TryGetValue(dockerVersion, out count)) { count = 0; } dockerVersions[dockerVersion] = count + 1; if (!consulVersions.TryGetValue(consulVersion, out count)) { count = 0; } consulVersions[consulVersion] = count + 1; if (!vaultVersions.TryGetValue(vaultVersion, out count)) { count = 0; } vaultVersions[vaultVersion] = count + 1; } } }); if (!controller.Run()) { Console.Error.WriteLine("*** ERROR: One or more CHECK steps failed."); Program.Exit(1); } // Output the results. var title = $"[{hive.Name}] hive"; Console.WriteLine(); Console.WriteLine(title); Console.WriteLine(new string('-', title.Length)); var restartStatus = restartRequired ? " *** hive restart required ***" : string.Empty; var hiveStatus = (hiveUpdateCount == 0 && maxUpdates == 0) ? "CURRENT" : hiveUpdateCount.ToString() + restartStatus; var linuxPackageStatus = (maxUpdates == 0) ? "CURRENT" : maxUpdates.ToString(); var linuxSecurityStatus = (maxSecurityUpdates == 0) ? "CURRENT" : maxSecurityUpdates.ToString(); Console.WriteLine($"neonHIVE updates: {hiveStatus}"); Console.WriteLine($"Linux package updates: {linuxPackageStatus}"); Console.WriteLine($"Linux security updates: {linuxSecurityStatus}"); //------------------------------------------------------------- // Docker status string dockerVersionInfo; if (dockerVersions.Count == 0) { dockerVersionInfo = "*** ERROR: Docker is not installed."; } else if (dockerVersions.Count == 1) { dockerVersionInfo = (string)dockerVersions.Keys.First(); } else { var sb = new StringBuilder(); foreach (var version in dockerVersions.Keys.OrderBy(v => v)) { sb.AppendWithSeparator((string)version, ", "); } dockerVersionInfo = sb.ToString(); } var dockerStatus = "CURRENT"; if (dockerVersions.Count == 0) { dockerStatus = "ERROR: cannot detect version"; } else if (dockerVersions.Count > 1) { dockerStatus = "WARNING: multiple versions installed"; } else if (dockerVersions.Keys.Min(v => v) < (SemanticVersion)componentInfo.Docker) { dockerStatus = "UPDATE AVAILABLE"; } var dockerTitle = $"Docker Engine: {dockerStatus}"; Console.WriteLine(); Console.WriteLine(); Console.WriteLine(dockerTitle); Console.WriteLine(new string('-', dockerTitle.Length)); Console.WriteLine($"Current: {dockerVersionInfo}"); Console.WriteLine($"Latest: {componentInfo.Docker}"); //------------------------------------------------------------- // Consul status string consulVersionInfo; if (consulVersions.Count == 0) { consulVersionInfo = "*** ERROR: Consul is not installed."; } else if (consulVersions.Count == 1) { consulVersionInfo = (string)consulVersions.Keys.First(); } else { var sb = new StringBuilder(); foreach (var version in consulVersions.Keys.OrderBy(v => v)) { sb.AppendWithSeparator((string)version, ", "); } consulVersionInfo = sb.ToString(); } var consulStatus = "CURRENT"; if (consulVersions.Count == 0) { consulStatus = "ERROR: cannot detect version"; } else if (consulVersions.Count > 1) { consulStatus = "WARNING: multiple versions installed"; } else if (consulVersions.Keys.Min(v => v) < (SemanticVersion)componentInfo.Consul) { consulStatus = "UPDATE AVAILABLE"; } var consulTitle = $"HashiCorp Consul: {consulStatus}"; Console.WriteLine(); 
    Console.WriteLine();
    Console.WriteLine(consulTitle);
    Console.WriteLine(new string('-', consulTitle.Length));
    Console.WriteLine($"Current: {consulVersionInfo}");
    Console.WriteLine($"Latest:  {componentInfo.Consul}");

    //-------------------------------------------------------------
    // Vault status

    string vaultVersionInfo;

    if (vaultVersions.Count == 0)
    {
        vaultVersionInfo = "*** ERROR: Vault is not installed.";
    }
    else if (vaultVersions.Count == 1)
    {
        vaultVersionInfo = (string)vaultVersions.Keys.First();
    }
    else
    {
        var sb = new StringBuilder();

        foreach (var version in vaultVersions.Keys.OrderBy(v => v))
        {
            sb.AppendWithSeparator((string)version, ", ");
        }

        vaultVersionInfo = sb.ToString();
    }

    var vaultStatus = "CURRENT";

    if (vaultVersions.Count == 0)
    {
        vaultStatus = "ERROR: cannot detect version";
    }
    else if (vaultVersions.Count > 1)
    {
        vaultStatus = "WARNING: multiple versions installed";
    }
    else if (vaultVersions.Keys.Min(v => v) < (SemanticVersion)componentInfo.Vault)
    {
        vaultStatus = "UPDATE AVAILABLE";
    }

    var vaultTitle = $"HashiCorp Vault: {vaultStatus}";

    Console.WriteLine();
    Console.WriteLine();
    Console.WriteLine(vaultTitle);
    Console.WriteLine(new string('-', vaultTitle.Length));
    Console.WriteLine($"Current: {vaultVersionInfo}");
    Console.WriteLine($"Latest:  {componentInfo.Vault}");
}
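// NOTE: [apt-check] reports its counts on stderr, which is why the scan step
// above parses [response.ErrorText] rather than the standard output.  For
// example, a node with 5 pending package updates, 2 of them security related,
// reports:
//
//      $ /usr/lib/update-notifier/apt-check
//      5;2
//
// which the step parses into [updates=5] and [securityUpdates=2].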
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    var split            = commandLine.Split("--");
    var leftCommandLine  = split.Left;
    var rightCommandLine = split.Right;

    // Basic initialization.

    if (leftCommandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    Program.ConnectHive();

    var hive = HiveHelper.Hive;

    // Process the nodes.

    var nodeDefinitions = new List<NodeDefinition>();
    var nodeOption      = leftCommandLine.GetOption("--node", null);

    if (!string.IsNullOrWhiteSpace(nodeOption))
    {
        if (nodeOption == "+")
        {
            foreach (var manager in hive.Definition.SortedManagers)
            {
                nodeDefinitions.Add(manager);
            }

            foreach (var worker in hive.Definition.SortedWorkers)
            {
                nodeDefinitions.Add(worker);
            }

            foreach (var pet in hive.Definition.SortedPets)
            {
                nodeDefinitions.Add(pet);
            }
        }
        else
        {
            foreach (var name in nodeOption.Split(',', StringSplitOptions.RemoveEmptyEntries))
            {
                var trimmedName = name.Trim();

                NodeDefinition node;

                if (!hive.Definition.NodeDefinitions.TryGetValue(trimmedName, out node))
                {
                    Console.Error.WriteLine($"*** ERROR: Node [{trimmedName}] is not present in the hive.");
                    Program.Exit(1);
                }

                nodeDefinitions.Add(node);
            }
        }
    }

    var groupName = leftCommandLine.GetOption("--group");

    if (!string.IsNullOrEmpty(groupName))
    {
        var nodeGroups = hive.Definition.GetHostGroups();

        if (!nodeGroups.TryGetValue(groupName, out var group))
        {
            Console.Error.WriteLine($"*** ERROR: Node group [{groupName}] is not defined for the hive.");
            Program.Exit(1);
        }

        // Add the group nodes to the node definitions if they aren't
        // already present.

        foreach (var node in group)
        {
            if (nodeDefinitions.Count(n => n.Name.Equals(node.Name, StringComparison.InvariantCultureIgnoreCase)) == 0)
            {
                nodeDefinitions.Add(node);
            }
        }
    }

    if (nodeDefinitions.Count == 0)
    {
        // Default to a healthy manager.

        nodeDefinitions.Add(hive.GetReachableManager().Metadata);
    }

    // Create the command bundle by appending the right command.

    if (rightCommandLine == null)
    {
        Console.Error.WriteLine($"*** ERROR: [exec] command expects: [-- COMMAND...]");
        Program.Exit(1);
    }

    string command = rightCommandLine.Items.First();

    var args   = rightCommandLine.Items.Skip(1).ToArray();
    var bundle = new CommandBundle(command, args);

    // Append any script, text, or data files to the bundle.

    foreach (var scriptPath in leftCommandLine.GetOptionValues("--script"))
    {
        if (!File.Exists(scriptPath))
        {
            Console.Error.WriteLine($"*** ERROR: Script [{scriptPath}] does not exist.");
            Program.Exit(1);
        }

        bundle.AddFile(Path.GetFileName(scriptPath), File.ReadAllText(scriptPath), isExecutable: true);
    }

    foreach (var textPath in leftCommandLine.GetOptionValues("--text"))
    {
        if (!File.Exists(textPath))
        {
            Console.Error.WriteLine($"*** ERROR: Text file [{textPath}] does not exist.");
            Program.Exit(1);
        }

        bundle.AddFile(Path.GetFileName(textPath), File.ReadAllText(textPath));
    }

    foreach (var dataPath in leftCommandLine.GetOptionValues("--data"))
    {
        if (!File.Exists(dataPath))
        {
            Console.Error.WriteLine($"*** ERROR: Data file [{dataPath}] does not exist.");
            Program.Exit(1);
        }

        bundle.AddFile(Path.GetFileName(dataPath), File.ReadAllBytes(dataPath));
    }

    // Perform the operation.

    if (nodeDefinitions.Count == 1)
    {
        // Run the command on a single node and return the output and exit code.

        var node     = hive.GetNode(nodeDefinitions.First().Name);
        var response = node.SudoCommand(bundle);

        Console.WriteLine(response.OutputText);
        Program.Exit(response.ExitCode);
    }
    else
    {
        // Run the command on multiple nodes and return an overall exit code.
        var controller = new SetupController<NodeDefinition>(Program.SafeCommandLine, hive.Nodes.Where(n => nodeDefinitions.Exists(nd => nd.Name == n.Name)))
        {
            ShowStatus  = !Program.Quiet,
            MaxParallel = Program.MaxParallel
        };

        controller.SetDefaultRunOptions(RunOptions.FaultOnError);
        controller.AddWaitUntilOnlineStep();

        controller.AddStep($"run: {bundle.Command}",
            (node, stepDelay) =>
            {
                Thread.Sleep(stepDelay);

                node.Status = "running";
                node.SudoCommand(bundle, RunOptions.FaultOnError | RunOptions.LogOutput);

                if (Program.WaitSeconds > 0)
                {
                    node.Status = $"stabilize ({Program.WaitSeconds}s)";
                    Thread.Sleep(TimeSpan.FromSeconds(Program.WaitSeconds));
                }
            });

        if (!controller.Run())
        {
            Console.Error.WriteLine("*** ERROR: [exec] on one or more nodes failed.");
            Program.Exit(1);
        }
    }
}
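// NOTE: Typical invocations, assuming this command is exposed as [neon exec]
// (the option names come from the code above; the commands are examples):
//
//      # Run a command on a healthy manager (the default target):
//      neon exec -- docker node ls
//
//      # Run on specific nodes, bundling a script for execution:
//      neon exec --node=worker-0,worker-1 --script=setup.sh -- bash ./setup.sh
//
//      # Run on every node in the hive:
//      neon exec --node=+ -- uptime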
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Help();
        Program.Exit(0);
    }

    // Special-case handling of the [--remove-templates] option.

    if (commandLine.HasOption("--remove-templates"))
    {
        Console.WriteLine("Removing cached virtual machine templates.");

        foreach (var fileName in Directory.GetFiles(HiveHelper.GetVmTemplatesFolder(), "*.*", SearchOption.TopDirectoryOnly))
        {
            File.Delete(fileName);
        }

        Program.Exit(0);
    }

    // Implement the command.

    packageCacheUri = commandLine.GetOption("--package-cache");     // This overrides the hive definition, if specified.

    if (Program.HiveLogin != null)
    {
        Console.Error.WriteLine("*** ERROR: You are logged into a hive.  You need to logout before preparing another.");
        Program.Exit(1);
    }

    if (commandLine.Arguments.Length == 0)
    {
        Console.Error.WriteLine($"*** ERROR: HIVE-DEF expected.");
        Program.Exit(1);
    }

    hiveDefPath = commandLine.Arguments[0];
    force       = commandLine.GetFlag("--force");

    HiveDefinition.ValidateFile(hiveDefPath, strict: true);

    var hiveDefinition = HiveDefinition.FromFile(hiveDefPath, strict: true);

    hiveDefinition.Provisioner = $"neon-cli:{Program.Version}";     // Identify this tool/version as the hive provisioner.

    // NOTE:
    //
    // Azure has implemented a more restrictive password policy and our
    // default machine password does not meet the requirements:
    //
    //      The supplied password must be between 6-72 characters long and must
    //      satisfy at least 3 of password complexity requirements from the following:
    //
    //          1. Contains an uppercase character
    //          2. Contains a lowercase character
    //          3. Contains a numeric digit
    //          4. Contains a special character
    //          5. Control characters are not allowed
    //
    // It's also probably not a great idea to use a static password when
    // provisioning VMs in public clouds because it might be possible for
    // somebody to use this fact to SSH into nodes while the hive is being
    // set up and before we set the secure password at the end.
    //
    // This is less problematic for non-cloud environments because it's
    // likely that the hosts won't initially be able to receive inbound
    // Internet traffic and besides, we need to have a known password
    // embedded into the VM templates.
    //
    // We're going to handle this for cloud environments by looking
    // at [Program.MachinePassword].  If this is set to the default
    // machine password then we're going to replace it with a randomly
    // generated password with a few extra characters to ensure that
    // it meets the target cloud's password requirements.  We'll use
    // a non-default password if the operator specified one.

    if (hiveDefinition.Hosting.IsCloudProvider && Program.MachinePassword == HiveConst.DefaulVmTemplatePassword)
    {
        Program.MachinePassword = NeonHelper.GetRandomPassword(20);

        // Append a string that guarantees that the generated password meets
        // cloud minimum requirements.

        Program.MachinePassword += ".Aa0";
    }

    // Note that hive prepare starts new log files.

    hive = new HiveProxy(hiveDefinition, Program.CreateNodeProxy<NodeDefinition>, appendLog: false, useBootstrap: true, defaultRunOptions: RunOptions.LogOutput | RunOptions.FaultOnError);

    if (File.Exists(Program.GetHiveLoginPath(HiveConst.RootUser, hive.Definition.Name)))
    {
        Console.Error.WriteLine($"*** ERROR: A hive login named [{HiveConst.RootUser}@{hive.Definition.Name}] already exists.");
        Program.Exit(1);
    }

    Program.OSProperties = OSProperties.For(hiveDefinition.HiveNode.OperatingSystem);

    // Configure global options.
    if (commandLine.HasOption("--unredacted"))
    {
        hive.SecureRunOptions = RunOptions.None;
    }

    //-----------------------------------------------------------------
    // $todo(jeff.lill):
    //
    // We're temporarily disabling redaction to make it easier to investigate
    // Vault setup issues.  Remove this line before final launch.
    //
    //      https://github.com/jefflill/NeonForge/issues/225

    hive.SecureRunOptions = RunOptions.None;

    //-----------------------------------------------------------------
    // Assign the VPN client return subnets to the manager nodes if VPN is enabled.

    if (hive.Definition.Vpn.Enabled)
    {
        var vpnSubnet            = NetworkCidr.Parse(hive.Definition.Network.VpnPoolSubnet);
        var prefixLength         = 25;
        var nextVpnSubnetAddress = vpnSubnet.Address;

        // Note that we're not going to assign the first block of addresses in the
        // VPN subnet to any managers to prevent conflicts with addresses reserved
        // by some cloud platforms at the beginning of a subnet.  Azure for example
        // reserves 4 IP addresses for DNS servers and platform provided VPNs.

        foreach (var manager in hive.Definition.SortedManagers)
        {
            var managerVpnSubnet = new NetworkCidr(NetHelper.AddressIncrement(nextVpnSubnetAddress, VpnOptions.ServerAddressCount), prefixLength);

            manager.VpnPoolSubnet = managerVpnSubnet.ToString();
            nextVpnSubnetAddress  = managerVpnSubnet.NextAddress;
        }
    }

    //-----------------------------------------------------------------
    // Try to ensure that no servers are already deployed on the IP addresses defined
    // for hive nodes because provisioning over an existing hive will likely
    // corrupt the existing hive and also probably prevent the new hive from
    // provisioning correctly.
    //
    // Note that we're not going to perform this check for the [Machine] hosting
    // environment because we're expecting the bare machines to be already running
    // with the assigned addresses and we're also not going to do this for cloud
    // environments because we're assuming that the hive will run in its own private
    // network so there'll be no possibility of conflicts.

    if (hive.Definition.Hosting.Environment != HostingEnvironments.Machine && !hive.Definition.Hosting.IsCloudProvider)
    {
        Console.WriteLine();
        Console.WriteLine("Scanning for IP address conflicts...");
        Console.WriteLine();

        var pingOptions   = new PingOptions(ttl: 32, dontFragment: true);
        var pingTimeout   = TimeSpan.FromSeconds(2);
        var pingConflicts = new List<NodeDefinition>();
        var pingAttempts  = 2;

        // I'm going to use up to 20 threads at a time here for simplicity
        // rather than doing this as async operations.

        var parallelOptions = new ParallelOptions()
        {
            MaxDegreeOfParallelism = 20
        };

        Parallel.ForEach(hive.Definition.NodeDefinitions.Values, parallelOptions,
            node =>
            {
                using (var ping = new Ping())
                {
                    // We're going to try pinging up to [pingAttempts] times for each node
                    // just in case the network is sketchy and we're losing reply packets.
                    for (int i = 0; i < pingAttempts; i++)
                    {
                        var reply = ping.Send(node.PrivateAddress, (int)pingTimeout.TotalMilliseconds);

                        if (reply.Status == IPStatus.Success)
                        {
                            lock (pingConflicts)
                            {
                                pingConflicts.Add(node);
                            }

                            break;
                        }
                    }
                }
            });

        if (pingConflicts.Count > 0)
        {
            Console.Error.WriteLine($"*** ERROR: Cannot provision the hive because [{pingConflicts.Count}] other");
            Console.Error.WriteLine($"*** machines conflict with the following hive nodes:");
            Console.Error.WriteLine();

            foreach (var node in pingConflicts.OrderBy(n => NetHelper.AddressToUint(IPAddress.Parse(n.PrivateAddress))))
            {
                Console.Error.WriteLine($"{node.PrivateAddress, 16}: {node.Name}");
            }

            Program.Exit(1);
        }
    }

    //-----------------------------------------------------------------
    // Perform basic environment provisioning.  This creates basic hive components
    // such as virtual machines, networks, load balancers, public IP addresses, security
    // groups,... as required for the environment.

    hostingManager = new HostingManagerFactory(() => HostingLoader.Initialize()).GetManager(hive, Program.LogPath);

    if (hostingManager == null)
    {
        Console.Error.WriteLine($"*** ERROR: No hosting manager for the [{hive.Definition.Hosting.Environment}] hosting environment could be located.");
        Program.Exit(1);
    }

    hostingManager.HostUsername = Program.MachineUsername;
    hostingManager.HostPassword = Program.MachinePassword;
    hostingManager.ShowStatus   = !Program.Quiet;
    hostingManager.MaxParallel  = Program.MaxParallel;
    hostingManager.WaitSeconds  = Program.WaitSeconds;

    if (hostingManager.RequiresAdminPrivileges)
    {
        Program.VerifyAdminPrivileges($"Provisioning to [{hive.Definition.Hosting.Environment}] requires elevated administrator privileges.");
    }

    if (!hostingManager.Provision(force))
    {
        Program.Exit(1);
    }

    // Get the mounted drive prefix from the hosting manager.

    hive.Definition.DrivePrefix = hostingManager.DrivePrefix;

    // Ensure that the nodes have valid IP addresses.

    hive.Definition.ValidatePrivateNodeAddresses();

    var ipAddressToServer = new Dictionary<IPAddress, SshProxy<NodeDefinition>>();

    foreach (var node in hive.Nodes.OrderBy(n => n.Name))
    {
        SshProxy<NodeDefinition> duplicateServer;

        if (node.PrivateAddress == IPAddress.Any)
        {
            throw new ArgumentException($"Node [{node.Name}] has not been assigned an IP address.");
        }

        if (ipAddressToServer.TryGetValue(node.PrivateAddress, out duplicateServer))
        {
            throw new ArgumentException($"Nodes [{duplicateServer.Name}] and [{node.Name}] have the same IP address [{node.Metadata.PrivateAddress}].");
        }

        ipAddressToServer.Add(node.PrivateAddress, node);
    }

    //-----------------------------------------------------------------
    // Perform basic node provisioning including operating system updates & configuration,
    // and configure OpenVPN on the manager nodes so that hive setup will be
    // able to reach the nodes on all ports.

    // Write the operation begin marker to all hive node logs.

    hive.LogLine(logBeginMarker);

    var operation = $"Preparing [{hive.Definition.Name}] nodes";

    var controller = new SetupController<NodeDefinition>(operation, hive.Nodes)
    {
        ShowStatus  = !Program.Quiet,
        MaxParallel = Program.MaxParallel
    };

    if (!string.IsNullOrEmpty(packageCacheUri))
    {
        hive.Definition.PackageProxy = packageCacheUri;
    }

    // Prepare the nodes.
    controller.AddWaitUntilOnlineStep(timeout: TimeSpan.FromMinutes(15));

    hostingManager.AddPostProvisionSteps(controller);

    controller.AddStep("verify OS",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);
            CommonSteps.VerifyOS(node);
        });

    controller.AddStep("prepare",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);
            CommonSteps.PrepareNode(node, hive.Definition, shutdown: false);
        },
        stepStaggerSeconds: hive.Definition.Setup.StepStaggerSeconds);

    // Add any VPN configuration steps.

    if (hive.Definition.Vpn.Enabled)
    {
        controller.AddGlobalStep("vpn credentials", () => CreateVpnCredentials());

        controller.AddStep("vpn server",
            (node, stepDelay) =>
            {
                Thread.Sleep(stepDelay);
                ConfigManagerVpn(node);
            },
            node => node.Metadata.IsManager);

        // Add a step to establish a VPN connection if we're provisioning to a cloud.
        // We specifically don't want to do this if we're provisioning to an on-premise
        // datacenter because we're assuming that we're already directly connected to
        // the LAN while preparing and setting up the hive.

        if (hive.Definition.Hosting.IsCloudProvider)
        {
            controller.AddStep("vpn connect",
                (manager, stepDelay) =>
                {
                    Thread.Sleep(stepDelay);

                    // Create a hive login with just enough credentials to connect the VPN.
                    // Note that this isn't really a node specific command but I wanted to
                    // be able to display the connection status somewhere.

                    var vpnLogin = new HiveLogin()
                    {
                        Definition     = hive.Definition,
                        VpnCredentials = vpnCredentials
                    };

                    // Ensure that we don't have an old VPN client for the hive running.

                    HiveHelper.VpnClose(vpnLogin.Definition.Name);

                    // ...and then start a new one.

                    HiveHelper.VpnOpen(vpnLogin,
                        onStatus: message => manager.Status = $"{message}",
                        onError: message => manager.Status = $"ERROR: {message}");
                },
                n => n == hive.FirstManager);
        }

        // Perform any post-VPN setup provisioning required by the hosting provider.

        hostingManager.AddPostVpnSteps(controller);
    }

    if (!controller.Run())
    {
        // Write the operation end/failed marker to all hive node logs.

        hive.LogLine(logFailedMarker);

        Console.Error.WriteLine("*** ERROR: One or more configuration steps failed.");
        Program.Exit(1);
    }

    // Write the hive login file.

    var hiveLoginPath = Program.GetHiveLoginPath(HiveConst.RootUser, hive.Definition.Name);
    var hiveLogin     = new HiveLogin()
    {
        Path                 = hiveLoginPath,
        Username             = HiveConst.RootUser,
        Definition           = hive.Definition,
        SshUsername          = Program.MachineUsername,
        SshPassword          = Program.MachinePassword,
        SshProvisionPassword = Program.MachinePassword,
        SetupPending         = true
    };

    if (hive.Definition.Vpn.Enabled)
    {
        hiveLogin.VpnCredentials = vpnCredentials;
    }

    // Generate the hive certificates.

    const int bitCount  = 2048;
    const int validDays = 365000;   // About 1,000 years.

    if (hiveLogin.HiveCertificate == null)
    {
        var hostnames = new string[]
        {
            $"{hive.Name}.nhive.io",
            $"*.{hive.Name}.nhive.io",
            $"*.neon-vault.{hive.Name}.nhive.io",
            $"*.neon-registry-cache.{hive.Name}.nhive.io",
            $"*.neon-hivemq.{hive.Name}.nhive.io"
        };

        hiveLogin.HiveCertificate = TlsCertificate.CreateSelfSigned(hostnames, bitCount, validDays, issuedBy: "neonHIVE", issuedTo: $"neonHIVE: {hiveDefinition.Name}");

        hiveLogin.HiveCertificate.FriendlyName = $"neonHIVE: {hiveLogin.Definition.Name}";
    }

    // Persist the certificates into the hive login.

    hiveLogin.Save();

    // Write the operation end marker to all hive node logs.

    hive.LogLine(logEndMarker);
}
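// NOTE: Typical invocations, assuming this command is exposed as
// [neon hive prepare] (the options come from the code above; paths and
// URIs are examples):
//
//      # Prepare the hive described by a definition file:
//      neon hive prepare my-hive.json
//
//      # Override the package cache and re-provision over a previous attempt:
//      neon hive prepare my-hive.json --package-cache=http://10.0.0.5:3142 --force
//
//      # Remove any cached virtual machine templates:
//      neon hive prepare --remove-templates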
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    var hiveLogin = Program.ConnectHive();

    // Process the command arguments.

    var nodeDefinitions = new List<NodeDefinition>();

    if (commandLine.Arguments.Length < 1)
    {
        Console.Error.WriteLine("*** ERROR: At least one NODE must be specified.");
        Program.Exit(1);
    }

    if (commandLine.Arguments.Length == 1 && commandLine.Arguments[0] == "+")
    {
        foreach (var manager in hiveLogin.Definition.SortedManagers)
        {
            nodeDefinitions.Add(manager);
        }

        foreach (var worker in hiveLogin.Definition.SortedWorkers)
        {
            nodeDefinitions.Add(worker);
        }
    }
    else
    {
        foreach (var name in commandLine.Arguments)
        {
            NodeDefinition node;

            if (!hiveLogin.Definition.NodeDefinitions.TryGetValue(name, out node))
            {
                Console.Error.WriteLine($"*** ERROR: Node [{name}] is not present in the hive.");
                Program.Exit(1);
            }

            nodeDefinitions.Add(node);
        }
    }

    // Perform the reboots.

    var hive = new HiveProxy(hiveLogin);

    var controller = new SetupController<NodeDefinition>(Program.SafeCommandLine, hive.Nodes.Where(n => nodeDefinitions.Exists(nd => nd.Name == n.Name)))
    {
        ShowStatus  = !Program.Quiet,
        MaxParallel = Program.MaxParallel
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);
    controller.AddWaitUntilOnlineStep();

    controller.AddStep("reboot nodes",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);

            node.Status = "rebooting";
            node.Reboot(wait: true);

            node.Status = $"stabilizing ({Program.WaitSeconds}s)";
            Thread.Sleep(TimeSpan.FromSeconds(Program.WaitSeconds));
        });

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: The reboot for one or more nodes failed.");
        Program.Exit(1);
    }
}
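// NOTE: Example invocations, assuming this command is exposed as [neon reboot]:
//
//      # Reboot specific nodes:
//      neon reboot worker-0 worker-1
//
//      # Reboot all managers and workers:
//      neon reboot +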
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Help();
        Program.Exit(0);
    }

    // Special-case handling of the [--remove-templates] option.

    if (commandLine.HasOption("--remove-templates"))
    {
        Console.WriteLine("Removing cached virtual machine templates.");

        foreach (var fileName in Directory.GetFiles(KubeHelper.VmTemplatesFolder, "*.*", SearchOption.TopDirectoryOnly))
        {
            File.Delete(fileName);
        }

        Program.Exit(0);
    }

    // Implement the command.

    if (KubeHelper.CurrentContext != null)
    {
        Console.Error.WriteLine("*** ERROR: You are logged into a cluster.  You need to logout before preparing another.");
        Program.Exit(1);
    }

    if (commandLine.Arguments.Length == 0)
    {
        Console.Error.WriteLine($"*** ERROR: CLUSTER-DEF expected.");
        Program.Exit(1);
    }

    clusterDefPath = commandLine.Arguments[0];
    force          = commandLine.GetFlag("--force");

    ClusterDefinition.ValidateFile(clusterDefPath, strict: true);

    var clusterDefinition = ClusterDefinition.FromFile(clusterDefPath, strict: true);

    clusterDefinition.Provisioner = $"neon-cli:{Program.Version}";  // Identify this tool/version as the cluster provisioner.

    // NOTE:
    //
    // Azure has a more restrictive password policy and our default
    // machine password does not meet the requirements:
    //
    //      The supplied password must be between 6-72 characters long and must
    //      satisfy at least 3 of password complexity requirements from the following:
    //
    //          1. Contains an uppercase character
    //          2. Contains a lowercase character
    //          3. Contains a numeric digit
    //          4. Contains a special character
    //          5. Control characters are not allowed
    //
    // It's also probably not a great idea to use a static password when
    // provisioning VMs in public clouds because it might be possible for
    // somebody to use this fact to SSH into nodes while the cluster is
    // being set up and before we set the secure password at the end.
    //
    // This is less problematic for non-cloud environments because it's
    // likely that the hosts won't initially be able to receive inbound
    // Internet traffic and besides, we need to have a known password
    // embedded into the VM templates.
    //
    // We're going to handle this for cloud environments by looking
    // at [Program.MachinePassword].  If this is set to the default
    // machine password then we're going to replace it with a randomly
    // generated password with a few extra characters to ensure that
    // it meets the target cloud's password requirements.  We'll use
    // a non-default password if the operator specified one.

    if (clusterDefinition.Hosting.IsCloudProvider && Program.MachinePassword == KubeConst.DefaulVmTemplatePassword)
    {
        Program.MachinePassword = NeonHelper.GetCryptoRandomPassword(20);

        // Append a string that guarantees that the generated password meets
        // cloud minimum requirements.

        Program.MachinePassword += ".Aa0";
    }

    // NOTE: Cluster prepare starts new log files.

    cluster = new ClusterProxy(clusterDefinition, Program.CreateNodeProxy<NodeDefinition>, appendToLog: false, defaultRunOptions: RunOptions.LogOutput | RunOptions.FaultOnError);

    if (KubeHelper.Config.GetContext(cluster.Definition.Name) != null)
    {
        Console.Error.WriteLine($"*** ERROR: A context named [{cluster.Definition.Name}] already exists.");
        Program.Exit(1);
    }

    // Configure global options.
    if (commandLine.HasOption("--unredacted"))
    {
        cluster.SecureRunOptions = RunOptions.None;
    }

    var failed = false;

    try
    {
        KubeHelper.Desktop.StartOperationAsync($"Preparing [{cluster.Name}]").Wait();

        //-----------------------------------------------------------------
        // Try to ensure that no servers are already deployed on the IP addresses defined
        // for cluster nodes because provisioning over an existing cluster will likely
        // corrupt the existing cluster and also probably prevent the new cluster from
        // provisioning correctly.
        //
        // Note that we're not going to perform this check for the [Machine] hosting
        // environment because we're expecting the bare machines to be already running
        // with the assigned addresses and we're also not going to do this for cloud
        // environments because we're assuming that the cluster will run in its own
        // private network so there'll be no possibility of conflicts.

        if (cluster.Definition.Hosting.Environment != HostingEnvironments.Machine && !cluster.Definition.Hosting.IsCloudProvider)
        {
            Console.WriteLine();
            Console.WriteLine("Scanning for IP address conflicts...");
            Console.WriteLine();

            var pingOptions   = new PingOptions(ttl: 32, dontFragment: true);
            var pingTimeout   = TimeSpan.FromSeconds(2);
            var pingConflicts = new List<NodeDefinition>();
            var pingAttempts  = 2;

            // I'm going to use up to 20 threads at a time here for simplicity
            // rather than doing this as async operations.

            var parallelOptions = new ParallelOptions()
            {
                MaxDegreeOfParallelism = 20
            };

            Parallel.ForEach(cluster.Definition.NodeDefinitions.Values, parallelOptions,
                node =>
                {
                    using (var pinger = new Pinger())
                    {
                        // We're going to try pinging up to [pingAttempts] times for each node
                        // just in case the network is sketchy and we're losing reply packets.

                        for (int i = 0; i < pingAttempts; i++)
                        {
                            var reply = pinger.SendPingAsync(node.PrivateAddress, (int)pingTimeout.TotalMilliseconds).Result;

                            if (reply.Status == IPStatus.Success)
                            {
                                lock (pingConflicts)
                                {
                                    pingConflicts.Add(node);
                                }

                                break;
                            }
                        }
                    }
                });

            if (pingConflicts.Count > 0)
            {
                Console.Error.WriteLine($"*** ERROR: Cannot provision the cluster because [{pingConflicts.Count}] other");
                Console.Error.WriteLine($"*** machines conflict with the following cluster nodes:");
                Console.Error.WriteLine();

                foreach (var node in pingConflicts.OrderBy(n => NetHelper.AddressToUint(IPAddress.Parse(n.PrivateAddress))))
                {
                    Console.Error.WriteLine($"{node.PrivateAddress, 16}: {node.Name}");
                }

                Program.Exit(1);
            }
        }

        //-----------------------------------------------------------------
        // Perform basic environment provisioning.  This creates basic cluster components
        // such as virtual machines, networks, load balancers, public IP addresses, security
        // groups,... as required for the environment.
        hostingManager = new HostingManagerFactory(() => HostingLoader.Initialize()).GetManager(cluster, Program.LogPath);

        if (hostingManager == null)
        {
            Console.Error.WriteLine($"*** ERROR: No hosting manager for the [{cluster.Definition.Hosting.Environment}] hosting environment could be located.");
            Program.Exit(1);
        }

        hostingManager.HostUsername = Program.MachineUsername;
        hostingManager.HostPassword = Program.MachinePassword;
        hostingManager.ShowStatus   = !Program.Quiet;
        hostingManager.MaxParallel  = Program.MaxParallel;
        hostingManager.WaitSeconds  = Program.WaitSeconds;

        if (hostingManager.RequiresAdminPrivileges)
        {
            Program.VerifyAdminPrivileges($"Provisioning to [{cluster.Definition.Hosting.Environment}] requires elevated administrator privileges.");
        }

        if (!hostingManager.Provision(force))
        {
            Program.Exit(1);
        }

        // Get the mounted drive prefix from the hosting manager.

        cluster.Definition.DrivePrefix = hostingManager.DrivePrefix;

        // Ensure that the nodes have valid IP addresses.

        cluster.Definition.ValidatePrivateNodeAddresses();

        var ipAddressToServer = new Dictionary<IPAddress, SshProxy<NodeDefinition>>();

        foreach (var node in cluster.Nodes.OrderBy(n => n.Name))
        {
            SshProxy<NodeDefinition> duplicateServer;

            if (node.PrivateAddress == IPAddress.Any)
            {
                throw new ArgumentException($"Node [{node.Name}] has not been assigned an IP address.");
            }

            if (ipAddressToServer.TryGetValue(node.PrivateAddress, out duplicateServer))
            {
                throw new ArgumentException($"Nodes [{duplicateServer.Name}] and [{node.Name}] have the same IP address [{node.Metadata.PrivateAddress}].");
            }

            ipAddressToServer.Add(node.PrivateAddress, node);
        }

        // We're going to use the masters as package caches unless the user
        // specifies something else.

        packageCaches = commandLine.GetOption("--package-cache");   // This overrides the cluster definition, if specified.

        if (!string.IsNullOrEmpty(packageCaches))
        {
            cluster.Definition.PackageProxy = packageCaches;
        }

        if (string.IsNullOrEmpty(cluster.Definition.PackageProxy))
        {
            var sbProxies = new StringBuilder();

            foreach (var master in cluster.Masters)
            {
                sbProxies.AppendWithSeparator($"{master.PrivateAddress}:{NetworkPorts.AppCacherNg}");
            }

            cluster.Definition.PackageProxy = sbProxies.ToString();
        }

        //-----------------------------------------------------------------
        // Prepare the cluster.

        // Write the operation begin marker to all cluster node logs.

        cluster.LogLine(logBeginMarker);

        var nodesText = cluster.Nodes.Count() == 1 ? "node" : "nodes";
        var operation = $"Preparing [{cluster.Definition.Name}] {nodesText}";

        var controller = new SetupController<NodeDefinition>(operation, cluster.Nodes)
        {
            ShowStatus  = !Program.Quiet,
            MaxParallel = Program.MaxParallel
        };

        controller.AddGlobalStep("setup details",
            () =>
            {
                using (var client = new HeadendClient())
                {
                    kubeSetupInfo = client.GetSetupInfoAsync(cluster.Definition).Result;
                }
            });

        // Prepare the nodes.

        controller.AddWaitUntilOnlineStep(timeout: TimeSpan.FromMinutes(15));

        hostingManager.AddPostProvisionSteps(controller);

        controller.AddStep("verify OS", CommonSteps.VerifyOS);

        controller.AddStep("prepare",
            (node, stepDelay) =>
            {
                Thread.Sleep(stepDelay);
                CommonSteps.PrepareNode(node, cluster.Definition, kubeSetupInfo, shutdown: false);
            },
            stepStaggerSeconds: cluster.Definition.Setup.StepStaggerSeconds);

        if (!controller.Run())
        {
            // Write the operation end/failed marker to all cluster node logs.

            cluster.LogLine(logFailedMarker);

            Console.Error.WriteLine("*** ERROR: One or more configuration steps failed.");
            Program.Exit(1);
        }

        // Persist the cluster context extension.
        var contextExtensionsPath = KubeHelper.GetContextExtensionPath((KubeContextName)$"{KubeConst.RootUser}@{clusterDefinition.Name}");
        var contextExtension      = new KubeContextExtension(contextExtensionsPath)
        {
            ClusterDefinition = clusterDefinition,
            SshUsername       = Program.MachineUsername,
            SshPassword       = Program.MachinePassword,
            SetupDetails      = new KubeSetupDetails() { SetupPending = true }
        };

        contextExtension.Save();

        // Write the operation end marker to all cluster node logs.

        cluster.LogLine(logEndMarker);
    }
    catch
    {
        failed = true;
        throw;
    }
    finally
    {
        if (!failed)
        {
            KubeHelper.Desktop.EndOperationAsync($"Cluster [{cluster.Name}] has been prepared and is ready for setup.").Wait();
        }
        else
        {
            KubeHelper.Desktop.EndOperationAsync($"Cluster [{cluster.Name}] prepare has failed.", failed: true).Wait();
        }
    }
}
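// NOTE: Example invocations, assuming this command is exposed as
// [neon cluster prepare] (mirroring the hive variant above):
//
//      # Prepare the cluster described by a definition file:
//      neon cluster prepare my-cluster.json
//
//      # Override the package caches (otherwise the masters are used):
//      neon cluster prepare my-cluster.json --package-cache=10.0.0.5:3142
//
//      # Remove any cached virtual machine templates:
//      neon cluster prepare --remove-templates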
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    var hiveLogin = Program.ConnectHive();

    // Process the command options.

    var isText      = false;
    var permissions = new LinuxPermissions("440");

    if (commandLine.GetOption("--text") != null)
    {
        isText = true;
    }

    var chmod = commandLine.GetOption("--chmod");

    if (!string.IsNullOrEmpty(chmod))
    {
        if (!LinuxPermissions.TryParse(chmod, out permissions))
        {
            Console.Error.WriteLine("*** ERROR: Invalid Linux file permissions.");
            Program.Exit(1);
        }
    }

    // Process the command arguments.

    List<NodeDefinition> nodeDefinitions = new List<NodeDefinition>();

    string source;
    string target;

    if (commandLine.Arguments.Length < 1)
    {
        Console.Error.WriteLine("*** ERROR: SOURCE file was not specified.");
        Program.Exit(1);
    }

    source = commandLine.Arguments[0];

    if (commandLine.Arguments.Length < 2)
    {
        Console.Error.WriteLine("*** ERROR: TARGET file was not specified.");
        Program.Exit(1);
    }

    target = commandLine.Arguments[1];

    if (commandLine.Arguments.Length == 2)
    {
        nodeDefinitions.Add(hiveLogin.Definition.Managers.First());
    }
    else if (commandLine.Arguments.Length == 3 && commandLine.Arguments[2] == "+")
    {
        foreach (var manager in hiveLogin.Definition.SortedManagers)
        {
            nodeDefinitions.Add(manager);
        }

        foreach (var worker in hiveLogin.Definition.SortedWorkers)
        {
            nodeDefinitions.Add(worker);
        }
    }
    else
    {
        foreach (var name in commandLine.Shift(2).Arguments)
        {
            NodeDefinition node;

            if (!hiveLogin.Definition.NodeDefinitions.TryGetValue(name, out node))
            {
                Console.Error.WriteLine($"*** ERROR: Node [{name}] is not present in the hive.");
                Program.Exit(1);
            }

            nodeDefinitions.Add(node);
        }
    }

    if (!File.Exists(source))
    {
        Console.Error.WriteLine($"*** ERROR: File [{source}] does not exist.");
        Program.Exit(1);
    }

    // Perform the upload.

    var hive = new HiveProxy(hiveLogin);

    var controller = new SetupController<NodeDefinition>(Program.SafeCommandLine, hive.Nodes.Where(n => nodeDefinitions.Exists(nd => nd.Name == n.Name)))
    {
        ShowStatus  = !Program.Quiet,
        MaxParallel = Program.MaxParallel
    };

    controller.SetDefaultRunOptions(RunOptions.FaultOnError);
    controller.AddWaitUntilOnlineStep();

    controller.AddStep("upload",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);

            node.Status = "uploading";

            if (isText)
            {
                node.UploadText(target, File.ReadAllText(source, Encoding.UTF8), tabStop: 4, outputEncoding: Encoding.UTF8);
            }
            else
            {
                using (var stream = new FileStream(source, FileMode.Open, FileAccess.Read))
                {
                    node.Upload(target, stream);
                }
            }

            node.Status = $"set permissions: {permissions}";
            node.SudoCommand("chmod", permissions, target);
        });

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: The upload to one or more nodes failed.");
        Program.Exit(1);
    }
}
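// NOTE: Example invocations, assuming this command is exposed as [neon upload]
// (the [--text], [--chmod], and [+] conventions come from the code above):
//
//      # Upload a binary file to the first manager (the default target):
//      neon upload app.bin /usr/local/bin/app.bin
//
//      # Upload a text file to all managers and workers with [600] permissions:
//      neon upload --text --chmod=600 app.conf /etc/app.conf +
//
//      # Upload to specific nodes:
//      neon upload app.conf /etc/app.conf worker-0 worker-1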