/// <summary>
/// Verifies Consul health.
/// </summary>
/// <param name="node">The manager node.</param>
/// <param name="hiveDefinition">The hive definition.</param>
private static void CheckConsul(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition)
{
    node.Status = "checking: consul";

    // Verify that the daemon is running by asking the service manager
    // for its status.  Only systemd is supported at this point.

    switch (Program.ServiceManager)
    {
        case ServiceManager.Systemd:

            {
                var output = node.SudoCommand("systemctl status consul", RunOptions.LogOutput).OutputText;

                if (!output.Contains("Active: active (running)"))
                {
                    // BUGFIX: corrected the misspelled "deamon" in the fault message
                    // and dropped the unnecessary string interpolation.

                    node.Fault("Consul daemon is not running.");
                    return;
                }
            }
            break;

        default:

            throw new NotImplementedException();
    }
}
/// <summary>
/// Removes a dashboard.
/// </summary>
/// <param name="commandLine">The command line.</param>
private void Remove(CommandLine commandLine)
{
    // The dashboard name is the first positional argument after the command.

    var dashboardName = commandLine.Arguments.ElementAtOrDefault(1);

    if (string.IsNullOrEmpty(dashboardName))
    {
        Console.Error.WriteLine("*** ERROR: Expected a NAME argument.");
        Program.Exit(1);
    }

    if (!HiveDefinition.IsValidName(dashboardName) || reserved.Contains(dashboardName))
    {
        Console.Error.WriteLine($"*** ERROR: [{dashboardName}] is not a valid dashboard name.");
        Program.Exit(1);
    }

    // Dashboard names are case insensitive; normalize before the lookup.

    dashboardName = dashboardName.ToLowerInvariant();

    if (hive.Dashboard.Get(dashboardName) == null)
    {
        Console.Error.WriteLine($"*** ERROR: Dashboard [{dashboardName}] does not exist.");
        Program.Exit(1);
    }

    hive.Dashboard.Remove(dashboardName);
    Console.WriteLine($"Removed [{dashboardName}] dashboard.");
}
/// <summary>
/// Generates the PowerDNS Recursor hosts file for a node.  This will be uploaded
/// to <b>/etc/powerdns/hosts</b>.
/// </summary>
/// <param name="hiveDefinition">The hive definition.</param>
/// <param name="nodeDefinition">The target node definition.</param>
/// <returns>The host definitions.</returns>
private static string GetPowerDnsHosts(HiveDefinition hiveDefinition, NodeDefinition nodeDefinition)
{
    var sbHosts = new StringBuilder();

    sbHosts.AppendLineLinux("# PowerDNS Recursor authoritatively answers for [*.HIVENAME.nhive.io] hostnames.");
    sbHosts.AppendLineLinux("# on the local node using these mappings.");
    sbHosts.AppendLineLinux();

    // Consul: the bare Consul hostname resolves to this node itself, while each
    // [MANAGER.<consul-hostname>] entry resolves to that manager's address.

    sbHosts.AppendLineLinux("# Internal hive Consul mappings:");
    sbHosts.AppendLineLinux();
    sbHosts.AppendLineLinux($"{GetHostsFormattedAddress(nodeDefinition)} {hiveDefinition.Hostnames.Consul}");

    foreach (var manager in hiveDefinition.Managers)
    {
        sbHosts.AppendLineLinux($"{GetHostsFormattedAddress(manager)} {manager.Name}.{hiveDefinition.Hostnames.Consul}");
    }

    // Vault: same pattern as the Consul mappings above.

    sbHosts.AppendLineLinux();
    sbHosts.AppendLineLinux("# Internal hive Vault mappings:");
    sbHosts.AppendLineLinux();
    sbHosts.AppendLineLinux($"{GetHostsFormattedAddress(nodeDefinition)} {hiveDefinition.Hostnames.Vault}");

    foreach (var manager in hiveDefinition.Managers)
    {
        sbHosts.AppendLineLinux($"{GetHostsFormattedAddress(manager)} {manager.Name}.{hiveDefinition.Hostnames.Vault}");
    }

    // Registry cache entries are emitted only when the hive deploys
    // registry caches on the manager nodes.

    if (hiveDefinition.Docker.RegistryCache)
    {
        sbHosts.AppendLineLinux();
        sbHosts.AppendLineLinux("# Internal hive registry cache related mappings:");
        sbHosts.AppendLineLinux();

        foreach (var manager in hiveDefinition.Managers)
        {
            sbHosts.AppendLineLinux($"{GetHostsFormattedAddress(manager)} {manager.Name}.{hiveDefinition.Hostnames.RegistryCache}");
        }
    }

    // Log pipeline: the Elasticsearch data hostname resolves to the local node.

    if (hiveDefinition.Log.Enabled)
    {
        sbHosts.AppendLineLinux();
        sbHosts.AppendLineLinux("# Internal hive log pipeline related mappings:");
        sbHosts.AppendLineLinux();
        sbHosts.AppendLineLinux($"{GetHostsFormattedAddress(nodeDefinition)} {hiveDefinition.Hostnames.LogEsData}");
    }

    // HiveMQ: map every node labeled as hosting RabbitMQ.

    sbHosts.AppendLineLinux();
    sbHosts.AppendLineLinux("# Internal hive RabbitMQ related mappings:");
    sbHosts.AppendLineLinux();

    foreach (var node in hiveDefinition.SortedNodes.Where(n => n.Labels.HiveMQ))
    {
        sbHosts.AppendLineLinux($"{GetHostsFormattedAddress(node)} {node.Name}.{hiveDefinition.Hostnames.HiveMQ}");
    }

    return(sbHosts.ToString());
}
/// <summary>
/// Verifies that a hive worker or pet node is healthy.
/// </summary>
/// <param name="node">The server node.</param>
/// <param name="hiveDefinition">The hive definition.</param>
public static void CheckWorkersOrPet(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition)
{
    Covenant.Requires<ArgumentNullException>(node != null);
    Covenant.Requires<ArgumentException>(node.Metadata.IsWorker || node.Metadata.IsPet);
    Covenant.Requires<ArgumentNullException>(hiveDefinition != null);

    // Run the individual health checks in order, stopping as soon as
    // one of them faults the node.

    var healthChecks = new Action<SshProxy<NodeDefinition>, HiveDefinition>[]
    {
        CheckWorkerNtp,
        CheckDocker,
        CheckConsul,
        CheckVault
    };

    foreach (var healthCheck in healthChecks)
    {
        if (node.IsFaulted)
        {
            break;
        }

        healthCheck(node, hiveDefinition);
    }

    node.Status = "healthy";
}
/// <summary>
/// Sets a dashboard.
/// </summary>
/// <param name="commandLine">The command line.</param>
private void Set(CommandLine commandLine)
{
    // NAME and URL are positional arguments; title, folder and description
    // come from optional command line options.

    var name = commandLine.Arguments.ElementAtOrDefault(1);
    var url  = commandLine.Arguments.ElementAtOrDefault(2);

    if (string.IsNullOrEmpty(name))
    {
        Console.Error.WriteLine("*** ERROR: Expected a NAME argument.");
        Program.Exit(1);
    }

    if (!HiveDefinition.IsValidName(name) || reserved.Contains(name))
    {
        Console.Error.WriteLine($"*** ERROR: [{name}] is not a valid dashboard name.");
        Program.Exit(1);
    }

    // Dashboard names are stored in lowercase.

    name = name.ToLowerInvariant();

    if (string.IsNullOrEmpty(url))
    {
        Console.Error.WriteLine("*** ERROR: Expected a URL argument.");
        Program.Exit(1);
    }

    var dashboard = new HiveDashboard()
    {
        Name        = name,
        Title       = commandLine.GetOption("--title"),
        Folder      = commandLine.GetOption("--folder"),
        Url         = url,
        Description = commandLine.GetOption("--description")
    };

    // Validate against the hive definition and report every problem found
    // before giving up.

    var errors = dashboard.Validate(hive.Definition);

    if (errors.Count > 0)
    {
        foreach (var error in errors)
        {
            Console.Error.WriteLine($"*** ERROR: {error}");
        }

        Program.Exit(1);
    }

    hive.Dashboard.Set(dashboard);
    Console.WriteLine();
    Console.WriteLine($"Saved [{name}] dashboard.");
}
/// <summary>
/// Verifies Docker health.
/// </summary>
/// <param name="node">The target hive node.</param>
/// <param name="hiveDefinition">The hive definition.</param>
private static void CheckDocker(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition)
{
    node.Status = "checking: docker";

    // Ping the Docker daemon with [docker info]; a non-zero exit code means
    // the daemon is unhealthy or unreachable.

    var infoResult = node.SudoCommand("docker info");

    if (infoResult.ExitCode == 0)
    {
        return;
    }

    node.Fault($"Docker: {infoResult.AllText}");
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    var arguments = commandLine.Arguments;

    if (arguments.Length < 1)
    {
        Console.Error.WriteLine("*** ERROR: HIVE-DEF is required.");
        Program.Exit(1);
    }

    // Parsing with [strict: true] performs full validation; any problem
    // throws out of the command.

    HiveDefinition.FromFile(arguments[0], strict: true);

    Console.WriteLine("");
    Console.WriteLine("*** The hive definition is OK.");
}
/// <summary>
/// Uploads a resource file to the remote server after performing any necessary preprocessing.
/// </summary>
/// <typeparam name="TMetadata">The node metadata type.</typeparam>
/// <param name="node">The remote node.</param>
/// <param name="hiveDefinition">The hive definition or <c>null</c>.</param>
/// <param name="file">The resource file.</param>
/// <param name="targetPath">The target path on the remote server.</param>
private static void UploadFile<TMetadata>(this SshProxy<TMetadata> node, HiveDefinition hiveDefinition, ResourceFiles.File file, string targetPath)
    where TMetadata : class
{
    using (var input = file.ToStream())
    {
        if (file.HasVariables)
        {
            // We need to expand any variables.  Note that if we don't have a
            // hive definition or for undefined variables, we're going to
            // have the variables expand to the empty string.

            using (var msExpanded = new MemoryStream())
            {
                using (var writer = new StreamWriter(msExpanded))
                {
                    var preprocessReader =
                        new PreprocessReader(new StreamReader(input))
                        {
                            DefaultVariable = string.Empty,
                            ExpandVariables = true,
                            ProcessCommands = false,
                            StripComments   = false
                        };

                    if (hiveDefinition != null)
                    {
                        SetHiveVariables(preprocessReader, hiveDefinition, node.Metadata as NodeDefinition);
                    }

                    // Copy the preprocessed lines into the in-memory expansion buffer.

                    foreach (var line in preprocessReader.Lines())
                    {
                        writer.WriteLine(line);
                    }

                    // Flush and rewind before uploading.  The upload happens inside
                    // the writer's [using] block so the underlying memory stream is
                    // still open when [UploadText] reads it.

                    writer.Flush();

                    msExpanded.Position = 0;

                    node.UploadText(targetPath, msExpanded, tabStop: 4, outputEncoding: Encoding.UTF8);
                }
            }
        }
        else
        {
            // No variables to expand: upload the resource text as-is.

            node.UploadText(targetPath, input, tabStop: 4, outputEncoding: Encoding.UTF8);
        }
    }
}
/// <summary>
/// Verifies Vault health for a node.
/// </summary>
/// <param name="node">The node.</param>
/// <param name="hiveDefinition">The hive definition.</param>
private static void CheckVault(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition)
{
    // $todo(jeff.lill): Implement this.
    //
    // NOTE(review): This method is currently a stub.  The early [return]
    // below makes everything that follows unreachable until the todo
    // above is addressed.

    return;

    node.Status = "checking: vault";

    // This is a minimal health test that just verifies that Vault
    // is listening for requests.  We're going to ping the local
    // Vault instance at [/v1/sys/health].
    //
    // Note that this should return a 500 status code with some
    // JSON content.  The reason for this is because we have not
    // yet initialized and unsealed the vault.

    var targetUrl = $"https://{node.Metadata.PrivateAddress}:{hiveDefinition.Vault.Port}/v1/sys/health?standbycode=200";

    using (var client = new HttpClient())
    {
        try
        {
            var response = client.GetAsync(targetUrl).Result;

            // Accept OK (healthy) or InternalServerError (uninitialized/sealed);
            // anything else means Vault isn't responding as expected.

            if (response.StatusCode != HttpStatusCode.OK && response.StatusCode != HttpStatusCode.InternalServerError)
            {
                node.Fault($"Vault: Unexpected HTTP response status [{(int) response.StatusCode}={response.StatusCode}]");
                return;
            }

            // The health endpoint is expected to return JSON content.

            if (!response.Content.Headers.ContentType.MediaType.Equals("application/json", StringComparison.OrdinalIgnoreCase))
            {
                node.Fault($"Vault: Unexpected content type [{response.Content.Headers.ContentType.MediaType}]");
                return;
            }
        }
        catch (Exception e)
        {
            node.Fault($"Vault: {NeonHelper.ExceptionError(e)}");
        }
    }
}
/// <summary>
/// Initializes the VPN certificate authority, and creates the OpenVPN server and
/// root client certificates.
/// </summary>
/// <param name="defPath">Path to the hive definition file.</param>
/// <param name="targetFolder">The output folder.</param>
private void InitializeCA(string defPath, string targetFolder)
{
    DirectNotAllowed();

    var hiveDefinition = HiveDefinition.FromFile(defPath, strict: true);

    // This implements the steps described here:
    //
    //      http://www.macfreek.nl/memory/Create_a_OpenVPN_Certificate_Authority

    // Initialize
    // ----------

    Directory.CreateDirectory(caFolder);

    // Initialize the file paths.
    //
    // IMPORTANT:
    //
    // Do not change these file names because the [VpnCaFiles] class
    // depends on this naming convention.

    var indexPath     = Path.Combine(caFolder, "index.txt");
    var caSignCnfPath = Path.Combine(caFolder, "ca-sign.cnf");
    var caCnfPath     = Path.Combine(caFolder, "ca.cnf");
    var caKeyPath     = Path.Combine(caFolder, "ca.key");
    var caReqPath     = Path.Combine(caFolder, "ca.req");
    var caCrtPath     = Path.Combine(caFolder, "ca.crt");
    var dhParamPath   = Path.Combine(caFolder, "dhparam.pem");
    var serverCnfPath = Path.Combine(caFolder, "server.cnf");
    var serverKeyPath = Path.Combine(caFolder, "server.key");
    var serverReqPath = Path.Combine(caFolder, "server.req");
    var serverCrtPath = Path.Combine(caFolder, "server.crt");
    var rootCnfPath   = Path.Combine(caFolder, $"{HiveConst.RootUser}.cnf");
    var rootReqPath   = Path.Combine(caFolder, $"{HiveConst.RootUser}.req");
    var rootKeyPath   = Path.Combine(caFolder, $"{HiveConst.RootUser}.key");
    var rootCrtPath   = Path.Combine(caFolder, $"{HiveConst.RootUser}.crt");
    var taKeyPath     = Path.Combine(caFolder, "ta.key");
    var crlnumberPath = Path.Combine(caFolder, "crlnumber");
    var crlPath       = Path.Combine(caFolder, "crl.pem");

    // Create an empty certificate index file.

    File.WriteAllText(indexPath, string.Empty);

    // CA Configuration Files
    // ----------------------

    // Create configuration files.  In our setup, [ca-sign.cnf] contains the configuration
    // for signing certificates.  We only use it in conjunction with the [openssl ca command].
    // It describes the folder structure within the [ca] directory, the location of support
    // files for the CA, as well as properties of the signed certificates (duration,
    // restricted usage) as well as the policy for the name ("distinguished name") of signed
    // certificates.  Finally, it lists the policy for certification revocation lists.  For
    // this small-scale CA, there is no public URL to download the CRL; I plan to distribute
    // it manually.

    var caSignCnf =
$@"# ca-sign.cnf
# This configuration file is used by the 'ca' command, to create signed certificates.

[ ca ]
default_ca = CA_default # The default ca section

[ CA_default ]
dir = {$"{this.caFolder}"} # Where everything is kept
certs = $dir/ # Where the issued certs are kept
crl_dir = $dir/ # Where the issued crl are kept
new_certs_dir = $dir/ # default place for new certs
private_key = $dir/ca.key # The private key
certificate = $dir/ca.crt # The CA root certificate
database = $dir/index.txt # List of signed certificates
serial = $dir/serial # The current serial number
crlnumber = $dir/crlnumber # the current crl number
crl = $dir/crl.pem # The current CRL
RANDFILE = $dir/.rand # private random number file
unique_subject = no # allow multiple certificates with same subject.
default_md = sha256 # Use hash algorithm specified in the request
default_days = 365000 # client certificates last about 1000 years
default_crl_days = 30 # How often clients should download the CRL
#x509_extensions = X509_ca # The x509 extensions for the root certificate
#x509_extensions = X509_server # The x509 extensions for a server certificate
x509_extensions = X509_client # The x509 extensions for a client certificate

# These options control what fields from the distinguished name to show before signing.
# They are required to make sure all fields are shown.
name_opt = ca_default # Subject Name options
cert_opt = ca_default # Certificate field options

copy_extensions = copy # Accept requested extensions

policy = policy_dn

[ X509_ca ]
# X509v3 extensions for the root certificate
basicConstraints = CA:TRUE
nsCertType = sslCA # restrict the usage
keyUsage = keyCertSign, cRLSign # restrict the usage
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer:always
#subjectAltName = email:move # Move email address from DN to extensions
#crlDistributionPoints = URI:http://www.example.com/example_ca.crl

[ X509_server ]
# X509v3 extensions for server certificates
basicConstraints = CA:FALSE
nsCertType = server # restrict the usage
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth # restrict the usage
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
#subjectAltName = email:move # Move email address from DN to extensions
#crlDistributionPoints = URI:http://www.example.com/example_ca.crl

[ X509_client ]
# X509v3 extensions for client certificates
basicConstraints = CA:FALSE
nsCertType = client # restrict the usage
keyUsage = digitalSignature # restrict the usage
extendedKeyUsage = clientAuth # restrict the usage
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
#subjectAltName = email:move # Move email address from DN to extensions
#crlDistributionPoints = URI:http://www.example.com/example_ca.crl

[ policy_dn ]
countryName = supplied # required parameter, any value allowed
stateOrProvinceName = optional
localityName = optional
organizationName = match # required, and must match root certificate
organizationalUnitName = optional
commonName = supplied # required parameter, any value allowed
emailAddress = optional # email in DN is deprecated, use subjectAltName
";

    File.WriteAllText(caSignCnfPath, caSignCnf);

    // The x509_extensions sections are not really required by openssl or openvpn, but
    // adds extra security by telling OpenVPN that clients may connect to servers
    // only.  nsCertType is required for the OpenVPN option ns-cert-type server|client;
    // keyUsage and extendedKeyUsage are required for remote-cert-tls server|client.

    // [ca.cnf] defines the distinguished name for the certificate authority.  It also
    // contains the key length (2048 is recommended nowadays, over the default of 1024),
    // and if the key should be encrypted.

    var caCnf =
$@"# ca.cnf
# This configuration file is used by the 'req' command when the root certificates is created.

[ req ]
default_bits = 2048 # default strength of client certificates
default_md = sha256
encrypt_key = no # ""no"" is equivalent to -nodes
prompt = no
string_mask = utf8only
distinguished_name = ca_distinguished_name # root certificate name
req_extensions = req_cert_extensions
#attributes = req_attributes

[ ca_distinguished_name ] # root certificate name
countryName = {hiveDefinition.Vpn.CertCountryCode}
#stateOrProvinceName = Utrecht
#localityName = Hometown
organizationName = {hiveDefinition.Vpn.CertOrganization}
#organizationalUnitName = My Department Name
commonName = ca
#emailAddress = [email protected] # email in DN is deprecated, use subjectAltName

[ req_cert_extensions ]
nsCertType = server
#subjectAltName = email:[email protected]
";

    File.WriteAllText(caCnfPath, caCnf);

    // Note that in the above examples, the email address is specified in the [subjectAltName],
    // instead of in the distinguished name.  This is in accordance with PKIX standards.

    // Build CA certificate
    // --------------------

    // If your CA should be valid after the year 2038, be sure to use openssl 0.9.9 or higher.
    //
    // First create a request with the correct name, and then self-sign a certificate and create
    // a serial number file.

    Program.Execute("openssl", "req", "-new",
        "-config", caCnfPath,
        "-keyout", caKeyPath,
        "-out", caReqPath);

    Program.Execute("openssl", "ca", "-batch",
        "-config", caSignCnfPath,
        "-extensions", "X509_ca",
        "-days", 365000,
        "-create_serial",
        "-selfsign",
        "-keyfile", caKeyPath,
        "-in", caReqPath,
        "-out", caCrtPath);

    // Generate Prime Numbers (the Diffie Hellman parameters)
    // ------------------------------------------------------

    Program.Execute("openssl", "dhparam",
        "-out", dhParamPath,
        "2048");

    // Build server certificate
    // ------------------------

    // First, create a configuration for the server, similar to [ca.cnf]:

    var serverCnf =
$@"# server.cnf
# This configuration file is used by the 'req' command when the server certificate is created.

[ req ]
default_bits = 2048
default_md = sha256
encrypt_key = no
prompt = no
string_mask = utf8only
distinguished_name = server_distinguished_name
req_extensions = req_cert_extensions
#attributes = req_attributes

[ server_distinguished_name ]
countryName = {hiveDefinition.Vpn.CertCountryCode}
#stateOrProvinceName =
#localityName =
organizationName = {hiveDefinition.Vpn.CertOrganization}
#organizationalUnitName = My Department Name
commonName = server
#emailAddress =

[ req_cert_extensions ]
nsCertType = server
#subjectAltName = email:[email protected]
";

    File.WriteAllText(serverCnfPath, serverCnf);

    // Create the server request and private key.

    Program.Execute("openssl", "req", "-new",
        "-config", serverCnfPath,
        "-keyout", serverKeyPath,
        "-out", serverReqPath);

    // Create the server certificate.

    Program.Execute("openssl", "ca", "-batch",
        "-config", caSignCnfPath,
        "-extensions", "X509_server",
        "-in", serverReqPath,
        "-out", serverCrtPath);

    // Build the [root] client certificate.  The config file holds the root user's
    // credentials so it's deleted in the [finally] even if a step fails.

    try
    {
        File.WriteAllText(rootCnfPath, GetClientConfig(hiveDefinition, HiveConst.RootUser, rootPrivileges: true));

        Program.Execute("openssl", "req", "-new",
            "-config", rootCnfPath,
            "-keyout", rootKeyPath,
            "-out", rootReqPath);

        Program.Execute("openssl", "ca", "-batch",
            "-config", caSignCnfPath,
            "-out", rootCrtPath,
            "-in", rootReqPath);
    }
    finally
    {
        if (File.Exists(rootCnfPath))
        {
            File.Delete(rootCnfPath);
        }
    }

    // Initialize the Certificate Revocation List (CRL) number file
    // and then generate the initial (empty) CRL.

    File.WriteAllText(crlnumberPath, "00");

    Program.Execute("openssl", "ca",
        "-config", caSignCnfPath,
        "-gencrl",
        "-out", crlPath);

    // As one final additional step, we're going to generate a shared
    // key that OpenVPN can use to quickly reject packets that didn't
    // come from a client with the key.  This provides a decent amount
    // of DOS protection, especially for VPNs that only use the UDP
    // transport.

    Program.Execute("openvpn", "--genkey", "--secret", taKeyPath);

    // Copy all of the CA files to the target folder.

    Directory.CreateDirectory(targetFolder);

    foreach (var file in Directory.GetFiles(caFolder, "*.*", SearchOption.TopDirectoryOnly))
    {
        File.Copy(file, Path.Combine(targetFolder, Path.GetFileName(file)));
    }
}
/// <summary>
/// Sets hive definition related variables for a <see cref="PreprocessReader"/>.
/// </summary>
/// <param name="preprocessReader">The reader.</param>
/// <param name="hiveDefinition">The hive definition.</param>
/// <param name="nodeDefinition">The target node definition.</param>
private static void SetHiveVariables(PreprocessReader preprocessReader, HiveDefinition hiveDefinition, NodeDefinition nodeDefinition)
{
    Covenant.Requires<ArgumentNullException>(preprocessReader != null);
    Covenant.Requires<ArgumentNullException>(hiveDefinition != null);

    // Generate the manager node variables in sorted order.  The variable
    // names will be formatted as:
    //
    //      NEON_MANAGER_#
    //
    // where [#] is the zero-based index of the node.  This is compatible
    // with the [getmanager] function included in the script.
    //
    // Each variable defines an associative array with [name] and [address]
    // properties.
    //
    // Then generate the NEON_MANAGER_NAMES and NEON_MANAGER_ADDRESSES arrays.
    //
    // NOTE: We need to use Linux-style line endings.

    var sbManagers                  = new StringBuilder();
    var sbManagerNamesArray         = new StringBuilder();
    var sbManagerAddressesArray     = new StringBuilder();
    var sbPeerManagerAddressesArray = new StringBuilder();
    var sbManagerNodesSummary       = new StringBuilder();
    var index                       = 0;
    var managerNameWidth            = 0;

    sbManagerNamesArray.Append("(");
    sbManagerAddressesArray.Append("(");
    sbPeerManagerAddressesArray.Append("(");

    foreach (var manager in hiveDefinition.SortedManagers)
    {
        sbManagers.Append($"declare -x -A NEON_MANAGER_{index}\n");
        sbManagers.Append($"NEON_MANAGER_{index}=( [\"name\"]=\"{manager.Name}\" [\"address\"]=\"{manager.PrivateAddress}\" )\n");
        sbManagers.Append("\n");
        index++;

        sbManagerNamesArray.Append($" \"{manager.Name}\"");
        sbManagerAddressesArray.Append($" \"{manager.PrivateAddress}\"");

        // Peers exclude the node the variables are being generated for.

        if (manager != nodeDefinition)
        {
            sbPeerManagerAddressesArray.Append($" \"{manager.PrivateAddress}\"");
        }

        managerNameWidth = Math.Max(manager.Name.Length, managerNameWidth);
    }

    sbManagerNamesArray.Append(" )");
    sbManagerAddressesArray.Append(" )");
    sbPeerManagerAddressesArray.Append(" )");

    // Build the summary [echo] lines, padding each manager name to the
    // widest name so the addresses line up.

    foreach (var manager in hiveDefinition.SortedManagers)
    {
        var nameField = manager.Name;

        if (nameField.Length < managerNameWidth)
        {
            nameField += new string(' ', managerNameWidth - nameField.Length);
        }

        // The blanks below are just enough so that the "=" sign lines up
        // with the summary output from [hive.conf.sh].

        if (sbManagerNodesSummary.Length == 0)
        {
            sbManagerNodesSummary.Append($" echo \"NEON_MANAGER_NODES = {nameField}: {manager.PrivateAddress}\" 1>&2\n");
        }
        else
        {
            sbManagerNodesSummary.Append($" echo \" {nameField}: {manager.PrivateAddress}\" 1>&2\n");
        }
    }

    // NOTE(review): This loop appears to duplicate the NEON_MANAGER_# declarations
    // emitted by the first [SortedManagers] loop above, continuing [index] where
    // that loop left off (so managers get declared twice, at indices 0..n-1 and
    // n..2n-1).  Looks like a copy/paste remnant -- confirm before removing.

    foreach (var manager in hiveDefinition.SortedManagers)
    {
        sbManagers.Append($"declare -x -A NEON_MANAGER_{index}\n");
        sbManagers.Append($"NEON_MANAGER_{index}=( [\"name\"]=\"{manager.Name}\" [\"address\"]=\"{manager.PrivateAddress}\" )\n");
        index++;
    }

    sbManagers.Append("\n");
    sbManagers.Append($"declare -x NEON_MANAGER_NAMES={sbManagerNamesArray}\n");
    sbManagers.Append($"declare -x NEON_MANAGER_ADDRESSES={sbManagerAddressesArray}\n");
    sbManagers.Append("\n");

    if (hiveDefinition.Managers.Count() > 1)
    {
        sbManagers.Append($"declare -x NEON_MANAGER_PEERS={sbPeerManagerAddressesArray}\n");
    }
    else
    {
        sbManagers.Append("export NEON_MANAGER_PEERS=\"\"\n");
    }

    // Generate the manager and worker NTP time sources.

    var managerTimeSources = string.Empty;
    var workerTimeSources  = string.Empty;

    if (hiveDefinition.TimeSources != null)
    {
        foreach (var source in hiveDefinition.TimeSources)
        {
            if (string.IsNullOrWhiteSpace(source))
            {
                continue;
            }

            if (managerTimeSources.Length > 0)
            {
                managerTimeSources += " ";
            }

            managerTimeSources += $"\"{source}\"";
        }
    }

    // Workers sync time from the managers.

    foreach (var manager in hiveDefinition.SortedManagers)
    {
        if (workerTimeSources.Length > 0)
        {
            workerTimeSources += " ";
        }

        workerTimeSources += $"\"{manager.PrivateAddress}\"";
    }

    if (string.IsNullOrWhiteSpace(managerTimeSources))
    {
        // Default to reasonable public time sources.

        managerTimeSources = "\"pool.ntp.org\"";
    }

    // Generate the Docker daemon command line options.

    var sbDockerOptions = new StringBuilder();

    if (Program.ServiceManager == ServiceManager.Systemd)
    {
        sbDockerOptions.AppendWithSeparator($"-H unix:///var/run/docker.sock");
    }
    else
    {
        throw new NotImplementedException();
    }

    if (hiveDefinition.DebugMode)
    {
        // Expose the Docker Swarm REST API on the node's internal hive IP address so it
        // can be reached by apps like [neon-proxy-manager] running off the manager node
        // (potentially in the debugger).

        sbDockerOptions.AppendWithSeparator($"-H tcp://{nodeDefinition.PrivateAddress}:{NetworkPorts.Docker}");
    }

    preprocessReader.Set("docker.options", sbDockerOptions);

    // Define the Consul command line options.

    var consulOptions = string.Empty;

    if (hiveDefinition.Dashboard.Consul)
    {
        // NOTE(review): [consulOptions] was just initialized to empty so this
        // length check can never be true; harmless, but likely left over from
        // a longer option list.

        if (consulOptions.Length > 0)
        {
            consulOptions += " ";
        }

        consulOptions += "-ui";
    }

    // Format the network upstream nameservers as semicolon separated
    // to be compatible with the PowerDNS Recursor [forward-zones-recurse]
    // configuration setting.
    //
    // Note that manager nodes will recurse to upstream (external) DNS
    // servers and workers/pets will recurse to the managers so they can
    // dynamically pickup hive DNS changes.

    if (hiveDefinition.Network?.Nameservers == null)
    {
        // $hack(jeff.lill):
        //
        // [Network] will be null if we're just preparing servers, not doing full setup
        // so we'll set this to the defaults to avoid null references below.

        hiveDefinition.Network = new NetworkOptions();
    }

    var nameservers = string.Empty;

    if (nodeDefinition.Role == NodeRole.Manager)
    {
        for (int i = 0; i < hiveDefinition.Network.Nameservers.Length; i++)
        {
            if (i > 0)
            {
                nameservers += ";";
            }

            nameservers += hiveDefinition.Network.Nameservers[i].Trim();
        }
    }
    else
    {
        foreach (var manager in hiveDefinition.SortedManagers)
        {
            if (nameservers.Length > 0)
            {
                nameservers += ";";
            }

            nameservers += manager.PrivateAddress;
        }
    }

    // Set the variables.

    preprocessReader.Set("load-hive-conf", HiveHostFolders.Config + "/hive.conf.sh --echo-summary");
    preprocessReader.Set("load-hive-conf-quiet", HiveHostFolders.Config + "/hive.conf.sh");

    SetBashVariable(preprocessReader, "hive.provisioner", hiveDefinition.Provisioner);
    SetBashVariable(preprocessReader, "hive.rootuser", Program.MachineUsername);

    SetBashVariable(preprocessReader, "node.driveprefix", hiveDefinition.DrivePrefix);

    SetBashVariable(preprocessReader, "neon.folders.archive", HiveHostFolders.Archive);
    SetBashVariable(preprocessReader, "neon.folders.bin", HiveHostFolders.Bin);
    SetBashVariable(preprocessReader, "neon.folders.exec", HiveHostFolders.Exec);
    SetBashVariable(preprocessReader, "neon.folders.config", HiveHostFolders.Config);
    SetBashVariable(preprocessReader, "neon.folders.scripts", HiveHostFolders.Scripts);
    SetBashVariable(preprocessReader, "neon.folders.secrets", HiveHostFolders.Secrets);
    SetBashVariable(preprocessReader, "neon.folders.setup", HiveHostFolders.Setup);
    SetBashVariable(preprocessReader, "neon.folders.source", HiveHostFolders.Source);
    SetBashVariable(preprocessReader, "neon.folders.state", HiveHostFolders.State);
    SetBashVariable(preprocessReader, "neon.folders.tmpfs", HiveHostFolders.Tmpfs);
    SetBashVariable(preprocessReader, "neon.folders.tools", HiveHostFolders.Tools);

    preprocessReader.Set("neon.hosts.neon-log-es-data", hiveDefinition.Hostnames.LogEsData);

    SetBashVariable(preprocessReader, "nodes.manager.count", hiveDefinition.Managers.Count());
    preprocessReader.Set("nodes.managers", sbManagers);
    preprocessReader.Set("nodes.manager.summary", sbManagerNodesSummary);

    SetBashVariable(preprocessReader, "ntp.manager.sources", managerTimeSources);
    SetBashVariable(preprocessReader, "ntp.worker.sources", workerTimeSources);

    if (!hiveDefinition.BareDocker)
    {
        // When we're not deploying bare Docker, the manager nodes will use the
        // configured name servers as the hive's upstream DNS and the worker
        // nodes will be configured to query the name servers.

        if (nodeDefinition.IsManager)
        {
            preprocessReader.Set("net.nameservers", nameservers);
        }
        else
        {
            // NOTE(review): For non-managers this rebuilds the manager address list
            // from [Managers] even though [nameservers] was already built from
            // [SortedManagers] above for this case -- confirm whether the unsorted
            // order here is intentional.

            var managerNameservers = string.Empty;

            foreach (var manager in hiveDefinition.Managers)
            {
                if (managerNameservers.Length > 0)
                {
                    managerNameservers += ";";
                }

                managerNameservers += manager.PrivateAddress.ToString();
            }

            preprocessReader.Set("net.nameservers", managerNameservers);
        }
    }
    else
    {
        // All servers use the configured upstream nameservers when we're not
        // deploying the Local DNS.

        preprocessReader.Set("net.nameservers", nameservers);
    }

    SetBashVariable(preprocessReader, "net.powerdns.recursor.package.uri", hiveDefinition.Network.PdnsRecursorPackageUri);
    preprocessReader.Set("net.powerdns.recursor.hosts", GetPowerDnsHosts(hiveDefinition, nodeDefinition));

    var dockerPackageUri = new HeadendClient().GetDockerPackageUri(hiveDefinition.Docker.Version, out var packageMessage);

    if (dockerPackageUri == null)
    {
        // $todo(jeff.lill:
        //
        // This should probably be replaced with hive definition validation code.

        Console.WriteLine($"*** ERROR: {packageMessage}");
        Program.Exit(1);
    }

    SetBashVariable(preprocessReader, "docker.packageuri", dockerPackageUri);

    SetBashVariable(preprocessReader, "consul.version", hiveDefinition.Consul.Version);
    SetBashVariable(preprocessReader, "consul.options", consulOptions);
    SetBashVariable(preprocessReader, "consul.address", $"{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}");
    SetBashVariable(preprocessReader, "consul.fulladdress", $"https://{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}");
    SetBashVariable(preprocessReader, "consul.hostname", hiveDefinition.Hostnames.Consul);
    SetBashVariable(preprocessReader, "consul.port", hiveDefinition.Consul.Port);
    SetBashVariable(preprocessReader, "consul.tls", hiveDefinition.Consul.Tls ? "true" : "false");

    SetBashVariable(preprocessReader, "vault.version", hiveDefinition.Vault.Version);
    SetBashVariable(preprocessReader, "vault.download", $"https://releases.hashicorp.com/vault/{hiveDefinition.Vault.Version}/vault_{hiveDefinition.Vault.Version}_linux_amd64.zip");
    SetBashVariable(preprocessReader, "vault.hostname", hiveDefinition.Hostnames.Vault);
    SetBashVariable(preprocessReader, "vault.port", hiveDefinition.Vault.Port);
    SetBashVariable(preprocessReader, "vault.consulpath", "vault/");
    SetBashVariable(preprocessReader, "vault.maximumlease", hiveDefinition.Vault.MaximimLease);
    SetBashVariable(preprocessReader, "vault.defaultlease", hiveDefinition.Vault.DefaultLease);
    SetBashVariable(preprocessReader, "vault.dashboard", hiveDefinition.Dashboard.Vault ? "true" : "false");

    SetBashVariable(preprocessReader, "log.enabled", hiveDefinition.Log.Enabled);

    //-----------------------------------------------------------------
    // Configure the variables for the [setup-disk.sh] script.

    switch (hiveDefinition.Hosting.Environment)
    {
        case HostingEnvironments.Aws:

            throw new NotImplementedException("$todo(jeff.lill)");

        case HostingEnvironments.Azure:

            switch (Program.OSProperties.TargetOS)
            {
                case TargetOS.Ubuntu_16_04:

                    // The primary Azure data drive is [/dev/sdb] so any mounted drive will be [/dev/sdc].

                    if (nodeDefinition.Azure.HardDriveCount == 0)
                    {
                        SetBashVariable(preprocessReader, "data.disk", "PRIMARY");
                    }
                    else
                    {
                        SetBashVariable(preprocessReader, "data.disk", "/dev/sdc");
                    }
                    break;

                default:

                    throw new NotImplementedException($"Support for [{Program.OSProperties.TargetOS}] is not implemented.");
            }
            break;

        case HostingEnvironments.Google:

            throw new NotImplementedException("$todo(jeff.lill)");

        case HostingEnvironments.HyperV:
        case HostingEnvironments.HyperVDev:
        case HostingEnvironments.Machine:
        case HostingEnvironments.Unknown:
        case HostingEnvironments.XenServer:

            // VMs for all of these environments simply host their data on the
            // primary OS disk only for now, the idea being that this disk
            // can be sized up as necessary.  There are valid scenarios where
            // folks would like the data on a different drive (e.g. for better
            // performance).  I'm putting support for that on the backlog.

            SetBashVariable(preprocessReader, "data.disk", "PRIMARY");
            break;

        default:

            throw new NotImplementedException($"The [{hiveDefinition.Hosting.Environment}] hosting environment is not implemented.");
    }
}
/// <summary>
/// Initializes a near virgin server with the basic capabilities required
/// for a neonHIVE host node.
/// </summary>
/// <param name="node">The target hive node.</param>
/// <param name="hiveDefinition">The hive definition.</param>
/// <param name="shutdown">Optionally shuts down the node.</param>
public static void PrepareNode(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition, bool shutdown = false)
{
    // The state file written at the end of this method makes the whole
    // operation idempotent: if it exists, a previous run already finished.
    if (node.FileExists($"{HiveHostFolders.State}/setup/prepared"))
    {
        return;     // Already prepared
    }

    //-----------------------------------------------------------------
    // Ensure that the hive host folders exist.

    node.CreateHiveHostFolders();

    //-----------------------------------------------------------------
    // Package manager configuration.

    if (!hiveDefinition.HiveNode.AllowPackageManagerIPv6)
    {
        // Restrict the [apt] package manager to using IPv4 to communicate
        // with the package mirrors, since IPv6 often doesn't work.

        node.UploadText("/etc/apt/apt.conf.d/99-force-ipv4-transport", "Acquire::ForceIPv4 \"true\";");
        node.SudoCommand("chmod 644 /etc/apt/apt.conf.d/99-force-ipv4-transport");
    }

    // Configure [apt] to retry failed downloads.

    node.UploadText("/etc/apt/apt.conf.d/99-retries", $"APT::Acquire::Retries \"{hiveDefinition.HiveNode.PackageManagerRetries}\";");
    node.SudoCommand("chmod 644 /etc/apt/apt.conf.d/99-retries");

    //-----------------------------------------------------------------
    // Other configuration.

    ConfigureOpenSSH(node);
    node.UploadConfigFiles(hiveDefinition);
    node.UploadResources(hiveDefinition);

    // NOTE(review): [hiveDefinition] is dereferenced unconditionally above
    // (e.g. [hiveDefinition.HiveNode...]), so this null check looks
    // vestigial -- confirm whether a null definition is actually supported.
    if (hiveDefinition != null)
    {
        ConfigureEnvironmentVariables(node, hiveDefinition);
    }

    node.SudoCommand("safe-apt-get update");

    // Run the node preparation script exactly once, rebooting afterwards
    // so that any kernel or system configuration changes take effect.
    node.InvokeIdempotentAction("setup/prep-node",
        () =>
        {
            node.Status = "run: setup-prep-node.sh";
            node.SudoCommand("setup-prep-node.sh");
            node.Reboot(wait: true);
        });

    // We need to upload the hive configuration and initialize drives attached
    // to the node.  We're going to assume that these are not already initialized.

    // $todo(jeff.lill):
    //
    // We may need an option that allows an operator to pre-build a hardware
    // based drive array or something.  I'm going to defer this to later and
    // concentrate on commodity hardware and cloud deployments for now.

    // NOTE(review): environment variables were already configured above when
    // [hiveDefinition] is non-null, so this second call appears redundant --
    // verify before removing.
    CommonSteps.ConfigureEnvironmentVariables(node, hiveDefinition);

    node.Status = "run: setup-disk.sh";
    node.SudoCommand("setup-disk.sh");

    // Clear any DHCP leases to be super sure that cloned node
    // VMs will obtain fresh IP addresses.

    node.Status = "clear DHCP leases";
    node.SudoCommand("rm -f /var/lib/dhcp/*");

    // Indicate that the node has been fully prepared.

    node.SudoCommand($"touch {HiveHostFolders.State}/setup/prepared");

    // Shutdown the node if requested.

    if (shutdown)
    {
        node.Status = "shutdown";
        node.SudoCommand("shutdown 0", RunOptions.Defaults | RunOptions.Shutdown);
    }
}
/// <inheritdoc/>
public void Run(ModuleContext context)
{
    var hive = HiveHelper.Hive;

    if (!context.ValidateArguments(context.Arguments, validModuleArgs))
    {
        context.Failed = true;
        return;
    }

    // Obtain common arguments.

    if (!context.Arguments.TryGetValue<string>("name", out var name))
    {
        throw new ArgumentException($"[name] module argument is required.");
    }

    if (!HiveDefinition.IsValidName(name))
    {
        throw new ArgumentException($"[name={name}] is not a valid certificate name.");
    }

    if (!context.Arguments.TryGetValue<string>("state", out var state))
    {
        state = "present";      // [present] is the default state.
    }

    state = state.ToLowerInvariant();

    if (!context.Arguments.TryGetValue<bool>("force", out var force))
    {
        force = false;
    }

    if (context.HasErrors)
    {
        return;
    }

    // We have the required arguments, so perform the operation.

    if (!context.Login.HasVaultRootCredentials)
    {
        throw new ArgumentException("Access Denied: Root Vault credentials are required.");
    }

    switch (state)
    {
        case "absent":

            context.WriteLine(AnsibleVerbosity.Trace, $"Vault: checking for [{name}] certificate");

            if (hive.Certificate.Get(name) != null)
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"Vault: [{name}] certificate exists");

                if (context.CheckMode)
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Certificate [{name}] will be removed when CHECK-MODE is disabled.");
                }
                else
                {
                    // FIX: corrected the misspelled "certyificate" in this trace message.
                    context.WriteLine(AnsibleVerbosity.Trace, $"Removing [{name}] certificate.");
                    hive.Certificate.Remove(name);
                    context.WriteLine(AnsibleVerbosity.Info, $"[{name}] certificate removed");
                }

                // Nothing actually changes while CHECK-MODE is enabled.
                context.Changed = !context.CheckMode;
            }
            else
            {
                context.WriteLine(AnsibleVerbosity.Info, $"[{name}] certificate does not exist");
            }
            break;

        case "present":

            if (!context.Arguments.TryGetValue<string>("value", out var value))
            {
                throw new ArgumentException($"[value] module argument is required.");
            }

            var certificate = TlsCertificate.Parse(value);      // This validates the certificate/private key

            context.WriteLine(AnsibleVerbosity.Trace, $"Reading [{name}] certificate");

            var existingCert = hive.Certificate.Get(name);
            var changed      = false;

            if (existingCert == null)
            {
                context.WriteLine(AnsibleVerbosity.Info, $"[{name}] certificate does not exist");
                context.Changed = !context.CheckMode;
                changed         = true;
            }
            else if (!NeonHelper.JsonEquals(existingCert, certificate) || force)
            {
                // FIX: corrected the grammar of the "does exists but is different" message.
                context.WriteLine(AnsibleVerbosity.Info, $"[{name}] certificate exists but is different");
                context.Changed = !context.CheckMode;
                changed         = true;
            }
            else
            {
                context.WriteLine(AnsibleVerbosity.Info, $"[{name}] certificate is unchanged");
            }

            if (changed)
            {
                if (context.CheckMode)
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Certificate [{name}] will be updated when CHECK-MODE is disabled.");
                }
                else
                {
                    context.WriteLine(AnsibleVerbosity.Trace, $"Saving [{name}] certificate");
                    hive.Certificate.Set(name, certificate);
                    context.WriteLine(AnsibleVerbosity.Info, $"[{name}] certificate saved");
                }
            }
            break;

        default:

            throw new ArgumentException($"[state={state}] is not one of the valid choices: [present] or [absent].");
    }
}
/// <summary>
/// Deploys RabbitMQ to a cluster node as a container.
/// </summary>
/// <param name="node">The target hive node.</param>
private void DeployHiveMQ(SshProxy<NodeDefinition> node)
{
    // Deploy RabbitMQ only on the labeled nodes.

    if (node.Metadata.Labels.HiveMQ)
    {
        // Build a comma separated list of fully qualified RabbitMQ hostnames so we
        // can pass them as the CLUSTER environment variable.

        var rabbitNodes = hive.Definition.SortedNodes.Where(n => n.Labels.HiveMQ).ToList();
        var sbCluster   = new StringBuilder();

        foreach (var rabbitNode in rabbitNodes)
        {
            sbCluster.AppendWithSeparator($"{rabbitNode.Name}@{rabbitNode.Name}.{hive.Definition.Hostnames.HiveMQ}", ",");
        }

        var hipeCompileArgs = new List<string>();

        if (hive.Definition.HiveMQ.Precompile)
        {
            hipeCompileArgs.Add("--env");
            hipeCompileArgs.Add("RABBITMQ_HIPE_COMPILE=1");
        }

        var managementPluginArgs = new List<string>();

        if (node.Metadata.Labels.HiveMQManager)
        {
            // FIX: these arguments were previously appended to [hipeCompileArgs]
            // (an apparent copy/paste error), leaving [managementPluginArgs]
            // always empty even though it's what's passed for this purpose below.
            managementPluginArgs.Add("--env");
            managementPluginArgs.Add("MANAGEMENT_PLUGIN=true");
        }

        // $todo(jeff.lill):
        //
        // I was unable to get TLS working correctly for RabbitMQ.  I'll come back
        // and revisit this later:
        //
        //      https://github.com/jefflill/NeonForge/issues/319

        ServiceHelper.StartContainer(node, "neon-hivemq", hive.Definition.Image.HiveMQ, RunOptions.FaultOnError,
            new CommandBundle(
                "docker run",
                "--detach",
                "--name", "neon-hivemq",
                "--env", $"CLUSTER_NAME={hive.Definition.Name}",
                "--env", $"CLUSTER_NODES={sbCluster}",
                "--env", $"CLUSTER_PARTITION_MODE=autoheal",
                "--env", $"NODENAME={node.Name}@{node.Name}.{hive.Definition.Hostnames.HiveMQ}",
                "--env", $"RABBITMQ_USE_LONGNAME=true",
                "--env", $"RABBITMQ_DEFAULT_USER=sysadmin",
                "--env", $"RABBITMQ_DEFAULT_PASS=password",
                "--env", $"RABBITMQ_NODE_PORT={HiveHostPorts.HiveMQAMQP}",
                "--env", $"RABBITMQ_DIST_PORT={HiveHostPorts.HiveMQDIST}",
                "--env", $"RABBITMQ_MANAGEMENT_PORT={HiveHostPorts.HiveMQManagement}",
                "--env", $"RABBITMQ_ERLANG_COOKIE={hive.Definition.HiveMQ.ErlangCookie}",
                "--env", $"RABBITMQ_VM_MEMORY_HIGH_WATERMARK={hive.Definition.HiveMQ.RamHighWatermark}",
                hipeCompileArgs,
                managementPluginArgs,
                "--env", $"RABBITMQ_DISK_FREE_LIMIT={HiveDefinition.ValidateSize(hive.Definition.HiveMQ.DiskFreeLimit, typeof(HiveMQOptions), nameof(hive.Definition.HiveMQ.DiskFreeLimit))}",
                //"--env", $"RABBITMQ_SSL_CERTFILE=/etc/neon/certs/hive.crt",
                //"--env", $"RABBITMQ_SSL_KEYFILE=/etc/neon/certs/hive.key",
                "--env", $"ERL_EPMD_PORT={HiveHostPorts.HiveMQEPMD}",
                "--mount", "type=volume,source=neon-hivemq,target=/var/lib/rabbitmq",
                "--mount", "type=bind,source=/etc/neon/certs,target=/etc/neon/certs,readonly",
                "--publish", $"{HiveHostPorts.HiveMQEPMD}:{HiveHostPorts.HiveMQEPMD}",
                "--publish", $"{HiveHostPorts.HiveMQAMQP}:{HiveHostPorts.HiveMQAMQP}",
                "--publish", $"{HiveHostPorts.HiveMQDIST}:{HiveHostPorts.HiveMQDIST}",
                "--publish", $"{HiveHostPorts.HiveMQManagement}:{HiveHostPorts.HiveMQManagement}",
                "--memory", HiveDefinition.ValidateSize(hive.Definition.HiveMQ.RamLimit, typeof(HiveMQOptions), nameof(hive.Definition.HiveMQ.RamLimit)),
                "--restart", "always",
                ServiceHelper.ImagePlaceholderArg));

        // Wait for the RabbitMQ node to report that it's ready.

        var timeout  = TimeSpan.FromMinutes(4);
        var pollTime = TimeSpan.FromSeconds(2);

        node.Status = "hivemq: waiting";

        try
        {
            NeonHelper.WaitFor(
                () =>
                {
                    // Health check failures are expected while the node spins up,
                    // so clear [FaultOnError] to avoid faulting the node proxy.
                    var readyResponse = node.SudoCommand($"docker exec neon-hivemq rabbitmqctl node_health_check -n {node.Name}@{node.Name}.{hive.Definition.Hostnames.HiveMQ}", node.DefaultRunOptions & ~RunOptions.FaultOnError);

                    return readyResponse.ExitCode == 0;
                },
                timeout: timeout,
                pollTime: pollTime);
        }
        catch (TimeoutException)
        {
            node.Fault($"RabbitMQ not ready after waiting [{timeout}].");
            return;
        }

        node.Status = "hivemq: ready";
    }
}
/// <summary>
/// Configures the global environment variables that describe the configuration
/// of the server within the hive.
/// </summary>
/// <param name="node">The server to be updated.</param>
/// <param name="hiveDefinition">The hive definition.</param>
public static void ConfigureEnvironmentVariables(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition)
{
    node.Status = "environment variables";

    // We're going to append the new variables to the existing Linux [/etc/environment] file.

    var sb = new StringBuilder();

    // Append all of the existing environment variables except for those
    // whose names start with "NEON_" to make the operation idempotent.
    //
    // Note that we're going to special case PATH to add any Neon
    // related directories.

    using (var currentEnvironmentStream = new MemoryStream())
    {
        node.Download("/etc/environment", currentEnvironmentStream);

        currentEnvironmentStream.Position = 0;

        using (var reader = new StreamReader(currentEnvironmentStream))
        {
            foreach (var line in reader.Lines())
            {
                if (line.StartsWith("PATH="))
                {
                    if (!line.Contains(HiveHostFolders.Tools))
                    {
                        sb.AppendLine(line + $":{HiveHostFolders.Tools}");
                    }
                    else
                    {
                        sb.AppendLine(line);
                    }
                }
                else if (!line.StartsWith("NEON_"))
                {
                    sb.AppendLine(line);
                }
            }
        }
    }

    // Add the global neonHIVE related environment variables.

    sb.AppendLine($"NEON_HIVE_PROVISIONER={hiveDefinition.Provisioner}");
    sb.AppendLine($"NEON_HIVE={hiveDefinition.Name}");
    sb.AppendLine($"NEON_DATACENTER={hiveDefinition.Datacenter.ToLowerInvariant()}");
    sb.AppendLine($"NEON_ENVIRONMENT={hiveDefinition.Environment.ToString().ToLowerInvariant()}");

    if (hiveDefinition.Hosting != null)
    {
        sb.AppendLine($"NEON_HOSTING={hiveDefinition.Hosting.Environment.ToMemberString().ToLowerInvariant()}");
    }

    sb.AppendLine($"NEON_NODE_NAME={node.Name}");
    sb.AppendLine($"NEON_NODE_FS={hiveDefinition.HiveFS.Enabled.ToString().ToLowerInvariant()}");

    if (node.Metadata != null)
    {
        sb.AppendLine($"NEON_NODE_ROLE={node.Metadata.Role}");
        sb.AppendLine($"NEON_NODE_IP={node.Metadata.PrivateAddress}");
        sb.AppendLine($"NEON_NODE_SSD={node.Metadata.Labels.StorageSSD.ToString().ToLowerInvariant()}");
        sb.AppendLine($"NEON_NODE_SWAP={node.Metadata.Labels.ComputeSwap.ToString().ToLowerInvariant()}");
    }

    var sbNameservers = new StringBuilder();

    foreach (var nameServer in hiveDefinition.Network.Nameservers)
    {
        sbNameservers.AppendWithSeparator(nameServer, ",");
    }

    sb.AppendLine($"NEON_UPSTREAM_DNS=\"{sbNameservers}\"");
    sb.AppendLine($"NEON_APT_PROXY={HiveHelper.GetPackageProxyReferences(hiveDefinition)}");
    sb.AppendLine($"NEON_ARCHIVE_FOLDER={HiveHostFolders.Archive}");
    sb.AppendLine($"NEON_BIN_FOLDER={HiveHostFolders.Bin}");
    sb.AppendLine($"NEON_CONFIG_FOLDER={HiveHostFolders.Config}");
    sb.AppendLine($"NEON_EXEC_FOLDER={HiveHostFolders.Exec}");
    sb.AppendLine($"NEON_SCRIPTS_FOLDER={HiveHostFolders.Scripts}");
    sb.AppendLine($"NEON_SECRETS_FOLDER={HiveHostFolders.Secrets}");
    sb.AppendLine($"NEON_SETUP_FOLDER={HiveHostFolders.Setup}");
    sb.AppendLine($"NEON_SOURCE_FOLDER={HiveHostFolders.Source}");
    sb.AppendLine($"NEON_STATE_FOLDER={HiveHostFolders.State}");
    sb.AppendLine($"NEON_TMPFS_FOLDER={HiveHostFolders.Tmpfs}");
    sb.AppendLine($"NEON_TOOLS_FOLDER={HiveHostFolders.Tools}");

    // Append Consul and Vault addresses.

    // All nodes will be configured such that host processes using the HashiCorp Consul
    // CLI will access the Consul cluster via local Consul instance.  This will be a
    // server for manager nodes and a proxy for workers and pets.

    // FIX: The TLS and non-TLS branches duplicated the [CONSUL_HTTP_ADDR] line and
    // differed only in the SSL flag and URI scheme, so the duplication was
    // consolidated.  The generated text is identical to what was emitted before.

    var consulTls    = hiveDefinition.Consul.Tls;
    var consulScheme = consulTls ? "https" : "http";

    sb.AppendLine($"CONSUL_HTTP_SSL={(consulTls ? "true" : "false")}");
    sb.AppendLine($"CONSUL_HTTP_ADDR={hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}");
    sb.AppendLine($"CONSUL_HTTP_FULLADDR={consulScheme}://{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}");

    // All nodes will be configured such that host processes using the HashiCorp Vault
    // CLI will access the Vault cluster via the [neon-proxy-vault] proxy service
    // by default.

    sb.AppendLine($"VAULT_ADDR={hiveDefinition.VaultProxyUri}");

    if (node.Metadata != null)
    {
        if (node.Metadata.IsManager)
        {
            // Manager hosts may use the [VAULT_DIRECT_ADDR] environment variable to
            // access Vault without going through the [neon-proxy-vault] proxy.  This
            // points to the Vault instance running locally.
            //
            // This is useful when configuring Vault.

            sb.AppendLine($"VAULT_DIRECT_ADDR={hiveDefinition.GetVaultDirectUri(node.Name)}");
        }
        else
        {
            sb.AppendLine($"VAULT_DIRECT_ADDR=");
        }
    }

    // Upload the new environment to the server.

    node.UploadText("/etc/environment", sb.ToString(), tabStop: 4);
}
/// <summary>
/// Implements the built-in <b>neon_dashboard</b> module.
/// </summary>
/// <param name="context">The module context.</param>
public void Run(ModuleContext context)
{
    var hive   = HiveHelper.Hive;
    var consul = HiveHelper.Consul;     // NOTE(review): not referenced below; retained because the
                                        // property getter may establish the hive Consul connection -- verify.

    if (!context.ValidateArguments(context.Arguments, validModuleArgs))
    {
        context.Failed = true;
        return;
    }

    // Obtain common arguments.

    context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [name]");

    if (!context.Arguments.TryGetValue<string>("name", out var name))
    {
        throw new ArgumentException($"[name] module argument is required.");
    }

    if (!HiveDefinition.IsValidName(name))
    {
        throw new ArgumentException($"[{name}] is not a valid dashboard name.");
    }

    context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [state]");

    if (!context.Arguments.TryGetValue<string>("state", out var state))
    {
        state = "present";      // [present] is the default state.
    }

    state = state.ToLowerInvariant();

    if (context.HasErrors)
    {
        return;
    }

    // We have the required arguments, so perform the operation.

    switch (state)
    {
        case "absent":

            context.WriteLine(AnsibleVerbosity.Trace, $"Check if dashboard [{name}] exists.");

            if (hive.Dashboard.Get(name) != null)
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"Dashboard [{name}] already exists.");

                if (context.CheckMode)
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Dashboard [{name}] will be deleted when CHECK-MODE is disabled.");
                }
                else
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Deleting dashboard [{name}].");
                    hive.Dashboard.Remove(name);
                    context.WriteLine(AnsibleVerbosity.Trace, $"Dashboard [{name}] deleted.");
                    context.Changed = true;
                }
            }
            else
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"Dashboard [{name}] does not exist.");
            }
            break;

        case "present":

            // Parse the PRESENT arguments.

            context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [url]");

            // FIX: dropped the redundant [state == "present"] test; this code is
            // only reachable when [state] is "present".
            if (!context.Arguments.TryGetValue<string>("url", out var url))
            {
                throw new ArgumentException($"[url] module argument is required when [state={state}].");
            }

            if (!Uri.TryCreate(url, UriKind.Absolute, out var urlParsed))
            {
                throw new ArgumentException($"[url={url}] is not valid.");
            }

            url = urlParsed.ToString();     // Normalize the URL.

            // These arguments are all optional.

            context.Arguments.TryGetValue<string>("title", out var title);
            context.Arguments.TryGetValue<string>("folder", out var folder);
            context.Arguments.TryGetValue<string>("description", out var description);

            if (context.HasErrors)
            {
                return;
            }

            // Build the dashboard definition from the arguments.

            var newDashboard =
                new HiveDashboard()
                {
                    Name        = name,
                    Title       = title,
                    Folder      = folder,
                    Url         = url,
                    Description = description
                };

            // Validate the dashboard.

            context.WriteLine(AnsibleVerbosity.Trace, "Validating dashboard.");

            var errors = newDashboard.Validate(hive.Definition);

            if (errors.Count > 0)
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"[{errors.Count}] dashboard validation errors.");

                foreach (var error in errors)
                {
                    context.WriteLine(AnsibleVerbosity.Important, error);
                    context.WriteErrorLine(error);
                }

                context.Failed = true;
                return;
            }

            context.WriteLine(AnsibleVerbosity.Trace, "Dashboard is valid.");

            // Try reading any existing dashboard with this name and then determine
            // whether the two versions are actually different.

            context.WriteLine(AnsibleVerbosity.Trace, $"Looking for existing dashboard [{name}]");

            var existingDashboard = hive.Dashboard.Get(name);
            var changed           = false;

            if (existingDashboard != null)
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"Dashboard exists: checking for differences.");

                changed = !NeonHelper.JsonEquals(newDashboard, existingDashboard);

                if (changed)
                {
                    context.WriteLine(AnsibleVerbosity.Trace, $"Dashboards are different.");
                }
                else
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Dashboards are the same.  No need to update.");
                }
            }
            else
            {
                changed = true;
                context.WriteLine(AnsibleVerbosity.Trace, $"Dashboard for [{name}] does not exist.");
            }

            if (changed)
            {
                if (context.CheckMode)
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Dashboard [{name}] will be updated when CHECK-MODE is disabled.");
                }
                else
                {
                    context.WriteLine(AnsibleVerbosity.Trace, $"Updating dashboard.");
                    hive.Dashboard.Set(newDashboard);
                    context.WriteLine(AnsibleVerbosity.Info, $"Dashboard updated.");
                    context.Changed = true;
                }

                // FIX: removed [context.CheckMode = !context.CheckMode;] which
                // incorrectly toggled CHECK-MODE as a side effect of running the
                // module.  [context.Changed] is already maintained above, matching
                // the behavior of the other built-in modules.
            }
            break;

        default:

            throw new ArgumentException($"[state={state}] is not one of the valid choices: [present] or [absent].");
    }
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    // Process the command arguments.

    TlsCertificate certificate;
    string         certName;

    var command = commandLine.Arguments.FirstOrDefault();

    if (command == null)
    {
        Console.WriteLine(usage);
        Program.Exit(1);
    }

    commandLine = commandLine.Shift(1);

    switch (command)
    {
        case "get":

            Program.ConnectHive();

            certName = commandLine.Arguments.FirstOrDefault();

            if (string.IsNullOrEmpty(certName))
            {
                Console.Error.WriteLine("*** ERROR: Expected arguments: NAME");
                Program.Exit(1);
            }

            if (!HiveDefinition.IsValidName(certName))
            {
                Console.Error.WriteLine($"*** ERROR: [{certName}] is not a valid certificate name.");
                Program.Exit(1);
            }

            certificate = HiveHelper.Hive.Certificate.Get(certName);

            if (certificate == null)
            {
                Console.Error.WriteLine($"*** ERROR: Certificate [{certName}] does not exist.");
                Program.Exit(1);
            }

            Console.WriteLine(certificate.CombinedPem);
            break;

        case "join":

            // Combines a separate certificate and private key into a single PEM file.

            if (commandLine.Arguments.Length != 3)
            {
                Console.Error.WriteLine("*** ERROR: Expected arguments: PATH-CERT PATH-KEY PATH-OUTPUT");
                Program.Exit(1);
            }

            certificate = TlsCertificate.Load(commandLine.Arguments[0], commandLine.Arguments[1]);

            File.WriteAllText(commandLine.Arguments[2], certificate.CombinedPem);
            break;

        case "list":
        case "ls":

            Program.ConnectHive();

            var certList = new List<CertInfo>();

            // [checkDate] is non-null when we're filtering to expired or
            // soon-to-expire certificates.

            DateTime? checkDate = null;
            bool      expired   = false;

            if (commandLine.GetOption("--expired") != null)
            {
                checkDate = DateTime.UtcNow;
                expired   = true;
            }
            else if (commandLine.GetOption("--expiring") != null)
            {
                checkDate = DateTime.UtcNow + TimeSpan.FromDays(30);
            }

            // List the certificate key/names and then fetch each one
            // to capture details like the expiration date and covered
            // hostnames.

            foreach (var name in HiveHelper.Hive.Certificate.List())
            {
                certificate = HiveHelper.Hive.Certificate.Get(name);

                if (checkDate.HasValue && certificate.IsValidDate(checkDate))
                {
                    continue;   // Still valid at [checkDate]: filtered out.
                }

                certList.Add(new CertInfo(name, certificate));
            }

            if (checkDate.HasValue && certList.Count == 0)
            {
                Console.WriteLine(expired ? "* No certificates have expired." : "* No certificates are expiring within 30 days.");
                Program.Exit(0);
            }

            if (certList.Count > 0)
            {
                // Render a simple aligned table with columns sized to the
                // widest value in each column.

                var nameHeader       = "Name";
                var validUntilHeader = "Valid Until";
                var hostsHeader      = "Hosts";
                var nameColumnWidth  = Math.Max(nameHeader.Length, certList.Max(ci => ci.Name.Length));
                var dateColumnWidth  = Math.Max(validUntilHeader.Length, certList.Max(ci => ci.ValidUntil.Length));
                var hostColumnWidth  = Math.Max(hostsHeader.Length, certList.Max(ci => ci.Hosts.Length));

                // FIX: the header padding used the [" Name"] literal's length rather than
                // [nameHeader.Length] like the other columns; the value is the same but the
                // expression now matches its siblings.
                Console.WriteLine($"{nameHeader}{new string(' ', nameColumnWidth - nameHeader.Length)}   {validUntilHeader}{new string(' ', dateColumnWidth - validUntilHeader.Length)}   {hostsHeader}");
                Console.WriteLine($"{new string('-', nameColumnWidth)}   {new string('-', dateColumnWidth)}   {new string('-', hostColumnWidth)}");

                foreach (var certInfo in certList.OrderBy(ci => ci.Name.ToLowerInvariant()))
                {
                    Console.WriteLine($"{certInfo.Name}{new string(' ', nameColumnWidth - certInfo.Name.Length)}   {certInfo.ValidUntil}{new string(' ', dateColumnWidth - certInfo.ValidUntil.Length)}   {certInfo.Hosts}");
                }

                if (checkDate.HasValue)
                {
                    // Exit with a non-zero code when filtering found problems so
                    // scripts can detect expired/expiring certificates.
                    Program.Exit(1);
                }
            }
            else
            {
                Console.WriteLine("* No certificates");
            }
            break;

        case "remove":
        case "rm":

            Program.ConnectHive();

            certName = commandLine.Arguments.FirstOrDefault();

            if (string.IsNullOrEmpty(certName))
            {
                Console.Error.WriteLine("*** ERROR: Expected arguments: NAME");
                Program.Exit(1);
            }

            if (!HiveDefinition.IsValidName(certName))
            {
                Console.Error.WriteLine($"*** ERROR: [{certName}] is not a valid certificate name.");
                Program.Exit(1);
            }

            if (HiveHelper.Hive.Certificate.Get(certName) != null)
            {
                HiveHelper.Hive.Certificate.Remove(certName);
                Console.WriteLine($"Certificate [{certName}] was removed.");
            }
            else
            {
                Console.Error.WriteLine($"*** ERROR: [{certName}] does not exist.");
                Program.Exit(1);
            }
            break;

        case "set":

            Program.ConnectHive();

            // NOTE(review): [vault] isn't referenced below; opening it may simply
            // verify that root Vault credentials are available -- confirm.
            using (var vault = HiveHelper.OpenVault(Program.HiveLogin.VaultCredentials.RootToken))
            {
                if (commandLine.Arguments.Length != 2)
                {
                    Console.Error.WriteLine("*** ERROR: Expected arguments: NAME PATH");
                    Program.Exit(1);
                }

                certName = commandLine.Arguments.FirstOrDefault();

                if (string.IsNullOrEmpty(certName))
                {
                    Console.Error.WriteLine("*** ERROR: Expected arguments: NAME");
                    Program.Exit(1);
                }

                if (!HiveDefinition.IsValidName(certName))
                {
                    Console.Error.WriteLine($"*** ERROR: [{certName}] is not a valid certificate name.");
                    Program.Exit(1);
                }

                certificate = TlsCertificate.Load(commandLine.Arguments.ElementAtOrDefault(1));

                certificate.Parse();

                // FIX: both branches of the original [if/else] made the identical
                // [Certificate.Set()] call; consolidated the duplicated call while
                // keeping the distinct added/updated messages.

                var isUpdate = HiveHelper.Hive.Certificate.Get(certName) != null;

                HiveHelper.Hive.Certificate.Set(certName, certificate);
                Console.WriteLine($"Certificate [{certName}] was {(isUpdate ? "updated" : "added")}.");
            }
            break;

        case "split":

            // Splits a combined PEM file into separate certificate and key files.

            if (commandLine.Arguments.Length != 3)
            {
                Console.Error.WriteLine("*** ERROR: Expected arguments: PATH PATH-CERT PATH-KEY");
                Program.Exit(1);
            }

            certificate = TlsCertificate.Load(commandLine.Arguments[0]);

            File.WriteAllText(commandLine.Arguments[1], certificate.CertPem);
            File.WriteAllText(commandLine.Arguments[2], certificate.KeyPem);
            break;

        case "verify":

            VerifyLocalCertificate(commandLine);
            break;

        default:

            Console.Error.WriteLine($"*** ERROR: Unknown command: [{command}]");
            Program.Exit(1);
            break;
    }
}
/// <summary>
/// Returns the OpenSSL client certificate configuration for a named user.
/// </summary>
/// <param name="hiveDefinition">The hive definition.</param>
/// <param name="user">The user name.</param>
/// <param name="rootPrivileges">Indicates whether the user has root hive privileges.</param>
/// <returns>The configuration file text.</returns>
private string GetClientConfig(HiveDefinition hiveDefinition, string user, bool rootPrivileges)
{
    // Make sure the user name is reasonable and doesn't conflict with
    // any of the other ca/cert file names.

    var nameOK = true;

    if (user.Length == 0)
    {
        nameOK = false;
    }

    // Only letters, digits, dashes, underscores and periods are allowed.
    foreach (var ch in user)
    {
        if (!(char.IsLetterOrDigit(ch) || ch == '-' || ch == '_' || ch == '.'))
        {
            nameOK = false;
            break;
        }
    }

    if (!nameOK)
    {
        throw new Exception($"User name [{user}] is invalid.  Names must consist of letters, digits, dashes, underscores or periods only.");
    }

    // Reject names that would collide with the fixed files managed in the
    // certificate authority folder.  All of these cases fall through to the
    // single [throw] below.
    switch (user.ToLower())
    {
        case "ca":
        case "ca-sign":
        case "client":
        case "crl":
        case "dh2048":
        case "index":
        case "serial":
        case "server":

            throw new Exception($"User name [{user}] conflicts with a reserved file.  Choose another name.");
    }

    // The suffix is appended to the certificate common name so the hive can
    // recognize root users from the certificate alone.
    var rootSuffix = rootPrivileges ? " (is-root)" : string.Empty;

    // NOTE(review): the exact whitespace/line layout of this generated config
    // was reconstructed from a collapsed source listing -- verify against the
    // original before relying on byte-exact output.
    var clientCnf =
$@"# client.cnf
# This configuration file is used by the 'req' command when a certificate is created for [{user}].

[ req ]
default_bits = 2048
default_md = sha256
encrypt_key = no
prompt = no
string_mask = utf8only
distinguished_name = client1_distinguished_name
req_extensions = req_cert_extensions
# attributes = req_attributes

[ client1_distinguished_name ]
countryName = {hiveDefinition.Vpn.CertCountryCode}
#stateOrProvinceName = Utrecht
#localityName = HomeTown
organizationName = {hiveDefinition.Vpn.CertOrganization}
#organizationalUnitName = My Department Name
commonName = {user}{rootSuffix}

[ req_cert_extensions ]
nsCertType = client
subjectAltName = email:[email protected]
";

    return (clientCnf);
}
/// <summary>
/// Implements the service as a <see cref="Task"/>.
/// </summary>
/// <returns>The <see cref="Task"/>.</returns>
private static async Task RunAsync()
{
    // The service runs as a periodic task: each poll regenerates the DNS
    // answers from the hive definition plus the Consul DNS entry targets and
    // persists them to Consul only when they've actually changed.
    var periodicTask =
        new AsyncPeriodicTask(
            pollInterval,
            onTaskAsync:
                async () =>
                {
                    log.LogDebug(() => "Starting poll");

                    // We're going to collect the [hostname --> address] mappings into
                    // a specialized (semi-threadsafe) dictionary.

                    var hostAddresses = new HostAddresses();

                    // Retrieve the current hive definition from Consul if we don't already
                    // have it or it's different from what we've cached.

                    hiveDefinition = await HiveHelper.GetDefinitionAsync(hiveDefinition, terminator.CancellationToken);

                    log.LogDebug(() => $"Hive has [{hiveDefinition.NodeDefinitions.Count}] nodes.");

                    // Add the [NAME.HIVENAME.nhive.io] definitions for each cluster node.

                    foreach (var node in hiveDefinition.Nodes)
                    {
                        hostAddresses.Add($"{node.Name}.{hiveDefinition.Name}.nhive.io", IPAddress.Parse(node.PrivateAddress));
                    }

                    // Read the DNS entry definitions from Consul and add the appropriate
                    // host/addresses based on health checks, etc.

                    var targetsResult = (await consul.KV.ListOrDefault<DnsEntry>(HiveConst.ConsulDnsEntriesKey + "/", terminator.CancellationToken));

                    List<DnsEntry> targets;

                    if (targetsResult == null)
                    {
                        // The targets key wasn't found in Consul, so we're
                        // going to assume that there are no targets.

                        targets = new List<DnsEntry>();
                    }
                    else
                    {
                        targets = targetsResult.ToList();
                    }

                    log.LogDebug(() => $"Consul has [{targets.Count()}] DNS targets.");

                    await ResolveTargetsAsync(hostAddresses, targets);

                    // Generate a canonical [hosts.txt] file by sorting host entries by
                    // hostname and then by IP address.
                    //
                    // Unhealthy hosts will be assigned the unrouteable [0.0.0.0] address.
                    // The reason for this is subtle but super important.
                    //
                    // If we didn't do this, the DNS host would likely be resolved by a
                    // public DNS service, perhaps returning the IP address of a production
                    // endpoint.
                    //
                    // This could cause a disaster if the whole purpose of having a local
                    // DNS host defined to redirect test traffic to a test service.  If
                    // the test service endpoints didn't report as healthy and [0.0.0.0]
                    // wasn't set, then test traffic could potentially hit the production
                    // endpoint and do serious damage.

                    var sbHosts      = new StringBuilder();
                    var mappingCount = 0;

                    foreach (var host in hostAddresses.OrderBy(h => h.Key))
                    {
                        foreach (var address in host.Value.OrderBy(a => a.ToString()))
                        {
                            sbHosts.AppendLineLinux($"{address,-15} {host.Key}");
                            mappingCount++;
                        }
                    }

                    // Targets with no healthy addresses get the unrouteable address.

                    var unhealthyTargets = targets.Where(t => !hostAddresses.ContainsKey(t.Hostname) || hostAddresses[t.Hostname].Count == 0).ToList();

                    if (unhealthyTargets.Count > 0)
                    {
                        sbHosts.AppendLine();
                        sbHosts.AppendLine($"# [{unhealthyTargets.Count}] unhealthy DNS hosts:");
                        sbHosts.AppendLine();

                        var unhealthyAddress = "0.0.0.0";

                        // NOTE(review): [OrderBy(h => h)] sorts the [DnsEntry] items by
                        // themselves, which requires [DnsEntry] to be comparable.  It
                        // looks like [OrderBy(t => t.Hostname)] was intended to match
                        // the healthy-host ordering above -- verify.
                        foreach (var target in unhealthyTargets.OrderBy(h => h))
                        {
                            sbHosts.AppendLineLinux($"{unhealthyAddress,-15} {target.Hostname}");
                        }
                    }

                    // Compute the MD5 hash and compare it to the hash persisted to
                    // Consul (if any) to determine whether we need to update the
                    // answers in Consul.

                    var hostsTxt   = sbHosts.ToString();
                    var hostsMD5   = NeonHelper.ComputeMD5(hostsTxt);
                    var currentMD5 = await consul.KV.GetStringOrDefault(HiveConst.ConsulDnsHostsMd5Key, terminator.CancellationToken);

                    if (currentMD5 == null)
                    {
                        currentMD5 = string.Empty;
                    }

                    if (hostsMD5 != currentMD5)
                    {
                        log.LogDebug(() => $"DNS answers have changed.");
                        log.LogDebug(() => $"Writing [{mappingCount}] DNS answers to Consul.");

                        // Update the Consul keys using a transaction so the hash and
                        // the answers are written atomically.

                        var operations = new List<KVTxnOp>()
                        {
                            new KVTxnOp(HiveConst.ConsulDnsHostsMd5Key, KVTxnVerb.Set) { Value = Encoding.UTF8.GetBytes(hostsMD5) },
                            new KVTxnOp(HiveConst.ConsulDnsHostsKey, KVTxnVerb.Set) { Value = Encoding.UTF8.GetBytes(hostsTxt) }
                        };

                        await consul.KV.Txn(operations, terminator.CancellationToken);
                    }

                    log.LogDebug(() => "Finished poll");

                    // Returning [false] indicates that the periodic task should keep polling.
                    return (await Task.FromResult(false));
                },
            onExceptionAsync:
                async e =>
                {
                    // Log and keep polling: a single failed poll shouldn't kill the service.
                    log.LogError(e);
                    return (await Task.FromResult(false));
                },
            onTerminateAsync:
                async () =>
                {
                    log.LogInfo(() => "Terminating");
                    await Task.CompletedTask;
                });

    // Ensure the task is disposed when the service is asked to terminate.
    terminator.AddDisposable(periodicTask);
    await periodicTask.Run();
}
/// <summary>
/// Uploads the configuration files for the target operating system to the server.
/// </summary>
/// <typeparam name="Metadata">The node metadata type.</typeparam>
/// <param name="node">The remote node.</param>
/// <param name="hiveDefinition">The hive definition or <c>null</c>.</param>
public static void UploadConfigFiles<Metadata>(this SshProxy<Metadata> node, HiveDefinition hiveDefinition = null)
    where Metadata : class
{
    Covenant.Requires<ArgumentNullException>(node != null);

    // Wipe the configuration folder first so files left over from any
    // previous deployment won't linger on the node.

    node.Status = $"clear: {HiveHostFolders.Config}";
    node.SudoCommand($"rm -rf {HiveHostFolders.Config}/*.*");

    // Copy each local configuration file up to the node.

    node.Status = "upload: config files";

    foreach (var configFile in Program.LinuxFolder.GetFolder("conf").Files())
    {
        var targetPath = $"{HiveHostFolders.Config}/{configFile.Name}";

        node.UploadFile(hiveDefinition, configFile, targetPath);
    }

    // Lock down the file permissions and mark the shell scripts as executable.

    node.SudoCommand($"chmod 644 {HiveHostFolders.Config}/*.*");
    node.SudoCommand($"chmod 744 {HiveHostFolders.Config}/*.sh");

    node.Status = "copied";
}
/// <inheritdoc/>
public void Run(ModuleContext context)
{
    TrafficManager trafficManager = null;
    bool           isPublic       = false;
    string         name           = null;
    string         ruleName       = null;
    bool           deferUpdate    = false;

    if (!context.ValidateArguments(context.Arguments, validModuleArgs))
    {
        context.Failed = true;
        return;
    }

    // Obtain common arguments.

    context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [state]");

    if (!context.Arguments.TryGetValue<string>("state", out var state))
    {
        state = "present";
    }

    state = state.ToLowerInvariant();

    if (context.HasErrors)
    {
        return;
    }

    context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [name]");

    if (!context.Arguments.TryGetValue<string>("name", out name))
    {
        throw new ArgumentException($"[name] module argument is required.");
    }

    switch (name)
    {
        case "private":

            trafficManager = HiveHelper.Hive.PrivateTraffic;
            isPublic       = false;
            break;

        case "public":

            trafficManager = HiveHelper.Hive.PublicTraffic;
            isPublic       = true;
            break;

        default:

            // FIX: message previously read "is not a one of the valid".
            throw new ArgumentException($"[name={name}] is not one of the valid traffic manager names: [private] or [public].");
    }

    // [rule_name] and [defer_update] are only meaningful for the
    // [present] and [absent] states.

    if (state == "present" || state == "absent")
    {
        context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [rule_name]");

        if (!context.Arguments.TryGetValue<string>("rule_name", out ruleName))
        {
            throw new ArgumentException($"[rule_name] module argument is required.");
        }

        if (!HiveDefinition.IsValidName(ruleName))
        {
            throw new ArgumentException($"[rule_name={ruleName}] is not a valid traffic manager rule name.");
        }

        context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [defer_update]");

        if (!context.Arguments.TryGetValue<bool>("defer_update", out deferUpdate))
        {
            deferUpdate = false;
        }
    }

    // We have the required arguments, so perform the operation.

    switch (state)
    {
        case "absent":

            context.WriteLine(AnsibleVerbosity.Trace, $"Check if rule [{ruleName}] exists.");

            if (trafficManager.GetRule(ruleName) != null)
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"Rule [{ruleName}] does exist.");
                context.WriteLine(AnsibleVerbosity.Info, $"Deleting rule [{ruleName}].");

                if (context.CheckMode)
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Rule [{ruleName}] will be deleted when CHECK-MODE is disabled.");
                }
                else
                {
                    trafficManager.RemoveRule(ruleName, deferUpdate: deferUpdate);
                    context.WriteLine(AnsibleVerbosity.Trace, $"Rule [{ruleName}] deleted.");
                    context.Changed = true;
                }
            }
            else
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"Rule [{ruleName}] does not exist.");
            }
            break;

        case "present":

            context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [rule]");

            if (!context.Arguments.TryGetValue<JObject>("rule", out var routeObject))
            {
                throw new ArgumentException($"[rule] module argument is required when [state={state}].");
            }

            var ruleText = routeObject.ToString();

            context.WriteLine(AnsibleVerbosity.Trace, "Parsing rule");

            var newRule = TrafficRule.Parse(ruleText, strict: true);

            context.WriteLine(AnsibleVerbosity.Trace, "Rule parsed successfully");

            // Use the name argument if the deserialized rule doesn't
            // have a name.  This will make it easier on operators because
            // they won't need to specify the name twice.

            if (string.IsNullOrWhiteSpace(newRule.Name))
            {
                newRule.Name = ruleName;
            }

            // Ensure that the name passed as an argument and the
            // name within the rule definition match.

            if (!string.Equals(ruleName, newRule.Name, StringComparison.InvariantCultureIgnoreCase))
            {
                throw new ArgumentException($"The [rule_name={ruleName}] argument and the rule's [{nameof(TrafficRule.Name)}={newRule.Name}] property are not the same.");
            }

            context.WriteLine(AnsibleVerbosity.Trace, "Rule name matched.");

            // Validate the rule.

            context.WriteLine(AnsibleVerbosity.Trace, "Validating rule.");

            var proxySettings     = trafficManager.GetSettings();
            var validationContext = new TrafficValidationContext(name, proxySettings);

            // $hack(jeff.lill):
            //
            // This ensures that [proxySettings.Resolvers] is initialized with
            // the built-in Docker DNS resolver.

            proxySettings.Validate(validationContext);

            // Load the TLS certificates into the validation context so we'll
            // be able to verify that any referenced certificates actually exist.

            // $todo(jeff.lill):
            //
            // This code assumes that the operator is currently logged in with
            // root Vault privileges.  We'll have to do something else for
            // non-root logins.
            //
            // One idea might be to save two versions of the certificates.
            // The primary certificate with private key in Vault and then
            // just the public certificate in Consul and then load just
            // the public ones here.
            //
            // A good time to make this change might be when we convert to
            // use the .NET X.509 certificate implementation.

            if (!context.Login.HasVaultRootCredentials)
            {
                throw new ArgumentException("Access Denied: Root Vault credentials are required.");
            }

            context.WriteLine(AnsibleVerbosity.Trace, "Reading hive certificates.");

            using (var vault = HiveHelper.OpenVault(Program.HiveLogin.VaultCredentials.RootToken))
            {
                // List the certificate key/names and then fetch each one
                // to capture details like the expiration date and covered
                // hostnames.

                foreach (var certName in vault.ListAsync("neon-secret/cert").Result)
                {
                    context.WriteLine(AnsibleVerbosity.Trace, $"Reading: {certName}");

                    var certificate = vault.ReadJsonAsync<TlsCertificate>(HiveHelper.GetVaultCertificateKey(certName)).Result;

                    validationContext.Certificates.Add(certName, certificate);
                }
            }

            context.WriteLine(AnsibleVerbosity.Trace, $"[{validationContext.Certificates.Count}] hive certificates downloaded.");

            // Actually perform the rule validation.

            newRule.Validate(validationContext);

            if (validationContext.HasErrors)
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"[{validationContext.Errors.Count}] Route validation errors.");

                foreach (var error in validationContext.Errors)
                {
                    context.WriteLine(AnsibleVerbosity.Important, error);
                    context.WriteErrorLine(error);
                }

                context.Failed = true;
                return;
            }

            context.WriteLine(AnsibleVerbosity.Trace, "Rule is valid.");

            // Try reading any existing rule with this name and then determine
            // whether the two versions of the rule are actually different.

            context.WriteLine(AnsibleVerbosity.Trace, $"Looking for existing rule [{ruleName}]");

            var existingRule = trafficManager.GetRule(ruleName);
            var changed      = false;

            if (existingRule != null)
            {
                context.WriteLine(AnsibleVerbosity.Trace, $"Rule exists: checking for differences.");

                // Normalize the new and existing rules so the JSON text comparison
                // will work properly.

                newRule.Normalize(isPublic);
                existingRule.Normalize(isPublic);

                changed = !NeonHelper.JsonEquals(newRule, existingRule);

                if (changed)
                {
                    context.WriteLine(AnsibleVerbosity.Trace, $"Rules are different.");
                }
                else
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Rules are the same. No need to update.");
                }
            }
            else
            {
                changed = true;
                context.WriteLine(AnsibleVerbosity.Trace, $"Rule [name={ruleName}] does not exist.");
            }

            if (changed)
            {
                if (context.CheckMode)
                {
                    context.WriteLine(AnsibleVerbosity.Info, $"Rule [{ruleName}] will be updated when CHECK-MODE is disabled.");
                }
                else
                {
                    context.WriteLine(AnsibleVerbosity.Trace, $"Writing rule [{ruleName}].");
                    trafficManager.SetRule(newRule);
                    context.WriteLine(AnsibleVerbosity.Info, $"Rule updated.");

                    // NOTE(review): CheckMode is known false in this branch,
                    // so this always evaluates to [true].
                    context.Changed = !context.CheckMode;
                }
            }
            break;

        case "update":

            trafficManager.Update();
            context.Changed = true;
            context.WriteLine(AnsibleVerbosity.Info, $"Update signalled.");
            break;

        case "purge":

            var purgeItems         = context.ParseStringArray("purge_list");
            var purgeCaseSensitive = context.ParseBool("purge_case_sensitive");

            if (!purgeCaseSensitive.HasValue)
            {
                purgeCaseSensitive = false;
            }

            if (purgeItems.Count == 0)
            {
                context.WriteLine(AnsibleVerbosity.Important, $"[purge_list] is missing or empty.");
                break;
            }

            trafficManager.Purge(purgeItems.ToArray(), caseSensitive: purgeCaseSensitive.Value);

            context.Changed = true;
            context.WriteLine(AnsibleVerbosity.Info, $"Purge request submitted.");
            break;

        default:

            // FIX: [purge] is handled above but was missing from this message.
            throw new ArgumentException($"[state={state}] is not one of the valid choices: [present], [absent], [update], or [purge].");
    }
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Help();
        Program.Exit(0);
    }

    // Special-case handling of the [--remove-templates] option.

    if (commandLine.HasOption("--remove-templates"))
    {
        Console.WriteLine("Removing cached virtual machine templates.");

        foreach (var fileName in Directory.GetFiles(HiveHelper.GetVmTemplatesFolder(), "*.*", SearchOption.TopDirectoryOnly))
        {
            File.Delete(fileName);
        }

        Program.Exit(0);
    }

    // Implement the command.

    packageCacheUri = commandLine.GetOption("--package-cache"); // This overrides the hive definition, if specified.

    if (Program.HiveLogin != null)
    {
        Console.Error.WriteLine("*** ERROR: You are logged into a hive. You need to logout before preparing another.");
        Program.Exit(1);
    }

    if (commandLine.Arguments.Length == 0)
    {
        Console.Error.WriteLine($"*** ERROR: HIVE-DEF expected.");
        Program.Exit(1);
    }

    // Load and validate the hive definition file named on the command line.

    hiveDefPath = commandLine.Arguments[0];
    force = commandLine.GetFlag("--force");

    HiveDefinition.ValidateFile(hiveDefPath, strict: true);

    var hiveDefinition = HiveDefinition.FromFile(hiveDefPath, strict: true);

    hiveDefinition.Provisioner = $"neon-cli:{Program.Version}"; // Identify this tool/version as the hive provisioner

    // NOTE:
    //
    // Azure has implemented a more restrictive password policy and our
    // default machine password does not meet the requirements:
    //
    // The supplied password must be between 6-72 characters long and must
    // satisfy at least 3 of password complexity requirements from the following:
    //
    //      1. Contains an uppercase character
    //      2. Contains a lowercase character
    //      3. Contains a numeric digit
    //      4. Contains a special character
    //      5. Control characters are not allowed
    //
    // It's also probably not a great idea to use a static password when
    // provisioning VMs in public clouds because it might be possible for
    // somebody to use this fact to SSH into nodes while the hive is being
    // setup and before we set the secure password at the end.
    //
    // This is less problematic for non-cloud environments because it's
    // likely that the hosts won't initially be able to receive inbound
    // Internet traffic and besides, we need to have a known password
    // embedded into the VM templates.
    //
    // We're going to handle this for cloud environments by looking
    // at [Program.MachinePassword].  If this is set to the default
    // machine password then we're going to replace it with a randomly
    // generated password with a few extra characters to ensure that
    // it meets the target cloud's password requirements.  We'll use
    // a non-default password if the operator specified one.

    if (hiveDefinition.Hosting.IsCloudProvider && Program.MachinePassword == HiveConst.DefaulVmTemplatePassword)
    {
        Program.MachinePassword = NeonHelper.GetRandomPassword(20);

        // Append a string that guarantees that the generated password meets
        // cloud minimum requirements.

        Program.MachinePassword += ".Aa0";
    }

    // Note that hive prepare starts new log files.

    hive = new HiveProxy(hiveDefinition, Program.CreateNodeProxy<NodeDefinition>, appendLog: false, useBootstrap: true, defaultRunOptions: RunOptions.LogOutput | RunOptions.FaultOnError);

    // Refuse to prepare when a root login for a hive with this name already exists.

    if (File.Exists(Program.GetHiveLoginPath(HiveConst.RootUser, hive.Definition.Name)))
    {
        Console.Error.WriteLine($"*** ERROR: A hive login named [{HiveConst.RootUser}@{hive.Definition.Name}] already exists.");
        Program.Exit(1);
    }

    Program.OSProperties = OSProperties.For(hiveDefinition.HiveNode.OperatingSystem);

    // Configure global options.

    if (commandLine.HasOption("--unredacted"))
    {
        hive.SecureRunOptions = RunOptions.None;
    }

    //-----------------------------------------------------------------
    // $todo(jeff.lill):
    //
    // We're temporarily disabling redaction to make it easier to investigate
    // Vault setup issues.  Remove this line before final launch.
    //
    // https://github.com/jefflill/NeonForge/issues/225

    hive.SecureRunOptions = RunOptions.None;

    //-----------------------------------------------------------------
    // Assign the VPN client return subnets to the manager nodes if VPN is enabled.

    if (hive.Definition.Vpn.Enabled)
    {
        var vpnSubnet = NetworkCidr.Parse(hive.Definition.Network.VpnPoolSubnet);
        var prefixLength = 25;
        var nextVpnSubnetAddress = vpnSubnet.Address;

        // Note that we're not going to assign the first block of addresses in the
        // VPN subnet to any managers to prevent conflicts with addresses reserved
        // by some cloud platforms at the beginning of a subnet.  Azure for example
        // reserves 4 IP addresses for DNS servers and platform provided VPNs.

        foreach (var manager in hive.Definition.SortedManagers)
        {
            var managerVpnSubnet = new NetworkCidr(NetHelper.AddressIncrement(nextVpnSubnetAddress, VpnOptions.ServerAddressCount), prefixLength);

            manager.VpnPoolSubnet = managerVpnSubnet.ToString();
            nextVpnSubnetAddress = managerVpnSubnet.NextAddress;
        }
    }

    //-----------------------------------------------------------------
    // Try to ensure that no servers are already deployed on the IP addresses defined
    // for hive nodes because provisioning over an existing hive will likely
    // corrupt the existing hive and also probably prevent the new hive from
    // provisioning correctly.
    //
    // Note that we're not going to perform this check for the [Machine] hosting
    // environment because we're expecting the bare machines to be already running
    // with the assigned addresses and we're also not going to do this for cloud
    // environments because we're assuming that the hive will run in its own private
    // network so there'll be no possibility of conflicts.

    if (hive.Definition.Hosting.Environment != HostingEnvironments.Machine && !hive.Definition.Hosting.IsCloudProvider)
    {
        Console.WriteLine();
        Console.WriteLine("Scanning for IP address conflicts...");
        Console.WriteLine();

        var pingOptions = new PingOptions(ttl: 32, dontFragment: true);
        var pingTimeout = TimeSpan.FromSeconds(2);
        var pingConflicts = new List<NodeDefinition>();
        var pingAttempts = 2;

        // I'm going to use up to 20 threads at a time here for simplicity
        // rather then doing this as async operations.

        var parallelOptions = new ParallelOptions() { MaxDegreeOfParallelism = 20 };

        Parallel.ForEach(hive.Definition.NodeDefinitions.Values, parallelOptions,
            node =>
            {
                using (var ping = new Ping())
                {
                    // We're going to try pinging up to [pingAttempts] times for each node
                    // just in case the network is sketchy and we're losing reply packets.

                    for (int i = 0; i < pingAttempts; i++)
                    {
                        var reply = ping.Send(node.PrivateAddress, (int)pingTimeout.TotalMilliseconds);

                        if (reply.Status == IPStatus.Success)
                        {
                            // Something answered at this node's address: record the conflict.
                            // [pingConflicts] is shared across the parallel lambdas, so
                            // serialize access.

                            lock (pingConflicts)
                            {
                                pingConflicts.Add(node);
                            }

                            break;
                        }
                    }
                }
            });

        if (pingConflicts.Count > 0)
        {
            Console.Error.WriteLine($"*** ERROR: Cannot provision the hive because [{pingConflicts.Count}] other");
            Console.Error.WriteLine($"*** machines conflict with the following hive nodes:");
            Console.Error.WriteLine();

            // List the conflicting nodes in ascending IP address order.

            foreach (var node in pingConflicts.OrderBy(n => NetHelper.AddressToUint(IPAddress.Parse(n.PrivateAddress))))
            {
                Console.Error.WriteLine($"{node.PrivateAddress, 16}: {node.Name}");
            }

            Program.Exit(1);
        }
    }

    //-----------------------------------------------------------------
    // Perform basic environment provisioning.  This creates basic hive components
    // such as virtual machines, networks, load balancers, public IP addresses, security
    // groups,... as required for the environment.

    hostingManager = new HostingManagerFactory(() => HostingLoader.Initialize()).GetManager(hive, Program.LogPath);

    if (hostingManager == null)
    {
        Console.Error.WriteLine($"*** ERROR: No hosting manager for the [{hive.Definition.Hosting.Environment}] hosting environment could be located.");
        Program.Exit(1);
    }

    hostingManager.HostUsername = Program.MachineUsername;
    hostingManager.HostPassword = Program.MachinePassword;
    hostingManager.ShowStatus = !Program.Quiet;
    hostingManager.MaxParallel = Program.MaxParallel;
    hostingManager.WaitSeconds = Program.WaitSeconds;

    if (hostingManager.RequiresAdminPrivileges)
    {
        Program.VerifyAdminPrivileges($"Provisioning to [{hive.Definition.Hosting.Environment}] requires elevated administrator privileges.");
    }

    if (!hostingManager.Provision(force))
    {
        Program.Exit(1);
    }

    // Get the mounted drive prefix from the hosting manager.

    hive.Definition.DrivePrefix = hostingManager.DrivePrefix;

    // Ensure that the nodes have valid IP addresses.

    hive.Definition.ValidatePrivateNodeAddresses();

    // Verify that no two nodes share the same private IP address.

    var ipAddressToServer = new Dictionary<IPAddress, SshProxy<NodeDefinition>>();

    foreach (var node in hive.Nodes.OrderBy(n => n.Name))
    {
        SshProxy<NodeDefinition> duplicateServer;

        if (node.PrivateAddress == IPAddress.Any)
        {
            throw new ArgumentException($"Node [{node.Name}] has not been assigned an IP address.");
        }

        if (ipAddressToServer.TryGetValue(node.PrivateAddress, out duplicateServer))
        {
            throw new ArgumentException($"Nodes [{duplicateServer.Name}] and [{node.Name}] have the same IP address [{node.Metadata.PrivateAddress}].");
        }

        ipAddressToServer.Add(node.PrivateAddress, node);
    }

    //-----------------------------------------------------------------
    // Perform basic node provisioning including operating system updates & configuration,
    // and configure OpenVPN on the manager nodes so that hive setup will be
    // able to reach the nodes on all ports.

    // Write the operation begin marker to all hive node logs.

    hive.LogLine(logBeginMarker);

    var operation = $"Preparing [{hive.Definition.Name}] nodes";
    var controller = new SetupController<NodeDefinition>(operation, hive.Nodes)
    {
        ShowStatus = !Program.Quiet,
        MaxParallel = Program.MaxParallel
    };

    if (!string.IsNullOrEmpty(packageCacheUri))
    {
        hive.Definition.PackageProxy = packageCacheUri;
    }

    // Prepare the nodes.

    controller.AddWaitUntilOnlineStep(timeout: TimeSpan.FromMinutes(15));
    hostingManager.AddPostProvisionSteps(controller);
    controller.AddStep("verify OS",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);
            CommonSteps.VerifyOS(node);
        });
    controller.AddStep("prepare",
        (node, stepDelay) =>
        {
            Thread.Sleep(stepDelay);
            CommonSteps.PrepareNode(node, hive.Definition, shutdown: false);
        },
        stepStaggerSeconds: hive.Definition.Setup.StepStaggerSeconds);

    // Add any VPN configuration steps.

    if (hive.Definition.Vpn.Enabled)
    {
        controller.AddGlobalStep("vpn credentials", () => CreateVpnCredentials());
        controller.AddStep("vpn server",
            (node, stepDelay) =>
            {
                Thread.Sleep(stepDelay);
                ConfigManagerVpn(node);
            },
            node => node.Metadata.IsManager);

        // Add a step to establish a VPN connection if we're provisioning to a cloud.
        // We specifically don't want to do this if we're provisioning to a on-premise
        // datacenter because we're assuming that we're already directly connected to
        // the LAN while preparing and setting up the hive.

        if (hive.Definition.Hosting.IsCloudProvider)
        {
            controller.AddStep("vpn connect",
                (manager, stepDelay) =>
                {
                    Thread.Sleep(stepDelay);

                    // Create a hive login with just enough credentials to connect the VPN.
                    // Note that this isn't really a node specific command but I wanted to
                    // be able to display the connection status somewhere.

                    var vpnLogin = new HiveLogin()
                    {
                        Definition = hive.Definition,
                        VpnCredentials = vpnCredentials
                    };

                    // Ensure that we don't have an old VPN client for the hive running.

                    HiveHelper.VpnClose(vpnLogin.Definition.Name);

                    // ...and then start a new one.

                    HiveHelper.VpnOpen(vpnLogin,
                        onStatus: message => manager.Status = $"{message}",
                        onError: message => manager.Status = $"ERROR: {message}");
                },
                n => n == hive.FirstManager);
        }

        // Perform any post-VPN setup provisioning required by the hosting provider.

        hostingManager.AddPostVpnSteps(controller);
    }

    if (!controller.Run())
    {
        // Write the operation end/failed marker to all hive node logs.

        hive.LogLine(logFailedMarker);

        Console.Error.WriteLine("*** ERROR: One or more configuration steps failed.");
        Program.Exit(1);
    }

    // Write the hive login file.

    var hiveLoginPath = Program.GetHiveLoginPath(HiveConst.RootUser, hive.Definition.Name);
    var hiveLogin = new HiveLogin()
    {
        Path = hiveLoginPath,
        Username = HiveConst.RootUser,
        Definition = hive.Definition,
        SshUsername = Program.MachineUsername,
        SshPassword = Program.MachinePassword,
        SshProvisionPassword = Program.MachinePassword,
        SetupPending = true
    };

    if (hive.Definition.Vpn.Enabled)
    {
        hiveLogin.VpnCredentials = vpnCredentials;
    }

    // Generate the hive certificates.

    const int bitCount = 2048;
    const int validDays = 365000; // About 1,000 years.

    if (hiveLogin.HiveCertificate == null)
    {
        // Self-signed wildcard certificate covering the hive's internal
        // [*.HIVENAME.nhive.io] hostnames (Vault, registry cache, HiveMQ).

        var hostnames = new string[]
            {
                $"{hive.Name}.nhive.io",
                $"*.{hive.Name}.nhive.io",
                $"*.neon-vault.{hive.Name}.nhive.io",
                $"*.neon-registry-cache.{hive.Name}.nhive.io",
                $"*.neon-hivemq.{hive.Name}.nhive.io"
            };

        hiveLogin.HiveCertificate = TlsCertificate.CreateSelfSigned(hostnames, bitCount, validDays,
            issuedBy: "neonHIVE",
            issuedTo: $"neonHIVE: {hiveDefinition.Name}");

        hiveLogin.HiveCertificate.FriendlyName = $"neonHIVE: {hiveLogin.Definition.Name}";
    }

    // Persist the certificates into the hive login.

    hiveLogin.Save();

    // Write the operation end marker to all hive node logs.

    hive.LogLine(logEndMarker);
}
/// <summary>
/// Verifies a worker node's NTP health.
/// </summary>
/// <param name="node">The target worker node.</param>
/// <param name="hiveDefinition">The hive definition.</param>
/// <remarks>
/// Faults <paramref name="node"/> when one or more managers do not appear
/// as time sources after several retries.
/// </remarks>
private static void CheckWorkerNtp(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition)
{
    // We're going to use [ntpq -pw] to query the configured time sources.
    // We should get something back that looks like
    //
    //           remote           refid      st t when poll reach   delay   offset  jitter
    //           ==============================================================================
    //            LOCAL(0).LOCL.          10 l  45m   64    0    0.000    0.000   0.000
    //           * 10.0.1.5        198.60.22.240    2 u  111  128  377    0.062    3.409   0.608
    //           + 10.0.1.7        198.60.22.240    2 u  111  128  377    0.062    3.409   0.608
    //           + 10.0.1.7        198.60.22.240    2 u  111  128  377    0.062    3.409   0.608
    //
    // For worker nodes, we need to verify that each of the managers are answering
    // by confirming that their IP addresses are present.

    node.Status = "checking: NTP";

    var retryDelay = TimeSpan.FromSeconds(30);
    var fault      = (string)null;
    var firstTry   = true;

tryAgain:

    for (var tries = 0; tries < 6; tries++)
    {
        var output = node.SudoCommand("/usr/bin/ntpq -pw", RunOptions.LogOutput).OutputText;

        // FIX: The previous implementation [break]ed out of the manager loop after
        // checking only the first manager, [continue]d against stale [ntpq] output
        // on failure, never cleared [fault] after a transient failure, and never
        // exited the retry loop early on success.  Now each attempt re-runs [ntpq],
        // verifies ALL managers, and the loop exits as soon as an attempt is clean.

        fault = null;

        foreach (var manager in hiveDefinition.SortedManagers)
        {
            // We're going to check for the presence of the manager's IP address
            // or its name, the latter because [ntpq] appears to attempt a reverse
            // IP address lookup which will resolve into one of the DNS names defined
            // in the local [/etc/hosts] file.

            if (!output.Contains(manager.PrivateAddress.ToString()) && !output.Contains(manager.Name.ToLower()))
            {
                fault = $"NTP: Manager [{manager.Name}/{manager.PrivateAddress}] is not answering.";
                break;
            }
        }

        if (fault == null)
        {
            // Everything looks OK.

            break;
        }

        Thread.Sleep(retryDelay);
    }

    if (fault != null)
    {
        if (firstTry)
        {
            // $hack(jeff.lill):
            //
            // I've seen the NTP check fail on a non-manager node, complaining
            // that the connection attempt was rejected.  I manually restarted
            // the node and then it worked.  I'm not sure if the rejected connection
            // was being made to the local NTP service or from the local service
            // to NTP running on the manager.
            //
            // I'm going to assume that it was to the local NTP service and I'm
            // going to try mitigating this by restarting the local NTP service
            // and then re-running the tests.  I'm only going to do this once.

            node.SudoCommand("systemctl restart ntp", node.DefaultRunOptions & ~RunOptions.FaultOnError);

            firstTry = false;
            goto tryAgain;
        }

        node.Fault(fault);
    }
}
/// <summary>
/// Verifies a manager node's NTP health.
/// </summary>
/// <param name="node">The manager node.</param>
/// <param name="hiveDefinition">The hive definition.</param>
/// <remarks>
/// Faults <paramref name="node"/> when no external time source is
/// answering after several retries.
/// </remarks>
private static void CheckManagerNtp(SshProxy<NodeDefinition> node, HiveDefinition hiveDefinition)
{
    // We're going to use [ntpq -pw] to query the configured time sources.
    // We should get something back that looks like
    //
    //           remote           refid      st t when poll reach   delay   offset  jitter
    //           ==============================================================================
    //            LOCAL(0).LOCL.          10 l  45m   64    0    0.000    0.000   0.000
    //           * clock.xmission. .GPS.            1 u  134  256  377   48.939   - 0.549  18.357
    //           + 173.44.32.10    18.26.4.105      2 u  200  256  377   96.981   - 0.623   3.284
    //           + pacific.latt.ne 44.24.199.34     3 u  243  256  377   41.457   - 8.929   8.497
    //
    // For manager nodes, we're simply going to verify that we have at least one external
    // time source answering.

    node.Status = "checking: NTP";

    var retryDelay = TimeSpan.FromSeconds(30);
    var fault      = (string)null;

    for (int tryCount = 0; tryCount < 6; tryCount++)
    {
        var response = node.SudoCommand("/usr/bin/ntpq -pw", RunOptions.LogOutput);

        if (response.ExitCode != 0)
        {
            // FIX: Previously a non-zero exit code set no fault at all, so a node
            // whose [ntpq] consistently failed would silently pass the check.

            fault = "NTP: [ntpq -pw] command failed.";
            Thread.Sleep(retryDelay);
            continue;
        }

        using (var reader = response.OpenOutputTextReader())
        {
            string line;

            // Column header and table bar lines.

            line = reader.ReadLine();

            if (string.IsNullOrWhiteSpace(line))
            {
                fault = "NTP: Invalid [ntpq -pw] response.";
                Thread.Sleep(retryDelay);
                continue;
            }

            line = reader.ReadLine();

            if (string.IsNullOrWhiteSpace(line) || line[0] != '=')
            {
                fault = "NTP: Invalid [ntpq -pw] response.";
                Thread.Sleep(retryDelay);
                continue;
            }

            // Count the remaining lines that don't include [*.LOCL.*],
            // the local clock.

            var sourceCount = 0;

            for (line = reader.ReadLine(); line != null; line = reader.ReadLine())
            {
                if (line.Length > 0 && !line.Contains(".LOCL."))
                {
                    sourceCount++;
                }
            }

            if (sourceCount == 0)
            {
                fault = "NTP: No external sources are answering.";
                Thread.Sleep(retryDelay);
                continue;
            }

            // Everything looks good.
            //
            // FIX: Clear any fault recorded by an earlier failed attempt so a
            // transient failure followed by success doesn't fault the node.

            fault = null;
            break;
        }
    }

    if (fault != null)
    {
        node.Fault(fault);
    }
}
/// <summary>
/// Uploads the setup and other scripts and tools for the target operating system to the server.
/// </summary>
/// <typeparam name="TMetadata">The server's metadata type.</typeparam>
/// <param name="server">The remote server.</param>
/// <param name="hiveDefinition">The hive definition or <c>null</c>.</param>
public static void UploadResources<TMetadata>(this SshProxy<TMetadata> server, HiveDefinition hiveDefinition = null)
    where TMetadata : class
{
    Covenant.Requires<ArgumentNullException>(server != null);

    //-----------------------------------------------------------------
    // Upload resource files to the setup folder.

    server.Status = $"clear: {HiveHostFolders.Setup}";
    server.SudoCommand($"rm -rf {HiveHostFolders.Setup}/*.*");

    // Upload the setup files.

    server.Status = "upload: setup files";

    foreach (var file in Program.LinuxFolder.GetFolder("setup").Files())
    {
        server.UploadFile(hiveDefinition, file, $"{HiveHostFolders.Setup}/{file.Name}");
    }

    // Make the setup scripts executable.

    server.SudoCommand($"chmod 744 {HiveHostFolders.Setup}/*");

    // Uncomment this if/when we have to upload source files.

#if FALSE
    //-----------------------------------------------------------------
    // Upload resource files to the source folder.  Note that we're going
    // to convert to Linux style line endings and we're going to convert
    // leading spaces into TABs (4 spaces == 1 TAB).

    // $hack(jeff.lill):
    //
    // This is hardcoded to assume that the source consists of a single level
    // folder with the source files.  If the folders nest any further, we'll
    // need to implement a recursive method to handle this properly.
    //
    // This code also assumes that the folder and file names do not include
    // any spaces.

    server.Status = $"clear: {HiveHostFolders.Source}";
    server.SudoCommand($"rm -rf {HiveHostFolders.Source}/*.*");

    // Upload the source files.

    server.Status = "upload: source files";

    foreach (var folder in Program.LinuxFolder.GetFolder("source").Folders())
    {
        foreach (var file in folder.Files())
        {
            var targetPath = $"{HiveHostFolders.Source}/{folder.Name}/{file.Name}";

            server.UploadText(targetPath, file.Contents, tabStop: -4);
            server.SudoCommand("chmod 664", targetPath);
        }
    }
#endif

    //-----------------------------------------------------------------
    // Upload files to the tools folder.

    // FIX: Tool files are uploaded below with their [.sh] extension stripped,
    // so their names have no dot and the previous [rm -rf {folder}/*.*] glob
    // never matched them, leaving stale tools behind.  Use [*] instead
    // (consistent with the [chmod 744 {folder}/*] below).

    server.Status = $"clear: {HiveHostFolders.Tools}";
    server.SudoCommand($"rm -rf {HiveHostFolders.Tools}/*");

    // Upload the tool files.  Note that we're going to strip out the [.sh]
    // file type to make these easier to run.

    server.Status = "upload: tool files";

    foreach (var file in Program.LinuxFolder.GetFolder("tools").Files())
    {
        server.UploadFile(hiveDefinition, file, $"{HiveHostFolders.Tools}/{file.Name.Replace(".sh", string.Empty)}");
    }

    // Make the scripts executable.

    server.SudoCommand($"chmod 744 {HiveHostFolders.Tools}/*");
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption || commandLine.Arguments.Length == 0)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    Program.ConnectHive();

    // Process the command arguments.  The first argument selects the
    // traffic manager (director) and the second selects the subcommand.

    var trafficManager = (TrafficManager)null;
    var yaml           = commandLine.HasOption("--yaml");
    var directorName   = commandLine.Arguments.FirstOrDefault();
    var isPublic       = false;

    switch (directorName)
    {
        case "help":

            // $hack: This isn't really a traffic manager name.

            Console.WriteLine(ruleHelp);
            Program.Exit(0);
            break;

        case "public":

            trafficManager = HiveHelper.Hive.PublicTraffic;
            isPublic       = true;
            break;

        case "private":

            trafficManager = HiveHelper.Hive.PrivateTraffic;
            isPublic       = false;
            break;

        default:

            Console.Error.WriteLine($"*** ERROR: Load balancer name must be one of [public] or [private] ([{directorName}] is not valid).");
            Program.Exit(1);
            break;
    }

    commandLine = commandLine.Shift(1);

    var command = commandLine.Arguments.FirstOrDefault();

    if (command == null)
    {
        Console.WriteLine(usage);
        Program.Exit(1);
    }

    commandLine = commandLine.Shift(1);

    string ruleName;

    switch (command)
    {
        case "get":

            ruleName = commandLine.Arguments.FirstOrDefault();

            if (string.IsNullOrEmpty(ruleName))
            {
                Console.Error.WriteLine("*** ERROR: [RULE] argument expected.");
                Program.Exit(1);
            }

            if (!HiveDefinition.IsValidName(ruleName))
            {
                Console.Error.WriteLine($"*** ERROR: [{ruleName}] is not a valid rule name.");
                Program.Exit(1);
            }

            // Fetch a specific traffic manager rule and output it.

            var rule = trafficManager.GetRule(ruleName);

            if (rule == null)
            {
                Console.Error.WriteLine($"*** ERROR: Load balancer [{directorName}] rule [{ruleName}] does not exist.");
                Program.Exit(1);
            }

            Console.WriteLine(yaml ? rule.ToYaml() : rule.ToJson());
            break;

        case "haproxy":
        case "haproxy-bridge":
        case "varnish":

            // We're going to download the traffic manager's ZIP archive containing the
            // [haproxy.cfg] or [varnish.vcl] file, extract and write it to the console.

            using (var consul = HiveHelper.OpenConsul())
            {
                // Bridge configurations are stored under a "-bridge" suffixed proxy key.

                var proxy        = command.Equals("haproxy-bridge", StringComparison.InvariantCultureIgnoreCase) ? directorName + "-bridge" : directorName;
                var confKey      = $"neon/service/neon-proxy-manager/proxies/{proxy}/proxy-conf";
                var confZipBytes = consul.KV.GetBytesOrDefault(confKey).Result;

                if (confZipBytes == null)
                {
                    Console.Error.WriteLine($"*** ERROR: Proxy ZIP configuration was not found in Consul at [{confKey}].");
                    Program.Exit(1);
                }

                using (var msZipData = new MemoryStream(confZipBytes))
                {
                    using (var zip = new ZipFile(msZipData))
                    {
                        var file  = command.Equals("varnish", StringComparison.InvariantCultureIgnoreCase) ? "varnish.vcl" : "haproxy.cfg";
                        var entry = zip.GetEntry(file);

                        if (entry == null || !entry.IsFile)
                        {
                            Console.Error.WriteLine($"*** ERROR: Proxy ZIP configuration in Consul at [{confKey}] appears to be corrupt.  Cannot locate the [{file}] entry.");
                            Program.Exit(1);
                        }

                        using (var entryStream = zip.GetInputStream(entry))
                        {
                            using (var reader = new StreamReader(entryStream))
                            {
                                foreach (var line in reader.Lines())
                                {
                                    Console.WriteLine(line);
                                }
                            }
                        }
                    }
                }
            }
            break;

        case "inspect":

            Console.WriteLine(NeonHelper.JsonSerialize(trafficManager.GetDefinition(), Formatting.Indented));
            break;

        case "list":
        case "ls":

            var showAll = commandLine.HasOption("--all");
            var showSys = commandLine.HasOption("--sys");
            var rules   = trafficManager.ListRules(
                r =>
                {
                    if (showAll)
                    {
                        return true;
                    }
                    else if (showSys)
                    {
                        return r.System;
                    }
                    else
                    {
                        return !r.System;
                    }
                });

            Console.WriteLine();
            Console.WriteLine($"[{rules.Count()}] {trafficManager.Name} rules");
            Console.WriteLine();

            foreach (var item in rules)
            {
                Console.WriteLine(item.Name);
            }

            Console.WriteLine();
            break;

        case "purge":

            var purgeUri = commandLine.Arguments.FirstOrDefault();

            if (string.IsNullOrEmpty(purgeUri))
            {
                Console.Error.WriteLine("*** ERROR: [URI-PATTERN] or [ALL] argument expected.");
                Program.Exit(1);    // BUGFIX: was missing, causing a NullReferenceException
                                    // on the [purgeUri.Equals(...)] call below when the
                                    // argument was omitted.  All sibling cases exit here.
            }

            if (purgeUri.Equals("all", StringComparison.InvariantCultureIgnoreCase))
            {
                if (!commandLine.HasOption("--force") && !Program.PromptYesNo($"*** Are you sure you want to purge all cached items for [{directorName.ToUpperInvariant()}]?"))
                {
                    return;
                }

                trafficManager.PurgeAll();
            }
            else
            {
                trafficManager.Purge(new string[] { purgeUri });
            }

            Console.WriteLine();
            Console.WriteLine("Purge request submitted.");
            Console.WriteLine();
            break;

        case "update":

            trafficManager.Update();
            break;

        case "remove":
        case "rm":

            ruleName = commandLine.Arguments.FirstOrDefault();

            if (string.IsNullOrEmpty(ruleName))
            {
                Console.Error.WriteLine("*** ERROR: [RULE] argument expected.");
                Program.Exit(1);
            }

            if (!HiveDefinition.IsValidName(ruleName))
            {
                Console.Error.WriteLine($"*** ERROR: [{ruleName}] is not a valid rule name.");
                Program.Exit(1);
            }

            if (trafficManager.RemoveRule(ruleName))
            {
                Console.Error.WriteLine($"Deleted load balancer [{directorName}] rule [{ruleName}].");
            }
            else
            {
                Console.Error.WriteLine($"*** ERROR: Load balancer [{directorName}] rule [{ruleName}] does not exist.");
                Program.Exit(1);
            }
            break;

        case "set":

            // $todo(jeff.lill):
            //
            // It would be really nice to download the existing rules and verify that
            // adding the new rule won't cause conflicts.  Currently errors will be
            // detected only by the [neon-proxy-manager] which will log them and cease
            // updating the hive until the errors are corrected.
            //
            // An alternative would be to have some kind of service available in the
            // hive to do this for us or perhaps having [neon-proxy-manager] generate
            // a summary of all of the certificates (names, covered hostnames, and
            // expiration dates) and save this to Consul so it would be easy to
            // download.  Perhaps do the same for the rules?

            if (commandLine.Arguments.Length != 2)
            {
                Console.Error.WriteLine("*** ERROR: FILE or [-] argument expected.");
                Program.Exit(1);
            }

            // Load the rule.  Note that we support reading rules as JSON or
            // YAML, automatically detecting the format.  We always persist
            // rules as JSON though.

            var ruleFile = commandLine.Arguments[1];

            string ruleText;

            if (ruleFile == "-")
            {
                // Read the rule from standard input.

                using (var input = Console.OpenStandardInput())
                {
                    using (var reader = new StreamReader(input, detectEncodingFromByteOrderMarks: true))
                    {
                        ruleText = reader.ReadToEnd();
                    }
                }
            }
            else
            {
                ruleText = File.ReadAllText(ruleFile);
            }

            var trafficManagerRule = TrafficRule.Parse(ruleText, strict: true);

            ruleName = trafficManagerRule.Name;

            if (!HiveDefinition.IsValidName(ruleName))
            {
                Console.Error.WriteLine($"*** ERROR: [{ruleName}] is not a valid rule name.");
                Program.Exit(1);
            }

            // Validate a clone of the rule with any implicit frontends.

            var clonedRule = NeonHelper.JsonClone(trafficManagerRule);
            var context    = new TrafficValidationContext(directorName, null)
            {
                ValidateCertificates = false    // Disable this because we didn't download the certs (see note above)
            };

            clonedRule.Validate(context);
            clonedRule.Normalize(isPublic);

            if (context.HasErrors)
            {
                Console.Error.WriteLine("*** ERROR: One or more rule errors:");
                Console.Error.WriteLine();

                foreach (var error in context.Errors)
                {
                    Console.Error.WriteLine(error);
                }

                Program.Exit(1);
            }

            if (trafficManager.SetRule(trafficManagerRule))
            {
                Console.WriteLine($"Load balancer [{directorName}] rule [{ruleName}] has been updated.");
            }
            else
            {
                Console.WriteLine($"Load balancer [{directorName}] rule [{ruleName}] has been added.");
            }
            break;

        case "settings":

            var settingsFile = commandLine.Arguments.FirstOrDefault();

            if (string.IsNullOrEmpty(settingsFile))
            {
                Console.Error.WriteLine("*** ERROR: [-] or FILE argument expected.");
                Program.Exit(1);
            }

            string settingsText;

            if (settingsFile == "-")
            {
                settingsText = NeonHelper.ReadStandardInputText();
            }
            else
            {
                settingsText = File.ReadAllText(settingsFile);
            }

            var trafficManagerSettings = TrafficSettings.Parse(settingsText, strict: true);

            trafficManager.UpdateSettings(trafficManagerSettings);
            Console.WriteLine($"Traffic manager [{directorName}] settings have been updated.");
            break;

        case "status":

            using (var consul = HiveHelper.OpenConsul())
            {
                // The proxy manager periodically writes a status snapshot for
                // each traffic manager to Consul; render it if present.

                var statusJson = consul.KV.GetStringOrDefault($"neon/service/neon-proxy-manager/status/{directorName}").Result;

                if (statusJson == null)
                {
                    Console.Error.WriteLine($"*** ERROR: Status for traffic manager [{directorName}] is not currently available.");
                    Program.Exit(1);
                }

                var trafficManagerStatus = NeonHelper.JsonDeserialize<TrafficStatus>(statusJson);

                Console.WriteLine();
                Console.WriteLine($"Snapshot Time: {trafficManagerStatus.TimestampUtc} (UTC)");
                Console.WriteLine();

                using (var reader = new StringReader(trafficManagerStatus.Status))
                {
                    foreach (var line in reader.Lines())
                    {
                        Console.WriteLine(line);
                    }
                }
            }
            break;

        default:

            Console.Error.WriteLine($"*** ERROR: Unknown command: [{command}]");
            Program.Exit(1);
            break;
    }
}