/// <inheritdoc/> public override void Run(CommandLine commandLine) { var hiveLogin = Program.HiveLogin; Console.WriteLine(""); // Close all VPN connections even if we're not officially logged in. // // We're passing NULL to close all hive VPN connections to ensure that // we're only connected to one at a time. It's very possible for an operator // to have to manage multiple disconnected hives that share the same // IP address space. HiveHelper.VpnClose(null); // Actually log out. if (hiveLogin == null) { return; // Not logged in. } Console.WriteLine($"Logging out of [{hiveLogin.HiveName}]."); Console.WriteLine(""); CurrentHiveLogin.Delete(); }
/// <summary> /// Lists the VPN user certificates. /// </summary> private void UserList() { DirectNotAllowed(); RootLogin(); var columnWidths = new int[] { "Revoked ".Length, "MM-dd-yyyy HH:mm:ss".Length, "A901C8F59E261D83".Length, "Username".Length }; Console.WriteLine(); Console.WriteLine($"{PadRight("Status", columnWidths[0])} {PadRight("Valid Until", columnWidths[1])} {PadRight("Thumbprint", columnWidths[2])} {PadRight("Username", columnWidths[3])}"); Console.WriteLine($"{new string('-', columnWidths[0])} {new string('-', columnWidths[1])} {new string('-', columnWidths[2])} {new string('-', columnWidths[3])}"); try { foreach (var cert in ListCerts(GetVpnCaFiles()) .OrderBy(c => c.Name.ToLowerInvariant()) .ThenBy(c => c.ValidUntil)) { if (cert.Name == "ca" || cert.Name == "server") { continue; } var status = cert.IsValid ? "Valid" : "Revoked"; Console.WriteLine($"{PadRight(status, columnWidths[0])} {PadRight(cert.ValidUntil.ToString("MM-dd-yyyy HH:mm:ss"), columnWidths[1])} {PadRight(cert.Thumbprint, columnWidths[2])} {cert.Name}"); } } finally { HiveHelper.CloseHive(); } }
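The listing above calls a PadRight(value, width) helper that isn't included in this snippet. A minimal sketch of what that helper presumably does, assuming it simply pads values out to the column widths computed above (the implementation is an assumption, not the original):

// Hypothetical helper assumed by UserList(): pads a value to a fixed column width.
private static string PadRight(string value, int width)
{
    // Guard against null and defer to the built-in string padding. The column
    // widths above are sized for the longest expected values, so no truncation
    // is attempted here.
    return (value ?? string.Empty).PadRight(width);
}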
/// <inheritdoc/> public override void Run(CommandLine commandLine) { if (commandLine.Arguments.Length < 1) { Console.Error.WriteLine("*** ERROR: USER@HIVE is required."); Program.Exit(1); } var login = HiveHelper.SplitLogin(commandLine.Arguments[0]); if (!login.IsOK) { Console.Error.WriteLine($"*** ERROR: Invalid username/hive [{commandLine.Arguments[0]}]. Expected something like: USER@HIVE"); Program.Exit(1); } var username = login.Username; var hiveName = login.HiveName; var hiveLoginPath = Program.GetHiveLoginPath(username, hiveName); if (File.Exists(hiveLoginPath)) { var outputPath = Path.GetFullPath(Path.GetFileName(hiveLoginPath)); var loginJson = File.ReadAllText(hiveLoginPath); File.WriteAllText(outputPath, loginJson); Console.Error.WriteLine($"Login exported to: {outputPath}"); } else { Console.Error.WriteLine($"*** ERROR: Login [{login.Username}@{login.HiveName}] does not exist."); return; } }
/// <summary> /// Returns a Couchbase bucket connection using specified settings and a Docker secret. /// </summary> /// <param name="settings">The Couchbase settings.</param> /// <param name="secretName">The Docker secret name.</param> /// <returns>The connected <see cref="IBucket"/>.</returns> public static IBucket ConnectBucket(this CouchbaseSettings settings, string secretName) { Covenant.Requires <ArgumentNullException>(!string.IsNullOrEmpty(secretName)); var credentials = NeonHelper.JsonDeserialize <Credentials>(HiveHelper.GetSecret(secretName)); return(global::Couchbase.CouchbaseExtensions.OpenBucket(settings, credentials)); }
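For context, here is a hedged usage sketch of ConnectBucket. The settings file path, secret name, and document key are hypothetical, the snippet assumes it runs inside an async method, and IBucket/GetAsync come from the Couchbase .NET SDK:

// Hypothetical usage: deserialize Couchbase settings from a local file and connect
// using credentials stored in a Docker secret named [db-credentials].
var settings = NeonHelper.JsonDeserialize<CouchbaseSettings>(File.ReadAllText("couchbase-settings.json"));

using (var bucket = settings.ConnectBucket("db-credentials"))
{
    // Fetch a document by key; [result.Value] holds the deserialized content.
    var result = await bucket.GetAsync<string>("greeting");

    Console.WriteLine(result.Value);
}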
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); log.LogInfo(() => $"Starting [{serviceName}]"); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); // Create process terminator to handle termination signals. terminator = new ProcessTerminator(log); try { var commandLine = new CommandLine(args); var command = commandLine.Arguments.ElementAtOrDefault(0); if (command == null) { log.LogError("usage: vegomatic COMMAND ARGS..."); Program.Exit(1, immediate: true); } switch (command) { case "cephfs": await new CephFS().ExecAsync(commandLine.Shift(1)); break; case "issue-mntc": await new IssueMntc().ExecAsync(commandLine.Shift(1)); break; default: case "test-server": await new TestServer().ExecAsync(commandLine.Shift(1)); break; } } catch (Exception e) { log.LogCritical(e); Program.Exit(1); return; } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); return; }
/// <summary> /// Returns a RabbitMQ cluster connection using the specified settings and credentials /// loaded from a Docker secret. This works only for Docker services where the /// Docker secret was mounted into the service containers. /// </summary> /// <param name="settings">The HiveMQ settings.</param> /// <param name="secretName">The local name of the Docker secret holding the credentials.</param> /// <param name="dispatchConsumersAsync">Optionally enables <c>async</c> message consumers. This defaults to <c>false</c>.</param> /// <returns>The RabbitMQ <see cref="IConnection"/>.</returns> /// <remarks> /// The credentials must be formatted as JSON as serialized by the <see cref="Credentials"/> /// class. /// </remarks> public static IConnection ConnectUsingSecret(this HiveMQSettings settings, string secretName, bool dispatchConsumersAsync = false) { Covenant.Requires <ArgumentNullException>(!string.IsNullOrEmpty(settings.VirtualHost)); Covenant.Requires <ArgumentNullException>(settings.AmqpHosts != null && settings.AmqpHosts.Count > 0); Covenant.Requires <ArgumentNullException>(!string.IsNullOrEmpty(secretName)); var credentials = NeonHelper.JsonDeserialize <Credentials>(HiveHelper.GetSecret(secretName)); // Pass [dispatchConsumersAsync] through so async consumer dispatch is enabled on the underlying connection. return(new RabbitMQConnection(settings.ConnectRabbitMQ(credentials, dispatchConsumersAsync))); }
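A similar hedged usage sketch for ConnectUsingSecret follows. The settings source, secret name, and queue name are hypothetical, and the snippet assumes the returned IConnection exposes the standard RabbitMQ.Client surface (CreateModel, QueueDeclare, BasicPublish):

// Hypothetical usage: settings would normally come from Consul or service configuration.
var settings = NeonHelper.JsonDeserialize<HiveMQSettings>(File.ReadAllText("hivemq-settings.json"));

using (var connection = settings.ConnectUsingSecret("hivemq-credentials"))
using (var channel = connection.CreateModel())
{
    // Declare a transient queue and publish a trivial message.
    channel.QueueDeclare("hello", durable: false, exclusive: false, autoDelete: true, arguments: null);
    channel.BasicPublish(exchange: "", routingKey: "hello", basicProperties: null, body: Encoding.UTF8.GetBytes("world"));
}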
/// <inheritdoc/> public override void Run(CommandLine commandLine) { if (commandLine.HasHelpOption) { Console.WriteLine(usage); Program.Exit(0); } HiveHelper.CleanHiveReferences(); }
/// <summary> /// Copies the current CRL to each of the OpenVPN servers. /// </summary> private void UpdateCRL() { DirectNotAllowed(); RootLogin(); try { var vpnCaFiles = GetVpnCaFiles(); // Initialize the file paths. // // IMPORTANT: // // Do not change these file names because the [VpnCaFiles] class // depends on this naming convention. Directory.CreateDirectory(caFolder); vpnCaFiles.Extract(caFolder); var indexPath = Path.Combine(caFolder, "index.txt"); var caSignCnfPath = Path.Combine(caFolder, "ca-sign.cnf"); var caCnfPath = Path.Combine(caFolder, "ca.cnf"); var caKeyPath = Path.Combine(caFolder, "ca.key"); var caReqPath = Path.Combine(caFolder, "ca.req"); var caCrtPath = Path.Combine(caFolder, "ca.crt"); var dhParamPath = Path.Combine(caFolder, "dhparam.pem"); var serverCnfPath = Path.Combine(caFolder, "server.cnf"); var serverKeyPath = Path.Combine(caFolder, "server.key"); var serverReqPath = Path.Combine(caFolder, "server.req"); var serverCrtPath = Path.Combine(caFolder, "server.crt"); var taKeyPath = Path.Combine(caFolder, "ta.key"); var crlnumberPath = Path.Combine(caFolder, "crlnumber"); var crlPath = Path.Combine(caFolder, "crl.pem"); // Write the updated CRL to each manager. var crlText = vpnCaFiles.GetFile("crl.pem"); Console.WriteLine(); foreach (var manager in hive.Managers) { Console.WriteLine($"*** {manager.Name}: Updating"); manager.UploadText("/etc/openvpn/crl.pem", crlText); manager.SudoCommand("chmod 664 /etc/openvpn/crl.pem"); } } finally { HiveHelper.CloseHive(); } }
/// <summary> /// Initializes the hive login and hive proxy and verifies that the /// current user has root privileges and that the hive has VPN enabled. /// </summary> private void RootLogin() { hiveLogin = Program.ConnectHive(); if (!hiveLogin.Definition.Vpn.Enabled) { Console.Error.WriteLine(VpnNotEnabled); Program.Exit(1); } if (string.IsNullOrEmpty(hiveLogin.VpnCredentials.CaZipKey)) { Console.Error.WriteLine(MustHaveRootPrivileges); Program.Exit(1); } hive = HiveHelper.OpenHive(hiveLogin); }
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); log.LogInfo(() => $"Starting [{serviceName}]"); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); terminator = new ProcessTerminator(log); try { // Establish the hive connections. if (NeonHelper.IsDevWorkstation) { HiveHelper.OpenHiveRemote(); } else { HiveHelper.OpenHive(); } await RunAsync(); } catch (Exception e) { log.LogCritical(e); Program.Exit(1); } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); }
public override NativeResultCode DeleteKey(RegistryRequest request) { // Overridden: delete the real key first. if (HiveHelper.IsHiveHandle(request.Handle)) { return(NativeResultCode.AccessDenied); } if (!IsKnownKey(request)) { return(NativeResultCode.InvalidHandle); } var index = request.KeyFullPath.LastIndexOf(@"\"); var subKeyName = request.KeyFullPath.Substring(index + 1); var keyFullPath = request.KeyFullPath.Substring(0, index); var registryKey = HostRegistry.OpenKey(keyFullPath, true); try { if (registryKey != null) { registryKey.DeleteSubKeyTree(subKeyName); } } catch (ArgumentException) { // The key was not found in the real registry; call base to delete it from the buffer. base.DeleteKey(request); return(NativeResultCode.FileNotFound); } catch { return(NativeResultCode.AccessDenied); } // Real key is deleted, now delete the virtual one. return(base.DeleteKey(request)); }
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); log.LogInfo(() => $"Starting [{serviceName}]"); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); // Parse the environment variable settings. var environment = new EnvironmentParser(log); pollInterval = environment.Get("POLL_INTERVAL", TimeSpan.FromSeconds(5), validator: v => v > TimeSpan.Zero); verifyInterval = environment.Get("VERIFY_INTERVAL", TimeSpan.FromMinutes(5), validator: v => v > TimeSpan.Zero); // Create process terminator to handle process termination signals. terminator = new ProcessTerminator(log); try { // Establish the hive connections. if (NeonHelper.IsDevWorkstation) { hive = HiveHelper.OpenHiveRemote(); // For testing and development, we're going to write a test // hosts file to [%NF_TEMP\neon-dns-hosts.txt] so we can see // what's happening outside of a hive. powerDnsHostsPath = Environment.ExpandEnvironmentVariables("%NF_TEMP%\\neon-dns-hosts.txt"); File.WriteAllText(powerDnsHostsPath, $@"# PowerDNS Recursor authoritatively answers for [*.HIVENAME.nhive.io] hostnames. # on the local node using these mappings. 10.0.0.30 {HiveHelper.Hive.Definition.Hostnames.Consul} # Internal hive Vault mappings: 10.0.0.30 {HiveHelper.Hive.Definition.Hostnames.Vault} 10.0.0.30 {HiveHelper.Hive.FirstManager.Name}.{HiveHelper.Hive.Definition.Hostnames.Vault} # Internal hive registry cache related mappings: 10.0.0.30 {HiveHelper.Hive.FirstManager.Name}.{HiveHelper.Hive.Definition.Hostnames.RegistryCache} # Internal hive log pipeline related mappings: 10.0.0.30 {HiveHelper.Hive.Definition.Hostnames.LogEsData} "); // We're also going to create a temporary folder for the reload signal. reloadSignalPath = Environment.ExpandEnvironmentVariables("%NF_TEMP%\\neon-dns\\reload"); Directory.CreateDirectory(Path.GetDirectoryName(reloadSignalPath)); } else { hive = HiveHelper.OpenHive(); } // Ensure that we're running on a manager node. This is required because // we need to be able to update the [/etc/powerdns/hosts] files deployed // on the managers. var nodeRole = Environment.GetEnvironmentVariable("NEON_NODE_ROLE"); if (string.IsNullOrEmpty(nodeRole)) { log.LogCritical(() => "Service does not appear to be running on a neonHIVE."); Program.Exit(1, immediate: true); } if (!string.Equals(nodeRole, NodeRole.Manager, StringComparison.OrdinalIgnoreCase)) { log.LogCritical(() => $"[neon-dns] service is running on a [{nodeRole}] hive node. Only [{NodeRole.Manager}] nodes are supported."); Program.Exit(1, immediate: true); } // Ensure that the [/etc/powerdns/hosts] file was mapped into the container. if (!File.Exists(powerDnsHostsPath)) { log.LogCritical(() => $"[neon-dns] service cannot locate [{powerDnsHostsPath}] on the host manager. Was this mounted to the container as read/write?"); Program.Exit(1, immediate: true); } // Open Consul and then start the main service task. log.LogDebug(() => $"Connecting: Consul"); using (consul = HiveHelper.OpenConsul()) { await RunAsync(); } } catch (Exception e) { log.LogCritical(e); Program.Exit(1); return; } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); return; }
/// <summary> /// Performs the Docker registry cache related configuration of the node. /// </summary> public void Configure(SshProxy <NodeDefinition> node) { // NOTE: // // We're going to configure the certificates even if the registry cache // isn't enabled so it'll be easier to upgrade the hive later. // For managers, upload the individual cache certificate and // private key files for managers [cache.crt] and [cache.key] at // [/etc/neon-registry-cache/]. This directory will be // mapped into the cache container. // // Then create the cache's data volume and start the manager's // Registry cache container. if (node.Metadata.IsManager) { node.InvokeIdempotentAction("setup/registrycache", () => { // Copy the registry cache certificate and private key to // // /etc/neon-registry-cache node.Status = "run: registry-cache-server-certs.sh"; var copyCommand = new CommandBundle("./registry-cache-server-certs.sh"); var sbCopyScript = new StringBuilder(); sbCopyScript.AppendLine("mkdir -p /etc/neon-registry-cache"); sbCopyScript.AppendLine("chmod 750 /etc/neon-registry-cache"); copyCommand.AddFile($"cache.crt", hive.HiveLogin.HiveCertificate.CertPem); copyCommand.AddFile($"cache.key", hive.HiveLogin.HiveCertificate.KeyPem); sbCopyScript.AppendLine($"cp cache.crt /etc/neon-registry-cache/cache.crt"); sbCopyScript.AppendLine($"cp cache.key /etc/neon-registry-cache/cache.key"); sbCopyScript.AppendLine($"chmod 640 /etc/neon-registry-cache/*"); copyCommand.AddFile("registry-cache-server-certs.sh", sbCopyScript.ToString(), isExecutable: true); node.SudoCommand(copyCommand); // Upload the cache certificates to every hive node at: // // /etc/docker/certs.d/<hostname>:{HiveHostPorts.RegistryCache}/ca.crt // // and then have Linux reload the trusted certificates. node.InvokeIdempotentAction("setup/registrycache-cert", () => { node.Status = "upload: registry cache certs"; var uploadCommand = new CommandBundle("./registry-cache-client-certs.sh"); var sbUploadScript = new StringBuilder(); uploadCommand.AddFile($"hive-neon-registry-cache.crt", hive.HiveLogin.HiveCertificate.CertPem); foreach (var manager in hive.Definition.SortedManagers) { var cacheHostName = hive.Definition.GetRegistryCacheHost(manager); sbUploadScript.AppendLine($"mkdir -p /etc/docker/certs.d/{cacheHostName}:{HiveHostPorts.DockerRegistryCache}"); sbUploadScript.AppendLine($"cp hive-neon-registry-cache.crt /etc/docker/certs.d/{cacheHostName}:{HiveHostPorts.DockerRegistryCache}/ca.crt"); } uploadCommand.AddFile("registry-cache-client-certs.sh", sbUploadScript.ToString(), isExecutable: true); node.SudoCommand(uploadCommand); }); // Start the registry cache containers if enabled for the hive. if (hive.Definition.Docker.RegistryCache) { // Create the registry data volume. node.Status = "create: registry cache volume"; node.SudoCommand(new CommandBundle("docker-volume-create \"neon-registry-cache\"")); // Start the registry cache using the required Docker public registry // credentials, if any. var publicRegistryCredentials = hive.Definition.Docker.Registries.SingleOrDefault(r => HiveHelper.IsDockerPublicRegistry(r.Registry)); publicRegistryCredentials = publicRegistryCredentials ?? new RegistryCredentials() { Registry = HiveConst.DockerPublicRegistry }; publicRegistryCredentials.Username = publicRegistryCredentials.Username ?? string.Empty; publicRegistryCredentials.Password = publicRegistryCredentials.Password ?? 
string.Empty; node.Status = "start: neon-registry-cache"; var registry = publicRegistryCredentials.Registry; if (string.IsNullOrEmpty(registry) || registry.Equals("docker.io", StringComparison.InvariantCultureIgnoreCase)) { registry = "registry-1.docker.io"; } ServiceHelper.StartContainer(node, "neon-registry-cache", hive.Definition.Image.RegistryCache, RunOptions.FaultOnError | hive.SecureRunOptions, new CommandBundle( "docker run", "--name", "neon-registry-cache", "--detach", "--restart", "always", "--publish", $"{HiveHostPorts.DockerRegistryCache}:5000", "--volume", "/etc/neon-registry-cache:/etc/neon-registry-cache:ro", // Registry cache certificates folder "--volume", "neon-registry-cache:/var/lib/neon-registry-cache", "--env", $"HOSTNAME={node.Name}.{hive.Definition.Hostnames.RegistryCache}", "--env", $"REGISTRY=https://{registry}", "--env", $"USERNAME={publicRegistryCredentials.Username}", "--env", $"PASSWORD={publicRegistryCredentials.Password}", "--env", "LOG_LEVEL=info", ServiceHelper.ImagePlaceholderArg)); } }); node.Status = string.Empty; } }
/// <summary> /// Configures the global environment variables that describe the configuration /// of the server within the hive. /// </summary> /// <param name="node">The server to be updated.</param> /// <param name="hiveDefinition">The hive definition.</param> public static void ConfigureEnvironmentVariables(SshProxy <NodeDefinition> node, HiveDefinition hiveDefinition) { node.Status = "environment variables"; // We're going to append the new variables to the existing Linux [/etc/environment] file. var sb = new StringBuilder(); // Append all of the existing environment variables except for those // whose names start with "NEON_" to make the operation idempotent. // // Note that we're going to special case PATH to add any Neon // related directories. using (var currentEnvironmentStream = new MemoryStream()) { node.Download("/etc/environment", currentEnvironmentStream); currentEnvironmentStream.Position = 0; using (var reader = new StreamReader(currentEnvironmentStream)) { foreach (var line in reader.Lines()) { if (line.StartsWith("PATH=")) { if (!line.Contains(HiveHostFolders.Tools)) { sb.AppendLine(line + $":{HiveHostFolders.Tools}"); } else { sb.AppendLine(line); } } else if (!line.StartsWith("NEON_")) { sb.AppendLine(line); } } } } // Add the global neonHIVE related environment variables. sb.AppendLine($"NEON_HIVE_PROVISIONER={hiveDefinition.Provisioner}"); sb.AppendLine($"NEON_HIVE={hiveDefinition.Name}"); sb.AppendLine($"NEON_DATACENTER={hiveDefinition.Datacenter.ToLowerInvariant()}"); sb.AppendLine($"NEON_ENVIRONMENT={hiveDefinition.Environment.ToString().ToLowerInvariant()}"); if (hiveDefinition.Hosting != null) { sb.AppendLine($"NEON_HOSTING={hiveDefinition.Hosting.Environment.ToMemberString().ToLowerInvariant()}"); } sb.AppendLine($"NEON_NODE_NAME={node.Name}"); sb.AppendLine($"NEON_NODE_FS={hiveDefinition.HiveFS.Enabled.ToString().ToLowerInvariant()}"); if (node.Metadata != null) { sb.AppendLine($"NEON_NODE_ROLE={node.Metadata.Role}"); sb.AppendLine($"NEON_NODE_IP={node.Metadata.PrivateAddress}"); sb.AppendLine($"NEON_NODE_SSD={node.Metadata.Labels.StorageSSD.ToString().ToLowerInvariant()}"); sb.AppendLine($"NEON_NODE_SWAP={node.Metadata.Labels.ComputeSwap.ToString().ToLowerInvariant()}"); } var sbNameservers = new StringBuilder(); foreach (var nameServer in hiveDefinition.Network.Nameservers) { sbNameservers.AppendWithSeparator(nameServer, ","); } sb.AppendLine($"NEON_UPSTREAM_DNS=\"{sbNameservers}\""); sb.AppendLine($"NEON_APT_PROXY={HiveHelper.GetPackageProxyReferences(hiveDefinition)}"); sb.AppendLine($"NEON_ARCHIVE_FOLDER={HiveHostFolders.Archive}"); sb.AppendLine($"NEON_BIN_FOLDER={HiveHostFolders.Bin}"); sb.AppendLine($"NEON_CONFIG_FOLDER={HiveHostFolders.Config}"); sb.AppendLine($"NEON_EXEC_FOLDER={HiveHostFolders.Exec}"); sb.AppendLine($"NEON_SCRIPTS_FOLDER={HiveHostFolders.Scripts}"); sb.AppendLine($"NEON_SECRETS_FOLDER={HiveHostFolders.Secrets}"); sb.AppendLine($"NEON_SETUP_FOLDER={HiveHostFolders.Setup}"); sb.AppendLine($"NEON_SOURCE_FOLDER={HiveHostFolders.Source}"); sb.AppendLine($"NEON_STATE_FOLDER={HiveHostFolders.State}"); sb.AppendLine($"NEON_TMPFS_FOLDER={HiveHostFolders.Tmpfs}"); sb.AppendLine($"NEON_TOOLS_FOLDER={HiveHostFolders.Tools}"); // Append Consul and Vault addresses. // All nodes will be configured such that host processes using the HashiCorp Consul // CLI will access the Consul cluster via local Consul instance. This will be a // server for manager nodes and a proxy for workers and pets. 
if (hiveDefinition.Consul.Tls) { sb.AppendLine($"CONSUL_HTTP_SSL=true"); sb.AppendLine($"CONSUL_HTTP_ADDR=" + $"{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}"); sb.AppendLine($"CONSUL_HTTP_FULLADDR=" + $"https://{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}"); } else { sb.AppendLine($"CONSUL_HTTP_SSL=false"); sb.AppendLine($"CONSUL_HTTP_ADDR=" + $"{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}"); sb.AppendLine($"CONSUL_HTTP_FULLADDR=" + $"http://{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}"); } // All nodes will be configured such that host processes using the HashiCorp Vault // CLI will access the Vault cluster via the [neon-proxy-vault] proxy service // by default. sb.AppendLine($"VAULT_ADDR={hiveDefinition.VaultProxyUri}"); if (node.Metadata != null) { if (node.Metadata.IsManager) { // Manager hosts may use the [VAULT_DIRECT_ADDR] environment variable to // access Vault without going through the [neon-proxy-vault] proxy. This // points to the Vault instance running locally. // // This is useful when configuring Vault. sb.AppendLine($"VAULT_DIRECT_ADDR={hiveDefinition.GetVaultDirectUri(node.Name)}"); } else { sb.AppendLine($"VAULT_DIRECT_ADDR="); } } // Upload the new environment to the server. node.UploadText("/etc/environment", sb.ToString(), tabStop: 4); }
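To make the result concrete, a generated /etc/environment might end up looking roughly like the excerpt below; every value shown (hive name, addresses, folders, ports) is hypothetical:

PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/neon/tools
NEON_HIVE=myhive
NEON_DATACENTER=dc0
NEON_NODE_NAME=manager-0
NEON_NODE_ROLE=manager
NEON_NODE_IP=10.0.0.30
NEON_UPSTREAM_DNS="8.8.8.8,8.8.4.4"
CONSUL_HTTP_SSL=true
CONSUL_HTTP_ADDR=consul.myhive.nhive.io:8500
CONSUL_HTTP_FULLADDR=https://consul.myhive.nhive.io:8500
VAULT_ADDR=https://neon-vault.myhive.nhive.io:8200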
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); log.LogInfo(() => $"Starting [{serviceName}]"); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); // Create process terminator to handle process termination signals. terminator = new ProcessTerminator(log); terminator.AddHandler( () => { // Cancel any operations in progress. exit = true; terminator.CancellationTokenSource.Cancel(); // This gracefully closes the [proxyNotifyChannel] so HiveMQ will // promptly remove the associated queue. if (proxyNotifyChannel != null) { proxyNotifyChannel.Dispose(); proxyNotifyChannel = null; } try { NeonHelper.WaitFor(() => !processingConfigs, terminator.Timeout); log.LogInfo(() => "Tasks stopped gracefully."); } catch (TimeoutException) { log.LogWarn(() => $"Tasks did not stop within [{terminator.Timeout}]."); } }); // Establish the hive connections. if (NeonHelper.IsDevWorkstation) { var vaultCredentialsSecret = "neon-proxy-manager-credentials"; Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret); hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, "neon-proxy-manager")); } else { hive = HiveHelper.OpenHive(); } // [neon-proxy-manager] requires access to the [IHostingManager] implementation for the // current environment, so we'll need to initialize the hosting loader. HostingLoader.Initialize(); try { // Log into Vault using a Docker secret. var vaultCredentialsSecret = Environment.GetEnvironmentVariable("VAULT_CREDENTIALS"); if (string.IsNullOrEmpty(vaultCredentialsSecret)) { log.LogCritical("[VAULT_CREDENTIALS] environment variable does not exist."); Program.Exit(1, immediate: true); } var vaultSecret = HiveHelper.GetSecret(vaultCredentialsSecret); if (string.IsNullOrEmpty(vaultSecret)) { log.LogCritical($"Cannot read Docker secret [{vaultCredentialsSecret}]."); Program.Exit(1, immediate: true); } var vaultCredentials = HiveCredentials.ParseJson(vaultSecret); if (vaultCredentials == null) { log.LogCritical($"Cannot parse Docker secret [{vaultCredentialsSecret}]."); Program.Exit(1, immediate: true); } // Open the hive data services and then start the main service task. log.LogInfo(() => $"Connecting: Vault"); using (vault = HiveHelper.OpenVault(vaultCredentials)) { log.LogInfo(() => $"Connecting: Consul"); using (consul = HiveHelper.OpenConsul()) { log.LogInfo(() => $"Connecting: Docker"); using (docker = HiveHelper.OpenDocker()) { log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel"); // NOTE: // // We're passing [useBootstrap=true] here so that the HiveMQ client will // connect directly to the HiveMQ cluster nodes as opposed to routing // traffic through the private traffic manager. This is necessary because // the load balancers rely on HiveMQ to broadcast update notifications. // // One consequence of this is that this service will need to be restarted // whenever HiveMQ instances are relocated to different hive hosts. // We're going to monitor for changes to the HiveMQ bootstrap settings // and gracefully terminate the process when this happens. We're then // depending on Docker to restart the process so we'll be able to pick // up the change. 
hive.HiveMQ.Internal.HiveMQBootstrapChanged += (s, a) => { log.LogInfo("HiveMQ bootstrap settings change detected. Terminating service with [exitcode=-1] expecting that Docker will restart it."); // Use ExitCode=-1 so that we'll restart even if the service/container // was not configured with [restart=always]. terminator.Exit(-1); }; using (proxyNotifyChannel = hive.HiveMQ.Internal.GetProxyNotifyChannel(useBootstrap: true).Open()) { // Read the service settings, initializing their default values // if they don't already exist. if (!await consul.KV.Exists(certWarnDaysKey)) { log.LogInfo($"Persisting setting [{certWarnDaysKey}=30.0]"); await consul.KV.PutDouble(certWarnDaysKey, 30.0); } if (!await consul.KV.Exists(cacheRemoveSecondsKey)) { log.LogInfo($"Persisting setting [{cacheRemoveSecondsKey}=300.0]"); await consul.KV.PutDouble(cacheRemoveSecondsKey, 300.0); } if (!await consul.KV.Exists(failsafeSecondsKey)) { log.LogInfo($"Persisting setting [{failsafeSecondsKey}=120.0]"); await consul.KV.PutDouble(failsafeSecondsKey, 120); } certWarnTime = TimeSpan.FromDays(await consul.KV.GetDouble(certWarnDaysKey)); cacheRemoveDelay = TimeSpan.FromSeconds(await consul.KV.GetDouble(cacheRemoveSecondsKey)); failsafeInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(failsafeSecondsKey)); log.LogInfo(() => $"Using setting [{certWarnDaysKey}={certWarnTime.TotalDays}]"); log.LogInfo(() => $"Using setting [{cacheRemoveSecondsKey}={cacheRemoveDelay.TotalSeconds}]"); log.LogInfo(() => $"Using setting [{failsafeSecondsKey}={failsafeInterval.TotalSeconds}]"); // Run the service tasks. var tasks = new List <Task>(); tasks.Add(ConfigGeneratorAsync()); tasks.Add(FailsafeBroadcasterAsync()); await NeonHelper.WaitAllAsync(tasks); } } } } } catch (Exception e) { log.LogCritical(e); Program.Exit(1); return; } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); return; }
/// <inheritdoc/> public override void Run(CommandLine commandLine) { if (commandLine.HasHelpOption) { Console.WriteLine(usage); Program.Exit(0); } if (commandLine.Arguments.Length < 2) { Console.WriteLine(usage); Program.Exit(1); } if (Environment.GetEnvironmentVariable("NEON_RUN_ENV") != null) { Console.Error.WriteLine("*** ERROR: [neon run ...] cannot be executed recursively."); Program.Exit(1); } var commandSplit = Program.CommandLine.Split(); var leftCommandLine = commandSplit.Left.Shift(1); var rightCommandLine = commandSplit.Right; if (rightCommandLine == null || rightCommandLine.Arguments.Length == 0) { Console.Error.WriteLine("*** ERROR: Expecting a [--] argument followed by a shell command."); Program.Exit(1); } var orgDirectory = Directory.GetCurrentDirectory(); var runFolder = Path.Combine(HiveHelper.GetRunFolder(), Guid.NewGuid().ToString("D")); var runEnvPath = Path.Combine(runFolder, "__runenv.txt"); var exitCode = 1; try { // Create the temporary run folder and make it the current directory. Directory.CreateDirectory(runFolder); // We need to load variables from any files specified on the command line, // decrypting them as required. var allVars = new Dictionary <string, string>(StringComparer.InvariantCultureIgnoreCase); if (leftCommandLine.Arguments.Length > 0) { bool askVaultPass = leftCommandLine.HasOption("--ask-vault-pass"); string tempPasswordPath = null; string passwordName = null; try { if (askVaultPass) { // Note that [--ask-vault-pass] takes precedence over [--vault-password-file]. var password = NeonHelper.ReadConsolePassword("Vault password: "******"D"); tempPasswordPath = Path.Combine(passwordsFolder, $"{guid}.tmp"); passwordName = Path.GetFileName(tempPasswordPath); File.WriteAllText(tempPasswordPath, password); } else { passwordName = leftCommandLine.GetOption("--vault-password-file"); } if (!string.IsNullOrEmpty(passwordName)) { AnsibleCommand.VerifyPassword(passwordName); } // Decrypt the variables files, add the variables to the environment // and also to the [allVars] dictionary which we'll use below to // create the run variables file. foreach (var varFile in leftCommandLine.Arguments) { var varContents = File.ReadAllText(varFile); if (varContents.StartsWith("$ANSIBLE_VAULT;")) { // The variable file is encrypted so we're going to recursively invoke // the following command to decrypt it: // // neon ansible vault view -- --vault-password=NAME VARS-PATH // // This uses the password to decrypt the variables to STDOUT. if (string.IsNullOrEmpty(passwordName)) { Console.Error.WriteLine($"*** ERROR: [{varFile}] is encrypted. Use [--ask-vault-pass] or [--vault-password-file] to specify the password."); Program.Exit(1); } var result = Program.ExecuteRecurseCaptureStreams( new object[] { "ansible", "vault", "--", "view", $"--vault-password-file={passwordName}", varFile }); if (result.ExitCode != 0) { Console.Error.Write(result.AllText); Program.Exit(result.ExitCode); } varContents = NeonHelper.StripAnsibleWarnings(result.OutputText); } // [varContents] now holds the decrypted variables formatted as YAML. // We're going to parse this and set the appropriate environment // variables. // // Note that we're going to ignore variables with multi-line values. 
var yaml = new YamlStream(); var vars = new List <KeyValuePair <string, string> >(); try { yaml.Load(varContents); } catch (Exception e) { throw new HiveException($"Unable to parse YAML from decrypted [{varFile}]: {NeonHelper.ExceptionError(e)}", e); } if (yaml.Documents.FirstOrDefault() != null) { ParseYamlVariables(vars, (YamlMappingNode)yaml.Documents.First().RootNode); } foreach (var variable in vars) { if (variable.Value != null && variable.Value.Contains('\n')) { continue; // Ignore variables with multi-line values. } allVars[variable.Key] = variable.Value; Environment.SetEnvironmentVariable(variable.Key, variable.Value); } } } finally { if (tempPasswordPath != null && File.Exists(tempPasswordPath)) { File.Delete(tempPasswordPath); // Don't need this any more. } } } // We need to generate the NEON_RUN_ENV file defining the environment variables // loaded by the command. This file format is compatible with the Docker // [run] command's [--env-file=PATH] option and will be used by nested calls to // [neon] to pass these variables through to the tool container as required. Environment.SetEnvironmentVariable("NEON_RUN_ENV", runEnvPath); using (var runEnvWriter = new StreamWriter(runEnvPath, false, Encoding.UTF8)) { foreach (var item in allVars) { runEnvWriter.WriteLine($"{item.Key}={item.Value}"); } } // Execute the command in the appropriate shell for the current workstation. var sbCommand = new StringBuilder(); foreach (var arg in rightCommandLine.Items) { if (sbCommand.Length > 0) { sbCommand.Append(' '); } if (arg.Contains(' ')) { sbCommand.Append("\"" + arg + "\""); } else { sbCommand.Append(arg); } } exitCode = NeonHelper.ExecuteShell(sbCommand.ToString()); } finally { // Restore the current directory. Directory.SetCurrentDirectory(orgDirectory); // Cleanup if (Directory.Exists(runFolder)) { Directory.Delete(runFolder, true); } } Program.Exit(exitCode); }
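As an illustration, the generated __runenv.txt is just KEY=VALUE lines in the same format accepted by [docker run --env-file=PATH]; with hypothetical variables it might contain:

MYSQL_USERNAME=test
MYSQL_PASSWORD=not-a-real-password
SERVICE_PORT=8080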
/// <summary> /// Handles polling the Docker swarm for hive node information and updating the hive /// definition and hash when changes are detected. /// </summary> /// <returns>The tracking <see cref="Task"/>.</returns> private static async Task SwarmPollerAsync() { var periodicTask = new AsyncPeriodicTask( swarmPollInterval, onTaskAsync: async() => { try { log.LogDebug(() => "SWARM-POLLER: Polling"); // Retrieve the current hive definition from Consul if we don't already // have it or if it's different from what we've cached. cachedHiveDefinition = await HiveHelper.GetDefinitionAsync(cachedHiveDefinition, terminator.CancellationToken); // Retrieve the swarm nodes from Docker. log.LogDebug(() => $"SWARM-POLLER: Querying [{docker.Settings.Uri}]"); var swarmNodes = await docker.NodeListAsync(); // Parse the node definitions from the swarm nodes and build a new definition // using the new nodes. Then compare the hashes of the cached and new hive definitions // and update Consul if they're different. var currentHiveDefinition = NeonHelper.JsonClone <HiveDefinition>(cachedHiveDefinition); currentHiveDefinition.NodeDefinitions.Clear(); foreach (var swarmNode in swarmNodes) { var nodeDefinition = NodeDefinition.ParseFromLabels(swarmNode.Labels); nodeDefinition.Name = swarmNode.Hostname; currentHiveDefinition.NodeDefinitions.Add(nodeDefinition.Name, nodeDefinition); } log.LogDebug(() => $"SWARM-POLLER: [{currentHiveDefinition.Managers.Count()}] managers and [{currentHiveDefinition.Workers.Count()}] workers in current hive definition."); // Hive pets are not part of the Swarm, so Docker won't return any information // about them. We'll read the pet definitions from [neon/global/pets-definition] in // Consul. We'll assume that there are no pets if this key doesn't exist for // backwards compatibility and robustness. var petsJson = await HiveHelper.Consul.KV.GetStringOrDefault($"{HiveConst.GlobalKey}/{HiveGlobals.PetsDefinition}", terminator.CancellationToken); if (petsJson == null) { log.LogDebug(() => $"SWARM-POLLER: [{HiveConst.GlobalKey}/{HiveGlobals.PetsDefinition}] Consul key not found. Assuming no pets."); } else { if (!string.IsNullOrWhiteSpace(petsJson)) { // Parse the pet node definitions and add them to the hive definition. var petDefinitions = NeonHelper.JsonDeserialize <Dictionary <string, NodeDefinition> >(petsJson); foreach (var item in petDefinitions) { currentHiveDefinition.NodeDefinitions.Add(item.Key, item.Value); } log.LogDebug(() => $"SWARM-POLLER: [{HiveConst.GlobalKey}/{HiveGlobals.PetsDefinition}] defines [{petDefinitions.Count}] pets."); } else { log.LogDebug(() => $"SWARM-POLLER: [{HiveConst.GlobalKey}/{HiveGlobals.PetsDefinition}] is empty."); } } // Fetch the hive summary and add it to the hive definition. currentHiveDefinition.Summary = HiveSummary.FromHive(hive, currentHiveDefinition); // Determine if the definition has changed. currentHiveDefinition.ComputeHash(); if (currentHiveDefinition.Hash != cachedHiveDefinition.Hash) { log.LogInfo(() => "SWARM-POLLER: Hive definition has CHANGED. Updating Consul."); await HiveHelper.PutDefinitionAsync(currentHiveDefinition, cancellationToken: terminator.CancellationToken); cachedHiveDefinition = currentHiveDefinition; } else { log.LogDebug(() => "SWARM-POLLER: Hive definition is UNCHANGED."); } } catch (KeyNotFoundException) { // We'll see this when no hive definition has been persisted to the // hive. This is a serious problem. This is configured during setup // and there should always be a definition in Consul. 
log.LogError(() => $"SWARM-POLLER: No hive definition has been found at [{hiveDefinitionKey}] in Consul. This is a serious error that will have to be corrected manually."); } log.LogDebug(() => "SWARM-POLLER: Finished Poll"); return(await Task.FromResult(false)); }, onExceptionAsync: async e => { log.LogError("SWARM-POLLER", e); return(await Task.FromResult(false)); }, onTerminateAsync: async() => { log.LogInfo(() => "SWARM-POLLER: Terminating"); await Task.CompletedTask; }); terminator.AddDisposable(periodicTask); await periodicTask.Run(); }
/// <summary> /// Implements the service as a <see cref="Task"/>. /// </summary> /// <returns>The <see cref="Task"/>.</returns> private static async Task RunAsync() { var periodicTask = new AsyncPeriodicTask( pollInterval, onTaskAsync: async() => { log.LogDebug(() => "Starting poll"); // We're going to collect the [hostname --> address] mappings into // a specialized (semi-threadsafe) dictionary. var hostAddresses = new HostAddresses(); // Retrieve the current hive definition from Consul if we don't already // have it or it's different from what we've cached. hiveDefinition = await HiveHelper.GetDefinitionAsync(hiveDefinition, terminator.CancellationToken); log.LogDebug(() => $"Hive has [{hiveDefinition.NodeDefinitions.Count}] nodes."); // Add the [NAME.HIVENAME.nhive.io] definitions for each cluster node. foreach (var node in hiveDefinition.Nodes) { hostAddresses.Add($"{node.Name}.{hiveDefinition.Name}.nhive.io", IPAddress.Parse(node.PrivateAddress)); } // Read the DNS entry definitions from Consul and add the appropriate // host/addresses based on health checks, etc. var targetsResult = (await consul.KV.ListOrDefault <DnsEntry>(HiveConst.ConsulDnsEntriesKey + "/", terminator.CancellationToken)); List <DnsEntry> targets; if (targetsResult == null) { // The targets key wasn't found in Consul, so we're // going to assume that there are no targets. targets = new List <DnsEntry>(); } else { targets = targetsResult.ToList(); } log.LogDebug(() => $"Consul has [{targets.Count()}] DNS targets."); await ResolveTargetsAsync(hostAddresses, targets); // Generate a canonical [hosts.txt] file by sorting host entries by // hostname and then by IP address. // // Unhealthy hosts will be assigned the unrouteable [0.0.0.0] address. // The reason for this is subtle but super important. // // If we didn't do this, the DNS host would likely be resolved by a // public DNS service, perhaps returning the IP address of a production // endpoint. // // This could cause a disaster if the whole purpose of defining a local // DNS host was to redirect test traffic to a test service. If // the test service endpoints didn't report as healthy and [0.0.0.0] // wasn't set, then test traffic could potentially hit the production // endpoint and do serious damage. var sbHosts = new StringBuilder(); var mappingCount = 0; foreach (var host in hostAddresses.OrderBy(h => h.Key)) { foreach (var address in host.Value.OrderBy(a => a.ToString())) { sbHosts.AppendLineLinux($"{address,-15} {host.Key}"); mappingCount++; } } var unhealthyTargets = targets.Where(t => !hostAddresses.ContainsKey(t.Hostname) || hostAddresses[t.Hostname].Count == 0).ToList(); if (unhealthyTargets.Count > 0) { sbHosts.AppendLine(); sbHosts.AppendLine($"# [{unhealthyTargets.Count}] unhealthy DNS hosts:"); sbHosts.AppendLine(); var unhealthyAddress = "0.0.0.0"; foreach (var target in unhealthyTargets.OrderBy(h => h)) { sbHosts.AppendLineLinux($"{unhealthyAddress,-15} {target.Hostname}"); } } // Compute the MD5 hash and compare it to the hash persisted to // Consul (if any) to determine whether we need to update the // answers in Consul. 
var hostsTxt = sbHosts.ToString(); var hostsMD5 = NeonHelper.ComputeMD5(hostsTxt); var currentMD5 = await consul.KV.GetStringOrDefault(HiveConst.ConsulDnsHostsMd5Key, terminator.CancellationToken); if (currentMD5 == null) { currentMD5 = string.Empty; } if (hostsMD5 != currentMD5) { log.LogDebug(() => $"DNS answers have changed."); log.LogDebug(() => $"Writing [{mappingCount}] DNS answers to Consul."); // Update the Consul keys using a transaction. var operations = new List <KVTxnOp>() { new KVTxnOp(HiveConst.ConsulDnsHostsMd5Key, KVTxnVerb.Set) { Value = Encoding.UTF8.GetBytes(hostsMD5) }, new KVTxnOp(HiveConst.ConsulDnsHostsKey, KVTxnVerb.Set) { Value = Encoding.UTF8.GetBytes(hostsTxt) } }; await consul.KV.Txn(operations, terminator.CancellationToken); } log.LogDebug(() => "Finished poll"); return(await Task.FromResult(false)); }, onExceptionAsync: async e => { log.LogError(e); return(await Task.FromResult(false)); }, onTerminateAsync: async() => { log.LogInfo(() => "Terminating"); await Task.CompletedTask; }); terminator.AddDisposable(periodicTask); await periodicTask.Run(); }
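For illustration, the hosts text generated above (healthy mappings first, then unhealthy targets pinned to 0.0.0.0) might look like this; the hive, node, and target names are hypothetical:

10.0.0.30       manager-0.myhive.nhive.io
10.0.0.31       worker-0.myhive.nhive.io
10.0.0.40       web.test.myhive.nhive.io

# [1] unhealthy DNS hosts:

0.0.0.0         db.test.myhive.nhive.io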
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); log.LogInfo(() => $"Starting [{serviceName}]"); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); // Create process terminator to handle process termination signals. terminator = new ProcessTerminator(log); try { // Establish the hive connections. if (NeonHelper.IsDevWorkstation) { var secrets = new DebugSecrets(); // NOTE: // // Add your target hive's Vault credentials here for // manual debugging. Take care not to commit sensitive // credentials for production hives. // // You'll find this information in the ROOT hive login // for the target hive. secrets.Add("neon-hive-manager-vaultkeys", new VaultCredentials() { RootToken = "cd5831fa-86ec-cc22-b1f3-051f88147382", KeyThreshold = 1, UnsealKeys = new List <string>() { "8SgwdO/GwqJ7nyxT2tK2n1CCR3084kQVh7gEy8jNQh8=" } }); hive = HiveHelper.OpenHiveRemote(secrets); } else { hive = HiveHelper.OpenHive(sshCredentialsSecret: "neon-ssh-credentials"); } // Ensure that we're running on a manager node. We won't be able // to query swarm status otherwise. var nodeRole = Environment.GetEnvironmentVariable("NEON_NODE_ROLE"); if (string.IsNullOrEmpty(nodeRole)) { log.LogCritical(() => "Service does not appear to be running on a neonHIVE."); Program.Exit(1, immediate: true); } if (!string.Equals(nodeRole, NodeRole.Manager, StringComparison.OrdinalIgnoreCase)) { log.LogCritical(() => $"[neon-hive-manager] service is running on a [{nodeRole}] hive node. Only [{NodeRole.Manager}] nodes are supported."); Program.Exit(1, immediate: true); } // Open the hive data services and then start the main service task. log.LogDebug(() => $"Connecting: Consul"); using (consul = HiveHelper.OpenConsul()) { log.LogDebug(() => $"Connecting: Docker"); using (docker = HiveHelper.OpenDocker()) { log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel"); // We're passing [useBootstrap=true] here so that the HiveMQ client will // connect directly to the HiveMQ cluster nodes as opposed to routing // traffic through the private traffic manager. This is necessary because // the load balancers rely on HiveMQ to broadcast update notifications. // // One consequence of this is that this service will need to be restarted // whenever HiveMQ instances are relocated to different hive hosts. // We're going to monitor for changes to the HiveMQ bootstrap settings // and gracefully terminate the process when this happens. We're then // depending on Docker to restart the process so we'll be able to pick // up the change. hive.HiveMQ.Internal.HiveMQBootstrapChanged += (s, a) => { log.LogInfo("HiveMQ bootstrap settings change detected. Terminating service with [exitcode=-1] expecting that Docker will restart it."); // Use ExitCode=-1 so that we'll restart even if the service/container // was not configured with [restart=always]. terminator.Exit(-1); }; using (proxyNotifyChannel = hive.HiveMQ.Internal.GetProxyNotifyChannel(useBootstrap: true).Open()) { await RunAsync(); } } } } catch (Exception e) { log.LogCritical(e); Program.Exit(1); return; } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); return; }
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); log.LogInfo(() => $"Starting [{serviceName}]"); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); // Parse the environment variable settings. var environment = new EnvironmentParser(log); nameservers = environment.Get("NAMESERVERS", "8.8.8.8,8.8.4.4").Split(','); pingTimeout = environment.Get("PING_TIMEOUT", TimeSpan.FromSeconds(1.5), validator: v => v > TimeSpan.Zero); pollInterval = environment.Get("POLL_INTERVAL", TimeSpan.FromSeconds(5), validator: v => v > TimeSpan.Zero); warnInterval = environment.Get("WARN_INTERVAL", TimeSpan.FromMinutes(5), validator: v => v > TimeSpan.Zero); // Create a timer so we'll avoid spamming the logs with warnings. warnTimer = new PolledTimer(warnInterval, autoReset: true); warnTimer.FireNow(); // Set so that the first warnings detected will be reported immediately. // Create the object that will actually perform the hostname lookups // and health pings. This object caches things to improve performance. healthResolver = new HealthResolver(nameservers); // Create process terminator to handle termination signals. terminator = new ProcessTerminator(log); try { // Establish the hive connections. if (NeonHelper.IsDevWorkstation) { hive = HiveHelper.OpenHiveRemote(); } else { hive = HiveHelper.OpenHive(); } // Open Consul and then start the main service task. log.LogDebug(() => $"Connecting: Consul"); using (consul = HiveHelper.OpenConsul()) { await RunAsync(); } } catch (Exception e) { log.LogCritical(e); Program.Exit(1); return; } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); return; }
/// <inheritdoc/> public override void Run(CommandLine commandLine) { HiveProxy hive; if (commandLine.HasHelpOption) { Console.WriteLine(usage); Program.Exit(0); } var hiveLogin = Program.HiveLogin; // Print the current login status if no hive name was passed. if (hiveLogin == null) { Console.Error.WriteLine("*** You are not logged in."); Program.Exit(1); } Console.WriteLine(hiveLogin.LoginName); // Parse and validate the hive definition. hive = new HiveProxy(hiveLogin, (nodeName, publicAddress, privateAddress, append) => { return(new SshProxy <NodeDefinition>(nodeName, publicAddress, privateAddress, hiveLogin.GetSshCredentials(), TextWriter.Null)); }); // Verify the credentials by logging into a manager node. var verifyCredentials = true; Console.Error.WriteLine(); Console.Error.WriteLine($"Checking login [{hiveLogin.LoginName}]..."); if (hiveLogin.ViaVpn) { var vpnClient = HiveHelper.VpnGetClient(hiveLogin.HiveName); if (vpnClient == null) { Console.Error.WriteLine("*** ERROR: VPN is not running."); } else { switch (vpnClient.State) { case HiveHelper.VpnState.Connecting: Console.Error.WriteLine("VPN is connecting"); break; case HiveHelper.VpnState.Healthy: Console.Error.WriteLine("VPN connection is healthy"); break; case HiveHelper.VpnState.Unhealthy: Console.Error.WriteLine("*** ERROR: VPN connection is not healthy"); verifyCredentials = false; break; } } } if (verifyCredentials) { Console.Error.WriteLine("Authenticating..."); try { hive.GetReachableManager().Connect(); Console.Error.WriteLine("Authenticated"); } catch (Exception e) { Console.Error.WriteLine($"*** ERROR: Hive authentication failed: {NeonHelper.ExceptionError(e)}"); } } Console.WriteLine(); return; }
/// <inheritdoc/> public override void Run(CommandLine commandLine) { if (commandLine.HasHelpOption || commandLine.Arguments.Length == 0) { Console.WriteLine(usage); Program.Exit(0); } var hiveLogin = Program.ConnectHive(); var hive = new HiveProxy(hiveLogin); var command = commandLine.Arguments.ElementAtOrDefault(0); var registry = commandLine.Arguments.ElementAtOrDefault(1); List <RegistryCredentials> registries; switch (command) { case "ls": case "list": registries = hive.Registry.List(); // Special-case the Docker public registry if it's not // set explicitly. All neonHIVEs implicitly reference // the public registry. if (!registries.Exists(r => HiveHelper.IsDockerPublicRegistry(r.Registry))) { registries.Add( new RegistryCredentials() { Registry = HiveConst.DockerPublicRegistry }); } var maxRegistryLength = registries.Max(r => r.Registry.Length); foreach (var item in registries) { var spacer = new string(' ', maxRegistryLength - item.Registry.Length); var credentials = string.Empty; if (!string.IsNullOrEmpty(item.Username)) { credentials = $"{item.Username}/{item.Password ?? string.Empty}"; } Console.WriteLine($"{item.Registry}{spacer} - {credentials}"); } break; case "login": if (string.IsNullOrEmpty(registry)) { Console.Error.WriteLine("***ERROR: REGISTRY argument expected."); Program.Exit(1); } if (!HiveDefinition.DnsHostRegex.IsMatch(registry)) { Console.Error.WriteLine($"***ERROR: [{registry}] is not a valid registry hostname."); Program.Exit(1); } // Get the credentials. var username = commandLine.Arguments.ElementAtOrDefault(2); var password = commandLine.Arguments.ElementAtOrDefault(3); if (password == "-") { password = NeonHelper.ReadStandardInputText(); } if (string.IsNullOrEmpty(username)) { Console.Write("username: "******"password: "******"Verifying registry credentials on [{manager.Name}]."); if (!manager.RegistryLogin(registry, username, password)) { Console.Error.WriteLine($"*** ERROR: Registry login failed on [{manager.Name}]."); Program.Exit(1); } Console.WriteLine($"Registry credentials are valid."); // Login all of the nodes. var sbFailedNodes = new StringBuilder(); Console.WriteLine($"Logging the hive into the [{registry}] registry."); hive.Registry.Login(registry, username, password); // Restart the registry cache containers running on the managers // with the new credentials if we're updating credentials for the // Docker public registry and the cache is enabled. hive.Registry.RestartCache(registry, username, password); break; case "logout": if (string.IsNullOrEmpty(registry)) { Console.Error.WriteLine("***ERROR: REGISTRY argument expected."); Program.Exit(1); } if (!HiveDefinition.DnsHostRegex.IsMatch(registry)) { Console.Error.WriteLine($"***ERROR: [{registry}] is not a valid registry hostname."); Program.Exit(1); } // $todo(jeff.lill): // // Complete this implementation. Note that we also need to update // registry cache credentials when we're logging out of the Docker // public registry. break; default: Console.Error.WriteLine($"*** ERROR: Unknown command: [{command}]"); Program.Exit(1); break; } }
/// <summary> /// Main program entry point. /// </summary> /// <param name="args">The command line arguments.</param> public static void Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); log.LogInfo(() => $"Starting [{serviceName}]"); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); // Create process terminator to handle termination signals. terminator = new ProcessTerminator(log); terminator.AddHandler(() => terminator.ReadyToExit()); // Establish the hive connections. if (NeonHelper.IsDevWorkstation) { var vaultCredentialsSecret = "neon-proxy-manager-credentials"; Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret); hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, "neon-proxy-manager")); } else { hive = HiveHelper.OpenHive(); } // Parse the command line. var commandLine = new CommandLine(args); if (commandLine.Arguments.Count() != 2) { log.LogError($"*** ERROR: Invalid command line arguments: {commandLine}"); log.LogError($"*** Expected: MYSECRET MYCONSULKEY"); SleepForever(); } var secretName = commandLine.Arguments[0]; var consulKey = commandLine.Arguments[1]; try { // Read the secret file. var secretPath = ($"/run/secrets/{secretName}"); log.LogInfo($"Reading secret [{secretName}]."); if (!File.Exists(secretPath)) { log.LogError($"The secret file [{secretPath}] does not exist."); } else { var secret = File.ReadAllBytes(secretPath); log.LogInfo($"Writing secret to Consul [{consulKey}]."); HiveHelper.Consul.KV.PutBytes(consulKey, secret).Wait(); } } catch (Exception e) { log.LogError(e); } SleepForever(); }
/// <inheritdoc/> public override void Run(CommandLine commandLine) { if (commandLine.Arguments.Length < 1) { Console.Error.WriteLine("*** ERROR: USER@HIVE is required."); Program.Exit(1); } var login = HiveHelper.SplitLogin(commandLine.Arguments[0]); if (!login.IsOK) { Console.Error.WriteLine($"*** ERROR: Invalid username/hive [{commandLine.Arguments[0]}]. Expected something like: USER@HIVE"); Program.Exit(1); } var username = login.Username; var hiveName = login.HiveName; var hiveLoginPath = Program.GetHiveLoginPath(username, hiveName); if (File.Exists(hiveLoginPath)) { if (!commandLine.HasOption("--force") && !Program.PromptYesNo($"*** Are you sure you want to remove the [{username}@{hiveName}] login?")) { return; } File.Delete(hiveLoginPath); // Delete the backup and cached hive definition files if present. var backupPath = hiveLoginPath + ".bak"; var definitionPath = HiveHelper.GetCachedDefinitionPath(username, hiveName); if (File.Exists(backupPath)) { File.Delete(backupPath); } if (File.Exists(definitionPath)) { File.Delete(definitionPath); } // Remove the [.current] file if this is the logged-in hive. if (Program.HiveLogin != null && string.Equals(Program.HiveLogin.Username, username, StringComparison.OrdinalIgnoreCase) && string.Equals(Program.HiveLogin.HiveName, hiveName, StringComparison.OrdinalIgnoreCase)) { CurrentHiveLogin.Delete(); HiveHelper.VpnClose(hiveName); } Console.WriteLine($"Removed [{username}@{hiveName}]"); } else { Console.Error.WriteLine($"*** ERROR: Login [{username}@{hiveName}] does not exist."); return; } }
/// <summary> /// Implements the service as a <see cref="Task"/>. /// </summary> /// <returns>The <see cref="Task"/>.</returns> private static async Task RunAsync() { // Load the settings. // // Initialize the hive manager settings to their default values // if they don't already exist. if (!await consul.KV.Exists(hivemqMaintainSecondsKey)) { log.LogInfo($"Persisting setting [{hivemqMaintainSecondsKey}=60.0]"); await consul.KV.PutDouble(hivemqMaintainSecondsKey, 60); } if (!await consul.KV.Exists(logPurgeSecondsKey)) { log.LogInfo($"Persisting setting [{logPurgeSecondsKey}=300.0]"); await consul.KV.PutDouble(logPurgeSecondsKey, 300); } if (!await consul.KV.Exists(managerTopologySecondsKey)) { log.LogInfo($"Persisting setting [{managerTopologySecondsKey}=1800.0]"); await consul.KV.PutDouble(managerTopologySecondsKey, 1800); } if (!await consul.KV.Exists(proxyUpdateSecondsKey)) { log.LogInfo($"Persisting setting [{proxyUpdateSecondsKey}=60.0]"); await consul.KV.PutDouble(proxyUpdateSecondsKey, 60); } if (!await consul.KV.Exists(secretPurgeSecondsKey)) { log.LogInfo($"Persisting setting [{secretPurgeSecondsKey}=300.0]"); await consul.KV.PutDouble(secretPurgeSecondsKey, 300); } if (!await consul.KV.Exists(swarmPollSecondsKey)) { log.LogInfo($"Persisting setting [{swarmPollSecondsKey}=30.0]"); await consul.KV.PutDouble(swarmPollSecondsKey, 30.0); } if (!await consul.KV.Exists(vaultUnsealSecondsKey)) { log.LogInfo($"Persisting setting [{vaultUnsealSecondsKey}=30.0]"); await consul.KV.PutDouble(vaultUnsealSecondsKey, 30.0); } hivemqMantainInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(hivemqMaintainSecondsKey)); logPurgerInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(logPurgeSecondsKey)); managerTopologyInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(managerTopologySecondsKey)); proxyUpdateInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(proxyUpdateSecondsKey)); secretPurgeInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(secretPurgeSecondsKey)); swarmPollInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(swarmPollSecondsKey)); vaultUnsealInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(vaultUnsealSecondsKey)); log.LogInfo(() => $"Using setting [{hivemqMaintainSecondsKey}={hivemqMantainInterval.TotalSeconds}]"); log.LogInfo(() => $"Using setting [{logPurgeSecondsKey}={logPurgerInterval.TotalSeconds}]"); log.LogInfo(() => $"Using setting [{managerTopologySecondsKey}={managerTopologyInterval.TotalSeconds}]"); log.LogInfo(() => $"Using setting [{proxyUpdateSecondsKey}={proxyUpdateInterval.TotalSeconds}]"); log.LogInfo(() => $"Using setting [{secretPurgeSecondsKey}={secretPurgeInterval.TotalSeconds}]"); log.LogInfo(() => $"Using setting [{swarmPollSecondsKey}={swarmPollInterval.TotalSeconds}]"); log.LogInfo(() => $"Using setting [{vaultUnsealSecondsKey}={vaultUnsealInterval.TotalSeconds}]"); // Parse the Vault credentials from the [neon-hive-manager-vaultkeys] // secret, if it exists. 
var vaultCredentialsJson = HiveHelper.GetSecret("neon-hive-manager-vaultkeys"); if (string.IsNullOrWhiteSpace(vaultCredentialsJson)) { log.LogInfo(() => "Vault AUTO-UNSEAL is DISABLED because [neon-hive-manager-vaultkeys] Docker secret is not specified."); } else { try { vaultCredentials = NeonHelper.JsonDeserialize <VaultCredentials>(vaultCredentialsJson); log.LogInfo(() => "Vault AUTO-UNSEAL is ENABLED."); } catch (Exception e) { log.LogError("Vault AUTO-UNSEAL is DISABLED because the [neon-hive-manager-vaultkeys] Docker secret could not be parsed.", e); } } // We're going to need this later. vaultUris = await GetVaultUrisAsync(); // Launch the sub-tasks. These will run until the service is terminated. var tasks = new List <Task>(); // Start a task that handles HiveMQ related activities like ensuring that // the [sysadmin] account has full permissions for all virtual hosts. tasks.Add(HiveMQMaintainerAsync()); // Start a task that checks for Elasticsearch [logstash] and [metricbeat] indexes // that are older than the number of retention days. tasks.Add(LogPurgerAsync()); // Start a task that periodically checks for changes to the set of hive managers // (e.g. if a manager is added or removed). This task will cause the service to exit // so it can be restarted automatically by Docker to respond to the change. tasks.Add(ManagerWatcherAsync()); // Start a task that checks for old [neon-secret-retriever-*] service instances // as well as old persisted secrets and removes them. tasks.Add(SecretPurgerAsync()); // Start a task that polls current hive state to update the hive definition in Consul, etc. tasks.Add(SwarmPollerAsync()); // Start a task that periodically notifies the [neon-proxy-manager] service // that it should proactively rebuild the proxy configurations. tasks.Add(ProxyUpdaterAsync()); // We need to start a vault poller for the Vault instance running on each manager // node. We're going to construct the direct Vault URIs by querying Docker for // the current hive nodes and looking for the managers. foreach (var uri in vaultUris) { tasks.Add(VaultUnsealerAsync(uri)); } // Wait for all tasks to exit cleanly for a normal shutdown. await NeonHelper.WaitAllAsync(tasks); }
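// Sketch (assumption): the settings bootstrap above calls [Exists]/[PutDouble]/[GetDouble] extensions
// on the Consul KV endpoint. Those are Neon helpers whose implementation isn't shown in this section;
// the extensions below only illustrate how equivalent helpers could be layered over the standard
// Consul .NET client. Names and behavior here are illustrative, not the actual Neon code.
using System.Collections.Generic;
using System.Globalization;
using System.Text;
using System.Threading.Tasks;
using Consul;

public static class ConsulDoubleExtensionsSketch
{
    // Returns TRUE when the key exists.
    public static async Task<bool> Exists(this IKVEndpoint kv, string key)
    {
        return (await kv.Get(key)).Response != null;
    }

    // Persists a double as an invariant-culture string under the given key.
    public static async Task PutDouble(this IKVEndpoint kv, string key, double value)
    {
        var pair = new KVPair(key)
        {
            Value = Encoding.UTF8.GetBytes(value.ToString(CultureInfo.InvariantCulture))
        };

        await kv.Put(pair);
    }

    // Reads a double, throwing when the key is missing.
    public static async Task<double> GetDouble(this IKVEndpoint kv, string key)
    {
        var result = await kv.Get(key);

        if (result.Response == null)
        {
            throw new KeyNotFoundException($"Consul key [{key}] does not exist.");
        }

        return double.Parse(Encoding.UTF8.GetString(result.Response.Value), CultureInfo.InvariantCulture);
    }
}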
/// <inheritdoc/> public override void Run(CommandLine commandLine) { if (HiveHelper.InToolContainer) { Console.Error.WriteLine("*** ERROR: [file] commands cannot be run inside a Docker container."); Program.Exit(1); } if (commandLine.Arguments.Length == 0 || commandLine.HasHelpOption) { Help(); Program.Exit(0); } // Parse the arguments. var command = commandLine.Arguments.ElementAtOrDefault(0); var source = string.Empty; var target = string.Empty; var passwordName = string.Empty; switch (command) { // These commands accept two parameters. case "create": case "edit": case "view": target = commandLine.Arguments.ElementAtOrDefault(1); passwordName = commandLine.Arguments.ElementAtOrDefault(2); if (string.IsNullOrEmpty(target)) { Console.Error.WriteLine("*** ERROR: PATH argument is missing."); Program.Exit(1); } if (string.IsNullOrEmpty(passwordName)) { Console.Error.WriteLine("*** ERROR: PASSWORD-NAME argument is missing."); Program.Exit(1); } break; // These commands accept three parameters. case "encrypt": case "decrypt": source = commandLine.Arguments.ElementAtOrDefault(1); target = commandLine.Arguments.ElementAtOrDefault(2); passwordName = commandLine.Arguments.ElementAtOrDefault(3); if (string.IsNullOrEmpty(source)) { Console.Error.WriteLine("*** ERROR: SOURCE argument is missing."); Program.Exit(1); } if (string.IsNullOrEmpty(target)) { Console.Error.WriteLine("*** ERROR: TARGET argument is missing."); Program.Exit(1); } if (string.IsNullOrEmpty(passwordName)) { Console.Error.WriteLine("*** ERROR: PASSWORD-NAME argument is missing."); Program.Exit(1); } break; default: Console.Error.WriteLine($"*** ERROR: Unexpected [{command}] command."); Program.Exit(1); break; } var editor = commandLine.GetOption("--editor", "nano"); switch (editor.ToLowerInvariant()) { case "nano": Environment.SetEnvironmentVariable("EDITOR", "/bin/nano"); break; case "vim": Environment.SetEnvironmentVariable("EDITOR", "/usr/bin/vim"); break; case "vi": Environment.SetEnvironmentVariable("EDITOR", "/usr/bin/vi"); break; default: Console.Error.WriteLine($"*** ERROR: [--editor={editor}] does not specify a known editor. Specify one of: NANO, VIM, or VI."); Program.Exit(1); break; } // Ensure that the password file actually exists. Covenant.Assert(!string.IsNullOrEmpty(passwordName)); if (!File.Exists(Path.Combine(HiveHelper.GetAnsiblePasswordsFolder(), passwordName))) { Console.Error.WriteLine($"*** ERROR: Password file for [{passwordName}] does not exist."); Program.Exit(1); } // $note(jeff.lill): // // I tried to call [Program.ExecuteRecurse()] here to recurse into // the [neon vault -- COMMAND --vault-password-file=NAME PATH] commands // but it didn't work for [edit]. It looks like the command did run but // then got stuck. I could have sworn that I had this working at one // point but I can't get it working again. I think the standard // I/O streams being redirected might be confusing Docker and Ansible, // since Ansible needs to access the Docker TTY. // // The [view] command was also a bit wonky. For example, two blank // lines in the encrypted file would be returned as only a single // blank line. // // The (not so bad) workaround is to simply recurse into // [Program.Main()]. It's a little sloppy but should be OK // (and will be faster to boot). I'm going to do this for // all of the commands.
switch (command) { case "create": Program.Main( new string[] { "ansible", "vault", "--", "create", $"--vault-password-file={passwordName}", target }); break; case "decrypt": File.Copy(source, target, overwrite: true); Program.Main( new string[] { "ansible", "vault", "--", "decrypt", $"--vault-password-file={passwordName}", target }); break; case "edit": Program.Main( new string[] { "ansible", "vault", $"--editor={editor}", "--", "edit", $"--vault-password-file={passwordName}", target }); break; case "encrypt": File.Copy(source, target, overwrite: true); Program.Main( new string[] { "ansible", "vault", "--", "encrypt", $"--vault-password-file={passwordName}", target }); break; case "view": Program.Main( new string[] { "ansible", "vault", "--", "view", $"--vault-password-file={passwordName}", target }); break; default: Console.Error.WriteLine($"*** ERROR: Unexpected [{command}] command."); Program.Exit(1); break; } }
/// <inheritdoc/> public void Run(ModuleContext context) { var hive = HiveHelper.Hive; if (!context.ValidateArguments(context.Arguments, validModuleArgs)) { context.Failed = true; return; } // Obtain common arguments. context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [registry]"); if (!context.Arguments.TryGetValue <string>("registry", out var registry)) { throw new ArgumentException($"[registry] module argument is required."); } context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [state]"); if (!context.Arguments.TryGetValue <string>("state", out var state)) { state = "present"; } state = state.ToLowerInvariant(); if (context.HasErrors) { return; } context.WriteLine(AnsibleVerbosity.Trace, $"Reading existing credentials for [{registry}]."); var existingCredentials = hive.Registry.GetCredentials(registry); if (existingCredentials != null) { context.WriteLine(AnsibleVerbosity.Info, $"Credentials for [{registry}] exist."); } else { context.WriteLine(AnsibleVerbosity.Info, $"Credentials for [{registry}] do not exist."); } switch (state) { case "absent": if (context.CheckMode) { if (existingCredentials != null) { context.WriteLine(AnsibleVerbosity.Important, $"Credentials for [{registry}] will be deleted when CHECK-MODE is disabled."); } return; } // Log the hive out of the registry. if (existingCredentials != null) { context.Changed = true; } context.WriteLine(AnsibleVerbosity.Trace, $"Logging the hive out of the [{registry}] registry."); hive.Registry.Logout(registry); context.WriteLine(AnsibleVerbosity.Trace, $"All hive nodes are logged out."); break; case "present": if (context.CheckMode) { if (existingCredentials == null) { context.WriteLine(AnsibleVerbosity.Important, $"Credentials for [{registry}] will be added when CHECK-MODE is disabled."); } return; } // Parse the [username] and [password] credentials. context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [username]"); if (!context.Arguments.TryGetValue <string>("username", out var username)) { throw new ArgumentException($"[username] module argument is required."); } context.WriteLine(AnsibleVerbosity.Trace, $"Parsing [password]"); if (!context.Arguments.TryGetValue <string>("password", out var password)) { throw new ArgumentException($"[password] module argument is required."); } // Log all of the nodes in with the new registry credentials. // // Note that we won't do this if the registry cache is enabled and we're // updating credentials for the Docker public registry because for this // configuration, only the registry cache needs the upstream credentials. // The nodes don't authenticate against the local registry cache. if (!hive.Definition.Docker.RegistryCache || !HiveHelper.IsDockerPublicRegistry(registry)) { context.WriteLine(AnsibleVerbosity.Trace, $"Logging the hive into the [{registry}] registry."); hive.Registry.Login(registry, username, password); } else { // Restart the hive registry cache containers with the new credentials.
context.WriteLine(AnsibleVerbosity.Trace, $"Restarting the hive registry caches."); if (!hive.Registry.RestartCache(registry, username, password)) { context.WriteErrorLine("Unable to restart one or more of the hive registry caches."); return; } context.WriteLine(AnsibleVerbosity.Trace, $"Hive registry caches restarted."); } context.Changed = existingCredentials == null; break; default: throw new ArgumentException($"[state={state}] is not one of the valid choices: [present] or [absent]."); } }
/// <inheritdoc/> public override void Run(CommandLine commandLine) { HiveProxy hiveProxy; if (commandLine.HasHelpOption || commandLine.Arguments.Length == 0) { Console.WriteLine(usage); Program.Exit(0); } Console.Error.WriteLine(); var hiveLogin = Program.HiveLogin; var login = HiveHelper.SplitLogin(commandLine.Arguments[0]); if (!login.IsOK) { Console.Error.WriteLine($"*** ERROR: Invalid username/hive [{commandLine.Arguments[0]}]. Expected something like: USER@HIVE"); Program.Exit(1); } // Check whether we're already logged into the hive. var username = login.Username; var hiveName = login.HiveName; if (hiveLogin != null && string.Equals(hiveLogin.HiveName, hiveName, StringComparison.OrdinalIgnoreCase) && string.Equals(hiveLogin.Username, username, StringComparison.OrdinalIgnoreCase)) { // Ensure that the client is compatible with the hive. try { HiveHelper.ValidateClientVersion(hiveLogin, Program.Version); } catch (VersionException e) { HiveHelper.VpnClose(null); CurrentHiveLogin.Delete(); Console.Error.WriteLine($"*** ERROR: {e.Message}"); Program.Exit(1); } // Ensure that the hive's certificates, hostnames,... are properly initialized. HiveHelper.OpenHive(hiveLogin); Console.Error.WriteLine($"*** You are already logged into [{Program.HiveLogin.Username}@{Program.HiveLogin.HiveName}]."); Program.Exit(0); } // Logout of the current hive. if (hiveLogin != null) { Console.Error.WriteLine($"Logging out of [{Program.HiveLogin.Username}@{Program.HiveLogin.HiveName}]."); CurrentHiveLogin.Delete(); } // We're passing NULL to close all hive VPN connections to ensure that // we're only connected to one at a time. It's very possible for an operator // to have to manage multiple disconnected hives that share the same // IP address space. HiveHelper.VpnClose(null); // Fetch the new hive login. var hiveLoginPath = Program.GetHiveLoginPath(username, hiveName); if (!File.Exists(hiveLoginPath)) { Console.Error.WriteLine($"*** ERROR: Cannot find login [{username}@{hiveName}]."); Program.Exit(1); } hiveLogin = NeonHelper.JsonDeserialize <HiveLogin>(File.ReadAllText(hiveLoginPath)); // Determine whether we're going to use the VPN. var useVpn = false; var showVpn = commandLine.HasOption("--show-vpn"); if (hiveLogin.Definition.Hosting.IsOnPremiseProvider) { if (!commandLine.HasOption("--no-vpn")) { if (!hiveLogin.Definition.Vpn.Enabled) { Console.Error.WriteLine($"*** ERROR: Hive [{hiveLogin.HiveName}] was not provisioned with a VPN."); Program.Exit(1); } useVpn = true; } else { useVpn = false; Console.Error.WriteLine("Using the local network (not the VPN)"); } } else { useVpn = true; // Always TRUE for cloud environments. } // Connect the VPN if enabled. if (useVpn) { HiveHelper.VpnOpen(hiveLogin, onStatus: message => Console.Error.WriteLine($"{message}"), onError: message => Console.Error.WriteLine($"*** ERROR: {message}"), show: showVpn); } // Verify the credentials by logging into a manager node. Console.Error.WriteLine("Authenticating..."); hiveProxy = new HiveProxy(hiveLogin, (nodeName, publicAddress, privateAddress, append) => { return(new SshProxy <NodeDefinition>(nodeName, publicAddress, privateAddress, hiveLogin.GetSshCredentials(), TextWriter.Null)); }); var viaVpn = useVpn ?
$" (via VPN)" : string.Empty; try { hiveProxy.GetReachableManager().Connect(); var currentLogin = new CurrentHiveLogin() { Login = hiveLogin.LoginName, ViaVpn = useVpn }; currentLogin.Save(); // Call GetLogin() with the client version so that the current hive // definition will be downloaded and so we'll also verify that the // current client is capable of managing the hive. try { HiveHelper.GetLogin(clientVersion: Program.Version); } catch (VersionException e) { HiveHelper.VpnClose(null); CurrentHiveLogin.Delete(); Console.Error.WriteLine($"*** ERROR: {e.Message}"); Program.Exit(1); } // Ensure that the hive's certificates, hostnames,... are properly initialized. HiveHelper.OpenHive(hiveLogin); Console.Error.WriteLine($"Logged into [{hiveLogin.LoginName}]{viaVpn}."); Console.Error.WriteLine(""); } catch (Exception e) { Console.Error.WriteLine($"*** ERROR: Hive login failed{viaVpn}: {NeonHelper.ExceptionError(e)}"); Console.Error.WriteLine(""); // Delete the current login because it failed. CurrentHiveLogin.Delete(); Program.Exit(1); } }
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); // Create process terminator to handle termination signals. terminator = new ProcessTerminator(log); terminator.AddHandler( () => { // Cancel any operations in progress. terminator.CancellationTokenSource.Cancel(); }); // Read the environment variables. // $hack(jeff.lill): // // We're going to scan the Consul configuration key to determine whether this // instance is managing the public or private proxy (or bridges) so we'll // be completely compatible with existing deployments. // // In theory, we could have passed a new environment variable but that's not // worth the trouble. configKey = Environment.GetEnvironmentVariable("CONFIG_KEY"); if (string.IsNullOrEmpty(configKey)) { log.LogError("[CONFIG_KEY] environment variable is required."); Program.Exit(1, immediate: true); } isPublic = configKey.Contains("/public/"); var proxyName = isPublic ? "public" : "private"; serviceName = $"neon-proxy-{proxyName}:{GitVersion}"; log.LogInfo(() => $"Starting [{serviceName}]"); configHashKey = Environment.GetEnvironmentVariable("CONFIG_HASH_KEY"); if (string.IsNullOrEmpty(configHashKey)) { log.LogError("[CONFIG_HASH_KEY] environment variable is required."); Program.Exit(1, immediate: true); } vaultCredentialsName = Environment.GetEnvironmentVariable("VAULT_CREDENTIALS"); if (string.IsNullOrEmpty(vaultCredentialsName)) { log.LogWarn("HTTPS routes are not supported because VAULT_CREDENTIALS is not specified or blank."); } var warnSeconds = Environment.GetEnvironmentVariable("WARN_SECONDS"); if (string.IsNullOrEmpty(warnSeconds) || !double.TryParse(warnSeconds, out var warnSecondsValue)) { warnInterval = TimeSpan.FromSeconds(300); } else { warnInterval = TimeSpan.FromSeconds(warnSecondsValue); } var startSeconds = Environment.GetEnvironmentVariable("START_SECONDS"); if (string.IsNullOrEmpty(startSeconds) || !double.TryParse(startSeconds, out var startSecondsValue)) { startDelay = TimeSpan.FromSeconds(10); } else { startDelay = TimeSpan.FromSeconds(startSecondsValue); } var maxHAProxyCountString = Environment.GetEnvironmentVariable("MAX_HAPROXY_COUNT"); if (!int.TryParse(maxHAProxyCountString, out maxHAProxyCount)) { maxHAProxyCount = 10; } if (maxHAProxyCount < 0) { maxHAProxyCount = 0; } debugMode = "true".Equals(Environment.GetEnvironmentVariable("DEBUG"), StringComparison.InvariantCultureIgnoreCase); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); log.LogInfo(() => $"CONFIG_KEY={configKey}"); log.LogInfo(() => $"CONFIG_HASH_KEY={configHashKey}"); log.LogInfo(() => $"VAULT_CREDENTIALS={vaultCredentialsName}"); log.LogInfo(() => $"WARN_SECONDS={warnInterval}"); log.LogInfo(() => $"START_SECONDS={startDelay}"); log.LogInfo(() => $"MAX_HAPROXY_COUNT={maxHAProxyCount}"); log.LogInfo(() => $"DEBUG={debugMode}"); // Ensure that the required directories exist. Directory.CreateDirectory(tmpfsFolder); Directory.CreateDirectory(configFolder); Directory.CreateDirectory(configUpdateFolder); // Establish the hive connections.
if (NeonHelper.IsDevWorkstation) { throw new NotImplementedException("This service works only within a Linux container with HAProxy installed."); //var vaultCredentialsSecret = "neon-proxy-manager-credentials"; //Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret); //hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, $"neon-proxy-{proxyName}")); } else { hive = HiveHelper.OpenHive(); } try { // Log into Vault using the Vault credentials persisted as a Docker // secret, if one was specified. We won't open Vault otherwise. if (!string.IsNullOrEmpty(vaultCredentialsName)) { var vaultSecret = HiveHelper.GetSecret(vaultCredentialsName); if (string.IsNullOrEmpty(vaultSecret)) { log.LogCritical($"Cannot read Docker secret [{vaultCredentialsName}]."); Program.Exit(1, immediate: true); } var vaultCredentials = HiveCredentials.ParseJson(vaultSecret); if (vaultCredentials == null) { log.LogCritical($"Cannot parse Docker secret [{vaultCredentialsName}]."); Program.Exit(1, immediate: true); } log.LogInfo(() => $"Connecting: Vault"); vault = HiveHelper.OpenVault(vaultCredentials); } else { vault = null; // $hack(jeff.lill): // // This is a bit of a backwards compatibility hack. Instances started without the // VAULT_CREDENTIALS environment variable are assumed to be proxy bridges. isBridge = true; } // Open Consul and then start the service tasks. log.LogInfo(() => $"Connecting: Consul"); using (consul = HiveHelper.OpenConsul()) { log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel"); // Verify that the required Consul keys exist or loop to wait until they // are created. This will allow the service to wait for pending hive setup // operations to be completed. while (!await consul.KV.Exists(configKey)) { log.LogWarn(() => $"Waiting for [{configKey}] key to be present in Consul."); await Task.Delay(TimeSpan.FromSeconds(5)); } while (!await consul.KV.Exists(configHashKey)) { log.LogWarn(() => $"Waiting for [{configHashKey}] key to be present in Consul."); await Task.Delay(TimeSpan.FromSeconds(5)); } // Crank up the service tasks. await NeonHelper.WaitAllAsync( ErrorPollerAsync(), HAProxShim()); } } catch (Exception e) { log.LogCritical(e); Program.Exit(1); return; } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); return; }
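// Sketch (assumption): the WARN_SECONDS/START_SECONDS handling above repeats a small
// "read an environment variable or fall back to a default" pattern. A hypothetical helper like the
// one below could consolidate it; it is not part of the actual service and the name is illustrative.
using System;

public static class EnvSketch
{
    // Parses a seconds value from an environment variable, falling back to a default
    // when the variable is missing or malformed.
    public static TimeSpan GetSeconds(string variable, double defaultSeconds)
    {
        var raw = Environment.GetEnvironmentVariable(variable);

        if (string.IsNullOrEmpty(raw) || !double.TryParse(raw, out var seconds))
        {
            return TimeSpan.FromSeconds(defaultSeconds);
        }

        return TimeSpan.FromSeconds(seconds);
    }
}

// Hypothetical usage: warnInterval = EnvSketch.GetSeconds("WARN_SECONDS", 300);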
/// <summary> /// Application entry point. /// </summary> /// <param name="args">Command line arguments.</param> public static async Task Main(string[] args) { LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL")); log = LogManager.Default.GetLogger(typeof(Program)); // Create process terminator to handle termination signals. terminator = new ProcessTerminator(log); terminator.AddHandler( () => { // Cancel any operations in progress. terminator.CancellationTokenSource.Cancel(); }); // Read the environment variables. // $hack(jeff.lill): // // We're going to scan the Consul configuration key to determine whether this // instance is managing the public or private proxy (or bridges) so we'll // be completely compatible with existing deployments. // // In theory, we could have passed a new environment variable but that's not // worth the trouble. configKey = Environment.GetEnvironmentVariable("CONFIG_KEY"); if (string.IsNullOrEmpty(configKey)) { log.LogError("[CONFIG_KEY] environment variable is required."); Program.Exit(1, immediate: true); } isPublic = configKey.Contains("/public/"); var proxyName = isPublic ? "public" : "private"; serviceName = $"neon-proxy-{proxyName}-cache:{GitVersion}"; log.LogInfo(() => $"Starting [{serviceName}]"); configHashKey = Environment.GetEnvironmentVariable("CONFIG_HASH_KEY"); if (string.IsNullOrEmpty(configHashKey)) { log.LogError("[CONFIG_HASH_KEY] environment variable is required."); Program.Exit(1, immediate: true); } var memoryLimitValue = Environment.GetEnvironmentVariable("MEMORY_LIMIT"); if (string.IsNullOrEmpty(memoryLimitValue)) { memoryLimitValue = DefMemoryLimitString; } if (!NeonHelper.TryParseCount(memoryLimitValue, out var memoryLimitDouble)) { memoryLimitDouble = DefMemoryLimit; } if (memoryLimitDouble < MinMemoryLimit) { log.LogWarn(() => $"[MEMORY_LIMIT={memoryLimitValue}] is too small. Using [{MinMemoryLimitString}] instead."); memoryLimitDouble = MinMemoryLimit; } memoryLimit = (long)memoryLimitDouble; var warnSeconds = Environment.GetEnvironmentVariable("WARN_SECONDS"); if (string.IsNullOrEmpty(warnSeconds) || !double.TryParse(warnSeconds, out var warnSecondsValue)) { warnInterval = TimeSpan.FromSeconds(300); } else { warnInterval = TimeSpan.FromSeconds(warnSecondsValue); } debugMode = "true".Equals(Environment.GetEnvironmentVariable("DEBUG"), StringComparison.InvariantCultureIgnoreCase); log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}"); log.LogInfo(() => $"CONFIG_KEY={configKey}"); log.LogInfo(() => $"CONFIG_HASH_KEY={configHashKey}"); log.LogInfo(() => $"MEMORY_LIMIT={memoryLimit}"); log.LogInfo(() => $"WARN_SECONDS={warnInterval}"); log.LogInfo(() => $"DEBUG={debugMode}"); // Ensure that the required directories exist. Directory.CreateDirectory(tmpfsFolder); Directory.CreateDirectory(configFolder); Directory.CreateDirectory(configUpdateFolder); // Establish the hive connections. if (NeonHelper.IsDevWorkstation) { throw new NotImplementedException("This service works only within a Linux container with Varnish installed."); //var vaultCredentialsSecret = "neon-proxy-manager-credentials"; //Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret); //hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, $"neon-proxy-{proxyName}")); } else { hive = HiveHelper.OpenHive(); } try { // Open Consul and then start the service tasks.
log.LogInfo(() => $"Connecting: Consul"); using (consul = HiveHelper.OpenConsul()) { log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel"); // Verify that the required Consul keys exist or loop to wait until they // are created. This will allow the service to wait for pending hive setup // operations to be completed. while (!await consul.KV.Exists(configKey)) { log.LogWarn(() => $"Waiting for [{configKey}] key to be present in Consul."); await Task.Delay(TimeSpan.FromSeconds(5)); } while (!await consul.KV.Exists(configHashKey)) { log.LogWarn(() => $"Waiting for [{configHashKey}] key to be present in Consul."); await Task.Delay(TimeSpan.FromSeconds(5)); } // Crank up the service tasks. log.LogInfo(() => $"Starting service tasks."); await NeonHelper.WaitAllAsync( CacheWarmer(), ErrorPollerAsync(), VarnishShim()); } } catch (Exception e) { log.LogCritical(e); Program.Exit(1); return; } finally { HiveHelper.CloseHive(); terminator.ReadyToExit(); } Program.Exit(0); return; }
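// Sketch (assumption): both proxy services above poll Consul until their configuration keys appear so
// they can ride out pending hive setup. A small helper like this could factor out that loop; the name
// and signature are illustrative only and are not part of the actual services.
using System;
using System.Threading;
using System.Threading.Tasks;
using Consul;

public static class ConsulWaitSketch
{
    // Polls the Consul KV store until the given key exists.
    public static async Task WaitForKeyAsync(IKVEndpoint kv, string key, TimeSpan pollInterval, CancellationToken cancellationToken = default)
    {
        while ((await kv.Get(key)).Response == null)
        {
            await Task.Delay(pollInterval, cancellationToken);
        }
    }
}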