/// <summary>
/// Logs a <b>critical</b> message.
/// </summary>
/// <param name="message">The message.</param>
public void Critical(string message)
{
    // Forward to the underlying logger (tagged with this instance's [Id])
    // when one is attached; otherwise this is a no-op.
    log?.LogCritical(message, Id);
}
/// <summary>
/// Logs a <b>critical</b> message.
/// </summary>
/// <param name="message">The object that will be serialized into the message.</param>
public void Critical(object message)
{
    // Forward to the underlying logger (tagged with this instance's [Id])
    // when one is attached; otherwise this is a no-op.
    log?.LogCritical(message, Id);
}
/// <summary>
/// Logs a critical exception.
/// </summary>
/// <param name="log">The log.</param>
/// <param name="e">The exception.</param>
public static void LogCritical(this INeonLogger log, Exception e)
{
    // Skip the work entirely when CRITICAL logging is disabled.
    if (!log.IsCriticalEnabled)
    {
        return;
    }

    // Pass a null message; the exception carries the details.
    log.LogCritical(null, e);
}
/// <summary>
/// Logs a critical message retrieved via a message function.
/// </summary>
/// <param name="log">The log.</param>
/// <param name="messageFunc">The message function.</param>
/// <remarks>
/// This method is intended mostly to enable the efficient use of interpolated C# strings:
/// the function is only invoked when CRITICAL logging is actually enabled.
/// </remarks>
public static void LogCritical(this INeonLogger log, Func<object> messageFunc)
{
    // Skip evaluating the (potentially expensive) message function when
    // CRITICAL logging is disabled.
    if (!log.IsCriticalEnabled)
    {
        return;
    }

    log.LogCritical(messageFunc());
}
/// <summary>
/// Logs a critical message retrieved via a message function, optionally tagging
/// it with an activity ID.
/// </summary>
/// <param name="log">The log.</param>
/// <param name="messageFunc">The message function.</param>
/// <param name="activityId">The optional activity ID.</param>
/// <remarks>
/// This method is intended mostly to enable the efficient use of interpolated C# strings:
/// the function is only invoked when CRITICAL logging is actually enabled.
/// </remarks>
public static void LogCritical(this INeonLogger log, Func<string> messageFunc, string activityId = null)
{
    // NOTE(review): sibling overloads test [IsCriticalEnabled]; confirm that
    // [IsLogCriticalEnabled] is the intended property name here.
    if (!log.IsLogCriticalEnabled)
    {
        return;
    }

    log.LogCritical(messageFunc(), activityId);
}
/// <summary>
/// Logs a critical exception, optionally tagging it with an activity ID.
/// </summary>
/// <param name="log">The log.</param>
/// <param name="e">The exception.</param>
/// <param name="activityId">The optional activity ID.</param>
public static void LogCritical(this INeonLogger log, Exception e, string activityId = null)
{
    // NOTE(review): sibling overloads test [IsCriticalEnabled]; confirm that
    // [IsLogCriticalEnabled] is the intended property name here.
    if (!log.IsLogCriticalEnabled)
    {
        return;
    }

    // Pass a null message; the exception carries the details.
    log.LogCritical(null, e, activityId);
}
/// <summary>
/// Application entry point: configures logging, dispatches to the subcommand
/// named by the first command-line argument, and manages process exit.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Create process terminator to handle termination signals.
    terminator = new ProcessTerminator(log);

    try
    {
        var commandLine = new CommandLine(args);
        var command     = commandLine.Arguments.ElementAtOrDefault(0);

        if (command == null)
        {
            log.LogError("usage: vegomatic COMMAND ARGS...");
            Program.Exit(1, immediate: true);
        }

        // Dispatch to the subcommand, shifting the command name off the
        // command line.  Unrecognized commands fall through to the
        // test server (note the shared [default]/[case "test-server"] labels).
        switch (command)
        {
            case "cephfs":

                await new CephFS().ExecAsync(commandLine.Shift(1));
                break;

            case "issue-mntc":

                await new IssueMntc().ExecAsync(commandLine.Shift(1));
                break;

            default:
            case "test-server":

                await new TestServer().ExecAsync(commandLine.Shift(1));
                break;
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Application entry point: configures logging, connects to the hive
/// (remote when running on a developer workstation), and runs the main
/// service task until termination.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Create process terminator to handle termination signals.
    terminator = new ProcessTerminator(log);

    try
    {
        // Establish the hive connections.
        if (NeonHelper.IsDevWorkstation)
        {
            HiveHelper.OpenHiveRemote();
        }
        else
        {
            HiveHelper.OpenHive();
        }

        // Run the service until it completes or is terminated.
        await RunAsync();
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
}
/// <summary>
/// Application entry point: configures logging, parses environment settings,
/// sets up the health resolver and warning throttle, connects to the hive and
/// Consul, and then runs the main service task.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Parse the environment variable settings.  All intervals must be
    // positive; defaults apply when a variable is unset or invalid.
    var environment = new EnvironmentParser(log);

    nameservers  = environment.Get("NAMESERVERS", "8.8.8.8,8.8.4.4").Split(',');
    pingTimeout  = environment.Get("PING_TIMEOUT", TimeSpan.FromSeconds(1.5), validator: v => v > TimeSpan.Zero);
    pollInterval = environment.Get("POLL_INTERVAL", TimeSpan.FromSeconds(5), validator: v => v > TimeSpan.Zero);
    warnInterval = environment.Get("WARN_INTERVAL", TimeSpan.FromMinutes(5), validator: v => v > TimeSpan.Zero);

    // Create a timer so we'll avoid spamming the logs with warnings.
    warnTimer = new PolledTimer(warnInterval, autoReset: true);
    warnTimer.FireNow();    // Set so that the first warnings detected will be reported immediately.

    // Create the object that will actually perform the hostname lookups
    // and health pings.  This object caches things to improve performance.
    healthResolver = new HealthResolver(nameservers);

    // Create process terminator to handle termination signals.
    terminator = new ProcessTerminator(log);

    try
    {
        // Establish the hive connections.
        if (NeonHelper.IsDevWorkstation)
        {
            hive = HiveHelper.OpenHiveRemote();
        }
        else
        {
            hive = HiveHelper.OpenHive();
        }

        // Open Consul and then start the main service task.
        log.LogDebug(() => $"Connecting: Consul");

        using (consul = HiveHelper.OpenConsul())
        {
            await RunAsync();
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Application entry point for the proxy service: reads configuration from
/// environment variables, determines whether this instance manages the
/// public or private proxy, optionally logs into Vault, waits for the
/// required Consul keys, and then runs the service tasks.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));

    // Create process terminator to handle termination signals.
    terminator = new ProcessTerminator(log);
    terminator.AddHandler(
        () =>
        {
            // Cancel any operations in progress.
            terminator.CancellationTokenSource.Cancel();
        });

    // Read the environment variables.

    // $hack(jeff.lill):
    //
    // We're going to scan the Consul configuration key to determine whether this
    // instance is managing the public or private proxy (or bridges) so we'll
    // be completely compatible with existing deployments.
    //
    // In theory, we could have passed a new environment variable but that's not
    // worth the trouble.

    configKey = Environment.GetEnvironmentVariable("CONFIG_KEY");

    if (string.IsNullOrEmpty(configKey))
    {
        log.LogError("[CONFIG_KEY] environment variable is required.");
        Program.Exit(1, immediate: true);
    }

    isPublic = configKey.Contains("/public/");

    var proxyName = isPublic ? "public" : "private";

    serviceName = $"neon-proxy-{proxyName}:{GitVersion}";

    log.LogInfo(() => $"Starting [{serviceName}]");

    configHashKey = Environment.GetEnvironmentVariable("CONFIG_HASH_KEY");

    if (string.IsNullOrEmpty(configHashKey))
    {
        log.LogError("[CONFIG_HASH_KEY] environment variable is required.");
        Program.Exit(1, immediate: true);
    }

    vaultCredentialsName = Environment.GetEnvironmentVariable("VAULT_CREDENTIALS");

    if (string.IsNullOrEmpty(vaultCredentialsName))
    {
        log.LogWarn("HTTPS routes are not supported because VAULT_CREDENTIALS is not specified or blank.");
    }

    // [WARN_SECONDS]: warning throttle interval (default 300 seconds).
    var warnSeconds = Environment.GetEnvironmentVariable("WARN_SECONDS");

    if (string.IsNullOrEmpty(warnSeconds) || !double.TryParse(warnSeconds, out var warnSecondsValue))
    {
        warnInterval = TimeSpan.FromSeconds(300);
    }
    else
    {
        warnInterval = TimeSpan.FromSeconds(warnSecondsValue);
    }

    // [START_SECONDS]: startup delay (default 10 seconds).
    var startSeconds = Environment.GetEnvironmentVariable("START_SECONDS");

    if (string.IsNullOrEmpty(startSeconds) || !double.TryParse(startSeconds, out var startSecondsValue))
    {
        startDelay = TimeSpan.FromSeconds(10);
    }
    else
    {
        startDelay = TimeSpan.FromSeconds(startSecondsValue);
    }

    // [MAX_HAPROXY_COUNT]: retained HAProxy process limit (default 10, clamped at >= 0).
    var maxHAProxyCountString = Environment.GetEnvironmentVariable("MAX_HAPROXY_COUNT");

    if (!int.TryParse(maxHAProxyCountString, out maxHAProxyCount))
    {
        maxHAProxyCount = 10;
    }

    if (maxHAProxyCount < 0)
    {
        maxHAProxyCount = 0;
    }

    debugMode = "true".Equals(Environment.GetEnvironmentVariable("DEBUG"), StringComparison.InvariantCultureIgnoreCase);

    // Log the effective settings for diagnostics.
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");
    log.LogInfo(() => $"CONFIG_KEY={configKey}");
    log.LogInfo(() => $"CONFIG_HASH_KEY={configHashKey}");
    log.LogInfo(() => $"VAULT_CREDENTIALS={vaultCredentialsName}");
    log.LogInfo(() => $"WARN_SECONDS={warnInterval}");
    log.LogInfo(() => $"START_SECONDS={startDelay}");
    log.LogInfo(() => $"MAX_HAPROXY_COUNT={maxHAProxyCount}");
    log.LogInfo(() => $"DEBUG={debugMode}");

    // Ensure that the required directories exist.
    Directory.CreateDirectory(tmpfsFolder);
    Directory.CreateDirectory(configFolder);
    Directory.CreateDirectory(configUpdateFolder);

    // Establish the hive connections.
    if (NeonHelper.IsDevWorkstation)
    {
        throw new NotImplementedException("This service works only within a Linux container with HAProxy installed.");

        //var vaultCredentialsSecret = "neon-proxy-manager-credentials";
        //Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret);
        //hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, $"neon-proxy-{proxyName}"));
    }
    else
    {
        hive = HiveHelper.OpenHive();
    }

    try
    {
        // Log into Vault using the Vault credentials persisted as a Docker
        // secret, if one was specified.  We won't open Vault otherwise.
        if (!string.IsNullOrEmpty(vaultCredentialsName))
        {
            var vaultSecret = HiveHelper.GetSecret(vaultCredentialsName);

            if (string.IsNullOrEmpty(vaultSecret))
            {
                log.LogCritical($"Cannot read Docker secret [{vaultCredentialsName}].");
                Program.Exit(1, immediate: true);
            }

            var vaultCredentials = HiveCredentials.ParseJson(vaultSecret);

            if (vaultCredentials == null)
            {
                log.LogCritical($"Cannot parse Docker secret [{vaultCredentialsName}].");
                Program.Exit(1, immediate: true);
            }

            log.LogInfo(() => $"Connecting: Vault");
            vault = HiveHelper.OpenVault(vaultCredentials);
        }
        else
        {
            vault = null;

            // $hack(jeff.lill):
            //
            // This is a bit of backwards compatible hack.  Instances started without the
            // VAULT_CREDENTIALS environment variable are assumed to be proxy bridges.
            isBridge = true;
        }

        // Open Consul and then start the service tasks.
        log.LogInfo(() => $"Connecting: Consul");

        using (consul = HiveHelper.OpenConsul())
        {
            log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel");

            // Verify that the required Consul keys exist or loop to wait until they
            // are created.  This will allow the service wait for pending hive setup
            // operations to be completed.
            while (!await consul.KV.Exists(configKey))
            {
                log.LogWarn(() => $"Waiting for [{configKey}] key to be present in Consul.");
                await Task.Delay(TimeSpan.FromSeconds(5));
            }

            while (!await consul.KV.Exists(configHashKey))
            {
                log.LogWarn(() => $"Waiting for [{configHashKey}] key to be present in Consul.");
                await Task.Delay(TimeSpan.FromSeconds(5));
            }

            // Crank up the service tasks.
            // NOTE(review): [HAProxShim] looks like a typo for "HAProxyShim" —
            // confirm against the method's definition elsewhere in this file.
            await NeonHelper.WaitAllAsync(
                ErrorPollerAsync(),
                HAProxShim());
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Constructor.  Builds the EasyNetQ logger callback that maps EasyNetQ
/// log levels onto the corresponding Neon logger methods.
/// </summary>
/// <param name="neonLogger">The Neon base logger.</param>
public HiveEasyMQLogProvider(INeonLogger neonLogger)
{
    Covenant.Requires <ArgumentNullException>(neonLogger != null);

    this.neonLogger = neonLogger;

    // This callback is invoked by EasyNetQ for every log event.  It formats
    // the structured message, forwards it to the Neon logger at the mapped
    // level (checking the level's enabled flag first to avoid wasted work),
    // and always returns [true].
    this.loggerFunc =
        (logLevel, messageFunc, exception, formatParameters) =>
        {
            // EasyNetQ may probe with a null message function; nothing to log.
            if (messageFunc == null)
            {
                return(true);
            }

            var message = LogMessageFormatter.FormatStructuredMessage(messageFunc(), formatParameters, out _);

            switch (logLevel)
            {
                case EasyNetQ.Logging.LogLevel.Trace:

                    // NOTE: Neon logging doesn't have a TRACE level so we'll
                    //       map these to DEBUG.

                case EasyNetQ.Logging.LogLevel.Debug:

                    if (neonLogger.IsDebugEnabled)
                    {
                        if (exception == null)
                        {
                            neonLogger.LogDebug(message);
                        }
                        else
                        {
                            neonLogger.LogDebug(message, exception);
                        }
                    }
                    break;

                case EasyNetQ.Logging.LogLevel.Error:

                    if (neonLogger.IsErrorEnabled)
                    {
                        if (exception == null)
                        {
                            neonLogger.LogError(message);
                        }
                        else
                        {
                            neonLogger.LogError(message, exception);
                        }
                    }
                    break;

                case EasyNetQ.Logging.LogLevel.Fatal:

                    // EasyNetQ FATAL maps to Neon CRITICAL.
                    if (neonLogger.IsCriticalEnabled)
                    {
                        if (exception == null)
                        {
                            neonLogger.LogCritical(message);
                        }
                        else
                        {
                            neonLogger.LogCritical(message, exception);
                        }
                    }
                    break;

                case EasyNetQ.Logging.LogLevel.Info:

                    if (neonLogger.IsInfoEnabled)
                    {
                        if (exception == null)
                        {
                            neonLogger.LogInfo(message);
                        }
                        else
                        {
                            neonLogger.LogInfo(message, exception);
                        }
                    }
                    break;

                case EasyNetQ.Logging.LogLevel.Warn:

                    if (neonLogger.IsWarnEnabled)
                    {
                        if (exception == null)
                        {
                            neonLogger.LogWarn(message);
                        }
                        else
                        {
                            neonLogger.LogWarn(message, exception);
                        }
                    }
                    break;
            }

            return(true);
        };
}
/// <summary>
/// Application entry point for the neon-dns service: parses environment
/// settings, connects to the hive (emulating the PowerDNS hosts file when
/// running on a developer workstation), verifies it's running on a manager
/// node with the hosts file mounted, and runs the main service task.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Parse the environment variable settings.  Both intervals must be positive.
    var environment = new EnvironmentParser(log);

    pollInterval   = environment.Get("POLL_INTERVAL", TimeSpan.FromSeconds(5), validator: v => v > TimeSpan.Zero);
    verifyInterval = environment.Get("VERIFY_INTERVAL", TimeSpan.FromMinutes(5), validator: v => v > TimeSpan.Zero);

    // Create process terminator to handle process termination signals.
    terminator = new ProcessTerminator(log);

    try
    {
        // Establish the hive connections.
        if (NeonHelper.IsDevWorkstation)
        {
            hive = HiveHelper.OpenHiveRemote();

            // For testing and development, we're going to write a test
            // hosts file to [%NF_TEMP\neon-dns-hosts.txt] so we can see
            // what's happening outside of a hive.

            powerDnsHostsPath = Environment.ExpandEnvironmentVariables("%NF_TEMP%\\neon-dns-hosts.txt");

            File.WriteAllText(powerDnsHostsPath,
$@"# PowerDNS Recursor authoritatively answers for [*.HIVENAME.nhive.io] hostnames.
# on the local node using these mappings.

10.0.0.30       {HiveHelper.Hive.Definition.Hostnames.Consul}

# Internal hive Vault mappings:

10.0.0.30       {HiveHelper.Hive.Definition.Hostnames.Vault}
10.0.0.30       {HiveHelper.Hive.FirstManager.Name}.{HiveHelper.Hive.Definition.Hostnames.Vault}

# Internal hive registry cache related mappings:

10.0.0.30       {HiveHelper.Hive.FirstManager.Name}.{HiveHelper.Hive.Definition.Hostnames.RegistryCache}

# Internal hive log pipeline related mappings:

10.0.0.30       {HiveHelper.Hive.Definition.Hostnames.LogEsData}
");
            // We're also going to create a temporary folder for the reload signal.

            reloadSignalPath = Environment.ExpandEnvironmentVariables("%NF_TEMP%\\neon-dns\\reload");

            Directory.CreateDirectory(Path.GetDirectoryName(reloadSignalPath));
        }
        else
        {
            hive = HiveHelper.OpenHive();
        }

        // Ensure that we're running on a manager node.  This is required because
        // we need to be able to update the [/etc/powerdns/hosts] files deployed
        // on the managers.

        var nodeRole = Environment.GetEnvironmentVariable("NEON_NODE_ROLE");

        if (string.IsNullOrEmpty(nodeRole))
        {
            log.LogCritical(() => "Service does not appear to be running on a neonHIVE.");
            Program.Exit(1, immediate: true);
        }

        if (!string.Equals(nodeRole, NodeRole.Manager, StringComparison.OrdinalIgnoreCase))
        {
            log.LogCritical(() => $"[neon-dns] service is running on a [{nodeRole}] hive node.  Only [{NodeRole.Manager}] nodes are supported.");
            Program.Exit(1, immediate: true);
        }

        // Ensure that the [/etc/powerdns/hosts] file was mapped into the container.

        if (!File.Exists(powerDnsHostsPath))
        {
            log.LogCritical(() => $"[neon-dns] service cannot locate [{powerDnsHostsPath}] on the host manager.  Was this mounted to the container as read/write?");
            Program.Exit(1, immediate: true);
        }

        // Open Consul and then start the main service task.

        log.LogDebug(() => $"Connecting: Consul");

        using (consul = HiveHelper.OpenConsul())
        {
            await RunAsync();
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Application entry point for the proxy cache service: reads configuration
/// from environment variables (including the Varnish memory limit), waits
/// for the required Consul keys, and then runs the cache warmer, error
/// poller, and Varnish shim tasks.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));

    // Create process terminator to handle termination signals.
    terminator = new ProcessTerminator(log);
    terminator.AddHandler(
        () =>
        {
            // Cancel any operations in progress.
            terminator.CancellationTokenSource.Cancel();
        });

    // Read the environment variables.

    // $hack(jeff.lill):
    //
    // We're going to scan the Consul configuration key to determine whether this
    // instance is managing the public or private proxy (or bridges) so we'll
    // be completely compatible with existing deployments.
    //
    // In theory, we could have passed a new environment variable but that's not
    // worth the trouble.

    configKey = Environment.GetEnvironmentVariable("CONFIG_KEY");

    if (string.IsNullOrEmpty(configKey))
    {
        log.LogError("[CONFIG_KEY] environment variable is required.");
        Program.Exit(1, immediate: true);
    }

    isPublic = configKey.Contains("/public/");

    var proxyName = isPublic ? "public" : "private";

    serviceName = $"neon-proxy-{proxyName}-cache:{GitVersion}";

    log.LogInfo(() => $"Starting [{serviceName}]");

    configHashKey = Environment.GetEnvironmentVariable("CONFIG_HASH_KEY");

    if (string.IsNullOrEmpty(configHashKey))
    {
        log.LogError("[CONFIG_HASH_KEY] environment variable is required.");
        Program.Exit(1, immediate: true);
    }

    // [MEMORY_LIMIT]: Varnish memory limit, clamped at [MinMemoryLimit] with
    // defaults applied when unset or unparsable.
    var memoryLimitValue = Environment.GetEnvironmentVariable("MEMORY_LIMIT");

    if (string.IsNullOrEmpty(memoryLimitValue))
    {
        memoryLimitValue = DefMemoryLimitString;
    }

    if (!NeonHelper.TryParseCount(memoryLimitValue, out var memoryLimitDouble))
    {
        memoryLimitDouble = DefMemoryLimit;
    }

    if (memoryLimitDouble < MinMemoryLimit)
    {
        // NOTE(review): "is to small" is a typo ("too small") in this runtime
        // log message; left as-is because fixing it changes emitted output.
        log.LogWarn(() => $"[MEMORY_LIMIT={memoryLimitValue}] is to small. Using [{MinMemoryLimitString}] instead.");
        memoryLimitDouble = MinMemoryLimit;
    }

    memoryLimit = (long)memoryLimitDouble;

    // [WARN_SECONDS]: warning throttle interval (default 300 seconds).
    var warnSeconds = Environment.GetEnvironmentVariable("WARN_SECONDS");

    if (string.IsNullOrEmpty(warnSeconds) || !double.TryParse(warnSeconds, out var warnSecondsValue))
    {
        warnInterval = TimeSpan.FromSeconds(300);
    }
    else
    {
        warnInterval = TimeSpan.FromSeconds(warnSecondsValue);
    }

    debugMode = "true".Equals(Environment.GetEnvironmentVariable("DEBUG"), StringComparison.InvariantCultureIgnoreCase);

    // Log the effective settings for diagnostics.
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");
    log.LogInfo(() => $"CONFIG_KEY={configKey}");
    log.LogInfo(() => $"CONFIG_HASH_KEY={configHashKey}");
    log.LogInfo(() => $"MEMORY_LIMIT={memoryLimit}");
    log.LogInfo(() => $"WARN_SECONDS={warnInterval}");
    log.LogInfo(() => $"DEBUG={debugMode}");

    // Ensure that the required directories exist.
    Directory.CreateDirectory(tmpfsFolder);
    Directory.CreateDirectory(configFolder);
    Directory.CreateDirectory(configUpdateFolder);

    // Establish the hive connections.
    if (NeonHelper.IsDevWorkstation)
    {
        throw new NotImplementedException("This service works only within a Linux container with Varnish installed.");

        //var vaultCredentialsSecret = "neon-proxy-manager-credentials";
        //Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret);
        //hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, $"neon-proxy-{proxyName}"));
    }
    else
    {
        hive = HiveHelper.OpenHive();
    }

    try
    {
        // Open Consul and then start the service tasks.
        log.LogInfo(() => $"Connecting: Consul");

        using (consul = HiveHelper.OpenConsul())
        {
            log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel");

            // Verify that the required Consul keys exist or loop to wait until they
            // are created.  This will allow the service wait for pending hive setup
            // operations to be completed.
            while (!await consul.KV.Exists(configKey))
            {
                log.LogWarn(() => $"Waiting for [{configKey}] key to be present in Consul.");
                await Task.Delay(TimeSpan.FromSeconds(5));
            }

            while (!await consul.KV.Exists(configHashKey))
            {
                log.LogWarn(() => $"Waiting for [{configHashKey}] key to be present in Consul.");
                await Task.Delay(TimeSpan.FromSeconds(5));
            }

            // Crank up the service tasks.
            log.LogInfo(() => $"Starting service tasks.");

            await NeonHelper.WaitAllAsync(
                CacheWarmer(),
                ErrorPollerAsync(),
                VarnishShim());
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Application entry point for the neon-hive-manager service: connects to
/// the hive (using embedded debug Vault credentials on developer workstations),
/// verifies it's running on a manager node, opens Consul/Docker/HiveMQ, and
/// runs the main service task.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Create process terminator to handle process termination signals.
    terminator = new ProcessTerminator(log);

    try
    {
        // Establish the hive connections.
        if (NeonHelper.IsDevWorkstation)
        {
            var secrets = new DebugSecrets();

            // NOTE:
            //
            // Add your target hive's Vault credentials here for
            // manual debugging.  Take care not to commit sensitive
            // credentials for production hives.
            //
            // You'll find this information in the ROOT hive login
            // for the target hive.

            // SECURITY(review): a Vault root token and unseal key are
            // hard-coded in source here.  Verify these are throwaway
            // debug-hive credentials and never production values; anything
            // committed to a repository must be considered compromised.
            secrets.Add("neon-hive-manager-vaultkeys",
                new VaultCredentials()
                {
                    RootToken    = "cd5831fa-86ec-cc22-b1f3-051f88147382",
                    KeyThreshold = 1,
                    UnsealKeys   = new List <string>()
                    {
                        "8SgwdO/GwqJ7nyxT2tK2n1CCR3084kQVh7gEy8jNQh8="
                    }
                });

            hive = HiveHelper.OpenHiveRemote(secrets);
        }
        else
        {
            hive = HiveHelper.OpenHive(sshCredentialsSecret: "neon-ssh-credentials");
        }

        // Ensure that we're running on a manager node.  We won't be able
        // to query swarm status otherwise.

        var nodeRole = Environment.GetEnvironmentVariable("NEON_NODE_ROLE");

        if (string.IsNullOrEmpty(nodeRole))
        {
            log.LogCritical(() => "Service does not appear to be running on a neonHIVE.");
            Program.Exit(1, immediate: true);
        }

        if (!string.Equals(nodeRole, NodeRole.Manager, StringComparison.OrdinalIgnoreCase))
        {
            log.LogCritical(() => $"[neon-hive-manager] service is running on a [{nodeRole}] hive node.  Running on only [{NodeRole.Manager}] nodes are supported.");
            Program.Exit(1, immediate: true);
        }

        // Open the hive data services and then start the main service task.

        log.LogDebug(() => $"Connecting: Consul");

        using (consul = HiveHelper.OpenConsul())
        {
            log.LogDebug(() => $"Connecting: Docker");

            using (docker = HiveHelper.OpenDocker())
            {
                log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel");

                // We're passing [useBootstrap=true] here so that the HiveMQ client will
                // connect directly to the HiveMQ cluster nodes as opposed to routing
                // traffic through the private traffic manager.  This is necessary because
                // the load balancers rely on HiveMQ to broadcast update notifications.
                //
                // One consequence of this is that this service will need to be restarted
                // whenever HiveMQ instances are relocated to different hive hosts.

                // We're going to monitor for changes to the HiveMQ bootstrap settings
                // and gracefully terminate the process when this happens.  We're then
                // depending on Docker to restart the process so we'll be able to pick
                // up the change.

                hive.HiveMQ.Internal.HiveMQBootstrapChanged +=
                    (s, a) =>
                    {
                        log.LogInfo("HiveMQ bootstrap settings change detected.  Terminating service with [exitcode=-1] expecting that Docker will restart it.");

                        // Use ExitCode=-1 so that we'll restart even if the service/container
                        // was not configured with [restart=always].

                        terminator.Exit(-1);
                    };

                using (proxyNotifyChannel = hive.HiveMQ.Internal.GetProxyNotifyChannel(useBootstrap: true).Open())
                {
                    await RunAsync();
                }
            }
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Application entry point for the neon-proxy-manager service: wires up
/// graceful termination, logs into Vault via a Docker secret, opens
/// Vault/Consul/Docker/HiveMQ, initializes the persisted service settings
/// in Consul, and runs the config generator and failsafe broadcaster tasks.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    // Configure logging from the [LOG_LEVEL] environment variable.
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Create process terminator to handle process termination signals.
    terminator = new ProcessTerminator(log);
    terminator.AddHandler(
        () =>
        {
            // Cancel any operations in progress.
            exit = true;
            terminator.CancellationTokenSource.Cancel();

            // This gracefully closes the [proxyNotifyChannel] so HiveMQ will
            // promptly remove the associated queue.
            if (proxyNotifyChannel != null)
            {
                proxyNotifyChannel.Dispose();
                proxyNotifyChannel = null;
            }

            // Give in-flight config processing a chance to finish before exiting.
            try
            {
                NeonHelper.WaitFor(() => !processingConfigs, terminator.Timeout);
                log.LogInfo(() => "Tasks stopped gracefully.");
            }
            catch (TimeoutException)
            {
                log.LogWarn(() => $"Tasks did not stop within [{terminator.Timeout}].");
            }
        });

    // Establish the hive connections.
    if (NeonHelper.IsDevWorkstation)
    {
        var vaultCredentialsSecret = "neon-proxy-manager-credentials";

        Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret);

        hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, "neon-proxy-manager"));
    }
    else
    {
        hive = HiveHelper.OpenHive();
    }

    // [neon-proxy-manager] requires access to the [IHostingManager] implementation for the
    // current environment, so we'll need to initialize the hosting loader.
    HostingLoader.Initialize();

    try
    {
        // Log into Vault using a Docker secret.
        var vaultCredentialsSecret = Environment.GetEnvironmentVariable("VAULT_CREDENTIALS");

        if (string.IsNullOrEmpty(vaultCredentialsSecret))
        {
            log.LogCritical("[VAULT_CREDENTIALS] environment variable does not exist.");
            Program.Exit(1, immediate: true);
        }

        var vaultSecret = HiveHelper.GetSecret(vaultCredentialsSecret);

        if (string.IsNullOrEmpty(vaultSecret))
        {
            log.LogCritical($"Cannot read Docker secret [{vaultCredentialsSecret}].");
            Program.Exit(1, immediate: true);
        }

        var vaultCredentials = HiveCredentials.ParseJson(vaultSecret);

        if (vaultCredentials == null)
        {
            log.LogCritical($"Cannot parse Docker secret [{vaultCredentialsSecret}].");
            Program.Exit(1, immediate: true);
        }

        // Open the hive data services and then start the main service task.
        log.LogInfo(() => $"Connecting: Vault");

        using (vault = HiveHelper.OpenVault(vaultCredentials))
        {
            log.LogInfo(() => $"Connecting: Consul");

            using (consul = HiveHelper.OpenConsul())
            {
                log.LogInfo(() => $"Connecting: Docker");

                using (docker = HiveHelper.OpenDocker())
                {
                    log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel");

                    // NOTE:
                    //
                    // We're passing [useBootstrap=true] here so that the HiveMQ client will
                    // connect directly to the HiveMQ cluster nodes as opposed to routing
                    // traffic through the private traffic manager.  This is necessary because
                    // the load balancers rely on HiveMQ to broadcast update notifications.
                    //
                    // One consequence of this is that this service will need to be restarted
                    // whenever HiveMQ instances are relocated to different hive hosts.

                    // We're going to monitor for changes to the HiveMQ bootstrap settings
                    // and gracefully terminate the process when this happens.  We're then
                    // depending on Docker to restart the process so we'll be able to pick
                    // up the change.

                    hive.HiveMQ.Internal.HiveMQBootstrapChanged +=
                        (s, a) =>
                        {
                            log.LogInfo("HiveMQ bootstrap settings change detected. Terminating service with [exitcode=-1] expecting that Docker will restart it.");

                            // Use ExitCode=-1 so that we'll restart even if the service/container
                            // was not configured with [restart=always].

                            terminator.Exit(-1);
                        };

                    using (proxyNotifyChannel = hive.HiveMQ.Internal.GetProxyNotifyChannel(useBootstrap: true).Open())
                    {
                        // Read the service settings, initializing their default values
                        // if they don't already exist.

                        if (!await consul.KV.Exists(certWarnDaysKey))
                        {
                            log.LogInfo($"Persisting setting [{certWarnDaysKey}=30.0]");
                            await consul.KV.PutDouble(certWarnDaysKey, 30.0);
                        }

                        if (!await consul.KV.Exists(cacheRemoveSecondsKey))
                        {
                            log.LogInfo($"Persisting setting [{cacheRemoveSecondsKey}=300.0]");
                            await consul.KV.PutDouble(cacheRemoveSecondsKey, 300.0);
                        }

                        if (!await consul.KV.Exists(failsafeSecondsKey))
                        {
                            log.LogInfo($"Persisting setting [{failsafeSecondsKey}=120.0]");
                            await consul.KV.PutDouble(failsafeSecondsKey, 120);
                        }

                        certWarnTime = TimeSpan.FromDays(await consul.KV.GetDouble(certWarnDaysKey));

                        // FIX: [cacheRemoveSecondsKey] persists a SECONDS value
                        // (default 300.0, logged below via [TotalSeconds]) but the
                        // original code converted it with [TimeSpan.FromDays()],
                        // yielding 300 days instead of the intended 5 minutes.
                        cacheRemoveDelay = TimeSpan.FromSeconds(await consul.KV.GetDouble(cacheRemoveSecondsKey));

                        failsafeInterval = TimeSpan.FromSeconds(await consul.KV.GetDouble(failsafeSecondsKey));

                        log.LogInfo(() => $"Using setting [{certWarnDaysKey}={certWarnTime.TotalSeconds}]");
                        log.LogInfo(() => $"Using setting [{cacheRemoveSecondsKey}={cacheRemoveDelay.TotalSeconds}]");
                        log.LogInfo(() => $"Using setting [{failsafeSecondsKey}={failsafeInterval.TotalSeconds}]");

                        // Run the service tasks.

                        var tasks = new List <Task>();

                        tasks.Add(ConfigGeneratorAsync());
                        tasks.Add(FailsafeBroadcasterAsync());
                        await NeonHelper.WaitAllAsync(tasks);
                    }
                }
            }
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always release the hive connection and signal the terminator,
        // even on the error path.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <inheritdoc/>
public void LogCritical(object message, string activityId = null)
{
    // Forward to the wrapped logger first, then mirror the message into
    // the capture buffer for later inspection.
    log.LogCritical(message, activityId);

    var captured = $"[CRITICAL] {message}";

    capture.AppendLine(captured);
}
/// <summary>
/// Temporarily implements our own resource watcher.  Dispatches ADDED,
/// MODIFIED, DELETED, and ERROR watch events to the controller, using cached
/// [metadata.generation] values to distinguish spec changes (reconcile) from
/// status-only changes (status-modified).
/// </summary>
/// <param name="cancellationToken">The <see cref="CancellationToken"/> used to stop the watcher when the operator is demoted.</param>
/// <returns>The tracking <see cref="Task"/>.</returns>
private async Task WatchAsync(CancellationToken cancellationToken)
{
    await SyncContext.Clear;

    //-----------------------------------------------------------------
    // We're going to use these dictionaries to keep track of the [Generation]
    // and the [Status] property of the resources we're watching so we can
    // distinguish between changes to the status vs. changes to anything else
    // in the resource.
    //
    // [statusJsonCache] holds the status property serialized to JSON, keyed
    // by resource name.  Note that the resource entities might not have a
    // [Status] property.

    var entityType      = typeof(TEntity);
    var statusGetter    = entityType.GetProperty("Status")?.GetMethod;
    var generationCache = new Dictionary<string, long>(StringComparer.InvariantCultureIgnoreCase);
    var statusJsonCache = new Dictionary<string, string>(StringComparer.InvariantCultureIgnoreCase);

    //-----------------------------------------------------------------
    // Our watcher handler action.

    var actionAsync =
        async (WatchEvent<TEntity> @event) =>
        {
            await SyncContext.Clear;
            await mutex.ExecuteActionAsync(
                async () =>
                {
                    try
                    {
                        var resource      = @event.Value;
                        var resourceName  = resource.Metadata.Name;
                        var newGeneration = resource.Metadata.Generation.Value;

                        // Ignore resources rejected by the filter.
                        if (!filter(resource))
                        {
                            return;
                        }

                        switch (@event.Type)
                        {
                            case WatchEventType.Added:

                                try
                                {
                                    options.ReconcileCounter?.Inc();
                                    await CreateController().ReconcileAsync(resource);
                                }
                                catch (Exception e)
                                {
                                    // FIX: use [?.] like every other counter so a null
                                    // counter can't throw inside the error handler.
                                    options.ReconcileErrorCounter?.Inc();
                                    log.LogError(e);
                                }

                                generationCache[resourceName] = newGeneration;
                                break;

                            case WatchEventType.Bookmark:

                                break;  // We don't care about these.

                            case WatchEventType.Error:

                                // I believe we're only going to see this for extreme scenarios, like:
                                //
                                //      1. The CRD we're watching was deleted and recreated.
                                //      2. The watcher is so far behind that part of the
                                //         history is no longer available.
                                //
                                // We're going to log this and terminate the application, expecting
                                // that Kubernetes will reschedule it so we can start over.

                                var stub = new TEntity();

                                if (!string.IsNullOrEmpty(resourceNamespace))
                                {
                                    log.LogCritical($"Critical error watching: [namespace={resourceNamespace}] {stub.ApiGroupAndVersion}/{stub.Kind}");
                                }
                                else
                                {
                                    log.LogCritical($"Critical error watching: {stub.ApiGroupAndVersion}/{stub.Kind}");
                                }

                                log.LogCritical("Terminating the pod so Kubernetes can reschedule it and we can restart the watch.");
                                Environment.Exit(1);
                                break;

                            case WatchEventType.Deleted:

                                try
                                {
                                    options.DeleteCounter?.Inc();
                                    await CreateController().DeletedAsync(resource);
                                }
                                catch (Exception e)
                                {
                                    options.DeleteErrorCounter?.Inc();
                                    log.LogError(e);
                                }

                                // Forget the deleted resource.
                                generationCache.Remove(resourceName);
                                statusJsonCache.Remove(resourceName);
                                break;

                            case WatchEventType.Modified:

                                // Reconcile when the resource generation changes.

                                if (!generationCache.TryGetValue(resourceName, out var oldGeneration))
                                {
                                    // FIX: grammar ("does not known" --> "is not known").
                                    Covenant.Assert(false, $"Resource [{resourceName}] is not known.");
                                }

                                // FIX: Kubernetes increments [metadata.generation] when the
                                // spec changes, so reconcile whenever the generation differs
                                // from the cached value.  The original test
                                // [newGeneration < oldGeneration] could never fire for a
                                // monotonically increasing generation, so MODIFIED events
                                // never triggered a reconcile.
                                if (newGeneration != oldGeneration)
                                {
                                    try
                                    {
                                        options.ReconcileCounter?.Inc();
                                        await CreateController().ReconcileAsync(resource);
                                    }
                                    catch (Exception e)
                                    {
                                        options.ReconcileErrorCounter?.Inc();
                                        log.LogError(e);
                                    }

                                    // FIX: refresh the cached generation so subsequent
                                    // MODIFIED events compare against the latest value
                                    // (the original never updated the cache here).
                                    generationCache[resourceName] = newGeneration;
                                }

                                // There's no need for STATUS-MODIFIED when the resource has no status.

                                if (statusGetter == null)
                                {
                                    return;
                                }

                                var newStatus     = statusGetter.Invoke(resource, Array.Empty<object>());
                                var newStatusJson = newStatus == null ? null : JsonSerializer.Serialize(newStatus);

                                statusJsonCache.TryGetValue(resourceName, out var oldStatusJson);

                                if (newStatusJson != oldStatusJson)
                                {
                                    try
                                    {
                                        options.StatusModifyCounter?.Inc();
                                        await CreateController().StatusModifiedAsync(resource);
                                    }
                                    catch (Exception e)
                                    {
                                        options.StatusModifyErrorCounter?.Inc();
                                        log.LogError(e);
                                    }
                                }

                                // FIX: cache the latest status JSON so status-modified
                                // fires only on actual status changes (the original never
                                // wrote to [statusJsonCache], so [oldStatusJson] was
                                // always null and every MODIFIED event with a non-null
                                // status fired STATUS-MODIFIED).
                                statusJsonCache[resourceName] = newStatusJson;
                                break;
                        }
                    }
                    catch (Exception e)
                    {
                        log.LogCritical(e);
                        log.LogCritical("Cannot recover from exception within watch loop. Terminating process.");
                        Environment.Exit(1);
                    }
                });
        };

    //-----------------------------------------------------------------
    // Start the watcher.

    try
    {
        await k8s.WatchAsync<TEntity>(actionAsync, namespaceParameter: resourceNamespace, cancellationToken: cancellationToken);
    }
    catch (OperationCanceledException)
    {
        // This is thrown when the watcher is stopped due the operator being demoted.

        return;
    }
}