/// <summary>
/// Constructor.
/// </summary>
/// <param name="hive">The hive proxy.</param>
/// <param name="hiveLoginPath">The path to the hive login file.</param>
public RegistryCache(HiveProxy hive, string hiveLoginPath)
{
    Covenant.Requires<ArgumentNullException>(hive != null);

    // FIX: the original validated [hive] but not [hiveLoginPath]; validate it
    // too, consistent with the Covenant patterns used elsewhere in this codebase.
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrWhiteSpace(hiveLoginPath));

    this.hive          = hive;
    this.hiveLoginPath = hiveLoginPath;
}
/// <summary>
/// Test constructor: ensures each test starts with no local Docker registry
/// or related assets deployed on the hive.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_AnsibleDockerRegistry(HiveFixture fixture)
{
    // NOTE(review): the LoginAndInitialize() result is ignored here, unlike
    // other test constructors in this suite that branch on it; presumably the
    // cleanup below must run before every test regardless -- confirm intent.
    fixture.LoginAndInitialize();

    this.hiveFixture = fixture;
    this.hive        = fixture.Hive;

    // Ensure that tests start without a local registry
    // and related assets.

    var manager = this.hive.GetReachableManager();

    // Remove the registry service if a previous run left it deployed.
    if (this.hive.Docker.InspectService("neon-registry") != null)
    {
        manager.DockerCommand(RunOptions.None, "docker service rm neon-registry");
    }

    // Clear volumes plus registry-related certificate, traffic rule, DNS
    // entries, and cached registry logins from prior runs.
    hiveFixture.ClearVolumes();
    this.hive.Certificate.Remove("neon-registry");
    this.hive.PublicTraffic.RemoveRule("neon-registry");
    this.hive.Dns.Remove("xunit-registry.neonforge.net");
    this.hive.Dns.Remove("xunit-registry2.neonforge.net");
    this.hive.Registry.Logout("xunit-registry.neonforge.net");
    this.hive.Registry.Logout("xunit-registry2.neonforge.net");
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    // With no arguments (or an explicit help request) just print usage and quit.
    if (commandLine.HasHelpOption || commandLine.Arguments.Length == 0)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    var hiveLogin = Program.ConnectHive();
    var hive      = new HiveProxy(hiveLogin);
    var command   = commandLine.Arguments.ElementAtOrDefault(0);

    if (command == "ls" || command == "list")
    {
        // Write each node name padded into a fixed-width column (longest
        // name plus four spaces), followed by the node's private address.
        var nameColumnWidth = hive.Definition.SortedNodes.Max(n => n.Name.Length) + 4;

        foreach (var node in hive.Definition.SortedNodes)
        {
            Console.WriteLine(node.Name.PadRight(nameColumnWidth) + node.PrivateAddress.ToString());
        }
    }
    else
    {
        Console.Error.WriteLine($"*** ERROR: Unknown command: [{command}]");
        Program.Exit(1);
    }
}
/// <summary>
/// Test constructor: initializes the hive fixture along with a nested
/// <see cref="HostsFixture"/> used to set up local DNS entries for the tests.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_Hive(HiveFixture fixture)
{
    if (!fixture.LoginAndInitialize(action:
        () =>
        {
            // This adds a [HostsFixture] to the [HiveFixture] (which inherits
            // from [TestFixtureSet]).  We'll name the HostFixture so we can use
            // it to setup local DNS entries for the tests.
            fixture.AddFixture("hosts", hosts = new HostsFixture());
        }))
    {
        // The fixture was already initialized by a previous test class run,
        // so the action above did NOT execute this time.

        // This call ensures that the hive is reset to a
        // pristine state before each test is invoked.
        fixture.Reset();

        // Retrieve the hosts fixture and reset it.
        hosts = (HostsFixture)fixture["hosts"];
        hosts.Reset();
    }

    this.hiveFixture = fixture;
    this.hive        = fixture.Hive;
}
/// <summary>
/// Returns the fully qualified image name required to upgrade a container or service.
/// </summary>
/// <param name="hive">The hive proxy.</param>
/// <param name="componentInfo">The hive component version information.</param>
/// <param name="componentName">The service or container name.</param>
/// <param name="imageTag">Optional tag that overrides the component's default image tag.</param>
/// <returns>The fully qualified image or <c>null</c> if there is no known image for the service or container.</returns>
private static string GetUpdateImage(HiveProxy hive, HiveComponentInfo componentInfo, string componentName, string imageTag = null)
{
    // Map the component name to its unqualified image name.
    if (!componentInfo.ComponentToImage.TryGetValue(componentName, out var imageName))
    {
        hive.FirstManager.LogLine($"WARNING: Cannot map service or container named [{componentName}] to an image.");
        return null;
    }

    // Map the unqualified image name to the fully qualified image.
    if (!componentInfo.ImageToFullyQualified.TryGetValue(imageName, out var image))
    {
        hive.FirstManager.LogLine($"WARNING: Cannot map unqualified image name [{imageName}] to a fully qualified image.");
        return null;
    }

    if (string.IsNullOrEmpty(imageTag))
    {
        return image;
    }

    // Replace the default image tag with the override, stripping any
    // existing ":tag" suffix first.
    var tagSeparatorPos = image.LastIndexOf(':');
    var baseImage       = tagSeparatorPos == -1 ? image : image.Substring(0, tagSeparatorPos);

    return $"{baseImage}:{imageTag}";
}
/// <summary>
/// Updates docker on a hive node.
/// </summary>
/// <param name="hive">The target hive.</param>
/// <param name="node">The target node.</param>
/// <param name="dockerPackageUri">The Docker Debian package URI.</param>
private static void UpdateDocker(HiveProxy hive, SshProxy<NodeDefinition> node, string dockerPackageUri)
{
    try
    {
        // Drain the node's swarm service tasks before taking Docker down.
        if (node.Metadata.InSwarm)
        {
            node.Status = "swarm: drain services";
            hive.Docker.DrainNode(node.Name);
        }

        node.Status = "stop: docker";
        node.SudoCommand("systemctl stop docker").EnsureSuccess();

        // Download the new Docker Debian package to a temporary file.
        node.Status = "download: docker package";
        node.SudoCommand($"curl {Program.CurlOptions} {dockerPackageUri} -o /tmp/docker.deb").EnsureSuccess();

        node.Status = "update: docker";
        node.SudoCommand("gdebi /tmp/docker.deb").EnsureSuccess();

        // Best-effort cleanup of the temporary package file; note that this
        // command's result is intentionally not checked with EnsureSuccess().
        node.SudoCommand("rm /tmp/docker.deb");

        node.Status = "restart: docker";
        node.SudoCommand("systemctl start docker").EnsureSuccess();

        // Return the node to active swarm duty.
        if (node.Metadata.InSwarm)
        {
            node.Status = "swarm: activate";
            hive.Docker.ActivateNode(node.Name);
        }
    }
    catch (Exception e)
    {
        // Record the failure against the node rather than propagating
        // the exception.
        node.Fault($"[docker] update failed: {NeonHelper.ExceptionError(e)}");
    }
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.Arguments.Length == 0)
    {
        Console.WriteLine(usage);
        Program.Exit(1);
    }

    hiveLogin = Program.ConnectHive();
    hive      = new HiveProxy(hiveLogin);

    var command = commandLine.Arguments.ElementAt(0);

    // FIX: removed the unused [yaml] local that read the [--yaml] option but
    // was never referenced; the subcommand handlers receive [commandLine]
    // and can parse that option themselves.

    if (command == "help")
    {
        Console.WriteLine(help);
        Program.Exit(0);
    }

    // Dispatch to the subcommand handler.
    switch (command)
    {
        case "addr":
        case "addresses":

            ListAddresses(commandLine);
            break;

        case "get":

            GetEntry(commandLine);
            break;

        case "ls":
        case "list":

            ListEntries(commandLine);
            break;

        case "set":

            SetEntry(commandLine);
            break;

        case "rm":
        case "remove":

            RemoveEntry(commandLine);
            break;

        default:

            Console.Error.WriteLine($"*** ERROR: Unknown command: [{command}]");
            Program.Exit(1);
            break;
    }
}
/// <summary>
/// Test constructor: logs into the hive via the fixture and resets it when
/// the fixture was already initialized by a previous test.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_HiveState(HiveFixture fixture)
{
    var firstInitialization = fixture.LoginAndInitialize();

    if (!firstInitialization)
    {
        // Return the hive to a pristine state before this test runs.
        fixture.Reset();
    }

    this.hiveFixture = fixture;
    this.hive        = fixture.Hive;
}
/// <summary>
/// Test constructor: logs into the hive via the fixture and clears any
/// traffic manager state left behind by previous tests.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_AnsibleTrafficManager(HiveFixture fixture)
{
    var firstInitialization = fixture.LoginAndInitialize();

    if (!firstInitialization)
    {
        // Remove traffic manager rules remaining from earlier tests.
        fixture.ClearTrafficManagers();
    }

    this.hiveFixture = fixture;
    this.hive        = fixture.Hive;
}
/// <summary>
/// Verifies the hive log service health.
/// </summary>
/// <param name="hive">The hive proxy.</param>
public static void CheckLogServices(HiveProxy hive)
{
    // There's nothing to verify when hive logging is disabled.
    if (hive.Definition.Log.Enabled)
    {
        CheckLogEsDataService(hive);
        CheckLogCollectorService(hive);
        CheckLogKibanaService(hive);
    }
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    var hiveLogin = Program.ConnectHive();
    var hive      = new HiveProxy(hiveLogin);

    // Reports a malformed SETTING=VALUE argument and exits the program.
    void ExitWithUsageError()
    {
        Console.Error.WriteLine("*** ERROR: SETTING=VALUE expected.");
        Console.Error.WriteLine();
        Console.Error.WriteLine(usage);
        Program.Exit(1);
    }

    if (commandLine.Arguments.Length != 1)
    {
        ExitWithUsageError();
    }

    var noVerify   = commandLine.HasOption("--no-verify");
    var assignment = commandLine.Arguments[0];

    // Split only on the first '=' so values may themselves contain '='.
    var parts = assignment.Split(new char[] { '=' }, 2);

    if (parts.Length != 2)
    {
        ExitWithUsageError();
    }

    var settingName  = parts[0].ToLowerInvariant();
    var settingValue = parts[1];

    if (noVerify)
    {
        // Write the global directly, skipping verification.
        hive.Globals.Set(settingName, settingValue);
    }
    else
    {
        try
        {
            // SetUser() (vs. Set()) presumably verifies that this setting
            // may be modified by users and throws when it may not.
            hive.Globals.SetUser(settingName, settingValue);
        }
        catch (Exception e)
        {
            Console.Error.WriteLine($"*** ERROR: {e.Message}");
            Program.Exit(1);
        }
    }

    Console.WriteLine();
    Console.WriteLine($"* updated: {settingName}");
}
/// <summary>
/// Test constructor: logs into the hive via the fixture and ensures there is
/// no active Docker Hub login before each test.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_AnsibleDockerLogin(HiveFixture fixture)
{
    fixture.LoginAndInitialize();

    var hive = fixture.Hive;

    this.hiveFixture = fixture;
    this.hive        = hive;

    // Ensure that we're not already logged into Docker Hub.
    hive.Registry.Logout(HiveConst.DockerPublicRegistry);
}
/// <summary>
/// Test constructor: logs into the hive via the test fixture.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_AnsibleGlobals(HiveFixture fixture)
{
    // We're going to use a unique name for each test so we only
    // need to reset the test fixture once for all tests implemented
    // by this class.
    //
    // NOTE(review): the original comment said "dashboard name", which looks
    // copied from a dashboards test class -- confirm what's actually unique
    // per test here.
    fixture.LoginAndInitialize(login: null);

    this.hiveFixture = fixture;
    this.hive        = fixture.Hive;
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    hiveLogin = Program.ConnectHive();
    hive      = new HiveProxy(hiveLogin);

    // Subcommand words; presumably reserved so they can't collide with
    // user-supplied names handled by the default Show() path -- confirm.
    reserved = new HashSet<string>(StringComparer.InvariantCultureIgnoreCase)
    {
        "get",
        "list",
        "ls",
        "rm",
        "remove",
        "set"
    };

    if (commandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    var command = commandLine.Arguments.ElementAtOrDefault(0);

    if (command == "get")
    {
        Get(commandLine);
    }
    else if (command == "ls" || command == "list")
    {
        List(commandLine);
    }
    else if (command == "rm" || command == "remove")
    {
        Remove(commandLine);
    }
    else if (command == "set")
    {
        Set(commandLine);
    }
    else
    {
        // Anything else (including no command at all) shows the default view.
        Show(commandLine);
    }
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    if (commandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    // Connect to the hive.  The [hive] local is currently unused because the
    // command itself has not been implemented yet (see the $todo below).
    var hiveLogin = Program.ConnectHive();
    var hive = new HiveProxy(hiveLogin);

    // $todo(jeff.lill): Implement this
}
/// <summary>
/// Test constructor: logs into the hive, resets it if it was already
/// initialized, and then waits for hive DNS to converge.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_HiveDns(HiveFixture fixture)
{
    // Reset the hive unless this is the very first fixture initialization.
    if (fixture.LoginAndInitialize() == false)
    {
        fixture.Reset();
    }

    this.hiveFixture = fixture;
    this.hive        = fixture.Hive;

    // Wait for the hive DNS and all node resolvers to reach a consistent
    // state.  This is slow (about a minute).
    fixture.ConvergeDns();
}
/// <summary>
/// Initializes the hive login and hive proxy and verifies that the
/// current user has root privileges and the hive enables a VPN.
/// </summary>
private void RootLogin()
{
    hiveLogin = Program.ConnectHive();

    // Root operations require the hive VPN to be enabled.
    if (!hiveLogin.Definition.Vpn.Enabled)
    {
        Console.Error.WriteLine(VpnNotEnabled);
        Program.Exit(1);
    }

    // The CA zip key is apparently present only for root logins, so its
    // absence indicates the current login lacks root privileges.
    var hasRootCredentials = !string.IsNullOrEmpty(hiveLogin.VpnCredentials.CaZipKey);

    if (!hasRootCredentials)
    {
        Console.Error.WriteLine(MustHaveRootPrivileges);
        Program.Exit(1);
    }

    hive = HiveHelper.OpenHive(hiveLogin);
}
/// <summary>
/// Appends the steps required to start a neonHIVE related Docker service and upload
/// a script to the hive managers to make it easy to restart the service manually or
/// for hive updates.
/// </summary>
/// <param name="hive">The target hive.</param>
/// <param name="steps">The target step list.</param>
/// <param name="serviceName">Identifies the service.</param>
/// <param name="image">The Docker image to be used by the service.</param>
/// <param name="command">The <c>docker service create ...</c> command.</param>
/// <param name="runOptions">
/// Optional run options (defaults to <see cref="RunOptions.FaultOnError"/>).
/// NOTE(review): this parameter is not referenced by the method body below --
/// confirm whether it should be forwarded to the generated steps.
/// </param>
/// <remarks>
/// <para>
/// This method performs the following steps:
/// </para>
/// <list type="number">
/// <item>
/// Passes <paramref name="image"/> to <see cref="Program.ResolveDockerImage(string)"/> to
/// obtain the actual image to be started.
/// </item>
/// <item>
/// Generates the first few lines of the script file that sets the
/// default image as the <c>TARGET_IMAGE</c> macro and then overrides
/// this with the script parameter (if there is one).
/// </item>
/// <item>
/// Appends the commands to the script, replacing any text that matches
/// <see cref="ImagePlaceholderArg"/> with <c>${TARGET_IMAGE}</c> to make it easy
/// for services to be upgraded later.
/// </item>
/// <item>
/// Starts the service.
/// </item>
/// <item>
/// Uploads the generated script to each hive manager to [<see cref="HiveHostFolders.Scripts"/>/<paramref name="serviceName"/>.sh].
/// </item>
/// </list>
/// </remarks>
public static void AddServiceStartSteps(HiveProxy hive, ConfigStepList steps, string serviceName, string image, IBashCommandFormatter command, RunOptions runOptions = RunOptions.FaultOnError)
{
    Covenant.Requires<ArgumentNullException>(hive != null);
    Covenant.Requires<ArgumentNullException>(steps != null);
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrWhiteSpace(serviceName));
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrWhiteSpace(image));
    Covenant.Requires<ArgumentNullException>(command != null);

    // Generate the service start script.  The [false] argument selects the
    // service (vs. container) form of the script, matching the [true] passed
    // by the container variant of this method.

    var script = CreateStartScript(serviceName, image, false, command);

    // Add steps to upload the script to the managers and then call the script
    // to create the service on the first manager.

    var scriptPath = LinuxPath.Combine(HiveHostFolders.Scripts, $"{serviceName}.sh");

    steps.Add(hive.GetFileUploadSteps(hive.Managers, scriptPath, script, permissions: "740"));
    steps.Add(CommandStep.CreateIdempotentDocker(hive.FirstManager.Name, $"setup/{serviceName}", scriptPath));
}
/// <summary>
/// Appends the steps required to start a neonHIVE related Docker container and upload
/// a script to the hive managers to make it easy to restart the service manually or
/// for hive updates.
/// </summary>
/// <param name="hive">The target hive.</param>
/// <param name="steps">The target step list.</param>
/// <param name="node">The target hive node.</param>
/// <param name="containerName">Identifies the service.</param>
/// <param name="image">The Docker image to be used by the container.</param>
/// <param name="command">The <c>docker service create ...</c> command.</param>
/// <param name="runOptions">
/// Optional run options (defaults to <see cref="RunOptions.FaultOnError"/>).
/// NOTE(review): this parameter is not referenced by the method body below --
/// confirm whether it should be forwarded to the generated steps.
/// </param>
/// <remarks>
/// <para>
/// This method performs the following steps:
/// </para>
/// <list type="number">
/// <item>
/// Passes <paramref name="image"/> to <see cref="Program.ResolveDockerImage(string)"/> to
/// obtain the actual image to be started.
/// </item>
/// <item>
/// Generates the first few lines of the script file that sets the
/// default image as the <c>TARGET_IMAGE</c> macro and then overrides
/// this with the script parameter (if there is one).  We also add
/// a Docker command that pulls the image.
/// </item>
/// <item>
/// Appends the commands to the script, replacing any text that matches
/// <see cref="ImagePlaceholderArg"/> with <c>${TARGET_IMAGE}</c> to make it easy
/// for services to be upgraded later.
/// </item>
/// <item>
/// Starts the service.
/// </item>
/// <item>
/// Uploads the generated script to each hive manager to [<see cref="HiveHostFolders.Scripts"/>/<paramref name="containerName"/>.sh].
/// </item>
/// </list>
/// </remarks>
public static void AddContainerStartSteps(HiveProxy hive, ConfigStepList steps, SshProxy<NodeDefinition> node, string containerName, string image, IBashCommandFormatter command, RunOptions runOptions = RunOptions.FaultOnError)
{
    Covenant.Requires<ArgumentNullException>(hive != null);
    Covenant.Requires<ArgumentNullException>(steps != null);
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrWhiteSpace(containerName));
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrWhiteSpace(image));
    Covenant.Requires<ArgumentNullException>(command != null);

    // Generate the container start script.  The [true] argument selects the
    // container (vs. service) form of the script, matching the [false] passed
    // by the service variant of this method.

    var script = CreateStartScript(containerName, image, true, command);

    // Add steps to upload the script to the managers and then call the script
    // to create the container on the target node.

    var scriptPath = LinuxPath.Combine(HiveHostFolders.Scripts, $"{containerName}.sh");

    steps.Add(hive.GetFileUploadSteps(node, scriptPath, script, permissions: "740"));
    steps.Add(CommandStep.CreateIdempotentDocker(node.Name, $"setup/{containerName}", scriptPath));
}
/// <summary>
/// Starts a neonHIVE related Docker service and also uploads a script to the
/// hive managers to make it easy to restart the service manually or for hive
/// updates.
/// </summary>
/// <param name="hive">The target hive.</param>
/// <param name="serviceName">Identifies the service.</param>
/// <param name="image">The Docker image to be used by the service.</param>
/// <param name="command">The <c>docker service create ...</c> command.</param>
/// <param name="runOptions">Optional run options (defaults to <see cref="RunOptions.FaultOnError"/>).</param>
/// <remarks>
/// <para>
/// The generated start script sets the default image as the <c>TARGET_IMAGE</c>
/// macro (overridable via a script parameter) so services can be upgraded
/// easily later.  The script is uploaded to every hive manager at
/// [<see cref="HiveHostFolders.Scripts"/>/<paramref name="serviceName"/>.sh]
/// and is then run (without a parameter) on the first manager to create the
/// service.
/// </para>
/// </remarks>
public static void StartService(HiveProxy hive, string serviceName, string image, IBashCommandFormatter command, RunOptions runOptions = RunOptions.FaultOnError)
{
    Covenant.Requires<ArgumentNullException>(hive != null);
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrWhiteSpace(serviceName));
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrWhiteSpace(image));
    Covenant.Requires<ArgumentNullException>(command != null);

    var firstManager = hive.FirstManager;

    firstManager.Status = $"start: {serviceName}";

    // Generate the service start script and compute its target path.

    var startScript     = CreateStartScript(serviceName, image, false, command);
    var startScriptPath = LinuxPath.Combine(HiveHostFolders.Scripts, $"{serviceName}.sh");

    // Upload the script to every manager node and restrict its permissions.

    foreach (var manager in hive.Managers)
    {
        manager.UploadText(startScriptPath, startScript);
        manager.SudoCommand($"chmod 740 {startScriptPath}");
    }

    // Run the script without a parameter on the first manager to start the service.

    firstManager.IdempotentDockerCommand($"setup/{serviceName}",
        response =>
        {
            if (response.ExitCode != 0)
            {
                firstManager.Fault(response.ErrorSummary);
            }
        },
        runOptions,
        startScriptPath);

    firstManager.Status = string.Empty;
}
/// <summary>
/// Test constructor: resets the hive when necessary and lazily creates a
/// self-signed wildcard certificate shared by the tests in this class.
/// </summary>
/// <param name="fixture">The shared hive test fixture.</param>
public Test_HiveTrafficManager(HiveFixture fixture)
{
    if (fixture.LoginAndInitialize() == false)
    {
        fixture.Reset();
    }

    this.hiveFixture = fixture;
    this.hive        = fixture.Hive;

    // Generate a self-signed wildcard certificate covering the test hostname
    // and its subdomains, unless an earlier test already created one.
    if (certificate == null)
    {
        var certHosts = new string[] { testHostname, $"*.{testHostname}" };

        certificate = TlsCertificate.CreateSelfSigned(certHosts);
    }
}
/// <summary>
/// Adds a global step that restarts the designated cluster nodes one-by-one,
/// in the order: managers, then workers, then pets.
/// </summary>
/// <param name="hive">The hive proxy.</param>
/// <param name="controller">The setup controller.</param>
/// <param name="predicate">
/// Optionally specifies the predicate used to select the hive nodes to be
/// rebooted.  Defaults to <c>null</c>, indicating that all nodes will be rebooted.
/// </param>
/// <param name="stepLabel">
/// Optionally specifies the step label.  Defaults to <b>restart nodes</b>.
/// </param>
/// <param name="stablizeTime">
/// The time to wait after each node restart for things to stabilize.
/// Defaults to <see cref="Program.WaitSeconds"/>.
/// </param>
public static void AddRestartClusterStep(
    HiveProxy hive,
    SetupController<NodeDefinition> controller,
    Func<NodeDefinition, bool> predicate = null,
    string stepLabel = null,
    TimeSpan stablizeTime = default(TimeSpan))
{
    Covenant.Requires<ArgumentNullException>(hive != null);
    Covenant.Requires<ArgumentNullException>(controller != null);

    predicate = predicate ?? (node => true);
    stepLabel = stepLabel ?? "restart nodes";

    if (stablizeTime <= TimeSpan.Zero)
    {
        stablizeTime = TimeSpan.FromSeconds(Program.WaitSeconds);
    }

    controller.AddGlobalStep(stepLabel,
        () =>
        {
            // Mark every selected node so its pending restart is visible.
            foreach (var node in hive.Nodes.Where(n => predicate(n.Metadata)))
            {
                node.Status = "restart pending";
            }

            // Reboots one node and then pauses so things can stabilize.
            void RestartNode(SshProxy<NodeDefinition> node)
            {
                node.Status = "restart";
                node.Reboot(wait: true);
                node.Status = $"stabilize ({stablizeTime.TotalSeconds}s)";
                Thread.Sleep(stablizeTime);
                node.Status = "READY";
            }

            // We're going to restart selected nodes by type in this order:
            //
            //      Managers
            //      Workers
            //      Pets

            var selectedManagers = hive.Nodes.Where(n => n.Metadata.IsManager && predicate(n.Metadata));
            var selectedWorkers  = hive.Nodes.Where(n => n.Metadata.IsWorker && predicate(n.Metadata));
            var selectedPets     = hive.Nodes.Where(n => n.Metadata.IsPet && predicate(n.Metadata));

            foreach (var node in selectedManagers.Concat(selectedWorkers).Concat(selectedPets))
            {
                RestartNode(node);
            }

            // Clear the node status displays.

            foreach (var node in hive.Nodes)
            {
                node.Status = string.Empty;
            }
        });
}
/// <summary>
/// Scans the hive and adds the steps to a <see cref="SetupController{NodeMetadata}"/> required
/// to update Docker on the hive nodes to the requested version.
/// </summary>
/// <param name="hive">The target hive proxy.</param>
/// <param name="controller">The setup controller.</param>
/// <param name="dockerVersion">The version of Docker required.</param>
/// <exception cref="HiveException">Thrown if there was an error selecting the updates.</exception>
/// <remarks>
/// <note>
/// This method does not allow an older version of the component to be installed.
/// In this case, the current version will remain.
/// </note>
/// </remarks>
public static void AddDockerUpdateSteps(HiveProxy hive, SetupController<NodeDefinition> controller, string dockerVersion)
{
    Covenant.Requires<ArgumentNullException>(hive != null);
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(dockerVersion));

    var newVersion = (SemanticVersion)dockerVersion;

    // NOTE(review): [message] is discarded, so any problem the headend reports
    // while resolving the package URI is silently ignored here -- confirm
    // whether it should be logged or should fail the update.
    var dockerPackageUri = hive.Headend.GetDockerPackageUri(dockerVersion, out var message);

    // Adds a single update step covering any of the passed nodes that are
    // running a Docker version older than [newVersion].
    //
    // FIX: each step now gets its own node name set.  The previous
    // implementation shared one [pendingNodes] set that was cleared and
    // refilled for each group, so the step predicates (which capture the set)
    // could observe names collected for a later group if the controller
    // evaluates them lazily.
    void AddUpdateStep(string stepLabel, IEnumerable<SshProxy<NodeDefinition>> nodes)
    {
        var pendingNodes = new HashSet<string>(StringComparer.InvariantCultureIgnoreCase);

        foreach (var node in nodes)
        {
            if ((SemanticVersion)node.GetDockerVersion() < newVersion)
            {
                pendingNodes.Add(node.Name);
            }
        }

        if (pendingNodes.Count > 0)
        {
            controller.AddStep(stepLabel,
                (node, stepDelay) =>
                {
                    Thread.Sleep(stepDelay);
                    UpdateDocker(hive, node, dockerPackageUri);
                },
                n => pendingNodes.Contains(n.Name),
                parallelLimit: 1);
        }
    }

    // Update the managers first, then the workers and finally the pets.
    //
    // FIX: the pets step was previously mislabeled "workers: update docker"
    // (copy/paste error).
    AddUpdateStep("managers: update docker", hive.Managers);
    AddUpdateStep("workers: update docker", hive.Workers);
    AddUpdateStep("pets: update docker", hive.Pets);
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    // Split the command line on "--": the left side carries this command's
    // own options and the right side is the raw Docker command to forward.
    var split = commandLine.Split(SplitItem);

    var leftCommandLine = split.Left;
    var rightCommandLine = split.Right;

    // Basic initialization.

    if (leftCommandLine.HasHelpOption || rightCommandLine == null)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    // Initialize the hive and connect to a manager.

    var hiveLogin = Program.ConnectHive();

    hive = new HiveProxy(hiveLogin);

    // Determine which node we're going to target: the node named via
    // [--node] or, by default, any reachable manager.

    var node = (SshProxy<NodeDefinition>)null;
    var nodeName = leftCommandLine.GetOption("--node", null);

    if (!string.IsNullOrEmpty(nodeName))
    {
        node = hive.GetNode(nodeName);
    }
    else
    {
        node = hive.GetReachableManager();
    }

    // A handful commands upload files and need to be run as a bundle.

    if (!leftCommandLine.HasOption("--no-upload"))
    {
        var arg1 = rightCommandLine.Arguments.ElementAtOrDefault(0);
        var arg2 = rightCommandLine.Arguments.ElementAtOrDefault(1);

        // NOTE(review): execution continues past these handlers into the
        // pass-through SudoCommand below; presumably Deploy()/SecretCreate()/
        // ConfigCreate() terminate the process themselves -- confirm.
        if (arg1 == "deploy")
        {
            Deploy(node, rightCommandLine);
        }
        else if (arg1 == "stack" && arg2 == "deploy")
        {
            Deploy(node, rightCommandLine);
        }
        else if (arg1 == "secret" && arg2 == "create")
        {
            SecretCreate(node, rightCommandLine);
        }
        else if (arg1 == "config" && arg2 == "create")
        {
            ConfigCreate(node, rightCommandLine);
        }
    }

    // Otherwise, we're just going to execute the command as is, echoing the
    // remote output and exiting with the remote exit code.

    var response = node.SudoCommand($"{remoteDockerPath} {rightCommandLine}", RunOptions.IgnoreRemotePath);

    Console.Write(response.AllText);
    Program.Exit(response.ExitCode);
}
/// <summary>
/// Application entry point.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Create process terminator to handle process termination signals.

    terminator = new ProcessTerminator(log);

    try
    {
        // Establish the hive connections.

        if (NeonHelper.IsDevWorkstation)
        {
            var secrets = new DebugSecrets();

            // NOTE:
            //
            // Add your target hive's Vault credentials here for
            // manual debugging.  Take care not to commit sensitive
            // credentials for production hives.
            //
            // You'll find this information in the ROOT hive login
            // for the target hive.
            //
            // SECURITY(review): a root token and unseal key are hardcoded
            // below and committed to source control -- even for a test hive
            // these should be rotated and moved out of the source.

            secrets.Add("neon-hive-manager-vaultkeys",
                new VaultCredentials()
                {
                    RootToken = "cd5831fa-86ec-cc22-b1f3-051f88147382",
                    KeyThreshold = 1,
                    UnsealKeys = new List<string>()
                    {
                        "8SgwdO/GwqJ7nyxT2tK2n1CCR3084kQVh7gEy8jNQh8="
                    }
                });

            hive = HiveHelper.OpenHiveRemote(secrets);
        }
        else
        {
            hive = HiveHelper.OpenHive(sshCredentialsSecret: "neon-ssh-credentials");
        }

        // Ensure that we're running on a manager node.  We won't be able
        // to query swarm status otherwise.

        var nodeRole = Environment.GetEnvironmentVariable("NEON_NODE_ROLE");

        if (string.IsNullOrEmpty(nodeRole))
        {
            log.LogCritical(() => "Service does not appear to be running on a neonHIVE.");
            Program.Exit(1, immediate: true);
        }

        if (!string.Equals(nodeRole, NodeRole.Manager, StringComparison.OrdinalIgnoreCase))
        {
            log.LogCritical(() => $"[neon-hive-manager] service is running on a [{nodeRole}] hive node.  Running on only [{NodeRole.Manager}] nodes are supported.");
            Program.Exit(1, immediate: true);
        }

        // Open the hive data services and then start the main service task.

        log.LogDebug(() => $"Connecting: Consul");

        using (consul = HiveHelper.OpenConsul())
        {
            log.LogDebug(() => $"Connecting: Docker");

            using (docker = HiveHelper.OpenDocker())
            {
                log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel");

                // We're passing [useBootstrap=true] here so that the HiveMQ client will
                // connect directly to the HiveMQ cluster nodes as opposed to routing
                // traffic through the private traffic manager.  This is necessary because
                // the load balancers rely on HiveMQ to broadcast update notifications.
                //
                // One consequence of this is that this service will need to be restarted
                // whenever HiveMQ instances are relocated to different hive hosts.
                // We're going to monitor for changes to the HiveMQ bootstrap settings
                // and gracefully terminate the process when this happens.  We're then
                // depending on Docker to restart the process so we'll be able to pick
                // up the change.

                hive.HiveMQ.Internal.HiveMQBootstrapChanged +=
                    (s, a) =>
                    {
                        log.LogInfo("HiveMQ bootstrap settings change detected.  Terminating service with [exitcode=-1] expecting that Docker will restart it.");

                        // Use ExitCode=-1 so that we'll restart even if the service/container
                        // was not configured with [restart=always].

                        terminator.Exit(-1);
                    };

                using (proxyNotifyChannel = hive.HiveMQ.Internal.GetProxyNotifyChannel(useBootstrap: true).Open())
                {
                    await RunAsync();
                }
            }
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always close the hive connection and release the terminator,
        // even when exiting via Program.Exit() above.
        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Main program entry point.
/// </summary>
/// <param name="args">The command line arguments.</param>
public static void Main(string[] args)
{
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));
    log.LogInfo(() => $"Starting [{serviceName}]");
    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");

    // Arrange for process termination signals to be handled gracefully.
    terminator = new ProcessTerminator(log);
    terminator.AddHandler(() => terminator.ReadyToExit());

    // Connect to the hive (remotely when debugging on a dev workstation).
    if (NeonHelper.IsDevWorkstation)
    {
        var vaultCredentialsSecret = "neon-proxy-manager-credentials";

        Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret);

        hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, "neon-proxy-manager"));
    }
    else
    {
        hive = HiveHelper.OpenHive();
    }

    // Parse the command line; exactly two arguments are expected:
    // the Docker secret name and the target Consul key.
    var commandLine = new CommandLine(args);

    if (commandLine.Arguments.Count() != 2)
    {
        log.LogError($"*** ERROR: Invalid command line arguments: {commandLine}");
        log.LogError($"*** Expected: MYSECRET MYCONSULKEY");
        SleepForever();
    }

    var secretName = commandLine.Arguments[0];
    var consulKey  = commandLine.Arguments[1];

    try
    {
        // Read the mounted Docker secret file and copy its contents to Consul.

        var secretPath = ($"/run/secrets/{secretName}");

        log.LogInfo($"Reading secret [{secretName}].");

        if (File.Exists(secretPath))
        {
            var secretBytes = File.ReadAllBytes(secretPath);

            log.LogInfo($"Writing secret to Consul [{consulKey}].");
            HiveHelper.Consul.KV.PutBytes(consulKey, secretBytes).Wait();
        }
        else
        {
            log.LogError($"The secret file [{secretPath}] does not exist.");
        }
    }
    catch (Exception e)
    {
        log.LogError(e);
    }

    // Keep the process alive so Docker won't restart it in a loop.
    SleepForever();
}
/// <inheritdoc/>
public override void Run(CommandLine commandLine)
{
    HiveProxy hive;

    if (commandLine.HasHelpOption)
    {
        Console.WriteLine(usage);
        Program.Exit(0);
    }

    var hiveLogin = Program.HiveLogin;

    // Print the current login status if no hive name was passed.

    if (hiveLogin == null)
    {
        Console.Error.WriteLine("*** You are not logged in.");
        Program.Exit(1);
    }

    Console.WriteLine(hiveLogin.LoginName);

    // Parse and validate the hive definition.  The custom factory creates
    // node proxies using the login's SSH credentials with logging disabled
    // (TextWriter.Null).

    hive = new HiveProxy(hiveLogin,
        (nodeName, publicAddress, privateAddress, append) =>
        {
            return new SshProxy<NodeDefinition>(nodeName, publicAddress, privateAddress, hiveLogin.GetSshCredentials(), TextWriter.Null);
        });

    // Verify the credentials by logging into a manager node.

    var verifyCredentials = true;

    Console.Error.WriteLine();
    Console.Error.WriteLine($"Checking login [{hiveLogin.LoginName}]...");

    // When the login is routed through a VPN, report the VPN client state
    // first and skip the credential check if the connection is unhealthy.

    if (hiveLogin.ViaVpn)
    {
        var vpnClient = HiveHelper.VpnGetClient(hiveLogin.HiveName);

        if (vpnClient == null)
        {
            Console.Error.WriteLine("*** ERROR: VPN is not running.");
        }
        else
        {
            switch (vpnClient.State)
            {
                case HiveHelper.VpnState.Connecting:

                    Console.Error.WriteLine("VPN is connecting");
                    break;

                case HiveHelper.VpnState.Healthy:

                    Console.Error.WriteLine("VPN connection is healthy");
                    break;

                case HiveHelper.VpnState.Unhealthy:

                    Console.Error.WriteLine("*** ERROR: VPN connection is not healthy");
                    verifyCredentials = false;
                    break;
            }
        }
    }

    if (verifyCredentials)
    {
        Console.Error.WriteLine("Authenticating...");

        try
        {
            hive.GetReachableManager().Connect();
            Console.Error.WriteLine("Authenticated");
        }
        catch (Exception e)
        {
            Console.Error.WriteLine($"*** ERROR: Hive authentication failed: {NeonHelper.ExceptionError(e)}");
        }
    }

    Console.WriteLine();
    return;
}
/// <summary>
/// Scans the hive and adds the steps to a <see cref="SetupController{NodeMetadata}"/> required
/// to update the hive to the most recent version.
/// </summary>
/// <param name="hive">The target hive proxy.</param>
/// <param name="controller">The setup controller.</param>
/// <param name="restartRequired">Returns as <c>true</c> if one or more cluster nodes will be restarted during the update.</param>
/// <param name="servicesOnly">Optionally indicate that only hive service and container images should be updated.</param>
/// <param name="serviceUpdateParallism">Optionally specifies the parallelism to use when updating services.</param>
/// <param name="imageTag">Optionally overrides the default image tag.</param>
/// <returns>The number of pending updates.</returns>
/// <exception cref="HiveException">Thrown if there was an error selecting the updates.</exception>
public static int AddHiveUpdateSteps(HiveProxy hive, SetupController<NodeDefinition> controller, out bool restartRequired, bool servicesOnly = false, int serviceUpdateParallism = 1, string imageTag = null)
{
    Covenant.Requires<ArgumentNullException>(hive != null);

    restartRequired = false;

    var pendingUpdateCount = 0;

    // Obtain and parse the current hive version.  We cannot select applicable
    // updates without knowing where the hive currently stands.

    if (!SemanticVersion.TryParse(hive.Globals.Version, out var hiveVersion))
    {
        throw new HiveException($"Unable to retrieve or parse the hive version global [{HiveGlobals.Version}].");
    }

    if (!servicesOnly)
    {
        // Scan for the first update that applies.

        var firstUpdate = Updates
            .Where(u => u.FromVersion >= hiveVersion)
            .OrderBy(u => u.FromVersion)
            .FirstOrDefault();

        if (firstUpdate != null)
        {
            // Determine which updates apply.  We're going to sort the available updates
            // in ascending order by [FromVersion] and then in descending order by [ToVersion]
            // to favor overlapping updates that advance the hive the most.

            var nextVersion = firstUpdate.FromVersion;

            foreach (var update in Updates
                .OrderBy(u => u.FromVersion)
                .ThenByDescending(u => u.ToVersion))
            {
                if (update.FromVersion >= nextVersion)
                {
                    pendingUpdateCount++;

                    update.Hive = hive;
                    nextVersion  = update.ToVersion;

                    // NOTE(review): this inner [!servicesOnly] check is redundant;
                    // it is always true because we're already inside the outer
                    // [if (!servicesOnly)] block above.  Harmless, but could be removed.

                    if (!servicesOnly)
                    {
                        update.AddUpdateSteps(controller);

                        if (update.RestartRequired)
                        {
                            restartRequired = true;
                        }
                    }
                }
            }
        }
    }

    // Gather the information needed to update the Docker images for the
    // built-in hive services and containers.

    var componentInfo    = hive.Headend.GetComponentInfo(hive.Globals.Version, ThisAssembly.Git.Branch);
    var systemContainers = HiveConst.DockerContainers;
    var systemServices   = HiveConst.DockerServices;
    var firstManager     = hive.FirstManager;

    if (hive.Definition.Docker.RegistryCache)
    {
        // When the hive deploys a local registry cache, pre-pull all system images
        // on the first manager so subsequent node updates hit the cache instead
        // of the upstream registry.

        controller.AddGlobalStep("pull images to cache",
            () =>
            {
                foreach (var container in systemContainers)
                {
                    var image = GetUpdateImage(hive, componentInfo, container, imageTag);

                    if (image != null)
                    {
                        firstManager.Status = $"run: docker pull {image}";
                        firstManager.SudoCommand($"docker pull {image}");
                        firstManager.Status = string.Empty;
                    }
                }

                foreach (var service in systemServices)
                {
                    var image = GetUpdateImage(hive, componentInfo, service, imageTag);

                    if (image != null)
                    {
                        firstManager.Status = $"run: docker pull {image}";
                        firstManager.SudoCommand($"docker pull {image}");
                        firstManager.Status = string.Empty;
                    }
                }
            });
    }

    controller.AddStep("update services",
        (node, stepDelay) =>
        {
            // List the neonHIVE services actually running and only update those.

            var runningServices = new HashSet<string>();
            var response        = node.SudoCommand("docker service ls --format \"{{.Name}}\"");

            using (var reader = new StringReader(response.OutputText))
            {
                foreach (var service in reader.Lines())
                {
                    runningServices.Add(service);
                }
            }

            foreach (var service in systemServices.Where(s => runningServices.Contains(s)))
            {
                var image = GetUpdateImage(hive, componentInfo, service, imageTag);

                if (image != null)
                {
                    // $todo(jeff.lill):
                    //
                    // We should check the service image to see if we actually need to perform an
                    // upgrade.  There's no point in restarting the service instances unnecessarily.
                    //
                    //      https://github.com/jefflill/NeonForge/issues/378

                    firstManager.Status = $"update: {image}";
                    node.SudoCommand($"docker service update --force --image {image} --update-parallelism {serviceUpdateParallism} {service}");
                    firstManager.Status = string.Empty;

                    // Update the service creation scripts on all manager nodes for all built-in
                    // services.  Note that this depends on how [ServicesBase.CreateStartScript()]
                    // formatted the generated code at the top of the script.

                    foreach (var manager in hive.Managers)
                    {
                        UpdateStartScript(manager, service, $"{image}");
                    }
                }
            }
        },
        node => node == firstManager);

    controller.AddGlobalStep("update containers",
        () =>
        {
            // $todo(jeff.lill):
            //
            // We should check the service image to see if we actually need to perform an
            // upgrade.  There's no point in restarting the service instances unnecessarily.
            //
            //      https://github.com/jefflill/NeonForge/issues/378

            // We're going to update containers on each node, one node at a time
            // and then stabilize for a period of time before moving on to the
            // next node.  This will help keep clustered applications like HiveMQ
            // and databases like Couchbase that are deployed as containers happy
            // by not blowing all of the application instances away at the same
            // time while updating.
            //
            // Hopefully, there will be enough time after updating a clustered
            // application container for the container to rejoin the cluster
            // before we update the next node.

            foreach (var node in hive.Nodes)
            {
                // List the neonHIVE containers actually running and only update those.
                // Note that we're going to use the local script to start the container
                // so we don't need to hardcode the Docker options here.  We won't restart
                // the container if the script doesn't exist.
                //
                // Note that we'll update and restart the containers in parallel if the
                // hive has a local registry, otherwise we'll just go with the user
                // specified parallelism to avoid overwhelming the network with image
                // downloads.

                // $todo(jeff.lill):
                //
                // A case could be made for having a central place for generating container
                // (and service) scripts for hive setup as well as situations like this.
                // It could also be possible then to be able to scan for and repair missing
                // or incorrect scripts.

                var runningContainers = new HashSet<string>();
                var response          = node.SudoCommand("docker ps --format \"{{.Names}}\"");

                using (var reader = new StringReader(response.OutputText))
                {
                    foreach (var container in reader.Lines())
                    {
                        runningContainers.Add(container);
                    }
                }

                foreach (var container in systemContainers.Where(s => runningContainers.Contains(s)))
                {
                    var image = GetUpdateImage(hive, componentInfo, container, imageTag);

                    if (image != null)
                    {
                        var scriptPath = LinuxPath.Combine(HiveHostFolders.Scripts, $"{container}.sh");

                        if (node.FileExists(scriptPath))
                        {
                            // The container has a creation script, so update the script, stop/remove the
                            // container and then run the script to restart the container.

                            UpdateStartScript(node, container, $"{image}");

                            node.Status = $"stop: {container}";
                            node.DockerCommand("docker", "rm", "--force", container);

                            node.Status = $"restart: {container}";
                            node.SudoCommand(scriptPath);
                        }
                        else
                        {
                            // No creation script: warn (on the node status and in the node log)
                            // and pause briefly so the operator can notice the status message.

                            var warning = $"WARNING: Container script [{scriptPath}] is not present on this node so we can't update the [{container}] container.";

                            node.Status = warning;
                            node.Log(warning);
                            Thread.Sleep(TimeSpan.FromSeconds(5));
                        }
                    }
                }

                // NOTE(review): "stablizing" below is a typo but it's a runtime status
                // string, so it's left untouched here.

                node.Status = $"stablizing ({Program.WaitSeconds}s)";
                Thread.Sleep(TimeSpan.FromSeconds(Program.WaitSeconds));
                node.Status = "READY";
            }
        });

    return(pendingUpdateCount);
}
/// <summary>
/// Application entry point.  Reads the service configuration from environment
/// variables, connects to the hive (and optionally Vault/Consul) and then runs
/// the proxy service tasks until termination is signalled.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static async Task Main(string[] args)
{
    LogManager.Default.SetLogLevel(Environment.GetEnvironmentVariable("LOG_LEVEL"));
    log = LogManager.Default.GetLogger(typeof(Program));

    // Create process terminator to handle termination signals.

    terminator = new ProcessTerminator(log);

    terminator.AddHandler(
        () =>
        {
            // Cancel any operations in progress.

            terminator.CancellationTokenSource.Cancel();
        });

    // Read the environment variables.

    // $hack(jeff.lill):
    //
    // We're going to scan the Consul configuration key to determine whether this
    // instance is managing the public or private proxy (or bridges) so we'll
    // be completely compatible with existing deployments.
    //
    // In theory, we could have passed a new environment variable but that's not
    // worth the trouble.

    configKey = Environment.GetEnvironmentVariable("CONFIG_KEY");

    if (string.IsNullOrEmpty(configKey))
    {
        log.LogError("[CONFIG_KEY] environment variable is required.");
        Program.Exit(1, immediate: true);
    }

    isPublic = configKey.Contains("/public/");

    var proxyName = isPublic ? "public" : "private";

    serviceName = $"neon-proxy-{proxyName}:{GitVersion}";

    log.LogInfo(() => $"Starting [{serviceName}]");

    configHashKey = Environment.GetEnvironmentVariable("CONFIG_HASH_KEY");

    if (string.IsNullOrEmpty(configHashKey))
    {
        log.LogError("[CONFIG_HASH_KEY] environment variable is required.");
        Program.Exit(1, immediate: true);
    }

    // VAULT_CREDENTIALS is optional: without it the service cannot read TLS
    // certificates so HTTPS routes are disabled (see the bridge-mode note below).

    vaultCredentialsName = Environment.GetEnvironmentVariable("VAULT_CREDENTIALS");

    if (string.IsNullOrEmpty(vaultCredentialsName))
    {
        log.LogWarn("HTTPS routes are not supported because VAULT_CREDENTIALS is not specified or blank.");
    }

    // Parse the optional tuning variables, falling back to defaults
    // (300s warn interval, 10s start delay, 10 HAProxy instances) when
    // a variable is missing or unparsable.

    var warnSeconds = Environment.GetEnvironmentVariable("WARN_SECONDS");

    if (string.IsNullOrEmpty(warnSeconds) || !double.TryParse(warnSeconds, out var warnSecondsValue))
    {
        warnInterval = TimeSpan.FromSeconds(300);
    }
    else
    {
        warnInterval = TimeSpan.FromSeconds(warnSecondsValue);
    }

    var startSeconds = Environment.GetEnvironmentVariable("START_SECONDS");

    if (string.IsNullOrEmpty(startSeconds) || !double.TryParse(startSeconds, out var startSecondsValue))
    {
        startDelay = TimeSpan.FromSeconds(10);
    }
    else
    {
        startDelay = TimeSpan.FromSeconds(startSecondsValue);
    }

    var maxHAProxyCountString = Environment.GetEnvironmentVariable("MAX_HAPROXY_COUNT");

    if (!int.TryParse(maxHAProxyCountString, out maxHAProxyCount))
    {
        maxHAProxyCount = 10;
    }

    // Negative counts are clamped to zero.

    if (maxHAProxyCount < 0)
    {
        maxHAProxyCount = 0;
    }

    debugMode = "true".Equals(Environment.GetEnvironmentVariable("DEBUG"), StringComparison.InvariantCultureIgnoreCase);

    // Echo the effective configuration so deployments can be diagnosed from the logs.

    log.LogInfo(() => $"LOG_LEVEL={LogManager.Default.LogLevel.ToString().ToUpper()}");
    log.LogInfo(() => $"CONFIG_KEY={configKey}");
    log.LogInfo(() => $"CONFIG_HASH_KEY={configHashKey}");
    log.LogInfo(() => $"VAULT_CREDENTIALS={vaultCredentialsName}");
    log.LogInfo(() => $"WARN_SECONDS={warnInterval}");
    log.LogInfo(() => $"START_SECONDS={startDelay}");
    log.LogInfo(() => $"MAX_HAPROXY_COUNT={maxHAProxyCount}");
    log.LogInfo(() => $"DEBUG={debugMode}");

    // Ensure that the required directories exist.

    Directory.CreateDirectory(tmpfsFolder);
    Directory.CreateDirectory(configFolder);
    Directory.CreateDirectory(configUpdateFolder);

    // Establish the hive connections.

    if (NeonHelper.IsDevWorkstation)
    {
        // Running outside a container isn't supported; the commented-out code
        // below shows how remote debugging used to be wired up.

        throw new NotImplementedException("This service works only within a Linux container with HAProxy installed.");

        //var vaultCredentialsSecret = "neon-proxy-manager-credentials";

        //Environment.SetEnvironmentVariable("VAULT_CREDENTIALS", vaultCredentialsSecret);

        //hive = HiveHelper.OpenHiveRemote(new DebugSecrets().VaultAppRole(vaultCredentialsSecret, $"neon-proxy-{proxyName}"));
    }
    else
    {
        hive = HiveHelper.OpenHive();
    }

    try
    {
        // Log into Vault using the Vault credentials persisted as a Docker
        // secret, if one was specified.  We won't open Vault otherwise.

        if (!string.IsNullOrEmpty(vaultCredentialsName))
        {
            var vaultSecret = HiveHelper.GetSecret(vaultCredentialsName);

            if (string.IsNullOrEmpty(vaultSecret))
            {
                log.LogCritical($"Cannot read Docker secret [{vaultCredentialsName}].");
                Program.Exit(1, immediate: true);
            }

            var vaultCredentials = HiveCredentials.ParseJson(vaultSecret);

            if (vaultCredentials == null)
            {
                log.LogCritical($"Cannot parse Docker secret [{vaultCredentialsName}].");
                Program.Exit(1, immediate: true);
            }

            log.LogInfo(() => $"Connecting: Vault");
            vault = HiveHelper.OpenVault(vaultCredentials);
        }
        else
        {
            vault = null;

            // $hack(jeff.lill):
            //
            // This is a bit of backwards compatible hack.  Instances started without the
            // VAULT_CREDENTIALS environment variable are assumed to be proxy bridges.

            isBridge = true;
        }

        // Open Consul and then start the service tasks.

        log.LogInfo(() => $"Connecting: Consul");

        using (consul = HiveHelper.OpenConsul())
        {
            log.LogInfo(() => $"Connecting: {HiveMQChannels.ProxyNotify} channel");

            // Verify that the required Consul keys exist or loop to wait until they
            // are created.  This will allow the service wait for pending hive setup
            // operations to be completed.

            while (!await consul.KV.Exists(configKey))
            {
                log.LogWarn(() => $"Waiting for [{configKey}] key to be present in Consul.");
                await Task.Delay(TimeSpan.FromSeconds(5));
            }

            while (!await consul.KV.Exists(configHashKey))
            {
                log.LogWarn(() => $"Waiting for [{configHashKey}] key to be present in Consul.");
                await Task.Delay(TimeSpan.FromSeconds(5));
            }

            // Crank up the service tasks and run them until they all complete
            // (or one of them faults).

            await NeonHelper.WaitAllAsync(
                ErrorPollerAsync(),
                HAProxShim());
        }
    }
    catch (Exception e)
    {
        log.LogCritical(e);
        Program.Exit(1);
        return;
    }
    finally
    {
        // Always close the hive connection and let the terminator know
        // it's safe to finish shutting the process down.

        HiveHelper.CloseHive();
        terminator.ReadyToExit();
    }

    Program.Exit(0);
    return;
}
/// <summary>
/// Constructs a <see cref="LogServices"/> instance bound to the specified hive.
/// </summary>
/// <param name="hive">The hive proxy.</param>
/// <exception cref="ArgumentNullException">
/// Thrown (via <see cref="Covenant"/>) when <paramref name="hive"/> is <c>null</c>.
/// </exception>
public LogServices(HiveProxy hive)
{
    Covenant.Requires<ArgumentNullException>(hive != null);

    this.hive = hive;
}