Example #1
        public async Task TestGetGitHubZipAsync()
        {
            var headendClient = new HeadendClient();
            var zip           = await headendClient.GetHelmChartZipAsync("elasticsearch");

            Assert.IsType<byte[]>(zip);
        }
Example #2
        /// <summary>
        /// Constructor.
        /// </summary>
        public MainForm()
        {
            MainForm.Current = this;

            InitializeComponent();

            Load  += MainForm_Load;
            Shown += (s, a) => Visible = false; // The main form should always be hidden

            // Ensure that temporary files are written to the user's temporary folder because
            // there's a decent chance that this folder will be encrypted at rest.

            TempFile.Root   = KubeHelper.TempFolder;
            TempFolder.Root = KubeHelper.TempFolder;

            // Preload the notification icons and animations for better performance.

            appIcon             = new Icon(@"Images\app.ico");
            connectedIcon       = new Icon(@"Images\connected.ico");
            disconnectedIcon    = new Icon(@"Images\disconnected.ico");
            errorIcon           = new Icon(@"Images\error.ico");
            connectingAnimation = AnimatedIcon.Load("Images", "connecting", animationFrameRate);
            workingAnimation    = AnimatedIcon.Load("Images", "working", animationFrameRate);
            errorAnimation      = AnimatedIcon.Load("Images", "error", animationFrameRate);
            notifyStack         = new Stack<NotifyState>();

            // Initialize the cluster hosting provider components.

            HostingLoader.Initialize();

            // Initialize the client state.

            proxies      = new List<ReverseProxy>();
            portForwards = new List<PortForward>();
            Headend      = new HeadendClient();
            KubeHelper.LoadClientConfig();
        }
Example #3
        /// <summary>
        /// Sets hive definition related variables for a <see cref="PreprocessReader"/>.
        /// </summary>
        /// <param name="preprocessReader">The reader.</param>
        /// <param name="hiveDefinition">The hive definition.</param>
        /// <param name="nodeDefinition">The target node definition.</param>
        private static void SetHiveVariables(PreprocessReader preprocessReader, HiveDefinition hiveDefinition, NodeDefinition nodeDefinition)
        {
            Covenant.Requires<ArgumentNullException>(preprocessReader != null);
            Covenant.Requires<ArgumentNullException>(hiveDefinition != null);

            // Generate the manager node variables in sorted order.  The variable
            // names will be formatted as:
            //
            //      NEON_MANAGER_#
            //
            // where [#] is the zero-based index of the node.  This is compatible
            // with the [getmanager] function included in the script.
            //
            // Each variable defines an associative array with [name] and [address]
            // properties.
            //
            // Then generate the NEON_MANAGER_NAMES and NEON_MANAGER_ADDRESSES arrays.
            //
            // NOTE: We need to use Linux-style line endings.
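            //
            // For a hive with two managers (hypothetical node names and
            // addresses), the generated fragment would look something like:
            //
            //      declare -x -A NEON_MANAGER_0
            //      NEON_MANAGER_0=( ["name"]="manager-0" ["address"]="10.0.0.10" )
            //
            //      declare -x -A NEON_MANAGER_1
            //      NEON_MANAGER_1=( ["name"]="manager-1" ["address"]="10.0.0.11" )
            //
            //      declare -x NEON_MANAGER_NAMES=( "manager-0" "manager-1" )
            //      declare -x NEON_MANAGER_ADDRESSES=( "10.0.0.10" "10.0.0.11" )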

            var sbManagers                  = new StringBuilder();
            var sbManagerNamesArray         = new StringBuilder();
            var sbManagerAddressesArray     = new StringBuilder();
            var sbPeerManagerAddressesArray = new StringBuilder();
            var sbManagerNodesSummary       = new StringBuilder();
            var index            = 0;
            var managerNameWidth = 0;

            sbManagerNamesArray.Append("(");
            sbManagerAddressesArray.Append("(");
            sbPeerManagerAddressesArray.Append("(");

            foreach (var manager in hiveDefinition.SortedManagers)
            {
                sbManagers.Append($"declare -x -A NEON_MANAGER_{index}\n");
                sbManagers.Append($"NEON_MANAGER_{index}=( [\"name\"]=\"{manager.Name}\" [\"address\"]=\"{manager.PrivateAddress}\" )\n");
                sbManagers.Append("\n");
                index++;

                sbManagerNamesArray.Append($" \"{manager.Name}\"");
                sbManagerAddressesArray.Append($" \"{manager.PrivateAddress}\"");

                if (manager != nodeDefinition)
                {
                    sbPeerManagerAddressesArray.Append($" \"{manager.PrivateAddress}\"");
                }

                managerNameWidth = Math.Max(manager.Name.Length, managerNameWidth);
            }

            sbManagerNamesArray.Append(" )");
            sbManagerAddressesArray.Append(" )");
            sbPeerManagerAddressesArray.Append(" )");

            foreach (var manager in hiveDefinition.SortedManagers)
            {
                var nameField = manager.Name;

                if (nameField.Length < managerNameWidth)
                {
                    nameField += new string(' ', managerNameWidth - nameField.Length);
                }

                // The blanks below are just enough so that the "=" sign lines up
                // with the summary output from [hive.conf.sh].
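                //
                // For example (hypothetical names and addresses), the echoed
                // summary would look like:
                //
                //      NEON_MANAGER_NODES                 = manager-0: 10.0.0.10
                //                                           manager-1: 10.0.0.11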

                if (sbManagerNodesSummary.Length == 0)
                {
                    sbManagerNodesSummary.Append($"    echo \"NEON_MANAGER_NODES                 = {nameField}: {manager.PrivateAddress}\" 1>&2\n");
                }
                else
                {
                    sbManagerNodesSummary.Append($"    echo \"                                     {nameField}: {manager.PrivateAddress}\" 1>&2\n");
                }
            }

            sbManagers.Append("\n");
            sbManagers.Append($"declare -x NEON_MANAGER_NAMES={sbManagerNamesArray}\n");
            sbManagers.Append($"declare -x NEON_MANAGER_ADDRESSES={sbManagerAddressesArray}\n");

            sbManagers.Append("\n");

            if (hiveDefinition.Managers.Count() > 1)
            {
                sbManagers.Append($"declare -x NEON_MANAGER_PEERS={sbPeerManagerAddressesArray}\n");
            }
            else
            {
                sbManagers.Append("export NEON_MANAGER_PEERS=\"\"\n");
            }

            // Generate the manager and worker NTP time sources.

            var managerTimeSources = string.Empty;
            var workerTimeSources  = string.Empty;

            if (hiveDefinition.TimeSources != null)
            {
                foreach (var source in hiveDefinition.TimeSources)
                {
                    if (string.IsNullOrWhiteSpace(source))
                    {
                        continue;
                    }

                    if (managerTimeSources.Length > 0)
                    {
                        managerTimeSources += " ";
                    }

                    managerTimeSources += $"\"{source}\"";
                }
            }

            foreach (var manager in hiveDefinition.SortedManagers)
            {
                if (workerTimeSources.Length > 0)
                {
                    workerTimeSources += " ";
                }

                workerTimeSources += $"\"{manager.PrivateAddress}\"";
            }

            if (string.IsNullOrWhiteSpace(managerTimeSources))
            {
                // Default to reasonable public time sources.

                managerTimeSources = "\"pool.ntp.org\"";
            }

            // Generate the Docker daemon command line options.

            var sbDockerOptions = new StringBuilder();

            if (Program.ServiceManager == ServiceManager.Systemd)
            {
                sbDockerOptions.AppendWithSeparator($"-H unix:///var/run/docker.sock");
            }
            else
            {
                throw new NotImplementedException();
            }

            if (hiveDefinition.DebugMode)
            {
                // Expose the Docker Swarm REST API on the node's internal hive IP address so it
                // can be reached by apps like [neon-proxy-manager] running off the manager node
                // (potentially in the debugger).

                sbDockerOptions.AppendWithSeparator($"-H tcp://{nodeDefinition.PrivateAddress}:{NetworkPorts.Docker}");
            }

            preprocessReader.Set("docker.options", sbDockerOptions);

            // Define the Consul command line options.

            var consulOptions = string.Empty;

            if (hiveDefinition.Dashboard.Consul)
            {
                if (consulOptions.Length > 0)
                {
                    consulOptions += " ";
                }

                consulOptions += "-ui";
            }

            // Format the network upstream nameservers as semicolon separated
            // to be compatible with the PowerDNS Recursor [forward-zones-recurse]
            // configuration setting.
            //
            // Note that manager nodes will recurse to upstream (external) DNS
            // servers and workers/pets will recurse to the managers so they can
            // dynamically pick up hive DNS changes.
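            //
            // For example (hypothetical addresses): a manager might end up with
            // "8.8.8.8;8.8.4.4" (the upstream servers) while a worker/pet would
            // get something like "10.0.0.10;10.0.0.11" (the manager addresses).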

            if (hiveDefinition.Network?.Nameservers == null)
            {
                // $hack(jeff.lill):
                //
                // [Network] will be null if we're just preparing servers, not doing full setup
                // so we'll set this to the defaults to avoid null references below.

                hiveDefinition.Network = new NetworkOptions();
            }

            var nameservers = string.Empty;

            if (nodeDefinition.Role == NodeRole.Manager)
            {
                for (int i = 0; i < hiveDefinition.Network.Nameservers.Length; i++)
                {
                    if (i > 0)
                    {
                        nameservers += ";";
                    }

                    nameservers += hiveDefinition.Network.Nameservers[i].Trim();
                }
            }
            else
            {
                foreach (var manager in hiveDefinition.SortedManagers)
                {
                    if (nameservers.Length > 0)
                    {
                        nameservers += ";";
                    }

                    nameservers += manager.PrivateAddress;
                }
            }

            // Set the variables.

            preprocessReader.Set("load-hive-conf", HiveHostFolders.Config + "/hive.conf.sh --echo-summary");
            preprocessReader.Set("load-hive-conf-quiet", HiveHostFolders.Config + "/hive.conf.sh");

            SetBashVariable(preprocessReader, "hive.provisioner", hiveDefinition.Provisioner);
            SetBashVariable(preprocessReader, "hive.rootuser", Program.MachineUsername);

            SetBashVariable(preprocessReader, "node.driveprefix", hiveDefinition.DrivePrefix);

            SetBashVariable(preprocessReader, "neon.folders.archive", HiveHostFolders.Archive);
            SetBashVariable(preprocessReader, "neon.folders.bin", HiveHostFolders.Bin);
            SetBashVariable(preprocessReader, "neon.folders.exec", HiveHostFolders.Exec);
            SetBashVariable(preprocessReader, "neon.folders.config", HiveHostFolders.Config);
            SetBashVariable(preprocessReader, "neon.folders.scripts", HiveHostFolders.Scripts);
            SetBashVariable(preprocessReader, "neon.folders.secrets", HiveHostFolders.Secrets);
            SetBashVariable(preprocessReader, "neon.folders.setup", HiveHostFolders.Setup);
            SetBashVariable(preprocessReader, "neon.folders.source", HiveHostFolders.Source);
            SetBashVariable(preprocessReader, "neon.folders.state", HiveHostFolders.State);
            SetBashVariable(preprocessReader, "neon.folders.tmpfs", HiveHostFolders.Tmpfs);
            SetBashVariable(preprocessReader, "neon.folders.tools", HiveHostFolders.Tools);

            preprocessReader.Set("neon.hosts.neon-log-es-data", hiveDefinition.Hostnames.LogEsData);

            SetBashVariable(preprocessReader, "nodes.manager.count", hiveDefinition.Managers.Count());
            preprocessReader.Set("nodes.managers", sbManagers);
            preprocessReader.Set("nodes.manager.summary", sbManagerNodesSummary);

            SetBashVariable(preprocessReader, "ntp.manager.sources", managerTimeSources);
            SetBashVariable(preprocessReader, "ntp.worker.sources", workerTimeSources);

            if (!hiveDefinition.BareDocker)
            {
                // When we're not deploying bare Docker, the manager nodes will use the
                // configured name servers as the hive's upstream DNS and the worker
                // nodes will be configured to query the managers.

                if (nodeDefinition.IsManager)
                {
                    preprocessReader.Set("net.nameservers", nameservers);
                }
                else
                {
                    var managerNameservers = string.Empty;

                    foreach (var manager in hiveDefinition.Managers)
                    {
                        if (managerNameservers.Length > 0)
                        {
                            managerNameservers += ";";
                        }

                        managerNameservers += manager.PrivateAddress.ToString();
                    }

                    preprocessReader.Set("net.nameservers", managerNameservers);
                }
            }
            else
            {
                // All servers use the configured upstream nameservers when we're not
                // deploying the Local DNS.

                preprocessReader.Set("net.nameservers", nameservers);
            }

            SetBashVariable(preprocessReader, "net.powerdns.recursor.package.uri", hiveDefinition.Network.PdnsRecursorPackageUri);
            preprocessReader.Set("net.powerdns.recursor.hosts", GetPowerDnsHosts(hiveDefinition, nodeDefinition));

            var dockerPackageUri = new HeadendClient().GetDockerPackageUri(hiveDefinition.Docker.Version, out var packageMessage);

            if (dockerPackageUri == null)
            {
            // $todo(jeff.lill):
                //
                // This should probably be replaced with hive definition validation code.

                Console.WriteLine($"*** ERROR: {packageMessage}");
                Program.Exit(1);
            }

            SetBashVariable(preprocessReader, "docker.packageuri", dockerPackageUri);

            SetBashVariable(preprocessReader, "consul.version", hiveDefinition.Consul.Version);
            SetBashVariable(preprocessReader, "consul.options", consulOptions);
            SetBashVariable(preprocessReader, "consul.address", $"{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}");
            SetBashVariable(preprocessReader, "consul.fulladdress", $"https://{hiveDefinition.Hostnames.Consul}:{hiveDefinition.Consul.Port}");
            SetBashVariable(preprocessReader, "consul.hostname", hiveDefinition.Hostnames.Consul);
            SetBashVariable(preprocessReader, "consul.port", hiveDefinition.Consul.Port);
            SetBashVariable(preprocessReader, "consul.tls", hiveDefinition.Consul.Tls ? "true" : "false");

            SetBashVariable(preprocessReader, "vault.version", hiveDefinition.Vault.Version);

            SetBashVariable(preprocessReader, "vault.download", $"https://releases.hashicorp.com/vault/{hiveDefinition.Vault.Version}/vault_{hiveDefinition.Vault.Version}_linux_amd64.zip");
            SetBashVariable(preprocessReader, "vault.hostname", hiveDefinition.Hostnames.Vault);
            SetBashVariable(preprocessReader, "vault.port", hiveDefinition.Vault.Port);
            SetBashVariable(preprocessReader, "vault.consulpath", "vault/");
            SetBashVariable(preprocessReader, "vault.maximumlease", hiveDefinition.Vault.MaximimLease);
            SetBashVariable(preprocessReader, "vault.defaultlease", hiveDefinition.Vault.DefaultLease);
            SetBashVariable(preprocessReader, "vault.dashboard", hiveDefinition.Dashboard.Vault ? "true" : "false");

            SetBashVariable(preprocessReader, "log.enabled", hiveDefinition.Log.Enabled);

            //-----------------------------------------------------------------
            // Configure the variables for the [setup-disk.sh] script.

            switch (hiveDefinition.Hosting.Environment)
            {
            case HostingEnvironments.Aws:

                throw new NotImplementedException("$todo(jeff.lill)");

            case HostingEnvironments.Azure:

                switch (Program.OSProperties.TargetOS)
                {
                case TargetOS.Ubuntu_16_04:

                    // The primary Azure data drive is [/dev/sdb] so any mounted drive will be [/dev/sdc].

                    if (nodeDefinition.Azure.HardDriveCount == 0)
                    {
                        SetBashVariable(preprocessReader, "data.disk", "PRIMARY");
                    }
                    else
                    {
                        SetBashVariable(preprocessReader, "data.disk", "/dev/sdc");
                    }
                    break;

                default:

                    throw new NotImplementedException($"Support for [{Program.OSProperties.TargetOS}] is not implemented.");
                }

                break;

            case HostingEnvironments.Google:

                throw new NotImplementedException("$todo(jeff.lill)");

            case HostingEnvironments.HyperV:
            case HostingEnvironments.HyperVDev:
            case HostingEnvironments.Machine:
            case HostingEnvironments.Unknown:
            case HostingEnvironments.XenServer:

                // VMs for all of these environments simply host their data on the
                // primary OS disk only for now, the idea being that this disk
                // can be sized up as necessary.  There are valid scenarios where
                // folks would like the data on a different drive (e.g. for better
                // performance).  I'm putting support for that on the backlog.

                SetBashVariable(preprocessReader, "data.disk", "PRIMARY");
                break;

            default:

                throw new NotImplementedException($"The [{hiveDefinition.Hosting.Environment}] hosting environment is not implemented.");
            }
        }
Example #4
        /// <inheritdoc/>
        public override void Run(CommandLine commandLine)
        {
            if (commandLine.HasHelpOption)
            {
                Help();
                Program.Exit(0);
            }

            // Special-case handling of the [--remove-templates] option.

            if (commandLine.HasOption("--remove-templates"))
            {
                Console.WriteLine("Removing cached virtual machine templates.");

                foreach (var fileName in Directory.GetFiles(KubeHelper.VmTemplatesFolder, "*.*", SearchOption.TopDirectoryOnly))
                {
                    File.Delete(fileName);
                }

                Program.Exit(0);
            }

            // Implement the command.

            if (KubeHelper.CurrentContext != null)
            {
                Console.Error.WriteLine("*** ERROR: You are logged into a cluster.  You need to logout before preparing another.");
                Program.Exit(1);
            }

            if (commandLine.Arguments.Length == 0)
            {
                Console.Error.WriteLine($"*** ERROR: CLUSTER-DEF expected.");
                Program.Exit(1);
            }

            clusterDefPath = commandLine.Arguments[0];
            force          = commandLine.GetFlag("--force");

            ClusterDefinition.ValidateFile(clusterDefPath, strict: true);

            var clusterDefinition = ClusterDefinition.FromFile(clusterDefPath, strict: true);

            clusterDefinition.Provisioner = $"neon-cli:{Program.Version}";  // Identify this tool/version as the cluster provisioner

            // NOTE:
            //
            // Azure has a more restrictive password policy and our default
            // machine password does not meet the requirements:
            //
            // The supplied password must be between 6-72 characters long and must
            // satisfy at least 3 of password complexity requirements from the following:
            //
            //      1. Contains an uppercase character
            //      2. Contains a lowercase character
            //      3. Contains a numeric digit
            //      4. Contains a special character
            //      5. Control characters are not allowed
            //
            // It's also probably not a great idea to use a static password when
            // provisioning VMs in public clouds because it might be possible for
            // somebody to use this fact to SSH into nodes while the cluster is
            // being setup and before we set the secure password at the end.
            //
            // This is less problematic for non-cloud environments because it's
            // likely that the hosts won't initially be able to receive inbound
            // Internet traffic and besides, we need to have a known password
            // embedded into the VM templates.
            //
            // We're going to handle this for cloud environments by looking
            // at [Program.MachinePassword].  If this is set to the default
            // machine password then we're going to replace it with a randomly
            // generated password with a few extra characters to ensure that
            // it meets the target cloud's password requirements.  We'll use
            // a non-default password if the operator specified one.

            if (clusterDefinition.Hosting.IsCloudProvider && Program.MachinePassword == KubeConst.DefaulVmTemplatePassword)
            {
                Program.MachinePassword = NeonHelper.GetCryptoRandomPassword(20);

                // Append a string that guarantees that the generated password meets
                // cloud minimum requirements.
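                // The ".Aa0" suffix contributes a special character ("."), an
                // uppercase letter ("A"), a lowercase letter ("a"), and a digit
                // ("0"), so complexity rules 1-4 above are satisfied regardless
                // of what the random portion contains.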

                Program.MachinePassword += ".Aa0";
            }

            // NOTE: Cluster prepare starts new log files.

            cluster = new ClusterProxy(clusterDefinition, Program.CreateNodeProxy<NodeDefinition>, appendToLog: false, defaultRunOptions: RunOptions.LogOutput | RunOptions.FaultOnError);

            if (KubeHelper.Config.GetContext(cluster.Definition.Name) != null)
            {
                Console.Error.WriteLine($"*** ERROR: A context named [{cluster.Definition.Name}] already exists.");
                Program.Exit(1);
            }

            // Configure global options.

            if (commandLine.HasOption("--unredacted"))
            {
                cluster.SecureRunOptions = RunOptions.None;
            }

            var failed = false;

            try
            {
                KubeHelper.Desktop.StartOperationAsync($"Preparing [{cluster.Name}]").Wait();

                //-----------------------------------------------------------------
                // Try to ensure that no servers are already deployed on the IP addresses defined
                // for cluster nodes because provisoning over an existing cluster will likely
                // corrupt the existing cluster and also probably prevent the new cluster from
                // provisioning correctly.
                //
                // Note that we're not going to perform this check for the [Machine] hosting
                // environment because we're expecting the bare machines to be already running
                // with the assigned addresses and we're also not going to do this for cloud
                // environments because we're assuming that the cluster will run in its own
                // private network so there'll be no possibility of conflicts.

                if (cluster.Definition.Hosting.Environment != HostingEnvironments.Machine &&
                    !cluster.Definition.Hosting.IsCloudProvider)
                {
                    Console.WriteLine();
                    Console.WriteLine(" Scanning for IP address conflicts...");
                    Console.WriteLine();

                    var pingOptions   = new PingOptions(ttl: 32, dontFragment: true);
                    var pingTimeout   = TimeSpan.FromSeconds(2);
                    var pingConflicts = new List<NodeDefinition>();
                    var pingAttempts  = 2;

                    // I'm going to use up to 20 threads at a time here for simplicity
                    // rather than doing this as async operations.

                    var parallelOptions = new ParallelOptions()
                    {
                        MaxDegreeOfParallelism = 20
                    };

                    Parallel.ForEach(cluster.Definition.NodeDefinitions.Values, parallelOptions,
                                     node =>
                    {
                        using (var pinger = new Pinger())
                        {
                            // We're going to try pinging up to [pingAttempts] times for each node
                            // just in case the network is sketchy and we're losing reply packets.

                            for (int i = 0; i < pingAttempts; i++)
                            {
                                var reply = pinger.SendPingAsync(node.PrivateAddress, (int)pingTimeout.TotalMilliseconds).Result;

                                if (reply.Status == IPStatus.Success)
                                {
                                    lock (pingConflicts)
                                    {
                                        pingConflicts.Add(node);
                                    }

                                    break;
                                }
                            }
                        }
                    });

                    if (pingConflicts.Count > 0)
                    {
                        Console.Error.WriteLine($"*** ERROR: Cannot provision the cluster because [{pingConflicts.Count}] other");
                        Console.Error.WriteLine($"***        machines conflict with the following cluster nodes:");
                        Console.Error.WriteLine();

                        foreach (var node in pingConflicts.OrderBy(n => NetHelper.AddressToUint(IPAddress.Parse(n.PrivateAddress))))
                        {
                            Console.Error.WriteLine($"{node.PrivateAddress, 16}:    {node.Name}");
                        }

                        Program.Exit(1);
                    }
                }

                //-----------------------------------------------------------------
                // Perform basic environment provisioning.  This creates basic cluster components
                // such as virtual machines, networks, load balancers, public IP addresses, security
                // groups,... as required for the environment.

                hostingManager = new HostingManagerFactory(() => HostingLoader.Initialize()).GetMaster(cluster, Program.LogPath);

                if (hostingManager == null)
                {
                    Console.Error.WriteLine($"*** ERROR: No hosting manager for the [{cluster.Definition.Hosting.Environment}] hosting environment could be located.");
                    Program.Exit(1);
                }

                hostingManager.HostUsername = Program.MachineUsername;
                hostingManager.HostPassword = Program.MachinePassword;
                hostingManager.ShowStatus   = !Program.Quiet;
                hostingManager.MaxParallel  = Program.MaxParallel;
                hostingManager.WaitSeconds  = Program.WaitSeconds;

                if (hostingManager.RequiresAdminPrivileges)
                {
                    Program.VerifyAdminPrivileges($"Provisioning to [{cluster.Definition.Hosting.Environment}] requires elevated administrator privileges.");
                }

                if (!hostingManager.Provision(force))
                {
                    Program.Exit(1);
                }

                // Get the mounted drive prefix from the hosting manager.

                cluster.Definition.DrivePrefix = hostingManager.DrivePrefix;

                // Ensure that the nodes have valid IP addresses.

                cluster.Definition.ValidatePrivateNodeAddresses();

                var ipAddressToServer = new Dictionary<IPAddress, SshProxy<NodeDefinition>>();

                foreach (var node in cluster.Nodes.OrderBy(n => n.Name))
                {
                    SshProxy<NodeDefinition> duplicateServer;

                    if (node.PrivateAddress == IPAddress.Any)
                    {
                        throw new ArgumentException($"Node [{node.Name}] has not been assigned an IP address.");
                    }

                    if (ipAddressToServer.TryGetValue(node.PrivateAddress, out duplicateServer))
                    {
                        throw new ArgumentException($"Nodes [{duplicateServer.Name}] and [{node.Name}] have the same IP address [{node.Metadata.PrivateAddress}].");
                    }

                    ipAddressToServer.Add(node.PrivateAddress, node);
                }

                // We're going to use the masters as package caches unless the user
                // specifies something else.

                packageCaches = commandLine.GetOption("--package-cache");     // This overrides the cluster definition, if specified.

                if (!string.IsNullOrEmpty(packageCaches))
                {
                    cluster.Definition.PackageProxy = packageCaches;
                }

                if (string.IsNullOrEmpty(cluster.Definition.PackageProxy))
                {
                    var sbProxies = new StringBuilder();

                    foreach (var master in cluster.Masters)
                    {
                        sbProxies.AppendWithSeparator($"{master.PrivateAddress}:{NetworkPorts.AppCacherNg}");
                    }

                    cluster.Definition.PackageProxy = sbProxies.ToString();
                }

                //-----------------------------------------------------------------
                // Prepare the cluster.

                // Write the operation begin marker to all cluster node logs.

                cluster.LogLine(logBeginMarker);

                var nodesText = cluster.Nodes.Count() == 1 ? "node" : "nodes";
                var operation = $"Preparing [{cluster.Definition.Name}] {nodesText}";

                var controller =
                    new SetupController<NodeDefinition>(operation, cluster.Nodes)
                {
                    ShowStatus  = !Program.Quiet,
                    MaxParallel = Program.MaxParallel
                };

                controller.AddGlobalStep("setup details",
                                         () =>
                {
                    using (var client = new HeadendClient())
                    {
                        kubeSetupInfo = client.GetSetupInfoAsync(cluster.Definition).Result;
                    }
                });

                // Prepare the nodes.

                controller.AddWaitUntilOnlineStep(timeout: TimeSpan.FromMinutes(15));
                hostingManager.AddPostProvisionSteps(controller);
                controller.AddStep("verify OS", CommonSteps.VerifyOS);

                controller.AddStep("prepare",
                                   (node, stepDelay) =>
                {
                    Thread.Sleep(stepDelay);
                    CommonSteps.PrepareNode(node, cluster.Definition, kubeSetupInfo, shutdown: false);
                },
                                   stepStaggerSeconds: cluster.Definition.Setup.StepStaggerSeconds);

                if (!controller.Run())
                {
                    // Write the operation end/failed marker to all cluster node logs.

                    cluster.LogLine(logFailedMarker);

                    Console.Error.WriteLine("*** ERROR: One or more configuration steps failed.");
                    Program.Exit(1);
                }

                // Persist the cluster context extension.

                var contextExtensionsPath = KubeHelper.GetContextExtensionPath((KubeContextName)$"{KubeConst.RootUser}@{clusterDefinition.Name}");
                var contextExtension      = new KubeContextExtension(contextExtensionsPath)
                {
                    ClusterDefinition = clusterDefinition,
                    SshUsername       = Program.MachineUsername,
                    SshPassword       = Program.MachinePassword,
                    SetupDetails      = new KubeSetupDetails()
                    {
                        SetupPending = true
                    }
                };

                contextExtension.Save();

                // Write the operation end marker to all cluster node logs.

                cluster.LogLine(logEndMarker);
            }
            catch
            {
                failed = true;
                throw;
            }
            finally
            {
                if (!failed)
                {
                    KubeHelper.Desktop.EndOperationAsync($"Cluster [{cluster.Name}] has been prepared and is ready for setup.").Wait();
                }
                else
                {
                    KubeHelper.Desktop.EndOperationAsync($"Cluster [{cluster.Name}] prepare has failed.", failed: true).Wait();
                }
            }
        }