private async Task<Stream> GetDebugInfoFromNodeAsync(JsonOperationContext jsonOperationContext, string url, IEnumerable<string> databaseNames, X509Certificate2 certificate)
{
    var bodyJson = new DynamicJsonValue
    {
        [nameof(NodeDebugInfoRequestHeader.FromUrl)] = ServerStore.GetNodeHttpServerUrl(),
        [nameof(NodeDebugInfoRequestHeader.DatabaseNames)] = databaseNames
    };

    using (var ms = new MemoryStream())
    using (var writer = new BlittableJsonTextWriter(jsonOperationContext, ms))
    {
        jsonOperationContext.Write(writer, bodyJson);
        writer.Flush();
        ms.Flush();

        var requestExecutor = ClusterRequestExecutor.CreateForSingleNode(url, certificate);
        requestExecutor.DefaultTimeout = ServerStore.Configuration.Cluster.OperationTimeout.AsTimeSpan;

        var rawStreamCommand = new GetRawStreamResultCommand("admin/debug/remote-cluster-info-package", ms);

        await requestExecutor.ExecuteAsync(rawStreamCommand, jsonOperationContext);
        rawStreamCommand.Result.Position = 0;
        return rawStreamCommand.Result;
    }
}
protected void RedirectToLeader()
{
    if (ServerStore.LeaderTag == null)
        throw new NoLeaderException();

    ClusterTopology topology;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        topology = ServerStore.GetClusterTopology(context);
    }

    var url = topology.GetUrlFromTag(ServerStore.LeaderTag);
    if (string.Equals(url, ServerStore.GetNodeHttpServerUrl(), StringComparison.OrdinalIgnoreCase))
    {
        throw new NoLeaderException("This node is not the leader, but the current topology does mark it as the leader. Such confusion is usually an indication of a network or configuration problem.");
    }

    var leaderLocation = url + HttpContext.Request.Path + HttpContext.Request.QueryString;

    // 307 (Temporary Redirect) preserves the HTTP method and body, so the client re-issues the same request against the leader.
    HttpContext.Response.StatusCode = (int)HttpStatusCode.TemporaryRedirect;
    HttpContext.Response.Headers.Remove("Content-Type");
    HttpContext.Response.Headers.Add("Location", leaderLocation);
}
private async Task<Stream> GetDebugInfoFromNodeAsync(
    JsonOperationContext context,
    RequestExecutor requestExecutor,
    IEnumerable<string> databaseNames)
{
    var bodyJson = new DynamicJsonValue
    {
        [nameof(NodeDebugInfoRequestHeader.FromUrl)] = ServerStore.GetNodeHttpServerUrl(),
        [nameof(NodeDebugInfoRequestHeader.DatabaseNames)] = databaseNames
    };

    using (var ms = new MemoryStream())
    using (var writer = new BlittableJsonTextWriter(context, ms))
    {
        context.Write(writer, bodyJson);
        writer.Flush();
        ms.Flush();

        var rawStreamCommand = new GetRawStreamResultCommand("admin/debug/remote-cluster-info-package", ms);

        await requestExecutor.ExecuteAsync(rawStreamCommand, context);
        rawStreamCommand.Result.Position = 0;
        return rawStreamCommand.Result;
    }
}
public static SetupParameters Get(ServerStore serverStore)
{
    var result = new SetupParameters();
    DetermineFixedPortNumber(serverStore, result);
    DetermineFixedTcpPortNumber(serverStore, result);

    result.IsDocker = Environment.GetEnvironmentVariable("RAVEN_IN_DOCKER") == "true";
    result.DockerHostname = result.IsDocker ? new Uri(serverStore.GetNodeHttpServerUrl()).Host : null;

    return result;
}
public static SetupParameters Get(ServerStore serverStore)
{
    var result = new SetupParameters();
    DetermineFixedPortNumber(serverStore, result);
    DetermineFixedTcpPortNumber(serverStore, result);

    result.IsDocker = PlatformDetails.RunningOnDocker;
    result.DockerHostname = result.IsDocker ? new Uri(serverStore.GetNodeHttpServerUrl()).Host : null;

    return result;
}
private string GetUrl(string tag, ClusterTopology clusterTopology)
{
    string url = null;

    if (Server.ServerStore.NodeTag == tag)
        url = ServerStore.GetNodeHttpServerUrl(HttpContext.Request.GetClientRequestedNodeUrl());

    if (url == null)
        url = clusterTopology.GetUrlFromTag(tag);

    return url;
}
public static async Task<SetupParameters> Get(ServerStore serverStore)
{
    var result = new SetupParameters();
    DetermineFixedPortNumber(serverStore, result);
    DetermineFixedTcpPortNumber(serverStore, result);

    result.IsDocker = PlatformDetails.RunningOnDocker;
    result.DockerHostname = result.IsDocker ? new Uri(serverStore.GetNodeHttpServerUrl()).Host : null;

    result.RunningOnMacOsx = PlatformDetails.RunningOnMacOsx;

    result.IsAws = await DetectIfRunningInAws();
    if (result.IsAws == false)
        result.IsAzure = await DetectIfRunningInAzure();

    result.RunningOnPosix = PlatformDetails.RunningOnPosix;

    return result;
}
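// DetectIfRunningInAws / DetectIfRunningInAzure are called above but not shown in these snippets.
// The following is a minimal sketch of one plausible detection approach: probe the well-known cloud
// instance metadata endpoints with a short timeout. The helper names (the "Sketch" suffix), the
// timeout value, and the error handling are assumptions for illustration, not the actual implementation.
private static readonly HttpClient MetadataClient = new HttpClient { Timeout = TimeSpan.FromSeconds(2) };

private static async Task<bool> DetectIfRunningInAwsSketch()
{
    try
    {
        // The EC2 instance metadata service is only reachable from inside AWS.
        using (var response = await MetadataClient.GetAsync("http://169.254.169.254/latest/meta-data/"))
            return response.IsSuccessStatusCode;
    }
    catch
    {
        return false; // timeout or network error - assume we are not on EC2
    }
}

private static async Task<bool> DetectIfRunningInAzureSketch()
{
    try
    {
        // Azure's instance metadata service requires the "Metadata: true" header.
        using (var request = new HttpRequestMessage(HttpMethod.Get, "http://169.254.169.254/metadata/instance?api-version=2019-06-01"))
        {
            request.Headers.Add("Metadata", "true");
            using (var response = await MetadataClient.SendAsync(request))
                return response.IsSuccessStatusCode;
        }
    }
    catch
    {
        return false; // timeout or network error - assume we are not on Azure
    }
}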
public async Task AddNode()
{
    SetupCORSHeaders();

    var nodeUrl = GetQueryStringValueAndAssertIfSingleAndNotEmpty("url");
    var watcher = GetBoolValueQueryString("watcher", false);
    var assignedCores = GetIntValueQueryString("assignedCores", false);
    if (assignedCores <= 0)
        throw new ArgumentException("Assigned cores must be greater than 0!");

    nodeUrl = UrlHelper.TryGetLeftPart(nodeUrl);
    var remoteIsHttps = nodeUrl.StartsWith("https:", StringComparison.OrdinalIgnoreCase);

    if (HttpContext.Request.IsHttps != remoteIsHttps)
        throw new InvalidOperationException($"Cannot add node '{nodeUrl}' to cluster because it will create invalid mix of HTTPS & HTTP endpoints. A cluster must be only HTTPS or only HTTP.");

    NodeInfo nodeInfo;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
    using (var requestExecutor = ClusterRequestExecutor.CreateForSingleNode(nodeUrl, Server.Certificate.Certificate))
    {
        requestExecutor.DefaultTimeout = ServerStore.Engine.OperationTimeout;

        var infoCmd = new GetNodeInfoCommand();
        try
        {
            await requestExecutor.ExecuteAsync(infoCmd, ctx);
        }
        catch (AllTopologyNodesDownException e)
        {
            throw new InvalidOperationException($"Couldn't contact node at {nodeUrl}", e);
        }

        nodeInfo = infoCmd.Result;

        if (ServerStore.IsPassive() && nodeInfo.TopologyId != null)
            throw new TopologyMismatchException("You can't add a new node to an already existing cluster");
    }

    if (assignedCores != null && assignedCores > nodeInfo.NumberOfCores)
        throw new ArgumentException($"Cannot add node because the number of assigned cores is larger than the available cores on that machine: {nodeInfo.NumberOfCores}");

    ServerStore.EnsureNotPassive();

    if (assignedCores == null)
        assignedCores = ServerStore.LicenseManager.GetCoresToAssign(nodeInfo.NumberOfCores);

    Debug.Assert(assignedCores <= nodeInfo.NumberOfCores);

    ServerStore.LicenseManager.AssertCanAddNode(nodeUrl, assignedCores.Value);

    if (ServerStore.IsLeader())
    {
        using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        {
            string topologyId;
            ClusterTopology clusterTopology;
            using (ctx.OpenReadTransaction())
            {
                clusterTopology = ServerStore.GetClusterTopology(ctx);
                topologyId = clusterTopology.TopologyId;
            }

            var possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
            if (possibleNode.HasUrl)
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because this url is already used by node {possibleNode.NodeTag}");

            if (nodeInfo.ServerId == ServerStore.GetServerId())
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because it's a synonym of the current node URL: {ServerStore.GetNodeHttpServerUrl()}");

            if (nodeInfo.TopologyId != null)
            {
                if (topologyId != nodeInfo.TopologyId)
                {
                    throw new TopologyMismatchException(
                        "Adding a new node to cluster failed. The new node is already in another cluster. " +
                        $"Expected topology id: {topologyId}, but we got {nodeInfo.TopologyId}");
                }

                if (nodeInfo.CurrentState != RachisState.Passive)
                {
                    throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster " +
                                                        $"because it's already in the cluster under tag: {nodeInfo.NodeTag} " +
                                                        $"and URL: {clusterTopology.GetUrlFromTag(nodeInfo.NodeTag)}");
                }
            }

            var nodeTag = nodeInfo.NodeTag == RachisConsensus.InitialTag ? null : nodeInfo.NodeTag;

            CertificateDefinition oldServerCert = null;
            X509Certificate2 certificate = null;

            // When the remote node runs HTTPS, validate its server certificate and register it in the cluster before adding the node.
            if (remoteIsHttps)
            {
                if (nodeInfo.Certificate == null)
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because it has no certificate while trying to use HTTPS");

                certificate = new X509Certificate2(Convert.FromBase64String(nodeInfo.Certificate), (string)null, X509KeyStorageFlags.MachineKeySet);

                var now = DateTime.UtcNow;
                if (certificate.NotBefore.ToUniversalTime() > now)
                {
                    // Because of time zone and time drift issues, we can't assume that the certificate generation will be
                    // proper. Because of that, we allow tolerance of the NotBefore to be a bit earlier / later than the
                    // current time. Clients may still fail to work with our certificate because of timing issues,
                    // but the admin needs to setup time sync properly and there isn't much we can do at that point
                    if ((certificate.NotBefore.ToUniversalTime() - now).TotalDays > 1)
                    {
                        throw new InvalidOperationException(
                            $"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' is not yet valid. It starts on {certificate.NotBefore}");
                    }
                }

                if (certificate.NotAfter.ToUniversalTime() < now)
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' expired on {certificate.NotAfter}");

                var expected = GetStringQueryString("expectedThumbprint", required: false);
                if (expected != null)
                {
                    if (certificate.Thumbprint != expected)
                        throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate thumbprint '{certificate.Thumbprint}' doesn't match the expected thumbprint '{expected}'.");
                }

                using (ctx.OpenReadTransaction())
                {
                    var key = Constants.Certificates.Prefix + certificate.Thumbprint;
                    var readCert = ServerStore.Cluster.Read(ctx, key);
                    if (readCert != null)
                        oldServerCert = JsonDeserializationServer.CertificateDefinition(readCert);
                }

                if (oldServerCert == null)
                {
                    var certificateDefinition = new CertificateDefinition
                    {
                        Certificate = nodeInfo.Certificate,
                        Thumbprint = certificate.Thumbprint,
                        NotAfter = certificate.NotAfter,
                        Name = "Server Certificate for " + nodeUrl,
                        SecurityClearance = SecurityClearance.ClusterNode
                    };

                    var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(Constants.Certificates.Prefix + certificate.Thumbprint, certificateDefinition));
                    await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                }
            }

            await ServerStore.AddNodeToClusterAsync(nodeUrl, nodeTag, validateNotInTopology: false, asWatcher: watcher ?? false);

            // Re-read the topology to pick up the tag that was assigned to the new node.
            using (ctx.OpenReadTransaction())
            {
                clusterTopology = ServerStore.GetClusterTopology(ctx);
                possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
                nodeTag = possibleNode.HasUrl ? possibleNode.NodeTag : null;

                if (certificate != null)
                {
                    var key = Constants.Certificates.Prefix + certificate.Thumbprint;

                    var modifiedServerCert = JsonDeserializationServer.CertificateDefinition(ServerStore.Cluster.Read(ctx, key));
                    if (modifiedServerCert == null)
                        throw new ConcurrencyException("After adding the certificate, it was removed, shouldn't happen unless another admin removed it midway through.");

                    if (oldServerCert == null)
                    {
                        modifiedServerCert.Name = "Server certificate for Node " + nodeTag;
                    }
                    else
                    {
                        var value = "Node " + nodeTag;
                        if (modifiedServerCert.Name.Contains(value) == false)
                            modifiedServerCert.Name += ", " + value;
                    }

                    var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(key, modifiedServerCert));
                    await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                }

                var nodeDetails = new NodeDetails
                {
                    NodeTag = nodeTag,
                    AssignedCores = assignedCores.Value,
                    NumberOfCores = nodeInfo.NumberOfCores,
                    InstalledMemoryInGb = nodeInfo.InstalledMemoryInGb,
                    UsableMemoryInGb = nodeInfo.UsableMemoryInGb,
                    BuildInfo = nodeInfo.BuildInfo
                };

                await ServerStore.LicenseManager.CalculateLicenseLimits(nodeDetails, forceFetchingNodeInfo: true, waitToUpdate: true);
            }

            NoContentStatus();
            return;
        }
    }

    RedirectToLeader();
}
public Task GetClusterTopology()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var topology = ServerStore.GetClusterTopology(context);
        var nodeTag = ServerStore.NodeTag;

        if (topology.AllNodes.Count == 0)
        {
            var tag = ServerStore.NodeTag ?? "A";
            var serverUrl = ServerStore.GetNodeHttpServerUrl(HttpContext.Request.GetClientRequestedNodeUrl());

            topology = new ClusterTopology(
                topology.TopologyId ?? "dummy",
                new Dictionary<string, string>
                {
                    [tag] = serverUrl
                },
                new Dictionary<string, string>(),
                new Dictionary<string, string>(),
                tag
            );
            nodeTag = tag;
        }
        else
        {
            var isClientIndependent = GetBoolValueQueryString("clientIndependent", false) ?? false;
            if (isClientIndependent == false)
                topology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext);
        }

        HttpContext.Response.StatusCode = (int)HttpStatusCode.OK;

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            var loadLicenseLimits = ServerStore.LoadLicenseLimits();
            var nodeLicenseDetails = loadLicenseLimits == null
                ? null
                : DynamicJsonValue.Convert(loadLicenseLimits.NodeLicenseDetails);

            var json = new DynamicJsonValue
            {
                ["Topology"] = topology.ToSortedJson(),
                ["Leader"] = ServerStore.LeaderTag,
                ["LeaderShipDuration"] = ServerStore.Engine.CurrentLeader?.LeaderShipDuration,
                ["CurrentState"] = ServerStore.CurrentRachisState,
                ["NodeTag"] = nodeTag,
                ["CurrentTerm"] = ServerStore.Engine.CurrentTerm,
                ["NodeLicenseDetails"] = nodeLicenseDetails,
                [nameof(ServerStore.Engine.LastStateChangeReason)] = ServerStore.LastStateChangeReason()
            };

            var clusterErrors = ServerStore.GetClusterErrors();
            if (clusterErrors.Count > 0)
                json["Errors"] = clusterErrors;

            var nodesStatuses = ServerStore.GetNodesStatuses();
            json["Status"] = DynamicJsonValue.Convert(nodesStatuses);

            context.Write(writer, json);
            writer.Flush();
        }
    }

    return Task.CompletedTask;
}
public async Task GetClusterWideInfoPackage()
{
    var contentDisposition = $"attachment; filename={DateTime.UtcNow:yyyy-MM-dd H:mm:ss} Cluster Wide.zip";
    HttpContext.Response.Headers["Content-Disposition"] = contentDisposition;

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext transactionOperationContext))
    using (ServerStore.ContextPool.AllocateOperationContext(out JsonOperationContext jsonOperationContext))
    using (transactionOperationContext.OpenReadTransaction())
    {
        using (var ms = new MemoryStream())
        {
            using (var archive = new ZipArchive(ms, ZipArchiveMode.Create, true))
            {
                var localEndpointClient = new LocalEndpointClient(Server);

                using (var localMemoryStream = new MemoryStream())
                {
                    // if the node tag is not set yet, fall back to a placeholder entry name
                    var nodeName = $"Node - [{ServerStore.NodeTag ?? "Empty node tag"}]";

                    using (var localArchive = new ZipArchive(localMemoryStream, ZipArchiveMode.Create, true))
                    {
                        await WriteServerWide(localArchive, jsonOperationContext, localEndpointClient, _serverWidePrefix);
                        await WriteForAllLocalDatabases(localArchive, jsonOperationContext, localEndpointClient);
                        await WriteLogFile(localArchive);
                    }

                    localMemoryStream.Position = 0;
                    var entry = archive.CreateEntry($"{nodeName}.zip");
                    entry.ExternalAttributes = ((int)(FilePermissions.S_IRUSR | FilePermissions.S_IWUSR)) << 16;

                    using (var entryStream = entry.Open())
                    {
                        localMemoryStream.CopyTo(entryStream);
                        entryStream.Flush();
                    }
                }

                var databaseNames = ServerStore.Cluster.GetDatabaseNames(transactionOperationContext);
                var topology = ServerStore.GetClusterTopology(transactionOperationContext);

                // no databases are defined in the cluster,
                // so just collect the server-wide endpoints from all cluster nodes
                if (databaseNames.Count == 0)
                {
                    foreach (var tagWithUrl in topology.AllNodes)
                    {
                        if (tagWithUrl.Value.Contains(ServerStore.GetNodeHttpServerUrl()))
                            continue;

                        try
                        {
                            await WriteDebugInfoPackageForNodeAsync(
                                jsonOperationContext,
                                archive,
                                tag: tagWithUrl.Key,
                                url: tagWithUrl.Value,
                                certificate: Server.Certificate.Certificate,
                                databaseNames: null);
                        }
                        catch (Exception e)
                        {
                            var entryName = $"Node - [{tagWithUrl.Key}]";
                            DebugInfoPackageUtils.WriteExceptionAsZipEntry(e, archive, entryName);
                        }
                    }
                }
                else
                {
                    var nodeUrlToDatabaseNames = CreateUrlToDatabaseNamesMapping(transactionOperationContext, databaseNames);
                    foreach (var urlToDatabaseNamesMap in nodeUrlToDatabaseNames)
                    {
                        if (urlToDatabaseNamesMap.Key.Contains(ServerStore.GetNodeHttpServerUrl()))
                            continue; // skip writing local data, we do it separately

                        try
                        {
                            await WriteDebugInfoPackageForNodeAsync(
                                jsonOperationContext,
                                archive,
                                tag: urlToDatabaseNamesMap.Value.Item2,
                                url: urlToDatabaseNamesMap.Key,
                                databaseNames: urlToDatabaseNamesMap.Value.Item1,
                                certificate: Server.Certificate.Certificate);
                        }
                        catch (Exception e)
                        {
                            var entryName = $"Node - [{urlToDatabaseNamesMap.Value.Item2}]";
                            DebugInfoPackageUtils.WriteExceptionAsZipEntry(e, archive, entryName);
                        }
                    }
                }
            }

            ms.Position = 0;
            await ms.CopyToAsync(ResponseBodyStream());
        }
    }
}
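// WriteDebugInfoPackageForNodeAsync is called above but not shown in these snippets. The sketch below
// illustrates how the per-node stream returned by GetDebugInfoFromNodeAsync (first snippet above) could be
// copied into the cluster-wide archive. The parameter list mirrors the call site; the method name carries a
// "Sketch" suffix and the body is an assumption for illustration, not the actual implementation.
private async Task WriteDebugInfoPackageForNodeAsyncSketch(
    JsonOperationContext context,
    ZipArchive archive,
    string tag,
    string url,
    X509Certificate2 certificate,
    IEnumerable<string> databaseNames)
{
    // Fetch the remote node's debug package and store it as a single zip entry named after the node tag.
    using (var nodeStream = await GetDebugInfoFromNodeAsync(context, url, databaseNames, certificate))
    {
        var entry = archive.CreateEntry($"Node - [{tag}].zip");
        using (var entryStream = entry.Open())
        {
            await nodeStream.CopyToAsync(entryStream);
            await entryStream.FlushAsync();
        }
    }
}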
public static void ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(this ClusterTopology topology, ServerStore serverStore, HttpContext httpContext)
{
    var currentNodeUrlAsSeenByTheClient = serverStore.GetNodeHttpServerUrl(httpContext.Request.GetClientRequestedNodeUrl());
    topology.ReplaceCurrentNodeUrlWithClientRequestedUrl(serverStore.NodeTag, currentNodeUrlAsSeenByTheClient);
}
public async Task AddNode()
{
    var nodeUrl = GetQueryStringValueAndAssertIfSingleAndNotEmpty("url");
    var tag = GetStringQueryString("tag", false);
    var watcher = GetBoolValueQueryString("watcher", false);
    var raftRequestId = GetRaftRequestIdFromQuery();

    var maxUtilizedCores = GetIntValueQueryString("maxUtilizedCores", false);
    if (maxUtilizedCores != null && maxUtilizedCores <= 0)
        throw new ArgumentException("Max utilized cores must be greater than 0");

    nodeUrl = nodeUrl.Trim();
    if (Uri.IsWellFormedUriString(nodeUrl, UriKind.Absolute) == false)
        throw new InvalidOperationException($"Given node URL '{nodeUrl}' is not in a valid format.");

    nodeUrl = UrlHelper.TryGetLeftPart(nodeUrl);
    var remoteIsHttps = nodeUrl.StartsWith("https:", StringComparison.OrdinalIgnoreCase);

    if (HttpContext.Request.IsHttps != remoteIsHttps)
        throw new InvalidOperationException($"Cannot add node '{nodeUrl}' to cluster because it will create invalid mix of HTTPS & HTTP endpoints. A cluster must be only HTTPS or only HTTP.");

    tag = tag?.Trim();

    NodeInfo nodeInfo;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
    using (var requestExecutor = ClusterRequestExecutor.CreateForSingleNode(nodeUrl, Server.Certificate.Certificate))
    {
        requestExecutor.DefaultTimeout = ServerStore.Engine.OperationTimeout;

        // test connection to remote.
        var result = await ServerStore.TestConnectionToRemote(nodeUrl, database: null);
        if (result.Success == false)
            throw new InvalidOperationException(result.Error);

        // test connection from remote to destination
        result = await ServerStore.TestConnectionFromRemote(requestExecutor, ctx, nodeUrl);
        if (result.Success == false)
            throw new InvalidOperationException(result.Error);

        var infoCmd = new GetNodeInfoCommand();
        try
        {
            await requestExecutor.ExecuteAsync(infoCmd, ctx);
        }
        catch (AllTopologyNodesDownException e)
        {
            throw new InvalidOperationException($"Couldn't contact node at {nodeUrl}", e);
        }

        nodeInfo = infoCmd.Result;

        if (SchemaUpgrader.CurrentVersion.ServerVersion != nodeInfo.ServerSchemaVersion)
        {
            var nodesVersion = nodeInfo.ServerSchemaVersion == 0 ? "Pre 4.2 version" : nodeInfo.ServerSchemaVersion.ToString();
            throw new InvalidOperationException($"Can't add node with mismatched storage schema version.{Environment.NewLine}" +
                                                $"My version is {SchemaUpgrader.CurrentVersion.ServerVersion}, while the node's version is {nodesVersion}");
        }

        if (ServerStore.IsPassive() && nodeInfo.TopologyId != null)
            throw new TopologyMismatchException("You can't add a new node to an already existing cluster");
    }

    if (ServerStore.ValidateFixedPort && nodeInfo.HasFixedPort == false)
    {
        throw new InvalidOperationException($"Failed to add node '{nodeUrl}' to cluster. " +
                                            $"Node '{nodeUrl}' has port '0' in 'Configuration.Core.ServerUrls' setting. " +
                                            "Adding a node with a non-fixed port is forbidden. Define a fixed port for the node to enable cluster creation.");
    }

    await ServerStore.EnsureNotPassiveAsync();

    ServerStore.LicenseManager.AssertCanAddNode();

    if (ServerStore.IsLeader())
    {
        using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        {
            var clusterTopology = ServerStore.GetClusterTopology();

            var possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
            if (possibleNode.HasUrl)
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because this url is already used by node {possibleNode.NodeTag}");

            if (nodeInfo.ServerId == ServerStore.GetServerId())
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because it's a synonym of the current node URL: {ServerStore.GetNodeHttpServerUrl()}");

            if (nodeInfo.TopologyId != null)
                AssertCanAddNodeWithTopologyId(clusterTopology, nodeInfo, nodeUrl);

            var nodeTag = nodeInfo.NodeTag == RachisConsensus.InitialTag ? tag : nodeInfo.NodeTag;

            CertificateDefinition oldServerCert = null;
            X509Certificate2 certificate = null;

            if (remoteIsHttps)
            {
                if (nodeInfo.Certificate == null)
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because it has no certificate while trying to use HTTPS");

                certificate = new X509Certificate2(Convert.FromBase64String(nodeInfo.Certificate), (string)null, X509KeyStorageFlags.MachineKeySet);

                var now = DateTime.UtcNow;
                if (certificate.NotBefore.ToUniversalTime() > now)
                {
                    // Because of time zone and time drift issues, we can't assume that the certificate generation will be
                    // proper. Because of that, we allow tolerance of the NotBefore to be a bit earlier / later than the
                    // current time. Clients may still fail to work with our certificate because of timing issues,
                    // but the admin needs to setup time sync properly and there isn't much we can do at that point
                    if ((certificate.NotBefore.ToUniversalTime() - now).TotalDays > 1)
                    {
                        throw new InvalidOperationException(
                            $"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' is not yet valid. It starts on {certificate.NotBefore}");
                    }
                }

                if (certificate.NotAfter.ToUniversalTime() < now)
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' expired on {certificate.NotAfter}");

                var expected = GetStringQueryString("expectedThumbprint", required: false);
                if (expected != null)
                {
                    if (certificate.Thumbprint != expected)
                        throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate thumbprint '{certificate.Thumbprint}' doesn't match the expected thumbprint '{expected}'.");
                }

                // if it's the same server certificate as our own, we don't want to add it to the cluster
                if (certificate.Thumbprint != Server.Certificate.Certificate.Thumbprint)
                {
                    using (ctx.OpenReadTransaction())
                    {
                        var readCert = ServerStore.Cluster.GetCertificateByThumbprint(ctx, certificate.Thumbprint);
                        if (readCert != null)
                            oldServerCert = JsonDeserializationServer.CertificateDefinition(readCert);
                    }

                    if (oldServerCert == null)
                    {
                        var certificateDefinition = new CertificateDefinition
                        {
                            Certificate = nodeInfo.Certificate,
                            Thumbprint = certificate.Thumbprint,
                            PublicKeyPinningHash = certificate.GetPublicKeyPinningHash(),
                            NotAfter = certificate.NotAfter,
                            Name = "Server Certificate for " + nodeUrl,
                            SecurityClearance = SecurityClearance.ClusterNode
                        };

                        var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(certificate.Thumbprint, certificateDefinition, $"{raftRequestId}/put-new-certificate"));
                        await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                    }
                }
            }

            await ServerStore.AddNodeToClusterAsync(nodeUrl, nodeTag, validateNotInTopology: true, asWatcher: watcher ?? false);

            // Re-read the topology to pick up the tag that was assigned to the new node.
            using (ctx.OpenReadTransaction())
            {
                clusterTopology = ServerStore.GetClusterTopology(ctx);
                possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
                nodeTag = possibleNode.HasUrl ? possibleNode.NodeTag : null;

                if (certificate != null && certificate.Thumbprint != Server.Certificate.Certificate.Thumbprint)
                {
                    var modifiedServerCert = JsonDeserializationServer.CertificateDefinition(ServerStore.Cluster.GetCertificateByThumbprint(ctx, certificate.Thumbprint));
                    if (modifiedServerCert == null)
                        throw new ConcurrencyException("After adding the certificate, it was removed, shouldn't happen unless another admin removed it midway through.");

                    if (oldServerCert == null)
                    {
                        modifiedServerCert.Name = "Server certificate for Node " + nodeTag;
                    }
                    else
                    {
                        var value = "Node " + nodeTag;
                        if (modifiedServerCert.Name.Contains(value) == false)
                            modifiedServerCert.Name += ", " + value;
                    }

                    var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(certificate.Thumbprint, modifiedServerCert, $"{raftRequestId}/put-modified-certificate"));
                    await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                }

                var detailsPerNode = new DetailsPerNode
                {
                    MaxUtilizedCores = maxUtilizedCores,
                    NumberOfCores = nodeInfo.NumberOfCores,
                    InstalledMemoryInGb = nodeInfo.InstalledMemoryInGb,
                    UsableMemoryInGb = nodeInfo.UsableMemoryInGb,
                    BuildInfo = nodeInfo.BuildInfo,
                    OsInfo = nodeInfo.OsInfo
                };

                var maxCores = ServerStore.LicenseManager.LicenseStatus.MaxCores;

                try
                {
                    await ServerStore.PutNodeLicenseLimitsAsync(nodeTag, detailsPerNode, maxCores, $"{raftRequestId}/put-license-limits");
                }
                catch
                {
                    // we'll retry this again later
                }
            }

            NoContentStatus();
            return;
        }
    }

    RedirectToLeader();
}