public async Task PromoteNode()
{
    if (ServerStore.LeaderTag == null)
    {
        NoContentStatus();
        return;
    }

    SetupCORSHeaders();

    if (ServerStore.IsLeader() == false)
    {
        RedirectToLeader();
        return;
    }

    var nodeTag = GetStringQueryString("nodeTag");

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var topology = ServerStore.GetClusterTopology(context);
        if (topology.Watchers.ContainsKey(nodeTag) == false)
        {
            throw new InvalidOperationException(
                $"Failed to promote node {nodeTag} because {nodeTag} is not a watcher in the cluster topology");
        }

        var url = topology.GetUrlFromTag(nodeTag);
        await ServerStore.Engine.ModifyTopologyAsync(nodeTag, url, Leader.TopologyModification.Promotable);
        NoContentStatus();
    }
}
public Task GetNodeInfo()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        var json = new DynamicJsonValue();
        using (context.OpenReadTransaction())
        {
            json[nameof(NodeInfo.NodeTag)] = ServerStore.NodeTag;
            json[nameof(NodeInfo.TopologyId)] = ServerStore.GetClusterTopology(context).TopologyId;
            json[nameof(NodeInfo.Certificate)] = ServerStore.Server.Certificate.CertificateForClients;
            json[nameof(ServerStore.Engine.LastStateChangeReason)] = ServerStore.LastStateChangeReason();
            json[nameof(NodeInfo.NumberOfCores)] = ProcessorInfo.ProcessorCount;

            var memoryInformation = MemoryInformation.GetMemoryInfo();
            json[nameof(NodeInfo.InstalledMemoryInGb)] = memoryInformation.InstalledMemory.GetDoubleValue(SizeUnit.Gigabytes);
            json[nameof(NodeInfo.UsableMemoryInGb)] = memoryInformation.TotalPhysicalMemory.GetDoubleValue(SizeUnit.Gigabytes);
            json[nameof(NodeInfo.BuildInfo)] = LicenseManager.BuildInfo;
            json[nameof(NodeInfo.ServerId)] = ServerStore.GetServerId().ToString();
            json[nameof(NodeInfo.CurrentState)] = ServerStore.CurrentRachisState;
        }

        context.Write(writer, json);
        writer.Flush();
    }

    return Task.CompletedTask;
}
protected void RedirectToLeader()
{
    if (ServerStore.LeaderTag == null)
    {
        throw new NoLeaderException();
    }

    ClusterTopology topology;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        topology = ServerStore.GetClusterTopology(context);
    }

    var url = topology.GetUrlFromTag(ServerStore.LeaderTag);
    if (string.Equals(url, ServerStore.GetNodeHttpServerUrl(), StringComparison.OrdinalIgnoreCase))
    {
        throw new NoLeaderException(
            "This node is not the leader, but the current topology does mark it as the leader. " +
            "Such confusion is usually an indication of a network or configuration problem.");
    }

    var leaderLocation = url + HttpContext.Request.Path + HttpContext.Request.QueryString;
    HttpContext.Response.StatusCode = (int)HttpStatusCode.TemporaryRedirect;
    HttpContext.Response.Headers.Remove("Content-Type");
    HttpContext.Response.Headers.Add("Location", leaderLocation);
}
public Task GetClusterTopology()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var topology = ServerStore.GetClusterTopology(context);
        var nodeTag = ServerStore.NodeTag;

        if (topology.Members.Count == 0)
        {
            // no members yet - report a dummy single-node topology for this node
            var tag = ServerStore.NodeTag ?? "A";
            var serverUrl = ServerStore.NodeHttpServerUrl;

            topology = new ClusterTopology(
                "dummy",
                new Dictionary<string, string>
                {
                    [tag] = serverUrl
                },
                new Dictionary<string, string>(),
                new Dictionary<string, string>(),
                tag
            );
            nodeTag = tag;
        }

        HttpContext.Response.StatusCode = (int)HttpStatusCode.OK;

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            var loadLicenseLimits = ServerStore.LoadLicenseLimits();
            var nodeLicenseDetails = loadLicenseLimits == null
                ? null
                : DynamicJsonValue.Convert(loadLicenseLimits.NodeLicenseDetails);

            var json = new DynamicJsonValue
            {
                ["Topology"] = topology.ToSortedJson(),
                ["Leader"] = ServerStore.LeaderTag,
                ["CurrentState"] = ServerStore.CurrentRachisState,
                ["NodeTag"] = nodeTag,
                ["CurrentTerm"] = ServerStore.Engine.CurrentTerm,
                ["NodeLicenseDetails"] = nodeLicenseDetails,
                [nameof(ServerStore.Engine.LastStateChangeReason)] = ServerStore.LastStateChangeReason()
            };

            var clusterErrors = ServerStore.GetClusterErrors();
            if (clusterErrors.Count > 0)
            {
                json["Errors"] = clusterErrors;
            }

            var nodesStatuses = ServerStore.GetNodesStatuses();
            json["Status"] = DynamicJsonValue.Convert(nodesStatuses);

            context.Write(writer, json);
            writer.Flush();
        }
    }

    return Task.CompletedTask;
}
public ClusterTopology GetClusterTopology()
{
    using (_server.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
    using (ctx.OpenReadTransaction())
    {
        return _server.GetClusterTopology(ctx);
    }
}
public async Task DistributeKeyInCluster()
{
    await ServerStore.EnsureNotPassiveAsync();

    var name = GetStringQueryString("name");
    var nodes = GetStringValuesQueryString("node");

    using (var reader = new StreamReader(HttpContext.Request.Body))
    {
        var base64 = await reader.ReadToEndAsync();

        using (Server.ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        {
            ClusterTopology clusterTopology;
            using (ctx.OpenReadTransaction())
                clusterTopology = ServerStore.GetClusterTopology(ctx);

            foreach (var node in nodes)
            {
                if (string.IsNullOrEmpty(node))
                {
                    continue;
                }

                if (string.Equals(node, "?") || string.Equals(node, ServerStore.NodeTag, StringComparison.OrdinalIgnoreCase))
                {
                    var key = Convert.FromBase64String(base64);
                    if (key.Length != 256 / 8)
                    {
                        throw new ArgumentException($"Key size must be 256 bits, but was {key.Length * 8}", nameof(key));
                    }

                    StoreKeyLocally(name, key, ctx);
                }
                else
                {
                    var url = clusterTopology.GetUrlFromTag(node);
                    if (url == null)
                    {
                        throw new InvalidOperationException($"Node {node} is not a part of the cluster, cannot send secret key.");
                    }

                    if (url.StartsWith("https:", StringComparison.OrdinalIgnoreCase) == false)
                    {
                        throw new InvalidOperationException($"Cannot put secret key for {name} on node {node} with url {url} because it is not using HTTPS");
                    }

                    await SendKeyToNodeAsync(name, base64, ctx, ServerStore, node, url).ConfigureAwait(false);
                }
            }
        }
    }

    HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;
}
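// The 256-bit key check in DistributeKeyInCluster above is worth seeing in isolation.
// A minimal, self-contained sketch of the same validation; DecodeKey is a hypothetical
// helper written for this example, not part of RavenDB's API.
using System;

public static class KeySizeCheckDemo
{
    // Decodes a Base64-encoded secret and enforces the 256-bit (32-byte) size contract.
    public static byte[] DecodeKey(string base64)
    {
        var key = Convert.FromBase64String(base64);
        if (key.Length != 256 / 8)
        {
            throw new ArgumentException($"Key size must be 256 bits, but was {key.Length * 8}", nameof(base64));
        }

        return key;
    }

    public static void Main()
    {
        var key = DecodeKey(Convert.ToBase64String(new byte[32]));
        Console.WriteLine(key.Length); // prints 32
    }
}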
public async Task Watch()
{
    var nodeTag = GetStringQueryString("node", required: true);

    using (var webSocket = await HttpContext.WebSockets.AcceptWebSocketAsync())
    {
        try
        {
            if (nodeTag.Equals(ServerStore.NodeTag, StringComparison.OrdinalIgnoreCase))
            {
                var canAccessDatabase = GetDatabaseAccessValidationFunc();
                await SendNotifications(canAccessDatabase, webSocket);
            }
            else
            {
                ClusterTopology topology;
                using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext txOperationContext))
                using (txOperationContext.OpenReadTransaction())
                {
                    topology = ServerStore.GetClusterTopology(txOperationContext);
                }

                var remoteNodeUrl = topology.GetUrlFromTag(nodeTag);
                if (string.IsNullOrEmpty(remoteNodeUrl))
                {
                    throw new InvalidOperationException($"Could not find node url for node tag '{nodeTag}'");
                }

                var currentCert = GetCurrentCertificate();

                using (var connection = new ProxyWebSocketConnection(webSocket, remoteNodeUrl,
                    $"/admin/cluster-dashboard/remote/watch?thumbprint={currentCert?.Thumbprint}",
                    ServerStore.ContextPool, ServerStore.ServerShutdown))
                {
                    await connection.Establish(Server.Certificate?.Certificate);
                    await connection.RelayData();
                }
            }
        }
        catch (OperationCanceledException)
        {
            // ignored
        }
        catch (Exception ex)
        {
            await HandleException(ex, webSocket);
        }
    }
}
private async Task CreateInternal(BlittableJsonReaderObject bjro, SubscriptionCreationOptions options, DocumentsOperationContext context, long? id, bool? disabled)
{
    if (TrafficWatchManager.HasRegisteredClients)
    {
        AddStringToHttpContext(bjro.ToString(), TrafficWatchChangeType.Subscriptions);
    }

    var sub = SubscriptionConnection.ParseSubscriptionQuery(options.Query);

    if (Enum.TryParse(options.ChangeVector, out Constants.Documents.SubscriptionChangeVectorSpecialStates changeVectorSpecialValue))
    {
        switch (changeVectorSpecialValue)
        {
            case Constants.Documents.SubscriptionChangeVectorSpecialStates.BeginningOfTime:
                options.ChangeVector = null;
                break;

            case Constants.Documents.SubscriptionChangeVectorSpecialStates.LastDocument:
                options.ChangeVector = Database.DocumentsStorage.GetLastDocumentChangeVector(context.Transaction.InnerTransaction, context, sub.Collection);
                break;
        }
    }

    var mentor = options.MentorNode;
    var subscriptionId = await Database.SubscriptionStorage.PutSubscription(options, GetRaftRequestIdFromQuery(), id, disabled, mentor);
    var name = options.Name ?? subscriptionId.ToString();

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext serverContext))
    using (serverContext.OpenReadTransaction())
    {
        // need to wait on the relevant remote node
        var node = Database.SubscriptionStorage.GetResponsibleNode(serverContext, name);
        if (node != null && node != ServerStore.NodeTag)
        {
            await WaitForExecutionOnSpecificNode(serverContext, ServerStore.GetClusterTopology(serverContext), node, subscriptionId);
        }
    }

    HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;

    await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        context.Write(writer, new DynamicJsonValue
        {
            [nameof(CreateSubscriptionResult.Name)] = name
        });
    }
}
public Task ClusterDomains()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        List<string> domains;
        if (ServerStore.CurrentRachisState != RachisState.Passive)
        {
            ClusterTopology clusterTopology;
            using (context.OpenReadTransaction())
                clusterTopology = ServerStore.GetClusterTopology(context);

            domains = clusterTopology.AllNodes.Select(node => new Uri(node.Value).DnsSafeHost).ToList();
        }
        else
        {
            var myUrl = Server.Configuration.Core.PublicServerUrl.HasValue
                ? Server.Configuration.Core.PublicServerUrl.Value.UriValue
                : Server.Configuration.Core.ServerUrls[0];
            var myDomain = new Uri(myUrl).DnsSafeHost;
            domains = new List<string> { myDomain };
        }

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("ClusterDomains");

            writer.WriteStartArray();
            var first = true;
            foreach (var domain in domains)
            {
                if (first == false)
                {
                    writer.WriteComma();
                }
                first = false;
                writer.WriteString(domain);
            }
            writer.WriteEndArray();

            writer.WriteEndObject();
        }
    }

    return Task.CompletedTask;
}
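// ClusterDomains above reduces each node URL to its DNS-safe host name. A tiny,
// self-contained sketch of that Uri.DnsSafeHost extraction (the URLs below are
// invented examples, not real cluster nodes):
using System;
using System.Collections.Generic;
using System.Linq;

public static class DnsSafeHostDemo
{
    public static void Main()
    {
        var nodeUrls = new List<string> { "https://a.ravendb.example:443", "https://b.ravendb.example" };

        // Same projection as the handler: URL string -> Uri -> host suitable for DNS use.
        var domains = nodeUrls.Select(url => new Uri(url).DnsSafeHost).ToList();

        Console.WriteLine(string.Join(", ", domains)); // a.ravendb.example, b.ravendb.example
    }
}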
private Dictionary<string, (HashSet<string>, string)> CreateUrlToDatabaseNamesMapping(TransactionOperationContext transactionOperationContext, IEnumerable<string> databaseNames)
{
    var nodeUrlToDatabaseNames = new Dictionary<string, (HashSet<string>, string)>();
    var clusterTopology = ServerStore.GetClusterTopology(transactionOperationContext);

    foreach (var databaseName in databaseNames)
    {
        var topology = ServerStore.Cluster.ReadDatabaseTopology(transactionOperationContext, databaseName);
        var nodeUrlsAndTags = topology.AllNodes.Select(tag => (clusterTopology.GetUrlFromTag(tag), tag));

        foreach (var urlAndTag in nodeUrlsAndTags)
        {
            if (nodeUrlToDatabaseNames.TryGetValue(urlAndTag.Item1, out (HashSet<string>, string) databaseNamesWithNodeTag))
            {
                databaseNamesWithNodeTag.Item1.Add(databaseName);
            }
            else
            {
                // The source snippet was truncated at this point; a plausible completion
                // is to start a new entry for a node url we haven't seen yet.
                nodeUrlToDatabaseNames.Add(urlAndTag.Item1, (new HashSet<string> { databaseName }, urlAndTag.Item2));
            }
        }
    }

    return nodeUrlToDatabaseNames;
}
public Task GetNodeInfo()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        var json = new DynamicJsonValue();
        using (context.OpenReadTransaction())
        {
            json[nameof(NodeInfo.NodeTag)] = ServerStore.NodeTag;
            json[nameof(NodeInfo.TopologyId)] = ServerStore.GetClusterTopology(context).TopologyId;
            json[nameof(NodeInfo.Certificate)] = ServerStore.RavenServer.ClusterCertificateHolder.CertificateForClients;
            json[nameof(ServerStore.Engine.LastStateChangeReason)] = ServerStore.LastStateChangeReason();
        }

        context.Write(writer, json);
        writer.Flush();
    }

    return Task.CompletedTask;
}
public async Task PingTest()
{
    var dest = GetStringQueryString("url", false) ?? GetStringQueryString("node", false);
    var topology = ServerStore.GetClusterTopology();
    var tasks = new List<Task<PingResult>>();

    if (string.IsNullOrEmpty(dest))
    {
        foreach (var node in topology.AllNodes)
        {
            tasks.Add(PingOnce(node.Value));
        }
    }
    else
    {
        var url = topology.GetUrlFromTag(dest);
        tasks.Add(PingOnce(url ?? dest));
    }

    using (ServerStore.ContextPool.AllocateOperationContext(out JsonOperationContext context))
    using (var write = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        write.WriteStartObject();
        write.WritePropertyName("Result");
        write.WriteStartArray();

        while (tasks.Count > 0)
        {
            var task = await Task.WhenAny(tasks);
            tasks.Remove(task);
            context.Write(write, task.Result.ToJson());

            if (tasks.Count > 0)
            {
                write.WriteComma();
            }

            write.Flush();
        }

        write.WriteEndArray();
        write.WriteEndObject();
        write.Flush();
    }
}
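// PingTest above streams results in completion order: Task.WhenAny returns the first
// finished task, which is then removed from the list before waiting again. A minimal,
// self-contained sketch of that draining pattern (the tasks and delays are invented
// for illustration, not part of RavenDB):
using System;
using System.Collections.Generic;
using System.Threading.Tasks;

public static class WhenAnyDrainDemo
{
    public static async Task Main()
    {
        var tasks = new List<Task<int>> { Delayed(300), Delayed(100), Delayed(200) };

        while (tasks.Count > 0)
        {
            var finished = await Task.WhenAny(tasks); // first task to complete
            tasks.Remove(finished);                   // don't await the same task twice
            Console.WriteLine(await finished);        // prints 100, 200, 300
        }
    }

    private static async Task<int> Delayed(int ms)
    {
        await Task.Delay(ms);
        return ms;
    }
}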
public async Task GetClusterWideInfoPackage()
{
    var contentDisposition = $"attachment; filename={DateTime.UtcNow:yyyy-MM-dd H:mm:ss} Cluster Wide.zip";
    HttpContext.Response.Headers["Content-Disposition"] = contentDisposition;
    HttpContext.Response.Headers["Content-Type"] = "application/zip";

    var token = CreateOperationToken();
    var operationId = GetLongQueryString("operationId", false) ?? ServerStore.Operations.GetNextOperationId();

    await ServerStore.Operations.AddOperation(null, "Created debug package for all cluster nodes",
        Operations.Operations.OperationType.DebugPackage, async _ =>
        {
            using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext transactionOperationContext))
            using (ServerStore.ContextPool.AllocateOperationContext(out JsonOperationContext jsonOperationContext))
            using (transactionOperationContext.OpenReadTransaction())
            {
                await using (var ms = new MemoryStream())
                {
                    using (var archive = new ZipArchive(ms, ZipArchiveMode.Create, true))
                    {
                        var topology = ServerStore.GetClusterTopology(transactionOperationContext);

                        foreach (var (tag, url) in topology.AllNodes)
                        {
                            try
                            {
                                await WriteDebugInfoPackageForNodeAsync(jsonOperationContext, archive, tag, url, Server.Certificate.Certificate);
                            }
                            catch (Exception e)
                            {
                                await DebugInfoPackageUtils.WriteExceptionAsZipEntryAsync(e, archive, $"Node - [{tag}]");
                            }
                        }
                    }

                    ms.Position = 0;
                    await ms.CopyToAsync(ResponseBodyStream(), token.Token);
                }
            }

            return null;
        }, operationId, token: token);
}
public static OngoingTasksResult GetOngoingTasksFor(string dbName, ServerStore store)
{
    var ongoingTasksResult = new OngoingTasksResult();
    using (store.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        DatabaseTopology dbTopology;
        ClusterTopology clusterTopology;
        DatabaseRecord databaseRecord;

        using (context.OpenReadTransaction())
        {
            databaseRecord = store.Cluster.ReadDatabase(context, dbName);

            if (databaseRecord == null)
            {
                return ongoingTasksResult;
            }

            dbTopology = databaseRecord.Topology;
            clusterTopology = store.GetClusterTopology(context);
            ongoingTasksResult.OngoingTasksList.AddRange(CollectSubscriptionTasks(context, databaseRecord, clusterTopology, store));
        }

        foreach (var tasks in new[]
        {
            CollectExternalReplicationTasks(databaseRecord.ExternalReplication, dbTopology, clusterTopology, store),
            CollectEtlTasks(databaseRecord, dbTopology, clusterTopology, store),
            CollectBackupTasks(databaseRecord, dbTopology, clusterTopology, store)
        })
        {
            ongoingTasksResult.OngoingTasksList.AddRange(tasks);
        }

        if (store.DatabasesLandlord.DatabasesCache.TryGetValue(dbName, out var database) && database.Status == TaskStatus.RanToCompletion)
        {
            ongoingTasksResult.SubscriptionsCount = (int)database.Result.SubscriptionStorage.GetAllSubscriptionsCount();
        }

        return ongoingTasksResult;
    }
}
public async Task RemoveEntryFromLog()
{
    var index = GetLongQueryString("index");
    var first = GetBoolValueQueryString("first", false) ?? true;
    var nodeList = new List<string>();

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var removed = ServerStore.Engine.RemoveEntryFromRaftLog(index);
        if (removed)
        {
            nodeList.Add(ServerStore.NodeTag);
        }

        if (first)
        {
            foreach (var node in ServerStore.GetClusterTopology(context).AllNodes)
            {
                if (node.Value == Server.WebUrl)
                {
                    continue;
                }

                var cmd = new RemoveEntryFromRaftLogCommand(index);
                using (var requestExecutor = ClusterRequestExecutor.CreateForSingleNode(node.Value, Server.Certificate.Certificate))
                {
                    await requestExecutor.ExecuteAsync(cmd, context);
                    nodeList.AddRange(cmd.Result);
                }
            }
        }

        await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartObject();
            writer.WriteArray("Nodes", nodeList);
            writer.WriteEndObject();
        }
    }
}
public Task GetRemoteTaskTopology()
{
    var database = GetStringQueryString("database");
    var databaseGroupId = GetStringQueryString("groupId");
    var remoteTask = GetStringQueryString("remote-task");

    if (Authenticate(HttpContext, ServerStore, database, remoteTask) == false)
    {
        return Task.CompletedTask;
    }

    List<string> nodes;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var pullReplication = ServerStore.Cluster.ReadPullReplicationDefinition(database, remoteTask, context);
        if (pullReplication.Disabled)
        {
            throw new InvalidOperationException($"The pull replication '{remoteTask}' is disabled.");
        }

        var topology = ServerStore.Cluster.ReadDatabaseTopology(context, database);
        nodes = GetResponsibleNodes(topology, databaseGroupId, pullReplication);
    }

    using (ServerStore.ContextPool.AllocateOperationContext(out JsonOperationContext context))
    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        var output = new DynamicJsonArray();
        var clusterTopology = ServerStore.GetClusterTopology();

        foreach (var node in nodes)
        {
            output.Add(clusterTopology.GetUrlFromTag(node));
        }

        context.Write(writer, new DynamicJsonValue
        {
            ["Results"] = output
        });
    }

    return Task.CompletedTask;
}
private void RedirectToLeader()
{
    if (ServerStore.LeaderTag == null)
    {
        throw new NoLeaderException();
    }

    ClusterTopology topology;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        topology = ServerStore.GetClusterTopology(context);
    }

    var url = topology.GetUrlFromTag(ServerStore.LeaderTag);
    var leaderLocation = url + HttpContext.Request.Path + HttpContext.Request.QueryString;
    HttpContext.Response.StatusCode = (int)HttpStatusCode.TemporaryRedirect;
    HttpContext.Response.Headers.Remove("Content-Type");
    HttpContext.Response.Headers.Add("Location", leaderLocation);
}
public async Task DemoteNode()
{
    if (ServerStore.LeaderTag == null)
    {
        NoContentStatus();
        return;
    }

    SetupCORSHeaders();

    if (ServerStore.IsLeader() == false)
    {
        RedirectToLeader();
        return;
    }

    var nodeTag = GetStringQueryString("nodeTag");
    if (nodeTag == ServerStore.LeaderTag)
    {
        throw new InvalidOperationException(
            $"Failed to demote node {nodeTag} because {nodeTag} is the current leader in the cluster topology. In order to demote {nodeTag}, perform a step-down first");
    }

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var topology = ServerStore.GetClusterTopology(context);
        if (topology.Promotables.ContainsKey(nodeTag) == false && topology.Members.ContainsKey(nodeTag) == false)
        {
            throw new InvalidOperationException(
                $"Failed to demote node {nodeTag} because {nodeTag} is not a voter in the cluster topology");
        }

        var url = topology.GetUrlFromTag(nodeTag);
        await ServerStore.Engine.ModifyTopologyAsync(nodeTag, url, Leader.TopologyModification.NonVoter);
        NoContentStatus();
    }
}
public OngoingTasksResult GetOngoingTasksInternal()
{
    var ongoingTasksResult = new OngoingTasksResult();
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        DatabaseTopology dbTopology;
        ClusterTopology clusterTopology;
        DatabaseRecord databaseRecord;

        using (context.OpenReadTransaction())
        {
            databaseRecord = ServerStore.Cluster.ReadDatabase(context, Database.Name);

            if (databaseRecord == null)
            {
                return ongoingTasksResult;
            }

            dbTopology = databaseRecord.Topology;
            clusterTopology = ServerStore.GetClusterTopology(context);
            ongoingTasksResult.OngoingTasksList.AddRange(CollectSubscriptionTasks(context, databaseRecord, clusterTopology));
        }

        foreach (var tasks in new[]
        {
            CollectExternalReplicationTasks(databaseRecord.ExternalReplication, dbTopology, clusterTopology),
            CollectEtlTasks(databaseRecord, dbTopology, clusterTopology),
            CollectBackupTasks(databaseRecord, dbTopology, clusterTopology)
        })
        {
            ongoingTasksResult.OngoingTasksList.AddRange(tasks);
        }

        ongoingTasksResult.SubscriptionsCount = (int)Database.SubscriptionStorage.GetAllSubscriptionsCount();

        return ongoingTasksResult;
    }
}
private static bool IsOriginAllowed(string origin, ServerStore serverStore)
{
    if (serverStore.Server.Certificate.Certificate == null)
    {
        // running in unsafe mode - since the server can be accessed via multiple urls/aliases, accept them all
        return true;
    }

    var topology = serverStore.GetClusterTopology();

    // check each topology type explicitly to avoid the allocations in topology.AllNodes
    foreach (var kvp in topology.Members)
    {
        if (kvp.Value.Equals(origin, StringComparison.OrdinalIgnoreCase))
        {
            return true;
        }
    }

    foreach (var kvp in topology.Watchers)
    {
        if (kvp.Value.Equals(origin, StringComparison.OrdinalIgnoreCase))
        {
            return true;
        }
    }

    foreach (var kvp in topology.Promotables)
    {
        if (kvp.Value.Equals(origin, StringComparison.OrdinalIgnoreCase))
        {
            return true;
        }
    }

    return false;
}
public async Task AddNode()
{
    SetupCORSHeaders();

    var nodeUrl = GetQueryStringValueAndAssertIfSingleAndNotEmpty("url");
    var watcher = GetBoolValueQueryString("watcher", false);
    var assignedCores = GetIntValueQueryString("assignedCores", false);
    if (assignedCores <= 0)
    {
        throw new ArgumentException("Assigned cores must be greater than 0!");
    }

    nodeUrl = UrlHelper.TryGetLeftPart(nodeUrl);
    var remoteIsHttps = nodeUrl.StartsWith("https:", StringComparison.OrdinalIgnoreCase);

    if (HttpContext.Request.IsHttps != remoteIsHttps)
    {
        throw new InvalidOperationException($"Cannot add node '{nodeUrl}' to cluster because it will create an invalid mix of HTTPS & HTTP endpoints. A cluster must be only HTTPS or only HTTP.");
    }

    NodeInfo nodeInfo;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
    using (var requestExecutor = ClusterRequestExecutor.CreateForSingleNode(nodeUrl, Server.Certificate.Certificate))
    {
        requestExecutor.DefaultTimeout = ServerStore.Engine.OperationTimeout;

        var infoCmd = new GetNodeInfoCommand();
        try
        {
            await requestExecutor.ExecuteAsync(infoCmd, ctx);
        }
        catch (AllTopologyNodesDownException e)
        {
            throw new InvalidOperationException($"Couldn't contact node at {nodeUrl}", e);
        }

        nodeInfo = infoCmd.Result;

        if (ServerStore.IsPassive() && nodeInfo.TopologyId != null)
        {
            throw new TopologyMismatchException("You can't add a new node to an already existing cluster");
        }
    }

    if (assignedCores != null && assignedCores > nodeInfo.NumberOfCores)
    {
        throw new ArgumentException("Cannot add node because the number of assigned cores is larger " +
                                    $"than the available cores on that machine: {nodeInfo.NumberOfCores}");
    }

    ServerStore.EnsureNotPassive();

    if (assignedCores == null)
    {
        assignedCores = ServerStore.LicenseManager.GetCoresToAssign(nodeInfo.NumberOfCores);
    }

    Debug.Assert(assignedCores <= nodeInfo.NumberOfCores);

    ServerStore.LicenseManager.AssertCanAddNode(nodeUrl, assignedCores.Value);

    if (ServerStore.IsLeader())
    {
        using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        {
            string topologyId;
            ClusterTopology clusterTopology;
            using (ctx.OpenReadTransaction())
            {
                clusterTopology = ServerStore.GetClusterTopology(ctx);
                topologyId = clusterTopology.TopologyId;
            }

            var possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
            if (possibleNode.HasUrl)
            {
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because this url is already used by node {possibleNode.NodeTag}");
            }

            if (nodeInfo.ServerId == ServerStore.GetServerId())
            {
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because it's a synonym of the current node URL: {ServerStore.GetNodeHttpServerUrl()}");
            }

            if (nodeInfo.TopologyId != null)
            {
                if (topologyId != nodeInfo.TopologyId)
                {
                    throw new TopologyMismatchException(
                        "Adding a new node to cluster failed. The new node is already in another cluster. " +
                        $"Expected topology id: {topologyId}, but got {nodeInfo.TopologyId}");
                }

                if (nodeInfo.CurrentState != RachisState.Passive)
                {
                    throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster " +
                                                        $"because it's already in the cluster under tag: {nodeInfo.NodeTag} " +
                                                        $"and URL: {clusterTopology.GetUrlFromTag(nodeInfo.NodeTag)}");
                }
            }

            var nodeTag = nodeInfo.NodeTag == RachisConsensus.InitialTag
                ? null
                : nodeInfo.NodeTag;

            CertificateDefinition oldServerCert = null;
            X509Certificate2 certificate = null;

            if (remoteIsHttps)
            {
                if (nodeInfo.Certificate == null)
                {
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because it has no certificate while trying to use HTTPS");
                }

                certificate = new X509Certificate2(Convert.FromBase64String(nodeInfo.Certificate), (string)null, X509KeyStorageFlags.MachineKeySet);

                var now = DateTime.UtcNow;
                if (certificate.NotBefore.ToUniversalTime() > now)
                {
                    // Because of time zone and time drift issues, we can't assume that the certificate generation will be
                    // proper. Because of that, we allow tolerance of the NotBefore to be a bit earlier / later than the
                    // current time. Clients may still fail to work with our certificate because of timing issues,
                    // but the admin needs to setup time sync properly and there isn't much we can do at that point
                    if ((certificate.NotBefore.ToUniversalTime() - now).TotalDays > 1)
                    {
                        throw new InvalidOperationException(
                            $"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' is not yet valid. It starts on {certificate.NotBefore}");
                    }
                }

                if (certificate.NotAfter.ToUniversalTime() < now)
                {
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' expired on {certificate.NotAfter}");
                }

                var expected = GetStringQueryString("expectedThumbprint", required: false);
                if (expected != null)
                {
                    if (certificate.Thumbprint != expected)
                    {
                        throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate thumbprint '{certificate.Thumbprint}' doesn't match the expected thumbprint '{expected}'.");
                    }
                }

                using (ctx.OpenReadTransaction())
                {
                    var key = Constants.Certificates.Prefix + certificate.Thumbprint;
                    var readCert = ServerStore.Cluster.Read(ctx, key);
                    if (readCert != null)
                    {
                        oldServerCert = JsonDeserializationServer.CertificateDefinition(readCert);
                    }
                }

                if (oldServerCert == null)
                {
                    var certificateDefinition = new CertificateDefinition
                    {
                        Certificate = nodeInfo.Certificate,
                        Thumbprint = certificate.Thumbprint,
                        NotAfter = certificate.NotAfter,
                        Name = "Server Certificate for " + nodeUrl,
                        SecurityClearance = SecurityClearance.ClusterNode
                    };

                    var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(Constants.Certificates.Prefix + certificate.Thumbprint, certificateDefinition));
                    await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                }
            }

            await ServerStore.AddNodeToClusterAsync(nodeUrl, nodeTag, validateNotInTopology: false, asWatcher: watcher ?? false);

            using (ctx.OpenReadTransaction())
            {
                clusterTopology = ServerStore.GetClusterTopology(ctx);
                possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
                nodeTag = possibleNode.HasUrl ? possibleNode.NodeTag : null;

                if (certificate != null)
                {
                    var key = Constants.Certificates.Prefix + certificate.Thumbprint;

                    var modifiedServerCert = JsonDeserializationServer.CertificateDefinition(ServerStore.Cluster.Read(ctx, key));
                    if (modifiedServerCert == null)
                    {
                        throw new ConcurrencyException("After adding the certificate, it was removed; this shouldn't happen unless another admin removed it midway through.");
                    }

                    if (oldServerCert == null)
                    {
                        modifiedServerCert.Name = "Server certificate for Node " + nodeTag;
                    }
                    else
                    {
                        var value = "Node " + nodeTag;
                        if (modifiedServerCert.Name.Contains(value) == false)
                        {
                            modifiedServerCert.Name += ", " + value;
                        }
                    }

                    var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(key, modifiedServerCert));
                    await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                }

                var nodeDetails = new NodeDetails
                {
                    NodeTag = nodeTag,
                    AssignedCores = assignedCores.Value,
                    NumberOfCores = nodeInfo.NumberOfCores,
                    InstalledMemoryInGb = nodeInfo.InstalledMemoryInGb,
                    UsableMemoryInGb = nodeInfo.UsableMemoryInGb,
                    BuildInfo = nodeInfo.BuildInfo
                };

                await ServerStore.LicenseManager.CalculateLicenseLimits(nodeDetails, forceFetchingNodeInfo: true, waitToUpdate: true);
            }

            NoContentStatus();
            return;
        }
    }

    RedirectToLeader();
}
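// The certificate checks in AddNode above allow NotBefore to be slightly in the future
// (to absorb clock drift between machines) but give no grace on expiry. A minimal,
// self-contained sketch of that validity-window logic; ValidateWindow is a hypothetical
// helper that mirrors the checks, not RavenDB's API.
using System;
using System.Security.Cryptography.X509Certificates;

public static class CertValidityDemo
{
    public static void ValidateWindow(X509Certificate2 certificate)
    {
        var now = DateTime.UtcNow;

        // Tolerate a NotBefore up to one day in the future to absorb time-zone and
        // time-drift issues; anything further out is rejected.
        if (certificate.NotBefore.ToUniversalTime() > now &&
            (certificate.NotBefore.ToUniversalTime() - now).TotalDays > 1)
        {
            throw new InvalidOperationException($"Certificate is not yet valid; it starts on {certificate.NotBefore}");
        }

        // No tolerance in the other direction: an expired certificate is always rejected.
        if (certificate.NotAfter.ToUniversalTime() < now)
        {
            throw new InvalidOperationException($"Certificate expired on {certificate.NotAfter}");
        }
    }
}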
public Task GetOngoingTaskInfo()
{
    if (ResourceNameValidator.IsValidResourceName(Database.Name, ServerStore.Configuration.Core.DataDirectory.FullPath, out string errorMessage) == false)
    {
        throw new BadRequestException(errorMessage);
    }

    var key = GetLongQueryString("key");
    var typeStr = GetQueryStringValueAndAssertIfSingleAndNotEmpty("type");

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        using (context.OpenReadTransaction())
        {
            var clusterTopology = ServerStore.GetClusterTopology(context);
            var record = ServerStore.Cluster.ReadDatabase(context, Database.Name);
            var dbTopology = record?.Topology;

            if (Enum.TryParse<OngoingTaskType>(typeStr, true, out var type) == false)
            {
                throw new ArgumentException($"Unknown task type: {typeStr}", "type");
            }

            string tag;

            switch (type)
            {
                case OngoingTaskType.Replication:
                    var watcher = record?.ExternalReplication.Find(x => x.TaskId == key);
                    if (watcher == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }

                    var taskInfo = GetExternalReplicationInfo(dbTopology, clusterTopology, watcher);
                    WriteResult(context, taskInfo);
                    break;

                case OngoingTaskType.Backup:
                    var backupConfiguration = record?.PeriodicBackups?.Find(x => x.TaskId == key);
                    if (backupConfiguration == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }

                    tag = dbTopology?.WhoseTaskIsIt(backupConfiguration, ServerStore.Engine.CurrentState);
                    var backupDestinations = GetBackupDestinations(backupConfiguration);
                    var backupStatus = Database.PeriodicBackupRunner.GetBackupStatus(key);
                    var nextBackup = Database.PeriodicBackupRunner.GetNextBackupDetails(record, backupConfiguration, backupStatus);

                    var backupTaskInfo = new OngoingTaskBackup
                    {
                        TaskId = backupConfiguration.TaskId,
                        BackupType = backupConfiguration.BackupType,
                        TaskName = backupConfiguration.Name,
                        TaskState = backupConfiguration.Disabled ? OngoingTaskState.Disabled : OngoingTaskState.Enabled,
                        ResponsibleNode = new NodeId
                        {
                            NodeTag = tag,
                            NodeUrl = clusterTopology.GetUrlFromTag(tag)
                        },
                        BackupDestinations = backupDestinations,
                        LastFullBackup = backupStatus.LastFullBackup,
                        LastIncrementalBackup = backupStatus.LastIncrementalBackup,
                        NextBackup = nextBackup
                    };

                    WriteResult(context, backupTaskInfo);
                    break;

                case OngoingTaskType.SqlEtl:
                    var sqlEtl = record?.SqlEtls?.Find(x => x.TaskId == key);
                    if (sqlEtl == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }

                    WriteResult(context, new OngoingTaskSqlEtlDetails
                    {
                        TaskId = sqlEtl.TaskId,
                        TaskName = sqlEtl.Name,
                        Configuration = sqlEtl,
                        TaskState = GetEtlTaskState(sqlEtl)
                    });
                    break;

                case OngoingTaskType.RavenEtl:
                    var ravenEtl = record?.RavenEtls?.Find(x => x.TaskId == key);
                    if (ravenEtl == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }

                    WriteResult(context, new OngoingTaskRavenEtlDetails
                    {
                        TaskId = ravenEtl.TaskId,
                        TaskName = ravenEtl.Name,
                        Configuration = ravenEtl,
                        TaskState = GetEtlTaskState(ravenEtl)
                    });
                    break;

                case OngoingTaskType.Subscription:
                    var nameKey = GetQueryStringValueAndAssertIfSingleAndNotEmpty("taskName");
                    var itemKey = SubscriptionState.GenerateSubscriptionItemKeyName(record.DatabaseName, nameKey);
                    var doc = ServerStore.Cluster.Read(context, itemKey);
                    if (doc == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }

                    var subscriptionState = JsonDeserializationClient.SubscriptionState(doc);
                    tag = dbTopology?.WhoseTaskIsIt(subscriptionState, ServerStore.Engine.CurrentState);

                    var subscriptionStateInfo = new SubscriptionStateWithNodeDetails
                    {
                        Query = subscriptionState.Query,
                        ChangeVectorForNextBatchStartingPoint = subscriptionState.ChangeVectorForNextBatchStartingPoint,
                        SubscriptionId = subscriptionState.SubscriptionId,
                        SubscriptionName = subscriptionState.SubscriptionName,
                        LastBatchAckTime = subscriptionState.LastBatchAckTime,
                        Disabled = subscriptionState.Disabled,
                        LastClientConnectionTime = subscriptionState.LastClientConnectionTime,
                        MentorNode = subscriptionState.MentorNode,
                        ResponsibleNode = new NodeId
                        {
                            NodeTag = tag,
                            NodeUrl = clusterTopology.GetUrlFromTag(tag)
                        }
                    };

                    // Todo: here we'll need to talk with the running node?
                    // TaskConnectionStatus = subscriptionState.Disabled ? OngoingTaskConnectionStatus.NotActive : OngoingTaskConnectionStatus.Active,

                    WriteResult(context, subscriptionStateInfo.ToJson());
                    break;

                default:
                    HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                    break;
            }
        }
    }

    return Task.CompletedTask;
}
private void WriteDatabaseInfo(string databaseName, BlittableJsonReaderObject dbRecordBlittable, TransactionOperationContext context, BlittableJsonTextWriter writer)
{
    var online = ServerStore.DatabasesLandlord.DatabasesCache.TryGetValue(databaseName, out Task<DocumentDatabase> dbTask) &&
                 dbTask != null &&
                 dbTask.IsCompleted;

    // Check for exceptions
    if (dbTask != null && dbTask.IsFaulted)
    {
        WriteFaultedDatabaseInfo(context, writer, dbTask, databaseName);
        return;
    }

    var dbRecord = JsonDeserializationCluster.DatabaseRecord(dbRecordBlittable);
    var db = online ? dbTask.Result : null;

    var indexingStatus = db?.IndexStore.Status ?? IndexRunningStatus.Running;
    // Looking for disabled indexing flag inside the database settings for offline database status
    if (dbRecord.Settings.TryGetValue(RavenConfiguration.GetKey(x => x.Indexing.Disabled), out var val) && val == "true")
    {
        indexingStatus = IndexRunningStatus.Disabled;
    }

    var disabled = dbRecord.Disabled;
    var topology = dbRecord.Topology;
    var clusterTopology = ServerStore.GetClusterTopology(context);

    var nodesTopology = new NodesTopology();

    if (topology != null)
    {
        foreach (var member in topology.Members)
        {
            var url = clusterTopology.GetUrlFromTag(member);
            var node = new InternalReplication
            {
                Database = databaseName,
                NodeTag = member,
                Url = url
            };
            nodesTopology.Members.Add(GetNodeId(node));
            SetNodeStatus(topology, member, nodesTopology);
        }

        foreach (var promotable in topology.Promotables)
        {
            var node = GetNode(databaseName, clusterTopology, promotable, out var promotableTask);
            var mentor = topology.WhoseTaskIsIt(promotableTask, ServerStore.IsPassive());
            nodesTopology.Promotables.Add(GetNodeId(node, mentor));
            SetNodeStatus(topology, promotable, nodesTopology);
        }

        foreach (var rehab in topology.Rehabs)
        {
            var node = GetNode(databaseName, clusterTopology, rehab, out var promotableTask);
            var mentor = topology.WhoseTaskIsIt(promotableTask, ServerStore.IsPassive());
            nodesTopology.Rehabs.Add(GetNodeId(node, mentor));
            SetNodeStatus(topology, rehab, nodesTopology);
        }
    }

    if (online == false)
    {
        // If state of database is found in the cache we can continue
        if (ServerStore.DatabaseInfoCache.TryWriteOfflineDatabaseStatusToRequest(
            context, writer, databaseName, disabled, indexingStatus, nodesTopology))
        {
            return;
        }
        // We won't find it if it is a new database or after a dirty shutdown, so just report empty values then
    }

    var size = new Size(GetTotalSize(db));

    var databaseInfo = new DatabaseInfo
    {
        Name = databaseName,
        Disabled = disabled,
        TotalSize = size,

        IsAdmin = true, //TODO: implement me!
        IsEncrypted = dbRecord.Encrypted,
        UpTime = online ? (TimeSpan?)GetUptime(db) : null,
        BackupInfo = GetBackupInfo(db),

        Alerts = db?.NotificationCenter.GetAlertCount() ?? 0,
        RejectClients = false, //TODO: implement me!
        LoadError = null,
        IndexingErrors = db?.IndexStore.GetIndexes().Sum(index => index.GetErrorCount()) ?? 0,

        DocumentsCount = db?.DocumentsStorage.GetNumberOfDocuments() ?? 0,
        HasRevisionsConfiguration = db?.DocumentsStorage.RevisionsStorage.Configuration != null,
        HasExpirationConfiguration = db?.ExpiredDocumentsCleaner != null,
        IndexesCount = db?.IndexStore.GetIndexes().Count() ?? 0,
        IndexingStatus = indexingStatus,

        NodesTopology = nodesTopology,
        ReplicationFactor = topology?.ReplicationFactor ?? -1,
        DynamicNodesDistribution = topology?.DynamicNodesDistribution ?? false
    };

    var doc = databaseInfo.ToJson();
    context.Write(writer, doc);
}
public async Task GetTopology()
{
    var name = GetQueryStringValueAndAssertIfSingleAndNotEmpty("name");
    var applicationIdentifier = GetStringQueryString("applicationIdentifier", required: false);

    if (applicationIdentifier != null)
    {
        AlertIfDocumentStoreCreationRateIsNotReasonable(applicationIdentifier, name);
    }

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        using (context.OpenReadTransaction())
        using (var rawRecord = ServerStore.Cluster.ReadRawDatabaseRecord(context, name))
        {
            if (await CanAccessDatabaseAsync(name, requireAdmin: false) == false)
            {
                return;
            }

            if (rawRecord == null)
            {
                // here we return 503 so clients will try to failover to another server
                // if this is a newly created db that we haven't been notified about yet
                HttpContext.Response.StatusCode = (int)HttpStatusCode.ServiceUnavailable;
                HttpContext.Response.Headers["Database-Missing"] = name;
                await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
                {
                    context.Write(writer, new DynamicJsonValue
                    {
                        ["Type"] = "Error",
                        ["Message"] = "Database " + name + " wasn't found"
                    });
                }

                return;
            }

            var clusterTopology = ServerStore.GetClusterTopology(context);
            if (ServerStore.IsPassive() && clusterTopology.TopologyId != null)
            {
                // we were kicked out of the cluster
                HttpContext.Response.StatusCode = (int)HttpStatusCode.ServiceUnavailable;
                return;
            }

            if (rawRecord.Topology.Members.Count == 0 && rawRecord.Topology.Rehabs.Count == 0 && rawRecord.DeletionInProgress.Any())
            {
                // the database is in the process of being deleted from all nodes
                HttpContext.Response.StatusCode = (int)HttpStatusCode.ServiceUnavailable;
                HttpContext.Response.Headers["Database-Missing"] = name;
                await using (var writer = new AsyncBlittableJsonTextWriter(context, HttpContext.Response.Body))
                {
                    context.Write(writer, new DynamicJsonValue
                    {
                        ["Type"] = "Error",
                        ["Message"] = "Database " + name + " was deleted"
                    });
                }

                return;
            }

            clusterTopology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext);

            await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
            {
                context.Write(writer, new DynamicJsonValue
                {
                    [nameof(Topology.Nodes)] = new DynamicJsonArray(
                        rawRecord.Topology.Members.Select(x => new DynamicJsonValue
                        {
                            [nameof(ServerNode.Url)] = GetUrl(x, clusterTopology),
                            [nameof(ServerNode.ClusterTag)] = x,
                            [nameof(ServerNode.ServerRole)] = ServerNode.Role.Member,
                            [nameof(ServerNode.Database)] = rawRecord.DatabaseName
                        })
                        .Concat(rawRecord.Topology.Rehabs.Select(x => new DynamicJsonValue
                        {
                            [nameof(ServerNode.Url)] = GetUrl(x, clusterTopology),
                            [nameof(ServerNode.ClusterTag)] = x,
                            [nameof(ServerNode.Database)] = rawRecord.DatabaseName,
                            [nameof(ServerNode.ServerRole)] = ServerNode.Role.Rehab
                        }))
                    ),
                    [nameof(Topology.Etag)] = rawRecord.Topology.Stamp?.Index ?? -1
                });
            }
        }
    }
}
public async Task GetClusterWideInfoPackage()
{
    var contentDisposition = $"attachment; filename={DateTime.UtcNow:yyyy-MM-dd H:mm:ss} Cluster Wide.zip";
    HttpContext.Response.Headers["Content-Disposition"] = contentDisposition;

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext transactionOperationContext))
    using (ServerStore.ContextPool.AllocateOperationContext(out JsonOperationContext jsonOperationContext))
    using (transactionOperationContext.OpenReadTransaction())
    {
        using (var ms = new MemoryStream())
        {
            using (var archive = new ZipArchive(ms, ZipArchiveMode.Create, true))
            {
                var localEndpointClient = new LocalEndpointClient(Server);

                using (var localMemoryStream = new MemoryStream())
                {
                    // fall back to a placeholder name if the node tag is empty
                    var nodeName = $"Node - [{ServerStore.NodeTag ?? "Empty node tag"}]";

                    using (var localArchive = new ZipArchive(localMemoryStream, ZipArchiveMode.Create, true))
                    {
                        await WriteServerWide(localArchive, jsonOperationContext, localEndpointClient, _serverWidePrefix);
                        await WriteForAllLocalDatabases(localArchive, jsonOperationContext, localEndpointClient);
                        await WriteLogFile(localArchive);
                    }

                    localMemoryStream.Position = 0;
                    var entry = archive.CreateEntry($"{nodeName}.zip");
                    entry.ExternalAttributes = ((int)(FilePermissions.S_IRUSR | FilePermissions.S_IWUSR)) << 16;

                    using (var entryStream = entry.Open())
                    {
                        localMemoryStream.CopyTo(entryStream);
                        entryStream.Flush();
                    }
                }

                var databaseNames = ServerStore.Cluster.GetDatabaseNames(transactionOperationContext);
                var topology = ServerStore.GetClusterTopology(transactionOperationContext);

                // no databases are defined in the cluster,
                // so just output the server-wide endpoints from all cluster nodes
                if (databaseNames.Count == 0)
                {
                    foreach (var tagWithUrl in topology.AllNodes)
                    {
                        if (tagWithUrl.Value.Contains(ServerStore.GetNodeHttpServerUrl()))
                        {
                            continue;
                        }

                        try
                        {
                            await WriteDebugInfoPackageForNodeAsync(
                                jsonOperationContext,
                                archive,
                                tag: tagWithUrl.Key,
                                url: tagWithUrl.Value,
                                certificate: Server.Certificate.Certificate,
                                databaseNames: null);
                        }
                        catch (Exception e)
                        {
                            var entryName = $"Node - [{tagWithUrl.Key}]";
                            DebugInfoPackageUtils.WriteExceptionAsZipEntry(e, archive, entryName);
                        }
                    }
                }
                else
                {
                    var nodeUrlToDatabaseNames = CreateUrlToDatabaseNamesMapping(transactionOperationContext, databaseNames);

                    foreach (var urlToDatabaseNamesMap in nodeUrlToDatabaseNames)
                    {
                        if (urlToDatabaseNamesMap.Key.Contains(ServerStore.GetNodeHttpServerUrl()))
                        {
                            continue; // skip writing local data, we do it separately
                        }

                        try
                        {
                            await WriteDebugInfoPackageForNodeAsync(
                                jsonOperationContext,
                                archive,
                                tag: urlToDatabaseNamesMap.Value.Item2,
                                url: urlToDatabaseNamesMap.Key,
                                databaseNames: urlToDatabaseNamesMap.Value.Item1,
                                certificate: Server.Certificate.Certificate);
                        }
                        catch (Exception e)
                        {
                            var entryName = $"Node - [{urlToDatabaseNamesMap.Value.Item2}]";
                            DebugInfoPackageUtils.WriteExceptionAsZipEntry(e, archive, entryName);
                        }
                    }
                }
            }

            ms.Position = 0;
            await ms.CopyToAsync(ResponseBodyStream());
        }
    }
}
public async Task ApplyCommand()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        if (ServerStore.IsLeader() == false)
        {
            throw new NoLeaderException("Not a leader, cannot accept commands.");
        }

        HttpContext.Response.Headers["Reached-Leader"] = "true";

        var commandJson = await context.ReadForMemoryAsync(RequestBodyStream(), "external/rachis/command");

        CommandBase command;
        try
        {
            command = CommandBase.CreateFrom(commandJson);
        }
        catch (InvalidOperationException e)
        {
            RequestRouter.AssertClientVersion(HttpContext, e);
            throw;
        }

        switch (command)
        {
            case AddDatabaseCommand addDatabase:
                if (addDatabase.Record.Topology.Count == 0)
                {
                    ServerStore.AssignNodesToDatabase(ServerStore.GetClusterTopology(), addDatabase.Record);
                }
                break;

            case AddOrUpdateCompareExchangeBatchCommand batchCmpExchange:
                batchCmpExchange.ContextToWriteResult = context;
                break;

            case CompareExchangeCommandBase cmpExchange:
                cmpExchange.ContextToWriteResult = context;
                break;
        }

        var isClusterAdmin = IsClusterAdmin();
        command.VerifyCanExecuteCommand(ServerStore, context, isClusterAdmin);

        var (etag, result) = await ServerStore.Engine.PutAsync(command);

        HttpContext.Response.StatusCode = (int)HttpStatusCode.OK;
        var ms = context.CheckoutMemoryStream();
        try
        {
            using (var writer = new BlittableJsonTextWriter(context, ms))
            {
                context.Write(writer, new DynamicJsonValue
                {
                    [nameof(ServerStore.PutRaftCommandResult.RaftCommandIndex)] = etag,
                    [nameof(ServerStore.PutRaftCommandResult.Data)] = result
                });
                writer.Flush();
            }

            // now that we know that we properly serialized it
            ms.Position = 0;
            await ms.CopyToAsync(ResponseBodyStream());
        }
        finally
        {
            context.ReturnMemoryStream(ms);
        }
    }
}
protected RestoreBackupTaskBase(ServerStore serverStore, RestoreBackupConfigurationBase restoreFromConfiguration, string nodeTag, OperationCancelToken operationCancelToken)
{
    _serverStore = serverStore;
    RestoreFromConfiguration = restoreFromConfiguration;
    _nodeTag = nodeTag;
    _operationCancelToken = operationCancelToken;

    var dataDirectoryThatWillBeUsed = string.IsNullOrWhiteSpace(RestoreFromConfiguration.DataDirectory)
        ? _serverStore.Configuration.Core.DataDirectory.FullPath
        : new PathSetting(RestoreFromConfiguration.DataDirectory, _serverStore.Configuration.Core.DataDirectory.FullPath).FullPath;

    if (ResourceNameValidator.IsValidResourceName(RestoreFromConfiguration.DatabaseName, dataDirectoryThatWillBeUsed, out string errorMessage) == false)
    {
        throw new InvalidOperationException(errorMessage);
    }

    _serverStore.EnsureNotPassive();

    ClusterTopology clusterTopology;
    using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        if (_serverStore.Cluster.DatabaseExists(context, RestoreFromConfiguration.DatabaseName))
        {
            throw new ArgumentException($"Cannot restore data to an existing database named {RestoreFromConfiguration.DatabaseName}");
        }

        clusterTopology = _serverStore.GetClusterTopology(context);
    }

    _hasEncryptionKey = string.IsNullOrWhiteSpace(RestoreFromConfiguration.EncryptionKey) == false;
    if (_hasEncryptionKey)
    {
        var key = Convert.FromBase64String(RestoreFromConfiguration.EncryptionKey);
        if (key.Length != 256 / 8)
        {
            throw new InvalidOperationException($"The size of the key must be 256 bits, but was {key.Length * 8} bits.");
        }

        if (AdminDatabasesHandler.NotUsingHttps(clusterTopology.GetUrlFromTag(_serverStore.NodeTag)))
        {
            throw new InvalidOperationException("Cannot restore an encrypted database to a node which doesn't support SSL!");
        }
    }

    var backupEncryptionSettings = RestoreFromConfiguration.BackupEncryptionSettings;
    if (backupEncryptionSettings != null)
    {
        if (backupEncryptionSettings.EncryptionMode == EncryptionMode.UseProvidedKey && backupEncryptionSettings.Key == null)
        {
            throw new InvalidOperationException($"{nameof(BackupEncryptionSettings.EncryptionMode)} is set to {nameof(EncryptionMode.UseProvidedKey)} but an encryption key wasn't provided");
        }

        if (backupEncryptionSettings.EncryptionMode != EncryptionMode.UseProvidedKey && backupEncryptionSettings.Key != null)
        {
            throw new InvalidOperationException($"{nameof(BackupEncryptionSettings.EncryptionMode)} is set to {backupEncryptionSettings.EncryptionMode} but an encryption key was provided");
        }
    }

    var hasRestoreDataDirectory = string.IsNullOrWhiteSpace(RestoreFromConfiguration.DataDirectory) == false;
    if (hasRestoreDataDirectory && HasFilesOrDirectories(dataDirectoryThatWillBeUsed))
    {
        throw new ArgumentException("New data directory must be empty of any files or folders, " +
                                    $"path: {dataDirectoryThatWillBeUsed}");
    }

    if (hasRestoreDataDirectory == false)
    {
        RestoreFromConfiguration.DataDirectory = GetDataDirectory();
    }

    _restoringToDefaultDataDirectory = IsDefaultDataDirectory(RestoreFromConfiguration.DataDirectory, RestoreFromConfiguration.DatabaseName);
}
private void WriteDatabaseInfo(string databaseName, BlittableJsonReaderObject dbRecordBlittable, TransactionOperationContext context, AbstractBlittableJsonTextWriter writer)
{
    try
    {
        var online = ServerStore.DatabasesLandlord.DatabasesCache.TryGetValue(databaseName, out Task<DocumentDatabase> dbTask) &&
                     dbTask != null &&
                     dbTask.IsCompleted;

        // Check for exceptions
        if (dbTask != null && dbTask.IsFaulted)
        {
            var exception = dbTask.Exception.ExtractSingleInnerException();
            WriteFaultedDatabaseInfo(databaseName, exception, context, writer);
            return;
        }

        var dbRecord = JsonDeserializationCluster.DatabaseRecord(dbRecordBlittable);
        var db = online ? dbTask.Result : null;

        var indexingStatus = db?.IndexStore?.Status ?? IndexRunningStatus.Running;
        // Looking for disabled indexing flag inside the database settings for offline database status
        if (dbRecord.Settings.TryGetValue(RavenConfiguration.GetKey(x => x.Indexing.Disabled), out var val) &&
            bool.TryParse(val, out var indexingDisabled) && indexingDisabled)
        {
            indexingStatus = IndexRunningStatus.Disabled;
        }

        var disabled = dbRecord.Disabled;
        var topology = dbRecord.Topology;
        var clusterTopology = ServerStore.GetClusterTopology(context);
        clusterTopology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext);

        var studioEnvironment = StudioConfiguration.StudioEnvironment.None;
        if (dbRecord.Studio != null && !dbRecord.Studio.Disabled)
        {
            studioEnvironment = dbRecord.Studio.Environment;
        }

        var nodesTopology = new NodesTopology();
        var statuses = ServerStore.GetNodesStatuses();

        if (topology != null)
        {
            foreach (var member in topology.Members)
            {
                var url = clusterTopology.GetUrlFromTag(member);
                var node = new InternalReplication
                {
                    Database = databaseName,
                    NodeTag = member,
                    Url = url
                };
                nodesTopology.Members.Add(GetNodeId(node));
                SetNodeStatus(topology, member, nodesTopology, statuses);
            }

            foreach (var promotable in topology.Promotables)
            {
                topology.PredefinedMentors.TryGetValue(promotable, out var mentorCandidate);
                var node = GetNode(databaseName, clusterTopology, promotable, mentorCandidate, out var promotableTask);
                var mentor = topology.WhoseTaskIsIt(ServerStore.Engine.CurrentState, promotableTask, null);
                nodesTopology.Promotables.Add(GetNodeId(node, mentor));
                SetNodeStatus(topology, promotable, nodesTopology, statuses);
            }

            foreach (var rehab in topology.Rehabs)
            {
                var node = GetNode(databaseName, clusterTopology, rehab, null, out var promotableTask);
                var mentor = topology.WhoseTaskIsIt(ServerStore.Engine.CurrentState, promotableTask, null);
                nodesTopology.Rehabs.Add(GetNodeId(node, mentor));
                SetNodeStatus(topology, rehab, nodesTopology, statuses);
            }
        }

        if (online == false)
        {
            // if the state of the database is found in the cache we can continue
            if (ServerStore.DatabaseInfoCache.TryGet(databaseName, databaseInfoJson =>
            {
                databaseInfoJson.Modifications = new DynamicJsonValue(databaseInfoJson)
                {
                    [nameof(DatabaseInfo.Disabled)] = disabled,
                    [nameof(DatabaseInfo.IndexingStatus)] = indexingStatus.ToString(),
                    [nameof(DatabaseInfo.NodesTopology)] = nodesTopology.ToJson(),
                    [nameof(DatabaseInfo.DeletionInProgress)] = DynamicJsonValue.Convert(dbRecord.DeletionInProgress),
                    [nameof(DatabaseInfo.Environment)] = studioEnvironment
                };

                context.Write(writer, databaseInfoJson);
            }))
            {
                return;
            }

            // we won't find it if it is a new database or after a dirty shutdown,
            // so just report empty values then
        }

        var size = db?.GetSizeOnDisk() ?? (new Size(0), new Size(0));

        var databaseInfo = new DatabaseInfo
        {
            Name = databaseName,
            Disabled = disabled,
            TotalSize = size.Data,
            TempBuffersSize = size.TempBuffers,

            IsAdmin = true,
            IsEncrypted = dbRecord.Encrypted,
            UpTime = online ? (TimeSpan?)GetUptime(db) : null,
            BackupInfo = GetBackupInfo(db),

            Alerts = db?.NotificationCenter.GetAlertCount() ?? 0,
            RejectClients = false,
            LoadError = null,
            IndexingErrors = db?.IndexStore?.GetIndexes()?.Sum(index => index.GetErrorCount()) ?? 0,

            DocumentsCount = db?.DocumentsStorage.GetNumberOfDocuments() ?? 0,
            HasRevisionsConfiguration = db?.DocumentsStorage.RevisionsStorage.Configuration != null,
            HasExpirationConfiguration = db?.ExpiredDocumentsCleaner != null,
            IndexesCount = db?.IndexStore?.GetIndexes()?.Count() ?? 0,
            IndexingStatus = indexingStatus,
            Environment = studioEnvironment,

            NodesTopology = nodesTopology,
            ReplicationFactor = topology?.ReplicationFactor ?? -1,
            DynamicNodesDistribution = topology?.DynamicNodesDistribution ?? false,
            DeletionInProgress = dbRecord.DeletionInProgress
        };

        var doc = databaseInfo.ToJson();
        context.Write(writer, doc);
    }
    catch (Exception e)
    {
        if (Logger.IsInfoEnabled)
        {
            Logger.Info($"Failed to get database info for: {databaseName}", e);
        }

        WriteFaultedDatabaseInfo(databaseName, e, context, writer);
    }
}
public Task GetTopology()
{
    var name = GetQueryStringValueAndAssertIfSingleAndNotEmpty("name");

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        var dbId = Constants.Documents.Prefix + name;
        using (context.OpenReadTransaction())
        using (var dbBlit = ServerStore.Cluster.Read(context, dbId, out long _))
        {
            if (TryGetAllowedDbs(name, out var _, requireAdmin: false) == false)
            {
                return Task.CompletedTask;
            }

            if (dbBlit == null)
            {
                // here we return 503 so clients will try to failover to another server
                // if this is a newly created db that we haven't been notified about yet
                HttpContext.Response.StatusCode = (int)HttpStatusCode.ServiceUnavailable;
                HttpContext.Response.Headers["Database-Missing"] = name;
                using (var writer = new BlittableJsonTextWriter(context, HttpContext.Response.Body))
                {
                    context.Write(writer, new DynamicJsonValue
                    {
                        ["Type"] = "Error",
                        ["Message"] = "Database " + name + " wasn't found"
                    });
                }

                return Task.CompletedTask;
            }

            var clusterTopology = ServerStore.GetClusterTopology(context);
            if (ServerStore.IsPassive() && clusterTopology.TopologyId != null)
            {
                // we were kicked out of the cluster
                HttpContext.Response.StatusCode = (int)HttpStatusCode.ServiceUnavailable;
                return Task.CompletedTask;
            }

            clusterTopology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext);

            var dbRecord = JsonDeserializationCluster.DatabaseRecord(dbBlit);
            using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
            {
                context.Write(writer, new DynamicJsonValue
                {
                    [nameof(Topology.Nodes)] = new DynamicJsonArray(
                        dbRecord.Topology.Members.Select(x => new DynamicJsonValue
                        {
                            [nameof(ServerNode.Url)] = GetUrl(x, clusterTopology),
                            [nameof(ServerNode.ClusterTag)] = x,
                            [nameof(ServerNode.ServerRole)] = ServerNode.Role.Member,
                            [nameof(ServerNode.Database)] = dbRecord.DatabaseName
                        })
                        .Concat(dbRecord.Topology.Rehabs.Select(x => new DynamicJsonValue
                        {
                            [nameof(ServerNode.Url)] = GetUrl(x, clusterTopology),
                            [nameof(ServerNode.ClusterTag)] = x,
                            [nameof(ServerNode.Database)] = dbRecord.DatabaseName,
                            [nameof(ServerNode.ServerRole)] = ServerNode.Role.Rehab
                        }))
                    ),
                    [nameof(Topology.Etag)] = dbRecord.Topology.Stamp?.Index ?? -1
                });
            }
        }
    }

    return Task.CompletedTask;
}
public override void Initialize(ServerStore serverStore, TransactionOperationContext context)
{
    ClusterNodes = serverStore.GetClusterTopology(context).AllNodes.Keys.ToArray();
}