// Resolves identity-based document IDs (e.g. "users|") for a batch of commands:
// asks the cluster for one new identity value per requested prefix, then rewrites
// each affected command's ID to "prefix/value" and clears its change vector.
// Throws via ThrowInvalidUsageOfChangeVectorWithIdentities if a command combines
// an identity ID with an explicit change vector.
private static async Task GetIdentitiesValues(JsonOperationContext ctx, DocumentDatabase database, ServerStore serverStore, List<string> identities, List<int> positionInListToCommandIndex, CommandData[] cmds)
{
    var generatedIds = await serverStore.GenerateClusterIdentitiesBatchAsync(database.Name, identities);
    Debug.Assert(generatedIds.Count == identities.Count);

    var emptyVector = ctx.GetLazyString("");

    for (var i = 0; i < positionInListToCommandIndex.Count; i++)
    {
        var cmdIndex = positionInListToCommandIndex[i];

        // Drop the trailing identity marker and append the generated value,
        // e.g. "users|" -> "users/7".
        cmds[cmdIndex].Id = cmds[cmdIndex].Id.Substring(0, cmds[cmdIndex].Id.Length - 1) + "/" + generatedIds[i];

        // An identity put cannot carry a concurrency check - reject it.
        if (string.IsNullOrEmpty(cmds[cmdIndex].ChangeVector) == false)
        {
            ThrowInvalidUsageOfChangeVectorWithIdentities(cmds[cmdIndex]);
        }

        cmds[cmdIndex].ChangeVector = emptyVector;
    }
}
// SNMP scalar reporting the server's license type; registered under OID suffix "1.9.1".
public ServerLicenseType(ServerStore store) : base("1.9.1")
{
    _store = store;
}
// Applies committed Raft log entries, from the last commit index up to (at most)
// uptoInclusive, to the state machine. May stop early once 25% of the election
// timeout has elapsed so that a slow apply on this node doesn't cause it to time
// out the leader. Persists and returns the index of the last entry actually applied.
public long Apply(ClusterOperationContext context, long uptoInclusive, Leader leader, ServerStore serverStore, Stopwatch duration)
{
    Debug.Assert(context.Transaction != null);

    var lastAppliedIndex = _parent.GetLastCommitIndex(context);

    // Time budget: at most a quarter of the election timeout for this batch.
    var maxTimeAllowedToWaitForApply = _parent.Timeout.TimeoutPeriod / 4;

    for (var index = lastAppliedIndex + 1; index <= uptoInclusive; index++)
    {
        var cmd = _parent.GetEntry(context, index, out RachisEntryFlags flags);
        if (cmd == null || flags == RachisEntryFlags.Invalid)
        {
            // A committed entry must be present in the log; missing means corruption.
            throw new InvalidOperationException("Expected to apply entry " + index + " but it isn't stored");
        }

        lastAppliedIndex = index;

        if (flags != RachisEntryFlags.StateMachineCommand)
        {
            // Entries that aren't state-machine commands are only recorded in the history log.
            _parent.LogHistory.UpdateHistoryLog(context, index, _parent.CurrentTerm, cmd, null, null);
            continue;
        }

        Apply(context, cmd, index, leader, serverStore);

        if (duration.ElapsedMilliseconds >= maxTimeAllowedToWaitForApply)
        {
            // we don't want to spend so much time applying commands that we will time out the leader
            // so we time this from the follower perspective and abort after applying a single command
            // or 25% of the time has already passed
            break;
        }
    }

    // Persist the new commit point with the term of the last applied entry.
    var term = _parent.GetTermForKnownExisting(context, lastAppliedIndex);
    _parent.SetLastCommitIndex(context, lastAppliedIndex, term);

    return (lastAppliedIndex);
}
// SNMP scalar exposing the total database writes per second across all databases,
// registered under the databases-general "TotalWritesPerSecond" OID.
public TotalDatabaseWritesPerSecond(ServerStore serverStore) : base(serverStore, SnmpOids.Databases.General.TotalWritesPerSecond)
{
}
// Resolves the requested backup path to its actual full directory and writes the
// directory information (optionally including per-node info, with the given request
// timeout) to the supplied response stream.
public static async Task GetFullBackupDataDirectory(string path, int requestTimeoutInMs, bool getNodesInfo, ServerStore serverStore, Stream responseStream)
{
    // Resolve the configured path into an absolute folder plus any resolution error.
    var resolved = GetActualFullPath(serverStore, path);

    var directoryInfo = new DataDirectoryInfo(serverStore, resolved.FolderPath, null, isBackup: true, getNodesInfo, requestTimeoutInMs, responseStream);
    await directoryInfo.UpdateDirectoryResult(databaseName: null, error: resolved.Error);
}
// Creates a RavenDB -> RavenDB ETL process for the given transformation and configuration.
// In test mode no request executor is created, so nothing is sent to a destination;
// otherwise the process also subscribes to server-certificate changes
// (presumably to refresh the executor's certificate - confirm in OnServerCertificateChanged).
public RavenEtl(Transformation transformation, RavenEtlConfiguration configuration, DocumentDatabase database, ServerStore serverStore) : base(transformation, configuration, database, serverStore, RavenEtlTag)
{
    _configuration = configuration;
    _serverStore = serverStore;

    Metrics = new EtlMetricsCountersManager();

    if (configuration.TestMode == false)
    {
        _requestExecutor = CreateNewRequestExecutor(configuration, serverStore);
        _serverStore.Server.ServerCertificateChanged += OnServerCertificateChanged;
    }

    _script = new RavenEtlDocumentTransformer.ScriptInput(transformation);
}
// Creates a RavenDB -> RavenDB ETL process, always building a request executor from the
// destination's topology-discovery URLs using the server's current certificate and the
// default client conventions.
// NOTE(review): this variant passes the server certificate unconditionally - there is no
// handling for ETL over a non-encrypted channel, and no refresh when the server
// certificate changes. Confirm this is intended for this version of the class.
public RavenEtl(Transformation transformation, RavenEtlConfiguration configuration, DocumentDatabase database, ServerStore serverStore) : base(transformation, configuration, database, serverStore, RavenEtlTag)
{
    Metrics = new EtlMetricsCountersManager();
    _requestExecutor = RequestExecutor.Create(configuration.Connection.TopologyDiscoveryUrls, configuration.Connection.Database, serverStore.Server.Certificate.Certificate, DocumentConventions.Default);
    _script = new RavenEtlDocumentTransformer.ScriptInput(transformation);
}
// Authorization hook: the base implementation intentionally allows execution.
public virtual void VerifyCanExecuteCommand(ServerStore store, TransactionOperationContext context, bool isClusterAdmin)
{
    // sub classes can assert what their required clearance
    // should be to execute this command, the minimum level
    // is operator
}
// Base setup for a database-restore operation. Validates the target database name and
// data directory, verifies the database doesn't already exist in the cluster, checks
// encryption prerequisites (key size, HTTPS), validates backup-encryption settings,
// and decides which data directory the restored database will use.
protected RestoreBackupTaskBase(ServerStore serverStore, RestoreBackupConfigurationBase restoreFromConfiguration, string nodeTag, OperationCancelToken operationCancelToken)
{
    _serverStore = serverStore;
    RestoreFromConfiguration = restoreFromConfiguration;
    _nodeTag = nodeTag;
    _operationCancelToken = operationCancelToken;

    // Either the explicitly requested data directory (resolved against the server's
    // data directory) or the server default.
    var dataDirectoryThatWillBeUsed = string.IsNullOrWhiteSpace(RestoreFromConfiguration.DataDirectory) ?
        _serverStore.Configuration.Core.DataDirectory.FullPath :
        new PathSetting(RestoreFromConfiguration.DataDirectory, _serverStore.Configuration.Core.DataDirectory.FullPath).FullPath;

    if (ResourceNameValidator.IsValidResourceName(RestoreFromConfiguration.DatabaseName, dataDirectoryThatWillBeUsed, out string errorMessage) == false)
    {
        throw new InvalidOperationException(errorMessage);
    }

    // A passive node cannot host a database - make sure we are part of a cluster.
    _serverStore.EnsureNotPassive();

    ClusterTopology clusterTopology;
    using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        // Restore must target a name that does not already exist in the cluster.
        if (_serverStore.Cluster.DatabaseExists(context, RestoreFromConfiguration.DatabaseName))
        {
            throw new ArgumentException($"Cannot restore data to an existing database named {RestoreFromConfiguration.DatabaseName}");
        }

        clusterTopology = _serverStore.GetClusterTopology(context);
    }

    _hasEncryptionKey = string.IsNullOrWhiteSpace(RestoreFromConfiguration.EncryptionKey) == false;
    if (_hasEncryptionKey)
    {
        // The database encryption key must be exactly 256 bits...
        var key = Convert.FromBase64String(RestoreFromConfiguration.EncryptionKey);
        if (key.Length != 256 / 8)
        {
            throw new InvalidOperationException($"The size of the key must be 256 bits, but was {key.Length * 8} bits.");
        }

        // ...and restoring an encrypted database requires this node to be served over HTTPS.
        if (AdminDatabasesHandler.NotUsingHttps(clusterTopology.GetUrlFromTag(_serverStore.NodeTag)))
        {
            throw new InvalidOperationException("Cannot restore an encrypted database to a node which doesn't support SSL!");
        }
    }

    // The backup-encryption mode and the presence of a key must agree with each other.
    var backupEncryptionSettings = RestoreFromConfiguration.BackupEncryptionSettings;
    if (backupEncryptionSettings != null)
    {
        if (backupEncryptionSettings.EncryptionMode == EncryptionMode.UseProvidedKey && backupEncryptionSettings.Key == null)
        {
            throw new InvalidOperationException($"{nameof(BackupEncryptionSettings.EncryptionMode)} is set to {nameof(EncryptionMode.UseProvidedKey)} but an encryption key wasn't provided");
        }

        if (backupEncryptionSettings.EncryptionMode != EncryptionMode.UseProvidedKey && backupEncryptionSettings.Key != null)
        {
            throw new InvalidOperationException($"{nameof(BackupEncryptionSettings.EncryptionMode)} is set to {backupEncryptionSettings.EncryptionMode} but an encryption key was provided");
        }
    }

    // An explicitly requested restore directory must be empty; otherwise fall back
    // to a generated data directory.
    var hasRestoreDataDirectory = string.IsNullOrWhiteSpace(RestoreFromConfiguration.DataDirectory) == false;
    if (hasRestoreDataDirectory && HasFilesOrDirectories(dataDirectoryThatWillBeUsed))
    {
        throw new ArgumentException("New data directory must be empty of any files or folders, " + $"path: {dataDirectoryThatWillBeUsed}");
    }

    if (hasRestoreDataDirectory == false)
    {
        RestoreFromConfiguration.DataDirectory = GetDataDirectory();
    }

    _restoringToDefaultDataDirectory = IsDefaultDataDirectory(RestoreFromConfiguration.DataDirectory, RestoreFromConfiguration.DatabaseName);
}
// Console message that needs access to cluster state; output is written to tw.
public ClusterMessage(TextWriter tw, ServerStore server) : base(tw)
{
    _server = server;
}
// Initialization hook for derived types; the base implementation intentionally does nothing.
public virtual void Initialize(ServerStore serverStore, TransactionOperationContext context)
{
}
// Worker that reports this node's maintenance samples to the cluster leader for the
// given term over an already-established TCP connection.
public ClusterMaintenanceWorker(TcpConnectionOptions tcp, CancellationToken externalToken, ServerStore serverStore, string leader, long term)
{
    _tcp = tcp;
    // Link to the external token so either the caller or this worker can cancel.
    _cts = CancellationTokenSource.CreateLinkedTokenSource(externalToken);
    _token = _cts.Token;
    _server = serverStore;
    _logger = LoggingSource.Instance.GetLogger<ClusterMaintenanceWorker>(serverStore.NodeTag);
    _name = $"Maintenance worker connection to leader {leader} in term {term}";
    // Sample period comes from the cluster configuration.
    WorkerSamplePeriod = _server.Configuration.Cluster.WorkerSamplePeriod.AsTimeSpan;
    CurrentTerm = term;
}
// Collects the dashboard notifications (databases info, indexing speed, traffic watch
// and drive usage) for every database record that isValidFor accepts. Databases that
// are offline/disabled (or fail during sampling) get a reduced "offline" entry; online
// ones are sampled live, with a per-database cache keyed by the storage-environments
// hash to avoid rebuilding unchanged info. Stops yielding once cts is cancelled.
public static IEnumerable<AbstractDashboardNotification> FetchDatabasesInfo(ServerStore serverStore, Func<string, bool> isValidFor, CancellationTokenSource cts)
{
    var databasesInfo = new DatabasesInfo();
    var indexingSpeed = new IndexingSpeed();
    var trafficWatch = new TrafficWatch();
    var drivesUsage = new DrivesUsage();

    using (serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext transactionContext))
    using (transactionContext.OpenReadTransaction())
    {
        foreach (var databaseTuple in serverStore.Cluster.ItemsStartingWith(transactionContext, Constants.Documents.Prefix, 0, int.MaxValue))
        {
            // Cluster items are stored with a "db/" style prefix; strip it to get the name.
            var databaseName = databaseTuple.ItemName.Substring(Constants.Documents.Prefix.Length);

            if (cts.IsCancellationRequested)
            {
                yield break;
            }

            if (isValidFor != null && isValidFor(databaseName) == false)
            {
                continue;
            }

            if (serverStore.DatabasesLandlord.DatabasesCache.TryGetValue(databaseName, out var databaseTask) == false)
            {
                // database does not exist on this server, is offline or disabled
                SetOfflineDatabaseInfo(serverStore, transactionContext, databaseName, databasesInfo, drivesUsage, disabled: IsDatabaseDisabled(databaseTuple.Value));
                continue;
            }

            try
            {
                var databaseOnline = IsDatabaseOnline(databaseTask, out var database);
                if (databaseOnline == false)
                {
                    SetOfflineDatabaseInfo(serverStore, transactionContext, databaseName, databasesInfo, drivesUsage, disabled: false);
                    continue;
                }

                // Per-database indexing throughput (one-second rates).
                var indexingSpeedItem = new IndexingSpeedItem
                {
                    Database = database.Name,
                    IndexedPerSecond = database.Metrics.MapIndexes.IndexedPerSec.OneSecondRate,
                    MappedPerSecond = database.Metrics.MapReduceIndexes.MappedPerSec.OneSecondRate,
                    ReducedPerSecond = database.Metrics.MapReduceIndexes.ReducedPerSec.OneSecondRate
                };
                indexingSpeed.Items.Add(indexingSpeedItem);

                var replicationFactor = GetReplicationFactor(databaseTuple.Value);
                var documentsStorage = database.DocumentsStorage;
                var indexStorage = database.IndexStore;

                // Per-database request/write traffic (one-second rates).
                var trafficWatchItem = new TrafficWatchItem
                {
                    Database = database.Name,
                    RequestsPerSecond = (int)database.Metrics.Requests.RequestsPerSec.OneSecondRate,
                    DocumentWritesPerSecond = (int)database.Metrics.Docs.PutsPerSec.OneSecondRate,
                    AttachmentWritesPerSecond = (int)database.Metrics.Attachments.PutsPerSec.OneSecondRate,
                    CounterWritesPerSecond = (int)database.Metrics.Counters.PutsPerSec.OneSecondRate,
                    DocumentsWriteBytesPerSecond = database.Metrics.Docs.BytesPutsPerSec.OneSecondRate,
                    AttachmentsWriteBytesPerSecond = database.Metrics.Attachments.BytesPutsPerSec.OneSecondRate,
                    CountersWriteBytesPerSecond = database.Metrics.Counters.BytesPutsPerSec.OneSecondRate
                };
                trafficWatch.Items.Add(trafficWatchItem);

                // Reuse the cached info when the storage environments are unchanged.
                var currentEnvironmentsHash = database.GetEnvironmentsHash();
                if (CachedDatabaseInfo.TryGetValue(database.Name, out var item) && item.Hash == currentEnvironmentsHash)
                {
                    databasesInfo.Items.Add(item.Item);

                    if (item.NextDiskSpaceCheck < SystemTime.UtcNow)
                    {
                        // Disk-space info is stale - re-check it now.
                        item.MountPoints = new List<Client.ServerWide.Operations.MountPointUsage>();
                        DiskUsageCheck(item, database, drivesUsage, cts);
                    }
                    else
                    {
                        foreach (var cachedMountPoint in item.MountPoints)
                        {
                            UpdateMountPoint(cachedMountPoint, database.Name, drivesUsage);
                        }
                    }
                }
                else
                {
                    // Cache miss (or environments changed): build a fresh item and cache it.
                    using (documentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
                    using (documentsContext.OpenReadTransaction())
                    {
                        var databaseInfoItem = new DatabaseInfoItem
                        {
                            Database = database.Name,
                            DocumentsCount = documentsStorage.GetNumberOfDocuments(documentsContext),
                            IndexesCount = database.IndexStore.Count,
                            AlertsCount = database.NotificationCenter.GetAlertCount(),
                            ReplicationFactor = replicationFactor,
                            ErroredIndexesCount = indexStorage.GetIndexes().Count(index => index.GetErrorCount() > 0),
                            Online = true
                        };
                        databasesInfo.Items.Add(databaseInfoItem);
                        CachedDatabaseInfo[database.Name] = item = new DatabaseInfoCache
                        {
                            Hash = currentEnvironmentsHash,
                            Item = databaseInfoItem
                        };
                    }

                    DiskUsageCheck(item, database, drivesUsage, cts);
                }
            }
            catch (Exception)
            {
                // Any failure while sampling downgrades the database to an offline entry.
                SetOfflineDatabaseInfo(serverStore, transactionContext, databaseName, databasesInfo, drivesUsage, disabled: false);
            }
        }
    }

    yield return (databasesInfo);
    yield return (indexingSpeed);
    yield return (trafficWatch);
    yield return (drivesUsage);
}
// Fills databaseInfoItem for a database from the last persisted DatabaseInfo snapshot
// (serverStore.DatabaseInfoCache), preferring current values from the database record's
// topology where available, and refreshes drive usage for the snapshot's mount points.
// Does nothing when no cached info exists for the database.
private static void UpdateDatabaseInfo(BlittableJsonReaderObject databaseRecord, ServerStore serverStore, string databaseName, DrivesUsage existingDrivesUsage, DatabaseInfoItem databaseInfoItem)
{
    DatabaseInfo databaseInfo = null;
    if (serverStore.DatabaseInfoCache.TryGet(databaseName, databaseInfoJson => databaseInfo = JsonDeserializationServer.DatabaseInfo(databaseInfoJson)) == false)
    {
        return;
    }

    Debug.Assert(databaseInfo != null);

    var databaseTopology = serverStore.Cluster.ReadDatabaseTopology(databaseRecord);
    databaseRecord.TryGet(nameof(DatabaseRecord.Indexes), out BlittableJsonReaderObject indexes);
    var indexesCount = indexes?.Count ?? 0;

    // Cached values win where present; otherwise fall back to record-derived counts.
    databaseInfoItem.DocumentsCount = databaseInfo.DocumentsCount ?? 0;
    databaseInfoItem.IndexesCount = databaseInfo.IndexesCount ?? indexesCount;
    databaseInfoItem.ReplicationFactor = databaseTopology?.ReplicationFactor ?? databaseInfo.ReplicationFactor;
    databaseInfoItem.ErroredIndexesCount = databaseInfo.IndexingErrors ?? 0;

    if (databaseInfo.MountPointsUsage == null)
    {
        return;
    }

    foreach (var mountPointUsage in databaseInfo.MountPointsUsage)
    {
        var driveName = mountPointUsage.DiskSpaceResult.DriveName;
        var diskSpaceResult = DiskSpaceChecker.GetDiskSpaceInfo(
            mountPointUsage.DiskSpaceResult.DriveName,
            new DriveInfoBase
            {
                DriveName = driveName
            });

        if (diskSpaceResult != null)
        {
            // update the latest drive info
            mountPointUsage.DiskSpaceResult = new Client.ServerWide.Operations.DiskSpaceResult
            {
                DriveName = diskSpaceResult.DriveName,
                VolumeLabel = diskSpaceResult.VolumeLabel,
                TotalFreeSpaceInBytes = diskSpaceResult.TotalFreeSpace.GetValue(SizeUnit.Bytes),
                TotalSizeInBytes = diskSpaceResult.TotalSize.GetValue(SizeUnit.Bytes)
            };
        }

        UpdateMountPoint(mountPointUsage, databaseName, existingDrivesUsage);
    }
}
// Writes a DatabaseInfo JSON document for one database to the response writer:
// cluster topology (members/promotables/rehabs plus node statuses), indexing status,
// sizes, counts and studio environment. For an offline database it first tries the
// persisted DatabaseInfoCache entry (patched with the current cluster-derived state);
// if none exists it reports empty/default values. A faulted database load, or any
// error here, is reported via WriteFaultedDatabaseInfo instead.
private void WriteDatabaseInfo(string databaseName, BlittableJsonReaderObject dbRecordBlittable, TransactionOperationContext context, AbstractBlittableJsonTextWriter writer)
{
    try
    {
        // "Online" means the landlord has a completed load task for this database.
        var online = ServerStore.DatabasesLandlord.DatabasesCache.TryGetValue(databaseName, out Task<DocumentDatabase> dbTask) &&
                     dbTask != null &&
                     dbTask.IsCompleted;

        // Check for exceptions
        if (dbTask != null && dbTask.IsFaulted)
        {
            var exception = dbTask.Exception.ExtractSingleInnerException();
            WriteFaultedDatabaseInfo(databaseName, exception, context, writer);
            return;
        }

        var dbRecord = JsonDeserializationCluster.DatabaseRecord(dbRecordBlittable);
        var db = online ? dbTask.Result : null;

        var indexingStatus = db?.IndexStore?.Status ?? IndexRunningStatus.Running;
        // Looking for disabled indexing flag inside the database settings for offline database status
        if (dbRecord.Settings.TryGetValue(RavenConfiguration.GetKey(x => x.Indexing.Disabled), out var val) &&
            bool.TryParse(val, out var indexingDisabled) && indexingDisabled)
        {
            indexingStatus = IndexRunningStatus.Disabled;
        }

        var disabled = dbRecord.Disabled;
        var topology = dbRecord.Topology;
        var clusterTopology = ServerStore.GetClusterTopology(context);
        clusterTopology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext);

        var studioEnvironment = StudioConfiguration.StudioEnvironment.None;
        if (dbRecord.Studio != null && !dbRecord.Studio.Disabled)
        {
            studioEnvironment = dbRecord.Studio.Environment;
        }

        // Build the per-node topology view (members, promotables with mentors, rehabs).
        var nodesTopology = new NodesTopology();
        var statuses = ServerStore.GetNodesStatuses();
        if (topology != null)
        {
            foreach (var member in topology.Members)
            {
                var url = clusterTopology.GetUrlFromTag(member);
                var node = new InternalReplication
                {
                    Database = databaseName,
                    NodeTag = member,
                    Url = url
                };
                nodesTopology.Members.Add(GetNodeId(node));
                SetNodeStatus(topology, member, nodesTopology, statuses);
            }

            foreach (var promotable in topology.Promotables)
            {
                // A promotable may have a predefined mentor recorded in the topology.
                topology.PredefinedMentors.TryGetValue(promotable, out var mentorCandidate);
                var node = GetNode(databaseName, clusterTopology, promotable, mentorCandidate, out var promotableTask);
                var mentor = topology.WhoseTaskIsIt(ServerStore.Engine.CurrentState, promotableTask, null);
                nodesTopology.Promotables.Add(GetNodeId(node, mentor));
                SetNodeStatus(topology, promotable, nodesTopology, statuses);
            }

            foreach (var rehab in topology.Rehabs)
            {
                var node = GetNode(databaseName, clusterTopology, rehab, null, out var promotableTask);
                var mentor = topology.WhoseTaskIsIt(ServerStore.Engine.CurrentState, promotableTask, null);
                nodesTopology.Rehabs.Add(GetNodeId(node, mentor));
                SetNodeStatus(topology, rehab, nodesTopology, statuses);
            }
        }

        if (online == false)
        {
            // if state of database is found in the cache we can continue
            if (ServerStore.DatabaseInfoCache.TryGet(databaseName, databaseInfoJson =>
            {
                // Patch the cached snapshot with the current cluster-derived state.
                databaseInfoJson.Modifications = new DynamicJsonValue(databaseInfoJson)
                {
                    [nameof(DatabaseInfo.Disabled)] = disabled,
                    [nameof(DatabaseInfo.IndexingStatus)] = indexingStatus.ToString(),
                    [nameof(DatabaseInfo.NodesTopology)] = nodesTopology.ToJson(),
                    [nameof(DatabaseInfo.DeletionInProgress)] = DynamicJsonValue.Convert(dbRecord.DeletionInProgress),
                    [nameof(DatabaseInfo.Environment)] = studioEnvironment
                };
                context.Write(writer, databaseInfoJson);
            }))
            {
                return;
            }

            // we won't find it if it is a new database or after a dirty shutdown,
            // so just report empty values then
        }

        var size = db?.GetSizeOnDisk() ?? (new Size(0), new Size(0));

        var databaseInfo = new DatabaseInfo
        {
            Name = databaseName,
            Disabled = disabled,
            TotalSize = size.Data,
            TempBuffersSize = size.TempBuffers,
            IsAdmin = true,
            IsEncrypted = dbRecord.Encrypted,
            UpTime = online ? (TimeSpan?)GetUptime(db) : null,
            BackupInfo = GetBackupInfo(db),
            Alerts = db?.NotificationCenter.GetAlertCount() ?? 0,
            RejectClients = false,
            LoadError = null,
            IndexingErrors = db?.IndexStore?.GetIndexes()?.Sum(index => index.GetErrorCount()) ?? 0,
            DocumentsCount = db?.DocumentsStorage.GetNumberOfDocuments() ?? 0,
            HasRevisionsConfiguration = db?.DocumentsStorage.RevisionsStorage.Configuration != null,
            HasExpirationConfiguration = db?.ExpiredDocumentsCleaner != null,
            IndexesCount = db?.IndexStore?.GetIndexes()?.Count() ?? 0,
            IndexingStatus = indexingStatus,
            Environment = studioEnvironment,
            NodesTopology = nodesTopology,
            ReplicationFactor = topology?.ReplicationFactor ?? -1,
            DynamicNodesDistribution = topology?.DynamicNodesDistribution ?? false,
            DeletionInProgress = dbRecord.DeletionInProgress
        };

        var doc = databaseInfo.ToJson();
        context.Write(writer, doc);
    }
    catch (Exception e)
    {
        if (Logger.IsInfoEnabled)
        {
            Logger.Info($"Failed to get database info for: {databaseName}", e);
        }

        WriteFaultedDatabaseInfo(databaseName, e, context, writer);
    }
}
// Hook invoked after a Raft snapshot is installed; the base implementation has nothing to do.
public virtual Task OnSnapshotInstalledAsync(long lastIncludedIndex, ServerStore serverStore, CancellationToken token)
{
    return (Task.CompletedTask);
}
// Returns the requested database's topology (members and rehabs with their URLs).
// Responds 503 with a "Database-Missing" header when the database record isn't found
// (so clients fail over), and plain 503 when this node was kicked out of the cluster
// (passive but a topology id exists). Returns silently when the client isn't allowed
// to access the database.
public Task GetTopology()
{
    var name = GetQueryStringValueAndAssertIfSingleAndNotEmpty("name");
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        var dbId = Constants.Documents.Prefix + name;
        using (context.OpenReadTransaction())
        using (var dbBlit = ServerStore.Cluster.Read(context, dbId, out long _))
        {
            if (TryGetAllowedDbs(name, out var _, requireAdmin: false) == false)
            {
                return (Task.CompletedTask);
            }

            if (dbBlit == null)
            {
                // here we return 503 so clients will try to failover to another server
                // if this is a newly created db that we haven't been notified about it yet
                HttpContext.Response.StatusCode = (int)HttpStatusCode.ServiceUnavailable;
                HttpContext.Response.Headers["Database-Missing"] = name;
                using (var writer = new BlittableJsonTextWriter(context, HttpContext.Response.Body))
                {
                    context.Write(writer, new DynamicJsonValue
                    {
                        ["Type"] = "Error",
                        ["Message"] = "Database " + name + " wasn't found"
                    });
                }
                return (Task.CompletedTask);
            }

            var clusterTopology = ServerStore.GetClusterTopology(context);
            if (ServerStore.IsPassive() && clusterTopology.TopologyId != null)
            {
                // we were kicked-out from the cluster
                HttpContext.Response.StatusCode = (int)HttpStatusCode.ServiceUnavailable;
                return (Task.CompletedTask);
            }

            clusterTopology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext);
            var dbRecord = JsonDeserializationCluster.DatabaseRecord(dbBlit);

            using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
            {
                context.Write(writer, new DynamicJsonValue
                {
                    // Nodes = members (Role.Member) followed by rehabs (Role.Rehab).
                    [nameof(Topology.Nodes)] = new DynamicJsonArray(
                        dbRecord.Topology.Members.Select(x => new DynamicJsonValue
                        {
                            [nameof(ServerNode.Url)] = GetUrl(x, clusterTopology),
                            [nameof(ServerNode.ClusterTag)] = x,
                            [nameof(ServerNode.ServerRole)] = ServerNode.Role.Member,
                            [nameof(ServerNode.Database)] = dbRecord.DatabaseName
                        })
                        .Concat(dbRecord.Topology.Rehabs.Select(x => new DynamicJsonValue
                        {
                            [nameof(ServerNode.Url)] = GetUrl(x, clusterTopology),
                            [nameof(ServerNode.ClusterTag)] = x,
                            [nameof(ServerNode.Database)] = dbRecord.DatabaseName,
                            [nameof(ServerNode.ServerRole)] = ServerNode.Role.Rehab
                        })
                        )
                    ),
                    [nameof(Topology.Etag)] = dbRecord.Topology.Stamp?.Index ?? -1
                });
            }
        }
    }
    return (Task.CompletedTask);
}
private static void BeforeSchemaUpgrade(StorageEnvironment storageEnvironment, ServerStore serverStore) { // doing this before the schema upgrade to allow to downgrade in case we cannot start the server using (var contextPool = new TransactionContextPool(storageEnvironment, serverStore.Configuration.Memory.MaxContextSizeToKeep)) { var license = serverStore.LoadLicense(contextPool); if (license == null) { return; } var licenseStatus = LicenseManager.GetLicenseStatus(license); if (licenseStatus.Expiration >= RavenVersionAttribute.Instance.ReleaseDate) { return; } string licenseJson = null; var fromPath = false; if (string.IsNullOrEmpty(serverStore.Configuration.Licensing.License) == false) { licenseJson = serverStore.Configuration.Licensing.License; } else if (File.Exists(serverStore.Configuration.Licensing.LicensePath.FullPath)) { try { licenseJson = File.ReadAllText(serverStore.Configuration.Licensing.LicensePath.FullPath); fromPath = true; } catch { // expected } } var errorMessage = $"Cannot start the RavenDB server because the expiration date of current license ({FormattedDateTime(licenseStatus.Expiration ?? DateTime.MinValue)}) " + $"is before the release date of this version ({FormattedDateTime(RavenVersionAttribute.Instance.ReleaseDate)})"; string expiredLicenseMessage = ""; if (string.IsNullOrEmpty(licenseJson) == false) { if (LicenseHelper.TryDeserializeLicense(licenseJson, out License localLicense)) { var localLicenseStatus = LicenseManager.GetLicenseStatus(localLicense); if (localLicenseStatus.Expiration >= RavenVersionAttribute.Instance.ReleaseDate) { serverStore.LicenseManager.OnBeforeInitialize += () => serverStore.LicenseManager.TryActivateLicenseAsync(throwOnActivationFailure: serverStore.Server.ThrowOnLicenseActivationFailure).Wait(serverStore.ServerShutdown); return; } var configurationKey = fromPath ? 
RavenConfiguration.GetKey(x => x.Licensing.LicensePath) : RavenConfiguration.GetKey(x => x.Licensing.License); expiredLicenseMessage = localLicense.Id == license.Id ? ". You can update current license using the setting.json file" : $". The license '{localLicense.Id}' obtained from '{configurationKey}' with expiration date of '{FormattedDateTime(localLicenseStatus.Expiration ?? DateTime.MinValue)}' is also expired."; } else { errorMessage += ". Could not parse the license from setting.json file."; throw new LicenseExpiredException(errorMessage); } } var licenseStorage = new LicenseStorage(); licenseStorage.Initialize(storageEnvironment, contextPool); var buildInfo = licenseStorage.GetBuildInfo(); if (buildInfo != null) { errorMessage += $" You can downgrade to the latest build that was working ({buildInfo.FullVersion})"; } if (string.IsNullOrEmpty(expiredLicenseMessage) == false) { errorMessage += expiredLicenseMessage; } throw new LicenseExpiredException(errorMessage);
// Builds the request executor used to push ETL data to the destination.
// If this (HTTPS) server is explicitly allowed to ETL over a non-encrypted channel,
// the client certificate is dropped so the executor's URL validation does not
// reject the plain-HTTP destination URLs.
private static RequestExecutor CreateNewRequestExecutor(RavenEtlConfiguration configuration, ServerStore serverStore)
{
    var certificate = serverStore.Server.Certificate.Certificate;

    var sendingFromHttpsToPlainHttp =
        certificate != null &&
        configuration.UsingEncryptedCommunicationChannel() == false &&
        configuration.AllowEtlOnNonEncryptedChannel;

    if (sendingFromHttpsToPlainHttp)
    {
        // we're running on HTTPS but sending data to non encrypted server
        // let's not provide the server certificate so we won't fail on request executor's URL validation
        certificate = null;
    }

    return (RequestExecutor.Create(configuration.Connection.TopologyDiscoveryUrls, configuration.Connection.Database, certificate, DocumentConventions.DefaultForServer));
}
// Builds (but does not start) a database instance: loads the encryption master key,
// validates it against the database record, and constructs all subsystems (storage,
// indexes, ETL, replication, subscriptions, notification center, operations, ...).
// On any failure the partially-constructed instance is disposed before rethrowing.
public DocumentDatabase(string name, RavenConfiguration configuration, ServerStore serverStore, Action<string> addToInitLog)
{
    Name = name;
    _logger = LoggingSource.Instance.GetLogger<DocumentDatabase>(Name);
    _serverStore = serverStore;
    _addToInitLog = addToInitLog;
    StartTime = Time.GetUtcNow();
    LastAccessTime = Time.GetUtcNow();
    Configuration = configuration;
    Scripts = new ScriptRunnerCache(this, Configuration);

    try
    {
        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            MasterKey = serverStore.GetSecretKey(ctx, Name);

            var databaseRecord = _serverStore.Cluster.ReadDatabase(ctx, Name);
            if (databaseRecord != null)
            {
                // can happen when we are in the process of restoring a database
                // The encryption flag in the record and the presence of a secret key must match.
                if (databaseRecord.Encrypted && MasterKey == null)
                {
                    throw new InvalidOperationException($"Attempt to create encrypted db {Name} without supplying the secret key");
                }
                if (databaseRecord.Encrypted == false && MasterKey != null)
                {
                    throw new InvalidOperationException($"Attempt to create a non-encrypted db {Name}, but a secret key exists for this db.");
                }
            }
        }

        QueryMetadataCache = new QueryMetadataCache();
        IoChanges = new IoChangesNotifications();
        Changes = new DocumentsChanges();
        DocumentTombstoneCleaner = new DocumentTombstoneCleaner(this);
        DocumentsStorage = new DocumentsStorage(this, addToInitLog);
        IndexStore = new IndexStore(this, serverStore);
        QueryRunner = new QueryRunner(this);
        EtlLoader = new EtlLoader(this, serverStore);
        ReplicationLoader = new ReplicationLoader(this, serverStore);
        SubscriptionStorage = new SubscriptionStorage(this, serverStore);
        Metrics = new MetricCounters();
        TxMerger = new TransactionOperationsMerger(this, DatabaseShutdown);
        HugeDocuments = new HugeDocuments(configuration.PerformanceHints.HugeDocumentsCollectionSize, configuration.PerformanceHints.HugeDocumentSize.GetValue(SizeUnit.Bytes));
        ConfigurationStorage = new ConfigurationStorage(this);
        NotificationCenter = new NotificationCenter.NotificationCenter(ConfigurationStorage.NotificationsStorage, Name, _databaseShutdown.Token);
        Operations = new Operations.Operations(Name, ConfigurationStorage.OperationsStorage, NotificationCenter, Changes);
        DatabaseInfoCache = serverStore.DatabaseInfoCache;
        RachisLogIndexNotifications = new RachisLogIndexNotifications(DatabaseShutdown);

        // A catastrophic storage failure is escalated to the landlord's failure handler.
        CatastrophicFailureNotification = new CatastrophicFailureNotification((environmentId, e) =>
        {
            serverStore.DatabasesLandlord.CatastrophicFailureHandler.Execute(name, e, environmentId);
        });
    }
    catch (Exception)
    {
        Dispose();
        throw;
    }
}
// Builds a cluster-wide debug-info package: a zip containing one nested zip per node.
// The local node is collected in-process; remote nodes are queried over HTTP (failures
// become exception entries instead of aborting the package). When no databases are
// defined, only server-wide endpoints are gathered from all nodes. The package is
// streamed back as an "attachment" download.
public async Task GetClusterwideInfoPackage()
{
    var contentDisposition = $"attachment; filename={DateTime.UtcNow:yyyy-MM-dd H:mm:ss} Cluster Wide.zip";
    HttpContext.Response.Headers["Content-Disposition"] = contentDisposition;

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext transactionOperationContext))
    using (ServerStore.ContextPool.AllocateOperationContext(out JsonOperationContext jsonOperationContext))
    using (transactionOperationContext.OpenReadTransaction())
    {
        using (var ms = new MemoryStream())
        {
            using (var archive = new ZipArchive(ms, ZipArchiveMode.Create, true))
            {
                var localEndpointClient = new LocalEndpointClient(Server);

                // Collect this node's data locally into its own nested zip entry.
                using (var localMemoryStream = new MemoryStream())
                {
                    //assuming that if the name tag is empty
                    var nodeName = $"Node - [{ServerStore.NodeTag ?? "Empty node tag"}]";

                    using (var localArchive = new ZipArchive(localMemoryStream, ZipArchiveMode.Create, true))
                    {
                        await WriteServerWide(localArchive, jsonOperationContext, localEndpointClient);
                        await WriteForAllLocalDatabases(localArchive, jsonOperationContext, localEndpointClient);
                    }

                    localMemoryStream.Position = 0;
                    var entry = archive.CreateEntry($"{nodeName}.zip");
                    // Owner read/write permissions for the entry when extracted on POSIX systems.
                    entry.ExternalAttributes = ((int)(FilePermissions.S_IRUSR | FilePermissions.S_IWUSR)) << 16;

                    using (var entryStream = entry.Open())
                    {
                        localMemoryStream.CopyTo(entryStream);
                        entryStream.Flush();
                    }
                }

                var databaseNames = ServerStore.Cluster.GetDatabaseNames(transactionOperationContext).ToList();
                var topology = ServerStore.GetClusterTopology(transactionOperationContext);

                //this means no databases are defined in the cluster
                //in this case just output server-wide endpoints from all cluster nodes
                if (databaseNames.Count == 0)
                {
                    foreach (var tagWithUrl in topology.AllNodes)
                    {
                        if (tagWithUrl.Value.Contains(ServerStore.GetNodeHttpServerUrl()))
                        {
                            continue;
                        }

                        try
                        {
                            await WriteDebugInfoPackageForNodeAsync(
                                jsonOperationContext,
                                archive,
                                tag: tagWithUrl.Key,
                                url: tagWithUrl.Value,
                                certificate: Server.Certificate.Certificate,
                                databaseNames: null);
                        }
                        catch (Exception e)
                        {
                            // A failing remote node becomes an exception entry in the archive.
                            var entryName = $"Node - [{tagWithUrl.Key}]";
                            DebugInfoPackageUtils.WriteExceptionAsZipEntry(e, archive, entryName);
                        }
                    }
                }
                else
                {
                    var nodeUrlToDatabaseNames = CreateUrlToDatabaseNamesMapping(transactionOperationContext, databaseNames);
                    foreach (var urlToDatabaseNamesMap in nodeUrlToDatabaseNames)
                    {
                        if (urlToDatabaseNamesMap.Key.Contains(ServerStore.GetNodeHttpServerUrl()))
                        {
                            continue; //skip writing local data, we do it separately
                        }

                        try
                        {
                            await WriteDebugInfoPackageForNodeAsync(
                                jsonOperationContext,
                                archive,
                                tag: urlToDatabaseNamesMap.Value.Item2,
                                url: urlToDatabaseNamesMap.Key,
                                databaseNames: urlToDatabaseNamesMap.Value.Item1,
                                certificate: Server.Certificate.Certificate);
                        }
                        catch (Exception e)
                        {
                            var entryName = $"Node - [{urlToDatabaseNamesMap.Value.Item2}]";
                            DebugInfoPackageUtils.WriteExceptionAsZipEntry(e, archive, entryName);
                        }
                    }
                }
            }

            ms.Position = 0;
            await ms.CopyToAsync(ResponseBodyStream());
        }
    }
}
// Returns this node's view of the cluster topology plus leader info, node statuses,
// per-node license details and any cluster errors. For a node that isn't part of a
// cluster yet, a single-node topology containing only this server is synthesized so
// clients always receive a usable answer.
public async Task GetClusterTopology()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var topology = ServerStore.GetClusterTopology(context);
        var nodeTag = ServerStore.NodeTag;

        if (topology.AllNodes.Count == 0)
        {
            // Not in a cluster yet - fabricate a topology with just this node.
            var tag = ServerStore.NodeTag ?? "A";
            var serverUrl = ServerStore.GetNodeHttpServerUrl(HttpContext.Request.GetClientRequestedNodeUrl());

            topology = new ClusterTopology(
                topology.TopologyId ?? "dummy",
                new Dictionary<string, string>
                {
                    [tag] = serverUrl
                },
                new Dictionary<string, string>(),
                new Dictionary<string, string>(),
                tag,
                -1L
            );
            nodeTag = tag;
        }
        else
        {
            var isClientIndependent = GetBoolValueQueryString("clientIndependent", false) ?? false;
            // Rewrite this node's URL to the one the client actually used, unless explicitly
            // disabled via the query string or we're serving a debug-package request.
            if (isClientIndependent == false && HttpContext.Items.TryGetValue(nameof(LocalEndpointClient.DebugPackage), out var _) == false)
            {
                topology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext);
            }
        }

        HttpContext.Response.StatusCode = (int)HttpStatusCode.OK;

        await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            var loadLicenseLimits = ServerStore.LoadLicenseLimits();
            var nodeLicenseDetails = loadLicenseLimits == null ? null : DynamicJsonValue.Convert(loadLicenseLimits.NodeLicenseDetails);

            var json = new DynamicJsonValue
            {
                [nameof(ClusterTopologyResponse.Topology)] = topology.ToSortedJson(),
                [nameof(ClusterTopologyResponse.Etag)] = topology.Etag,
                [nameof(ClusterTopologyResponse.Leader)] = ServerStore.LeaderTag,
                ["LeaderShipDuration"] = ServerStore.Engine.CurrentLeader?.LeaderShipDuration,
                ["CurrentState"] = ServerStore.CurrentRachisState,
                [nameof(ClusterTopologyResponse.NodeTag)] = nodeTag,
                ["CurrentTerm"] = ServerStore.Engine.CurrentTerm,
                ["NodeLicenseDetails"] = nodeLicenseDetails,
                [nameof(ServerStore.Engine.LastStateChangeReason)] = ServerStore.LastStateChangeReason()
            };

            var clusterErrors = ServerStore.GetClusterErrors();
            if (clusterErrors.Count > 0)
            {
                json["Errors"] = clusterErrors;
            }

            var nodesStatues = ServerStore.GetNodesStatuses();
            json["Status"] = DynamicJsonValue.Convert(nodesStatues);

            context.Write(writer, json);
        }
    }
}
public static void AssertDestinationAndRegionAreAllowed(PeriodicBackupConfiguration configuration, ServerStore serverStore) { foreach (var backupDestination in configuration.GetDestinations()) { serverStore.Configuration.Backup.AssertDestinationAllowed(backupDestination); } if (configuration.S3Settings != null && configuration.S3Settings.Disabled == false) { serverStore.Configuration.Backup.AssertRegionAllowed(configuration.S3Settings.AwsRegionName); } if (configuration.GlacierSettings != null && configuration.GlacierSettings.Disabled == false) { serverStore.Configuration.Backup.AssertRegionAllowed(configuration.GlacierSettings.AwsRegionName); } }
public async Task Bootstrap() { await ServerStore.EnsureNotPassiveAsync(); NoContentStatus(); }
public static void UpdateLocalPathIfNeeded(PeriodicBackupConfiguration configuration, ServerStore serverStore) { if (configuration.LocalSettings == null || configuration.LocalSettings.Disabled) { return; } var folderPath = configuration.LocalSettings.FolderPath; if (string.IsNullOrWhiteSpace(configuration.LocalSettings.FolderPath)) { return; } var pathResult = GetActualFullPath(serverStore, folderPath); if (pathResult.Error != null) { throw new ArgumentException(pathResult.Error); } configuration.LocalSettings.FolderPath = pathResult.FolderPath; }
// Adds a new node to the cluster. Validates the candidate's URL scheme, connectivity
// (both directions), storage schema version, fixed port, and — for HTTPS clusters —
// its certificate (validity window, optional expected thumbprint), registering the
// certificate in the cluster when needed. Only the leader performs the actual add;
// other nodes redirect to the leader at the end.
public async Task AddNode()
{
    var nodeUrl = GetQueryStringValueAndAssertIfSingleAndNotEmpty("url");
    var tag = GetStringQueryString("tag", false);
    var watcher = GetBoolValueQueryString("watcher", false);
    var raftRequestId = GetRaftRequestIdFromQuery();

    var maxUtilizedCores = GetIntValueQueryString("maxUtilizedCores", false);
    if (maxUtilizedCores != null && maxUtilizedCores <= 0)
    {
        throw new ArgumentException("Max utilized cores cores must be greater than 0");
    }

    nodeUrl = nodeUrl.Trim();
    if (Uri.IsWellFormedUriString(nodeUrl, UriKind.Absolute) == false)
    {
        throw new InvalidOperationException($"Given node URL '{nodeUrl}' is not in a correct format.");
    }

    nodeUrl = UrlHelper.TryGetLeftPart(nodeUrl);
    var remoteIsHttps = nodeUrl.StartsWith("https:", StringComparison.OrdinalIgnoreCase);

    // A cluster must be homogeneous: all HTTPS or all HTTP.
    if (HttpContext.Request.IsHttps != remoteIsHttps)
    {
        throw new InvalidOperationException($"Cannot add node '{nodeUrl}' to cluster because it will create invalid mix of HTTPS & HTTP endpoints. A cluster must be only HTTPS or only HTTP.");
    }

    tag = tag?.Trim();

    NodeInfo nodeInfo;
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
    using (var requestExecutor = ClusterRequestExecutor.CreateForSingleNode(nodeUrl, Server.Certificate.Certificate))
    {
        requestExecutor.DefaultTimeout = ServerStore.Engine.OperationTimeout;

        // test connection to remote.
        var result = await ServerStore.TestConnectionToRemote(nodeUrl, database : null);
        if (result.Success == false)
        {
            throw new InvalidOperationException(result.Error);
        }

        // test connection from remote to destination
        result = await ServerStore.TestConnectionFromRemote(requestExecutor, ctx, nodeUrl);
        if (result.Success == false)
        {
            throw new InvalidOperationException(result.Error);
        }

        // Fetch the candidate node's info (schema version, certificate, cores, ...).
        var infoCmd = new GetNodeInfoCommand();
        try
        {
            await requestExecutor.ExecuteAsync(infoCmd, ctx);
        }
        catch (AllTopologyNodesDownException e)
        {
            throw new InvalidOperationException($"Couldn't contact node at {nodeUrl}", e);
        }

        nodeInfo = infoCmd.Result;

        // Refuse mixed storage schema versions; 0 indicates a pre-4.2 node.
        if (SchemaUpgrader.CurrentVersion.ServerVersion != nodeInfo.ServerSchemaVersion)
        {
            var nodesVersion = nodeInfo.ServerSchemaVersion == 0 ? "Pre 4.2 version" : nodeInfo.ServerSchemaVersion.ToString();
            throw new InvalidOperationException($"Can't add node with mismatched storage schema version.{Environment.NewLine}" +
                $"My version is {SchemaUpgrader.CurrentVersion.ServerVersion}, while node's version is {nodesVersion}");
        }

        // A passive server cannot absorb a node that already belongs to some cluster.
        if (ServerStore.IsPassive() && nodeInfo.TopologyId != null)
        {
            throw new TopologyMismatchException("You can't add new node to an already existing cluster");
        }
    }

    if (ServerStore.ValidateFixedPort && nodeInfo.HasFixedPort == false)
    {
        throw new InvalidOperationException($"Failed to add node '{nodeUrl}' to cluster. " +
            $"Node '{nodeUrl}' has port '0' in 'Configuration.Core.ServerUrls' setting. " +
            "Adding a node with non fixed port is forbidden. Define a fixed port for the node to enable cluster creation.");
    }

    await ServerStore.EnsureNotPassiveAsync();
    ServerStore.LicenseManager.AssertCanAddNode();

    if (ServerStore.IsLeader())
    {
        using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        {
            var clusterTopology = ServerStore.GetClusterTopology();

            var possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
            if (possibleNode.HasUrl)
            {
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because this url is already used by node {possibleNode.NodeTag}");
            }

            if (nodeInfo.ServerId == ServerStore.GetServerId())
            {
                throw new InvalidOperationException($"Can't add a new node on {nodeUrl} to cluster because it's a synonym of the current node URL:{ServerStore.GetNodeHttpServerUrl()}");
            }

            if (nodeInfo.TopologyId != null)
            {
                AssertCanAddNodeWithTopologyId(clusterTopology, nodeInfo, nodeUrl);
            }

            // An uninitialized node takes the requested tag; otherwise keep its existing tag.
            var nodeTag = nodeInfo.NodeTag == RachisConsensus.InitialTag ? tag : nodeInfo.NodeTag;

            CertificateDefinition oldServerCert = null;
            X509Certificate2 certificate = null;

            if (remoteIsHttps)
            {
                if (nodeInfo.Certificate == null)
                {
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because it has no certificate while trying to use HTTPS");
                }

                certificate = new X509Certificate2(Convert.FromBase64String(nodeInfo.Certificate), (string)null, X509KeyStorageFlags.MachineKeySet);

                var now = DateTime.UtcNow;
                if (certificate.NotBefore.ToUniversalTime() > now)
                {
                    // Because of time zone and time drift issues, we can't assume that the certificate generation will be
                    // proper. Because of that, we allow tolerance of the NotBefore to be a bit earlier / later than the
                    // current time. Clients may still fail to work with our certificate because of timing issues,
                    // but the admin needs to setup time sync properly and there isn't much we can do at that point.
                    if ((certificate.NotBefore.ToUniversalTime() - now).TotalDays > 1)
                    {
                        throw new InvalidOperationException(
                            $"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' is not yet valid. It starts on {certificate.NotBefore}");
                    }
                }

                if (certificate.NotAfter.ToUniversalTime() < now)
                {
                    throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate '{certificate.FriendlyName}' expired on {certificate.NotAfter}");
                }

                // Optional caller-supplied thumbprint pin.
                var expected = GetStringQueryString("expectedThumbprint", required: false);
                if (expected != null)
                {
                    if (certificate.Thumbprint != expected)
                    {
                        throw new InvalidOperationException($"Cannot add node {nodeTag} with url {nodeUrl} to cluster because its certificate thumbprint '{certificate.Thumbprint}' doesn't match the expected thumbprint '{expected}'.");
                    }
                }

                // if it's the same server certificate as our own, we don't want to add it to the cluster
                if (certificate.Thumbprint != Server.Certificate.Certificate.Thumbprint)
                {
                    using (ctx.OpenReadTransaction())
                    {
                        var readCert = ServerStore.Cluster.GetCertificateByThumbprint(ctx, certificate.Thumbprint);
                        if (readCert != null)
                        {
                            oldServerCert = JsonDeserializationServer.CertificateDefinition(readCert);
                        }
                    }

                    if (oldServerCert == null)
                    {
                        // First time we see this certificate: register it cluster-wide
                        // with ClusterNode clearance before adding the node.
                        var certificateDefinition = new CertificateDefinition
                        {
                            Certificate = nodeInfo.Certificate,
                            Thumbprint = certificate.Thumbprint,
                            PublicKeyPinningHash = certificate.GetPublicKeyPinningHash(),
                            NotAfter = certificate.NotAfter,
                            Name = "Server Certificate for " + nodeUrl,
                            SecurityClearance = SecurityClearance.ClusterNode
                        };

                        var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(certificate.Thumbprint, certificateDefinition, $"{raftRequestId}/put-new-certificate"));
                        await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                    }
                }
            }

            await ServerStore.AddNodeToClusterAsync(nodeUrl, nodeTag, validateNotInTopology : true, asWatcher : watcher ?? false);

            using (ctx.OpenReadTransaction())
            {
                // Re-read the topology to learn the tag the node actually received.
                clusterTopology = ServerStore.GetClusterTopology(ctx);
                possibleNode = clusterTopology.TryGetNodeTagByUrl(nodeUrl);
                nodeTag = possibleNode.HasUrl ? possibleNode.NodeTag : null;

                if (certificate != null && certificate.Thumbprint != Server.Certificate.Certificate.Thumbprint)
                {
                    var modifiedServerCert = JsonDeserializationServer.CertificateDefinition(ServerStore.Cluster.GetCertificateByThumbprint(ctx, certificate.Thumbprint));

                    if (modifiedServerCert == null)
                    {
                        throw new ConcurrencyException("After adding the certificate, it was removed, shouldn't happen unless another admin removed it midway through.");
                    }

                    // Stamp the certificate's name with the node tag (append if reused).
                    if (oldServerCert == null)
                    {
                        modifiedServerCert.Name = "Server certificate for Node " + nodeTag;
                    }
                    else
                    {
                        var value = "Node " + nodeTag;
                        if (modifiedServerCert.Name.Contains(value) == false)
                        {
                            modifiedServerCert.Name += ", " + value;
                        }
                    }

                    var res = await ServerStore.PutValueInClusterAsync(new PutCertificateCommand(certificate.Thumbprint, modifiedServerCert, $"{raftRequestId}/put-modified-certificate"));
                    await ServerStore.Cluster.WaitForIndexNotification(res.Index);
                }

                var detailsPerNode = new DetailsPerNode
                {
                    MaxUtilizedCores = maxUtilizedCores,
                    NumberOfCores = nodeInfo.NumberOfCores,
                    InstalledMemoryInGb = nodeInfo.InstalledMemoryInGb,
                    UsableMemoryInGb = nodeInfo.UsableMemoryInGb,
                    BuildInfo = nodeInfo.BuildInfo,
                    OsInfo = nodeInfo.OsInfo
                };

                var maxCores = ServerStore.LicenseManager.LicenseStatus.MaxCores;

                try
                {
                    await ServerStore.PutNodeLicenseLimitsAsync(nodeTag, detailsPerNode, maxCores, $"{raftRequestId}/put-license-limits");
                }
                catch
                {
                    // we'll retry this again later
                }
            }

            NoContentStatus();
            return;
        }
    }

    // Not the leader — tell the client to retry against the leader.
    RedirectToLeader();
}
// Collects dashboard data for all databases known to the cluster: per-database info,
// indexing speed, traffic, and drive usage. Lazily yields the four aggregate
// notifications at the end; honors cancellation between databases and between
// mount-point updates (iterator exits early via yield break).
public static IEnumerable<AbstractDashboardNotification> FetchDatabasesInfo(ServerStore serverStore, Func<string, bool> isValidFor, CancellationTokenSource cts)
{
    var databasesInfo = new DatabasesInfo();
    var indexingSpeed = new IndexingSpeed();
    var trafficWatch = new TrafficWatch();
    var drivesUsage = new DrivesUsage();

    using (serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext transactionContext))
    using (transactionContext.OpenReadTransaction())
    {
        // Enumerate every database record stored in the cluster state machine.
        foreach (var databaseTuple in serverStore.Cluster.ItemsStartingWith(transactionContext, Constants.Documents.Prefix, 0, int.MaxValue))
        {
            var databaseName = databaseTuple.ItemName.Substring(Constants.Documents.Prefix.Length);
            if (cts.IsCancellationRequested)
            {
                yield break;
            }

            // Optional caller-supplied filter (e.g. authorization scope).
            if (isValidFor != null && isValidFor(databaseName) == false)
            {
                continue;
            }

            if (serverStore.DatabasesLandlord.DatabasesCache.TryGetValue(databaseName, out var databaseTask) == false)
            {
                // database does not exist on this server or disabled
                SetOfflineDatabaseInfo(serverStore, databaseName, databasesInfo, drivesUsage, disabled: true);
                continue;
            }

            // Known, but may still be loading/faulted — report offline without disabling.
            var databaseOnline = IsDatabaseOnline(databaseTask, out var database);
            if (databaseOnline == false)
            {
                SetOfflineDatabaseInfo(serverStore, databaseName, databasesInfo, drivesUsage, disabled: false);
                continue;
            }

            var indexingSpeedItem = new IndexingSpeedItem
            {
                Database = database.Name,
                IndexedPerSecond = database.Metrics.MapIndexes.IndexedPerSec.FiveSecondRate,
                MappedPerSecond = database.Metrics.MapReduceIndexes.MappedPerSec.FiveSecondRate,
                ReducedPerSecond = database.Metrics.MapReduceIndexes.ReducedPerSec.FiveSecondRate
            };
            indexingSpeed.Items.Add(indexingSpeedItem);

            var replicationFactor = GetReplicationFactor(databaseTuple.Value);
            var documentsStorage = database.DocumentsStorage;
            var indexStorage = database.IndexStore;

            // Document count needs a read transaction on the database's own storage.
            using (documentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
            using (documentsContext.OpenReadTransaction())
            {
                var databaseInfoItem = new DatabaseInfoItem
                {
                    Database = databaseName,
                    DocumentsCount = documentsStorage.GetNumberOfDocuments(documentsContext),
                    IndexesCount = database.IndexStore.Count,
                    AlertsCount = database.NotificationCenter.GetAlertCount(),
                    ReplicationFactor = replicationFactor,
                    ErroredIndexesCount = indexStorage.GetIndexes().Count(index => index.GetErrorCount() > 0),
                    Online = true
                };
                databasesInfo.Items.Add(databaseInfoItem);
            }

            var trafficWatchItem = new TrafficWatchItem
            {
                Database = databaseName,
                RequestsPerSecond = (int)database.Metrics.Requests.RequestsPerSec.FiveSecondRate,
                WritesPerSecond = (int)database.Metrics.Docs.PutsPerSec.FiveSecondRate,
                WriteBytesPerSecond = database.Metrics.Docs.BytesPutsPerSec.FiveSecondRate
            };
            trafficWatch.Items.Add(trafficWatchItem);

            foreach (var mountPointUsage in database.GetMountPointsUsage())
            {
                if (cts.IsCancellationRequested)
                {
                    yield break;
                }
                UpdateMountPoint(mountPointUsage, databaseName, drivesUsage);
            }
        }
    }

    yield return (databasesInfo);
    yield return (indexingSpeed);
    yield return (trafficWatch);
    yield return (drivesUsage);
}
public async Task ApplyCommand() { using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context)) { if (ServerStore.IsLeader() == false) { throw new NoLeaderException("Not a leader, cannot accept commands."); } HttpContext.Response.Headers["Reached-Leader"] = "true"; var commandJson = await context.ReadForMemoryAsync(RequestBodyStream(), "external/rachis/command"); try { var command = CommandBase.CreateFrom(commandJson); switch (command) { case AddOrUpdateCompareExchangeBatchCommand batchCmpExchangeCommand: batchCmpExchangeCommand.ContextToWriteResult = context; break; case CompareExchangeCommandBase cmpExchange: cmpExchange.ContextToWriteResult = context; break; } var isClusterAdmin = IsClusterAdmin(); command.VerifyCanExecuteCommand(ServerStore, context, isClusterAdmin); var(etag, result) = await ServerStore.Engine.PutAsync(command); HttpContext.Response.StatusCode = (int)HttpStatusCode.OK; var ms = context.CheckoutMemoryStream(); try { await using (var writer = new AsyncBlittableJsonTextWriter(context, ms)) { context.Write(writer, new DynamicJsonValue { [nameof(ServerStore.PutRaftCommandResult.RaftCommandIndex)] = etag, [nameof(ServerStore.PutRaftCommandResult.Data)] = result, }); } // now that we know that we properly serialized it ms.Position = 0; await ms.CopyToAsync(ResponseBodyStream()); } finally { context.ReturnMemoryStream(ms); } } catch (NotLeadingException e) { HttpContext.Response.Headers["Reached-Leader"] = "false"; throw new NoLeaderException("Lost the leadership, cannot accept commands.", e); } catch (InvalidOperationException e) { RequestRouter.AssertClientVersion(HttpContext, e); throw; } } }
protected abstract void Apply(ClusterOperationContext context, BlittableJsonReaderObject cmd, long index, Leader leader, ServerStore serverStore);
// Streams a bulk_docs request body through an unmanaged JSON parser and fills
// command.ParsedCommands. Expects the payload shape { "Commands": [ ... ] }.
// PATCH/BatchPATCH commands get their patch command objects pre-built; PUT commands
// whose id ends with '|' (server-side identities) are collected and resolved in one
// cluster round-trip via GetIdentitiesValues before the commands are handed back.
public static async Task BuildCommandsAsync(JsonOperationContext ctx, BatchHandler.MergedBatchCommand command, Stream stream, DocumentDatabase database, ServerStore serverStore)
{
    CommandData[] cmds = Empty;
    List<string> identities = null;
    List<int> positionInListToCommandIndex = null;
    int index = -1;

    var state = new JsonParserState();
    using (ctx.GetManagedBuffer(out JsonOperationContext.ManagedPinnedBuffer buffer))
    using (var parser = new UnmanagedJsonParser(ctx, state, "bulk_docs"))
    using (var modifier = new BlittableMetadataModifier(ctx))
    {
        // Parser.Read() returns false when it ran out of buffered bytes; refill and retry.
        while (parser.Read() == false)
        {
            await RefillParserBuffer(stream, buffer, parser);
        }

        // Payload must start with an object: { "Commands": [...] }
        if (state.CurrentTokenType != JsonParserToken.StartObject)
        {
            ThrowUnexpectedToken(JsonParserToken.StartObject, state);
        }

        while (parser.Read() == false)
        {
            await RefillParserBuffer(stream, buffer, parser);
        }

        if (state.CurrentTokenType != JsonParserToken.String)
        {
            ThrowUnexpectedToken(JsonParserToken.String, state);
        }

        // Fast property-name check: the constant is the 8-byte representation of "Commands".
        if (GetLongFromStringBuffer(state) != 8314892176759549763) // Commands
        {
            ThrowUnexpectedToken(JsonParserToken.String, state);
        }

        while (parser.Read() == false)
        {
            await RefillParserBuffer(stream, buffer, parser);
        }

        if (state.CurrentTokenType != JsonParserToken.StartArray)
        {
            ThrowUnexpectedToken(JsonParserToken.StartArray, state);
        }

        // Read commands until the closing ']' of the Commands array.
        while (true)
        {
            while (parser.Read() == false)
            {
                await RefillParserBuffer(stream, buffer, parser);
            }

            if (state.CurrentTokenType == JsonParserToken.EndArray)
            {
                break;
            }

            index++;
            if (index >= cmds.Length)
            {
                // Grow the pooled command buffer on demand.
                cmds = IncreaseSizeOfCommandsBuffer(index, cmds);
            }

            var commandData = await ReadSingleCommand(ctx, stream, state, parser, buffer, modifier, default);

            if (commandData.Type == CommandType.PATCH)
            {
                commandData.PatchCommand = new PatchDocumentCommand(
                    ctx,
                    commandData.Id,
                    commandData.ChangeVector,
                    skipPatchIfChangeVectorMismatch: false,
                    (commandData.Patch, commandData.PatchArgs),
                    (commandData.PatchIfMissing, commandData.PatchIfMissingArgs),
                    database,
                    isTest: false,
                    debugMode: false,
                    collectResultsNeeded: true,
                    returnDocument: commandData.ReturnDocument
                );
            }

            if (commandData.Type == CommandType.BatchPATCH)
            {
                commandData.PatchCommand = new BatchPatchDocumentCommand(
                    ctx,
                    commandData.Ids,
                    skipPatchIfChangeVectorMismatch: false,
                    (commandData.Patch, commandData.PatchArgs),
                    (commandData.PatchIfMissing, commandData.PatchIfMissingArgs),
                    database,
                    isTest: false,
                    debugMode: false,
                    collectResultsNeeded: true
                );
            }

            // A trailing '|' in a PUT id means "assign a cluster-wide identity".
            if (commandData.Type == CommandType.PUT && string.IsNullOrEmpty(commandData.Id) == false && commandData.Id[commandData.Id.Length - 1] == '|')
            {
                if (identities == null)
                {
                    identities = new List<string>();
                    positionInListToCommandIndex = new List<int>();
                }
                // queue identities requests in order to send them at once to the leader (using List for simplicity)
                identities.Add(commandData.Id);
                positionInListToCommandIndex.Add(index);
            }

            cmds[index] = commandData;
        }

        if (identities != null)
        {
            // Resolve all queued identities in a single leader round-trip.
            await GetIdentitiesValues(ctx, database, serverStore, identities, positionInListToCommandIndex, cmds);
        }

        command.ParsedCommands = new ArraySegment<CommandData>(cmds, 0, index + 1);

        // An optional trailing property may mark this batch as a cluster transaction.
        if (await IsClusterTransaction(stream, parser, buffer, state))
        {
            command.IsClusterTransaction = true;
        }
    }
}