public DocumentDatabase(string name, RavenConfiguration configuration, ServerStore serverStore)
{
    // Name must be assigned before the logger is created, since the logger is keyed by it
    StartTime = SystemTime.UtcNow;
    Name = name;
    Configuration = configuration;
    Scripts = new ScriptRunnerCache(this);
    _logger = LoggingSource.Instance.GetLogger<DocumentDatabase>(Name);
    _serverStore = serverStore;

    try
    {
        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            MasterKey = serverStore.GetSecretKey(ctx, Name);

            var databaseRecord = _serverStore.Cluster.ReadDatabase(ctx, Name);
            if (databaseRecord != null)
            {
                // can happen when we are in the process of restoring a database
                if (databaseRecord.Encrypted && MasterKey == null)
                    throw new InvalidOperationException($"Attempt to create encrypted db {Name} without supplying the secret key");
                if (databaseRecord.Encrypted == false && MasterKey != null)
                    throw new InvalidOperationException($"Attempt to create a non-encrypted db {Name}, but a secret key exists for this db.");
            }
        }

        QueryMetadataCache = new QueryMetadataCache();
        IoChanges = new IoChangesNotifications();
        Changes = new DocumentsChanges();
        DocumentTombstoneCleaner = new DocumentTombstoneCleaner(this);
        DocumentsStorage = new DocumentsStorage(this);
        IndexStore = new IndexStore(this, serverStore, _indexAndTransformerLocker);
        EtlLoader = new EtlLoader(this, serverStore);
        ReplicationLoader = new ReplicationLoader(this, serverStore);
        SubscriptionStorage = new SubscriptionStorage(this, serverStore);
        Metrics = new MetricsCountersManager();
        TxMerger = new TransactionOperationsMerger(this, DatabaseShutdown);
        HugeDocuments = new HugeDocuments(configuration.PerformanceHints.HugeDocumentsCollectionSize,
            configuration.PerformanceHints.HugeDocumentSize.GetValue(SizeUnit.Bytes));
        ConfigurationStorage = new ConfigurationStorage(this);
        NotificationCenter = new NotificationCenter.NotificationCenter(ConfigurationStorage.NotificationsStorage, Name, _databaseShutdown.Token);
        Operations = new Operations.Operations(Name, ConfigurationStorage.OperationsStorage, NotificationCenter, Changes);
        DatabaseInfoCache = serverStore.DatabaseInfoCache;
        RachisLogIndexNotifications = new RachisLogIndexNotifications(DatabaseShutdown);
        CatastrophicFailureNotification = new CatastrophicFailureNotification(e =>
        {
            serverStore.DatabasesLandlord.UnloadResourceOnCatastrophicFailure(name, e);
        });
    }
    catch (Exception)
    {
        // release anything the constructor managed to wire up before rethrowing
        Dispose();
        throw;
    }
}
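The constructor only wires components together; nothing is loaded or started until Initialize is called. A minimal lifecycle sketch, assuming `configuration` and `serverStore` were built elsewhere (the database name is hypothetical):

// Hypothetical usage sketch, not taken from the RavenDB sources.
var database = new DocumentDatabase("Northwind", configuration, serverStore);
try
{
    database.Initialize(); // InitializeOptions.None by default
}
catch
{
    // both the constructor and Initialize() call Dispose() on failure,
    // so no extra cleanup is needed here
    throw;
}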
public void Initialize(InitializeOptions options = InitializeOptions.None)
{
    try
    {
        NotificationCenter.Initialize(this);
        DocumentsStorage.Initialize((options & InitializeOptions.GenerateNewDatabaseId) == InitializeOptions.GenerateNewDatabaseId);
        TxMerger.Start();
        ConfigurationStorage.Initialize();

        if ((options & InitializeOptions.SkipLoadingDatabaseRecord) == InitializeOptions.SkipLoadingDatabaseRecord)
            return;

        long index;
        DatabaseRecord record;
        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        using (context.OpenReadTransaction())
            record = _serverStore.Cluster.ReadDatabase(context, Name, out index);

        if (record == null)
            DatabaseDoesNotExistException.Throw(Name);

        PeriodicBackupRunner = new PeriodicBackupRunner(this, _serverStore);

        _indexStoreTask = IndexStore.InitializeAsync(record);
        ReplicationLoader?.Initialize(record);
        EtlLoader.Initialize(record);
        DocumentTombstoneCleaner.Start();

        try
        {
            _indexStoreTask.Wait(DatabaseShutdown);
        }
        finally
        {
            _indexStoreTask = null;
        }

        SubscriptionStorage.Initialize();

        NotifyFeaturesAboutStateChange(record, index);
    }
    catch (Exception)
    {
        Dispose();
        throw;
    }
}
private void NotifyFeaturesAboutStateChange(DatabaseRecord record, long index)
{
    lock (this)
    {
        Debug.Assert(string.Equals(Name, record.DatabaseName, StringComparison.OrdinalIgnoreCase),
            $"{Name} != {record.DatabaseName}");

        // index and LastDatabaseRecordIndex could have equal values when we transit from/to passive and want to update the tasks.
        if (LastDatabaseRecordIndex > index)
        {
            if (_logger.IsInfoEnabled)
                _logger.Info($"Skipping record {index} (current {LastDatabaseRecordIndex}) for {record.DatabaseName} because it was already processed.");
            return;
        }

        if (_logger.IsInfoEnabled)
            _logger.Info($"Starting to process record {index} (current {LastDatabaseRecordIndex}) for {record.DatabaseName}.");

        try
        {
            InitializeFromDatabaseRecord(record);
            LastDatabaseRecordIndex = index;

            IndexStore.HandleDatabaseRecordChange(record, index);
            ReplicationLoader?.HandleDatabaseRecordChange(record);
            EtlLoader?.HandleDatabaseRecordChange(record);
            OnDatabaseRecordChanged(record);
            SubscriptionStorage?.HandleDatabaseValueChange(record);

            if (_logger.IsInfoEnabled)
                _logger.Info($"Finished processing record {index} for {record.DatabaseName}.");
        }
        catch (Exception e)
        {
            if (_logger.IsInfoEnabled)
                _logger.Info($"Encountered an error while processing record {index} for {record.DatabaseName}.", e);
            throw;
        }
    }
}
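The guard at the top of this method is a general pattern for applying cluster state changes idempotently: every change carries a monotonically increasing Raft index, and a consumer only applies a change whose index is at least as large as the last one it processed. A stripped-down sketch of that pattern, with hypothetical names (this is an illustration, not RavenDB API):

using System;

public class RecordProcessor
{
    private long _lastAppliedIndex;
    private readonly object _sync = new object();

    public bool TryApply(long index, Action apply)
    {
        lock (_sync)
        {
            // a strictly smaller index means a stale notification, so skip it;
            // an equal index is re-applied on purpose (e.g. passive/active transitions)
            if (_lastAppliedIndex > index)
                return false;

            apply();
            _lastAppliedIndex = index;
            return true;
        }
    }
}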
public unsafe void Dispose()
{
    if (_databaseShutdown.IsCancellationRequested)
        return; // double dispose?

    lock (this)
    {
        if (_databaseShutdown.IsCancellationRequested)
            return; // double dispose?

        // before we dispose of the database we take its latest info to be displayed in the studio
        try
        {
            var databaseInfo = GenerateDatabaseInfo();
            if (databaseInfo != null)
                DatabaseInfoCache?.InsertDatabaseInfo(databaseInfo, Name);
        }
        catch (Exception e)
        {
            // if we encountered a catastrophic failure we might not be able to retrieve database info
            if (_logger.IsInfoEnabled)
                _logger.Info("Failed to generate and store database info", e);
        }

        _databaseShutdown.Cancel();

        // we'll wait for 1 minute to drain all the requests from the database
        var sp = Stopwatch.StartNew();
        while (sp.ElapsedMilliseconds < 60 * 1000)
        {
            if (Interlocked.Read(ref _usages) == 0)
                break;

            if (_waitForUsagesOnDisposal.Wait(1000))
                _waitForUsagesOnDisposal.Reset();
        }

        var exceptionAggregator = new ExceptionAggregator(_logger, $"Could not dispose {nameof(DocumentDatabase)} {Name}");

        foreach (var connection in RunningTcpConnections)
        {
            exceptionAggregator.Execute(() => connection.Dispose());
        }

        exceptionAggregator.Execute(() => TxMerger?.Dispose());

        if (_indexStoreTask != null)
        {
            exceptionAggregator.Execute(() => _indexStoreTask.Wait(DatabaseShutdown));
        }

        exceptionAggregator.Execute(() => IndexStore?.Dispose());
        exceptionAggregator.Execute(() => ExpiredDocumentsCleaner?.Dispose());
        exceptionAggregator.Execute(() => PeriodicBackupRunner?.Dispose());
        exceptionAggregator.Execute(() => DocumentTombstoneCleaner?.Dispose());
        exceptionAggregator.Execute(() => ReplicationLoader?.Dispose());
        exceptionAggregator.Execute(() => EtlLoader?.Dispose());
        exceptionAggregator.Execute(() => Operations?.Dispose(exceptionAggregator));
        exceptionAggregator.Execute(() => NotificationCenter?.Dispose());
        exceptionAggregator.Execute(() => SubscriptionStorage?.Dispose());
        exceptionAggregator.Execute(() => ConfigurationStorage?.Dispose());
        exceptionAggregator.Execute(() => DocumentsStorage?.Dispose());
        exceptionAggregator.Execute(() => _databaseShutdown.Dispose());

        exceptionAggregator.Execute(() =>
        {
            if (MasterKey == null)
                return;

            // zero out the encryption key so it does not linger in managed memory
            fixed (byte* pKey = MasterKey)
            {
                Sodium.sodium_memzero(pKey, (UIntPtr)MasterKey.Length);
            }
        });

        exceptionAggregator.ThrowIfNeeded();
    }
}
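Dispose relies on ExceptionAggregator so that a failure in one component does not prevent the remaining components from being disposed. A minimal sketch of how such an aggregator can be written (an illustration only, not the actual RavenDB implementation):

using System;
using System.Collections.Generic;

public class SimpleExceptionAggregator
{
    private readonly List<Exception> _exceptions = new List<Exception>();
    private readonly string _message;

    public SimpleExceptionAggregator(string message) => _message = message;

    // run the action, remember the failure, keep going with the next one
    public void Execute(Action action)
    {
        try { action(); }
        catch (Exception e) { _exceptions.Add(e); }
    }

    // rethrow everything at the end, once all cleanup has been attempted
    public void ThrowIfNeeded()
    {
        if (_exceptions.Count > 0)
            throw new AggregateException(_message, _exceptions);
    }
}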
public void Initialize(InitializeOptions options = InitializeOptions.None)
{
    try
    {
        _addToInitLog("Initializing NotificationCenter");
        NotificationCenter.Initialize(this);

        _addToInitLog("Initializing DocumentStorage");
        DocumentsStorage.Initialize((options & InitializeOptions.GenerateNewDatabaseId) == InitializeOptions.GenerateNewDatabaseId);

        _addToInitLog("Starting Transaction Merger");
        TxMerger.Start();

        _addToInitLog("Initializing ConfigurationStorage");
        ConfigurationStorage.Initialize();

        if ((options & InitializeOptions.SkipLoadingDatabaseRecord) == InitializeOptions.SkipLoadingDatabaseRecord)
            return;

        _addToInitLog("Loading Database");
        long index;
        DatabaseRecord record;
        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        using (context.OpenReadTransaction())
            record = _serverStore.Cluster.ReadDatabase(context, Name, out index);

        if (record == null)
            DatabaseDoesNotExistException.Throw(Name);

        PeriodicBackupRunner = new PeriodicBackupRunner(this, _serverStore);

        _addToInitLog("Initializing IndexStore (async)");
        _indexStoreTask = IndexStore.InitializeAsync(record);

        _addToInitLog("Initializing Replication");
        ReplicationLoader?.Initialize(record);

        _addToInitLog("Initializing ETL");
        EtlLoader.Initialize(record);

        DocumentTombstoneCleaner.Start();

        try
        {
            _indexStoreTask.Wait(DatabaseShutdown);
        }
        finally
        {
            _addToInitLog("Initializing IndexStore completed");
            _indexStoreTask = null;
        }

        SubscriptionStorage.Initialize();
        _addToInitLog("Initializing SubscriptionStorage completed");

        TaskExecutor.Execute(state =>
        {
            try
            {
                NotifyFeaturesAboutStateChange(record, index);
            }
            catch
            {
                // we ignore the exception since it was caught in the function itself
            }
        }, null);
    }
    catch (Exception)
    {
        Dispose();
        throw;
    }
}
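This second variant of Initialize is a later revision of the one shown earlier: it adds init-stage logging via _addToInitLog and moves the state-change notification off the calling thread. Both variants test options with `(options & flag) == flag`, the standard check for a [Flags] enum. The enum itself is not part of this section; a plausible shape consistent with these checks (the numeric values are an assumption) would be:

using System;

[Flags]
public enum InitializeOptions
{
    None = 0,
    GenerateNewDatabaseId = 1,      // values assumed; only the flag names appear above
    SkipLoadingDatabaseRecord = 2
}

// usage, as in the snippets above:
// database.Initialize(InitializeOptions.SkipLoadingDatabaseRecord);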
public async Task CanReplicateTombstonesFromDifferentCollections()
{
    var id = "Oren\r\nEini"; // note: the id deliberately contains a line break

    using (var store1 = GetDocumentStore())
    using (var store2 = GetDocumentStore())
    {
        var storage1 = Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(store1.Database).Result;
        var storage2 = Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(store2.Database).Result;

        using (var session = store1.OpenSession())
        {
            session.Store(new User { Name = "Karmel" }, id);
            session.SaveChanges();
        }

        var results = await SetupReplicationAsync(store1, store2);
        Assert.True(WaitForDocument(store2, id));

        using (var session = store1.OpenSession())
        {
            session.Delete(id);
            session.SaveChanges();
        }
        EnsureReplicating(store1, store2);

        // re-create the document under the same id but in a different collection,
        // then delete it again, so each side ends up with two tombstones
        using (var session = store1.OpenSession())
        {
            session.Store(new Company { Name = "Karmel" }, id);
            session.SaveChanges();
        }
        Assert.True(WaitForDocument(store2, id));

        using (var session = store1.OpenSession())
        {
            session.Delete(id);
            session.SaveChanges();
        }
        EnsureReplicating(store1, store2);

        using (storage1.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            Assert.Equal(2, storage1.DocumentsStorage.GetNumberOfTombstones(ctx));
        }

        using (storage2.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            Assert.Equal(2, storage2.DocumentsStorage.GetNumberOfTombstones(ctx));
        }

        var val = await WaitForValueAsync(() =>
        {
            var state = ReplicationLoader.GetExternalReplicationState(Server.ServerStore, store1.Database, results[0].TaskId);
            return state.LastSentEtag;
        }, 7);
        Assert.Equal(7, val);

        await storage1.TombstoneCleaner.ExecuteCleanup();
        await storage2.TombstoneCleaner.ExecuteCleanup();

        using (storage1.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            Assert.Equal(0, storage1.DocumentsStorage.GetNumberOfTombstones(ctx));
        }

        using (storage2.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            Assert.Equal(0, storage2.DocumentsStorage.GetNumberOfTombstones(ctx));
        }
    }
}
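The test relies on polling helpers (WaitForDocument, WaitForValueAsync) from the RavenDB test base classes rather than fixed sleeps. A generic sketch of such a helper, simplified and with an assumed signature (the real helper may differ):

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading.Tasks;

public static class PollingHelper
{
    // polls getValue until it yields the expected value or the timeout elapses,
    // then returns whatever the last observed value was
    public static async Task<T> WaitForValueAsync<T>(Func<T> getValue, T expected, int timeoutMs = 15000)
    {
        var sw = Stopwatch.StartNew();
        var value = getValue();
        while (EqualityComparer<T>.Default.Equals(value, expected) == false &&
               sw.ElapsedMilliseconds < timeoutMs)
        {
            await Task.Delay(100);
            value = getValue();
        }
        return value;
    }
}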
private static DatabaseOngoingTasksInfoItem GetOngoingTasksInfoItem(DocumentDatabase database, ServerStore serverStore, TransactionOperationContext context, out long ongoingTasksCount)
{
    var dbRecord = database.ReadDatabaseRecord();

    var extRepCount = dbRecord.ExternalReplications.Count;
    long extRepCountOnNode = GetTaskCountOnNode<ExternalReplication>(database, dbRecord, serverStore, dbRecord.ExternalReplications,
        task => ReplicationLoader.GetExternalReplicationState(serverStore, database.Name, task.TaskId));

    long replicationHubCountOnNode = 0;
    var replicationHubCount = database.ReplicationLoader.OutgoingHandlers.Count(x => x.IsPullReplicationAsHub);
    replicationHubCountOnNode += replicationHubCount;

    var replicationSinkCount = dbRecord.SinkPullReplications.Count;
    long replicationSinkCountOnNode = GetTaskCountOnNode<PullReplicationAsSink>(database, dbRecord, serverStore, dbRecord.SinkPullReplications, task => null);

    var ravenEtlCount = database.EtlLoader.RavenDestinations.Count;
    long ravenEtlCountOnNode = GetTaskCountOnNode<RavenEtlConfiguration>(database, dbRecord, serverStore, database.EtlLoader.RavenDestinations,
        task => EtlLoader.GetProcessState(task.Transforms, database, task.Name));

    var sqlEtlCount = database.EtlLoader.SqlDestinations.Count;
    long sqlEtlCountOnNode = GetTaskCountOnNode<SqlEtlConfiguration>(database, dbRecord, serverStore, database.EtlLoader.SqlDestinations,
        task => EtlLoader.GetProcessState(task.Transforms, database, task.Name));

    var olapEtlCount = database.EtlLoader.OlapDestinations.Count;
    long olapEtlCountOnNode = GetTaskCountOnNode<OlapEtlConfiguration>(database, dbRecord, serverStore, database.EtlLoader.OlapDestinations,
        task => EtlLoader.GetProcessState(task.Transforms, database, task.Name));

    var periodicBackupCount = database.PeriodicBackupRunner.PeriodicBackups.Count;
    long periodicBackupCountOnNode = GetTaskCountOnNode<PeriodicBackupConfiguration>(database, dbRecord, serverStore,
        database.PeriodicBackupRunner.PeriodicBackups.Select(x => x.Configuration),
        task => database.PeriodicBackupRunner.GetBackupStatus(task.TaskId),
        task => task.Name.StartsWith("Server Wide") == false);

    var subscriptionCount = database.SubscriptionStorage.GetAllSubscriptionsCount();
    long subscriptionCountOnNode = GetSubscriptionCountOnNode(database, dbRecord, serverStore, context);

    ongoingTasksCount = extRepCount + replicationHubCount + replicationSinkCount +
                        ravenEtlCount + sqlEtlCount + olapEtlCount +
                        periodicBackupCount + subscriptionCount;

    return new DatabaseOngoingTasksInfoItem
    {
        Database = database.Name,
        ExternalReplicationCount = extRepCountOnNode,
        ReplicationHubCount = replicationHubCountOnNode,
        ReplicationSinkCount = replicationSinkCountOnNode,
        RavenEtlCount = ravenEtlCountOnNode,
        SqlEtlCount = sqlEtlCountOnNode,
        OlapEtlCount = olapEtlCountOnNode,
        PeriodicBackupCount = periodicBackupCountOnNode,
        SubscriptionCount = subscriptionCountOnNode
    };
}