public DocumentDatabase(string name, RavenConfiguration configuration, ServerStore serverStore)
{
    StartTime = SystemTime.UtcNow;
    Name = name;
    ResourceName = "db/" + name;
    Configuration = configuration;
    _logger = LoggingSource.Instance.GetLogger<DocumentDatabase>(Name);

    Notifications = new DocumentsNotifications();
    DocumentsStorage = new DocumentsStorage(this);
    IndexStore = new IndexStore(this);
    TransformerStore = new TransformerStore(this);
    SqlReplicationLoader = new SqlReplicationLoader(this);
    DocumentReplicationLoader = new DocumentReplicationLoader(this);
    DocumentTombstoneCleaner = new DocumentTombstoneCleaner(this);
    SubscriptionStorage = new SubscriptionStorage(this);
    Operations = new DatabaseOperations(this);
    Metrics = new MetricsCountersManager();
    IoMetrics = serverStore?.IoMetrics ?? new IoMetrics(256, 256);
    Patch = new PatchDocument(this);
    TxMerger = new TransactionOperationsMerger(this, DatabaseShutdown);
    HugeDocuments = new HugeDocuments(configuration.Databases.MaxCollectionSizeHugeDocuments,
        configuration.Databases.MaxWarnSizeHugeDocuments);
    ConfigurationStorage = new ConfigurationStorage(this, serverStore);
    DatabaseInfoCache = serverStore?.DatabaseInfoCache;
}
private void InitializeInternal()
{
    TxMerger.Start();

    _indexStoreTask = IndexStore.InitializeAsync();
    _transformerStoreTask = TransformerStore.InitializeAsync();
    SqlReplicationLoader.Initialize();
    DocumentTombstoneCleaner.Initialize();
    BundleLoader = new BundleLoader(this);

    try
    {
        _indexStoreTask.Wait(DatabaseShutdown);
    }
    finally
    {
        _indexStoreTask = null;
    }

    try
    {
        _transformerStoreTask.Wait(DatabaseShutdown);
    }
    finally
    {
        _transformerStoreTask = null;
    }

    SubscriptionStorage.Initialize();

    // The index metadata store shares the Voron environment and context pool with the documents storage,
    // so replication of both documents and indexes/transformers can be done within one transaction
    ConfigurationStorage.Initialize(IndexStore, TransformerStore);

    DocumentReplicationLoader.Initialize();
}
public DocumentDatabase(string name, RavenConfiguration configuration, ServerStore serverStore)
{
    // Name must be assigned before the logger is created, since the logger is keyed by the database name
    Name = name;
    Scripts = new ScriptRunnerCache(this);
    _logger = LoggingSource.Instance.GetLogger<DocumentDatabase>(Name);
    _serverStore = serverStore;
    StartTime = SystemTime.UtcNow;
    Configuration = configuration;

    try
    {
        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            MasterKey = serverStore.GetSecretKey(ctx, Name);

            var databaseRecord = _serverStore.Cluster.ReadDatabase(ctx, Name);
            if (databaseRecord != null)
            {
                // can happen when we are in the process of restoring a database
                if (databaseRecord.Encrypted && MasterKey == null)
                    throw new InvalidOperationException($"Attempt to create encrypted db {Name} without supplying the secret key");
                if (databaseRecord.Encrypted == false && MasterKey != null)
                    throw new InvalidOperationException($"Attempt to create a non-encrypted db {Name}, but a secret key exists for this db.");
            }
        }

        QueryMetadataCache = new QueryMetadataCache();
        IoChanges = new IoChangesNotifications();
        Changes = new DocumentsChanges();
        DocumentTombstoneCleaner = new DocumentTombstoneCleaner(this);
        DocumentsStorage = new DocumentsStorage(this);
        IndexStore = new IndexStore(this, serverStore, _indexAndTransformerLocker);
        EtlLoader = new EtlLoader(this, serverStore);
        ReplicationLoader = new ReplicationLoader(this, serverStore);
        SubscriptionStorage = new SubscriptionStorage(this, serverStore);
        Metrics = new MetricsCountersManager();
        TxMerger = new TransactionOperationsMerger(this, DatabaseShutdown);
        HugeDocuments = new HugeDocuments(configuration.PerformanceHints.HugeDocumentsCollectionSize,
            configuration.PerformanceHints.HugeDocumentSize.GetValue(SizeUnit.Bytes));
        ConfigurationStorage = new ConfigurationStorage(this);
        NotificationCenter = new NotificationCenter.NotificationCenter(ConfigurationStorage.NotificationsStorage, Name, _databaseShutdown.Token);
        Operations = new Operations.Operations(Name, ConfigurationStorage.OperationsStorage, NotificationCenter, Changes);
        DatabaseInfoCache = serverStore.DatabaseInfoCache;
        RachisLogIndexNotifications = new RachisLogIndexNotifications(DatabaseShutdown);

        CatastrophicFailureNotification = new CatastrophicFailureNotification(e =>
        {
            serverStore.DatabasesLandlord.UnloadResourceOnCatastrophicFailure(name, e);
        });
    }
    catch (Exception)
    {
        Dispose();
        throw;
    }
}
public void Initialize(InitializeOptions options = InitializeOptions.None)
{
    try
    {
        NotificationCenter.Initialize(this);

        DocumentsStorage.Initialize((options & InitializeOptions.GenerateNewDatabaseId) == InitializeOptions.GenerateNewDatabaseId);
        TxMerger.Start();
        ConfigurationStorage.Initialize();

        if ((options & InitializeOptions.SkipLoadingDatabaseRecord) == InitializeOptions.SkipLoadingDatabaseRecord)
            return;

        long index;
        DatabaseRecord record;
        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        using (context.OpenReadTransaction())
            record = _serverStore.Cluster.ReadDatabase(context, Name, out index);

        if (record == null)
            DatabaseDoesNotExistException.Throw(Name);

        PeriodicBackupRunner = new PeriodicBackupRunner(this, _serverStore);

        _indexStoreTask = IndexStore.InitializeAsync(record);
        ReplicationLoader?.Initialize(record);
        EtlLoader.Initialize(record);

        DocumentTombstoneCleaner.Start();

        try
        {
            _indexStoreTask.Wait(DatabaseShutdown);
        }
        finally
        {
            _indexStoreTask = null;
        }

        SubscriptionStorage.Initialize();

        NotifyFeaturesAboutStateChange(record, index);
    }
    catch (Exception)
    {
        Dispose();
        throw;
    }
}
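// The bitwise tests above assume InitializeOptions is a [Flags] enum along the following lines.
// This is a hypothetical reconstruction for illustration only; the exact member values in the
// original source may differ.
[Flags]
public enum InitializeOptions
{
    None = 0,
    GenerateNewDatabaseId = 1,
    SkipLoadingDatabaseRecord = 2
}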
public unsafe void Dispose()
{
    if (_databaseShutdown.IsCancellationRequested)
        return; // double dispose?

    lock (this)
    {
        if (_databaseShutdown.IsCancellationRequested)
            return; // double dispose?

        // before we dispose of the database we take its latest info to be displayed in the studio
        try
        {
            var databaseInfo = GenerateDatabaseInfo();
            if (databaseInfo != null)
                DatabaseInfoCache?.InsertDatabaseInfo(databaseInfo, Name);
        }
        catch (Exception e)
        {
            // if we encountered a catastrophic failure we might not be able to retrieve database info
            if (_logger.IsInfoEnabled)
                _logger.Info("Failed to generate and store database info", e);
        }

        _databaseShutdown.Cancel();

        // we'll wait for 1 minute to drain all the requests
        // from the database
        var sp = Stopwatch.StartNew();
        while (sp.ElapsedMilliseconds < 60 * 1000)
        {
            if (Interlocked.Read(ref _usages) == 0)
                break;

            if (_waitForUsagesOnDisposal.Wait(1000))
                _waitForUsagesOnDisposal.Reset();
        }

        var exceptionAggregator = new ExceptionAggregator(_logger, $"Could not dispose {nameof(DocumentDatabase)} {Name}");

        foreach (var connection in RunningTcpConnections)
        {
            exceptionAggregator.Execute(() => connection.Dispose());
        }

        exceptionAggregator.Execute(() => TxMerger?.Dispose());

        if (_indexStoreTask != null)
        {
            exceptionAggregator.Execute(() => _indexStoreTask.Wait(DatabaseShutdown));
        }

        exceptionAggregator.Execute(() => IndexStore?.Dispose());
        exceptionAggregator.Execute(() => ExpiredDocumentsCleaner?.Dispose());
        exceptionAggregator.Execute(() => PeriodicBackupRunner?.Dispose());
        exceptionAggregator.Execute(() => DocumentTombstoneCleaner?.Dispose());
        exceptionAggregator.Execute(() => ReplicationLoader?.Dispose());
        exceptionAggregator.Execute(() => EtlLoader?.Dispose());
        exceptionAggregator.Execute(() => Operations?.Dispose(exceptionAggregator));
        exceptionAggregator.Execute(() => NotificationCenter?.Dispose());
        exceptionAggregator.Execute(() => SubscriptionStorage?.Dispose());
        exceptionAggregator.Execute(() => ConfigurationStorage?.Dispose());
        exceptionAggregator.Execute(() => DocumentsStorage?.Dispose());
        exceptionAggregator.Execute(() => _databaseShutdown.Dispose());

        exceptionAggregator.Execute(() =>
        {
            if (MasterKey == null)
                return;

            // zero out the secret key before releasing the database
            fixed (byte* pKey = MasterKey)
            {
                Sodium.sodium_memzero(pKey, (UIntPtr)MasterKey.Length);
            }
        });

        exceptionAggregator.ThrowIfNeeded();
    }
}
public void Initialize(InitializeOptions options = InitializeOptions.None)
{
    try
    {
        _addToInitLog("Initializing NotificationCenter");
        NotificationCenter.Initialize(this);

        _addToInitLog("Initializing DocumentStorage");
        DocumentsStorage.Initialize((options & InitializeOptions.GenerateNewDatabaseId) == InitializeOptions.GenerateNewDatabaseId);

        _addToInitLog("Starting Transaction Merger");
        TxMerger.Start();

        _addToInitLog("Initializing ConfigurationStorage");
        ConfigurationStorage.Initialize();

        if ((options & InitializeOptions.SkipLoadingDatabaseRecord) == InitializeOptions.SkipLoadingDatabaseRecord)
            return;

        _addToInitLog("Loading Database");
        long index;
        DatabaseRecord record;
        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        using (context.OpenReadTransaction())
            record = _serverStore.Cluster.ReadDatabase(context, Name, out index);

        if (record == null)
            DatabaseDoesNotExistException.Throw(Name);

        PeriodicBackupRunner = new PeriodicBackupRunner(this, _serverStore);

        _addToInitLog("Initializing IndexStore (async)");
        _indexStoreTask = IndexStore.InitializeAsync(record);

        _addToInitLog("Initializing Replication");
        ReplicationLoader?.Initialize(record);

        _addToInitLog("Initializing ETL");
        EtlLoader.Initialize(record);

        DocumentTombstoneCleaner.Start();

        try
        {
            _indexStoreTask.Wait(DatabaseShutdown);
        }
        finally
        {
            _addToInitLog("Initializing IndexStore completed");
            _indexStoreTask = null;
        }

        SubscriptionStorage.Initialize();
        _addToInitLog("Initializing SubscriptionStorage completed");

        TaskExecutor.Execute(state =>
        {
            try
            {
                NotifyFeaturesAboutStateChange(record, index);
            }
            catch
            {
                // We ignore the exception since it was caught in the function itself
            }
        }, null);
    }
    catch (Exception)
    {
        Dispose();
        throw;
    }
}
public void Dispose()
{
    // before we dispose of the database we take its latest info to be displayed in the studio
    var databaseInfo = GenerateDatabaseInfo();
    DatabaseInfoCache?.InsertDatabaseInfo(databaseInfo, Name);

    _databaseShutdown.Cancel();

    // we'll wait for 1 minute to drain all the requests
    // from the database
    for (int i = 0; i < 60; i++)
    {
        if (Interlocked.Read(ref _usages) == 0)
            break;

        _waitForUsagesOnDisposal.Wait(100);
    }

    var exceptionAggregator = new ExceptionAggregator(_logger, $"Could not dispose {nameof(DocumentDatabase)}");

    foreach (var connection in RunningTcpConnections)
    {
        exceptionAggregator.Execute(() => connection.Dispose());
    }

    exceptionAggregator.Execute(() => TxMerger.Dispose());
    exceptionAggregator.Execute(() => DocumentReplicationLoader.Dispose());

    if (_indexStoreTask != null)
    {
        exceptionAggregator.Execute(() =>
        {
            _indexStoreTask.Wait(DatabaseShutdown);
            _indexStoreTask = null;
        });
    }

    if (_transformerStoreTask != null)
    {
        exceptionAggregator.Execute(() =>
        {
            _transformerStoreTask.Wait(DatabaseShutdown);
            _transformerStoreTask = null;
        });
    }

    exceptionAggregator.Execute(() =>
    {
        IndexStore?.Dispose();
        IndexStore = null;
    });

    exceptionAggregator.Execute(() =>
    {
        BundleLoader?.Dispose();
        BundleLoader = null;
    });

    exceptionAggregator.Execute(() =>
    {
        DocumentTombstoneCleaner?.Dispose();
        DocumentTombstoneCleaner = null;
    });

    exceptionAggregator.Execute(() =>
    {
        DocumentReplicationLoader?.Dispose();
        DocumentReplicationLoader = null;
    });

    exceptionAggregator.Execute(() =>
    {
        SqlReplicationLoader?.Dispose();
        SqlReplicationLoader = null;
    });

    exceptionAggregator.Execute(() =>
    {
        Operations?.Dispose(exceptionAggregator);
        Operations = null;
    });

    exceptionAggregator.Execute(() => SubscriptionStorage?.Dispose());
    exceptionAggregator.Execute(() => ConfigurationStorage?.Dispose());

    exceptionAggregator.Execute(() =>
    {
        DocumentsStorage?.Dispose();
        DocumentsStorage = null;
    });

    exceptionAggregator.ThrowIfNeeded();
}
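// Both Dispose implementations above rely on an aggregate-and-continue pattern: each cleanup step
// runs through ExceptionAggregator.Execute so a failing resource does not prevent the remaining
// resources from being disposed, and the collected failures are rethrown at the end.
// The following is a minimal, self-contained sketch of such a helper for illustration only;
// it is not the actual Raven.Server.Utils.ExceptionAggregator, whose implementation may differ
// (e.g. it also takes a logger, as seen in the calls above).
using System;
using System.Collections.Generic;

public class SimpleExceptionAggregator
{
    private readonly List<Exception> _exceptions = new List<Exception>();
    private readonly string _errorMessage;

    public SimpleExceptionAggregator(string errorMessage)
    {
        _errorMessage = errorMessage;
    }

    // Run a cleanup action, recording any exception instead of letting it escape
    public void Execute(Action action)
    {
        try
        {
            action();
        }
        catch (Exception e)
        {
            _exceptions.Add(e);
        }
    }

    // After all cleanup actions were attempted, surface the recorded failures together
    public void ThrowIfNeeded()
    {
        if (_exceptions.Count == 0)
            return;

        throw new AggregateException(_errorMessage, _exceptions);
    }
}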