public static void DeleteDatabaseFiles(RavenConfiguration configuration)
{
    if (configuration.Core.RunInMemory)
        return;

    IOExtensions.DeleteDirectory(configuration.Core.DataDirectory.FullPath);

    if (configuration.Storage.TempPath != null)
        IOExtensions.DeleteDirectory(configuration.Storage.TempPath.FullPath);

    if (configuration.Indexing.StoragePath != null)
        IOExtensions.DeleteDirectory(configuration.Indexing.StoragePath.FullPath);

    if (configuration.Indexing.TempPath != null)
        IOExtensions.DeleteDirectory(configuration.Indexing.TempPath.FullPath);
}
public void ForceIndexReset()
{
    try
    {
        if (analyzer != null)
            analyzer.Close();

        if (currentIndexSearcherHolder != null)
            currentIndexSearcherHolder.SetIndexSearcher(null);

        SafeDispose(crashMarker);
        SafeDispose(writer);
        SafeDispose(directory);

        IOExtensions.DeleteDirectory(indexDirectory);
    }
    finally
    {
        OpenIndexOnStartup();
    }
}
public void DeleteIndex(string name)
{
    Index value;
    if (indexes.TryGetValue(name, out value) == false)
    {
        log.Debug("Ignoring delete for non existing index {0}", name);
        return;
    }

    log.Debug("Deleting index {0}", name);
    value.Dispose();

    Index ignored;
    var dirOnDisk = Path.Combine(path, MonoHttpUtility.UrlEncode(name));

    documentDatabase.TransactionalStorage.Batch(accessor => accessor.Lists.Remove("Raven/Indexes/QueryTime", name));

    if (!indexes.TryRemove(name, out ignored) || !Directory.Exists(dirOnDisk))
        return;

    IOExtensions.DeleteDirectory(dirOnDisk);
}
public HttpResponseMessage DatabasesDelete(string id)
{
    if (IsSystemDatabase(id))
        return GetMessageWithString("System Database document cannot be deleted", HttpStatusCode.Forbidden);

    // get configuration even if the database is disabled
    var configuration = DatabasesLandlord.CreateTenantConfiguration(id, true);
    if (configuration == null)
        return GetEmptyMessage();

    var docKey = "Raven/Databases/" + id;
    Database.Documents.Delete(docKey, null, null);

    bool result;
    if (bool.TryParse(InnerRequest.RequestUri.ParseQueryString()["hard-delete"], out result) && result)
    {
        IOExtensions.DeleteDirectory(configuration.DataDirectory);

        if (configuration.IndexStoragePath != null)
            IOExtensions.DeleteDirectory(configuration.IndexStoragePath);

        if (configuration.JournalsStoragePath != null)
            IOExtensions.DeleteDirectory(configuration.JournalsStoragePath);
    }

    return GetEmptyMessage();
}
public async Task CanDumpWhenHiddenDocsWithLimit_Dumper()
{
    var backupPath = NewDataPath("BackupFolder");

    using (var server = GetNewServer())
    {
        using (var store = new DocumentStore { Url = "http://localhost:8079" }.Initialize())
        {
            InsertHidenUsers(store, 2000);

            var user1 = store.DatabaseCommands.Get("users/1");
            Assert.Null(user1);

            InsertUsers(store, 1, 25);

            // now perform full backup
            var dumper = new DataDumper(server.SystemDatabase) { SmugglerOptions = { Incremental = true } };
            await dumper.ExportData(new SmugglerExportOptions { ToFile = backupPath });
        }
    }

    VerifyDump(backupPath, store =>
    {
        using (var session = store.OpenSession())
        {
            Assert.Equal(25, session.Query<User>().Customize(x => x.WaitForNonStaleResultsAsOfNow()).Count());
        }
    });

    IOExtensions.DeleteDirectory(backupPath);
}
public async Task CanDumpAttachmentsEmpty_Smuggler()
{
    var backupPath = NewDataPath("BackupFolder");

    using (var store = NewRemoteDocumentStore())
    {
        var dumper = new SmugglerApi { SmugglerOptions = { Incremental = true, BatchSize = 100, Limit = 206 } };
        await dumper.ExportData(new SmugglerExportOptions
        {
            ToFile = backupPath,
            From = new RavenConnectionStringOptions
            {
                Url = "http://localhost:8079",
                DefaultDatabase = store.DefaultDatabase,
            }
        });
    }

    VerifyDump(backupPath, store => Assert.Equal(0, store.DatabaseCommands.GetAttachmentHeadersStartingWith("user", 0, 500).Count()));

    IOExtensions.DeleteDirectory(backupPath);
}
protected void ClearDatabaseDirectory()
{
    bool isRetry = false;

    while (true)
    {
        try
        {
            IOExtensions.DeleteDirectory(DataDir);
            break;
        }
        catch (IOException)
        {
            if (isRetry)
                throw;

            // the directory may still be held open by finalizable handles (e.g. memory-mapped files);
            // run finalizers once and retry the delete a single time before giving up
            GC.Collect();
            GC.WaitForPendingFinalizers();
            isRetry = true;
        }
    }
}
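// Note: the following helper is not part of the RavenDB sources above. It is a minimal, hypothetical
// sketch of the retry pattern a utility like IOExtensions.DeleteDirectory commonly uses, added only to
// illustrate why callers such as ClearDatabaseDirectory() can treat transient IOException /
// UnauthorizedAccessException failures as recoverable. Requires System.IO and System.Threading;
// all names below are illustrative.
public static class RobustDirectoryDelete
{
    public static void Delete(string directory, int retries = 3)
    {
        if (Directory.Exists(directory) == false)
            return;

        for (var attempt = 0; ; attempt++)
        {
            try
            {
                // clear read-only flags first, otherwise Directory.Delete throws UnauthorizedAccessException
                foreach (var file in Directory.GetFiles(directory, "*", SearchOption.AllDirectories))
                    File.SetAttributes(file, FileAttributes.Normal);

                Directory.Delete(directory, recursive: true);
                return;
            }
            catch (IOException) when (attempt < retries)
            {
                Thread.Sleep(100); // give finalizers, antivirus scanners, etc. time to release their handles
            }
            catch (UnauthorizedAccessException) when (attempt < retries)
            {
                Thread.Sleep(100);
            }
        }
    }
}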
public async Task CanDumpAttachments_Smuggler()
{
    var backupPath = NewDataPath("BackupFolder");

    using (var store = NewRemoteDocumentStore())
    {
        InsertAttachments(store, 328);

        var options = new SmugglerOptions { BackupPath = backupPath, BatchSize = 100 };
        var dumper = new SmugglerApi(options, new RavenConnectionStringOptions { Url = "http://localhost:8079", });
        var backupStatus = new PeriodicBackupStatus();
        await dumper.ExportData(null, null, true, backupStatus);
    }

    VerifyDump(backupPath, store => Assert.Equal(328, store.DatabaseCommands.GetAttachmentHeadersStartingWith("user", 0, 500).Count()));

    IOExtensions.DeleteDirectory(backupPath);
}
public WebTestFixture()
{
    if (IsIisExpressInstalled() == false)
        return;

    try
    {
        path = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString("N"));

        var from = Path.GetFullPath("../");
        IOExtensions.CopyDirectory(from, path);

        iisExpressDriver = new IISExpressDriver();
        iisExpressDriver.Start(path, Port);
    }
    catch (Exception)
    {
        // clean up the partially copied site before rethrowing
        IOExtensions.DeleteDirectory(path);
        throw;
    }
}
public async Task CanDumpAttachmentsEmpty_Dumper()
{
    var backupPath = NewDataPath("BackupFolder");

    try
    {
        using (var store = NewDocumentStore())
        {
            var dumper = new DatabaseDataDumper(store.SystemDatabase) { Options = { Incremental = true, BatchSize = 100, Limit = 206 } };
            await dumper.ExportData(new SmugglerExportOptions<RavenConnectionStringOptions> { ToFile = backupPath });
        }

        VerifyDump(backupPath, store => Assert.Equal(0, store.DatabaseCommands.GetAttachmentHeadersStartingWith("user", 0, 500).Count()));
    }
    finally
    {
        IOExtensions.DeleteDirectory(backupPath);
    }
}
public void CleanupAllClusteringData(DocumentDatabase systemDatabase)
{
    // dispose cluster manager
    Dispose();

    // delete Raft Storage
    var voronDataPath = Path.Combine(systemDatabase.Configuration.DataDirectory ?? AppDomain.CurrentDomain.BaseDirectory, "Raft");
    IOExtensions.DeleteDirectory(voronDataPath);

    // delete last applied commit
    systemDatabase.TransactionalStorage.Batch(accessor =>
    {
        accessor.Lists.Remove("Raven/Cluster", "Status");
    });

    // delete Raven-Non-Cluster-Database markers from databases settings
    int nextStart = 0;
    var databases = systemDatabase
        .Documents
        .GetDocumentsWithIdStartingWith(Constants.Database.Prefix, null, null, 0, int.MaxValue, systemDatabase.WorkContext.CancellationToken, ref nextStart);

    foreach (var database in databases)
    {
        var settings = database.Value<RavenJObject>("Settings");
        if (settings != null && settings.ContainsKey(Constants.Cluster.NonClusterDatabaseMarker))
        {
            settings.Remove(Constants.Cluster.NonClusterDatabaseMarker);
            var jsonDocument = ((RavenJObject)database).ToJsonDocument();
            systemDatabase.Documents.Put(jsonDocument.Key, jsonDocument.Etag, jsonDocument.DataAsJson, jsonDocument.Metadata, null);
        }
    }

    // deleting the replication state from the system database
    DatabasesLandlord.SystemDatabase.Documents.Delete(Constants.Cluster.ClusterReplicationStateDocumentKey, null, null);
}
private MessageWithStatusCode DeleteCounterStorage(string id, bool isHardDeleteNeeded)
{
    // get configuration even if the counter storage is disabled
    var configuration = CountersLandlord.CreateTenantConfiguration(id, true);
    if (configuration == null)
        return new MessageWithStatusCode { ErrorCode = HttpStatusCode.NotFound, Message = "Counter storage wasn't found" };

    var docKey = Constants.Counter.Prefix + id;
    SystemDatabase.Documents.Delete(docKey, null, null);

    if (isHardDeleteNeeded && configuration.Core.RunInMemory == false)
        IOExtensions.DeleteDirectory(configuration.Counter.DataDirectory);

    return new MessageWithStatusCode();
}
public static void DeleteDatabaseFiles(RavenConfiguration configuration)
{
    // we always want to try to delete the directories
    // because Voron and Periodic Backup are creating temp ones
    //if (configuration.Core.RunInMemory)
    //    return;

    IOExtensions.DeleteDirectory(configuration.Core.DataDirectory.FullPath);

    if (configuration.Storage.TempPath != null)
        IOExtensions.DeleteDirectory(configuration.Storage.TempPath.FullPath);

    if (configuration.Indexing.StoragePath != null)
        IOExtensions.DeleteDirectory(configuration.Indexing.StoragePath.FullPath);

    if (configuration.Indexing.TempPath != null)
        IOExtensions.DeleteDirectory(configuration.Indexing.TempPath.FullPath);
}
public void Dispose()
{
    documentStore.Dispose();
    ravenDbServer.Dispose();
    IOExtensions.DeleteDirectory(path);
}

public void Dispose()
{
    IOExtensions.DeleteDirectory(path);
}

public void ClearDatabase(string serverLocation)
{
    var databaseLocation = Path.Combine(serverLocation, "Tenants", "HibernatingRhinos.TestsDatabase");
    IOExtensions.DeleteDirectory(databaseLocation);
}

/// <summary>
/// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
/// </summary>
/// <filterpriority>2</filterpriority>
public void Dispose()
{
    IOExtensions.DeleteDirectory("Test");
}

public ReduceStaleness()
{
    IOExtensions.DeleteDirectory("Test");
}

public override void Dispose()
{
    IOExtensions.DeleteDirectory("Data");
    IOExtensions.DeleteDirectory("NHibernate");
    base.Dispose();
}
public override void RespondToAdmin(IHttpContext context)
{
    if (EnsureSystemDatabase(context) == false)
        return;

    var match = urlMatcher.Match(context.GetRequestUrl());
    var db = Uri.UnescapeDataString(match.Groups[1].Value);

    DatabaseDocument dbDoc;
    var docKey = "Raven/Databases/" + db;

    switch (context.Request.HttpMethod)
    {
        case "GET":
            if (db.Equals(Constants.SystemDatabase, StringComparison.OrdinalIgnoreCase))
            {
                // fetch fake (empty) system database document
                var systemDatabaseDocument = new DatabaseDocument { Id = Constants.SystemDatabase };
                var serializedDatabaseDocument = RavenJObject.FromObject(systemDatabaseDocument);
                context.WriteJson(serializedDatabaseDocument);
            }
            else
            {
                dbDoc = GetDatabaseDocument(context, docKey, db);
                context.WriteJson(dbDoc);
            }
            break;

        case "PUT":
            if (!db.Equals(Constants.SystemDatabase, StringComparison.OrdinalIgnoreCase))
            {
                dbDoc = context.ReadJsonObject<DatabaseDocument>();
                server.Protect(dbDoc);

                var json = RavenJObject.FromObject(dbDoc);
                json.Remove("Id");

                Database.Put(docKey, null, json, new RavenJObject(), null);
            }
            else
            {
                context.SetStatusToForbidden(); // forbidden to edit system database document
            }
            break;

        case "DELETE":
            if (!db.Equals(Constants.SystemDatabase, StringComparison.OrdinalIgnoreCase))
            {
                var configuration = server.CreateTenantConfiguration(db);
                var databasedocument = Database.Get(docKey, null);

                if (configuration == null)
                    return;

                Database.Delete(docKey, null, null);

                bool result;
                if (bool.TryParse(context.Request.QueryString["hard-delete"], out result) && result)
                {
                    IOExtensions.DeleteDirectory(configuration.DataDirectory);
                    IOExtensions.DeleteDirectory(configuration.IndexStoragePath);

                    if (databasedocument != null)
                    {
                        dbDoc = databasedocument.DataAsJson.JsonDeserialization<DatabaseDocument>();
                        if (dbDoc != null && dbDoc.Settings.ContainsKey(Constants.RavenLogsPath))
                            IOExtensions.DeleteDirectory(dbDoc.Settings[Constants.RavenLogsPath]);
                    }
                }
            }
            else
            {
                context.SetStatusToForbidden(); // forbidden to delete system database document
            }
            break;
    }
}
public async Task<IOperationResult> Execute(Action<IOperationProgress> onProgress)
{
    var databaseName = RestoreFromConfiguration.DatabaseName;
    var result = new RestoreResult
    {
        DataDirectory = RestoreFromConfiguration.DataDirectory
    };

    try
    {
        var filesToRestore = await GetOrderedFilesToRestore();

        using (_serverStore.ContextPool.AllocateOperationContext(out JsonOperationContext serverContext))
        {
            if (onProgress == null)
                onProgress = _ => { };

            Stopwatch sw = null;
            RestoreSettings restoreSettings = null;
            var firstFile = filesToRestore[0];
            var extension = Path.GetExtension(firstFile);
            var snapshotRestore = false;

            if ((extension == Constants.Documents.PeriodicBackup.SnapshotExtension) ||
                (extension == Constants.Documents.PeriodicBackup.EncryptedSnapshotExtension))
            {
                onProgress.Invoke(result.Progress);

                snapshotRestore = true;
                sw = Stopwatch.StartNew();

                if (extension == Constants.Documents.PeriodicBackup.EncryptedSnapshotExtension)
                {
                    _hasEncryptionKey = RestoreFromConfiguration.EncryptionKey != null ||
                                        RestoreFromConfiguration.BackupEncryptionSettings?.Key != null;
                }

                // restore the snapshot
                restoreSettings = await SnapshotRestore(serverContext, firstFile, onProgress, result);

                if (restoreSettings != null && RestoreFromConfiguration.SkipIndexes)
                {
                    // remove all indexes from the database record
                    restoreSettings.DatabaseRecord.AutoIndexes = null;
                    restoreSettings.DatabaseRecord.Indexes = null;
                }

                // removing the snapshot from the list of files
                filesToRestore.RemoveAt(0);
            }
            else
            {
                result.SnapshotRestore.Skipped = true;
                result.SnapshotRestore.Processed = true;

                onProgress.Invoke(result.Progress);
            }

            if (restoreSettings == null)
            {
                restoreSettings = new RestoreSettings
                {
                    DatabaseRecord = new DatabaseRecord(databaseName)
                    {
                        // we only have a smuggler restore
                        // use the encryption key to encrypt the database
                        Encrypted = _hasEncryptionKey
                    }
                };

                DatabaseHelper.Validate(databaseName, restoreSettings.DatabaseRecord, _serverStore.Configuration);
            }

            var databaseRecord = restoreSettings.DatabaseRecord;
            if (databaseRecord.Settings == null)
                databaseRecord.Settings = new Dictionary<string, string>();

            var runInMemoryConfigurationKey = RavenConfiguration.GetKey(x => x.Core.RunInMemory);
            databaseRecord.Settings.Remove(runInMemoryConfigurationKey);
            if (_serverStore.Configuration.Core.RunInMemory)
                databaseRecord.Settings[runInMemoryConfigurationKey] = "false";

            var dataDirectoryConfigurationKey = RavenConfiguration.GetKey(x => x.Core.DataDirectory);
            databaseRecord.Settings.Remove(dataDirectoryConfigurationKey); // removing because we want to restore to the given location, not the one serialized in the backup

            if (_restoringToDefaultDataDirectory == false)
                databaseRecord.Settings[dataDirectoryConfigurationKey] = RestoreFromConfiguration.DataDirectory;

            if (_hasEncryptionKey)
            {
                // save the encryption key so we'll be able to access the database
                _serverStore.PutSecretKey(RestoreFromConfiguration.EncryptionKey, databaseName, overwrite: false);
            }

            var addToInitLog = new Action<string>(txt => // init log is not saved in memory during RestoreBackup
            {
                var msg = $"[RestoreBackup] {DateTime.UtcNow} :: Database '{databaseName}' : {txt}";
                if (Logger.IsInfoEnabled)
                    Logger.Info(msg);
            });

            var configuration = _serverStore
                .DatabasesLandlord
                .CreateDatabaseConfiguration(databaseName, ignoreDisabledDatabase: true, ignoreBeenDeleted: true, ignoreNotRelevant: true, databaseRecord);

            using (var database = new DocumentDatabase(databaseName, configuration, _serverStore, addToInitLog))
            {
                // smuggler needs an existing document database to operate
                var options = InitializeOptions.SkipLoadingDatabaseRecord;
                if (snapshotRestore)
                    options |= InitializeOptions.GenerateNewDatabaseId;

                database.Initialize(options);

                databaseRecord.Topology = new DatabaseTopology();

                // restoring to the current node only
                databaseRecord.Topology.Members.Add(_nodeTag);
                // we are currently restoring, shouldn't try to access it
                databaseRecord.DatabaseState = DatabaseStateStatus.RestoreInProgress;

                var (index, _) = await _serverStore.WriteDatabaseRecordAsync(databaseName, databaseRecord, null, RaftIdGenerator.NewId(), restoreSettings.DatabaseValues, isRestore: true);
                await _serverStore.Cluster.WaitForIndexNotification(index);

                using (database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
                {
                    if (snapshotRestore)
                    {
                        await RestoreFromSmugglerFile(onProgress, database, firstFile, context);
                        await SmugglerRestore(database, filesToRestore, context, databaseRecord, onProgress, result);

                        result.SnapshotRestore.Processed = true;

                        var summary = database.GetDatabaseSummary();
                        result.Documents.ReadCount += summary.DocumentsCount;
                        result.Documents.Attachments.ReadCount += summary.AttachmentsCount;
                        result.Counters.ReadCount += summary.CounterEntriesCount;
                        result.RevisionDocuments.ReadCount += summary.RevisionsCount;
                        result.Conflicts.ReadCount += summary.ConflictsCount;
                        result.Indexes.ReadCount += databaseRecord.GetIndexesCount();
                        result.CompareExchange.ReadCount += summary.CompareExchangeCount;
                        result.CompareExchangeTombstones.ReadCount += summary.CompareExchangeTombstonesCount;
                        result.Identities.ReadCount += summary.IdentitiesCount;

                        result.AddInfo($"Successfully restored {result.SnapshotRestore.ReadCount} files during snapshot restore, took: {sw.ElapsedMilliseconds:#,#;;0}ms");
                        onProgress.Invoke(result.Progress);
                    }
                    else
                    {
                        await SmugglerRestore(database, filesToRestore, context, databaseRecord, onProgress, result);
                    }

                    DisableOngoingTasksIfNeeded(databaseRecord);

                    result.DatabaseRecord.Processed = true;
                    result.Documents.Processed = true;
                    result.RevisionDocuments.Processed = true;
                    result.Conflicts.Processed = true;
                    result.Indexes.Processed = true;
                    result.Counters.Processed = true;
                    result.Identities.Processed = true;
                    result.CompareExchange.Processed = true;
                    result.Subscriptions.Processed = true;

                    onProgress.Invoke(result.Progress);
                }
            }

            // after the db for restore is done, we can safely set the db state to normal and write the DatabaseRecord
            databaseRecord.DatabaseState = DatabaseStateStatus.Normal;
            var (updateIndex, _) = await _serverStore.WriteDatabaseRecordAsync(databaseName, databaseRecord, null, RaftIdGenerator.DontCareId, isRestore: true);
            await _serverStore.Cluster.WaitForIndexNotification(updateIndex);

            if (databaseRecord.Topology.RelevantFor(_serverStore.NodeTag))
            {
                // we need to wait for the database record change to be propagated properly
                var db = await _serverStore.DatabasesLandlord.TryGetOrCreateResourceStore(databaseName);
                await db.RachisLogIndexNotifications.WaitForIndexNotification(updateIndex, _operationCancelToken.Token);
            }

            return result;
        }
    }
    catch (Exception e)
    {
        if (Logger.IsOperationsEnabled)
            Logger.Operations("Failed to restore database", e);

        var alert = AlertRaised.Create(
            RestoreFromConfiguration.DatabaseName,
            "Failed to restore database",
            $"Could not restore database named {RestoreFromConfiguration.DatabaseName}",
            AlertType.RestoreError,
            NotificationSeverity.Error,
            details: new ExceptionDetails(e));
        _serverStore.NotificationCenter.Add(alert);

        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        {
            bool databaseExists;
            using (context.OpenReadTransaction())
            {
                databaseExists = _serverStore.Cluster.DatabaseExists(context, RestoreFromConfiguration.DatabaseName);
            }

            if (databaseExists == false)
            {
                // delete any files that we already created during the restore
                IOExtensions.DeleteDirectory(RestoreFromConfiguration.DataDirectory);
            }
            else
            {
                var deleteResult = await _serverStore.DeleteDatabaseAsync(RestoreFromConfiguration.DatabaseName, true, new[] { _serverStore.NodeTag }, RaftIdGenerator.DontCareId);
                await _serverStore.Cluster.WaitForIndexNotification(deleteResult.Index);
            }
        }

        result.AddError($"Error occurred during restore of database {databaseName}. Exception: {e.Message}");
        onProgress.Invoke(result.Progress);
        throw;
    }
    finally
    {
        Dispose();
    }
}
private void OpenIndexOnStartup(string indexName)
{
    if (indexName == null)
        throw new ArgumentNullException("indexName");

    startupLog.Debug("Loading saved index {0}", indexName);

    var indexDefinition = indexDefinitionStorage.GetIndexDefinition(indexName);
    if (indexDefinition == null)
        return;

    Index indexImplementation;
    bool resetTried = false;

    while (true)
    {
        try
        {
            var luceneDirectory = OpenOrCreateLuceneDirectory(indexDefinition, createIfMissing: resetTried);
            indexImplementation = CreateIndexImplementation(indexName, indexDefinition, luceneDirectory);

            LoadExistingSuggesionsExtentions(indexName, indexImplementation);

            documentDatabase.TransactionalStorage.Batch(accessor =>
            {
                var read = accessor.Lists.Read("Raven/Indexes/QueryTime", indexName);
                if (read == null)
                    return;

                var dateTime = read.Data.Value<DateTime>("LastQueryTime");
                indexImplementation.MarkQueried(dateTime);

                if (dateTime > latestPersistedQueryTime)
                    latestPersistedQueryTime = dateTime;
            });

            break;
        }
        catch (Exception e)
        {
            if (resetTried)
                throw new InvalidOperationException("Could not open / create index" + indexName + ", reset already tried", e);

            resetTried = true;
            startupLog.WarnException("Could not open index " + indexName + ", forcibly resetting index", e);

            try
            {
                documentDatabase.TransactionalStorage.Batch(accessor =>
                {
                    accessor.Indexing.DeleteIndex(indexName);
                    accessor.Indexing.AddIndex(indexName, indexDefinition.IsMapReduce);
                });

                var indexDirectory = indexName;
                var indexFullPath = Path.Combine(path, MonoHttpUtility.UrlEncode(indexDirectory));
                IOExtensions.DeleteDirectory(indexFullPath);
            }
            catch (Exception exception)
            {
                throw new InvalidOperationException("Could not reset index " + indexName, exception);
            }
        }
    }

    indexes.TryAdd(indexName, indexImplementation);
}
public EncryptedFileSystemBackupRestore()
{
    IOExtensions.DeleteDirectory(backupDir);
}
public void AfterFailedRestoreOfIndex_ShouldGenerateWarningAndResetIt()
{
    using (var db = new DocumentDatabase(new RavenConfiguration
    {
        DataDirectory = DataDir,
        RunInUnreliableYetFastModeThatIsNotSuitableForProduction = false,
        Settings = { { "Raven/Esent/CircularLog", "false" } }
    }))
    {
        db.SpinBackgroundWorkers();
        db.PutIndex(new RavenDocumentsByEntityName().IndexName, new RavenDocumentsByEntityName().CreateIndexDefinition());

        db.Put("users/1", null, RavenJObject.Parse("{'Name':'Arek'}"), RavenJObject.Parse("{'Raven-Entity-Name':'Users'}"), null);
        db.Put("users/2", null, RavenJObject.Parse("{'Name':'David'}"), RavenJObject.Parse("{'Raven-Entity-Name':'Users'}"), null);

        WaitForIndexing(db);

        db.StartBackup(BackupDir, false, new DatabaseDocument());
        WaitForBackup(db, true);

        db.Put("users/3", null, RavenJObject.Parse("{'Name':'Daniel'}"), RavenJObject.Parse("{'Raven-Entity-Name':'Users'}"), null);

        WaitForIndexing(db);

        db.StartBackup(BackupDir, true, new DatabaseDocument());
        WaitForBackup(db, true);
    }

    IOExtensions.DeleteDirectory(DataDir);

    var incrementalDirectories = Directory.GetDirectories(BackupDir, "Inc*");

    // delete 'index-files.required-for-index-restore' to make backup corrupted according to the reported error
    File.Delete(Path.Combine(incrementalDirectories.First(), "Indexes\\Raven%2fDocumentsByEntityName\\index-files.required-for-index-restore"));

    var sb = new StringBuilder();

    DocumentDatabase.Restore(new RavenConfiguration(), BackupDir, DataDir, s => sb.Append(s), defrag: true);

    Assert.Contains(
        "Error: Index Raven%2fDocumentsByEntityName could not be restored. All already copied index files was deleted." +
        " Index will be recreated after launching Raven instance",
        sb.ToString());

    using (var db = new DocumentDatabase(new RavenConfiguration { DataDirectory = DataDir }))
    {
        db.SpinBackgroundWorkers();

        QueryResult queryResult;
        do
        {
            queryResult = db.Query("Raven/DocumentsByEntityName", new IndexQuery { Query = "Tag:[[Users]]", PageSize = 10 });
        } while (queryResult.IsStale);

        Assert.Equal(3, queryResult.Results.Count);
    }
}
public async Task Execute(Action<IOperationProgress> onProgress, CompactionResult result)
{
    if (_isCompactionInProgress)
        throw new InvalidOperationException($"Database '{_database}' cannot be compacted because compaction is already in progress.");

    result.AddMessage($"Started database compaction for {_database}");
    onProgress?.Invoke(result);

    _isCompactionInProgress = true;

    bool done = false;
    string compactDirectory = null;
    string tmpDirectory = null;

    try
    {
        var documentDatabase = await _serverStore.DatabasesLandlord.TryGetOrCreateResourceStore(_database);
        var configuration = _serverStore.DatabasesLandlord.CreateDatabaseConfiguration(_database);

        using (await _serverStore.DatabasesLandlord.UnloadAndLockDatabase(_database, "it is being compacted"))
        using (var src = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
            new CatastrophicFailureNotification((endId, path, exception) => throw new InvalidOperationException($"Failed to compact database {_database} ({path})", exception))))
        {
            InitializeOptions(src, configuration, documentDatabase);
            DirectoryExecUtils.SubscribeToOnDirectoryInitializeExec(src, configuration.Storage, documentDatabase.Name, DirectoryExecUtils.EnvironmentType.Compaction, Logger);

            var basePath = configuration.Core.DataDirectory.FullPath;
            compactDirectory = basePath + "-compacting";
            tmpDirectory = basePath + "-old";

            EnsureDirectoriesPermission(basePath, compactDirectory, tmpDirectory);

            IOExtensions.DeleteDirectory(compactDirectory);
            IOExtensions.DeleteDirectory(tmpDirectory);

            configuration.Core.DataDirectory = new PathSetting(compactDirectory);

            using (var dst = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
                new CatastrophicFailureNotification((envId, path, exception) => throw new InvalidOperationException($"Failed to compact database {_database} ({path})", exception))))
            {
                InitializeOptions(dst, configuration, documentDatabase);
                DirectoryExecUtils.SubscribeToOnDirectoryInitializeExec(dst, configuration.Storage, documentDatabase.Name, DirectoryExecUtils.EnvironmentType.Compaction, Logger);

                _token.ThrowIfCancellationRequested();

                StorageCompaction.Execute(src, (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)dst, progressReport =>
                {
                    result.Progress.TreeProgress = progressReport.TreeProgress;
                    result.Progress.TreeTotal = progressReport.TreeTotal;
                    result.Progress.TreeName = progressReport.TreeName;
                    result.Progress.GlobalProgress = progressReport.GlobalProgress;
                    result.Progress.GlobalTotal = progressReport.GlobalTotal;
                    result.AddMessage(progressReport.Message);

                    onProgress?.Invoke(result);
                }, _token);
            }

            result.TreeName = null;

            _token.ThrowIfCancellationRequested();

            EnsureDirectoriesPermission(basePath, compactDirectory, tmpDirectory);
            IOExtensions.DeleteDirectory(tmpDirectory);

            SwitchDatabaseDirectories(basePath, tmpDirectory, compactDirectory);
            done = true;
        }
    }
    catch (Exception e)
    {
        throw new InvalidOperationException($"Failed to execute compaction for {_database}", e);
    }
    finally
    {
        IOExtensions.DeleteDirectory(compactDirectory);

        if (done)
            IOExtensions.DeleteDirectory(tmpDirectory);

        _isCompactionInProgress = false;
    }
}
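// Note: SwitchDatabaseDirectories is referenced but not shown in the compaction snippet above. The
// following is a hypothetical sketch (not RavenDB's actual implementation) of the usual swap step after
// a compaction: the live data directory is moved aside to the "-old" directory, the "-compacting" output
// takes its place, and only afterwards is the old data deleted (handled above in the finally block when
// done == true). All names here are illustrative.
private static void SwitchDirectoriesSketch(string basePath, string oldDirectory, string compactedDirectory)
{
    // park the current (uncompacted) data so it can still be recovered if the next step fails
    Directory.Move(basePath, oldDirectory);

    try
    {
        // promote the compacted output to be the live data directory
        Directory.Move(compactedDirectory, basePath);
    }
    catch
    {
        // roll back so the database is left pointing at its original, uncompacted data
        Directory.Move(oldDirectory, basePath);
        throw;
    }
}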
public override void Dispose()
{
    IOExtensions.DeleteDirectory(path);
    base.Dispose();
}
public void ShouldNotSetAutoIndexesToAbandonedPriorityAfterDatabaseRecovery()
{
    using (var db = new DocumentDatabase(new RavenConfiguration
    {
        DataDirectory = DataDir,
        RunInUnreliableYetFastModeThatIsNotSuitableForProduction = false
    }))
    {
        db.SpinBackgroundWorkers();
        db.Indexes.PutIndex(new RavenDocumentsByEntityName().IndexName, new RavenDocumentsByEntityName().CreateIndexDefinition());

        db.Documents.Put("users/1", null, RavenJObject.Parse("{'Name':'Arek'}"), RavenJObject.Parse("{'Raven-Entity-Name':'Users'}"), null);
        db.Documents.Put("users/2", null, RavenJObject.Parse("{'Name':'David'}"), RavenJObject.Parse("{'Raven-Entity-Name':'Users'}"), null);

        var results = db.ExecuteDynamicQuery("Users", new IndexQuery()
        {
            PageSize = 128,
            Start = 0,
            Cutoff = SystemTime.UtcNow,
            Query = "Name:Arek"
        }, CancellationToken.None);

        WaitForIndexing(db);

        var autoIdexes = db.Statistics.Indexes.Where(x => x.Name.StartsWith("Auto")).ToList();
        Assert.True(autoIdexes.Count > 0);

        autoIdexes.ForEach(x => db.TransactionalStorage.Batch(accessor => accessor.Indexing.SetIndexPriority(x.Id, IndexingPriority.Idle)));

        db.Maintenance.StartBackup(BackupDir, false, new DatabaseDocument());
        WaitForBackup(db, true);
    }

    IOExtensions.DeleteDirectory(DataDir);

    MaintenanceActions.Restore(new RavenConfiguration(), new DatabaseRestoreRequest
    {
        BackupLocation = BackupDir,
        DatabaseLocation = DataDir,
        Defrag = true
    }, s => { });

    using (var db = new DocumentDatabase(new RavenConfiguration
    {
        DataDirectory = DataDir,
        RunInUnreliableYetFastModeThatIsNotSuitableForProduction = false,
    }))
    {
        db.SpinBackgroundWorkers();
        db.RunIdleOperations();

        var autoIndexes = db.Statistics.Indexes.Where(x => x.Name.StartsWith("Auto")).ToList();
        Assert.True(autoIndexes.Count > 0);

        foreach (var indexStats in autoIndexes)
        {
            Assert.NotEqual(indexStats.Priority, IndexingPriority.Abandoned);
        }
    }
}
public async Task<IOperationResult> Execute(Action<IOperationProgress> onProgress)
{
    var databaseName = _restoreConfiguration.DatabaseName;
    var result = new RestoreResult
    {
        DataDirectory = _restoreConfiguration.DataDirectory
    };

    try
    {
        if (onProgress == null)
            onProgress = _ => { };

        Stopwatch sw = null;
        RestoreSettings restoreSettings = null;
        var firstFile = _filesToRestore[0];
        var lastFile = _filesToRestore.Last();
        var extension = Path.GetExtension(firstFile);
        var snapshotRestore = false;

        if (extension == Constants.Documents.PeriodicBackup.SnapshotExtension)
        {
            onProgress.Invoke(result.Progress);

            snapshotRestore = true;
            sw = Stopwatch.StartNew();

            // restore the snapshot
            restoreSettings = SnapshotRestore(firstFile, _restoreConfiguration.DataDirectory, onProgress, result);

            // removing the snapshot from the list of files
            _filesToRestore.RemoveAt(0);
        }
        else
        {
            result.SnapshotRestore.Skipped = true;
            result.SnapshotRestore.Processed = true;

            onProgress.Invoke(result.Progress);
        }

        if (restoreSettings == null)
        {
            restoreSettings = new RestoreSettings
            {
                DatabaseRecord = new DatabaseRecord(databaseName)
                {
                    // we only have a smuggler restore
                    // use the encryption key to encrypt the database
                    Encrypted = _hasEncryptionKey
                }
            };

            DatabaseHelper.Validate(databaseName, restoreSettings.DatabaseRecord, _serverStore.Configuration);
        }

        var databaseRecord = restoreSettings.DatabaseRecord;
        if (databaseRecord.Settings == null)
            databaseRecord.Settings = new Dictionary<string, string>();

        var runInMemoryConfigurationKey = RavenConfiguration.GetKey(x => x.Core.RunInMemory);
        databaseRecord.Settings.Remove(runInMemoryConfigurationKey);
        if (_serverStore.Configuration.Core.RunInMemory)
            databaseRecord.Settings[runInMemoryConfigurationKey] = "false";

        var dataDirectoryConfigurationKey = RavenConfiguration.GetKey(x => x.Core.DataDirectory);
        databaseRecord.Settings.Remove(dataDirectoryConfigurationKey); // removing because we want to restore to the given location, not the one serialized in the backup

        if (_restoringToDefaultDataDirectory == false)
            databaseRecord.Settings[dataDirectoryConfigurationKey] = _restoreConfiguration.DataDirectory;

        if (_hasEncryptionKey)
        {
            // save the encryption key so we'll be able to access the database
            _serverStore.PutSecretKey(_restoreConfiguration.EncryptionKey, databaseName, overwrite: false);
        }

        var addToInitLog = new Action<string>(txt => // init log is not saved in memory during RestoreBackup
        {
            var msg = $"[RestoreBackup] {DateTime.UtcNow} :: Database '{databaseName}' : {txt}";
            if (Logger.IsInfoEnabled)
                Logger.Info(msg);
        });

        using (var database = new DocumentDatabase(databaseName,
            new RavenConfiguration(databaseName, ResourceType.Database)
            {
                Core =
                {
                    DataDirectory = new PathSetting(_restoreConfiguration.DataDirectory),
                    RunInMemory = false
                }
            }, _serverStore, addToInitLog))
        {
            // smuggler needs an existing document database to operate
            var options = InitializeOptions.SkipLoadingDatabaseRecord;
            if (snapshotRestore)
                options |= InitializeOptions.GenerateNewDatabaseId;

            database.Initialize(options);

            if (snapshotRestore)
            {
                result.SnapshotRestore.Processed = true;

                var summary = database.GetDatabaseSummary();
                result.Documents.ReadCount += summary.DocumentsCount;
                result.Documents.Attachments.ReadCount += summary.AttachmentsCount;
                result.Counters.ReadCount += summary.CountersCount;
                result.RevisionDocuments.ReadCount += summary.RevisionsCount;
                result.Conflicts.ReadCount += summary.ConflictsCount;
                result.Indexes.ReadCount += databaseRecord.GetIndexesCount();

                result.AddInfo($"Successfully restored {result.SnapshotRestore.ReadCount} " +
                               $"files during snapshot restore, took: {sw.ElapsedMilliseconds:#,#;;0}ms");
                onProgress.Invoke(result.Progress);
            }

            using (database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
            {
                SmugglerRestore(_restoreConfiguration.BackupLocation, database, context, databaseRecord, onProgress, result);

                result.DatabaseRecord.Processed = true;
                result.Documents.Processed = true;
                result.RevisionDocuments.Processed = true;
                result.Conflicts.Processed = true;
                result.Indexes.Processed = true;
                result.Counters.Processed = true;

                onProgress.Invoke(result.Progress);

                databaseRecord.Topology = new DatabaseTopology();

                // restoring to the current node only
                databaseRecord.Topology.Members.Add(_nodeTag);

                databaseRecord.Disabled = true; // we are currently restoring, shouldn't try to access it

                _serverStore.EnsureNotPassive();

                DisableOngoingTasksIfNeeded(databaseRecord);

                var (index, _) = await _serverStore.WriteDatabaseRecordAsync(databaseName, databaseRecord, null, restoreSettings.DatabaseValues, isRestore: true);
                await _serverStore.Cluster.WaitForIndexNotification(index);

                // restore identities & cmpXchg values
                RestoreFromLastFile(onProgress, database, lastFile, context, result);
            }
        }

        // after the db for restore is done, we can safely set the db status to active
        databaseRecord = _serverStore.LoadDatabaseRecord(databaseName, out _);
        databaseRecord.Disabled = false;

        var (updateIndex, _) = await _serverStore.WriteDatabaseRecordAsync(databaseName, databaseRecord, null);
        await _serverStore.Cluster.WaitForIndexNotification(updateIndex);

        return result;
    }
    catch (Exception e)
    {
        if (Logger.IsOperationsEnabled)
            Logger.Operations("Failed to restore database", e);

        var alert = AlertRaised.Create(
            _restoreConfiguration.DatabaseName,
            "Failed to restore database",
            $"Could not restore database named {_restoreConfiguration.DatabaseName}",
            AlertType.RestoreError,
            NotificationSeverity.Error,
            details: new ExceptionDetails(e));
        _serverStore.NotificationCenter.Add(alert);

        if (_serverStore.LoadDatabaseRecord(_restoreConfiguration.DatabaseName, out var _) == null)
        {
            // delete any files that we already created during the restore
            IOExtensions.DeleteDirectory(_restoreConfiguration.DataDirectory);
        }
        else
        {
            var deleteResult = await _serverStore.DeleteDatabaseAsync(_restoreConfiguration.DatabaseName, true, new[] { _serverStore.NodeTag });
            await _serverStore.Cluster.WaitForIndexNotification(deleteResult.Index);
        }

        result.AddError($"Error occurred during restore of database {databaseName}. Exception: {e.Message}");
        onProgress.Invoke(result.Progress);
        throw;
    }
    finally
    {
        _operationCancelToken.Dispose();
    }
}
public void Export_And_Import_Incremental_Documents()
{
    var file = Path.Combine(NewDataPath(), "Incremental");
    IOExtensions.DeleteDirectory(file);

    using (var session = documentStore.OpenSession())
    {
        var foo = new Foo { Something = "Something1", Id = "Test/1" };
        session.Store(foo);
        session.SaveChanges();
    }

    var connection = new RavenConnectionStringOptions { Url = "http://localhost:8079/" };
    var smugglerApi = new SmugglerApi
    {
        SmugglerOptions =
        {
            OperateOnTypes = ItemType.Documents | ItemType.Indexes | ItemType.Attachments,
            Incremental = true
        }
    };

    smugglerApi.ExportData(new SmugglerExportOptions
    {
        ToFile = file,
        From = connection,
    }).Wait(TimeSpan.FromSeconds(15));

    using (var session = documentStore.OpenSession())
    {
        var foo = new Foo { Something = "Something2", Id = "Test/2" };
        session.Store(foo);
        session.SaveChanges();
    }

    smugglerApi.ExportData(new SmugglerExportOptions
    {
        ToFile = file,
        From = connection,
    }).Wait(TimeSpan.FromSeconds(15));

    server.Dispose();
    CreateServer();

    smugglerApi.SmugglerOptions.Incremental = true;
    smugglerApi.ImportData(new SmugglerImportOptions { FromFile = file, To = connection }).Wait(TimeSpan.FromSeconds(15));

    using (var session = documentStore.OpenSession())
    {
        var doc = session.Load<Foo>("Test/1");
        Assert.Equal(doc.Something, "Something1");

        doc = session.Load<Foo>("Test/2");
        Assert.Equal(doc.Something, "Something2");
    }
}
protected void ClearDatabaseDirectory()
{
    IOExtensions.DeleteDirectory(DbName);
    IOExtensions.DeleteDirectory(DbDirectory);
}