/// <summary>
/// Serializes an index's type and definition into the supplied JSON writer.
/// Faulty indexes are skipped entirely (nothing is written for them).
/// </summary>
public static void Export(BlittableJsonTextWriter writer, Index index, JsonOperationContext context, bool removeAnalyzers)
{
    // A faulty index has no usable definition to export.
    if (index.Type == IndexType.Faulty)
        return;

    writer.WriteStartObject();

    writer.WritePropertyName(nameof(IndexDefinition.Type));
    writer.WriteString(index.Type.ToString());
    writer.WriteComma();

    writer.WritePropertyName(nameof(IndexDefinition));

    switch (index.Type)
    {
        case IndexType.Map:
        case IndexType.MapReduce:
        case IndexType.JavaScriptMap:
        case IndexType.JavaScriptMapReduce:
            // Static indexes: export the full user-visible definition.
            writer.WriteIndexDefinition(context, index.GetIndexDefinition(), removeAnalyzers);
            break;

        case IndexType.AutoMap:
        case IndexType.AutoMapReduce:
            // Auto indexes persist their internal definition directly.
            index.Definition.Persist(context, writer);
            break;

        default:
            throw new NotSupportedException(index.Type.ToString());
    }

    writer.WriteEndObject();
}
/// <summary>
/// Runs a patch script against every document matched by <paramref name="query"/>
/// on <paramref name="index"/>, reporting progress through <paramref name="onProgress"/>.
/// </summary>
protected Task<IOperationResult> ExecutePatch(IndexQueryServerSide query, Index index, QueryOperationOptions options, PatchRequest patch, BlittableJsonReaderObject patchArgs, DocumentsOperationContext context, Action<DeterminateProgress> onProgress, OperationCancelToken token)
{
    return ExecuteOperation(query, index, options, context, onProgress, (key, retrieveDetails) =>
    {
        // Patch unconditionally: no expected change vector, no skip-on-mismatch.
        // Results are collected so per-document details can be surfaced when requested.
        var command = new PatchDocumentCommand(
            context,
            key,
            expectedChangeVector: null,
            skipPatchIfChangeVectorMismatch: false,
            patch: (patch, patchArgs),
            patchIfMissing: (null, null),
            database: Database,
            debugMode: false,
            isTest: false,
            collectResultsNeeded: true,
            returnDocument: false);

        return new BulkOperationCommand<PatchDocumentCommand>(
            command,
            retrieveDetails,
            x => new BulkOperationResult.PatchDetails
            {
                Id = key,
                ChangeVector = x.PatchResult.ChangeVector,
                Status = x.PatchResult.Status
            },
            c => c.PatchResult?.Dispose());
    }, token);
}
// Wraps the query in a CachingQuery unless clause caching is disabled on this index's configuration.
private static Query MaybeCacheQuery(Index index, Query query)
{
    return index.Configuration.QueryClauseCacheDisabled
        ? query
        : new CachingQuery(query, index, query.ToString());
}
// "field != value" expressed as: match all documents, then exclude the ones
// where field falls in the degenerate inclusive range [value, value].
public static Query NotEqual(Index index, string fieldName, double value)
{
    var result = new BooleanQuery();
    result.Add(new MatchAllDocsQuery(), Occur.MUST);
    result.Add(CreateRange(index, fieldName, value, true, value, true), Occur.MUST_NOT);
    return result;
}
/// <summary>
/// Deletes every document matched by <paramref name="query"/> on <paramref name="index"/>,
/// reporting progress through <paramref name="onProgress"/>.
/// </summary>
protected Task<IOperationResult> ExecuteDelete(IndexQueryServerSide query, Index index, QueryOperationOptions options, DocumentsOperationContext context, Action<DeterminateProgress> onProgress, OperationCancelToken token)
{
    return ExecuteOperation(query, index, options, context, onProgress, (key, retrieveDetails) =>
    {
        // Unconditional delete: no expected change vector.
        var command = new DeleteDocumentCommand(key, null, Database);

        return new BulkOperationCommand<DeleteDocumentCommand>(
            command,
            retrieveDetails,
            x => new BulkOperationResult.DeleteDetails
            {
                Id = key,
                Etag = x.DeleteResult?.Etag
            },
            null);
    }, token);
}
// Builds a term-range query over string terms. A bound is treated as "open"
// when its type is Null or its value is the asterisk wildcard.
private static Query CreateRange(Index index, string fieldName, string minValue, LuceneTermType minValueType, bool inclusiveMin, string maxValue, LuceneTermType maxValueType, bool inclusiveMax, bool exact)
{
    var openMin = minValueType == LuceneTermType.Null || minValue.Equals(Asterisk);
    var openMax = maxValueType == LuceneTermType.Null || maxValue.Equals(Asterisk);

    // Both bounds open -> the range matches every term; a plain wildcard query
    // is used here (and deliberately not routed through the query cache).
    if (openMin && openMax)
        return new WildcardQuery(new Term(fieldName, Asterisk));

    var lowerTerm = openMin ? null : GetTermValue(minValue, minValueType, exact);
    var upperTerm = openMax ? null : GetTermValue(maxValue, maxValueType, exact);

    var range = new TermRangeQuery(fieldName, lowerTerm, upperTerm, inclusiveMin, inclusiveMax);
    return MaybeCacheQuery(index, range);
}
/// <summary>
/// Validates and executes a suggestion query against <paramref name="index"/>.
/// Returns a not-modified result when the caller's etag still matches.
/// </summary>
protected async Task<SuggestionQueryResult> ExecuteSuggestion(
    IndexQueryServerSide query,
    Index index,
    DocumentsOperationContext documentsContext,
    long? existingResultEtag,
    OperationCancelToken token)
{
    if (query.Metadata.SelectFields.Length == 0)
        throw new InvalidQueryException("Suggestion query must have at least one suggest token in SELECT.", query.Metadata.QueryText, query.QueryParameters);

    var indexFields = index.Definition.IndexFields;

    // Every selected field must be a suggest token targeting a field that both
    // exists on the index and has suggestions enabled.
    foreach (var selected in query.Metadata.SelectFields)
    {
        if (selected.IsSuggest == false)
            throw new InvalidQueryException("Suggestion query must have only suggest tokens in SELECT.", query.Metadata.QueryText, query.QueryParameters);

        var selectField = (SuggestionField)selected;

        if (indexFields.TryGetValue(selectField.Name, out var indexField) == false)
            throw new InvalidOperationException($"Index '{index.Name}' does not have a field '{selectField.Name}'.");

        if (indexField.HasSuggestions == false)
            throw new InvalidOperationException($"Index '{index.Name}' does not have suggestions configured for field '{selectField.Name}'.");
    }

    // Short-circuit when the client already holds an up-to-date result.
    if (existingResultEtag.HasValue && index.GetIndexEtag(query.Metadata) == existingResultEtag.Value)
        return SuggestionQueryResult.NotModifiedResult;

    return await index.SuggestionQuery(query, documentsContext, token);
}
/// <summary>
/// Restores a database from an ordered set of backup files: an optional leading
/// snapshot (possibly encrypted) followed by smuggler (incremental) files.
/// Builds/patches the DatabaseRecord, creates a local DocumentDatabase over the
/// restore directory, imports the data, then flips the database state back to
/// Normal. On failure the partially restored database is deleted (or its files
/// removed) and the exception is rethrown.
/// </summary>
public async Task <IOperationResult> Execute(Action <IOperationProgress> onProgress)
{
    var databaseName = RestoreFromConfiguration.DatabaseName;
    var result = new RestoreResult
    {
        DataDirectory = RestoreFromConfiguration.DataDirectory
    };
    try
    {
        var filesToRestore = await GetOrderedFilesToRestore();
        using (_serverStore.ContextPool.AllocateOperationContext(out JsonOperationContext serverContext))
        {
            if (onProgress == null)
            {
                onProgress = _ => { }   // no-op progress callback when none supplied
            }
            ;   // NOTE(review): displaced semicolon — formatting artifact preserved from source

            Stopwatch sw = null;
            RestoreSettings restoreSettings = null;
            // The first file decides the restore mode: snapshot vs. smuggler-only.
            var firstFile = filesToRestore[0];
            var extension = Path.GetExtension(firstFile);
            var snapshotRestore = false;
            if ((extension == Constants.Documents.PeriodicBackup.SnapshotExtension) ||
                (extension == Constants.Documents.PeriodicBackup.EncryptedSnapshotExtension))
            {
                onProgress.Invoke(result.Progress);
                snapshotRestore = true;
                sw = Stopwatch.StartNew();
                if (extension == Constants.Documents.PeriodicBackup.EncryptedSnapshotExtension)
                {
                    _hasEncryptionKey = RestoreFromConfiguration.EncryptionKey != null ||
                                        RestoreFromConfiguration.BackupEncryptionSettings?.Key != null;
                }
                // restore the snapshot
                restoreSettings = await SnapshotRestore(serverContext, firstFile, onProgress, result);
                if (restoreSettings != null && RestoreFromConfiguration.SkipIndexes)
                {
                    // remove all indexes from the database record
                    restoreSettings.DatabaseRecord.AutoIndexes = null;
                    restoreSettings.DatabaseRecord.Indexes = null;
                }
                // removing the snapshot from the list of files
                filesToRestore.RemoveAt(0);
            }
            else
            {
                result.SnapshotRestore.Skipped = true;
                result.SnapshotRestore.Processed = true;
                onProgress.Invoke(result.Progress);
            }

            if (restoreSettings == null)
            {
                restoreSettings = new RestoreSettings
                {
                    DatabaseRecord = new DatabaseRecord(databaseName)
                    {
                        // we only have a smuggler restore
                        // use the encryption key to encrypt the database
                        Encrypted = _hasEncryptionKey
                    }
                };
                DatabaseHelper.Validate(databaseName, restoreSettings.DatabaseRecord, _serverStore.Configuration);
            }

            var databaseRecord = restoreSettings.DatabaseRecord;
            databaseRecord.Settings ??= new Dictionary <string, string>();

            // Force on-disk storage for the restored database even if the server runs in memory.
            var runInMemoryConfigurationKey = RavenConfiguration.GetKey(x => x.Core.RunInMemory);
            databaseRecord.Settings.Remove(runInMemoryConfigurationKey);
            if (_serverStore.Configuration.Core.RunInMemory)
            {
                databaseRecord.Settings[runInMemoryConfigurationKey] = "false";
            }

            var dataDirectoryConfigurationKey = RavenConfiguration.GetKey(x => x.Core.DataDirectory);
            databaseRecord.Settings.Remove(dataDirectoryConfigurationKey);   // removing because we want to restore to given location, not to serialized in backup one
            if (_restoringToDefaultDataDirectory == false)
            {
                databaseRecord.Settings[dataDirectoryConfigurationKey] = RestoreFromConfiguration.DataDirectory;
            }

            if (_hasEncryptionKey)
            {
                // save the encryption key so we'll be able to access the database
                _serverStore.PutSecretKey(RestoreFromConfiguration.EncryptionKey, databaseName, overwrite: false);
            }

            var addToInitLog = new Action <string>(txt =>   // init log is not save in mem during RestoreBackup
            {
                var msg = $"[RestoreBackup] {DateTime.UtcNow} :: Database '{databaseName}' : {txt}";
                if (Logger.IsInfoEnabled)
                {
                    Logger.Info(msg);
                }
            });

            var configuration = _serverStore
                .DatabasesLandlord
                .CreateDatabaseConfiguration(databaseName, ignoreDisabledDatabase: true, ignoreBeenDeleted: true, ignoreNotRelevant: true, databaseRecord);

            using (var database = new DocumentDatabase(databaseName, configuration, _serverStore, addToInitLog))
            {
                // smuggler needs an existing document database to operate
                var options = InitializeOptions.SkipLoadingDatabaseRecord;
                if (snapshotRestore)
                {
                    options |= InitializeOptions.GenerateNewDatabaseId;
                }
                database.Initialize(options);
                databaseRecord.Topology = new DatabaseTopology();
                // restoring to the current node only
                databaseRecord.Topology.Members.Add(_nodeTag);
                // we are currently restoring, shouldn't try to access it
                databaseRecord.DatabaseState = DatabaseStateStatus.RestoreInProgress;
                await SaveDatabaseRecordAsync(databaseName, databaseRecord, restoreSettings.DatabaseValues, result, onProgress);

                using (database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
                {
                    if (snapshotRestore)
                    {
                        await RestoreFromSmugglerFile(onProgress, database, firstFile, context);
                        await SmugglerRestore(database, filesToRestore, context, databaseRecord, onProgress, result);

                        result.SnapshotRestore.Processed = true;

                        // Snapshot restore bypasses the smuggler counters, so the totals
                        // are read back from the restored database's summary.
                        var summary = database.GetDatabaseSummary();
                        result.Documents.ReadCount += summary.DocumentsCount;
                        result.Documents.Attachments.ReadCount += summary.AttachmentsCount;
                        result.Counters.ReadCount += summary.CounterEntriesCount;
                        result.RevisionDocuments.ReadCount += summary.RevisionsCount;
                        result.Conflicts.ReadCount += summary.ConflictsCount;
                        result.Indexes.ReadCount += databaseRecord.GetIndexesCount();
                        result.CompareExchange.ReadCount += summary.CompareExchangeCount;
                        result.CompareExchangeTombstones.ReadCount += summary.CompareExchangeTombstonesCount;
                        result.Identities.ReadCount += summary.IdentitiesCount;
                        result.TimeSeries.ReadCount += summary.TimeSeriesSegmentsCount;

                        result.AddInfo($"Successfully restored {result.SnapshotRestore.ReadCount} files during snapshot restore, took: {sw.ElapsedMilliseconds:#,#;;0}ms");
                        onProgress.Invoke(result.Progress);
                    }
                    else
                    {
                        await SmugglerRestore(database, filesToRestore, context, databaseRecord, onProgress, result);
                    }

                    DisableOngoingTasksIfNeeded(databaseRecord);

                    Raven.Server.Smuggler.Documents.DatabaseSmuggler.EnsureProcessed(result, skipped: false);

                    onProgress.Invoke(result.Progress);
                }

                if (snapshotRestore)
                {
                    RegenerateDatabaseIdInIndexes(configuration, database);
                }
            }

            // after the db for restore is done, we can safely set the db state to normal and write the DatabaseRecord
            databaseRecord.DatabaseState = DatabaseStateStatus.Normal;
            await SaveDatabaseRecordAsync(databaseName, databaseRecord, null, result, onProgress);

            return (result);
        }
    }
    catch (Exception e)
    {
        if (Logger.IsOperationsEnabled)
        {
            Logger.Operations("Failed to restore database", e);
        }

        var alert = AlertRaised.Create(
            RestoreFromConfiguration.DatabaseName,
            "Failed to restore database",
            $"Could not restore database named {RestoreFromConfiguration.DatabaseName}",
            AlertType.RestoreError,
            NotificationSeverity.Error,
            details: new ExceptionDetails(e));
        _serverStore.NotificationCenter.Add(alert);

        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        {
            bool databaseExists;
            using (context.OpenReadTransaction())
            {
                databaseExists = _serverStore.Cluster.DatabaseExists(context, RestoreFromConfiguration.DatabaseName);
            }

            if (databaseExists == false)
            {
                // delete any files that we already created during the restore
                IOExtensions.DeleteDirectory(RestoreFromConfiguration.DataDirectory);
            }
            else
            {
                // The record was already written to the cluster — delete the
                // half-restored database so the restore can be retried.
                try
                {
                    var deleteResult = await _serverStore.DeleteDatabaseAsync(RestoreFromConfiguration.DatabaseName, true, new[] { _serverStore.NodeTag }, RaftIdGenerator.DontCareId);
                    await _serverStore.Cluster.WaitForIndexNotification(deleteResult.Index, TimeSpan.FromSeconds(60));
                }
                catch (TimeoutException te)
                {
                    result.AddError($"Failed to delete the database {databaseName} after a failed restore. " +
                                    $"In order to restart the restore process this database needs to be deleted manually. Exception: {te}.");
                    onProgress.Invoke(result.Progress);
                }
            }
        }

        result.AddError($"Error occurred during restore of database {databaseName}. Exception: {e}");
        onProgress.Invoke(result.Progress);
        throw;
    }
    finally
    {
        Dispose();
    }

    void RegenerateDatabaseIdInIndexes(RavenConfiguration configuration, DocumentDatabase database)
    {
        // this code will generate new DatabaseId for each index.
        // This is something that we need to do when snapshot restore is executed to match the newly generated database id
        var indexesPath = configuration.Indexing.StoragePath.FullPath;
        if (Directory.Exists(indexesPath) == false)
        {
            return;
        }

        foreach (var indexPath in Directory.GetDirectories(indexesPath))
        {
            Index index = null;
            try
            {
                // Opening with generateNewDatabaseId rewrites the id; the index
                // itself is not kept open afterwards.
                index = Index.Open(indexPath, database, generateNewDatabaseId: true);
            }
            catch (Exception e)
            {
                // Best effort: record the failure and continue with the next index.
                result.AddError($"Could not open index from path '{indexPath}'. Error: {e.Message}");
            }
            finally
            {
                index?.Dispose();
            }
        }
    }
}
// field > value over string terms: range (value, *] with an open (Null) upper bound.
public static Query GreaterThan(Index index, string fieldName, LuceneTermType termType, string value, bool exact)
{
    return CreateRange(index, fieldName, value, termType, false, Null, LuceneTermType.Null, true, exact);
}
// field <= value over doubles: inclusive range [double.MinValue, value].
public static Query LessThanOrEqual(Index index, string fieldName, double value)
{
    return CreateRange(index, fieldName, double.MinValue, true, value, true);
}
// field <= value over string terms: range [*, value] with an open (wildcard) lower bound.
public static Query LessThanOrEqual(Index index, string fieldName, LuceneTermType termType, string value, bool exact)
{
    return CreateRange(index, fieldName, Asterisk, LuceneTermType.WildCard, false, value, termType, true, exact);
}
// field < value over longs: range [long.MinValue, value) — upper bound exclusive.
public static Query LessThan(Index index, string fieldName, long value)
{
    return CreateRange(index, fieldName, long.MinValue, true, value, false);
}
// Builds a numeric range query over double values and routes it through the
// query cache (when enabled for the index).
private static Query CreateRange(Index index, string fieldName, double minValue, bool inclusiveMin, double maxValue, bool inclusiveMax)
{
    // Lucene precisionStep for double fields. NOTE(review): this must match the
    // precisionStep used at indexing time for these fields — confirm against the
    // indexing side before changing.
    const int precisionStep = 4;

    var query = NumericRangeQuery.NewDoubleRange(fieldName, precisionStep, minValue, maxValue, inclusiveMin, inclusiveMax);
    return MaybeCacheQuery(index, query);
}
// field == value over longs, expressed as the degenerate inclusive range [value, value].
public static Query Equal(Index index, string fieldName, long value)
{
    return CreateRange(index, fieldName, value, true, value, true);
}
// Range query over doubles with caller-controlled inclusivity on both bounds.
public static Query Between(Index index, string fieldName, double fromValue, bool fromInclusive, double toValue, bool toInclusive)
{
    return CreateRange(index, fieldName, fromValue, fromInclusive, toValue, toInclusive);
}
// field > value over doubles: range (value, double.MaxValue] — lower bound exclusive.
public static Query GreaterThan(Index index, string fieldName, double value)
{
    return CreateRange(index, fieldName, value, false, double.MaxValue, true);
}