private static void PutOrder(DocumentDatabase database, DynamicJsonValue dynamicOrder, DocumentsOperationContext context, int number)
{
    using (var tx = context.OpenWriteTransaction())
    {
        using (var doc = CreateDocument(context, $"orders/{number}", dynamicOrder))
        {
            database.DocumentsStorage.Put(context, $"orders/{number}", null, doc);
        }

        tx.Commit();
    }
}
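// NOTE (added example, not part of the original listing): CreateDocument is referenced above but not
// shown. A minimal sketch of such a helper, assuming it only needs to turn a DynamicJsonValue into a
// BlittableJsonReaderObject suitable for DocumentsStorage.Put, could look like this:
private static BlittableJsonReaderObject CreateDocument(JsonOperationContext context, string key, DynamicJsonValue value)
{
    // ReadObject materializes the dynamic JSON into the context's unmanaged memory;
    // UsageMode.ToDisk produces a blittable document that can be stored directly.
    return context.ReadObject(value, key, BlittableJsonDocumentBuilder.UsageMode.ToDisk);
}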
private void FlushDocuments(DocumentsOperationContext context, List<BulkInsertDoc> docsToWrite, ref int totalSize)
{
    if (docsToWrite.Count == 0)
    {
        return;
    }

    if (_logger.IsInfoEnabled)
    {
        _logger.Info(
            $"Writing {docsToWrite.Count:#,#} documents to disk using bulk insert, total {totalSize / 1024:#,#} kb to write");
    }

    Stopwatch sp = Stopwatch.StartNew();

    using (var tx = context.OpenWriteTransaction())
    {
        tx.InnerTransaction.LowLevelTransaction.IsLazyTransaction = true;

        foreach (var bulkInsertDoc in docsToWrite)
        {
            var reader = new BlittableJsonReaderObject(bulkInsertDoc.Pointer, bulkInsertDoc.Used, context);
            reader.BlittableValidation();

            string docKey;
            BlittableJsonReaderObject metadata;
            if (reader.TryGet(Constants.Metadata.Key, out metadata) == false)
            {
                const string message = "'@metadata' is missing in received document for bulk insert";
                throw new InvalidDataException(message);
            }

            if (metadata.TryGet(Constants.Metadata.Id, out docKey) == false)
            {
                const string message = "'@id' is missing in received document for bulk insert";
                throw new InvalidDataException(message);
            }

            TcpConnection.DocumentDatabase.DocumentsStorage.Put(context, docKey, null, reader);
        }

        tx.Commit();
    }

    foreach (var bulkInsertDoc in docsToWrite)
    {
        _docsToRelease.Add(bulkInsertDoc);
    }

    if (_logger.IsInfoEnabled)
    {
        _logger.Info(
            $"Writing {docsToWrite.Count:#,#} documents in bulk insert took {sp.ElapsedMilliseconds:#,#;;0} ms");
    }

    docsToWrite.Clear();
    totalSize = 0;
}
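// NOTE (added sketch, not part of the original listing): FlushDocuments above assumes each
// BulkInsertDoc exposes a raw pointer to the serialized blittable document plus the number of bytes
// used. A hypothetical shape consistent with that usage:
private unsafe class BulkInsertDoc
{
    public byte* Pointer; // start of the serialized document in unmanaged memory
    public int Used;      // number of valid bytes at Pointer
}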
internal static IEnumerable<ReplayProgress> Replay(DocumentDatabase database, Stream replayStream)
{
    DocumentsOperationContext txCtx = null;
    IDisposable txDisposable = null;
    DocumentsTransaction previousTx = null;

    using (database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    using (context.GetManagedBuffer(out var buffer))
    using (var gZipStream = new GZipStream(replayStream, CompressionMode.Decompress, leaveOpen: true))
    {
        var peepingTomStream = new PeepingTomStream(gZipStream, context);
        var state = new JsonParserState();
        var parser = new UnmanagedJsonParser(context, state, "file");
        var commandsProgress = 0;
        var readers = UnmanagedJsonParserHelper.ReadArrayToMemory(context, peepingTomStream, parser, state, buffer);

        using (var readersItr = readers.GetEnumerator())
        {
            ReadStartRecordingDetails(readersItr, context, peepingTomStream);
            while (readersItr.MoveNext())
            {
                using (readersItr.Current)
                {
                    if (readersItr.Current.TryGet(nameof(RecordingDetails.Type), out string strType) == false)
                    {
                        throw new ReplayTransactionsException($"Can't read {nameof(RecordingDetails.Type)} of replay detail", peepingTomStream);
                    }

                    if (Enum.TryParse<TxInstruction>(strType, true, out var type))
                    {
                        switch (type)
                        {
                            case TxInstruction.BeginTx:
                                txDisposable = database.DocumentsStorage.ContextPool.AllocateOperationContext(out txCtx);
                                txCtx.OpenWriteTransaction();
                                break;
                            case TxInstruction.Commit:
                                txCtx.Transaction.Commit();
                                break;
                            case TxInstruction.DisposeTx:
                                txDisposable.Dispose();
                                break;
                            case TxInstruction.BeginAsyncCommitAndStartNewTransaction:
                                previousTx = txCtx.Transaction;
                                txCtx.Transaction = txCtx.Transaction.BeginAsyncCommitAndStartNewTransaction(txCtx);
                                txDisposable = txCtx.Transaction;
                                break;
                            case TxInstruction.EndAsyncCommit:
                                previousTx.EndAsyncCommit();
                                break;
                            case TxInstruction.DisposePrevTx:
                                previousTx.Dispose();
                                break;
                        }
                        continue;
                    }

                    try
                    {
                        var cmd = DeserializeCommand(context, database, strType, readersItr.Current, peepingTomStream);
                        commandsProgress += cmd.ExecuteDirectly(txCtx);
                        TransactionOperationsMerger.UpdateGlobalReplicationInfoBeforeCommit(txCtx);
                    }
                    catch (Exception)
                    {
                        // TODO: Accept exceptions that were thrown while recording
                        txDisposable.Dispose();
                        throw;
                    }

                    yield return new ReplayProgress
                    {
                        CommandsProgress = commandsProgress
                    };
                }
            }
        }
    }
}
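// NOTE (added sketch, not part of the original listing): Replay above reads a Type field from each
// recorded entry and first tries to match it against TxInstruction before treating it as a command
// type name. A minimal header shape consistent with that usage might be:
internal class RecordingDetails
{
    public string Type { get; set; }       // either a TxInstruction name or a command type name
    public DateTime DateTime { get; set; } // assumed timestamp of the recorded operation
}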
public async Task<ImportResult> Import(DocumentsOperationContext context, Stream stream, Action<IOperationProgress> onProgress = null)
{
    var result = new ImportResult();
    var progress = new IndeterminateProgress();
    var state = new JsonParserState();

    JsonOperationContext.ManagedPinnedBuffer buffer;
    using (context.GetManagedBuffer(out buffer))
    using (var parser = new UnmanagedJsonParser(context, state, "fileName"))
    {
        var operateOnType = "__top_start_object";
        var buildVersion = 0L;
        var identities = new Dictionary<string, long>();
        VersioningStorage versioningStorage = null;

        while (true)
        {
            if (parser.Read() == false)
            {
                var read = await stream.ReadAsync(buffer.Buffer.Array, buffer.Buffer.Offset, buffer.Length);
                if (read == 0)
                {
                    if (state.CurrentTokenType != JsonParserToken.EndObject)
                    {
                        throw new EndOfStreamException("Stream ended without reaching end of json content");
                    }
                    break;
                }
                parser.SetBuffer(buffer, read);
                continue;
            }

            switch (state.CurrentTokenType)
            {
                case JsonParserToken.String:
                    unsafe
                    {
                        operateOnType = new LazyStringValue(null, state.StringBuffer, state.StringSize, context).ToString();
                    }
                    break;

                case JsonParserToken.Integer:
                    switch (operateOnType)
                    {
                        case "BuildVersion":
                            buildVersion = state.Long;
                            break;
                    }
                    break;

                case JsonParserToken.StartObject:
                    if (operateOnType == "__top_start_object")
                    {
                        operateOnType = null;
                        break;
                    }

                    context.CachedProperties.NewDocument();
                    var builder = new BlittableJsonDocumentBuilder(_batchPutCommand.Context,
                        BlittableJsonDocumentBuilder.UsageMode.ToDisk, "ImportObject", parser, state);
                    builder.ReadNestedObject();
                    while (builder.Read() == false)
                    {
                        var read = await stream.ReadAsync(buffer.Buffer.Array, buffer.Buffer.Offset, buffer.Length);
                        if (read == 0)
                        {
                            throw new EndOfStreamException("Stream ended without reaching end of json content");
                        }
                        parser.SetBuffer(buffer, read);
                    }
                    builder.FinalizeDocument();

                    if (operateOnType == "Docs" && Options.OperateOnTypes.HasFlag(DatabaseItemType.Documents))
                    {
                        progress.Progress = "Importing Documents";
                        onProgress?.Invoke(progress);

                        PatchDocument patch = null;
                        PatchRequest patchRequest = null;
                        if (string.IsNullOrWhiteSpace(Options.TransformScript) == false)
                        {
                            patch = new PatchDocument(context.DocumentDatabase);
                            patchRequest = new PatchRequest
                            {
                                Script = Options.TransformScript
                            };
                        }

                        result.DocumentsCount++;
                        var reader = builder.CreateReader();
                        var document = new Document
                        {
                            Data = reader,
                        };

                        if (Options.IncludeExpired == false && document.Expired(_database.Time.GetUtcNow()))
                        {
                            continue;
                        }

                        TransformScriptOrDisableVersioningIfNeeded(context, patch, reader, document, patchRequest);

                        _batchPutCommand.Add(document.Data);

                        if (result.DocumentsCount % 1000 == 0)
                        {
                            progress.Progress = $"Imported {result.DocumentsCount} documents";
                            onProgress?.Invoke(progress);
                        }

                        await HandleBatchOfDocuments(context, parser, buildVersion).ConfigureAwait(false);
                    }
                    else if (operateOnType == "RevisionDocuments" && Options.OperateOnTypes.HasFlag(DatabaseItemType.RevisionDocuments))
                    {
                        if (versioningStorage == null)
                        {
                            break;
                        }

                        result.RevisionDocumentsCount++;
                        var reader = builder.CreateReader();
                        _batchPutCommand.Add(reader);
                        await HandleBatchOfDocuments(context, parser, buildVersion).ConfigureAwait(false);
                    }
                    else
                    {
                        using (builder)
                        {
                            switch (operateOnType)
                            {
                                case "Attachments":
                                    result.Warnings.Add("Attachments are not supported anymore. Use RavenFS instead. Skipping.");
                                    break;

                                case "Indexes":
                                    if (Options.OperateOnTypes.HasFlag(DatabaseItemType.Indexes) == false)
                                    {
                                        continue;
                                    }

                                    result.IndexesCount++;
                                    progress.Progress = "Importing Indexes";
                                    onProgress?.Invoke(progress);

                                    try
                                    {
                                        IndexProcessor.Import(builder, _database, buildVersion, Options.RemoveAnalyzers);
                                    }
                                    catch (Exception e)
                                    {
                                        result.Warnings.Add($"Could not import index. Message: {e.Message}");
                                    }
                                    break;

                                case "Transformers":
                                    if (Options.OperateOnTypes.HasFlag(DatabaseItemType.Transformers) == false)
                                    {
                                        continue;
                                    }

                                    result.TransformersCount++;
                                    progress.Progress = "Importing Transformers";
                                    onProgress?.Invoke(progress);

                                    try
                                    {
                                        TransformerProcessor.Import(builder, _database, buildVersion);
                                    }
                                    catch (Exception e)
                                    {
                                        result.Warnings.Add($"Could not import transformer. Message: {e.Message}");
                                    }
                                    break;

                                case "Identities":
                                    if (Options.OperateOnTypes.HasFlag(DatabaseItemType.Identities))
                                    {
                                        result.IdentitiesCount++;
                                        progress.Progress = "Importing Identities";
                                        onProgress?.Invoke(progress);

                                        using (var reader = builder.CreateReader())
                                        {
                                            try
                                            {
                                                string identityKey, identityValueString;
                                                long identityValue;
                                                if (reader.TryGet("Key", out identityKey) == false ||
                                                    reader.TryGet("Value", out identityValueString) == false ||
                                                    long.TryParse(identityValueString, out identityValue) == false)
                                                {
                                                    result.Warnings.Add($"Cannot import the following identity: '{reader}'. Skipping.");
                                                }
                                                else
                                                {
                                                    identities[identityKey] = identityValue;
                                                }
                                            }
                                            catch (Exception e)
                                            {
                                                result.Warnings.Add($"Cannot import the following identity: '{reader}'. Error: {e}. Skipping.");
                                            }
                                        }
                                    }
                                    break;

                                default:
                                    result.Warnings.Add(
                                        $"The following type is not recognized: '{operateOnType}'. Skipping.");
                                    break;
                            }
                        }
                    }
                    break;

                case JsonParserToken.StartArray:
                    switch (operateOnType)
                    {
                        case "RevisionDocuments":
                            // We are taking a reference here since the documents import can activate or disable the versioning.
                            // We hold a local copy because the user can disable the bundle during the import process, externally.
                            // In this case we want to continue to import the revision documents.
                            versioningStorage = _database.BundleLoader.VersioningStorage;
                            _batchPutCommand.IsRevision = true;
                            break;
                    }
                    break;

                case JsonParserToken.EndArray:
                    switch (operateOnType)
                    {
                        case "Docs":
                            await FinishBatchOfDocuments();
                            _batchPutCommand = new MergedBatchPutCommand(_database, buildVersion);
                            break;

                        case "RevisionDocuments":
                            await FinishBatchOfDocuments();
                            break;

                        case "Identities":
                            if (identities.Count > 0)
                            {
                                using (var tx = context.OpenWriteTransaction())
                                {
                                    _database.DocumentsStorage.UpdateIdentities(context, identities);
                                    tx.Commit();
                                }
                            }
                            identities = null;
                            break;
                    }
                    break;
            }
        }
    }

    return result;
}
private bool CleanupDocumentsOnce(DocumentsOperationContext context, DateTime currentTime)
{
    int count = 0;
    var earlyExit = false;
    var keysToDelete = new List<Slice>();
    var currentTicks = currentTime.Ticks;
    var sp = Stopwatch.StartNew();

    using (var tx = context.OpenWriteTransaction())
    {
        var expirationTree = tx.InnerTransaction.CreateTree(DocumentsByExpiration);
        while (true)
        {
            using (var it = expirationTree.Iterate(false))
            {
                if (it.Seek(Slices.BeforeAllKeys) == false)
                {
                    break;
                }

                var entryTicks = it.CurrentKey.CreateReader().ReadBigEndianInt64();
                if (entryTicks >= currentTicks)
                {
                    break;
                }

                using (var multiIt = expirationTree.MultiRead(it.CurrentKey))
                {
                    if (multiIt.Seek(Slices.BeforeAllKeys))
                    {
                        do
                        {
                            if (sp.ElapsedMilliseconds > 150)
                            {
                                earlyExit = true;
                                break;
                            }

                            var clonedKey = multiIt.CurrentKey.Clone(tx.InnerTransaction.Allocator);
                            keysToDelete.Add(clonedKey);

                            var document = _database.DocumentsStorage.Get(context, clonedKey);
                            if (document == null)
                            {
                                continue;
                            }

                            // Validate that the expiration value in metadata is still the same.
                            // We have to check this as the user can update this value.
                            string expirationDate;
                            BlittableJsonReaderObject metadata;
                            if (document.Data.TryGet(Constants.Metadata.Key, out metadata) == false ||
                                metadata.TryGet(Constants.Expiration.RavenExpirationDate, out expirationDate) == false)
                            {
                                continue;
                            }

                            DateTime date;
                            if (DateTime.TryParseExact(expirationDate, "O", CultureInfo.InvariantCulture,
                                    DateTimeStyles.RoundtripKind, out date) == false)
                            {
                                continue;
                            }

                            if (currentTime < date)
                            {
                                continue;
                            }

                            var deleted = _database.DocumentsStorage.Delete(context, clonedKey, key: null, expectedEtag: null);
                            count++;
                            if (_logger.IsInfoEnabled && deleted == null)
                            {
                                _logger.Info($"Tried to delete expired document '{clonedKey}' but document was not found.");
                            }
                        } while (multiIt.MoveNext());
                    }
                }

                var treeKey = it.CurrentKey.Clone(tx.InnerTransaction.Allocator);
                foreach (var slice in keysToDelete)
                {
                    expirationTree.MultiDelete(treeKey, slice);
                }
            }

            if (earlyExit)
            {
                break;
            }
        }

        tx.Commit();
    }

    if (_logger.IsInfoEnabled)
    {
        _logger.Info($"Successfully deleted {count:#,#;;0} documents in {sp.ElapsedMilliseconds:#,#;;0} ms. Found more stuff to delete? {earlyExit}");
    }

    return earlyExit;
}
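// NOTE (added example, not part of the original listing): CleanupDocumentsOnce returns true when it
// stopped early because the ~150 ms time budget was exceeded. A hypothetical caller could therefore
// drive it in a loop until a pass completes without an early exit:
private void CleanupDocuments(DocumentsOperationContext context)
{
    var currentTime = _database.Time.GetUtcNow();
    while (CleanupDocumentsOnce(context, currentTime))
    {
        // each pass deletes what it can within the time budget, commits, and tries again
    }
}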
private static void Put_docs(DocumentsOperationContext context, DocumentDatabase database)
{
    using (var tx = context.OpenWriteTransaction())
    {
        using (var doc = CreateDocument(context, "users/1", new DynamicJsonValue
        {
            ["Location"] = new DynamicJsonValue
            {
                ["Country"] = "USA",
                ["State"] = "Texas"
            },
            ["ResidenceAddress"] = new DynamicJsonValue
            {
                ["Country"] = "UK"
            },
            ["Hobbies"] = new DynamicJsonArray
            {
                "sport", "books"
            },
            [Constants.Documents.Metadata.Key] = new DynamicJsonValue
            {
                [Constants.Documents.Metadata.Collection] = "Users"
            }
        }))
        {
            database.DocumentsStorage.Put(context, "users/1", null, doc);
        }

        using (var doc = CreateDocument(context, "users/2", new DynamicJsonValue
        {
            ["Location"] = new DynamicJsonValue
            {
                ["Country"] = "Poland",
                ["State"] = "Pomerania"
            },
            ["ResidenceAddress"] = new DynamicJsonValue
            {
                ["Country"] = "UK"
            },
            ["Hobbies"] = new DynamicJsonArray
            {
                "music", "sport"
            },
            [Constants.Documents.Metadata.Key] = new DynamicJsonValue
            {
                [Constants.Documents.Metadata.Collection] = "Users"
            }
        }))
        {
            database.DocumentsStorage.Put(context, "users/2", null, doc);
        }

        using (var doc = CreateDocument(context, "users/3", new DynamicJsonValue
        {
            ["Hobbies"] = new DynamicJsonArray
            {
                "music", "sport"
            },
            ["Location"] = new DynamicJsonValue
            {
                ["State"] = "Pomerania",
                ["Country"] = "Poland"
            },
            ["ResidenceAddress"] = new DynamicJsonValue
            {
                ["Country"] = "UK"
            },
            [Constants.Documents.Metadata.Key] = new DynamicJsonValue
            {
                [Constants.Documents.Metadata.Collection] = "Users"
            }
        }))
        {
            database.DocumentsStorage.Put(context, "users/3", null, doc);
        }

        tx.Commit();
    }
}
private void MergeTransactionsOnce()
{
    DocumentsOperationContext context = null;
    IDisposable returnContext = null;
    DocumentsTransaction tx = null;
    try
    {
        var pendingOps = GetBufferForPendingOps();
        returnContext = _parent.DocumentsStorage.ContextPool.AllocateOperationContext(out context);
        {
            try
            {
                _recording.State?.Record(context, TxInstruction.BeginTx);
                tx = context.OpenWriteTransaction();
            }
            catch (Exception e)
            {
                try
                {
                    if (_operations.TryDequeue(out MergedTransactionCommand command))
                    {
                        command.Exception = e;
                        DoCommandNotification(command);
                    }
                    return;
                }
                finally
                {
                    if (tx != null)
                    {
                        _recording.State?.Record(context, TxInstruction.DisposeTx, tx.Disposed == false);
                        tx.Dispose();
                    }
                }
            }

            PendingOperations result;
            try
            {
                var transactionMeter = TransactionPerformanceMetrics.MeterPerformanceRate();
                try
                {
                    result = ExecutePendingOperationsInTransaction(pendingOps, context, null, ref transactionMeter);
                    UpdateGlobalReplicationInfoBeforeCommit(context);
                }
                finally
                {
                    transactionMeter.Dispose();
                }
            }
            catch (Exception e)
            {
                // need to dispose here since we are going to open a new tx for each operation
                if (tx != null)
                {
                    _recording.State?.Record(context, TxInstruction.DisposeTx, tx.Disposed == false);
                    tx.Dispose();
                }

                if (e is HighDirtyMemoryException highDirtyMemoryException)
                {
                    if (_log.IsInfoEnabled)
                    {
                        var errorMessage = $"{pendingOps.Count:#,#0} operations were cancelled because of high dirty memory, details: {highDirtyMemoryException.Message}";
                        _log.Info(errorMessage, highDirtyMemoryException);
                    }

                    NotifyHighDirtyMemoryFailure(pendingOps, highDirtyMemoryException);
                }
                else
                {
                    if (_log.IsInfoEnabled)
                    {
                        _log.Info($"Failed to run merged transaction with {pendingOps.Count:#,#0}, will retry independently", e);
                    }

                    NotifyTransactionFailureAndRerunIndependently(pendingOps, e);
                }

                return;
            }

            switch (result)
            {
                case PendingOperations.CompletedAll:
                    try
                    {
                        tx.InnerTransaction.LowLevelTransaction.RetrieveCommitStats(out var stats);
                        _recording.State?.Record(context, TxInstruction.Commit);
                        tx.Commit();
                        SlowWriteNotification.Notify(stats, _parent);
                        _recording.State?.Record(context, TxInstruction.DisposeTx, tx.Disposed == false);
                        tx.Dispose();
                    }
                    catch (Exception e)
                    {
                        foreach (var op in pendingOps)
                        {
                            op.Exception = e;
                        }
                    }
                    finally
                    {
                        NotifyOnThreadPool(pendingOps);
                    }
                    return;

                case PendingOperations.HasMore:
                    MergeTransactionsWithAsyncCommit(ref context, ref returnContext, pendingOps);
                    return;

                default:
                    Debug.Assert(false, "Should never happen");
                    return;
            }
        }
    }
    finally
    {
        if (context?.Transaction != null)
        {
            using (_parent.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
            {
                _recording.State?.Record(ctx, TxInstruction.DisposeTx, context.Transaction.Disposed == false);
            }

            context.Transaction.Dispose();
        }

        returnContext?.Dispose();
    }
}
private async Task<IOperationResult> ExecuteOperation(string indexName, IndexQueryServerSide query, QueryOperationOptions options,
    DocumentsOperationContext context, Action<DeterminateProgress> onProgress, Action<string> action, OperationCancelToken token)
{
    var index = GetIndex(indexName);
    if (index.Type.IsMapReduce())
    {
        throw new InvalidOperationException("Cannot execute bulk operation on Map-Reduce indexes.");
    }

    query = ConvertToOperationQuery(query, options);

    const int BatchSize = 1024;

    RavenTransaction tx = null;
    var operationsInCurrentBatch = 0;
    List<string> resultKeys;
    try
    {
        var results = await index.Query(query, context, token).ConfigureAwait(false);
        if (options.AllowStale == false && results.IsStale)
        {
            throw new InvalidOperationException("Cannot perform bulk operation. Query is stale.");
        }

        resultKeys = new List<string>(results.Results.Count);
        foreach (var document in results.Results)
        {
            resultKeys.Add(document.Key.ToString());
        }
    }
    finally // make sure to close tx if DocumentConflictException is thrown
    {
        context.CloseTransaction();
    }

    var progress = new DeterminateProgress
    {
        Total = resultKeys.Count,
        Processed = 0
    };

    onProgress(progress);

    using (var rateGate = options.MaxOpsPerSecond.HasValue
        ? new RateGate(options.MaxOpsPerSecond.Value, TimeSpan.FromSeconds(1))
        : null)
    {
        foreach (var document in resultKeys)
        {
            if (rateGate != null && rateGate.WaitToProceed(0) == false)
            {
                using (tx)
                {
                    tx?.Commit();
                }

                tx = null;

                rateGate.WaitToProceed();
            }

            if (tx == null)
            {
                operationsInCurrentBatch = 0;
                tx = context.OpenWriteTransaction();
            }

            action(document);

            operationsInCurrentBatch++;
            progress.Processed++;

            if (progress.Processed % 128 == 0)
            {
                onProgress(progress);
            }

            if (operationsInCurrentBatch < BatchSize)
            {
                continue;
            }

            using (tx)
            {
                tx.Commit();
            }

            tx = null;
        }
    }

    using (tx)
    {
        tx?.Commit();
    }

    return new BulkOperationResult
    {
        Total = progress.Total
    };
}
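// NOTE (added example, not part of the original listing): the 'action' delegate is invoked for each
// result key while a batch write transaction is open, so a bulk delete-by-index could be expressed as
// a thin wrapper. The '_database' field and the DocumentsStorage.Delete overload used here are
// assumptions made for illustration only.
private Task<IOperationResult> DeleteByIndex(string indexName, IndexQueryServerSide query, QueryOperationOptions options,
    DocumentsOperationContext context, Action<DeterminateProgress> onProgress, OperationCancelToken token)
{
    // Each key is deleted inside the batch transaction managed by ExecuteOperation.
    return ExecuteOperation(indexName, query, options, context, onProgress,
        key => _database.DocumentsStorage.Delete(context, key, expectedEtag: null),
        token);
}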
private IOperationResult ExecuteOperation(string collectionName, CollectionOperationOptions options, DocumentsOperationContext context,
    Action<DeterminateProgress> onProgress, Action<string> action, OperationCancelToken token)
{
    const int batchSize = 1024;
    var progress = new DeterminateProgress();
    var cancellationToken = token.Token;

    long lastEtag;
    long totalCount;
    using (context.OpenReadTransaction())
    {
        lastEtag = _database.DocumentsStorage.GetLastDocumentEtag(context, collectionName);
        _database.DocumentsStorage.GetNumberOfDocumentsToProcess(context, collectionName, 0, out totalCount);
    }

    progress.Total = totalCount;

    long startEtag = 0;
    using (var rateGate = options.MaxOpsPerSecond.HasValue
        ? new RateGate(options.MaxOpsPerSecond.Value, TimeSpan.FromSeconds(1))
        : null)
    {
        bool done = false;
        // The reason for this nested loop is that we can't operate on a document while iterating the document tree.
        while (startEtag <= lastEtag)
        {
            cancellationToken.ThrowIfCancellationRequested();
            bool wait = false;

            using (var tx = context.OpenWriteTransaction())
            {
                var documents = _database.DocumentsStorage.GetDocumentsFrom(context, collectionName, startEtag, 0, batchSize).ToList();
                foreach (var document in documents)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    if (document.Etag > lastEtag) // we don't want to go over the documents that we have patched
                    {
                        done = true;
                        break;
                    }

                    if (rateGate != null && rateGate.WaitToProceed(0) == false)
                    {
                        wait = true;
                        break;
                    }

                    startEtag = document.Etag;

                    action(document.Key);

                    progress.Processed++;
                }

                tx.Commit();

                onProgress(progress);

                if (wait)
                {
                    rateGate.WaitToProceed();
                }

                if (done || documents.Count == 0)
                {
                    break;
                }
            }
        }
    }

    return new BulkOperationResult
    {
        Total = progress.Processed
    };
}