private async Task SaveSourceReplicationInformation(LegacySourceReplicationInformation replicationSource, DocumentsOperationContext context, string documentId)
{
    var blittable = EntityToBlittable.ConvertCommandToBlittable(replicationSource, context);
    using (var cmd = new MergedPutCommand(blittable, documentId, null, Database))
    {
        await Database.TxMerger.Enqueue(cmd);
    }
}
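All of these snippets funnel writes through the database's transaction merger: the caller enqueues a command and awaits its completion, while a single merger thread batches many commands into one write transaction. A self-contained model of that pattern (the type names mimic RavenDB's, but this is an illustration, not its code):

using System.Collections.Concurrent;
using System.Threading.Tasks;

public abstract class MergedCommand
{
    public TaskCompletionSource<object> Completion { get; } = new TaskCompletionSource<object>();

    // Runs inside the single write transaction owned by the merger thread.
    public abstract int Execute();
}

public class TxMergerModel
{
    private readonly BlockingCollection<MergedCommand> _queue = new BlockingCollection<MergedCommand>();

    public Task Enqueue(MergedCommand cmd)
    {
        _queue.Add(cmd);            // picked up by the merger thread
        return cmd.Completion.Task; // caller awaits until the batch commits
    }

    // A single merger thread drains _queue, executes many commands inside
    // one write transaction, commits once, and only then completes each
    // command's task - which is why the handlers below can safely read
    // cmd.PutResult after awaiting Enqueue.
}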
public async Task Put()
{
    DocumentsOperationContext context;
    using (ContextPool.AllocateOperationContext(out context))
    {
        var id = GetQueryStringValueAndAssertIfSingleAndNotEmpty("id");
        var doc = await context.ReadForDiskAsync(RequestBodyStream(), id);
        var etag = GetLongFromHeaders("If-Match");

        var cmd = new MergedPutCommand
        {
            Database = Database,
            ExepctedEtag = etag,
            Key = id,
            Document = doc
        };

        await Database.TxMerger.Enqueue(cmd);

        if (cmd.ConcurrencyException != null)
        {
            HttpContext.Response.StatusCode = (int)HttpStatusCode.Conflict;
            using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
            {
                writer.WriteStartObject();
                writer.WritePropertyName("Key");
                writer.WriteString(cmd.Key);
                writer.WriteComma();
                writer.WritePropertyName("Error");
                writer.WriteString(cmd.ConcurrencyException.Message);
                writer.WriteEndObject();
            }
            return;
        }

        HttpContext.Response.StatusCode = 201;
        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("Key");
            writer.WriteString(cmd.PutResult.Key);
            writer.WriteComma();
            writer.WritePropertyName("Etag");
            writer.WriteInteger(cmd.PutResult.Etag);
            writer.WriteEndObject();
        }
    }
}
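This early variant drives optimistic concurrency with a numeric etag in the If-Match header and answers 409 Conflict on a mismatch. A rough sketch of a raw HTTP call against such an endpoint; the URL shape and database name are assumptions for illustration, not a documented client API:

using System.Net;
using System.Net.Http;
using System.Text;

// Illustrative only: the endpoint path is inferred from the handler above.
var client = new HttpClient();
var request = new HttpRequestMessage(HttpMethod.Put,
    "http://localhost:8080/databases/Northwind/docs?id=users/1")
{
    Content = new StringContent("{ \"Name\": \"Arava\" }", Encoding.UTF8, "application/json")
};
// A bare number is not a standard ETag token, so bypass header validation.
request.Headers.TryAddWithoutValidation("If-Match", "17"); // the etag we last read
var response = await client.SendAsync(request);
if (response.StatusCode == HttpStatusCode.Conflict)
{
    // Someone else modified the document since we read etag 17;
    // the body carries the Key and Error written by the handler.
}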
public async Task Put()
{
    using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        var id = GetQueryStringValueAndAssertIfSingleAndNotEmpty("id");

        // We HAVE to read the document in full here. Trying to parallelize the
        // document read with the identity generation would have to account for
        // identity generation failing, which would leave the reading task
        // hanging if we abort. It is easier to just do it synchronously.
        var doc = await context.ReadForDiskAsync(RequestBodyStream(), id).ConfigureAwait(false);

        if (id[id.Length - 1] == '|')
        {
            var (_, clusterId, _) = await ServerStore.GenerateClusterIdentityAsync(id, Database.Name);
            id = clusterId;
        }

        var changeVector = context.GetLazyString(GetStringFromHeaders("If-Match"));

        using (var cmd = new MergedPutCommand(doc, id, changeVector, Database))
        {
            await Database.TxMerger.Enqueue(cmd);
            cmd.ExceptionDispatchInfo?.Throw();

            HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;
            using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
            {
                writer.WriteStartObject();
                writer.WritePropertyName(nameof(PutResult.Id));
                writer.WriteString(cmd.PutResult.Id);
                writer.WriteComma();
                writer.WritePropertyName(nameof(PutResult.ChangeVector));
                writer.WriteString(cmd.PutResult.ChangeVector);
                writer.WriteEndObject();
            }
        }
    }
}
public async Task Put()
{
    using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        var id = GetQueryStringValueAndAssertIfSingleAndNotEmpty("id");
        var doc = context.ReadForDiskAsync(RequestBodyStream(), id).ConfigureAwait(false);

        if (id[id.Length - 1] == '|')
        {
            var (_, clusterId) = await ServerStore.GenerateClusterIdentityAsync(id, Database.Name);
            id = clusterId;
        }

        var changeVector = context.GetLazyString(GetStringQueryString("If-Match", false));

        var cmd = new MergedPutCommand(await doc, id, changeVector, Database);
        await Database.TxMerger.Enqueue(cmd);
        cmd.ExceptionDispatchInfo?.Throw();

        HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;
        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartObject();
            writer.WritePropertyName(nameof(PutResult.Id));
            writer.WriteString(cmd.PutResult.Id);
            writer.WriteComma();
            writer.WritePropertyName(nameof(PutResult.ChangeVector));
            writer.WriteString(cmd.PutResult.ChangeVector);
            writer.WriteEndObject();
        }
    }
}
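Both newer handlers special-case ids ending in '|': the suffix asks the cluster to generate a server-wide identity before the put runs. On the client side this is normally triggered by storing with a pipe-terminated id; a small sketch (the User entity type and connection details are assumed):

using Raven.Client.Documents;

// User is an assumed entity type with a Name property.
using (var store = new DocumentStore { Urls = new[] { "http://localhost:8080" }, Database = "Northwind" })
{
    store.Initialize();
    using (var session = store.OpenSession())
    {
        // The trailing '|' asks the cluster to assign the next identity,
        // e.g. "users/1", "users/2", ... consistently across all nodes.
        session.Store(new User { Name = "Arava" }, "users|");
        session.SaveChanges();
    }
}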
protected override int ExecuteCmd(DocumentsOperationContext context)
{
    for (int i = 0; i < NumberOfCommands; i++)
    {
        var cmd = Commands[i];
        Debug.Assert(cmd.Type == CommandType.PUT);
        try
        {
            Database.DocumentsStorage.Put(context, cmd.Id, null, cmd.Document);
        }
        catch (Voron.Exceptions.VoronConcurrencyErrorException)
        {
            // RavenDB-10581 - If we have a concurrency error on "doc-id/",
            // this means that we have existing values under the current etag;
            // we'll generate a new (random) id for them.
            // The TransactionMerger will re-run us when we ask it to as a
            // separate transaction.
            for (; i < NumberOfCommands; i++)
            {
                cmd = Commands[i];
                if (cmd.Id?.EndsWith('/') == true)
                {
                    cmd.Id = MergedPutCommand.GenerateNonConflictingId(Database, cmd.Id);
                    RetryOnError = true;
                }
            }
            throw;
        }
    }

    if (Logger.IsInfoEnabled)
    {
        Logger.Info($"Merged {NumberOfCommands:#,#;;0} operations ({Math.Round(TotalSize / 1024d, 1):#,#.#;;0} kb)");
    }

    return NumberOfCommands;
}
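When a put on a "doc-id/" prefix hits a Voron concurrency error, the remaining prefixed commands are rewritten with fresh non-conflicting ids and the merger retries them as a separate transaction. A plausible shape for such an id generator, as a hypothetical sketch with a simplified signature (the real MergedPutCommand.GenerateNonConflictingId takes the DocumentDatabase and is not shown in these snippets):

using System;

// Hypothetical sketch only - not the actual RavenDB implementation.
// The idea: keep the "doc-id/" prefix but append an opaque suffix that
// cannot collide with the ids that already exist under that prefix.
public static string GenerateNonConflictingId(string idPrefix)
{
    return idPrefix + Guid.NewGuid().ToString("N");
}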
public override int Execute(DocumentsOperationContext context)
{
    _disposables.Clear();
    Reply = new DynamicJsonArray();
    for (int i = ParsedCommands.Offset; i < ParsedCommands.Count; i++)
    {
        var cmd = ParsedCommands.Array[ParsedCommands.Offset + i];
        switch (cmd.Type)
        {
            case CommandType.PUT:
                DocumentsStorage.PutOperationResults putResult;
                try
                {
                    putResult = Database.DocumentsStorage.Put(context, cmd.Id, cmd.ChangeVector, cmd.Document);
                }
                catch (Voron.Exceptions.VoronConcurrencyErrorException)
                {
                    // RavenDB-10581 - If we have a concurrency error on "doc-id/",
                    // this means that we have existing values under the current etag;
                    // we'll generate a new (random) id for them.
                    // The TransactionMerger will re-run us when we ask it to as a
                    // separate transaction.
                    for (; i < ParsedCommands.Count; i++)
                    {
                        cmd = ParsedCommands.Array[ParsedCommands.Offset + i];
                        if (cmd.Type == CommandType.PUT && cmd.Id?.EndsWith('/') == true)
                        {
                            cmd.Id = MergedPutCommand.GenerateNonConflictingId(Database, cmd.Id);
                            RetryOnError = true;
                        }
                    }
                    throw;
                }
                catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }

                context.DocumentDatabase.HugeDocuments.AddIfDocIsHuge(cmd.Id, cmd.Document.Size);
                LastChangeVector = putResult.ChangeVector;
                ModifiedCollections?.Add(putResult.Collection.Name);

                // Make sure all the metadata fields are always added
                var putReply = new DynamicJsonValue
                {
                    ["Type"] = nameof(CommandType.PUT),
                    [Constants.Documents.Metadata.Id] = putResult.Id,
                    [Constants.Documents.Metadata.Collection] = putResult.Collection.Name,
                    [Constants.Documents.Metadata.ChangeVector] = putResult.ChangeVector,
                    [Constants.Documents.Metadata.LastModified] = putResult.LastModified
                };
                if (putResult.Flags != DocumentFlags.None)
                {
                    putReply[Constants.Documents.Metadata.Flags] = putResult.Flags;
                }
                Reply.Add(putReply);
                break;

            case CommandType.PATCH:
                try
                {
                    cmd.PatchCommand.Execute(context);
                }
                catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }

                var patchResult = cmd.PatchCommand.PatchResult;
                if (patchResult.ModifiedDocument != null)
                {
                    context.DocumentDatabase.HugeDocuments.AddIfDocIsHuge(cmd.Id, patchResult.ModifiedDocument.Size);
                }
                if (patchResult.ChangeVector != null)
                {
                    LastChangeVector = patchResult.ChangeVector;
                }
                if (patchResult.Collection != null)
                {
                    ModifiedCollections?.Add(patchResult.Collection);
                }

                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = patchResult.ChangeVector,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.PATCH),
                    ["PatchStatus"] = patchResult.Status.ToString(),
                    ["Debug"] = patchResult.Debug
                });
                break;

            case CommandType.DELETE:
                if (cmd.IdPrefixed == false)
                {
                    DocumentsStorage.DeleteOperationResult? deleted;
                    try
                    {
                        deleted = Database.DocumentsStorage.Delete(context, cmd.Id, cmd.ChangeVector);
                    }
                    catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                    {
                        return 0;
                    }

                    if (deleted != null)
                    {
                        LastTombstoneEtag = deleted.Value.Etag;
                        ModifiedCollections?.Add(deleted.Value.Collection.Name);
                    }

                    Reply.Add(new DynamicJsonValue
                    {
                        [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                        [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.DELETE),
                        ["Deleted"] = deleted != null
                    });
                }
                else
                {
                    var deleteResults = Database.DocumentsStorage.DeleteDocumentsStartingWith(context, cmd.Id);
                    for (var j = 0; j < deleteResults.Count; j++)
                    {
                        LastChangeVector = deleteResults[j].ChangeVector;
                        ModifiedCollections?.Add(deleteResults[j].Collection.Name);
                    }

                    Reply.Add(new DynamicJsonValue
                    {
                        [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                        [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.DELETE),
                        ["Deleted"] = deleteResults.Count > 0
                    });
                }
                break;

            case CommandType.AttachmentPUT:
                var attachmentStream = AttachmentStreams.Dequeue();
                var stream = attachmentStream.Stream;
                _disposables.Add(stream);

                var attachmentPutResult = Database.DocumentsStorage.AttachmentsStorage.PutAttachment(context, cmd.Id, cmd.Name,
                    cmd.ContentType, attachmentStream.Hash, cmd.ChangeVector, stream, updateDocument: false);
                LastChangeVector = attachmentPutResult.ChangeVector;

                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.Id);

                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = attachmentPutResult.DocumentId,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.AttachmentPUT),
                    [nameof(BatchRequestParser.CommandData.Name)] = attachmentPutResult.Name,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = attachmentPutResult.ChangeVector,
                    [nameof(AttachmentDetails.Hash)] = attachmentPutResult.Hash,
                    [nameof(BatchRequestParser.CommandData.ContentType)] = attachmentPutResult.ContentType,
                    [nameof(AttachmentDetails.Size)] = attachmentPutResult.Size
                });
                break;

            case CommandType.AttachmentDELETE:
                Database.DocumentsStorage.AttachmentsStorage.DeleteAttachment(context, cmd.Id, cmd.Name, cmd.ChangeVector, updateDocument: false);

                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.Id);

                Reply.Add(new DynamicJsonValue
                {
                    ["Type"] = nameof(CommandType.AttachmentDELETE),
                    [Constants.Documents.Metadata.Id] = cmd.Id,
                    ["Name"] = cmd.Name
                });
                break;
        }
    }

    if (_documentsToUpdateAfterAttachmentChange != null)
    {
        foreach (var documentId in _documentsToUpdateAfterAttachmentChange)
        {
            var changeVector = Database.DocumentsStorage.AttachmentsStorage.UpdateDocumentAfterAttachmentChange(context, documentId);
            if (changeVector != null)
            {
                LastChangeVector = changeVector;
            }
        }
    }

    return Reply.Count;
}
public override int Execute(DocumentsOperationContext context)
{
    if (IsClusterTransaction)
    {
        Debug.Assert(false, "Shouldn't happen - cluster tx run via normal means");
        return 0; // should never happen
    }
    _disposables.Clear();

    DocumentsStorage.PutOperationResults? lastPutResult = null;

    for (int i = ParsedCommands.Offset; i < ParsedCommands.Count; i++)
    {
        var cmd = ParsedCommands.Array[ParsedCommands.Offset + i];
        switch (cmd.Type)
        {
            case CommandType.PUT:
                DocumentsStorage.PutOperationResults putResult;
                try
                {
                    putResult = Database.DocumentsStorage.Put(context, cmd.Id, cmd.ChangeVector, cmd.Document);
                }
                catch (Voron.Exceptions.VoronConcurrencyErrorException)
                {
                    // RavenDB-10581 - If we have a concurrency error on "doc-id/",
                    // this means that we have existing values under the current etag;
                    // we'll generate a new (random) id for them.
                    // The TransactionMerger will re-run us when we ask it to as a
                    // separate transaction.
                    for (; i < ParsedCommands.Count; i++)
                    {
                        cmd = ParsedCommands.Array[ParsedCommands.Offset + i];
                        if (cmd.Type == CommandType.PUT && cmd.Id?.EndsWith('/') == true)
                        {
                            cmd.Id = MergedPutCommand.GenerateNonConflictingId(Database, cmd.Id);
                            RetryOnError = true;
                        }
                    }
                    throw;
                }
                catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }

                context.DocumentDatabase.HugeDocuments.AddIfDocIsHuge(cmd.Id, cmd.Document.Size);
                AddPutResult(putResult);
                lastPutResult = putResult;
                break;

            case CommandType.PATCH:
                try
                {
                    cmd.PatchCommand.Execute(context);
                }
                catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }

                var patchResult = cmd.PatchCommand.PatchResult;
                if (patchResult.ModifiedDocument != null)
                {
                    context.DocumentDatabase.HugeDocuments.AddIfDocIsHuge(cmd.Id, patchResult.ModifiedDocument.Size);
                }
                if (patchResult.ChangeVector != null)
                {
                    LastChangeVector = patchResult.ChangeVector;
                }
                if (patchResult.Collection != null)
                {
                    ModifiedCollections?.Add(patchResult.Collection);
                }

                var patchReply = new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = patchResult.ChangeVector,
                    [nameof(Constants.Documents.Metadata.LastModified)] = patchResult.LastModified,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.PATCH),
                    [nameof(PatchStatus)] = patchResult.Status,
                    [nameof(PatchResult.Debug)] = patchResult.Debug
                };
                if (cmd.ReturnDocument)
                {
                    patchReply[nameof(PatchResult.ModifiedDocument)] = patchResult.ModifiedDocument;
                }
                Reply.Add(patchReply);
                break;

            case CommandType.DELETE:
                if (cmd.IdPrefixed == false)
                {
                    DocumentsStorage.DeleteOperationResult? deleted;
                    try
                    {
                        deleted = Database.DocumentsStorage.Delete(context, cmd.Id, cmd.ChangeVector);
                    }
                    catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                    {
                        return 0;
                    }
                    AddDeleteResult(deleted, cmd.Id);
                }
                else
                {
                    DeleteWithPrefix(context, cmd.Id);
                }
                break;

            case CommandType.AttachmentPUT:
                var attachmentStream = AttachmentStreams.Dequeue();
                var stream = attachmentStream.Stream;
                _disposables.Add(stream);

                var docId = cmd.Id;
                if (docId[docId.Length - 1] == '/')
                {
                    // attachment sent by Raven ETL, only prefix is defined
                    if (lastPutResult == null)
                    {
                        ThrowUnexpectedOrderOfRavenEtlCommands();
                    }
                    Debug.Assert(lastPutResult.Value.Id.StartsWith(docId));
                    docId = lastPutResult.Value.Id;
                }

                var attachmentPutResult = Database.DocumentsStorage.AttachmentsStorage.PutAttachment(context, docId, cmd.Name,
                    cmd.ContentType, attachmentStream.Hash, cmd.ChangeVector, stream, updateDocument: false);
                LastChangeVector = attachmentPutResult.ChangeVector;

                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(docId);

                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = attachmentPutResult.DocumentId,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.AttachmentPUT),
                    [nameof(BatchRequestParser.CommandData.Name)] = attachmentPutResult.Name,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = attachmentPutResult.ChangeVector,
                    [nameof(AttachmentDetails.Hash)] = attachmentPutResult.Hash,
                    [nameof(BatchRequestParser.CommandData.ContentType)] = attachmentPutResult.ContentType,
                    [nameof(AttachmentDetails.Size)] = attachmentPutResult.Size
                });
                break;

            case CommandType.AttachmentDELETE:
                Database.DocumentsStorage.AttachmentsStorage.DeleteAttachment(context, cmd.Id, cmd.Name, cmd.ChangeVector, updateDocument: false);

                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.Id);

                Reply.Add(new DynamicJsonValue
                {
                    ["Type"] = nameof(CommandType.AttachmentDELETE),
                    [Constants.Documents.Metadata.Id] = cmd.Id,
                    ["Name"] = cmd.Name
                });
                break;

            case CommandType.AttachmentMOVE:
                var attachmentMoveResult = Database.DocumentsStorage.AttachmentsStorage.MoveAttachment(context, cmd.Id, cmd.Name,
                    cmd.DestinationId, cmd.DestinationName, cmd.ChangeVector);
                LastChangeVector = attachmentMoveResult.ChangeVector;

                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.Id);
                _documentsToUpdateAfterAttachmentChange.Add(cmd.DestinationId);

                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.AttachmentMOVE),
                    [nameof(BatchRequestParser.CommandData.Name)] = cmd.Name,
                    [nameof(BatchRequestParser.CommandData.DestinationId)] = attachmentMoveResult.DocumentId,
                    [nameof(BatchRequestParser.CommandData.DestinationName)] = attachmentMoveResult.Name,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = attachmentMoveResult.ChangeVector,
                    [nameof(AttachmentDetails.Hash)] = attachmentMoveResult.Hash,
                    [nameof(BatchRequestParser.CommandData.ContentType)] = attachmentMoveResult.ContentType,
                    [nameof(AttachmentDetails.Size)] = attachmentMoveResult.Size
                });
                break;

            case CommandType.AttachmentCOPY:
                var attachmentCopyResult = Database.DocumentsStorage.AttachmentsStorage.CopyAttachment(context, cmd.Id, cmd.Name,
                    cmd.DestinationId, cmd.DestinationName, cmd.ChangeVector);
                LastChangeVector = attachmentCopyResult.ChangeVector;

                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.DestinationId);

                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = attachmentCopyResult.DocumentId,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.AttachmentCOPY),
                    [nameof(BatchRequestParser.CommandData.Name)] = attachmentCopyResult.Name,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = attachmentCopyResult.ChangeVector,
                    [nameof(AttachmentDetails.Hash)] = attachmentCopyResult.Hash,
                    [nameof(BatchRequestParser.CommandData.ContentType)] = attachmentCopyResult.ContentType,
                    [nameof(AttachmentDetails.Size)] = attachmentCopyResult.Size
                });
                break;

            case CommandType.Counters:
                var counterDocId = cmd.Counters.DocumentId;
                if (cmd.FromEtl && counterDocId[counterDocId.Length - 1] == '/')
                {
                    // counter sent by Raven ETL, only prefix is defined
                    if (lastPutResult == null)
                    {
                        ThrowUnexpectedOrderOfRavenEtlCommands();
                    }
                    Debug.Assert(lastPutResult.Value.Id.StartsWith(counterDocId));
                    cmd.Counters.DocumentId = lastPutResult.Value.Id;
                }

                var counterBatchCmd = new CountersHandler.ExecuteCounterBatchCommand(Database, new CounterBatch
                {
                    Documents = new List<DocumentCountersOperation> { cmd.Counters },
                    FromEtl = cmd.FromEtl
                });
                try
                {
                    counterBatchCmd.Execute(context);
                }
                catch (DocumentDoesNotExistException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }

                LastChangeVector = counterBatchCmd.LastChangeVector;

                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = counterBatchCmd.LastChangeVector,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.Counters),
                    [nameof(CountersDetail)] = counterBatchCmd.CountersDetail.ToJson(),
                });
                break;
        }
    }

    if (_documentsToUpdateAfterAttachmentChange != null)
    {
        foreach (var documentId in _documentsToUpdateAfterAttachmentChange)
        {
            var changeVector = Database.DocumentsStorage.AttachmentsStorage.UpdateDocumentAfterAttachmentChange(context, documentId);
            if (changeVector != null)
            {
                LastChangeVector = changeVector;
            }
        }
    }

    return Reply.Count;
}
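In the ETL path, an AttachmentPUT or Counters command may carry only a document-id prefix ending in '/', which is resolved against the id of the most recent PUT; that is what the lastPutResult checks and the Debug.Assert enforce. A hypothetical command sequence, for illustration only:

// Hypothetical batch sent by Raven ETL (order matters):
//   1. PUT           Id = "orders/1-A"       -> lastPutResult.Id == "orders/1-A"
//   2. AttachmentPUT Id = "orders/"          -> resolved to "orders/1-A"
//   3. Counters      DocumentId = "orders/"  -> resolved to "orders/1-A"
// A prefixed command arriving before any PUT triggers
// ThrowUnexpectedOrderOfRavenEtlCommands().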
protected override long ExecuteCmd(DocumentsOperationContext context)
{
    for (int i = 0; i < NumberOfCommands; i++)
    {
        var cmd = Commands[i];
        Debug.Assert(cmd.Type == CommandType.PUT ||
                     cmd.Type == CommandType.Counters ||
                     cmd.Type == CommandType.TimeSeries ||
                     cmd.Type == CommandType.TimeSeriesBulkInsert ||
                     cmd.Type == CommandType.AttachmentPUT);

        switch (cmd.Type)
        {
            case CommandType.PUT:
                try
                {
                    Database.DocumentsStorage.Put(context, cmd.Id, null, cmd.Document);
                }
                catch (VoronConcurrencyErrorException)
                {
                    // RavenDB-10581 - If we have a concurrency error on "doc-id/",
                    // this means that we have existing values under the current etag;
                    // we'll generate a new (random) id for them.
                    // The TransactionMerger will re-run us when we ask it to as a
                    // separate transaction.
                    for (; i < NumberOfCommands; i++)
                    {
                        cmd = Commands[i];
                        if (cmd.Type != CommandType.PUT)
                        {
                            continue;
                        }
                        if (cmd.Id?.EndsWith(Database.IdentityPartsSeparator) == true)
                        {
                            cmd.Id = MergedPutCommand.GenerateNonConflictingId(Database, cmd.Id);
                            RetryOnError = true;
                        }
                    }
                    throw;
                }
                break;

            case CommandType.Counters:
            {
                var collection = CountersHandler.ExecuteCounterBatchCommand.GetDocumentCollection(cmd.Id, Database, context, fromEtl: false, out _);
                foreach (var counterOperation in cmd.Counters.Operations)
                {
                    counterOperation.DocumentId = cmd.Counters.DocumentId;
                    Database.DocumentsStorage.CountersStorage.IncrementCounter(context, cmd.Id, collection, counterOperation.CounterName, counterOperation.Delta, out _);

                    var updates = GetDocumentUpdates(cmd.Id);
                    updates.AddCounter(counterOperation.CounterName);
                }
                break;
            }

            case CommandType.TimeSeries:
            case CommandType.TimeSeriesBulkInsert:
            {
                var docCollection = TimeSeriesHandler.ExecuteTimeSeriesBatchCommand.GetDocumentCollection(Database, context, cmd.Id, fromEtl: false);
                Database.DocumentsStorage.TimeSeriesStorage.AppendTimestamp(context, cmd.Id, docCollection, cmd.TimeSeries.Name, cmd.TimeSeries.Appends);
                break;
            }

            case CommandType.AttachmentPUT:
            {
                using (cmd.AttachmentStream.Stream)
                {
                    Database.DocumentsStorage.AttachmentsStorage.PutAttachment(context, cmd.Id, cmd.Name, cmd.ContentType ?? "",
                        cmd.AttachmentStream.Hash, cmd.ChangeVector, cmd.AttachmentStream.Stream, updateDocument: false);
                }

                var updates = GetDocumentUpdates(cmd.Id);
                updates.AddAttachment();
                break;
            }
        }
    }

    if (_documentsToUpdate.Count > 0)
    {
        foreach (var kvp in _documentsToUpdate)
        {
            var documentId = kvp.Key;
            var updates = kvp.Value;

            if (updates.Attachments)
            {
                Database.DocumentsStorage.AttachmentsStorage.UpdateDocumentAfterAttachmentChange(context, documentId);
            }

            if (updates.Counters != null && updates.Counters.Count > 0)
            {
                var docToUpdate = Database.DocumentsStorage.Get(context, documentId);
                if (docToUpdate != null)
                {
                    Database.DocumentsStorage.CountersStorage.UpdateDocumentCounters(context, docToUpdate, documentId, updates.Counters, countersToRemove: null, NonPersistentDocumentFlags.ByCountersUpdate);
                }
            }
        }
    }

    if (Logger.IsInfoEnabled)
    {
        Logger.Info($"Executed {NumberOfCommands:#,#;;0} bulk insert operations, size: ({new Size(TotalSize, SizeUnit.Bytes)})");
    }

    return NumberOfCommands;
}
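This bulk-insert command applies documents, counters, time series, and attachments inside one merged transaction. On the client that corresponds to the bulk-insert API; a hedged sketch assuming an initialized DocumentStore named store, an assumed Order entity type, and counter/time-series/attachment bulk-insert APIs available in recent RavenDB clients:

using System;
using System.IO;
using Raven.Client.Documents.BulkInsert;

// `store` is an already-initialized DocumentStore; Order is an assumed type.
using (BulkInsertOperation bulkInsert = store.BulkInsert())
using (var invoiceStream = File.OpenRead("invoice.pdf"))
{
    bulkInsert.Store(new Order { Company = "companies/1-A" }, "orders/1-A");

    // Counters, time series, and attachments ride along in the same
    // bulk-insert stream and land in the merged command shown above.
    bulkInsert.CountersFor("orders/1-A").Increment("downloads");
    bulkInsert.TimeSeriesFor("orders/1-A", "Temperature").Append(DateTime.UtcNow, 21.5);
    bulkInsert.AttachmentsFor("orders/1-A").Store("invoice.pdf", invoiceStream, "application/pdf");
}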