/// <summary>
/// Deletes all stored conflicts for <paramref name="id"/> and, when any existed,
/// returns a change vector that merges the incoming document's vector, every
/// deleted conflict's vector, and a fresh entry for this node at <paramref name="newEtag"/>.
/// When there were no conflicts the incoming vector is returned untouched.
/// </summary>
public (string ChangeVector, NonPersistentDocumentFlags NonPersistentFlags) MergeConflictChangeVectorIfNeededAndDeleteConflicts(string documentChangeVector, DocumentsOperationContext context, string id, long newEtag, BlittableJsonReaderObject document)
{
    var deletionResult = DeleteConflictsFor(context, id, document);
    var conflictVectors = deletionResult.ChangeVectors;

    // No conflicts were stored for this id - nothing to merge.
    if (conflictVectors == null || conflictVectors.Count == 0)
        return (documentChangeVector, deletionResult.NonPersistentFlags);

    // Fold every deleted conflict's change vector into a single merged vector.
    string merged = null;
    for (var i = 0; i < conflictVectors.Count; i++)
    {
        merged = i == 0
            ? conflictVectors[i]
            : ChangeVectorUtils.MergeVectors(merged, conflictVectors[i]);
    }

    // Include the incoming document's own change vector, when one was supplied.
    if (string.IsNullOrEmpty(documentChangeVector) == false)
        merged = ChangeVectorUtils.MergeVectors(merged, documentChangeVector);

    // Stamp the merge with a fresh entry for this node at the new etag.
    var localEntry = ChangeVectorUtils.NewChangeVector(_documentDatabase.ServerStore.NodeTag, newEtag, _documentDatabase.DbBase64Id);
    merged = ChangeVectorUtils.MergeVectors(merged, localEntry);

    return (merged, deletionResult.NonPersistentFlags);
}
/// <summary>
/// Single pass over <paramref name="conflicts"/>: finds the index of the conflict
/// carrying the highest etag while folding every conflict's change vector into
/// <paramref name="mergedChangeVectorEntries"/>. Returns 0 (and a null merged
/// vector) for an empty list.
/// </summary>
private static int FindIndexOfLargestEtagAndMergeChangeVectors(IReadOnlyList<DocumentConflict> conflicts, out string mergedChangeVectorEntries)
{
    mergedChangeVectorEntries = null;
    var indexOfLargestEtag = 0;
    long largestEtag = 0;

    for (var index = 0; index < conflicts.Count; index++)
    {
        var current = conflicts[index];

        // Track which conflict holds the highest etag seen so far.
        if (current.Etag > largestEtag)
        {
            largestEtag = current.Etag;
            indexOfLargestEtag = index;
        }

        // First vector seeds the merge; subsequent ones are folded in.
        mergedChangeVectorEntries = index == 0
            ? current.ChangeVector
            : ChangeVectorUtils.MergeVectors(mergedChangeVectorEntries, current.ChangeVector);
    }

    return indexOfLargestEtag;
}
/// <summary>
/// Regression test: merging two change vectors whose etags exceed int.MaxValue
/// (e.g. B:2146533895, A:2319854662) and computing the distance between them
/// must not overflow. The test's contract is simply that none of the calls
/// below throws; the computed values are intentionally discarded.
/// </summary>
public void EtagShouldNotOverflow()
{
    var cv1 = "A:86865297-V8jm+M9QKkuvfEUTQBfOtA, " +
              "C:87142328-5j4moMb8A0KxxcL9GhY/nw, " +
              "B:2146533895-SKM7aNMmSkW92wrQke+D4g, " +
              "E:1856361198-/mqfiL1AxkGlsqx1zwh2rw, " +
              "D:1882901489-TqJlheobc0KTcLDerIQ9oQ, " +
              "D:17267243-/3+4WZUBGkWL6/J4GMv2GA, " +
              "D:46103608-P1lQdjeAckGkdmY9RWr/Bg, " +
              "A:27850500-iUMDTgYwOkG25uod1g6gSg";
    var cv2 = "C:87142328-5j4moMb8A0KxxcL9GhY/nw, " +
              "B:2146533895-SKM7aNMmSkW92wrQke+D4g, " +
              "E:1856361198-/mqfiL1AxkGlsqx1zwh2rw, " +
              "D:1882901489-TqJlheobc0KTcLDerIQ9oQ, " +
              "A:27850500-iUMDTgYwOkG25uod1g6gSg, " +
              "A:86865297-V8jm+M9QKkuvfEUTQBfOtA, " +
              "A:2319854662-eCGjjCNbP0CeTGSJMeqLZA";

    // Discards make explicit that only "does not throw" is being verified
    // (the original bound these to unused locals, which read as a forgotten assert).
    _ = ChangeVectorUtils.MergeVectors(cv1, cv2).ToChangeVector();
    _ = ChangeVectorUtils.Distance(cv1, cv2);
    _ = ChangeVectorUtils.Distance(cv2, cv1);
}
/// <summary>
/// Attempts to resolve a replication conflict without storing it: succeeds when
/// the local document is content-identical to the incoming one (merging change
/// vectors and flagging attachment/counter/time-series differences for later
/// resolution), or when both sides are tombstones. Returns false when a real
/// conflict exists and must be handled elsewhere.
/// </summary>
public bool TryResolveIdenticalDocument(DocumentsOperationContext context, string id, BlittableJsonReaderObject incomingDoc, long lastModifiedTicks, string incomingChangeVector)
{
    var stored = _database.DocumentsStorage.GetDocumentOrTombstone(context, id, throwOnConflict: false);

    if (stored.Document != null)
    {
        var comparison = DocumentCompare.IsEqualTo(stored.Document.Data, incomingDoc, DocumentCompare.DocumentCompareOptions.MergeMetadata);
        if (comparison == DocumentCompareResult.NotEqual)
            return false;

        // no real conflict here, both documents have identical content so we only merge the change vector without increasing the local etag to prevent ping-pong replication
        var mergedChangeVector = ChangeVectorUtils.MergeVectors(incomingChangeVector, stored.Document.ChangeVector);

        // Accumulate resolver flags for each sub-resource that still differs.
        var flags = NonPersistentDocumentFlags.FromResolver;
        if (comparison.HasFlag(DocumentCompareResult.AttachmentsNotEqual))
            flags |= NonPersistentDocumentFlags.ResolveAttachmentsConflict;
        if (comparison.HasFlag(DocumentCompareResult.CountersNotEqual))
            flags |= NonPersistentDocumentFlags.ResolveCountersConflict;
        if (comparison.HasFlag(DocumentCompareResult.TimeSeriesNotEqual))
            flags |= NonPersistentDocumentFlags.ResolveTimeSeriesConflict;

        _database.DocumentsStorage.Put(context, id, null, incomingDoc, lastModifiedTicks, mergedChangeVector, nonPersistentFlags: flags);
        return true;
    }

    var tombstone = stored.Tombstone;
    if (tombstone != null && incomingDoc == null)
    {
        // Conflict between two tombstones resolves to the local tombstone
        tombstone.ChangeVector = ChangeVectorUtils.MergeVectors(incomingChangeVector, tombstone.ChangeVector);
        using (Slice.External(context.Allocator, tombstone.LowerId, out Slice lowerId))
        {
            _database.DocumentsStorage.ConflictsStorage.DeleteConflicts(context, lowerId, null, tombstone.ChangeVector);
        }
        return true;
    }

    return false;
}
/// <summary>
/// Resolves a conflict on a HiLo document: the winner is whichever version
/// (incoming, local, or any stored conflict) carries the highest 'Max' value,
/// and the resolved document is stored with all change vectors merged.
/// </summary>
/// <exception cref="InvalidDataException">The incoming document lacks a 'Max' property.</exception>
private void HandleHiloConflict(DocumentsOperationContext context, string id, BlittableJsonReaderObject doc, string changeVector)
{
    long highestMax;
    if (doc != null)
    {
        if (doc.TryGet("Max", out highestMax) == false)
        {
            throw new InvalidDataException("Tried to resolve HiLo document conflict but failed. Missing property name 'Max'");
        }
    }
    else
    {
        highestMax = 0;
    }

    var conflicts = _database.DocumentsStorage.ConflictsStorage.GetConflictsFor(context, id);
    var resolvedHiLoDoc = doc;
    string mergedChangeVector;

    if (conflicts.Count == 0)
    {
        //conflict with another existing document
        var localHiloDoc = _database.DocumentsStorage.Get(context, id);
        if (localHiloDoc.Data.TryGet("Max", out long localMax) && localMax > highestMax)
        {
            resolvedHiLoDoc = localHiloDoc.Data.Clone(context);
        }
        mergedChangeVector = ChangeVectorUtils.MergeVectors(changeVector, localHiloDoc.ChangeVector);
    }
    else
    {
        // Scan stored conflicts for an even higher Max; the last winner is kept.
        foreach (var conflict in conflicts)
        {
            if (conflict.Doc.TryGet("Max", out long candidateMax) == false || candidateMax <= highestMax)
                continue;
            highestMax = candidateMax;
            resolvedHiLoDoc = conflict.Doc.Clone(context);
        }
        var merged = ChangeVectorUtils.MergeVectors(conflicts.Select(c => c.ChangeVector).ToList());
        mergedChangeVector = ChangeVectorUtils.MergeVectors(merged, changeVector);
    }

    _database.DocumentsStorage.Put(context, id, null, resolvedHiLoDoc, changeVector: mergedChangeVector, nonPersistentFlags: NonPersistentDocumentFlags.FromResolver);
}
/// <summary>
/// Deletes all conflicts stored under <paramref name="lowerId"/> and returns a
/// change vector merging every deleted conflict's vector with a fresh entry for
/// this node at <paramref name="newEtag"/>. When no conflicts exist the result
/// is just the existing vector merged with the new local entry.
/// </summary>
public string GetMergedConflictChangeVectorsAndDeleteConflicts(DocumentsOperationContext context, Slice lowerId, long newEtag, string existingChangeVector = null)
{
    // Fast path: the database holds no conflicts at all.
    if (ConflictsCount == 0)
        return MergeVectorsWithoutConflicts(newEtag, existingChangeVector);

    var vectors = DeleteConflictsFor(context, lowerId, null).ChangeVectors;
    if (vectors == null || vectors.Count == 0)
        return MergeVectorsWithoutConflicts(newEtag, existingChangeVector);

    // Append a fresh entry for this node, then merge everything at once.
    vectors.Add(ChangeVectorUtils.NewChangeVector(_documentDatabase.ServerStore.NodeTag, newEtag, _documentsStorage.Environment.Base64Id));
    return ChangeVectorUtils.MergeVectors(vectors);
}
/// <summary>
/// Deletes all stored conflicts for <paramref name="id"/>. When conflicts
/// existed, returns a vector merging the incoming document's vector, the
/// current database change vector, a fresh entry for this node at
/// <paramref name="newEtag"/>, and every deleted conflict's vector; otherwise
/// the incoming vector is returned unchanged.
/// </summary>
public (string ChangeVector, NonPersistentDocumentFlags NonPersistentFlags) MergeConflictChangeVectorIfNeededAndDeleteConflicts(string documentChangeVector, DocumentsOperationContext context, string id, long newEtag, BlittableJsonReaderObject document)
{
    var deletion = DeleteConflictsFor(context, id, document);

    // Nothing was conflicted - keep the incoming vector as-is.
    if (deletion.ChangeVectors == null || deletion.ChangeVectors.Count == 0)
        return (documentChangeVector, deletion.NonPersistentFlags);

    var vectorsToMerge = new List<string>
    {
        documentChangeVector,
        context.LastDatabaseChangeVector ?? GetDatabaseChangeVector(context),
        ChangeVectorUtils.NewChangeVector(_documentDatabase.ServerStore.NodeTag, newEtag, _documentsStorage.Environment.Base64Id)
    };
    vectorsToMerge.AddRange(deletion.ChangeVectors);

    return (ChangeVectorUtils.MergeVectors(vectorsToMerge), deletion.NonPersistentFlags);
}
/// <summary>
/// Attempts to resolve a replication conflict without storing it: succeeds when
/// the local document is content-identical to the incoming one (merging change
/// vectors only, so the local etag is untouched), or when both sides are
/// tombstones. Returns false when a real conflict remains.
/// </summary>
public bool TryResolveIdenticalDocument(DocumentsOperationContext context, string id, BlittableJsonReaderObject incomingDoc, long lastModifiedTicks, string incomingChangeVector)
{
    var stored = _database.DocumentsStorage.GetDocumentOrTombstone(context, id, throwOnConflict: false);
    var localDoc = stored.Document;

    if (localDoc != null)
    {
        var comparison = DocumentCompare.IsEqualTo(localDoc.Data, incomingDoc, true);
        if (comparison == DocumentCompareResult.NotEqual)
            return false;

        // no real conflict here, both documents have identical content
        var mergedChangeVector = ChangeVectorUtils.MergeVectors(incomingChangeVector, localDoc.ChangeVector);

        // Attachments still differ when ShouldRecreateDocument is set; flag them for resolution.
        var flags = NonPersistentDocumentFlags.None;
        if ((comparison & DocumentCompareResult.ShouldRecreateDocument) == DocumentCompareResult.ShouldRecreateDocument)
            flags = NonPersistentDocumentFlags.ResolveAttachmentsConflict;

        _database.DocumentsStorage.Put(context, id, null, incomingDoc, lastModifiedTicks, mergedChangeVector, nonPersistentFlags: flags);
        return true;
    }

    var localTombstone = stored.Tombstone;
    if (localTombstone != null && incomingDoc == null)
    {
        // Conflict between two tombstones resolves to the local tombstone
        localTombstone.ChangeVector = ChangeVectorUtils.MergeVectors(incomingChangeVector, localTombstone.ChangeVector);
        using (Slice.External(context.Allocator, localTombstone.LowerId, out Slice lowerId))
        {
            _database.DocumentsStorage.ConflictsStorage.DeleteConflicts(context, lowerId, null, localTombstone.ChangeVector);
        }
        return true;
    }

    return false;
}
/// <summary>
/// Main subscription-connection loop: repeatedly fetches a batch of documents,
/// streams it to the client, waits for the client's acknowledge (or a disposal
/// notification), and records subscription progress. Heartbeats keep the TCP
/// connection alive whenever more than ~1s passes without sending data.
/// </summary>
private async Task ProcessSubscriptionAsync()
{
    if (_logger.IsInfoEnabled)
    {
        _logger.Info(
            $"Starting processing documents for subscription {SubscriptionId} received from {TcpConnection.TcpClient.Client.RemoteEndPoint}");
    }
    using (DisposeOnDisconnect)
    using (TcpConnection.DocumentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext docsContext))
    using (RegisterForNotificationOnNewDocuments())
    {
        var replyFromClientTask = GetReplyFromClientAsync();
        string lastChangeVector = null;
        // Progress checkpoint: the change vector the client had acknowledged before this batch.
        string subscriptionChangeVectorBeforeCurrentBatch = SubscriptionState.ChangeVectorForNextBatchStartingPoint;
        var startEtag = GetStartEtagForSubscription(docsContext, SubscriptionState);
        var patch = SetupFilterScript();
        var fetcher = new SubscriptionDocumentsFetcher(TcpConnection.DocumentDatabase, _options.MaxDocsPerBatch, SubscriptionId, TcpConnection.TcpClient.Client.RemoteEndPoint);
        while (CancellationTokenSource.IsCancellationRequested == false)
        {
            bool anyDocumentsSentInCurrentIteration = false;
            var sendingCurrentBatchStopwatch = Stopwatch.StartNew();
            _buffer.SetLength(0);
            var docsToFlush = 0;
            using (TcpConnection.ContextPool.AllocateOperationContext(out JsonOperationContext context))
            using (var writer = new BlittableJsonTextWriter(context, _buffer))
            {
                using (docsContext.OpenReadTransaction())
                {
                    foreach (var result in fetcher.GetDataToSend(docsContext, Collection, Revisions, SubscriptionState, patch, startEtag))
                    {
                        startEtag = result.Doc.Etag;
                        // Advance the batch's change vector: merge with the stored starting point when one exists.
                        lastChangeVector = string.IsNullOrEmpty(SubscriptionState.ChangeVectorForNextBatchStartingPoint) ?
                            result.Doc.ChangeVector :
                            ChangeVectorUtils.MergeVectors(result.Doc.ChangeVector, SubscriptionState.ChangeVectorForNextBatchStartingPoint);
                        if (result.Doc.Data == null)
                        {
                            // Document was filtered out (no data to send); heartbeat to keep the client alive.
                            if (sendingCurrentBatchStopwatch.ElapsedMilliseconds > 1000)
                            {
                                await SendHeartBeat();
                                sendingCurrentBatchStopwatch.Restart();
                            }
                            continue;
                        }
                        anyDocumentsSentInCurrentIteration = true;
                        writer.WriteStartObject();
                        writer.WritePropertyName(context.GetLazyStringForFieldWithCaching(TypeSegment));
                        writer.WriteValue(BlittableJsonToken.String, context.GetLazyStringForFieldWithCaching(DataSegment));
                        writer.WriteComma();
                        writer.WritePropertyName(context.GetLazyStringForFieldWithCaching(DataSegment));
                        result.Doc.EnsureMetadata();
                        if (result.Exception != null)
                        {
                            // Patch/projection failed for this document: send only its metadata plus the exception text.
                            var metadata = result.Doc.Data[Client.Constants.Documents.Metadata.Key];
                            writer.WriteValue(BlittableJsonToken.StartObject,
                                docsContext.ReadObject(new DynamicJsonValue
                                {
                                    [Client.Constants.Documents.Metadata.Key] = metadata
                                }, result.Doc.Id)
                            );
                            writer.WriteComma();
                            writer.WritePropertyName(context.GetLazyStringForFieldWithCaching(ExceptionSegment));
                            writer.WriteValue(BlittableJsonToken.String, context.GetLazyStringForFieldWithCaching(result.Exception.ToString()));
                        }
                        else
                        {
                            writer.WriteDocument(docsContext, result.Doc, metadataOnly: false);
                        }
                        writer.WriteEndObject();
                        docsToFlush++;
                        // perform flush for current batch after 1000ms of running or 1 MB
                        if (_buffer.Length > Constants.Size.Megabyte || sendingCurrentBatchStopwatch.ElapsedMilliseconds > 1000)
                        {
                            if (docsToFlush > 0)
                            {
                                await FlushDocsToClient(writer, docsToFlush);
                                docsToFlush = 0;
                                sendingCurrentBatchStopwatch.Restart();
                            }
                            else
                            {
                                await SendHeartBeat();
                            }
                        }
                    }
                }
                if (anyDocumentsSentInCurrentIteration)
                {
                    // Terminate the batch with an EndOfBatch marker and push any remainder.
                    context.Write(writer, new DynamicJsonValue
                    {
                        [nameof(SubscriptionConnectionServerMessage.Type)] = nameof(SubscriptionConnectionServerMessage.MessageType.EndOfBatch)
                    });
                    await FlushDocsToClient(writer, docsToFlush, true);
                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info(
                            $"Finished sending a batch with {docsToFlush} documents for subscription {Options.SubscriptionName}");
                    }
                }
            }
            if (anyDocumentsSentInCurrentIteration == false)
            {
                // NOTE(review): this log message says "Finished sending a batch" inside the
                // branch where NO documents were sent (docsToFlush is 0 here) - it looks
                // copy-pasted from the branch above; confirm the intended wording.
                if (_logger.IsInfoEnabled)
                {
                    _logger.Info(
                        $"Finished sending a batch with {docsToFlush} documents for subscription {Options.SubscriptionName}");
                }
                // Even an empty batch advances progress (e.g. everything was filtered out),
                // so acknowledge and move the checkpoint forward.
                await TcpConnection.DocumentDatabase.SubscriptionStorage.AcknowledgeBatchProcessed(SubscriptionId, Options.SubscriptionName, lastChangeVector, subscriptionChangeVectorBeforeCurrentBatch);
                subscriptionChangeVectorBeforeCurrentBatch = lastChangeVector;
                if (sendingCurrentBatchStopwatch.ElapsedMilliseconds > 1000)
                {
                    await SendHeartBeat();
                }
                using (docsContext.OpenReadTransaction())
                {
                    // New documents may have arrived while we were processing - loop again immediately.
                    long globalEtag = TcpConnection.DocumentDatabase.DocumentsStorage.GetLastDocumentEtag(docsContext, Collection);
                    if (globalEtag > startEtag)
                    {
                        continue;
                    }
                }
                if (await WaitForChangedDocuments(replyFromClientTask))
                {
                    continue;
                }
            }
            // Wait for the client's reply, sending a heartbeat every ~5s while idle.
            SubscriptionConnectionClientMessage clientReply;
            while (true)
            {
                var result = await Task.WhenAny(replyFromClientTask, TimeoutManager.WaitFor(TimeSpan.FromMilliseconds(5000), CancellationTokenSource.Token)).ConfigureAwait(false);
                CancellationTokenSource.Token.ThrowIfCancellationRequested();
                if (result == replyFromClientTask)
                {
                    clientReply = await replyFromClientTask;
                    if (clientReply.Type == SubscriptionConnectionClientMessage.MessageType.DisposedNotification)
                    {
                        CancellationTokenSource.Cancel();
                        break;
                    }
                    // Queue the next reply read before handling the current one.
                    replyFromClientTask = GetReplyFromClientAsync();
                    break;
                }
                await SendHeartBeat();
            }
            CancellationTokenSource.Token.ThrowIfCancellationRequested();
            switch (clientReply.Type)
            {
                case SubscriptionConnectionClientMessage.MessageType.Acknowledge:
                    // Persist progress, then confirm the acknowledge back to the client.
                    await TcpConnection.DocumentDatabase.SubscriptionStorage.AcknowledgeBatchProcessed(
                        SubscriptionId,
                        Options.SubscriptionName,
                        lastChangeVector,
                        subscriptionChangeVectorBeforeCurrentBatch);
                    subscriptionChangeVectorBeforeCurrentBatch = lastChangeVector;
                    Stats.LastAckReceivedAt = DateTime.UtcNow;
                    Stats.AckRate.Mark();
                    await WriteJsonAsync(new DynamicJsonValue
                    {
                        [nameof(SubscriptionConnectionServerMessage.Type)] = nameof(SubscriptionConnectionServerMessage.MessageType.Confirm)
                    });
                    break;
                //precaution, should not reach this case...
                case SubscriptionConnectionClientMessage.MessageType.DisposedNotification:
                    CancellationTokenSource.Cancel();
                    break;
                default:
                    throw new ArgumentException("Unknown message type from client " + clientReply.Type);
            }
        }
        CancellationTokenSource.Token.ThrowIfCancellationRequested();
    }
}
/// <summary>
/// Writes a single counter value into the counters table. When a change vector
/// is supplied (replication path), the write is skipped if it is already merged
/// into the stored vector; when none is supplied (local path), a fresh change
/// vector is generated and folded into the database change vector.
/// </summary>
private void PutCounterImpl(DocumentsOperationContext context, string documentId, string collection, string name, string changeVector, long value)
{
    if (context.Transaction == null)
    {
        DocumentPutAction.ThrowRequiresTransaction();
        Debug.Assert(false);// never hit
    }
    var collectionName = _documentsStorage.ExtractCollectionName(context, collection);
    var table = GetCountersTable(context.Transaction.InnerTransaction, collectionName);
    // Key includes the change vector (or this node's id for local writes) so
    // concurrent writers on different nodes get distinct entries.
    using (GetCounterKey(context, documentId, name, changeVector ?? context.Environment.Base64Id, out var counterKey))
    {
        using (DocumentIdWorker.GetStringPreserveCase(context, name, out Slice nameSlice))
        using (table.Allocate(out TableValueBuilder tvb))
        {
            if (changeVector != null)
            {
                if (table.ReadByKey(counterKey, out var existing))
                {
                    var existingChangeVector = TableValueToChangeVector(context, (int)CountersTable.ChangeVector, ref existing);
                    // Incoming value already subsumed by what we have - idempotent skip.
                    if (ChangeVectorUtils.GetConflictStatus(changeVector, existingChangeVector) == ConflictStatus.AlreadyMerged)
                    {
                        return;
                    }
                }
            }
            // A live counter supersedes any pending deletion tombstone.
            RemoveTombstoneIfExists(context, documentId, name);
            var etag = _documentsStorage.GenerateNextEtag();
            if (changeVector == null)
            {
                // Local write: mint a change vector for this node and advance the database vector.
                changeVector = ChangeVectorUtils.NewChangeVector(_documentDatabase.ServerStore.NodeTag, etag, _documentsStorage.Environment.Base64Id);
                context.LastDatabaseChangeVector = ChangeVectorUtils.MergeVectors(context.LastDatabaseChangeVector ?? GetDatabaseChangeVector(context), changeVector);
            }
            using (Slice.From(context.Allocator, changeVector, out var cv))
            using (DocumentIdWorker.GetStringPreserveCase(context, collectionName.Name, out Slice collectionSlice))
            {
                // Column order must match the counters table schema.
                tvb.Add(counterKey);
                tvb.Add(nameSlice);
                tvb.Add(Bits.SwapBytes(etag)); // big-endian so etag ordering matches byte ordering
                tvb.Add(value);
                tvb.Add(cv);
                tvb.Add(collectionSlice);
                tvb.Add(context.TransactionMarkerOffset);
                table.Set(tvb);
            }
            UpdateMetrics(counterKey, name, changeVector, collection);
            // Notify subscribers only after the transaction commits.
            context.Transaction.AddAfterCommitNotification(new CounterChange
            {
                ChangeVector = changeVector,
                DocumentId = documentId,
                Name = name,
                Value = value,
                Type = CounterChangeTypes.Put
            });
        }
    }
}
/// <summary>
/// Folds the given change vector into the accumulated statistics change vector.
/// </summary>
public void RecordChangeVector(string changeVector)
{
    var merged = ChangeVectorUtils.MergeVectors(_stats.ChangeVector, changeVector);
    _stats.ChangeVector = merged;
}
/// <summary>
/// Iterates on a batch in document collection, process it and send documents if found any match
/// </summary>
/// <param name="docsContext">Documents operation context used for the read transaction and serialization</param>
/// <param name="sendingCurrentBatchStopwatch">Stopwatch measuring how long the current batch has been running; drives heartbeats and flushes</param>
/// <returns>Whether succeeded finding any documents to send</returns>
private async Task<bool> TrySendingBatchToClient(DocumentsOperationContext docsContext, Stopwatch sendingCurrentBatchStopwatch)
{
    bool anyDocumentsSentInCurrentIteration = false;
    int docsToFlush = 0;
    using (var writer = new BlittableJsonTextWriter(docsContext, _buffer))
    {
        using (docsContext.OpenReadTransaction())
        {
            foreach (var result in _documentsFetcher.GetDataToSend(docsContext, _startEtag))
            {
                _startEtag = result.Doc.Etag;
                // Advance the batch's change vector: merge with the stored starting point when one exists.
                _lastChangeVector = string.IsNullOrEmpty(SubscriptionState.ChangeVectorForNextBatchStartingPoint) ?
                    result.Doc.ChangeVector :
                    ChangeVectorUtils.MergeVectors(result.Doc.ChangeVector, SubscriptionState.ChangeVectorForNextBatchStartingPoint);
                if (result.Doc.Data == null)
                {
                    // Document was filtered out; heartbeat periodically so the client doesn't time out.
                    if (sendingCurrentBatchStopwatch.ElapsedMilliseconds > 1000)
                    {
                        await SendHeartBeat();
                        sendingCurrentBatchStopwatch.Restart();
                    }
                    continue;
                }
                anyDocumentsSentInCurrentIteration = true;
                writer.WriteStartObject();
                writer.WritePropertyName(docsContext.GetLazyStringForFieldWithCaching(TypeSegment));
                writer.WriteValue(BlittableJsonToken.String, docsContext.GetLazyStringForFieldWithCaching(DataSegment));
                writer.WriteComma();
                writer.WritePropertyName(docsContext.GetLazyStringForFieldWithCaching(DataSegment));
                result.Doc.EnsureMetadata();
                if (result.Exception != null)
                {
                    // Patch/projection failed for this document: send only its metadata plus the exception text.
                    var metadata = result.Doc.Data[Client.Constants.Documents.Metadata.Key];
                    writer.WriteValue(BlittableJsonToken.StartObject,
                        docsContext.ReadObject(new DynamicJsonValue
                        {
                            [Client.Constants.Documents.Metadata.Key] = metadata
                        }, result.Doc.Id)
                    );
                    writer.WriteComma();
                    writer.WritePropertyName(docsContext.GetLazyStringForFieldWithCaching(ExceptionSegment));
                    writer.WriteValue(BlittableJsonToken.String,
                        docsContext.GetLazyStringForFieldWithCaching(result.Exception.ToString()));
                }
                else
                {
                    writer.WriteDocument(docsContext, result.Doc, metadataOnly: false);
                }
                writer.WriteEndObject();
                docsToFlush++;
                // perform flush for current batch after 1000ms of running or 1 MB
                if (_buffer.Length > Constants.Size.Megabyte || sendingCurrentBatchStopwatch.ElapsedMilliseconds > 1000)
                {
                    if (docsToFlush > 0)
                    {
                        await FlushDocsToClient(writer, docsToFlush);
                        docsToFlush = 0;
                        sendingCurrentBatchStopwatch.Restart();
                    }
                    else
                    {
                        await SendHeartBeat();
                    }
                }
            }
        }
        if (anyDocumentsSentInCurrentIteration)
        {
            // Terminate the batch with an EndOfBatch marker and push any remainder.
            docsContext.Write(writer, new DynamicJsonValue
            {
                [nameof(SubscriptionConnectionServerMessage.Type)] = nameof(SubscriptionConnectionServerMessage.MessageType.EndOfBatch)
            });
            await FlushDocsToClient(writer, docsToFlush, true);
            if (_logger.IsInfoEnabled)
            {
                _logger.Info(
                    $"Finished sending a batch with {docsToFlush} documents for subscription {Options.SubscriptionName}");
            }
        }
    }
    return (anyDocumentsSentInCurrentIteration);
}
/// <summary>
/// Applies this subscription-batch command on the cluster state machine:
/// validates that the subscription exists and that this node owns the task,
/// checks change-vector concurrency, optionally advances the subscription's
/// next-batch starting point, and records/deletes per-document (and
/// per-revision) batch entries in the subscription state table.
/// </summary>
/// <exception cref="RachisApplyException">The subscription key is missing from the items table.</exception>
/// <exception cref="SubscriptionDoesNotExistException">The stored subscription value is null.</exception>
/// <exception cref="DatabaseDoesNotExistException">The database is being deleted on this node.</exception>
/// <exception cref="SubscriptionDoesNotBelongToNodeException">Another node owns the subscription task.</exception>
/// <exception cref="SubscriptionChangeVectorUpdateConcurrencyException">Stored progress differs from the previously recorded change vector.</exception>
public override unsafe void Execute(ClusterOperationContext context, Table items, long index, RawDatabaseRecord record, RachisState state, out object result)
{
    result = null;
    var shouldUpdateChangeVector = true;
    var subscriptionName = SubscriptionName;
    if (string.IsNullOrEmpty(subscriptionName))
    {
        subscriptionName = SubscriptionId.ToString();
    }
    //insert all docs to voron table. If exists, then batchId will be replaced
    var subscriptionStateTable = context.Transaction.InnerTransaction.OpenTable(ClusterStateMachine.SubscriptionStateSchema, ClusterStateMachine.SubscriptionState);
    var itemKey = SubscriptionState.GenerateSubscriptionItemKeyName(DatabaseName, subscriptionName);
    using (Slice.From(context.Allocator, itemKey.ToLowerInvariant(), out Slice valueNameLowered))
    using (Slice.From(context.Allocator, itemKey, out Slice valueName))
    {
        if (items.ReadByKey(valueNameLowered, out var tvr) == false)
        {
            throw new RachisApplyException($"Cannot find subscription {subscriptionName} @ {DatabaseName}");
        }
        var ptr = tvr.Read(2, out int size);
        var existingValue = new BlittableJsonReaderObject(ptr, size, context);
        if (existingValue == null)
        {
            throw new SubscriptionDoesNotExistException($"Subscription with name '{subscriptionName}' does not exist in database '{DatabaseName}'");
        }
        var subscriptionState = JsonDeserializationClient.SubscriptionState(existingValue);
        var topology = record.Topology;
        // Ownership check: the command may only be applied by the node the topology assigns the task to.
        var lastResponsibleNode = AcknowledgeSubscriptionBatchCommand.GetLastResponsibleNode(HasHighlyAvailableTasks, topology, NodeTag);
        var appropriateNode = topology.WhoseTaskIsIt(RachisState.Follower, subscriptionState, lastResponsibleNode);
        if (appropriateNode == null && record.DeletionInProgress.ContainsKey(NodeTag))
        {
            throw new DatabaseDoesNotExistException($"Stopping subscription '{subscriptionName}' on node {NodeTag}, because database '{DatabaseName}' is being deleted.");
        }
        if (appropriateNode != NodeTag)
        {
            throw new SubscriptionDoesNotBelongToNodeException(
                $"Cannot apply {nameof(AcknowledgeSubscriptionBatchCommand)} for subscription '{subscriptionName}' with id '{SubscriptionId}', on database '{DatabaseName}', on node '{NodeTag}'," +
                $" because the subscription task belongs to '{appropriateNode ?? "N/A"}'.")
            {
                AppropriateNode = appropriateNode
            };
        }
        if (CurrentChangeVector == nameof(Constants.Documents.SubscriptionChangeVectorSpecialStates.DoNotChange))
        {
            // NOTE(review): the return value of ReadObject is discarded here; it looks
            // like only the side effect (if any) is wanted - confirm this call is needed.
            context.ReadObject(existingValue, subscriptionName);
            shouldUpdateChangeVector = false;
        }
        // Optimistic-concurrency guard: stored progress must match what this command last saw.
        if (subscriptionState.ChangeVectorForNextBatchStartingPoint != PreviouslyRecordedChangeVector)
        {
            throw new SubscriptionChangeVectorUpdateConcurrencyException($"Can't record subscription with name '{subscriptionName}' due to inconsistency in change vector progress. Probably there was an admin intervention that changed the change vector value. Stored value: {subscriptionState.ChangeVectorForNextBatchStartingPoint}, received value: {PreviouslyRecordedChangeVector}");
        }
        if (shouldUpdateChangeVector)
        {
            // Advance the subscription's starting point and persist the updated state.
            subscriptionState.ChangeVectorForNextBatchStartingPoint = ChangeVectorUtils.MergeVectors(CurrentChangeVector, subscriptionState.ChangeVectorForNextBatchStartingPoint);
            subscriptionState.NodeTag = NodeTag;
            using (var obj = context.ReadObject(subscriptionState.ToJson(), "subscription"))
            {
                ClusterStateMachine.UpdateValue(index, items, valueNameLowered, valueName, obj);
            }
        }
    }
    // Remove batch entries for documents deleted in this batch.
    foreach (var deletedId in Deleted)
    {
        using (SubscriptionConnectionsState.GetDatabaseAndSubscriptionAndDocumentKey(context, DatabaseName, SubscriptionId, deletedId, out var key))
        {
            using var _ = Slice.External(context.Allocator, key, out var keySlice);
            subscriptionStateTable.DeleteByKey(keySlice);
        }
    }
    // Record each sent document with its change vector and this command's index as the batch id.
    foreach (var documentRecord in Documents)
    {
        using (SubscriptionConnectionsState.GetDatabaseAndSubscriptionAndDocumentKey(context, DatabaseName, SubscriptionId, documentRecord.DocumentId, out var key))
        using (subscriptionStateTable.Allocate(out var tvb))
        {
            using var _ = Slice.External(context.Allocator, key, out var keySlice);
            using var __ = Slice.From(context.Allocator, documentRecord.ChangeVector, out var changeVectorSlice);
            tvb.Add(keySlice);
            tvb.Add(changeVectorSlice);
            tvb.Add(Bits.SwapBytes(index)); // batch id
            subscriptionStateTable.Set(tvb);
        }
    }
    // Record each sent revision, keyed by its current change vector, storing the previous one.
    foreach (var revisionRecord in Revisions)
    {
        using (SubscriptionConnectionsState.GetDatabaseAndSubscriptionAndRevisionKey(context, DatabaseName, SubscriptionId, revisionRecord.Current, out var key))
        using (subscriptionStateTable.Allocate(out var tvb))
        {
            using var _ = Slice.External(context.Allocator, key, out var keySlice);
            using var __ = Slice.From(context.Allocator, revisionRecord.Previous ?? string.Empty, out var changeVectorSlice);
            tvb.Add(keySlice);
            tvb.Add(changeVectorSlice); //prev change vector
            tvb.Add(Bits.SwapBytes(index)); // batch id
            subscriptionStateTable.Set(tvb);
        }
    }
}