public void LocalIsLongerThanRemote()
{
    var dbIds = new List<string> { new string('1', 22), new string('2', 22), new string('3', 22) };

    var local = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[2], Etag = 95, NodeTag = 2 },
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 2, NodeTag = 1 },
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 10, NodeTag = 0 },
    };

    var remote = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 75, NodeTag = 0 },
    };

    Assert.Equal(ConflictStatus.Conflict,
        ChangeVectorUtils.GetConflictStatus(remote.SerializeVector(), local.SerializeVector()));
}
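Every example in this listing hinges on the same three-way comparison: the remote vector is an Update when it is ahead on every entry, AlreadyMerged when the local vector is ahead or equal everywhere, and a Conflict when each side is ahead somewhere (an entry missing on one side counts as etag 0). A minimal standalone sketch of that rule, assuming a DbId-to-etag dictionary model rather than RavenDB's serialized string vectors:

using System;
using System.Collections.Generic;
using System.Linq;

enum ConflictStatus { Update, Conflict, AlreadyMerged }

static class ChangeVectorSketch
{
    // Compare two change vectors modeled as DbId -> Etag maps.
    public static ConflictStatus GetConflictStatus(
        IReadOnlyDictionary<string, long> remote,
        IReadOnlyDictionary<string, long> local)
    {
        bool remoteAhead = false, localAhead = false;
        foreach (var dbId in remote.Keys.Union(local.Keys))
        {
            var r = remote.TryGetValue(dbId, out var re) ? re : 0;
            var l = local.TryGetValue(dbId, out var le) ? le : 0;
            if (r > l) remoteAhead = true;
            if (l > r) localAhead = true;
        }

        if (remoteAhead && localAhead)
            return ConflictStatus.Conflict;  // each side saw writes the other did not
        if (remoteAhead)
            return ConflictStatus.Update;    // remote strictly dominates local
        return ConflictStatus.AlreadyMerged; // local already contains everything remote has
    }
}

Under this sketch, the LocalIsLongerThanRemote case above becomes GetConflictStatus(remote: { A:75 }, local: { A:10, B:2, C:95 }): remote is ahead on A, local is ahead on B and C, hence Conflict.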
public void Very_large_change_vectors_local_larger_different_order_should_work_all_remote_larger(int length)
{
    var dbIds = new List<Guid>();
    for (int i = 0; i < length; i++)
    {
        dbIds.Add(Guid.NewGuid());
    }

    var tags = Enumerable.Range(0, length).ToArray();

    // local covers every database id (in reverse order), while remote covers
    // fewer ids but has the larger etag wherever it does have an entry -> Conflict
    var remoteVectorData = new List<(Guid, long, int)>();
    var localVectorData = new List<(Guid, long, int)>();
    for (int i = 0, j = length - 1; i < length; i++, j--)
    {
        if (length - i >= 10)
        {
            remoteVectorData.Add((dbIds[i], 10, tags[i]));
        }

        localVectorData.Add((dbIds[j], 1, tags[j]));
    }

    var remote = ChangeVector(remoteVectorData.ToArray());
    var local = ChangeVector(localVectorData.ToArray());

    Assert.Equal(ConflictStatus.Conflict, ChangeVectorUtils.GetConflictStatus(remote, local));
}
public void Some_remote_etags_lower_than_local_and_some_higher_should_return_Conflict_at_conflict_status_with_different_order()
{
    var dbIds = new List<string> { new string('1', 22), new string('2', 22), new string('3', 22) };

    var local = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 75, NodeTag = 0 },
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 10, NodeTag = 1 },
        new ChangeVectorEntry { DbId = dbIds[2], Etag = 3, NodeTag = 2 },
    };

    var remote = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 95, NodeTag = 0 },
        new ChangeVectorEntry { DbId = dbIds[2], Etag = 2, NodeTag = 1 },
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 10, NodeTag = 2 },
    };

    Assert.Equal(ConflictStatus.Conflict,
        ChangeVectorUtils.GetConflictStatus(remote.SerializeVector(), local.SerializeVector()));
}
public static ConflictStatus GetConflictStatusForDocument(DocumentsOperationContext context, string remote, string local)
{
    var originalStatus = ChangeVectorUtils.GetConflictStatus(remote, local);
    if (originalStatus == ConflictStatus.Conflict && context.DocumentDatabase.DocumentsStorage.HasUnusedDatabaseIds())
    {
        // We need to distinguish between a few cases here.
        // Let's assume that node C was removed.

        // Our local change vector is A:10, B:10, C:10
        // case 1: incoming change vector A:10, B:10, C:11 -> update         (original: update,         after: already merged)
        // case 2: incoming change vector A:11, B:10, C:10 -> update         (original: update,         after: update)
        // case 3: incoming change vector A:11, B:10       -> update         (original: conflict,       after: update)
        // case 4: incoming change vector A:10, B:10       -> already merged (original: already merged, after: already merged)

        // Our local change vector is A:11, B:10
        // case 1: incoming change vector A:10, B:10, C:10 -> already merged (original: conflict,       after: already merged)
        // case 2: incoming change vector A:10, B:11, C:10 -> conflict       (original: conflict,       after: conflict)
        // case 3: incoming change vector A:11, B:10, C:10 -> update         (original: update,         after: already merged)
        // case 4: incoming change vector A:11, B:12, C:10 -> update         (original: conflict,       after: update)

        context.DocumentDatabase.DocumentsStorage.TryRemoveUnusedIds(ref remote);
        context.SkipChangeVectorValidation = context.DocumentDatabase.DocumentsStorage.TryRemoveUnusedIds(ref local);
        return ChangeVectorUtils.GetConflictStatus(remote, local);
    }

    return originalStatus;
}
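The case tables above can be reproduced with a small illustrative helper that strips entries belonging to unused (removed) database ids before re-running the comparison. The dictionary model and the TrimUnusedIds name are assumptions for this sketch, not RavenDB's TryRemoveUnusedIds API:

using System.Collections.Generic;
using System.Linq;

static class UnusedIdsSketch
{
    // Drop entries whose DbId belongs to a database that no longer exists.
    public static Dictionary<string, long> TrimUnusedIds(
        IReadOnlyDictionary<string, long> vector, ISet<string> unusedDbIds)
        => vector.Where(kvp => unusedDbIds.Contains(kvp.Key) == false)
                 .ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
}

// Using the ChangeVectorSketch.GetConflictStatus helper from the first sketch,
// with local = {A:11, B:10}, remote = {A:10, B:10, C:10}, and C unused:
//   GetConflictStatus(remote, local)                        -> Conflict
//   GetConflictStatus(TrimUnusedIds(remote, {"C"}), local)  -> AlreadyMerged
// which matches "case 1" of the second table above.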
public void Only_one_etag_is_larger_at_local(int length)
{
    var dbIds = new List<Guid>();
    for (int i = 0; i < length; i++)
    {
        dbIds.Add(Guid.NewGuid());
    }

    var tags = Enumerable.Range(0, length).ToArray();

    // every remote etag is larger than its local counterpart, except for a
    // single entry where local is ahead -> Conflict
    var remoteVectorData = new List<(Guid, long, int)>();
    var localVectorData = new List<(Guid, long, int)>();
    for (int i = 0, j = length - 1; i < length; i++, j--)
    {
        remoteVectorData.Add((dbIds[i], 10, tags[i]));
        localVectorData.Add((dbIds[j], i == length / 2 ? 15 : 5, tags[j]));
    }

    var remote = ChangeVector(remoteVectorData.ToArray());
    var local = ChangeVector(localVectorData.ToArray());

    Assert.Equal(ConflictStatus.Conflict, ChangeVectorUtils.GetConflictStatus(remote, local));
}
public void Very_large_change_vectors_remote_longer_same_order_should_work_some_remote_etags_smaller_and_some_larger(int length)
{
    var dbIds = new List<Guid>();
    for (int i = 0; i < length; i++)
    {
        dbIds.Add(Guid.NewGuid());
    }

    var tags = Enumerable.Range(0, length).ToArray();

    // remote covers more database ids than local, and where both have an
    // entry each side is sometimes ahead -> Conflict
    var remoteVectorData = new List<(Guid, long, int)>();
    var localVectorData = new List<(Guid, long, int)>();
    for (int i = 0; i < length; i++)
    {
        remoteVectorData.Add((dbIds[i], i % 3 == 0 ? 10 : 1, tags[i]));
        if (length - i >= 5)
        {
            localVectorData.Add((dbIds[i], i % 2 == 0 ? 10 : 1, tags[i]));
        }
    }

    var remote = ChangeVector(remoteVectorData.ToArray());
    var local = ChangeVector(localVectorData.ToArray());

    Assert.Equal(ConflictStatus.Conflict, ChangeVectorUtils.GetConflictStatus(remote, local));
}
private string FindMostUpToDateNode(List<string> nodes, string database, Dictionary<string, ClusterNodeStatusReport> current)
{
    var updated = nodes[0];
    var highestChangeVectors = current[updated].Report[database].DatabaseChangeVector;
    var maxDocsCount = current[updated].Report[database].NumberOfDocuments;
    for (var index = 1; index < nodes.Count; index++)
    {
        var node = nodes[index];
        var report = current[node].Report[database];
        var cv = report.DatabaseChangeVector;
        var status = ChangeVectorUtils.GetConflictStatus(cv, highestChangeVectors);
        if (status == ConflictStatus.Update)
        {
            // this node's change vector subsumes the current best,
            // so track it as the winner as well
            highestChangeVectors = cv;
            maxDocsCount = report.NumberOfDocuments;
            updated = node;
        }

        // In conflict we need to choose between two nodes that are not synced,
        // so we take the one with the most documents.
        if (status == ConflictStatus.Conflict)
        {
            if (report.NumberOfDocuments > maxDocsCount)
            {
                highestChangeVectors = cv;
                maxDocsCount = report.NumberOfDocuments;
                updated = node;
            }
        }
    }

    return updated;
}
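A hypothetical walk-through of the selection rule above, reusing the ChangeVectorSketch.GetConflictStatus helper from the first sketch in this listing; the node names, vectors, and document counts are invented for illustration:

using System;
using System.Collections.Generic;

static class MostUpToDateDemo
{
    public static void Main()
    {
        var reports = new List<(string Node, Dictionary<string, long> Cv, long Docs)>
        {
            ("A", new Dictionary<string, long> { ["db1"] = 10, ["db2"] = 2 }, 1_000),
            ("B", new Dictionary<string, long> { ["db1"] = 5, ["db2"] = 8 }, 1_200), // conflicts with A's vector
            ("C", new Dictionary<string, long> { ["db1"] = 10, ["db2"] = 9 }, 900),  // subsumes both
        };

        var (best, bestCv, bestDocs) = (reports[0].Node, reports[0].Cv, reports[0].Docs);
        foreach (var (node, cv, docs) in reports.GetRange(1, reports.Count - 1))
        {
            var status = ChangeVectorSketch.GetConflictStatus(cv, bestCv);
            // Update: strictly newer vector wins; Conflict: higher doc count wins.
            if (status == ConflictStatus.Update || (status == ConflictStatus.Conflict && docs > bestDocs))
                (best, bestCv, bestDocs) = (node, cv, docs);
        }

        Console.WriteLine(best); // "C": B wins the conflict on doc count, then C's vector subsumes B's
    }
}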
public void Very_large_change_vectors_equal_length_same_order_should_work(int length)
{
    var dbIds = new List<Guid>();
    for (int i = 0; i < length; i++)
    {
        dbIds.Add(Guid.NewGuid());
    }

    var tags = Enumerable.Range(0, length).ToArray();

    // we create two change vectors, where remote >> local
    var remoteVectorData = new List<(Guid, long, int)>();
    var localVectorData = new List<(Guid, long, int)>();
    for (int i = 0; i < length; i++)
    {
        remoteVectorData.Add((dbIds[i], 10, tags[i]));
        localVectorData.Add((dbIds[i], 1, tags[i]));
    }

    var remote = ChangeVector(remoteVectorData.ToArray());
    var local = ChangeVector(localVectorData.ToArray());

    Assert.Equal(ConflictStatus.Update, ChangeVectorUtils.GetConflictStatus(remote, local));
}
public override int Execute(DocumentsOperationContext context)
{
    if (string.IsNullOrEmpty(context.LastDatabaseChangeVector))
    {
        context.LastDatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(context);
    }

    var status = ChangeVectorUtils.GetConflictStatus(_replicationBatchReply.DatabaseChangeVector, context.LastDatabaseChangeVector);
    if (status != ConflictStatus.AlreadyMerged)
    {
        return 0;
    }

    var res = ChangeVectorUtils.TryUpdateChangeVector(_replicationBatchReply.NodeTag, _dbId, _replicationBatchReply.CurrentEtag, ref context.LastDatabaseChangeVector)
        ? 1
        : 0;
    if (res == 1)
    {
        context.Transaction.InnerTransaction.LowLevelTransaction.OnDispose += _ =>
        {
            try
            {
                _trigger.Set();
            }
            catch
            {
                // ignored
            }
        };
    }

    return res;
}
public void All_local_etags_lower_than_remote_should_return_Update_at_conflict_status()
{
    var dbIds = new List<string> { new string('1', 22), new string('2', 22), new string('3', 22) };

    var local = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 1, NodeTag = 0 },
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 2, NodeTag = 1 },
        new ChangeVectorEntry { DbId = dbIds[2], Etag = 3, NodeTag = 2 },
    };

    var remote = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 10, NodeTag = 0 },
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 20, NodeTag = 1 },
        new ChangeVectorEntry { DbId = dbIds[2], Etag = 30, NodeTag = 2 },
    };

    Assert.Equal(ConflictStatus.Update,
        ChangeVectorUtils.GetConflictStatus(remote.SerializeVector(), local.SerializeVector()));
}
public void Different_change_vectors_with_different_prefix_local_smaller_with_remote_etags_larger()
{
    var dbIds = new[] { Guid.NewGuid(), Guid.NewGuid(), Guid.NewGuid() };
    var tags = Enumerable.Range(0, 3).ToArray();

    var remote = ChangeVector((dbIds[0], 10, tags[0]), (dbIds[1], 10, tags[1]), (dbIds[2], 10, tags[2]));
    var local = ChangeVector((dbIds[1], 1, tags[1]), (dbIds[2], 1, tags[2]));

    Assert.Equal(ConflictStatus.Update, ChangeVectorUtils.GetConflictStatus(remote, local));
}
public void Remote_has_entries_not_in_local_with_entries_same_order_and_some_local_etags_larger_than_remote()
{
    var dbIds = new[] { Guid.NewGuid(), Guid.NewGuid(), Guid.NewGuid() };
    var tags = Enumerable.Range(0, 3).ToArray();

    var remote = ChangeVector((dbIds[0], 1, tags[0]), (dbIds[1], 5, tags[1]), (dbIds[2], 1, tags[2]));
    var local = ChangeVector((dbIds[0], 5, tags[0]), (dbIds[1], 1, tags[1]));

    Assert.Equal(ConflictStatus.Conflict, ChangeVectorUtils.GetConflictStatus(remote, local));
}
public void DocumentsChangeVectorShouldBeUpdatedAfterAddingNewTimeSeries()
{
    using (var store = GetDocumentStore())
    {
        var baseline = DateTime.Today;

        using (var session = store.OpenSession())
        {
            for (int i = 1; i <= 5; i++)
            {
                var id = $"users/{i}";
                session.Store(new User { Name = "Oren" }, id);
                session.TimeSeriesFor(id, "Heartrate")
                    .Append(baseline.AddMinutes(1), new[] { 59d }, "watches/fitbit");
            }

            session.SaveChanges();
        }

        var cvs = new List<string>();

        using (var session = store.OpenSession())
        {
            for (int i = 2; i < 5; i++)
            {
                var id = $"users/{i}";
                var u = session.Load<User>(id);
                var cv = session.Advanced.GetChangeVectorFor(u);
                cvs.Add(cv);

                session.TimeSeriesFor(id, "Nasdaq")
                    .Append(baseline.AddMinutes(1), new[] { 4012.5d }, "web");
            }

            session.SaveChanges();
        }

        using (var session = store.OpenSession())
        {
            for (int i = 2; i < 5; i++)
            {
                var u = session.Load<User>($"users/{i}");
                var cv = session.Advanced.GetChangeVectorFor(u);
                var oldCv = cvs[i - 2];
                var conflictStatus = ChangeVectorUtils.GetConflictStatus(cv, oldCv);

                Assert.Equal(ConflictStatus.Update, conflictStatus);
            }
        }
    }
}
public void Change_vector_has_negative_etag()
{
    var changeVectorWithNegativeEtag = ChangeVector((Guid.NewGuid(), 2, 1), (Guid.NewGuid(), -3, 2));
    var changeVector = ChangeVector((Guid.NewGuid(), 2, 1), (Guid.NewGuid(), 3, 2));

    Assert.Throws<ArgumentException>(() => ChangeVectorUtils.GetConflictStatus(changeVectorWithNegativeEtag, changeVector));
    Assert.Throws<ArgumentException>(() => ChangeVectorUtils.GetConflictStatus(changeVector, changeVectorWithNegativeEtag));
}
private bool AddReplicationItemToBatch(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
        item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        if ((item.Flags & DocumentFlags.Artificial) == DocumentFlags.Artificial)
        {
            stats.RecordArtificialDocumentSkip();
            skippedReplicationItemsInfo.Update(item, isArtificial: true);
            return false;
        }
    }

    if (item.Flags.Contain(DocumentFlags.Revision) || item.Flags.Contain(DocumentFlags.DeleteRevision))
    {
        // we let all conflicted/resolved revisions pass, since we keep them with their
        // original change vector, which might be 'AlreadyMerged' at the destination
        if (item.Flags.Contain(DocumentFlags.Conflicted) || item.Flags.Contain(DocumentFlags.Resolved))
        {
            _orderedReplicaItems.Add(item.Etag, item);
            return true;
        }
    }

    // destination already has it
    if ((MissingAttachmentsInLastBatch == false || item.Type != ReplicationBatchItem.ReplicationItemType.Attachment) &&
        ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        skippedReplicationItemsInfo.Update(item);
        return false;
    }

    if (skippedReplicationItemsInfo.SkippedItems > 0)
    {
        if (_log.IsInfoEnabled)
        {
            var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
            _log.Info(message);
        }

        skippedReplicationItemsInfo.Reset();
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
    {
        _replicaAttachmentStreams[item.Base64Hash] = item;
    }

    Debug.Assert(item.Flags.Contain(DocumentFlags.Artificial) == false);
    _orderedReplicaItems.Add(item.Etag, item);
    return true;
}
public static ConflictStatus GetConflictStatusForDocument(DocumentsOperationContext context, string id, string changeVector,
    out string conflictingVector, out bool hasLocalClusterTx)
{
    hasLocalClusterTx = false;
    conflictingVector = null;

    // tombstones can also be conflict entries
    var conflicts = context.DocumentDatabase.DocumentsStorage.ConflictsStorage.GetConflictsFor(context, id);
    ConflictStatus status;
    if (conflicts.Count > 0)
    {
        foreach (var existingConflict in conflicts)
        {
            status = ChangeVectorUtils.GetConflictStatus(changeVector, existingConflict.ChangeVector);
            if (status == ConflictStatus.Conflict)
            {
                conflictingVector = existingConflict.ChangeVector;
                return ConflictStatus.Conflict;
            }
        }

        // this document will resolve the conflicts when it is put
        return ConflictStatus.Update;
    }

    var result = context.DocumentDatabase.DocumentsStorage.GetDocumentOrTombstone(context, id);
    string local;
    if (result.Document != null)
    {
        local = result.Document.ChangeVector;
        hasLocalClusterTx = result.Document.Flags.Contain(DocumentFlags.FromClusterTransaction);
    }
    else if (result.Tombstone != null)
    {
        local = result.Tombstone.ChangeVector;
        hasLocalClusterTx = result.Tombstone.Flags.Contain(DocumentFlags.FromClusterTransaction);
    }
    else
    {
        return ConflictStatus.Update; // document with 'id' doesn't exist locally, so just do PUT
    }

    context.SkipChangeVectorValidation = context.DocumentDatabase.DocumentsStorage.TryRemoveUnusedIds(ref local);
    status = ChangeVectorUtils.GetConflictStatus(changeVector, local);
    if (status == ConflictStatus.Conflict)
    {
        conflictingVector = local;
    }

    return status;
}
private int ReplicatedPast(string changeVector)
{
    var count = 0;
    foreach (var destination in _outgoing)
    {
        var conflictStatus = ChangeVectorUtils.GetConflictStatus(changeVector, destination.LastAcceptedChangeVector);
        if (conflictStatus == ConflictStatus.AlreadyMerged)
        {
            count++;
        }
    }

    return count;
}
private int ReplicatedPastInternalDestinations(string changeVector)
{
    var count = 0;
    foreach (var destination in _outgoing.Where(x => _internalDestinations.Select(y => y.Url).Contains(x.Destination.Url)))
    {
        var conflictStatus = ChangeVectorUtils.GetConflictStatus(changeVector, destination.LastAcceptedChangeVector);
        if (conflictStatus == ConflictStatus.AlreadyMerged)
        {
            count++;
        }
    }

    return count;
}
private bool AddReplicationItemToBatch(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
        item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        if ((item.Flags & DocumentFlags.Artificial) == DocumentFlags.Artificial)
        {
            stats.RecordArtificialDocumentSkip();
            skippedReplicationItemsInfo.Update(item, isArtificial: true);
            return false;
        }
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.CounterTombstone &&
        _parent.SupportedFeatures.Replication.Counters == false)
    {
        // skip counter tombstones in legacy mode
        skippedReplicationItemsInfo.Update(item);
        return false;
    }

    // destination already has it
    if ((MissingAttachmentsInLastBatch == false || item.Type != ReplicationBatchItem.ReplicationItemType.Attachment) &&
        ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        skippedReplicationItemsInfo.Update(item);
        return false;
    }

    if (skippedReplicationItemsInfo.SkippedItems > 0)
    {
        if (_log.IsInfoEnabled)
        {
            var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
            _log.Info(message);
        }

        skippedReplicationItemsInfo.Reset();
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
    {
        _replicaAttachmentStreams[item.Base64Hash] = item;
    }

    Debug.Assert(item.Flags.Contain(DocumentFlags.Artificial) == false);
    _orderedReplicaItems.Add(item.Etag, item);
    return true;
}
private unsafe bool AddReplicationItemToBatch(ReplicationBatchItem item, OutgoingReplicationStatsScope stats)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
        item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        if ((item.Flags & DocumentFlags.Artificial) == DocumentFlags.Artificial)
        {
            stats.RecordArtificialDocumentSkip();
            if (_log.IsInfoEnabled)
            {
                _log.Info($"Skipping replication of {item.Id} because it is an artificial document");
            }

            return false;
        }

        if (CollectionName.IsSystemDocument(item.Id.Buffer, item.Id.Size, out bool isHiLo) && isHiLo == false)
        {
            stats.RecordSystemDocumentSkip();
            if (_log.IsInfoEnabled)
            {
                _log.Info($"Skipping replication of {item.Id} because it is a system document");
            }

            return false;
        }
    }

    // destination already has it
    if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Skipping replication of {item.Type} '{item.Id}' because destination has a higher change vector. " +
                      $"Current: {item.ChangeVector} < Destination: {_parent._destinationLastKnownChangeVectorAsString}");
        }

        return false;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
    {
        _replicaAttachmentStreams[item.Base64Hash] = item;
    }

    Debug.Assert(item.Flags.HasFlag(DocumentFlags.Artificial) == false);
    _orderedReplicaItems.Add(item.Etag, item);
    return true;
}
public override int Execute(DocumentsOperationContext context)
{
    if (string.IsNullOrEmpty(context.LastDatabaseChangeVector))
    {
        context.LastDatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(context);
    }

    var status = ChangeVectorUtils.GetConflictStatus(_replicationBatchReply.DatabaseChangeVector, context.LastDatabaseChangeVector);
    if (status != ConflictStatus.AlreadyMerged)
    {
        return 0;
    }

    var result = ChangeVectorUtils.TryUpdateChangeVector(_replicationBatchReply.NodeTag, _dbId, _replicationBatchReply.CurrentEtag, context.LastDatabaseChangeVector);
    if (result.IsValid)
    {
        if (context.LastReplicationEtagFrom == null)
        {
            context.LastReplicationEtagFrom = new Dictionary<string, long>();
        }

        if (context.LastReplicationEtagFrom.ContainsKey(_replicationBatchReply.DatabaseId) == false)
        {
            context.LastReplicationEtagFrom[_replicationBatchReply.DatabaseId] = _replicationBatchReply.CurrentEtag;
        }

        context.LastDatabaseChangeVector = result.ChangeVector;

        context.Transaction.InnerTransaction.LowLevelTransaction.OnDispose += _ =>
        {
            try
            {
                _trigger.Set();
            }
            catch
            {
                // ignored
            }
        };
    }

    return result.IsValid ? 1 : 0;
}
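Both Execute variants above rely on TryUpdateChangeVector to advance a single entry of the database change vector. A minimal sketch of the semantics implied by the result.IsValid/result.ChangeVector usage, again on the dictionary model rather than RavenDB's string-based vectors (the shape of the tuple is an assumption for illustration):

using System.Collections.Generic;

static class TryUpdateSketch
{
    // Bump the etag recorded for dbId if (and only if) the new etag is higher,
    // and report whether the vector actually changed.
    public static (bool IsValid, Dictionary<string, long> ChangeVector) TryUpdateChangeVector(
        string dbId, long etag, Dictionary<string, long> vector)
    {
        if (vector.TryGetValue(dbId, out var current) && current >= etag)
            return (false, vector); // already at or past this etag - nothing to do

        var updated = new Dictionary<string, long>(vector) { [dbId] = etag };
        return (true, updated);
    }
}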
public static ConflictStatus GetConflictStatusForDocument(DocumentsOperationContext context, string id, LazyStringValue remote, out string conflictingVector)
{
    // tombstones can also be conflict entries
    conflictingVector = null;
    var conflicts = context.DocumentDatabase.DocumentsStorage.ConflictsStorage.GetConflictsFor(context, id);
    if (conflicts.Count > 0)
    {
        foreach (var existingConflict in conflicts)
        {
            if (ChangeVectorUtils.GetConflictStatus(remote, existingConflict.ChangeVector) == ConflictStatus.Conflict)
            {
                conflictingVector = existingConflict.ChangeVector;
                return ConflictStatus.Conflict;
            }
        }

        // this document will resolve the conflicts when it is put
        return ConflictStatus.Update;
    }

    var result = context.DocumentDatabase.DocumentsStorage.GetDocumentOrTombstone(context, id);
    string local;
    if (result.Document != null)
    {
        local = result.Document.ChangeVector;
    }
    else if (result.Tombstone != null)
    {
        local = result.Tombstone.ChangeVector;
    }
    else
    {
        return ConflictStatus.Update; // document with 'id' doesn't exist locally, so just do PUT
    }

    var status = ChangeVectorUtils.GetConflictStatus(remote, local);
    if (status == ConflictStatus.Conflict)
    {
        conflictingVector = local;
    }

    return status;
}
private int ReplicatedPastInternalDestinations(HashSet<string> internalUrls, string changeVector)
{
    var count = 0;
    foreach (var destination in _outgoing)
    {
        if (internalUrls.Contains(destination.Destination.Url) == false)
        {
            continue;
        }

        var conflictStatus = ChangeVectorUtils.GetConflictStatus(changeVector, destination.LastAcceptedChangeVector);
        if (conflictStatus == ConflictStatus.AlreadyMerged)
        {
            count++;
        }
    }

    return count;
}
public bool HasHigherChangeVector(DocumentsOperationContext context, Slice prefixSlice, string expectedChangeVector)
{
    if (ConflictsCount == 0)
    {
        return false;
    }

    var conflictsTable = context.Transaction.InnerTransaction.OpenTable(ConflictsSchema, ConflictsSlice);
    foreach (var tvr in conflictsTable.SeekForwardFrom(ConflictsSchema.Indexes[IdAndChangeVectorSlice], prefixSlice, 0, true))
    {
        var changeVector = TableValueToChangeVector(context, (int)ConflictsTable.ChangeVector, ref tvr.Result.Reader);
        if (ChangeVectorUtils.GetConflictStatus(changeVector, expectedChangeVector) == ConflictStatus.AlreadyMerged)
        {
            return true;
        }
    }

    return false;
}
private bool ShouldSkip(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    switch (item)
    {
        case DocumentReplicationItem doc:
            if (doc.Flags.Contain(DocumentFlags.Artificial))
            {
                stats.RecordArtificialDocumentSkip();
                skippedReplicationItemsInfo.Update(item, isArtificial: true);
                return true;
            }

            if (doc.Flags.Contain(DocumentFlags.Revision) || doc.Flags.Contain(DocumentFlags.DeleteRevision))
            {
                // we let all conflicted/resolved revisions pass, since we keep them with their
                // original change vector, which might be 'AlreadyMerged' at the destination
                if (doc.Flags.Contain(DocumentFlags.Conflicted) || doc.Flags.Contain(DocumentFlags.Resolved))
                {
                    return false;
                }
            }

            break;
        case AttachmentReplicationItem _:
            if (MissingAttachmentsInLastBatch)
            {
                return false;
            }

            break;
    }

    // destination already has it
    if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        skippedReplicationItemsInfo.Update(item);
        return true;
    }

    return false;
}
public void Remote_change_vector_with_different_dbId_set_than_local_should_return_Conflict_at_conflict_status()
{
    var dbIds = new List<string> { new string('1', 22), new string('2', 22), new string('3', 22) };

    var local = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 10, NodeTag = 0 },
    };

    var remote = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 10, NodeTag = 0 }
    };

    Assert.Equal(ConflictStatus.Conflict,
        ChangeVectorUtils.GetConflictStatus(remote.SerializeVector(), local.SerializeVector()));
}
public void Remote_change_vector_smaller_than_local_and_some_remote_etags_higher_than_local_should_return_Conflict_at_conflict_status()
{
    var dbIds = new List<string> { new string('1', 22), new string('2', 22), new string('3', 22), new string('4', 22) };

    var local = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 10, NodeTag = 0 },
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 20, NodeTag = 1 },
        new ChangeVectorEntry { DbId = dbIds[2], Etag = 3000, NodeTag = 2 },
        new ChangeVectorEntry { DbId = dbIds[3], Etag = 40, NodeTag = 3 }
    };

    var remote = new[]
    {
        new ChangeVectorEntry { DbId = dbIds[0], Etag = 100, NodeTag = 0 },
        new ChangeVectorEntry { DbId = dbIds[1], Etag = 200, NodeTag = 1 },
        new ChangeVectorEntry { DbId = dbIds[2], Etag = 300, NodeTag = 2 }
    };

    Assert.Equal(ConflictStatus.Conflict,
        ChangeVectorUtils.GetConflictStatus(remote.SerializeVector(), local.SerializeVector()));
}
public bool WaitForBiggerChangeVector(DocumentStore store, string changeVector)
{
    var sw = Stopwatch.StartNew();
    var timeout = 10000;
    if (Debugger.IsAttached)
    {
        timeout *= 10;
    }

    while (sw.ElapsedMilliseconds < timeout)
    {
        using (var session = store.OpenSession())
        {
            var doc = session.Load<User>("users/1");
            if (ChangeVectorUtils.GetConflictStatus(session.Advanced.GetChangeVectorFor(doc), changeVector) == ConflictStatus.Update)
            {
                return true;
            }
        }

        Thread.Sleep(10);
    }

    return false;
}
public void AddConflict(
    DocumentsOperationContext context,
    string id,
    long lastModifiedTicks,
    BlittableJsonReaderObject incomingDoc,
    string incomingChangeVector,
    string incomingTombstoneCollection,
    DocumentFlags flags,
    NonPersistentDocumentFlags nonPersistentFlags = NonPersistentDocumentFlags.None)
{
    if (_logger.IsInfoEnabled)
    {
        _logger.Info($"Adding conflict to {id} (Incoming change vector {incomingChangeVector})");
    }

    var tx = context.Transaction.InnerTransaction;
    var conflictsTable = tx.OpenTable(ConflictsSchema, ConflictsSlice);
    var fromSmuggler = (nonPersistentFlags & NonPersistentDocumentFlags.FromSmuggler) == NonPersistentDocumentFlags.FromSmuggler;

    using (DocumentIdWorker.GetLowerIdSliceAndStorageKey(context, id, out Slice lowerId, out Slice idPtr))
    {
        CollectionName collectionName;

        // ReSharper disable once ArgumentsStyleLiteral
        var existing = _documentsStorage.GetDocumentOrTombstone(context, id, throwOnConflict: false);
        if (existing.Document != null)
        {
            var existingDoc = existing.Document;

            if (fromSmuggler == false)
            {
                using (Slice.From(context.Allocator, existingDoc.ChangeVector, out Slice cv))
                using (DocumentIdWorker.GetStringPreserveCase(context, CollectionName.GetLazyCollectionNameFrom(context, existingDoc.Data), out Slice collectionSlice))
                using (conflictsTable.Allocate(out TableValueBuilder tvb))
                {
                    tvb.Add(lowerId);
                    tvb.Add(SpecialChars.RecordSeparator);
                    tvb.Add(cv);
                    tvb.Add(idPtr);
                    tvb.Add(existingDoc.Data.BasePointer, existingDoc.Data.Size);
                    tvb.Add(Bits.SwapBytes(_documentsStorage.GenerateNextEtag()));
                    tvb.Add(collectionSlice);
                    tvb.Add(existingDoc.LastModified.Ticks);
                    tvb.Add((int)existingDoc.Flags);
                    if (conflictsTable.Set(tvb))
                    {
                        Interlocked.Increment(ref ConflictsCount);
                    }
                }
            }

            // we delete the data directly, without generating a tombstone,
            // because we have a conflict instead
            _documentsStorage.EnsureLastEtagIsPersisted(context, existingDoc.Etag);
            collectionName = _documentsStorage.ExtractCollectionName(context, existingDoc.Data);

            // make sure that the relevant collection tree exists
            var table = tx.OpenTable(DocsSchema, collectionName.GetTableName(CollectionTableType.Documents));
            table.Delete(existingDoc.StorageId);
        }
        else if (existing.Tombstone != null)
        {
            var existingTombstone = existing.Tombstone;

            if (fromSmuggler == false)
            {
                using (Slice.From(context.Allocator, existingTombstone.ChangeVector, out var cv))
                using (DocumentIdWorker.GetStringPreserveCase(context, existingTombstone.Collection, out Slice collectionSlice))
                using (conflictsTable.Allocate(out TableValueBuilder tvb))
                {
                    tvb.Add(lowerId);
                    tvb.Add(SpecialChars.RecordSeparator);
                    tvb.Add(cv);
                    tvb.Add(idPtr);
                    tvb.Add(null, 0);
                    tvb.Add(Bits.SwapBytes(_documentsStorage.GenerateNextEtag()));
                    tvb.Add(collectionSlice);
                    tvb.Add(existingTombstone.LastModified.Ticks);
                    tvb.Add((int)existingTombstone.Flags);
                    if (conflictsTable.Set(tvb))
                    {
                        Interlocked.Increment(ref ConflictsCount);
                    }
                }
            }

            // we delete the data directly, without generating a tombstone,
            // because we have a conflict instead
            _documentsStorage.EnsureLastEtagIsPersisted(context, existingTombstone.Etag);

            collectionName = _documentsStorage.GetCollection(existingTombstone.Collection, throwIfDoesNotExist: true);

            var table = tx.OpenTable(TombstonesSchema, collectionName.GetTableName(CollectionTableType.Tombstones));
            table.Delete(existingTombstone.StorageId);
        }
        else // has existing conflicts
        {
            collectionName = _documentsStorage.ExtractCollectionName(context, incomingDoc);

            using (GetConflictsIdPrefix(context, lowerId, out Slice prefixSlice))
            {
                var conflicts = GetConflictsFor(context, prefixSlice);
                foreach (var conflict in conflicts)
                {
                    var conflictStatus = ChangeVectorUtils.GetConflictStatus(incomingChangeVector, conflict.ChangeVector);
                    switch (conflictStatus)
                    {
                        case ConflictStatus.Update:
                            DeleteConflictsFor(context, conflict.ChangeVector); // delete this, it has been subsumed
                            break;
                        case ConflictStatus.Conflict:
                            if (fromSmuggler &&
                                DocumentCompare.IsEqualTo(conflict.Doc, incomingDoc, false) == DocumentCompareResult.Equal)
                            {
                                return; // we already have a conflict with equal content, no need to create another one
                            }

                            break; // we'll add this conflict if no one else also includes it
                        case ConflictStatus.AlreadyMerged:
                            return; // we already have a conflict that includes this version
                        default:
                            throw new ArgumentOutOfRangeException("Invalid conflict status " + conflictStatus);
                    }
                }
            }
        }

        var etag = _documentsStorage.GenerateNextEtag();
        if (context.LastDatabaseChangeVector == null)
        {
            context.LastDatabaseChangeVector = GetDatabaseChangeVector(context);
        }

        var result = ChangeVectorUtils.TryUpdateChangeVector(_documentDatabase.ServerStore.NodeTag, _documentDatabase.DbBase64Id, etag, context.LastDatabaseChangeVector);
        if (result.IsValid)
        {
            context.LastDatabaseChangeVector = result.ChangeVector;
        }

        byte* doc = null;
        var docSize = 0;
        string collection;
        if (incomingDoc != null) // can be null if it is a tombstone
        {
            doc = incomingDoc.BasePointer;
            docSize = incomingDoc.Size;
            collection = CollectionName.GetLazyCollectionNameFrom(context, incomingDoc);
        }
        else
        {
            collection = incomingTombstoneCollection;
        }

        using (Slice.From(context.Allocator, incomingChangeVector, out var cv))
        using (DocumentIdWorker.GetStringPreserveCase(context, collection, out Slice collectionSlice))
        using (conflictsTable.Allocate(out TableValueBuilder tvb))
        {
            tvb.Add(lowerId);
            tvb.Add(SpecialChars.RecordSeparator);
            tvb.Add(cv);
            tvb.Add(idPtr);
            tvb.Add(doc, docSize);
            tvb.Add(Bits.SwapBytes(etag));
            tvb.Add(collectionSlice);
            tvb.Add(lastModifiedTicks);
            tvb.Add((int)flags);
            if (conflictsTable.Set(tvb))
            {
                Interlocked.Increment(ref ConflictsCount);
            }
        }

        context.Transaction.AddAfterCommitNotification(new DocumentChange
        {
            ChangeVector = incomingChangeVector,
            CollectionName = collectionName.Name,
            Id = id,
            Type = DocumentChangeTypes.Conflict,
        });
    }
}
private void ReplicateToDestination()
{
    try
    {
        AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiate);
        NativeMemory.EnsureRegistered();
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Will replicate to {Destination.FromString()} via {_connectionInfo.Url}");
        }

        using (_parent._server.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        using (context.OpenReadTransaction())
        {
            var record = _parent.LoadDatabaseRecord();
            if (record == null)
            {
                throw new InvalidOperationException($"The database record for {_parent.Database.Name} does not exist?!");
            }

            if (record.Encrypted && Destination.Url.StartsWith("https:", StringComparison.OrdinalIgnoreCase) == false)
            {
                throw new InvalidOperationException(
                    $"{record.DatabaseName} is encrypted and requires HTTPS for replication, but the endpoint with url {Destination.Url} to database {Destination.Database} does not use it");
            }
        }

        var task = TcpUtils.ConnectSocketAsync(_connectionInfo, _parent._server.Engine.TcpConnectionTimeout, _log);
        task.Wait(CancellationToken);
        using (Interlocked.Exchange(ref _tcpClient, task.Result))
        {
            var wrapSsl = TcpUtils.WrapStreamWithSslAsync(_tcpClient, _connectionInfo, _parent._server.Server.Certificate.Certificate, _parent._server.Engine.TcpConnectionTimeout);
            wrapSsl.Wait(CancellationToken);

            using (_stream = wrapSsl.Result) // note that _stream is being disposed by the interruptible read
            using (_interruptibleRead = new InterruptibleRead(_database.DocumentsStorage.ContextPool, _stream))
            using (_buffer = JsonOperationContext.ManagedPinnedBuffer.LongLivedInstance())
            {
                var documentSender = new ReplicationDocumentSender(_stream, this, _log);

                WriteHeaderToRemotePeer();

                // handle the initial response (last etag and related state)
                try
                {
                    var response = HandleServerResponse(getFullResponse: true);
                    switch (response.ReplyType)
                    {
                        // the first time we start replication we need to register the destination's current change vector
                        case ReplicationMessageReply.ReplyType.Ok:
                            LastAcceptedChangeVector = response.Reply.DatabaseChangeVector;
                            break;
                        case ReplicationMessageReply.ReplyType.Error:
                            var exception = new InvalidOperationException(response.Reply.Exception);
                            if (response.Reply.Exception.Contains(nameof(DatabaseDoesNotExistException)) ||
                                response.Reply.Exception.Contains(nameof(DatabaseNotRelevantException)))
                            {
                                AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, "Database does not exist");
                                DatabaseDoesNotExistException.ThrowWithMessageAndException(Destination.Database, response.Reply.Message, exception);
                            }

                            AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, $"Got error: {response.Reply.Exception}");
                            throw exception;
                    }
                }
                catch (DatabaseDoesNotExistException e)
                {
                    var msg = $"Failed to parse initial server replication response, because there is no database named {_database.Name} " +
                              "on the other end. ";
                    if (_external)
                    {
                        msg += "In order for the replication to work, a database with the same name needs to be created at the destination. ";
                    }

                    var young = (DateTime.UtcNow - _startedAt).TotalSeconds < 30;
                    if (young)
                    {
                        msg += "This can happen if the other node wasn't yet notified about being assigned this database and should be resolved shortly.";
                    }

                    if (_log.IsInfoEnabled)
                    {
                        _log.Info(msg, e);
                    }

                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);

                    // won't add an alert on young connections,
                    // because it may take a few seconds for the other side to be notified by
                    // the cluster that it has this db
                    if (young == false)
                    {
                        AddAlertOnFailureToReachOtherSide(msg, e);
                    }

                    throw;
                }
                catch (OperationCanceledException e)
                {
                    const string msg = "Got operation canceled notification while opening outgoing replication channel. " +
                                       "Aborting and closing the channel.";
                    if (_log.IsInfoEnabled)
                    {
                        _log.Info(msg, e);
                    }

                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);
                    throw;
                }
                catch (Exception e)
                {
                    var msg = $"{OutgoingReplicationThreadName} got an unexpected exception during initial handshake";
                    if (_log.IsInfoEnabled)
                    {
                        _log.Info(msg, e);
                    }

                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);
                    AddAlertOnFailureToReachOtherSide(msg, e);
                    throw;
                }

                DateTime nextReplicateAt = default(DateTime);

                while (_cts.IsCancellationRequested == false)
                {
                    while (_database.Time.GetUtcNow() > nextReplicateAt)
                    {
                        if (_parent.DebugWaitAndRunReplicationOnce != null)
                        {
                            _parent.DebugWaitAndRunReplicationOnce.Wait(_cts.Token);
                            _parent.DebugWaitAndRunReplicationOnce.Reset();
                        }

                        var sp = Stopwatch.StartNew();
                        var stats = _lastStats = new OutgoingReplicationStatsAggregator(_parent.GetNextReplicationStatsId(), _lastStats);
                        AddReplicationPerformance(stats);
                        AddReplicationPulse(ReplicationPulseDirection.OutgoingBegin);

                        try
                        {
                            using (var scope = stats.CreateScope())
                            {
                                try
                                {
                                    if (Destination is InternalReplication dest)
                                    {
                                        _parent.EnsureNotDeleted(dest.NodeTag);
                                    }

                                    var didWork = documentSender.ExecuteReplicationOnce(scope, ref nextReplicateAt);
                                    if (didWork == false)
                                    {
                                        break;
                                    }

                                    if (Destination is ExternalReplication externalReplication)
                                    {
                                        var taskId = externalReplication.TaskId;
                                        UpdateExternalReplicationInfo(taskId);
                                    }

                                    DocumentsSend?.Invoke(this);

                                    if (sp.ElapsedMilliseconds > 60 * 1000)
                                    {
                                        _waitForChanges.Set();
                                        break;
                                    }
                                }
                                catch (OperationCanceledException)
                                {
                                    // cancellation is not an actual error,
                                    // it is a "notification" that we need to cancel the current operation
                                    const string msg = "Operation was canceled.";
                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingError, msg);
                                    throw;
                                }
                                catch (Exception e)
                                {
                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingError, e.Message);
                                    scope.AddError(e);
                                    throw;
                                }
                            }
                        }
                        finally
                        {
                            stats.Complete();
                            AddReplicationPulse(ReplicationPulseDirection.OutgoingEnd);
                        }
                    }

                    // if this returns false, it means either a timeout occurred or the cancellation token was activated
                    while (WaitForChanges(_parent.MinimalHeartbeatInterval, _cts.Token) == false)
                    {
                        // if we got canceled, we need to break right away
                        if (_cts.IsCancellationRequested)
                        {
                            break;
                        }

                        // open a tx, read the current change vector and compare it to the last sent;
                        // if okay, send the cv
                        using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
                        using (var tx = ctx.OpenReadTransaction())
                        {
                            var etag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                            if (etag == _lastSentDocumentEtag)
                            {
                                SendHeartbeat(DocumentsStorage.GetDatabaseChangeVector(ctx));
                                _parent.CompleteDeletionIfNeeded();
                            }
                            else if (nextReplicateAt > DateTime.UtcNow)
                            {
                                SendHeartbeat(null);
                            }
                            else
                            {
                                // send a heartbeat first so we will get an updated CV of the destination
                                var currentChangeVector = DocumentsStorage.GetDatabaseChangeVector(ctx);
                                SendHeartbeat(null);

                                // if our previous CV is already merged at the destination, wait a bit more
                                if (ChangeVectorUtils.GetConflictStatus(LastAcceptedChangeVector, currentChangeVector) == ConflictStatus.AlreadyMerged)
                                {
                                    continue;
                                }

                                // We have updates that we need to send to the other side, so let's do that.
                                // This can happen if we got replication from another node that we need to
                                // pass along. Note that we typically will wait for the other node to send
                                // the data directly to our destination, but if it doesn't, we'll step in.
                                // In this case, we try to limit congestion in the network and only send
                                // updates that we have gotten from someone else after a certain time, to
                                // let the other side tell us that it already got them. Note that this is
                                // merely an optimization to reduce network traffic. It is fine to have the
                                // same data come from different sources.
                                break;
                            }
                        }
                    }

                    _waitForChanges.Reset();
                }
            }
        }
    }
    catch (AggregateException e)
    {
        if (e.InnerExceptions.Count == 1)
        {
            if (e.InnerException is OperationCanceledException oce)
            {
                HandleOperationCancelException(oce);
            }

            if (e.InnerException is IOException ioe)
            {
                HandleIOException(ioe);
            }
        }

        HandleException(e);
    }
    catch (OperationCanceledException e)
    {
        HandleOperationCancelException(e);
    }
    catch (IOException e)
    {
        HandleIOException(e);
    }
    catch (Exception e)
    {
        HandleException(e);
    }

    void HandleOperationCancelException(OperationCanceledException e)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Operation canceled on replication thread ({FromToString}). " +
                      $"This is not necessarily due to an issue. Stopped the thread.");
        }

        if (_cts.IsCancellationRequested == false)
        {
            Failed?.Invoke(this, e);
        }
    }

    void HandleIOException(IOException e)
    {
        if (_log.IsInfoEnabled)
        {
            if (e.InnerException is SocketException)
            {
                _log.Info($"SocketException was thrown from the connection to the remote node ({FromToString}). " +
                          $"This might mean that the remote node is down or there is a network issue.", e);
            }
            else
            {
                _log.Info($"IOException was thrown from the connection to the remote node ({FromToString}).", e);
            }
        }

        Failed?.Invoke(this, e);
    }

    void HandleException(Exception e)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Unexpected exception occurred on replication thread ({FromToString}). " +
                      $"Replication stopped (will be retried later).", e);
        }

        Failed?.Invoke(this, e);
    }
}