private bool AddReplicationItemToBatch(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    if (ShouldSkip(item, stats, skippedReplicationItemsInfo))
        return false;

    if (skippedReplicationItemsInfo.SkippedItems > 0)
    {
        if (_log.IsInfoEnabled)
        {
            var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
            _log.Info(message);
        }

        skippedReplicationItemsInfo.Reset();
    }

    if (item is AttachmentReplicationItem attachment)
        _replicaAttachmentStreams[attachment.Base64Hash] = attachment;

    _orderedReplicaItems.Add(item.Etag, item);
    return true;
}
public MergedReplicationBatchEnumerator(OutgoingReplicationStatsScope documentRead, OutgoingReplicationStatsScope attachmentRead, OutgoingReplicationStatsScope tombstoneRead, OutgoingReplicationStatsScope counterRead)
{
    _documentRead = documentRead;
    _attachmentRead = attachmentRead;
    _tombstoneRead = tombstoneRead;
    _countersRead = counterRead;
}
private void SendDocumentsBatch(DocumentsOperationContext documentsContext, OutgoingReplicationStatsScope stats)
{
    if (_log.IsInfoEnabled)
        _log.Info($"Starting sending replication batch ({_parent._database.Name}) with {_orderedReplicaItems.Count:#,#;;0} docs, and last etag {_lastEtag:#,#;;0}");

    var sw = Stopwatch.StartNew();
    var headerJson = new DynamicJsonValue
    {
        [nameof(ReplicationMessageHeader.Type)] = ReplicationMessageType.Documents,
        [nameof(ReplicationMessageHeader.LastDocumentEtag)] = _lastEtag,
        [nameof(ReplicationMessageHeader.ItemsCount)] = _orderedReplicaItems.Count,
        [nameof(ReplicationMessageHeader.AttachmentStreamsCount)] = _replicaAttachmentStreams.Count
    };

    stats.RecordLastEtag(_lastEtag);
    _parent.WriteToServer(headerJson);

    foreach (var item in _orderedReplicaItems)
    {
        using (Slice.From(documentsContext.Allocator, item.Value.ChangeVector, out var cv))
        {
            item.Value.Write(cv, _stream, _tempBuffer, stats);
        }
    }

    foreach (var item in _replicaAttachmentStreams)
    {
        item.Value.WriteStream(_stream, _tempBuffer);
        stats.RecordAttachmentOutput(item.Value.Stream.Length);
    }

    // close the transaction as early as possible, and before we wait for reply
    // from other side
    documentsContext.Transaction.Dispose();

    _stream.Flush();
    sw.Stop();

    if (_log.IsInfoEnabled && _orderedReplicaItems.Count > 0)
        _log.Info($"Finished sending replication batch. Sent {_orderedReplicaItems.Count:#,#;;0} documents and {_replicaAttachmentStreams.Count:#,#;;0} attachment streams in {sw.ElapsedMilliseconds:#,#;;0} ms. Last sent etag = {_lastEtag:#,#;;0}");

    var (type, _) = _parent.HandleServerResponse();
    if (type == ReplicationMessageReply.ReplyType.MissingAttachments)
    {
        MissingAttachmentsInLastBatch = true;
        return;
    }

    _parent._lastSentDocumentEtag = _lastEtag;
    _parent._lastDocumentSentTime = DateTime.UtcNow;
}
private bool AddReplicationItemToBatch(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
        item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        if ((item.Flags & DocumentFlags.Artificial) == DocumentFlags.Artificial)
        {
            stats.RecordArtificialDocumentSkip();
            skippedReplicationItemsInfo.Update(item, isArtificial: true);
            return false;
        }
    }

    if (item.Flags.Contain(DocumentFlags.Revision) || item.Flags.Contain(DocumentFlags.DeleteRevision))
    {
        // we let all the conflicted/resolved revisions pass, since we keep them with their original change vector,
        // which might be `AlreadyMerged` at the destination
        if (item.Flags.Contain(DocumentFlags.Conflicted) || item.Flags.Contain(DocumentFlags.Resolved))
        {
            _orderedReplicaItems.Add(item.Etag, item);
            return true;
        }
    }

    // destination already has it
    if ((MissingAttachmentsInLastBatch == false || item.Type != ReplicationBatchItem.ReplicationItemType.Attachment) &&
        ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        skippedReplicationItemsInfo.Update(item);
        return false;
    }

    if (skippedReplicationItemsInfo.SkippedItems > 0)
    {
        if (_log.IsInfoEnabled)
        {
            var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
            _log.Info(message);
        }

        skippedReplicationItemsInfo.Reset();
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
        _replicaAttachmentStreams[item.Base64Hash] = item;

    Debug.Assert(item.Flags.Contain(DocumentFlags.Artificial) == false);
    _orderedReplicaItems.Add(item.Etag, item);
    return true;
}
private bool AddReplicationItemToBatch(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
        item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        if ((item.Flags & DocumentFlags.Artificial) == DocumentFlags.Artificial)
        {
            stats.RecordArtificialDocumentSkip();
            skippedReplicationItemsInfo.Update(item, isArtificial: true);
            return false;
        }
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.CounterTombstone &&
        _parent.SupportedFeatures.Replication.Counters == false)
    {
        // skip counter tombstones in legacy mode
        skippedReplicationItemsInfo.Update(item);
        return false;
    }

    // destination already has it
    if ((MissingAttachmentsInLastBatch == false || item.Type != ReplicationBatchItem.ReplicationItemType.Attachment) &&
        ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        skippedReplicationItemsInfo.Update(item);
        return false;
    }

    if (skippedReplicationItemsInfo.SkippedItems > 0)
    {
        if (_log.IsInfoEnabled)
        {
            var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
            _log.Info(message);
        }

        skippedReplicationItemsInfo.Reset();
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
        _replicaAttachmentStreams[item.Base64Hash] = item;

    Debug.Assert(item.Flags.Contain(DocumentFlags.Artificial) == false);
    _orderedReplicaItems.Add(item.Etag, item);
    return true;
}
private void SendDocumentsBatch(DocumentsOperationContext documentsContext, OutgoingReplicationStatsScope stats)
{
    if (_log.IsInfoEnabled)
        _log.Info($"Starting sending replication batch ({_parent._database.Name}) with {_orderedReplicaItems.Count:#,#;;0} docs, and last etag {_lastEtag}");

    var sw = Stopwatch.StartNew();
    var headerJson = new DynamicJsonValue
    {
        [nameof(ReplicationMessageHeader.Type)] = ReplicationMessageType.Documents,
        [nameof(ReplicationMessageHeader.LastDocumentEtag)] = _lastEtag,
        [nameof(ReplicationMessageHeader.ItemsCount)] = _orderedReplicaItems.Count,
        [nameof(ReplicationMessageHeader.AttachmentStreamsCount)] = _replicaAttachmentStreams.Count
    };

    stats.RecordLastEtag(_lastEtag);
    _parent.WriteToServer(headerJson);

    foreach (var item in _orderedReplicaItems)
    {
        var value = item.Value;
        WriteItemToServer(documentsContext, value, stats);
    }

    foreach (var item in _replicaAttachmentStreams)
    {
        var value = item.Value;
        WriteAttachmentStreamToServer(value);
        stats.RecordAttachmentOutput(value.Stream.Length);
    }

    // close the transaction as early as possible, and before we wait for reply
    // from other side
    documentsContext.Transaction.Dispose();

    _stream.Flush();
    sw.Stop();

    _parent._lastSentDocumentEtag = _lastEtag;

    if (_log.IsInfoEnabled && _orderedReplicaItems.Count > 0)
        _log.Info($"Finished sending replication batch. Sent {_orderedReplicaItems.Count:#,#;;0} documents and {_replicaAttachmentStreams.Count:#,#;;0} attachment streams in {sw.ElapsedMilliseconds:#,#;;0} ms. Last sent etag = {_lastEtag}");

    _parent._lastDocumentSentTime = DateTime.UtcNow;
    _parent.HandleServerResponse();
}
private unsafe bool AddReplicationItemToBatch(ReplicationBatchItem item, OutgoingReplicationStatsScope stats)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
        item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        if ((item.Flags & DocumentFlags.Artificial) == DocumentFlags.Artificial)
        {
            stats.RecordArtificialDocumentSkip();

            if (_log.IsInfoEnabled)
                _log.Info($"Skipping replication of {item.Id} because it is an artificial document");

            return false;
        }

        if (CollectionName.IsSystemDocument(item.Id.Buffer, item.Id.Size, out bool isHiLo) && isHiLo == false)
        {
            stats.RecordSystemDocumentSkip();

            if (_log.IsInfoEnabled)
                _log.Info($"Skipping replication of {item.Id} because it is a system document");

            return false;
        }
    }

    // destination already has it
    if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();

        if (_log.IsInfoEnabled)
            _log.Info($"Skipping replication of {item.Type} '{item.Id}' because destination has a higher change vector. Current: {item.ChangeVector} < Destination: {_parent._destinationLastKnownChangeVectorAsString} ");

        return false;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
        _replicaAttachmentStreams[item.Base64Hash] = item;

    Debug.Assert(item.Flags.HasFlag(DocumentFlags.Artificial) == false);
    _orderedReplicaItems.Add(item.Etag, item);
    return true;
}
private void EnsureValidStats(OutgoingReplicationStatsScope stats)
{
    if (_statsInstance == stats)
        return;

    _statsInstance = stats;
    _stats.Storage = stats.For(ReplicationOperation.Outgoing.Storage, start: false);
    _stats.Network = stats.For(ReplicationOperation.Outgoing.Network, start: false);

    _stats.DocumentRead = _stats.Storage.For(ReplicationOperation.Outgoing.DocumentRead, start: false);
    _stats.TombstoneRead = _stats.Storage.For(ReplicationOperation.Outgoing.TombstoneRead, start: false);
    _stats.AttachmentRead = _stats.Storage.For(ReplicationOperation.Outgoing.AttachmentRead, start: false);
}
private bool ShouldSkip(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    switch (item)
    {
        case DocumentReplicationItem doc:
            if (doc.Flags.Contain(DocumentFlags.Artificial))
            {
                stats.RecordArtificialDocumentSkip();
                skippedReplicationItemsInfo.Update(item, isArtificial: true);
                return true;
            }

            if (doc.Flags.Contain(DocumentFlags.Revision) || doc.Flags.Contain(DocumentFlags.DeleteRevision))
            {
                // we let all the conflicted/resolved revisions pass, since we keep them with their original change vector,
                // which might be `AlreadyMerged` at the destination
                if (doc.Flags.Contain(DocumentFlags.Conflicted) || doc.Flags.Contain(DocumentFlags.Resolved))
                    return false;
            }

            break;

        case AttachmentReplicationItem _:
            if (MissingAttachmentsInLastBatch)
                return false;

            break;
    }

    // destination already has it
    if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        skippedReplicationItemsInfo.Update(item);
        return true;
    }

    return false;
}
private bool ShouldSkip(ReplicationBatchItem item, OutgoingReplicationStatsScope stats, SkippedReplicationItemsInfo skippedReplicationItemsInfo)
{
    if (ValidatorSaysToSkip(_pathsToSend) || ValidatorSaysToSkip(_destinationAcceptablePaths))
        return true;

    switch (item)
    {
        case DocumentReplicationItem doc:
            if (doc.Flags.Contain(DocumentFlags.Artificial))
            {
                stats.RecordArtificialDocumentSkip();
                skippedReplicationItemsInfo.Update(item, isArtificial: true);
                return true;
            }

            if (doc.Flags.Contain(DocumentFlags.Revision) || doc.Flags.Contain(DocumentFlags.DeleteRevision))
            {
                // we let all the conflicted/resolved revisions pass, since we keep them with their original change vector,
                // which might be `AlreadyMerged` at the destination
                if (doc.Flags.Contain(DocumentFlags.Conflicted) ||
                    doc.Flags.Contain(DocumentFlags.Resolved) ||
                    doc.Flags.Contain(DocumentFlags.FromClusterTransaction))
                    return false;
            }

            break;

        case AttachmentReplicationItem _:
            if (MissingAttachmentsInLastBatch)
                return false;

            break;
    }

    // destination already has it
    if (_parent._database.DocumentsStorage.GetConflictStatus(item.ChangeVector, _parent.LastAcceptedChangeVector) == ConflictStatus.AlreadyMerged)
    {
        stats.RecordChangeVectorSkip();
        skippedReplicationItemsInfo.Update(item);
        return true;
    }

    return false;

    bool ValidatorSaysToSkip(AllowedPathsValidator validator)
    {
        if (validator == null)
            return false;

        if (validator.ShouldAllow(item))
            return false;

        stats.RecordArtificialDocumentSkip();
        skippedReplicationItemsInfo.Update(item);

        if (_log.IsInfoEnabled)
        {
            string key = validator.GetItemInformation(item);
            _log.Info($"Will skip sending {key} ({item.Type}) because it was not allowed according to the incoming .");
        }

        return true;
    }
}
public bool ExecuteReplicationOnce(TcpConnectionOptions tcpConnectionOptions, OutgoingReplicationStatsScope stats, ref long next)
{
    EnsureValidStats(stats);

    var wasInterrupted = false;
    var delay = GetDelayReplication();
    var currentNext = next;

    using (_parent._database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
    using (documentsContext.OpenReadTransaction())
    {
        try
        {
            // we scan through the documents to send to the other side, we need to be careful about
            // filtering a lot of documents, because we need to let the other side know about this, and
            // at the same time, we need to send a heartbeat to keep the tcp connection alive
            _lastEtag = _parent._lastSentDocumentEtag;
            _parent.CancellationToken.ThrowIfCancellationRequested();

            var skippedReplicationItemsInfo = new SkippedReplicationItemsInfo();
            long prevLastEtag = _lastEtag;

            var replicationState = new ReplicationState
            {
                BatchSize = _parent._database.Configuration.Replication.MaxItemsCount,
                MaxSizeToSend = _parent._database.Configuration.Replication.MaxSizeToSend,
                CurrentNext = currentNext,
                Delay = delay,
                Context = documentsContext,
                LastTransactionMarker = -1,
                NumberOfItemsSent = 0,
                Size = 0L,
                MissingTxMarkers = new HashSet<short>()
            };

            using (_stats.Storage.Start())
            {
                foreach (var item in GetReplicationItems(_parent._database, documentsContext, _lastEtag, _stats, _parent.SupportedFeatures.Replication.CaseInsensitiveCounters))
                {
                    _parent.CancellationToken.ThrowIfCancellationRequested();

                    if (replicationState.LastTransactionMarker != item.TransactionMarker)
                    {
                        replicationState.Item = item;

                        if (CanContinueBatch(replicationState, ref next) == false)
                        {
                            wasInterrupted = true;
                            break;
                        }

                        replicationState.LastTransactionMarker = item.TransactionMarker;
                    }

                    _stats.Storage.RecordInputAttempt();

                    // here we add missing attachments in the same batch as the document that contains them,
                    // without modifying the last etag or transaction boundary
                    if (MissingAttachmentsInLastBatch &&
                        item.Type == ReplicationBatchItem.ReplicationItemType.Document &&
                        item is DocumentReplicationItem docItem &&
                        docItem.Flags.Contain(DocumentFlags.HasAttachments))
                    {
                        var type = (docItem.Flags & DocumentFlags.Revision) == DocumentFlags.Revision
                            ? AttachmentType.Revision
                            : AttachmentType.Document;

                        foreach (var attachment in _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentsForDocument(documentsContext, type, docItem.Id, docItem.ChangeVector))
                        {
                            // we need to filter out attachments that are being sent in the same batch as the document
                            if (attachment.Etag >= prevLastEtag)
                            {
                                if (attachment.TransactionMarker != item.TransactionMarker)
                                    replicationState.MissingTxMarkers.Add(attachment.TransactionMarker);

                                continue;
                            }

                            var stream = _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentStream(documentsContext, attachment.Base64Hash);
                            attachment.Stream = stream;
                            var attachmentItem = AttachmentReplicationItem.From(documentsContext, attachment);
                            AddReplicationItemToBatch(attachmentItem, _stats.Storage, skippedReplicationItemsInfo);
                            replicationState.Size += attachmentItem.Size;
                        }
                    }

                    _lastEtag = item.Etag;

                    if (AddReplicationItemToBatch(item, _stats.Storage, skippedReplicationItemsInfo) == false)
                    {
                        // this item won't be needed anymore
                        item.Dispose();
                        continue;
                    }

                    replicationState.Size += item.Size;
                    replicationState.NumberOfItemsSent++;
                }
            }

            if (_log.IsInfoEnabled)
            {
                if (skippedReplicationItemsInfo.SkippedItems > 0)
                {
                    var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
                    _log.Info(message);
                }

                var msg = $"Found {_orderedReplicaItems.Count:#,#;;0} documents " +
                          $"and {_replicaAttachmentStreams.Count} attachment's streams " +
                          $"to replicate to {_parent.Node.FromString()}, ";

                var encryptionSize = documentsContext.Transaction.InnerTransaction.LowLevelTransaction.AdditionalMemoryUsageSize.GetValue(SizeUnit.Bytes);
                if (encryptionSize > 0)
                    msg += $"encryption buffer overhead size is {new Size(encryptionSize, SizeUnit.Bytes)}, ";

                msg += $"total size: {new Size(replicationState.Size + encryptionSize, SizeUnit.Bytes)}";

                _log.Info(msg);
            }

            if (_orderedReplicaItems.Count == 0)
            {
                var hasModification = _lastEtag != _parent._lastSentDocumentEtag;

                // ensure that the other server is aware that we skipped
                // on (potentially a lot of) documents to send, and we update
                // the last etag they have from us on the other side
                _parent._lastSentDocumentEtag = _lastEtag;
                _parent._lastDocumentSentTime = DateTime.UtcNow;
                var changeVector = wasInterrupted ? null : DocumentsStorage.GetDatabaseChangeVector(documentsContext);
                _parent.SendHeartbeat(changeVector);
                return hasModification;
            }

            _parent.CancellationToken.ThrowIfCancellationRequested();

            try
            {
                using (_stats.Network.Start())
                {
                    SendDocumentsBatch(documentsContext, _stats.Network);
                    tcpConnectionOptions._lastEtagSent = _lastEtag;
                    tcpConnectionOptions.RegisterBytesSent(replicationState.Size);

                    if (MissingAttachmentsInLastBatch)
                        return false;
                }
            }
            catch (OperationCanceledException)
            {
                if (_log.IsInfoEnabled)
                    _log.Info("Received cancellation notification while sending document replication batch.");
                throw;
            }
            catch (Exception e)
            {
                if (_log.IsInfoEnabled)
                    _log.Info("Failed to send document replication batch", e);
                throw;
            }

            MissingAttachmentsInLastBatch = false;
            return true;
        }
        finally
        {
            foreach (var item in _orderedReplicaItems)
            {
                item.Value.Dispose();
            }

            _orderedReplicaItems.Clear();
            _replicaAttachmentStreams.Clear();
        }
    }
}
private void WriteItemToServer(DocumentsOperationContext context, ReplicationBatchItem item, OutgoingReplicationStatsScope stats)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
    {
        WriteAttachmentToServer(context, item);
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.AttachmentTombstone)
    {
        WriteAttachmentTombstoneToServer(context, item);
        stats.RecordAttachmentTombstoneOutput();
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.RevisionTombstone)
    {
        WriteRevisionTombstoneToServer(context, item);
        stats.RecordRevisionTombstoneOutput();
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        WriteDocumentToServer(context, item);
        stats.RecordDocumentTombstoneOutput();
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.Counter)
    {
        WriteCounterToServer(context, item);
        stats.RecordCounterOutput();
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.CounterTombstone)
    {
        WriteCounterTombstoneToServer(context, item);
        stats.RecordCounterTombstoneOutput();
        return;
    }

    WriteDocumentToServer(context, item);
    stats.RecordDocumentOutput(item.Data?.Size ?? 0);
}
public bool ExecuteReplicationOnce(OutgoingReplicationStatsScope stats, ref DateTime next)
{
    EnsureValidStats(stats);

    var wasInterrupted = false;
    var delay = GetDelayReplication();

    using (_parent._database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
    using (documentsContext.OpenReadTransaction())
    {
        try
        {
            // we scan through the documents to send to the other side, we need to be careful about
            // filtering a lot of documents, because we need to let the other side know about this, and
            // at the same time, we need to send a heartbeat to keep the tcp connection alive
            _lastEtag = _parent._lastSentDocumentEtag;
            _parent.CancellationToken.ThrowIfCancellationRequested();

            var batchSize = _parent._database.Configuration.Replication.MaxItemsCount;
            var maxSizeToSend = _parent._database.Configuration.Replication.MaxSizeToSend;
            long size = 0;
            var numberOfItemsSent = 0;
            var skippedReplicationItemsInfo = new SkippedReplicationItemsInfo();
            short lastTransactionMarker = -1;
            long prevLastEtag = _lastEtag;

            using (_stats.Storage.Start())
            {
                foreach (var item in GetReplicationItems(documentsContext, _lastEtag, _stats))
                {
                    if (lastTransactionMarker != item.TransactionMarker)
                    {
                        if (delay.Ticks > 0)
                        {
                            var nextReplication = item.LastModifiedTicks + delay.Ticks;
                            if (_parent._database.Time.GetUtcNow().Ticks < nextReplication)
                            {
                                next = new DateTime(nextReplication);
                                wasInterrupted = true;
                                break;
                            }
                        }

                        lastTransactionMarker = item.TransactionMarker;

                        if (_parent.SupportedFeatures.Replication.Counters == false)
                            AssertNotCounterForLegacyReplication(item);

                        if (_parent.SupportedFeatures.Replication.ClusterTransaction == false)
                            AssertNotClusterTransactionDocumentForLegacyReplication(item);

                        // Include the attachment's document, which is right after its latest attachment.
                        if ((item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
                             item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone) &&
                            // We want to limit batch sizes to reasonable limits.
                            ((maxSizeToSend.HasValue && size > maxSizeToSend.Value.GetValue(SizeUnit.Bytes)) ||
                             (batchSize.HasValue && numberOfItemsSent > batchSize.Value)))
                        {
                            wasInterrupted = true;
                            break;
                        }

                        if (_stats.Storage.CurrentStats.InputCount % 16384 == 0)
                        {
                            // ReSharper disable once PossibleLossOfFraction
                            if ((_parent._parent.MinimalHeartbeatInterval / 2) < _stats.Storage.Duration.TotalMilliseconds)
                            {
                                wasInterrupted = true;
                                break;
                            }
                        }
                    }

                    _stats.Storage.RecordInputAttempt();

                    // here we add missing attachments in the same batch as the document that contains them,
                    // without modifying the last etag or transaction boundary
                    if (MissingAttachmentsInLastBatch &&
                        item.Type == ReplicationBatchItem.ReplicationItemType.Document &&
                        (item.Flags & DocumentFlags.HasAttachments) == DocumentFlags.HasAttachments)
                    {
                        var type = (item.Flags & DocumentFlags.Revision) == DocumentFlags.Revision
                            ? AttachmentType.Revision
                            : AttachmentType.Document;

                        foreach (var attachment in _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentsForDocument(documentsContext, type, item.Id))
                        {
                            // we need to filter out attachments that are being sent in the same batch as the document
                            if (attachment.Etag >= prevLastEtag)
                                continue;

                            var stream = _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentStream(documentsContext, attachment.Base64Hash);
                            attachment.Stream = stream;
                            AddReplicationItemToBatch(ReplicationBatchItem.From(attachment), _stats.Storage, skippedReplicationItemsInfo);
                            size += attachment.Stream.Length;
                        }
                    }

                    _lastEtag = item.Etag;

                    if (item.Data != null)
                        size += item.Data.Size;
                    else if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
                        size += item.Stream.Length;

                    if (AddReplicationItemToBatch(item, _stats.Storage, skippedReplicationItemsInfo) == false)
                        continue;

                    numberOfItemsSent++;
                }
            }

            if (_log.IsInfoEnabled)
            {
                if (skippedReplicationItemsInfo.SkippedItems > 0)
                {
                    var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
                    _log.Info(message);
                }

                _log.Info($"Found {_orderedReplicaItems.Count:#,#;;0} documents and {_replicaAttachmentStreams.Count} attachment's streams to replicate to {_parent.Node.FromString()}.");
            }

            if (_orderedReplicaItems.Count == 0)
            {
                var hasModification = _lastEtag != _parent._lastSentDocumentEtag;

                // ensure that the other server is aware that we skipped
                // on (potentially a lot of) documents to send, and we update
                // the last etag they have from us on the other side
                _parent._lastSentDocumentEtag = _lastEtag;
                _parent._lastDocumentSentTime = DateTime.UtcNow;
                var changeVector = wasInterrupted ? null : DocumentsStorage.GetDatabaseChangeVector(documentsContext);
                _parent.SendHeartbeat(changeVector);
                return hasModification;
            }

            _parent.CancellationToken.ThrowIfCancellationRequested();

            try
            {
                using (_stats.Network.Start())
                {
                    SendDocumentsBatch(documentsContext, _stats.Network);

                    if (MissingAttachmentsInLastBatch)
                        return false;
                }
            }
            catch (OperationCanceledException)
            {
                if (_log.IsInfoEnabled)
                    _log.Info("Received cancellation notification while sending document replication batch.");
                throw;
            }
            catch (Exception e)
            {
                if (_log.IsInfoEnabled)
                    _log.Info("Failed to send document replication batch", e);
                throw;
            }

            MissingAttachmentsInLastBatch = false;
            return true;
        }
        finally
        {
            foreach (var item in _orderedReplicaItems)
            {
                var value = item.Value;
                if (value.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
                    value.Stream.Dispose();
                else
                    value.Data?.Dispose(); // item.Value.Data is null if tombstone
            }

            _orderedReplicaItems.Clear();
            _replicaAttachmentStreams.Clear();
        }
    }
}
private void WriteItemToServer(DocumentsOperationContext context, ReplicationBatchItem item, OutgoingReplicationStatsScope stats)
{
    if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
    {
        WriteAttachmentToServer(context, item);
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.AttachmentTombstone)
    {
        WriteAttachmentTombstoneToServer(context, item);
        stats.RecordAttachmentTombstoneOutput();
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.RevisionTombstone)
    {
        WriteRevisionTombstoneToServer(context, item);
        stats.RecordRevisionTombstoneOutput();
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone)
    {
        WriteDocumentToServer(context, item);
        stats.RecordDocumentTombstoneOutput();
        return;
    }

    if (item.Type == ReplicationBatchItem.ReplicationItemType.CounterGroup)
    {
        item.Values.TryGet(CountersStorage.Values, out BlittableJsonReaderObject counters);
        stats.RecordCountersOutput(counters?.Count ?? 0);
        WriteCountersToServer(context, item);
        return;
    }

    WriteDocumentToServer(context, item);
    stats.RecordDocumentOutput(item.Data?.Size ?? 0);
}
public bool ExecuteReplicationOnce(OutgoingReplicationStatsScope stats)
{
    EnsureValidStats(stats);

    using (_parent._database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
    using (documentsContext.OpenReadTransaction())
    {
        try
        {
            // we scan through the documents to send to the other side, we need to be careful about
            // filtering a lot of documents, because we need to let the other side know about this, and
            // at the same time, we need to send a heartbeat to keep the tcp connection alive
            _lastEtag = _parent._lastSentDocumentEtag;
            _parent.CancellationToken.ThrowIfCancellationRequested();

            var batchSize = _parent._database.Configuration.Replication.MaxItemsCount;
            var maxSizeToSend = _parent._database.Configuration.Replication.MaxSizeToSend;
            long size = 0;
            int numberOfItemsSent = 0;
            short lastTransactionMarker = -1;

            using (_stats.Storage.Start())
            {
                foreach (var item in GetDocsConflictsTombstonesRevisionsAndAttachmentsAfter(documentsContext, _lastEtag, _stats))
                {
                    if (lastTransactionMarker != item.TransactionMarker)
                    {
                        lastTransactionMarker = item.TransactionMarker;

                        // Include the attachment's document, which is right after its latest attachment.
                        if ((item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
                             item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone) &&
                            // We want to limit batch sizes to reasonable limits.
                            ((maxSizeToSend.HasValue && size > maxSizeToSend.Value.GetValue(SizeUnit.Bytes)) ||
                             (batchSize.HasValue && numberOfItemsSent > batchSize.Value)))
                        {
                            break;
                        }
                    }

                    _stats.Storage.RecordInputAttempt();

                    _lastEtag = item.Etag;

                    if (item.Data != null)
                        size += item.Data.Size;
                    else if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
                        size += item.Stream.Length;

                    if (AddReplicationItemToBatch(item, _stats.Storage))
                        numberOfItemsSent++;
                }
            }

            if (_log.IsInfoEnabled)
                _log.Info($"Found {_orderedReplicaItems.Count:#,#;;0} documents and {_replicaAttachmentStreams.Count} attachment's streams to replicate to {_parent.Node.FromString()}.");

            if (_orderedReplicaItems.Count == 0)
            {
                var hasModification = _lastEtag != _parent._lastSentDocumentEtag;

                // ensure that the other server is aware that we skipped
                // on (potentially a lot of) documents to send, and we update
                // the last etag they have from us on the other side
                _parent._lastSentDocumentEtag = _lastEtag;
                _parent._lastDocumentSentTime = DateTime.UtcNow;
                _parent.SendHeartbeat(DocumentsStorage.GetDatabaseChangeVector(documentsContext));
                return hasModification;
            }

            _parent.CancellationToken.ThrowIfCancellationRequested();

            try
            {
                using (_stats.Network.Start())
                {
                    SendDocumentsBatch(documentsContext, _stats.Network);
                }
            }
            catch (OperationCanceledException)
            {
                if (_log.IsInfoEnabled)
                    _log.Info("Received cancellation notification while sending document replication batch.");
                throw;
            }
            catch (Exception e)
            {
                if (_log.IsInfoEnabled)
                    _log.Info("Failed to send document replication batch", e);
                throw;
            }

            return true;
        }
        finally
        {
            foreach (var item in _orderedReplicaItems)
            {
                var value = item.Value;
                if (value.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
                {
                    // TODO: Why are we disposing here?
                    // Shouldn't the whole context be disposed here?
                    // If not, should we dispose all strings here?
                    value.Stream.Dispose();
                }
                else
                {
                    value.Data?.Dispose(); // item.Value.Data is null if tombstone
                }
            }

            _orderedReplicaItems.Clear();
            _replicaAttachmentStreams.Clear();
        }
    }
}