private void WritePatchResultToResponse(DocumentsOperationContext context, PatchDocumentCommand command)
{
    // Serialize the patch outcome (status, modified/original documents and the
    // debug payload) directly into the HTTP response body.
    var patchResult = command.PatchResult;

    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        writer.WriteStartObject();

        writer.WritePropertyName(nameof(patchResult.Status));
        writer.WriteString(patchResult.Status.ToString());
        writer.WriteComma();

        writer.WritePropertyName(nameof(patchResult.ModifiedDocument));
        writer.WriteObject(patchResult.ModifiedDocument);
        writer.WriteComma();

        writer.WritePropertyName(nameof(patchResult.OriginalDocument));
        writer.WriteObject(patchResult.OriginalDocument);
        writer.WriteComma();

        writer.WritePropertyName(nameof(patchResult.Debug));
        context.Write(writer, new DynamicJsonValue
        {
            ["Info"] = new DynamicJsonArray(command.DebugOutput),
            ["Actions"] = command.DebugActions?.GetDebugActions()
        });

        writer.WriteEndObject();
    }
}
public void WriteSubscription(SubscriptionState subscriptionState)
{
    // Every entry after the first needs a separating comma.
    if (!First)
        Writer.WriteComma();
    First = false;

    _context.Write(_writer, subscriptionState.ToJson());
}
private void WriteImportResult(DocumentsOperationContext context, Stopwatch sp, ImportResult result, Stream stream)
{
    // Augment the import summary with timing information before writing it out.
    var json = result.ToJson();
    json["ElapsedMilliseconds"] = sp.ElapsedMilliseconds;
    json["Elapsed"] = sp.Elapsed.ToString();

    using (var writer = new BlittableJsonTextWriter(context, stream))
    {
        context.Write(writer, json);
    }
}
public void WriteTombstone(Tombstone tombstone, SmugglerProgressBase.CountsWithLastEtag progress)
{
    // Comma-separate entries after the first one.
    if (!First)
        Writer.WriteComma();
    First = false;

    var json = new DynamicJsonValue
    {
        ["Key"] = tombstone.LowerId,
        [nameof(Tombstone.Type)] = tombstone.Type.ToString(),
        [nameof(Tombstone.Collection)] = tombstone.Collection,
        [nameof(Tombstone.Flags)] = tombstone.Flags.ToString(),
        [nameof(Tombstone.ChangeVector)] = tombstone.ChangeVector,
        [nameof(Tombstone.DeletedEtag)] = tombstone.DeletedEtag,
        [nameof(Tombstone.Etag)] = tombstone.Etag,
        [nameof(Tombstone.LastModified)] = tombstone.LastModified,
    };

    _context.Write(Writer, json);
}
public async ValueTask WriteSubscriptionAsync(SubscriptionState subscriptionState)
{
    // Comma-separate entries after the first one.
    if (!First)
        Writer.WriteComma();
    First = false;

    _context.Write(_writer, subscriptionState.ToJson());

    // Let the writer decide whether its buffer is large enough to warrant a flush.
    await Writer.MaybeFlushAsync();
}
public void WriteReplicationHubCertificate(string hub, ReplicationHubAccess access)
{
    // Comma-separate entries after the first one.
    if (!First)
        Writer.WriteComma();
    First = false;

    // Tag the access entry with the hub it belongs to before writing it.
    var json = access.ToJson();
    json[nameof(RegisterReplicationHubAccessCommand.HubName)] = hub;

    _context.Write(_writer, json);
}
/// <summary>
/// Creates (or updates) a subscription from the supplied options, waits for the
/// responsible node to apply the change, and writes the resulting subscription
/// name to the response with HTTP 201 Created.
/// </summary>
private async Task CreateInternal(BlittableJsonReaderObject bjro, SubscriptionCreationOptions options, DocumentsOperationContext context, long? id, bool? disabled)
{
    // Mirror the raw request body to traffic-watch subscribers, if any are attached.
    if (TrafficWatchManager.HasRegisteredClients)
    {
        AddStringToHttpContext(bjro.ToString(), TrafficWatchChangeType.Subscriptions);
    }

    var sub = SubscriptionConnection.ParseSubscriptionQuery(options.Query);

    // The requested change vector may be one of the symbolic special states
    // rather than an actual vector; translate those here.
    if (Enum.TryParse(options.ChangeVector, out Constants.Documents.SubscriptionChangeVectorSpecialStates changeVectorSpecialValue))
    {
        switch (changeVectorSpecialValue)
        {
            case Constants.Documents.SubscriptionChangeVectorSpecialStates.BeginningOfTime:
                // null change vector => process the collection from the very beginning.
                options.ChangeVector = null;
                break;
            case Constants.Documents.SubscriptionChangeVectorSpecialStates.LastDocument:
                // Start from the current tip of the subscribed collection.
                options.ChangeVector = Database.DocumentsStorage.GetLastDocumentChangeVector(context.Transaction.InnerTransaction, context, sub.Collection);
                break;
        }
    }

    var mentor = options.MentorNode;
    var subscriptionId = await Database.SubscriptionStorage.PutSubscription(options, GetRaftRequestIdFromQuery(), id, disabled, mentor);

    // Unnamed subscriptions are addressed by their numeric id.
    var name = options.Name ?? subscriptionId.ToString();

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext serverContext))
    using (serverContext.OpenReadTransaction())
    {
        // need to wait on the relevant remote node
        var node = Database.SubscriptionStorage.GetResponsibleNode(serverContext, name);
        if (node != null && node != ServerStore.NodeTag)
        {
            await WaitForExecutionOnSpecificNode(serverContext, ServerStore.GetClusterTopology(serverContext), node, subscriptionId);
        }
    }

    HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;

    await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        context.Write(writer, new DynamicJsonValue
        {
            [nameof(CreateSubscriptionResult.Name)] = name
        });
    }
}
public void WriteDocument(DocumentItem item, SmugglerProgressBase.CountsWithLastEtag progress)
{
    // Items carrying attachments must go through a dedicated path, not this one.
    if (item.Attachments != null)
        throw new NotSupportedException();

    var document = item.Document;
    using (document.Data)
    {
        // The document's attachment streams are written ahead of the document itself.
        WriteUniqueAttachmentStreams(document, progress);

        if (!First)
            Writer.WriteComma();
        First = false;

        document.EnsureMetadata();
        _context.Write(Writer, document.Data);
    }
}
/// <summary>
/// Executes a cluster-wide transaction: sends the parsed command batch to the Raft
/// leader, waits for it to be applied, and writes the per-command results plus the
/// transaction index to the response with HTTP 201 Created.
/// </summary>
/// <exception cref="ConcurrencyException">The leader reported execution errors.</exception>
private async Task HandleClusterTransaction(DocumentsOperationContext context, MergedBatchCommand command, ClusterTransactionCommand.ClusterTransactionOptions options)
{
    var clusterTransactionCommand = new ClusterTransactionCommand(Database.Name, command.ParsedCommands, options);
    var result = await ServerStore.SendToLeaderAsync(clusterTransactionCommand);

    if (result.Result is List<string> errors)
    {
        // NOTE(review): the 409 set here is likely replaced by the exception-handling
        // middleware once ConcurrencyException propagates — confirm the pipeline keeps it.
        HttpContext.Response.StatusCode = (int)HttpStatusCode.Conflict;
        throw new ConcurrencyException($"Failed to execute cluster transaction due to the following issues: {string.Join(Environment.NewLine, errors)}");
    }

    var array = new DynamicJsonArray();
    if (clusterTransactionCommand.DatabaseCommandsCount > 0)
    {
        // The batch contains document commands: wait for their execution results.
        var reply = (ClusterTransactionCompletionResult)await Database.ClusterTransactionWaiter.WaitForResults(options.TaskId, HttpContext.RequestAborted);
        if (reply.IndexTask != null)
        {
            await reply.IndexTask;
        }

        array = reply.Array;
    }
    else
    {
        // wait for the command to be applied on this node (batch of cmpxchng ops only)
        await ServerStore.WaitForCommitIndexChange(RachisConsensus.CommitIndexModification.GreaterOrEqual, result.Index);
    }

    // Append one result entry per cluster-level command; all share the raft index.
    foreach (var clusterCommands in clusterTransactionCommand.ClusterCommands)
    {
        array.Add(new DynamicJsonValue
        {
            ["Type"] = clusterCommands.Type,
            ["Key"] = clusterCommands.Id,
            ["Index"] = result.Index
        });
    }

    HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;

    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        context.Write(writer, new DynamicJsonValue
        {
            [nameof(BatchCommandResult.Results)] = array,
            [nameof(BatchCommandResult.TransactionIndex)] = result.Index
        });
    }
}
public async ValueTask WriteReplicationHubCertificateAsync(string hub, ReplicationHubAccess access)
{
    // Comma-separate entries after the first one.
    if (!First)
        Writer.WriteComma();
    First = false;

    // Tag the access entry with its hub name before writing it.
    var json = access.ToJson();
    json[nameof(RegisterReplicationHubAccessCommand.HubName)] = hub;
    _context.Write(_writer, json);

    // Let the writer decide whether its buffer warrants a flush.
    await Writer.MaybeFlushAsync();
}
/// <summary>
/// Responds with the number of revisions stored for the document whose id is given
/// in the mandatory "id" query-string parameter.
/// </summary>
private async Task GetRevisionsCount(DocumentsOperationContext documentContext)
{
    var docId = GetQueryStringValueAndAssertIfSingleAndNotEmpty("id");

    // The original assigned RevisionsCount = 0 in the initializer and then
    // immediately overwrote it; initialize it with the real value directly.
    var documentRevisionsDetails = new GetRevisionsCountOperation.DocumentRevisionsCount()
    {
        RevisionsCount = Database.DocumentsStorage.RevisionsStorage.GetRevisionsCount(documentContext, docId)
    };

    await using (var writer = new AsyncBlittableJsonTextWriter(documentContext, ResponseBodyStream()))
    {
        documentContext.Write(writer, documentRevisionsDetails.ToJson());
    }
}
/// <summary>
/// Reads the destination node's reply to our TCP connection header and validates it.
/// On a TCP version mismatch a "Drop" message is sent back before failing, so the
/// other side can close the connection gracefully.
/// </summary>
/// <exception cref="UnauthorizedAccessException">Destination rejected our credentials.</exception>
/// <exception cref="InvalidOperationException">Version mismatch or unknown status.</exception>
private void ReadHeaderResponseAndThrowIfUnAuthorized(DocumentsOperationContext documentsContext, BlittableJsonTextWriter writer)
{
    const int timeout = 2 * 60 * 1000; // 2 minutes, in milliseconds

    using (var replicationTcpConnectReplyMessage = _interruptibleRead.ParseToMemory(
        _connectionDisposed,
        "replication acknowledge response",
        timeout,
        _buffer,
        CancellationToken))
    {
        if (replicationTcpConnectReplyMessage.Timeout)
        {
            ThrowTimeout(timeout);
        }
        if (replicationTcpConnectReplyMessage.Interrupted)
        {
            ThrowConnectionClosed();
        }

        var headerResponse = JsonDeserializationServer.TcpConnectionHeaderResponse(replicationTcpConnectReplyMessage.Document);
        switch (headerResponse.Status)
        {
            case TcpConnectionStatus.Ok:
                break;
            case TcpConnectionStatus.AuthorizationFailed:
                throw new UnauthorizedAccessException($"{Destination.FromString()} replied with failure {headerResponse.Message}");
            case TcpConnectionStatus.TcpVersionMismatch:
                //Kindly request the server to drop the connection
                documentsContext.Write(writer, new DynamicJsonValue
                {
                    [nameof(TcpConnectionHeaderMessage.DatabaseName)] = Destination.Database,
                    [nameof(TcpConnectionHeaderMessage.Operation)] = TcpConnectionHeaderMessage.OperationTypes.Drop.ToString(),
                    [nameof(TcpConnectionHeaderMessage.SourceNodeTag)] = _parent._server.NodeTag,
                    [nameof(TcpConnectionHeaderMessage.OperationVersion)] = TcpConnectionHeaderMessage.GetOperationTcpVersion(TcpConnectionHeaderMessage.OperationTypes.Drop),
                    [nameof(TcpConnectionHeaderMessage.Info)] = $"Couldn't agree on replication tcp version ours:{TcpConnectionHeaderMessage.ReplicationTcpVersion} theirs:{headerResponse.Version}"
                });
                // Push the drop request onto the wire before tearing down on our side.
                writer.Flush();
                throw new InvalidOperationException($"{Destination.FromString()} replied with failure {headerResponse.Message}");
            default:
                throw new InvalidOperationException($"{Destination.FromString()} replied with unknown status {headerResponse.Status}, message:{headerResponse.Message}");
        }
    }
}
/// <summary>
/// Writes the stored subscriptions starting at offset <paramref name="start"/> as a
/// JSON object with a single "Subscriptions" array property, enriching entries that
/// currently have live in-memory state.
/// </summary>
public unsafe void GetAllSubscriptions(BlittableJsonTextWriter writer, DocumentsOperationContext context, int start, int take)
{
    var transactionPersistentContext = new TransactionPersistentContext();
    // NOTE(review): this opens a *write* transaction although the method only reads;
    // GetRunningSusbscriptions uses a read transaction — confirm whether this is intentional.
    using (var tx = _environment.WriteTransaction(transactionPersistentContext))
    {
        var subscriptions = new List<DynamicJsonValue>();
        var table = tx.OpenTable(_subscriptionsSchema, SubscriptionSchema.SubsTree);
        var seen = 0;
        var taken = 0;
        foreach (var subscriptionTvr in table.SeekByPrimaryKey(Slices.BeforeAllKeys))
        {
            // Paging offset: skip the first 'start' entries.
            if (seen < start)
            {
                seen++;
                continue;
            }

            var subscriptionData = ExtractSubscriptionConfigValue(subscriptionTvr, context);
            subscriptions.Add(subscriptionData);
            int size;
            // The stored id is byte-swapped; SwapBytes restores the native value.
            var subscriptionId = Bits.SwapBytes(*(long*)subscriptionTvr.Read(SubscriptionSchema.SubscriptionTable.IdIndex, out size));

            SubscriptionState subscriptionState = null;
            if (_subscriptionStates.TryGetValue(subscriptionId, out subscriptionState))
            {
                // Enrich with live connection state when the subscription is active.
                SetSubscriptionStateData(subscriptionState, subscriptionData);
            }

            taken++;
            // NOTE(review): post-increment '>' check lets up to take+1 entries through;
            // the same pattern exists in GetRunningSusbscriptions — confirm intent.
            if (taken > take)
            {
                break;
            }
        }

        writer.WriteStartObject();
        writer.WritePropertyName("Subscriptions");
        context.Write(writer, new DynamicJsonArray(subscriptions));
        writer.WriteEndObject();
    }
}
private void GenerateTopology(DocumentsOperationContext context, BlittableJsonTextWriter writer, IEnumerable<DynamicJsonValue> nodes = null, long etag = -1)
{
    // Advertised leader URL: an explicit "url" query parameter wins over the
    // configured server URL.
    var leaderUrl = GetStringQueryString("url", required: false) ?? Server.Configuration.Core.ServerUrl;

    var topology = new DynamicJsonValue
    {
        [nameof(Topology.LeaderNode)] = new DynamicJsonValue
        {
            [nameof(ServerNode.Url)] = leaderUrl,
            [nameof(ServerNode.Database)] = Database.Name,
        },
        [nameof(Topology.Nodes)] = nodes == null ? new DynamicJsonArray() : new DynamicJsonArray(nodes),
        [nameof(Topology.ReadBehavior)] = ReadBehavior.LeaderWithFailoverWhenRequestTimeSlaThresholdIsReached.ToString(),
        [nameof(Topology.WriteBehavior)] = WriteBehavior.LeaderOnly.ToString(),
        [nameof(Topology.SLA)] = new DynamicJsonValue
        {
            [nameof(TopologySla.RequestTimeThresholdInMilliseconds)] = 100,
        },
        [nameof(Topology.Etag)] = etag,
    };

    context.Write(writer, topology);
}
/// <summary>
/// Writes the subscriptions that currently have a live connection, starting at
/// offset <paramref name="start"/>, as a JSON array.
/// (The typo in the method name is part of its public interface — kept as-is.)
/// </summary>
public void GetRunningSusbscriptions(BlittableJsonTextWriter writer, DocumentsOperationContext context, int start, int take)
{
    var transactionPersistentContext = new TransactionPersistentContext();
    using (var tx = _environment.ReadTransaction(transactionPersistentContext))
    {
        var connections = new List<DynamicJsonValue>(take);
        var skipped = 0;
        var taken = 0;
        foreach (var kvp in _subscriptionStates)
        {
            var subscriptionState = kvp.Value;
            var subscriptionId = kvp.Key;
            // NOTE(review): '>' lets up to take+1 entries through — confirm intent
            // (GetAllSubscriptions has the same pattern).
            if (taken > take)
            {
                break;
            }
            // Only subscriptions with an active connection are reported.
            if (subscriptionState?.Connection == null)
            {
                continue;
            }
            // Paging offset: skip the first 'start' running subscriptions.
            if (skipped < start)
            {
                skipped++;
                continue;
            }

            var subscriptionData = ExtractSubscriptionConfigValue(GetSubscriptionConfig(subscriptionId, tx), context);
            SetSubscriptionStateData(subscriptionState, subscriptionData);
            connections.Add(subscriptionData);
            taken++;
        }

        context.Write(writer, new DynamicJsonArray(connections));
        writer.Flush();
    }
}
/// <summary>
/// Iterates on a batch in document collection, process it and send documents if found any match.
/// Flushes to the client once the buffer exceeds 1 MB or the batch has run for 1000 ms,
/// and sends heartbeats while no data is flowing so the connection stays alive.
/// </summary>
/// <param name="docsContext">Context used to read documents and build the outgoing JSON.</param>
/// <param name="sendingCurrentBatchStopwatch">Tracks how long the current batch has run; drives flush/heartbeat decisions.</param>
/// <returns>Whether succeeded finding any documents to send</returns>
private async Task<bool> TrySendingBatchToClient(DocumentsOperationContext docsContext, Stopwatch sendingCurrentBatchStopwatch)
{
    bool anyDocumentsSentInCurrentIteration = false;
    int docsToFlush = 0;
    using (var writer = new BlittableJsonTextWriter(docsContext, _buffer))
    {
        using (docsContext.OpenReadTransaction())
        {
            foreach (var result in _documentsFetcher.GetDataToSend(docsContext, _startEtag))
            {
                _startEtag = result.Doc.Etag;
                // Track the change vector to report as the next batch's starting point.
                _lastChangeVector = string.IsNullOrEmpty(SubscriptionState.ChangeVectorForNextBatchStartingPoint)
                    ? result.Doc.ChangeVector
                    : ChangeVectorUtils.MergeVectors(result.Doc.ChangeVector, SubscriptionState.ChangeVectorForNextBatchStartingPoint);

                if (result.Doc.Data == null)
                {
                    // Nothing to send for this document; keep the connection alive
                    // if we have been quiet for more than a second.
                    if (sendingCurrentBatchStopwatch.ElapsedMilliseconds > 1000)
                    {
                        await SendHeartBeat();
                        sendingCurrentBatchStopwatch.Restart();
                    }
                    continue;
                }

                anyDocumentsSentInCurrentIteration = true;
                writer.WriteStartObject();
                writer.WritePropertyName(docsContext.GetLazyStringForFieldWithCaching(TypeSegment));
                writer.WriteValue(BlittableJsonToken.String, docsContext.GetLazyStringForFieldWithCaching(DataSegment));
                writer.WriteComma();
                writer.WritePropertyName(docsContext.GetLazyStringForFieldWithCaching(DataSegment));
                result.Doc.EnsureMetadata();

                if (result.Exception != null)
                {
                    // This document failed processing: send only its metadata plus the
                    // exception text instead of the document body.
                    var metadata = result.Doc.Data[Client.Constants.Documents.Metadata.Key];
                    writer.WriteValue(BlittableJsonToken.StartObject,
                        docsContext.ReadObject(new DynamicJsonValue
                        {
                            [Client.Constants.Documents.Metadata.Key] = metadata
                        }, result.Doc.Id)
                    );
                    writer.WriteComma();
                    writer.WritePropertyName(docsContext.GetLazyStringForFieldWithCaching(ExceptionSegment));
                    writer.WriteValue(BlittableJsonToken.String, docsContext.GetLazyStringForFieldWithCaching(result.Exception.ToString()));
                }
                else
                {
                    writer.WriteDocument(docsContext, result.Doc, metadataOnly: false);
                }

                writer.WriteEndObject();
                docsToFlush++;

                // perform flush for current batch after 1000ms of running or 1 MB
                if (_buffer.Length > Constants.Size.Megabyte || sendingCurrentBatchStopwatch.ElapsedMilliseconds > 1000)
                {
                    if (docsToFlush > 0)
                    {
                        await FlushDocsToClient(writer, docsToFlush);
                        docsToFlush = 0;
                        sendingCurrentBatchStopwatch.Restart();
                    }
                    else
                    {
                        await SendHeartBeat();
                    }
                }
            }
        }

        if (anyDocumentsSentInCurrentIteration)
        {
            // Close the batch with an explicit end-of-batch message and a final flush.
            docsContext.Write(writer, new DynamicJsonValue
            {
                [nameof(SubscriptionConnectionServerMessage.Type)] = nameof(SubscriptionConnectionServerMessage.MessageType.EndOfBatch)
            });
            await FlushDocsToClient(writer, docsToFlush, true);
            if (_logger.IsInfoEnabled)
            {
                _logger.Info(
                    $"Finished sending a batch with {docsToFlush} documents for subscription {Options.SubscriptionName}");
            }
        }
    }
    return (anyDocumentsSentInCurrentIteration);
}
/// <summary>
/// Streams a gzip-compressed smuggler export — documents, revision documents,
/// indexes, transformers and identities, each gated by Options.OperateOnTypes —
/// to the destination stream as a single JSON object.
/// </summary>
/// <param name="context">Operation context used to read and serialize items.</param>
/// <param name="destinationStream">Target stream; left open after the export completes.</param>
/// <param name="onProgress">Optional callback receiving coarse progress messages.</param>
/// <returns>Summary with counts and the last exported etag per item type.</returns>
public ExportResult Export(DocumentsOperationContext context, Stream destinationStream, Action<IOperationProgress> onProgress = null)
{
    var result = new ExportResult();
    var progress = new IndeterminateProgress();
    using (var gZipStream = new GZipStream(destinationStream, CompressionMode.Compress, leaveOpen: true))
    using (var writer = new BlittableJsonTextWriter(context, gZipStream))
    {
        writer.WriteStartObject();

        writer.WritePropertyName(("BuildVersion"));
        writer.WriteInteger(40000); // hard-coded build-version stamp in the export header

        if (Options.OperateOnTypes.HasFlag(DatabaseItemType.Documents))
        {
            progress.Progress = "Exporting Documents";
            onProgress?.Invoke(progress);
            writer.WriteComma();
            writer.WritePropertyName(("Docs"));
            // Either a per-collection scan or a full scan, both resuming at StartDocsEtag.
            IEnumerable<Document> documents = Options.CollectionsToExport.Count != 0
                ? _database.DocumentsStorage.GetDocumentsFrom(context, Options.CollectionsToExport, StartDocsEtag ?? 0, int.MaxValue)
                : _database.DocumentsStorage.GetDocumentsFrom(context, StartDocsEtag ?? 0, 0, int.MaxValue);
            writer.WriteStartArray();

            PatchDocument patch = null;
            PatchRequest patchRequest = null;
            if (string.IsNullOrWhiteSpace(Options.TransformScript) == false)
            {
                // Optional user-supplied transform applied to every exported document.
                patch = new PatchDocument(context.DocumentDatabase);
                patchRequest = new PatchRequest
                {
                    Script = Options.TransformScript
                };
            }

            bool first = true;
            foreach (var document in documents)
            {
                if (document == null)
                {
                    continue;
                }
                // Optionally drop documents whose expiration time has already passed.
                if (Options.IncludeExpired == false && document.Expired(_database.Time.GetUtcNow()))
                {
                    continue;
                }

                // Replace the document data only when the transform actually changed it.
                var patchResult = patch?.Apply(context, document, patchRequest);
                if (patchResult != null && patchResult.ModifiedDocument.Equals(document.Data) == false)
                {
                    document.Data = patchResult.ModifiedDocument;
                }

                using (document.Data)
                {
                    if (first == false)
                    {
                        writer.WriteComma();
                    }
                    first = false;

                    document.EnsureMetadata();
                    context.Write(writer, document.Data);
                    result.LastDocsEtag = document.Etag;
                }
                result.ExportedDocuments++;
            }
            writer.WriteEndArray();
        }

        if (Options.OperateOnTypes.HasFlag(DatabaseItemType.RevisionDocuments))
        {
            var versioningStorage = _database.BundleLoader.VersioningStorage;
            // Revisions are exported only when the versioning bundle is active.
            if (versioningStorage != null)
            {
                writer.WriteComma();
                writer.WritePropertyName("RevisionDocuments");
                writer.WriteStartArray();
                var first = true;
                var revisionDocuments = Options.RevisionDocumentsLimit.HasValue
                    ? versioningStorage.GetRevisionsAfter(context, StartRevisionDocumentsEtag ?? 0, Options.RevisionDocumentsLimit.Value)
                    : versioningStorage.GetRevisionsAfter(context, StartRevisionDocumentsEtag ?? 0);
                foreach (var revisionDocument in revisionDocuments)
                {
                    if (revisionDocument == null)
                    {
                        continue;
                    }

                    using (revisionDocument.Data)
                    {
                        if (first == false)
                        {
                            writer.WriteComma();
                        }
                        first = false;

                        revisionDocument.EnsureMetadata();
                        context.Write(writer, revisionDocument.Data);
                        result.LastRevisionDocumentsEtag = revisionDocument.Etag;
                    }
                }
                writer.WriteEndArray();
            }
        }

        if (Options.OperateOnTypes.HasFlag(DatabaseItemType.Indexes))
        {
            progress.Progress = "Exporting Indexes";
            onProgress?.Invoke(progress);
            writer.WriteComma();
            writer.WritePropertyName("Indexes");
            writer.WriteStartArray();
            var isFirst = true;
            foreach (var index in _database.IndexStore.GetIndexes())
            {
                if (isFirst == false)
                {
                    writer.WriteComma();
                }
                isFirst = false;
                IndexProcessor.Export(writer, index, context, Options.RemoveAnalyzers);
            }
            writer.WriteEndArray();
        }

        if (Options.OperateOnTypes.HasFlag(DatabaseItemType.Transformers))
        {
            progress.Progress = "Exporting Transformers";
            onProgress?.Invoke(progress);
            writer.WriteComma();
            writer.WritePropertyName(("Transformers"));
            writer.WriteStartArray();
            var isFirst = true;
            foreach (var transformer in _database.TransformerStore.GetTransformers())
            {
                if (isFirst == false)
                {
                    writer.WriteComma();
                }
                isFirst = false;
                TransformerProcessor.Export(writer, transformer, context);
            }
            writer.WriteEndArray();
        }

        if (Options.OperateOnTypes.HasFlag(DatabaseItemType.Identities))
        {
            progress.Progress = "Exporting Identities";
            onProgress?.Invoke(progress);
            writer.WriteComma();
            writer.WritePropertyName(("Identities"));
            writer.WriteStartArray();
            var identities = _database.DocumentsStorage.GetIdentities(context);
            var first = true;
            foreach (var identity in identities)
            {
                if (first == false)
                {
                    writer.WriteComma();
                }
                first = false;
                writer.WriteStartObject();
                writer.WritePropertyName(("Key"));
                writer.WriteString((identity.Key));
                writer.WriteComma();
                writer.WritePropertyName(("Value"));
                writer.WriteString((identity.Value.ToString()));
                writer.WriteEndObject();
            }
            writer.WriteEndArray();
        }

        writer.WriteEndObject();

        progress.Progress = $"Finish Exported database to {Options.FileName}. Exported {result.ExportedDocuments}";
        onProgress?.Invoke(progress);
    }
    return (result);
}
/// <summary>
/// Executes a graph query and writes its analyzed structure — all matched nodes
/// and the edges between them — to the response as a { Nodes, Edges } object.
/// </summary>
/// <exception cref="InvalidOperationException">The query is not a graph query.</exception>
private async Task Graph(DocumentsOperationContext context, RequestTimeTracker tracker, HttpMethod method)
{
    var indexQuery = await GetIndexQuery(context, method, tracker);

    var queryRunner = Database.QueryRunner.GetRunner(indexQuery);
    if (!(queryRunner is GraphQueryRunner gqr))
    {
        throw new InvalidOperationException("The specified query is not a graph query.");
    }

    using (var token = CreateTimeLimitedQueryToken())
    using (Database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
    {
        var results = await gqr.GetAnalyzedQueryResults(indexQuery, ctx, null, token);

        var nodes = new DynamicJsonArray();
        var edges = new DynamicJsonArray();
        var output = new DynamicJsonValue
        {
            ["Nodes"] = nodes,
            ["Edges"] = edges
        };

        foreach (var item in results.Nodes)
        {
            var val = item.Value;
            // Node values that are documents are flattened to their raw data
            // (with metadata ensured) before serialization.
            if (val is Document d)
            {
                d.EnsureMetadata();
                val = d.Data;
            }
            nodes.Add(new DynamicJsonValue
            {
                ["Id"] = item.Key,
                ["Value"] = val
            });
        }

        foreach (var edge in results.Edges)
        {
            var array = new DynamicJsonArray();
            var djv = new DynamicJsonValue
            {
                ["Name"] = edge.Key,
                ["Results"] = array
            };
            foreach (var item in edge.Value)
            {
                var edgeVal = item.Edge;
                // Edges that point at documents are reduced to the document id;
                // documents without an id get a synthetic "anonymous/<guid>" one.
                if (edgeVal is Document d)
                {
                    edgeVal = d.Id?.ToString() ?? "anonymous/" + Guid.NewGuid();
                }
                array.Add(new DynamicJsonValue
                {
                    ["From"] = item.Source,
                    ["To"] = item.Destination,
                    ["Edge"] = edgeVal
                });
            }
            edges.Add(djv);
        }

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            context.Write(writer, output);
        }
    }
}
/// <summary>
/// Main body of an outgoing replication thread: connects to the destination over
/// TCP, performs the initial handshake (connection header + last-etag exchange),
/// then loops replicating indexes/transformers and documents, sending heartbeats
/// while idle, until cancellation. Failures are logged; unexpected ones also
/// raise the Failed event so the connection can be retried later.
/// </summary>
private void ReplicateToDestination()
{
    try
    {
        var connectionInfo = GetTcpInfo();
        using (_tcpClient = new TcpClient())
        {
            ConnectSocket(connectionInfo, _tcpClient);

            using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out _documentsContext))
            using (_database.ConfigurationStorage.ContextPool.AllocateOperationContext(out _configurationContext))
            using (var stream = _tcpClient.GetStream())
            {
                var documentSender = new ReplicationDocumentSender(stream, this, _log);
                var indexAndTransformerSender = new ReplicationIndexTransformerSender(stream, this, _log);

                using (_writer = new BlittableJsonTextWriter(_documentsContext, stream))
                using (_parser = _documentsContext.ParseMultiFrom(stream))
                {
                    //send initial connection information
                    _documentsContext.Write(_writer, new DynamicJsonValue
                    {
                        [nameof(TcpConnectionHeaderMessage.DatabaseName)] = _destination.Database,
                        [nameof(TcpConnectionHeaderMessage.Operation)] = TcpConnectionHeaderMessage.OperationTypes.Replication.ToString(),
                    });

                    //start request/response for fetching last etag
                    _documentsContext.Write(_writer, new DynamicJsonValue
                    {
                        ["Type"] = "GetLastEtag",
                        ["SourceDatabaseId"] = _database.DbId.ToString(),
                        ["SourceDatabaseName"] = _database.Name,
                        ["SourceUrl"] = _database.Configuration.Core.ServerUrl,
                        ["MachineName"] = Environment.MachineName,
                    });
                    _writer.Flush();

                    //handle initial response to last etag and staff
                    try
                    {
                        using (_configurationContext.OpenReadTransaction())
                        using (_documentsContext.OpenReadTransaction())
                        {
                            var response = HandleServerResponse();
                            if (response.Item1 == ReplicationMessageReply.ReplyType.Error)
                            {
                                // A missing-database error is special-cased so we can
                                // raise a targeted, operator-friendly alert below.
                                if (response.Item2.Contains("DatabaseDoesNotExistsException"))
                                {
                                    throw new DatabaseDoesNotExistsException();
                                }
                                throw new InvalidOperationException(response.Item2);
                            }
                        }
                    }
                    catch (DatabaseDoesNotExistsException e)
                    {
                        var msg = $"Failed to parse initial server replication response, because there is no database named {_database.Name} on the other end. " +
                                  "In order for the replication to work, a database with the same name needs to be created at the destination";
                        if (_log.IsInfoEnabled)
                        {
                            _log.Info(msg, e);
                        }
                        // Surface the problem to the operator as a persistent alert.
                        using (var txw = _configurationContext.OpenWriteTransaction())
                        {
                            _database.Alerts.AddAlert(new Alert
                            {
                                Key = FromToString,
                                Type = AlertType.Replication,
                                Message = msg,
                                CreatedAt = DateTime.UtcNow,
                                Severity = AlertSeverity.Warning
                            }, _configurationContext, txw);
                            txw.Commit();
                        }
                        throw;
                    }
                    catch (Exception e)
                    {
                        var msg = $"Failed to parse initial server response. This is definitely not supposed to happen. Exception thrown: {e}";
                        if (_log.IsInfoEnabled)
                        {
                            _log.Info(msg, e);
                        }
                        // Surface the problem to the operator as a persistent alert.
                        using (var txw = _configurationContext.OpenWriteTransaction())
                        {
                            _database.Alerts.AddAlert(new Alert
                            {
                                Key = FromToString,
                                Type = AlertType.Replication,
                                Message = msg,
                                CreatedAt = DateTime.UtcNow,
                                Severity = AlertSeverity.Error
                            }, _configurationContext, txw);
                            txw.Commit();
                        }
                        throw;
                    }

                    // Steady-state replication loop: runs until cancellation is requested.
                    while (_cts.IsCancellationRequested == false)
                    {
                        _documentsContext.ResetAndRenew();

                        long currentEtag;
                        Debug.Assert(_database.IndexMetadataPersistence.IsInitialized);
                        using (_configurationContext.OpenReadTransaction())
                            currentEtag = _database.IndexMetadataPersistence.ReadLastEtag(_configurationContext.Transaction.InnerTransaction);

                        // Replicate index/transformer metadata only when its etag moved.
                        if (_destination.SkipIndexReplication == false && currentEtag != indexAndTransformerSender.LastEtag)
                        {
                            indexAndTransformerSender.ExecuteReplicationOnce();
                        }

                        var sp = Stopwatch.StartNew();
                        while (documentSender.ExecuteReplicationOnce())
                        {
                            // After a minute of continuous sending, re-signal and break
                            // so the outer loop gets a chance to run again.
                            if (sp.ElapsedMilliseconds > 60 * 1000)
                            {
                                _waitForChanges.Set();
                                break;
                            }
                        }

                        //if this returns false, this means either timeout or canceled token is activated
                        while (WaitForChanges(_minimalHeartbeatInterval, _cts.Token) == false)
                        {
                            _configurationContext.ResetAndRenew();
                            _documentsContext.ResetAndRenew();
                            using (_documentsContext.OpenReadTransaction())
                            using (_configurationContext.OpenReadTransaction())
                            {
                                SendHeartbeat();
                            }
                        }
                        _waitForChanges.Reset();
                    }
                }
            }
        }
    }
    catch (OperationCanceledException)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Operation canceled on replication thread ({FromToString}). Stopped the thread.");
        }
    }
    catch (IOException e)
    {
        if (_log.IsInfoEnabled)
        {
            if (e.InnerException is SocketException)
            {
                _log.Info(
                    $"SocketException was thrown from the connection to remote node ({FromToString}). This might mean that the remote node is done or there is a network issue.",
                    e);
            }
            else
            {
                _log.Info($"IOException was thrown from the connection to remote node ({FromToString}).", e);
            }
        }
    }
    catch (Exception e)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Unexpected exception occured on replication thread ({FromToString}). Replication stopped (will be retried later).", e);
        }
        Failed?.Invoke(this, e);
    }
}