private void SendErrorIfPossible(TcpConnectionOptions tcp, Exception e)
{
    // Best effort: try to write an error document back to the client over the
    // raw TCP stream. Any failure while doing so is only logged, never thrown.
    var tcpStream = tcp?.Stream;
    if (tcpStream == null)
    {
        return;
    }

    try
    {
        using (_tcpContextPool.AllocateOperationContext(out JsonOperationContext context))
        using (var errorWriter = new BlittableJsonTextWriter(context, tcpStream))
        {
            var errorDocument = new DynamicJsonValue
            {
                ["Type"] = "Error",
                ["Exception"] = e.ToString(),
                ["Message"] = e.Message
            };
            context.Write(errorWriter, errorDocument);
        }
    }
    catch (Exception inner)
    {
        // The connection may already be broken - nothing more we can do here.
        if (_tcpLogger.IsInfoEnabled)
        {
            _tcpLogger.Info("Failed to send error in TCP connection", inner);
        }
    }
}
private async Task<bool> DispatchDatabaseTcpConnection(TcpConnectionOptions tcp, TcpConnectionHeaderMessage header)
{
    // Resolve (or begin loading) the database this connection is addressed to.
    var loadTask = ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(header.DatabaseName);
    if (loadTask == null)
    {
        DatabaseDoesNotExistException.Throw(header.DatabaseName);
        return true; // not reached - Throw never returns
    }

    // Bound the wait on the database load so a stuck load cannot hang the connection.
    if (loadTask.IsCompleted == false)
    {
        var loadTimeout = ServerStore.DatabasesLandlord.DatabaseLoadTimeout;
        var finishedFirst = await Task.WhenAny(loadTask, Task.Delay(loadTimeout));
        if (finishedFirst != loadTask)
        {
            ThrowTimeoutOnDatabaseLoad(header);
        }
    }

    tcp.DocumentDatabase = await loadTask;
    if (tcp.DocumentDatabase == null)
    {
        DatabaseDoesNotExistException.Throw(header.DatabaseName);
    }
    Debug.Assert(tcp.DocumentDatabase != null);

    if (tcp.DocumentDatabase.DatabaseShutdown.IsCancellationRequested)
    {
        ThrowDatabaseShutdown(tcp.DocumentDatabase);
    }

    tcp.DocumentDatabase.RunningTcpConnections.Add(tcp);

    // Hand the connection off to the handler for its operation type.
    switch (header.Operation)
    {
        case TcpConnectionHeaderMessage.OperationTypes.Subscription:
            SubscriptionConnection.SendSubscriptionDocuments(tcp);
            break;
        case TcpConnectionHeaderMessage.OperationTypes.Replication:
            tcp.DocumentDatabase.ReplicationLoader.AcceptIncomingConnection(tcp);
            break;
        default:
            throw new InvalidOperationException("Unknown operation for TCP " + header.Operation);
    }

    //since the responses to TCP connections mostly continue to run
    //beyond this point, no sense to dispose the connection now, so set it to null.
    //this way the responders are responsible to dispose the connection and the context
    // ReSharper disable once RedundantAssignment
    tcp = null;
    return false;
}
public ClusterMaintenanceWorker(TcpConnectionOptions tcp, CancellationToken externalToken, ServerStore serverStore, long term)
{
    // Connection and cluster-state wiring.
    _tcp = tcp;
    _server = serverStore;
    CurrentTerm = term;

    // Link to the caller's token so an external shutdown cancels this worker too.
    _cts = CancellationTokenSource.CreateLinkedTokenSource(externalToken);
    _token = _cts.Token;

    WorkerSamplePeriod = _server.Configuration.Cluster.WorkerSamplePeriod.AsTimeSpan;
    _logger = LoggingSource.Instance.GetLogger<ClusterMaintenanceWorker>($"Logger on {serverStore.NodeTag}");
}
public ClusterMaintenanceWorker(TcpConnectionOptions tcp, CancellationToken externalToken, ServerStore serverStore, string leader, long term)
{
    // Connection and cluster-state wiring.
    _tcp = tcp;
    _server = serverStore;
    CurrentTerm = term;
    _name = $"Maintenance worker connection to leader {leader} in term {term}";

    // Link to the caller's token so an external shutdown cancels this worker too.
    _cts = CancellationTokenSource.CreateLinkedTokenSource(externalToken);
    _token = _cts.Token;

    WorkerSamplePeriod = _server.Configuration.Cluster.WorkerSamplePeriod.AsTimeSpan;
    _logger = LoggingSource.Instance.GetLogger<ClusterMaintenanceWorker>(serverStore.NodeTag);
}
public ClusterMaintenanceWorker(TcpConnectionOptions tcp, CancellationToken externalToken, ServerStore serverStore, string leader, long term)
{
    // Connection and cluster-state wiring (must assign _tcp before reading
    // _tcp.ProtocolVersion below).
    _tcp = tcp;
    _server = serverStore;
    CurrentTerm = term;
    _name = $"Heartbeats worker connection to leader {leader} in term {term}";

    // Link to the caller's token so an external shutdown cancels this worker too.
    _cts = CancellationTokenSource.CreateLinkedTokenSource(externalToken);
    _token = _cts.Token;

    WorkerSamplePeriod = _server.Configuration.Cluster.WorkerSamplePeriod.AsTimeSpan;
    _logger = LoggingSource.Instance.GetLogger<ClusterMaintenanceWorker>(serverStore.NodeTag);

    // Negotiate the heartbeat feature set for the connection's protocol version.
    SupportedFeatures = TcpConnectionHeaderMessage.GetSupportedFeaturesFor(TcpConnectionHeaderMessage.OperationTypes.Heartbeats, _tcp.ProtocolVersion);
}
private async Task<bool> DispatchServerWideTcpConnection(TcpConnectionOptions tcp, TcpConnectionHeaderMessage header)
{
    tcp.Operation = header.Operation;

    // Cluster connections are handed straight to the cluster state machine.
    if (tcp.Operation == TcpConnectionHeaderMessage.OperationTypes.Cluster)
    {
        ServerStore.ClusterAcceptNewConnection(tcp.Stream);
        return true;
    }

    // Anything that is not Cluster or Heartbeats is not server-wide;
    // the caller will dispatch it to a database.
    if (tcp.Operation != TcpConnectionHeaderMessage.OperationTypes.Heartbeats)
    {
        return false;
    }

    // check for the term
    using (_tcpContextPool.AllocateOperationContext(out JsonOperationContext context))
    using (var headerJson = await context.ParseToMemoryAsync(
        tcp.Stream,
        "maintenance-heartbeat-header",
        BlittableJsonDocumentBuilder.UsageMode.None,
        tcp.PinnedBuffer
        ))
    {
        var maintenanceHeader = JsonDeserializationRachis<ClusterMaintenanceSupervisor.ClusterMaintenanceConnectionHeader>.Deserialize(headerJson);

        if (_clusterMaintenanceWorker?.CurrentTerm > maintenanceHeader.Term)
        {
            // A worker for a more recent term is already connected - reject the stale request.
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info($"Request for maintenance with term {maintenanceHeader.Term} was rejected, " +
                                $"because we are already connected to the recent leader with the term {_clusterMaintenanceWorker.CurrentTerm}");
            }
            tcp.Dispose();
            return true;
        }

        // Swap in a worker for the new term, disposing the previous one.
        var previousWorker = _clusterMaintenanceWorker;
        using (previousWorker)
        {
            _clusterMaintenanceWorker = new ClusterMaintenanceWorker(tcp, ServerStore.ServerShutdown, ServerStore, maintenanceHeader.Term);
            _clusterMaintenanceWorker.Start();
        }
        return true;
    }
}
// Handles the handshake for a new incoming replication connection:
// reads the source's "get last etag" request, validates the connection,
// replies with our last-accepted etag and database change vector, and
// registers an IncomingReplicationHandler (unless one already exists for
// that source database).
public void AcceptIncomingConnection(TcpConnectionOptions tcpConnectionOptions)
{
    ReplicationLatestEtagRequest getLatestEtagMessage;
    // Read and deserialize the handshake message sent by the source database.
    using (tcpConnectionOptions.ContextPool.AllocateOperationContext(out JsonOperationContext context))
    using (var readerObject = context.ParseToMemory(
        tcpConnectionOptions.Stream,
        "IncomingReplication/get-last-etag-message read",
        BlittableJsonDocumentBuilder.UsageMode.None,
        tcpConnectionOptions.PinnedBuffer))
    {
        getLatestEtagMessage = JsonDeserializationServer.ReplicationLatestEtagRequest(readerObject);
        if (_log.IsInfoEnabled)
        {
            _log.Info(
                $"GetLastEtag: {getLatestEtagMessage.SourceTag}({getLatestEtagMessage.SourceMachineName}) / {getLatestEtagMessage.SourceDatabaseName} ({getLatestEtagMessage.SourceDatabaseId}) - {getLatestEtagMessage.SourceUrl}");
        }
    }
    var connectionInfo = IncomingConnectionInfo.FromGetLatestEtag(getLatestEtagMessage);
    try
    {
        AssertValidConnection(connectionInfo);
    }
    catch (Exception e)
    {
        // Rejected connection: record the rejection for diagnostics, close the
        // connection (best effort), and rethrow to the caller.
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Connection from [{connectionInfo}] is rejected.", e);
        }
        var incomingConnectionRejectionInfos = _incomingRejectionStats.GetOrAdd(connectionInfo,
            _ => new ConcurrentQueue<IncomingConnectionRejectionInfo>());
        incomingConnectionRejectionInfos.Enqueue(new IncomingConnectionRejectionInfo { Reason = e.ToString() });
        try
        {
            tcpConnectionOptions.Dispose();
        }
        catch
        {
            // do nothing
        }
        throw;
    }
    try
    {
        // Reply with the last etag we accepted from this source plus our
        // database change vector, so the source knows where to resume from.
        using (Database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsOperationContext))
        using (Database.ConfigurationStorage.ContextPool.AllocateOperationContext(out TransactionOperationContext configurationContext))
        using (var writer = new BlittableJsonTextWriter(documentsOperationContext, tcpConnectionOptions.Stream))
        using (documentsOperationContext.OpenReadTransaction())
        using (configurationContext.OpenReadTransaction())
        {
            var changeVector = DocumentsStorage.GetDatabaseChangeVector(documentsOperationContext);
            var lastEtagFromSrc = Database.DocumentsStorage.GetLastReplicateEtagFrom(
                documentsOperationContext, getLatestEtagMessage.SourceDatabaseId);
            if (_log.IsInfoEnabled)
            {
                _log.Info($"GetLastEtag response, last etag: {lastEtagFromSrc}");
            }
            var response = new DynamicJsonValue
            {
                [nameof(ReplicationMessageReply.Type)] = "Ok",
                [nameof(ReplicationMessageReply.MessageType)] = ReplicationMessageType.Heartbeat,
                [nameof(ReplicationMessageReply.LastEtagAccepted)] = lastEtagFromSrc,
                [nameof(ReplicationMessageReply.NodeTag)] = _server.NodeTag,
                [nameof(ReplicationMessageReply.DatabaseChangeVector)] = changeVector
            };
            documentsOperationContext.Write(writer, response);
            writer.Flush();
        }
    }
    catch (Exception)
    {
        // Failed to send the reply - close the connection before rethrowing.
        try
        {
            tcpConnectionOptions.Dispose();
        }
        catch (Exception)
        {
            // do nothing
        }
        throw;
    }
    var newIncoming = new IncomingReplicationHandler(
        tcpConnectionOptions,
        getLatestEtagMessage,
        this);
    newIncoming.Failed += OnIncomingReceiveFailed;
    newIncoming.DocumentsReceived += OnIncomingReceiveSucceeded;
    if (_log.IsInfoEnabled)
    {
        _log.Info(
            $"Initialized document replication connection from {connectionInfo.SourceDatabaseName} located at {connectionInfo.SourceUrl}");
    }
    // need to safeguard against two concurrent connection attempts
    var newConnection = _incoming.GetOrAdd(newIncoming.ConnectionInfo.SourceDatabaseId, newIncoming);
    if (newConnection == newIncoming)
    {
        // We won the race: start pumping documents and notify listeners.
        newIncoming.Start();
        IncomingReplicationAdded?.Invoke(newIncoming);
        ForceTryReconnectAll();
    }
    else
    {
        // Another handler for this source already exists - discard ours.
        newIncoming.Dispose();
    }
}
// Builds and sends one replication batch to the destination node. Scans items
// starting after the last sent etag, batching them (and re-attaching
// attachments that were reported missing in the previous batch); when nothing
// needs sending it emits a heartbeat carrying the current change vector
// instead. Returns true when the destination's view of our state changed.
public bool ExecuteReplicationOnce(TcpConnectionOptions tcpConnectionOptions, OutgoingReplicationStatsScope stats, ref long next)
{
    EnsureValidStats(stats);
    var wasInterrupted = false;
    var delay = GetDelayReplication();
    var currentNext = next;
    using (_parent._database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
    using (documentsContext.OpenReadTransaction())
    {
        try
        {
            // we scan through the documents to send to the other side, we need to be careful about
            // filtering a lot of documents, because we need to let the other side know about this, and
            // at the same time, we need to send a heartbeat to keep the tcp connection alive
            _lastEtag = _parent._lastSentDocumentEtag;
            _parent.CancellationToken.ThrowIfCancellationRequested();
            var skippedReplicationItemsInfo = new SkippedReplicationItemsInfo();
            long prevLastEtag = _lastEtag;
            // Mutable per-batch state; also consulted by CanContinueBatch.
            var replicationState = new ReplicationState
            {
                BatchSize = _parent._database.Configuration.Replication.MaxItemsCount,
                MaxSizeToSend = _parent._database.Configuration.Replication.MaxSizeToSend,
                CurrentNext = currentNext,
                Delay = delay,
                Context = documentsContext,
                LastTransactionMarker = -1,
                NumberOfItemsSent = 0,
                Size = 0L,
                MissingTxMarkers = new HashSet<short>()
            };
            using (_stats.Storage.Start())
            {
                foreach (var item in GetReplicationItems(_parent._database, documentsContext, _lastEtag, _stats, _parent.SupportedFeatures.Replication.CaseInsensitiveCounters))
                {
                    _parent.CancellationToken.ThrowIfCancellationRequested();
                    // Batch continuation is only evaluated when the transaction
                    // marker changes (i.e. at transaction boundaries).
                    if (replicationState.LastTransactionMarker != item.TransactionMarker)
                    {
                        replicationState.Item = item;
                        if (CanContinueBatch(replicationState, ref next) == false)
                        {
                            wasInterrupted = true;
                            break;
                        }
                        replicationState.LastTransactionMarker = item.TransactionMarker;
                    }
                    _stats.Storage.RecordInputAttempt();
                    // here we add missing attachments in the same batch as the document that contains them without modifying the last etag or transaction boundary
                    if (MissingAttachmentsInLastBatch &&
                        item.Type == ReplicationBatchItem.ReplicationItemType.Document &&
                        item is DocumentReplicationItem docItem &&
                        docItem.Flags.Contain(DocumentFlags.HasAttachments))
                    {
                        var type = (docItem.Flags & DocumentFlags.Revision) == DocumentFlags.Revision ? AttachmentType.Revision : AttachmentType.Document;
                        foreach (var attachment in _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentsForDocument(documentsContext, type, docItem.Id, docItem.ChangeVector))
                        {
                            // we need to filter attachments that are been sent in the same batch as the document
                            if (attachment.Etag >= prevLastEtag)
                            {
                                if (attachment.TransactionMarker != item.TransactionMarker)
                                {
                                    replicationState.MissingTxMarkers.Add(attachment.TransactionMarker);
                                }
                                continue;
                            }
                            var stream = _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentStream(documentsContext, attachment.Base64Hash);
                            attachment.Stream = stream;
                            var attachmentItem = AttachmentReplicationItem.From(documentsContext, attachment);
                            AddReplicationItemToBatch(attachmentItem, _stats.Storage, skippedReplicationItemsInfo);
                            replicationState.Size += attachmentItem.Size;
                        }
                    }
                    _lastEtag = item.Etag;
                    if (AddReplicationItemToBatch(item, _stats.Storage, skippedReplicationItemsInfo) == false)
                    {
                        // this item won't be needed anymore
                        item.Dispose();
                        continue;
                    }
                    replicationState.Size += item.Size;
                    replicationState.NumberOfItemsSent++;
                }
            }
            if (_log.IsInfoEnabled)
            {
                if (skippedReplicationItemsInfo.SkippedItems > 0)
                {
                    var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
                    _log.Info(message);
                }
                var msg = $"Found {_orderedReplicaItems.Count:#,#;;0} documents " +
                          $"and {_replicaAttachmentStreams.Count} attachment's streams " +
                          $"to replicate to {_parent.Node.FromString()}, ";
                var encryptionSize = documentsContext.Transaction.InnerTransaction.LowLevelTransaction.AdditionalMemoryUsageSize.GetValue(SizeUnit.Bytes);
                if (encryptionSize > 0)
                {
                    msg += $"encryption buffer overhead size is {new Size(encryptionSize, SizeUnit.Bytes)}, ";
                }
                msg += $"total size: {new Size(replicationState.Size + encryptionSize, SizeUnit.Bytes)}";
                _log.Info(msg);
            }
            if (_orderedReplicaItems.Count == 0)
            {
                // Nothing to send this round: report progress via heartbeat.
                var hasModification = _lastEtag != _parent._lastSentDocumentEtag;
                // ensure that the other server is aware that we skipped
                // on (potentially a lot of) documents to send, and we update
                // the last etag they have from us on the other side
                _parent._lastSentDocumentEtag = _lastEtag;
                _parent._lastDocumentSentTime = DateTime.UtcNow;
                var changeVector = wasInterrupted ? null : DocumentsStorage.GetDatabaseChangeVector(documentsContext);
                _parent.SendHeartbeat(changeVector);
                return (hasModification);
            }
            _parent.CancellationToken.ThrowIfCancellationRequested();
            try
            {
                using (_stats.Network.Start())
                {
                    SendDocumentsBatch(documentsContext, _stats.Network);
                    tcpConnectionOptions._lastEtagSent = _lastEtag;
                    tcpConnectionOptions.RegisterBytesSent(replicationState.Size);
                    if (MissingAttachmentsInLastBatch)
                    {
                        // NOTE(review): presumably set during SendDocumentsBatch when the
                        // destination reports missing attachments - confirm; the batch is
                        // then not considered complete.
                        return (false);
                    }
                }
            }
            catch (OperationCanceledException)
            {
                if (_log.IsInfoEnabled)
                {
                    _log.Info("Received cancellation notification while sending document replication batch.");
                }
                throw;
            }
            catch (Exception e)
            {
                if (_log.IsInfoEnabled)
                {
                    _log.Info("Failed to send document replication batch", e);
                }
                throw;
            }
            MissingAttachmentsInLastBatch = false;
            return (true);
        }
        finally
        {
            // Always release batch resources, even on failure or cancellation.
            foreach (var item in _orderedReplicaItems)
            {
                item.Value.Dispose();
            }
            _orderedReplicaItems.Clear();
            _replicaAttachmentStreams.Clear();
        }
    }
}
// Accepts a single TCP connection, immediately re-arms the listener for the
// next one, performs the (optional) TLS handshake plus header handshake
// (protocol-version check and authorization), and dispatches the connection
// to a server-wide or database-level handler.
private void ListenToNewTcpConnection(TcpListener listener)
{
    Task.Run(async () =>
    {
        TcpClient tcpClient;
        try
        {
            tcpClient = await listener.AcceptTcpClientAsync();
        }
        catch (ObjectDisposedException)
        {
            // shutting down
            return;
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failed to accept new tcp connection", e);
            }
            return;
        }
        // Re-arm: keep accepting new clients while this one is being processed.
        ListenToNewTcpConnection(listener);
        try
        {
            tcpClient.NoDelay = true;
            tcpClient.ReceiveBufferSize = 32 * 1024;
            tcpClient.SendBufferSize = 4096;
            Stream stream = tcpClient.GetStream();
            stream = await AuthenticateAsServerIfSslNeeded(stream);
            var tcp = new TcpConnectionOptions
            {
                ContextPool = _tcpContextPool,
                Stream = stream,
                TcpClient = tcpClient,
                PinnedBuffer = JsonOperationContext.ManagedPinnedBuffer.LongLivedInstance()
            };
            try
            {
                TcpConnectionHeaderMessage header;
                using (_tcpContextPool.AllocateOperationContext(out JsonOperationContext context))
                {
                    using (var headerJson = await context.ParseToMemoryAsync(
                        stream,
                        "tcp-header",
                        BlittableJsonDocumentBuilder.UsageMode.None,
                        tcp.PinnedBuffer,
                        ServerStore.ServerShutdown,
                        // we don't want to allow external (and anonymous) users to send us unlimited data
                        // a maximum of 2 KB for the header is big enough to include any valid header that
                        // we can currently think of
                        maxSize: 1024 * 2
                        ))
                    {
                        header = JsonDeserializationClient.TcpConnectionHeaderMessage(headerJson);
                        if (Logger.IsInfoEnabled)
                        {
                            Logger.Info(
                                $"New {header.Operation} TCP connection to {header.DatabaseName ?? "the cluster node"} from {tcpClient.Client.RemoteEndPoint}");
                        }
                    }
                    if (MatchingOperationVersion(header, out var error) == false)
                    {
                        RespondToTcpConnection(stream, context, error, TcpConnectionStatus.TcpVersionMismatch);
                        if (Logger.IsInfoEnabled)
                        {
                            Logger.Info(
                                $"New {header.Operation} TCP connection to {header.DatabaseName ?? "the cluster node"} from {tcpClient.Client.RemoteEndPoint} failed because:" +
                                $" {error}");
                        }
                        return; //we will not accept not matching versions
                    }
                    bool authSuccessful = TryAuthorize(Configuration, tcp.Stream, header, out var err);
                    // BUGFIX: respond with the authorization message ('err'), not the
                    // stale version-check out-var ('error'), which is null on this path -
                    // otherwise an AuthorizationFailed response carries no reason.
                    RespondToTcpConnection(stream, context, err, authSuccessful ? TcpConnectionStatus.Ok : TcpConnectionStatus.AuthorizationFailed);
                    if (authSuccessful == false)
                    {
                        if (Logger.IsInfoEnabled)
                        {
                            Logger.Info(
                                $"New {header.Operation} TCP connection to {header.DatabaseName ?? "the cluster node"} from {tcpClient.Client.RemoteEndPoint}" +
                                $" is not authorized to access {header.DatabaseName ?? "the cluster node"} because {err}");
                        }
                        return; // cannot proceed
                    }
                }
                if (await DispatchServerWideTcpConnection(tcp, header))
                {
                    tcp = null; //do not keep reference -> tcp will be disposed by server-wide connection handlers
                    return;
                }
                await DispatchDatabaseTcpConnection(tcp, header);
            }
            catch (Exception e)
            {
                if (_tcpLogger.IsInfoEnabled)
                {
                    _tcpLogger.Info("Failed to process TCP connection run", e);
                }
                // Best effort: let the client know why the connection failed.
                SendErrorIfPossible(tcp, e);
            }
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failure when processing tcp connection", e);
            }
        }
    });
}
// Legacy handshake for an incoming replication connection: reads the
// "get last etag" request via the connection's MultiDocumentParser, validates
// the source, replies with the last accepted document/index-transformer etags
// and both change vectors, then registers an IncomingReplicationHandler.
public void AcceptIncomingConnection(TcpConnectionOptions tcpConnectionOptions)
{
    ReplicationLatestEtagRequest getLatestEtagMessage;
    using (var readerObject = tcpConnectionOptions.MultiDocumentParser.ParseToMemory("IncomingReplication/get-last-etag-message read"))
    {
        getLatestEtagMessage = JsonDeserializationServer.ReplicationLatestEtagRequest(readerObject);
        if (_log.IsInfoEnabled)
        {
            _log.Info($"GetLastEtag: {getLatestEtagMessage.SourceMachineName} / {getLatestEtagMessage.SourceDatabaseName} ({getLatestEtagMessage.SourceDatabaseId}) - {getLatestEtagMessage.SourceUrl}");
        }
    }
    var connectionInfo = IncomingConnectionInfo.FromGetLatestEtag(getLatestEtagMessage);
    try
    {
        AssertValidConnection(connectionInfo);
    }
    catch (Exception e)
    {
        // Track rejections per connection info for diagnostics, then rethrow.
        // NOTE(review): unlike newer variants of this method, this path does not
        // dispose tcpConnectionOptions on rejection - confirm the caller cleans up.
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Connection from [{connectionInfo}] is rejected.", e);
        }
        var incomingConnectionRejectionInfos = _incomingRejectionStats.GetOrAdd(connectionInfo,
            _ => new ConcurrentQueue<IncomingConnectionRejectionInfo>());
        incomingConnectionRejectionInfos.Enqueue(new IncomingConnectionRejectionInfo { Reason = e.ToString() });
        throw;
    }
    DocumentsOperationContext documentsOperationContext;
    TransactionOperationContext configurationContext;
    using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out documentsOperationContext))
    using (_database.ConfigurationStorage.ContextPool.AllocateOperationContext(out configurationContext))
    using (var writer = new BlittableJsonTextWriter(documentsOperationContext, tcpConnectionOptions.Stream))
    using (var docTx = documentsOperationContext.OpenReadTransaction())
    using (var configTx = configurationContext.OpenReadTransaction())
    {
        // Serialize the document-side change vector for the reply.
        var documentsChangeVector = new DynamicJsonArray();
        foreach (var changeVectorEntry in _database.DocumentsStorage.GetDatabaseChangeVector(documentsOperationContext))
        {
            documentsChangeVector.Add(new DynamicJsonValue
            {
                [nameof(ChangeVectorEntry.DbId)] = changeVectorEntry.DbId.ToString(),
                [nameof(ChangeVectorEntry.Etag)] = changeVectorEntry.Etag
            });
        }
        // Serialize the index/transformer-side change vector for the reply.
        var indexesChangeVector = new DynamicJsonArray();
        var changeVectorAsArray = _database.IndexMetadataPersistence.GetIndexesAndTransformersChangeVector(configTx.InnerTransaction);
        foreach (var changeVectorEntry in changeVectorAsArray)
        {
            indexesChangeVector.Add(new DynamicJsonValue
            {
                [nameof(ChangeVectorEntry.DbId)] = changeVectorEntry.DbId.ToString(),
                [nameof(ChangeVectorEntry.Etag)] = changeVectorEntry.Etag
            });
        }
        var lastEtagFromSrc = _database.DocumentsStorage.GetLastReplicateEtagFrom(documentsOperationContext, getLatestEtagMessage.SourceDatabaseId);
        if (_log.IsInfoEnabled)
        {
            _log.Info($"GetLastEtag response, last etag: {lastEtagFromSrc}");
        }
        // Reply so the source knows where to resume replication from.
        documentsOperationContext.Write(writer, new DynamicJsonValue
        {
            [nameof(ReplicationMessageReply.Type)] = "Ok",
            [nameof(ReplicationMessageReply.MessageType)] = ReplicationMessageType.Heartbeat,
            [nameof(ReplicationMessageReply.LastEtagAccepted)] = lastEtagFromSrc,
            [nameof(ReplicationMessageReply.LastIndexTransformerEtagAccepted)] = _database.IndexMetadataPersistence.GetLastReplicateEtagFrom(configTx.InnerTransaction, getLatestEtagMessage.SourceDatabaseId),
            [nameof(ReplicationMessageReply.DocumentsChangeVector)] = documentsChangeVector,
            [nameof(ReplicationMessageReply.IndexTransformerChangeVector)] = indexesChangeVector
        });
        writer.Flush();
    }
    var newIncoming = new IncomingReplicationHandler(
        tcpConnectionOptions.MultiDocumentParser,
        _database,
        tcpConnectionOptions.TcpClient,
        tcpConnectionOptions.Stream,
        getLatestEtagMessage,
        this);
    newIncoming.Failed += OnIncomingReceiveFailed;
    newIncoming.DocumentsReceived += OnIncomingReceiveSucceeded;
    if (_log.IsInfoEnabled)
    {
        _log.Info($"Initialized document replication connection from {connectionInfo.SourceDatabaseName} located at {connectionInfo.SourceUrl}", null);
    }
    // need to safeguard against two concurrent connection attempts
    var newConnection = _incoming.GetOrAdd(newIncoming.ConnectionInfo.SourceDatabaseId, newIncoming);
    if (newConnection == newIncoming)
    {
        newIncoming.Start();
    }
    else
    {
        // Lost the race to another connection from the same source - discard ours.
        newIncoming.Dispose();
    }
}
// Legacy variant: accepts one TCP connection, re-arms the listener, parses
// the connection header, loads the target database (bounded by a timeout),
// and hands the connection off to the matching operation handler. Ownership
// of 'tcp' transfers to the handler by nulling the local before the finally
// block's dispose.
private void ListenToNewTcpConnection(TcpListener listener)
{
    Task.Run(async () =>
    {
        TcpClient tcpClient;
        try
        {
            tcpClient = await listener.AcceptTcpClientAsync();
        }
        catch (ObjectDisposedException)
        {
            // shutting down
            return;
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failed to accept new tcp connection", e);
            }
            return;
        }
        // Re-arm: accept the next client while this one is being processed.
        ListenToNewTcpConnection(listener);
        TcpConnectionOptions tcp = null;
        try
        {
            tcpClient.NoDelay = true;
            tcpClient.ReceiveBufferSize = 32 * 1024;
            tcpClient.SendBufferSize = 4096;
            var stream = tcpClient.GetStream();
            tcp = new TcpConnectionOptions()
            {
                Stream = stream,
                TcpClient = tcpClient,
                DisposeOnConnectionClose =
                {
                    stream,
                    tcpClient
                }
            };
            // The operation context is owned by the connection and released with it.
            tcp.DisposeOnConnectionClose.Add(
                _tcpContextPool.AllocateOperationContext(out tcp.Context)
                );
            tcp.MultiDocumentParser = tcp.Context.ParseMultiFrom(stream);
            try
            {
                TcpConnectionHeaderMessage header;
                using (var headerJson = await tcp.MultiDocumentParser.ParseToMemoryAsync())
                {
                    header = JsonDeserializationClient.TcpConnectionHeaderMessage(headerJson);
                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info($"New {header.Operation} TCP connection to {header.DatabaseName} from {tcpClient.Client.RemoteEndPoint}");
                    }
                }
                tcp.Operation = header.Operation;
                var databaseLoadingTask = ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(header.DatabaseName);
                if (databaseLoadingTask == null)
                {
                    ThrowNoSuchDatabase(header);
                    return;// never hit
                }
                // Bound the wait on the database load by the configured timeout.
                var databaseLoadTimeout = ServerStore.DatabasesLandlord.DatabaseLoadTimeout;
                if (databaseLoadingTask.IsCompleted == false)
                {
                    var resultingTask = await Task.WhenAny(databaseLoadingTask, Task.Delay(databaseLoadTimeout));
                    if (resultingTask != databaseLoadingTask)
                    {
                        ThrowTimeoutOnDatbaseLoad(header);
                    }
                }
                tcp.DocumentDatabase = await databaseLoadingTask;
                tcp.DocumentDatabase.RunningTcpConnections.Add(tcp);
                // Dispatch by operation type; the handler takes ownership of the connection.
                switch (header.Operation)
                {
                    case TcpConnectionHeaderMessage.OperationTypes.BulkInsert:
                        BulkInsertConnection.Run(tcp);
                        break;
                    case TcpConnectionHeaderMessage.OperationTypes.Subscription:
                        SubscriptionConnection.SendSubscriptionDocuments(tcp);
                        break;
                    case TcpConnectionHeaderMessage.OperationTypes.Replication:
                        var documentReplicationLoader = tcp.DocumentDatabase.DocumentReplicationLoader;
                        documentReplicationLoader.AcceptIncomingConnection(tcp);
                        break;
                    default:
                        throw new InvalidOperationException("Unknown operation for tcp " + header.Operation);
                }
                // Ownership transferred - prevent the finally below from disposing.
                tcp = null;
            }
            catch (Exception e)
            {
                if (_tcpLogger.IsInfoEnabled)
                {
                    _tcpLogger.Info("Failed to process TCP connection run", e);
                }
                if (tcp != null)
                {
                    // Best effort: report the failure to the client before closing.
                    using (var errorWriter = new BlittableJsonTextWriter(tcp.Context, tcp.Stream))
                    {
                        tcp.Context.Write(errorWriter, new DynamicJsonValue
                        {
                            ["Type"] = "Error",
                            ["Exception"] = e.ToString()
                        });
                    }
                }
            }
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failure when processing tcp connection", e);
            }
        }
        finally
        {
            // Dispose unless ownership was handed off (tcp nulled above).
            tcp?.Dispose();
        }
    });
}