/// <summary>
/// Writes the current list of remote cluster connections (plus a computed
/// Duration for each) to the response under the "Remote-Connections" key.
/// </summary>
public Task ListRemoteConnections()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out JsonOperationContext context))
    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        // Project each live remote connection into a JSON object.
        var connections = new DynamicJsonArray();
        foreach (var connection in RemoteConnection.RemoteConnectionsList)
        {
            connections.Add(new DynamicJsonValue
            {
                [nameof(RemoteConnection.RemoteConnectionInfo.Caller)] = connection.Caller,
                [nameof(RemoteConnection.RemoteConnectionInfo.Term)] = connection.Term,
                [nameof(RemoteConnection.RemoteConnectionInfo.Destination)] = connection.Destination,
                [nameof(RemoteConnection.RemoteConnectionInfo.StartAt)] = connection.StartAt,
                // How long the connection has been open (computed at response time).
                ["Duration"] = DateTime.UtcNow - connection.StartAt,
                [nameof(RemoteConnection.RemoteConnectionInfo.Number)] = connection.Number,
            });
        }

        context.Write(writer, new DynamicJsonValue
        {
            ["Remote-Connections"] = connections
        });
        writer.Flush();
    }

    return Task.CompletedTask;
}
/// <summary>
/// Returns basic information about this node: tag, cluster topology id,
/// client certificate, last state-change reason, core count and memory sizes.
/// </summary>
public Task GetNodeInfo()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        var nodeInfo = new DynamicJsonValue();

        // The topology id must be read under a read transaction.
        using (context.OpenReadTransaction())
        {
            nodeInfo[nameof(NodeInfo.NodeTag)] = ServerStore.NodeTag;
            nodeInfo[nameof(NodeInfo.TopologyId)] = ServerStore.GetClusterTopology(context).TopologyId;
            nodeInfo[nameof(NodeInfo.Certificate)] = ServerStore.Server.ClusterCertificateHolder.CertificateForClients;
            nodeInfo[nameof(ServerStore.Engine.LastStateChangeReason)] = ServerStore.LastStateChangeReason();
            nodeInfo[nameof(NodeInfo.NumberOfCores)] = ProcessorInfo.ProcessorCount;

            var memoryInformation = MemoryInformation.GetMemoryInfo();
            nodeInfo[nameof(NodeInfo.InstalledMemoryInGb)] = memoryInformation.InstalledMemory.GetDoubleValue(SizeUnit.Gigabytes);
            nodeInfo[nameof(NodeInfo.UsableMemoryInGb)] = memoryInformation.TotalPhysicalMemory.GetDoubleValue(SizeUnit.Gigabytes);
        }

        context.Write(writer, nodeInfo);
        writer.Flush();
    }

    return Task.CompletedTask;
}
/// <summary>
/// Serializes a single-property JSON object { command: commandParameter } into
/// an in-memory buffer and sends it over the websocket as one text frame.
/// </summary>
private async Task Send(RavenClientWebSocket webSocket, JsonOperationContext context, string command, string commandParameter)
{
    if (Logger.IsInfoEnabled)
    {
        Logger.Info($"Sending WebSocket Authentication Command {command} - {commandParameter}");
    }

    var payload = new DynamicJsonValue
    {
        [command] = commandParameter
    };

    using (var buffer = new MemoryStream())
    using (var writer = new BlittableJsonTextWriter(context, buffer))
    {
        context.Write(writer, payload);
        writer.Flush();

        // MemoryStream created with the default ctor always exposes its buffer.
        buffer.TryGetBuffer(out ArraySegment<byte> segment);
        await webSocket.SendAsync(segment, WebSocketMessageType.Text, true, CancellationToken.None).ConfigureAwait(false);
    }
}
/// <summary>
/// Enables or disables a server-wide task ("type" + "name" query strings;
/// "disable" defaults to true). The change goes through Raft and the method
/// waits until this node has applied it before answering.
/// </summary>
public async Task ToggleServerWideTaskState()
{
    var typeAsString = GetStringQueryString("type", required: true);
    var taskName = GetStringQueryString("name", required: true);
    var disable = GetBoolValueQueryString("disable") ?? true;

    if (Enum.TryParse(typeAsString, out OngoingTaskType type) == false)
    {
        throw new ArgumentException($"{typeAsString} is unknown task type.");
    }

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var parameters = new ToggleServerWideTaskStateCommand.Parameters
        {
            Type = type,
            TaskName = taskName,
            Disable = disable
        };

        // Push the toggle through the cluster and wait for local application.
        var (newIndex, _) = await ServerStore.ToggleServerWideTaskStateAsync(parameters, GetRaftRequestIdFromQuery());
        await ServerStore.WaitForCommitIndexChange(RachisConsensus.CommitIndexModification.GreaterOrEqual, newIndex);

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            var response = new ServerWideTaskResponse
            {
                Name = taskName,
                RaftCommandIndex = newIndex
            };

            context.Write(writer, response.ToJson());
            writer.Flush();
        }
    }
}
/// <summary>
/// Enables or disables an existing server-wide backup task. Loads the task by
/// name, flips its Disabled flag, persists it through Raft and waits until this
/// node has applied the change before responding.
/// </summary>
public async Task ToggleTaskState()
{
    var disable = GetBoolValueQueryString("disable") ?? true;
    var taskName = GetStringQueryString("taskName", required: false);

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        // Locate the existing task; missing name is a caller error.
        var existingBackupBlittable = ServerStore.Cluster.GetServerWideBackupConfigurations(context, taskName).FirstOrDefault();
        if (existingBackupBlittable == null)
        {
            throw new InvalidOperationException($"Server-Wide Backup Task: {taskName} was not found in the server.");
        }

        // Flip the flag and save the task back.
        var serverWideBackup = JsonDeserializationServer.ServerWideBackupConfiguration(existingBackupBlittable);
        serverWideBackup.Disabled = disable;

        var (newIndex, _) = await ServerStore.PutServerWideBackupConfigurationAsync(serverWideBackup, GetRaftRequestIdFromQuery());
        await ServerStore.WaitForCommitIndexChange(RachisConsensus.CommitIndexModification.GreaterOrEqual, newIndex);

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            var response = new PutServerWideBackupConfigurationResponse
            {
                Name = taskName,
                RaftCommandIndex = newIndex
            };

            context.Write(writer, response.ToJson());
            writer.Flush();
        }
    }
}
// Negotiates the replication TCP protocol version with the destination node and
// then sends the initial "GetLastEtag" request that opens the replication exchange.
private void WriteHeaderToRemotePeer()
{
    using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
    using (var writer = new BlittableJsonTextWriter(documentsContext, _stream))
    {
        var parameters = new TcpNegotiateParameters
        {
            Database = Destination.Database,
            Operation = TcpConnectionHeaderMessage.OperationTypes.Replication,
            SourceNodeTag = _parent._server.NodeTag,
            DestinationNodeTag = GetNode(),
            DestinationUrl = Destination.Url,
            ReadResponseAndGetVersionCallback = ReadHeaderResponseAndThrowIfUnAuthorized,
            Version = TcpConnectionHeaderMessage.ReplicationTcpVersion
        };

        // This will either throw or return an acceptable protocol version.
        SupportedFeatures = TcpNegotiation.NegotiateProtocolVersion(documentsContext, _stream, parameters);

        if (SupportedFeatures.ProtocolVersion <= 0)
        {
            throw new InvalidOperationException(
                $"{OutgoingReplicationThreadName}: TCP negotiation resulted with an invalid protocol version:{SupportedFeatures.ProtocolVersion}");
        }

        // Start the request/response exchange for fetching the last etag.
        var request = new DynamicJsonValue
        {
            ["Type"] = "GetLastEtag",
            [nameof(ReplicationLatestEtagRequest.SourceDatabaseId)] = _database.DbId.ToString(),
            [nameof(ReplicationLatestEtagRequest.SourceDatabaseName)] = _database.Name,
            [nameof(ReplicationLatestEtagRequest.SourceUrl)] = _parent._server.GetNodeHttpServerUrl(),
            [nameof(ReplicationLatestEtagRequest.SourceTag)] = _parent._server.NodeTag,
            [nameof(ReplicationLatestEtagRequest.SourceMachineName)] = Environment.MachineName
        };

        documentsContext.Write(writer, request);
        writer.Flush();
    }
}
// Collects the output of every server-wide ("None"-typed) debug route into the
// given zip archive, one entry per route. A failing endpoint does not abort the
// whole package: its exception is written as the entry content instead.
private async Task WriteServerWide(ZipArchive archive, JsonOperationContext context, LocalEndpointClient localEndpointClient, string prefix, CancellationToken token = default)
{
    token.ThrowIfCancellationRequested();

    //theoretically this could be parallelized,
    //however ZipArchive allows only one archive entry to be open concurrently
    foreach (var route in DebugInfoPackageUtils.Routes.Where(x => x.TypeOfRoute == RouteInformation.RouteType.None))
    {
        token.ThrowIfCancellationRequested();

        var entryRoute = DebugInfoPackageUtils.GetOutputPathFromRouteInformation(route, prefix);
        try
        {
            var entry = archive.CreateEntry(entryRoute);
            // Unix permission bits live in the upper 16 bits of ExternalAttributes;
            // mark the entry read/write for the owning user only.
            entry.ExternalAttributes = ((int)(FilePermissions.S_IRUSR | FilePermissions.S_IWUSR)) << 16;

            using (var entryStream = entry.Open())
            using (var writer = new BlittableJsonTextWriter(context, entryStream))
            using (var endpointOutput = await localEndpointClient.InvokeAndReadObjectAsync(route, context))
            {
                context.Write(writer, endpointOutput);
                writer.Flush();
                await entryStream.FlushAsync(token);
            }
        }
        catch (OperationCanceledException)
        {
            // Cancellation must propagate; only endpoint failures are captured below.
            throw;
        }
        catch (Exception e)
        {
            DebugInfoPackageUtils.WriteExceptionAsZipEntry(e, archive, entryRoute);
        }
    }
}
/// <summary>
/// Returns every server-wide backup configuration matching the optional "name"
/// query string, with each configuration's destination list resolved.
/// </summary>
public Task GetServerWideBackupConfigurationCommand()
{
    var taskName = GetStringQueryString("name", required: false);

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        var results = new ServerWideBackupConfigurationResults();

        foreach (var blittable in ServerStore.Cluster.GetServerWideBackupConfigurations(context, taskName))
        {
            var configuration = JsonDeserializationServer.ServerWideBackupConfiguration(blittable);
            // Resolve the destination list before handing the task to the client.
            configuration.BackupDestinations = configuration.GetDestinations();
            results.Results.Add(configuration);
        }

        context.Write(writer, results.ToJson());
        writer.Flush();

        return Task.CompletedTask;
    }
}
/// <summary>
/// Enables or disables an ongoing task ("key", "type", optional "taskName",
/// "disable" defaults to true) for the current database, waits for the Raft
/// index to be applied locally, and answers with the task id and command index.
/// </summary>
public async Task ToggleTaskState()
{
    // Validate the database name before touching cluster state.
    if (ResourceNameValidator.IsValidResourceName(Database.Name, ServerStore.Configuration.Core.DataDirectory.FullPath, out string errorMessage) == false)
    {
        throw new BadRequestException(errorMessage);
    }

    var key = GetLongQueryString("key");
    var typeStr = GetQueryStringValueAndAssertIfSingleAndNotEmpty("type");
    var disable = GetBoolValueQueryString("disable") ?? true;
    var taskName = GetStringQueryString("taskName", required: false);

    if (Enum.TryParse<OngoingTaskType>(typeStr, true, out var type) == false)
    {
        // BUGFIX: report the raw query-string value. When TryParse fails, 'type'
        // holds default(OngoingTaskType), so the old message ($"...{type}")
        // always showed the wrong (default) name instead of what the caller sent.
        throw new ArgumentException($"Unknown task type: {typeStr}", nameof(type));
    }

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        // Push the toggle through Raft and wait until this database has applied it.
        var (index, _) = await ServerStore.ToggleTaskState(key, taskName, type, disable, Database.Name);
        await Database.RachisLogIndexNotifications.WaitForIndexNotification(index);

        HttpContext.Response.StatusCode = (int)HttpStatusCode.OK;

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            context.Write(writer, new DynamicJsonValue
            {
                [nameof(ModifyOngoingTaskResult.TaskId)] = key,
                [nameof(ModifyOngoingTaskResult.RaftCommandIndex)] = index
            });
            writer.Flush();
        }
    }
}
// Verifies that a large (1 MB), highly-compressible string round-trips as a
// LazyCompressedStringValue, and that two independent reads of the same
// payload compare equal.
public void CompareLazyCompressedStringValue()
{
    using (var context = JsonOperationContext.ShortTermSingleUse())
    using (var ms = new MemoryStream())
    using (var writer = new BlittableJsonTextWriter(context, ms))
    {
        // Write { "Test": "ccc...c" } (1024 * 1024 identical chars) to the stream.
        writer.WriteStartObject();
        writer.WritePropertyName("Test");
        writer.WriteString(new string('c', 1024 * 1024));
        writer.WriteEndObject();
        writer.Flush();
        ms.Flush();

        // Parse the same payload twice, rewinding the stream between reads.
        ms.Position = 0;
        var json = context.Read(ms, "test");

        ms.Position = 0;
        var json2 = context.Read(ms, "test");

        // The value is expected to be stored compressed in both parses, and the
        // two compressed values must still compare equal.
        Assert.IsType<LazyCompressedStringValue>(json["Test"]);
        Assert.IsType<LazyCompressedStringValue>(json2["Test"]);
        Assert.Equal(json["Test"], json2["Test"]);
    }
}
// Applies a counters batch from the request body. A batch with writes goes
// through the transaction merger; a read-only batch is executed directly under
// a read transaction. The resulting counter details are written to the response.
public async Task Batch()
{
    using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        var countersBlittable = await context.ReadForMemoryAsync(RequestBodyStream(), "counters");

        var counterBatch = JsonDeserializationClient.CounterBatch(countersBlittable);
        var cmd = new ExecuteCounterBatchCommand(Database, counterBatch);

        if (cmd.HasWrites)
        {
            try
            {
                await Database.TxMerger.Enqueue(cmd);
            }
            catch (DocumentDoesNotExistException)
            {
                // Counters belong to documents; answer 404 when the target document is gone.
                HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                throw;
            }
        }
        else
        {
            // No writes — just gather the requested values under a read transaction.
            using (context.OpenReadTransaction())
            {
                cmd.Execute(context);
            }
        }

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            context.Write(writer, cmd.CountersDetail.ToJson());
            writer.Flush();
        }
    }
}
/// <summary>
/// Reads a compare-exchange value by key (prefixed with the database name) and
/// writes its index, value and a Successful=true marker to the response.
/// </summary>
public Task GetCmpXchgValue()
{
    var prefix = Database.Name + "/";
    var key = prefix + GetStringQueryString("key");

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var result = ServerStore.Cluster.GetCmpXchg(context, key);

        HttpContext.Response.StatusCode = (int)HttpStatusCode.OK;

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            var payload = new DynamicJsonValue
            {
                [nameof(CmpXchgResult<object>.Index)] = result.Index,
                [nameof(CmpXchgResult<object>.Value)] = result.Value,
                [nameof(CmpXchgResult<object>.Successful)] = true
            };

            context.Write(writer, payload);
            writer.Flush();
        }

        return Task.CompletedTask;
    }
}
/// <summary>
/// Serializes a script-runner result as a UTF-8 JSON string of the shape
/// {"Result": ...} where the value is null, a boolean, a string, or the
/// object translation of the raw JS value.
/// </summary>
public static string ConvertResultToString(ScriptRunnerResult result)
{
    // FIX: the MemoryStream was previously created outside any 'using' and
    // never disposed; wrap it so the stream is always released.
    using (var ms = new MemoryStream())
    {
        using (var ctx = JsonOperationContext.ShortTermSingleUse())
        using (var writer = new BlittableJsonTextWriter(ctx, ms))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("Result");

            if (result.IsNull)
            {
                writer.WriteNull();
            }
            else if (result.RawJsValue.IsBoolean())
            {
                writer.WriteBool(result.RawJsValue.AsBoolean());
            }
            else if (result.RawJsValue.IsString())
            {
                writer.WriteString(result.RawJsValue.AsString());
            }
            else
            {
                // Anything else (objects, numbers, arrays) goes through the
                // blittable translation.
                writer.WriteObject(result.TranslateToObject(ctx));
            }

            writer.WriteEndObject();
            writer.Flush();
        }

        // Decode after the writer is flushed/disposed so the buffer is complete.
        return Encoding.UTF8.GetString(ms.ToArray());
    }
}
// Writes the heartbeats TCP header to the remote node. When 'drop' is true the
// operation is Drop and an Info field explains the version mismatch (using
// 'remoteVersion'); otherwise it is a plain Heartbeats header. DatabaseName is
// deliberately written as null for this connection type.
private void WriteOperationHeaderToRemote(BlittableJsonTextWriter writer, int remoteVersion = -1, bool drop = false)
{
    var operation = drop ? TcpConnectionHeaderMessage.OperationTypes.Drop : TcpConnectionHeaderMessage.OperationTypes.Heartbeats;

    writer.WriteStartObject();
    {
        writer.WritePropertyName(nameof(TcpConnectionHeaderMessage.Operation));
        writer.WriteString(operation.ToString());
        writer.WriteComma();

        writer.WritePropertyName(nameof(TcpConnectionHeaderMessage.OperationVersion));
        writer.WriteInteger(TcpConnectionHeaderMessage.HeartbeatsTcpVersion);
        writer.WriteComma();

        writer.WritePropertyName(nameof(TcpConnectionHeaderMessage.DatabaseName));
        // Cast disambiguates the overload: an explicit null string value.
        writer.WriteString((string)null);

        if (drop)
        {
            writer.WriteComma();
            writer.WritePropertyName(nameof(TcpConnectionHeaderMessage.Info));
            writer.WriteString($"Couldn't agree on heartbeats tcp version ours:{TcpConnectionHeaderMessage.HeartbeatsTcpVersion} theirs:{remoteVersion}");
        }
    }
    writer.WriteEndObject();
    writer.Flush();
}
// Creates or updates a database document. Validates the resource name, checks
// the optional ETag request header for optimistic concurrency, writes the
// document while the database is unloaded and locked, and answers 201 with the
// new ETag and document key.
public Task Put()
{
    var name = RouteMatch.Url.Substring(RouteMatch.MatchLength);

    string errorMessage;
    if (
        ResourceNameValidator.IsValidResourceName(name, ServerStore.Configuration.Core.DataDirectory, out errorMessage) == false)
    {
        HttpContext.Response.StatusCode = 400;
        return (HttpContext.Response.WriteAsync(errorMessage));
    }

    TransactionOperationContext context;
    using (ServerStore.ContextPool.AllocateOperationContext(out context))
    {
        var dbId = Constants.Database.Prefix + name;

        // An ETag header makes the write concurrency-checked.
        var etagAsString = HttpContext.Request.Headers["ETag"];
        long etag;
        var hasEtagInRequest = long.TryParse(etagAsString, out etag);

        using (context.OpenReadTransaction())
        {
            var existingDatabase = ServerStore.Read(context, dbId);
            if (
                DatabaseHelper.CheckExistingDatabaseName(existingDatabase, name, dbId, etagAsString, out errorMessage) == false)
            {
                HttpContext.Response.StatusCode = 400;
                return (HttpContext.Response.WriteAsync(errorMessage));
            }
        }

        var dbDoc = context.ReadForDisk(RequestBodyStream(), dbId);

        //TODO: Fix this
        //int size;
        //var buffer = context.GetNativeTempBuffer(dbDoc.SizeInBytes, out size);
        //dbDoc.CopyTo(buffer);

        //var reader = new BlittableJsonReaderObject(buffer, dbDoc.SizeInBytes, context);

        //object result;
        //if (reader.TryGetMember("SecureSettings", out result))
        //{
        //    var secureSettings = (BlittableJsonReaderObject) result;
        //    secureSettings.Unloading = new DynamicJsonValue(secureSettings);
        //    foreach (var propertyName in secureSettings.GetPropertyNames())
        //    {
        //        secureSettings.TryGetMember(propertyName, out result);
        //        // protect
        //        secureSettings.Unloading[propertyName] = "fooo";
        //    }
        //}

        long? newEtag = null;
        // Unload the database and hold the lock during the write so no running
        // instance observes a half-updated document.
        ServerStore.DatabasesLandlord.UnloadAndLock(name, () =>
        {
            using (var tx = context.OpenWriteTransaction())
            {
                // With an ETag in the request the write is concurrency-checked.
                newEtag = hasEtagInRequest ? ServerStore.Write(context, dbId, dbDoc, etag) : ServerStore.Write(context, dbId, dbDoc);

                tx.Commit();
            }
        });

        HttpContext.Response.StatusCode = 201;

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            context.Write(writer, new DynamicJsonValue
            {
                ["ETag"] = newEtag,
                ["Key"] = dbId
            });
            writer.Flush();
        }
    }

    return (Task.CompletedTask);
}
// Pumps change notifications to the websocket client. Messages are batched into
// an in-memory JSON array — up to ~16KB per frame, or until the queue drains,
// or 5 seconds elapse — then sent as a single text frame. Loops until disposal
// is requested.
public async Task StartSendingNotifications(bool throttleConnection)
{
    using (_documentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out JsonOperationContext context))
    {
        using (var ms = new MemoryStream())
        {
            var sp = Stopwatch.StartNew();
            while (true)
            {
                if (_disposeToken.IsCancellationRequested)
                {
                    break;
                }

                // Reuse the same buffer for every frame.
                ms.SetLength(0);
                using (var writer = new BlittableJsonTextWriter(context, ms))
                {
                    sp.Restart();
                    var first = true;
                    writer.WriteStartArray();
                    do
                    {
                        var value = await GetNextMessage(throttleConnection);
                        if (value == null || _disposeToken.IsCancellationRequested)
                        {
                            break;
                        }

                        if (first == false)
                        {
                            writer.WriteComma();
                        }

                        first = false;
                        context.Write(writer, value);
                        writer.Flush();

                        // Cap the frame size at ~16KB.
                        if (ms.Length > 16 * 1024)
                        {
                            break;
                        }
                    } while (_sendQueue.Count > 0 && sp.Elapsed < TimeSpan.FromSeconds(5));

                    writer.WriteEndArray();
                }

                if (_disposeToken.IsCancellationRequested)
                {
                    break;
                }

                ms.TryGetBuffer(out ArraySegment<byte> bytes);
                await _webSocket.SendAsync(bytes, WebSocketMessageType.Text, true, _disposeToken);
            }
        }
    }
}
// Serializes 'value' into the shared buffered writer, flushes the writer, and
// then pushes the buffered bytes out to the network.
private async Task WriteJsonAsync(DynamicJsonValue value)
{
    TcpConnection.Context.Write(_bufferedWriter, value);
    _bufferedWriter.Flush();
    await FlushBufferToNetwork();
}
// Leader-only endpoint that applies a forwarded cluster (Rachis) command. The
// command is deserialized, validated against the caller's privileges, pushed
// through Raft, and the result is serialized into a checked-out memory stream
// first — so a serialization failure never leaves a half-written HTTP response.
public async Task ApplyCommand()
{
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        if (ServerStore.IsLeader() == false)
        {
            throw new NoLeaderException("Not a leader, cannot accept commands.");
        }

        HttpContext.Response.Headers["Reached-Leader"] = "true";

        var commandJson = await context.ReadForMemoryAsync(RequestBodyStream(), "external/rachis/command");

        CommandBase command;
        try
        {
            command = CommandBase.CreateFrom(commandJson);
        }
        catch (InvalidOperationException e)
        {
            // An unknown command type may indicate a client/server version mismatch.
            RequestRouter.AssertClientVersion(HttpContext, e);
            throw;
        }

        switch (command)
        {
            case AddDatabaseCommand addDatabase:
                // A new database with no explicit topology gets its nodes assigned here.
                if (addDatabase.Record.Topology.Count == 0)
                {
                    ServerStore.AssignNodesToDatabase(ServerStore.GetClusterTopology(), addDatabase.Record);
                }
                break;
            case AddOrUpdateCompareExchangeBatchCommand batchCmpExchange:
                batchCmpExchange.ContextToWriteResult = context;
                break;
            case CompareExchangeCommandBase cmpExchange:
                cmpExchange.ContextToWriteResult = context;
                break;
        }

        var isClusterAdmin = IsClusterAdmin();
        command.VerifyCanExecuteCommand(ServerStore, context, isClusterAdmin);

        var (etag, result) = await ServerStore.Engine.PutAsync(command);
        HttpContext.Response.StatusCode = (int)HttpStatusCode.OK;

        var ms = context.CheckoutMemoryStream();
        try
        {
            using (var writer = new BlittableJsonTextWriter(context, ms))
            {
                context.Write(writer, new DynamicJsonValue
                {
                    [nameof(ServerStore.PutRaftCommandResult.RaftCommandIndex)] = etag,
                    [nameof(ServerStore.PutRaftCommandResult.Data)] = result
                });
                writer.Flush();
            }

            // now that we know that we properly serialized it
            ms.Position = 0;
            await ms.CopyToAsync(ResponseBodyStream());
        }
        finally
        {
            context.ReturnMemoryStream(ms);
        }
    }
}
// Test helper: polls database statistics until every enabled index is non-stale
// and no side-by-side "ReplacementOf/" index remains, or until the timeout
// elapses. On failure it dumps index errors/stats/performance into a temp
// .json file and throws an exception pointing at that file.
public static void WaitForIndexing(IDocumentStore store, string dbName = null, TimeSpan? timeout = null)
{
    var admin = store.Maintenance.ForDatabase(dbName);

    // Use a much longer default when a debugger is attached so breakpoints
    // don't trip the timeout.
    timeout = timeout ?? (Debugger.IsAttached ? TimeSpan.FromMinutes(15) : TimeSpan.FromMinutes(1));

    var sp = Stopwatch.StartNew();
    while (sp.Elapsed < timeout.Value)
    {
        var databaseStatistics = admin.Send(new GetStatisticsOperation());
        var indexes = databaseStatistics.Indexes
            .Where(x => x.State != IndexState.Disabled);

        if (indexes.All(x => x.IsStale == false && x.Name.StartsWith("ReplacementOf/") == false))
        {
            return;
        }

        // An errored index will never become non-stale; stop polling and report below.
        if (databaseStatistics.Indexes.Any(x => x.State == IndexState.Error))
        {
            break;
        }

        Thread.Sleep(32);
    }

    // Failure path: gather diagnostics and persist them for the exception message.
    var perf = admin.Send(new GetIndexPerformanceStatisticsOperation());
    var errors = admin.Send(new GetIndexErrorsOperation());
    var stats = admin.Send(new GetIndexesStatisticsOperation());

    var total = new
    {
        Errors = errors,
        Stats = stats,
        Performance = perf
    };

    var file = Path.GetTempFileName() + ".json";
    using (var stream = File.Open(file, FileMode.OpenOrCreate))
    using (var context = JsonOperationContext.ShortTermSingleUse())
    using (var writer = new BlittableJsonTextWriter(context, stream))
    {
        var djv = (DynamicJsonValue)TypeConverter.ToBlittableSupportedType(total);
        var json = context.ReadObject(djv, "errors");
        writer.WriteObject(json);
        writer.Flush();
    }

    var statistics = admin.Send(new GetStatisticsOperation());
    var corrupted = statistics.Indexes.Where(x => x.State == IndexState.Error).ToList();
    if (corrupted.Count > 0)
    {
        throw new InvalidOperationException(
            $"The following indexes are with error state: {string.Join(",", corrupted.Select(x => x.Name))} - details at " + file);
    }

    throw new TimeoutException("The indexes stayed stale for more than " + timeout.Value + ", stats at " + file);
}
// Executes a batch of GET/POST requests from the request body ("Requests"
// array) against this server's own routes, in-process, and streams back one
// combined response: {"Results": [{Result, StatusCode, Headers}, ...]}.
// Each inner request runs against a synthetic HttpContext whose response is
// redirected into this response's stream; statement order here IS the wire
// format, so be careful when changing it.
public async Task PostMultiGet()
{
    using (ContextPool.AllocateOperationContext(out JsonOperationContext context))
    {
        var input = await context.ReadForMemoryAsync(RequestBodyStream(), "multi_get");
        if (input.TryGet("Requests", out BlittableJsonReaderArray requests) == false)
        {
            ThrowRequiredPropertyNameInRequest("Requests");
        }

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("Results");
            writer.WriteStartArray();

            var resultProperty = context.GetLazyStringForFieldWithCaching(nameof(GetResponse.Result));
            var statusProperty = context.GetLazyStringForFieldWithCaching(nameof(GetResponse.StatusCode));
            var headersProperty = context.GetLazyStringForFieldWithCaching(nameof(GetResponse.Headers));

            // Synthetic HttpContext whose response body is redirected into our
            // own response stream (so inner handlers write straight through).
            var features = new FeatureCollection(HttpContext.Features);
            var responseStream = new MultiGetHttpResponseStream(ResponseBodyStream());
            features.Set<IHttpResponseFeature>(new MultiGetHttpResponseFeature(responseStream));
            var httpContext = new DefaultHttpContext(features);
            var host = HttpContext.Request.Host;
            var scheme = HttpContext.Request.Scheme;

            for (int i = 0; i < requests.Length; i++)
            {
                var request = (BlittableJsonReaderObject)requests[i];

                if (i != 0)
                {
                    writer.WriteComma();
                }

                writer.WriteStartObject();

                // A request without Url/Query yields an empty result object.
                if (request.TryGet("Url", out string url) == false || request.TryGet("Query", out string query) == false)
                {
                    writer.WriteEndObject();
                    continue;
                }

                if (request.TryGet("Method", out string method) == false || string.IsNullOrEmpty(method))
                {
                    method = HttpMethod.Get.Method;
                }

                httpContext.Request.Method = method;

                var routeInformation = Server.Router.GetRoute(method, url, out RouteMatch localMatch);
                if (routeInformation == null)
                {
                    // Unroutable request: report BadRequest with an error payload.
                    writer.WritePropertyName(statusProperty);
                    writer.WriteInteger((int)HttpStatusCode.BadRequest);
                    writer.WritePropertyName(resultProperty);
                    context.Write(writer, new DynamicJsonValue
                    {
                        ["Error"] = $"There is no handler for path: {method} {url}{query}"
                    });
                    writer.WriteEndObject();
                    continue;
                }

                var requestHandler = routeInformation.GetRequestHandler();
                writer.WritePropertyName(resultProperty);
                // Flush so the inner handler's direct writes land after this property name.
                writer.Flush();

                // Reset the synthetic context for this inner request.
                httpContext.Response.StatusCode = 0;
                httpContext.Request.Headers.Clear();
                httpContext.Response.Headers.Clear();
                httpContext.Request.Host = host;
                httpContext.Request.Scheme = scheme;
                httpContext.Request.QueryString = new QueryString(query);
                if (request.TryGet("Headers", out BlittableJsonReaderObject headers))
                {
                    foreach (var header in headers.GetPropertyNames())
                    {
                        if (headers.TryGet(header, out string value) == false)
                        {
                            continue;
                        }

                        if (string.IsNullOrWhiteSpace(value))
                        {
                            continue;
                        }

                        httpContext.Request.Headers.Add(header, value);
                    }
                }

                // POST requests may carry a body: either a raw string or a JSON object.
                if (method == HttpMethod.Post.Method && request.TryGet("Content", out object content))
                {
                    if (content is LazyStringValue)
                    {
                        var requestBody = GetRequestBody(content.ToString());
                        HttpContext.Response.RegisterForDispose(requestBody);
                        httpContext.Request.Body = requestBody;
                    }
                    else
                    {
                        var requestBody = new MemoryStream();
                        var contentWriter = new BlittableJsonTextWriter(context, requestBody);
                        context.Write(contentWriter, (BlittableJsonReaderObject)content);
                        contentWriter.Flush();
                        HttpContext.Response.RegisterForDispose(requestBody);
                        httpContext.Request.Body = requestBody;
                        httpContext.Request.Body.Position = 0;
                    }
                }

                var bytesWrittenBeforeRequest = responseStream.BytesWritten;
                int statusCode;
                try
                {
                    await requestHandler(new RequestHandlerContext
                    {
                        Database = Database,
                        RavenServer = Server,
                        RouteMatch = localMatch,
                        HttpContext = httpContext
                    });

                    // A handler that wrote nothing produces an explicit null result.
                    if (bytesWrittenBeforeRequest == responseStream.BytesWritten)
                    {
                        writer.WriteNull();
                    }

                    statusCode = httpContext.Response.StatusCode == 0 ? (int)HttpStatusCode.OK : httpContext.Response.StatusCode;
                }
                catch (Exception e)
                {
                    // If the handler already wrote to the stream, the combined
                    // response is corrupt beyond repair — propagate.
                    if (bytesWrittenBeforeRequest != responseStream.BytesWritten)
                    {
                        throw;
                    }

                    statusCode = (int)HttpStatusCode.InternalServerError;

                    var djv = new DynamicJsonValue
                    {
                        [nameof(ExceptionDispatcher.ExceptionSchema.Url)] = $"{url}{query}",
                        [nameof(ExceptionDispatcher.ExceptionSchema.Type)] = e.GetType().FullName,
                        [nameof(ExceptionDispatcher.ExceptionSchema.Message)] = e.Message,
                        [nameof(ExceptionDispatcher.ExceptionSchema.Error)] = e.ToString()
                    };

                    using (var json = context.ReadObject(djv, "exception"))
                        writer.WriteObject(json);
                }

                writer.WriteComma();
                writer.WritePropertyName(statusProperty);
                writer.WriteInteger(statusCode);
                writer.WriteComma();

                // Echo the inner response headers back to the client.
                writer.WritePropertyName(headersProperty);
                writer.WriteStartObject();
                bool headerStart = true;
                foreach (var header in httpContext.Response.Headers)
                {
                    foreach (var value in header.Value)
                    {
                        if (headerStart == false)
                        {
                            writer.WriteComma();
                        }

                        headerStart = false;
                        writer.WritePropertyName(header.Key);
                        writer.WriteString(value);
                    }
                }
                writer.WriteEndObject();

                writer.WriteEndObject();
            }

            writer.WriteEndArray();
            writer.WriteEndObject();
        }
    }
}
// Replicates index and transformer definitions to the destination node. Reads
// metadata entries after the last sent etag (in pages of up to 1024), exports
// each definition into a reusable memory stream, collects them into a batch and
// sends it. Loops until the metadata store is drained or the time budget (~1s,
// or 60s under a debugger) runs out. Batched definitions are always disposed in
// the finally block.
public void ExecuteReplicationOnce()
{
    _orderedReplicaItems.Clear();
    try
    {
        var sp = Stopwatch.StartNew();
        var timeout = Debugger.IsAttached ? 60 * 1000 : 1000;

        var configurationContext = _parent._configurationContext;
        using (configurationContext.OpenReadTransaction())
        {
            while (sp.ElapsedMilliseconds < timeout)
            {
                LastEtag = _parent._lastSentIndexOrTransformerEtag;

                _parent.CancellationToken.ThrowIfCancellationRequested();

                var indexAndTransformerMetadata = _parent._database.IndexMetadataPersistence.GetAfter(
                    configurationContext.Transaction.InnerTransaction, configurationContext, LastEtag + 1, 0, 1024);

                using (var stream = new MemoryStream())
                {
                    foreach (var item in indexAndTransformerMetadata)
                    {
                        _parent.CancellationToken.ThrowIfCancellationRequested();

                        // Reuse the stream for each definition.
                        stream.Position = 0;
                        using (var writer = new BlittableJsonTextWriter(configurationContext, stream))
                        {
                            switch (item.Type)
                            {
                                case IndexEntryType.Index:
                                    var index = _parent._database.IndexStore.GetIndex(item.Id);
                                    if (index == null) //precaution
                                    {
                                        throw new InvalidDataException(
                                            $"Index with name {item.Name} has metadata, but is not at the index store. This is not supposed to happen and is likely a bug.");
                                    }

                                    try
                                    {
                                        IndexProcessor.Export(writer, index, configurationContext, false);
                                    }
                                    catch (InvalidOperationException e)
                                    {
                                        // Export failures are logged and skipped, not fatal.
                                        if (_log.IsInfoEnabled)
                                        {
                                            _log.Info(
                                                $"Failed to export index definition for replication. Index name = {item.Name}", e);
                                        }
                                    }
                                    break;
                                case IndexEntryType.Transformer:
                                    var transformer = _parent._database.TransformerStore.GetTransformer(item.Id);
                                    if (transformer == null) //precaution
                                    {
                                        throw new InvalidDataException(
                                            $"Transformer with name {item.Name} has metadata, but is not at the transformer store. This is not supposed to happen and is likely a bug.");
                                    }

                                    try
                                    {
                                        TransformerProcessor.Export(writer, transformer, configurationContext);
                                    }
                                    catch (InvalidOperationException e)
                                    {
                                        // Export failures are logged and skipped, not fatal.
                                        if (_log.IsInfoEnabled)
                                        {
                                            _log.Info(
                                                $"Failed to export transformer definition for replication. Transformer name = {item.Name}", e);
                                        }
                                    }
                                    break;
                                default:
                                    throw new ArgumentOutOfRangeException(nameof(item),
                                        "Unexpected item type in index/transformer metadata. This is not supposed to happen.");
                            }

                            writer.Flush();

                            // Rewind and re-read the exported definition into a
                            // blittable object owned by the batch item.
                            stream.Position = 0;
                            var newItem = new ReplicationBatchIndexItem
                            {
                                Name = item.Name,
                                ChangeVector = item.ChangeVector,
                                Etag = item.Etag,
                                Type = (int)item.Type,
                                Definition = configurationContext.ReadForMemory(stream, "Index/Transformer Replication - Reading definition into memory")
                            };

                            AddReplicationItemToBatch(newItem);
                        }
                    }
                }

                // if we are at the end, we are done
                if (LastEtag <= _parent._database.IndexMetadataPersistence.ReadLastEtag(
                    configurationContext.Transaction.InnerTransaction))
                {
                    break;
                }
            }

            if (_log.IsInfoEnabled)
            {
                _log.Info(
                    $"Found {_orderedReplicaItems.Count:#,#;;0} indexes/transformers to replicate to {_parent.Destination.Database} @ {_parent.Destination.Url} in {sp.ElapsedMilliseconds:#,#;;0} ms.");
            }

            _parent.CancellationToken.ThrowIfCancellationRequested();

            try
            {
                using (_parent._documentsContext.OpenReadTransaction())
                    SendIndexTransformerBatch();
            }
            catch (Exception e)
            {
                if (_log.IsInfoEnabled)
                {
                    _log.Info("Failed to send index/transformer replication batch", e);
                }
                throw;
            }
        }
    }
    finally
    {
        //release memory at the end of the operation
        foreach (var item in _orderedReplicaItems)
        {
            item.Value.Definition.Dispose();
        }
        _orderedReplicaItems.Clear();
    }
}
// Finishes a bulk insert: closes the streamed JSON array, flushes the stream,
// signals the server the stream is done, and awaits the server-side execute
// task. Flush failures are captured and reported together with any execute and
// operation errors as one BulkInsertAbortedException.
public async Task DisposeAsync()
{
    try
    {
        Exception flushEx = null;

        if (_stream != null)
        {
            try
            {
                _jsonWriter.WriteEndArray();
                _jsonWriter.Flush();
                await _stream.FlushAsync(_token).ConfigureAwait(false);
            }
            catch (Exception e)
            {
                // Remember the flush failure; it is surfaced below only if the
                // execute task also failed.
                flushEx = e;
            }
        }

        _streamExposerContent.Done();

        if (_operationId == -1)
        {
            // closing without calling a single store.
            return;
        }

        if (_bulkInsertExecuteTask != null)
        {
            try
            {
                await _bulkInsertExecuteTask.ConfigureAwait(false);
            }
            catch (Exception e)
            {
                // Aggregate: execute failure, then flush failure, then the
                // server-side operation error — reversed so the operation error
                // (when present) leads the AggregateException.
                var errors = new List<Exception>(3) { e };
                if (flushEx != null)
                {
                    errors.Add(flushEx);
                }

                var error = await GetExceptionFromOperation().ConfigureAwait(false);
                if (error != null)
                {
                    errors.Add(error);
                }

                errors.Reverse();
                throw new BulkInsertAbortedException("Failed to execute bulk insert", new AggregateException(errors));
            }
        }
    }
    finally
    {
        _streamExposerContent?.Dispose();
        _resetContext.Dispose();
    }
}
// Handles an incoming replication TCP connection: reads the source's
// "get last etag" request, validates the connection (recording rejections in
// _incomingRejectionStats before rethrowing), replies with the last accepted
// document/index-transformer etags and change vectors, and registers an
// IncomingReplicationHandler — guarding against two concurrent connection
// attempts from the same source database.
public void AcceptIncomingConnection(TcpConnectionOptions tcpConnectionOptions)
{
    ReplicationLatestEtagRequest getLatestEtagMessage;
    using (var readerObject = tcpConnectionOptions.MultiDocumentParser.ParseToMemory("IncomingReplication/get-last-etag-message read"))
    {
        getLatestEtagMessage = JsonDeserializationServer.ReplicationLatestEtagRequest(readerObject);
        if (_log.IsInfoEnabled)
        {
            _log.Info($"GetLastEtag: {getLatestEtagMessage.SourceMachineName} / {getLatestEtagMessage.SourceDatabaseName} ({getLatestEtagMessage.SourceDatabaseId}) - {getLatestEtagMessage.SourceUrl}");
        }
    }

    var connectionInfo = IncomingConnectionInfo.FromGetLatestEtag(getLatestEtagMessage);
    try
    {
        AssertValidConnection(connectionInfo);
    }
    catch (Exception e)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Connection from [{connectionInfo}] is rejected.", e);
        }

        // Record the rejection per connection source, then propagate.
        var incomingConnectionRejectionInfos = _incomingRejectionStats.GetOrAdd(connectionInfo,
            _ => new ConcurrentQueue<IncomingConnectionRejectionInfo>());
        incomingConnectionRejectionInfos.Enqueue(new IncomingConnectionRejectionInfo { Reason = e.ToString() });

        throw;
    }

    DocumentsOperationContext documentsOperationContext;
    TransactionOperationContext configurationContext;
    using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out documentsOperationContext))
    using (_database.ConfigurationStorage.ContextPool.AllocateOperationContext(out configurationContext))
    using (var writer = new BlittableJsonTextWriter(documentsOperationContext, tcpConnectionOptions.Stream))
    using (var docTx = documentsOperationContext.OpenReadTransaction())
    using (var configTx = configurationContext.OpenReadTransaction())
    {
        // Serialize the documents change vector.
        var documentsChangeVector = new DynamicJsonArray();
        foreach (var changeVectorEntry in _database.DocumentsStorage.GetDatabaseChangeVector(documentsOperationContext))
        {
            documentsChangeVector.Add(new DynamicJsonValue
            {
                [nameof(ChangeVectorEntry.DbId)] = changeVectorEntry.DbId.ToString(),
                [nameof(ChangeVectorEntry.Etag)] = changeVectorEntry.Etag
            });
        }

        // Serialize the indexes/transformers change vector.
        var indexesChangeVector = new DynamicJsonArray();
        var changeVectorAsArray = _database.IndexMetadataPersistence.GetIndexesAndTransformersChangeVector(configTx.InnerTransaction);
        foreach (var changeVectorEntry in changeVectorAsArray)
        {
            indexesChangeVector.Add(new DynamicJsonValue
            {
                [nameof(ChangeVectorEntry.DbId)] = changeVectorEntry.DbId.ToString(),
                [nameof(ChangeVectorEntry.Etag)] = changeVectorEntry.Etag
            });
        }

        var lastEtagFromSrc = _database.DocumentsStorage.GetLastReplicateEtagFrom(documentsOperationContext, getLatestEtagMessage.SourceDatabaseId);
        if (_log.IsInfoEnabled)
        {
            _log.Info($"GetLastEtag response, last etag: {lastEtagFromSrc}");
        }

        // Reply to the source with everything it needs to resume replication.
        documentsOperationContext.Write(writer, new DynamicJsonValue
        {
            [nameof(ReplicationMessageReply.Type)] = "Ok",
            [nameof(ReplicationMessageReply.MessageType)] = ReplicationMessageType.Heartbeat,
            [nameof(ReplicationMessageReply.LastEtagAccepted)] = lastEtagFromSrc,
            [nameof(ReplicationMessageReply.LastIndexTransformerEtagAccepted)] = _database.IndexMetadataPersistence.GetLastReplicateEtagFrom(configTx.InnerTransaction, getLatestEtagMessage.SourceDatabaseId),
            [nameof(ReplicationMessageReply.DocumentsChangeVector)] = documentsChangeVector,
            [nameof(ReplicationMessageReply.IndexTransformerChangeVector)] = indexesChangeVector
        });
        writer.Flush();
    }

    var newIncoming = new IncomingReplicationHandler(
        tcpConnectionOptions.MultiDocumentParser,
        _database,
        tcpConnectionOptions.TcpClient,
        tcpConnectionOptions.Stream,
        getLatestEtagMessage,
        this);

    newIncoming.Failed += OnIncomingReceiveFailed;
    newIncoming.DocumentsReceived += OnIncomingReceiveSucceeded;

    if (_log.IsInfoEnabled)
    {
        _log.Info($"Initialized document replication connection from {connectionInfo.SourceDatabaseName} located at {connectionInfo.SourceUrl}", null);
    }

    // need to safeguard against two concurrent connection attempts
    var newConnection = _incoming.GetOrAdd(newIncoming.ConnectionInfo.SourceDatabaseId, newIncoming);
    if (newConnection == newIncoming)
    {
        newIncoming.Start();
    }
    else
    {
        // Another connection from this source won the race; drop ours.
        newIncoming.Dispose();
    }
}
/// <summary>
/// Accepts an incoming replication TCP connection: reads the source's latest-etag request,
/// validates the connection, replies with a heartbeat containing the last accepted etag and
/// the database change vector, then registers and starts an <c>IncomingReplicationHandler</c>.
/// On rejection or reply failure the TCP connection is disposed and the exception rethrown.
/// </summary>
public void AcceptIncomingConnection(TcpConnectionOptions tcpConnectionOptions)
{
    // Parse the handshake message the source sends first on the stream.
    ReplicationLatestEtagRequest getLatestEtagMessage;
    using (tcpConnectionOptions.ContextPool.AllocateOperationContext(out JsonOperationContext context))
    using (var readerObject = context.ParseToMemory(
        tcpConnectionOptions.Stream,
        "IncomingReplication/get-last-etag-message read",
        BlittableJsonDocumentBuilder.UsageMode.None,
        tcpConnectionOptions.PinnedBuffer))
    {
        getLatestEtagMessage = JsonDeserializationServer.ReplicationLatestEtagRequest(readerObject);
        if (_log.IsInfoEnabled)
        {
            _log.Info(
                $"GetLastEtag: {getLatestEtagMessage.SourceTag}({getLatestEtagMessage.SourceMachineName}) / {getLatestEtagMessage.SourceDatabaseName} ({getLatestEtagMessage.SourceDatabaseId}) - {getLatestEtagMessage.SourceUrl}");
        }
    }
    var connectionInfo = IncomingConnectionInfo.FromGetLatestEtag(getLatestEtagMessage);
    try
    {
        AssertValidConnection(connectionInfo);
    }
    catch (Exception e)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Connection from [{connectionInfo}] is rejected.", e);
        }
        // Record the rejection so it can be surfaced in stats/diagnostics.
        var incomingConnectionRejectionInfos = _incomingRejectionStats.GetOrAdd(connectionInfo,
            _ => new ConcurrentQueue <IncomingConnectionRejectionInfo>());
        incomingConnectionRejectionInfos.Enqueue(new IncomingConnectionRejectionInfo { Reason = e.ToString() });
        try
        {
            tcpConnectionOptions.Dispose();
        }
        catch
        {
            // do nothing
        }
        throw;
    }
    try
    {
        // Reply with a heartbeat carrying the last etag we accepted from this source
        // and our current database change vector, inside read transactions.
        using (Database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsOperationContext))
        using (Database.ConfigurationStorage.ContextPool.AllocateOperationContext(out TransactionOperationContext configurationContext))
        using (var writer = new BlittableJsonTextWriter(documentsOperationContext, tcpConnectionOptions.Stream))
        using (documentsOperationContext.OpenReadTransaction())
        using (configurationContext.OpenReadTransaction())
        {
            var changeVector = DocumentsStorage.GetDatabaseChangeVector(documentsOperationContext);
            var lastEtagFromSrc = Database.DocumentsStorage.GetLastReplicateEtagFrom(
                documentsOperationContext, getLatestEtagMessage.SourceDatabaseId);
            if (_log.IsInfoEnabled)
            {
                _log.Info($"GetLastEtag response, last etag: {lastEtagFromSrc}");
            }
            var response = new DynamicJsonValue
            {
                [nameof(ReplicationMessageReply.Type)] = "Ok",
                [nameof(ReplicationMessageReply.MessageType)] = ReplicationMessageType.Heartbeat,
                [nameof(ReplicationMessageReply.LastEtagAccepted)] = lastEtagFromSrc,
                [nameof(ReplicationMessageReply.NodeTag)] = _server.NodeTag,
                [nameof(ReplicationMessageReply.DatabaseChangeVector)] = changeVector
            };
            documentsOperationContext.Write(writer, response);
            writer.Flush();
        }
    }
    catch (Exception)
    {
        // If we cannot reply, tear down the connection and let the caller observe the failure.
        try
        {
            tcpConnectionOptions.Dispose();
        }
        catch (Exception)
        {
            // do nothing
        }
        throw;
    }
    var newIncoming = new IncomingReplicationHandler(
        tcpConnectionOptions,
        getLatestEtagMessage,
        this);
    newIncoming.Failed += OnIncomingReceiveFailed;
    newIncoming.DocumentsReceived += OnIncomingReceiveSucceeded;
    if (_log.IsInfoEnabled)
    {
        _log.Info(
            $"Initialized document replication connection from {connectionInfo.SourceDatabaseName} located at {connectionInfo.SourceUrl}");
    }
    // need to safeguard against two concurrent connection attempts
    var newConnection = _incoming.GetOrAdd(newIncoming.ConnectionInfo.SourceDatabaseId, newIncoming);
    if (newConnection == newIncoming)
    {
        newIncoming.Start();
        IncomingReplicationAdded?.Invoke(newIncoming);
        ForceTryReconnectAll();
    }
    else
    {
        // Another connection from the same source won the race; drop the new one.
        newIncoming.Dispose();
    }
}
/// <summary>
/// Connects to the raven traffic event source and records all the requests to the file defined in the config.
/// </summary>
/// <param name="config">configuration containing the connection, the file to write to, etc.</param>
/// <param name="store">the store to work with</param>
private async Task RecordRequests(TrafficToolConfiguration config, IDocumentStore store)
{
    var id = Guid.NewGuid().ToString();
    using (var client = new ClientWebSocket())
    {
        var url = store.Urls.First() + "/admin/traffic-watch";
        var uri = new Uri(url.ToWebSocketPath());
        await client.ConnectAsync(uri, CancellationToken.None)
            .ConfigureAwait(false);

        // record traffic no more than 7 days
        var day = 24 * 60 * 60;
        var timeout = (int)config.Timeout.TotalMilliseconds / 1000;
        timeout = Math.Min(timeout, 7 * day);
        if (timeout <= 0)
        {
            timeout = 7 * day;
        }

        try
        {
            string resourceName = config.ResourceName ?? "N/A";
            var connectMessage = new DynamicJsonValue
            {
                ["Id"] = id,
                ["DatabaseName"] = resourceName,
                ["Timeout"] = timeout
            };

            // FIX: the pooled operation context must stay alive for the entire session.
            // Previously it was returned to the pool right after the handshake was sent,
            // while Receive(client, context) below kept using it (use-after-dispose of a
            // pooled context that may be handed to another caller concurrently).
            using (_jsonContextPool.AllocateOperationContext(out JsonOperationContext context))
            {
                // Send the connect/handshake message.
                using (var stream = new MemoryStream())
                using (var writer = new BlittableJsonTextWriter(context, stream))
                {
                    context.Write(writer, connectMessage);
                    writer.Flush();
                    stream.TryGetBuffer(out ArraySegment <byte> bytes);
                    await client.SendAsync(bytes, WebSocketMessageType.Text, true, CancellationToken.None)
                        .ConfigureAwait(false);
                }

                var requestsCounter = 0;
                using (var fileStream = File.Create(config.RecordFilePath))
                {
                    Stream finalStream = fileStream;
                    if (config.IsCompressed)
                    {
                        finalStream = new GZipStream(fileStream, CompressionMode.Compress, leaveOpen: true);
                    }

                    using (var streamWriter = new StreamWriter(finalStream))
                    {
                        var jsonWriter = new JsonTextWriter(streamWriter)
                        {
                            Formatting = Formatting.Indented
                        };
                        jsonWriter.WriteStartArray();
                        var sp = Stopwatch.StartNew();

                        while (true)
                        {
                            using (var reader = await Receive(client, context))
                            {
                                if (reader == null)
                                {
                                    // server asked to close connection
                                    break;
                                }

                                string type;
                                if (reader.TryGet("Type", out type))
                                {
                                    if (type.Equals("Heartbeat"))
                                    {
                                        continue;
                                    }
                                }

                                string error;
                                if (reader.TryGet("Error", out error))
                                {
                                    throw new InvalidOperationException("Server returned error: " + error);
                                }

                                var notification = new TrafficWatchChange();
                                notification.TimeStamp = GetDateTimeFromJson(reader, "TimeStamp");
                                notification.RequestId = GetIntFromJson(reader, "RequestId");
                                notification.HttpMethod = GetStringFromJson(reader, "HttpMethod");
                                notification.ElapsedMilliseconds = GetIntFromJson(reader, "ElapsedMilliseconds");
                                notification.ResponseStatusCode = GetIntFromJson(reader, "ResponseStatusCode");
                                notification.TenantName = GetStringFromJson(reader, "TenantName");
                                notification.CustomInfo = GetStringFromJson(reader, "CustomInfo");
                                notification.InnerRequestsCount = GetIntFromJson(reader, "InnerRequestsCount");
                                // notification.QueryTimings = GetRavenJObjectFromJson(reader, "QueryTimings"); // TODO (TrafficWatch) : Handle this both server and client sides

                                if (config.PrintOutput)
                                {
                                    Console.Write("\rRequest #{0} Stored...\t\t ", ++requestsCounter);
                                }

                                var jobj = JObject.FromObject(notification);
                                jobj.WriteTo(jsonWriter);

                                // Flush periodically so the file is usable even on abrupt exit.
                                if (sp.ElapsedMilliseconds > 5000)
                                {
                                    streamWriter.Flush();
                                    sp.Restart();
                                }
                            }
                        }

                        jsonWriter.WriteEndArray();
                        streamWriter.Flush();
                        if (config.IsCompressed)
                        {
                            // Finish the gzip stream before the underlying file is closed.
                            finalStream.Dispose();
                        }
                    }
                }
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine("\r\n\nError while reading messages from server : " + ex);
        }
        finally
        {
            Console.WriteLine("\r\n\nClosing connection to server...`");
            try
            {
                await client.CloseAsync(WebSocketCloseStatus.NormalClosure, "CLOSE_NORMAL", CancellationToken.None)
                    .ConfigureAwait(false);
            }
            catch
            {
                // ignored
            }
        }
    }
}
/// <summary>
/// Returns the server-wide tasks (backups and external replications) for the studio,
/// optionally filtered by task name and/or task type via the "name"/"type" query strings.
/// </summary>
/// <exception cref="ArgumentException">Thrown when "type" is supplied but is not a known task type.</exception>
public Task GetServerWideTasksForStudio()
{
    var taskName = GetStringQueryString("name", required: false);
    var typeAsString = GetStringQueryString("type", required: false);

    var tryParse = Enum.TryParse(typeAsString, out OngoingTaskType type);
    if (typeAsString != null && tryParse == false)
        throw new ArgumentException($"{typeAsString} is unknown task type.");

    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        var result = new ServerWideTasksResult();

        // Server-wide backup tasks.
        foreach (var raw in ServerStore.Cluster.GetServerWideConfigurations(context, OngoingTaskType.Backup, taskName))
        {
            var backup = JsonDeserializationCluster.ServerWideBackupConfiguration(raw);

            // When a name filter targets backups specifically, skip non-matching names.
            if (taskName != null && type == OngoingTaskType.Backup && string.Equals(taskName, backup.Name) == false)
                continue;

            result.Tasks.Add(new ServerWideTasksResult.ServerWideBackupTask
            {
                TaskId = backup.TaskId,
                TaskName = backup.Name,
                TaskState = backup.Disabled ? OngoingTaskState.Disabled : OngoingTaskState.Enabled,
                ExcludedDatabases = backup.ExcludedDatabases,
                BackupType = backup.BackupType,
                RetentionPolicy = backup.RetentionPolicy,
                BackupDestinations = backup.GetDestinations(),
                IsEncrypted = backup.BackupEncryptionSettings != null &&
                              backup.BackupEncryptionSettings.EncryptionMode != EncryptionMode.None
            });
        }

        // Server-wide external replication tasks.
        foreach (var raw in ServerStore.Cluster.GetServerWideConfigurations(context, OngoingTaskType.Replication, taskName))
        {
            var replication = JsonDeserializationCluster.ServerWideExternalReplication(raw);

            if (taskName != null && type == OngoingTaskType.Replication && string.Equals(taskName, replication.Name) == false)
                continue;

            result.Tasks.Add(new ServerWideTasksResult.ServerWideExternalReplicationTask
            {
                TaskId = replication.TaskId,
                TaskName = replication.Name,
                TaskState = replication.Disabled ? OngoingTaskState.Disabled : OngoingTaskState.Enabled,
                ExcludedDatabases = replication.ExcludedDatabases,
                TopologyDiscoveryUrls = replication.TopologyDiscoveryUrls,
                DelayReplicationFor = replication.DelayReplicationFor,
            });
        }

        context.Write(writer, result.ToJson());
        writer.Flush();
        return Task.CompletedTask;
    }
}
public Task GetClusterTopology() { using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context)) using (context.OpenReadTransaction()) { var topology = ServerStore.GetClusterTopology(context); var nodeTag = ServerStore.NodeTag; if (topology.AllNodes.Count == 0) { var tag = ServerStore.NodeTag ?? "A"; var serverUrl = ServerStore.GetNodeHttpServerUrl(HttpContext.Request.GetClientRequestedNodeUrl()); topology = new ClusterTopology( topology.TopologyId ?? "dummy", new Dictionary <string, string> { [tag] = serverUrl }, new Dictionary <string, string>(), new Dictionary <string, string>(), tag ); nodeTag = tag; } else { var isClientIndependent = GetBoolValueQueryString("clientIndependent", false) ?? false; if (isClientIndependent == false) { topology.ReplaceCurrentNodeUrlWithClientRequestedNodeUrlIfNecessary(ServerStore, HttpContext); } } HttpContext.Response.StatusCode = (int)HttpStatusCode.OK; using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream())) { var loadLicenseLimits = ServerStore.LoadLicenseLimits(); var nodeLicenseDetails = loadLicenseLimits == null ? null : DynamicJsonValue.Convert(loadLicenseLimits.NodeLicenseDetails); var json = new DynamicJsonValue { ["Topology"] = topology.ToSortedJson(), ["Leader"] = ServerStore.LeaderTag, ["LeaderShipDuration"] = ServerStore.Engine.CurrentLeader?.LeaderShipDuration, ["CurrentState"] = ServerStore.CurrentRachisState, ["NodeTag"] = nodeTag, ["CurrentTerm"] = ServerStore.Engine.CurrentTerm, ["NodeLicenseDetails"] = nodeLicenseDetails, [nameof(ServerStore.Engine.LastStateChangeReason)] = ServerStore.LastStateChangeReason() }; var clusterErrors = ServerStore.GetClusterErrors(); if (clusterErrors.Count > 0) { json["Errors"] = clusterErrors; } var nodesStatues = ServerStore.GetNodesStatuses(); json["Status"] = DynamicJsonValue.Convert(nodesStatues); context.Write(writer, json); writer.Flush(); } } return(Task.CompletedTask); }
/// <summary>
/// Verifies that a document large enough to be stored compressed round-trips correctly,
/// both through direct blittable serialization and through a subscription batch.
/// </summary>
public async Task SubscriptionShouldRespectDocumentsWithCompressedData()
{
    using (var documentStore = this.GetDocumentStore())
    {
        Server.ServerStore.Observer.Suspended = true;
        var originalDoc = new Doc
        {
            Id = "doc/1",
            // StrVal of 129 chars and a 1KB byte array push the document into compression territory.
            StrVal = new string(Enumerable.Repeat('.', 129).ToArray()),
            LongByteArray = Enumerable.Repeat((byte)2, 1024).ToArray()
        };

        using (var session = documentStore.OpenAsyncSession())
        {
            await session.StoreAsync(originalDoc);
            await session.SaveChangesAsync();
        }

        var database = await Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(documentStore.Database);
        using (database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
        using (context.OpenReadTransaction())
        {
            var doc = database.DocumentsStorage.Get(context, "doc/1");
            // FIX: the MemoryStream was previously never disposed.
            using (var ms = new MemoryStream())
            using (var newContext = JsonOperationContext.ShortTermSingleUse())
            using (var writer = new BlittableJsonTextWriter(newContext, ms))
            {
                writer.WriteDocument(newContext, doc, metadataOnly: false);
                writer.Flush();
                var bjro = GetReaderFromMemoryStream(ms, context);
                var deserializedDoc = (Doc)EntityToBlittable.ConvertToEntity(typeof(Doc), null, bjro, DocumentConventions.Default);

                Assert.Equal(originalDoc.StrVal, deserializedDoc.StrVal);
                // FIX: this previously compared originalDoc.LongByteArray against itself,
                // so the byte-array round-trip was never actually verified.
                Assert.Equal(originalDoc.LongByteArray, deserializedDoc.LongByteArray);
            }
        }

        var subscriptionCreationParams = new SubscriptionCreationOptions
        {
            Query = "from Docs",
        };
        var subsId = await documentStore.Subscriptions.CreateAsync(subscriptionCreationParams).ConfigureAwait(false);
        var amre = new AsyncManualResetEvent();
        using (var subscription = documentStore.Subscriptions.GetSubscriptionWorker <Doc>(new SubscriptionWorkerOptions(subsId)))
        {
            var t = subscription.Run(batch =>
            {
                var receivedDoc = batch.Items.First().Result;
                Assert.Equal(originalDoc.LongByteArray, receivedDoc.LongByteArray);
                Assert.Equal(originalDoc.StrVal, receivedDoc.StrVal);
                amre.Set();
            });

            try
            {
                Assert.True(await amre.WaitAsync(TimeSpan.FromSeconds(60)));
            }
            catch
            {
                // Surface the subscription's own failure (with its stack) if it faulted.
                if (t.IsFaulted)
                {
                    t.Wait();
                }
                throw;
            }
        }
    }
}
/// <summary>
/// Connects to the server's /admin/traffic-watch websocket and streams incoming
/// traffic-watch entries into rotating gzip-compressed JSON-array log files under _path.
/// A new file is started whenever the current one reaches 128MB. Entries can be filtered
/// by change type (_changeTypes) and by database name (_database). Runs until the
/// cancellation token is signaled; an ObjectDisposedException during shutdown is swallowed.
/// </summary>
public async Task ConnectInternal()
{
    try
    {
        var urlBuilder = new StringBuilder(_url).Append("/admin/traffic-watch");
        if (string.IsNullOrWhiteSpace(_database) == false)
        {
            urlBuilder.Append("?resourceName=").Append(_database);
        }
        var stringUrl = ToWebSocketPath(urlBuilder.ToString().ToLower());
        var url = new Uri(stringUrl, UriKind.Absolute);
        _client = new ClientWebSocket();
        if (_cert != null)
        {
            _client.Options.ClientCertificates.Add(_cert);
        }
        await _client.ConnectAsync(url, _cancellationTokenSource.Token).ConfigureAwait(false);
        _firstConnection = false;
        Console.WriteLine($"Connected to RavenDB server. Collecting traffic watch entries to {_path}");
        const int maxFileSize = 128 * 1024 * 1024;
        // Outer loop: one iteration per output log file (rotation at maxFileSize).
        while (_cancellationTokenSource.IsCancellationRequested == false)
        {
            string file = Path.Combine(_path, _logNameCreator.GetNewFileName());
            var state = new JsonParserState();
            using (var context = JsonOperationContext.ShortTermSingleUse())
            // Read
            await using (var stream = new WebSocketStream(_client, _cancellationTokenSource.Token))
            using (context.GetManagedBuffer(out var buffer))
            using (var parser = new UnmanagedJsonParser(context, state, "trafficwatch/receive"))
            using (var builder = new BlittableJsonDocumentBuilder(context, BlittableJsonDocumentBuilder.UsageMode.None, "readObject/singleResult", parser, state))
            // Write
            await using (var fileStream = new FileStream(file, FileMode.Append, FileAccess.Write, FileShare.Read, 32 * 1024, false))
            await using (var gZipStream = new GZipStream(fileStream, CompressionMode.Compress, false))
            using (var peepingTomStream = new PeepingTomStream(stream, context))
            using (var writer = new BlittableJsonTextWriter(context, gZipStream))
            {
                // Each file is a single JSON array of traffic-watch objects.
                writer.WriteStartArray();
                var isFirst = true;
                while (fileStream.Length < maxFileSize)
                {
                    if (_cancellationTokenSource.IsCancellationRequested)
                    {
                        // Close the array so the file stays valid JSON before shutting down.
                        writer.WriteEndArray();
                        break;
                    }
                    try
                    {
                        var flushCount = 0;
                        while (fileStream.Length < maxFileSize && _cancellationTokenSource.IsCancellationRequested == false)
                        {
                            // The builder is reused across documents; it must be reset and
                            // renewed before parsing each incoming object.
                            builder.Reset();
                            builder.Renew("trafficwatch/receive", BlittableJsonDocumentBuilder.UsageMode.None);
                            if (await UnmanagedJsonParserHelper.ReadAsync(peepingTomStream, parser, state, buffer).ConfigureAwait(false) == false)
                            {
                                continue;
                            }
                            await UnmanagedJsonParserHelper.ReadObjectAsync(builder, peepingTomStream, parser, buffer).ConfigureAwait(false);
                            using (var json = builder.CreateReader())
                            {
                                // Drop entries whose Type is missing or not in the requested set.
                                if (_changeTypes != null)
                                {
                                    if (json.TryGet("Type", out TrafficWatchChangeType type) == false)
                                    {
                                        continue;
                                    }
                                    if (_changeTypes.Contains(type) == false)
                                    {
                                        continue;
                                    }
                                }
                                // Drop entries for other databases when a database filter is set.
                                if (_database != null)
                                {
                                    if (json.TryGet("DatabaseName", out LazyStringValue databaseName) == false ||
                                        _database.Equals(databaseName, StringComparison.OrdinalIgnoreCase) == false)
                                    {
                                        continue;
                                    }
                                }
                                if (isFirst == false)
                                {
                                    writer.WriteComma();
                                }
                                isFirst = false;
                                if (_verbose)
                                {
                                    Console.WriteLine(json);
                                }
                                writer.WriteObject(json);
                                _errorCount = 0;
                                // Flush every 128 written entries (and on the very first one,
                                // since 0 % 128 == 0) to bound data loss on crash.
                                if (flushCount++ % 128 == 0)
                                {
                                    writer.Flush();
                                }
                            }
                        }
                    }
                    catch (Exception)
                    {
                        // Terminate the JSON array so the partial file is parseable, then rethrow.
                        writer.WriteEndArray();
                        throw;
                    }
                }
            }
        }
    }
    catch (ObjectDisposedException)
    {
        // closing
    }
}
/// <summary>
/// Handles a multi-get request: the body is a JSON array of sub-request descriptors
/// (Url, Query, optional Method/Headers/Content). Each sub-request is dispatched to its
/// internal route handler against a fake HttpContext whose response body is redirected
/// into this response, and the results are streamed back as a JSON array of
/// { Result, Status, Headers } objects.
/// </summary>
public async Task PostMultiGet()
{
    JsonOperationContext context;
    using (ContextPool.AllocateOperationContext(out context))
    {
        var requests = await context.ParseArrayToMemoryAsync(RequestBodyStream(), "multi_get", BlittableJsonDocumentBuilder.UsageMode.None);
        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartArray();
            // Cache the property-name strings used for every sub-response.
            var resultProperty = context.GetLazyStringForFieldWithCaching("Result");
            var statusProperty = context.GetLazyStringForFieldWithCaching("Status");
            var headersProperty = context.GetLazyStringForFieldWithCaching("Headers");
            HttpContext.Response.StatusCode = 200;
            // One fake HttpContext is reused for all sub-requests; its response feature
            // writes straight into the real response body.
            var features = new FeatureCollection(HttpContext.Features);
            features.Set <IHttpResponseFeature>(new MultiGetHttpResponseFeature(HttpContext.Response.Body));
            var httpContext = new DefaultHttpContext(features);
            for (int i = 0; i < requests.Length; i++)
            {
                var request = (BlittableJsonReaderObject)requests[i];
                if (i != 0)
                {
                    writer.WriteComma();
                }
                writer.WriteStartObject();
                string method, url, query;
                // Url and Query are mandatory; a descriptor missing either yields an empty object.
                if (request.TryGet(nameof(GetRequest.Url), out url) == false ||
                    request.TryGet(nameof(GetRequest.Query), out query) == false)
                {
                    writer.WriteEndObject();
                    continue;
                }
                // Method defaults to GET when not specified.
                if (request.TryGet(nameof(GetRequest.Method), out method) == false)
                {
                    method = HttpMethod.Get.Method;
                }
                httpContext.Request.Method = method;
                RouteMatch localMatch;
                var routeInformation = Server.Router.GetRoute(method, url, out localMatch);
                if (routeInformation == null)
                {
                    // Unknown route: report a 400-style entry without dispatching.
                    writer.WritePropertyName(statusProperty);
                    writer.WriteInteger(400);
                    writer.WritePropertyName(resultProperty);
                    context.Write(writer, new DynamicJsonValue
                    {
                        ["Error"] = $"There is no handler for path: {method} {url}{query}"
                    });
                    writer.WriteEndObject();
                    continue;
                }
                var requestHandler = routeInformation.GetRequestHandler();
                writer.WritePropertyName(resultProperty);
                // Flush so everything written so far reaches the stream before the
                // sub-handler writes its own output directly into the response body.
                writer.Flush();
                // The fake context is shared across iterations, so headers must be cleared.
                httpContext.Request.Headers.Clear();
                httpContext.Response.Headers.Clear();
                httpContext.Request.QueryString = new QueryString(query);
                BlittableJsonReaderObject headers;
                if (request.TryGet(nameof(GetRequest.Headers), out headers))
                {
                    foreach (var header in headers.GetPropertyNames())
                    {
                        string value;
                        if (headers.TryGet(header, out value) == false)
                        {
                            continue;
                        }
                        if (string.IsNullOrWhiteSpace(value))
                        {
                            continue;
                        }
                        httpContext.Request.Headers.Add(header, value);
                    }
                }
                string content;
                if (method == HttpMethod.Post.Method && request.TryGet(nameof(GetRequest.Content), out content))
                {
                    var requestBody = GetRequestBody(content);
                    // NOTE(review): registered on the OUTER HttpContext so the body stream
                    // lives until the whole multi-get response completes - confirm intended.
                    HttpContext.Response.RegisterForDispose(requestBody);
                    httpContext.Request.Body = requestBody;
                }
                await requestHandler(new RequestHandlerContext
                {
                    Database = Database,
                    RavenServer = Server,
                    RouteMatch = localMatch,
                    HttpContext = httpContext,
                    AllowResponseCompression = false
                });
                // After the sub-handler wrote its Result payload, append Status and Headers.
                writer.WriteComma();
                writer.WritePropertyName(statusProperty);
                writer.WriteInteger(httpContext.Response.StatusCode);
                writer.WriteComma();
                writer.WritePropertyName(headersProperty);
                writer.WriteStartObject();
                bool headerStart = true;
                foreach (var header in httpContext.Response.Headers)
                {
                    // Multi-valued headers are emitted as repeated properties with the same key.
                    foreach (var value in header.Value)
                    {
                        if (headerStart == false)
                        {
                            writer.WriteComma();
                        }
                        headerStart = false;
                        writer.WritePropertyName(header.Key);
                        writer.WriteString(value);
                    }
                }
                writer.WriteEndObject();
                writer.WriteEndObject();
            }
            writer.WriteEndArray();
        }
    }
}