/// <summary>
/// Applies a scripted patch to the document identified by the "id" query-string value.
/// The request body must contain a "Patch" object and may contain an optional
/// "PatchIfMissing" object (applied when the target document does not exist).
/// Honors the "If-Match" etag header and a "test=true" query flag for dry runs.
/// </summary>
/// <returns>A completed task; the patch result is written to the response body.</returns>
/// <exception cref="ArgumentException">Thrown when the mandatory "Patch" field is absent.</exception>
public Task Patch()
{
    var id = GetQueryStringValueAndAssertIfSingleAndNotEmpty("id");
    var etag = GetLongFromHeaders("If-Match");
    var isTestOnly = GetBoolValueQueryString("test", required: false) ?? false;

    DocumentsOperationContext context;
    using (ContextPool.AllocateOperationContext(out context))
    {
        var request = context.Read(RequestBodyStream(), "ScriptedPatchRequest");

        BlittableJsonReaderObject patchCmd, patchIsMissingCmd;
        if (request.TryGet("Patch", out patchCmd) == false)
        {
            throw new ArgumentException("The 'Patch' field in the body request is mandatory");
        }

        var patch = PatchRequest.Parse(patchCmd);

        PatchRequest patchIfMissing = null;
        if (request.TryGet("PatchIfMissing", out patchIsMissingCmd))
        {
            // BUGFIX: previously this re-parsed 'patchCmd', silently ignoring the
            // caller-supplied 'PatchIfMissing' script. Parse the correct object.
            patchIfMissing = PatchRequest.Parse(patchIsMissingCmd);
        }

        // TODO: In order to properly move this to the transaction merger, we need
        // TODO: move a lot of the costs (such as script parsing) out, so we create
        // TODO: an object that we'll apply, otherwise we'll slow down a lot the transactions
        // TODO: just by doing the javascript parsing and preparing the engine
        PatchResultData patchResult;
        using (context.OpenWriteTransaction())
        {
            patchResult = Database.Patch.Apply(context, id, etag, patch, patchIfMissing, isTestOnly);
            context.Transaction.Commit();
        }

        // Invariant: the document is actually patched exactly when this is NOT a test run.
        // (The original chained '== ... == false' relied on left-to-right bool equality
        // and was very easy to misread; the parenthesized form states the same thing.)
        Debug.Assert((patchResult.PatchResult == PatchResult.Patched) == (isTestOnly == false));

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartObject();

            writer.WritePropertyName("Patched");
            writer.WriteBool(isTestOnly == false);
            writer.WriteComma();

            // NOTE(review): "Debug" carries the modified document while "Document" (test
            // mode only) carries the original — confirm this property naming is intended.
            writer.WritePropertyName("Debug");
            writer.WriteObject(patchResult.ModifiedDocument);

            if (isTestOnly)
            {
                writer.WriteComma();
                writer.WritePropertyName("Document");
                writer.WriteObject(patchResult.OriginalDocument);
            }

            writer.WriteEndObject();
        }
    }
    return Task.CompletedTask;
}
/// <summary>
/// Streams a JSON array of statistics for the database's running TCP connections,
/// filtered by optional query-string parameters (min/max duration in seconds,
/// source IP, and operation type) and paged via start/pageSize.
/// </summary>
/// <returns>A completed task; results are written directly to the response body.</returns>
public Task FindConnection()
{
    var start = GetStart();
    var take = GetPageSize(Database.Configuration.Core.MaxPageSize);
    var minDuration = GetLongQueryString("minSecDuration", false);
    var maxDuration = GetLongQueryString("maxSecDuration", false);
    var ip = GetStringQueryString("ip", false);
    var operationString = GetStringQueryString("operation", false);

    TcpConnectionHeaderMessage.OperationTypes? operation = null;
    if (string.IsNullOrEmpty(operationString) == false)
    {
        operation = (TcpConnectionHeaderMessage.OperationTypes)Enum.Parse(
            typeof(TcpConnectionHeaderMessage.OperationTypes), operationString);
    }

    DocumentsOperationContext context;
    using (ContextPool.AllocateOperationContext(out context))
    {
        var connections = Database.RunningTcpConnections;
        HttpContext.Response.StatusCode = 200;

        using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            writer.WriteStartArray();
            var isFirst = true;
            foreach (var connection in connections)
            {
                if (connection.CheckMatch(minDuration, maxDuration, ip, operation) == false)
                {
                    continue;
                }

                // Skip 'start' matching entries for paging.
                if (start > 0)
                {
                    start--;
                    continue;
                }

                // BUGFIX: was '--take <= 0', which pre-decremented and therefore emitted
                // only pageSize - 1 entries (and zero entries for pageSize == 1).
                // Post-decrement writes exactly 'take' entries.
                if (take-- <= 0)
                {
                    break;
                }

                if (isFirst == false)
                {
                    writer.WriteComma();
                }
                context.Write(writer, connection.GetConnectionStats(context));
                isFirst = false;
            }
            writer.WriteEndArray();
        }
    }
    return Task.CompletedTask;
}
/// <summary>
/// Streams bulk-insert commands from the request body and applies them in batches
/// through the database's transaction merger, reporting progress after each batch.
/// Uses two operation contexts in rotation so that documents read into one context
/// can still be in-flight while the next batch is parsed into the other.
/// </summary>
/// <param name="onProgress">Callback invoked with cumulative progress after every flushed batch.</param>
/// <param name="token">Cancellation token checked on every parsed command.</param>
/// <returns>A <see cref="BulkOperationResult"/> with the total number of processed commands.</returns>
/// <exception cref="InvalidOperationException">Wraps any failure; the connection is also marked for close.</exception>
private async Task<IOperationResult> DoBulkInsert(Action<IOperationProgress> onProgress, CancellationToken token)
{
    var progress = new BulkInsertProgress();
    try
    {
        var logger = LoggingSource.Instance.GetLogger<MergedInsertBulkCommand>(Database.Name);
        // Two context leases rotated as batches are flushed: 'currentCtxReset' owns the
        // context commands are currently parsed into; 'previousCtxReset' keeps the prior
        // context alive until the next flush (its documents may still be referenced).
        IDisposable currentCtxReset = null, previousCtxReset = null;
        try
        {
            using (ContextPool.AllocateOperationContext(out JsonOperationContext context))
            using (var buffer = JsonOperationContext.ManagedPinnedBuffer.LongLivedInstance())
            {
                currentCtxReset = ContextPool.AllocateOperationContext(out JsonOperationContext docsCtx);
                var requestBodyStream = RequestBodyStream();
                using (var parser = new BatchRequestParser.ReadMany(context, requestBodyStream, buffer, token))
                {
                    await parser.Init();
                    // Command buffer grows by doubling; starts small for small inserts.
                    var array = new BatchRequestParser.CommandData[8];
                    var numberOfCommands = 0;
                    long totalSize = 0;
                    while (true)
                    {
                        var task = parser.MoveNext(docsCtx);
                        if (task == null)
                        {
                            break;
                        }
                        token.ThrowIfCancellationRequested();

                        // if we are going to wait on the network, flush immediately
                        if ((task.IsCompleted == false && numberOfCommands > 0) ||
                            // but don't batch too much anyway
                            totalSize > 16 * Voron.Global.Constants.Size.Megabyte)
                        {
                            // NOTE(review): presumably swaps contexts if the merger is still
                            // using the current one — confirm ReplaceContextIfCurrentlyInUse semantics.
                            using (ReplaceContextIfCurrentlyInUse(task, numberOfCommands, array))
                            {
                                await Database.TxMerger.Enqueue(new MergedInsertBulkCommand
                                {
                                    Commands = array,
                                    NumberOfCommands = numberOfCommands,
                                    Database = Database,
                                    Logger = logger,
                                    TotalSize = totalSize
                                });
                            }
                            progress.BatchCount++;
                            progress.Processed += numberOfCommands;
                            progress.LastProcessedId = array[numberOfCommands - 1].Id;
                            onProgress(progress);

                            // Rotate contexts: release the oldest lease, demote the current
                            // one, and parse the next batch into a fresh context.
                            previousCtxReset?.Dispose();
                            previousCtxReset = currentCtxReset;
                            currentCtxReset = ContextPool.AllocateOperationContext(out docsCtx);

                            numberOfCommands = 0;
                            totalSize = 0;
                        }

                        var commandData = await task;
                        if (commandData.Type == CommandType.None)
                        {
                            break;
                        }

                        totalSize += commandData.Document.Size;
                        if (numberOfCommands >= array.Length)
                        {
                            Array.Resize(ref array, array.Length * 2);
                        }
                        array[numberOfCommands++] = commandData;
                    }

                    // Flush whatever remains after the stream ends.
                    if (numberOfCommands > 0)
                    {
                        await Database.TxMerger.Enqueue(new MergedInsertBulkCommand
                        {
                            Commands = array,
                            NumberOfCommands = numberOfCommands,
                            Database = Database,
                            Logger = logger,
                            TotalSize = totalSize
                        });
                        progress.BatchCount++;
                        progress.Processed += numberOfCommands;
                        progress.LastProcessedId = array[numberOfCommands - 1].Id;
                        onProgress(progress);
                    }
                }
            }
        }
        finally
        {
            currentCtxReset?.Dispose();
            previousCtxReset?.Dispose();
        }

        HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;
        return (new BulkOperationResult
        {
            Total = progress.Processed
        });
    }
    catch (Exception e)
    {
        // A mid-stream failure leaves the request body in an unknown state,
        // so tell the client to drop the connection rather than reuse it.
        HttpContext.Response.Headers["Connection"] = "close";
        throw new InvalidOperationException("Failed to process bulk insert " + progress, e);
    }
}