// First variant: documents only. Reads the request body via a long-lived
// ManagedPinnedBuffer and batches parsed commands into the transaction merger.
private async Task<IOperationResult> DoBulkInsert(Action<IOperationProgress> onProgress, CancellationToken token)
{
    var progress = new BulkInsertProgress();
    try
    {
        var logger = LoggingSource.Instance.GetLogger<MergedInsertBulkCommand>(Database.Name);
        IDisposable currentCtxReset = null, previousCtxReset = null;
        try
        {
            using (ContextPool.AllocateOperationContext(out JsonOperationContext context))
            using (var buffer = JsonOperationContext.ManagedPinnedBuffer.LongLivedInstance())
            {
                currentCtxReset = ContextPool.AllocateOperationContext(out JsonOperationContext docsCtx);
                var requestBodyStream = RequestBodyStream();

                using (var parser = new BatchRequestParser.ReadMany(context, requestBodyStream, buffer, token))
                {
                    await parser.Init();

                    var array = new BatchRequestParser.CommandData[8];
                    var numberOfCommands = 0;
                    long totalSize = 0;

                    while (true)
                    {
                        var task = parser.MoveNext(docsCtx);
                        if (task == null)
                            break;

                        token.ThrowIfCancellationRequested();

                        // if we are going to wait on the network, flush immediately
                        if ((task.IsCompleted == false && numberOfCommands > 0) ||
                            // but don't batch too much anyway
                            totalSize > 16 * Voron.Global.Constants.Size.Megabyte)
                        {
                            using (ReplaceContextIfCurrentlyInUse(task, numberOfCommands, array))
                            {
                                await Database.TxMerger.Enqueue(new MergedInsertBulkCommand
                                {
                                    Commands = array,
                                    NumberOfCommands = numberOfCommands,
                                    Database = Database,
                                    Logger = logger,
                                    TotalSize = totalSize
                                });
                            }

                            progress.BatchCount++;
                            progress.Processed += numberOfCommands;
                            progress.LastProcessedId = array[numberOfCommands - 1].Id;

                            onProgress(progress);

                            // rotate the docs context: the in-flight parse task may still
                            // reference the old one, so it is kept alive until the next flush
                            previousCtxReset?.Dispose();
                            previousCtxReset = currentCtxReset;
                            currentCtxReset = ContextPool.AllocateOperationContext(out docsCtx);

                            numberOfCommands = 0;
                            totalSize = 0;
                        }

                        var commandData = await task;
                        if (commandData.Type == CommandType.None)
                            break;

                        totalSize += commandData.Document.Size;

                        if (numberOfCommands >= array.Length)
                            Array.Resize(ref array, array.Length * 2);

                        array[numberOfCommands++] = commandData;
                    }

                    // flush whatever remains after the request stream ends
                    if (numberOfCommands > 0)
                    {
                        await Database.TxMerger.Enqueue(new MergedInsertBulkCommand
                        {
                            Commands = array,
                            NumberOfCommands = numberOfCommands,
                            Database = Database,
                            Logger = logger,
                            TotalSize = totalSize
                        });

                        progress.BatchCount++;
                        progress.Processed += numberOfCommands;
                        progress.LastProcessedId = array[numberOfCommands - 1].Id;

                        onProgress(progress);
                    }
                }
            }
        }
        finally
        {
            currentCtxReset?.Dispose();
            previousCtxReset?.Dispose();
        }

        HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;

        return new BulkOperationResult
        {
            Total = progress.Processed
        };
    }
    catch (Exception e)
    {
        HttpContext.Response.Headers["Connection"] = "close";
        throw new InvalidOperationException("Failed to process bulk insert " + progress, e);
    }
}
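// ---------------------------------------------------------------------------
// ReplaceContextIfCurrentlyInUse is called by both variants but is not shown
// here. A minimal sketch of what it plausibly does, assuming the in-flight
// parse task may still be writing into docsCtx and that blittable documents
// are immutable and support Clone into another context; this body is an
// assumption-based illustration, not the actual implementation.
// ---------------------------------------------------------------------------
private IDisposable ReplaceContextIfCurrentlyInUse(Task<BatchRequestParser.CommandData> task,
    int numberOfCommands, BatchRequestParser.CommandData[] array)
{
    if (task.IsCompleted)
        return null; // the parser is done with docsCtx; the batch can be handed over as-is

    // The parser may still be using docsCtx, so copy the buffered documents
    // into a fresh context before the transaction merger consumes them.
    var disposable = ContextPool.AllocateOperationContext(out JsonOperationContext tempCtx);
    for (int i = 0; i < numberOfCommands; i++)
    {
        if (array[i].Document != null)
            array[i].Document = array[i].Document.Clone(tempCtx);
    }
    return disposable;
}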
// Second variant: adds attachment, counter, and time-series support, a per-batch
// operations cap, per-command metadata modification, and reads the request body
// via context.GetMemoryBuffer instead of ManagedPinnedBuffer.
private async Task<IOperationResult> DoBulkInsert(Action<IOperationProgress> onProgress, CancellationToken token)
{
    var progress = new BulkInsertProgress();
    try
    {
        var logger = LoggingSource.Instance.GetLogger<MergedInsertBulkCommand>(Database.Name);
        IDisposable currentCtxReset = null, previousCtxReset = null;
        try
        {
            using (ContextPool.AllocateOperationContext(out JsonOperationContext context))
            using (context.GetMemoryBuffer(out var buffer))
            {
                currentCtxReset = ContextPool.AllocateOperationContext(out JsonOperationContext docsCtx);
                var requestBodyStream = RequestBodyStream();

                using (var parser = new BatchRequestParser.ReadMany(context, requestBodyStream, buffer, token))
                {
                    await parser.Init();

                    var array = new BatchRequestParser.CommandData[8];
                    var numberOfCommands = 0;
                    long totalSize = 0;
                    int operationsCount = 0;

                    while (true)
                    {
                        // scope a metadata modifier to each parsed command
                        using (var modifier = new BlittableMetadataModifier(docsCtx))
                        {
                            var task = parser.MoveNext(docsCtx, modifier);
                            if (task == null)
                                break;

                            token.ThrowIfCancellationRequested();

                            // if we are going to wait on the network, flush immediately
                            if ((task.Wait(5) == false && numberOfCommands > 0) ||
                                // but don't batch too much anyway
                                totalSize > 16 * Voron.Global.Constants.Size.Megabyte ||
                                operationsCount >= 8192)
                            {
                                using (ReplaceContextIfCurrentlyInUse(task, numberOfCommands, array))
                                {
                                    await Database.TxMerger.Enqueue(new MergedInsertBulkCommand
                                    {
                                        Commands = array,
                                        NumberOfCommands = numberOfCommands,
                                        Database = Database,
                                        Logger = logger,
                                        TotalSize = totalSize
                                    });
                                }

                                // release the temp files backing attachment streams of the flushed batch
                                ClearStreamsTempFiles();

                                progress.BatchCount++;
                                progress.Total += numberOfCommands;
                                progress.LastProcessedId = array[numberOfCommands - 1].Id;

                                onProgress(progress);

                                // rotate the docs context, keeping the previous one alive
                                // while the in-flight parse task may still reference it
                                previousCtxReset?.Dispose();
                                previousCtxReset = currentCtxReset;
                                currentCtxReset = ContextPool.AllocateOperationContext(out docsCtx);

                                numberOfCommands = 0;
                                totalSize = 0;
                                operationsCount = 0;
                            }

                            var commandData = await task;
                            if (commandData.Type == CommandType.None)
                                break;

                            if (commandData.Type == CommandType.AttachmentPUT)
                            {
                                commandData.AttachmentStream = await WriteAttachment(
                                    commandData.ContentLength, parser.GetBlob(commandData.ContentLength));
                            }

                            (long size, int opsCount) = GetSizeAndOperationsCount(commandData);
                            operationsCount += opsCount;
                            totalSize += size;

                            if (numberOfCommands >= array.Length)
                                Array.Resize(ref array, array.Length + Math.Min(1024, array.Length));

                            array[numberOfCommands++] = commandData;

                            switch (commandData.Type)
                            {
                                case CommandType.PUT:
                                    progress.DocumentsProcessed++;
                                    break;
                                case CommandType.AttachmentPUT:
                                    progress.AttachmentsProcessed++;
                                    break;
                                case CommandType.Counters:
                                    progress.CountersProcessed++;
                                    break;
                                case CommandType.TimeSeriesBulkInsert:
                                    progress.TimeSeriesProcessed++;
                                    break;
                            }
                        }
                    }

                    // flush whatever remains after the request stream ends
                    if (numberOfCommands > 0)
                    {
                        await Database.TxMerger.Enqueue(new MergedInsertBulkCommand
                        {
                            Commands = array,
                            NumberOfCommands = numberOfCommands,
                            Database = Database,
                            Logger = logger,
                            TotalSize = totalSize
                        });

                        progress.BatchCount++;
                        progress.Total += numberOfCommands;
                        progress.LastProcessedId = array[numberOfCommands - 1].Id;
#pragma warning disable CS0618 // Type or member is obsolete
                        progress.Processed = progress.DocumentsProcessed;
#pragma warning restore CS0618 // Type or member is obsolete

                        onProgress(progress);
                    }
                }
            }
        }
        finally
        {
            currentCtxReset?.Dispose();
            previousCtxReset?.Dispose();
            ClearStreamsTempFiles();
        }

        HttpContext.Response.StatusCode = (int)HttpStatusCode.Created;

        return new BulkOperationResult
        {
            Total = progress.Total,
            DocumentsProcessed = progress.DocumentsProcessed,
            AttachmentsProcessed = progress.AttachmentsProcessed,
            CountersProcessed = progress.CountersProcessed,
            TimeSeriesProcessed = progress.TimeSeriesProcessed
        };
    }
    catch (Exception e)
    {
        HttpContext.Response.Headers["Connection"] = "close";
        throw new InvalidOperationException("Failed to process bulk insert. " + progress, e);
    }
}
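// ---------------------------------------------------------------------------
// GetSizeAndOperationsCount drives the 16 MB / 8192-operation batch caps in the
// second variant but is not shown here. A hedged sketch under assumed semantics:
// documents are charged their blittable size (as the first variant did),
// attachments their declared content length, and other command types a fixed
// per-command estimate. The constant and the field choices are assumptions,
// not the actual implementation.
// ---------------------------------------------------------------------------
private static (long Size, int OperationsCount) GetSizeAndOperationsCount(BatchRequestParser.CommandData commandData)
{
    switch (commandData.Type)
    {
        case CommandType.PUT:
            // a document PUT carries a parsed blittable document
            return (commandData.Document.Size, 1);
        case CommandType.AttachmentPUT:
            // the attachment body is streamed separately; charge its declared length
            return (commandData.ContentLength, 1);
        default:
            // counters / time-series: fixed estimate per command (assumption)
            return (512, 1);
    }
}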