/// <summary>
/// Walks the decompressed buffer and splits its work items into several singly linked
/// lists (stored in <c>compressedItem.listHeads</c>), each holding at most a fixed number
/// of items, so they can later be processed by parallel worker tasks.
/// </summary>
/// <param name="compressedItem">A work item that has already been decompressed.</param>
private void PartitionWorkItemQueue(BsnesImportStreamProcessor.CompressedWorkItem compressedItem)
{
    Debug.Assert(compressedItem.wasDecompressed);
    using var stream = new MemoryStream(compressedItem.UncompressedBuffer, 0, compressedItem.UncompressedSize);
    compressedItem.tmpHeader ??= new byte[2];

    // Partition size — tune as needed. We want parallel jobs going, but not too many at
    // once. A CompressedWorkItem currently averages around 12K work items, so this yields
    // a small handful of partitions each.
    const int partitionCapacity = 6000;

    Debug.Assert(compressedItem.listHeads != null && compressedItem.listHeads.Count == 0);

    BsnesImportStreamProcessor.WorkItem head = null;
    BsnesImportStreamProcessor.WorkItem tail = null;
    var slotsLeft = partitionCapacity;
    bool moreToRead;

    do
    {
        var item = ReadNextWorkItem(stream, compressedItem.tmpHeader);
        if (item != null)
        {
            // Append to the current partition's linked list.
            Debug.Assert(item.next == null);
            if (head == null)
            {
                Debug.Assert(tail == null);
                head = item;
            }
            else
            {
                tail.next = item;
            }

            tail = item;
            slotsLeft--;
        }

        // Note: if cancellation fires, the item just linked (if any) stays in the final
        // partition — same as before, we just stop reading further.
        moreToRead = item != null && !streamProcessor.CancelToken.IsCancellationRequested;

        // Keep filling unless the partition is full or we're stopping entirely.
        if (moreToRead && slotsLeft != 0)
            continue;

        // Close out the current (possibly partial) partition.
        if (head != null)
        {
            Debug.Assert(tail.next == null);
            compressedItem.listHeads.Add(head);
        }

        head = tail = null;
        slotsLeft = partitionCapacity;
    } while (moreToRead);
}
/// <summary>
/// Decompresses the item's compressed buffer in place via the stream processor.
/// Asserts the expected pre- and post-conditions around the call.
/// </summary>
/// <param name="compressedItem">Item with a populated compressed buffer, not yet decompressed.</param>
private void DecompressWorkItem(BsnesImportStreamProcessor.CompressedWorkItem compressedItem)
{
    // Preconditions. (The original asserted UncompressedSize != 0 twice — the duplicate
    // assert has been removed.)
    Debug.Assert(compressedItem.CompressedBuffer != null);
    Debug.Assert(compressedItem.UncompressedSize != 0);
    Debug.Assert(!compressedItem.wasDecompressed);

    streamProcessor.DecompressWorkItem(compressedItem);

    // Postconditions: the processor must have produced a buffer and flagged completion.
    Debug.Assert(compressedItem.UncompressedBuffer != null);
    Debug.Assert(compressedItem.wasDecompressed);
}
/// <summary>
/// Full pipeline for one compressed work item: decompress, partition into linked lists,
/// dispatch worker tasks, then await them and record completion stats.
/// </summary>
/// <param name="compressedItem">The compressed item to process; it is freed back to the
/// stream processor before the worker tasks complete (they hold no reference to it).</param>
/// <remarks>
/// Changed from <c>async void</c> to <c>async Task</c>: with <c>async void</c>, any
/// exception thrown after an await is unobservable and can tear down the process.
/// Direct call sites compile unchanged (the returned Task may simply be ignored, as the
/// old void result was). NOTE(review): if a caller converts this method to an
/// <c>Action</c>-typed delegate, that site needs updating — confirm against callers.
/// </remarks>
private async Task ProcessCompressedWorkItem(BsnesImportStreamProcessor.CompressedWorkItem compressedItem)
{
#if PROFILING
    var mainSpan = Markers.EnterSpan("BSNES ProcessCompressedWorkItem");
#endif
    DecompressWorkItem(compressedItem);
    PartitionWorkItemQueue(compressedItem);

    var subTasks = DispatchWorkersForCompressedWorkItem(compressedItem);

    // Capture the stat we need, then free the item immediately — the dispatched sub-tasks
    // deliberately hold no reference to it, so we don't have to wait for them to finish.
    var statsBytesCompleted = compressedItem.CompressedSize;
    streamProcessor.FreeCompressedWorkItem(ref compressedItem);

    await Task.WhenAll(subTasks);

    Stats_MarkCompleted(statsBytesCompleted);
#if PROFILING
    mainSpan.Leave();
#endif
}
/// <summary>
/// Starts one worker task per partitioned linked list in <c>compressedItem.listHeads</c>,
/// clearing each slot as it is handed off. The tasks capture only their own list head —
/// never the CompressedWorkItem — so the caller may free it immediately after dispatch.
/// </summary>
/// <param name="compressedItem">Item whose partitions were built by PartitionWorkItemQueue.</param>
/// <returns>The started tasks, for the caller to await.</returns>
private IEnumerable<Task> DispatchWorkersForCompressedWorkItem(BsnesImportStreamProcessor.CompressedWorkItem compressedItem)
{
    var listHeads = compressedItem.listHeads;
    var workers = new List<Task>(capacity: listHeads.Count);

    for (var headIndex = 0; headIndex < listHeads.Count; ++headIndex)
    {
        var listHead = listHeads[headIndex];
        listHeads[headIndex] = null; // drop our reference; the worker below owns the list now.

        workers.Add(taskManager.Run(() =>
        {
            // Deliberately no references to compressedItem inside this closure — the
            // caller frees it right after dispatch.
            try
            {
                // Throttle: limit how many uncompressed-work processors run concurrently.
                uncompressedWorkersLimit.Wait(streamProcessor.CancelToken.Token);
                try
                {
                    ProcessWorkItemsLinkedList(listHead);
                }
                finally
                {
                    uncompressedWorkersLimit.Release();
                }
            }
            catch (OperationCanceledException)
            {
                Debug.WriteLine("Cancelling2...");
                // NOP
            }
        }));
    }

    return workers;
}
// Optional bookkeeping: records that one more compressed block (and its byte count) has
// been queued for processing. Thread-safe via Interlocked; cheap enough that removing it
// for performance seems unlikely to ever be necessary.
private void Stats_MarkQueued(BsnesImportStreamProcessor.CompressedWorkItem compressedItem)
{
    Interlocked.Add(ref statsBytesToProcess, compressedItem.CompressedSize);
    Interlocked.Increment(ref statsCompressedBlocksToProcess);
}