/// <summary>
/// Creates the requested number of workers
/// </summary>
/// <param name="count">number of workers to create</param>
/// <returns>the created workers, linked into a closed chain</returns>
static ArchWorker[] CreateWorkers(uint count)
{
    Debug.Assert(count > 0);
    General.Log($"Creating {count} workers");

    ArchWorker[] archivers = new ArchWorker[count];
    for (int i = 0; i < archivers.Length; i++)
    {
        archivers[i] = new ArchWorker(i);
    }

    // the first worker may write immediately
    archivers[0].CanWrite.Set();

    if (archivers.Length > 1)
    {
        // create a closed chain: the last worker points back to the first
        for (int idx = 0; idx < archivers.Length - 1; idx++)
        {
            archivers[idx].Next = archivers[idx + 1];
        }
        archivers[^1].Next = archivers[0];
    }

    return archivers;
}
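// Illustrative sketch only (not part of the pipeline above): the closed chain built by
// CreateWorkers is essentially a token ring for ordered output. Assuming CanWrite is an
// event each worker waits on before writing and then signals on its Next neighbour, the
// pattern looks roughly like this; RingWorkerSketch and its members are hypothetical
// names, not types of this project.
private sealed class RingWorkerSketch
{
    public int Id;
    public RingWorkerSketch Next;
    // Signalled when this worker is allowed to write its output chunk.
    public System.Threading.AutoResetEvent CanWrite = new System.Threading.AutoResetEvent(false);

    public void WriteInOrder()
    {
        CanWrite.WaitOne();                              // wait for our turn in the ring
        System.Console.WriteLine($"worker {Id} writes"); // ordered output would happen here
        Next.CanWrite.Set();                             // pass the "write token" to the next worker
    }
}

private static void RingSketchDemo()
{
    var ring = new RingWorkerSketch[3];
    for (int i = 0; i < ring.Length; i++) ring[i] = new RingWorkerSketch { Id = i };
    for (int i = 0; i < ring.Length; i++) ring[i].Next = ring[(i + 1) % ring.Length];

    ring[0].CanWrite.Set(); // worker 0 starts the chain, as in CreateWorkers above
    var tasks = new System.Threading.Tasks.Task[ring.Length];
    for (int i = 0; i < ring.Length; i++)
    {
        RingWorkerSketch w = ring[i];
        tasks[i] = System.Threading.Tasks.Task.Run(w.WriteInOrder);
    }
    System.Threading.Tasks.Task.WaitAll(tasks); // output appears in order: 0, 1, 2
}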
/// <summary>
/// Creates the workers that do the actual zip/unzip work
/// </summary>
/// <param name="buffers">one buffer per worker; each serves as both input and output</param>
/// <param name="act">zip or unzip</param>
/// <param name="archivers">out param: the created workers</param>
static void CreateArchivers(InOutBuffer[] buffers, ArchAction act, out ArchWorker[] archivers)
{
    int count = buffers.Length;
    Debug.Assert(count > 0);
    General.Log($"Creating {count} (un)zippers");

    archivers = new ArchWorker[count];
    for (int i = 0; i < archivers.Length; i++)
    {
        // the same InOutBuffer instance is used for both the input and the output side
        archivers[i] = new ArchWorker(inBuffer: buffers[i], outBuffer: buffers[i], act);
    }
}
/// <summary>
/// Kick-starts the zip/unzip conveyor: reads the first chunk for every worker
/// and starts each worker on its chunk.
/// </summary>
/// <param name="reader">reads chunks from the input file</param>
/// <param name="workers">the (un)zipping workers</param>
/// <param name="workerTasks">receives the running task of each worker</param>
/// <param name="buffers">one buffer per worker</param>
static async Task StartConveyor(ArchReader reader, ArchWorker[] workers, Task[] workerTasks, InOutBuffer[] buffers)
{
    Debug.Assert(workers.Length == buffers.Length);

    for (int idx = 0; idx < workers.Length; idx++)
    {
        ArchWorker worker = workers[idx];

        General.Log($"{idx} start reading");
        reader.Buffer = buffers[idx];
        //readerThread.haveWork.Set();
        //readerThread.notifyDone.WaitOne();
        await reader.ReadAsync();

        General.Log($"{idx} reading chunk complete, start zipping");
        workerTasks[idx] = worker.DoWorkAsync();
    }
}
private static async Task ManageThreadsAsync(string inputName, string outputName, ArchAction act, int bufferSize, uint workersCount = 0)
{
    InOutBuffer[] buffers = null;
    Task[] workerTasks = null;
    try
    {
        General.Log($"Started '{inputName}' {act} into '{outputName}'...");
        if (bufferSize < defaultChunkSize)
        {
            bufferSize = defaultChunkSize;
        }
        if (workersCount < 1)
        {
            workersCount = ChooseWorkersCount();
        }
        General.Log($"buffer size {bufferSize}, workers count {workersCount}");

        // to feel good, let's use 'using'
        // objects for input reading
        using FileStream inFileStream = File.OpenRead(inputName);
        ArchReader reader = new ArchReader(inFileStream, act);

        // objects for output writing
        using FileStream outFileStream = File.Create(outputName);
        ArchWriter writer = new ArchWriter(outFileStream, act);

        // create the objects that do the zipping/unzipping
        CreateBuffersAndWorkers(act, bufferSize, workersCount, out buffers, out ArchWorker[] workers, out workerTasks);

        await StartConveyor(reader, workers, workerTasks, buffers);

        int idx = 0;
        // As long as there are bytes read from the input, keep the conveyor running.
        // When there is no more input data, set this flag and wait until all workers are finished.
        bool finishing = false;
        do
        {
            ArchWorker worker = workers[idx];
            if (worker == null)
            {
                General.Log($"all workers are finished");
                break;
            }

            General.Log($"Wait for worker {idx} done, to start writing");
            await workerTasks[idx]; // wait until zipper idx is done

            General.Log($"{idx} start writing");
            writer.Buffer = buffers[idx];
            Task writerTask = writer.WriteAsync();

            if (!finishing)
            {
                General.Log($"{idx} start reading next portion");
                reader.Buffer = buffers[idx];
                Task readerTask = reader.ReadAsync();

                General.Log($"{idx} need both In & Out buffers to be ready to start the next zip/unzip part");
                await readerTask;

                // check whether anything was read
                if (reader.Buffer.BytesRead <= 0)
                {
                    // all input has been read, and all of it is (being) processed
                    finishing = true;
                }
            }

            await writerTask;

            if (finishing)
            {
                workers[idx] = null;
                workerTasks[idx] = null; // according to info from MS, there is no need to dispose a Task
                buffers[idx] = null;
            }
            else
            {
                workerTasks[idx] = worker.DoWorkAsync();
            }

            // rotate, so the output stays in order
            if (++idx >= workers.Length)
            {
                idx = 0;
            }
        } while (true);

        General.Log($"{act} finished successfully.");
    }
    catch (Exception exc)
    {
        General.Log($"ManageThreads encountered an error {exc.Message}, returning to caller.");
        throw;
    }
    finally
    {
        CleanUp(buffers, workerTasks);
    }
}
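// Illustrative only: a hypothetical way ManageThreadsAsync might be invoked; the 1 MiB
// buffer size and the worker count of 4 are arbitrary example values, not defaults taken
// from this project (passing 0 workers would let ChooseWorkersCount decide instead).
private static System.Threading.Tasks.Task CompressFileExampleAsync(string inputName, string outputName)
{
    // Zip 'inputName' into 'outputName' with four parallel workers and 1 MiB chunks.
    return ManageThreadsAsync(inputName, outputName, ArchAction.Zip, bufferSize: 1 << 20, workersCount: 4);
}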
private static void ManageThreads(string inputName, string outputName, ArchAction act, int bufferSize, uint workersCount = 0)
{
    General.Log($"{act} started.");
    Archiver.act = act;
    if (bufferSize > defaultChunkSize)
    {
        ArchWorker.ChunkSize = bufferSize;
    }

    using (FileStream outFileStream = File.Create(outputName))
    {
        ArchWorker.OutFileStream = outFileStream;
        using FileStream inFileStream = File.OpenRead(inputName);

        if (workersCount < 1)
        {
            workersCount = ChooseWorkersCount();
        }
        ArchWorker[] archivers = CreateWorkers(workersCount);

        int idx = 0;
        do
        {
            ArchWorker arch = archivers[idx];
            General.Log($"main thread about to wait for arch {idx} done, to start it in a new thread again");
            arch.Done.WaitOne();

            // load the next portion to be processed and written
            int read = act == ArchAction.Zip
                ? arch.ReadChunk(inFileStream)
                : arch.ReadZippedChunk(inFileStream);
            if (read > 0)
            {
                //TODO
                //Thread th = new Thread(new ParameterizedThreadStart(act == ArchAction.Zip ? ZipWork : UnzipWork));
                Thread th;
                if (act == ArchAction.Zip)
                {
                    //th = new Thread(new ParameterizedThreadStart(ZipWork));
                    th = new Thread(ZipWork);
                }
                else
                {
                    //th = new Thread(new ParameterizedThreadStart(UnzipWork));
                    th = new Thread(UnzipWork);
                }
                th.Start(arch);

                if (++idx >= archivers.Length)
                {
                    idx = 0;
                }
            }
            else
            {
                // all input has been read
                if (archivers.Length == 1)
                {
                    break;
                }
                if (--idx < 0)
                {
                    idx = archivers.Length - 1;
                }
                General.Log($"main thread about to wait for the last arch {idx} done...");
                archivers[idx].Done.WaitOne();
                // the previous worker is finished, which means all workers before it are finished;
                // now it is safe to leave the block and dispose the output stream
                break;
            }
        } while (true);
        //archivers = null; // no need
    }

    General.Log($"{act} finished.");
}
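// Illustrative only: a minimal, self-contained sketch of compressing a single in-memory
// chunk with System.IO.Compression.GZipStream, the kind of per-chunk work that ZipWork /
// ReadZippedChunk are assumed to build on; this is not this project's actual implementation.
private static byte[] GZipChunkSketch(byte[] chunk, int count)
{
    using var output = new System.IO.MemoryStream();
    using (var gzip = new System.IO.Compression.GZipStream(output, System.IO.Compression.CompressionMode.Compress))
    {
        gzip.Write(chunk, 0, count); // compress exactly 'count' bytes of the chunk
    } // disposing the GZipStream flushes the compressed bytes into 'output'
    return output.ToArray();
}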