/// <summary>
/// Worker loop for the decompression path: dequeues gzip chunk files from
/// <c>_filesQueue</c>, inflates each one fully into memory, and adds the
/// resulting <c>CompressionChunk</c> to <c>_sortedChunkList</c> keyed by its
/// sequence number so the single writer thread can emit chunks in order.
/// Intended to run on several threads concurrently.
/// </summary>
/// <param name="affinityMask">Processor affinity mask applied to the current thread (best effort).</param>
private void ReadChunks(IntPtr affinityMask)
{
    if (_filesQueue == null)
    {
        return;
    }

    try
    {
        TrySetAffinityForCurrentThread(affinityMask);

        while (_filesQueue.Count != 0 && !_forceStop)
        {
            // Back-pressure: if the sorted list is full, wait for the writer
            // thread to drain it before decompressing more chunks.
            if (_sortedChunkList.Count >= _maxQueueCapacity)
            {
                Thread.Sleep(200);
                continue;
            }

            if (!_filesQueue.TryDequeue(out Tuple<int, string> chunkFile))
            {
                // Another reader thread won the race for this item; try again.
                continue;
            }

            var zippedFile = Path.Combine(_baseDir, chunkFile.Item2);

            using (MemoryStream memStream = new MemoryStream())
            using (Stream zippedFileStream = File.OpenRead(zippedFile))
            using (Stream csStream = new GZipStream(zippedFileStream, CompressionMode.Decompress))
            {
                // Stream.CopyTo replaces the hand-rolled 1 KB read/write loop.
                csStream.CopyTo(memStream);

                CompressionChunk chunk = new CompressionChunk()
                {
                    Number = chunkFile.Item1,
                    Data = memStream.ToArray()
                };

                // SortedList is not thread-safe; all mutations go through the lock.
                lock (_sortedChunkListLocker)
                {
                    _sortedChunkList.Add(chunk.Number, chunk);
                }
            }
        }
    }
    catch (Exception ex)
    {
        HandleError($"Error occurred in ReadChunks, thread name:{Thread.CurrentThread.Name} thread id:{Thread.CurrentThread.ManagedThreadId}", ex);
    }
}
/// <summary>
/// Producer loop for the compression path: reads the input file sequentially
/// in <c>chunkSize</c>-byte chunks, wraps each chunk with its 1-based sequence
/// number, and enqueues it on <c>_chunkQueue</c> for the compressor threads.
/// Sets <c>_reading</c> to false when the file is fully consumed (or on error)
/// so downstream threads know no more chunks are coming.
/// </summary>
/// <param name="affinityMask">Processor affinity mask applied to the current thread (best effort).</param>
private void ProduceChunks(IntPtr affinityMask)
{
    var inputFilePath = Path.Combine(_baseDir, _inputFile);

    try
    {
        TrySetAffinityForCurrentThread(affinityMask);

        // Open read-only: FileMode.Open alone requests read/write access and
        // fails on read-only input files.
        using (FileStream sourceFileStream = new FileStream(inputFilePath, FileMode.Open, FileAccess.Read))
        {
            int count = 1;
            byte[] buffer = new byte[chunkSize];
            int bytesRead;

            while ((bytesRead = sourceFileStream.Read(buffer, 0, buffer.Length)) > 0 && !_forceStop)
            {
                // Back-pressure BEFORE enqueuing: the original checked the
                // capacity after Enqueue and slept only once, so the queue
                // could overshoot _maxQueueCapacity. Block here until the
                // consumers drain it (or a stop is requested).
                while (_chunkQueue.Count >= _maxQueueCapacity && !_forceStop)
                {
                    Thread.Sleep(200);
                }

                if (_forceStop)
                {
                    break;
                }

                // The read buffer is reused on every iteration, so copy out
                // only the bytes actually read.
                byte[] data = new byte[bytesRead];
                Buffer.BlockCopy(buffer, 0, data, 0, bytesRead);

                CompressionChunk chunk = new CompressionChunk()
                {
                    Number = count,
                    Data = data
                };

                _chunkQueue.Enqueue(chunk);
                count++;
            }
        }
    }
    catch (Exception ex)
    {
        HandleError($"Error occurred in ProduceChunks thread name:{Thread.CurrentThread.Name} thread id:{Thread.CurrentThread.ManagedThreadId}", ex);
    }

    // Signal consumers that production has finished, even after an error.
    _reading = false;
}
/// <summary>
/// Single-writer loop for the decompression path: waits for the next expected
/// chunk number to appear at the head of <c>_sortedChunkList</c> and appends
/// its data to the output file, guaranteeing chunks are written in order even
/// though reader threads finish out of order.
/// </summary>
/// <param name="affinityMask">Processor affinity mask applied to the current thread (best effort).</param>
private void WriteUnzipedChunks(IntPtr affinityMask)
{
    // TODO: first chunk number must be passed as a parameter or taken from a class member.
    int count = 1;
    var outputFilePath = Path.Combine(_baseDir, _outputFile);

    try
    {
        TrySetAffinityForCurrentThread(affinityMask);

        using (FileStream destFileStream = new FileStream(outputFilePath, FileMode.Create))
        {
            while ((_reading || _sortedChunkList.Count > 0) && !_forceStop)
            {
                CompressionChunk chunk = null;

                // The head inspection must happen under the lock: the original
                // read ElementAt(0) outside it while reader threads were adding
                // concurrently, and SortedList is not thread-safe, so that read
                // could throw or observe a torn state. Keys[0]/Values[0] also
                // avoid the LINQ ElementAt enumeration.
                lock (_sortedChunkListLocker)
                {
                    if (_sortedChunkList.Count > 0 && _sortedChunkList.Keys[0] == count)
                    {
                        chunk = _sortedChunkList.Values[0];
                        _sortedChunkList.RemoveAt(0);
                    }
                }

                if (chunk != null)
                {
                    // Write outside the lock so readers are not blocked by file I/O.
                    destFileStream.Write(chunk.Data, 0, chunk.Data.Length);
                    count++;
                }
                else
                {
                    // The next expected chunk is not ready yet (or the list is
                    // still/already empty) — wait and re-check.
                    Thread.Sleep(100);
                }
            }
        }
    }
    catch (Exception ex)
    {
        HandleError($"Error occurred in WriteUnzipedChunks, thread name:{Thread.CurrentThread.Name} thread id:{Thread.CurrentThread.ManagedThreadId}", ex);
    }
}