ReusableTask Tick(int delta, bool waitForBufferedIO)
{
    // Restart the timer so the next tick measures a fresh interval.
    UpdateTimer.Restart();

    // Advance both speed monitors by the elapsed time, then resize the
    // rate limiter chunks from the current settings and measured rates.
    ReadMonitor.Tick(delta);
    WriteMonitor.Tick(delta);
    WriteLimiter.UpdateChunks(Settings.MaximumDiskWriteRate, WriteRate);
    ReadLimiter.UpdateChunks(Settings.MaximumDiskReadRate, ReadRate);

    // Always kick off processing of queued IO; the caller chooses whether
    // to await its completion or fire-and-forget it.
    ReusableTask bufferedIOTask = ProcessBufferedIOAsync();
    if (waitForBufferedIO)
        return bufferedIOTask;
    return ReusableTask.CompletedTask;
}
// Drains as many queued writes and reads as the rate limiters currently
// permit, completing each operation's TaskCompletionSource with the result
// (or the thrown exception). When 'force' is true the limiters are ignored
// and both queues are drained completely.
void ProcessBufferedIO(bool force = false)
{
    // Process queued writes first.
    while (WriteQueue.Count > 0) {
        BufferedIO write = WriteQueue.Peek();

        // A sentinel entry (no manager, no buffer) is a 'flush marker'
        // queued by a caller which wants to wait until all prior writes
        // have completed before it hashes a piece. Complete it immediately.
        if (write.manager == null && write.buffer == null) {
            write = WriteQueue.Dequeue();
            write.tcs.SetResult(true);
            continue;
        }

        // Stop draining once the limiter refuses the next chunk, unless forced.
        if (!force && !WriteLimiter.TryProcess(write.count))
            break;

        write = WriteQueue.Dequeue();
        try {
            Interlocked.Add(ref pendingWrites, -write.count);
            Write(write.manager, write.offset, write.buffer, write.count);
            write.tcs.SetResult(true);
        } catch (Exception ex) {
            write.tcs.SetException(ex);
        }
    }

    // Then process queued reads under the same limiter rules.
    while (ReadQueue.Count > 0) {
        if (!force && !ReadLimiter.TryProcess(ReadQueue.Peek().count))
            break;

        BufferedIO read = ReadQueue.Dequeue();
        try {
            Interlocked.Add(ref pendingReads, -read.count);
            bool result = Read(read.manager, read.offset, read.buffer, read.count);
            read.tcs.SetResult(result);
        } catch (Exception ex) {
            read.tcs.SetException(ex);
        }
    }
}
/// <summary>
/// Writes a block to disk, feeding the piece's incremental hasher when the
/// data arrives in order, and queueing the write if the rate limiter cannot
/// accept it immediately.
/// </summary>
/// <param name="manager">The torrent the data belongs to.</param>
/// <param name="offset">Absolute offset of the block within the torrent's data.</param>
/// <param name="buffer">The buffer containing the data to write.</param>
/// <param name="count">Number of bytes to write. Must be at least 1.</param>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="count"/> is less than 1.</exception>
internal async ReusableTask WriteAsync(ITorrentData manager, long offset, byte[] buffer, int count)
{
    if (count < 1)
        throw new ArgumentOutOfRangeException(nameof(count), $"Count must be greater than zero, but was {count}.");

    // Track the bytes as pending before hopping onto the IO loop thread;
    // the balancing decrement happens when the write is actually performed
    // (below, or in ProcessBufferedIO for queued writes).
    Interlocked.Add(ref pendingWrites, count);
    await IOLoop;

    int pieceIndex = (int)(offset / manager.PieceLength);
    long pieceStart = (long)pieceIndex * manager.PieceLength;
    long pieceEnd = pieceStart + manager.PieceLength;

    // Only begin incrementally hashing a piece when its very first block is
    // written; a hasher created mid-piece could never catch up.
    if (!IncrementalHashes.TryGetValue(ValueTuple.Create(manager, pieceIndex), out IncrementalHashData incrementalHash) && offset == pieceStart) {
        incrementalHash = IncrementalHashes[ValueTuple.Create(manager, pieceIndex)] = IncrementalHashCache.Dequeue();
        incrementalHash.NextOffsetToHash = (long)manager.PieceLength * pieceIndex;
    }

    if (incrementalHash != null) {
        // Incremental hashing does not perform proper bounds checking to ensure
        // that pieces are correctly incrementally hashed even if 'count' is greater
        // than the PieceLength. This should never happen under normal operation, but
        // unit tests do it for convenience sometimes. Keep things safe by cancelling
        // incremental hashing if that occurs.
        if ((incrementalHash.NextOffsetToHash + count) > pieceEnd) {
            IncrementalHashes.Remove(ValueTuple.Create(manager, pieceIndex));
        } else if (incrementalHash.NextOffsetToHash == offset) {
            // In-order data: fold it into the running hash.
            incrementalHash.Hasher.TransformBlock(buffer, 0, count, buffer, 0);
            incrementalHash.NextOffsetToHash += count;
        }
    }

    if (WriteLimiter.TryProcess(count)) {
        // The limiter accepted the chunk — write synchronously on the IO loop.
        Interlocked.Add(ref pendingWrites, -count);
        Write(manager, offset, buffer, count);
    } else {
        // Rate limited: queue the write and wait for ProcessBufferedIO to
        // perform it (which also decrements pendingWrites).
        var tcs = new ReusableTaskCompletionSource<bool>();
        WriteQueue.Enqueue(new BufferedIO(manager, offset, buffer, count, tcs));
        await tcs.Task;
    }
}
/// <summary>
/// Writes a block to disk, feeding the piece's incremental hasher when the
/// data arrives in order, and queueing the write if the rate limiter cannot
/// accept it immediately. Write failures are reported to the engine via
/// <c>SetError</c> rather than thrown to the caller.
/// </summary>
/// <param name="manager">The torrent the data belongs to.</param>
/// <param name="offset">Absolute offset of the block within the torrent's data.</param>
/// <param name="buffer">The buffer containing the data to write.</param>
/// <param name="count">Number of bytes to write. Must be at least 1.</param>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="count"/> is less than 1.</exception>
internal async Task WriteAsync(TorrentManager manager, long offset, byte[] buffer, int count)
{
    if (count < 1)
        throw new ArgumentOutOfRangeException(nameof(count), $"Count must be greater than zero, but was {count}.");

    Interlocked.Add(ref bufferedWriteBytes, count);
    await IOLoop;

    int pieceIndex = (int)(offset / manager.Torrent.PieceLength);
    long pieceStart = (long)pieceIndex * manager.Torrent.PieceLength;
    long pieceEnd = pieceStart + manager.Torrent.PieceLength;

    // Only begin incrementally hashing a piece when its very first block is
    // written. Previously a hasher was created for any offset, leaving a
    // permanently-stalled entry in IncrementalHashes (it could never catch up
    // because the earlier bytes were never hashed) and leaking a cached hasher.
    if (!IncrementalHashes.TryGetValue(pieceIndex, out IncrementalHashData incrementalHash) && offset == pieceStart) {
        incrementalHash = IncrementalHashes[pieceIndex] = IncrementalHashCache.Dequeue();
        incrementalHash.NextOffsetToHash = pieceStart;
    }

    if (incrementalHash != null) {
        // A write which would overrun the end of the piece cannot be hashed
        // incrementally; drop the hasher so a full hash pass is used instead.
        if ((incrementalHash.NextOffsetToHash + count) > pieceEnd) {
            IncrementalHashes.Remove(pieceIndex);
        } else if (incrementalHash.NextOffsetToHash == offset) {
            // In-order data: fold it into the running hash.
            incrementalHash.Hasher.TransformBlock(buffer, 0, count, buffer, 0);
            incrementalHash.NextOffsetToHash += count;
        }
    }

    try {
        if (WriteLimiter.TryProcess(count)) {
            // The limiter accepted the chunk — write synchronously on the IO loop.
            Write(manager, offset, buffer, count);
        } else {
            // Rate limited: queue the write and wait for the buffered-IO pump
            // to perform it. RunContinuationsAsynchronously prevents awaiter
            // continuations from running inline on the IO loop thread when
            // the queued write completes.
            var tcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
            bufferedWrites.Enqueue(new BufferedIO(manager, offset, buffer, count, tcs));
            await tcs.Task;
        }
    } catch (Exception ex) {
        // Disk failures are surfaced through the engine's error handling.
        await SetError(manager, Reason.WriteFailure, ex);
    } finally {
        // Ensure the pending-byte count is balanced even if SetError throws.
        Interlocked.Add(ref bufferedWriteBytes, -count);
    }
}
// Creates a DiskManager bound to the given engine, writing through the
// supplied PieceWriter, and starts the once-per-second maintenance loop.
internal DiskManager(ClientEngine engine, PieceWriter writer)
{
    this.engine = engine;
    this.Writer = writer;

    this.bufferedReads = new Queue<BufferedIO>();
    this.bufferedWrites = new Queue<BufferedIO>();

    this.readMonitor = new SpeedMonitor();
    this.writeMonitor = new SpeedMonitor();
    this.ReadLimiter = new RateLimiter();
    this.WriteLimiter = new RateLimiter();

    // Once per second: refresh the transfer-rate measurements, resize the
    // limiter chunks from the current settings, and flush whatever queued IO
    // the limiters now allow. Returning false (after disposal) stops the loop.
    IOLoop.QueueTimeout(TimeSpan.FromSeconds(1), () => {
        readMonitor.Tick();
        writeMonitor.Tick();

        WriteLimiter.UpdateChunks(engine.Settings.MaximumDiskWriteRate, WriteRate);
        ReadLimiter.UpdateChunks(engine.Settings.MaximumDiskReadRate, ReadRate);

        ProcessBufferedIO();

        return !Disposed;
    });
}