public void ReadFail()
{
    // Force the underlying writer to signal a failure on the next read.
    writer.read = true;
    Hookup();

    // Issue the read synchronously; it will hit the failing writer.
    var readTask = diskManager.ReadAsync(rig.Manager, 0, data, data.Length);
    readTask.Wait();

    // Verify the failure was surfaced as expected.
    CheckFail();
}
public async Task ExceedReadRate()
{
    // Clamp the read rate below one block so no read can be serviced.
    settings.MaximumDiskReadRate = 1;
    await diskManager.Tick(1000);

    // Queue up 6 reads; none should process while the rate is clamped.
    var block = new byte[Piece.BlockSize];
    int queued = 6;
    var pendingReads = new List<Task>();
    for (int read = 0; read < queued; read++)
        pendingReads.Add(diskManager.ReadAsync(fileData, 0, block, block.Length).AsTask());
    Assert.AreEqual(block.Length * queued, diskManager.PendingReads, "#1");

    // We should still process none after another tick.
    await diskManager.Tick(1000);
    Assert.AreEqual(block.Length * queued, diskManager.PendingReads, "#2");

    // Give a proper max read rate: exactly two blocks per tick.
    settings.MaximumDiskReadRate = Piece.BlockSize * 2;
    for (int i = 0; i < 2; i++) {
        await diskManager.Tick(1000);
        queued -= 2;
        Assert.AreEqual(block.Length * queued, diskManager.PendingReads, "#3." + i);
    }

    // Reads added after the allowance is consumed still wait for the next tick.
    for (int i = 0; i < 2; i++) {
        queued++;
        pendingReads.Add(diskManager.ReadAsync(fileData, 0, block, block.Length).AsTask());
    }
    Assert.AreEqual(block.Length * queued, diskManager.PendingReads, "#4." + queued);

    // Drain the queue two blocks at a time.
    while (queued > 0) {
        await diskManager.Tick(1000);
        queued -= 2;
        Assert.AreEqual(block.Length * queued, diskManager.PendingReads, "#5." + queued);
    }

    // Every queued read should have completed without faulting.
    foreach (var pending in pendingReads)
        Assert.DoesNotThrowAsync(async () => await pending.WithTimeout(1000), "#6");
}
public async Task ExceedReadRate()
{
    // Ensure the read rate is smaller than a block so no read can be serviced.
    settings.MaximumDiskReadRate = 1;
    await diskManager.Tick(1000);

    // Queue up 6 reads, none should process.
    var buffer = new byte[Piece.BlockSize];
    var tasks = new List<Task>();
    for (int i = 0; i < 6; i++)
        tasks.Add(diskManager.ReadAsync(fileData, 0, buffer, buffer.Length));

    // We should still process none.
    await diskManager.Tick(1000);
    Assert.IsTrue(tasks.All(t => !t.IsCompleted), "#1");

    // Give a proper max read rate: exactly two blocks per tick.
    settings.MaximumDiskReadRate = Piece.BlockSize * 2;
    for (int i = 0; i < 2; i++) {
        await diskManager.Tick(1000);
        // BUGFIX: every assertion previously used the label "#1", making it
        // impossible to tell which site failed; labels are now unique.
        Assert.AreEqual(2, tasks.Count(t => t.IsCompleted), "#2." + i);
        tasks.RemoveAll(t => t.IsCompleted);
    }

    // If we add more reads after we used up our allowance they still won't process.
    for (int i = 0; i < 2; i++)
        tasks.Add(diskManager.ReadAsync(fileData, 0, buffer, buffer.Length));

    // Drain the remainder, two completions per tick.
    while (tasks.Count > 0) {
        await diskManager.Tick(1000);
        Assert.AreEqual(2, tasks.Count(t => t.IsCompleted), "#3");
        tasks.RemoveAll(t => t.IsCompleted);
    }
}
/// <summary>
/// Drains the peer's outgoing message queue, reading piece payloads from disk
/// as needed and sending each message over the peer's connection.
/// NOTE(review): this is 'async void', so exceptions escaping it would be
/// unobservable; failures are routed through TrySetError / CleanupSocket instead.
/// </summary>
/// <param name="manager">The torrent which the peer is associated with.</param>
/// <param name="id">The peer whose message queue you want to start processing</param>
internal async void ProcessQueue(TorrentManager manager, PeerId id)
{
    while (id.QueueLength > 0) {
        var msg = id.Dequeue();
        var pm = msg as PieceMessage;
        try {
            if (pm != null) {
                // Piece payloads are read from disk into a pooled buffer.
                pm.Data = ClientEngine.BufferManager.GetBuffer(pm.ByteLength);
                try {
                    await DiskManager.ReadAsync(manager.Torrent, pm.StartOffset + ((long)pm.PieceIndex * manager.Torrent.PieceLength), pm.Data, pm.RequestLength);
                } catch (Exception ex) {
                    // A failed disk read moves the whole torrent into the error state.
                    // NOTE(review): this early return skips 'id.ProcessingQueue = false'
                    // below, leaving the flag set — confirm the error path makes that
                    // irrelevant.
                    manager.TrySetError(Reason.ReadFailure, ex);
                    return;
                }
                id.PiecesSent++;
            }
            await PeerIO.SendMessageAsync(id.Connection, id.Encryptor, msg, manager.UploadLimiters, id.Monitor, manager.Monitor);
            if (msg is PieceMessage) {
                id.IsRequestingPiecesCount--;
            }
            id.LastMessageSent.Restart();
        } catch {
            // Any send failure tears down the connection and stops processing.
            CleanupSocket(manager, id);
            break;
        } finally {
            // Always return the pooled piece buffer, on success or failure.
            if (pm?.Data != null) {
                ClientEngine.BufferManager.FreeBuffer(pm.Data);
            }
        }
    }
    // Mark the queue idle so a future enqueue can restart processing.
    id.ProcessingQueue = false;
}
public void ReadFail()
{
    // Make the writer report a failure on the next read.
    writer.read = true;

    // Perform the read synchronously; the failure should flip the manager
    // into the error state.
    var readTask = diskManager.ReadAsync(rig.Manager, 0, data, data.Length);
    readTask.Wait();

    Assert.AreEqual(TorrentState.Error, rig.Manager.State);
}
public async Task ExceedReadRate()
{
    // Clamp the read rate below a single block so nothing can be serviced.
    await diskManager.UpdateSettingsAsync(new EngineSettingsBuilder { MaximumDiskReadRate = 1 }.ToSettings());
    await diskManager.Tick(1000).WithTimeout();

    // Queue up 6 reads; none should process while the rate is clamped.
    var block = new byte[Piece.BlockSize];
    int outstanding = 6;
    var reads = new List<Task>();
    for (int queued = 0; queued < outstanding; queued++)
        reads.Add(diskManager.ReadAsync(fileData, new BlockInfo(0, 0, block.Length), block).AsTask());
    Assert.AreEqual(block.Length * outstanding, diskManager.PendingReadBytes, "#1");

    // We should still process none after another tick.
    await diskManager.Tick(1000).WithTimeout();
    Assert.AreEqual(block.Length * outstanding, diskManager.PendingReadBytes, "#2");

    // Give a proper max read rate: two blocks per tick.
    await diskManager.UpdateSettingsAsync(new EngineSettingsBuilder { MaximumDiskReadRate = Piece.BlockSize * 2 }.ToSettings());
    for (int tick = 0; tick < 2; tick++) {
        await diskManager.Tick(1000).WithTimeout();
        // Exactly two reads finish per tick; await them and drop them from the list.
        for (int done = 0; done < 2; done++) {
            var finished = await Task.WhenAny(reads).WithTimeout();
            await finished;
            reads.Remove(finished);
        }
        Assert.IsFalse(reads.Any(read => read.IsCompleted));
        outstanding -= 2;
        Assert.AreEqual(block.Length * outstanding, diskManager.PendingReadBytes, "#3." + tick);
    }

    // If we add more reads after we used up our allowance they still won't process.
    for (int extra = 0; extra < 2; extra++) {
        outstanding++;
        reads.Add(diskManager.ReadAsync(fileData, new BlockInfo(0, 0, block.Length), block).AsTask());
    }
    Assert.AreEqual(block.Length * outstanding, diskManager.PendingReadBytes, "#4." + outstanding);

    // Drain the queue two blocks per tick until nothing is left.
    while (outstanding > 0) {
        await diskManager.Tick(1000).WithTimeout();
        for (int done = 0; done < 2; done++) {
            var finished = await Task.WhenAny(reads).WithTimeout();
            await finished;
            reads.Remove(finished);
        }
        Assert.IsFalse(reads.Any(read => read.IsCompleted));
        outstanding -= 2;
        Assert.AreEqual(block.Length * outstanding, diskManager.PendingReadBytes, "#5." + outstanding);
    }
}
/// <summary>
/// Begins draining the peer's outgoing message queue on a threadpool thread,
/// renting pooled buffers for piece data and message serialization, and sending
/// each dequeued message over the peer's connection. Returns immediately if the
/// queue is already being processed (BeginProcessing() returned false).
/// NOTE(review): 'async void' — failures are reported via TrySetError /
/// CleanupSocket after hopping back to the main loop, not via an exception.
/// </summary>
/// <param name="manager">The torrent which the peer is associated with.</param>
/// <param name="id">The peer whose message queue you want to start processing</param>
internal async void TryProcessQueue(TorrentManager manager, PeerId id)
{
    // Only one drain loop may run per peer at a time.
    if (!id.MessageQueue.BeginProcessing()) {
        return;
    }
    await MainLoop.SwitchToThreadpool();

    ByteBufferPool.Releaser messageBuffer = default;
    ByteBufferPool.Releaser pieceBuffer = default;
    PeerMessage msg;
    try {
        while ((msg = id.MessageQueue.TryDequeue()) != null) {
            var msgLength = msg.ByteLength;
            if (msg is PieceMessage pm) {
                // Rent a buffer for piece data lazily and reuse it across iterations.
                if (pieceBuffer.Buffer == null) {
                    pieceBuffer = DiskManager.BufferPool.Rent(msgLength, out ByteBuffer _);
                }
                // NOTE(review): ownership of the releaser is shared with the message
                // here; the finally below also disposes it — presumably Releaser
                // tolerates multiple Dispose calls. Confirm.
                pm.DataReleaser = pieceBuffer;
                try {
                    await DiskManager.ReadAsync(manager, pm.StartOffset + ((long)pm.PieceIndex * manager.Torrent.PieceLength), pm.Data, pm.RequestLength).ConfigureAwait(false);
                } catch (Exception ex) {
                    // Hop back to the main loop before mutating torrent state.
                    await ClientEngine.MainLoop;
                    manager.TrySetError(Reason.ReadFailure, ex);
                    return;
                }
                // Interlocked: this counter is also read/written from other threads.
                System.Threading.Interlocked.Increment(ref id.piecesSent);
            } else {
                // Non-piece message: release any piece buffer held from a prior iteration.
                pieceBuffer.Dispose();
            }
            // Grow the serialization buffer only when the next message won't fit.
            if (messageBuffer.Buffer == null || messageBuffer.Buffer.Data.Length < msg.ByteLength) {
                messageBuffer.Dispose();
                messageBuffer = NetworkIO.BufferPool.Rent(msgLength, out ByteBuffer _);
            }
            await PeerIO.SendMessageAsync(id.Connection, id.Encryptor, msg, manager.UploadLimiters, id.Monitor, manager.Monitor, messageBuffer.Buffer).ConfigureAwait(false);
            if (msg is PieceMessage) {
                System.Threading.Interlocked.Decrement(ref id.isRequestingPiecesCount);
            }
            id.LastMessageSent.Restart();
        }
    } catch {
        // Any send failure tears the connection down on the main loop.
        await ClientEngine.MainLoop;
        CleanupSocket(manager, id);
    } finally {
        // Always return both pooled buffers.
        messageBuffer.Dispose();
        pieceBuffer.Dispose();
    }
}
public void ReadFail()
{
    // Make the underlying writer fail the next read.
    writer.read = true;

    // The failure must propagate out of the read as an exception.
    Assert.ThrowsAsync<Exception>(async () => await diskManager.ReadAsync(data, 0, buffer, buffer.Length));
}
public void ReadFail()
{
    // Make the underlying writer fail the next read.
    writer.read = true;

    // The failure must propagate out of the read as an exception.
    Assert.ThrowsAsync<Exception>(async () => await diskManager.ReadAsync(data, new BlockInfo(0, 0, Constants.BlockSize), buffer));
}