/// <summary>
/// Test writer: logs the flushed block, removes it from the tracked set and
/// returns its buffer to the shared pool.
/// </summary>
public override void Write(BufferedIO data)
{
    Console.WriteLine("Flushed {0}:{1} to disk", data.PieceIndex, data.PieceOffset / 1000);
    tester.blocks.Remove(data);

    // FreeBuffer needs a ref, so copy the segment into a local first.
    ArraySegment<byte> segment = data.Buffer;
    PieceWriterTests.Buffer.FreeBuffer(ref segment);
}
/// <summary>
/// Queues a write of <paramref name="count"/> bytes from <paramref name="buffer"/>
/// at the given torrent offset; <paramref name="callback"/> is invoked on completion.
/// </summary>
internal void QueueWrite(TorrentManager manager, long offset, byte[] buffer, int count, DiskIOCallback callback)
{
    // Recycle a pooled BufferedIO instead of allocating a fresh one.
    BufferedIO operation = cache.Dequeue();
    operation.Initialise(manager, buffer, offset, count, manager.Torrent.PieceLength, manager.Torrent.Files);
    QueueWrite(operation, callback);
}
/// <summary>
/// Writes the buffered block to disk and records the byte count on the write
/// monitor. Runs on the IO loop thread.
/// </summary>
private void PerformWrite(BufferedIO io)
{
    // The previous version computed `io.PieceOffset / Piece.BlockSize` into an
    // unused local (with a comment claiming the block state was set to
    // "Written" — it never was). The dead code and misleading comment are removed.
    try
    {
        // Perform the actual write
        writer.Write(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        writeMonitor.AddDelta(io.Count);
    }
    finally
    {
        // Mark complete and notify the caller even if the write threw.
        // NOTE(review): the callback always receives `true`, even on an
        // exception path — confirm callers rely on the exception propagating.
        io.Complete = true;
        if (io.Callback != null)
        {
            io.Callback(true);
        }
    }
}
/// <summary>
/// Queues a write. If called on the IO loop thread the write runs immediately;
/// otherwise it is enqueued and the loop task is scheduled.
/// </summary>
void QueueWrite(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;

    // Already on the IO loop: perform the write synchronously and recycle.
    if (Thread.CurrentThread == IOLoop.thread)
    {
        PerformWrite(io);
        cache.Enqueue(io);
        return;
    }

    lock (bufferLock)
    {
        bufferedWrites.Enqueue(io);
        // First pending write: schedule the loop task to drain the queue.
        if (bufferedWrites.Count == 1)
            DiskManager.IOLoop.Queue(LoopTask);
    }
}
/// <summary>
/// Reads the requested block from disk, recording the actual byte count and
/// invoking the callback with whether the full amount was read.
/// </summary>
private void PerformRead(BufferedIO io)
{
    try
    {
        bool fullyRead = writer.Read(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        io.ActualCount = fullyRead ? io.Count : 0;
        readMonitor.AddDelta(io.ActualCount);
    }
    finally
    {
        // Always mark complete and notify, even if the read threw.
        io.Complete = true;
        if (io.Callback != null)
            io.Callback(io.ActualCount == io.Count);
    }
}
/// <summary>
/// Queues a read. If called on the IO loop thread the read runs immediately;
/// otherwise it is enqueued and the loop task is scheduled.
/// </summary>
void QueueRead(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;
    // BUGFIX: the old check `Task.CurrentId == IOLoop.thread.Id` compared the
    // current Task's id (null outside any Task) with a thread's managed id —
    // these are unrelated identifier spaces, so the fast path could fire
    // spuriously or never. Compare the threads directly, matching the sibling
    // QueueWrite/QueueRead overloads in this file.
    if (Thread.CurrentThread == IOLoop.thread)
    {
        PerformRead(io);
        cache.Enqueue(io);
    }
    else
    {
        lock (bufferLock)
        {
            bufferedReads.Enqueue(io);
            // Only schedule the loop task on the empty -> non-empty transition.
            if (bufferedReads.Count == 1)
            {
                DiskManager.IOLoop.Queue(LoopTask);
            }
        }
    }
}
/// <summary>
/// Generates the hash for the given piece
/// </summary>
/// <param name="pieceIndex">The piece to generate the hash for</param>
/// <returns>The 20 byte SHA1 hash of the supplied piece</returns>
internal byte[] GetHash(int pieceIndex, bool asynchronous)
{
    // NOTE(review): the `asynchronous` parameter is never read in this body —
    // confirm whether it is vestigial or should select a non-blocking path.
    int bytesToRead = 0;
    long pieceStartIndex = (long)this.pieceLength * pieceIndex;
    BufferedIO io = null;
    ArraySegment<byte> hashBuffer = BufferManager.EmptyBuffer;
    List<BufferedIO> list = new List<BufferedIO>();

    // Queue one read per block of the piece; each read gets its own pooled
    // buffer and a wait handle that is signalled when the read completes.
    for (long i = pieceStartIndex; i < (pieceStartIndex + pieceLength); i += Piece.BlockSize)
    {
        hashBuffer = BufferManager.EmptyBuffer;
        ClientEngine.BufferManager.GetBuffer(ref hashBuffer, Piece.BlockSize);
        bytesToRead = Piece.BlockSize;
        // The final piece may be shorter than a whole block.
        if ((i + bytesToRead) > fileSize)
            bytesToRead = (int)(fileSize - i);
        io = new BufferedIO(hashBuffer, i, bytesToRead, manager.Torrent.PieceLength, manager.Torrent.Files, SavePath);
        io.WaitHandle = new ManualResetEvent(false);
        list.Add(io);
        manager.Engine.DiskManager.QueueRead(io);
        // A short read means we hit the end of the data — stop queueing.
        if (bytesToRead != Piece.BlockSize)
            break;
    }

    // The hasher is shared, so serialize access while folding in each block
    // as its read completes.
    lock (hasher)
    {
        hasher.Initialize();
        for (int i = 0; i < list.Count; i++)
        {
            list[i].WaitHandle.WaitOne();
            list[i].WaitHandle.Close();
            hashBuffer = list[i].buffer;
            hasher.TransformBlock(hashBuffer.Array, hashBuffer.Offset, list[i].ActualCount, hashBuffer.Array, hashBuffer.Offset);
            ClientEngine.BufferManager.FreeBuffer(ref list[i].buffer);
        }
        // NOTE(review): `hashBuffer` still refers to the last block's segment,
        // whose backing buffer was just returned to the pool; it is only used
        // here with a zero count, but this looks fragile — confirm
        // TransformFinalBlock never touches the array when inputCount is 0.
        hasher.TransformFinalBlock(hashBuffer.Array, hashBuffer.Offset, 0);
        return hasher.Hash;
    }
}
/// <summary>
/// Queues a write. If called on the IO loop thread the write runs immediately;
/// otherwise it is enqueued and the loop task is scheduled.
/// </summary>
void QueueWrite(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;
    // BUGFIX: the old check `Task.CurrentId == IOLoop.thread.Id` compared the
    // current Task's id (null outside any Task) with a thread's managed id —
    // unrelated identifier spaces, so the fast path was unreliable. Compare
    // the threads directly, matching the other Queue* overloads in this file.
    if (Thread.CurrentThread == IOLoop.thread)
    {
        PerformWrite(io);
        cache.Enqueue(io);
    }
    else
    {
        lock (bufferLock)
        {
            bufferedWrites.Enqueue(io);
            // Only schedule the loop task on the empty -> non-empty transition.
            if (bufferedWrites.Count == 1)
                DiskManager.IOLoop.Queue(LoopTask);
        }
    }
}
/// <summary>
/// Reads the requested block from disk, records the byte count that was
/// actually read, and invokes the callback with the success flag.
/// </summary>
private void PerformRead(BufferedIO io)
{
    try
    {
        // A failed/partial read is reported as zero bytes.
        if (writer.Read(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size))
        {
            io.ActualCount = io.Count;
        }
        else
        {
            io.ActualCount = 0;
        }
        readMonitor.AddDelta(io.ActualCount);
    }
    finally
    {
        // Completion and notification happen even on an exception path.
        io.Complete = true;
        if (io.Callback != null)
        {
            io.Callback(io.ActualCount == io.Count);
        }
    }
}
/// <summary>
/// Writes the buffered block to disk and records the byte count on the write
/// monitor. Runs on the IO loop thread.
/// </summary>
private void PerformWrite(BufferedIO io)
{
    // The previous version computed `io.PieceOffset / Piece.BlockSize` into an
    // unused local `index` (and carried a comment claiming the block's state
    // was set to "Written", which never happened). Dead code removed.
    try
    {
        // Perform the actual write
        writer.Write(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        writeMonitor.AddDelta(io.Count);
    }
    finally
    {
        // NOTE(review): the callback always receives `true`, even when the
        // write threw — confirm callers rely on the exception propagating.
        io.Complete = true;
        if (io.Callback != null)
            io.Callback(true);
    }
}
/// <summary>
/// Queues a read. If called on the IO loop thread the read runs immediately;
/// otherwise it is enqueued and the loop task is scheduled.
/// </summary>
void QueueRead(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;

    // On the IO loop already: read synchronously and recycle the IO object.
    if (Thread.CurrentThread == IOLoop.thread)
    {
        PerformRead(io);
        cache.Enqueue(io);
        return;
    }

    lock (bufferLock)
    {
        bufferedReads.Enqueue(io);
        // Only the first pending read needs to schedule the drain task.
        if (bufferedReads.Count == 1)
            DiskManager.IOLoop.Queue(LoopTask);
    }
}
/// <summary>
/// Attempts to satisfy a read from the in-memory write cache; falls back to
/// the underlying writer when the block is not cached.
/// </summary>
/// <param name="data">The read request; its buffer receives the cached bytes.</param>
/// <returns>The number of bytes copied into <paramref name="data"/>'s buffer.</returns>
public override int Read(BufferedIO data)
{
    if(data == null)
        throw new ArgumentNullException("data");

    // Keep cached writes ordered by torrent offset before searching.
    memoryBuffer.Sort(delegate(BufferedIO left, BufferedIO right) { return left.Offset.CompareTo(right.Offset); });

    // Look for a cached write covering the same piece/block as the request.
    BufferedIO io = memoryBuffer.Find(delegate(BufferedIO m) { return (data.PieceIndex == m.PieceIndex && data.BlockIndex == m.BlockIndex); });
    if (io == null)
        return writer.Read(data);

    // Copy the overlap between the cached block and the requested range.
    // `io.Offset - data.Offset` shifts into the cached segment when the
    // request starts partway through it.
    int toCopy = Math.Min(data.Count, io.Count + (int)(io.Offset - data.Offset));
    Buffer.BlockCopy(io.buffer.Array, io.buffer.Offset + (int)(io.Offset - data.Offset), data.buffer.Array, data.buffer.Offset, toCopy);
    data.ActualCount += toCopy;
    return toCopy;
}
/// <summary>
/// No-op reader: reports the full requested count as read without touching
/// the buffer.
/// </summary>
public override int Read(BufferedIO data)
{
    int requested = data.Count;
    data.ActualCount = requested;
    return requested;
}
/// <summary>
/// Queues a block of data to be written asynchronously
/// </summary>
/// <param name="data">The buffered IO describing the block to write (buffer,
/// offset, count and target files)</param>
internal void QueueWrite(BufferedIO data)
{
    // Delegates straight to the engine's disk manager.
    manager.Engine.DiskManager.QueueWrite(data);
}
/// <summary>
/// Failing test reader: logs the attempt and reports that no bytes were read.
/// </summary>
public override int Read(BufferedIO data)
{
    Console.WriteLine("Attempting to read - returning zero");
    return 0;
}
/// <summary>
/// Writes every block through the memory writer, then reads each block back
/// and checks the expected fill pattern (piece * BlockCount + block).
/// </summary>
public void TestMemoryStandardReads()
{
    ArraySegment<byte> buffer = BufferManager.EmptyBuffer;
    Buffer.GetBuffer(ref buffer, BlockSize);
    Initialise(buffer);

    // Push all pending blocks through the first-level writer.
    foreach (BufferedIO data in this.blocks.ToArray())
        level1.Write(data);

    for (int pieceIndex = 0; pieceIndex < PieceCount; pieceIndex++)
    {
        for (int blockIndex = 0; blockIndex < BlockCount; blockIndex++)
        {
            BufferedIO io = new BufferedIO(buffer, pieceIndex, blockIndex, BlockSize, rig.Manager.Torrent.PieceLength, rig.Manager.Torrent.Files, rig.Manager.FileManager.SavePath);
            level1.ReadChunk(io);

            // Every byte of the block carries the block's sequential index.
            int expected = pieceIndex * BlockCount + blockIndex;
            for (int offset = 0; offset < BlockSize; offset++)
                Assert.AreEqual(buffer.Array[buffer.Offset + offset], expected, "#1");
        }
    }
}
/// <summary>
/// Writes five blocks split across the two cache levels, reads a whole piece
/// back through level1, and verifies each block's fill pattern.
/// </summary>
public void TestMemoryOffsetReads()
{
    level1.Write(blocks[0]);
    level2.Write(blocks[1]);
    level1.Write(blocks[2]);
    level2.Write(blocks[3]);
    level2.Write(blocks[4]);

    ArraySegment<byte> buffer = BufferManager.EmptyBuffer;
    Buffer.GetBuffer(ref buffer, PieceSize);
    Initialise(buffer);

    int piece = 0;
    int block = 0;
    BufferedIO io = new BufferedIO(buffer, piece, block, PieceSize, rig.Manager.Torrent.PieceLength, rig.Manager.Torrent.Files, rig.Manager.FileManager.SavePath);
    level1.ReadChunk(io);

    for (block = 0; block < 5; block++)
    {
        for (int i = 0; i < BlockSize; i++)
        {
            // BUGFIX: the failure message previously reported the byte offset
            // `i` as the block number; report the actual block and the offset.
            Assert.AreEqual(block, buffer.Array[buffer.Offset + i + block * BlockSize], "Piece 0. Block " + block + ". Offset " + i);
        }
    }
}
/// <summary>
/// Null writer: intentionally discards the data instead of persisting it.
/// </summary>
public override void Write(BufferedIO data) { }
/// <summary>
/// Fake reader used by tests: records which file the read would have touched
/// and (unless DontWrite is set) fills the buffer with a deterministic pattern.
/// </summary>
/// <returns>Always reports the full requested count as read.</returns>
public override int Read(BufferedIO data)
{
    // Walk the file list to find which file contains the requested offset,
    // and remember its path so tests can assert which files were accessed.
    long idx = data.Offset;
    for (int i = 0; i < data.Files.Length; i++)
    {
        if (idx < data.Files[i].Length)
        {
            string path = System.IO.Path.Combine(data.Path, data.Files[i].Path);
            if (!Paths.Contains(path))
                Paths.Add(path);
            break;
        }
        else
        {
            idx -= data.Files[i].Length;
        }
    }
    data.ActualCount = data.Count;
    if (DontWrite)
        return data.Count;
    // NOTE(review): the fill pattern is derived from the buffer's pool offset
    // (data.Buffer.Offset), not from the torrent offset being read — confirm
    // this shouldn't be based on data.Offset or the piece/block index instead.
    for (int i = 0; i < data.Count; i++)
        data.Buffer.Array[data.Buffer.Offset + i] = (byte)(data.Buffer.Offset + i);
    return data.Count;
}
/// <summary>
/// Creates event args for the block described by <paramref name="data"/>,
/// resolving the block within its piece from the piece offset and length.
/// </summary>
internal BlockEventArgs(BufferedIO data)
    : base(data.Id.TorrentManager)
{
    int blockIndex = Block.IndexOf(data.Piece.Blocks, data.PieceOffset, data.Count);
    Block block = data.Piece[blockIndex];
    Init(block, data.Piece, data.Id);
}
/// <summary>
/// Writes the block via the caching path (forceWrite = false).
/// </summary>
public override void Write(BufferedIO data) { Write(data, false); }
/// <summary>
/// Writes a block, either straight through to the underlying writer
/// (<paramref name="forceWrite"/>) or into the in-memory cache, flushing the
/// oldest cached block first when capacity would be exceeded.
/// </summary>
public void Write(BufferedIO data, bool forceWrite)
{
    // Bypass the cache entirely when an immediate write was requested.
    if (forceWrite)
    {
        writer.Write(data);
        return;
    }

    // Evict the oldest cached entry if this block would overflow capacity.
    bool wouldOverflow = Used > (Capacity - data.Count);
    if (wouldOverflow)
        Flush(delegate(BufferedIO io) { return memoryBuffer[0] == io; });

    memoryBuffer.Add(data);
}
///<summary>
///calculates all hashes over the files which should be included in the torrent
///</summmary>
/// <param name="path">Base path the torrent files are read from.</param>
/// <param name="files">Files to include in the hash computation.</param>
/// <param name="writer">Writer used to read the raw file data.</param>
/// <returns>All piece hashes concatenated (20 bytes per piece).</returns>
byte[] CalcPiecesHash(string path, TorrentFile[] files, PieceWriter writer)
{
    var piecesBuffer = new byte[GetPieceCount(files)*20]; //holds all the pieces hashes
    var piecesBufferOffset = 0;

    var totalLength = Toolbox.Accumulate<TorrentFile>(files, delegate(TorrentFile f) { return f.Length; });
    var buffer = new ArraySegment<byte>(new byte[PieceLength]);

    // Hash one PieceLength-sized chunk at a time; the final chunk may be short.
    while (totalLength > 0)
    {
        var bytesToRead = (int)Math.Min(totalLength, PieceLength);
        // (piecesBufferOffset/20) is the index of the piece currently being hashed.
        var io = new BufferedIO(null, buffer, (piecesBufferOffset/20)*PieceLength, bytesToRead, bytesToRead, files, path);
        totalLength -= writer.ReadChunk(io);

        // If we are using the synchronous version, result is null
        // NOTE(review): `result` is not declared in this method — presumably a
        // field holding the async operation state; confirm its lifecycle.
        if (result != null && result.Aborted)
            return piecesBuffer;

        var currentHash = hasher.ComputeHash(buffer.Array, 0, io.ActualCount);
        // NOTE(review): the progress arguments below look suspect —
        // `(piecesBuffer.Length - 20)*PieceLength` scales the BYTE length of
        // the hash buffer, not the piece count; verify the intended total.
        RaiseHashed(new TorrentCreatorEventArgs(0, 0, //reader.CurrentFile.Position, reader.CurrentFile.Length,
            piecesBufferOffset*PieceLength, (piecesBuffer.Length - 20)*PieceLength));
        Buffer.BlockCopy(currentHash, 0, piecesBuffer, piecesBufferOffset, currentHash.Length);
        piecesBufferOffset += currentHash.Length;
    }
    return piecesBuffer;
}
/// <summary>
/// Handles a received block: validates it against the piece picker, raises the
/// block-received event, queues the disk write, and flags the piece for
/// hashing once all its blocks have arrived.
/// </summary>
public void PieceDataReceived(BufferedIO data)
{
    Piece piece;
    // Blocks that don't correspond to an outstanding request are discarded.
    if (!picker.ValidatePiece(data.Id, data.PieceIndex, data.PieceOffset, data.Count, out piece))
        return;

    PeerId id = data.Id;
    data.Piece = piece;
    id.LastBlockReceived = DateTime.Now;
    id.TorrentManager.PieceManager.RaiseBlockReceived(new BlockEventArgs(data));
    id.TorrentManager.FileManager.QueueWrite(data);

    // Once the last block arrives, mark the piece as needing a hash check.
    if (data.Piece.AllBlocksReceived)
        this.unhashedPieces[data.PieceIndex] = true;
}