/// <summary>
/// Repeatedly calls Read until the requested data.Count bytes have been read
/// or the underlying reader returns 0 (EOF). The BufferedIO's buffer/offset/
/// count are mutated during the loop and restored before returning; the total
/// number of bytes read is stored in data.ActualCount and returned.
/// </summary>
internal int ReadChunk(BufferedIO data)
{
    // Copy the initial buffer, offset and count so the values won't
    // be lost when doing the reading.
    var orig = data.buffer;
    var origOffset = data.Offset;
    var origCount = data.Count;
    var read = 0;
    var totalRead = 0;

    // Read the data in chunks. For every chunk we read, advance the buffer
    // segment and offset and subtract from the count. This way we can keep
    // filling in the buffer correctly.
    //
    // BUG FIX: the original condition was "totalRead != data.Count", which
    // compares the running total against the *shrinking* remaining count.
    // When a partial read returns exactly half of the remaining bytes
    // (e.g. count == 10, first Read returns 5 -> totalRead == 5 and
    // data.Count == 5) the loop exited early with only part of the data.
    // Compare against the saved original count instead; the break below
    // still handles EOF (read == 0) and completion (data.Count == 0).
    while (totalRead < origCount)
    {
        read = Read(data);
        data.buffer = new ArraySegment<byte>(data.buffer.Array, data.buffer.Offset + read, data.buffer.Count - read);
        data.Offset += read;
        data.Count -= read;
        totalRead += read;
        if (read == 0 || data.Count == 0)
            break;
    }

    // Restore the original values so the object remains unchanged
    // as compared to when the user passed it in.
    data.buffer = orig;
    data.Offset = origOffset;
    data.Count = origCount;
    data.ActualCount = totalRead;
    return totalRead;
}
/// <summary>
/// Opens (or creates) a B-tree index backed by four streams: the b-tree page
/// stream, the data record stream, and one free-space-map stream for each.
/// When <paramref name="create"/> is true all streams are truncated and fresh
/// header/root pages and empty free-space maps are built; otherwise the header
/// and root pages are read back and the maps are sized from stream lengths.
/// </summary>
public BTree(Stream bTreeStream, Stream dataStream, Stream bTreeMap, Stream dataMap, bool create)
{
    BtreeIO = new BufferedIO(bTreeStream, (int)Static.PageSize);
    DataIO = new BufferedIO(dataStream, Record.ByteSize);
    if (create)
    {
        // Wipe any previous contents before laying down fresh pages.
        bTreeStream.SetLength(0);
        dataStream.SetLength(0);
        bTreeMap.SetLength(0);
        dataMap.SetLength(0);
        BTreeHeader = new BTreeHeaderPage(Static.FirstRootIndex);
        RootPage = new BTreePage(0, Static.FirstRootIndex);
        // indexEnd = 2 for the b-tree map — presumably header + root pages
        // already occupy two slots; the data stream starts empty. TODO confirm.
        BTreeFreeSpaceMap = new FreeSpaceMap(bTreeMap, 2);
        DataFreeSpaceMap = new FreeSpaceMap(dataMap, 0);
    }
    else
    {
        // Header page lives at index 0.
        BTreeHeader = new BTreeHeaderPage(BtreeIO.ReadPage(0));
        // NOTE(review): this reads the root via the *static* member
        // BTreeHeaderPage.RootIndex rather than the BTreeHeader instance that
        // was just loaded — confirm that is intentional.
        RootPage = new BTreePage(BtreeIO.ReadPage(BTreeHeaderPage.RootIndex));
        // Index end = number of fixed-size records currently in each stream.
        BTreeFreeSpaceMap = new FreeSpaceMap(bTreeMap, bTreeStream.Length / Static.PageSize);
        DataFreeSpaceMap = new FreeSpaceMap(dataMap, dataStream.Length / Record.ByteSize);
    }
}
/// <summary>
/// Takes a pooled BufferedIO from the cache, initialises it with the given
/// write parameters, and hands it off to the internal queueing overload.
/// </summary>
internal void QueueWrite(TorrentManager manager, long offset, byte[] buffer, int count, DiskIOCallback callback)
{
    var bufferedIo = cache.Dequeue();
    var torrent = manager.Torrent;
    bufferedIo.Initialise(manager, buffer, offset, count, torrent.PieceLength, torrent.Files);
    QueueWrite(bufferedIo, callback);
}
/// <summary>
/// Calculates all piece hashes over the files which should be included in the
/// torrent, returning them concatenated (20 bytes per piece hash).
/// </summary>
byte[] CalcPiecesHash(string path, TorrentFile[] files, PieceWriter writer)
{
    var piecesBuffer = new byte[GetPieceCount(files) * 20]; // holds all the piece hashes, 20 bytes each
    var piecesBufferOffset = 0;
    // Total number of bytes across every file in the torrent.
    var totalLength = Toolbox.Accumulate<TorrentFile>(files, delegate(TorrentFile f) { return (f.Length); });
    var buffer = new ArraySegment<byte>(new byte[PieceLength]);
    while (totalLength > 0)
    {
        // The last piece may be shorter than PieceLength.
        var bytesToRead = (int)Math.Min(totalLength, PieceLength);
        // Absolute read offset = piece index (piecesBufferOffset / 20) * PieceLength.
        // NOTE(review): this is an int multiplication — for very large torrents
        // it could overflow before being passed to BufferedIO; confirm the
        // parameter type and expected sizes make this safe.
        var io = new BufferedIO(null, buffer, (piecesBufferOffset / 20) * PieceLength, bytesToRead, bytesToRead, files, path);
        totalLength -= writer.ReadChunk(io);
        // If we are using the synchronous version, result is null.
        // (result and hasher are fields declared outside this view.)
        if (result != null && result.Aborted)
        {
            return (piecesBuffer);
        }
        // Hash only the bytes actually read (the final piece may be partial).
        var currentHash = hasher.ComputeHash(buffer.Array, 0, io.ActualCount);
        RaiseHashed(new TorrentCreatorEventArgs(0, 0, //reader.CurrentFile.Position, reader.CurrentFile.Length,
            piecesBufferOffset * PieceLength, (piecesBuffer.Length - 20) * PieceLength));
        Buffer.BlockCopy(currentHash, 0, piecesBuffer, piecesBufferOffset, currentHash.Length);
        piecesBufferOffset += currentHash.Length;
    }
    return (piecesBuffer);
}
/// <summary>
/// Loads the free-space map from the given stream. The stream is a flat
/// sequence of 8-byte little-endian longs; each entry is added to _map.
/// </summary>
public FreeSpaceMap(Stream stream, long indexEnd)
{
    bufferedIo = new BufferedIO(stream, sizeof(long));
    _indexEnd = indexEnd;
    var bytes = bufferedIo.ReadAll();
    // Use sizeof(long) for the stride (the same record size handed to
    // BufferedIO above) instead of the magic literal 8, and stop before a
    // trailing partial entry so BitConverter cannot throw on a truncated file.
    for (var i = 0; i + sizeof(long) <= bytes.Length; i += sizeof(long))
    {
        _map.Add(BitConverter.ToInt64(bytes, i));
    }
}
/// <summary>
/// Reads data.Count bytes starting at the torrent-wide offset data.Offset,
/// spanning file boundaries as needed, into data's buffer segment. Adds the
/// number of bytes read to data.ActualCount and returns it.
/// </summary>
public override int Read(BufferedIO data)
{
    if (data == null)
    {
        // NOTE(review): the parameter is named "data" but the exception says
        // "buffer" — the name appears stale.
        throw new ArgumentNullException("buffer");
    }

    var offset = data.Offset;
    var count = data.Count;
    var files = data.Files;
    // Total logical size of the torrent = sum of all file lengths.
    var fileSize = Toolbox.Accumulate<TorrentFile>(files, delegate(TorrentFile f) { return (f.Length); });
    if (offset < 0 || offset + count > fileSize)
    {
        throw new ArgumentOutOfRangeException("offset");
    }

    var i = 0;
    var bytesRead = 0;
    var totalRead = 0;
    for (i = 0; i < files.Length; i++) // This section loops through all the available
    {                                  // files until we find the file which contains
        if (offset < files[i].Length)  // the start of the data we want to read
        {
            break;
        }
        offset -= files[i].Length;     // Offset now contains the index of the data we want
    }                                  // to read from fileStream[i].

    while (totalRead < count) // We keep reading until we have read 'count' bytes.
    {
        if (i == files.Length)
        {
            break; // Ran out of files — return a short read.
        }
        // NOTE(review): the stream is not disposed here — presumably GetStream
        // caches/reuses open streams; confirm its ownership semantics.
        var s = GetStream(data.Path, files[i], FileAccess.Read);
        s.Seek(offset, SeekOrigin.Begin);
        offset = 0; // Any further files need to be read from the beginning
        bytesRead = s.Read(data.buffer.Array, data.buffer.Offset + totalRead, count - totalRead);
        totalRead += bytesRead;
        i++;
    }
    //monitor.BytesSent(totalRead, TransferType.Data);
    data.ActualCount += totalRead;
    return (totalRead);
}
/// <summary>
/// Flushes the buffered data to disk via the writer and records the number of
/// bytes written on the write monitor. The IO is always marked complete and
/// its callback (if any) signalled with success, even if the write throws.
/// </summary>
private void PerformWrite(BufferedIO io)
{
    try
    {
        _writer.Write(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        _writeMonitor.AddDelta(io.Count);
    }
    finally
    {
        io.Complete = true;
        io.Callback?.Invoke(true);
    }
}
/// <summary>
/// Performs a buffered read: a successful read accounts for the full requested
/// count, a failed one for zero bytes. The IO is always marked complete and
/// its callback told whether every requested byte was read.
/// </summary>
private void PerformRead(BufferedIO io)
{
    try
    {
        if (Writer.Read(io.Files, io.Offset, io.InternalBuffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size))
        {
            io.ActualCount = io.Count;
        }
        else
        {
            io.ActualCount = 0;
        }
        _readMonitor.AddDelta(io.ActualCount);
    }
    finally
    {
        io.Complete = true;
        io.Callback?.Invoke(io.ActualCount == io.Count);
    }
}
/// <summary>
/// Performs the actual disk write for a queued BufferedIO and records the
/// bytes written. The IO is always marked complete and its callback (if any)
/// invoked with success, even if the write throws.
/// </summary>
private void PerformWrite(BufferedIO io)
{
    // NOTE: the original computed "int index = io.PieceOffset / Piece.BlockSize"
    // for a "set block state to Written" step that no longer exists here; the
    // unused local and its stale comment have been removed.
    try
    {
        // Perform the actual write
        writer.Write(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        writeMonitor.AddDelta(io.Count);
    }
    finally
    {
        io.Complete = true;
        if (io.Callback != null)
        {
            io.Callback(true);
        }
    }
}
/// <summary>
/// Queues a write. When called on the IO loop's own thread the write is done
/// inline and the BufferedIO recycled immediately; otherwise it is enqueued
/// under the lock and the IO loop woken on the empty-to-non-empty transition.
/// </summary>
void QueueWrite(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;
    if (Thread.CurrentThread != IOLoop.thread)
    {
        lock (bufferLock)
        {
            bufferedWrites.Enqueue(io);
            // Only the first pending item needs to schedule the loop task.
            if (bufferedWrites.Count == 1)
                DiskManager.IOLoop.Queue(LoopTask);
        }
        return;
    }
    PerformWrite(io);
    cache.Enqueue(io);
}
/// <summary>
/// Queues a write. On the IO loop thread the write happens synchronously and
/// the BufferedIO is returned to the cache; on any other thread the IO is
/// enqueued and the loop task scheduled when the queue was previously empty.
/// </summary>
void QueueWrite(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;
    var onIoThread = Thread.CurrentThread == IOLoop.Thread;
    if (onIoThread)
    {
        PerformWrite(io);
        _cache.Enqueue(io);
        return;
    }
    lock (_bufferLock)
    {
        _bufferedWrites.Enqueue(io);
        // Wake the loop only on the empty -> non-empty transition.
        if (_bufferedWrites.Count == 1)
        {
            IOLoop.Queue(_loopTask);
        }
    }
}
/// <summary>
/// Performs a buffered read. The read is all-or-nothing: success accounts for
/// the full requested count, failure for zero bytes. The IO is always marked
/// complete and its callback (if set) told whether the read fully succeeded.
/// </summary>
private void PerformRead(BufferedIO io)
{
    try
    {
        var succeeded = writer.Read(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        io.ActualCount = succeeded ? io.Count : 0;
        readMonitor.AddDelta(io.ActualCount);
    }
    finally
    {
        io.Complete = true;
        io.Callback?.Invoke(io.ActualCount == io.Count);
    }
}
/// <summary>
/// Queues a write. If already running on the IO loop the write is executed
/// inline and the BufferedIO recycled; otherwise it is appended to the pending
/// queue, scheduling the loop task when the queue transitions from empty.
/// </summary>
private void QueueWrite(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;
    if (!IoLoop.IsInCurrentThread())
    {
        lock (_bufferLock)
        {
            _bufferedWrites.Enqueue(io);
            // First pending item wakes the IO loop.
            if (_bufferedWrites.Count == 1)
            {
                IoLoop.Queue(_loopTask);
            }
        }
        return;
    }
    PerformWrite(io);
    _cache.Enqueue(io);
}
/// <summary>
/// Reads data.Count bytes into data's buffer segment, starting at the
/// torrent-wide offset data.Offset and crossing file boundaries as needed.
/// Returns the byte count actually read, which is also added to
/// data.ActualCount. May return fewer bytes than requested at end of data.
/// </summary>
public override int Read(BufferedIO data)
{
    if (data == null)
        // NOTE(review): parameter is "data"; the exception names "buffer" — stale name.
        throw new ArgumentNullException("buffer");
    var offset = data.Offset;
    var count = data.Count;
    var files = data.Files;
    // Total logical torrent size = sum of file lengths.
    var fileSize = Toolbox.Accumulate<TorrentFile>(files, delegate(TorrentFile f) { return f.Length; });
    if (offset < 0 || offset + count > fileSize)
        throw new ArgumentOutOfRangeException("offset");
    var i = 0;
    var bytesRead = 0;
    var totalRead = 0;
    for (i = 0; i < files.Length; i++) // This section loops through all the available
    {                                  // files until we find the file which contains
        if (offset < files[i].Length)  // the start of the data we want to read
            break;
        offset -= files[i].Length;     // Offset now contains the index of the data we want
    }                                  // to read from fileStream[i].
    while (totalRead < count) // We keep reading until we have read 'count' bytes.
    {
        if (i == files.Length)
            break; // Ran out of files: return a short read.
        // NOTE(review): stream is not disposed — presumably GetStream owns and
        // caches the open streams; confirm.
        var s = GetStream(data.Path, files[i], FileAccess.Read);
        s.Seek(offset, SeekOrigin.Begin);
        offset = 0; // Any further files need to be read from the beginning
        bytesRead = s.Read(data.buffer.Array, data.buffer.Offset + totalRead, count - totalRead);
        totalRead += bytesRead;
        i++;
    }
    //monitor.BytesSent(totalRead, TransferType.Data);
    data.ActualCount += totalRead;
    return totalRead;
}
/// <summary>
/// Repeatedly calls Read until the requested data.Count bytes have been read
/// or the underlying reader returns 0 (EOF). The BufferedIO's buffer/offset/
/// count are mutated while reading and restored before returning; the total
/// bytes read are stored in data.ActualCount and returned.
/// </summary>
internal int ReadChunk(BufferedIO data)
{
    // Copy the initial buffer, offset and count so the values won't
    // be lost when doing the reading.
    var orig = data.buffer;
    var origOffset = data.Offset;
    var origCount = data.Count;
    var read = 0;
    var totalRead = 0;

    // Read the data in chunks. For every chunk we read, advance the buffer
    // segment and offset and subtract from the count so each Read fills in
    // the next portion of the buffer.
    //
    // BUG FIX: the original condition "totalRead != data.Count" compared the
    // running total against the *shrinking* remaining count, so a partial read
    // of exactly half the remaining bytes (count == 10, read == 5 ->
    // totalRead == 5, data.Count == 5) exited early with incomplete data.
    // Compare against the saved original count; the break below still handles
    // EOF (read == 0) and completion (data.Count == 0).
    while (totalRead < origCount)
    {
        read = Read(data);
        data.buffer = new ArraySegment<byte>(data.buffer.Array, data.buffer.Offset + read, data.buffer.Count - read);
        data.Offset += read;
        data.Count -= read;
        totalRead += read;
        if (read == 0 || data.Count == 0)
        {
            break;
        }
    }

    // Restore the original values so the object remains unchanged
    // as compared to when the user passed it in.
    data.buffer = orig;
    data.Offset = origOffset;
    data.Count = origCount;
    data.ActualCount = totalRead;
    return (totalRead);
}
/// <summary>
/// Handles a received piece message from a peer: bumps the peer's received
/// counter, wraps the payload in a BufferedIO describing where it belongs in
/// the torrent, hands it to the piece manager, then refills the peer's
/// request pipeline.
/// </summary>
internal override void Handle(PeerId id)
{
    id.PiecesReceived++;
    string path = id.TorrentManager.FileManager.SavePath;
    // data, pieceIndex, BlockIndex and requestLength are members of the
    // enclosing message object (declared outside this view).
    BufferedIO d = new BufferedIO(data, pieceIndex, BlockIndex, requestLength, id.TorrentManager.Torrent.PieceLength, id.TorrentManager.Torrent.Files, path);
    d.Id = id;
    id.TorrentManager.PieceManager.PieceDataReceived(d);
    // Keep adding new piece requests to this peers queue until we reach the max pieces we're allowed queue
    while (id.TorrentManager.PieceManager.AddPieceRequest(id))
    {
        // Intentionally empty: AddPieceRequest does the work and reports
        // whether another request may still be queued.
    }
}
/// <summary>
/// Writes the byte range described by <paramref name="data"/> (buffer, offset,
/// count, target files) to backing storage. Implemented by concrete writers.
/// </summary>
/// <param name="data">Describes the buffer to write and its destination.</param>
public abstract void Write(BufferedIO data);
/// <summary>
/// Reads bytes into the buffer described by <paramref name="data"/> from
/// backing storage. Implemented by concrete writers.
/// </summary>
/// <param name="data">Describes the buffer to fill and the source location.</param>
/// <returns>The number of bytes actually read.</returns>
public abstract int Read(BufferedIO data);
/// <summary>
/// Writes data.Count bytes from data's buffer to the torrent's files, starting
/// at torrent-wide offset data.Offset and spanning file boundaries as needed,
/// then frees the buffer via the buffer manager.
/// </summary>
public override void Write(BufferedIO data)
{
    var buffer = data.buffer.Array;
    var offset = data.Offset;
    var count = data.Count;
    if (buffer == null)
        throw new ArgumentNullException("buffer");
    // Total logical torrent size = sum of the file lengths.
    long fileSize = 0;
    for (var j = 0; j < data.Files.Length; j++)
        fileSize += data.Files[j].Length;
    if (offset < 0 || offset + count > fileSize)
        throw new ArgumentOutOfRangeException("offset");
    var i = 0;
    long bytesWritten = 0;
    long totalWritten = 0;
    long bytesWeCanWrite = 0;
    for (i = 0; i < data.Files.Length; i++) // This section loops through all the available
    {                                       // files until we find the file which contains
        if (offset < data.Files[i].Length)  // the start of the data we want to write
            break;
        offset -= data.Files[i].Length;     // Offset now contains the index of the data we want
    }                                       // to write to fileStream[i].
    while (totalWritten < count) // We keep writing until we have written 'count' bytes.
    {
        var stream = GetStream(data.Path, data.Files[i], FileAccess.ReadWrite);
        stream.Seek(offset, SeekOrigin.Begin);
        // Find the maximum number of bytes we can write before we reach the end of the file
        bytesWeCanWrite = data.Files[i].Length - offset;
        // Any further files need to be written from the beginning of the file
        offset = 0;
        // If the amount of data we are going to write is larger than the amount we can write, just write the allowed
        // amount and let the rest of the data be written with the next filestream
        bytesWritten = ((count - totalWritten) > bytesWeCanWrite) ? bytesWeCanWrite : (count - totalWritten);
        // Write the data
        stream.Write(buffer, data.buffer.Offset + (int)totalWritten, (int)bytesWritten);
        // Any further data should be written to the next available file
        totalWritten += bytesWritten;
        i++;
    }
    // Presumably returns the buffer to a shared pool — the caller must not use
    // data.buffer after this point. TODO confirm ownership contract.
    ClientEngine.BufferManager.FreeBuffer(ref data.buffer);
    //monitor.BytesReceived((int)totalWritten, TransferType.Data);
}
/// <summary>
/// Writes data.Count bytes from data's buffer to the torrent's files. The
/// torrent-wide offset data.Offset is first resolved to a (file, offset) pair;
/// writing then continues across file boundaries until 'count' bytes are
/// written. Finally the buffer is released via the buffer manager.
/// </summary>
public override void Write(BufferedIO data)
{
    var buffer = data.buffer.Array;
    var offset = data.Offset;
    var count = data.Count;
    if (buffer == null)
    {
        throw new ArgumentNullException("buffer");
    }
    // Total logical torrent size = sum of file lengths.
    long fileSize = 0;
    for (var j = 0; j < data.Files.Length; j++)
    {
        fileSize += data.Files[j].Length;
    }
    if (offset < 0 || offset + count > fileSize)
    {
        throw new ArgumentOutOfRangeException("offset");
    }
    var i = 0;
    long bytesWritten = 0;
    long totalWritten = 0;
    long bytesWeCanWrite = 0;
    for (i = 0; i < data.Files.Length; i++) // This section loops through all the available
    {                                       // files until we find the file which contains
        if (offset < data.Files[i].Length)  // the start of the data we want to write
        {
            break;
        }
        offset -= data.Files[i].Length;     // Offset now contains the index of the data we want
    }                                       // to write to fileStream[i].
    while (totalWritten < count) // We keep writing until we have written 'count' bytes.
    {
        var stream = GetStream(data.Path, data.Files[i], FileAccess.ReadWrite);
        stream.Seek(offset, SeekOrigin.Begin);
        // Find the maximum number of bytes we can write before we reach the end of the file
        bytesWeCanWrite = data.Files[i].Length - offset;
        // Any further files need to be written from the beginning of the file
        offset = 0;
        // If the amount of data we are going to write is larger than the amount we can write, just write the allowed
        // amount and let the rest of the data be written with the next filestream
        bytesWritten = ((count - totalWritten) > bytesWeCanWrite) ? bytesWeCanWrite : (count - totalWritten);
        // Write the data
        stream.Write(buffer, data.buffer.Offset + (int)totalWritten, (int)bytesWritten);
        // Any further data should be written to the next available file
        totalWritten += bytesWritten;
        i++;
    }
    // Presumably returns the buffer to a shared pool — callers must not touch
    // data.buffer afterwards. TODO confirm ownership contract.
    ClientEngine.BufferManager.FreeBuffer(ref data.buffer);
    //monitor.BytesReceived((int)totalWritten, TransferType.Data);
}
/// <summary>
/// Queues a write. On the IO loop thread the write happens immediately and the
/// BufferedIO is recycled into the cache; otherwise the IO is appended to the
/// pending queue, waking the loop when the queue was previously empty.
/// </summary>
void QueueWrite(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;
    if (Thread.CurrentThread == IOLoop.Thread)
    {
        PerformWrite(io);
        _cache.Enqueue(io);
        return;
    }
    lock (_bufferLock)
    {
        _bufferedWrites.Enqueue(io);
        // Only the empty -> non-empty transition needs to schedule the task.
        if (_bufferedWrites.Count == 1)
        {
            IOLoop.Queue(_loopTask);
        }
    }
}
/// <summary>
/// Writes the buffered data to disk and records the bytes written on the
/// write monitor. The IO is always marked complete and its callback (if any)
/// signalled with success, even when the write throws.
/// </summary>
private void PerformWrite(BufferedIO io)
{
    try
    {
        _writer.Write(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        _writeMonitor.AddDelta(io.Count);
    }
    finally
    {
        io.Complete = true;
        io.Callback?.Invoke(true);
    }
}
/// <summary>
/// Performs a buffered read. A successful read accounts for the full requested
/// count; a failed read for zero bytes. The IO is always marked complete and
/// its callback (if set) told whether the read fully succeeded.
/// </summary>
private void PerformRead(BufferedIO io)
{
    try
    {
        if (_writer.Read(io.Files, io.Offset, io.buffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size))
        {
            io.ActualCount = io.Count;
        }
        else
        {
            io.ActualCount = 0;
        }
        _readMonitor.AddDelta(io.ActualCount);
    }
    finally
    {
        io.Complete = true;
        io.Callback?.Invoke(io.ActualCount == io.Count);
    }
}
/// <summary>
/// Performs the actual disk write for a queued BufferedIO and records the
/// bytes written. The IO is always marked complete and its callback (if any)
/// invoked with success, even if the write throws.
/// </summary>
private void PerformWrite(BufferedIO io)
{
    // NOTE: the original computed "var index = io.PieceOffset / Piece.BlockSize"
    // for a "set block state to Written" step that no longer exists here; the
    // unused local and its stale comment have been removed.
    try
    {
        // Perform the actual write
        Writer.Write(io.Files, io.Offset, io.InternalBuffer, 0, io.Count, io.PieceLength, io.Manager.Torrent.Size);
        _writeMonitor.AddDelta(io.Count);
    }
    finally
    {
        io.Complete = true;
        io.Callback?.Invoke(true);
    }
}
/// <summary>
/// Queues a write. If already on the IO loop the write is executed inline and
/// the BufferedIO recycled; otherwise the IO joins the pending queue, and the
/// loop task is scheduled when the queue transitions from empty to non-empty.
/// </summary>
private void QueueWrite(BufferedIO io, DiskIOCallback callback)
{
    io.Callback = callback;
    var inline = IoLoop.IsInCurrentThread();
    if (inline)
    {
        PerformWrite(io);
        _cache.Enqueue(io);
        return;
    }
    lock (_bufferLock)
    {
        _bufferedWrites.Enqueue(io);
        // First pending item wakes the IO loop.
        if (_bufferedWrites.Count == 1)
        {
            IoLoop.Queue(_loopTask);
        }
    }
}