Example #1
0
        /// <summary>
        /// Returns the SHA1 hash for the given piece. If every block of the piece has already
        /// been hashed incrementally as it arrived, the stored hasher's digest is returned
        /// directly; otherwise the piece data is read back from the writer and hashed here.
        /// </summary>
        /// <param name="manager">The torrent whose piece should be hashed.</param>
        /// <param name="pieceIndex">The index of the piece to hash.</param>
        internal async ReusableTask <byte[]> GetHashAsync(ITorrentData manager, int pieceIndex)
        {
            // Test/extension hook: when set, it supplies the hash and the disk pipeline is bypassed.
            if (GetHashAsyncOverride != null)
            {
                return(GetHashAsyncOverride(manager, pieceIndex));
            }

            // Hop onto the IO loop before touching the shared hashing state
            // (IncrementalHashes, IncrementalHashCache, WriteQueue).
            await IOLoop;

            if (IncrementalHashes.TryGetValue(ValueTuple.Create(manager, pieceIndex), out IncrementalHashData incrementalHash))
            {
                // Immediately remove it from the dictionary so another thread writing data to using `WriteAsync` can't try to use it
                IncrementalHashes.Remove(ValueTuple.Create(manager, pieceIndex));

                // NOTE(review): this `using var` declaration holds the lock until the END of the
                // method, yet the fall-through path below re-enters the same Locker (second
                // `EnterAsync` further down). If the lock is not reentrant this deadlocks whenever
                // the piece was only partially hashed — consider a nested `using (...) { }` scope
                // that releases before falling through. TODO confirm Locker semantics.
                using var lockReleaser = await incrementalHash.Locker.EnterAsync();

                // We request the blocks for most pieces sequentially, and most (all?) torrent clients
                // will process requests in the order they have been received. This means we can optimise
                // hashing a received piece by hashing each block as it arrives. If blocks arrive out of order then
                // we'll compute the final hash by reading the data from disk.
                if (incrementalHash.NextOffsetToHash == manager.BytesPerPiece(pieceIndex))
                {
                    // NOTE(review): the hasher is recycled into the cache without an explicit reset
                    // here — presumably the cache (or Dequeue) reinitialises it; verify.
                    byte[] result = incrementalHash.Hasher.Hash;
                    IncrementalHashCache.Enqueue(incrementalHash);
                    return(result);
                }
            }
            else
            {
                // If we have no partial hash data for this piece we could be doing a full
                // hash check, so let's create a IncrementalHashData for our piece!
                incrementalHash = IncrementalHashCache.Dequeue();
            }

            // We can store up to 4MB of pieces in an in-memory queue so that, when we're rate limited
            // we can process the queue in-order. When we try to hash a piece we need to make sure
            // that in-memory cache is written to the PieceWriter before we try to Read the data back
            // to hash it.
            if (WriteQueue.Count > 0)
            {
                await WaitForPendingWrites();
            }

            // Held for the rest of the method so writers can't append to the incremental hasher
            // while the remaining data is read back and hashed.
            using var releaser = await incrementalHash.Locker.EnterAsync();

            // Note that 'startOffset' may not be the very start of the piece if we have a partial hash.
            int startOffset = incrementalHash.NextOffsetToHash;
            int endOffset   = manager.BytesPerPiece(pieceIndex);

            // Rent a scratch buffer of one block so the remainder of the piece can be
            // read back and hashed block-by-block.
            using (BufferPool.Rent(Piece.BlockSize, out byte[] hashBuffer)) {
Example #2
0
        /// <summary>
        /// Computes the SHA1 hash of the piece at <paramref name="pieceIndex"/> and writes the
        /// 20 byte digest into <paramref name="dest"/>. If every block of the piece was hashed
        /// incrementally as it arrived, the stored digest is emitted directly; otherwise the
        /// remaining data is read back from the writer and hashed one block at a time.
        /// </summary>
        /// <param name="manager">The torrent containing the piece to hash.</param>
        /// <param name="pieceIndex">The index of the piece to hash.</param>
        /// <param name="dest">Receives the 20 byte SHA1 digest.</param>
        /// <returns>True if the digest was produced; false if a block could not be read back.</returns>
        internal async ReusableTask <bool> GetHashAsync(ITorrentData manager, int pieceIndex, Memory <byte> dest)
        {
            // Test/extension hook: when set, it supplies the result and the disk pipeline is bypassed.
            if (GetHashAsyncOverride != null)
            {
                return await GetHashAsyncOverride(manager, pieceIndex, dest);
            }

            // Hop onto the IO loop before touching the shared hashing state
            // (IncrementalHashes, IncrementalHashCache, WriteQueue).
            await IOLoop;

            if (IncrementalHashes.TryGetValue(ValueTuple.Create(manager, pieceIndex), out IncrementalHashData incrementalHash))
            {
                // Immediately remove it from the dictionary so another thread writing data using `WriteAsync` can't try to use it
                IncrementalHashes.Remove(ValueTuple.Create(manager, pieceIndex));

                // BUGFIX: acquire the lock in a nested scope so it is *released* before we fall
                // through to the read-back path below. The previous `using var` declaration kept
                // this lock held until the end of the method, so the second `Locker.EnterAsync()`
                // further down re-entered the same lock and could deadlock whenever the piece had
                // only been partially hashed. Releasing here is safe: the entry was already removed
                // from `IncrementalHashes` above, so no writer can reach it in the gap.
                using (await incrementalHash.Locker.EnterAsync())
                {
                    // We request the blocks for most pieces sequentially, and most (all?) torrent clients
                    // will process requests in the order they have been received. This means we can optimise
                    // hashing a received piece by hashing each block as it arrives. If blocks arrive out of order then
                    // we'll compute the final hash by reading the data from disk.
                    if (incrementalHash.NextOffsetToHash == manager.BytesPerPiece(pieceIndex))
                    {
                        // A SHA1 digest is exactly 20 bytes; anything else means the hasher is broken.
                        if (!incrementalHash.Hasher.TryGetHashAndReset(dest.Span, out int written) || written != 20)
                        {
                            throw new NotSupportedException("Could not generate SHA1 hash for this piece");
                        }
                        IncrementalHashCache.Enqueue(incrementalHash);
                        return true;
                    }
                }
            }
            else
            {
                // If we have no partial hash data for this piece we could be doing a full
                // hash check, so let's create a IncrementalHashData for our piece!
                incrementalHash = IncrementalHashCache.Dequeue();
            }

            // We can store up to 4MB of pieces in an in-memory queue so that, when we're rate limited
            // we can process the queue in-order. When we try to hash a piece we need to make sure
            // that in-memory cache is written to the PieceWriter before we try to Read the data back
            // to hash it.
            if (WriteQueue.Count > 0)
            {
                await WaitForPendingWrites();
            }

            // Held for the rest of the method so nothing can append to the incremental hasher
            // while the remaining data is read back and hashed.
            using var releaser = await incrementalHash.Locker.EnterAsync();

            // Note that 'startOffset' may not be the very start of the piece if we have a partial hash.
            int startOffset = incrementalHash.NextOffsetToHash;
            int endOffset   = manager.BytesPerPiece(pieceIndex);

            using (BufferPool.Rent(Constants.BlockSize, out Memory <byte> hashBuffer)) {
                try {
                    var hasher = incrementalHash.Hasher;

                    // Hash the remainder of the piece one block at a time, reading each
                    // block back from the underlying writer.
                    while (startOffset != endOffset)
                    {
                        int count = Math.Min(Constants.BlockSize, endOffset - startOffset);
                        if (!await ReadAsync(manager, new BlockInfo(pieceIndex, startOffset, count), hashBuffer).ConfigureAwait(false))
                        {
                            // A block could not be read back, so the piece cannot be hashed.
                            return false;
                        }
                        startOffset += count;
                        hasher.AppendData(hashBuffer.Slice(0, count));
                    }

                    return hasher.TryGetHashAndReset(dest.Span, out int written);
                } finally {
                    // Run cleanup on the IO loop: recycle the hash data into the cache and make
                    // sure no stale entry for this piece is left in the dictionary.
                    await IOLoop;
                    IncrementalHashCache.Enqueue(incrementalHash);
                    IncrementalHashes.Remove(ValueTuple.Create(manager, pieceIndex));
                }
            }
        }