        [Test]
        public async Task WritePiece_ReverseOrder()
        {
            writer.Data = null;

            var blocks = fileData.Data
                         .SelectMany(t => t)
                         .Partition(Constants.BlockSize)
                         .Take(fileData.TorrentInfo.PieceLength / Constants.BlockSize)
                         .ToArray();

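            // Write the three blocks of the first piece in reverse order. Only a block
            // which arrives at the next offset the incremental hasher expects can be
            // hashed as it arrives, so only block 0 will be hashed incrementally here.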
            await diskManager.WriteAsync(fileData, new BlockInfo(0, Constants.BlockSize * 2, Constants.BlockSize), blocks[2]);
            await diskManager.WriteAsync(fileData, new BlockInfo(0, Constants.BlockSize * 1, Constants.BlockSize), blocks[1]);
            await diskManager.WriteAsync(fileData, new BlockInfo(0, Constants.BlockSize * 0, Constants.BlockSize), blocks[0]);

            // Rent 20 bytes, the length of a SHA1 (v1) piece hash.
            using var _ = MemoryPool.Default.Rent(20, out Memory<byte> hashMemory);
            var hashes = new PieceHash(hashMemory);

            hashes.V1Hash.Span.Fill(0);
            Assert.IsTrue(await diskManager.GetHashAsync(fileData, 0, hashes));
            Assert.IsTrue(fileData.Hashes[0].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#1");
            Assert.AreEqual(Constants.BlockSize * 2, writer.ReadData.Sum(t => t.Item3), "#2");

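            // The first hash check consumed the incremental hash, so a second check must
            // read the entire piece back from disk.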
            writer.ReadData.Clear();
            hashes.V1Hash.Span.Fill(0);
            Assert.IsTrue(await diskManager.GetHashAsync(fileData, 0, hashes));
            Assert.IsTrue(fileData.Hashes[0].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#3");
            Assert.AreEqual(Constants.BlockSize * 3, writer.ReadData.Sum(t => t.Item3), "#4");
        }
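
        // Writes every block of every piece in order, then verifies each piece hash can
        // be computed correctly.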
        [Test]
        public async Task WriteAllData()
        {
            var buffer         = new byte[Constants.BlockSize];
            var allData        = fileData.Data.SelectMany(t => t).Partition(Constants.BlockSize).ToArray();
            int blocksPerPiece = fileData.TorrentInfo.PieceLength / Constants.BlockSize;

            for (int i = 0; i < allData.Length; i++)
            {
                var pieceIndex = i / blocksPerPiece;
                var offset     = (i % blocksPerPiece) * Constants.BlockSize;

                Buffer.BlockCopy(allData[i], 0, buffer, 0, allData[i].Length);
                await diskManager.WriteAsync(fileData, new BlockInfo(pieceIndex, offset, allData[i].Length), buffer);
            }

            using var _ = MemoryPool.Default.Rent(20, out Memory<byte> hashMemory);
            var hashes = new PieceHash(hashMemory);

            for (int i = 0; i < fileData.Hashes.Length; i++)
            {
                // Check twice because the first check should give us the result from the incremental hash.
                hashes.V1Hash.Span.Fill(0);
                Assert.IsTrue(await diskManager.GetHashAsync(fileData, i, hashes));
                Assert.IsTrue(fileData.Hashes[i].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#2." + i);

                hashes.V1Hash.Span.Fill(0);
                Assert.IsTrue(await diskManager.GetHashAsync(fileData, i, hashes));
                Assert.IsTrue(fileData.Hashes[i].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#3." + i);
            }
        }
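
        // Hash checks every piece in the torrent, raising 'piece hashed' events as the
        // check progresses. Pieces which only overlap files marked 'do not download' are
        // skipped, and if no files exist on disk every piece is reported as failing.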
        public async Task WaitForHashingToComplete()
        {
            if (!Manager.HasMetadata)
            {
                throw new TorrentException("A hash check cannot be performed if TorrentManager.HasMetadata is false.");
            }

            Manager.HashFails = 0;

            // Delete any existing fast resume data. We will need to recreate it after hashing completes.
            await Manager.MaybeDeleteFastResumeAsync();

            bool atLeastOneDoNotDownload = Manager.Files.Any(t => t.Priority == Priority.DoNotDownload);

            if (await DiskManager.CheckAnyFilesExistAsync(Manager))
            {
                int piecesHashed = 0;
                Cancellation.Token.ThrowIfCancellationRequested();
                // bep52: Properly support this
                using var hashBuffer = MemoryPool.Default.Rent(Manager.InfoHashes.GetMaxByteCount(), out Memory<byte> hashMemory);
                var hashes = new PieceHash(hashMemory);
                for (int index = 0; index < Manager.Torrent!.PieceCount; index++)
                {
                    if (atLeastOneDoNotDownload && !Manager.Files.Any(f => index >= f.StartPieceIndex && index <= f.EndPieceIndex && f.Priority != Priority.DoNotDownload))
                    {
                        // If a file is marked 'do not download' ensure we update the TorrentFiles
                        // so they also report that the piece is not available/downloaded.
                        Manager.OnPieceHashed(index, false, piecesHashed, Manager.PartialProgressSelector.TrueCount);
                        // Then mark this piece as being unhashed so we don't try to download it.
                        Manager.UnhashedPieces[index] = true;
                        continue;
                    }

                    await PausedCompletionSource.Task;
                    Cancellation.Token.ThrowIfCancellationRequested();

                    var successful = await DiskManager.GetHashAsync(Manager, index, hashes);

                    if (Cancellation.Token.IsCancellationRequested)
                    {
                        await DiskManager.CloseFilesAsync(Manager);

                        Cancellation.Token.ThrowIfCancellationRequested();
                    }

                    bool hashPassed = successful && Manager.PieceHashes.IsValid(hashes, index);
                    Manager.OnPieceHashed(index, hashPassed, ++piecesHashed, Manager.PartialProgressSelector.TrueCount);
                }
            }
            else
            {
                await PausedCompletionSource.Task;
                for (int i = 0; i < Manager.Torrent!.PieceCount; i++)
                {
                    Manager.OnPieceHashed(i, false, i + 1, Manager.Torrent.PieceCount);
                }
            }
        }
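
        // Two torrents which share identical data write the same blocks concurrently.
        // Neither torrent's writes should corrupt the other's pieces, and all written
        // bytes should be accounted for.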
        [Test]
        public async Task WriteDataFromTwoTorrentsConcurrently()
        {
            // Data from the primary torrent
            var allData = fileData.Data.SelectMany(t => t).ToArray();

            // Data from a different torrent which hits the same pieces.
            var otherData = new TestTorrentData {
                Data        = fileData.Data,
                Hashes      = fileData.Hashes,
                TorrentInfo = fileData.TorrentInfo
            };

            int offset = 0;

            foreach (var block in allData.Partition(Constants.BlockSize))
            {
                var buffer = new byte[Constants.BlockSize];
                Buffer.BlockCopy(block, 0, buffer, 0, block.Length);

                var request = new BlockInfo(offset / fileData.TorrentInfo.PieceLength, offset % fileData.TorrentInfo.PieceLength, block.Length);
                await Task.WhenAll(
                    diskManager.WriteAsync(fileData, request, buffer).AsTask(),
                    // Attempt to 'overwrite' the primary torrent's data by concurrently
                    // writing the same block from a second torrent.
                    diskManager.WriteAsync(otherData, request, buffer).AsTask()
                );

                offset += block.Length;
            }

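            // Both torrents wrote identical data, so every piece hash should still validate.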
            using var _ = MemoryPool.Default.Rent(20, out Memory<byte> hashMemory);
            var hashes = new PieceHash(hashMemory);

            for (int i = 0; i < fileData.Hashes.Length; i++)
            {
                // Check twice because the first check should give us the result from the incremental hash.
                hashes.V1Hash.Span.Fill(0);
                Assert.IsTrue(await diskManager.GetHashAsync(fileData, i, hashes));
                Assert.IsTrue(fileData.Hashes[i].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#2." + i);

                hashes.V1Hash.Span.Fill(0);
                Assert.IsTrue(await diskManager.GetHashAsync(fileData, i, hashes));
                Assert.IsTrue(fileData.Hashes[i].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#3." + i);
            }
            Assert.AreEqual(fileData.TorrentInfo.Size + otherData.TorrentInfo.Size, diskManager.TotalBytesWritten, "#4");
        }
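
        // Writes the first two blocks of a piece out of order, then verifies the hash.
        // How much data must be re-read from disk depends on whether the disk cache was
        // large enough to hold the out-of-order block.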
        [Test]
        public async Task WritePiece_FirstTwoSwapped([Values(0, Constants.BlockSize, Constants.BlockSize * 3)] int cacheSize)
        {
            await diskManager.UpdateSettingsAsync(new EngineSettingsBuilder { DiskCacheBytes = cacheSize }.ToSettings());

            writer.Data = null;

            var blocks = fileData.Data
                         .SelectMany(t => t)
                         .Partition(Constants.BlockSize)
                         .Take(fileData.TorrentInfo.PieceLength / Constants.BlockSize)
                         .ToArray();

            await diskManager.WriteAsync(fileData, new BlockInfo(0, Constants.BlockSize * 1, Constants.BlockSize), blocks[1]);
            await diskManager.WriteAsync(fileData, new BlockInfo(0, Constants.BlockSize * 0, Constants.BlockSize), blocks[0]);
            await diskManager.WriteAsync(fileData, new BlockInfo(0, Constants.BlockSize * 2, Constants.BlockSize), blocks[2]);

            using var _ = MemoryPool.Default.Rent(20, out Memory<byte> hashMemory);
            var hashes = new PieceHash(hashMemory);

            hashes.V1Hash.Span.Fill(0);
            Assert.IsTrue(await diskManager.GetHashAsync(fileData, 0, hashes));
            Assert.IsTrue(fileData.Hashes[0].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#1");
            // If the disk cache can hold at least one block, nothing needs to be read
            // from disk to compute the hash; otherwise the two blocks which could not be
            // hashed incrementally must be read back.
            if (cacheSize < Constants.BlockSize)
            {
                Assert.AreEqual(Constants.BlockSize * 2, writer.ReadData.Sum(t => t.Item3), "#2");
            }
            else
            {
                Assert.AreEqual(0, writer.ReadData.Sum(t => t.Item3), "#2");
            }

            writer.ReadData.Clear();
            hashes.V1Hash.Span.Fill(0);
            Assert.IsTrue(await diskManager.GetHashAsync(fileData, 0, hashes));
            Assert.IsTrue(fileData.Hashes[0].AsSpan().SequenceEqual(hashes.V1Hash.Span), "#3");
            Assert.AreEqual(Constants.BlockSize * 3, writer.ReadData.Sum(t => t.Item3), "#4");
        }
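
            // Finalises the incremental hash for this piece, writing the SHA1 (v1)
            // and/or merkle root (v2) digests into 'dest'. Returns false if no manager
            // is associated or a hasher produced a digest of the wrong length.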
            public bool TryGetHashAndReset(PieceHash dest)
            {
                if (Manager is null)
                {
                    return false;
                }

                if (UseV1 && (!SHA1Hasher.TryGetHashAndReset(dest.V1Hash.Span, out int written) || written != dest.V1Hash.Length))
                {
                    return false;
                }

                if (UseV2)
                {
                    var file = Manager.Files[Manager.Files.FindFileByPieceIndex(PieceIndex)];
                    // For a file smaller than one piece, the final merkle layer is the
                    // piece's size rounded up to the next power of two; otherwise it is
                    // the full piece length.
                    long finalLayer = Manager.TorrentInfo!.PieceLength;
                    if (file.Length < Manager.TorrentInfo.PieceLength)
                    {
                        finalLayer = Math.Min(Manager.TorrentInfo.PieceLength, (long) Math.Pow(2, Math.Ceiling(Math.Log(Manager.TorrentInfo.BytesPerPiece(PieceIndex), 2))));
                    }
                    if (!MerkleHash.TryHash(SHA256Hasher, BlockHashes, Constants.BlockSize, finalLayer, dest.V2Hash.Span, out written) || written != dest.V2Hash.Length)
                    {
                        return false;
                    }
                }

                return true;
            }
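
        // Computes the hash of the given piece. If every block arrived in order the
        // incremental hash is used directly; otherwise the remaining data is read back
        // from the cache or disk and appended before the hash is finalised.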
        internal async ReusableTask<bool> GetHashAsync(ITorrentManagerInfo manager, int pieceIndex, PieceHash dest)
        {
            if (GetHashAsyncOverride != null)
            {
                return await GetHashAsyncOverride(manager, pieceIndex, dest);
            }

            await IOLoop;

            if (IncrementalHashes.TryGetValue(ValueTuple.Create(manager, pieceIndex), out IncrementalHashData? incrementalHash))
            {
                // Immediately remove it from the dictionary so another thread writing data
                // via `WriteAsync` can't try to use it.
                IncrementalHashes.Remove(ValueTuple.Create(manager, pieceIndex));

                using var lockReleaser = await incrementalHash.Locker.EnterAsync();

                // We request the blocks for most pieces sequentially, and most (all?) torrent clients
                // will process requests in the order they have been received. This means we can optimise
                // hashing a received piece by hashing each block as it arrives. If blocks arrive out of order then
                // we'll compute the final hash by reading the data from disk.
                if (incrementalHash.NextOffsetToHash == manager.TorrentInfo!.BytesPerPiece(pieceIndex))
                {
                    if (!incrementalHash.TryGetHashAndReset(dest))
                    {
                        throw new NotSupportedException("Could not generate SHA1 hash for this piece");
                    }
                    IncrementalHashCache.Enqueue(incrementalHash);
                    return true;
                }
            }
            else
            {
                // If we have no partial hash data for this piece we could be doing a full
                // hash check, so create an IncrementalHashData for our piece!
                incrementalHash = IncrementalHashCache.Dequeue();
                incrementalHash.PrepareForFirstUse(manager, pieceIndex);
            }

            // We can store up to 4MB of pieces in an in-memory queue so that, when we're
            // rate limited, we can process the queue in order. Before hashing a piece we
            // must ensure the in-memory cache has been flushed to the PieceWriter, so the
            // data we read back is complete.
            if (WriteQueue.Count > 0)
            {
                await WaitForPendingWrites();
            }

            using var releaser = await incrementalHash.Locker.EnterAsync();

            // Note that 'startOffset' may not be the very start of the piece if we have a partial hash.
            int startOffset = incrementalHash.NextOffsetToHash;
            int endOffset   = manager.TorrentInfo!.BytesPerPiece(pieceIndex);

            using (BufferPool.Rent(Constants.BlockSize, out Memory<byte> hashBuffer)) {
                try {
                    while (startOffset != endOffset)
                    {
                        int count = Math.Min(Constants.BlockSize, endOffset - startOffset);
                        if (!await ReadAsync(manager, new BlockInfo(pieceIndex, startOffset, count), hashBuffer).ConfigureAwait(false))
                        {
                            return false;
                        }
                        startOffset += count;
                        incrementalHash.AppendData(hashBuffer.Slice(0, count));
                    }
                    return incrementalHash.TryGetHashAndReset(dest);
                } finally {
                    await IOLoop;
                    IncrementalHashCache.Enqueue(incrementalHash);
                    IncrementalHashes.Remove(ValueTuple.Create(manager, pieceIndex));
                }
            }
        }
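
        // Minimal usage sketch (hypothetical, not part of this class's API): hash a single
        // piece and validate it against the torrent's known hashes, mirroring the pattern
        // used by WaitForHashingToComplete above.
        //
        //     using var _ = MemoryPool.Default.Rent(manager.InfoHashes.GetMaxByteCount(), out Memory<byte> memory);
        //     var hashes = new PieceHash(memory);
        //     bool valid = await diskManager.GetHashAsync(manager, pieceIndex, hashes)
        //                  && manager.PieceHashes.IsValid(hashes, pieceIndex);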