        void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers, BitField bitfield, int startPieceIndex, int endPieceIndex, int maxDuplicates, int? preferredMaxRequests = null)
        {
            if (!peer.CanRequestMorePieces)
            {
                return;
            }

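            // Size each request message based on the piece length, then cap the number of outstanding
            // requests at the smaller of the caller's preference (defaulting to 3) and whatever this
            // peer says it can queue.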
            int preferredRequestAmount = peer.PreferredRequestAmount(TorrentData.PieceLength);
            var maxRequests            = Math.Min(preferredMaxRequests ?? 3, peer.MaxPendingRequests);

            if (peer.AmRequestingPiecesCount >= maxRequests)
            {
                return;
            }

            // FIXME: Add a test to ensure we do not unintentionally request blocks from peers which are choking us.
            // This used to say if (!peer.IsChoking || peer.SupportsFastPeer), and with the recent changes we might
            // not actually guarantee that 'ContinueExistingRequest' or 'ContinueAnyExistingRequest' properly takes
            // into account that a peer which is choking us can *only* resume a 'fast piece' in the 'IsAllowedFastPieces' list.
            if (!peer.IsChoking)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, startPieceIndex, endPieceIndex, maxDuplicates);
                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            // If the peer supports fast peer and they are choking us, they'll still send pieces in the allowed fast set.
            if (peer.SupportsFastPeer && peer.IsChoking)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = Picker.ContinueExistingRequest(peer, startPieceIndex, endPieceIndex);
                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            // Should/could we simplify things for IPiecePicker implementations by guaranteeing IPiecePicker.PickPiece calls will
            // only be made to pieces which *can* be requested? Why not!
            // FIXME add a test for this.
            if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    IList<BlockInfo> request = PriorityPick(peer, peer.BitField, allPeers, preferredRequestAmount, 0, TorrentData.PieceCount() - 1);
                    if (request != null && request.Count > 0)
                    {
                        peer.EnqueueRequests(request);
                    }
                    else
                    {
                        break;
                    }
                }
            }

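            // The peer is not choking us but nothing is pending: keep the connection busy by
            // continuing requests which are already in flight, starting from the high priority piece.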
            if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, HighPriorityPieceIndex, bitfield.Length - 1, 1);
                    // If this peer is a seeder and we are unable to request any new blocks, then we should enter
                    // endgame mode. Every block has been requested at least once at this point.
                    if (request == null && (InEndgameMode || peer.IsSeeder))
                    {
                        request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 2);
                        // FIXME: What if the picker is choosing to not allocate pieces? Then it's not endgame mode.
                        // This should be deterministic, not a heuristic?
                        InEndgameMode |= request != null && (bitfield.Length - bitfield.TrueCount) < 10;
                    }

                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }
        }
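        // Overload which requests across the whole torrent, capped only by the peer's advertised pending-request limit.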
        public void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers)
        {
            int maxRequests = peer.MaxPendingRequests;

            if (!peer.CanRequestMorePieces)
            {
                return;
            }

            int count = peer.PreferredRequestAmount(TorrentData.PieceLength);

            // This is safe to invoke. 'ContinueExistingRequest' strongly guarantees that a peer will only
            // continue a piece they have initiated. If they're choking then the only piece they can continue
            // will be a fast piece (if one exists!)
            if (!peer.IsChoking || peer.SupportsFastPeer)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = Picker.ContinueExistingRequest(peer, 0, peer.BitField.Length - 1);
                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            // FIXME: Would it be easier if RequestManager called PickPiece(AllowedFastPieces[0]) or something along those lines?
            if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
            {
                BitField filtered = null;
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    filtered ??= ApplyIgnorables(peer.BitField);
                    IList<BlockInfo> request = Picker.PickPiece(peer, filtered, allPeers, count, 0, TorrentData.PieceCount() - 1);
                    if (request != null && request.Count > 0)
                    {
                        peer.EnqueueRequests(request);
                    }
                    else
                    {
                        break;
                    }
                }
            }

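            // Nothing could be picked and nothing is pending for this peer: fall back to continuing
            // requests which are already in flight, only duplicating blocks once endgame mode applies.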
            if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 1);
                    // If this peer is a seeder and we are unable to request any new blocks, then we should enter
                    // endgame mode. Every block has been requested at least once at this point.
                    if (request == null && (InEndgameMode || peer.IsSeeder))
                    {
                        request        = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 2);
                        InEndgameMode |= request != null;
                    }

                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }
        }
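        // Range-restricted variant: picking is limited to the pieces between startPieceIndex and
        // endPieceIndex, with maxDuplicates forwarded to the picker when continuing existing requests.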
        void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers, int startPieceIndex, int endPieceIndex, int maxDuplicates, int preferredMaxRequests)
        {
            if (!peer.CanRequestMorePieces || TorrentData == null)
            {
                return;
            }

            int preferredRequestAmount = peer.PreferredRequestAmount(TorrentData.PieceLength);
            var maxRequests            = Math.Min(preferredMaxRequests, peer.MaxPendingRequests);

            if (peer.AmRequestingPiecesCount >= maxRequests)
            {
                return;
            }

            // FIXME: Add a test to ensure we do not unintentionally request blocks from peers which are choking us.
            // This used to say if (!peer.IsChoking || peer.SupportsFastPeer), and with the recent changes we might
            // not actually guarantee that 'ContinueExistingRequest' or 'ContinueAnyExistingRequest' properly takes
            // into account that a peer which is choking us can *only* resume a 'fast piece' in the 'IsAllowedFastPieces' list.
            if (!peer.IsChoking)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = LowPriorityPicker!.ContinueAnyExistingRequest(peer, startPieceIndex, endPieceIndex, maxDuplicates);
                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            // If the peer supports fast peer and they are choking us, they'll still send pieces in the allowed fast set.
            if (peer.SupportsFastPeer && peer.IsChoking)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = LowPriorityPicker!.ContinueExistingRequest(peer, startPieceIndex, endPieceIndex);
                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            // Should/could we simplify things for IPiecePicker implementations by guaranteeing IPiecePicker.PickPiece calls will
            // only be made to pieces which *can* be requested? Why not!
            // FIXME add a test for this.
            if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
            {
                BitField filtered = null!;

                // Allocate the pick buffer once, outside the loop. stackalloc'd memory is only released
                // when the method returns, so allocating per iteration would keep growing the stack.
                Span<BlockInfo> buffer = stackalloc BlockInfo[preferredRequestAmount];
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    // Only consider pieces this peer has which we do not already have.
                    filtered ??= GenerateAlreadyHaves().Not().And(peer.BitField);

                    int requested = PriorityPick(peer, filtered, allPeers, startPieceIndex, endPieceIndex, buffer);
                    if (requested > 0)
                    {
                        // Only enqueue the blocks which were actually picked.
                        peer.EnqueueRequests(buffer.Slice(0, requested));
                    }
                    else
                    {
                        break;
                    }
                }
            }
        }
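        // Span-based overload: fills a cached request buffer so the picker can return multiple
        // blocks per call without allocating a new list each time.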
        public void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers)
        {
            int maxRequests = peer.MaxPendingRequests;

            if (!peer.CanRequestMorePieces)
            {
                return;
            }

            // This is safe to invoke. 'ContinueExistingRequest' strongly guarantees that a peer will only
            // continue a piece they have initiated. If they're choking then the only piece they can continue
            // will be a fast piece (if one exists!)
            if (!peer.IsChoking || peer.SupportsFastPeer)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = Picker.ContinueExistingRequest(peer, 0, peer.BitField.Length - 1);
                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            int count = peer.PreferredRequestAmount(TorrentData.PieceLength);

            if (RequestBufferCache.Length < count)
            {
                RequestBufferCache = new Memory<BlockInfo>(new BlockInfo[count]);
            }

            // Reuse the same buffer across multiple requests. However ensure the piecepicker is given
            // a Span<T> of the expected size - so slice the reused buffer if it's too large.
            var requestBuffer = RequestBufferCache.Span.Slice(0, count);

            if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
            {
                BitField filtered = null;
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    filtered ??= ApplyIgnorables(peer.BitField);
                    int requests = Picker.PickPiece(peer, filtered, allPeers, 0, TorrentData.PieceCount() - 1, requestBuffer);
                    if (requests > 0)
                    {
                        peer.EnqueueRequests(requestBuffer.Slice(0, requests));
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0)
            {
                while (peer.AmRequestingPiecesCount < maxRequests)
                {
                    BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 1);
                    // If this peer is a seeder and we are unable to request any new blocks, then we should enter
                    // endgame mode. Every block has been requested at least once at this point.
                    if (request == null && (InEndgameMode || peer.IsSeeder))
                    {
                        request        = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 2);
                        InEndgameMode |= request != null;
                    }

                    if (request != null)
                    {
                        peer.EnqueueRequest(request.Value);
                    }
                    else
                    {
                        break;
                    }
                }
            }
        }
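
        // Illustrative sketch only (not part of the original code): a hypothetical caller could drive
        // the pipeline by invoking AddRequests for every connected peer on each tick. The 'TickRequests'
        // name and the peer list parameter are assumptions made for the example.
        //
        // void TickRequests (IReadOnlyList<IPeerWithMessaging> connectedPeers)
        // {
        //     foreach (IPeerWithMessaging peer in connectedPeers)
        //         AddRequests (peer, connectedPeers);
        // }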