public static void RightClick(RemoteClient client, MinecraftServer server, IPacket _packet)
{
    // Handles a right-click interaction: using the held item on a targeted block,
    // or "in the air" when no block is targeted.
    var packet = (RightClickPacket)_packet;
    var slot = client.Entity.Inventory[client.Entity.SelectedSlot];
    var position = new Coordinates3D(packet.X, packet.Y, packet.Z);
    var cursorPosition = new Coordinates3D(packet.CursorX, packet.CursorY, packet.CursorZ);

    // The client sends (-1, -1, -1) when no block was targeted.
    BlockInfo? block = null;
    if (position != -Coordinates3D.One)
    {
        // Reject interactions beyond the player's reach.
        if (position.DistanceTo((Coordinates3D)client.Entity.Position) > client.Reach)
            return;
        block = client.World.GetBlockInfo(position);
    }

    // Give the clicked block a chance to consume the interaction (e.g. opening
    // a container); if it does, the held item is not used.
    bool use = true;
    if (block != null)
        use = client.World.RightClickBlock(position, packet.Face, cursorPosition, slot.AsItem());

    if (slot.Empty || !use)
        return;

    var item = slot.AsItem();
    // The original code duplicated the following use-item + sound logic verbatim
    // in both the block != null and block == null branches; it is shared here.
    client.World.UseItemOnBlock(position, packet.Face, cursorPosition, item.Value);
    // Item IDs below 0x100 are placeable blocks, which emit a placement sound.
    if (item.Value.ItemId < 0x100)
    {
        client.SendPacket(new SoundEffectPacket(Block.GetPlacementSoundEffect(item.Value.ItemId),
            position.X, position.Y, position.Z,
            SoundEffectPacket.DefaultVolume, SoundEffectPacket.DefaultPitch));
    }
    // Consume one item from the stack, but only for block-targeted use outside
    // creative mode (matches the original branching exactly).
    if (block != null && client.GameMode != GameMode.Creative)
    {
        slot.Count--;
        // TODO: This is probably a bad place to put this code
        if (slot.Count == 0)
            client.Entity.Inventory[client.Entity.SelectedSlot] = ItemStack.EmptyStack;
        else
            client.Entity.Inventory[client.Entity.SelectedSlot] = slot;
    }
}
/// <summary>
/// Decodes a <see cref="ChainLevelInfo"/> from a value decoder context.
/// Returns null when the next RLP item is the null marker.
/// </summary>
public ChainLevelInfo? Decode(ref Rlp.ValueDecoderContext decoderContext, RlpBehaviors rlpBehaviors = RlpBehaviors.None)
{
    if (decoderContext.IsNextItemNull())
        return null;

    int endPosition = decoderContext.ReadSequenceLength() + decoderContext.Position;
    bool hasMainChainBlock = decoderContext.DecodeBool();

    List<BlockInfo> infos = new();
    decoderContext.ReadSequenceLength();
    while (decoderContext.Position < endPosition)
    {
        // Block info can be null for corrupted states (also cases where the
        // block hash is null coming from old DBs) - skip those entries.
        BlockInfo? decoded = Rlp.Decode<BlockInfo?>(ref decoderContext, RlpBehaviors.AllowExtraData);
        if (decoded is not null)
            infos.Add(decoded);
    }

    bool allowExtraData = (rlpBehaviors & RlpBehaviors.AllowExtraData) == RlpBehaviors.AllowExtraData;
    if (!allowExtraData)
        decoderContext.Check(endPosition);

    return new ChainLevelInfo(hasMainChainBlock, infos.ToArray());
}
/// <summary>
/// Creates a word with an explicit occurrence count and optional postings data.
/// </summary>
public Word(string value, int count = 1, BlockInfo? postingsAddress = null, IList<DocumentPosting> postings = null)
{
    Count = count;
    Value = value;
    Postings = postings;
    PostingsAddress = postingsAddress;
}
/// <summary>
/// Writes the block to the stream, or a null marker when the block is null.
/// Returns true if a real block was written.
/// </summary>
public static bool Serialize(this BlockInfo? block, Stream stream)
{
    // Non-null blocks delegate to the struct's own serializer.
    if (block != null)
    {
        block.Value.Serialize(stream);
        return true;
    }

    // Null marker: a long.MinValue position followed by an int.MinValue length,
    // always stored in little-endian byte order.
    byte[] position = BitConverter.GetBytes(long.MinValue);
    byte[] length = BitConverter.GetBytes(int.MinValue);
    if (!BitConverter.IsLittleEndian)
    {
        Array.Reverse(position);
        Array.Reverse(length);
    }
    stream.Write(position, 0, position.Length);
    stream.Write(length, 0, length.Length);
    return false;
}
/// <summary>
/// Serializes the block (or a null marker) to a byte array.
/// </summary>
public static byte[] Serialize(this BlockInfo? block)
{
    using (var stream = new MemoryStream())
    {
        if (block == null)
        {
            // The null marker must match the Stream-based overload: a long.MinValue
            // position (8 bytes) followed by an int.MinValue length (4 bytes).
            // The previous implementation wrote int.MinValue twice (8 bytes total),
            // which a reader expecting a (long, int) pair would misinterpret.
            var pos = BitConverter.GetBytes(long.MinValue);
            var len = BitConverter.GetBytes(int.MinValue);
            if (!BitConverter.IsLittleEndian)
            {
                Array.Reverse(pos);
                Array.Reverse(len);
            }
            stream.Write(pos, 0, pos.Length);
            stream.Write(len, 0, len.Length);
        }
        else
        {
            var blockBytes = block.Value.Serialize();
            stream.Write(blockBytes, 0, blockBytes.Length);
        }
        return stream.ToArray();
    }
}
/// <summary>
/// Fills in block bodies for the download context from the local block tree.
/// Returns false when any required block is missing locally, or when the first
/// block is a beacon header that still lacks a body.
/// </summary>
public bool TrySetNextBlocks(int maxCount, BlockDownloadContext context)
{
    if (context.Blocks.Length == 0)
        return false;

    BlockInfo? beaconInfo = GetBeaconMainChainBlockInfo(context.Blocks[0].Number);
    if (beaconInfo?.IsBeaconHeader == true && beaconInfo.IsBeaconBody == false)
        return false;

    int processed = 0;
    while (processed != context.NonEmptyBlockHashes.Count)
    {
        IReadOnlyList<Keccak> hashes = context.GetHashesByOffset(processed, maxCount);
        for (int i = 0; i < hashes.Count; i++)
        {
            Block? block = _blockTree.FindBlock(hashes[i], BlockTreeLookupOptions.None);
            if (block == null)
                return false;
            context.SetBody(processed + i, new BlockBody(block.Transactions, block.Uncles));
        }
        processed += hashes.Count;
    }
    return true;
}
/// <summary>
/// If the receipts migration has not already completed, checks the most recent
/// block that has receipts and forces a full re-migration when those receipts
/// need recovery.
/// </summary>
private void ResetMigrationIndexIfNeeded()
{
    ReceiptsRecovery recovery = new ReceiptsRecovery(_api.EthereumEcdsa, _api.SpecProvider);

    // long.MaxValue means the migration already ran to completion.
    if (_receiptStorage.MigratedBlockNumber == long.MaxValue)
        return;

    // Walk down from the head until a block with receipts is found.
    for (long number = _blockTree.Head?.Number ?? 0; number > 0; number--)
    {
        ChainLevelInfo? level = _chainLevelInfoRepository.LoadLevel(number);
        BlockInfo? firstBlockInfo = level?.BlockInfos.FirstOrDefault();
        if (firstBlockInfo == null)
            continue;

        TxReceipt[] receipts = _receiptStorage.Get(firstBlockInfo.BlockHash);
        if (receipts.Length > 0)
        {
            if (recovery.NeedRecover(receipts))
                _receiptStorage.MigratedBlockNumber = long.MaxValue;
            break;
        }
    }
}
/// <summary>
/// Creates a node from explicit field values.
/// </summary>
public LcrsNode(char value, bool haveSibling, bool haveChild, bool endOfWord, short depth, int weight, BlockInfo? postingsAddress)
{
    Value = value;
    Depth = depth;
    Weight = weight;
    EndOfWord = endOfWord;
    HaveChild = haveChild;
    HaveSibling = haveSibling;
    PostingsAddress = postingsAddress;
}
/// <summary>
/// Creates a node that mirrors the given trie node, deriving the sibling and
/// child flags from the trie's links.
/// </summary>
public LcrsNode(LcrsTrie trie, short depth, int weight, BlockInfo? postingsAddress)
{
    Value = trie.Value;
    EndOfWord = trie.EndOfWord;
    HaveChild = trie.LeftChild != null;
    HaveSibling = trie.RightSibling != null;
    Depth = depth;
    Weight = weight;
    PostingsAddress = postingsAddress;
}
private int InsertReceipts(ReceiptsSyncBatch batch) { bool hasBreachedProtocol = false; int validResponsesCount = 0; for (int i = 0; i < batch.Infos.Length; i++) { BlockInfo?blockInfo = batch.Infos[i]; TxReceipt[]? receipts = (batch.Response?.Length ?? 0) <= i ? null : (batch.Response ![i] ?? Array.Empty <TxReceipt>());
private int InsertBodies(BodiesSyncBatch batch) { bool hasBreachedProtocol = false; int validResponsesCount = 0; for (int i = 0; i < batch.Infos.Length; i++) { BlockInfo?blockInfo = batch.Infos[i]; BlockBody?body = (batch.Response?.Length ?? 0) <= i ? null : batch.Response ![i];
/// <summary>
/// Looks up the chain level for the given block number and returns its beacon
/// main-chain entry, or null (with a trace log) when none exists.
/// </summary>
private BlockInfo? GetBeaconMainChainBlockInfo(long startingPoint)
{
    ChainLevelInfo? level = _blockTree.FindLevel(startingPoint);
    BlockInfo? blockInfo = level?.BeaconMainChainBlock;
    if (blockInfo != null)
        return blockInfo;

    if (_logger.IsTrace)
        _logger.Trace($"Beacon main chain block for number {startingPoint} was not found");
    return null;
}
/// <summary>
/// RLP-encodes a <see cref="BlockInfo"/>; null encodes as the empty sequence.
/// </summary>
public Rlp Encode(BlockInfo? item, RlpBehaviors rlpBehaviors = RlpBehaviors.None)
{
    if (item == null)
        return Rlp.OfEmptySequence;

    // Finalization-aware chains append a fourth element carrying the flag.
    Rlp[] fields = new Rlp[_chainWithFinalization ? 4 : 3];
    fields[0] = Rlp.Encode(item.BlockHash);
    fields[1] = Rlp.Encode(item.WasProcessed);
    fields[2] = Rlp.Encode(item.TotalDifficulty);
    if (_chainWithFinalization)
        fields[3] = Rlp.Encode(item.IsFinalized);
    return Rlp.Encode(fields);
}
/// <summary>
/// Legacy encoding of a <see cref="BlockInfo"/>; kept only for compatibility
/// with data written by older versions. Null encodes as the empty sequence.
/// </summary>
public static Rlp BlockInfoEncodeDeprecated(BlockInfo? item, bool chainWithFinalization)
{
    if (item == null)
        return Rlp.OfEmptySequence;

    Rlp[] fields = new Rlp[chainWithFinalization ? 4 : 3];
    fields[0] = Rlp.Encode(item.BlockHash);
    fields[1] = Rlp.Encode(item.WasProcessed);
    fields[2] = Rlp.Encode(item.TotalDifficulty);
    if (chainWithFinalization)
        fields[3] = Rlp.Encode(item.IsFinalized);
    return Rlp.Encode(fields);
}
/// <summary>
/// RLP-encodes a <see cref="BlockInfo"/>; null encodes as the empty sequence.
/// The metadata field is only emitted when it carries information, keeping the
/// common (metadata-free) encoding at three elements.
/// </summary>
public Rlp Encode(BlockInfo? item, RlpBehaviors rlpBehaviors = RlpBehaviors.None)
{
    if (item == null)
        return Rlp.OfEmptySequence;

    bool writeMetadata = item.Metadata != BlockMetadata.None;
    Rlp[] fields = new Rlp[writeMetadata ? 4 : 3];
    fields[0] = Rlp.Encode(item.BlockHash);
    fields[1] = Rlp.Encode(item.WasProcessed);
    fields[2] = Rlp.Encode(item.TotalDifficulty);
    if (writeMetadata)
        fields[3] = Rlp.Encode((int)item.Metadata);
    return Rlp.Encode(fields);
}
/// <summary>
/// Decodes a <see cref="ChainLevelInfo"/> from an RLP stream. A zero-length
/// stream is an error; the RLP null marker decodes to null.
/// </summary>
public ChainLevelInfo? Decode(RlpStream rlpStream, RlpBehaviors rlpBehaviors = RlpBehaviors.None)
{
    if (rlpStream.Length == 0)
        throw new RlpException($"Received a 0 length stream when decoding a {nameof(ChainLevelInfo)}");
    if (rlpStream.IsNextItemNull())
        return null;

    int endPosition = rlpStream.ReadSequenceLength() + rlpStream.Position;
    bool hasMainChainBlock = rlpStream.DecodeBool();

    List<BlockInfo> infos = new();
    rlpStream.ReadSequenceLength();
    while (rlpStream.Position < endPosition)
    {
        // Block info can be null for corrupted states (also cases where the
        // block hash is null coming from old DBs) - skip those entries.
        BlockInfo? decoded = Rlp.Decode<BlockInfo?>(rlpStream, RlpBehaviors.AllowExtraData);
        if (decoded is not null)
            infos.Add(decoded);
    }

    if ((rlpBehaviors & RlpBehaviors.AllowExtraData) != RlpBehaviors.AllowExtraData)
        rlpStream.Check(endPosition);

    return new ChainLevelInfo(hasMainChainBlock, infos.ToArray());
}
// Reads a single serialized LcrsNode from the stream, followed by its
// (possibly null) postings-block address.
// Wire layout: char value, three single-byte flags (sibling/child/end-of-word),
// short depth, int weight, then the serialized BlockInfo.
public static LcrsNode DeserializeNode(Stream stream)
{
    // An unreadable stream yields the sentinel node rather than throwing.
    if (!stream.CanRead)
    {
        return (LcrsNode.MinValue);
    }
    var valBytes = new byte[sizeof(char)];
    var depthBytes = new byte[sizeof(short)];
    var weightBytes = new byte[sizeof(int)];
    // NOTE(review): Stream.Read return values are ignored, so a short read
    // yields garbage values instead of an error, and ReadByte can return -1
    // at end-of-stream - confirm callers guarantee a fully-buffered stream.
    stream.Read(valBytes, 0, sizeof(char));
    int byte0 = stream.ReadByte();
    int byte1 = stream.ReadByte();
    int byte2 = stream.ReadByte();
    stream.Read(depthBytes, 0, depthBytes.Length);
    stream.Read(weightBytes, 0, weightBytes.Length);
    BlockInfo? block = DeserializeBlock(stream);
    // The on-disk format is little-endian; swap on big-endian hosts.
    if (!BitConverter.IsLittleEndian)
    {
        Array.Reverse(valBytes);
        Array.Reverse(depthBytes);
        Array.Reverse(weightBytes);
    }
    return (new LcrsNode(
        BitConverter.ToChar(valBytes, 0),
        byte0 == 1,
        byte1 == 1,
        byte2 == 1,
        BitConverter.ToInt16(depthBytes, 0),
        BitConverter.ToInt32(weightBytes, 0),
        block));
}
// Computes the face-visibility mask for a single block and, when at least one
// face is visible, produces the packed BlockInfo used for rendering.
// Returns true (with blockInfo set) only if the block needs drawing.
private bool ComputeBlockInfo(Chunk chunk, ChunkPosition chunkPosition, Chunk[] siblingChunks, byte x, byte z, short y, Chunk.ChunkBlockInformation chunkBlockInformation, out BlockInfo? blockInfo)
{
    // Non-opaque blocks start with all six face bits set (0x3F00); opaque
    // blocks start hidden and get faces enabled by the neighbour checks below.
    var blockVisibility = _blocksProvider.GetBlockForId(chunkBlockInformation.Id).IsOpaque ? 0 : 0x3F00;
    if (blockVisibility == 0)
    {
        // For each face: check the neighbour inside this chunk first; on a chunk
        // border, fall back to the matching sibling chunk (treated as id 0 when
        // the sibling is not loaded). A face is visible when the neighbour is
        // not known to be opaque.
        if ((x > 0 && _blocksProvider.GetBlockForId(_chunkManager.GetBlockId(chunk, (byte)(x - 1), (byte)y, z))?.IsOpaque != true) || (x == 0 && _blocksProvider.GetBlockForId(siblingChunks[0] != null ? _chunkManager.GetBlockId(siblingChunks[0], 15, (byte)y, z) : (byte)0)?.IsOpaque != true))
        {
            blockVisibility |= (int)BlockVisibility.Left;
        }
        if ((x < 15 && _blocksProvider.GetBlockForId(_chunkManager.GetBlockId(chunk, (byte)(x + 1), (byte)y, z))?.IsOpaque != true) || (x == 15 && _blocksProvider.GetBlockForId(siblingChunks[2] != null ? _chunkManager.GetBlockId(siblingChunks[2], 0, (byte)y, z) : (byte)0)?.IsOpaque != true))
        {
            blockVisibility |= (int)BlockVisibility.Right;
        }
        if ((z > 0 && _blocksProvider.GetBlockForId(_chunkManager.GetBlockId(chunk, x, (byte)y, (byte)(z - 1)))?.IsOpaque != true) || (z == 0 && _blocksProvider.GetBlockForId(siblingChunks[3] != null ? _chunkManager.GetBlockId(siblingChunks[3], x, (byte)y, 15) : (byte)0)?.IsOpaque != true))
        {
            blockVisibility |= (int)BlockVisibility.Front;
        }
        if ((z < 15 && _blocksProvider.GetBlockForId(_chunkManager.GetBlockId(chunk, x, (byte)y, (byte)(z + 1)))?.IsOpaque != true) || (z == 15 && _blocksProvider.GetBlockForId(siblingChunks[1] != null ? _chunkManager.GetBlockId(siblingChunks[1], x, (byte)y, 0) : (byte)0)?.IsOpaque != true))
        {
            blockVisibility |= (int)BlockVisibility.Back;
        }
        // Vertical faces: the world edge (y == 0 / y == 255) always counts as visible.
        if (y == 0 || _blocksProvider.GetBlockForId(_chunkManager.GetBlockId(chunk, x, (byte)(y - 1), z))?.IsOpaque != true)
        {
            blockVisibility |= (int)BlockVisibility.Bottom;
        }
        if (y == 255 || _blocksProvider.GetBlockForId(_chunkManager.GetBlockId(chunk, x, (byte)(y + 1), z))?.IsOpaque != true)
        {
            blockVisibility |= (int)BlockVisibility.Top;
        }
    }
    if (blockVisibility != 0)
    {
        blockInfo = new BlockInfo
        {
            // Packed layout per this expression: block id in the low bits,
            // visibility flags in the 0x3F00 bit range, metadata shifted past
            // both (<< 14).
            BlockIdAndBlockVisibilityAndMetadata = blockVisibility | chunkBlockInformation.Id | (chunkBlockInformation.Metadata << 14),
            // Humidity in the high byte, temperature in the low byte.
            HumidityAndTemperature = chunkBlockInformation.Humidity << 8 | chunkBlockInformation.Temperature,
            // Chunk-local coordinates converted to world coordinates (chunk size 16).
            Position = new Vector3((chunkPosition.X << 4) + x, y, (chunkPosition.Z << 4) + z)
        };
        return (true);
    }
    blockInfo = null;
    return (false);
}
/// <summary>
/// Creates a word whose postings live at the given on-disk address.
/// </summary>
public Word(string value, BlockInfo? postingsAddress, IList<DocumentPosting> postings = null)
{
    Value = value;
    Postings = postings;
    PostingsAddress = postingsAddress;
}
// Queues block requests for one peer, restricted to [startPieceIndex, endPieceIndex].
// Request sources are tried in priority order: resume in-flight pieces, resume
// fast-set pieces while choked, pick fresh pieces, and finally duplicate
// outstanding requests (endgame-style) against the supplied bitfield.
void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers, BitField bitfield, int startPieceIndex, int endPieceIndex, int maxDuplicates, int? preferredMaxRequests = null)
{
    if (!peer.CanRequestMorePieces)
    {
        return;
    }
    int preferredRequestAmount = peer.PreferredRequestAmount(TorrentData.PieceLength);
    // Cap the pipeline at the caller's preference (default 3) or the peer's limit.
    var maxRequests = Math.Min(preferredMaxRequests ?? 3, peer.MaxPendingRequests);
    if (peer.AmRequestingPiecesCount >= maxRequests)
    {
        return;
    }
    // FIXME: Add a test to ensure we do not unintentionally request blocks off peers which are choking us.
    // This used to say if (!peer.IsChoing || peer.SupportsFastPeer), and with the recent changes we might
    // not actually guarantee that 'ContinueExistingRequest' or 'ContinueAnyExistingRequest' properly takes
    // into account that a peer which is choking us can *only* resume a 'fast piece' in the 'AmAllowedfastPiece' list.
    if (!peer.IsChoking)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, startPieceIndex, endPieceIndex, maxDuplicates);
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
    // If the peer supports fast peer and they are choking us, they'll still send pieces in the allowed fast set.
    if (peer.SupportsFastPeer && peer.IsChoking)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueExistingRequest(peer, startPieceIndex, endPieceIndex);
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
    // Should/could we simplify things for IPiecePicker implementations by guaranteeing IPiecePicker.PickPiece calls will
    // only be made to pieces which *can* be requested? Why not!
    // FIXME add a test for this.
    if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            IList<BlockInfo> request = PriorityPick(peer, peer.BitField, allPeers, preferredRequestAmount, 0, TorrentData.PieceCount() - 1);
            if (request != null && request.Count > 0)
            {
                peer.EnqueueRequests(request);
            }
            else
            {
                break;
            }
        }
    }
    // Last resort: duplicate existing requests, starting from the high-priority index.
    if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, HighPriorityPieceIndex, bitfield.Length - 1, 1);
            // If this peer is a seeder and we are unable to request any new blocks, then we should enter
            // endgame mode. Every block has been requested at least once at this point.
            if (request == null && (InEndgameMode || peer.IsSeeder))
            {
                request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 2);
                // FIXME: What if the picker is choosing to not allocate pieces? Then it's not endgame mode.
                // This should be deterministic, not a heuristic?
                InEndgameMode |= request != null && (bitfield.Length - bitfield.TrueCount) < 10;
            }
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
}
// Queues block requests for one peer across the whole torrent, using a reusable
// buffer for the picker's output. Sources are tried in priority order: resume
// in-flight pieces, pick fresh pieces, then duplicate requests (endgame).
public void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers)
{
    int maxRequests = peer.MaxPendingRequests;
    if (!peer.CanRequestMorePieces)
    {
        return;
    }
    // This is safe to invoke. 'ContinueExistingRequest' strongly guarantees that a peer will only
    // continue a piece they have initiated. If they're choking then the only piece they can continue
    // will be a fast piece (if one exists!)
    if (!peer.IsChoking || peer.SupportsFastPeer)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueExistingRequest(peer, 0, peer.BitField.Length - 1);
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
    int count = peer.PreferredRequestAmount(TorrentData.PieceLength);
    // Grow the shared buffer on demand; it is never shrunk.
    if (RequestBufferCache.Length < count)
    {
        RequestBufferCache = new Memory<BlockInfo>(new BlockInfo[count]);
    }
    // Reuse the same buffer across multiple requests. However ensure the piecepicker is given
    // a Span<T> of the expected size - so slice the reused buffer if it's too large.
    var requestBuffer = RequestBufferCache.Span.Slice(0, count);
    if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
    {
        BitField filtered = null;
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            // The ignorable-piece filter is computed lazily, only if picking happens.
            filtered ??= ApplyIgnorables(peer.BitField);
            int requests = Picker.PickPiece(peer, filtered, allPeers, 0, TorrentData.PieceCount() - 1, requestBuffer);
            if (requests > 0)
            {
                peer.EnqueueRequests(requestBuffer.Slice(0, requests));
            }
            else
            {
                break;
            }
        }
    }
    if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 1);
            // If this peer is a seeder and we are unable to request any new blocks, then we should enter
            // endgame mode. Every block has been requested at least once at this point.
            if (request == null && (InEndgameMode || peer.IsSeeder))
            {
                request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 2);
                InEndgameMode |= request != null;
            }
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
}
// Queues block requests for one peer, restricted to [startPieceIndex, endPieceIndex].
// Sources in priority order: resume in-flight pieces, resume fast-set pieces while
// choked, priority-pick fresh pieces, then continue requests from the high-priority index.
void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers, int startPieceIndex, int endPieceIndex, int maxDuplicates, int? preferredMaxRequests = null)
{
    if (!peer.CanRequestMorePieces)
    {
        return;
    }
    int preferredRequestAmount = peer.PreferredRequestAmount(TorrentData.PieceLength);
    // Cap the pipeline at the caller's preference (default 3) or the peer's limit.
    var maxRequests = Math.Min(preferredMaxRequests ?? 3, peer.MaxPendingRequests);
    if (peer.AmRequestingPiecesCount >= maxRequests)
    {
        return;
    }
    // FIXME: Add a test to ensure we do not unintentionally request blocks off peers which are choking us.
    // This used to say if (!peer.IsChoing || peer.SupportsFastPeer), and with the recent changes we might
    // not actually guarantee that 'ContinueExistingRequest' or 'ContinueAnyExistingRequest' properly takes
    // into account that a peer which is choking us can *only* resume a 'fast piece' in the 'AmAllowedfastPiece' list.
    if (!peer.IsChoking)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, startPieceIndex, endPieceIndex, maxDuplicates);
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
    // If the peer supports fast peer and they are choking us, they'll still send pieces in the allowed fast set.
    if (peer.SupportsFastPeer && peer.IsChoking)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueExistingRequest(peer, startPieceIndex, endPieceIndex);
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
    // Should/could we simplify things for IPiecePicker implementations by guaranteeing IPiecePicker.PickPiece calls will
    // only be made to pieces which *can* be requested? Why not!
    // FIXME add a test for this.
    if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
    {
        MutableBitField filtered = null;
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            // Lazily build the set of pieces we still need that this peer has.
            filtered ??= GenerateAlreadyHaves().Not().And(peer.BitField);
            IList<BlockInfo> request = PriorityPick(peer, filtered, allPeers, preferredRequestAmount, 0, TorrentData.PieceCount() - 1);
            if (request != null && request.Count > 0)
            {
                peer.EnqueueRequests(request);
            }
            else
            {
                break;
            }
        }
    }
    // Last resort: continue any outstanding request from the high-priority index onward.
    if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, HighPriorityPieceIndex, TorrentData.PieceCount() - 1, 1);
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
}
// Queues block requests for one peer across the whole torrent. Sources in
// priority order: resume in-flight pieces, pick fresh pieces via the picker,
// then duplicate outstanding requests (endgame).
public void AddRequests(IPeerWithMessaging peer, IReadOnlyList<IPeerWithMessaging> allPeers)
{
    int maxRequests = peer.MaxPendingRequests;
    if (!peer.CanRequestMorePieces)
    {
        return;
    }
    int count = peer.PreferredRequestAmount(TorrentData.PieceLength);
    // This is safe to invoke. 'ContinueExistingRequest' strongly guarantees that a peer will only
    // continue a piece they have initiated. If they're choking then the only piece they can continue
    // will be a fast piece (if one exists!)
    if (!peer.IsChoking || peer.SupportsFastPeer)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueExistingRequest(peer, 0, peer.BitField.Length - 1);
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
    // FIXME: Would it be easier if RequestManager called PickPiece(AllowedFastPieces[0]) or something along those lines?
    if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0))
    {
        BitField filtered = null;
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            // The ignorable-piece filter is computed lazily, only if picking happens.
            filtered ??= ApplyIgnorables(peer.BitField);
            IList<BlockInfo> request = Picker.PickPiece(peer, filtered, allPeers, count, 0, TorrentData.PieceCount() - 1);
            if (request != null && request.Count > 0)
            {
                peer.EnqueueRequests(request);
            }
            else
            {
                break;
            }
        }
    }
    if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0)
    {
        while (peer.AmRequestingPiecesCount < maxRequests)
        {
            BlockInfo? request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 1);
            // If this peer is a seeder and we are unable to request any new blocks, then we should enter
            // endgame mode. Every block has been requested at least once at this point.
            if (request == null && (InEndgameMode || peer.IsSeeder))
            {
                request = Picker.ContinueAnyExistingRequest(peer, 0, TorrentData.PieceCount() - 1, 2);
                InEndgameMode |= request != null;
            }
            if (request != null)
            {
                peer.EnqueueRequest(request.Value);
            }
            else
            {
                break;
            }
        }
    }
}
/// <summary>
/// Creates a word with no postings address.
/// NOTE(review): unlike the (value, count, postingsAddress, postings) constructor,
/// this leaves Count at its default (0) and never assigns Postings - confirm
/// that is intentional.
/// </summary>
public Word(string value)
{
    Value = value;
    PostingsAddress = null;
}
// Collects up to maxCount consecutive headers for beacon (merge) sync, walking
// forward from the number computed by GetStartingPoint(). Returns null when no
// starting point exists yet; otherwise returns the headers found (possibly fewer
// than maxCount if a level or header is missing).
// NOTE(review): the maxHeaderNumber parameter is not used in this body - confirm
// whether an upper-bound check was intended.
public BlockHeader[]? GetNextHeaders(int maxCount, long maxHeaderNumber)
{
    long? startingPoint = GetStartingPoint();
    if (startingPoint == null)
    {
        if (_logger.IsTrace)
        {
            _logger.Trace($"ChainLevelHelper.GetNextHeaders - starting point is null");
        }
        return (null);
    }
    if (_logger.IsTrace)
    {
        _logger.Trace($"ChainLevelHelper.GetNextHeaders - starting point is {startingPoint}");
    }
    List<BlockHeader> headers = new(maxCount);
    int i = 0;
    while (i < maxCount)
    {
        ChainLevelInfo? level = _blockTree.FindLevel(startingPoint!.Value);
        BlockInfo? beaconMainChainBlock = level?.BeaconMainChainBlock;
        // Stop as soon as a level without a beacon main-chain block is reached.
        if (level == null || beaconMainChainBlock == null)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"ChainLevelHelper.GetNextHeaders - level {startingPoint} not found");
            }
            break;
        }
        BlockHeader? newHeader = _blockTree.FindHeader(beaconMainChainBlock.BlockHash, BlockTreeLookupOptions.None);
        if (newHeader == null)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"ChainLevelHelper - header {startingPoint} not found");
            }
            break;
        }
        if (_logger.IsTrace)
        {
            _logger.Trace($"ChainLevelHelper - MainChainBlock: {level.MainChainBlock} TD: {level.MainChainBlock?.TotalDifficulty}");
            foreach (BlockInfo bi in level.BlockInfos)
            {
                _logger.Trace($"ChainLevelHelper {bi.BlockHash}, {bi.BlockNumber} {bi.TotalDifficulty} {bi.Metadata}");
            }
        }
        // Beacon infos store 0 for an unknown total difficulty; surface that as null.
        if (beaconMainChainBlock.IsBeaconInfo)
        {
            newHeader.TotalDifficulty = beaconMainChainBlock.TotalDifficulty == 0 ? null : beaconMainChainBlock.TotalDifficulty;
        }
        if (_logger.IsTrace)
        {
            _logger.Trace(
                $"ChainLevelHelper - A new block header {newHeader.ToString(BlockHeader.Format.FullHashAndNumber)}, header TD {newHeader.TotalDifficulty}");
        }
        headers.Add(newHeader);
        ++i;
        if (i >= maxCount)
        {
            break;
        }
        ++startingPoint;
    }
    return (headers.ToArray());
}
/// <summary>
/// Creates a word that points at a postings block without in-memory postings.
/// </summary>
public Word(string value, BlockInfo? postingsAddress)
{
    PostingsAddress = postingsAddress;
    Value = value;
}
/// <summary>
/// Returns a number BEFORE the lowest beacon info where the forward beacon sync should start, or the latest
/// block that was processed where we should continue processing.
/// </summary>
/// <returns>The starting block number, or null when it cannot be determined.</returns>
private long? GetStartingPoint()
{
    // Start no further ahead than either the best known block + 1 or the beacon
    // pivot's process destination, whichever is lower.
    long startingPoint = Math.Min(_blockTree.BestKnownNumber + 1, _beaconPivot.ProcessDestination?.Number ?? long.MaxValue);
    bool foundBeaconBlock;
    if (_logger.IsTrace)
    {
        _logger.Trace($"ChainLevelHelper. starting point's starting point is {startingPoint}");
    }
    BlockInfo? beaconMainChainBlock = GetBeaconMainChainBlockInfo(startingPoint);
    if (beaconMainChainBlock == null)
    {
        return (null);
    }
    // A non-beacon info means normal processing can simply continue from here.
    if (!beaconMainChainBlock.IsBeaconInfo)
    {
        return (startingPoint);
    }
    Keccak currentHash = beaconMainChainBlock.BlockHash;
    // in normal situation we will have one iteration of this loop, in some cases a few. Thanks to that we don't need to add extra pointer to manage forward syncing
    do
    {
        BlockHeader? header = _blockTree.FindHeader(currentHash!, BlockTreeLookupOptions.None);
        if (header == null)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"Header for number {startingPoint} was not found");
            }
            return (null);
        }
        BlockInfo parentBlockInfo = (_blockTree.GetInfo(header.Number - 1, header.ParentHash!)).Info;
        foundBeaconBlock = parentBlockInfo.IsBeaconInfo;
        if (_logger.IsTrace)
        {
            _logger.Trace(
                $"Searching for starting point on level {startingPoint}. Header: {header.ToString(BlockHeader.Format.FullHashAndNumber)}, BlockInfo: {parentBlockInfo.IsBeaconBody}, {parentBlockInfo.IsBeaconHeader}");
        }
        // Note: the starting point, points to the non-beacon info block.
        // MergeBlockDownloader does not download the first header so this is deliberate
        --startingPoint;
        currentHash = header.ParentHash!;
        // Fast sync never walks below the configured pivot.
        if (_syncConfig.FastSync && startingPoint <= _syncConfig.PivotNumberParsed)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"Reached syncConfig pivot. Starting point: {startingPoint}");
            }
            break;
        }
    } while (foundBeaconBlock);
    return (startingPoint);
}