public BlockHeader FindHeader(Keccak blockHash, bool mainChainOnly)
 {
     return(_wrapped.FindHeader(blockHash, mainChainOnly));
 }
Example No. 2
        private void RunBloomMigration(CancellationToken token)
        {
            BlockHeader GetMissingBlockHeader(long i)
            {
                if (_logger.IsWarn)
                {
                    _logger.Warn(GetLogMessage("warning", $"Header for block {i} not found. Logs will not be searchable for this block."));
                }
                return(EmptyHeader);
            }

            if (_context.BloomStorage == null)
            {
                throw new StepDependencyException(nameof(_context.BloomStorage));
            }
            if (_context.BlockTree == null)
            {
                throw new StepDependencyException(nameof(_context.BlockTree));
            }
            if (_context.ChainLevelInfoRepository == null)
            {
                throw new StepDependencyException(nameof(_context.ChainLevelInfoRepository));
            }

            IBlockTree    blockTree = _context.BlockTree;
            IBloomStorage storage   = _context.BloomStorage;
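            // Resume migration from the block after the last one already migrated
            // and run up to MinBlockNumber (inclusive).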
            long          to        = MinBlockNumber;
            long          synced    = storage.MigratedBlockNumber + 1;
            long          from      = synced;

            _migrateCount = to + 1;
            _averages     = _context.BloomStorage.Averages.ToArray();
            var chainLevelInfoRepository = _context.ChainLevelInfoRepository;

            _progress.Update(synced);

            if (_logger.IsInfo)
            {
                _logger.Info(GetLogMessage("started"));
            }

            using (var timer = new Timer(1000)
            {
                Enabled = true
            })
            {
                timer.Elapsed += (o, e) =>
                {
                    if (_logger.IsInfo)
                    {
                        _logger.Info(GetLogMessage("in progress"));
                    }
                };

                try
                {
                    storage.Migrate(GetHeadersForMigration());
                }
                finally
                {
                    _progress.MarkEnd();
                    _stopwatch?.Stop();
                }

                IEnumerable <BlockHeader> GetHeadersForMigration()
                {
                    bool TryGetMainChainBlockHashFromLevel(long number, out Keccak? blockHash)
                    {
                        using var batch = chainLevelInfoRepository.StartBatch();
                        var level = chainLevelInfoRepository.LoadLevel(number);

                        if (level != null)
                        {
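                            // A level that has block infos but is not marked as having a main-chain block
                            // gets fixed up here and persisted as part of the batch.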
                            if (!level.HasBlockOnMainChain)
                            {
                                if (level.BlockInfos.Length > 0)
                                {
                                    level.HasBlockOnMainChain = true;
                                    chainLevelInfoRepository.PersistLevel(number, level, batch);
                                }
                            }

                            blockHash = level.MainChainBlock?.BlockHash;
                            return(blockHash != null);
                        }
                        else
                        {
                            blockHash = null;
                            return(false);
                        }
                    }

                    for (long i = from; i <= to; i++)
                    {
                        if (token.IsCancellationRequested)
                        {
                            timer.Stop();
                            if (_logger.IsInfo)
                            {
                                _logger.Info(GetLogMessage("cancelled"));
                            }
                            yield break;
                        }

                        if (TryGetMainChainBlockHashFromLevel(i, out var blockHash))
                        {
                            var header = blockTree.FindHeader(blockHash, BlockTreeLookupOptions.None);
                            yield return(header ?? GetMissingBlockHeader(i));
                        }
                        else
                        {
                            yield return(GetMissingBlockHeader(i));
                        }

                        _progress.Update(++synced);
                    }
                }
            }

            if (!token.IsCancellationRequested)
            {
                if (_logger.IsInfo)
                {
                    _logger.Info(GetLogMessage("finished"));
                }
            }
        }
Example No. 3
        public async Task <long> DownloadHeaders(PeerInfo bestPeer, BlocksRequest blocksRequest, CancellationToken cancellation)
        {
            if (bestPeer == null)
            {
                string message = $"Not expecting best peer to be null inside the {nameof(BlockDownloader)}";
                _logger.Error(message);
                throw new ArgumentNullException(nameof(bestPeer), message);
            }

            int headersSynced       = 0;
            int ancestorLookupLevel = 0;

            long currentNumber = Math.Max(0, Math.Min(_blockTree.BestKnownNumber, bestPeer.HeadNumber - 1));

            while (bestPeer.TotalDifficulty > (_blockTree.BestSuggestedHeader?.TotalDifficulty ?? 0) && currentNumber <= bestPeer.HeadNumber)
            {
                int headersSyncedInPreviousRequests = headersSynced;
                if (_logger.IsTrace)
                {
                    _logger.Trace($"Continue headers sync with {bestPeer} (our best {_blockTree.BestKnownNumber})");
                }

                long blocksLeft       = bestPeer.HeadNumber - currentNumber - (blocksRequest.NumberOfLatestBlocksToBeIgnored ?? 0);
                int  headersToRequest = (int)Math.Min(blocksLeft + 1, _syncBatchSize.Current);
                if (headersToRequest <= 1)
                {
                    break;
                }

                if (_logger.IsDebug)
                {
                    _logger.Debug($"Headers request {currentNumber}+{headersToRequest} to peer {bestPeer} with {bestPeer.HeadNumber} blocks. Got {currentNumber} and asking for {headersToRequest} more.");
                }
                BlockHeader[] headers = await RequestHeaders(bestPeer, cancellation, currentNumber, headersToRequest);

                BlockHeader? startingPoint = headers[0] == null ? null : _blockTree.FindHeader(headers[0].Hash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
                if (startingPoint == null)
                {
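                    // The first header of this batch is unknown locally - widen the ancestor search
                    // and move the request window back by the next ancestor jump increment.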
                    ancestorLookupLevel++;
                    if (ancestorLookupLevel >= _ancestorJumps.Length)
                    {
                        if (_logger.IsWarn)
                        {
                            _logger.Warn($"Could not find common ancestor with {bestPeer}");
                        }
                        throw new EthSyncException("Peer with inconsistent chain in sync");
                    }

                    int ancestorJump = _ancestorJumps[ancestorLookupLevel] - _ancestorJumps[ancestorLookupLevel - 1];
                    currentNumber = currentNumber >= ancestorJump ? (currentNumber - ancestorJump) : 0L;
                    continue;
                }

                ancestorLookupLevel = 0;
                _sinceLastTimeout++;
                if (_sinceLastTimeout >= 2)
                {
                    // if peers are not timing out then we can try to be slightly more eager
                    _syncBatchSize.Expand();
                }

                for (int i = 1; i < headers.Length; i++)
                {
                    if (cancellation.IsCancellationRequested)
                    {
                        break;
                    }

                    BlockHeader currentHeader = headers[i];
                    if (currentHeader == null)
                    {
                        if (headersSynced - headersSyncedInPreviousRequests > 0)
                        {
                            break;
                        }

                        SyncPeerPool.ReportNoSyncProgress(bestPeer, AllocationContexts.Blocks);
                        return(0);
                    }

                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Received {currentHeader} from {bestPeer:s}");
                    }
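                    // Validate against the previous header of the batch when one is available,
                    // otherwise fall back to standalone header validation.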
                    bool isValid = i > 1 ? _blockValidator.ValidateHeader(currentHeader, headers[i - 1], false) : _blockValidator.ValidateHeader(currentHeader, false);
                    if (!isValid)
                    {
                        throw new EthSyncException($"{bestPeer} sent a block {currentHeader.ToString(BlockHeader.Format.Short)} with an invalid header");
                    }

                    // i == 0 is always false but leave it this way as it is possible that we will change the
                    // loop iterator to start with 0
                    if (HandleAddResult(bestPeer, currentHeader, i == 0, _blockTree.Insert(currentHeader)))
                    {
                        headersSynced++;
                    }

                    currentNumber = currentNumber + 1;
                }

                if (headersSynced > 0)
                {
                    _syncReport.FullSyncBlocksDownloaded.Update(_blockTree.BestSuggestedHeader?.Number ?? 0);
                    _syncReport.FullSyncBlocksKnown = bestPeer.HeadNumber;
                }
                else
                {
                    break;
                }
            }

            return(headersSynced);
        }
Example No. 4
        public async Task <long> DownloadHeaders(PeerInfo bestPeer, int newBlocksToSkip, CancellationToken cancellation)
        {
            if (bestPeer == null)
            {
                string message = $"Not expecting best peer to be null inside the {nameof(BlockDownloader)}";
                _logger.Error(message);
                throw new ArgumentNullException(nameof(bestPeer), message);
            }

            int headersSynced       = 0;
            int ancestorLookupLevel = 0;

            long currentNumber = Math.Max(0, Math.Min(_blockTree.BestKnownNumber, bestPeer.HeadNumber - 1));

            while (bestPeer.TotalDifficulty > (_blockTree.BestSuggested?.TotalDifficulty ?? 0) && currentNumber <= bestPeer.HeadNumber)
            {
                if (_logger.IsTrace)
                {
                    _logger.Trace($"Continue headers sync with {bestPeer} (our best {_blockTree.BestKnownNumber})");
                }

                if (ancestorLookupLevel > MaxReorganizationLength)
                {
                    if (_logger.IsWarn)
                    {
                        _logger.Warn($"Could not find common ancestor with {bestPeer}");
                    }
                    throw new EthSynchronizationException("Peer with inconsistent chain in sync");
                }

                long blocksLeft       = bestPeer.HeadNumber - currentNumber - newBlocksToSkip;
                int  headersToRequest = (int)BigInteger.Min(blocksLeft + 1, _syncBatchSize.Current);
                if (headersToRequest <= 1)
                {
                    break;
                }

                if (_logger.IsTrace)
                {
                    _logger.Trace($"Headers request {currentNumber}+{headersToRequest} to peer {bestPeer} with {bestPeer.HeadNumber} blocks. Got {currentNumber} and asking for {headersToRequest} more.");
                }
                var headers = await RequestHeaders(bestPeer, cancellation, currentNumber, headersToRequest);

                BlockHeader startingPoint = headers[0] == null ? null : _blockTree.FindHeader(headers[0].Hash);
                if (startingPoint == null)
                {
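                    // The first header of the batch is unknown locally - step the request window back
                    // by a full batch and keep looking for a common ancestor.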
                    ancestorLookupLevel += _syncBatchSize.Current;
                    currentNumber        = currentNumber >= _syncBatchSize.Current ? (currentNumber - _syncBatchSize.Current) : 0L;
                    continue;
                }

                _sinceLastTimeout++;
                if (_sinceLastTimeout >= 2)
                {
                    _syncBatchSize.Expand();
                }

                for (int i = 1; i < headers.Length; i++)
                {
                    if (cancellation.IsCancellationRequested)
                    {
                        break;
                    }

                    BlockHeader currentHeader = headers[i];
                    if (currentHeader == null)
                    {
                        if (headersSynced > 0)
                        {
                            break;
                        }

                        return(0);
                    }

                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Received {currentHeader} from {bestPeer:s}");
                    }
                    if (!_blockValidator.ValidateHeader(currentHeader, false))
                    {
                        throw new EthSynchronizationException($"{bestPeer} sent a block {currentHeader.ToString(BlockHeader.Format.Short)} with an invalid header");
                    }

                    if (HandleAddResult(currentHeader, i == 0, _blockTree.SuggestHeader(currentHeader)))
                    {
                        headersSynced++;
                    }

                    currentNumber = currentNumber + 1;
                }

                if (headersSynced > 0)
                {
                    _syncStats.ReportBlocksDownload(_blockTree.BestSuggested?.Number ?? 0, bestPeer.HeadNumber);
                }
            }

            return(headersSynced);
        }
Example No. 5
        internal Snapshot GetOrCreateSnapshot(UInt256 number, Keccak hash)
        {
            // Search for a snapshot in memory or on disk for checkpoints
            List <BlockHeader> headers = new List <BlockHeader>();
            Snapshot           snapshot;

            while (true)
            {
                snapshot = GetSnapshot(number, hash);
                if (snapshot != null)
                {
                    break;
                }

                // If we're at a checkpoint block, make a snapshot if it's known
                BlockHeader header = _blockTree.FindHeader(hash);
                if (header == null)
                {
                    throw new InvalidOperationException("Unknown ancestor");
                }

                if (header.Hash == null)
                {
                    throw new InvalidOperationException("Block tree block without hash set");
                }

                Keccak parentHash = header.ParentHash;
                if (number == 0 || (IsEpochTransition(number) && _blockTree.FindHeader(parentHash) == null))
                {
                    int signersCount = header.CalculateSignersCount();
                    SortedList <Address, UInt256> signers = new SortedList <Address, UInt256>(signersCount, CliqueAddressComparer.Instance);
                    for (int i = 0; i < signersCount; i++)
                    {
                        Address signer = new Address(header.ExtraData.Slice(Clique.ExtraVanityLength + i * AddressLength, AddressLength));
                        signers.Add(signer, UInt256.Zero);
                    }

                    snapshot = new Snapshot(_signatures, number, header.Hash, signers);
                    snapshot.Store(_blocksDb);
                    break;
                }

                // No snapshot for this header, gather the header and move backward
                headers.Add(header);
                number = number - 1;
                hash   = header.ParentHash;
            }

            // Previous snapshot found, apply any pending headers on top of it
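            // Reverse the gathered headers in place so they are applied oldest-first.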
            for (int i = 0; i < headers.Count / 2; i++)
            {
                BlockHeader temp = headers[headers.Count - 1 - i];
                headers[headers.Count - 1 - i] = headers[i];
                headers[i] = temp;
            }

            for (int i = 0; i < headers.Count; i++)
            {
                headers[i].Author = headers[i].Author ?? GetBlockSealer(headers[i]);
            }

            snapshot = snapshot.Apply(headers, _config.Epoch);

            _recent.Set(snapshot.Hash, snapshot);
            // If we've generated a new checkpoint snapshot, save to disk
            if ((ulong)snapshot.Number % Clique.CheckpointInterval == 0 && headers.Count > 0)
            {
                snapshot.Store(_blocksDb);
            }

            return(snapshot);
        }
Example No. 6
        private void Process(Block suggestedBlock, bool forMining)
        {
            if (suggestedBlock.Number != 0 && _blockTree.FindParent(suggestedBlock) == null)
            {
                throw new InvalidOperationException("Got an orphaned block for porcessing.");
            }

            if (suggestedBlock.Header.TotalDifficulty == null)
            {
                throw new InvalidOperationException("block without total difficulty calculated was suggested for processing");
            }

            if (!forMining && suggestedBlock.Hash == null)
            {
                throw new InvalidOperationException("block hash should be known at this stage if the block is not mining");
            }

            foreach (BlockHeader ommerHeader in suggestedBlock.Ommers)
            {
                if (ommerHeader.Hash == null)
                {
                    throw new InvalidOperationException("ommer's hash is null when processing block");
                }
            }

            BigInteger totalDifficulty   = suggestedBlock.TotalDifficulty ?? 0;
            BigInteger totalTransactions = suggestedBlock.TotalTransactions ?? 0;

            if (_logger.IsDebugEnabled)
            {
                _logger.Debug($"Total difficulty of block {suggestedBlock.ToString(Block.Format.Short)} is {totalDifficulty}");
                _logger.Debug($"Total transactions of block {suggestedBlock.ToString(Block.Format.Short)} is {totalTransactions}");
            }

            if (totalDifficulty > (_blockTree.Head?.TotalDifficulty ?? 0))
            {
                List <Block> blocksToBeAddedToMain = new List <Block>();
                Block        toBeProcessed         = suggestedBlock;
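                // Walk back from the suggested block until a block already on the main chain
                // (or the genesis block) is reached, collecting the branch to be processed.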
                do
                {
                    blocksToBeAddedToMain.Add(toBeProcessed);
                    toBeProcessed = toBeProcessed.Number == 0 ? null : _blockTree.FindParent(toBeProcessed);
                    // TODO: need to remove the hardcoded head block store at keccak zero as it would be referenced by the genesis...

                    if (toBeProcessed == null)
                    {
                        break;
                    }
                } while (!_blockTree.IsMainChain(toBeProcessed.Hash));

                BlockHeader branchingPoint = toBeProcessed?.Header;
                if (branchingPoint != null && branchingPoint.Hash != _blockTree.Head?.Hash)
                {
                    if (_logger.IsDebugEnabled)
                    {
                        _logger.Debug($"Head block was: {_blockTree.Head?.ToString(BlockHeader.Format.Short)}");
                        _logger.Debug($"Branching from: {branchingPoint.ToString(BlockHeader.Format.Short)}");
                    }
                }
                else
                {
                    if (_logger.IsDebugEnabled)
                    {
                        _logger.Debug(branchingPoint == null ? "Setting as genesis block" : $"Adding on top of {branchingPoint.ToString(BlockHeader.Format.Short)}");
                    }
                }

                Keccak stateRoot = branchingPoint?.StateRoot;
                if (_logger.IsTraceEnabled)
                {
                    _logger.Trace($"State root lookup: {stateRoot}");
                }

                List <Block> unprocessedBlocksToBeAddedToMain = new List <Block>();

                foreach (Block block in blocksToBeAddedToMain)
                {
                    if (!forMining && _blockTree.WasProcessed(block.Hash))
                    {
                        stateRoot = block.Header.StateRoot;
                        if (_logger.IsTraceEnabled)
                        {
                            _logger.Trace($"State root lookup: {stateRoot}");
                        }

                        break;
                    }

                    unprocessedBlocksToBeAddedToMain.Add(block);
                }

                Block[] blocks = new Block[unprocessedBlocksToBeAddedToMain.Count];
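                // Copy in reverse order so the oldest block comes first and parents are processed before children.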
                for (int i = 0; i < unprocessedBlocksToBeAddedToMain.Count; i++)
                {
                    blocks[blocks.Length - i - 1] = unprocessedBlocksToBeAddedToMain[i];
                }

                if (_logger.IsDebugEnabled)
                {
                    _logger.Debug($"Processing {blocks.Length} blocks from state root {stateRoot}");
                }

                //TODO: process blocks one by one here, refactor this, test
                for (int i = 0; i < blocks.Length; i++)
                {
                    if (blocks[i].Transactions.Length > 0 && blocks[i].Transactions[0].SenderAddress == null)
                    {
                        _signer.RecoverAddresses(blocks[i]);
                    }
                }
                Block[] processedBlocks = _blockProcessor.Process(stateRoot, blocks, forMining);

                // TODO: lots of unnecessary loading and decoding here, review after adding support for loading headers only
                List <BlockHeader> blocksToBeRemovedFromMain = new List <BlockHeader>();
                if (_blockTree.Head?.Hash != branchingPoint?.Hash && _blockTree.Head != null)
                {
                    blocksToBeRemovedFromMain.Add(_blockTree.Head);
                    BlockHeader toBeRemovedFromMain = _blockTree.FindHeader(_blockTree.Head.ParentHash);
                    while (toBeRemovedFromMain != null && toBeRemovedFromMain.Hash != branchingPoint?.Hash)
                    {
                        blocksToBeRemovedFromMain.Add(toBeRemovedFromMain);
                        toBeRemovedFromMain = _blockTree.FindHeader(toBeRemovedFromMain.ParentHash);
                    }
                }

                if (!forMining)
                {
                    foreach (Block processedBlock in processedBlocks)
                    {
                        if (_logger.IsDebugEnabled)
                        {
                            _logger.Debug($"Marking {processedBlock.ToString(Block.Format.Short)} as processed");
                        }

                        // TODO: review storage and retrieval of receipts since we removed them from the block class
                        _blockTree.MarkAsProcessed(processedBlock.Hash);
                    }

                    Block newHeadBlock = processedBlocks[processedBlocks.Length - 1];
                    newHeadBlock.Header.TotalDifficulty = suggestedBlock.TotalDifficulty; // TODO: cleanup total difficulty
                    if (_logger.IsDebugEnabled)
                    {
                        _logger.Debug($"Setting head block to {newHeadBlock.ToString(Block.Format.Short)}");
                    }

                    foreach (BlockHeader blockHeader in blocksToBeRemovedFromMain)
                    {
                        if (_logger.IsDebugEnabled)
                        {
                            _logger.Debug($"Moving {blockHeader.ToString(BlockHeader.Format.Short)} to branch");
                        }

                        _blockTree.MoveToBranch(blockHeader.Hash);
                        // TODO: only for miners
                        //foreach (Transaction transaction in block.Transactions)
                        //{
                        //    _transactionStore.AddPending(transaction);
                        //}

                        if (_logger.IsDebugEnabled)
                        {
                            _logger.Debug($"Block {blockHeader.ToString(BlockHeader.Format.Short)} moved to branch");
                        }
                    }

                    foreach (Block block in blocksToBeAddedToMain)
                    {
                        if (_logger.IsDebugEnabled)
                        {
                            _logger.Debug($"Moving {block.ToString(Block.Format.Short)} to main");
                        }

                        _blockTree.MoveToMain(block);
                        // TODO: only for miners
                        foreach (Transaction transaction in block.Transactions)
                        {
                            _transactionStore.RemovePending(transaction);
                        }

                        if (_logger.IsDebugEnabled)
                        {
                            _logger.Debug($"Block {block.ToString(Block.Format.Short)} added to main chain");
                        }
                    }

                    if (_logger.IsDebugEnabled)
                    {
                        _logger.Debug($"Updating total difficulty of the main chain to {totalDifficulty}");
                    }
                    if (_logger.IsDebugEnabled)
                    {
                        _logger.Debug($"Updating total transactions of the main chain to {totalTransactions}");
                    }
                }
                else
                {
                    Block blockToBeMined = processedBlocks[processedBlocks.Length - 1];
                    _miningCancellation = new CancellationTokenSource();
                    CancellationTokenSource anyCancellation =
                        CancellationTokenSource.CreateLinkedTokenSource(_miningCancellation.Token, _loopCancellationSource.Token);
                    _sealEngine.MineAsync(blockToBeMined, anyCancellation.Token).ContinueWith(t =>
                    {
                        anyCancellation.Dispose();

                        if (_logger.IsInfoEnabled)
                        {
                            _logger.Info($"Mined a block {t.Result.ToString(Block.Format.Short)} with parent {t.Result.Header.ParentHash}");
                        }

                        Block minedBlock = t.Result;

                        if (minedBlock.Hash == null)
                        {
                            throw new InvalidOperationException("Mined a block with null hash");
                        }

                        _blockTree.SuggestBlock(minedBlock);
                    }, _miningCancellation.Token);
                }
            }
        }
Example No. 7
 public BlockHeader FindHeader(Keccak blockHash)
 {
     return(_wrapped.FindHeader(blockHash));
 }
Example No. 8
        public Snapshot GetOrCreateSnapshot(long number, Keccak hash)
        {
            Snapshot? snapshot = GetSnapshot(number, hash);

            if (!(snapshot is null))
            {
                return(snapshot);
            }

            var headers = new List <BlockHeader>();

            lock (_snapshotCreationLock)
            {
                // Search for a snapshot in memory or on disk for checkpoints
                while (true)
                {
                    snapshot = GetSnapshot(number, hash);
                    if (snapshot != null)
                    {
                        break;
                    }

                    // If we're at a checkpoint block, make a snapshot if it's known
                    BlockHeader header = _blockTree.FindHeader(hash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
                    if (header == null)
                    {
                        throw new InvalidOperationException("Unknown ancestor");
                    }

                    if (header.Hash == null)
                    {
                        throw new InvalidOperationException("Block tree block without hash set");
                    }

                    Keccak parentHash = header.ParentHash;
                    if (IsEpochTransition(number))
                    {
                        Snapshot? parentSnapshot = GetSnapshot(number - 1, parentHash);

                        if (_logger.IsInfo)
                        {
                            _logger.Info($"Creating epoch snapshot at block {number}");
                        }
                        int     signersCount = CalculateSignersCount(header);
                        var     signers      = new SortedList <Address, long>(signersCount, AddressComparer.Instance);
                        Address epochSigner  = GetBlockSealer(header);
                        for (int i = 0; i < signersCount; i++)
                        {
                            Address signer = new Address(header.ExtraData.Slice(Clique.ExtraVanityLength + i * Address.ByteLength, Address.ByteLength));
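                            // Seed each signer's entry: the sealer of this epoch block gets the current block number,
                            // other signers carry over their value from the parent snapshot (or 0 when unknown).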
                            signers.Add(signer, signer == epochSigner ? number : parentSnapshot == null ? 0L : parentSnapshot.Signers.ContainsKey(signer) ? parentSnapshot.Signers[signer] : 0L);
                        }

                        snapshot = new Snapshot(number, header.Hash, signers);
                        Store(snapshot);
                        break;
                    }

                    // No snapshot for this header, gather the header and move backward
                    headers.Add(header);
                    number = number - 1;
                    hash   = header.ParentHash;
                }

                if (headers.Count > 0)
                {
                    // Previous snapshot found, apply any pending headers on top of it
                    headers.Reverse();

                    for (int i = 0; i < headers.Count; i++)
                    {
                        headers[i].Author ??= GetBlockSealer(headers[i]);
                    }

                    int countBefore = snapshot.Signers.Count;
                    snapshot = Apply(snapshot, headers, _cliqueConfig.Epoch);

                    int countAfter = snapshot.Signers.Count;
                    if (countAfter != countBefore && _logger.IsInfo)
                    {
                        int    signerIndex = 0;
                        string word        = countAfter > countBefore ? "added to" : "removed from";
                        _logger.Info($"At block {number } a signer has been {word} the signer list:{Environment.NewLine}{string.Join(Environment.NewLine, snapshot.Signers.OrderBy(s => s.Key, AddressComparer.Instance).Select(s => $"  Signer {signerIndex++}: " + (KnownAddresses.GoerliValidators.ContainsKey(s.Key) ? KnownAddresses.GoerliValidators[s.Key] : s.Key.ToString())))}");
                    }
                }

                _snapshotCache.Set(snapshot.Hash, snapshot);
                // If we've generated a new checkpoint snapshot, save to disk
            }

            if ((ulong)snapshot.Number % Clique.CheckpointInterval == 0 && headers.Count > 0)
            {
                Store(snapshot);
            }

            return(snapshot);
        }
Example No. 9
        private IReadOnlyList <BlockHeader> GetFinalizedBlocks(BlockHeader block)
        {
            (ChainLevelInfo parentLevel, BlockInfo parentBlockInfo) GetBlockInfo(BlockHeader blockHeader)
            {
                var chainLevelInfo = _chainLevelInfoRepository.LoadLevel(blockHeader.Number);
                var blockInfo      = chainLevelInfo.BlockInfos.First(i => i.BlockHash == blockHeader.Hash);

                return(chainLevelInfo, blockInfo);
            }

            var minSealersForFinalization = block.IsGenesis ? 1 : _auRaValidator.MinSealersForFinalization;
            var originalBlock             = block;

            bool IsConsecutiveBlock() => originalBlock.ParentHash == _lastProcessedBlockHash;
            bool ConsecutiveBlockWillFinalizeBlocks() => _consecutiveValidatorsForNotYetFinalizedBlocks.CountWith(block) >= minSealersForFinalization;

            List <BlockHeader> finalizedBlocks;
            var isConsecutiveBlock = IsConsecutiveBlock();

            // Optimization:
            // if the block is consecutive then we can just check if this sealer will cause any blocks to get finalized
            // using the cache of validators of not yet finalized blocks from the previous block operation
            if (isConsecutiveBlock && !ConsecutiveBlockWillFinalizeBlocks())
            {
                finalizedBlocks = Empty;
                _consecutiveValidatorsForNotYetFinalizedBlocks.Add(block);
            }
            else
            {
                if (!isConsecutiveBlock)
                {
                    _consecutiveValidatorsForNotYetFinalizedBlocks.Clear();
                }

                finalizedBlocks = new List <BlockHeader>();
                var  validators             = new HashSet <Address>();
                var  originalBlockSealer    = originalBlock.Beneficiary;
                bool ancestorsNotYetRemoved = true;

                using (var batch = _chainLevelInfoRepository.StartBatch())
                {
                    var (chainLevel, blockInfo) = GetBlockInfo(block);

                    // Optimization:
                    // if this block sealer seals for the 2nd time then this seal cannot finalize any blocks,
                    // as the 1st seal or some seal between the 1st seal and the current one would already have finalized some of them
                    bool OriginalBlockSealerSignedOnlyOnce() => !validators.Contains(originalBlockSealer) || block.Beneficiary != originalBlockSealer;

                    while (!blockInfo.IsFinalized && OriginalBlockSealerSignedOnlyOnce())
                    {
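                        // Walk back through ancestors collecting distinct sealers; once enough distinct sealers
                        // have signed, this block and the remaining ancestors visited are marked as finalized.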
                        validators.Add(block.Beneficiary);
                        if (validators.Count >= minSealersForFinalization)
                        {
                            blockInfo.IsFinalized = true;
                            _chainLevelInfoRepository.PersistLevel(block.Number, chainLevel, batch);

                            finalizedBlocks.Add(block);
                            if (ancestorsNotYetRemoved)
                            {
                                _consecutiveValidatorsForNotYetFinalizedBlocks.RemoveAncestors(block.Number);
                                ancestorsNotYetRemoved = false;
                            }
                        }
                        else
                        {
                            _consecutiveValidatorsForNotYetFinalizedBlocks.Add(block);
                        }

                        if (!block.IsGenesis)
                        {
                            block = _blockTree.FindHeader(block.ParentHash, BlockTreeLookupOptions.None);
                            (chainLevel, blockInfo) = GetBlockInfo(block);
                        }
                    }
                }

                finalizedBlocks.Reverse(); // we were adding from the last to earliest, going through parents
            }

            _lastProcessedBlockHash = originalBlock.Hash;

            return(finalizedBlocks);
        }
Example No. 10
 public static BlockHeader FindParentHeader(this IBlockTree tree, BlockHeader header, BlockTreeLookupOptions options)
 {
     return(tree.FindHeader(header.ParentHash, options));
 }
Example No. 11
        private async Task RefreshPeerInfo(PeerInfo peerInfo, CancellationToken token)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"Requesting head block info from {peerInfo.SyncPeer.Node:s}");
            }

            ISyncPeer          syncPeer          = peerInfo.SyncPeer;
            Task <BlockHeader> getHeadHeaderTask = peerInfo.SyncPeer.GetHeadBlockHeader(peerInfo.HeadHash, token);
            Task delayTask       = Task.Delay(InitTimeout, token);
            Task firstToComplete = await Task.WhenAny(getHeadHeaderTask, delayTask);

            await firstToComplete.ContinueWith(
                t =>
            {
                if (firstToComplete.IsFaulted || firstToComplete == delayTask)
                {
                    if (_logger.IsDebug)
                    {
                        _logger.Debug($"InitPeerInfo failed for node: {syncPeer.Node:s}{Environment.NewLine}{t.Exception}");
                    }
                    syncPeer.Disconnect(DisconnectReason.DisconnectRequested, "refresh peer info fault");
                    SyncEvent?.Invoke(this, new SyncEventArgs(syncPeer, peerInfo.IsInitialized ? Synchronization.SyncEvent.Failed : Synchronization.SyncEvent.InitFailed));
                }
                else if (firstToComplete.IsCanceled)
                {
                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"InitPeerInfo canceled for node: {syncPeer.Node:s}{Environment.NewLine}{t.Exception}");
                    }
                    SyncEvent?.Invoke(this, new SyncEventArgs(syncPeer, peerInfo.IsInitialized ? Synchronization.SyncEvent.Cancelled : Synchronization.SyncEvent.InitCancelled));
                    token.ThrowIfCancellationRequested();
                }
                else
                {
                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Received head block info from {syncPeer.Node:s} with head block numer {getHeadHeaderTask.Result}");
                    }
                    if (!peerInfo.IsInitialized)
                    {
                        SyncEvent?.Invoke(
                            this,
                            new SyncEventArgs(syncPeer, Synchronization.SyncEvent.InitCompleted));
                    }

                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"REFRESH Updating header of {peerInfo} from {peerInfo.HeadNumber} to {getHeadHeaderTask.Result.Number}");
                    }
                    peerInfo.HeadNumber = getHeadHeaderTask.Result.Number;
                    peerInfo.HeadHash   = getHeadHeaderTask.Result.Hash;

                    BlockHeader parent = _blockTree.FindHeader(getHeadHeaderTask.Result.ParentHash);
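                    // When the parent of the reported head is known locally, estimate the peer's total difficulty
                    // as the parent's total difficulty plus the head block's difficulty.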
                    if (parent != null)
                    {
                        peerInfo.TotalDifficulty = (parent.TotalDifficulty ?? UInt256.Zero) + getHeadHeaderTask.Result.Difficulty;
                    }

                    peerInfo.IsInitialized = true;
                    foreach ((SyncPeerAllocation allocation, object _) in _allocations)
                    {
                        if (allocation.Current == peerInfo)
                        {
                            allocation.Refresh();
                        }
                    }
                }
            }, token);
        }
Example No. 12
 public BlockHeader FindHeader(long number)
 {
     return(_blockTree.FindHeader(number));
 }
Example No. 13
        /// <summary>
        /// Validates all the header elements (usually in relation to parent). Difficulty calculation is validated in <see cref="ISealValidator"/>
        /// </summary>
        /// <param name="header">Block header to validate</param>
        /// <param name="isOmmer"><value>True</value> if the <paramref name="header"/> is an ommer, otherwise <value>False</value></param>
        /// <returns><value>True</value> if <paramref name="header"/> is valid, otherwise <value>False</value></returns>
        public bool Validate(BlockHeader header, bool isOmmer = false)
        {
            BlockHeader parent = _blockTree.FindHeader(header.ParentHash, false);

            return(Validate(header, parent, isOmmer));
        }
Example No. 14
 public BlockHeader FindHeader(Keccak blockHash, BlockTreeLookupOptions options) => _wrapped.FindHeader(blockHash, options);
Example No. 15
        private async Task RefreshPeerInfo(PeerInfo peerInfo, CancellationToken token)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"Requesting head block info from {peerInfo.SyncPeer.Node:s}");
            }

            ISyncPeer          syncPeer          = peerInfo.SyncPeer;
            Task <BlockHeader> getHeadHeaderTask = peerInfo.SyncPeer.GetHeadBlockHeader(peerInfo.HeadHash, token);
            Task delayTask       = Task.Delay(InitTimeout, token);
            Task firstToComplete = await Task.WhenAny(getHeadHeaderTask, delayTask);

            await firstToComplete.ContinueWith(
                t =>
            {
                if (firstToComplete.IsFaulted || firstToComplete == delayTask)
                {
                    if (_logger.IsDebug)
                    {
                        _logger.Debug($"InitPeerInfo failed for node: {syncPeer.Node:c}{Environment.NewLine}{t.Exception}");
                    }
                    _stats.ReportSyncEvent(syncPeer.Node, peerInfo.IsInitialized ? NodeStatsEventType.SyncFailed : NodeStatsEventType.SyncInitFailed);
                    syncPeer.Disconnect(DisconnectReason.DisconnectRequested, "refresh peer info fault");
                }
                else if (firstToComplete.IsCanceled)
                {
                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"InitPeerInfo canceled for node: {syncPeer.Node:c}{Environment.NewLine}{t.Exception}");
                    }
                    _stats.ReportSyncEvent(syncPeer.Node, peerInfo.IsInitialized ? NodeStatsEventType.SyncCancelled : NodeStatsEventType.SyncInitCancelled);
                    token.ThrowIfCancellationRequested();
                }
                else
                {
                    BlockHeader header = getHeadHeaderTask.Result;
                    if (header == null)
                    {
                        if (_logger.IsDebug)
                        {
                            _logger.Debug($"InitPeerInfo failed for node: {syncPeer.Node:c}{Environment.NewLine}{t.Exception}");
                        }

                        _stats.ReportSyncEvent(syncPeer.Node, peerInfo.IsInitialized ? NodeStatsEventType.SyncFailed : NodeStatsEventType.SyncInitFailed);
                        syncPeer.Disconnect(DisconnectReason.DisconnectRequested, "refresh peer info fault");
                        return;
                    }

                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Received head block info from {syncPeer.Node:c} with head block numer {header.Number}");
                    }
                    if (!peerInfo.IsInitialized)
                    {
                        _stats.ReportSyncEvent(syncPeer.Node, NodeStatsEventType.SyncInitCompleted);
                    }

                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"REFRESH Updating header of {peerInfo} from {peerInfo.HeadNumber} to {header.Number}");
                    }
                    peerInfo.HeadNumber = header.Number;
                    peerInfo.HeadHash   = header.Hash;

                    BlockHeader parent = _blockTree.FindHeader(header.ParentHash, BlockTreeLookupOptions.None);
                    if (parent != null)
                    {
                        peerInfo.TotalDifficulty = (parent.TotalDifficulty ?? UInt256.Zero) + header.Difficulty;
                    }

                    peerInfo.IsInitialized = true;
                    foreach ((SyncPeerAllocation allocation, object _) in _allocations)
                    {
                        if (allocation.Current == peerInfo)
                        {
                            allocation.Refresh();
                        }
                    }
                }
            }, token);
        }
Example No. 16
 public BlockHeader FindHeader(Keccak blockHash, BlockTreeLookupOptions options)
 {
     return(_blockTree.FindHeader(blockHash, options));
 }
Example No. 17
 public BlockHeader FindHeader(Keccak hash)
 {
     return(_blockTree.FindHeader(hash, false));
 }
Example No. 18
 public static BlockHeader FindParentHeader(this IBlockTree tree, BlockHeader header)
 {
     return(tree.FindHeader(header.ParentHash, false));
 }
Example No. 19
 public BlockHeader FindHeader(Keccak blockHash) => _blockTree.FindHeader(blockHash, BlockTreeLookupOptions.None);
Example No. 20
        public Snapshot GetSnapshot(Keccak hash)
        {
            BlockHeader head = _blockTree.FindHeader(hash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);

            return(_snapshotManager.GetOrCreateSnapshot(head.Number, head.Hash));
        }
Example No. 21
        /// <summary>
        /// Validates all the header elements (usually in relation to parent). Difficulty calculation is validated in <see cref="ISealValidator"/>
        /// </summary>
        /// <param name="header">Block header to validate</param>
        /// <param name="isOmmer"><value>True</value> if the <paramref name="header"/> is an ommer, otherwise <value>False</value></param>
        /// <returns><value>True</value> if <paramref name="header"/> is valid, otherwise <value>False</value></returns>
        public bool Validate(BlockHeader header, bool isOmmer = false)
        {
            // the rule here is to validate the seal first (avoid any cheap attacks on validation logic)
            // then validate whatever does not need to load parent from disk (the most expensive operation)

            bool areNonceValidAndMixHashValid = header.Number == 0 || header.SealEngineType == SealEngineType.None || _sealValidator.ValidateSeal(header);

            if (!areNonceValidAndMixHashValid)
            {
                if (_logger.IsWarn)
                {
                    _logger.Warn($"Invalid block header ({header.Hash}) - invalid mix hash / nonce");
                }
            }

            bool hashAsExpected = header.Hash == BlockHeader.CalculateHash(header);

            if (!hashAsExpected)
            {
                if (_logger.IsWarn)
                {
                    _logger.Warn($"Invalid block header ({header.Hash}) - invalid block hash");
                }
            }

            bool extraDataValid = isOmmer ||
                                  _daoBlockNumber == null ||
                                  header.Number < _daoBlockNumber ||
                                  header.Number >= _daoBlockNumber + 10 ||
                                  Bytes.AreEqual(header.ExtraData, DaoExtraData);

            if (!extraDataValid)
            {
                _logger.Warn($"Invalid block header ({header.Hash}) - DAO extra data not valid");
            }

            BlockHeader parent = _blockTree.FindHeader(header.ParentHash, false);

            if (parent == null)
            {
                if (header.Number == 0)
                {
                    var isGenesisValid = ValidateGenesis(header);
                    if (!isGenesisValid)
                    {
                        if (_logger.IsWarn)
                        {
                            _logger.Warn($"Invalid genesis block header ({header.Hash})");
                        }
                    }

                    return(isGenesisValid);
                }

                if (_logger.IsWarn)
                {
                    _logger.Warn($"Orphan block, could not find parent ({header.Hash})");
                }
                return(false);
            }

            // seal is validated when synchronizing so we can remove it from here - review and test
            bool sealParamsCorrect = _sealValidator.ValidateParams(parent, header);

            if (!sealParamsCorrect)
            {
                _logger.Warn($"Invalid block header ({header.Hash}) - seal parameters incorrect");
            }

            bool gasUsedBelowLimit = header.GasUsed <= header.GasLimit;

            if (!gasUsedBelowLimit)
            {
                _logger.Warn($"Invalid block header ({header.Hash}) - gas used above gas limit");
            }

            long maxGasLimitDifference = parent.GasLimit / 1024;
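            // The gas limit may move up or down by at most 1/1024 of the parent's gas limit per block;
            // the next two checks enforce the upper and lower bounds.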
            bool gasLimitNotTooHigh    = header.GasLimit < parent.GasLimit + maxGasLimitDifference;

            if (!gasLimitNotTooHigh)
            {
                _logger.Warn($"Invalid block header ({header.Hash}) - gas limit too high");
            }

            bool gasLimitNotTooLow = header.GasLimit > parent.GasLimit - maxGasLimitDifference;

            if (!gasLimitNotTooLow)
            {
                _logger.Warn($"Invalid block header ({header.Hash}) - invalid mix hash / nonce");
            }

            // bool gasLimitAboveAbsoluteMinimum = header.GasLimit >= 125000; // described in the YellowPaper but not followed
            bool timestampMoreThanAtParent = header.Timestamp > parent.Timestamp;

            if (!timestampMoreThanAtParent)
            {
                _logger.Warn($"Invalid block header ({header.Hash}) - timestamp before parent");
            }

            bool numberIsParentPlusOne = header.Number == parent.Number + 1;

            if (!numberIsParentPlusOne)
            {
                _logger.Warn($"Invalid block header ({header.Hash}) - block number is not parent + 1");
            }

            if (_logger.IsTrace)
            {
                _logger.Trace($"Validating block {header.ToString(BlockHeader.Format.Short)}, extraData {header.ExtraData.ToHexString(true)}");
            }

            return
                (areNonceValidAndMixHashValid &&
                 gasUsedBelowLimit &&
                 gasLimitNotTooLow &&
                 gasLimitNotTooHigh &&
                 sealParamsCorrect &&
                 // gasLimitAboveAbsoluteMinimum && // described in the YellowPaper but not followed
                 timestampMoreThanAtParent &&
                 numberIsParentPlusOne &&
                 hashAsExpected &&
                 extraDataValid);
        }