public void Fuzz_accounts_with_reorganizations(
    int accountsCount,
    int blocksCount,
    int uniqueValuesCount,
    int lookupLimit,
    int? seed)
{
    int usedSeed = seed ?? _random.Next(int.MaxValue);
    _random = new Random(usedSeed);
    _logger.Info($"RANDOM SEED {usedSeed}");

    string fileName = Path.GetTempFileName();
    //string fileName = "C:\\Temp\\fuzz.txt";
    _logger.Info(
        $"Fuzzing with accounts: {accountsCount}, " +
        $"blocks {blocksCount}, " +
        $"values: {uniqueValuesCount}, " +
        $"lookup: {lookupLimit} into file {fileName}");

    using FileStream fileStream = new FileStream(fileName, FileMode.Create);
    using StreamWriter streamWriter = new StreamWriter(fileStream);

    Queue<Keccak> rootQueue = new Queue<Keccak>();
    Stack<Keccak> rootStack = new Stack<Keccak>();

    MemDb memDb = new MemDb();

    TrieStore trieStore = new TrieStore(memDb, Prune.WhenCacheReaches(1.MB()), Persist.IfBlockOlderThan(lookupLimit), _logManager);
    PatriciaTree patriciaTree = new PatriciaTree(trieStore, _logManager);

    byte[][] accounts = new byte[accountsCount][];
    byte[][] randomValues = new byte[uniqueValuesCount][];

    for (int i = 0; i < randomValues.Length; i++)
    {
        bool isEmptyValue = _random.Next(0, 2) == 0;
        if (isEmptyValue)
        {
            randomValues[i] = Array.Empty<byte>();
        }
        else
        {
            randomValues[i] = GenerateRandomAccountRlp();
        }
    }

    for (int accountIndex = 0; accountIndex < accounts.Length; accountIndex++)
    {
        byte[] key = new byte[32];
        ((UInt256)accountIndex).ToBigEndian(key);
        accounts[accountIndex] = key;
    }

    int blockCount = 0;
    for (int blockNumber = 0; blockNumber < blocksCount; blockNumber++)
    {
        int reorgDepth = _random.Next(Math.Min(5, blockCount));
        _logger.Debug($"Reorganizing {reorgDepth}");

        for (int i = 0; i < reorgDepth; i++)
        {
            try
            {
                // no longer need undo?
                // trieStore.UndoOneBlock();
            }
            catch (InvalidOperationException)
            {
                // if memory limit hits in
                blockCount = 0;
            }

            rootStack.Pop();
            patriciaTree.RootHash = rootStack.Peek();
        }

        blockCount = Math.Max(0, blockCount - reorgDepth);
        _logger.Debug($"Setting block count to {blockCount}");

        bool isEmptyBlock = _random.Next(5) == 0;
        if (!isEmptyBlock)
        {
            for (int i = 0; i < Math.Max(1, accountsCount / 8); i++)
            {
                int randomAccountIndex = _random.Next(accounts.Length);
                int randomValueIndex = _random.Next(randomValues.Length);

                byte[] account = accounts[randomAccountIndex];
                byte[] value = randomValues[randomValueIndex];

                streamWriter.WriteLine(
                    $"Block {blockCount} - setting {account.ToHexString()} = {value.ToHexString()}");
                patriciaTree.Set(account, value);
            }
        }

        streamWriter.WriteLine(
            $"Commit block {blockCount} | empty: {isEmptyBlock}");
        patriciaTree.UpdateRootHash();
        patriciaTree.Commit(blockCount);
        rootQueue.Enqueue(patriciaTree.RootHash);
        rootStack.Push(patriciaTree.RootHash);
        blockCount++;
        _logger.Debug($"Setting block count to {blockCount}");
    }

    streamWriter.Flush();
    fileStream.Seek(0, SeekOrigin.Begin);

    streamWriter.WriteLine($"DB size: {memDb.Keys.Count}");
    _logger.Info($"DB size: {memDb.Keys.Count}");

    int verifiedBlocks = 0;

    rootQueue.Clear();
    Stack<Keccak> stackCopy = new Stack<Keccak>();
    while (rootStack.Any())
    {
        stackCopy.Push(rootStack.Pop());
    }

    rootStack = stackCopy;

    while (rootStack.TryPop(out Keccak currentRoot))
    {
        try
        {
            patriciaTree.RootHash = currentRoot;
            for (int i = 0; i < accounts.Length; i++)
            {
                patriciaTree.Get(accounts[i]);
            }

            _logger.Info($"Verified positive {verifiedBlocks}");
        }
        catch (Exception ex)
        {
            if (verifiedBlocks % lookupLimit == 0)
            {
                throw new InvalidDataException(ex.ToString());
            }
            else
            {
                _logger.Info($"Verified negative {verifiedBlocks} (which is ok on block {verifiedBlocks})");
            }
        }

        verifiedBlocks++;
    }
}
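The reorganization handling in the test above boils down to popping recently committed roots off a stack and re-pointing the tree at whatever root remains on top. A minimal sketch of just that step, assuming patriciaTree, rootStack, _random, and blockCount are set up exactly as in Fuzz_accounts_with_reorganizations:

// simulate a reorg of up to 5 blocks (never deeper than what has been committed)
int reorgDepth = _random.Next(Math.Min(5, blockCount));
for (int i = 0; i < reorgDepth; i++)
{
    rootStack.Pop();                          // drop the reorged-out root
    patriciaTree.RootHash = rootStack.Peek(); // rewind the tree to the surviving root
}
blockCount = Math.Max(0, blockCount - reorgDepth);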
// [TestCase(4, 16, 4, 4)]
public void Fuzz_accounts(
    int accountsCount,
    int blocksCount,
    int uniqueValuesCount,
    int lookupLimit)
{
    string fileName = Path.GetTempFileName();
    //string fileName = "C:\\Temp\\fuzz.txt";
    _logger.Info(
        $"Fuzzing with accounts: {accountsCount}, " +
        $"blocks {blocksCount}, " +
        $"values: {uniqueValuesCount}, " +
        $"lookup: {lookupLimit} into file {fileName}");

    using FileStream fileStream = new FileStream(fileName, FileMode.Create);
    using StreamWriter streamWriter = new StreamWriter(fileStream);

    Queue<Keccak> rootQueue = new Queue<Keccak>();

    MemDb memDb = new MemDb();

    TrieStore trieStore = new TrieStore(memDb, Prune.WhenCacheReaches(1.MB()), Persist.IfBlockOlderThan(lookupLimit), _logManager);
    StateTree patriciaTree = new StateTree(trieStore, _logManager);

    byte[][] accounts = new byte[accountsCount][];
    byte[][] randomValues = new byte[uniqueValuesCount][];

    for (int i = 0; i < randomValues.Length; i++)
    {
        bool isEmptyValue = _random.Next(0, 2) == 0;
        if (isEmptyValue)
        {
            randomValues[i] = Array.Empty<byte>();
        }
        else
        {
            randomValues[i] = GenerateRandomAccountRlp();
        }
    }

    for (int accountIndex = 0; accountIndex < accounts.Length; accountIndex++)
    {
        byte[] key = new byte[32];
        ((UInt256)accountIndex).ToBigEndian(key);
        accounts[accountIndex] = key;
    }

    for (int blockNumber = 0; blockNumber < blocksCount; blockNumber++)
    {
        bool isEmptyBlock = _random.Next(5) == 0;
        if (!isEmptyBlock)
        {
            for (int i = 0; i < Math.Max(1, accountsCount / 8); i++)
            {
                int randomAccountIndex = _random.Next(accounts.Length);
                int randomValueIndex = _random.Next(randomValues.Length);

                byte[] account = accounts[randomAccountIndex];
                byte[] value = randomValues[randomValueIndex];

                streamWriter.WriteLine(
                    $"Block {blockNumber} - setting {account.ToHexString()} = {value.ToHexString()}");
                patriciaTree.Set(account, value);
            }
        }

        streamWriter.WriteLine(
            $"Commit block {blockNumber} | empty: {isEmptyBlock}");
        patriciaTree.UpdateRootHash();
        patriciaTree.Commit(blockNumber);
        rootQueue.Enqueue(patriciaTree.RootHash);
    }

    streamWriter.Flush();
    fileStream.Seek(0, SeekOrigin.Begin);

    streamWriter.WriteLine($"DB size: {memDb.Keys.Count}");
    _logger.Info($"DB size: {memDb.Keys.Count}");

    int verifiedBlocks = 0;

    while (rootQueue.TryDequeue(out Keccak currentRoot))
    {
        try
        {
            patriciaTree.RootHash = currentRoot;
            for (int i = 0; i < accounts.Length; i++)
            {
                patriciaTree.Get(accounts[i]);
            }

            _logger.Info($"Verified positive {verifiedBlocks}");
        }
        catch (Exception ex)
        {
            if (verifiedBlocks % lookupLimit == 0)
            {
                throw new InvalidDataException(ex.ToString());
            }
            else
            {
                _logger.Info($"Verified negative {verifiedBlocks}");
            }
        }

        verifiedBlocks++;
    }
}
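Both fuzz tests above share the same core pattern: a TrieStore configured with memory-limit pruning and block-age persistence, a tree on top of it, one commit per block, and the root hash recorded so the block can be re-opened later. A condensed sketch of that pattern, assuming _logManager, lookupLimit, key, value, and blockNumber exist as in the tests:

MemDb memDb = new MemDb();
// prune the in-memory node cache at 1 MB, persist roots once they are lookupLimit blocks old
TrieStore trieStore = new TrieStore(memDb, Prune.WhenCacheReaches(1.MB()), Persist.IfBlockOlderThan(lookupLimit), _logManager);
StateTree stateTree = new StateTree(trieStore, _logManager);

stateTree.Set(key, value);        // stage a change for the current block
stateTree.UpdateRootHash();
stateTree.Commit(blockNumber);    // seal the block in the trie store
Keccak root = stateTree.RootHash; // remember the root for later verification

stateTree.RootHash = root;             // re-open the historical root...
byte[] restored = stateTree.Get(key);  // ...and read the value back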
private Task InitBlockchain()
{
    BlockTraceDumper.Converters.AddRange(DebugModuleFactory.Converters);
    BlockTraceDumper.Converters.AddRange(TraceModuleFactory.Converters);

    var (getApi, setApi) = _api.ForBlockchain;

    if (getApi.ChainSpec == null) { throw new StepDependencyException(nameof(getApi.ChainSpec)); }
    if (getApi.DbProvider == null) { throw new StepDependencyException(nameof(getApi.DbProvider)); }
    if (getApi.SpecProvider == null) { throw new StepDependencyException(nameof(getApi.SpecProvider)); }

    _logger = getApi.LogManager.GetClassLogger();

    IInitConfig initConfig = getApi.Config<IInitConfig>();
    ISyncConfig syncConfig = getApi.Config<ISyncConfig>();
    IPruningConfig pruningConfig = getApi.Config<IPruningConfig>();

    if (syncConfig.DownloadReceiptsInFastSync && !syncConfig.DownloadBodiesInFastSync)
    {
        _logger.Warn($"{nameof(syncConfig.DownloadReceiptsInFastSync)} is selected but {nameof(syncConfig.DownloadBodiesInFastSync)} - enabling bodies to support receipts download.");
        syncConfig.DownloadBodiesInFastSync = true;
    }

    Account.AccountStartNonce = getApi.ChainSpec.Parameters.AccountStartNonce;

    IWitnessCollector witnessCollector = setApi.WitnessCollector = syncConfig.WitnessProtocolEnabled
        ? new WitnessCollector(getApi.DbProvider.WitnessDb, _api.LogManager)
            .WithPruning(getApi.BlockTree!, getApi.LogManager)
        : NullWitnessCollector.Instance;

    IKeyValueStoreWithBatching cachedStateDb = getApi.DbProvider.StateDb
        .Cached(Trie.MemoryAllowance.TrieNodeCacheCount);
    setApi.MainStateDbWithCache = cachedStateDb;
    IKeyValueStore codeDb = getApi.DbProvider.CodeDb
        .WitnessedBy(witnessCollector);

    TrieStore trieStore;
    if (pruningConfig.Enabled)
    {
        setApi.TrieStore = trieStore = new TrieStore(
            setApi.MainStateDbWithCache.WitnessedBy(witnessCollector),
            Prune.WhenCacheReaches(pruningConfig.CacheMb.MB()), // TODO: memory hint should define this
            Persist.IfBlockOlderThan(pruningConfig.PersistenceInterval), // TODO: this should be based on time
            getApi.LogManager);
    }
    else
    {
        setApi.TrieStore = trieStore = new TrieStore(
            setApi.MainStateDbWithCache.WitnessedBy(witnessCollector),
            No.Pruning,
            Persist.EveryBlock,
            getApi.LogManager);
    }

    getApi.DisposeStack.Push(trieStore);
    trieStore.ReorgBoundaryReached += ReorgBoundaryReached;
    ITrieStore readOnlyTrieStore = setApi.ReadOnlyTrieStore = trieStore.AsReadOnly(cachedStateDb);

    IStateProvider stateProvider = setApi.StateProvider = new StateProvider(
        trieStore,
        codeDb,
        getApi.LogManager);

    ReadOnlyDbProvider readOnly = new(getApi.DbProvider, false);
    PersistentTxStorage txStorage = new(getApi.DbProvider.PendingTxsDb);

    IStateReader stateReader = setApi.StateReader = new StateReader(readOnlyTrieStore, readOnly.GetDb<IDb>(DbNames.Code), getApi.LogManager);

    setApi.TransactionComparerProvider = new TransactionComparerProvider(getApi.SpecProvider!, getApi.BlockTree.AsReadOnly());
    setApi.ChainHeadStateProvider = new ChainHeadReadOnlyStateProvider(getApi.BlockTree, stateReader);
    Account.AccountStartNonce = getApi.ChainSpec.Parameters.AccountStartNonce;

    stateProvider.StateRoot = getApi.BlockTree!.Head?.StateRoot ?? Keccak.EmptyTreeHash;

    if (_api.Config<IInitConfig>().DiagnosticMode == DiagnosticMode.VerifyTrie)
    {
        _logger.Info("Collecting trie stats and verifying that no nodes are missing...");
        TrieStats stats = stateProvider.CollectStats(getApi.DbProvider.CodeDb, _api.LogManager);
        _logger.Info($"Starting from {getApi.BlockTree.Head?.Number} {getApi.BlockTree.Head?.StateRoot}{Environment.NewLine}" + stats);
    }

    // Init state if we need system calls before actual processing starts
    if (getApi.BlockTree!.Head?.StateRoot != null)
    {
        stateProvider.StateRoot = getApi.BlockTree.Head.StateRoot;
    }

    var txValidator = setApi.TxValidator = new TxValidator(getApi.SpecProvider.ChainId);

    ITxPool txPool = _api.TxPool = CreateTxPool(txStorage);

    OnChainTxWatcher onChainTxWatcher = new(getApi.BlockTree, txPool, getApi.SpecProvider, _api.LogManager);
    getApi.DisposeStack.Push(onChainTxWatcher);

    ReceiptCanonicalityMonitor receiptCanonicalityMonitor = new(getApi.BlockTree, getApi.ReceiptStorage, _api.LogManager);
    getApi.DisposeStack.Push(receiptCanonicalityMonitor);

    _api.BlockPreprocessor.AddFirst(
        new RecoverSignatures(getApi.EthereumEcdsa, txPool, getApi.SpecProvider, getApi.LogManager));

    IStorageProvider storageProvider = setApi.StorageProvider = new StorageProvider(
        trieStore,
        stateProvider,
        getApi.LogManager);

    // blockchain processing
    BlockhashProvider blockhashProvider = new BlockhashProvider(
        getApi.BlockTree, getApi.LogManager);

    VirtualMachine virtualMachine = new VirtualMachine(
        stateProvider,
        storageProvider,
        blockhashProvider,
        getApi.SpecProvider,
        getApi.LogManager);

    _api.TransactionProcessor = new TransactionProcessor(
        getApi.SpecProvider,
        stateProvider,
        storageProvider,
        virtualMachine,
        getApi.LogManager);

    InitSealEngine();
    if (_api.SealValidator == null) { throw new StepDependencyException(nameof(_api.SealValidator)); }

    /* validation */
    var headerValidator = setApi.HeaderValidator = CreateHeaderValidator();

    OmmersValidator ommersValidator = new(
        getApi.BlockTree,
        headerValidator,
        getApi.LogManager);

    var blockValidator = setApi.BlockValidator = new BlockValidator(
        txValidator,
        headerValidator,
        ommersValidator,
        getApi.SpecProvider,
        getApi.LogManager);

    setApi.TxPoolInfoProvider = new TxPoolInfoProvider(stateReader, txPool);

    var mainBlockProcessor = setApi.MainBlockProcessor = CreateBlockProcessor();

    BlockchainProcessor blockchainProcessor = new(
        getApi.BlockTree,
        mainBlockProcessor,
        _api.BlockPreprocessor,
        getApi.LogManager,
        new BlockchainProcessor.Options
        {
            AutoProcess = !syncConfig.BeamSync,
            StoreReceiptsByDefault = initConfig.StoreReceipts,
        });

    setApi.BlockProcessingQueue = blockchainProcessor;
    setApi.BlockchainProcessor = blockchainProcessor;

    if (syncConfig.BeamSync)
    {
        BeamBlockchainProcessor beamBlockchainProcessor = new(
            new ReadOnlyDbProvider(_api.DbProvider, false),
            getApi.BlockTree,
            getApi.SpecProvider,
            getApi.LogManager,
            blockValidator,
            _api.BlockPreprocessor,
            _api.RewardCalculatorSource!, // TODO: does it work with AuRa?
            blockchainProcessor,
            getApi.SyncModeSelector!);

        _api.DisposeStack.Push(beamBlockchainProcessor);
    }

    // TODO: can take the tx sender from plugin here maybe
    ITxSigner txSigner = new WalletTxSigner(getApi.Wallet, getApi.SpecProvider.ChainId);
    TxSealer standardSealer = new(txSigner, getApi.Timestamper);
    NonceReservingTxSealer nonceReservingTxSealer = new(txSigner, getApi.Timestamper, txPool);
    setApi.TxSender = new TxPoolSender(txPool, nonceReservingTxSealer, standardSealer);

    // TODO: possibly hide it (but need to confirm that NDM does not really need it)
    var filterStore = setApi.FilterStore = new FilterStore();
    setApi.FilterManager = new FilterManager(filterStore, mainBlockProcessor, txPool, getApi.LogManager);
    setApi.HealthHintService = CreateHealthHintService();

    return Task.CompletedTask;
}
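In this step the store construction hinges on a single configuration switch: pruning enabled means a memory-capped cache with interval persistence, otherwise an archive-style store that persists every block. A condensed sketch of that choice; stateDb, witnessCollector, and logManager stand in for the getApi/setApi members used above:

TrieStore trieStore = pruningConfig.Enabled
    ? new TrieStore(
        stateDb.WitnessedBy(witnessCollector),
        Prune.WhenCacheReaches(pruningConfig.CacheMb.MB()),          // cap the in-memory trie cache
        Persist.IfBlockOlderThan(pruningConfig.PersistenceInterval), // persist only sufficiently old blocks
        logManager)
    : new TrieStore(
        stateDb.WitnessedBy(witnessCollector),
        No.Pruning,         // archive mode: keep every node
        Persist.EveryBlock, // and write each block straight to the database
        logManager);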
// [TestCase(8, 32, 8, 1543322391)]
public void Fuzz_accounts_with_storage(
    int accountsCount,
    int blocksCount,
    int lookupLimit,
    int? seed)
{
    int usedSeed = seed ?? _random.Next(int.MaxValue);
    _random = new Random(usedSeed);
    _logger.Info($"RANDOM SEED {usedSeed}");

    string fileName = Path.GetTempFileName();
    //string fileName = "C:\\Temp\\fuzz.txt";
    _logger.Info(
        $"Fuzzing with accounts: {accountsCount}, " +
        $"blocks {blocksCount}, " +
        $"lookup: {lookupLimit} into file {fileName}");

    using FileStream fileStream = new FileStream(fileName, FileMode.Create);
    using StreamWriter streamWriter = new StreamWriter(fileStream);

    Queue<Keccak> rootQueue = new Queue<Keccak>();

    MemDb memDb = new MemDb();

    TrieStore trieStore = new TrieStore(memDb, Prune.WhenCacheReaches(1.MB()), Persist.IfBlockOlderThan(lookupLimit), _logManager);
    StateProvider stateProvider = new StateProvider(trieStore, new MemDb(), _logManager);
    StorageProvider storageProvider = new StorageProvider(trieStore, stateProvider, _logManager);

    Account[] accounts = new Account[accountsCount];
    Address[] addresses = new Address[accountsCount];

    for (int i = 0; i < accounts.Length; i++)
    {
        bool isEmptyValue = _random.Next(0, 2) == 0;
        if (isEmptyValue)
        {
            accounts[i] = Account.TotallyEmpty;
        }
        else
        {
            accounts[i] = GenerateRandomAccount();
        }

        addresses[i] = TestItem.GetRandomAddress(_random);
    }

    for (int blockNumber = 0; blockNumber < blocksCount; blockNumber++)
    {
        bool isEmptyBlock = _random.Next(5) == 0;
        if (!isEmptyBlock)
        {
            for (int i = 0; i < Math.Max(1, accountsCount / 8); i++)
            {
                int randomAddressIndex = _random.Next(addresses.Length);
                int randomAccountIndex = _random.Next(accounts.Length);

                Address address = addresses[randomAddressIndex];
                Account account = accounts[randomAccountIndex];

                if (stateProvider.AccountExists(address))
                {
                    Account existing = stateProvider.GetAccount(address);
                    if (existing.Balance != account.Balance)
                    {
                        if (account.Balance > existing.Balance)
                        {
                            stateProvider.AddToBalance(
                                address, account.Balance - existing.Balance, MuirGlacier.Instance);
                        }
                        else
                        {
                            stateProvider.SubtractFromBalance(
                                address, existing.Balance - account.Balance, MuirGlacier.Instance);
                        }

                        stateProvider.IncrementNonce(address);
                    }

                    byte[] storage = new byte[1];
                    _random.NextBytes(storage);
                    storageProvider.Set(new StorageCell(address, 1), storage);
                }
                else if (!account.IsTotallyEmpty)
                {
                    stateProvider.CreateAccount(address, account.Balance);

                    byte[] storage = new byte[1];
                    _random.NextBytes(storage);
                    storageProvider.Set(new StorageCell(address, 1), storage);
                }
            }
        }

        streamWriter.WriteLine(
            $"Commit block {blockNumber} | empty: {isEmptyBlock}");

        storageProvider.Commit();
        stateProvider.Commit(MuirGlacier.Instance);

        storageProvider.CommitTrees(blockNumber);
        stateProvider.CommitTree(blockNumber);

        rootQueue.Enqueue(stateProvider.StateRoot);
    }

    streamWriter.Flush();
    fileStream.Seek(0, SeekOrigin.Begin);

    streamWriter.WriteLine($"DB size: {memDb.Keys.Count}");
    _logger.Info($"DB size: {memDb.Keys.Count}");

    int verifiedBlocks = 0;

    while (rootQueue.TryDequeue(out Keccak currentRoot))
    {
        try
        {
            stateProvider.StateRoot = currentRoot;
            for (int i = 0; i < addresses.Length; i++)
            {
                Account account = stateProvider.GetAccount(addresses[i]);
                if (account != null)
                {
                    for (int j = 0; j < 256; j++)
                    {
                        storageProvider.Get(new StorageCell(addresses[i], (UInt256)j));
                    }
                }
            }

            _logger.Info($"Verified positive {verifiedBlocks}");
        }
        catch (Exception ex)
        {
            if (verifiedBlocks % lookupLimit == 0)
            {
                throw new InvalidDataException(ex.ToString());
            }
            else
            {
                _logger.Info($"Verified negative {verifiedBlocks} which is ok here");
            }
        }

        verifiedBlocks++;
    }
}
private Task InitBlockchain()
{
    InitBlockTraceDumper();

    (IApiWithStores getApi, IApiWithBlockchain setApi) = _api.ForBlockchain;

    if (getApi.ChainSpec == null) { throw new StepDependencyException(nameof(getApi.ChainSpec)); }
    if (getApi.DbProvider == null) { throw new StepDependencyException(nameof(getApi.DbProvider)); }
    if (getApi.SpecProvider == null) { throw new StepDependencyException(nameof(getApi.SpecProvider)); }
    if (getApi.BlockTree == null) { throw new StepDependencyException(nameof(getApi.BlockTree)); }

    _logger = getApi.LogManager.GetClassLogger();

    IInitConfig initConfig = getApi.Config<IInitConfig>();
    ISyncConfig syncConfig = getApi.Config<ISyncConfig>();
    IPruningConfig pruningConfig = getApi.Config<IPruningConfig>();
    IMiningConfig miningConfig = getApi.Config<IMiningConfig>();

    if (syncConfig.DownloadReceiptsInFastSync && !syncConfig.DownloadBodiesInFastSync)
    {
        _logger.Warn($"{nameof(syncConfig.DownloadReceiptsInFastSync)} is selected but {nameof(syncConfig.DownloadBodiesInFastSync)} - enabling bodies to support receipts download.");
        syncConfig.DownloadBodiesInFastSync = true;
    }

    Account.AccountStartNonce = getApi.ChainSpec.Parameters.AccountStartNonce;

    IWitnessCollector witnessCollector;
    if (syncConfig.WitnessProtocolEnabled)
    {
        WitnessCollector witnessCollectorImpl = new(getApi.DbProvider.WitnessDb, _api.LogManager);
        witnessCollector = setApi.WitnessCollector = witnessCollectorImpl;
        setApi.WitnessRepository = witnessCollectorImpl.WithPruning(getApi.BlockTree!, getApi.LogManager);
    }
    else
    {
        witnessCollector = setApi.WitnessCollector = NullWitnessCollector.Instance;
        setApi.WitnessRepository = NullWitnessCollector.Instance;
    }

    CachingStore cachedStateDb = getApi.DbProvider.StateDb
        .Cached(Trie.MemoryAllowance.TrieNodeCacheCount);
    setApi.MainStateDbWithCache = cachedStateDb;
    IKeyValueStore codeDb = getApi.DbProvider.CodeDb
        .WitnessedBy(witnessCollector);

    TrieStore trieStore;
    IKeyValueStoreWithBatching stateWitnessedBy = setApi.MainStateDbWithCache.WitnessedBy(witnessCollector);
    if (pruningConfig.Mode.IsMemory())
    {
        IPersistenceStrategy persistenceStrategy = Persist.IfBlockOlderThan(pruningConfig.PersistenceInterval); // TODO: this should be based on time
        if (pruningConfig.Mode.IsFull())
        {
            PruningTriggerPersistenceStrategy triggerPersistenceStrategy = new((IFullPruningDb)getApi.DbProvider!.StateDb, getApi.BlockTree!, getApi.LogManager);
            getApi.DisposeStack.Push(triggerPersistenceStrategy);
            persistenceStrategy = persistenceStrategy.Or(triggerPersistenceStrategy);
        }

        setApi.TrieStore = trieStore = new TrieStore(
            stateWitnessedBy,
            Prune.WhenCacheReaches(pruningConfig.CacheMb.MB()), // TODO: memory hint should define this
            persistenceStrategy,
            getApi.LogManager);

        if (pruningConfig.Mode.IsFull())
        {
            IFullPruningDb fullPruningDb = (IFullPruningDb)getApi.DbProvider!.StateDb;
            fullPruningDb.PruningStarted += (_, args) =>
            {
                cachedStateDb.PersistCache(args.Context);
                trieStore.PersistCache(args.Context, args.Context.CancellationTokenSource.Token);
            };
        }
    }
    else
    {
        setApi.TrieStore = trieStore = new TrieStore(
            stateWitnessedBy,
            No.Pruning,
            Persist.EveryBlock,
            getApi.LogManager);
    }

    TrieStoreBoundaryWatcher trieStoreBoundaryWatcher = new(trieStore, _api.BlockTree!, _api.LogManager);
    getApi.DisposeStack.Push(trieStoreBoundaryWatcher);
    getApi.DisposeStack.Push(trieStore);
    ITrieStore readOnlyTrieStore = setApi.ReadOnlyTrieStore = trieStore.AsReadOnly(cachedStateDb);

    IStateProvider stateProvider = setApi.StateProvider = new StateProvider(
        trieStore,
        codeDb,
        getApi.LogManager);

    ReadOnlyDbProvider readOnly = new(getApi.DbProvider, false);
    IStateReader stateReader = setApi.StateReader = new StateReader(readOnlyTrieStore, readOnly.GetDb<IDb>(DbNames.Code), getApi.LogManager);

    setApi.TransactionComparerProvider = new TransactionComparerProvider(getApi.SpecProvider!, getApi.BlockTree.AsReadOnly());
    setApi.ChainHeadStateProvider = new ChainHeadReadOnlyStateProvider(getApi.BlockTree, stateReader);
    Account.AccountStartNonce = getApi.ChainSpec.Parameters.AccountStartNonce;

    stateProvider.StateRoot = getApi.BlockTree!.Head?.StateRoot ?? Keccak.EmptyTreeHash;

    if (_api.Config<IInitConfig>().DiagnosticMode == DiagnosticMode.VerifyTrie)
    {
        Task.Run(() =>
        {
            try
            {
                _logger!.Info("Collecting trie stats and verifying that no nodes are missing...");
                TrieStats stats = stateProvider.CollectStats(getApi.DbProvider.CodeDb, _api.LogManager);
                _logger.Info($"Starting from {getApi.BlockTree.Head?.Number} {getApi.BlockTree.Head?.StateRoot}{Environment.NewLine}" + stats);
            }
            catch (Exception ex)
            {
                _logger!.Error(ex.ToString());
            }
        });
    }

    // Init state if we need system calls before actual processing starts
    if (getApi.BlockTree!.Head?.StateRoot != null)
    {
        stateProvider.StateRoot = getApi.BlockTree.Head.StateRoot;
    }

    TxValidator txValidator = setApi.TxValidator = new TxValidator(getApi.SpecProvider.ChainId);

    ITxPool txPool = _api.TxPool = CreateTxPool();

    ReceiptCanonicalityMonitor receiptCanonicalityMonitor = new(getApi.BlockTree, getApi.ReceiptStorage, _api.LogManager);
    getApi.DisposeStack.Push(receiptCanonicalityMonitor);
    _api.ReceiptMonitor = receiptCanonicalityMonitor;

    _api.BlockPreprocessor.AddFirst(
        new RecoverSignatures(getApi.EthereumEcdsa, txPool, getApi.SpecProvider, getApi.LogManager));

    IStorageProvider storageProvider = setApi.StorageProvider = new StorageProvider(
        trieStore,
        stateProvider,
        getApi.LogManager);

    // blockchain processing
    BlockhashProvider blockhashProvider = new(
        getApi.BlockTree, getApi.LogManager);

    VirtualMachine virtualMachine = new(
        blockhashProvider,
        getApi.SpecProvider,
        getApi.LogManager);

    WorldState worldState = new(stateProvider, storageProvider);

    _api.TransactionProcessor = new TransactionProcessor(
        getApi.SpecProvider,
        worldState,
        virtualMachine,
        getApi.LogManager);

    InitSealEngine();
    if (_api.SealValidator == null) { throw new StepDependencyException(nameof(_api.SealValidator)); }

    setApi.HeaderValidator = CreateHeaderValidator();

    IHeaderValidator? headerValidator = setApi.HeaderValidator;
    IUnclesValidator unclesValidator = setApi.UnclesValidator = new UnclesValidator(
        getApi.BlockTree,
        headerValidator,
        getApi.LogManager);

    setApi.BlockValidator = new BlockValidator(
        txValidator,
        headerValidator,
        unclesValidator,
        getApi.SpecProvider,
        getApi.LogManager);

    IChainHeadInfoProvider chainHeadInfoProvider = new ChainHeadInfoProvider(getApi.SpecProvider, getApi.BlockTree, stateReader);
    setApi.TxPoolInfoProvider = new TxPoolInfoProvider(chainHeadInfoProvider.AccountStateProvider, txPool);
    setApi.GasPriceOracle = new GasPriceOracle(getApi.BlockTree, getApi.SpecProvider, _api.LogManager, miningConfig.MinGasPrice);

    IBlockProcessor mainBlockProcessor = setApi.MainBlockProcessor = CreateBlockProcessor();

    BlockchainProcessor blockchainProcessor = new(
        getApi.BlockTree,
        mainBlockProcessor,
        _api.BlockPreprocessor,
        stateReader,
        getApi.LogManager,
        new BlockchainProcessor.Options
        {
            StoreReceiptsByDefault = initConfig.StoreReceipts,
            DumpOptions = initConfig.AutoDump
        });

    setApi.BlockProcessingQueue = blockchainProcessor;
    setApi.BlockchainProcessor = blockchainProcessor;
    setApi.EthSyncingInfo = new EthSyncingInfo(getApi.BlockTree);

    // TODO: can take the tx sender from plugin here maybe
    ITxSigner txSigner = new WalletTxSigner(getApi.Wallet, getApi.SpecProvider.ChainId);
    TxSealer standardSealer = new(txSigner, getApi.Timestamper);
    NonceReservingTxSealer nonceReservingTxSealer = new(txSigner, getApi.Timestamper, txPool);
    setApi.TxSender = new TxPoolSender(txPool, nonceReservingTxSealer, standardSealer);

    // TODO: possibly hide it (but need to confirm that NDM does not really need it)
    IFilterStore filterStore = setApi.FilterStore = new FilterStore();
    setApi.FilterManager = new FilterManager(filterStore, mainBlockProcessor, txPool, getApi.LogManager);
    setApi.HealthHintService = CreateHealthHintService();
    setApi.BlockProductionPolicy = new BlockProductionPolicy(miningConfig);

    InitializeFullPruning(pruningConfig, initConfig, _api, stateReader);

    return Task.CompletedTask;
}
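Compared with the earlier InitBlockchain, the pruning branch here composes its persistence strategy: the block-age strategy is OR-ed with a trigger strategy when full pruning is configured, and the full-pruning database's PruningStarted event flushes both the cached state DB and the trie store into the new pruning target. A condensed sketch of that wiring; dbProvider, blockTree, logManager, stateWitnessedBy, and cachedStateDb stand in for the corresponding getApi/setApi members above:

IPersistenceStrategy persistenceStrategy = Persist.IfBlockOlderThan(pruningConfig.PersistenceInterval);
if (pruningConfig.Mode.IsFull())
{
    // also persist whenever a full-pruning run requests it
    PruningTriggerPersistenceStrategy triggerStrategy = new((IFullPruningDb)dbProvider.StateDb, blockTree, logManager);
    persistenceStrategy = persistenceStrategy.Or(triggerStrategy);
}

TrieStore trieStore = new TrieStore(
    stateWitnessedBy,
    Prune.WhenCacheReaches(pruningConfig.CacheMb.MB()),
    persistenceStrategy,
    logManager);

if (pruningConfig.Mode.IsFull())
{
    IFullPruningDb fullPruningDb = (IFullPruningDb)dbProvider.StateDb;
    fullPruningDb.PruningStarted += (_, args) =>
    {
        // copy cached state and in-memory trie nodes into the pruning target before it takes over
        cachedStateDb.PersistCache(args.Context);
        trieStore.PersistCache(args.Context, args.Context.CancellationTokenSource.Token);
    };
}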