/// <summary>
/// Beam-processes <paramref name="block"/> on a dedicated read-only processor while
/// prefetching the state it will need. The processing and prefetch tasks are tracked in
/// <c>_beamProcessTasks</c>; older in-flight beam tasks for stale block numbers are cancelled.
/// </summary>
/// <param name="block">Block to beam process; must have a resolved total difficulty.</param>
/// <exception cref="InvalidDataException">
/// Thrown when <see cref="Block.TotalDifficulty"/> is null (beam sync needs it for context).
/// </exception>
private void BeamProcess(Block block)
{
    if (block.TotalDifficulty == null)
    {
        throw new InvalidDataException(
            $"Received a block with null {nameof(block.TotalDifficulty)} for beam processing");
    }

    CancellationTokenSource cancellationToken;
    lock (_tokens)
    {
        // One token source per block number so all beam work for that number can be cancelled together.
        cancellationToken = _tokens.GetOrAdd(block.Number, t => new CancellationTokenSource());
        if (_isDisposed)
        {
            return;
        }
    }

    Task beamProcessingTask = Task.CompletedTask;
    Task prefetchTasks = Task.CompletedTask;
    try
    {
        if (_logger.IsInfo) _logger.Info($"Beam processing block {block}");
        _recoveryStep.RecoverData(block);

        // Fresh processor over an in-memory copy of the DB so beam processing never
        // writes into the main databases.
        (IBlockchainProcessor beamProcessor, IStateReader stateReader) =
            CreateProcessor(block, new ReadOnlyDbProvider(_readOnlyDbProvider, true), _specProvider, _logManager);

        BlockHeader parentHeader =
            _readOnlyBlockTree.FindHeader(block.ParentHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
        if (parentHeader != null)
        {
            // TODO: is author / beneficiary always resolved here
            prefetchTasks = PrefetchNew(
                stateReader, block, parentHeader.StateRoot!, parentHeader.Author ?? parentHeader.Beneficiary!);
        }

        Stopwatch stopwatch = Stopwatch.StartNew();
        Block? processedBlock = null;
        beamProcessingTask = Task.Run(() =>
        {
            // BeamSyncContext values are ambient (async-local) settings read by the beam DBs.
            BeamSyncContext.MinimumDifficulty.Value = block.TotalDifficulty.Value;
            BeamSyncContext.Description.Value = $"[preProcess of {block.Hash!.ToShortString()}]";
            BeamSyncContext.LastFetchUtc.Value = DateTime.UtcNow;
            BeamSyncContext.Cancelled.Value = cancellationToken.Token;

            processedBlock = beamProcessor.Process(block, ProcessingOptions.Beam, NullBlockTracer.Instance);
            stopwatch.Stop();
            if (processedBlock == null)
            {
                if (_logger.IsDebug) _logger.Debug($"Block {block.ToString(Block.Format.Short)} skipped in beam sync");
            }
            else
            {
                Interlocked.Increment(ref Metrics.BeamedBlocks);
                if (_logger.IsInfo) _logger.Info($"Successfully beam processed block {processedBlock.ToString(Block.Format.Short)} in {stopwatch.ElapsedMilliseconds}ms");
            }
        }).ContinueWith(t =>
        {
            // BUGFIX: the processor must be disposed on EVERY continuation path. The original
            // returned early on the faulted path before calling Dispose(), leaking the
            // beam processor and its read-only DB provider whenever processing failed.
            try
            {
                if (t.IsFaulted)
                {
                    if (_logger.IsInfo) _logger.Info($"Stopped processing block {block} | {t.Exception?.Flatten().InnerException?.Message}");
                    if (_logger.IsTrace) _logger.Trace($"Details of beam sync failure {block} | {t.Exception}");
                    return;
                }

                if (processedBlock != null)
                {
                    // if (_logger.IsDebug) _logger.Debug($"Running standard processor after beam sync for {block}");
                    // at this stage we are sure to have all the state available
                    CancelPreviousBeamSyncingBlocks(processedBlock.Number);

                    // do I even need this?
                    // do I even need to process any of these blocks or just leave the RPC available
                    // (based on user expectations they may need to trace or just query balance)

                    // soo - there should be a separate beam queue that we can wait for to finish?
                    // then we can ensure that it finishes before the normal queue fires
                    // and so they never hit the wrong databases?
                    // but, yeah, we do not even need to process it twice
                    // we can just announce that we have finished beam processing here...
                    // _standardProcessorQueue.Enqueue(block, ProcessingOptions.Beam);

                    // I only needed it in the past when I wanted to actually store the beam data
                    // now I can generate the witness on the fly and transfer the witness to the right place...
                    // OK, seems fine
                }
            }
            finally
            {
                beamProcessor.Dispose();
            }
        });
    }
    catch (Exception e)
    {
        if (_logger.IsError) _logger.Error($"Block {block.ToString(Block.Format.Short)} failed processing and it will be skipped from beam sync", e);
    }

    _beamProcessTasks.Add(Task.WhenAll(beamProcessingTask, prefetchTasks));
    long number = block.Number;
    CancelOldBeamTasks(number);
}
/// <summary>
/// Beam-processes <paramref name="block"/> on a temporary read-only processor, then enqueues
/// it for standard processing once its state has been fully fetched. Genesis blocks bypass
/// beam processing and go straight to the standard queue.
/// </summary>
/// <param name="block">Block to beam process.</param>
private void BeamProcess(Block block)
{
    // One token source per block number so all beam work for that number can be cancelled together.
    CancellationTokenSource cancellationToken = _tokens.GetOrAdd(block.Number, t => new CancellationTokenSource());

    if (block.IsGenesis)
    {
        _blockchainProcessor.Enqueue(block, ProcessingOptions.IgnoreParentNotOnMainChain);
        return;
    }

    // we only want to trace the actual block
    try
    {
        _recoveryStep.RecoverData(block);

        // Fresh processor over an in-memory copy of the DB so beam processing never
        // writes into the main databases.
        (IBlockchainProcessor processor, IStateReader stateReader) =
            CreateProcessor(block, new ReadOnlyDbProvider(_readOnlyDbProvider, true), _specProvider, _logManager);

        BlockHeader parentHeader =
            _readOnlyBlockTree.FindHeader(block.ParentHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
        if (parentHeader != null)
        {
            PrefetchNew(stateReader, block, parentHeader.StateRoot, parentHeader.Author ?? parentHeader.Beneficiary);
        }

        // Prefetch(block, parentHeader.StateRoot, parentHeader.Author ?? parentHeader.Beneficiary);
        // Prefetch(block, block.StateRoot, block.Author ?? block.Beneficiary);

        if (_logger.IsInfo) _logger.Info($"Now beam processing {block}");
        Block processedBlock = null;
        Task preProcessTask = Task.Run(() =>
        {
            // NOTE(review): block.TotalDifficulty is dereferenced without a null check here;
            // a null value faults the task and is reported by the continuation below — confirm
            // callers guarantee it is resolved.
            BeamSyncContext.MinimumDifficulty.Value = block.TotalDifficulty.Value;
            BeamSyncContext.Description.Value = $"[preProcess of {block.Hash.ToShortString()}]";
            BeamSyncContext.LastFetchUtc.Value = DateTime.UtcNow;
            BeamSyncContext.Cancelled.Value = cancellationToken.Token;

            processedBlock = processor.Process(
                block,
                ProcessingOptions.ReadOnlyChain | ProcessingOptions.IgnoreParentNotOnMainChain,
                NullBlockTracer.Instance);
            if (processedBlock == null)
            {
                if (_logger.IsInfo) _logger.Info($"Block {block.ToString(Block.Format.Short)} skipped in beam sync");
            }
        }).ContinueWith(t =>
        {
            // BUGFIX: the processor must be disposed on EVERY continuation path. The original
            // returned early on the faulted path before calling Dispose(), leaking the
            // processor and its read-only DB provider whenever beam processing failed.
            // Also hardened t.Exception access with ?. — it is null-typed on the Task API.
            try
            {
                if (t.IsFaulted)
                {
                    if (_logger.IsWarn) _logger.Warn($"Stopped / failed to beam process block {block} | {t.Exception?.Message}");
                    if (_logger.IsDebug) _logger.Debug($"Details of beam sync failure {block} | {t.Exception}");
                    return;
                }

                if (processedBlock != null)
                {
                    if (_logger.IsInfo) _logger.Info($"Enqueuing for standard processing {block}");
                    // at this stage we are sure to have all the state available
                    _blockchainProcessor.Enqueue(block, ProcessingOptions.IgnoreParentNotOnMainChain);
                }
            }
            finally
            {
                processor.Dispose();
            }
        });
    }
    catch (Exception e)
    {
        if (_logger.IsError) _logger.Error($"Block {block.ToString(Block.Format.Short)} failed processing and it will be skipped from beam sync", e);
    }
}