/// <summary>
/// Regression test: reporting "no sync progress" with a null allocation or a null
/// peer info must not throw, even after the pool is started and has a live peer.
/// </summary>
public void Report_no_sync_progress_on_null_does_not_crash()
{
    var syncPeer = new SimpleSyncPeerMock(TestItem.PublicKeyA);
    _pool.Start();
    _pool.AddPeer(syncPeer);

    // Exercise both null-accepting overloads; the test passes if neither throws.
    _pool.ReportNoSyncProgress((SyncPeerAllocation)null);
    _pool.ReportNoSyncProgress((PeerInfo)null);
}
/// <summary>
/// Executes one state-sync batch: borrows a peer from the pool, requests the
/// node data for the batch's hashes, passes the response to the node data feed,
/// and reports peer quality / lack of progress back to the pool.
/// </summary>
/// <param name="token">Cancellation token forwarded to the network request.</param>
/// <param name="batch">Batch describing the state trie nodes to download.</param>
private async Task ExecuteRequest(CancellationToken token, StateSyncBatch batch)
{
    // Borrowed allocation is always released in the finally block below.
    SyncPeerAllocation nodeSyncAllocation = _syncPeerPool.Borrow(BorrowOptions.DoNotReplace, "node sync");
    try
    {
        ISyncPeer peer = nodeSyncAllocation?.Current?.SyncPeer;
        batch.AssignedPeer = nodeSyncAllocation;
        if (peer != null)
        {
            var hashes = batch.RequestedNodes.Select(r => r.Hash).ToArray();
            Task<byte[][]> getNodeDataTask = peer.GetNodeData(hashes, token);
            // Only a successful task populates the responses; on failure/cancellation
            // batch.Responses stays null and the feed sees an unanswered batch.
            await getNodeDataTask.ContinueWith(
                t =>
                {
                    if (t.IsCompletedSuccessfully)
                    {
                        batch.Responses = getNodeDataTask.Result;
                    }
                }
            );
        }

        // Default assumes the worst until the feed says otherwise.
        (NodeDataHandlerResult Result, int NodesConsumed) result = (NodeDataHandlerResult.InvalidFormat, 0);
        try
        {
            result = _nodeDataFeed.HandleResponse(batch);
            if (result.Result == NodeDataHandlerResult.BadQuality)
            {
                _syncPeerPool.ReportBadPeer(batch.AssignedPeer);
            }
        }
        catch (Exception e)
        {
            // Deliberate swallow (after logging) so one bad batch does not kill the sync loop.
            if (_logger.IsError)
            {
                _logger.Error($"Error when handling response", e);
            }
        }

        Interlocked.Add(ref _consumedNodesCount, result.NodesConsumed);
        // A peer that returned nothing useful is flagged so the pool can rotate it out.
        if (result.NodesConsumed == 0 && peer != null)
        {
            _syncPeerPool.ReportNoSyncProgress(nodeSyncAllocation);
        }
    }
    finally
    {
        if (nodeSyncAllocation != null)
        {
            // _logger.Warn($"Free {nodeSyncAllocation?.Current}");
            _syncPeerPool.Free(nodeSyncAllocation);
        }
    }
}
/// <summary>
/// Executes one state-sync batch using the peer already assigned to the batch:
/// downloads the requested node data, routes the response either to the
/// additional data consumer or to the main feed, and reports peer quality /
/// lack of progress back to the pool before freeing the allocation.
/// </summary>
/// <param name="token">Cancellation token forwarded to the network request.</param>
/// <param name="batch">Batch describing the state trie nodes to download; its
/// AssignedPeer allocation (if any) is freed at the end of this method.</param>
private async Task ExecuteRequest(CancellationToken token, StateSyncBatch batch)
{
    var peer = batch.AssignedPeer?.Current?.SyncPeer;
    if (peer != null)
    {
        var hashes = batch.RequestedNodes.Select(r => r.Hash).ToArray();
        Task<byte[][]> getNodeDataTask = peer.GetNodeData(hashes, token);
        // Only a successful task populates the responses; on failure/cancellation
        // batch.Responses stays null and the handler sees an unanswered batch.
        await getNodeDataTask.ContinueWith(
            t =>
            {
                if (t.IsCompletedSuccessfully)
                {
                    batch.Responses = getNodeDataTask.Result;
                }
            }
        );
    }

    // Default assumes the worst until a handler says otherwise.
    (NodeDataHandlerResult Result, int NodesConsumed) result = (NodeDataHandlerResult.InvalidFormat, 0);
    try
    {
        if (batch.IsAdditionalDataConsumer)
        {
            // Consumer batches bypass the feed; the consumer reports how many nodes it took.
            result = (NodeDataHandlerResult.OK, _additionalConsumer.HandleResponse(new DataConsumerRequest(batch.ConsumerId, batch.RequestedNodes.Select(r => r.Hash).ToArray()), batch.Responses));
        }
        else
        {
            result = _feed.HandleResponse(batch);
        }

        if (result.Result == NodeDataHandlerResult.BadQuality)
        {
            _syncPeerPool.ReportWeakPeer(batch.AssignedPeer);
        }
    }
    catch (Exception e)
    {
        // Deliberate swallow (after logging) so one bad batch does not kill the sync loop.
        if (_logger.IsError)
        {
            _logger.Error($"Error when handling response", e);
        }
    }

    Interlocked.Add(ref _consumedNodesCount, result.NodesConsumed);
    if (result.NodesConsumed == 0 && peer != null)
    {
        // Consumer batches are reported as non-severe (second arg false).
        _syncPeerPool.ReportNoSyncProgress(batch.AssignedPeer, !batch.IsAdditionalDataConsumer);
    }

    // NOTE(review): unlike the borrow/try/finally variant, an exception thrown before
    // this point would skip freeing the allocation - confirm this is intended.
    if (batch.AssignedPeer != null)
    {
        _syncPeerPool.Free(batch.AssignedPeer);
    }
}
/// <summary>
/// Main synchronization loop: waits for a sync request signal, updates the sync
/// mode, manages the blocks-sync peer allocation, dispatches the download task
/// matching the current mode, and reports results / lack of progress to the pool.
/// Runs until loop cancellation is requested.
/// </summary>
private async Task RunSyncLoop()
{
    while (true)
    {
        if (_logger.IsTrace)
        {
            _logger.Trace("Sync loop - WAIT.");
        }

        // Block until someone requests a sync round (or the loop is cancelled).
        _syncRequested.Wait(_syncLoopCancellation.Token);
        _syncRequested.Reset();
        if (_logger.IsTrace)
        {
            _logger.Trace("Sync loop - IN.");
        }

        if (_syncLoopCancellation.IsCancellationRequested)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace("Sync loop cancellation requested - leaving the main sync loop.");
            }

            break;
        }

        if (!_blockTree.CanAcceptNewBlocks)
        {
            continue;
        }

        _syncMode.Update();
        _syncReport.CurrentSyncMode = _syncMode.Current;
        if (_blocksSyncAllocation == null)
        {
            AllocateBlocksSync();
            if (_syncMode.Current == SyncMode.Headers)
            {
                // In headers mode, stay a safe distance behind the tip.
                _blocksSyncAllocation.MinBlocksAhead = SyncModeSelector.FullSyncThreshold;
            }
            else
            {
                _blocksSyncAllocation.MinBlocksAhead = null;
            }
        }
        else if (_syncMode.IsParallel)
        {
            // Parallel modes manage their own peers - release the blocks allocation.
            FreeBlocksSyncAllocation();
        }

        PeerInfo bestPeer = null;
        if (_blocksSyncAllocation != null)
        {
            UInt256 ourTotalDifficulty = _blockTree.BestSuggestedHeader?.TotalDifficulty ?? 0;
            _syncPeerPool.EnsureBest(); // can we remove it yet?
            bestPeer = _blocksSyncAllocation?.Current;
            if (bestPeer == null || bestPeer.TotalDifficulty <= ourTotalDifficulty)
            {
                if (_logger.IsTrace)
                {
                    _logger.Trace("Skipping sync - no peer with better chain.");
                }

                continue;
            }

            SyncEvent?.Invoke(this, new SyncEventArgs(bestPeer.SyncPeer, Synchronization.SyncEvent.Started));
            if (_logger.IsDebug)
            {
                _logger.Debug($"Starting {_syncMode.Current} sync with {bestPeer} - theirs {bestPeer?.HeadNumber} {bestPeer?.TotalDifficulty} | ours {_blockTree.BestSuggestedHeader?.Number ?? 0} {_blockTree.BestSuggestedHeader?.TotalDifficulty ?? 0}");
            }
        }

        // Per-round cancellation linked with the loop-wide token.
        _peerSyncCancellation = new CancellationTokenSource();
        var linkedCancellation = CancellationTokenSource.CreateLinkedTokenSource(_peerSyncCancellation.Token, _syncLoopCancellation.Token);
        Task<long> syncProgressTask;
        switch (_syncMode.Current)
        {
            case SyncMode.FastBlocks:
                syncProgressTask = _fastBlockDownloader.Sync(linkedCancellation.Token);
                break;
            case SyncMode.Headers:
                syncProgressTask = _syncConfig.DownloadBodiesInFastSync
                    ? _blockDownloader.DownloadBlocks(bestPeer, SyncModeSelector.FullSyncThreshold, linkedCancellation.Token, _syncConfig.DownloadReceiptsInFastSync ? BlockDownloader.DownloadOptions.DownloadWithReceipts : BlockDownloader.DownloadOptions.Download)
                    : _blockDownloader.DownloadHeaders(bestPeer, SyncModeSelector.FullSyncThreshold, linkedCancellation.Token);
                break;
            case SyncMode.StateNodes:
                // NOTE(review): uses the loop-wide token, not the linked one - confirm intended.
                syncProgressTask = DownloadStateNodes(_syncLoopCancellation.Token);
                break;
            case SyncMode.WaitForProcessor:
                syncProgressTask = Task.Delay(5000).ContinueWith(_ => 0L);
                break;
            case SyncMode.Full:
                syncProgressTask = _blockDownloader.DownloadBlocks(bestPeer, 0, linkedCancellation.Token);
                break;
            case SyncMode.NotStarted:
                syncProgressTask = Task.Delay(1000).ContinueWith(_ => 0L);
                break;
            default:
                throw new ArgumentOutOfRangeException();
        }

        switch (_syncMode.Current)
        {
            case SyncMode.WaitForProcessor:
                if (_logger.IsInfo)
                {
                    _logger.Info("Waiting for the block processor to catch up before the next sync round...");
                }

                await syncProgressTask;
                break;
            case SyncMode.NotStarted:
                if (_logger.IsInfo)
                {
                    _logger.Info("Waiting for peers to connect before selecting the sync mode...");
                }

                await syncProgressTask;
                break;
            default:
                // Real sync rounds route the outcome through the shared result handler.
                await syncProgressTask.ContinueWith(t => HandleSyncRequestResult(t, bestPeer));
                break;
        }

        if (syncProgressTask.IsCompletedSuccessfully)
        {
            long progress = syncProgressTask.Result;
            if (progress == 0 && _blocksSyncAllocation != null)
            {
                _syncPeerPool.ReportNoSyncProgress(_blocksSyncAllocation); // not very fair here - allocation may have changed
            }
        }

        _blocksSyncAllocation?.FinishSync();
        linkedCancellation.Dispose();
        // Clear the field before disposing so no other code path disposes it twice.
        var source = _peerSyncCancellation;
        _peerSyncCancellation = null;
        source?.Dispose();
    }
}
/// <summary>
/// Executes one fast-blocks batch: borrows a peer (low priority for
/// non-prioritized batches), puts peers that cannot serve the batch's minimum
/// block number to sleep, performs the headers/bodies/receipts request matching
/// the batch type, hands the response to the fast-blocks feed, and reports peer
/// quality / lack of progress before releasing the allocation.
/// </summary>
/// <param name="token">Cancellation token forwarded to the network requests.</param>
/// <param name="batch">Fast-blocks batch (headers, bodies, or receipts).</param>
private async Task ExecuteRequest(CancellationToken token, FastBlocksBatch batch)
{
    // Borrowed allocation is always released in the finally block below.
    SyncPeerAllocation nodeSyncAllocation = _syncPeerPool.Borrow(BorrowOptions.DoNotReplace | (batch.Prioritized ? BorrowOptions.None : BorrowOptions.LowPriority), "fast blocks", batch.MinNumber);
    // Park peers whose head is far below what this batch needs (1024-block slack).
    foreach (PeerInfo peerInfo in _syncPeerPool.UsefulPeers)
    {
        if (peerInfo.HeadNumber < Math.Max(0, (batch.MinNumber ?? 0) - 1024))
        {
            if (_logger.IsDebug)
            {
                _logger.Debug($"Made {peerInfo} sleep for a while - no min number satisfied");
            }

            _syncPeerPool.ReportNoSyncProgress(peerInfo);
        }
    }

    try
    {
        ISyncPeer peer = nodeSyncAllocation?.Current?.SyncPeer;
        batch.Allocation = nodeSyncAllocation;
        if (peer != null)
        {
            batch.MarkSent();
            switch (batch.BatchType)
            {
                case FastBlocksBatchType.Headers:
                {
                    Task<BlockHeader[]> getHeadersTask = peer.GetBlockHeaders(batch.Headers.StartNumber, batch.Headers.RequestSize, 0, token);
                    await getHeadersTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Headers.Response = getHeadersTask.Result;
                                // Headers are validated eagerly, before the feed sees them.
                                ValidateHeaders(token, batch);
                            }
                            else
                            {
                                // NOTE(review): this also fires on cancellation, not just faults - confirm intended.
                                _syncPeerPool.ReportInvalid(batch.Allocation);
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Bodies:
                {
                    Task<BlockBody[]> getBodiesTask = peer.GetBlocks(batch.Bodies.Request, token);
                    await getBodiesTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Bodies.Response = getBodiesTask.Result;
                            }
                            else
                            {
                                _syncPeerPool.ReportInvalid(batch.Allocation);
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Receipts:
                {
                    Task<TxReceipt[][]> getReceiptsTask = peer.GetReceipts(batch.Receipts.Request, token);
                    await getReceiptsTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Receipts.Response = getReceiptsTask.Result;
                            }
                            else
                            {
                                _syncPeerPool.ReportInvalid(batch.Allocation);
                            }
                        }
                    );
                    break;
                }

                default:
                {
                    throw new InvalidOperationException($"{nameof(FastBlocksBatchType)} is {batch.BatchType}");
                }
            }
        }

        // Default assumes the worst until the feed says otherwise.
        (BlocksDataHandlerResult Result, int ItemsSynced) result = (BlocksDataHandlerResult.InvalidFormat, 0);
        try
        {
            if (batch.Bodies?.Response == null && batch.Headers?.Response == null && batch.Receipts?.Response == null)
            {
                // to avoid uncontrolled loop in case of a code error
                await Task.Delay(10);
            }

            result = _fastBlocksFeed.HandleResponse(batch);
        }
        catch (Exception e)
        {
            // possibly clear the response and handle empty response batch here (to avoid missing parts)
            if (_logger.IsError)
            {
                _logger.Error($"Error when handling response", e);
            }
        }

        Interlocked.Add(ref _downloadedHeaders, result.ItemsSynced);
        if (result.ItemsSynced == 0 && peer != null)
        {
            _syncPeerPool.ReportNoSyncProgress(nodeSyncAllocation);
        }
    }
    finally
    {
        if (nodeSyncAllocation != null)
        {
            _syncPeerPool.Free(nodeSyncAllocation);
        }
    }
}
/// <summary>
/// Executes one fast-blocks batch using the allocation already attached to the
/// batch: parks peers that cannot serve the batch's minimum block number,
/// performs the headers/bodies/receipts request matching the batch type
/// (reporting timeouts vs. other failures separately), hands the response to
/// the fast-blocks feed, and reports lack of progress before freeing the
/// allocation in the finally block.
/// </summary>
/// <param name="token">Cancellation token forwarded to the network requests.</param>
/// <param name="batch">Fast-blocks batch (headers, bodies, or receipts) with a
/// pre-assigned peer allocation.</param>
private async Task ExecuteRequest(CancellationToken token, FastBlocksBatch batch)
{
    SyncPeerAllocation syncPeerAllocation = batch.Allocation;
    try
    {
        // Park peers whose head is far below what this batch needs (1024-block slack).
        foreach (PeerInfo usefulPeer in _syncPeerPool.UsefulPeers)
        {
            if (usefulPeer.HeadNumber < Math.Max(0, (batch.MinNumber ?? 0) - 1024))
            {
                if (_logger.IsDebug)
                {
                    _logger.Debug($"Made {usefulPeer} sleep for a while - no min number satisfied");
                }

                _syncPeerPool.ReportNoSyncProgress(usefulPeer);
            }
        }

        PeerInfo peerInfo = syncPeerAllocation?.Current;
        ISyncPeer peer = peerInfo?.SyncPeer;
        if (peer != null)
        {
            batch.MarkSent();
            switch (batch.BatchType)
            {
                case FastBlocksBatchType.Headers:
                {
                    Task<BlockHeader[]> getHeadersTask = peer.GetBlockHeaders(batch.Headers.StartNumber, batch.Headers.RequestSize, 0, token);
                    await getHeadersTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Headers.Response = getHeadersTask.Result;
                            }
                            else
                            {
                                // NOTE(review): t.Exception is null for a canceled (not faulted) task,
                                // which would NRE here - confirm cancellation cannot reach this branch.
                                if (t.Exception.InnerExceptions.Any(e => e is TimeoutException))
                                {
                                    _syncPeerPool.ReportInvalid(batch.Allocation, $"headers -> timeout");
                                }
                                else
                                {
                                    _syncPeerPool.ReportInvalid(batch.Allocation, $"headers -> {t.Exception}");
                                }
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Bodies:
                {
                    Task<BlockBody[]> getBodiesTask = peer.GetBlockBodies(batch.Bodies.Request, token);
                    await getBodiesTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Bodies.Response = getBodiesTask.Result;
                            }
                            else
                            {
                                if (t.Exception.InnerExceptions.Any(e => e is TimeoutException))
                                {
                                    _syncPeerPool.ReportInvalid(batch.Allocation, $"bodies -> timeout");
                                }
                                else
                                {
                                    _syncPeerPool.ReportInvalid(batch.Allocation, $"bodies -> {t.Exception}");
                                }
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Receipts:
                {
                    Task<TxReceipt[][]> getReceiptsTask = peer.GetReceipts(batch.Receipts.Request, token);
                    await getReceiptsTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Receipts.Response = getReceiptsTask.Result;
                            }
                            else
                            {
                                if (t.Exception.InnerExceptions.Any(e => e is TimeoutException))
                                {
                                    _syncPeerPool.ReportInvalid(batch.Allocation, $"receipts -> timeout");
                                }
                                else
                                {
                                    _syncPeerPool.ReportInvalid(batch.Allocation, $"receipts -> {t.Exception}");
                                }
                            }
                        }
                    );
                    break;
                }

                default:
                {
                    throw new InvalidOperationException($"{nameof(FastBlocksBatchType)} is {batch.BatchType}");
                }
            }
        }

        // Default assumes the worst until the feed says otherwise.
        (BlocksDataHandlerResult Result, int ItemsSynced) result = (BlocksDataHandlerResult.InvalidFormat, 0);
        try
        {
            result = _fastBlocksFeed.HandleResponse(batch);
        }
        catch (Exception e)
        {
            // possibly clear the response and handle empty response batch here (to avoid missing parts)
            // this practically corrupts sync
            if (_logger.IsError)
            {
                _logger.Error($"Error when handling response", e);
            }
        }

        Interlocked.Add(ref _downloadedHeaders, result.ItemsSynced);
        if (result.ItemsSynced == 0 && peer != null)
        {
            _syncPeerPool.ReportNoSyncProgress(peerInfo);
        }
    }
    finally
    {
        if (syncPeerAllocation != null)
        {
            _syncPeerPool.Free(syncPeerAllocation);
        }
    }
}
/// <summary>
/// Convenience overload: reports "no sync progress" for the peer currently held
/// by <paramref name="allocation"/>. A null allocation forwards a null peer.
/// </summary>
/// <param name="syncPeerPool">Pool to report to.</param>
/// <param name="allocation">Allocation whose current peer made no progress; may be null.</param>
/// <param name="isSevere">Whether the lack of progress should be treated as severe.</param>
public static void ReportNoSyncProgress(this IEthSyncPeerPool syncPeerPool, SyncPeerAllocation allocation, bool isSevere = true)
{
    PeerInfo currentPeer = allocation?.Current;
    syncPeerPool.ReportNoSyncProgress(currentPeer, isSevere);
}
/// <summary>
/// Main synchronization loop (mode-update-after-dispatch variant): waits for a
/// sync request signal, selects the best peer from the blocks allocation,
/// dispatches the download task matching the current mode, then updates the sync
/// mode and reports results / lack of progress. Runs until loop cancellation.
/// </summary>
private async Task RunSyncLoop()
{
    while (true)
    {
        if (_logger.IsTrace)
        {
            _logger.Trace("Sync loop - WAIT.");
        }

        // Block until someone requests a sync round (or the loop is cancelled).
        _syncRequested.Wait(_syncLoopCancellation.Token);
        if (_syncLoopCancellation.IsCancellationRequested)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace("Sync loop cancellation requested - leaving the main sync loop.");
            }

            break;
        }

        _syncRequested.Reset();
        if (_logger.IsTrace)
        {
            _logger.Trace("Sync loop - IN.");
        }

        if (!_blockTree.CanAcceptNewBlocks)
        {
            continue;
        }

        PeerInfo bestPeer = null;
        if (_blocksSyncAllocation != null)
        {
            UInt256 ourTotalDifficulty = _blockTree.BestSuggested?.TotalDifficulty ?? 0;
            _syncPeerPool.EnsureBest();
            bestPeer = _blocksSyncAllocation?.Current;
            // Only sync from a peer with a strictly better chain.
            if (bestPeer == null || bestPeer.TotalDifficulty <= ourTotalDifficulty)
            {
                if (_logger.IsTrace)
                {
                    _logger.Trace("Skipping sync - no peer with better chain.");
                }

                continue;
            }

            SyncEvent?.Invoke(this, new SyncEventArgs(bestPeer.SyncPeer, Synchronization.SyncEvent.Started));
            if (_logger.IsDebug)
            {
                _logger.Debug($"Starting {_syncMode.Current} sync with {bestPeer} - theirs {bestPeer?.HeadNumber} {bestPeer?.TotalDifficulty} | ours {_bestSuggestedNumber} {_blockTree.BestSuggested?.TotalDifficulty ?? 0}");
            }
        }

        // Per-round cancellation linked with the loop-wide token.
        _peerSyncCancellation = new CancellationTokenSource();
        var linkedCancellation = CancellationTokenSource.CreateLinkedTokenSource(_peerSyncCancellation.Token, _syncLoopCancellation.Token);
        Task<long> syncProgressTask;
        switch (_syncMode.Current)
        {
            case SyncMode.Headers:
                syncProgressTask = _blockDownloader.DownloadHeaders(bestPeer, SyncModeSelector.FullSyncThreshold, linkedCancellation.Token);
                break;
            case SyncMode.StateNodes:
                // NOTE(review): uses the loop-wide token, not the linked one - confirm intended.
                syncProgressTask = DownloadStateNodes(_syncLoopCancellation.Token);
                break;
            case SyncMode.WaitForProcessor:
                syncProgressTask = Task.Delay(1000).ContinueWith(_ => 0L);
                break;
            case SyncMode.Full:
                syncProgressTask = _blockDownloader.DownloadBlocks(bestPeer, linkedCancellation.Token);
                break;
            default:
                throw new ArgumentOutOfRangeException();
        }

        // Mode is re-evaluated while the download task runs; beforeUpdate is kept so
        // "no progress" is only reported when the mode did not change mid-round.
        SyncMode beforeUpdate = _syncMode.Current;
        long bestFullState = FindBestFullState();
        _syncMode.Update(_bestSuggestedNumber, _blockTree.BestSuggestedFullBlock?.Number ?? 0, bestFullState);
        if (beforeUpdate != SyncMode.WaitForProcessor)
        {
            await syncProgressTask.ContinueWith(t => HandleSyncRequestResult(t, bestPeer));
            if (_syncMode.Current == SyncMode.Headers)
            {
                // NOTE(review): no null check on _blocksSyncAllocation in this branch,
                // unlike the else-if below - confirm it cannot be null here.
                _blocksSyncAllocation.MinBlocksAhead = SyncModeSelector.FullSyncThreshold;
            }
            else if (_blocksSyncAllocation != null)
            {
                _blocksSyncAllocation.MinBlocksAhead = null;
            }

            if (syncProgressTask.IsCompletedSuccessfully)
            {
                long progress = syncProgressTask.Result;
                if (_syncMode.Current == beforeUpdate && progress == 0 && _blocksSyncAllocation != null)
                {
                    _syncPeerPool.ReportNoSyncProgress(_blocksSyncAllocation); // not very fair here - allocation may have changed
                }
            }
        }

        _blocksSyncAllocation?.FinishSync();
        linkedCancellation.Dispose();
        // Clear the field before disposing so no other code path disposes it twice.
        var source = _peerSyncCancellation;
        _peerSyncCancellation = null;
        source?.Dispose();
    }
}