private async Task SynchronizeWithPeerAsync(PeerInfo peerInfo, CancellationToken peerSyncToken)
{
    bool wasCanceled = false;
    ISynchronizationPeer peer = peerInfo.Peer;
    BigInteger bestNumber = _blockTree.BestSuggested.Number;
    // UInt256 bestDifficulty = _blockTree.BestSuggested.Difficulty;

    const int maxLookup = 64;
    int ancestorLookupLevel = 0;
    bool isCommonAncestorKnown = false;

    // Keep requesting batches until our best suggested block catches up with what the peer advertises.
    while (peerInfo.NumberAvailable > bestNumber && peerInfo.NumberReceived <= peerInfo.NumberAvailable)
    {
        if (_logger.IsTrace) _logger.Trace($"Continue syncing with {peerInfo} (our best {bestNumber})");

        if (ancestorLookupLevel > maxLookup)
        {
            if (_logger.IsWarn) _logger.Warn($"Could not find common ancestor with {peerInfo.Peer.NodeId}");
            throw new EthSynchronizationException("Peer with inconsistent chain in sync");
        }

        if (peerSyncToken.IsCancellationRequested)
        {
            peerSyncToken.ThrowIfCancellationRequested();
        }

        if (!isCommonAncestorKnown)
        {
            // TODO: cases when many peers used for sync and one peer finished sync and then we need resync - we should start from common point and not NumberReceived that may be far in the past
            _logger.Trace($"Finding common ancestor for {peerInfo.Peer.NodeId}");
            isCommonAncestorKnown = true;
        }

        BigInteger blocksLeft = peerInfo.NumberAvailable - peerInfo.NumberReceived;
        int blocksToRequest = (int)BigInteger.Min(blocksLeft + 1, BatchSize);
        if (_logger.IsTrace) _logger.Trace($"Sync request to peer with {peerInfo.NumberAvailable} blocks. Got {peerInfo.NumberReceived} and asking for {blocksToRequest} more.");

        Task<BlockHeader[]> headersTask = peer.GetBlockHeaders(peerInfo.NumberReceived, blocksToRequest, 0, peerSyncToken);
        _currentSyncTask = headersTask;
        BlockHeader[] headers = await headersTask;
        if (_currentSyncTask.IsCanceled)
        {
            wasCanceled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            if (_currentSyncTask.Exception.InnerExceptions.Any(x => x.InnerException is TimeoutException))
            {
                if (_logger.IsTrace) _logger.Error("Failed to retrieve headers when synchronizing (Timeout)", _currentSyncTask.Exception);
            }
            else
            {
                if (_logger.IsError) _logger.Error("Failed to retrieve headers when synchronizing", _currentSyncTask.Exception);
            }

            throw _currentSyncTask.Exception;
        }

        if (peerSyncToken.IsCancellationRequested)
        {
            peerSyncToken.ThrowIfCancellationRequested();
        }

        // headers[0] is the block at NumberReceived, which we already have, so bodies are requested from index 1 onwards.
        List<Keccak> hashes = new List<Keccak>();
        Dictionary<Keccak, BlockHeader> headersByHash = new Dictionary<Keccak, BlockHeader>();
        for (int i = 1; i < headers.Length; i++)
        {
            hashes.Add(headers[i].Hash);
            headersByHash[headers[i].Hash] = headers[i];
        }

        Task<Block[]> bodiesTask = peer.GetBlocks(hashes.ToArray(), peerSyncToken);
        _currentSyncTask = bodiesTask;
        Block[] blocks = await bodiesTask;
        if (_currentSyncTask.IsCanceled)
        {
            wasCanceled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            if (_currentSyncTask.Exception.InnerExceptions.Any(x => x.InnerException is TimeoutException))
            {
                if (_logger.IsTrace) _logger.Error("Failed to retrieve bodies when synchronizing (Timeout)", _currentSyncTask.Exception);
            }
            else
            {
                if (_logger.IsError) _logger.Error("Failed to retrieve bodies when synchronizing", _currentSyncTask.Exception);
            }

            throw _currentSyncTask.Exception;
        }

        for (int i = 0; i < blocks.Length; i++)
        {
            if (peerSyncToken.IsCancellationRequested)
            {
                peerSyncToken.ThrowIfCancellationRequested();
            }

            blocks[i].Header = headersByHash[hashes[i]];
        }

        // If the parent of the first block in the batch is unknown locally, step back by one batch and retry to find a common ancestor.
        if (blocks.Length > 0)
        {
            Block parent = _blockTree.FindParent(blocks[0]);
            if (parent == null)
            {
                ancestorLookupLevel += BatchSize;
                peerInfo.NumberReceived = peerInfo.NumberReceived >= BatchSize ? peerInfo.NumberReceived - BatchSize : 0;
                continue;
            }
        }

        // Parity 1.11 non canonical blocks when testing on 27/06
        for (int i = 0; i < blocks.Length; i++)
        {
            if (i != 0 && blocks[i].ParentHash != blocks[i - 1].Hash)
            {
                throw new EthSynchronizationException("Peer sent an inconsistent block list");
            }
        }

        for (int i = 0; i < blocks.Length; i++)
        {
            if (_logger.IsTrace) _logger.Trace($"Received {blocks[i]} from {peer.NodeId}");

            if (_blockValidator.ValidateSuggestedBlock(blocks[i]))
            {
                AddBlockResult addResult = _blockTree.SuggestBlock(blocks[i]);
                if (addResult == AddBlockResult.UnknownParent)
                {
                    if (_logger.IsTrace) _logger.Trace($"Block {blocks[i].Number} ignored (unknown parent)");
                    if (i == 0)
                    {
                        const string message = "Peer sent orphaned blocks";
                        _logger.Error(message);
                        throw new EthSynchronizationException(message);
                        // if (_logger.IsTrace) _logger.Trace("Resyncing split");
                        // peerInfo.NumberReceived -= 1;
                        // var syncTask =
                        //     Task.Run(() => SynchronizeWithPeerAsync(peerInfo, _peerSyncCancellationTokenSource.Token),
                        //         _peerSyncCancellationTokenSource.Token);
                        // await syncTask;
                    }
                    else
                    {
                        const string message = "Peer sent an inconsistent batch of block headers";
                        _logger.Error(message);
                        throw new EthSynchronizationException(message);
                    }
                }

                if (_logger.IsTrace) _logger.Trace($"Block {blocks[i].Number} suggested for processing");
            }
            else
            {
                if (_logger.IsWarn) _logger.Warn($"Block {blocks[i].Number} skipped (validation failed)");
            }
        }

        peerInfo.NumberReceived = blocks[blocks.Length - 1].Number;
        bestNumber = _blockTree.BestSuggested.Number;
    }

    if (_logger.IsTrace) _logger.Trace($"Stopping sync processes with Node: {peerInfo.Peer.NodeId}, wasCancelled: {wasCanceled}");
}
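// Illustrative sketch (not part of the original class): the back-off applied above when
// FindParent returns null, extracted into a hypothetical helper so the clamped arithmetic
// is explicit. Stepping back never moves the received pointer below block zero.
private static BigInteger StepBackAfterMissingParent(BigInteger numberReceived, int batchSize)
{
    return numberReceived >= batchSize ? numberReceived - batchSize : 0;
}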
private async Task PeerSyncAsync(CancellationToken token, PeerInfo peerInfo)
{
    bool wasCancelled = false;
    ISynchronizationPeer peer = peerInfo.Peer;
    BigInteger bestNumber = BlockTree.BestSuggested.Number;

    const int maxLookup = 64;
    int ancestorLookupLevel = 0;
    bool isCommonAncestorKnown = false;

    // Keep requesting batches until we catch up with what the peer advertises.
    while (peerInfo.NumberAvailable > bestNumber && peerInfo.NumberReceived <= peerInfo.NumberAvailable)
    {
        if (ancestorLookupLevel > maxLookup)
        {
            throw new InvalidOperationException("Cannot find ancestor"); // TODO: remodel this after full sync test is added
        }

        if (token.IsCancellationRequested)
        {
            token.ThrowIfCancellationRequested();
        }

        if (!isCommonAncestorKnown)
        {
            // TODO: cases when many peers used for sync and one peer finished sync and then we need resync - we should start from common point and not NumberReceived that may be far in the past
            _logger.Info($"Finding common ancestor for {peerInfo.Peer.NodeId}");
            isCommonAncestorKnown = true;
        }

        BigInteger blocksLeft = peerInfo.NumberAvailable - peerInfo.NumberReceived;
        // TODO: fault handling on tasks
        int blocksToRequest = (int)BigInteger.Min(blocksLeft + 1, BatchSize);
        if (_logger.IsDebugEnabled) _logger.Debug($"Sync request to peer with {peerInfo.NumberAvailable} blocks. Got {peerInfo.NumberReceived} and asking for {blocksToRequest} more.");

        Task<BlockHeader[]> headersTask = peer.GetBlockHeaders(peerInfo.NumberReceived, blocksToRequest, 0, token);
        _currentSyncTask = headersTask;
        BlockHeader[] headers = await headersTask;
        if (_currentSyncTask.IsCanceled)
        {
            wasCancelled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            _logger.Error("Failed to retrieve headers when synchronizing", _currentSyncTask.Exception);
            throw _currentSyncTask.Exception;
        }

        if (token.IsCancellationRequested)
        {
            token.ThrowIfCancellationRequested();
        }

        // headers[0] is the block at NumberReceived, which we already have, so bodies are requested from index 1 onwards.
        List<Keccak> hashes = new List<Keccak>();
        Dictionary<Keccak, BlockHeader> headersByHash = new Dictionary<Keccak, BlockHeader>();
        for (int i = 1; i < headers.Length; i++)
        {
            hashes.Add(headers[i].Hash);
            headersByHash[headers[i].Hash] = headers[i];
        }

        Task<Block[]> bodiesTask = peer.GetBlocks(hashes.ToArray(), token);
        _currentSyncTask = bodiesTask;
        Block[] blocks = await bodiesTask;
        if (_currentSyncTask.IsCanceled)
        {
            wasCancelled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            _logger.Error("Failed to retrieve bodies when synchronizing", _currentSyncTask.Exception);
            throw _currentSyncTask.Exception;
        }

        ancestorLookupLevel = 0;
        for (int i = 0; i < blocks.Length; i++)
        {
            if (token.IsCancellationRequested)
            {
                token.ThrowIfCancellationRequested();
            }

            blocks[i].Header = headersByHash[hashes[i]];
        }

        // If the parent of the first block in the batch is unknown locally, step back by one batch and retry to find a common ancestor.
        if (blocks.Length > 0)
        {
            Block parent = BlockTree.FindParent(blocks[0]);
            if (parent == null)
            {
                ancestorLookupLevel += BatchSize;
                peerInfo.NumberReceived -= BatchSize;
                continue;
            }
        }

        // Parity 1.11 non canonical blocks when testing on 27/06
        for (int i = 0; i < blocks.Length; i++)
        {
            if (i != 0 && blocks[i].ParentHash != blocks[i - 1].Hash)
            {
                throw new EthSynchronizationException("Peer sent an inconsistent block list");
            }
        }

        for (int i = 0; i < blocks.Length; i++)
        {
            if (_logger.IsInfoEnabled) _logger.Info($"Received {blocks[i]} from {peer.NodeId}");

            if (_blockValidator.ValidateSuggestedBlock(blocks[i]))
            {
                AddBlockResult addResult = BlockTree.SuggestBlock(blocks[i]);
                if (addResult == AddBlockResult.UnknownParent)
                {
                    if (_logger.IsInfoEnabled) _logger.Info($"Block {blocks[i].Number} ignored (unknown parent)");
                    if (i == 0)
                    {
                        if (_logger.IsWarnEnabled) _logger.Warn("Resyncing split");
                        peerInfo.NumberReceived -= 1;
                        var syncTask = Task.Run(() => PeerSyncAsync(_syncCancellationTokenSource.Token, peerInfo),
                            _syncCancellationTokenSource.Token);
                        await syncTask;
                    }
                    else
                    {
                        const string message = "Peer sent an inconsistent batch of block headers";
                        _logger.Error(message);
                        throw new EthSynchronizationException(message);
                    }
                }

                if (_logger.IsDebugEnabled) _logger.Debug($"Block {blocks[i].Number} suggested for processing");
            }
            else
            {
                if (_logger.IsWarnEnabled) _logger.Warn($"Block {blocks[i].Number} skipped (validation failed)");
            }
        }

        peerInfo.NumberReceived = blocks[blocks.Length - 1].Number;
        bestNumber = BlockTree.BestSuggested.Number;
    }

    if (_logger.IsInfoEnabled) _logger.Info($"Stopping sync processes with Node: {peerInfo.Peer.NodeId}, wasCancelled: {wasCancelled}");

    if (!wasCancelled)
    {
        peerInfo.IsSynced = true;
        Synced?.Invoke(this, new SyncEventArgs(peerInfo.Peer));
    }
}
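// Illustrative sketch (not part of the original class): how a caller might drive
// PeerSyncAsync for a newly added peer. The method name AddPeerAndSyncAsync and the
// error-handling policy are assumptions for illustration only; the cancellation source,
// exception types and logger calls mirror what the methods above already use.
private async Task AddPeerAndSyncAsync(PeerInfo peerInfo)
{
    try
    {
        await PeerSyncAsync(_syncCancellationTokenSource.Token, peerInfo);
    }
    catch (OperationCanceledException)
    {
        // shutdown or peer removal - nothing to report
    }
    catch (EthSynchronizationException e)
    {
        // a peer that sends an inconsistent or orphaned chain should be dropped by the caller
        _logger.Error($"Sync with {peerInfo.Peer.NodeId} failed", e);
    }
}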