/// <summary>
/// Detaches a peer from synchronization: drops it from the peer set and cancels
/// any in-flight sync or init work that targets it. No-op while sync is disabled.
/// </summary>
public void RemovePeer(ISynchronizationPeer synchronizationPeer)
{
    var nodeId = synchronizationPeer.NodeId;

    if (_logger.IsTrace)
    {
        _logger.Trace($"Removing synchronization peer {nodeId}");
    }

    if (!_isInitialized)
    {
        if (_logger.IsTrace)
        {
            _logger.Trace($"Synchronization is disabled, removing peer is blocked: {nodeId}");
        }

        return;
    }

    bool removed = _peers.TryRemove(nodeId, out _);
    if (!removed)
    {
        // Possible if sync failed - we remove the peer and eventually initiate a
        // disconnect, which calls RemovePeer again.
        return;
    }

    bool isCurrentSyncPeer = _currentSyncingPeerInfo?.Peer.NodeId.Equals(nodeId) ?? false;
    if (isCurrentSyncPeer)
    {
        if (_logger.IsTrace)
        {
            _logger.Trace($"Requesting peer cancel with: {nodeId}");
        }

        _peerSyncCancellationTokenSource?.Cancel();
    }

    // Also abort a pending InitPeerInfo for this peer, if one is still running.
    if (_initCancelTokens.TryGetValue(nodeId, out CancellationTokenSource tokenSource))
    {
        tokenSource.Cancel();
    }
}
/// <summary>
/// Removes a peer from the peer set and, if it is the peer currently being synced
/// against, cancels the ongoing sync. Any pending init for the peer is cancelled too.
/// </summary>
public void RemovePeer(ISynchronizationPeer synchronizationPeer)
{
    var nodeId = synchronizationPeer.NodeId;

    bool removed = _peers.TryRemove(nodeId, out _);
    if (!removed)
    {
        return;
    }

    if (_logger.IsInfoEnabled)
    {
        _logger.Info($"Removing synchronization peer {nodeId}");
    }

    lock (_isSyncingLock)
    {
        // Only cancel when a sync is actually running AND it targets this peer.
        bool cancellingCurrentSync = _isSyncing && (_currentSyncingPeer?.Peer.NodeId.Equals(nodeId) ?? false);
        if (cancellingCurrentSync)
        {
            if (_logger.IsInfoEnabled)
            {
                _logger.Info($"Requesting peer cancel with: {nodeId}");
            }

            _syncCancellationTokenSource?.Cancel();
        }
    }

    if (_initCancellationTokenSources.TryGetValue(nodeId, out var tokenSource))
    {
        tokenSource.Cancel();
    }
}
/// <summary>
/// Test step: registers the peer locally and synchronously completes
/// <c>SyncManager.AddPeer</c> before returning, so following steps observe
/// the peer as fully added.
/// </summary>
public SyncingContext AfterPeerIsAdded(ISynchronizationPeer syncPeer)
{
    _peers.TryAdd(syncPeer.ClientId, syncPeer);
    // BUG FIX: the original wrapped an async lambda in new Task(...) and called
    // RunSynchronously(). That only runs the delegate up to its first await -
    // AddPeer was never actually awaited and any exception it threw was lost.
    // Block on the returned task instead (acceptable in test helper code).
    SyncManager.AddPeer(syncPeer).GetAwaiter().GetResult();
    return this;
}
/// <summary>
/// Fetches head block hash and number from a freshly added peer (10s budget),
/// then either marks its PeerInfo initialized or removes the peer and raises
/// the corresponding <c>SyncEvent</c>.
/// </summary>
private async Task InitPeerInfo(ISynchronizationPeer peer, CancellationToken token)
{
    if (_logger.IsTrace)
    {
        _logger.Trace($"Requesting head block info from {peer.NodeId}");
    }

    Task<Keccak> getHashTask = peer.GetHeadBlockHash(token);
    Task<UInt256> getNumberTask = peer.GetHeadBlockNumber(token);
    // Task<UInt256> getDifficultyTask = peer.GetHeadDifficulty(token);

    // BUG FIX: the original attached ContinueWith to the Task.WhenAny task and
    // inspected ITS IsFaulted/IsCanceled. A WhenAny task completes successfully
    // as soon as either inner task finishes, so those branches were dead code and
    // a timeout fell through to the success path, where getNumberTask.Result threw.
    // Inspect the combined inner task instead.
    Task combined = Task.WhenAll(getHashTask, getNumberTask);
    Task winner = await Task.WhenAny(combined, Task.Delay(10000, token));

    if (combined.IsFaulted)
    {
        if (_logger.IsTrace)
        {
            _logger.Trace($"InitPeerInfo failed for node: {peer.NodeId}{Environment.NewLine}{combined.Exception}");
        }

        RemovePeer(peer);
        SyncEvent?.Invoke(this, new SyncEventArgs(peer, SyncStatus.InitFailed));
    }
    else if (combined.IsCanceled || token.IsCancellationRequested)
    {
        RemovePeer(peer);
        SyncEvent?.Invoke(this, new SyncEventArgs(peer, SyncStatus.InitCancelled));
        token.ThrowIfCancellationRequested();
    }
    else if (winner != combined)
    {
        // The 10s delay won the race - treat as an init failure (timeout).
        if (_logger.IsTrace)
        {
            _logger.Trace($"InitPeerInfo timed out for node: {peer.NodeId}");
        }

        RemovePeer(peer);
        SyncEvent?.Invoke(this, new SyncEventArgs(peer, SyncStatus.InitFailed));
    }
    else
    {
        if (_logger.IsTrace)
        {
            _logger.Trace($"Received head block info from {peer.NodeId} with head block number {getNumberTask.Result}");
        }

        SyncEvent?.Invoke(
            this,
            new SyncEventArgs(peer, SyncStatus.InitCompleted)
            {
                NodeBestBlockNumber = getNumberTask.Result,
                OurBestBlockNumber = _blockTree.BestSuggested.Number
            });

        bool result = _peers.TryGetValue(peer.NodeId, out PeerInfo peerInfo);
        if (!result)
        {
            _logger.Error($"Initializing PeerInfo failed for {peer.NodeId}");
            throw new EthSynchronizationException($"Initializing peer info failed for {peer.NodeId.ToString()}");
        }

        peerInfo.NumberAvailable = getNumberTask.Result;
        // peerInfo.Difficulty = getDifficultyTask.Result;
        peerInfo.NumberReceived = _blockTree.BestSuggested.Number;
        peerInfo.IsInitialized = true;
    }
}
/// <summary>
/// Registers a new sync peer and kicks off head-info initialization; on success
/// re-evaluates the best sync candidate and requests a sync round.
/// </summary>
public async Task AddPeer(ISynchronizationPeer synchronizationPeer)
{
    if (_logger.IsTrace)
    {
        _logger.Trace($"Adding synchronization peer {synchronizationPeer.NodeId}");
    }

    var peerInfo = new PeerInfo(synchronizationPeer);
    // BUG FIX: the original did ContainsKey followed by TryAdd, which is a
    // check-then-act race on a concurrent collection - two concurrent AddPeer
    // calls could both pass the check. A single atomic TryAdd removes the race.
    if (!_peers.TryAdd(synchronizationPeer.NodeId, peerInfo))
    {
        if (_logger.IsError)
        {
            _logger.Error($"Sync peer already in peers collection: {synchronizationPeer.NodeId}");
        }

        return;
    }

    var tokenSource = _initCancelTokens[synchronizationPeer.NodeId] = new CancellationTokenSource();

    // ReSharper disable once MethodSupportsCancellation
    await InitPeerInfo(synchronizationPeer, tokenSource.Token).ContinueWith(t =>
    {
        _initCancelTokens.TryRemove(synchronizationPeer.NodeId, out _);
        if (t.IsFaulted)
        {
            // Timeouts are expected churn - log them quietly; anything else is an error.
            if (t.Exception != null && t.Exception.InnerExceptions.Any(x => x.InnerException is TimeoutException))
            {
                if (_logger.IsDebug)
                {
                    _logger.Debug($"AddPeer failed due to timeout: {t.Exception.Message}");
                }
            }
            else if (_logger.IsError)
            {
                _logger.Error("AddPeer failed.", t.Exception);
            }
        }
        else if (t.IsCanceled)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"Init peer info canceled: {synchronizationPeer.NodeId}");
            }
        }
        else
        {
            CheckIfNewPeerIsBetterSyncCandidate(peerInfo);
            RequestSync();
        }
    });
}
/// <summary>
/// Adds a peer to the peer set, keyed by node id; duplicates are silently ignored.
/// </summary>
public void AddPeer(ISynchronizationPeer peer)
{
    bool added = _peers.TryAdd(peer.Node.Id, peer);
    if (!added)
    {
        return;
    }

    if (_logger.IsTrace)
    {
        _logger.Trace($"Added a peer: {peer.ClientId}");
    }
}
/// <summary>
/// Fetches head block hash and number from a peer and registers a PeerInfo for it.
/// Raises <c>SyncFailed</c> (and returns) if the requests fault.
/// </summary>
private async Task InitPeerInfo(ISynchronizationPeer peer, CancellationToken token)
{
    Task<Keccak> getHashTask = peer.GetHeadBlockHash();
    if (_logger.IsDebugEnabled)
    {
        _logger.Debug($"Requesting head block info from {peer.NodeId}");
    }

    Task<BigInteger> getNumberTask = peer.GetHeadBlockNumber(token);
    await Task.WhenAll(getHashTask, getNumberTask).ContinueWith(
        t =>
        {
            if (t.IsFaulted)
            {
                if (_logger.IsErrorEnabled)
                {
                    // Timeouts are common network churn - warn; anything else is an error.
                    if (t.Exception != null && t.Exception.InnerExceptions.Any(x => x is TimeoutException))
                    {
                        _logger.Warn($"InitPeerInfo failed for node: {peer.NodeId}. {t.Exception?.Message}");
                    }
                    else
                    {
                        // BUG FIX: message typo "failedf or" corrected.
                        _logger.Error($"InitPeerInfo failed for node: {peer.NodeId}.", t.Exception);
                    }
                }

                SyncFailed?.Invoke(this, new SyncEventArgs(peer));
            }
            else if (t.IsCanceled)
            {
                token.ThrowIfCancellationRequested();
            }
        }, token);

    // BUG FIX: the original fell through and read getNumberTask.Result even when
    // the task had faulted, which throws AggregateException after SyncFailed was
    // already raised. Bail out unless init genuinely succeeded.
    if (getNumberTask.Status != TaskStatus.RanToCompletion)
    {
        return;
    }

    if (_logger.IsDebugEnabled)
    {
        // BUG FIX: was _logger.Info under an IsDebugEnabled guard (level mismatch),
        // and "numer" typo corrected.
        _logger.Debug($"Received head block info from {peer.NodeId} with head block number {getNumberTask.Result}");
    }

    bool addResult = _peers.TryAdd(peer.NodeId, new PeerInfo(peer, getNumberTask.Result)
    {
        NumberReceived = BlockTree.BestSuggested.Number // TODO: cheating now with assuming the consistency of the chains
    });
    if (!addResult)
    {
        _logger.Error($"Adding {nameof(PeerInfo)} failed for {peer.NodeId}");
    }
}
/// <summary>
/// Handles a session disconnect: unsubscribes session events, detaches the
/// matching sync peer from sync manager and transaction pool, and drops the session.
/// </summary>
private void SessionDisconnected(object sender, DisconnectEventArgs e)
{
    ISession session = (ISession)sender;
    session.Initialized -= SessionInitialized;
    session.Disconnected -= SessionDisconnected;

    // IMPROVEMENT: single TryGetValue instead of ContainsKey + indexer -
    // avoids a double lookup and a lost-update race between check and read.
    if (_syncPeers.TryGetValue(session.SessionId, out ISynchronizationPeer syncPeer))
    {
        _syncManager.RemovePeer(syncPeer);
        _transactionPool.RemovePeer(syncPeer.Node.Id);
    }

    _sessions.TryRemove(session.SessionId, out session);
}
/// <summary>
/// Sends a pending transaction to a peer unless the transaction is already
/// considered obsolete at the current epoch timestamp.
/// </summary>
private void Notify(ISynchronizationPeer peer, Transaction transaction)
{
    var currentTimestamp = new UInt256(_timestamp.EpochSeconds);
    bool isObsolete = _pendingTransactionThresholdValidator.IsObsolete(currentTimestamp, transaction.Timestamp);
    if (isObsolete)
    {
        return;
    }

    Metrics.PendingTransactionsSent++;
    peer.SendNewTransaction(transaction);

    if (_logger.IsTrace)
    {
        _logger.Trace($"Notified {peer.Node.Id} about a transaction: {transaction.Hash}");
    }
}
// Verifies that after syncing to a miner and then observing a one-block split,
// the manager requests only the missing header range (6, count 1) from the new
// peer instead of re-running a full sync.
public async Task Does_not_do_full_sync_when_not_needed_with_split()
{
    // Miner chain of 6 blocks; miner1 is a full mock backed by that tree.
    BlockTree minerTree = Build.A.BlockTree(_genesisBlock).OfChainLength(6).TestObject;
    ISynchronizationPeer miner1 = new SynchronizationPeerMock(minerTree);

    // Signalled whenever a sync round finishes (either outcome).
    AutoResetEvent resetEvent = new AutoResetEvent(false);
    _manager.SyncEvent += (sender, args) =>
    {
        if (args.SyncStatus == SyncStatus.Completed || args.SyncStatus == SyncStatus.Failed)
        {
            resetEvent.Set();
        }
    };

    _manager.Start();
    Task addMiner1Task = _manager.AddPeer(miner1);
    await Task.WhenAll(addMiner1Task);
    resetEvent.WaitOne(_standardTimeoutUnit); // wait for first sync round to finish

    Assert.AreEqual(minerTree.BestSuggested.Hash, _blockTree.BestSuggested.Hash, "client agrees with miner before split");

    // Extend the miner chain by one block to create the "split".
    Block newBlock = Build.A.Block.WithParent(minerTree.Head).TestObject;
    minerTree.SuggestBlock(newBlock);
    minerTree.UpdateMainChain(newBlock);

    // miner2 is an NSubstitute stub reporting the same head as miner1 (post-split).
    // NOTE(review): Arg.Any<CancellationToken>() is passed to miner1 (a concrete
    // mock, not a substitute) when building the Returns value - confirm this does
    // not leak an NSubstitute argument matcher outside a stubbing context.
    ISynchronizationPeer miner2 = Substitute.For<ISynchronizationPeer>();
    miner2.GetHeadBlockNumber(Arg.Any<CancellationToken>()).Returns(miner1.GetHeadBlockNumber(CancellationToken.None));
    miner2.GetHeadBlockHash(Arg.Any<CancellationToken>()).Returns(miner1.GetHeadBlockHash(CancellationToken.None));
    miner2.NodeId.Returns(new NodeId(TestObject.PublicKeyB));

    // Sanity-check the stub before handing it to the manager.
    Assert.AreEqual(newBlock.Number, await miner2.GetHeadBlockNumber(Arg.Any<CancellationToken>()), "number as expected");
    Assert.AreEqual(newBlock.Hash, await miner2.GetHeadBlockHash(default(CancellationToken)), "hash as expected");

    await _manager.AddPeer(miner2);
    resetEvent.WaitOne(_standardTimeoutUnit); // wait for the split resync round

    // Only the single missing header (start 6, count 1) should have been requested.
    await miner2.Received().GetBlockHeaders(6, 1, 0, default(CancellationToken));
}
/// <summary>
/// Registers a sync peer: skips known peers, then runs head-info initialization
/// and starts a sync round on success. (In this variant, InitPeerInfo is what
/// inserts the peer into the peer set.)
/// </summary>
public async Task AddPeer(ISynchronizationPeer synchronizationPeer)
{
    var nodeId = synchronizationPeer.NodeId;

    if (_peers.ContainsKey(nodeId))
    {
        if (_logger.IsDebugEnabled)
        {
            _logger.Debug($"Sync peer already in peers collection: {nodeId}");
        }

        return;
    }

    if (_logger.IsInfoEnabled)
    {
        _logger.Info($"Adding synchronization peer {nodeId}");
    }

    var initCancellation = new CancellationTokenSource();
    _initCancellationTokenSources[nodeId] = initCancellation;

    await InitPeerInfo(synchronizationPeer, initCancellation.Token).ContinueWith(t =>
    {
        _initCancellationTokenSources.TryRemove(nodeId, out _);

        if (t.IsFaulted)
        {
            if (_logger.IsErrorEnabled)
            {
                _logger.Error("AddPeer failed.", t.Exception);
            }

            return;
        }

        if (t.IsCanceled)
        {
            if (_logger.IsWarnEnabled)
            {
                _logger.Warn($"Init peer info cancelled: {nodeId}");
            }

            return;
        }

        RunSync();
    });
}
/// <summary>
/// Handles a session disconnect: unsubscribes session events, detaches the
/// matching sync peer from sync manager and transaction pool (logging the
/// disconnect reason), and drops the session from the session set.
/// </summary>
private void SessionDisconnected(object sender, DisconnectEventArgs e)
{
    ISession session = (ISession)sender;
    session.Initialized -= SessionInitialized;
    session.Disconnected -= SessionDisconnected;

    // IMPROVEMENT: single TryGetValue instead of ContainsKey + indexer -
    // avoids a double lookup and a lost-update race between check and read.
    if (_syncPeers.TryGetValue(session.SessionId, out ISynchronizationPeer syncPeer))
    {
        _syncManager.RemovePeer(syncPeer);
        _transactionPool.RemovePeer(syncPeer.Node.Id);
        if (_logger.IsDebug)
        {
            _logger.Debug($"{session.Node.ClientId} sync peer {session} disconnected {e.DisconnectType} {e.DisconnectReason}");
        }
    }

    _sessions.TryRemove(session.SessionId, out session);
}
// Verifies that after syncing to a miner and then observing a one-block split,
// the manager asks the new peer only for the missing header range (6, count 1)
// rather than re-running a full sync.
public async Task Does_not_do_full_sync_when_not_needed_with_split()
{
    // Miner chain of 6 blocks; miner1 is a full mock backed by that tree.
    BlockTree minerTree = Build.A.BlockTree(_genesisBlock).OfChainLength(6).TestObject;
    ISynchronizationPeer miner1 = new SynchronizationPeerMock(minerTree);

    // Signalled on any sync event; ManualResetEvent stays set once signalled.
    ManualResetEvent resetEvent = new ManualResetEvent(false);
    _manager.SyncEvent += (sender, args) => { resetEvent.Set(); };

    Task addMiner1Task = _manager.AddPeer(miner1);
    await Task.WhenAll(addMiner1Task);
    resetEvent.WaitOne(TimeSpan.FromSeconds(1)); // wait for first sync round

    Assert.AreEqual(minerTree.BestSuggested.Hash, _blockTree.BestSuggested.Hash, "client agrees with miner before split");

    // Extend the miner chain by one block to create the "split".
    Block newBlock = Build.A.Block.WithParent(minerTree.Head).TestObject;
    minerTree.SuggestBlock(newBlock);
    minerTree.MarkAsProcessed(newBlock.Hash);
    minerTree.MoveToMain(newBlock.Hash);

    // miner2 is an NSubstitute stub reporting the same (post-split) head as miner1.
    // NOTE(review): Arg.Any<CancellationToken>() is used as an argument to
    // miner1.GetHeadBlockNumber INSIDE the Returns(...) value expression -
    // NSubstitute argument matchers are only meant for configuring substitutes;
    // confirm this does not enqueue a stray matcher specification.
    ISynchronizationPeer miner2 = Substitute.For<ISynchronizationPeer>();
    miner2.GetHeadBlockNumber(Arg.Any<CancellationToken>()).Returns(miner1.GetHeadBlockNumber(Arg.Any<CancellationToken>()));
    miner2.GetHeadBlockHash(Arg.Any<CancellationToken>()).Returns(miner1.GetHeadBlockHash(default(CancellationToken)));
    miner2.NodeId.Returns(new NodeId(TestObject.PublicKeyB));

    // Sanity-check the stub before handing it to the manager.
    Assert.AreEqual(newBlock.Number, await miner2.GetHeadBlockNumber(Arg.Any<CancellationToken>()), "number as expected");
    Assert.AreEqual(newBlock.Hash, await miner2.GetHeadBlockHash(default(CancellationToken)), "hash as expected");

    await _manager.AddPeer(miner2);

    // Only the single missing header (start 6, count 1) should have been requested.
    await miner2.Received().GetBlockHeaders(6, 1, 0, default(CancellationToken));
}
/// <summary>Creates event args carrying the peer the event concerns and its sync status.</summary>
public SyncEventArgs(ISynchronizationPeer peer, SyncStatus status)
    => (Peer, SyncStatus) = (peer, status);
/// <summary>Creates event args carrying only the peer the event concerns.</summary>
public SyncEventArgs(ISynchronizationPeer peer) => Peer = peer;
/// <summary>
/// Test step: forwards a block hint (hash + number) to the sync manager as if
/// it arrived from the given peer.
/// </summary>
public SyncingContext AfterHintBlockMessage(Block block, ISynchronizationPeer peer)
{
    var sourceNode = peer.Node.Id;
    SyncManager.HintBlock(block.Hash, block.Number, sourceNode);
    return this;
}
/// <summary>
/// Test step: stamps the block with a synthetic total difficulty
/// (difficulty * (number + 1)) and delivers it to the sync manager as a
/// NewBlock message from the given peer.
/// </summary>
public SyncingContext AfterNewBlockMessage(Block block, ISynchronizationPeer peer)
{
    BigInteger heightFactor = (BigInteger)block.Number + 1;
    block.TotalDifficulty = (UInt256)(block.Difficulty * heightFactor);
    SyncManager.AddNewBlock(block, peer.Node.Id);
    return this;
}
/// <summary>
/// Downloads headers and bodies from one peer in batches until our best block
/// catches up with the peer's advertised head; walks backwards to find a common
/// ancestor when a batch's parent is unknown. Raises <c>Synced</c> on completion.
/// </summary>
private async Task PeerSyncAsync(CancellationToken token, PeerInfo peerInfo)
{
    bool wasCancelled = false;
    ISynchronizationPeer peer = peerInfo.Peer;
    BigInteger bestNumber = BlockTree.BestSuggested.Number;

    const int maxLookup = 64; // cap on backwards ancestor search
    int ancestorLookupLevel = 0;
    bool isCommonAncestorKnown = false;

    while (peerInfo.NumberAvailable > bestNumber && peerInfo.NumberReceived <= peerInfo.NumberAvailable)
    {
        if (ancestorLookupLevel > maxLookup)
        {
            throw new InvalidOperationException("Cannot find ancestor"); // TODO: remodel this after full sync test is added
        }

        if (token.IsCancellationRequested)
        {
            token.ThrowIfCancellationRequested();
        }

        if (!isCommonAncestorKnown)
        {
            // TODO: cases when many peers used for sync and one peer finished sync and then we need resync - we should start from common point and not NumberReceived that may be far in the past
            _logger.Info($"Finding common ancestor for {peerInfo.Peer.NodeId}");
            isCommonAncestorKnown = true;
        }

        BigInteger blocksLeft = peerInfo.NumberAvailable - peerInfo.NumberReceived;
        // TODO: fault handling on tasks
        int blocksToRequest = (int)BigInteger.Min(blocksLeft + 1, BatchSize);
        if (_logger.IsDebugEnabled)
        {
            _logger.Debug($"Sync request to peer with {peerInfo.NumberAvailable} blocks. Got {peerInfo.NumberReceived} and asking for {blocksToRequest} more.");
        }

        Task<BlockHeader[]> headersTask = peer.GetBlockHeaders(peerInfo.NumberReceived, blocksToRequest, 0, token);
        _currentSyncTask = headersTask;
        BlockHeader[] headers = await headersTask;
        // NOTE: if headersTask faulted or was cancelled, the await above throws
        // before reaching these checks - they are kept for defensive parity with
        // the original control flow.
        if (_currentSyncTask.IsCanceled)
        {
            wasCancelled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            _logger.Error("Failed to retrieve headers when synchronizing", _currentSyncTask.Exception);
            throw _currentSyncTask.Exception;
        }

        if (token.IsCancellationRequested)
        {
            token.ThrowIfCancellationRequested();
        }

        // Header at index 0 is our anchor (already known) - request bodies for the rest.
        List<Keccak> hashes = new List<Keccak>();
        Dictionary<Keccak, BlockHeader> headersByHash = new Dictionary<Keccak, BlockHeader>();
        for (int i = 1; i < headers.Length; i++)
        {
            hashes.Add(headers[i].Hash);
            headersByHash[headers[i].Hash] = headers[i];
        }

        Task<Block[]> bodiesTask = peer.GetBlocks(hashes.ToArray(), token);
        _currentSyncTask = bodiesTask;
        Block[] blocks = await bodiesTask;
        if (_currentSyncTask.IsCanceled)
        {
            wasCancelled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            _logger.Error("Failed to retrieve bodies when synchronizing", _currentSyncTask.Exception);
            throw _currentSyncTask.Exception;
        }

        // BUG FIX: an empty batch would previously crash on blocks[blocks.Length - 1]
        // below (and otherwise loop forever re-requesting the same range).
        if (blocks.Length == 0)
        {
            break;
        }

        ancestorLookupLevel = 0;
        for (int i = 0; i < blocks.Length; i++)
        {
            if (token.IsCancellationRequested)
            {
                token.ThrowIfCancellationRequested();
            }

            blocks[i].Header = headersByHash[hashes[i]];
        }

        if (blocks.Length > 0)
        {
            Block parent = BlockTree.FindParent(blocks[0]);
            if (parent == null)
            {
                // Unknown parent - step the request window back one batch and retry.
                ancestorLookupLevel += BatchSize;
                // BUG FIX: unclamped "NumberReceived -= BatchSize" could go negative
                // (BigInteger does not saturate); clamp at 0 like the newer variant.
                peerInfo.NumberReceived = peerInfo.NumberReceived >= BatchSize ? peerInfo.NumberReceived - BatchSize : 0;
                continue;
            }
        }

        // Parity 1.11 non canonical blocks when testing on 27/06
        for (int i = 0; i < blocks.Length; i++)
        {
            if (i != 0 && blocks[i].ParentHash != blocks[i - 1].Hash)
            {
                // BUG FIX: message typo "send" -> "sent".
                throw new EthSynchronizationException("Peer sent an inconsistent block list");
            }
        }

        for (int i = 0; i < blocks.Length; i++)
        {
            if (_logger.IsInfoEnabled)
            {
                _logger.Info($"Received {blocks[i]} from {peer.NodeId}");
            }

            if (_blockValidator.ValidateSuggestedBlock(blocks[i]))
            {
                AddBlockResult addResult = BlockTree.SuggestBlock(blocks[i]);
                if (addResult == AddBlockResult.UnknownParent)
                {
                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info($"Block {blocks[i].Number} ignored (unknown parent)");
                    }

                    if (i == 0)
                    {
                        // First block of the batch is orphaned - step back one block
                        // and recursively resync from there.
                        if (_logger.IsWarnEnabled)
                        {
                            _logger.Warn("Resyncing split");
                        }

                        peerInfo.NumberReceived -= 1;
                        var syncTask = Task.Run(() => PeerSyncAsync(_syncCancellationTokenSource.Token, peerInfo), _syncCancellationTokenSource.Token);
                        await syncTask;
                    }
                    else
                    {
                        const string message = "Peer sent an inconsistent batch of block headers";
                        _logger.Error(message);
                        throw new EthSynchronizationException(message);
                    }
                }

                if (_logger.IsDebugEnabled)
                {
                    _logger.Debug($"Block {blocks[i].Number} suggested for processing");
                }
            }
            else
            {
                if (_logger.IsWarnEnabled)
                {
                    _logger.Warn($"Block {blocks[i].Number} skipped (validation failed)");
                }
            }
        }

        peerInfo.NumberReceived = blocks[blocks.Length - 1].Number;
        bestNumber = BlockTree.BestSuggested.Number;
    }

    if (_logger.IsInfoEnabled)
    {
        _logger.Info($"Stopping sync processes with Node: {peerInfo.Peer.NodeId}, wasCancelled: {wasCancelled}");
    }

    if (!wasCancelled)
    {
        peerInfo.IsSynced = true;
        Synced?.Invoke(this, new SyncEventArgs(peerInfo.Peer));
    }
}
// Intentionally a no-op: this implementation ignores peer additions.
// NOTE(review): presumably a null-object / test double implementation of the
// sync interface - confirm the empty body is intentional.
public void AddPeer(ISynchronizationPeer peer) { }
/// <summary>
/// Creates peer sync state seeded with the best block number the remote peer advertised.
/// </summary>
public PeerInfo(ISynchronizationPeer peer, BigInteger bestRemoteBlockNumber)
    => (Peer, NumberAvailable) = (peer, bestRemoteBlockNumber);
/// <summary>
/// Downloads headers and bodies from one peer in batches until our best block
/// catches up with the peer's advertised head, walking the request window back
/// (clamped at 0) when a batch's parent is unknown. Throws
/// <c>EthSynchronizationException</c> on inconsistent or orphaned batches.
/// </summary>
private async Task SynchronizeWithPeerAsync(PeerInfo peerInfo, CancellationToken peerSyncToken)
{
    bool wasCanceled = false;
    ISynchronizationPeer peer = peerInfo.Peer;
    BigInteger bestNumber = _blockTree.BestSuggested.Number;

    const int maxLookup = 64; // cap on backwards ancestor search
    int ancestorLookupLevel = 0;
    bool isCommonAncestorKnown = false;

    while (peerInfo.NumberAvailable > bestNumber && peerInfo.NumberReceived <= peerInfo.NumberAvailable)
    {
        if (_logger.IsTrace)
        {
            _logger.Trace($"Continue syncing with {peerInfo} (our best {bestNumber})");
        }

        if (ancestorLookupLevel > maxLookup)
        {
            if (_logger.IsWarn)
            {
                _logger.Warn($"Could not find common ancestor with {peerInfo.Peer.NodeId}");
            }

            throw new EthSynchronizationException("Peer with inconsistent chain in sync");
        }

        if (peerSyncToken.IsCancellationRequested)
        {
            peerSyncToken.ThrowIfCancellationRequested();
        }

        if (!isCommonAncestorKnown)
        {
            // TODO: cases when many peers used for sync and one peer finished sync and then we need resync - we should start from common point and not NumberReceived that may be far in the past
            _logger.Trace($"Finding common ancestor for {peerInfo.Peer.NodeId}");
            isCommonAncestorKnown = true;
        }

        BigInteger blocksLeft = peerInfo.NumberAvailable - peerInfo.NumberReceived;
        int blocksToRequest = (int)BigInteger.Min(blocksLeft + 1, BatchSize);
        if (_logger.IsTrace)
        {
            _logger.Trace($"Sync request to peer with {peerInfo.NumberAvailable} blocks. Got {peerInfo.NumberReceived} and asking for {blocksToRequest} more.");
        }

        Task<BlockHeader[]> headersTask = peer.GetBlockHeaders(peerInfo.NumberReceived, blocksToRequest, 0, peerSyncToken);
        _currentSyncTask = headersTask;
        BlockHeader[] headers = await headersTask;
        // NOTE: if headersTask faulted or was cancelled, the await above throws
        // before these checks run - kept for defensive parity with the original.
        if (_currentSyncTask.IsCanceled)
        {
            wasCanceled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            if (_currentSyncTask.Exception.InnerExceptions.Any(x => x.InnerException is TimeoutException))
            {
                // BUG FIX: was _logger.Error under an IsTrace guard (level mismatch) -
                // timeouts are expected churn and should be logged quietly at Trace.
                if (_logger.IsTrace)
                {
                    _logger.Trace($"Failed to retrieve headers when synchronizing (Timeout) {_currentSyncTask.Exception}");
                }
            }
            else
            {
                if (_logger.IsError)
                {
                    _logger.Error("Failed to retrieve headers when synchronizing", _currentSyncTask.Exception);
                }
            }

            throw _currentSyncTask.Exception;
        }

        if (peerSyncToken.IsCancellationRequested)
        {
            peerSyncToken.ThrowIfCancellationRequested();
        }

        // Header at index 0 is our anchor (already known) - request bodies for the rest.
        List<Keccak> hashes = new List<Keccak>();
        Dictionary<Keccak, BlockHeader> headersByHash = new Dictionary<Keccak, BlockHeader>();
        for (int i = 1; i < headers.Length; i++)
        {
            hashes.Add(headers[i].Hash);
            headersByHash[headers[i].Hash] = headers[i];
        }

        Task<Block[]> bodiesTask = peer.GetBlocks(hashes.ToArray(), peerSyncToken);
        _currentSyncTask = bodiesTask;
        Block[] blocks = await bodiesTask;
        if (_currentSyncTask.IsCanceled)
        {
            wasCanceled = true;
            break;
        }

        if (_currentSyncTask.IsFaulted)
        {
            if (_currentSyncTask.Exception.InnerExceptions.Any(x => x.InnerException is TimeoutException))
            {
                // BUG FIX: same IsTrace-guard / Error-call mismatch as above.
                if (_logger.IsTrace)
                {
                    _logger.Trace($"Failed to retrieve bodies when synchronizing (Timeout) {_currentSyncTask.Exception}");
                }
            }
            else
            {
                if (_logger.IsError)
                {
                    _logger.Error("Failed to retrieve bodies when synchronizing", _currentSyncTask.Exception);
                }
            }

            throw _currentSyncTask.Exception;
        }

        // BUG FIX: an empty batch would previously crash on blocks[blocks.Length - 1]
        // below (and otherwise loop forever re-requesting the same range).
        if (blocks.Length == 0)
        {
            break;
        }

        for (int i = 0; i < blocks.Length; i++)
        {
            if (peerSyncToken.IsCancellationRequested)
            {
                peerSyncToken.ThrowIfCancellationRequested();
            }

            blocks[i].Header = headersByHash[hashes[i]];
        }

        if (blocks.Length > 0)
        {
            Block parent = _blockTree.FindParent(blocks[0]);
            if (parent == null)
            {
                // Unknown parent - step the request window back one batch (clamped at 0).
                ancestorLookupLevel += BatchSize;
                peerInfo.NumberReceived = peerInfo.NumberReceived >= BatchSize ? peerInfo.NumberReceived - BatchSize : 0;
                continue;
            }
        }

        // Parity 1.11 non canonical blocks when testing on 27/06
        for (int i = 0; i < blocks.Length; i++)
        {
            if (i != 0 && blocks[i].ParentHash != blocks[i - 1].Hash)
            {
                throw new EthSynchronizationException("Peer sent an inconsistent block list");
            }
        }

        for (int i = 0; i < blocks.Length; i++)
        {
            if (_logger.IsTrace)
            {
                _logger.Trace($"Received {blocks[i]} from {peer.NodeId}");
            }

            if (_blockValidator.ValidateSuggestedBlock(blocks[i]))
            {
                AddBlockResult addResult = _blockTree.SuggestBlock(blocks[i]);
                if (addResult == AddBlockResult.UnknownParent)
                {
                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Block {blocks[i].Number} ignored (unknown parent)");
                    }

                    if (i == 0)
                    {
                        const string message = "Peer sent orphaned blocks";
                        _logger.Error(message);
                        throw new EthSynchronizationException(message);
                    }
                    else
                    {
                        const string message = "Peer sent an inconsistent batch of block headers";
                        _logger.Error(message);
                        throw new EthSynchronizationException(message);
                    }
                }

                if (_logger.IsTrace)
                {
                    _logger.Trace($"Block {blocks[i].Number} suggested for processing");
                }
            }
            else
            {
                if (_logger.IsWarn)
                {
                    _logger.Warn($"Block {blocks[i].Number} skipped (validation failed)");
                }
            }
        }

        peerInfo.NumberReceived = blocks[blocks.Length - 1].Number;
        bestNumber = _blockTree.BestSuggested.Number;
    }

    if (_logger.IsTrace)
    {
        _logger.Trace($"Stopping sync processes with Node: {peerInfo.Peer.NodeId}, wasCancelled: {wasCanceled}");
    }
}
// Test step: registers the peer in the local set and hands it to the sync manager.
public SyncingContext AfterPeerIsAdded(ISynchronizationPeer syncPeer)
{
    _peers.TryAdd(syncPeer.ClientId, syncPeer);
    // NOTE(review): if this variant of AddPeer returns a Task, the call below is
    // fire-and-forget (result not awaited, exceptions unobserved) - confirm the
    // sync manager used here exposes a synchronous AddPeer, or await/block on it.
    SyncManager.AddPeer(syncPeer);
    return this;
}
/// <summary>Creates peer sync state for the given peer with default counters.</summary>
public PeerInfo(ISynchronizationPeer peer) => Peer = peer;
/// <summary>
/// Test step: drops the peer from the local set and detaches it from the sync manager.
/// </summary>
public SyncingContext AfterPeerIsRemoved(ISynchronizationPeer syncPeer)
{
    var clientId = syncPeer.ClientId;
    _peers.Remove(clientId);
    SyncManager.RemovePeer(syncPeer);
    return this;
}