private StateSyncBatch PrepareRequest()
{
    // Ask the feed for the next batch of node data to download, capped at MaxRequestSize.
    StateSyncBatch batch = _nodeDataFeed.PrepareRequest(MaxRequestSize);
    if (_logger.IsTrace)
    {
        _logger.Trace($"Pending requests {_pendingRequests}");
    }

    return batch;
}
/// <summary>
/// Borrows a sync peer, sends the node data request for <paramref name="batch"/>,
/// hands the response to the feed and records progress. The peer allocation is
/// always returned to the pool via the finally block.
/// </summary>
private async Task ExecuteRequest(CancellationToken token, StateSyncBatch batch)
{
    SyncPeerAllocation nodeSyncAllocation = _syncPeerPool.Borrow(BorrowOptions.DoNotReplace, "node sync");
    try
    {
        ISyncPeer peer = nodeSyncAllocation?.Current?.SyncPeer;
        batch.AssignedPeer = nodeSyncAllocation;
        if (peer != null)
        {
            var hashes = batch.RequestedNodes.Select(r => r.Hash).ToArray();
            try
            {
                // Await directly instead of ContinueWith + .Result: the previous version
                // left a faulted task's exception unobserved. Failure stays best-effort -
                // on error/cancellation Responses remains unset and the feed re-queues.
                batch.Responses = await peer.GetNodeData(hashes, token);
            }
            catch (Exception)
            {
                // Intentionally swallowed (same behavior as before), but now observed.
            }
        }

        // Default to InvalidFormat so a throwing handler still counts as zero progress.
        (NodeDataHandlerResult Result, int NodesConsumed)result = (NodeDataHandlerResult.InvalidFormat, 0);
        try
        {
            result = _nodeDataFeed.HandleResponse(batch);
            if (result.Result == NodeDataHandlerResult.BadQuality)
            {
                _syncPeerPool.ReportBadPeer(batch.AssignedPeer);
            }
        }
        catch (Exception e)
        {
            if (_logger.IsError)
            {
                _logger.Error("Error when handling response", e);
            }
        }

        Interlocked.Add(ref _consumedNodesCount, result.NodesConsumed);
        if (result.NodesConsumed == 0 && peer != null)
        {
            // A peer that produced no usable nodes is reported so the pool can rotate it.
            _syncPeerPool.ReportNoSyncProgress(nodeSyncAllocation);
        }
    }
    finally
    {
        if (nodeSyncAllocation != null)
        {
            _syncPeerPool.Free(nodeSyncAllocation);
        }
    }
}
/// <summary>
/// Sends the node data request for <paramref name="batch"/> using its pre-assigned peer,
/// routes the response to either the additional data consumer or the standard feed,
/// and records progress. The assigned peer is always freed, even on an early throw.
/// </summary>
private async Task ExecuteRequest(CancellationToken token, StateSyncBatch batch)
{
    var peer = batch.AssignedPeer?.Current?.SyncPeer;
    try
    {
        if (peer != null)
        {
            var hashes = batch.RequestedNodes.Select(r => r.Hash).ToArray();
            try
            {
                // Await directly instead of ContinueWith + .Result: the previous version
                // left a faulted task's exception unobserved. Failure stays best-effort -
                // on error/cancellation Responses remains unset and gets re-queued.
                batch.Responses = await peer.GetNodeData(hashes, token);
            }
            catch (Exception)
            {
                // Intentionally swallowed (same behavior as before), but now observed.
            }
        }

        // Default to InvalidFormat so a throwing handler still counts as zero progress.
        (NodeDataHandlerResult Result, int NodesConsumed)result = (NodeDataHandlerResult.InvalidFormat, 0);
        try
        {
            if (batch.IsAdditionalDataConsumer)
            {
                // Consumer batches bypass the feed; the consumer reports how many nodes it used.
                result = (NodeDataHandlerResult.OK, _additionalConsumer.HandleResponse(new DataConsumerRequest(batch.ConsumerId, batch.RequestedNodes.Select(r => r.Hash).ToArray()), batch.Responses));
            }
            else
            {
                result = _feed.HandleResponse(batch);
            }

            if (result.Result == NodeDataHandlerResult.BadQuality)
            {
                _syncPeerPool.ReportWeakPeer(batch.AssignedPeer);
            }
        }
        catch (Exception e)
        {
            if (_logger.IsError)
            {
                _logger.Error("Error when handling response", e);
            }
        }

        Interlocked.Add(ref _consumedNodesCount, result.NodesConsumed);
        if (result.NodesConsumed == 0 && peer != null)
        {
            _syncPeerPool.ReportNoSyncProgress(batch.AssignedPeer, !batch.IsAdditionalDataConsumer);
        }
    }
    finally
    {
        // Previously Free() was not in a finally, so an early exception leaked the
        // allocation; now the peer is returned to the pool on every path
        // (consistent with the other ExecuteRequest overload in this file).
        if (batch.AssignedPeer != null)
        {
            _syncPeerPool.Free(batch.AssignedPeer);
        }
    }
}
/// <summary>
/// Main sync loop: keeps creating node-data requests until the feed produces
/// empty batches and every in-flight request has completed.
/// </summary>
private async Task KeepSyncing(CancellationToken token)
{
    do
    {
        if (token.IsCancellationRequested)
        {
            return;
        }

        await UpdateParallelism();
        // The semaphore caps concurrent requests; a slot is released by the
        // continuation below (or immediately when no batch was produced).
        if (!await _semaphore.WaitAsync(1000, token))
        {
            // Timed out waiting for a slot - clear the no-allocation punishment flag
            // and try again. NOTE(review): exact punishment semantics live elsewhere
            // in this class - confirm before changing.
            if (_noAllocPunishment)
            {
                _noAllocPunishment = false;
            }

            continue;
        }

        StateSyncBatch request = PrepareRequest();
        if (request.RequestedNodes.Length != 0)
        {
            Interlocked.Increment(ref _pendingRequests);
            if (_logger.IsTrace)
            {
                _logger.Trace($"Creating new request with {request.RequestedNodes.Length} nodes");
            }

            // Fire-and-forget: the continuation restores the pending counter and the
            // semaphore slot regardless of how ExecuteRequest completed.
            Task task = ExecuteRequest(token, request);
#pragma warning disable 4014
            task.ContinueWith(t =>
#pragma warning restore 4014
            {
                Interlocked.Decrement(ref _pendingRequests);
                _semaphore.Release();
            });
        }
        else
        {
            // Nothing to request right now - back off briefly and return the slot.
            await Task.Delay(50);
            _semaphore.Release();
            if (_logger.IsDebug)
            {
                _logger.Debug($"DIAG: 0 batches created with {_pendingRequests} pending requests, {_nodeDataFeed.TotalNodesPending} pending nodes");
            }
        }
    } while (_pendingRequests != 0);

    if (_logger.IsInfo)
    {
        _logger.Info($"Finished with {_pendingRequests} pending requests and {_lastUsefulPeerCount} useful peers.");
    }
}
private StateSyncBatch PrepareRequest()
{
    if (_logger.IsTrace)
    {
        _logger.Trace($"Pending requests {_pendingRequests}");
    }

    // Pull the next standard batch of requested nodes from the feed.
    StateSyncBatch batch = _feed.PrepareRequest();
    return batch;
}
/// <summary>
/// Drives the sync loop: requests batches one at a time until the feed produces
/// empty batches and no requests remain in flight.
/// </summary>
private async Task KeepSyncing(CancellationToken token)
{
    int lastRequestSize;
    do
    {
        if (token.IsCancellationRequested)
        {
            return;
        }

        lastRequestSize = await SyncOnce(token, PrepareRequest());
    } while (_pendingRequests != 0 || lastRequestSize > 0);

    if (_logger.IsInfo)
    {
        _logger.Info($"Finished with {_pendingRequests} pending requests and {_syncPeerPool.UsefulPeerCount} useful peers.");
    }
}
/// <summary>
/// Dispatches one batch: borrows a fast-enough peer, launches the request
/// fire-and-forget, and returns the number of nodes requested (0 when the
/// batch was empty, after a short back-off).
/// </summary>
private async Task <int> SyncOnce(CancellationToken token, StateSyncBatch request)
{
    int requestSize = 0;
    if (request.RequestedNodes.Length != 0)
    {
        // should be random selection? (we do not know if they support what we need)
        request.AssignedPeer = await _syncPeerPool.BorrowAsync(new TotalDiffFilter(BySpeedSelectionStrategy.Fastest, request.RequiredPeerDifficulty), "node sync", 1000);
        Interlocked.Increment(ref _pendingRequests);
        // BUG FIX: requestSize was previously assigned inside the fire-and-forget
        // continuation, so this method raced the continuation and usually returned 0,
        // breaking the caller's "lastRequestSize > 0" termination check.
        // Record it synchronously instead.
        requestSize = request.RequestedNodes.Length;
        Task task = ExecuteRequest(token, request);
#pragma warning disable 4014
        task.ContinueWith(t =>
#pragma warning restore 4014
        {
            if (t.IsFaulted)
            {
                if (_logger.IsWarn)
                {
                    _logger.Warn($"Failure when executing node data request {t.Exception}");
                }
            }

            Interlocked.Decrement(ref _pendingRequests);
        });
    }
    else
    {
        // Feed is momentarily dry - back off briefly before the caller retries.
        await Task.Delay(50);
        if (_logger.IsDebug)
        {
            _logger.Debug($"DIAG: 0 batches created with {_pendingRequests} pending requests, {_feed.TotalNodesPending} pending nodes");
        }
    }

    return requestSize;
}
/// <summary>
/// Converts pending additional-consumer requests into priority state sync batches
/// (one batch per consumer request, all items typed as Code nodes).
/// </summary>
private StateSyncBatch[] PrepareDataConsumerRequests()
{
    // Brief pause before polling the consumer again.
    Thread.Sleep(20);
    DataConsumerRequest[] requests = _additionalConsumer.PrepareRequests();
    if (requests.Length == 0)
    {
        return Array.Empty <StateSyncBatch>();
    }

    StateSyncBatch[] batches = new StateSyncBatch[requests.Length];
    for (int i = 0; i < batches.Length; i++)
    {
        DataConsumerRequest consumerRequest = requests[i];
        StateSyncBatch batch = new StateSyncBatch();
        batch.RequestedNodes = consumerRequest.Keys.Select(h => new StateSyncItem(h, NodeDataType.Code, 0, 0)).ToArray();
        batch.IsAdditionalDataConsumer = true;
        batch.RequiredPeerDifficulty = _additionalConsumer.RequiredPeerDifficulty;
        batch.ConsumerId = consumerRequest.ConsumerId;
        batches[i] = batch;
    }

    return batches;
}