/// <summary>
/// Asks the fast blocks feed for the next batch to download and traces
/// the current pending request count.
/// </summary>
/// <returns>The batch built by the feed; callers treat null as "nothing to request right now".</returns>
private FastBlocksBatch PrepareRequest()
{
    FastBlocksBatch batch = _fastBlocksFeed.PrepareRequest();
    if (_logger.IsTrace)
    {
        _logger.Trace($"Pending requests {_pendingRequests}");
    }

    return batch;
}
/// <summary>
/// Validates the hash and seal of every header in the batch response.
/// On the first invalid header the responding peer is reported to the pool
/// and the whole headers response is discarded (set to null) so the batch
/// will be re-requested; validation errors also discard the response.
/// </summary>
/// <param name="cancellation">Checked between headers so a long validation run can be abandoned.</param>
/// <param name="batch">Batch whose <c>Headers.Response</c> is validated in place.</param>
private void ValidateHeaders(CancellationToken cancellation, FastBlocksBatch batch)
{
    batch.MarkValidation();
    try
    {
        if (_logger.IsTrace)
        {
            _logger.Trace("Starting block validation");
        }

        BlockHeader[] headers = batch.Headers.Response;
        for (int i = 0; i < headers.Length; i++)
        {
            if (cancellation.IsCancellationRequested)
            {
                // fixed typo in the original message ("fom")
                if (_logger.IsTrace)
                {
                    _logger.Trace("Returning from seal validation");
                }

                return;
            }

            BlockHeader header = headers[i];
            if (header == null)
            {
                // responses may contain gaps - skip missing entries
                continue;
            }

            bool isHashValid = _blockValidator.ValidateHash(header);
            bool isSealValid = _sealValidator.ValidateSeal(header);
            if (!(isHashValid && isSealValid))
            {
                if (_logger.IsTrace)
                {
                    _logger.Trace("One of the blocks is invalid");
                }

                _syncPeerPool.ReportInvalid(batch.Allocation?.Current);
                batch.Headers.Response = null;

                // the response is already discarded - no point validating the
                // remaining headers or reporting the same peer again
                return;
            }
        }
    }
    catch (Exception ex)
    {
        if (_logger.IsError)
        {
            _logger.Error($"Error when validating headers of {batch}", ex);
        }

        batch.Headers.Response = null;
    }
}
/// <summary>
/// Main download loop: keeps preparing and dispatching fast blocks batches
/// until there are no pending requests, the feed reports finished, and a few
/// consecutive empty prepares were seen (guards against stopping on a transient gap).
/// </summary>
/// <param name="token">Cancellation token that exits the loop at the next iteration.</param>
private async Task KeepSyncing(CancellationToken token)
{
    int finalizeSignalsCount = 0;
    do
    {
        if (token.IsCancellationRequested)
        {
            return;
        }

        FastBlocksBatch request = PrepareRequest();
        if (request != null)
        {
            request.Allocation = await _syncPeerPool.BorrowAsync(new FastBlocksSelectionStrategy(request.MinNumber, request.Prioritized), "fast blocks", 1000);
            Interlocked.Increment(ref _pendingRequests);
            Task task = ExecuteRequest(token, request);
#pragma warning disable 4014
            task.ContinueWith(t =>
#pragma warning restore 4014
            {
                // observe faults of the fire-and-forget request so they do not
                // surface later as UnobservedTaskException
                if (t.IsFaulted && _logger.IsError)
                {
                    _logger.Error("Fast blocks request failed", t.Exception);
                }

                Interlocked.Decrement(ref _pendingRequests);
            });
        }
        else
        {
            // nothing to request right now - count the empty signal and back off briefly
            finalizeSignalsCount++;
            await Task.Delay(10);
            if (_logger.IsDebug)
            {
                _logger.Debug($"0 batches created with {_pendingRequests} pending requests.");
            }
        }
    } while (_pendingRequests != 0 || finalizeSignalsCount < 3 || !_fastBlocksFeed.IsFinished);

    if (_logger.IsInfo)
    {
        _logger.Info($"Finished download with {_pendingRequests} pending requests.");
    }
}
/// <summary>
/// Borrows a sync peer, sends one fast blocks batch (headers / bodies / receipts),
/// stores the raw response on the batch, hands the batch to the feed, and always
/// frees the borrowed allocation in the finally block.
/// </summary>
/// <param name="token">Cancellation token passed to the peer download calls.</param>
/// <param name="batch">Batch describing what to request; its Response fields are filled in here.</param>
private async Task ExecuteRequest(CancellationToken token, FastBlocksBatch batch)
{
    // borrow a peer for this batch; low priority unless the batch is prioritized,
    // and never replace an existing allocation mid-request
    SyncPeerAllocation nodeSyncAllocation = _syncPeerPool.Borrow(BorrowOptions.DoNotReplace | (batch.Prioritized ? BorrowOptions.None : BorrowOptions.LowPriority), "fast blocks", batch.MinNumber);

    // put to sleep peers whose head is far below what this batch needs
    // (1024 appears to be a tolerance margin - TODO confirm intent)
    foreach (PeerInfo peerInfo in _syncPeerPool.UsefulPeers)
    {
        if (peerInfo.HeadNumber < Math.Max(0, (batch.MinNumber ?? 0) - 1024))
        {
            if (_logger.IsDebug)
            {
                _logger.Debug($"Made {peerInfo} sleep for a while - no min number satisfied");
            }

            _syncPeerPool.ReportNoSyncProgress(peerInfo);
        }
    }

    try
    {
        ISyncPeer peer = nodeSyncAllocation?.Current?.SyncPeer;
        batch.Allocation = nodeSyncAllocation;
        if (peer != null)
        {
            batch.MarkSent();
            switch (batch.BatchType)
            {
                case FastBlocksBatchType.Headers:
                {
                    Task<BlockHeader[]> getHeadersTask = peer.GetBlockHeaders(batch.Headers.StartNumber, batch.Headers.RequestSize, 0, token);
                    await getHeadersTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Headers.Response = getHeadersTask.Result;
                                // headers are validated right away; invalid responses are nulled inside
                                ValidateHeaders(token, batch);
                            }
                            else
                            {
                                // NOTE(review): this branch also fires when the task was
                                // CANCELED, reporting the peer invalid for a cancellation -
                                // confirm this is intended
                                _syncPeerPool.ReportInvalid(batch.Allocation);
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Bodies:
                {
                    Task<BlockBody[]> getBodiesTask = peer.GetBlocks(batch.Bodies.Request, token);
                    await getBodiesTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Bodies.Response = getBodiesTask.Result;
                            }
                            else
                            {
                                _syncPeerPool.ReportInvalid(batch.Allocation);
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Receipts:
                {
                    Task<TxReceipt[][]> getReceiptsTask = peer.GetReceipts(batch.Receipts.Request, token);
                    await getReceiptsTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                if (batch.RequestTime > 1000)
                                {
                                    if (_logger.IsDebug)
                                    {
                                        _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                                    }
                                }

                                batch.Receipts.Response = getReceiptsTask.Result;
                            }
                            else
                            {
                                _syncPeerPool.ReportInvalid(batch.Allocation);
                            }
                        }
                    );
                    break;
                }

                default:
                {
                    throw new InvalidOperationException($"{nameof(FastBlocksBatchType)} is {batch.BatchType}");
                }
            }
        }

        // hand whatever we have (possibly an empty batch, when no peer was available
        // or the download failed) to the feed so it can reschedule the work
        (BlocksDataHandlerResult Result, int ItemsSynced) result = (BlocksDataHandlerResult.InvalidFormat, 0);
        try
        {
            if (batch.Bodies?.Response == null && batch.Headers?.Response == null && batch.Receipts?.Response == null)
            {
                // to avoid uncontrolled loop in case of a code error
                await Task.Delay(10);
            }

            result = _fastBlocksFeed.HandleResponse(batch);
        }
        catch (Exception e)
        {
            // possibly clear the response and handle empty response batch here (to avoid missing parts)
            if (_logger.IsError)
            {
                _logger.Error($"Error when handling response", e);
            }
        }

        // _downloadedHeaders counts items synced across all batch types - TODO confirm naming
        Interlocked.Add(ref _downloadedHeaders, result.ItemsSynced);
        if (result.ItemsSynced == 0 && peer != null)
        {
            // a peer that produced nothing useful is marked as not progressing
            _syncPeerPool.ReportNoSyncProgress(nodeSyncAllocation);
        }
    }
    finally
    {
        // always return the allocation, even when the request faulted
        if (nodeSyncAllocation != null)
        {
            _syncPeerPool.Free(nodeSyncAllocation);
        }
    }
}
/// <summary>
/// Main download loop (semaphore-throttled variant): waits for a parallelism slot,
/// prepares a batch, and dispatches it fire-and-forget; the slot is released when
/// the request finishes. The loop ends when there are no pending requests, the feed
/// reports finished, and a few consecutive empty prepares were observed.
/// </summary>
/// <param name="token">Cancellation token that exits the loop and aborts the semaphore wait.</param>
private async Task KeepSyncing(CancellationToken token)
{
    int finalizeSignalsCount = 0;
    do
    {
        if (token.IsCancellationRequested)
        {
            return;
        }

        await UpdateParallelism();
        if (_logger.IsTrace)
        {
            _logger.Trace("Waiting for semaphore");
        }

        if (!await _semaphore.WaitAsync(1000, token))
        {
            // timed out waiting for a slot - re-check cancellation and parallelism
            if (_logger.IsTrace)
            {
                _logger.Trace("Failed semaphore wait");
            }

            continue;
        }

        if (_logger.IsTrace)
        {
            _logger.Trace("Successful semaphore wait");
        }

        FastBlocksBatch request = PrepareRequest();
        if (request != null)
        {
            Interlocked.Increment(ref _pendingRequests);
            Task task = ExecuteRequest(token, request);
#pragma warning disable 4014
            task.ContinueWith(t =>
#pragma warning restore 4014
            {
                // observe faults of the fire-and-forget request so they do not
                // surface later as UnobservedTaskException
                if (t.IsFaulted && _logger.IsError)
                {
                    _logger.Error("Fast blocks request failed", t.Exception);
                }

                Interlocked.Decrement(ref _pendingRequests);
                _semaphore.Release();
            });
        }
        else
        {
            // nothing to request - return the slot, count the empty signal and back off
            finalizeSignalsCount++;
            await Task.Delay(10);
            _semaphore.Release();
            if (_logger.IsDebug)
            {
                _logger.Debug($"DIAG: 0 batches created with {_pendingRequests} pending requests.");
            }
        }
    } while (_pendingRequests != 0 || finalizeSignalsCount < 3 || !_fastBlocksFeed.IsFinished);

    if (_logger.IsInfo)
    {
        _logger.Info($"Finished download with {_pendingRequests} pending requests and {_lastUsefulPeerCount} useful peers.");
    }
}
/// <summary>
/// Sends one fast blocks batch (headers / bodies / receipts) to the peer already
/// allocated on the batch, stores the raw response on the batch, and hands it to
/// the feed; the allocation is always freed in the finally block.
/// Fix: the failure branches dereferenced <c>t.Exception.InnerExceptions</c>, but
/// <c>Task.Exception</c> is null when the task was CANCELED (not faulted), which
/// crashed the continuation with a NullReferenceException; failures are now
/// inspected null-safely via a shared local helper.
/// </summary>
/// <param name="token">Cancellation token passed to the peer download calls.</param>
/// <param name="batch">Batch to execute; must carry a sync peer allocation.</param>
private async Task ExecuteRequest(CancellationToken token, FastBlocksBatch batch)
{
    SyncPeerAllocation syncPeerAllocation = batch.Allocation;
    try
    {
        // put to sleep peers whose head is far below what this batch needs
        foreach (PeerInfo usefulPeer in _syncPeerPool.UsefulPeers)
        {
            if (usefulPeer.HeadNumber < Math.Max(0, (batch.MinNumber ?? 0) - 1024))
            {
                if (_logger.IsDebug)
                {
                    _logger.Debug($"Made {usefulPeer} sleep for a while - no min number satisfied");
                }

                _syncPeerPool.ReportNoSyncProgress(usefulPeer);
            }
        }

        // reports a failed or canceled download task against the allocated peer;
        // null-safe: Exception is null for canceled tasks, which is not a timeout
        void ReportFailed(Task failedTask, string requestType)
        {
            if (failedTask.Exception != null && failedTask.Exception.InnerExceptions.Any(e => e is TimeoutException))
            {
                _syncPeerPool.ReportInvalid(batch.Allocation, $"{requestType} -> timeout");
            }
            else
            {
                _syncPeerPool.ReportInvalid(batch.Allocation, $"{requestType} -> {failedTask.Exception}");
            }
        }

        // logs a diagnostic when the peer took over a second to answer
        void LogIfSlow()
        {
            if (batch.RequestTime > 1000)
            {
                if (_logger.IsDebug)
                {
                    _logger.Debug($"{batch} - peer is slow {batch.RequestTime:F2}");
                }
            }
        }

        PeerInfo peerInfo = syncPeerAllocation?.Current;
        ISyncPeer peer = peerInfo?.SyncPeer;
        if (peer != null)
        {
            batch.MarkSent();
            switch (batch.BatchType)
            {
                case FastBlocksBatchType.Headers:
                {
                    Task<BlockHeader[]> getHeadersTask = peer.GetBlockHeaders(batch.Headers.StartNumber, batch.Headers.RequestSize, 0, token);
                    await getHeadersTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                LogIfSlow();
                                batch.Headers.Response = getHeadersTask.Result;
                            }
                            else
                            {
                                ReportFailed(t, "headers");
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Bodies:
                {
                    Task<BlockBody[]> getBodiesTask = peer.GetBlockBodies(batch.Bodies.Request, token);
                    await getBodiesTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                LogIfSlow();
                                batch.Bodies.Response = getBodiesTask.Result;
                            }
                            else
                            {
                                ReportFailed(t, "bodies");
                            }
                        }
                    );
                    break;
                }

                case FastBlocksBatchType.Receipts:
                {
                    Task<TxReceipt[][]> getReceiptsTask = peer.GetReceipts(batch.Receipts.Request, token);
                    await getReceiptsTask.ContinueWith(
                        t =>
                        {
                            if (t.IsCompletedSuccessfully)
                            {
                                LogIfSlow();
                                batch.Receipts.Response = getReceiptsTask.Result;
                            }
                            else
                            {
                                ReportFailed(t, "receipts");
                            }
                        }
                    );
                    break;
                }

                default:
                {
                    throw new InvalidOperationException($"{nameof(FastBlocksBatchType)} is {batch.BatchType}");
                }
            }
        }

        // hand whatever we have (possibly an empty batch, when no peer was available
        // or the download failed) to the feed so it can reschedule the work
        (BlocksDataHandlerResult Result, int ItemsSynced) result = (BlocksDataHandlerResult.InvalidFormat, 0);
        try
        {
            result = _fastBlocksFeed.HandleResponse(batch);
        }
        catch (Exception e)
        {
            // possibly clear the response and handle empty response batch here (to avoid missing parts)
            // this practically corrupts sync
            if (_logger.IsError)
            {
                _logger.Error($"Error when handling response", e);
            }
        }

        Interlocked.Add(ref _downloadedHeaders, result.ItemsSynced);
        if (result.ItemsSynced == 0 && peer != null)
        {
            // a peer that produced nothing useful is marked as not progressing
            _syncPeerPool.ReportNoSyncProgress(peerInfo);
        }
    }
    finally
    {
        // always return the allocation, even when the request faulted
        if (syncPeerAllocation != null)
        {
            _syncPeerPool.Free(syncPeerAllocation);
        }
    }
}