/// <summary>
/// Wraps a sync item whose persistence is deferred until <see cref="Counter"/> outstanding
/// children have been saved. <paramref name="isAccount"/> marks account leaves so that the
/// saved-accounts metric can be bumped when the item finally completes.
/// </summary>
public DependentItem(StateSyncItem syncItem, byte[] value, int counter, bool isAccount = false)
{
    SyncItem = syncItem;
    Value = value;
    IsAccount = isAccount;
    Counter = counter;
}
/// <summary>
/// Pops the next sync item, draining streams strictly in priority order
/// (Stream0, then Stream1, then Stream2). Returns false when all three are empty.
/// </summary>
private bool TryTake(out StateSyncItem node)
{
    // Short-circuit: the first successful pop wins and assigns `node`.
    return Stream0.TryPop(out node)
           || Stream1.TryPop(out node)
           || Stream2.TryPop(out node);
}
/// <summary>
/// Derives a child priority from its parent. Non-state parents always yield 0;
/// state parents yield a value that decays with depth relative to the deepest
/// state level seen so far (tracked in <c>_maxStateLevel</c>).
/// </summary>
private float CalculatePriority(StateSyncItem parent)
{
    // Only state-trie nodes participate in prioritization.
    if (parent.NodeDataType != NodeDataType.State)
    {
        return 0;
    }

    // Track the deepest state level observed so the ratio below stays in range.
    if (parent.Level > _maxStateLevel)
    {
        _maxStateLevel = parent.Level;
    }

    // The formula is admittedly ad hoc, but it performs well in practice, so it is
    // kept as-is (a cleaner variant might be something like 2.5f - 2 * diff).
    float depthRatio = (float)parent.Level / _maxStateLevel;
    return Math.Max(1 - depthRatio, parent.Priority - depthRatio);
}
/// <summary>
/// Routes an item to one of the three priority streams by its priority value:
/// &lt; 0.5 goes to Stream0 (highest), &lt;= 1.5 to Stream1, everything else to Stream2.
/// </summary>
private void PushToSelectedStream(StateSyncItem stateSyncItem)
{
    float priority = stateSyncItem.Priority;
    ConcurrentStack<StateSyncItem> target =
        priority < 0.5f ? Stream0
        : priority <= 1.5f ? Stream1
        : Stream2;
    target.Push(stateSyncItem);
}
/// <summary>
/// Processes one batch of node-data responses: validates the batch shape, verifies each
/// response item against its requested hash, decodes trie nodes and schedules their
/// children for download, persists completed nodes, saves sync progress, and classifies
/// the peer's answer quality (OK / Emptish / BadQuality / NoData / ...).
/// </summary>
/// <param name="batch">The request/response pair being handled; removed from pending requests on exit.</param>
/// <returns>The classification result and the number of non-empty responses consumed.</returns>
public (NodeDataHandlerResult Result, int NodesConsumed) HandleResponse(StateSyncBatch batch)
{
    int requestLength = batch.RequestedNodes?.Length ?? 0;
    int responseLength = batch.Responses?.Length ?? 0;

    // Re-enqueues every requested item (missing = true skips the already-saved checks in AddNode)
    // so that a failed/unassigned batch is retried later.
    void AddAgainAllItems()
    {
        for (int i = 0; i < requestLength; i++)
        {
            AddNode(batch.RequestedNodes[i], null, "missing", true);
        }
    }

    try
    {
        // The stopwatch doubles as the mutual-exclusion object for the whole handler.
        lock (_handleWatch)
        {
            _handleWatch.Restart();
            bool requestWasMade = batch.AssignedPeer?.Current != null;
            NodeDataHandlerResult result;
            if (!requestWasMade)
            {
                // No peer ever picked this batch up - put everything back and report NotAssigned.
                AddAgainAllItems();
                if (_logger.IsTrace) _logger.Trace($"Batch was not assigned to any peer.");
                Interlocked.Increment(ref _notAssignedCount);
                result = NodeDataHandlerResult.NotAssigned;
                return (result, 0);
            }

            bool isMissingRequestData = batch.RequestedNodes == null;
            bool isMissingResponseData = batch.Responses == null;
            bool hasValidFormat = !isMissingRequestData && !isMissingResponseData;
            if (!hasValidFormat)
            {
                // Malformed batch - retry all items and report InvalidFormat.
                AddAgainAllItems();
                if (_logger.IsDebug) _logger.Debug($"Batch response had invalid format");
                Interlocked.Increment(ref _invalidFormatCount);
                result = NodeDataHandlerResult.InvalidFormat;
                return (result, 0);
            }

            if (_logger.IsTrace) _logger.Trace($"Received node data - {responseLength} items in response to {requestLength}");
            int nonEmptyResponses = 0;
            int invalidNodes = 0;
            for (int i = 0; i < batch.RequestedNodes.Length; i++)
            {
                StateSyncItem currentStateSyncItem = batch.RequestedNodes[i];

                /* if the peer has limit on number of requests in a batch then the response will possibly be
                 * shorter than the request */
                if (batch.Responses.Length < i + 1)
                {
                    AddNode(currentStateSyncItem, null, "missing", true);
                    continue;
                }

                /* if the peer does not have details of this particular node */
                byte[] currentResponseItem = batch.Responses[i];
                if (currentResponseItem == null)
                {
                    AddNode(batch.RequestedNodes[i], null, "missing", true);
                    continue;
                }

                /* node sent data that is not consistent with its hash - it happens surprisingly often */
                if (Keccak.Compute(currentResponseItem) != currentStateSyncItem.Hash)
                {
                    if (_logger.IsDebug) _logger.Debug($"Peer sent invalid data (batch {requestLength}->{responseLength}) of length {batch.Responses[i]?.Length} of type {batch.RequestedNodes[i].NodeDataType} at level {batch.RequestedNodes[i].Level} of type {batch.RequestedNodes[i].NodeDataType} Keccak({batch.Responses[i].ToHexString()}) != {batch.RequestedNodes[i].Hash}");
                    invalidNodes++;
                    continue;
                }

                nonEmptyResponses++;
                NodeDataType nodeDataType = currentStateSyncItem.NodeDataType;
                // Contract code is an opaque blob - no trie decoding, save directly.
                if (nodeDataType == NodeDataType.Code)
                {
                    SaveNode(currentStateSyncItem, currentResponseItem);
                    continue;
                }

                TrieNode trieNode = new TrieNode(NodeType.Unknown, new Rlp(currentResponseItem));
                trieNode.ResolveNode(null);
                switch (trieNode.NodeType)
                {
                    case NodeType.Unknown:
                        // RLP decoded but did not resolve to a known trie node type.
                        invalidNodes++;
                        if (_logger.IsError) _logger.Error($"Node {currentStateSyncItem.Hash} resolved to {nameof(NodeType.Unknown)}");
                        break;
                    case NodeType.Branch:
                        trieNode.BuildLookupTable();
                        // dependentBranch counts how many children must arrive before this branch can be saved.
                        DependentItem dependentBranch = new DependentItem(currentStateSyncItem, currentResponseItem, 0);
                        // Duplicate child hashes within one branch are only requested once.
                        HashSet<Keccak> alreadyProcessedChildHashes = new HashSet<Keccak>();
                        for (int childIndex = 0; childIndex < 16; childIndex++)
                        {
                            Keccak child = trieNode.GetChildHash(childIndex);
                            if (alreadyProcessedChildHashes.Contains(child))
                            {
                                continue;
                            }

                            alreadyProcessedChildHashes.Add(child);
                            if (child != null)
                            {
                                AddNodeResult addChildResult = AddNode(new StateSyncItem(child, nodeDataType, currentStateSyncItem.Level + 1, CalculatePriority(currentStateSyncItem)), dependentBranch, "branch child");
                                if (addChildResult != AddNodeResult.AlreadySaved)
                                {
                                    dependentBranch.Counter++;
                                }
                            }
                        }

                        // All children already available -> the branch itself is complete.
                        if (dependentBranch.Counter == 0)
                        {
                            SaveNode(currentStateSyncItem, currentResponseItem);
                        }

                        break;
                    case NodeType.Extension:
                        Keccak next = trieNode[0].Keccak;
                        if (next != null)
                        {
                            DependentItem dependentItem = new DependentItem(currentStateSyncItem, currentResponseItem, 1);
                            AddNodeResult addResult = AddNode(new StateSyncItem(next, nodeDataType, currentStateSyncItem.Level + 1, currentStateSyncItem.Priority), dependentItem, "extension child");
                            if (addResult == AddNodeResult.AlreadySaved)
                            {
                                SaveNode(currentStateSyncItem, currentResponseItem);
                            }
                        }
                        else
                        {
                            /* this happens when we have a short RLP format of the node
                             * that would not be stored as Keccak but full RLP*/
                            SaveNode(currentStateSyncItem, currentResponseItem);
                        }

                        break;
                    case NodeType.Leaf:
                        if (nodeDataType == NodeDataType.State)
                        {
                            // A state leaf is an account: it may depend on a code blob and a storage trie.
                            DependentItem dependentItem = new DependentItem(currentStateSyncItem, currentResponseItem, 0, true);
                            Account account = _accountDecoder.Decode(new Rlp.DecoderContext(trieNode.Value));
                            if (account.CodeHash != Keccak.OfAnEmptyString)
                            {
                                // prepare a branch without the code DB
                                // this only protects against being same as storage root?
                                if (account.CodeHash == account.StorageRoot)
                                {
                                    lock (_codesSameAsNodes)
                                    {
                                        _codesSameAsNodes.Add(account.CodeHash);
                                    }
                                }
                                else
                                {
                                    AddNodeResult addCodeResult = AddNode(new StateSyncItem(account.CodeHash, NodeDataType.Code, 0, 0), dependentItem, "code");
                                    if (addCodeResult != AddNodeResult.AlreadySaved)
                                    {
                                        dependentItem.Counter++;
                                    }
                                }
                            }

                            if (account.StorageRoot != Keccak.EmptyTreeHash)
                            {
                                AddNodeResult addStorageNodeResult = AddNode(new StateSyncItem(account.StorageRoot, NodeDataType.Storage, 0, 0), dependentItem, "storage");
                                if (addStorageNodeResult != AddNodeResult.AlreadySaved)
                                {
                                    dependentItem.Counter++;
                                }
                            }

                            // Account with no missing code/storage dependencies is fully synced.
                            if (dependentItem.Counter == 0)
                            {
                                Interlocked.Increment(ref _savedAccounts);
                                SaveNode(currentStateSyncItem, currentResponseItem);
                            }
                        }
                        else
                        {
                            SaveNode(currentStateSyncItem, currentResponseItem);
                        }

                        break;
                    default:
                        if (_logger.IsError) _logger.Error($"Unknown value {currentStateSyncItem.NodeDataType} of {nameof(NodeDataType)} at {currentStateSyncItem.Hash}");
                        invalidNodes++;
                        // continue here is equivalent to break - it ends this loop iteration.
                        continue;
                }
            }

            // Persist sync-progress counters; code DB lock is taken inside the state DB lock
            // so both commits happen under both locks.
            lock (_stateDbLock)
            {
                Rlp rlp = Rlp.Encode(
                    Rlp.Encode(_consumedNodesCount),
                    Rlp.Encode(_savedStorageCount),
                    Rlp.Encode(_savedStateCount),
                    Rlp.Encode(_savedNodesCount),
                    Rlp.Encode(_savedAccounts),
                    Rlp.Encode(_savedCode),
                    Rlp.Encode(_requestedNodesCount),
                    Rlp.Encode(_dbChecks),
                    Rlp.Encode(_stateWasThere),
                    Rlp.Encode(_stateWasNotThere),
                    Rlp.Encode(_dataSize));
                lock (_codeDbLock)
                {
                    _codeDb[_fastSyncProgressKey.Bytes] = rlp.Bytes;
                    _codeDb.Commit();
                    _stateDb.Commit();
                }
            }

            Interlocked.Add(ref _consumedNodesCount, nonEmptyResponses);
            if (_logger.IsTrace) _logger.Trace($"After handling response (non-empty responses {nonEmptyResponses}) of {batch.RequestedNodes.Length} from ({Stream0.Count}|{Stream1.Count}|{Stream2.Count}) nodes");

            /* magic formula is ratio of our desired batch size - 1024 to Geth max batch size 384 times some missing nodes ratio */
            // NOTE(review): if requestLength is 0 these decimal divisions throw DivideByZeroException -
            // verify whether an empty RequestedNodes array can reach this point.
            bool isEmptish = (decimal)nonEmptyResponses / requestLength < 384m / 1024m * 0.75m;
            if (isEmptish)
            {
                Interlocked.Increment(ref _emptishCount);
            }

            /* here we are very forgiving for Geth nodes that send bad data fast */
            bool isBadQuality = nonEmptyResponses > 64 && (decimal)invalidNodes / requestLength > 0.50m;
            if (isBadQuality)
            {
                Interlocked.Increment(ref _badQualityCount);
            }

            bool isEmpty = nonEmptyResponses == 0 && !isBadQuality;
            if (isEmpty)
            {
                if (_logger.IsWarn) _logger.Warn($"Peer sent no data in response to a request of length {batch.RequestedNodes.Length}");
                result = NodeDataHandlerResult.NoData;
                return (result, 0);
            }

            if (!isEmptish && !isBadQuality)
            {
                Interlocked.Increment(ref _okCount);
            }

            result = isEmptish ? NodeDataHandlerResult.Emptish : isBadQuality ? NodeDataHandlerResult.BadQuality : NodeDataHandlerResult.OK;

            // Periodic (roughly once per second) progress reporting.
            if (DateTime.UtcNow - _lastReportTime > TimeSpan.FromSeconds(1))
            {
                decimal requestedNodesPerSecond = 1000m * (_requestedNodesCount - _lastRequestedNodesCount) / (decimal)(DateTime.UtcNow - _lastReportTime).TotalMilliseconds;
                decimal savedNodesPerSecond = 1000m * (_savedNodesCount - _lastSavedNodesCount) / (decimal)(DateTime.UtcNow - _lastReportTime).TotalMilliseconds;
                _lastSavedNodesCount = _savedNodesCount;
                _lastRequestedNodesCount = _requestedNodesCount;
                _lastReportTime = DateTime.UtcNow;
                if (_logger.IsInfo) _logger.Info($"SNPS: {savedNodesPerSecond,6:F0} | RNPS: {requestedNodesPerSecond,6:F0} | Saved nodes {_savedNodesCount} / requested {_requestedNodesCount} ({(decimal) _savedNodesCount / _requestedNodesCount:P2}), saved accounts {_savedAccounts}, size: {(decimal)_dataSize / 1000 / 1000:F2}MB, enqueued nodes {Stream0.Count:D5}|{Stream1.Count:D5}|{Stream2.Count:D5}");
                if (_logger.IsInfo) _logger.Info($"AVTIH: {_averageTimeInHandler:F2}ms | P: {_pendingRequests.Count} | Request results - OK: {(decimal) _okCount / TotalRequestsCount:p2}, Emptish: {(decimal) _emptishCount / TotalRequestsCount:p2}, BadQuality: {(decimal) _badQualityCount / TotalRequestsCount:p2}, InvalidFormat: {(decimal) _invalidFormatCount / TotalRequestsCount:p2}, NotAssigned {(decimal) _notAssignedCount / TotalRequestsCount:p2}");
                if (_logger.IsTrace) _logger.Trace($"Requested {_requestedNodesCount}, consumed {_consumedNodesCount}, missed {_requestedNodesCount - _consumedNodesCount}, {_savedCode} contracts, {_savedStateCount - _savedAccounts} states, {_savedStorageCount} storage, DB checks {_stateWasThere}/{_stateWasNotThere + _stateWasThere} cached({_checkWasCached}+{_checkWasInDependencies})");
                if (_logger.IsTrace) _logger.Trace($"Consume : {(decimal) _consumedNodesCount / _requestedNodesCount:p2}, Save : {(decimal) _savedNodesCount / _requestedNodesCount:p2}, DB Reads : {(decimal) _dbChecks / _requestedNodesCount:p2}");
            }

            long total = _handleWatch.ElapsedMilliseconds + _networkWatch.ElapsedMilliseconds;
            if (total != 0)
            {
                // calculate averages
                if (_logger.IsTrace) _logger.Trace($"Prepare batch {_networkWatch.ElapsedMilliseconds}ms ({(decimal) _networkWatch.ElapsedMilliseconds / total:P0}) - Handle {_handleWatch.ElapsedMilliseconds}ms ({(decimal) _handleWatch.ElapsedMilliseconds / total:P0})");
            }

            // Running average of handler time across all processed requests.
            _averageTimeInHandler = (_averageTimeInHandler * (ProcessedRequestsCount - 1) + _handleWatch.ElapsedMilliseconds) / ProcessedRequestsCount;
            return (result, nonEmptyResponses);
        }
    }
    finally
    {
        _handleWatch.Stop();
        // The batch must be tracked as pending; failure to remove indicates bookkeeping corruption.
        if (!_pendingRequests.TryRemove(batch, out _))
        {
            _logger.Error("Cannot remove pending request");
        }
    }
}
/// <summary>
/// Persists a fully-resolved node to the appropriate database (state DB for State/Storage,
/// code DB for Code), updates the matching counters, performs root-completion sanity checks,
/// and finally triggers <c>RunChainReaction</c> so items depending on this hash can complete.
/// </summary>
private void SaveNode(StateSyncItem syncItem, byte[] data)
{
    if (_logger.IsTrace) _logger.Trace($"SAVE {new string('+', syncItem.Level * 2)}{syncItem.NodeDataType.ToString().ToUpperInvariant()} {syncItem.Hash}");
    Interlocked.Increment(ref _savedNodesCount);
    switch (syncItem.NodeDataType)
    {
        case NodeDataType.State:
        {
            Interlocked.Increment(ref _savedStateCount);
            lock (_stateDbLock)
            {
                Interlocked.Add(ref _dataSize, data.Length);
                _stateDb.Set(syncItem.Hash, data);
            }

            break;
        }
        case NodeDataType.Storage:
        {
            // If this hash was earlier recorded as "code hash equal to storage root",
            // the same bytes are also written to the code DB, then the marker is cleared.
            lock (_codesSameAsNodes)
            {
                if (_codesSameAsNodes.Contains(syncItem.Hash))
                {
                    lock (_codeDbLock)
                    {
                        Interlocked.Add(ref _dataSize, data.Length);
                        _codeDb.Set(syncItem.Hash, data);
                    }

                    _codesSameAsNodes.Remove(syncItem.Hash);
                }
            }

            Interlocked.Increment(ref _savedStorageCount);
            lock (_stateDbLock)
            {
                Interlocked.Add(ref _dataSize, data.Length);
                _stateDb.Set(syncItem.Hash, data);
            }

            break;
        }
        case NodeDataType.Code:
        {
            Interlocked.Increment(ref _savedCode);
            lock (_codeDbLock)
            {
                Interlocked.Add(ref _dataSize, data.Length);
                _codeDb.Set(syncItem.Hash, data);
            }

            break;
        }
    }

    if (syncItem.IsRoot)
    {
        if (_logger.IsInfo) _logger.Info($"Saving root {syncItem.Hash} {syncItem.Level}");
        // When the root is saved the whole trie should be complete - any leftover
        // dependencies or pending nodes indicate possible corruption.
        lock (_dependencies)
        {
            if (_dependencies.Count != 0)
            {
                if (_logger.IsError) _logger.Error($"POSSIBLE FAST SYNC CORRUPTION | Dependencies hanging after the root node saved - count: {_dependencies.Count}, first: {_dependencies.Keys.First()}");
            }
        }

        if (TotalNodesPending != 0)
        {
            if (_logger.IsError) _logger.Error($"POSSIBLE FAST SYNC CORRUPTION | Nodes left after the root node saved - count: {TotalNodesPending}");
        }
    }

    RunChainReaction(syncItem.Hash);
}
/// <summary>
/// Schedules a node for download unless it is already saved or already requested.
/// Checks, in order: the in-memory already-saved cache, the relevant database, and the
/// dependency map. Registers <paramref name="dependentItem"/> (when provided) so the parent
/// completes once this node is saved. With <paramref name="missing"/> = true (a retry of an
/// unanswered request) all checks are skipped and the item is re-queued directly.
/// </summary>
/// <returns>Added, AlreadySaved, or AlreadyRequested.</returns>
private AddNodeResult AddNode(StateSyncItem syncItem, DependentItem dependentItem, string reason, bool missing = false)
{
    if (!missing)
    {
        // Fast path: cache of hashes recently confirmed to be in the DB.
        if (_alreadySaved.Get(syncItem.Hash) != null)
        {
            Interlocked.Increment(ref _checkWasCached);
            if (_logger.IsTrace) _logger.Trace($"Node already in the DB - skipping {syncItem.Hash}");
            return AddNodeResult.AlreadySaved;
        }

        // Code lives in the code DB; everything else in the state DB - pick the matching lock.
        object lockToTake = syncItem.NodeDataType == NodeDataType.Code ? _codeDbLock : _stateDbLock;
        lock (lockToTake)
        {
            ISnapshotableDb dbToCheck = syncItem.NodeDataType == NodeDataType.Code ? _codeDb : _stateDb;
            Interlocked.Increment(ref _dbChecks);
            bool keyExists = dbToCheck.KeyExists(syncItem.Hash);
            if (keyExists)
            {
                if (_logger.IsTrace) _logger.Trace($"Node already in the DB - skipping {syncItem.Hash}");
                _alreadySaved.Set(syncItem.Hash, _nullObject);
                Interlocked.Increment(ref _stateWasThere);
                return AddNodeResult.AlreadySaved;
            }

            Interlocked.Increment(ref _stateWasNotThere);
        }

        bool isAlreadyRequested;
        lock (_dependencies)
        {
            // The check and the dependency registration happen under one lock so a
            // concurrent save cannot slip in between them.
            isAlreadyRequested = _dependencies.ContainsKey(syncItem.Hash);
            if (dependentItem != null)
            {
                if (_logger.IsTrace) _logger.Trace($"Adding dependency {syncItem.Hash} -> {dependentItem.SyncItem.Hash}");
                AddDependency(syncItem.Hash, dependentItem);
            }
        }

        /* same items can have same hashes and we only need them once
         * there is an issue when we have an item, we add it to dependencies, then we request it and the request times out
         * and we never request it again because it was already on the dependencies list */
        if (isAlreadyRequested)
        {
            Interlocked.Increment(ref _checkWasInDependencies);
            if (_logger.IsTrace) _logger.Trace($"Node already requested - skipping {syncItem.Hash}");
            return AddNodeResult.AlreadyRequested;
        }
    }

    PushToSelectedStream(syncItem);
    if (_logger.IsTrace) _logger.Trace($"Added a node {syncItem.Hash} - {reason}");
    return AddNodeResult.Added;
}