Example No. 1
        private void AddDependency(Keccak dependency, DependentItem dependentItem)
        {
            lock (_dependencies)
            {
                if (!_dependencies.ContainsKey(dependency))
                {
                    _dependencies[dependency] = new HashSet<DependentItem>(new DependentItemComparer());
                }

                _dependencies[dependency].Add(dependentItem);
            }
        }
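
The dictionary above maps each missing node hash to the set of items waiting on it; the custom comparer is what lets the HashSet collapse duplicate dependents. The real DependentItemComparer is not shown here, but a minimal sketch consistent with how DependentItem is used elsewhere (it exposes a SyncItem, see Example No. 3) could compare by the wrapped item's hash:

        // Hypothetical sketch only - the actual comparer is not part of this listing.
        private class DependentItemComparer : IEqualityComparer<DependentItem>
        {
            public bool Equals(DependentItem x, DependentItem y)
            {
                // two dependent items are treated as equal when they wait on behalf of the same sync item
                return x?.SyncItem.Hash == y?.SyncItem.Hash;
            }

            public int GetHashCode(DependentItem obj)
            {
                return obj?.SyncItem.Hash.GetHashCode() ?? 0;
            }
        }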
Example No. 2
        public (NodeDataHandlerResult Result, int NodesConsumed) HandleResponse(StateSyncBatch batch)
        {
            int requestLength  = batch.RequestedNodes?.Length ?? 0;
            int responseLength = batch.Responses?.Length ?? 0;

            void AddAgainAllItems()
            {
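                // requeue every node from the failed or partial batch so it can be requested from another peer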
                for (int i = 0; i < requestLength; i++)
                {
                    AddNode(batch.RequestedNodes[i], null, "missing", true);
                }
            }

            try
            {
                lock (_handleWatch)
                {
                    _handleWatch.Restart();

                    bool requestWasMade = batch.AssignedPeer?.Current != null;
                    NodeDataHandlerResult result;
                    if (!requestWasMade)
                    {
                        AddAgainAllItems();
                        if (_logger.IsTrace)
                        {
                            _logger.Trace($"Batch was not assigned to any peer.");
                        }
                        Interlocked.Increment(ref _notAssignedCount);
                        result = NodeDataHandlerResult.NotAssigned;
                        return (result, 0);
                    }

                    bool isMissingRequestData  = batch.RequestedNodes == null;
                    bool isMissingResponseData = batch.Responses == null;
                    bool hasValidFormat        = !isMissingRequestData && !isMissingResponseData;

                    if (!hasValidFormat)
                    {
                        AddAgainAllItems();
                        if (_logger.IsDebug)
                        {
                            _logger.Debug($"Batch response had invalid format");
                        }
                        Interlocked.Increment(ref _invalidFormatCount);
                        result = NodeDataHandlerResult.InvalidFormat;
                        return (result, 0);
                    }

                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Received node data - {responseLength} items in response to {requestLength}");
                    }
                    int nonEmptyResponses = 0;
                    int invalidNodes      = 0;
                    for (int i = 0; i < batch.RequestedNodes.Length; i++)
                    {
                        StateSyncItem currentStateSyncItem = batch.RequestedNodes[i];

                        /* if the peer has limit on number of requests in a batch then the response will possibly be
                         * shorter than the request */
                        if (batch.Responses.Length < i + 1)
                        {
                            AddNode(currentStateSyncItem, null, "missing", true);
                            continue;
                        }

                        /* if the peer does not have details of this particular node */
                        byte[] currentResponseItem = batch.Responses[i];
                        if (currentResponseItem == null)
                        {
                            AddNode(batch.RequestedNodes[i], null, "missing", true);
                            continue;
                        }

                        /* node sent data that is not consistent with its hash - it happens surprisingly often */
                        if (Keccak.Compute(currentResponseItem) != currentStateSyncItem.Hash)
                        {
                            if (_logger.IsDebug)
                            {
                                _logger.Debug($"Peer sent invalid data (batch {requestLength}->{responseLength}) of length {batch.Responses[i]?.Length} of type {batch.RequestedNodes[i].NodeDataType} at level {batch.RequestedNodes[i].Level} of type {batch.RequestedNodes[i].NodeDataType} Keccak({batch.Responses[i].ToHexString()}) != {batch.RequestedNodes[i].Hash}");
                            }
                            invalidNodes++;
                            continue;
                        }

                        nonEmptyResponses++;
                        NodeDataType nodeDataType = currentStateSyncItem.NodeDataType;
                        if (nodeDataType == NodeDataType.Code)
                        {
                            SaveNode(currentStateSyncItem, currentResponseItem);
                            continue;
                        }

                        TrieNode trieNode = new TrieNode(NodeType.Unknown, new Rlp(currentResponseItem));
                        trieNode.ResolveNode(null);
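                        // resolving decodes the RLP payload, so the switch below sees the actual node type (branch/extension/leaf)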
                        switch (trieNode.NodeType)
                        {
                        case NodeType.Unknown:
                            invalidNodes++;
                            if (_logger.IsError)
                            {
                                _logger.Error($"Node {currentStateSyncItem.Hash} resolved to {nameof(NodeType.Unknown)}");
                            }
                            break;

                        case NodeType.Branch:
                            trieNode.BuildLookupTable();
                            DependentItem    dependentBranch             = new DependentItem(currentStateSyncItem, currentResponseItem, 0);
                            HashSet <Keccak> alreadyProcessedChildHashes = new HashSet <Keccak>();
                            for (int childIndex = 0; childIndex < 16; childIndex++)
                            {
                                Keccak child = trieNode.GetChildHash(childIndex);
                                if (alreadyProcessedChildHashes.Contains(child))
                                {
                                    continue;
                                }

                                alreadyProcessedChildHashes.Add(child);

                                if (child != null)
                                {
                                    AddNodeResult addChildResult = AddNode(new StateSyncItem(child, nodeDataType, currentStateSyncItem.Level + 1, CalculatePriority(currentStateSyncItem)), dependentBranch, "branch child");
                                    if (addChildResult != AddNodeResult.AlreadySaved)
                                    {
                                        dependentBranch.Counter++;
                                    }
                                }
                            }

                            if (dependentBranch.Counter == 0)
                            {
                                SaveNode(currentStateSyncItem, currentResponseItem);
                            }

                            break;

                        case NodeType.Extension:
                            Keccak next = trieNode[0].Keccak;
                            if (next != null)
                            {
                                DependentItem dependentItem = new DependentItem(currentStateSyncItem, currentResponseItem, 1);
                                AddNodeResult addResult     = AddNode(new StateSyncItem(next, nodeDataType, currentStateSyncItem.Level + 1, currentStateSyncItem.Priority), dependentItem, "extension child");
                                if (addResult == AddNodeResult.AlreadySaved)
                                {
                                    SaveNode(currentStateSyncItem, currentResponseItem);
                                }
                            }
                            else
                            {
                                /* this happens when we have a short RLP format of the node
                                 * that would not be stored as Keccak but full RLP*/
                                SaveNode(currentStateSyncItem, currentResponseItem);
                            }

                            break;

                        case NodeType.Leaf:
                            if (nodeDataType == NodeDataType.State)
                            {
                                DependentItem dependentItem = new DependentItem(currentStateSyncItem, currentResponseItem, 0, true);
                                Account       account       = _accountDecoder.Decode(new Rlp.DecoderContext(trieNode.Value));
                                if (account.CodeHash != Keccak.OfAnEmptyString)
                                {
                                    // prepare a branch without the code DB
                                    // this only protects against being same as storage root?
                                    if (account.CodeHash == account.StorageRoot)
                                    {
                                        lock (_codesSameAsNodes)
                                        {
                                            _codesSameAsNodes.Add(account.CodeHash);
                                        }
                                    }
                                    else
                                    {
                                        AddNodeResult addCodeResult = AddNode(new StateSyncItem(account.CodeHash, NodeDataType.Code, 0, 0), dependentItem, "code");
                                        if (addCodeResult != AddNodeResult.AlreadySaved)
                                        {
                                            dependentItem.Counter++;
                                        }
                                    }
                                }

                                if (account.StorageRoot != Keccak.EmptyTreeHash)
                                {
                                    AddNodeResult addStorageNodeResult = AddNode(new StateSyncItem(account.StorageRoot, NodeDataType.Storage, 0, 0), dependentItem, "storage");
                                    if (addStorageNodeResult != AddNodeResult.AlreadySaved)
                                    {
                                        dependentItem.Counter++;
                                    }
                                }

                                if (dependentItem.Counter == 0)
                                {
                                    Interlocked.Increment(ref _savedAccounts);
                                    SaveNode(currentStateSyncItem, currentResponseItem);
                                }
                            }
                            else
                            {
                                SaveNode(currentStateSyncItem, currentResponseItem);
                            }

                            break;

                        default:
                            if (_logger.IsError)
                            {
                                _logger.Error($"Unknown value {currentStateSyncItem.NodeDataType} of {nameof(NodeDataType)} at {currentStateSyncItem.Hash}");
                            }
                            invalidNodes++;
                            continue;
                        }
                    }

                    lock (_stateDbLock)
                    {
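                        // persist the sync progress counters under _fastSyncProgressKey; both DBs are committed together below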
                        Rlp rlp = Rlp.Encode(
                            Rlp.Encode(_consumedNodesCount),
                            Rlp.Encode(_savedStorageCount),
                            Rlp.Encode(_savedStateCount),
                            Rlp.Encode(_savedNodesCount),
                            Rlp.Encode(_savedAccounts),
                            Rlp.Encode(_savedCode),
                            Rlp.Encode(_requestedNodesCount),
                            Rlp.Encode(_dbChecks),
                            Rlp.Encode(_stateWasThere),
                            Rlp.Encode(_stateWasNotThere),
                            Rlp.Encode(_dataSize));
                        lock (_codeDbLock)
                        {
                            _codeDb[_fastSyncProgressKey.Bytes] = rlp.Bytes;
                            _codeDb.Commit();
                            _stateDb.Commit();
                        }
                    }

                    Interlocked.Add(ref _consumedNodesCount, nonEmptyResponses);

                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"After handling response (non-empty responses {nonEmptyResponses}) of {batch.RequestedNodes.Length} from ({Stream0.Count}|{Stream1.Count}|{Stream2.Count}) nodes");
                    }

                    /* magic formula: ratio of the Geth max batch size (384) to our desired batch size (1024), times a 0.75 missing-nodes factor */
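                    /* e.g. for a 1024-item request: 384/1024 * 0.75 = 0.28125, so fewer than ~288 non-empty responses counts as emptish */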
                    bool isEmptish = (decimal)nonEmptyResponses / requestLength < 384m / 1024m * 0.75m;
                    if (isEmptish)
                    {
                        Interlocked.Increment(ref _emptishCount);
                    }

                    /* here we are very forgiving for Geth nodes that send bad data fast */
                    bool isBadQuality = nonEmptyResponses > 64 && (decimal)invalidNodes / requestLength > 0.50m;
                    if (isBadQuality)
                    {
                        Interlocked.Increment(ref _badQualityCount);
                    }

                    bool isEmpty = nonEmptyResponses == 0 && !isBadQuality;
                    if (isEmpty)
                    {
                        if (_logger.IsWarn)
                        {
                            _logger.Warn($"Peer sent no data in response to a request of length {batch.RequestedNodes.Length}");
                        }
                        result = NodeDataHandlerResult.NoData;
                        return (result, 0);
                    }

                    if (!isEmptish && !isBadQuality)
                    {
                        Interlocked.Increment(ref _okCount);
                    }

                    result = isEmptish
                        ? NodeDataHandlerResult.Emptish
                        : isBadQuality
                            ? NodeDataHandlerResult.BadQuality
                            : NodeDataHandlerResult.OK;

                    if (DateTime.UtcNow - _lastReportTime > TimeSpan.FromSeconds(1))
                    {
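                        // report throughput and request-quality statistics at most once per second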
                        decimal requestedNodesPerSecond = 1000m * (_requestedNodesCount - _lastRequestedNodesCount) / (decimal)(DateTime.UtcNow - _lastReportTime).TotalMilliseconds;
                        decimal savedNodesPerSecond     = 1000m * (_savedNodesCount - _lastSavedNodesCount) / (decimal)(DateTime.UtcNow - _lastReportTime).TotalMilliseconds;
                        _lastSavedNodesCount     = _savedNodesCount;
                        _lastRequestedNodesCount = _requestedNodesCount;
                        _lastReportTime          = DateTime.UtcNow;
                        if (_logger.IsInfo)
                        {
                            _logger.Info($"SNPS: {savedNodesPerSecond,6:F0} | RNPS: {requestedNodesPerSecond,6:F0} | Saved nodes {_savedNodesCount} / requested {_requestedNodesCount} ({(decimal) _savedNodesCount / _requestedNodesCount:P2}), saved accounts {_savedAccounts}, size: {(decimal)_dataSize / 1000 / 1000:F2}MB, enqueued nodes {Stream0.Count:D5}|{Stream1.Count:D5}|{Stream2.Count:D5}");
                        }
                        if (_logger.IsInfo)
                        {
                            _logger.Info($"AVTIH: {_averageTimeInHandler:F2}ms | P: {_pendingRequests.Count} | Request results - OK: {(decimal) _okCount / TotalRequestsCount:p2}, Emptish: {(decimal) _emptishCount / TotalRequestsCount:p2}, BadQuality: {(decimal) _badQualityCount / TotalRequestsCount:p2}, InvalidFormat: {(decimal) _invalidFormatCount / TotalRequestsCount:p2}, NotAssigned {(decimal) _notAssignedCount / TotalRequestsCount:p2}");
                        }
                        if (_logger.IsTrace)
                        {
                            _logger.Trace($"Requested {_requestedNodesCount}, consumed {_consumedNodesCount}, missed {_requestedNodesCount - _consumedNodesCount}, {_savedCode} contracts, {_savedStateCount - _savedAccounts} states, {_savedStorageCount} storage, DB checks {_stateWasThere}/{_stateWasNotThere + _stateWasThere} cached({_checkWasCached}+{_checkWasInDependencies})");
                        }
                        if (_logger.IsTrace)
                        {
                            _logger.Trace($"Consume : {(decimal) _consumedNodesCount / _requestedNodesCount:p2}, Save : {(decimal) _savedNodesCount / _requestedNodesCount:p2}, DB Reads : {(decimal) _dbChecks / _requestedNodesCount:p2}");
                        }
                    }

                    long total = _handleWatch.ElapsedMilliseconds + _networkWatch.ElapsedMilliseconds;
                    if (total != 0)
                    {
                        // calculate averages
                        if (_logger.IsTrace)
                        {
                            _logger.Trace($"Prepare batch {_networkWatch.ElapsedMilliseconds}ms ({(decimal) _networkWatch.ElapsedMilliseconds / total:P0}) - Handle {_handleWatch.ElapsedMilliseconds}ms ({(decimal) _handleWatch.ElapsedMilliseconds / total:P0})");
                        }
                    }

                    _averageTimeInHandler = (_averageTimeInHandler * (ProcessedRequestsCount - 1) + _handleWatch.ElapsedMilliseconds) / ProcessedRequestsCount;
                    return (result, nonEmptyResponses);
                }
            }
            finally
            {
                _handleWatch.Stop();
                if (!_pendingRequests.TryRemove(batch, out _))
                {
                    _logger.Error("Cannot remove pending request");
                }
            }
        }
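
HandleResponse classifies every batch into one of the NodeDataHandlerResult values returned above. The enum itself is not part of this listing; a declaration consistent with the values used in this example (the real type may define additional members or a different order) would be:

        public enum NodeDataHandlerResult
        {
            OK,            // response was of acceptable quality
            Emptish,       // too few non-empty items relative to the request size
            BadQuality,    // many items failed the hash check
            InvalidFormat, // request or response arrays were missing
            NotAssigned,   // the batch was never assigned to a peer
            NoData         // the peer returned nothing useful
        }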
Example No. 3
        private AddNodeResult AddNode(StateSyncItem syncItem, DependentItem dependentItem, string reason, bool missing = false)
        {
            if (!missing)
            {
                if (_alreadySaved.Get(syncItem.Hash) != null)
                {
                    Interlocked.Increment(ref _checkWasCached);
                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Node already in the DB - skipping {syncItem.Hash}");
                    }
                    return AddNodeResult.AlreadySaved;
                }

                object lockToTake = syncItem.NodeDataType == NodeDataType.Code ? _codeDbLock : _stateDbLock;
                lock (lockToTake)
                {
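                    // check the backing DB (code or state) under the matching lock to avoid re-requesting nodes that are already persisted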
                    ISnapshotableDb dbToCheck = syncItem.NodeDataType == NodeDataType.Code ? _codeDb : _stateDb;
                    Interlocked.Increment(ref _dbChecks);
                    bool keyExists = dbToCheck.KeyExists(syncItem.Hash);
                    if (keyExists)
                    {
                        if (_logger.IsTrace)
                        {
                            _logger.Trace($"Node already in the DB - skipping {syncItem.Hash}");
                        }
                        _alreadySaved.Set(syncItem.Hash, _nullObject);
                        Interlocked.Increment(ref _stateWasThere);
                        return AddNodeResult.AlreadySaved;
                    }

                    Interlocked.Increment(ref _stateWasNotThere);
                }

                bool isAlreadyRequested;
                lock (_dependencies)
                {
                    isAlreadyRequested = _dependencies.ContainsKey(syncItem.Hash);
                    if (dependentItem != null)
                    {
                        if (_logger.IsTrace)
                        {
                            _logger.Trace($"Adding dependency {syncItem.Hash} -> {dependentItem.SyncItem.Hash}");
                        }
                        AddDependency(syncItem.Hash, dependentItem);
                    }
                }

                /* same items can have same hashes and we only need them once
                 * there is an issue when we have an item, we add it to dependencies, then we request it and the request times out
                 * and we never request it again because it was already on the dependencies list */
                if (isAlreadyRequested)
                {
                    Interlocked.Increment(ref _checkWasInDependencies);
                    if (_logger.IsTrace)
                    {
                        _logger.Trace($"Node already requested - skipping {syncItem.Hash}");
                    }
                    return AddNodeResult.AlreadyRequested;
                }
            }

            PushToSelectedStream(syncItem);
            if (_logger.IsTrace)
            {
                _logger.Trace($"Added a node {syncItem.Hash} - {reason}");
            }
            return AddNodeResult.Added;
        }
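
AddNode reports one of three outcomes, which the callers in Example No. 2 use to decide whether to increment a dependency counter. The enum is not shown in this listing; a declaration consistent with the values referenced here (assumed, not confirmed by the source) would be:

        public enum AddNodeResult
        {
            Added,            // queued for download on one of the streams
            AlreadySaved,     // found in the cache or the DB, nothing to do
            AlreadyRequested  // already on the dependencies list, waiting for a response
        }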