/// <summary>
/// Creates a record of a node that is waiting for its children: the node itself,
/// its raw payload, how many children are still outstanding, and whether it is
/// an account leaf.
/// </summary>
public DependentItem(StateSyncItem syncItem, byte[] value, int counter, bool isAccount = false)
{
    IsAccount = isAccount;
    Counter = counter;
    Value = value;
    SyncItem = syncItem;
}
/// <summary>
/// Forwards a node's sync progress to the positional overload using the item's
/// location in the trie (level, parent branch index, own branch index).
/// </summary>
/// <param name="syncItem">The node whose progress is being reported.</param>
/// <param name="nodeProgressState">The progress state to record for this node.</param>
public void ReportSynced(StateSyncItem syncItem, NodeProgressState nodeProgressState)
{
    // BUG FIX: the previous version hardcoded NodeProgressState.Requested and
    // silently ignored the caller-supplied state; forward the parameter instead.
    ReportSynced(
        syncItem.Level,
        syncItem.ParentBranchChildIndex,
        syncItem.BranchChildIndex,
        syncItem.NodeDataType,
        nodeProgressState);
}
/// <summary>
/// Attempts to pop one pending item, scanning the streams in their fixed
/// priority order and returning the first hit.
/// </summary>
/// <param name="node">The taken item, or null when every stream is empty.</param>
/// <returns>True when an item was taken; otherwise false.</returns>
private bool TryTake(out StateSyncItem node)
{
    foreach (var stack in _allStacks)
    {
        if (stack.TryPop(out node))
        {
            return true;
        }
    }

    node = null;
    return false;
}
/// <summary>
/// Computes the "rightness" of a child node - how far to the right of the trie
/// it sits - used to prioritize traversal. Only branch and extension nodes are
/// supported.
/// </summary>
/// <exception cref="InvalidOperationException">Thrown for any other node type.</exception>
private static uint CalculateRightness(NodeType nodeType, StateSyncItem currentStateSyncItem, int childIndex)
{
    // 16^(7 - level), clamped at level 7. Expressed as a shift (4 bits per
    // nibble), which is exact for exponents 0..7 and matches the previous
    // Math.Pow-based computation bit for bit.
    int exponent = Math.Max(0, 7 - currentStateSyncItem.Level);
    uint levelWeight = 1u << (4 * exponent);

    return nodeType switch
    {
        NodeType.Branch => currentStateSyncItem.Rightness + levelWeight * (uint)childIndex,
        NodeType.Extension => currentStateSyncItem.Rightness + levelWeight * 16 - 1,
        _ => throw new InvalidOperationException($"Not designed for {nodeType}")
    };
}
/// <summary>
/// Routes an item onto the matching stream: code items have a dedicated stream,
/// while state and storage items are bucketed into three priority tiers based
/// on the calculated priority value.
/// </summary>
/// <param name="stateSyncItem">The item to enqueue.</param>
/// <param name="progress">Current overall sync progress, remembered for priority calculations.</param>
/// <exception cref="ArgumentOutOfRangeException">Thrown for an unknown data type.</exception>
public void PushToSelectedStream(StateSyncItem stateSyncItem, decimal progress)
{
    _lastSyncProgress = progress;
    double priority = CalculatePriority(stateSyncItem.NodeDataType, stateSyncItem.Level, stateSyncItem.Rightness);

    if (stateSyncItem.NodeDataType == NodeDataType.Code)
    {
        CodeItems.Push(stateSyncItem);
    }
    else if (stateSyncItem.NodeDataType == NodeDataType.State)
    {
        // lower priority value == more urgent tier
        var stream = priority <= 0.5f ? StateItemsPriority0 : priority <= 1.5f ? StateItemsPriority1 : StateItemsPriority2;
        stream.Push(stateSyncItem);
    }
    else if (stateSyncItem.NodeDataType == NodeDataType.Storage)
    {
        var stream = priority <= 0.5f ? StorageItemsPriority0 : priority <= 1.5f ? StorageItemsPriority1 : StorageItemsPriority2;
        stream.Push(stateSyncItem);
    }
    else
    {
        throw new ArgumentOutOfRangeException();
    }
}
/// <summary>
/// Points the feed at a (possibly new) state root to sync. May only be called
/// while the feed is dormant. If the root changed, all queued work for the old
/// root is discarded; if it is the same root, in-flight requests are re-queued
/// so they can be retried.
/// </summary>
/// <param name="blockNumber">Block number the state root belongs to.</param>
/// <param name="stateRoot">Root hash of the state trie to sync.</param>
/// <exception cref="InvalidOperationException">Thrown when the feed is not dormant.</exception>
public void ResetStateRoot(long blockNumber, Keccak stateRoot)
{
    if (CurrentState != SyncFeedState.Dormant)
    {
        throw new InvalidOperationException("Cannot reset state sync on an active feed");
    }

    Interlocked.Exchange(ref _hintsToResetRoot, 0);

    if (_logger.IsInfo) { _logger.Info($"Setting state sync state root to {blockNumber} {stateRoot}"); }

    // Snapshot counters so per-round progress/rates can be reported.
    _currentSyncStart = DateTime.UtcNow;
    _currentSyncStartSecondsInSync = _data.SecondsInSync;
    _data.LastReportTime = (DateTime.UtcNow, DateTime.UtcNow);
    _data.LastSavedNodesCount = _data.SavedNodesCount;
    _data.LastRequestedNodesCount = _data.RequestedNodesCount;

    if (_rootNode != stateRoot)
    {
        // New root: drop all dependencies, code aliases and queued items that
        // belonged to the previous root.
        _syncProgress = new StateSyncProgress(blockNumber, _logger);
        _blockNumber = blockNumber;
        _rootNode = stateRoot;
        lock (_dependencies) _dependencies.Clear();
        lock (_codesSameAsNodes) _codesSameAsNodes.Clear();
        _pendingItems.Clear();
        if (_logger.IsDebug) { _logger.Debug($"Clearing node stacks ({_pendingItems.Description})"); }
    }
    else
    {
        // Same root: put every node from the in-flight batches back on the
        // queue so nothing is lost when the requests are cleared below.
        foreach ((StateSyncBatch pendingRequest, _) in _pendingRequests)
        {
            // re-add the pending request
            for (int i = 0; i < pendingRequest.RequestedNodes.Length; i++)
            {
                AddNodeToPending(pendingRequest.RequestedNodes[i], null, "pending request", true);
            }
        }
    }

    _pendingRequests.Clear();

    bool hasOnlyRootNode = false;
    if (_rootNode != Keccak.EmptyTreeHash)
    {
        if (_pendingItems.Count == 1)
        {
            // state root can only be located on state stream
            StateSyncItem potentialRoot = _pendingItems.PeekState();
            if (potentialRoot?.Hash == _rootNode)
            {
                hasOnlyRootNode = true;
            }
        }

        // Seed the queue with the root node unless it is already the sole item.
        if (!hasOnlyRootNode)
        {
            AddNodeToPending(new StateSyncItem(_rootNode, NodeDataType.State, 0, 0), null, "initial");
        }
    }
}
/// <summary>
/// Decodes a downloaded trie node and schedules its children for download.
/// Branch and extension nodes register a DependentItem so the parent is only
/// saved once all children have arrived; state leaves additionally schedule the
/// account's code and storage root. Nodes with nothing outstanding are saved
/// immediately.
/// </summary>
/// <param name="currentStateSyncItem">The request item this payload answers.</param>
/// <param name="currentResponseItem">Raw RLP bytes of the node.</param>
/// <param name="invalidNodes">Incremented when the payload cannot be decoded.</param>
private void HandleTrieNode(StateSyncItem currentStateSyncItem, byte[] currentResponseItem, ref int invalidNodes)
{
    NodeDataType nodeDataType = currentStateSyncItem.NodeDataType;
    TrieNode trieNode = new TrieNode(NodeType.Unknown, currentResponseItem);
    // decode the RLP so trieNode.NodeType is resolved (null tree argument -
    // presumably no tree context is needed just to classify; TODO confirm)
    trieNode.ResolveNode(null);
    switch (trieNode.NodeType)
    {
        case NodeType.Unknown:
            invalidNodes++;
            if (_logger.IsError) { _logger.Error($"Node {currentStateSyncItem.Hash} resolved to {nameof(NodeType.Unknown)}"); }
            break;
        case NodeType.Branch:
            DependentItem dependentBranch = new DependentItem(currentStateSyncItem, currentResponseItem, 0);
            // children may have the same hashes (e.g. a set of accounts with the same code at different addresses)
            HashSet<Keccak> alreadyProcessedChildHashes = new HashSet<Keccak>();
            // walk children right-to-left so pushes end up left-first on the stacks
            for (int childIndex = 15; childIndex >= 0; childIndex--)
            {
                Keccak childHash = trieNode.GetChildHash(childIndex);
                if (childHash != null && alreadyProcessedChildHashes.Contains(childHash))
                {
                    continue;
                }

                // NOTE(review): a null childHash is also added here - harmless
                // for HashSet semantics but could be moved inside the null check
                alreadyProcessedChildHashes.Add(childHash);

                if (childHash != null)
                {
                    AddNodeResult addChildResult = AddNodeToPending(new StateSyncItem(childHash, nodeDataType, currentStateSyncItem.Level + 1, CalculateRightness(trieNode.NodeType, currentStateSyncItem, childIndex)) { BranchChildIndex = (short)childIndex, ParentBranchChildIndex = currentStateSyncItem.BranchChildIndex }, dependentBranch, "branch child");
                    if (addChildResult != AddNodeResult.AlreadySaved)
                    {
                        // one more child this branch has to wait for
                        dependentBranch.Counter++;
                    }
                    else
                    {
                        _syncProgress.ReportSynced(currentStateSyncItem.Level + 1, currentStateSyncItem.BranchChildIndex, childIndex, currentStateSyncItem.NodeDataType, NodeProgressState.AlreadySaved);
                    }
                }
                else
                {
                    _syncProgress.ReportSynced(currentStateSyncItem.Level + 1, currentStateSyncItem.BranchChildIndex, childIndex, currentStateSyncItem.NodeDataType, NodeProgressState.Empty);
                }
            }

            // nothing to wait for - the branch can be persisted right away
            if (dependentBranch.Counter == 0)
            {
                SaveNode(currentStateSyncItem, currentResponseItem);
            }
            break;
        case NodeType.Extension:
            Keccak next = trieNode[0].Keccak;
            if (next != null)
            {
                DependentItem dependentItem = new DependentItem(currentStateSyncItem, currentResponseItem, 1);
                AddNodeResult addResult = AddNodeToPending(new StateSyncItem(next, nodeDataType, currentStateSyncItem.Level + trieNode.Path.Length, CalculateRightness(trieNode.NodeType, currentStateSyncItem, 0)) { ParentBranchChildIndex = currentStateSyncItem.BranchChildIndex }, dependentItem, "extension child");
                if (addResult == AddNodeResult.AlreadySaved)
                {
                    SaveNode(currentStateSyncItem, currentResponseItem);
                }
            }
            else
            {
                /* this happens when we have a short RLP format of the node
                 * that would not be stored as Keccak but full RLP*/
                SaveNode(currentStateSyncItem, currentResponseItem);
            }
            break;
        case NodeType.Leaf:
            if (nodeDataType == NodeDataType.State)
            {
                _pendingItems.MaxStateLevel = 64;
                // an account leaf may depend on its code and its storage trie
                DependentItem dependentItem = new DependentItem(currentStateSyncItem, currentResponseItem, 0, true);
                (Keccak codeHash, Keccak storageRoot) = AccountDecoder.DecodeHashesOnly(new RlpStream(trieNode.Value));
                if (codeHash != Keccak.OfAnEmptyString)
                {
                    // prepare a branch without the code DB
                    // this only protects against being same as storage root?
                    if (codeHash == storageRoot)
                    {
                        // code hash collides with the storage root - remember it
                        // so SaveNode mirrors the payload into the code DB
                        lock (_codesSameAsNodes)
                        {
                            _codesSameAsNodes.Add(codeHash);
                        }
                    }
                    else
                    {
                        AddNodeResult addCodeResult = AddNodeToPending(new StateSyncItem(codeHash, NodeDataType.Code, 0, currentStateSyncItem.Rightness), dependentItem, "code");
                        if (addCodeResult != AddNodeResult.AlreadySaved) { dependentItem.Counter++; }
                    }
                }

                if (storageRoot != Keccak.EmptyTreeHash)
                {
                    AddNodeResult addStorageNodeResult = AddNodeToPending(new StateSyncItem(storageRoot, NodeDataType.Storage, 0, currentStateSyncItem.Rightness), dependentItem, "storage");
                    if (addStorageNodeResult != AddNodeResult.AlreadySaved) { dependentItem.Counter++; }
                }

                // neither code nor storage outstanding - account fully synced
                if (dependentItem.Counter == 0)
                {
                    Interlocked.Increment(ref _data.SavedAccounts);
                    SaveNode(currentStateSyncItem, currentResponseItem);
                }
            }
            else
            {
                // storage leaf - nothing below it, save immediately
                _pendingItems.MaxStorageLevel = 64;
                SaveNode(currentStateSyncItem, currentResponseItem);
            }
            break;
        default:
            if (_logger.IsError) { _logger.Error($"Unknown value {currentStateSyncItem.NodeDataType} of {nameof(NodeDataType)} at {currentStateSyncItem.Hash}"); }
            invalidNodes++;
            break;
    }
}
/// <summary>
/// Processes a peer's response to a state sync batch: validates each returned
/// node against its requested hash, saves valid nodes, re-queues missing or
/// invalid ones, and classifies the overall response quality so the peer
/// allocation logic can reward or penalize the peer.
/// </summary>
/// <param name="batch">The request batch together with the peer's responses.</param>
/// <returns>A quality classification of the response.</returns>
public override SyncResponseHandlingResult HandleResponse(StateSyncBatch batch)
{
    if (batch == EmptyBatch)
    {
        _logger.Error("Received empty batch as a response");
    }

    // A batch that is no longer tracked (e.g. after a root reset) is stale -
    // acknowledge and drop it.
    if (!_pendingRequests.TryRemove(batch, out _))
    {
        if (_logger.IsDebug) { _logger.Debug($"Cannot remove pending request {batch}"); }
        return (SyncResponseHandlingResult.OK);
    }
    else
    {
        if (_logger.IsTrace) { _logger.Trace($"Removing pending request {batch}"); }
    }

    int requestLength = batch.RequestedNodes?.Length ?? 0;
    int responseLength = batch.Responses?.Length ?? 0;

    // Puts every requested node back on the queue so it can be retried.
    void AddAgainAllItems()
    {
        for (int i = 0; i < requestLength; i++)
        {
            AddNodeToPending(batch.RequestedNodes[i], null, "missing", true);
        }
    }

    try
    {
        lock (_handleWatch)
        {
            // Rebalance stream priorities at most once a minute.
            if (DateTime.UtcNow - _lastReview > TimeSpan.FromSeconds(60))
            {
                _lastReview = DateTime.UtcNow;
                string reviewMessage = _pendingItems.RecalculatePriorities();
                if (_logger.IsInfo) { _logger.Info(reviewMessage); }
            }

            _handleWatch.Restart();

            // Responses == null means the batch was never assigned to a peer.
            bool requestWasMade = batch.Responses != null;
            if (!requestWasMade)
            {
                AddAgainAllItems();
                if (_logger.IsTrace) { _logger.Trace($"Batch was not assigned to any peer."); }
                Interlocked.Increment(ref _data.NotAssignedCount);
                return (SyncResponseHandlingResult.NotAssigned);
            }

            bool isMissingRequestData = batch.RequestedNodes == null;
            bool isMissingResponseData = batch.Responses == null;
            bool hasValidFormat = !isMissingRequestData && !isMissingResponseData;
            if (!hasValidFormat)
            {
                // NOTE(review): plain ++ here while other paths use Interlocked
                // on _hintsToResetRoot - we are inside the _handleWatch lock, but
                // confirm the other writers respect it
                _hintsToResetRoot++;
                AddAgainAllItems();
                if (_logger.IsWarn) { _logger.Warn($"Batch response had invalid format"); }
                Interlocked.Increment(ref _data.InvalidFormatCount);
                return (isMissingRequestData ? SyncResponseHandlingResult.InternalError : SyncResponseHandlingResult.NotAssigned);
            }

            if (_logger.IsTrace) { _logger.Trace($"Received node data - {responseLength} items in response to {requestLength}"); }

            int nonEmptyResponses = 0;
            int invalidNodes = 0;
            for (int i = 0; i < batch.RequestedNodes.Length; i++)
            {
                StateSyncItem currentStateSyncItem = batch.RequestedNodes[i];

                /* if the peer has limit on number of requests in a batch then the response will possibly be
                 * shorter than the request */
                if (batch.Responses.Length < i + 1)
                {
                    AddNodeToPending(currentStateSyncItem, null, "missing", true);
                    continue;
                }

                /* if the peer does not have details of this particular node */
                byte[] currentResponseItem = batch.Responses[i];
                if (currentResponseItem == null)
                {
                    AddNodeToPending(batch.RequestedNodes[i], null, "missing", true);
                    continue;
                }

                /* node sent data that is not consistent with its hash - it happens surprisingly often */
                if (!ValueKeccak.Compute(currentResponseItem).BytesAsSpan.SequenceEqual(currentStateSyncItem.Hash.Bytes))
                {
                    AddNodeToPending(currentStateSyncItem, null, "missing", true);
                    if (_logger.IsTrace) { _logger.Trace($"Peer sent invalid data (batch {requestLength}->{responseLength}) of length {batch.Responses[i]?.Length} of type {batch.RequestedNodes[i].NodeDataType} at level {batch.RequestedNodes[i].Level} of type {batch.RequestedNodes[i].NodeDataType} Keccak({batch.Responses[i].ToHexString()}) != {batch.RequestedNodes[i].Hash}"); }
                    invalidNodes++;
                    continue;
                }

                nonEmptyResponses++;
                NodeDataType nodeDataType = currentStateSyncItem.NodeDataType;

                // Code blobs are opaque - save directly; trie nodes need to be
                // decoded so their children can be scheduled.
                if (nodeDataType == NodeDataType.Code)
                {
                    SaveNode(currentStateSyncItem, currentResponseItem);
                    continue;
                }

                HandleTrieNode(currentStateSyncItem, currentResponseItem, ref invalidNodes);
            }

            Interlocked.Add(ref _data.ConsumedNodesCount, nonEmptyResponses);
            StoreProgressInDb();

            if (_logger.IsTrace) { _logger.Trace($"After handling response (non-empty responses {nonEmptyResponses}) of {batch.RequestedNodes.Length} from ({_pendingItems.Description}) nodes"); }

            /* magic formula is ratio of our desired batch size - 1024 to Geth max batch size 384 times some missing nodes ratio */
            bool isEmptish = (decimal)nonEmptyResponses / Math.Max(requestLength, 1) < 384m / 1024m * 0.75m;
            if (isEmptish)
            {
                // repeated emptish responses hint that the root went stale
                Interlocked.Increment(ref _hintsToResetRoot);
                Interlocked.Increment(ref _data.EmptishCount);
            }
            else
            {
                Interlocked.Exchange(ref _hintsToResetRoot, 0);
            }

            /* here we are very forgiving for Geth nodes that send bad data fast */
            bool isBadQuality = nonEmptyResponses > 64 && (decimal)invalidNodes / Math.Max(requestLength, 1) > 0.50m;
            if (isBadQuality) { Interlocked.Increment(ref _data.BadQualityCount); }

            bool isEmpty = nonEmptyResponses == 0 && !isBadQuality;
            if (isEmpty)
            {
                if (_logger.IsDebug) { _logger.Debug($"Peer sent no data in response to a request of length {batch.RequestedNodes.Length}"); }
                return (SyncResponseHandlingResult.NoProgress);
            }

            if (!isEmptish && !isBadQuality)
            {
                Interlocked.Increment(ref _data.OkCount);
            }

            SyncResponseHandlingResult result = isEmptish ? SyncResponseHandlingResult.Emptish : isBadQuality ? SyncResponseHandlingResult.LesserQuality : SyncResponseHandlingResult.OK;

            _data.DisplayProgressReport(_pendingRequests.Count, _logger);

            long total = _handleWatch.ElapsedMilliseconds + _networkWatch.ElapsedMilliseconds;
            if (total != 0)
            {
                // calculate averages
                if (_logger.IsTrace) { _logger.Trace($"Prepare batch {_networkWatch.ElapsedMilliseconds}ms ({(decimal) _networkWatch.ElapsedMilliseconds / total:P0}) - Handle {_handleWatch.ElapsedMilliseconds}ms ({(decimal) _handleWatch.ElapsedMilliseconds / total:P0})"); }
            }

            // Slow handling usually correlates with DB read pressure - log it.
            if (_handleWatch.ElapsedMilliseconds > 250)
            {
                if (_logger.IsDebug) { _logger.Debug($"Handle watch {_handleWatch.ElapsedMilliseconds}, DB reads {_data.DbChecks - _data.LastDbReads}, ratio {(decimal) _handleWatch.ElapsedMilliseconds / Math.Max(1, _data.DbChecks - _data.LastDbReads)}"); }
            }

            _data.LastDbReads = _data.DbChecks;
            // running average over all processed requests
            _data.AverageTimeInHandler = (_data.AverageTimeInHandler * (_data.ProcessedRequestsCount - 1) + _handleWatch.ElapsedMilliseconds) / _data.ProcessedRequestsCount;
            Interlocked.Add(ref _data.HandledNodesCount, nonEmptyResponses);
            return (result);
        }
    }
    catch (Exception e)
    {
        _logger.Error("Error when handling state sync response", e);
        return (SyncResponseHandlingResult.InternalError);
    }
    finally
    {
        _handleWatch.Stop();
    }
}
/// <summary>
/// Persists a fully-resolved node to the appropriate database (state DB for
/// state/storage nodes, code DB for code), updates counters/metrics, finishes
/// the sync round when the saved node is the root, and releases any parents
/// waiting on this hash.
/// </summary>
/// <param name="syncItem">The item being saved.</param>
/// <param name="data">Raw node bytes to persist under the item's hash.</param>
private void SaveNode(StateSyncItem syncItem, byte[] data)
{
    if (_logger.IsTrace) { _logger.Trace($"SAVE {new string('+', syncItem.Level * 2)}{syncItem.NodeDataType.ToString().ToUpperInvariant()} {syncItem.Hash}"); }
    Interlocked.Increment(ref _data.SavedNodesCount);
    switch (syncItem.NodeDataType)
    {
        case NodeDataType.State:
        {
            Interlocked.Increment(ref _data.SavedStateCount);
            lock (_stateDbLock)
            {
                Interlocked.Add(ref _data.DataSize, data.Length);
                Interlocked.Increment(ref Metrics.SyncedStateTrieNodes);
                _stateDb.Set(syncItem.Hash, data);
            }
            break;
        }
        case NodeDataType.Storage:
        {
            // A storage node whose hash was recorded as equal to some account's
            // code hash is mirrored into the code DB as well (see the leaf
            // handling that populates _codesSameAsNodes).
            lock (_codesSameAsNodes)
            {
                if (_codesSameAsNodes.Contains(syncItem.Hash))
                {
                    lock (_codeDbLock)
                    {
                        Interlocked.Add(ref _data.DataSize, data.Length);
                        Interlocked.Increment(ref Metrics.SyncedCodes);
                        _codeDb.Set(syncItem.Hash, data);
                    }

                    _codesSameAsNodes.Remove(syncItem.Hash);
                }
            }

            Interlocked.Increment(ref _data.SavedStorageCount);
            lock (_stateDbLock)
            {
                Interlocked.Add(ref _data.DataSize, data.Length);
                Interlocked.Increment(ref Metrics.SyncedStorageTrieNodes);
                _stateDb.Set(syncItem.Hash, data);
            }
            break;
        }
        case NodeDataType.Code:
        {
            Interlocked.Increment(ref _data.SavedCode);
            lock (_codeDbLock)
            {
                Interlocked.Add(ref _data.DataSize, data.Length);
                Interlocked.Increment(ref Metrics.SyncedCodes);
                _codeDb.Set(syncItem.Hash, data);
            }
            break;
        }
    }

    // Saving the root means the whole trie is complete - close out the round.
    if (syncItem.IsRoot)
    {
        if (_logger.IsInfo) { _logger.Info($"Saving root {syncItem.Hash} of {_syncProgress.CurrentSyncBlock}"); }
        VerifyPostSyncCleanUp();
        FinishThisSyncRound();
    }

    _syncProgress.ReportSynced(syncItem.Level, syncItem.ParentBranchChildIndex, syncItem.BranchChildIndex, syncItem.NodeDataType, NodeProgressState.Saved);
    // wake up any parents whose last missing child was this node
    PossiblySaveDependentNodes(syncItem.Hash);
}
/// <summary>
/// Queues a node for download unless it is already saved or already requested.
/// When <paramref name="missing"/> is true (a retry of a node a peer failed to
/// deliver) all the cache/DB/dependency checks are skipped and the item goes
/// straight back onto the pending streams.
/// </summary>
/// <param name="syncItem">The node to schedule.</param>
/// <param name="dependentItem">Parent waiting on this node, or null.</param>
/// <param name="reason">Human-readable reason, used for trace logging.</param>
/// <param name="missing">True when re-adding a node after a failed request.</param>
/// <returns>Whether the node was added, already saved, or already requested.</returns>
private AddNodeResult AddNodeToPending(StateSyncItem syncItem, DependentItem dependentItem, string reason, bool missing = false)
{
    if (!missing)
    {
        // only the top of the trie is tracked in the progress report
        if (syncItem.Level <= 2)
        {
            _syncProgress.ReportSynced(syncItem.Level, syncItem.ParentBranchChildIndex, syncItem.BranchChildIndex, syncItem.NodeDataType, NodeProgressState.Requested);
        }

        // Fast path: in-memory cache of recently confirmed hashes.
        if (_alreadySaved.Get(syncItem.Hash))
        {
            Interlocked.Increment(ref _data.CheckWasCached);
            if (_logger.IsTrace) { _logger.Trace($"Node already in the DB - skipping {syncItem.Hash}"); }
            return (AddNodeResult.AlreadySaved);
        }

        // Slow path: check the backing DB (code vs state DB picked by data type).
        object lockToTake = syncItem.NodeDataType == NodeDataType.Code ? _codeDbLock : _stateDbLock;
        lock (lockToTake)
        {
            IDb dbToCheck = syncItem.NodeDataType == NodeDataType.Code ? _codeDb : _stateDb;
            Interlocked.Increment(ref _data.DbChecks);
            bool keyExists = dbToCheck.KeyExists(syncItem.Hash);
            if (keyExists)
            {
                if (_logger.IsTrace) { _logger.Trace($"Node already in the DB - skipping {syncItem.Hash}"); }
                _alreadySaved.Set(syncItem.Hash);
                Interlocked.Increment(ref _data.StateWasThere);
                return (AddNodeResult.AlreadySaved);
            }

            Interlocked.Increment(ref _data.StateWasNotThere);
        }

        bool isAlreadyRequested;
        lock (_dependencies)
        {
            isAlreadyRequested = _dependencies.ContainsKey(syncItem.Hash);
            // Record the parent -> child dependency even when the child was
            // already requested - the parent must still be released on arrival.
            if (dependentItem != null)
            {
                if (_logger.IsTrace) { _logger.Trace($"Adding dependency {syncItem.Hash} -> {dependentItem.SyncItem.Hash}"); }
                AddDependency(syncItem.Hash, dependentItem);
            }
        }

        /* same items can have same hashes and we only need them once
         * there is an issue when we have an item, we add it to dependencies, then we request it and the request times out
         * and we never request it again because it was already on the dependencies list */
        if (isAlreadyRequested)
        {
            Interlocked.Increment(ref _data.CheckWasInDependencies);
            if (_logger.IsTrace) { _logger.Trace($"Node already requested - skipping {syncItem.Hash}"); }
            return (AddNodeResult.AlreadyRequested);
        }
    }

    _pendingItems.PushToSelectedStream(syncItem, _syncProgress.LastProgress);
    if (_logger.IsTrace) { _logger.Trace($"Added a node {syncItem.Hash} - {reason}"); }
    return (AddNodeResult.Added);
}