private void MakeTried(AddressInfo info, int nId)
{
    // remove the entry from all new buckets
    for (var bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++)
    {
        var pos = info.GetBucketPosition(_nKey, true, bucket);
        if (_vvNew[bucket, pos] == nId)
        {
            _vvNew[bucket, pos] = -1;
            info.NRefCount--;
        }
    }
    _nNew--;

    Assert(info.NRefCount == 0);

    // which tried bucket to move the entry to
    var nKBucket = info.GetTriedBucket(_nKey);
    var nKBucketPos = info.GetBucketPosition(_nKey, false, nKBucket);

    // first make space to add it (the existing tried entry there is moved to new, deleting whatever is there).
    if (_vvTried[nKBucket, nKBucketPos] != -1)
    {
        // find an item to evict
        var nIdEvict = _vvTried[nKBucket, nKBucketPos];
        Assert(_mapInfo.ContainsKey(nIdEvict));
        var infoOld = _mapInfo[nIdEvict];

        // Remove the to-be-evicted item from the tried set.
        infoOld.fInTried = false;
        _vvTried[nKBucket, nKBucketPos] = -1;
        _nTried--;

        // find which new bucket it belongs to
        var nUBucket = infoOld.GetNewBucket(_nKey);
        var nUBucketPos = infoOld.GetBucketPosition(_nKey, true, nUBucket);
        ClearNew(nUBucket, nUBucketPos);
        Assert(_vvNew[nUBucket, nUBucketPos] == -1);

        // Enter it into the new set again.
        infoOld.NRefCount = 1;
        _vvNew[nUBucket, nUBucketPos] = nIdEvict;
        _nNew++;
    }
    Assert(_vvTried[nKBucket, nKBucketPos] == -1);

    _vvTried[nKBucket, nKBucketPos] = nId;
    _nTried++;
    info.fInTried = true;
}
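// Serialized layout handled by ReadWrite below (it appears to mirror Bitcoin Core's peers.dat
// format): nVersion, nKeySize (must be 32), nKey, nNew, nTried, nUBuckets (written with bit 30
// set to flag the newer format), then the nNew "new" entries, the nTried "tried" entries, and
// finally, per new bucket, a count followed by the entry indexes stored in that bucket.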
// IBitcoinSerializable Members
public void ReadWrite(BitcoinStream stream)
{
    lock (_cs)
    {
        Check();
        if (!stream.Serializing)
        {
            Clear();
        }
        stream.ReadWrite(ref _nVersion);
        stream.ReadWrite(ref _nKeySize);
        if (!stream.Serializing && _nKeySize != 32)
        {
            throw new FormatException("Incorrect keysize in addrman deserialization");
        }
        stream.ReadWrite(ref _nKey);
        stream.ReadWrite(ref _nNew);
        stream.ReadWrite(ref _nTried);

        var nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
        stream.ReadWrite(ref nUBuckets);
        if (_nVersion != 0)
        {
            nUBuckets ^= 1 << 30;
        }

        if (!stream.Serializing)
        {
            // Deserialize entries from the new table.
            for (var n = 0; n < _nNew; n++)
            {
                var info = new AddressInfo();
                info.ReadWrite(stream);
                _mapInfo.Add(n, info);
                _mapAddr[info.Address.Endpoint.Address] = n;
                info.NRandomPos = _vRandom.Count;
                _vRandom.Add(n);
                if (_nVersion != 1 || nUBuckets != ADDRMAN_NEW_BUCKET_COUNT)
                {
                    // In case the new table data cannot be used (nVersion unknown, or bucket count wrong),
                    // immediately try to give them a reference based on their primary source address.
                    var nUBucket = info.GetNewBucket(_nKey);
                    var nUBucketPos = info.GetBucketPosition(_nKey, true, nUBucket);
                    if (_vvNew[nUBucket, nUBucketPos] == -1)
                    {
                        _vvNew[nUBucket, nUBucketPos] = n;
                        info.NRefCount++;
                    }
                }
            }
            _nIdCount = _nNew;

            // Deserialize entries from the tried table.
            var nLost = 0;
            for (var n = 0; n < _nTried; n++)
            {
                var info = new AddressInfo();
                info.ReadWrite(stream);
                var nKBucket = info.GetTriedBucket(_nKey);
                var nKBucketPos = info.GetBucketPosition(_nKey, false, nKBucket);
                if (_vvTried[nKBucket, nKBucketPos] == -1)
                {
                    info.NRandomPos = _vRandom.Count;
                    info.fInTried = true;
                    _vRandom.Add(_nIdCount);
                    _mapInfo[_nIdCount] = info;
                    _mapAddr[info.Address.Endpoint.Address] = _nIdCount;
                    _vvTried[nKBucket, nKBucketPos] = _nIdCount;
                    _nIdCount++;
                }
                else
                {
                    nLost++;
                }
            }
            _nTried -= nLost;

            // Deserialize positions in the new table (if possible).
            for (var bucket = 0; bucket < nUBuckets; bucket++)
            {
                var nSize = 0;
                stream.ReadWrite(ref nSize);
                for (var n = 0; n < nSize; n++)
                {
                    var nIndex = 0;
                    stream.ReadWrite(ref nIndex);
                    if (nIndex >= 0 && nIndex < _nNew)
                    {
                        var info = _mapInfo[nIndex];
                        var nUBucketPos = info.GetBucketPosition(_nKey, true, bucket);
                        if (_nVersion == 1 && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT &&
                            _vvNew[bucket, nUBucketPos] == -1 &&
                            info.NRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
                        {
                            info.NRefCount++;
                            _vvNew[bucket, nUBucketPos] = nIndex;
                        }
                    }
                }
            }

            // Prune new entries with refcount 0 (as a result of collisions).
            var nLostUnk = 0;
            foreach (var kv in _mapInfo.ToList())
            {
                if (kv.Value.fInTried == false && kv.Value.NRefCount == 0)
                {
                    Delete(kv.Key);
                    nLostUnk++;
                }
            }
        }
        else
        {
            var mapUnkIds = new Dictionary<int, int>();
            var nIds = 0;
            foreach (var kv in _mapInfo)
            {
                mapUnkIds[kv.Key] = nIds;
                var info = kv.Value;
                if (info.NRefCount != 0)
                {
                    Assert(nIds != _nNew); // this means nNew was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            nIds = 0;
            foreach (var kv in _mapInfo)
            {
                var info = kv.Value;
                if (info.fInTried)
                {
                    Assert(nIds != _nTried); // this means nTried was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            for (var bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++)
            {
                var nSize = 0;
                for (var i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (_vvNew[bucket, i] != -1)
                    {
                        nSize++;
                    }
                }
                stream.ReadWrite(ref nSize);
                for (var i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (_vvNew[bucket, i] != -1)
                    {
                        var nIndex = mapUnkIds[_vvNew[bucket, i]];
                        stream.ReadWrite(ref nIndex);
                    }
                }
            }
        }
        Check();
    }
}
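// A minimal loading sketch (hypothetical: the enclosing class name "AddressManager", the
// "peers.dat" file name, and the stream setup are illustrative assumptions, not part of this
// file):
//
//   var addrman = new AddressManager();
//   using (var fs = File.OpenRead("peers.dat"))
//   {
//       var bitcoinStream = new BitcoinStream(fs, false); // serializing: false => deserialize
//       addrman.ReadWrite(bitcoinStream);                 // rebuilds the new/tried tables and indexes
//   }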