// IBitcoinSerializable Members

/// <summary>
/// Serializes or deserializes the entire address-manager state (version, key,
/// new/tried tables and new-table bucket positions) over <paramref name="stream"/>.
/// Direction is chosen by <c>stream.Serializing</c>. The wire layout mirrors
/// Bitcoin Core's addrman format; statement order here IS the wire format, so
/// do not reorder reads/writes.
/// </summary>
/// <param name="stream">Bitcoin stream to read from or write to.</param>
/// <exception cref="FormatException">
/// Thrown on deserialization when the stored key size is not 32 bytes.
/// </exception>
public void ReadWrite(BitcoinStream stream)
{
    // Entire (de)serialization happens under the manager's lock so the tables
    // cannot be mutated mid-stream.
    lock (_cs)
    {
        Check();
        if (!stream.Serializing)
        {
            // Deserializing: start from a blank state before repopulating.
            Clear();
        }
        stream.ReadWrite(ref _nVersion);
        stream.ReadWrite(ref _nKeySize);
        if (!stream.Serializing && _nKeySize != 32)
        {
            throw new FormatException("Incorrect keysize in addrman deserialization");
        }
        stream.ReadWrite(ref _nKey);
        stream.ReadWrite(ref _nNew);
        stream.ReadWrite(ref _nTried);
        // Bucket count is stored with bit 30 set (for version != 0 streams);
        // the XOR below when _nVersion != 0 strips that marker back off.
        var nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
        stream.ReadWrite(ref nUBuckets);
        if (_nVersion != 0)
        {
            nUBuckets ^= 1 << 30;
        }
        if (!stream.Serializing)
        {
            // ---- Deserialization path ----
            // Deserialize entries from the new table.
            for (var n = 0; n < _nNew; n++)
            {
                var info = new AddressInfo();
                info.ReadWrite(stream);
                _mapInfo.Add(n, info);
                _mapAddr[info.Address.Endpoint.Address] = n;
                info.NRandomPos = _vRandom.Count;
                _vRandom.Add(n);
                if (_nVersion != 1 || nUBuckets != ADDRMAN_NEW_BUCKET_COUNT)
                {
                    // In case the new table data cannot be used (nVersion unknown, or bucket count wrong),
                    // immediately try to give them a reference based on their primary source address.
                    var nUBucket = info.GetNewBucket(_nKey);
                    var nUBucketPos = info.GetBucketPosition(_nKey, true, nUBucket);
                    if (_vvNew[nUBucket, nUBucketPos] == -1)
                    {
                        _vvNew[nUBucket, nUBucketPos] = n;
                        info.NRefCount++;
                    }
                }
            }
            _nIdCount = _nNew;

            // Deserialize entries from the tried table.
            var nLost = 0;
            for (var n = 0; n < _nTried; n++)
            {
                var info = new AddressInfo();
                info.ReadWrite(stream);
                var nKBucket = info.GetTriedBucket(_nKey);
                var nKBucketPos = info.GetBucketPosition(_nKey, false, nKBucket);
                if (_vvTried[nKBucket, nKBucketPos] == -1)
                {
                    // Free slot: keep the entry and assign it a fresh id.
                    info.NRandomPos = _vRandom.Count;
                    info.fInTried = true;
                    _vRandom.Add(_nIdCount);
                    _mapInfo[_nIdCount] = info;
                    _mapAddr[info.Address.Endpoint.Address] = _nIdCount;
                    _vvTried[nKBucket, nKBucketPos] = _nIdCount;
                    _nIdCount++;
                }
                else
                {
                    // Bucket collision: the entry is dropped.
                    nLost++;
                }
            }
            _nTried -= nLost;

            // Deserialize positions in the new table (if possible).
            // Note: iterates the SERIALIZED bucket count (nUBuckets), which may
            // differ from ADDRMAN_NEW_BUCKET_COUNT; positions are only applied
            // when version and bucket count match the current constants.
            for (var bucket = 0; bucket < nUBuckets; bucket++)
            {
                var nSize = 0;
                stream.ReadWrite(ref nSize);
                for (var n = 0; n < nSize; n++)
                {
                    var nIndex = 0;
                    stream.ReadWrite(ref nIndex);
                    if (nIndex >= 0 && nIndex < _nNew)
                    {
                        var info = _mapInfo[nIndex];
                        var nUBucketPos = info.GetBucketPosition(_nKey, true, bucket);
                        if (_nVersion == 1 && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && _vvNew[bucket, nUBucketPos] == -1 && info.NRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
                        {
                            info.NRefCount++;
                            _vvNew[bucket, nUBucketPos] = nIndex;
                        }
                    }
                }
            }

            // Prune new entries with refcount 0 (as a result of collisions).
            // ToList() snapshots the map because Delete mutates _mapInfo.
            var nLostUnk = 0;
            foreach (var kv in _mapInfo.ToList())
            {
                if (kv.Value.fInTried == false && kv.Value.NRefCount == 0)
                {
                    Delete(kv.Key);
                    nLostUnk++;
                }
            }
        }
        else
        {
            // ---- Serialization path ----
            // Map internal (possibly sparse) ids to the dense 0..n order in
            // which "new" entries are written, so bucket positions can refer
            // to them by serialized index.
            var mapUnkIds = new Dictionary <int, int>();
            var nIds = 0;
            foreach (var kv in _mapInfo)
            {
                mapUnkIds[kv.Key] = nIds;
                var info = kv.Value;
                if (info.NRefCount != 0)
                {
                    Assert(nIds != _nNew); // this means nNew was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            nIds = 0;
            foreach (var kv in _mapInfo)
            {
                var info = kv.Value;
                if (info.fInTried)
                {
                    Assert(nIds != _nTried); // this means nTried was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            // Write per-bucket occupied positions for the new table:
            // first the count of occupied slots, then each serialized index.
            for (var bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++)
            {
                var nSize = 0;
                for (var i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (_vvNew[bucket, i] != -1)
                    {
                        nSize++;
                    }
                }
                stream.ReadWrite(ref nSize);
                for (var i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (_vvNew[bucket, i] != -1)
                    {
                        var nIndex = mapUnkIds[_vvNew[bucket, i]];
                        stream.ReadWrite(ref nIndex);
                    }
                }
            }
        }
        Check();
    }
}
/// <summary>
/// Serializes or deserializes the address-manager state (version, key,
/// new/tried tables and new-table bucket positions) over <paramref name="stream"/>,
/// direction chosen by <c>stream.Serializing</c>.
/// </summary>
/// <param name="stream">Bitcoin stream to read from or write to.</param>
/// <exception cref="FormatException">
/// Thrown on deserialization when the stored key size is not 32 bytes.
/// </exception>
/// <remarks>
/// BUG FIX: the two deserialization loops for the new and tried tables were
/// empty, so no <c>AddressInfo</c> entries were ever read — the stream cursor
/// stayed misaligned before the bucket-position section, all address data was
/// silently discarded, and <c>nLost</c> was never computed. The bodies are
/// restored to mirror the working underscore-field implementation of this same
/// method elsewhere in this file.
/// NOTE(review): member names used in the restored bodies (vRandom, mapAddr,
/// vvTried, info.nRandomPos) were inferred from this copy's naming convention —
/// confirm against the class's field declarations.
/// </remarks>
public void ReadWrite(BitcoinStream stream)
{
    // All table access happens under the manager's lock.
    lock (cs)
    {
        Check();
        if (!stream.Serializing)
        {
            // Deserializing: start from a blank state before repopulating.
            Clear();
        }
        stream.ReadWrite(ref nVersion);
        stream.ReadWrite(ref nKeySize);
        if (!stream.Serializing && nKeySize != 32)
        {
            throw new FormatException("Incorrect keysize in addrman deserialization");
        }
        stream.ReadWrite(ref nKey);
        stream.ReadWrite(ref nNew);
        stream.ReadWrite(ref nTried);
        // Bucket count is stored with bit 30 set (for version != 0 streams);
        // the XOR below strips that marker back off.
        int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
        stream.ReadWrite(ref nUBuckets);
        if (nVersion != 0)
        {
            nUBuckets ^= (1 << 30);
        }
        if (!stream.Serializing)
        {
            // Deserialize entries from the new table.
            for (int n = 0; n < nNew; n++)
            {
                AddressInfo info = new AddressInfo();
                info.ReadWrite(stream);
                mapInfo.Add(n, info);
                mapAddr[info.Address.Endpoint.Address] = n;
                info.nRandomPos = vRandom.Count;
                vRandom.Add(n);
                if (nVersion != 1 || nUBuckets != ADDRMAN_NEW_BUCKET_COUNT)
                {
                    // In case the new table data cannot be used (nVersion unknown,
                    // or bucket count wrong), immediately try to give the entry a
                    // reference based on its primary source address.
                    int nUBucket = info.GetNewBucket(nKey);
                    int nUBucketPos = info.GetBucketPosition(nKey, true, nUBucket);
                    if (vvNew[nUBucket, nUBucketPos] == -1)
                    {
                        vvNew[nUBucket, nUBucketPos] = n;
                        info.nRefCount++;
                    }
                }
            }
            nIdCount = nNew;

            // Deserialize entries from the tried table; colliding entries are dropped.
            int nLost = 0;
            for (int n = 0; n < nTried; n++)
            {
                AddressInfo info = new AddressInfo();
                info.ReadWrite(stream);
                int nKBucket = info.GetTriedBucket(nKey);
                int nKBucketPos = info.GetBucketPosition(nKey, false, nKBucket);
                if (vvTried[nKBucket, nKBucketPos] == -1)
                {
                    // Free slot: keep the entry and assign it a fresh id.
                    info.nRandomPos = vRandom.Count;
                    info.fInTried = true;
                    vRandom.Add(nIdCount);
                    mapInfo[nIdCount] = info;
                    mapAddr[info.Address.Endpoint.Address] = nIdCount;
                    vvTried[nKBucket, nKBucketPos] = nIdCount;
                    nIdCount++;
                }
                else
                {
                    nLost++;
                }
            }
            nTried -= nLost;

            // Deserialize positions in the new table (if possible).
            // Iterates the SERIALIZED bucket count, which may differ from
            // ADDRMAN_NEW_BUCKET_COUNT; positions are only applied when the
            // version and bucket count match the current constants.
            for (int bucket = 0; bucket < nUBuckets; bucket++)
            {
                int nSize = 0;
                stream.ReadWrite(ref nSize);
                for (int n = 0; n < nSize; n++)
                {
                    int nIndex = 0;
                    stream.ReadWrite(ref nIndex);
                    if (nIndex >= 0 && nIndex < nNew)
                    {
                        AddressInfo info = mapInfo[nIndex];
                        int nUBucketPos = info.GetBucketPosition(nKey, true, bucket);
                        if (nVersion == 1 && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && vvNew[bucket, nUBucketPos] == -1 && info.nRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
                        {
                            info.nRefCount++;
                            vvNew[bucket, nUBucketPos] = nIndex;
                        }
                    }
                }
            }

            // Prune new entries with refcount 0 (as a result of collisions).
            // ToList() snapshots the map because Delete mutates mapInfo.
            int nLostUnk = 0;
            foreach (var kv in mapInfo.ToList())
            {
                if (kv.Value.fInTried == false && kv.Value.nRefCount == 0)
                {
                    Delete(kv.Key);
                    nLostUnk++;
                }
            }
        }
        else
        {
            // Serialization: map internal (possibly sparse) ids to the dense
            // 0..n order in which "new" entries are written, so bucket
            // positions can refer to them by serialized index.
            Dictionary <int, int> mapUnkIds = new Dictionary <int, int>();
            int nIds = 0;
            foreach (var kv in mapInfo)
            {
                mapUnkIds[kv.Key] = nIds;
                AddressInfo info = kv.Value;
                if (info.nRefCount != 0)
                {
                    assert(nIds != nNew); // this means nNew was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            nIds = 0;
            foreach (var kv in mapInfo)
            {
                AddressInfo info = kv.Value;
                if (info.fInTried)
                {
                    assert(nIds != nTried); // this means nTried was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            // Per bucket: write the count of occupied slots, then each
            // occupied slot's serialized index.
            for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++)
            {
                int nSize = 0;
                for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (vvNew[bucket, i] != -1)
                    {
                        nSize++;
                    }
                }
                stream.ReadWrite(ref nSize);
                for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (vvNew[bucket, i] != -1)
                    {
                        int nIndex = mapUnkIds[vvNew[bucket, i]];
                        stream.ReadWrite(ref nIndex);
                    }
                }
            }
        }
        Check();
    }
}