/// <summary>
/// Promotes the entry with id <paramref name="nId"/> from the "new" tables into the
/// "tried" table, evicting (back to "new") whatever currently occupies its tried slot.
/// </summary>
/// <param name="info">The address entry being promoted; must be the entry mapped to <paramref name="nId"/>.</param>
/// <param name="nId">The entry's numeric id in the info map.</param>
private void MakeTried(AddressInfo info, int nId)
{
    // Strip the entry out of every new bucket that currently references it.
    for (var newBucket = 0; newBucket < ADDRMAN_NEW_BUCKET_COUNT; newBucket++)
    {
        var posInBucket = info.GetBucketPosition(_nKey, true, newBucket);
        if (_vvNew[newBucket, posInBucket] != nId)
        {
            continue;
        }
        _vvNew[newBucket, posInBucket] = -1;
        info.NRefCount--;
    }
    _nNew--;
    Assert(info.NRefCount == 0);

    // Locate the single tried-table slot this entry hashes to.
    var triedBucket = info.GetTriedBucket(_nKey);
    var triedPos = info.GetBucketPosition(_nKey, false, triedBucket);

    // If that slot is occupied, demote its current occupant back into the new table
    // (clearing whatever sits in the occupant's primary new-bucket position).
    var evictedId = _vvTried[triedBucket, triedPos];
    if (evictedId != -1)
    {
        Assert(_mapInfo.ContainsKey(evictedId));
        var evictedInfo = _mapInfo[evictedId];

        // Take the occupant out of the tried set.
        evictedInfo.fInTried = false;
        _vvTried[triedBucket, triedPos] = -1;
        _nTried--;

        // Re-home the occupant in its primary new bucket.
        var fallbackBucket = evictedInfo.GetNewBucket(_nKey);
        var fallbackPos = evictedInfo.GetBucketPosition(_nKey, true, fallbackBucket);
        ClearNew(fallbackBucket, fallbackPos);
        Assert(_vvNew[fallbackBucket, fallbackPos] == -1);
        evictedInfo.NRefCount = 1;
        _vvNew[fallbackBucket, fallbackPos] = evictedId;
        _nNew++;
    }
    Assert(_vvTried[triedBucket, triedPos] == -1);

    // Finally claim the (now guaranteed-empty) slot for the promoted entry.
    _vvTried[triedBucket, triedPos] = nId;
    _nTried++;
    info.fInTried = true;
}
/// <summary>
/// Marks <paramref name="addr"/> as successfully connected at <paramref name="nTime"/>,
/// and (if it is still only in the "new" tables) promotes it to the "tried" table.
/// </summary>
/// <param name="addr">The address we connected to; must match an entry exactly (including port).</param>
/// <param name="nTime">The time of the successful connection.</param>
private void Good_(NetworkAddress addr, DateTimeOffset nTime)
{
    int nId;
    var info = Find(addr, out nId);

    // Unknown address: nothing to mark as good.
    if (info == null)
    {
        return;
    }

    // Require an exact endpoint match (same port included).
    if (!info.Match(addr))
    {
        return;
    }

    // Record the successful connection.
    info.LastSuccess = nTime;
    info.LastTry = nTime;
    info.nAttempts = 0;
    // The entry's own nTime is deliberately left alone here, so we do not
    // leak information about which peers we are currently connected to.

    // Already in the tried table: nothing further to do.
    if (info.fInTried)
    {
        return;
    }

    // Scan the new buckets, starting from a random offset, until we find one
    // that currently references this entry.
    var start = GetRandInt(ADDRMAN_NEW_BUCKET_COUNT);
    var foundBucket = -1;
    for (var step = 0; step < ADDRMAN_NEW_BUCKET_COUNT; step++)
    {
        var bucket = (step + start) % ADDRMAN_NEW_BUCKET_COUNT;
        var pos = info.GetBucketPosition(nKey, true, bucket);
        if (vvNew[bucket, pos] == nId)
        {
            foundBucket = bucket;
            break;
        }
    }

    // No bucket references it; something bad happened upstream.
    // TODO: maybe re-add the node, but for now, just bail out.
    if (foundBucket == -1)
    {
        return;
    }

    // Promote the entry into the tried table.
    MakeTried(info, nId);
}
// IBitcoinSerializable Members

/// <summary>
/// Serializes or deserializes the full address-manager state over <paramref name="stream"/>.
/// Wire layout (in order): version, key size (must be 32), key, new-entry count, tried-entry
/// count, a bucket-count word XOR'ed with (1 &lt;&lt; 30), the new entries, the tried entries,
/// then per-bucket position lists for the new table.
/// </summary>
/// <param name="stream">Stream to read from or write to, per <c>stream.Serializing</c>.</param>
/// <exception cref="FormatException">On deserialization, if the stored key size is not 32.</exception>
public void ReadWrite(BitcoinStream stream)
{
    lock (_cs)
    {
        Check();
        if (!stream.Serializing)
        {
            // Start deserialization from a clean state.
            Clear();
        }
        stream.ReadWrite(ref _nVersion);
        stream.ReadWrite(ref _nKeySize);
        if (!stream.Serializing && _nKeySize != 32)
        {
            throw new FormatException("Incorrect keysize in addrman deserialization");
        }
        stream.ReadWrite(ref _nKey);
        stream.ReadWrite(ref _nNew);
        stream.ReadWrite(ref _nTried);
        // The bucket count is stored XOR'ed with (1 << 30) as a format marker; for any
        // nonzero version the marker bit is stripped back off after the round-trip.
        var nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
        stream.ReadWrite(ref nUBuckets);
        if (_nVersion != 0)
        {
            nUBuckets ^= 1 << 30;
        }
        if (!stream.Serializing)
        {
            // Deserialize entries from the new table.
            for (var n = 0; n < _nNew; n++)
            {
                var info = new AddressInfo();
                info.ReadWrite(stream);
                _mapInfo.Add(n, info);
                _mapAddr[info.Address.Endpoint.Address] = n;
                info.NRandomPos = _vRandom.Count;
                _vRandom.Add(n);
                if (_nVersion != 1 || nUBuckets != ADDRMAN_NEW_BUCKET_COUNT)
                {
                    // In case the new table data cannot be used (nVersion unknown, or bucket count wrong),
                    // immediately try to give them a reference based on their primary source address.
                    var nUBucket = info.GetNewBucket(_nKey);
                    var nUBucketPos = info.GetBucketPosition(_nKey, true, nUBucket);
                    if (_vvNew[nUBucket, nUBucketPos] == -1)
                    {
                        _vvNew[nUBucket, nUBucketPos] = n;
                        info.NRefCount++;
                    }
                }
            }
            _nIdCount = _nNew;
            // Deserialize entries from the tried table.
            var nLost = 0;
            for (var n = 0; n < _nTried; n++)
            {
                var info = new AddressInfo();
                info.ReadWrite(stream);
                var nKBucket = info.GetTriedBucket(_nKey);
                var nKBucketPos = info.GetBucketPosition(_nKey, false, nKBucket);
                if (_vvTried[nKBucket, nKBucketPos] == -1)
                {
                    info.NRandomPos = _vRandom.Count;
                    info.fInTried = true;
                    _vRandom.Add(_nIdCount);
                    _mapInfo[_nIdCount] = info;
                    _mapAddr[info.Address.Endpoint.Address] = _nIdCount;
                    _vvTried[nKBucket, nKBucketPos] = _nIdCount;
                    _nIdCount++;
                }
                else
                {
                    // Its tried slot is already taken (collision): the entry is dropped.
                    nLost++;
                }
            }
            _nTried -= nLost;
            // Deserialize positions in the new table (if possible).
            for (var bucket = 0; bucket < nUBuckets; bucket++)
            {
                var nSize = 0;
                stream.ReadWrite(ref nSize);
                for (var n = 0; n < nSize; n++)
                {
                    var nIndex = 0;
                    stream.ReadWrite(ref nIndex);
                    // Only honor stored positions when the index is in range AND the format
                    // is the one we understand (version 1, matching bucket count).
                    if (nIndex >= 0 && nIndex < _nNew)
                    {
                        var info = _mapInfo[nIndex];
                        var nUBucketPos = info.GetBucketPosition(_nKey, true, bucket);
                        if (_nVersion == 1 && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && _vvNew[bucket, nUBucketPos] == -1 && info.NRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
                        {
                            info.NRefCount++;
                            _vvNew[bucket, nUBucketPos] = nIndex;
                        }
                    }
                }
            }
            // Prune new entries with refcount 0 (as a result of collisions).
            // ToList() snapshots the map so Delete can mutate it while we iterate.
            var nLostUnk = 0;
            foreach (var kv in _mapInfo.ToList())
            {
                if (kv.Value.fInTried == false && kv.Value.NRefCount == 0)
                {
                    Delete(kv.Key);
                    nLostUnk++;
                }
            }
        }
        else
        {
            // Serialization path. Ids are compacted via mapUnkIds so the bucket position
            // lists written below refer to the entries' positions in the output stream.
            var mapUnkIds = new Dictionary <int, int>();
            var nIds = 0;
            // Write all "new" entries (those with a nonzero reference count).
            foreach (var kv in _mapInfo)
            {
                mapUnkIds[kv.Key] = nIds;
                var info = kv.Value;
                if (info.NRefCount != 0)
                {
                    Assert(nIds != _nNew); // this means nNew was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            nIds = 0;
            // Write all "tried" entries.
            foreach (var kv in _mapInfo)
            {
                var info = kv.Value;
                if (info.fInTried)
                {
                    Assert(nIds != _nTried); // this means nTried was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            // For each new bucket, write its occupancy count followed by the compacted
            // ids of its occupants.
            for (var bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++)
            {
                var nSize = 0;
                for (var i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (_vvNew[bucket, i] != -1)
                    {
                        nSize++;
                    }
                }
                stream.ReadWrite(ref nSize);
                for (var i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (_vvNew[bucket, i] != -1)
                    {
                        var nIndex = mapUnkIds[_vvNew[bucket, i]];
                        stream.ReadWrite(ref nIndex);
                    }
                }
            }
        }
        Check();
    }
}
/// <summary>
/// Attempts to add (or refresh) <paramref name="addr"/> in the "new" tables.
/// </summary>
/// <param name="addr">The address to add; non-routable addresses are rejected.</param>
/// <param name="source">The peer that told us about this address; selects the new bucket.</param>
/// <param name="nTimePenalty">Penalty subtracted from the address's timestamp before storing.</param>
/// <returns>True only when a brand-new entry was created; false for updates/rejections.</returns>
private bool Add_(NetworkAddress addr, IPAddress source, TimeSpan nTimePenalty)
{
    // Only routable addresses are kept.
    if (!addr.Endpoint.Address.IsRoutable(true))
    {
        return(false);
    }
    bool fNew = false;
    int nId;
    AddressInfo pinfo = Find(addr, out nId);
    if (pinfo != null)
    {
        // periodically update nTime
        // Update at most hourly for addresses reported as seen in the last 24h,
        // otherwise at most daily.
        bool fCurrentlyOnline = (DateTimeOffset.UtcNow - addr.Time < TimeSpan.FromSeconds(24 * 60 * 60));
        var nUpdateInterval = TimeSpan.FromSeconds(fCurrentlyOnline ? 60 * 60 : 24 * 60 * 60);
        if (addr.ntime != 0 && (pinfo.Address.ntime == 0 || pinfo.Address.Time < addr.Time - nUpdateInterval - nTimePenalty))
        {
            // Store the penalized timestamp, clamped at the Unix epoch.
            pinfo.Address.ntime = (uint)Math.Max(0L, (long)Utils.DateTimeToUnixTime(addr.Time - nTimePenalty));
        }
        // add services
        pinfo.Address.Service |= addr.Service;
        // do not update if no new information is present
        if (addr.ntime == 0 || (pinfo.Address.ntime != 0 && addr.Time <= pinfo.Address.Time))
        {
            return(false);
        }
        // do not update if the entry was already in the "tried" table
        if (pinfo.fInTried)
        {
            return(false);
        }
        // do not update if the max reference count is reached
        if (pinfo.nRefCount == ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
        {
            return(false);
        }
        // stochastic test: previous nRefCount == N: 2^N times harder to increase it
        int nFactor = 1;
        for (int n = 0; n < pinfo.nRefCount; n++)
        {
            nFactor *= 2;
        }
        if (nFactor > 1 && (GetRandInt(nFactor) != 0))
        {
            return(false);
        }
    }
    else
    {
        // Unknown address: create a fresh entry with a penalized timestamp.
        pinfo = Create(addr, source, out nId);
        pinfo.Address.ntime = (uint)Math.Max((long)0, (long)Utils.DateTimeToUnixTime(pinfo.Address.Time - nTimePenalty));
        nNew++;
        fNew = true;
    }
    // Pick the new bucket/position determined by this address and its source.
    int nUBucket = pinfo.GetNewBucket(nKey, source);
    int nUBucketPos = pinfo.GetBucketPosition(nKey, true, nUBucket);
    if (vvNew[nUBucket, nUBucketPos] != nId)
    {
        bool fInsert = vvNew[nUBucket, nUBucketPos] == -1;
        if (!fInsert)
        {
            AddressInfo infoExisting = mapInfo[vvNew[nUBucket, nUBucketPos]];
            // Displace the occupant if it is stale ("terrible"), or if it has other
            // references while the incoming entry has none.
            if (infoExisting.IsTerrible || (infoExisting.nRefCount > 1 && pinfo.nRefCount == 0))
            {
                // Overwrite the existing new table entry.
                fInsert = true;
            }
        }
        if (fInsert)
        {
            ClearNew(nUBucket, nUBucketPos);
            pinfo.nRefCount++;
            vvNew[nUBucket, nUBucketPos] = nId;
        }
        else
        {
            // Could not place the entry anywhere; if nothing references it, drop it.
            if (pinfo.nRefCount == 0)
            {
                Delete(nId);
            }
        }
    }
    return(fNew);
}
/// <summary>
/// Serializes or deserializes the address-manager state over <paramref name="stream"/>.
/// NOTE(review): the deserialization path in this version appears incomplete — see the
/// flagged empty loops below. Compare with the fully-populated ReadWrite implementation
/// elsewhere in this file.
/// </summary>
/// <param name="stream">Stream to read from or write to, per <c>stream.Serializing</c>.</param>
/// <exception cref="FormatException">On deserialization, if the stored key size is not 32.</exception>
public void ReadWrite(BitcoinStream stream)
{
    lock (cs)
    {
        Check();
        if (!stream.Serializing)
        {
            // Start deserialization from a clean state.
            Clear();
        }
        stream.ReadWrite(ref nVersion);
        stream.ReadWrite(ref nKeySize);
        if (!stream.Serializing && nKeySize != 32)
        {
            throw new FormatException("Incorrect keysize in addrman deserialization");
        }
        stream.ReadWrite(ref nKey);
        stream.ReadWrite(ref nNew);
        stream.ReadWrite(ref nTried);
        // Bucket count is round-tripped XOR'ed with (1 << 30); the marker bit is
        // stripped back off for any nonzero version.
        int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
        stream.ReadWrite(ref nUBuckets);
        if (nVersion != 0)
        {
            nUBuckets ^= (1 << 30);
        }
        if (!stream.Serializing)
        {
            // Deserialize entries from the new table.
            // BUG(review): loop body is empty — no AddressInfo is read from the stream
            // for the nNew entries, so every subsequent read (tried entries, bucket
            // positions) happens at the wrong stream offset and mapInfo stays empty
            // (the mapInfo[nIndex] lookup below would then throw). The other ReadWrite
            // implementation in this file shows the intended body.
            for (int n = 0; n < nNew; n++)
            {
            }
            nIdCount = nNew;
            // Deserialize entries from the tried table.
            // BUG(review): same problem — the nTried entries are never read, and nLost
            // is never incremented, so "nTried -= nLost" is always a no-op.
            int nLost = 0;
            for (int n = 0; n < nTried; n++)
            {
            }
            nTried -= nLost;
            // Deserialize positions in the new table (if possible).
            for (int bucket = 0; bucket < nUBuckets; bucket++)
            {
                int nSize = 0;
                stream.ReadWrite(ref nSize);
                for (int n = 0; n < nSize; n++)
                {
                    int nIndex = 0;
                    stream.ReadWrite(ref nIndex);
                    // Only honor stored positions for in-range ids under the known
                    // format (version 1, matching bucket count).
                    if (nIndex >= 0 && nIndex < nNew)
                    {
                        AddressInfo info = mapInfo[nIndex];
                        int nUBucketPos = info.GetBucketPosition(nKey, true, bucket);
                        if (nVersion == 1 && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && vvNew[bucket, nUBucketPos] == -1 && info.nRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
                        {
                            info.nRefCount++;
                            vvNew[bucket, nUBucketPos] = nIndex;
                        }
                    }
                }
            }
            // Prune new entries with refcount 0 (as a result of collisions).
            // ToList() snapshots the map so Delete can mutate it while we iterate.
            int nLostUnk = 0;
            foreach (var kv in mapInfo.ToList())
            {
                if (kv.Value.fInTried == false && kv.Value.nRefCount == 0)
                {
                    Delete(kv.Key);
                    nLostUnk++;
                }
            }
        }
        else
        {
            // Serialization path: compact ids via mapUnkIds so bucket position lists
            // refer to entries' positions in the output stream.
            Dictionary <int, int> mapUnkIds = new Dictionary <int, int>();
            int nIds = 0;
            // Write all "new" entries (nonzero reference count).
            foreach (var kv in mapInfo)
            {
                mapUnkIds[kv.Key] = nIds;
                AddressInfo info = kv.Value;
                if (info.nRefCount != 0)
                {
                    assert(nIds != nNew); // this means nNew was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            nIds = 0;
            // Write all "tried" entries.
            foreach (var kv in mapInfo)
            {
                AddressInfo info = kv.Value;
                if (info.fInTried)
                {
                    assert(nIds != nTried); // this means nTried was wrong, oh ow
                    info.ReadWrite(stream);
                    nIds++;
                }
            }
            // For each new bucket, write its occupancy count followed by the compacted
            // ids of its occupants.
            for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++)
            {
                int nSize = 0;
                for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (vvNew[bucket, i] != -1)
                    {
                        nSize++;
                    }
                }
                stream.ReadWrite(ref nSize);
                for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++)
                {
                    if (vvNew[bucket, i] != -1)
                    {
                        int nIndex = mapUnkIds[vvNew[bucket, i]];
                        stream.ReadWrite(ref nIndex);
                    }
                }
            }
        }
        Check();
    }
}