/// <summary>
/// Re-initializes the allocator's shared state and carves the base large page
/// into its fixed layout: hash table | size-to-slab lookup table (_stabs) |
/// slab list headers (_slabs) | pre-seeded slab free-lists | 256k of hash
/// buckets | ~1mb of big-mem chunks | remainder as the bump region (_cp.._ep).
/// Also resets the large-page chains according to _config.ReserveMemory.
/// </summary>
private void ResetSlabs()
{
    _cbuckets = 0;

    // hash bucket extra mem pages are returned to system on reset.
    if (_lpHash != null)
    {
        for (var ls = _lpHash; ls != null; ls = ls.next)
            ls.Release();
        _lpHash = null;
    }

    // Lazily create the base page that hosts all shared tables.
    // NOTE(review): the size argument "4" is a LargePage unit — presumably
    // a page-size multiple; confirm against LargePage's constructor.
    if (_lpBase == null)
        _lpBase = new LargePage(_virtual_memory, 4);

    _htable = (HashBucket*)_lpBase.data;

    // init shared tables with clean values.
    for (var i = 0; i < _config.HashTableSize; i++)
        _htable[i] = new HashBucket();

    // The slab lookup table starts immediately after the hash table.
    var bp = (byte*)(_htable + _config.HashTableSize);
    _stabs = (int*)bp;

    // encoding slab max size and pos like -> 32 : 0
    // (upper 16 bits = chunk size, lower 16 bits = slab index).
    _stabs[0] = 0x200000; // 32 << 16 | 0
    _stabs[1] = 0x400001; // 64 << 16 | 1

    // Slab list headers follow the lookup table (one int entry per iQuantum
    // step up to the largest slab size), pointer-aligned.
    _slabs = (SlabList*)_align((byte*)(_stabs + 1 + (iLastSlabSize + SizeofRcp) / iQuantum));

    // calculate slabs indexes and max sizes.
    // Class upper bounds run 96, 128, 192, 256, 384, ... (doubling every
    // other class); each pass stamps every iQuantum step in its range with
    // (classMax << 16) | classIndex.
    int sbts = 64, scnt = 2, pos = scnt;
    for (var half = sbts / 2; sbts < iLastSlabSize; half = sbts / 2)
    {
        for (var es = sbts + half; sbts < es; sbts += iQuantum)
            _stabs[pos++] = scnt | (es << 16);
        scnt++;
        // Second pass intentionally reuses the previous "half" (the outer
        // update has not run yet), producing the alternating growth above.
        for (var es = sbts + half; sbts < es; sbts += iQuantum)
            _stabs[pos++] = scnt | (es << 16);
        scnt++;
    }
    // Record the final slab-class count just past the last lookup entry.
    _stabs[pos] = scnt;

    bp = _align((byte*)(_slabs + scnt + 1));

    // pre-allocate ~32k into every slab slot.
    // Walk classes from largest (current sbts) down to smallest, threading
    // chunks into a singly-linked free list per class.
    var quot = sbts / 4;
    while (scnt-- > 0)
    {
        RefCountPtr* ls = null;
        var count = 0;
        for (var todo = 32768; todo > 0; count++, todo -= sbts, bp += sbts)
        {
            var rc = (RefCountPtr*)bp;
            rc->_list = ls;
            ls = rc;
        }
        _slabs[scnt].Set(ls, count);
        // Shrink the chunk size; the step is recomputed only on odd indexes —
        // presumably to retrace the alternating class growth in reverse; verify.
        sbts -= quot;
        if ((scnt & 1) != 0)
            quot = sbts / 4;
    }

    // pre-allocate 256k into hash table buckets;
    _hb_cp = (HashBucket*)bp;
    _hb_ep = (HashBucket*)(bp += 256 * 1024);

    // pre-allocate ~1mb into big-mem chunks.
    _sbigs = null;
    _cntBigs = 0;
    sbts = iBigBlockSize + SizeofRcp;
    for (int left = 1024 * (1024 + 16); left > 0; _cntBigs++, left -= sbts, bp += sbts)
    {
        var rc = (RefCountPtr*)bp;
        rc->_list = _sbigs;
        _sbigs = rc;
    }

    // pre-allocate large memory pages for cache purposes.
    // On-demand mode: budget of pages still allowed = ceil(MemoryLimit / AllocPageSize).
    if (!_config.ReserveMemory)
        _cntPages = 1 + (int)((Stats.MemoryLimit - 1) / _config.AllocPageSize);
    if (_lpList != null)
        if (_config.ReserveMemory)
        {
            // Reserved pages survive a reset untouched.
        }
        else
        {
            for (var ls = _lpList; ls != null; ls = ls.next)
                ls.Release();
            _lpList = null;
        }
    else if (_config.ReserveMemory)
        // Reserve the full memory limit up front as a chain of large pages.
        for (var bytes = Stats.MemoryLimit; bytes > 0; bytes -= _lpList.size)
            _lpList = new LargePage(_virtual_memory, _config.AllocPageSize) { next = _lpList };

    _lpNext = _lpList;

    // Whatever is left of the base page becomes the initial bump region.
    _cp = bp;
    _ep = _lpBase.data + _lpBase.size;
}
/// <summary>
/// Hands out the next run of (iHSubMask + 1) hash buckets from the current
/// bucket region, refilling the region when it is exhausted: small caches
/// (&lt; 2048 buckets) refill from a big-mem chunk, larger ones from a fresh
/// large page chained onto _lpHash.
/// </summary>
private HashBucket* GetBucket()
{
    while (true)
    {
        var run = _hb_cp;
        _hb_cp = run + (iHSubMask + 1);
        if (_hb_cp <= _hb_ep)
        {
            _cbuckets++;
            return run;
        }

        // Region exhausted — acquire a new one, then retry the carve.
        uint regionSize;
        if (_cbuckets < 2048)
        {
            _hb_cp = (HashBucket*)GetBig();
            regionSize = iBigBlockSize + SizeofRcp;
        }
        else
        {
            var page = new LargePage(_virtual_memory, 4) { next = _lpHash };
            _lpHash = page;
            regionSize = page.size;
            _hb_cp = (HashBucket*)page.data;
        }
        _hb_ep = (HashBucket*)((byte*)_hb_cp + regionSize);
    }
}
/// <summary>
/// Returns every large memory page owned by this allocator to the
/// virtual-memory provider: the base table page (_lpBase), the hash overflow
/// chain (_lpHash), and the cache page chain (_lpList).
/// </summary>
public void Dispose()
{
    if (_lpBase != null)
        _lpBase = _lpBase.Release();

    // _lpHash is a linked chain of pages (GetBucket prepends new pages with
    // { next = _lpHash }, and ResetSlabs releases it node by node). The
    // previous single Release() call here freed only the head page and
    // leaked the remainder of the chain.
    while (_lpHash != null)
    {
        var next = _lpHash.next;
        _lpHash.Release();
        _lpHash = next;
    }

    while (_lpList != null)
    {
        var next = _lpList.next;
        _lpList.Release();
        _lpList = next;
    }

    GC.SuppressFinalize(this);
}
/// <summary>
/// Bump-allocates smax bytes from the current region (_cp.._ep), recycling
/// the region's leftover tail into a slab, chopping a fresh large page when
/// the region is exhausted, and falling back to cache eviction when no pages
/// remain.
/// </summary>
/// <param name="smax">Requested chunk size in bytes, including the
/// RefCountPtr header (SizeofRcp is subtracted before the eviction fallback).</param>
private RefCountPtr* GetNewMem(int smax)
{
    var left = (int)(_ep - _cp);
    if (left >= smax)
    {
        // Fast path: carve directly from the current bump region.
        var bt = _cp;
        _cp += smax;
        return (RefCountPtr*)bt;
    }

    // drop "leftover" chunk into proper slab.
    // NOTE(review): a remainder of exactly iQuantum bytes is discarded here
    // (condition is '>' not '>='); confirm that is intentional.
    if (left > iQuantum)
    {
        var sx = left >= iLastSize ? iLastSize + 1 : left;
        // One class below _slab_id(sx) — presumably so the leftover is never
        // smaller than its list's chunk size; verify against _slab_id.
        var ix = _slab_id(sx) - 1;
        _slabs[ix].Push((RefCountPtr*)_cp);
        _cp = _ep;
    }

    // check if some large pages are left to chop.
    var lpage = _lpNext;
    if (_config.ReserveMemory)
        // Reserved mode: pages were pre-allocated in ResetSlabs; just advance.
        _lpNext = lpage != null ? lpage.next : null;
    else if (_cntPages > 0)
    {
        // On-demand mode: allocate a new page while still under the budget.
        _cntPages--;
        _lpList = lpage = new LargePage(_virtual_memory, _config.AllocPageSize) { next = _lpList };
    }
    if (lpage != null)
    {
        // Make the fresh page the current bump region and retry.
        _cp = lpage.data;
        _ep = _cp + lpage.size;
        return GetNewMem(smax);
    }

    // if all memory is in use, start cache evictions.
    // NOTE(review): EvictForSize/GetMem appear to take the payload size with
    // the RefCountPtr header excluded — confirm against their definitions.
    smax -= SizeofRcp;
    EvictForSize(smax);
    return GetMem(smax);
}