// Detaches a cache entry from this usage bucket: clears its back-reference,
// unlinks it from the last-ref list, and recycles its slot onto the free list.
// No-op if the entry is not currently tracked here.
internal void RemoveCacheEntry(MemoryCacheEntry cacheEntry)
{
    lock (this)
    {
        UsageEntryRef usageRef = cacheEntry.UsageEntryRef;
        if (usageRef.IsInvalid)
        {
            // Entry was never added (or already removed) — nothing to do.
            return;
        }

        UsageEntry[] pageEntries = _pages[usageRef.PageIndex]._entries;
        int slot = usageRef.Ref1Index;

        // Break the entry <-> bucket association in both directions.
        cacheEntry.UsageEntryRef = UsageEntryRef.INVALID;
        pageEntries[slot]._cacheEntry = null;

        // Unlink from the LRU ("last ref") list, then recycle the slot.
        RemoveEntryFromLastRefList(usageRef);
        AddUsageEntryToFreeList(usageRef);

        // Opportunistically shrink page storage now that a slot freed up.
        Reduce();

        Dbg.Trace("CacheUsageRemove", "Removed item=" + cacheEntry.Key + ",_bucket=" + _bucket + ",ref=" + usageRef);
    }
}
// Starts or stops the periodic expiration-flush timer.
//
// enable == true : creates the timer (if not already running), phase-aligned so the
//                  first tick lands on the next _tsPerBucket boundary.
// enable == false: disposes the timer (if running) and waits for any in-progress
//                  flush to finish before returning.
internal void EnableExpirationTimer(bool enable)
{
    if (enable)
    {
        if (_timerHandleRef == null)
        {
            DateTime utcNow = DateTime.UtcNow;
            // Delay until the next bucket-interval boundary so ticks stay aligned.
            TimeSpan due = _tsPerBucket - (new TimeSpan(utcNow.Ticks % _tsPerBucket.Ticks));
            Timer timer;
            // Don't capture the current ExecutionContext and its AsyncLocals onto the
            // timer; the timer lives indefinitely and would keep them alive forever.
            bool restoreFlow = false;
            try
            {
                if (!ExecutionContext.IsFlowSuppressed())
                {
                    ExecutionContext.SuppressFlow();
                    restoreFlow = true;
                }

                timer = new Timer(new TimerCallback(this.TimerCallback), null,
                                  due.Ticks / TimeSpan.TicksPerMillisecond,
                                  _tsPerBucket.Ticks / TimeSpan.TicksPerMillisecond);
            }
            finally
            {
                // Restore the current ExecutionContext for this thread.
                if (restoreFlow)
                {
                    ExecutionContext.RestoreFlow();
                }
            }
            _timerHandleRef = new GCHandleRef<Timer>(timer);
            Dbg.Trace("Cache", "Cache expiration timer created.");
        }
    }
    else
    {
        // CompareExchange ensures only one caller disposes the timer.
        GCHandleRef<Timer> timerHandleRef = _timerHandleRef;
        if (timerHandleRef != null &&
            Interlocked.CompareExchange(ref _timerHandleRef, null, timerHandleRef) == timerHandleRef)
        {
            timerHandleRef.Dispose();
            Dbg.Trace("Cache", "Cache expiration timer disposed.");
            // Spin-wait until any flush triggered by a final tick has drained.
            while (_inFlush != 0)
            {
                Thread.Sleep(100);
            }
        }
    }
}
// Moves a cache entry to the expiry bucket matching its new absolute expiration,
// or updates it in place when the bucket is unchanged. Bucket 0xff is the
// "not tracked for expiration" sentinel, in which case nothing is done.
internal void UtcUpdate(MemoryCacheEntry cacheEntry, DateTime utcNewExpires)
{
    int currentBucket = cacheEntry.ExpiresBucket;
    int targetBucket = UtcCalcExpiresBucket(utcNewExpires);

    if (currentBucket == targetBucket)
    {
        // Same bucket: just refresh the stored expiration time.
        if (currentBucket != 0xff)
        {
            _buckets[currentBucket].UtcUpdateCacheEntry(cacheEntry, utcNewExpires);
        }
        return;
    }

    Dbg.Trace("CacheExpiresUpdate", "Updating item " + cacheEntry.Key + " from bucket " + currentBucket + " to new bucket " + targetBucket);

    if (currentBucket != 0xff)
    {
        // Re-home the entry: remove from old bucket, stamp the new expiry,
        // then insert into the bucket that owns that time window.
        _buckets[currentBucket].RemoveCacheEntry(cacheEntry);
        cacheEntry.UtcAbsExp = utcNewExpires;
        _buckets[targetBucket].AddCacheEntry(cacheEntry);
    }
}
// Flushes expired items from every bucket, at most once at a time (_inFlush gate).
// When checkDelta is set, skips the pass unless MIN_FLUSH_INTERVAL has elapsed
// since the last flush (or the clock moved backwards). Returns items flushed.
private int FlushExpiredItems(bool checkDelta, bool useInsertBlock)
{
    int totalFlushed = 0;

    // Only one flusher at a time; concurrent callers bail out immediately.
    if (Interlocked.Exchange(ref _inFlush, 1) != 0)
    {
        return 0;
    }

    try
    {
        if (_timerHandleRef == null)
        {
            // Timer has been disabled — expiration processing is off.
            return 0;
        }

        DateTime utcNow = DateTime.UtcNow;
        bool intervalElapsed = utcNow - _utcLastFlush >= MIN_FLUSH_INTERVAL
                               || utcNow < _utcLastFlush; // clock went backwards
        if (!checkDelta || intervalElapsed)
        {
            _utcLastFlush = utcNow;
            foreach (ExpiresBucket bucket in _buckets)
            {
                totalFlushed += bucket.FlushExpiredItems(utcNow, useInsertBlock);
            }
            Dbg.Trace("CacheExpiresFlushTotal", "FlushExpiredItems flushed a total of " + totalFlushed + " items; Time=" + DateTime.Now.ToString("o", CultureInfo.InvariantCulture));
        }
    }
    finally
    {
        Interlocked.Exchange(ref _inFlush, 0);
    }

    return totalFlushed;
}
// Updates the stored expiration time of an entry that stays in this bucket,
// keeping the per-interval expiry counts in sync. No-op if the entry has moved
// to another bucket or is no longer tracked.
internal void UtcUpdateCacheEntry(MemoryCacheEntry cacheEntry, DateTime utcExpires)
{
    lock (this)
    {
        ExpiresEntryRef expiresRef = cacheEntry.ExpiresEntryRef;
        bool trackedHere = cacheEntry.ExpiresBucket == _bucket && !expiresRef.IsInvalid;
        if (!trackedHere)
        {
            return;
        }

        ExpiresEntry[] pageEntries = _pages[expiresRef.PageIndex]._entries;
        int slot = expiresRef.Index;
        Dbg.Assert(cacheEntry == pageEntries[slot]._cacheEntry);

        // Move the count from the old expiry interval to the new one.
        RemoveCount(pageEntries[slot]._utcExpires);
        AddCount(utcExpires);

        // Record the new time both in the bucket slot and on the entry itself.
        pageEntries[slot]._utcExpires = utcExpires;
        cacheEntry.UtcAbsExp = utcExpires;

        Dbg.Validate("CacheValidateExpires", this);
        Dbg.Trace("CacheExpiresUpdate", "Updated item " + cacheEntry.Key + " in bucket " + _bucket);
    }
}
// Returns cache memory pressure as a percentage (0-100) of _memoryLimit.
// After each Gen 2 collection this also refreshes the sampled cache size
// (the SizedRef is only updated by a Gen 2 GC) and pushes the new total to
// the cache manager so it always knows the cache's size.
protected override int GetCurrentPressure()
{
    int currentGen2Count = GC.CollectionCount(2);
    SRefMultiple sizedRef = _sizedRefMultiple;

    // Take a fresh sample only when a Gen 2 collection has happened since
    // the last call (ApproximateSize is stale otherwise).
    if (currentGen2Count != _gen2Count && sizedRef != null)
    {
        _gen2Count = currentGen2Count;

        // Flip between the two sample slots.
        Dbg.Assert(SAMPLE_COUNT == 2);
        _idx = _idx ^ 1;

        _cacheSizeSampleTimes[_idx] = DateTime.UtcNow;
        _cacheSizeSamples[_idx] = sizedRef.ApproximateSize;
#if DBG
        Dbg.Trace("MemoryCacheStats", "SizedRef.ApproximateSize=" + _cacheSizeSamples[_idx]);
#endif
        IMemoryCacheManager manager = s_memoryCacheManager;
        if (manager != null)
        {
            manager.UpdateCacheSize(_cacheSizeSamples[_idx], _memoryCache);
        }
    }

    // Without a memory limit there is no pressure to report.
    if (_memoryLimit <= 0)
    {
        return 0;
    }

    // Clamp at the limit so the result stays within 0..100 inclusive.
    long clampedSize = Math.Min(_cacheSizeSamples[_idx], _memoryLimit);
    return (int)(clampedSize * 100 / _memoryLimit);
}
// Trims entries from the store: first flushes expired items, then — if that
// did not remove enough — evicts under-used items until roughly `percent`
// percent of the entries are gone (bounded so the count lands between
// MIN_COUNT and MAX_COUNT). Returns the total number of entries removed.
internal long TrimInternal(int percent)
{
    int count = Count;
    int toTrim = 0;
    // do we need to drop a percentage of entries?
    if (percent > 0)
    {
        toTrim = (int)(((long)count * (long)percent) / 100L);
        // would this leave us above MAX_COUNT?
        int minTrim = count - MAX_COUNT;
        if (toTrim < minTrim)
        {
            toTrim = minTrim;
        }
        // would this put us below MIN_COUNT?
        int maxTrim = count - MIN_COUNT;
        if (toTrim > maxTrim)
        {
            toTrim = maxTrim;
        }
    }
    // do we need to trim?
    if (toTrim <= 0 || _disposed == 1)
    {
        return (0);
    }
    int trimmed = 0;           // number of entries evicted via the usage list
    int trimmedOrExpired = 0;  // total removed (expired + evicted)
#if DBG
    int beginTotalCount = count;
#endif
    // Expired items count toward the trim target before we evict by usage.
    trimmedOrExpired = _expires.FlushExpiredItems(true);
    if (trimmedOrExpired < toTrim)
    {
        trimmed = _usage.FlushUnderUsedItems(toTrim - trimmedOrExpired);
        trimmedOrExpired += trimmed;
    }

    if (trimmed > 0 && _perfCounters != null)
    {
        // Update values for perfcounters
        _perfCounters.IncrementBy(PerfCounterName.Trims, trimmed);
    }

#if DBG
    // BUGFIX: previously traced the pre-trim `count` as endTotalCount, so the
    // begin/end values always printed equal; re-read Count for the real value.
    Dbg.Trace("MemoryCacheStore", "TrimInternal:"
              + " beginTotalCount=" + beginTotalCount
              + ", endTotalCount=" + Count
              + ", percent=" + percent
              + ", trimmed=" + trimmed);
#endif
    return (trimmedOrExpired);
}
// Periodic memory-management pass: samples cache size, adjusts the timer, and
// trims at least `minPercent` percent of entries when under memory pressure.
// Only one instance runs at a time (_inCacheManagerThread gate).
// Returns the number of entries trimmed or expired.
internal long CacheManagerThread(int minPercent)
{
    if (Interlocked.Exchange(ref _inCacheManagerThread, 1) != 0)
    {
        return (0);
    }
    try
    {
        if (_disposed == 1)
        {
            return (0);
        }
#if DEBUG
        Dbg.Trace("MemoryCacheStats", "**BEG** CacheManagerThread " + DateTime.Now.ToString("T", CultureInfo.InvariantCulture));
#endif
        // The timer thread must always call Update so that the CacheManager
        // knows the size of the cache.
        Update();
        AdjustTimer();

        int percent = Math.Max(minPercent, GetPercentToTrim());
        long beginTotalCount = _memoryCache.GetCount();
        long trimmedOrExpired = 0;
        Stopwatch sw = new Stopwatch();
        // BUGFIX: there is a small window where the cache can be empty while
        // percent > 0; calling Trim then causes a divide-by-zero.
        // See https://github.com/dotnet/runtime/issues/1423
        if (percent > 0 && beginTotalCount > 0)
        {
            sw.Start();
            trimmedOrExpired = _memoryCache.Trim(percent);
            sw.Stop();
            // 1) don't update stats if the trim happend because MAX_COUNT was exceeded
            // 2) don't update stats unless we removed at least one entry
            if (percent > 0 && trimmedOrExpired > 0)
            {
                SetTrimStats(sw.Elapsed.Ticks, beginTotalCount, trimmedOrExpired);
            }
        }

#if DEBUG
        Dbg.Trace("MemoryCacheStats", "**END** CacheManagerThread: "
                  + ", percent=" + percent
                  + ", beginTotalCount=" + beginTotalCount
                  + ", trimmed=" + trimmedOrExpired
                  + ", Milliseconds=" + sw.ElapsedMilliseconds);
#endif
#if PERF
        Debug.WriteLine("CacheCommon.CacheManagerThread:"
                        + " minPercent= " + minPercent
                        + ", percent= " + percent
                        + ", beginTotalCount=" + beginTotalCount
                        + ", trimmed=" + trimmedOrExpired
                        + ", Milliseconds=" + sw.ElapsedMilliseconds + "\n");
#endif
        return (trimmedOrExpired);
    }
    catch (ObjectDisposedException)
    {
        // There is a small window for _memoryCache to be disposed after we
        // check our own disposed bit. No big deal — report nothing trimmed.
        return (0);
    }
    finally
    {
        Interlocked.Exchange(ref _inCacheManagerThread, 0);
    }
}
// Configures the physical-memory pressure thresholds from a percentage limit.
// Zero keeps the built-in defaults. High is clamped to at least 3%,
// low trails high by 9 points but never drops below 1%.
internal void SetLimit(int physicalMemoryLimitPercentage)
{
    if (physicalMemoryLimitPercentage == 0)
    {
        // Zero means "use defaults" — leave thresholds untouched.
        return;
    }
    int high = Math.Max(3, physicalMemoryLimitPercentage);
    _pressureHigh = high;
    _pressureLow = Math.Max(1, high - 9);
    Dbg.Trace("MemoryCacheStats", $"PhysicalMemoryMonitor.SetLimit: _pressureHigh={_pressureHigh}, _pressureLow={_pressureLow}");
}
// Starts or stops the periodic expiration-flush timer.
// enable == true : creates the timer (only if not already running), with the first
//                  tick phase-aligned to the next _tsPerBucket boundary.
// enable == false: disposes the timer (only one caller wins the CompareExchange)
//                  and blocks until any in-progress flush has drained.
internal void EnableExpirationTimer(bool enable)
{
    if (enable)
    {
        if (_timerHandleRef == null)
        {
            DateTime utcNow = DateTime.UtcNow;
            // Delay until the next bucket-interval boundary so ticks stay aligned.
            TimeSpan due = _tsPerBucket - (new TimeSpan(utcNow.Ticks % _tsPerBucket.Ticks));
            Timer timer;
            // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever
            bool restoreFlow = false;
            try
            {
                if (!ExecutionContext.IsFlowSuppressed())
                {
                    ExecutionContext.SuppressFlow();
                    restoreFlow = true;
                }

                timer = new Timer(new TimerCallback(this.TimerCallback), null,
                                  due.Ticks / TimeSpan.TicksPerMillisecond,
                                  _tsPerBucket.Ticks / TimeSpan.TicksPerMillisecond);
            }
            finally
            {
                // Restore the current ExecutionContext
                if (restoreFlow)
                {
                    ExecutionContext.RestoreFlow();
                }
            }
            _timerHandleRef = new GCHandleRef<Timer>(timer);
            Dbg.Trace("Cache", "Cache expiration timer created.");
        }
    }
    else
    {
        // CompareExchange guarantees exactly one disposer even under races.
        GCHandleRef<Timer> timerHandleRef = _timerHandleRef;
        if (timerHandleRef != null &&
            Interlocked.CompareExchange(ref _timerHandleRef, null, timerHandleRef) == timerHandleRef)
        {
            timerHandleRef.Dispose();
            Dbg.Trace("Cache", "Cache expiration timer disposed.");
            // Wait for any flush triggered by a final timer tick to finish.
            while (_inFlush != 0)
            {
                Thread.Sleep(100);
            }
        }
    }
}
// Samples the current pressure and folds it into the rolling history
// (a HISTORY_COUNT-slot ring buffer plus a running total).
internal void Update()
{
    int sample = GetCurrentPressure();

    // Advance the ring cursor, replace the oldest sample, keep the total in sync.
    _i0 = (_i0 + 1) % HISTORY_COUNT;
    _pressureTotal += sample - _pressureHist[_i0];
    _pressureHist[_i0] = sample;

    Dbg.Trace("MemoryCacheStats", this.GetType().Name + ".Update: last=" + sample + ",high=" + PressureHigh + ",low=" + PressureLow + " " + DateTime.Now.ToString("o", CultureInfo.InvariantCulture));
}
// Configures the physical-memory pressure thresholds from a percentage limit.
// Zero keeps the built-in defaults. High is clamped to at least 3%,
// low trails high by 9 points but never drops below 1%.
internal void SetLimit(int physicalMemoryLimitPercentage)
{
    if (physicalMemoryLimitPercentage == 0)
    {
        // Zero means "use defaults" — leave thresholds untouched.
        return;
    }
    int high = Math.Max(3, physicalMemoryLimitPercentage);
    _pressureHigh = high;
    _pressureLow = Math.Max(1, high - 9);
#if DBG
    Dbg.Trace("MemoryCacheStats", "PhysicalMemoryMonitor.SetLimit: _pressureHigh=" + _pressureHigh + ", _pressureLow=" + _pressureLow);
#endif
}
// Samples the current pressure and folds it into the rolling history
// (a HISTORY_COUNT-slot ring buffer plus a running total).
internal void Update()
{
    int sample = GetCurrentPressure();

    // Advance the ring cursor, replace the oldest sample, keep the total in sync.
    _i0 = (_i0 + 1) % HISTORY_COUNT;
    _pressureTotal += sample - _pressureHist[_i0];
    _pressureHist[_i0] = sample;

#if DBG
    Dbg.Trace("MemoryCacheStats", this.GetType().Name + ".Update: last=" + sample + ",high=" + PressureHigh + ",low=" + PressureLow + " " + Dbg.FormatLocalDate(DateTime.Now));
#endif
}
// Sets the cache memory limit (in megabytes; 0 = "no explicit limit") and
// derives the high/low pressure thresholds from whether a limit is in effect.
internal void SetLimit(int cacheMemoryLimitMegabytes)
{
    long requestedBytes = cacheMemoryLimitMegabytes;
    requestedBytes = requestedBytes << MEGABYTE_SHIFT;

    // VSWhidbey 546381: never override what the user specifies as the limit;
    // only call AutoPrivateBytesLimit when the user does not specify one.
    if (requestedBytes == 0)
    {
        if (_memoryLimit == 0)
        {
            // Zero means we impose a limit
            _memoryLimit = EffectiveProcessMemoryLimit;
        }
        // Non-zero _memoryLimit with no request: keep the existing limit.
    }
    else if (_memoryLimit != 0)
    {
        // Take the min of "cache memory limit" and the host's "process memory limit".
        _memoryLimit = Math.Min(_memoryLimit, requestedBytes);
    }
    else
    {
        // _memoryLimit is 0, but "cache memory limit" is non-zero, so use it as the limit
        _memoryLimit = requestedBytes;
    }

    Dbg.Trace("MemoryCacheStats", "CacheMemoryMonitor.SetLimit: _memoryLimit=" + (_memoryLimit >> MEGABYTE_SHIFT) + "Mb");

    // With a concrete limit we allow usage right up to it; without one we key
    // the thresholds off total pressure instead.
    if (_memoryLimit > 0)
    {
        _pressureHigh = 100;
        _pressureLow = 80;
    }
    else
    {
        _pressureHigh = 99;
        _pressureLow = 97;
    }

    Dbg.Trace("MemoryCacheStats", "CacheMemoryMonitor.SetLimit: _pressureHigh=" + _pressureHigh + ", _pressureLow=" + _pressureLow);
}
// Idempotent teardown: stops the cache-memory timer, waits for any running
// manager-thread pass to drain, then disposes the memory monitor.
public void Dispose()
{
    // First caller flips _disposed; everyone else returns immediately.
    if (Interlocked.Exchange(ref _disposed, 1) != 0)
    {
        return;
    }

    lock (_timerLock)
    {
        // CompareExchange ensures the timer handle is disposed exactly once.
        GCHandleRef<Timer> handle = _timerHandleRef;
        if (handle != null &&
            Interlocked.CompareExchange(ref _timerHandleRef, null, handle) == handle)
        {
            handle.Dispose();
            Dbg.Trace("MemoryCacheStats", "Stopped CacheMemoryTimers");
        }
    }

    // Let an in-flight CacheManagerThread pass finish before tearing down.
    while (_inCacheManagerThread != 0)
    {
        Thread.Sleep(100);
    }

    _cacheMemoryMonitor?.Dispose();
    // Don't need to call GC.SuppressFinalize(this) for sealed types without finalizers.
}
// Removes an entry from this expiry bucket without taking the bucket lock
// (the "NoLock" suffix suggests the caller already holds it — confirm at call sites).
// Clears the entry's expiry tracking, recycles its slot, and shrinks storage.
private void RemoveCacheEntryNoLock(MemoryCacheEntry cacheEntry)
{
    ExpiresEntryRef entryRef = cacheEntry.ExpiresEntryRef;
    // Ignore entries that belong to another bucket or are not tracked at all.
    if (cacheEntry.ExpiresBucket != _bucket || entryRef.IsInvalid)
    {
        return;
    }

    ExpiresEntry[] entries = (_pages[(entryRef.PageIndex)]._entries);
    int entryIndex = entryRef.Index;

    // Drop this entry's contribution to the per-interval expiry counts.
    RemoveCount(entries[entryIndex]._utcExpires);

    // 0xff marks "not tracked for expiration" on the entry.
    cacheEntry.ExpiresBucket = 0xff;
    cacheEntry.ExpiresEntryRef = ExpiresEntryRef.INVALID;
    entries[entryIndex]._cacheEntry = null;

    // Recycle the slot for future adds.
    AddExpiresEntryToFreeList(entryRef);

    // Bucket just went empty: restart count tracking from "now".
    if (_cEntriesInUse == 0)
    {
        ResetCounts(DateTime.UtcNow);
    }

    // Opportunistically shrink page storage.
    Reduce();

    Dbg.Trace("CacheExpiresRemove", "Removed item=" + cacheEntry.Key + ",_bucket=" + _bucket + ",ref=" + entryRef + ",now=" + Dbg.FormatLocalDate(DateTime.Now) + ",expires=" + cacheEntry.UtcAbsExp.ToLocalTime());
    Dbg.Validate("CacheValidateExpires", this);
    Dbg.Dump("CacheExpiresRemove", this);
}
// Sets the cache memory limit (in megabytes; 0 = "derive from the process
// limit") and the corresponding high/low pressure thresholds.
internal void SetLimit(int cacheMemoryLimitMegabytes)
{
    // BUGFIX: widen to long BEFORE shifting. The previous code shifted the
    // int first, which silently overflows for limits >= 2048 MB (2^31 bytes).
    long cacheMemoryLimit = (long)cacheMemoryLimitMegabytes << MEGABYTE_SHIFT;
    _memoryLimit = cacheMemoryLimit != 0 ? cacheMemoryLimit : EffectiveProcessMemoryLimit;

    Dbg.Trace("MemoryCacheStats", "CacheMemoryMonitor.SetLimit: _memoryLimit=" + (_memoryLimit >> MEGABYTE_SHIFT) + "Mb");

    // With a concrete limit we allow usage right up to it; without one the
    // thresholds sit just below 100%.
    if (_memoryLimit > 0)
    {
        _pressureHigh = 100;
        _pressureLow = 80;
    }
    else
    {
        _pressureHigh = 99;
        _pressureLow = 97;
    }

    Dbg.Trace("MemoryCacheStats", "CacheMemoryMonitor.SetLimit: _pressureHigh=" + _pressureHigh + ", _pressureLow=" + _pressureLow);
}
// Adds an entry to this usage bucket. Each entry occupies one slot holding two
// list nodes, _ref1 and _ref2, that are both linked into the single
// "last ref" list (head = most recently referenced). ref1 is inserted at the
// list head; ref2 is inserted in front of _addRef2Head — presumably so newly
// added entries keep a stable relative order; confirm against the matching
// macro-expanded code in UpdateCacheEntry/FlushUnderUsedItems.
// The inline { if (x).IsRef1 ... else if (x).IsRef2 ... else ... } blocks are
// expanded link/unlink helpers: they update the _next/_prev pointer of the
// neighboring node, whichever kind (ref1/ref2/list-end sentinel) it is.
internal void AddCacheEntry(MemoryCacheEntry cacheEntry)
{
    lock (this)
    {
        // Make sure a free slot exists, then grab one.
        if (_freeEntryList._head == -1)
        {
            Expand();
        }
        UsageEntryRef freeRef1 = GetFreeUsageEntry();
        // ref2 for the same slot is encoded as the negated ref1 index.
        UsageEntryRef freeRef2 = (new UsageEntryRef((freeRef1).PageIndex, -(freeRef1).Ref1Index));
        Debug.Assert(cacheEntry.UsageEntryRef.IsInvalid, "cacheEntry.UsageEntryRef.IsInvalid");
        cacheEntry.UsageEntryRef = freeRef1;

        UsageEntry[] entries = (_pages[(freeRef1.PageIndex)]._entries);
        int entryIndex = freeRef1.Ref1Index;
        entries[entryIndex]._cacheEntry = cacheEntry;
        entries[entryIndex]._utcDate = DateTime.UtcNow;

        // ref1 goes to the head of the list; ref2 goes in front of _addRef2Head.
        entries[entryIndex]._ref1._prev = UsageEntryRef.INVALID;
        entries[entryIndex]._ref2._next = _addRef2Head;
        if (_lastRefHead.IsInvalid)
        {
            // Empty list: the slot's two nodes become the entire list.
            entries[entryIndex]._ref1._next = freeRef2;
            entries[entryIndex]._ref2._prev = freeRef1;
            _lastRefTail = freeRef2;
        }
        else
        {
            // Link ref1 in front of the current head.
            entries[entryIndex]._ref1._next = _lastRefHead;
            // Expanded helper: point the old head's _prev at freeRef1.
            {
                if ((_lastRefHead).IsRef1)
                {
                    (_pages[((_lastRefHead).PageIndex)]._entries)[(_lastRefHead).Ref1Index]._ref1._prev = (freeRef1);
                }
                else if ((_lastRefHead).IsRef2)
                {
                    (_pages[((_lastRefHead).PageIndex)]._entries)[(_lastRefHead).Ref2Index]._ref2._prev = (freeRef1);
                }
                else
                {
                    _lastRefTail = (freeRef1);
                }
            };

            // Find ref2's neighbors: either the list tail (no _addRef2Head yet)
            // or the node currently in front of _addRef2Head.
            UsageEntryRef next, prev;
            if (_addRef2Head.IsInvalid)
            {
                prev = _lastRefTail;
                next = UsageEntryRef.INVALID;
            }
            else
            {
                prev = (_pages[(_addRef2Head.PageIndex)]._entries)[_addRef2Head.Ref2Index]._ref2._prev;
                next = _addRef2Head;
            }

            entries[entryIndex]._ref2._prev = prev;
            // Expanded helper: point prev's _next at freeRef2.
            {
                if ((prev).IsRef1)
                {
                    (_pages[((prev).PageIndex)]._entries)[(prev).Ref1Index]._ref1._next = (freeRef2);
                }
                else if ((prev).IsRef2)
                {
                    (_pages[((prev).PageIndex)]._entries)[(prev).Ref2Index]._ref2._next = (freeRef2);
                }
                else
                {
                    _lastRefHead = (freeRef2);
                }
            };
            // Expanded helper: point next's _prev at freeRef2.
            {
                if ((next).IsRef1)
                {
                    (_pages[((next).PageIndex)]._entries)[(next).Ref1Index]._ref1._prev = (freeRef2);
                }
                else if ((next).IsRef2)
                {
                    (_pages[((next).PageIndex)]._entries)[(next).Ref2Index]._ref2._prev = (freeRef2);
                }
                else
                {
                    _lastRefTail = (freeRef2);
                }
            };
        }

        // Publish the new head / add-point and count the entry.
        _lastRefHead = freeRef1;
        _addRef2Head = freeRef2;
        _cEntriesInUse++;

        Dbg.Trace("CacheUsageAdd", "Added item=" + cacheEntry.Key + ",_bucket=" + _bucket + ",ref=" + freeRef1);
    }
}
// Flushes all entries in this bucket whose expiry time is at or before utcNow.
// Two-phase protocol:
//   Phase 1 (under lock): walk the pages, unlink each expired entry from its
//     slot and thread the slots into an intrusive "in flush" list (inFlushHead),
//     rebuilding the expiry counts for survivors; set _blockReduce so page
//     storage is not compacted while slots are in limbo.
//   Between phases (no lock): call cacheStore.Remove for each flushed entry —
//     done outside the lock, presumably to avoid lock-order issues with the
//     store's own locking (confirm against MemoryCacheStore.Remove).
//   Phase 2 (under lock): return the limbo slots to the free list and Reduce().
// Returns the number of entries flushed.
internal int FlushExpiredItems(DateTime utcNow, bool useInsertBlock)
{
    // Cheap pre-check without the lock.
    if (_cEntriesInUse == 0 || GetExpiresCount(utcNow) == 0)
    {
        return (0);
    }
    Debug.Assert(_cEntriesInFlush == 0, "_cEntriesInFlush == 0");
    ExpiresEntryRef inFlushHead = ExpiresEntryRef.INVALID;
    ExpiresEntry[] entries;
    int entryIndex;
    MemoryCacheEntry cacheEntry;
    int flushed = 0;
    try
    {
        if (useInsertBlock)
        {
            _cacheExpires.MemoryCacheStore.BlockInsert();
        }
        lock (this)
        {
            Debug.Assert(_blockReduce == false, "_blockReduce == false");
            // Re-check under the lock — another thread may have flushed already.
            if (_cEntriesInUse == 0 || GetExpiresCount(utcNow) == 0)
            {
                return (0);
            }
            // Counts are rebuilt below via AddCount for surviving entries.
            ResetCounts(utcNow);
            int cPages = _cPagesInUse;
            for (int i = 0; i < _pages.Length; i++)
            {
                entries = _pages[i]._entries;
                if (entries != null)
                {
                    // Slot 0 is a header; _cFree there counts free slots on this page.
                    int cEntries = NUM_ENTRIES - ((entries)[0]._cFree);
                    for (int j = 1; j < entries.Length; j++)
                    {
                        cacheEntry = entries[j]._cacheEntry;
                        if (cacheEntry != null)
                        {
                            if (entries[j]._utcExpires > utcNow)
                            {
                                // Survivor: re-register its expiry count.
                                AddCount(entries[j]._utcExpires);
                            }
                            else
                            {
                                // Expired: detach from expiry tracking and push
                                // the slot onto the in-flush list.
                                cacheEntry.ExpiresBucket = 0xff;
                                cacheEntry.ExpiresEntryRef = ExpiresEntryRef.INVALID;
                                entries[j]._cFree = 1;
                                entries[j]._next = inFlushHead;
                                inFlushHead = new ExpiresEntryRef(i, j);
                                flushed++;
                                _cEntriesInFlush++;
                            }
                            // Stop early once every occupied slot was visited.
                            cEntries--;
                            if (cEntries == 0)
                            {
                                break;
                            }
                        }
                    }
                    // Stop early once every in-use page was visited.
                    cPages--;
                    if (cPages == 0)
                    {
                        break;
                    }
                }
            }
            if (flushed == 0)
            {
                Dbg.Trace("CacheExpiresFlushTotal", "FlushExpiredItems flushed " + flushed + " expired items, bucket=" + _bucket + "; Time=" + DateTime.Now.ToString("o", CultureInfo.InvariantCulture));
                return (0);
            }
            // Keep Reduce() from compacting pages while slots sit in the in-flush list.
            _blockReduce = true;
        }
    }
    finally
    {
        if (useInsertBlock)
        {
            _cacheExpires.MemoryCacheStore.UnblockInsert();
        }
    }
    Debug.Assert(!inFlushHead.IsInvalid, "!inFlushHead.IsInvalid");

    // Phase: remove the flushed entries from the store, outside the bucket lock.
    MemoryCacheStore cacheStore = _cacheExpires.MemoryCacheStore;
    ExpiresEntryRef current = inFlushHead;
    ExpiresEntryRef next;
    while (!current.IsInvalid)
    {
        entries = (_pages[(current.PageIndex)]._entries);
        entryIndex = current.Index;
        next = entries[entryIndex]._next;
        cacheEntry = entries[entryIndex]._cacheEntry;
        entries[entryIndex]._cacheEntry = null;
        Debug.Assert(cacheEntry.ExpiresEntryRef.IsInvalid, "cacheEntry.ExpiresEntryRef.IsInvalid");
        cacheStore.Remove(cacheEntry, cacheEntry, CacheEntryRemovedReason.Expired);
        current = next;
    }
    try
    {
        if (useInsertBlock)
        {
            _cacheExpires.MemoryCacheStore.BlockInsert();
        }
        lock (this)
        {
            // Phase 2: recycle the limbo slots onto the free list.
            current = inFlushHead;
            while (!current.IsInvalid)
            {
                entries = (_pages[(current.PageIndex)]._entries);
                entryIndex = current.Index;
                next = entries[entryIndex]._next;
                _cEntriesInFlush--;
                AddExpiresEntryToFreeList(current);
                current = next;
            }
            Debug.Assert(_cEntriesInFlush == 0, "_cEntriesInFlush == 0");
            // Compaction is safe again now that no slots are in limbo.
            _blockReduce = false;
            Reduce();
            Dbg.Trace("CacheExpiresFlushTotal", "FlushExpiredItems flushed " + flushed + " expired items, bucket=" + _bucket + "; Time=" + DateTime.Now.ToString("o", CultureInfo.InvariantCulture));
        }
    }
    finally
    {
        if (useInsertBlock)
        {
            _cacheExpires.MemoryCacheStore.UnblockInsert();
        }
    }
    return (flushed);
}
// Periodic memory-management pass: samples the cache size, adjusts the timer,
// and trims at least `minPercent` percent of entries when under pressure.
// The _inCacheManagerThread gate ensures only one pass runs at a time.
// Returns the number of entries trimmed or expired (0 when skipped/disposed).
internal long CacheManagerThread(int minPercent)
{
    if (Interlocked.Exchange(ref _inCacheManagerThread, 1) != 0)
    {
        return (0);
    }
    try
    {
        if (_disposed == 1)
        {
            return (0);
        }
        Dbg.Trace("MemoryCacheStats", "**BEG** CacheManagerThread " + DateTime.Now.ToString("T", CultureInfo.InvariantCulture));
        // The timer thread must always call Update so that the CacheManager
        // knows the size of the cache.
        Update();
        AdjustTimer();
        int percent = Math.Max(minPercent, GetPercentToTrim());
        long beginTotalCount = _memoryCache.GetCount();
        long trimmedOrExpired = 0;
        Stopwatch sw = new Stopwatch();
        // There is a small window here where the cache could be empty, but percentToTrim is > 0.
        // In this case, it makes no sense to trim, and in fact causes a divide-by-zero exception.
        // See - https://github.com/dotnet/runtime/issues/1423
        if (percent > 0 && beginTotalCount > 0)
        {
            sw.Start();
            trimmedOrExpired = _memoryCache.Trim(percent);
            sw.Stop();
            // 1) don't update stats if the trim happend because MAX_COUNT was exceeded
            // 2) don't update stats unless we removed at least one entry
            if (percent > 0 && trimmedOrExpired > 0)
            {
                SetTrimStats(sw.Elapsed.Ticks, beginTotalCount, trimmedOrExpired);
            }
        }
        Dbg.Trace("MemoryCacheStats", "**END** CacheManagerThread: "
                  + ", percent=" + percent
                  + ", beginTotalCount=" + beginTotalCount
                  + ", trimmed=" + trimmedOrExpired
                  + ", Milliseconds=" + sw.ElapsedMilliseconds);
#if PERF
        Debug.WriteLine("CacheCommon.CacheManagerThread:"
                        + " minPercent= " + minPercent
                        + ", percent= " + percent
                        + ", beginTotalCount=" + beginTotalCount
                        + ", trimmed=" + trimmedOrExpired
                        + ", Milliseconds=" + sw.ElapsedMilliseconds + Environment.NewLine);
#endif
        return (trimmedOrExpired);
    }
    catch (ObjectDisposedException)
    {
        // There is a small window for _memoryCache to be disposed after we check our own
        // disposed bit. No big deal.
        return (0);
    }
    finally
    {
        Interlocked.Exchange(ref _inCacheManagerThread, 0);
    }
}
// Evicts up to maxFlush of the least-recently-used entries from this bucket.
// Walks the "last ref" list backwards from the tail via the ref2 nodes; unless
// `force` is set, entries added within CacheUsage.NEWADD_INTERVAL are spared.
// Same two-phase protocol as the expiry flush: unlink under the lock into an
// intrusive in-flush list, remove from the store outside the lock, then
// recycle the slots under the lock again. Returns the number evicted.
internal int FlushUnderUsedItems(int maxFlush, bool force)
{
    if (_cEntriesInUse == 0)
    {
        return (0);
    }
    Debug.Assert(maxFlush > 0, "maxFlush is not greater than 0, instead is " + maxFlush);
    Debug.Assert(_cEntriesInFlush == 0, "_cEntriesInFlush == 0");
    UsageEntryRef inFlushHead = UsageEntryRef.INVALID;
    UsageEntryRef prev, prevNext;
    DateTime utcDate;
    UsageEntry[] entries;
    int entryIndex;
    MemoryCacheEntry cacheEntry;
    int flushed = 0;
    try
    {
        _cacheUsage.MemoryCacheStore.BlockInsert();
        lock (this)
        {
            Debug.Assert(_blockReduce == false, "_blockReduce == false");
            // Re-check under the lock.
            if (_cEntriesInUse == 0)
            {
                return (0);
            }
            DateTime utcNow = DateTime.UtcNow;
            // Walk from the tail (least recently used) toward the head.
            for (prev = _lastRefTail; _cEntriesInFlush < maxFlush && !prev.IsInvalid; prev = prevNext)
            {
                Debug.Assert(_cEntriesInUse > 0, "_cEntriesInUse > 0");
                // Advance to the previous ref2 node, skipping over ref1 nodes —
                // each entry has both node kinds in the same list, and this
                // walk only visits each entry once via its ref2 node.
                prevNext = (_pages[(prev.PageIndex)]._entries)[prev.Ref2Index]._ref2._prev;
                while (prevNext.IsRef1)
                {
                    prevNext = (_pages[(prevNext.PageIndex)]._entries)[prevNext.Ref1Index]._ref1._prev;
                }
                entries = (_pages[(prev.PageIndex)]._entries);
                entryIndex = prev.Ref2Index;
                if (!force)
                {
                    // Spare entries added too recently to judge their usage.
                    utcDate = entries[entryIndex]._utcDate;
                    Debug.Assert(utcDate != DateTime.MinValue, "utcDate != DateTime.MinValue");
                    if (utcNow - utcDate <= CacheUsage.NEWADD_INTERVAL && utcNow >= utcDate)
                    {
                        continue;
                    }
                }
                // Rebuild the ref1-style reference for this slot (positive index).
                UsageEntryRef prev1 = (new UsageEntryRef((prev).PageIndex, (prev).Ref2Index));
                cacheEntry = entries[entryIndex]._cacheEntry;
                Debug.Assert(cacheEntry.UsageEntryRef == prev1, "cacheEntry.UsageEntryRef == prev1");
                Dbg.Trace("CacheUsageFlushUnderUsedItem", "Flushing underused items, item=" + cacheEntry.Key + ", bucket=" + _bucket);
                // Detach from usage tracking and push onto the in-flush list,
                // reusing the slot's _ref1._next as the intrusive link.
                cacheEntry.UsageEntryRef = UsageEntryRef.INVALID;
                RemoveEntryFromLastRefList(prev1);
                entries[entryIndex]._ref1._next = inFlushHead;
                inFlushHead = prev1;
                flushed++;
                _cEntriesInFlush++;
            }
            if (flushed == 0)
            {
                Dbg.Trace("CacheUsageFlushTotal", "Flush(" + maxFlush + "," + force + ") removed " + flushed + " underused items; Time=" + DateTime.Now.ToString("o", CultureInfo.InvariantCulture));
                return (0);
            }
            // Keep Reduce() from compacting pages while slots sit in the in-flush list.
            _blockReduce = true;
        }
    }
    finally
    {
        _cacheUsage.MemoryCacheStore.UnblockInsert();
    }
    Debug.Assert(!inFlushHead.IsInvalid, "!inFlushHead.IsInvalid");

    // Phase: remove the evicted entries from the store, outside the bucket lock.
    MemoryCacheStore cacheStore = _cacheUsage.MemoryCacheStore;
    UsageEntryRef current = inFlushHead;
    UsageEntryRef next;
    while (!current.IsInvalid)
    {
        entries = (_pages[(current.PageIndex)]._entries);
        entryIndex = current.Ref1Index;
        next = entries[entryIndex]._ref1._next;
        cacheEntry = entries[entryIndex]._cacheEntry;
        entries[entryIndex]._cacheEntry = null;
        Debug.Assert(cacheEntry.UsageEntryRef.IsInvalid, "cacheEntry.UsageEntryRef.IsInvalid");
        cacheStore.Remove(cacheEntry, cacheEntry, CacheEntryRemovedReason.Evicted);
        current = next;
    }
    try
    {
        _cacheUsage.MemoryCacheStore.BlockInsert();
        lock (this)
        {
            // Phase 2: recycle the limbo slots onto the free list.
            current = inFlushHead;
            while (!current.IsInvalid)
            {
                entries = (_pages[(current.PageIndex)]._entries);
                entryIndex = current.Ref1Index;
                next = entries[entryIndex]._ref1._next;
                _cEntriesInFlush--;
                AddUsageEntryToFreeList(current);
                current = next;
            }
            Debug.Assert(_cEntriesInFlush == 0, "_cEntriesInFlush == 0");
            // Compaction is safe again now that no slots are in limbo.
            _blockReduce = false;
            Reduce();
            Dbg.Trace("CacheUsageFlushTotal", "Flush(" + maxFlush + "," + force + ") removed " + flushed + " underused items; Time=" + DateTime.Now.ToString("o", CultureInfo.InvariantCulture));
        }
    }
    finally
    {
        _cacheUsage.MemoryCacheStore.UnblockInsert();
    }
    return (flushed);
}
// Marks an entry as just-referenced: its ref2 node takes over ref1's old list
// position, and its ref1 node moves to the head of the "last ref" list.
// The inline { if (x).IsRef1 ... else if (x).IsRef2 ... else ... } blocks are
// expanded link/unlink helpers that update a neighbor's _next/_prev pointer,
// whichever node kind (ref1/ref2/list-end sentinel) the neighbor is.
internal void UpdateCacheEntry(MemoryCacheEntry cacheEntry)
{
    lock (this)
    {
        UsageEntryRef entryRef = cacheEntry.UsageEntryRef;
        // Entry not tracked in this bucket — nothing to update.
        if (entryRef.IsInvalid)
        {
            return;
        }
        UsageEntry[] entries = (_pages[(entryRef.PageIndex)]._entries);
        int entryIndex = entryRef.Ref1Index;
        // The slot's ref2 node is encoded as the negated ref1 index.
        UsageEntryRef entryRef2 = (new UsageEntryRef((entryRef).PageIndex, -(entryRef).Ref1Index));

        // Step 1: unlink ref2 from its current position.
        UsageEntryRef prev = entries[entryIndex]._ref2._prev;
        UsageEntryRef next = entries[entryIndex]._ref2._next;
        // Expanded helper: bridge prev._next past the removed node.
        {
            if ((prev).IsRef1)
            {
                (_pages[((prev).PageIndex)]._entries)[(prev).Ref1Index]._ref1._next = (next);
            }
            else if ((prev).IsRef2)
            {
                (_pages[((prev).PageIndex)]._entries)[(prev).Ref2Index]._ref2._next = (next);
            }
            else
            {
                _lastRefHead = (next);
            }
        };
        // Expanded helper: bridge next._prev past the removed node.
        {
            if ((next).IsRef1)
            {
                (_pages[((next).PageIndex)]._entries)[(next).Ref1Index]._ref1._prev = (prev);
            }
            else if ((next).IsRef2)
            {
                (_pages[((next).PageIndex)]._entries)[(next).Ref2Index]._ref2._prev = (prev);
            }
            else
            {
                _lastRefTail = (prev);
            }
        };
        // Keep the add-point valid if it was this entry's ref2 node.
        if (_addRef2Head == entryRef2)
        {
            _addRef2Head = next;
        }

        // Step 2: ref2 takes over ref1's old position in the list.
        entries[entryIndex]._ref2 = entries[entryIndex]._ref1;
        prev = entries[entryIndex]._ref2._prev;
        next = entries[entryIndex]._ref2._next;
        // Expanded helper: repoint prev._next from ref1 to ref2.
        {
            if ((prev).IsRef1)
            {
                (_pages[((prev).PageIndex)]._entries)[(prev).Ref1Index]._ref1._next = (entryRef2);
            }
            else if ((prev).IsRef2)
            {
                (_pages[((prev).PageIndex)]._entries)[(prev).Ref2Index]._ref2._next = (entryRef2);
            }
            else
            {
                _lastRefHead = (entryRef2);
            }
        };
        // Expanded helper: repoint next._prev from ref1 to ref2.
        {
            if ((next).IsRef1)
            {
                (_pages[((next).PageIndex)]._entries)[(next).Ref1Index]._ref1._prev = (entryRef2);
            }
            else if ((next).IsRef2)
            {
                (_pages[((next).PageIndex)]._entries)[(next).Ref2Index]._ref2._prev = (entryRef2);
            }
            else
            {
                _lastRefTail = (entryRef2);
            }
        };

        // Step 3: link ref1 at the head of the list (most recently referenced).
        entries[entryIndex]._ref1._prev = UsageEntryRef.INVALID;
        entries[entryIndex]._ref1._next = _lastRefHead;
        // Expanded helper: point the old head's _prev at ref1.
        {
            if ((_lastRefHead).IsRef1)
            {
                (_pages[((_lastRefHead).PageIndex)]._entries)[(_lastRefHead).Ref1Index]._ref1._prev = (entryRef);
            }
            else if ((_lastRefHead).IsRef2)
            {
                (_pages[((_lastRefHead).PageIndex)]._entries)[(_lastRefHead).Ref2Index]._ref2._prev = (entryRef);
            }
            else
            {
                _lastRefTail = (entryRef);
            }
        };
        _lastRefHead = entryRef;

        Dbg.Trace("CacheUsageUpdate", "Updated item=" + cacheEntry.Key + ",_bucket=" + _bucket + ",ref=" + entryRef);
    }
}