/// <summary>
/// Returns the cached value task for <paramref name="key"/>, creating and caching a new one via
/// <paramref name="factory"/> on a miss. Concurrent callers for the same key share a single factory
/// invocation; once <c>policy.RefreshTime</c> has elapsed, the next caller triggers a background
/// refresh while still being served the current value. This overload additionally emits log entries
/// when the group is configured for logging (see ShouldLog).
/// </summary>
/// <param name="key">Cache key under which the value task is stored in MemoryCache.</param>
/// <param name="factory">Produces a fresh value; invoked once per miss/refresh, not once per caller.</param>
/// <param name="policy">Expiration/refresh policy for the cache entry.</param>
/// <param name="groupName">Logical cache group; controls logging and is interned onto the cached item.</param>
/// <param name="logData">Caller-supplied context echoed into every log entry for this item.</param>
/// <param name="metricsKeys">Keys under which hit/miss/failure metrics are reported.</param>
/// <param name="taskResultType">Runtime result type; used to hook the removal callback for IRevocable results.</param>
private Task<object> GetOrAdd(string key, Func<Task<object>> factory, CacheItemPolicyEx policy, string groupName, string logData, string[] metricsKeys, Type taskResultType)
{
    var shouldLog = ShouldLog(groupName);

    // Wraps the user factory: logs progress, maintains the revoke-key reverse index, updates metrics,
    // and optionally evicts the cache entry on failure so exceptions are never cached.
    async Task<object> WrappedFactory(bool removeOnException)
    {
        try
        {
            if (shouldLog)
            {
                Log.Info(x => x("Cache item is waiting for value to be resolved", unencryptedTags: new { cacheKey = key, cacheGroup = groupName, cacheData = logData }));
            }

            var result = await factory().ConfigureAwait(false);

            if (shouldLog)
            {
                Log.Info(x => x("Cache item value is resolved", unencryptedTags: new { cacheKey = key, cacheGroup = groupName, cacheData = logData, value = GetValueForLogging(result) }));
            }

            //Can happen if item removed before task is completed
            if (MemoryCache.Contains(key))
            {
                var revocableResult = result as IRevocable;
                if (revocableResult?.RevokeKeys != null)
                {
                    // Index each revoke key back to this cache key so a later revoke can locate the entry.
                    foreach (var revokeKey in revocableResult.RevokeKeys)
                    {
                        var cacheKeys = RevokeKeyToCacheKeysIndex.GetOrAdd(revokeKey, k => new HashSet<string>());

                        lock (cacheKeys)
                        {
                            cacheKeys.Add(key);
                        }

                        Log.Info(x => x("RevokeKey added to reverse index", unencryptedTags: new { revokeKey = revokeKey, cacheKey = key, cacheGroup = groupName, cacheData = logData }));
                    }
                }
            }

            AwaitingResult.Decrement(metricsKeys);
            return (result);
        }
        catch (Exception exception)
        {
            Log.Info(x => x("Error resolving value for cache item", unencryptedTags: new { cacheKey = key, cacheGroup = groupName, cacheData = logData, removeOnException, errorMessage = exception.Message }));

            if (removeOnException)
            {
                MemoryCache.Remove(key); // Do not cache exceptions.
            }

            AwaitingResult.Decrement(metricsKeys);
            Failed.Mark(metricsKeys);
            throw;
        }
    }

    var newItem = shouldLog ?
        new AsyncCacheItem { GroupName = string.Intern(groupName), LogData = logData } :
        new AsyncCacheItem(); // if log is not needed, then do not cache unnecessary details which will blow up the memory

    Task<object> resultTask;

    // Taking a lock on the newItem in case it actually becomes the item in the cache (if no item with that key
    // existed). For another thread, it will be returned into the existingItem variable and will block on the
    // second lock, preventing concurrent mutation of the same object.
    lock (newItem.Lock)
    {
        if (typeof(IRevocable).IsAssignableFrom(taskResultType))
        {
            policy.RemovedCallback += ItemRemovedCallback;
        }

        // Surprisingly, when using MemoryCache.AddOrGetExisting() where the item doesn't exist in the cache,
        // null is returned.
        var existingItem = (AsyncCacheItem)MemoryCache.AddOrGetExisting(key, newItem, policy);

        if (existingItem == null)
        {
            // Cache miss: our newItem was inserted; start resolving its value now (evict on failure).
            Misses.Mark(metricsKeys);
            AwaitingResult.Increment(metricsKeys);
            newItem.CurrentValueTask = WrappedFactory(true);
            newItem.NextRefreshTime = DateTime.UtcNow + policy.RefreshTime;
            resultTask = newItem.CurrentValueTask;

            if (shouldLog)
            {
                Log.Info(x => x("Item added to cache", unencryptedTags: new { cacheKey = key, cacheGroup = groupName, cacheData = logData }));
            }
        }
        else
        {
            // This lock makes sure we're not mutating the same object as was added to the cache by an earlier
            // thread (which was the first to add from 'newItem', for subsequent threads it will be 'existingItem').
            lock (existingItem.Lock)
            {
                resultTask = existingItem.CurrentValueTask;

                // Start refresh if an existing refresh isn't in progress and we've passed the next refresh time.
                if (existingItem.RefreshTask?.IsCompleted != false && DateTime.UtcNow >= existingItem.NextRefreshTime)
                {
                    existingItem.RefreshTask = ((Func<Task>)(async () =>
                    {
                        try
                        {
                            // Refresh does NOT evict on failure (removeOnException: false); the old value stays usable.
                            var getNewValue = WrappedFactory(false);
                            await getNewValue.ConfigureAwait(false);
                            existingItem.CurrentValueTask = getNewValue;
                            existingItem.NextRefreshTime = DateTime.UtcNow + policy.RefreshTime;
                            // Re-insert the mutated item; NOTE(review): presumably resets the entry's
                            // expiration per the policy — confirm against CacheItemPolicyEx semantics.
                            MemoryCache.Set(new CacheItem(key, existingItem), policy);
                        }
                        catch
                        {
                            // Refresh failed; keep serving the old value and retry after the configured delay.
                            existingItem.NextRefreshTime = DateTime.UtcNow + policy.FailedRefreshDelay;
                        }
                    })).Invoke();
                }
            }

            if (resultTask.GetAwaiter().IsCompleted)
            {
                Hits.Mark(metricsKeys);
            }
            else
            {
                JoinedTeam.Mark(metricsKeys);
            }
        }
    }

    return (resultTask);
}
/// <summary>
/// Returns the cached value task for <paramref name="key"/>, creating and caching a new one via
/// <paramref name="factory"/> on a miss. Concurrent callers for the same key share a single factory
/// invocation; once <c>policy.RefreshTime</c> has elapsed, the next caller triggers a background
/// refresh while still being served the current value.
/// </summary>
/// <param name="key">Cache key under which the value task is stored in MemoryCache.</param>
/// <param name="factory">Produces a fresh value; invoked once per miss/refresh, not once per caller.</param>
/// <param name="policy">Expiration/refresh policy for the cache entry.</param>
/// <param name="metricsKeys">Keys under which hit/miss/failure metrics are reported.</param>
/// <param name="taskResultType">Runtime result type; used to hook the removal callback for IRevocable results.</param>
private Task<object> GetOrAdd(string key, Func<Task<object>> factory, CacheItemPolicyEx policy, string[] metricsKeys, Type taskResultType)
{
    // Wraps the user factory: maintains the revoke-key reverse index, updates metrics,
    // and optionally evicts the cache entry on failure so exceptions are never cached.
    Func<bool, Task<object>> wrappedFactory = async removeOnException =>
    {
        try
        {
            var result = await factory().ConfigureAwait(false);

            //Can happen if item removed before task is completed
            if (MemoryCache.Contains(key))
            {
                var revocableResult = result as IRevocable;
                if (revocableResult?.RevokeKeys != null)
                {
                    // Index each revoke key back to this cache key so a later revoke can locate the entry.
                    foreach (var revokeKey in revocableResult.RevokeKeys)
                    {
                        var cacheKeys = RevokeKeyToCacheKeysIndex.GetOrAdd(revokeKey, k => new HashSet<string>());

                        lock (cacheKeys)
                        {
                            cacheKeys.Add(key);
                        }
                    }
                }
            }

            AwaitingResult.Decrement(metricsKeys);
            return result;
        }
        catch
        {
            if (removeOnException)
            {
                MemoryCache.Remove(key); // Do not cache exceptions.
            }

            AwaitingResult.Decrement(metricsKeys);
            Failed.Mark(metricsKeys);
            throw;
        }
    };

    var newItem = new AsyncCacheItem();
    Task<object> resultTask;

    // Taking a lock on the newItem in case it actually becomes the item in the cache (if no item with that key
    // existed). For another thread, it will be returned into the existingItem variable and will block on the
    // second lock, preventing concurrent mutation of the same object.
    lock (newItem.Lock)
    {
        if (typeof(IRevocable).IsAssignableFrom(taskResultType))
        {
            policy.RemovedCallback += ItemRemovedCallback;
        }

        // Surprisingly, when using MemoryCache.AddOrGetExisting() where the item doesn't exist in the cache,
        // null is returned.
        var existingItem = (AsyncCacheItem)MemoryCache.AddOrGetExisting(key, newItem, policy);

        if (existingItem == null)
        {
            // Cache miss: our newItem was inserted; start resolving its value now (evict on failure).
            Misses.Mark(metricsKeys);
            AwaitingResult.Increment(metricsKeys);
            newItem.CurrentValueTask = wrappedFactory(true);
            newItem.NextRefreshTime = DateTime.UtcNow + policy.RefreshTime;
            resultTask = newItem.CurrentValueTask;
        }
        else
        {
            // This lock makes sure we're not mutating the same object as was added to the cache by an earlier
            // thread (which was the first to add from 'newItem', for subsequent threads it will be 'existingItem').
            lock (existingItem.Lock)
            {
                resultTask = existingItem.CurrentValueTask;

                // Start refresh if an existing refresh isn't in progress and we've passed the next refresh time.
                // BUG FIX: this previously checked 'RefreshTask == null' while the refresh lambda reset
                // RefreshTask back to null on completion. If the refresh completed synchronously, the outer
                // 'existingItem.RefreshTask = (...).Invoke()' assignment below overwrote that null with the
                // already-completed task, so RefreshTask stayed non-null forever and refreshes silently
                // stopped. Gating on IsCompleted (as the logging overload does) removes the need to reset
                // the field and is immune to that race.
                if (existingItem.RefreshTask?.IsCompleted != false && DateTime.UtcNow >= existingItem.NextRefreshTime)
                {
                    existingItem.RefreshTask = ((Func<Task>)(async () =>
                    {
                        try
                        {
                            // Refresh does NOT evict on failure (removeOnException: false); the old value stays usable.
                            var getNewValue = wrappedFactory(false);
                            await getNewValue.ConfigureAwait(false);
                            existingItem.CurrentValueTask = getNewValue;
                            existingItem.NextRefreshTime = DateTime.UtcNow + policy.RefreshTime;
                            // Re-insert the mutated item; NOTE(review): presumably resets the entry's
                            // expiration per the policy — confirm against CacheItemPolicyEx semantics.
                            MemoryCache.Set(new CacheItem(key, existingItem), policy);
                        }
                        catch
                        {
                            // Refresh failed; keep serving the old value and retry after the configured delay.
                            existingItem.NextRefreshTime = DateTime.UtcNow + policy.FailedRefreshDelay;
                        }
                    })).Invoke();
                }
            }

            if (resultTask.GetAwaiter().IsCompleted)
            {
                Hits.Mark(metricsKeys);
            }
            else
            {
                JoinedTeam.Mark(metricsKeys);
            }
        }
    }

    return resultTask;
}
/// <summary>
/// Gets the cached response for <paramref name="key"/>, or calls <paramref name="serviceMethod"/>
/// to obtain a fresh one, honoring the supplied caching settings: cache suppression (via
/// TracingContext), revoked-response behavior, and refresh behavior. May serve a stale cached
/// value while a background refresh is in flight.
/// </summary>
/// <param name="key">Cache key of the response.</param>
/// <param name="serviceMethod">The actual service call used to obtain a fresh response.</param>
/// <param name="metricsKeys">Keys under which hit metrics are reported.</param>
/// <param name="settings">Per-method caching configuration (suppression, revoke and refresh modes).</param>
/// <returns>The cached or freshly-fetched response; awaiting a cached task may rethrow a stored exception.</returns>
private async Task<object> GetOrAdd(string key, Func<Task<object>> serviceMethod, string[] metricsKeys, IMethodCachingSettings settings)
{
    AsyncCacheItem cached;

    // Local helper: count a cache hit and pass the stored value task through unchanged.
    Task<object> MarkHitAndReturnValue(Task<object> obj) { Hits.Mark(metricsKeys); return (obj); }

    // In case caching is suppressed, we don't try to obtain an existing value from the cache, or even wait on an existing request
    // in progress. Meaning we potentially issue multiple concurrent calls to the service (no request grouping).
    if (TracingContext.CacheSuppress == CacheSuppress.UpToNextServices || TracingContext.CacheSuppress == CacheSuppress.RecursiveAllDownstreamServices)
    {
        // If the caching settings specify we shouldn't cache responses when suppressed, then don't.
        if (settings.CacheResponsesWhenSupressedBehavior == CacheResponsesWhenSupressedBehavior.Disabled)
        {
            return (await (await CallService(serviceMethod, metricsKeys)).response);
        }
        // ...otherwise we do put the response in the cache so that subsequent calls to the cache do not revert to the previously-cached value.
        else
        {
            return (await TryFetchNewValue(key, serviceMethod, settings, metricsKeys, CallReason.Suppress));
        }
    }
    // Found a cached response.
    // WARNING! Immediately after calling the line below, another thread may set a new value in the cache, and lines below
    //          that call TryFetchNewValue() do it needlessly, causing a redundant refresh operation. This is a rare race
    //          condition with negligible negative effects so we can ignore it for the sake of the simplicity of the code.
    else if ((cached = (AsyncCacheItem)MemoryCache.Get(key)) != null)
    {
        // Response was revoked and settings specify we should not use revoked responses
        if (cached.IsRevoked && settings.RevokedResponseBehavior != RevokedResponseBehavior.KeepUsingRevokedResponse)
        {
            // The caching settings specify we should ignore revoked responses; issue a request.
            if (settings.RevokedResponseBehavior == RevokedResponseBehavior.FetchNewValueNextTime)
            {
                return (await GroupRequestsIfNeeded(settings, key, metricsKeys, () => TryFetchNewValue(key, serviceMethod, settings, metricsKeys, CallReason.Revoked)));
            }
            // The caching settings specify we should attempt to fetch a fresh response. If failed, return currently cached value.
            else if (settings.RevokedResponseBehavior == RevokedResponseBehavior.TryFetchNewValueNextTimeOrUseOld)
            {
                // Passing 'cached' lets TryFetchNewValue fall back to the old value on failure.
                return (await GroupRequestsIfNeeded(settings, key, metricsKeys, () => TryFetchNewValue(key, serviceMethod, settings, metricsKeys, CallReason.Revoked, cached)));
            }
            // In case RevokedResponseBehavior=TryFetchNewValueInBackgroundNextTime or the enum value is a new option we don't know
            // how to handle yet, we initiate a background refresh and return the stale response.
            else
            {
                // Fire-and-forget refresh (result deliberately discarded); caller gets the stale value now.
                _ = GroupRequestsIfNeeded(settings, key, metricsKeys, () => TryFetchNewValue(key, serviceMethod, settings, metricsKeys, CallReason.Revoked));
                return (await MarkHitAndReturnValue(cached.Value)); // Might throw stored exception
            }
        }
        // If refreshes are disabled it's because manual revokes are being performed, meaning the current response is up-to-date.
        // Same for UseRefreshesWhenDisconnectedFromCacheRevokesBus, which we currently don't support, and assume everything is okay.
        else if (settings.RefreshMode == RefreshMode.DoNotUseRefreshes || settings.RefreshMode == RefreshMode.UseRefreshesWhenDisconnectedFromCacheRevokesBus) //TODO: after DisconnectedFromCacheRevokesBus feature is done, fix this if (to be only when connected)
        {
            return (await MarkHitAndReturnValue(cached.Value)); // Might throw stored exception
        }
        // Refreshes are enabled and the refresh time passed
        else if (DateTime.UtcNow >= cached.NextRefreshTime)
        {
            // Try calling the service to obtain a fresh response. In case of failure return the old response.
            if (settings.RefreshBehavior == RefreshBehavior.TryFetchNewValueOrUseOld)
            {
                return (await GroupRequestsIfNeeded(settings, key, metricsKeys, () => TryFetchNewValue(key, serviceMethod, settings, metricsKeys, CallReason.Refresh, cached)));
            }
            // Return the current old response, and trigger a background refresh to obtain a new value.
            // TODO: In Microdot v4, we'd like to change the default to try and fetch a new value and wait for it (TryFetchNewValueOrUseOld)
            else
            {
                // Fire-and-forget refresh (result deliberately discarded); caller gets the stale value now.
                _ = GroupRequestsIfNeeded(settings, key, metricsKeys, () => TryFetchNewValue(key, serviceMethod, settings, metricsKeys, CallReason.Refresh, cached));
                return (await MarkHitAndReturnValue(cached.Value)); // Might throw stored exception
            }
        }
        // All ok, return cached value
        else
        {
            return (await MarkHitAndReturnValue(cached.Value)); // Might throw stored exception
        }
    }
    // No cached response. Call service.
    else
    {
        return (await GroupRequestsIfNeeded(settings, key, metricsKeys, () => TryFetchNewValue(key, serviceMethod, settings, metricsKeys, CallReason.New)));
    }
}