/// <summary>
/// Clear everything and restart clean
/// </summary>
public void Reset()
{
    RepositoryCacheManager.Reset();
}
/// <summary>
/// Add query results to the cache for a specific repository
/// </summary>
public void Set(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id, DataQueryResults results)
{
    if (!ConfigHelper.AllowCaching) return;
    if (results == null) return;

    //Do not cache big items
    if (results.RecordList.Count > 100) return;

    if (!string.IsNullOrEmpty(query.Keyword) && !this.FTSReadyCache.IsReady(id)) return;
    //if (!string.IsNullOrEmpty(query.Keyword) && !ConfigHelper.AllowCacheWithKeyword) return;

    var timer = Stopwatch.StartNew();
    var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
    long lockTime = 0;
    var changeStamp = 0;
    var queryHash = 0;
    var subCacheKey = GetSubKey(schema, query);
    try
    {
        //Queries with no filters, sorts, keyword, or skipped dimensions should be cached a long time
        var longCache = !query.FieldFilters.Any() && !query.FieldSorts.Any() && string.IsNullOrEmpty(query.Keyword) && !query.SkipDimensions.Any();
        var extraMinutes = longCache ? 480 : 0;

        var coreHash = 0;
        CacheResultsQuery item;
        using (var q = new AcquireReaderLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            lockTime += q.LockTime;
            queryHash = query.GetHashCode();
            if (!query.ExcludeCount && query.IncludeDimensions && !query.IncludeEmptyDimensions)
                coreHash = query.CoreHashCode();
            changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);

            lock (cache)
            {
                item = cache?.FirstOrDefault(x => x.QueryHash == queryHash && x.ChangeStamp == changeStamp);
            }

            //If the data has not changed and the query is already cached then refresh the entry and mark it as accessed
            if (item != null)
            {
                item.Results = results;
                item.Timestamp = DateTime.Now.AddMinutes(extraMinutes);
                item.SubKey = subCacheKey;
                return;
            }
        }

        lock (cache)
        {
            using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
            {
                lockTime += q.LockTime;

                //Create a new cache item
                item = new CacheResultsQuery()
                {
                    QueryHash = queryHash,
                    QueryCoreHash = coreHash,
                    RepositoryId = repositoryId,
                    ChangeStamp = changeStamp,
                    Results = results,
                    QueryString = query.ToString(),
                    ParentId = RepositoryManager.GetSchemaParentId(repositoryId),
                    Timestamp = DateTime.Now.AddMinutes(extraMinutes),
                    SubKey = subCacheKey,
                };
                cache.Add(item);
            }
        }
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogError(ex, $"RepositoryId={repositoryId}, Elapsed={timer.ElapsedMilliseconds}, ID={id}, LockTime={lockTime}, Count={cache.Count}, QueryHash={queryHash}, ChangeStamp={changeStamp}");
        throw;
    }
    finally
    {
        timer.Stop();
        if (timer.ElapsedMilliseconds > 50)
            LoggerCQ.LogWarning($"Slow cache set: Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, ID={id}, Query=\"{query.ToString()}\"");
        LoggerCQ.LogTrace($"QueryCache: Set: SubCacheKey={subCacheKey}");
    }
}
/// <summary>
/// Invalidate the cache for a specific Repository
/// </summary>
public void Clear(int repositoryId, Guid id, string reason, string cacheSubKey = null)
{
    try
    {
        var count = 0;
        var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
        this.FTSReadyCache.Clear(id);
        ListDimensionCache.Clear(repositoryId);
        using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            if (cacheSubKey == null)
            {
                //Clear entire cache
                count += cache.Count;
                cache.Clear();
                LoggerCQ.LogTrace($"QueryCache: Clear Full, ID={id}, Count={count}");
            }
            else
            {
                //Clear all based on subkey AND with no subkey since it is unknown what data is in those
                count += cache.RemoveAll(x => x.SubKey == cacheSubKey.ToLower() || x.SubKey == null);
                LoggerCQ.LogTrace($"QueryCache: SubKey={cacheSubKey}, ID={id}, Count={count}");
            }

            if (_schemaDatagrouping.ContainsKey(repositoryId))
                _schemaDatagrouping.Remove(repositoryId);
        }

        //Find caches where this is the parent and clear them all too
        var parentCaches = RepositoryCacheManager.All.Where(x => x.ParentId == repositoryId);
        foreach (var pcache in parentCaches)
        {
            using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(pcache.ID, RSeed), "QueryCache"))
            {
                count += pcache.Count;
                pcache.Clear();
            }
        }

        using (var q = new AcquireWriterLock(QueryCacheID, "QueryCache"))
        {
            count += _cacheSlice.RemoveAll(x => x.RepositoryId == repositoryId);
            count += _cacheSlice.RemoveAll(x => x.ParentId == repositoryId);
        }

        //If the query cache is being cleared then the List dimension count cache should be too
        ListDimensionCache.Clear(repositoryId);

        //Log the invalidation
        Task.Factory.StartNew(() =>
        {
            using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
            {
                var newItem = new EFDAL.Entity.CacheInvalidate { Count = count, RepositoryId = repositoryId };
                newItem.SetValue(EFDAL.Entity.CacheInvalidate.FieldNameConstants.Reason, reason, true);
                newItem.SetValue(EFDAL.Entity.CacheInvalidate.FieldNameConstants.Subkey, cacheSubKey, true);
                context.AddItem(newItem);
                context.SaveChanges();
            }
        });
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
    }
}
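// Illustrative sketch only (not part of the original class): how a write path might
// invalidate this cache after persisting records. The _queryCache, repositoryGuid and
// groupingKey names below are assumptions; only the Clear signature above is real.
//
//   //After records for a repository are added, updated, or deleted:
//   _queryCache.Clear(repositoryId, repositoryGuid, reason: "Records updated");
//
//   //When only one data grouping changed, pass its sub-key; per the branch above this
//   //removes entries tagged with that sub-key plus any entries with no sub-key at all:
//   _queryCache.Clear(repositoryId, repositoryGuid, reason: "Grouping updated", cacheSubKey: groupingKey);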
/// <summary>
/// Retrieve cached results for a query; returns null on a cache miss
/// </summary>
public DataQueryResults Get(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id, out bool isCore)
{
    isCore = false;
    if (!ConfigHelper.AllowCaching) return null;

    long lockTime = 0;
    int queryHash = 0;
    int coreHash = 0;
    int changeStamp = 0;

    //Calculate the hashes and change stamp in parallel with acquiring the lock
    var task1 = Task.Factory.StartNew(() =>
    {
        queryHash = query.GetHashCode();
        coreHash = query.CoreHashCode();
        changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);
    });

    var timer = new Stopwatch();
    var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
    try
    {
        using (var q = new AcquireReaderLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            lockTime = q.LockTime;
            timer.Start();

            //Ensure that the pre-calculations are complete
            task1.Wait();

            CacheResultsQuery item = null;
            lock (cache)
            {
                item = cache?.FirstOrDefault(x => x.QueryHash == queryHash && x.ChangeStamp == changeStamp);
            }

            if (item == null)
            {
                if (ConfigHelper.AllowCoreCache)
                {
                    //TODO: OPTIMIZE: this is a linear search of thousands of items!!!!
                    //If there was no exact match then see if the core properties match.
                    //If so we can use the dimension and count values and just replace the records collection.
                    lock (cache)
                    {
                        item = cache?.FirstOrDefault(x => x.QueryCoreHash == coreHash && x.ChangeStamp == changeStamp);
                    }
                }
                if (item == null) return null;

                isCore = true;
                item.HitCount++;
                return item.Results;
            }

            item.Timestamp = DateTime.Now;
            item.HitCount++;
            return item.Results;
        }
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogError(ex, $"RepositoryId={repositoryId}, Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, QueryHash={queryHash}, ChangeStamp={changeStamp}, ID={id}");
        throw;
    }
    finally
    {
        timer.Stop();
        if (timer.ElapsedMilliseconds > 50)
            LoggerCQ.LogWarning($"Slow cache get: Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, ID={id}, QueryString=\"{query.ToString()}\"");
    }
}
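// Illustrative sketch only (not part of the original class): the read-through pattern
// these methods support. The executeQuery delegate and this helper's name are assumptions;
// Get/Set are the real members defined above. On a core-only hit this sketch simply re-runs
// the full query; a caller could instead reuse the cached dimensions/counts and replace only
// the record list.
public DataQueryResults GetOrExecute(DatastoreEntities context, RepositorySchema schema, DataQuery query,
    int repositoryId, Guid id, Func<DataQuery, DataQueryResults> executeQuery)
{
    //Try the cache first
    var cached = this.Get(context, schema, query, repositoryId, id, out bool isCore);
    if (cached != null && !isCore)
        return cached;

    //Cache miss (or core-only hit): run the real query and cache the results for next time
    var results = executeQuery(query);
    this.Set(context, schema, query, repositoryId, id, results);
    return results;
}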