private static void TimerHeartBeatElapsed(object sender, System.Timers.ElapsedEventArgs e)
{
    //Send the heart beat to the DB to coordinate all data store instances
    try
    {
        _timerHeartBeat.Stop();
        using (var context = new DatastoreEntities())
        {
            var item = context.ServiceInstance.FirstOrDefault();
            if (item != null && item.InstanceId == RepositoryManager.InstanceId)
            {
                item.LastCommunication = DateTime.UtcNow.AddSeconds(-_serverTimeSkew);
                context.SaveChanges();
                CurrentMaster = item.InstanceId;
            }
            else if (item != null)
            {
                CurrentMaster = item.InstanceId;
            }
            else
            {
                CurrentMaster = Guid.Empty;
            }
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
    }
    finally
    {
        _timerHeartBeat.Start();
    }
}
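//A hedged sketch (not in the original source) of the timer wiring this handler
//assumes: the handler stops the timer on entry and restarts it in finally, so a
//slow DB round-trip cannot cause overlapping ticks. The 5-second interval is an
//assumption for illustration only.
private static void StartHeartBeatSketch()
{
    _timerHeartBeat = new System.Timers.Timer(5000); //interval is assumed
    _timerHeartBeat.Elapsed += TimerHeartBeatElapsed;
    _timerHeartBeat.Start();
}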
private static void InitializeSchemaCacheItem(RepositoryCacheItem cacheItem)
{
    using (var context = new DatastoreEntities())
    {
        var r = context.Repository.FirstOrDefault(x => x.UniqueKey == cacheItem.Key && !x.IsDeleted && x.IsInitialized);
        if (r == null)
        {
            cacheItem.IsInitialized = true;
            cacheItem.Exists = false;
            return;
        }
        var retval = new RepositorySchema { InternalID = r.RepositoryId, ChangeStamp = r.Changestamp };
        retval.LoadXml(r.DefinitionData);
        cacheItem.IsInitialized = true;
        cacheItem.Exists = true;
        cacheItem.Xml = retval.ToXml(true);
        cacheItem.VersionHash = retval.VersionHash; //Cache the version to reduce number of calculations
        cacheItem.InternalId = retval.InternalID;
        cacheItem.HasParent = (r.ParentId != null);
    }
}
private void _timer_Elapsed(object sender, System.Timers.ElapsedEventArgs e)
{
    List<Gravitybox.Datastore.EFDAL.Entity.RepositoryLog> work;
    try
    {
        //Copy the cache list and empty it so other threads can continue to use it
        using (var q = new AcquireReaderLock(QueryLogID, "QueryLog"))
        {
            work = _cache.ToList();
            _cache.Clear();
        }

        //Save all of these to disk
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            foreach (var item in work)
            {
                context.AddItem(item);
            }
            context.SaveChanges();
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogWarning($"QueryLogManager: Error={ex.Message}");
    }
}
public static void StartFullIndex()
{
    if (!ConfigHelper.EnabledDataManager) return;
    try
    {
        //If a full index is needed then add all base repositories to the list
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            var list = context.Repository
                .Where(x => x.ParentId == null)
                .Select(x => x.UniqueKey)
                .ToList();
            list.ForEach(x => _highPriority.Add(x));
        }
        InFullIndex = true;
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
    }
}
public void SetSlice(DatastoreEntities context, SummarySlice slice, int repositoryId, SummarySliceValue results)
{
    if (!ConfigHelper.AllowCaching) return;
    int changeStamp = 0;
    int queryHash = 0;
    CacheResultsSlice item;
    using (var q = new AcquireReaderLock(QueryCacheID, "QueryCache"))
    {
        //Do not cache big items
        if (results.RecordList.Count > 500) return;
        if (slice.Query != null && !string.IsNullOrEmpty(slice.Query.Keyword) && !ConfigHelper.AllowCacheWithKeyword) return;

        queryHash = slice.GetHashCode();
        changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);
        item = _cacheSlice.FirstOrDefault(x => x.QueryHash == queryHash && x.RepositoryId == repositoryId && x.ChangeStamp == changeStamp);

        //If data has not changed and results are in cache then do nothing except mark as accessed
        if (item != null)
        {
            item.Results = results;
            item.Timestamp = DateTime.Now;
            return;
        }
    }

    using (var q = new AcquireWriterLock(QueryCacheID, "QueryCache"))
    {
        //Remove previous cache
        _cacheSlice.RemoveAll(x => x.QueryHash == queryHash && x.RepositoryId == repositoryId);

        //Create a new cache item
        item = new CacheResultsSlice()
        {
            QueryHash = queryHash,
            RepositoryId = repositoryId,
            ChangeStamp = changeStamp,
            Results = results,
            QueryString = slice.ToString(),
            ParentId = RepositoryManager.GetSchemaParentId(repositoryId),
        };
        _cacheSlice.Add(item);
    }
}
internal int? GetSchemaParentId(int repositoryId)
{
    return _schemaParentCache.GetOrAdd(repositoryId, (q) =>
    {
        using (var context = new DatastoreEntities())
        {
            return context.Repository
                .Where(x => x.RepositoryId == repositoryId)
                .Select(x => x.ParentId)
                .FirstOrDefault();
        }
    });
}
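//A hedged sketch (not in the original source) of the backing field GetSchemaParentId
//assumes. Note that ConcurrentDictionary.GetOrAdd may invoke the value factory more
//than once under contention; only one result is stored, so the worst case is a
//redundant DB read, never an inconsistent value.
private readonly ConcurrentDictionary<int, int?> _schemaParentCache =
    new ConcurrentDictionary<int, int?>();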
private static int RepositoryItemCount(Guid id)
{
    try
    {
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            return context.Repository.FirstOrDefault(x => x.UniqueKey == id)?.ItemCount ?? 0;
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
        return 0;
    }
}
private static void TimerTick(object sender, System.Timers.ElapsedEventArgs e)
{
    if (!_ready) return;
    _timer.Stop();
    try
    {
        //Lock the stats list and copy it so other threads can keep collecting while we write
        List<LockInfoItem> copyCache = null;
        lock (_cache)
        {
            copyCache = _cache.ToList();
            _cache.Clear();
        }

        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            foreach (var item in copyCache)
            {
                var newItem = new LockStat()
                {
                    CurrentReadCount = item.CurrentReadCount,
                    Elapsed = item.Elapsed,
                    Failure = item.Failure,
                    IsWriteLockHeld = item.IsWriteLockHeld,
                    ThreadId = item.ThreadId,
                    WaitingReadCount = item.WaitingReadCount,
                    WaitingWriteCount = item.WaitingWriteCount,
                    DateStamp = item.DateStamp,
                    TraceInfo = item.TraceInfo,
                };
                context.AddItem(newItem);
            }
            context.SaveChanges();
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
    }
    finally
    {
        _timer.Start();
    }
}
internal void Populate()
{
    try
    {
        using (var context = new DatastoreEntities())
        {
            var list = context.Repository
                .Select(x => new { x.RepositoryId, x.ParentId })
                .ToList();
            foreach (var item in list)
            {
                _schemaParentCache.TryAdd(item.RepositoryId, item.ParentId);
            }
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, "Populate schema parent ID cache failed");
    }
}
public SummarySliceValue GetSlice(DatastoreEntities context, SummarySlice slice, int repositoryId)
{
    if (!ConfigHelper.AllowCaching) return null;
    using (var q = new AcquireReaderLock(QueryCacheID, "QueryCache"))
    {
        var queryHash = slice.GetHashCode();
        var changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);
        var item = _cacheSlice.FirstOrDefault(x => x.QueryHash == queryHash && x.RepositoryId == repositoryId && x.ChangeStamp == changeStamp);
        if (item == null) return null;
        item.Timestamp = DateTime.Now;
        item.HitCount++;
        return item.Results;
    }
}
private static Dictionary<string, string> GetSettings()
{
    try
    {
        using (var context = new DatastoreEntities(ConnectionString))
        {
            return context.ConfigurationSetting.ToList().ToDictionary(x => x.Name.ToLower(), x => x.Value);
        }
    }
    catch (Exception ex)
    {
        if (ex.ToString().Contains("Timeout expired"))
            LoggerCQ.LogWarning(ex, "ConfigHelper.GetSettings timeout expired");
        else
            LoggerCQ.LogError(ex);
        return null;
    }
}
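//A hedged usage sketch (not in the original source). Keys are stored lower-cased
//above, so lookups must be lower-cased too; this helper and the idea of parsing a
//setting into an int are assumptions for illustration.
private static int GetIntSetting(string name, int defaultValue)
{
    var settings = GetSettings();
    if (settings != null &&
        settings.TryGetValue(name.ToLower(), out var raw) &&
        int.TryParse(raw, out var value))
    {
        return value;
    }
    return defaultValue; //missing, unparsable, or DB unavailable => default
}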
private void DoHouseKeeping()
{
    using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
    {
        var list = context.Housekeeping.ToList();
        foreach (var item in list)
        {
            if (item.TypeValue == HousekeepingTaskType.ClearRepositoryLog)
            {
                var obj = ServerUtilities.DeserializeObject<HkClearRepositoryLog>(item.Data);
                if (obj.Run())
                {
                    context.DeleteItem(item);
                    context.SaveChanges();
                }
            }
            else
            {
                LoggerCQ.LogWarning($"Unknown housekeeping type: {item.Type}");
            }
        }
    }
}
public void QueueTask(IHousekeepingTask task)
{
    try
    {
        if (task == null) return;
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            var newItem = new EFDAL.Entity.Housekeeping
            {
                Data = ServerUtilities.SerializeObject(task),
                Type = (int)task.Type,
            };
            context.AddItem(newItem);
            context.SaveChanges();
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
    }
}
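//A hedged usage sketch (not in the original source) showing the queue-and-forget
//pattern QueueTask enables, using the one task type visible in DoHouseKeeping
//above. The RepositoryId property on HkClearRepositoryLog is an assumption.
public void QueueClearRepositoryLogSketch(int repositoryId)
{
    //The task is serialized and persisted; DoHouseKeeping deserializes it on a
    //later pass and removes the row only after Run() reports success.
    this.QueueTask(new HkClearRepositoryLog { RepositoryId = repositoryId });
}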
public void Run()
{
    //Only have 1 running async query for a repository
    while (!_runningList.TryAdd(_schema.ID))
    {
        System.Threading.Thread.Sleep(1000);
    }
    try
    {
        var timer = Stopwatch.StartNew();
        List<DimensionItem> dimensionList = null;
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            dimensionList = _dimensionCache.Get(context, _schema, _schema.InternalID, new List<DataItem>());
        }

        //There is no such thing as a list field that is not a dimension
        var dataTableFields = _schema.FieldList.Where(x => x.DataType != RepositorySchema.DataTypeConstants.List).ToList();
        var nonListDimensionFields = _schema.DimensionList.Where(x => x.DataType != RepositorySchema.DataTypeConstants.List).ToList();
        var listDimensionFields = _schema.DimensionList.Where(x => x.DimensionType == RepositorySchema.DimensionTypeConstants.List).ToList();
        var parameters = new List<SqlParameter>();
        var sql = SqlHelper.QueryAsync(_schema, _schema.InternalID, this.Query, dimensionList, parameters, ConfigHelper.ConnectionString);

        #region Get all the list dimensions for those fields
        var dimensionMapper = new ConcurrentDictionary<long, Dictionary<long, List<long>>>();
        var timerList = Stopwatch.StartNew();
        Parallel.ForEach(listDimensionFields, new ParallelOptions { MaxDegreeOfParallelism = 4 }, (ditem) =>
        {
            try
            {
                var valueMapper = new Dictionary<long, List<long>>();
                dimensionMapper.TryAdd(ditem.DIdx, valueMapper);
                var dTable = SqlHelper.GetListTableName(_schema.ID, ditem.DIdx);
                //This is the fastest way I could find to load this data
                using (var connection = new SqlConnection(ConfigHelper.ConnectionString))
                {
                    connection.Open();
                    using (var command = new SqlCommand($"SELECT Y.[{SqlHelper.RecordIdxField}], Y.[DVIdx] FROM [{dTable}] Y {SqlHelper.NoLockText()} ORDER BY Y.[{SqlHelper.RecordIdxField}], Y.[DVIdx]", connection))
                    {
                        using (var reader = command.ExecuteReader())
                        {
                            while (reader.Read())
                            {
                                var recordIndex = (long)reader[0];
                                var dvidx = (long)reader[1];
                                if (!valueMapper.ContainsKey(recordIndex))
                                {
                                    valueMapper.Add(recordIndex, new List<long>());
                                }
                                valueMapper[recordIndex].Add(dvidx);
                            }
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                LoggerCQ.LogError(ex);
                throw;
            }
        });
        timerList.Stop();
        #endregion

        var fileName = Path.Combine(ConfigHelper.AsyncCachePath, this.Key.ToString());
        var rowCount = 0;
        using (var tempFile = XmlTextWriter.Create(fileName))
        {
            tempFile.WriteStartDocument();
            tempFile.WriteStartElement("root");
            using (var connection = new SqlConnection(ConfigHelper.ConnectionString))
            {
                var command = new SqlCommand(sql, connection);
                command.CommandTimeout = 3600;
                command.Parameters.AddRange(parameters.ToArray());
                connection.Open();
                using (var reader = command.ExecuteReader())
                {
                    if (reader.HasRows)
                    {
                        #region Write headers
                        tempFile.WriteStartElement("headers");
                        foreach (var h in dataTableFields)
                        {
                            var d = nonListDimensionFields.FirstOrDefault(x => x.Name == h.Name);
                            if (d == null)
                            {
                                tempFile.WriteElementString("h", h.Name);
                            }
                            else
                            {
                                tempFile.WriteStartElement("h");
                                tempFile.WriteAttributeString("didx", d.DIdx.ToString());
                                tempFile.WriteValue(d.Name);
                                tempFile.WriteEndElement();
                            }
                        }
                        foreach (var d in listDimensionFields)
                        {
                            tempFile.WriteStartElement("h");
                            tempFile.WriteAttributeString("didx", d.DIdx.ToString());
                            tempFile.WriteValue(d.Name);
                            tempFile.WriteEndElement(); //h
                        }
                        tempFile.WriteEndElement(); //headers
                        #endregion

                        #region Write Dimension Defs
                        tempFile.WriteStartElement("dimensions");
                        foreach (var d in dimensionList)
                        {
                            tempFile.WriteStartElement("d");
                            tempFile.WriteAttributeString("didx", d.DIdx.ToString());
                            tempFile.WriteAttributeString("name", d.Name);
                            foreach (var r in d.RefinementList)
                            {
                                tempFile.WriteStartElement("r");
                                tempFile.WriteAttributeString("dvidx", r.DVIdx.ToString());
                                tempFile.WriteValue(r.FieldValue);
                                tempFile.WriteEndElement(); //r
                            }
                            tempFile.WriteEndElement(); //d
                        }
                        tempFile.WriteEndElement(); //dimensions
                        #endregion

                        #region Write Items
                        tempFile.WriteStartElement("items");
                        while (reader.Read())
                        {
                            var index = 0;
                            tempFile.WriteStartElement("i");

                            //Write static fields
                            var recordIndex = reader.GetInt64(dataTableFields.Count);
                            var timestamp = reader.GetInt32(dataTableFields.Count + 1);
                            tempFile.WriteAttributeString("ri", recordIndex.ToString());
                            tempFile.WriteAttributeString("ts", timestamp.ToString());

                            #region Write all data table (Z) fields
                            foreach (var field in dataTableFields)
                            {
                                if (reader.IsDBNull(index))
                                {
                                    tempFile.WriteElementString("v", "~■!N");
                                }
                                else
                                {
                                    switch (field.DataType)
                                    {
                                        case RepositorySchema.DataTypeConstants.Bool:
                                            tempFile.WriteElementString("v", reader.GetBoolean(index) ? "1" : "0");
                                            break;
                                        case RepositorySchema.DataTypeConstants.DateTime:
                                            tempFile.WriteElementString("v", reader.GetDateTime(index).Ticks.ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.Float:
                                            tempFile.WriteElementString("v", reader.GetDouble(index).ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.GeoCode:
                                            var geo = (Microsoft.SqlServer.Types.SqlGeography)reader.GetValue(index);
                                            tempFile.WriteElementString("v", $"{geo.Lat}|{geo.Long}");
                                            break;
                                        case RepositorySchema.DataTypeConstants.Int:
                                            tempFile.WriteElementString("v", reader.GetInt32(index).ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.Int64:
                                            tempFile.WriteElementString("v", reader.GetInt64(index).ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.String:
                                            tempFile.WriteElementString("v", StripNonValidXMLCharacters(reader.GetString(index)));
                                            break;
                                        default:
                                            break;
                                    }
                                }
                                index++;
                            }
                            #endregion

                            #region Write List fields
                            foreach (var field in listDimensionFields)
                            {
                                if (dimensionMapper.ContainsKey(field.DIdx) && dimensionMapper[field.DIdx].ContainsKey(recordIndex))
                                {
                                    tempFile.WriteElementString("v", dimensionMapper[field.DIdx][recordIndex].ToList().ToStringList("|"));
                                }
                            }
                            #endregion

                            tempFile.WriteEndElement(); //i
                            rowCount++;
                        }
                        tempFile.WriteEndElement(); //items
                        #endregion
                    }
                    reader.Close();
                }
            }
            tempFile.WriteEndElement(); //root
        }

        //Write file that signifies we are done
        var zipFile = Extensions.ZipFile(fileName);
        var outFile = fileName + ".zzz";
        File.Move(zipFile, outFile);
        var size = (new FileInfo(outFile)).Length;
        System.Threading.Thread.Sleep(300);
        File.Delete(fileName);
        System.Threading.Thread.Sleep(300);
        timer.Stop();
        LoggerCQ.LogInfo($"QueryThreaded Complete: ID={_schema.ID}, File={outFile}, Size={size}, Count={rowCount}, ListElapsed={timerList.ElapsedMilliseconds}, Elapsed={timer.ElapsedMilliseconds}");
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, $"ID={_schema.ID}, Query=\"{this.Query.ToString()}\"");
        File.WriteAllText(Path.Combine(ConfigHelper.AsyncCachePath, this.Key.ToString() + ".error"), "error");
    }
    finally
    {
        this.IsComplete = true;
        _runningList.Remove(_schema.ID);
    }
}
/// <summary>
/// Invalidate the cache for a specific Repository
/// </summary>
public void Clear(int repositoryId, Guid id, string reason, string cacheSubKey = null)
{
    try
    {
        var count = 0;
        var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
        this.FTSReadyCache.Clear(id);
        ListDimensionCache.Clear(repositoryId);
        using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            if (cacheSubKey == null)
            {
                //Clear the entire cache
                count += cache.Count;
                cache.Clear();
                LoggerCQ.LogTrace($"QueryCache: Clear Full, ID={id}, Count={count}");
            }
            else
            {
                //Clear all entries matching the subkey AND those with no key, since it is unknown what data those hold
                count += cache.RemoveAll(x => x.SubKey == cacheSubKey.ToLower() || x.SubKey == null);
                LoggerCQ.LogTrace($"QueryCache: SubKey={cacheSubKey}, ID={id}, Count={count}");
            }
            if (_schemaDatagrouping.ContainsKey(repositoryId))
            {
                _schemaDatagrouping.Remove(repositoryId);
            }
        }

        //Find caches where this is the parent and clear them all too
        var parentCaches = RepositoryCacheManager.All.Where(x => x.ParentId == repositoryId);
        foreach (var pcache in parentCaches)
        {
            using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(pcache.ID, RSeed), "QueryCache"))
            {
                count += pcache.Count;
                pcache.Clear();
            }
        }

        using (var q = new AcquireWriterLock(QueryCacheID, "QueryCache"))
        {
            count += _cacheSlice.RemoveAll(x => x.RepositoryId == repositoryId);
            count += _cacheSlice.RemoveAll(x => x.ParentId == repositoryId);
        }

        //If the query cache is being cleared then the List dimension count cache should be too
        ListDimensionCache.Clear(repositoryId);

        //Log the invalidation
        Task.Factory.StartNew(() =>
        {
            using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
            {
                var newItem = new EFDAL.Entity.CacheInvalidate { Count = count, RepositoryId = repositoryId };
                newItem.SetValue(EFDAL.Entity.CacheInvalidate.FieldNameConstants.Reason, reason, true);
                newItem.SetValue(EFDAL.Entity.CacheInvalidate.FieldNameConstants.Subkey, cacheSubKey, true);
                context.AddItem(newItem);
                context.SaveChanges();
            }
        });
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
    }
}
private void ProcessDeleted()
{
    try
    {
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            const int CHUNKSIZE = 20000;
            var deleteList = context.DeleteQueue.Where(x => x.IsReady).ToList();
            foreach (var dItem in deleteList)
            {
                var rKey = context.Repository.Where(x => x.RepositoryId == dItem.RepositoryId).Select(x => x.UniqueKey).FirstOrDefault();
                if (rKey != Guid.Empty)
                {
                    var schema = RepositoryManager.GetSchema(rKey);
                    if (schema != null)
                    {
                        #region Parent Schema
                        RepositorySchema parentSchema = null;
                        if (schema.ParentID != null)
                        {
                            if (string.IsNullOrEmpty(schema.ObjectAlias))
                                throw new Exception("An inherited repository must have an alias.");
                            parentSchema = RepositoryManager.GetSchema(schema.ParentID.Value, true);
                            if (parentSchema == null)
                                throw new Exception("Parent schema not found");
                            if (!context.Repository.Any(x => x.UniqueKey == schema.ParentID && x.ParentId == null))
                                throw new Exception("Cannot create a repository from a non-base parent");
                            schema = parentSchema.Merge(schema);
                        }
                        #endregion

                        var listDimensions = schema.FieldList
                            .Where(x => x.DataType == RepositorySchema.DataTypeConstants.List && x is DimensionDefinition)
                            .Cast<DimensionDefinition>()
                            .ToList();

                        foreach (var dimension in listDimensions)
                        {
                            var timer = Stopwatch.StartNew();
                            var listTable = SqlHelper.GetListTableName(schema.ID, dimension.DIdx);
                            //If the dimension is defined on the parent then target the parent's list table
                            if (parentSchema != null && parentSchema.DimensionList.Any(x => x.DIdx == dimension.DIdx))
                            {
                                listTable = SqlHelper.GetListTableName(schema.ParentID.Value, dimension.DIdx);
                            }

                            var newParam = new SqlParameter
                            {
                                DbType = DbType.Int64,
                                IsNullable = false,
                                ParameterName = "@ParentRowId",
                                Value = dItem.RowId
                            };

                            //Delete in chunks so the transaction log and lock footprint stay bounded
                            var sbList = new StringBuilder();
                            sbList.AppendLine("--MARKER 19");
                            sbList.AppendLine($"SET ROWCOUNT {CHUNKSIZE};");
                            sbList.AppendLine("set nocount off;");
                            sbList.AppendLine($"WITH S([{SqlHelper.RecordIdxField}])");
                            sbList.AppendLine("AS");
                            sbList.AppendLine("(");
                            sbList.AppendLine($"select [RecordIdx] from [DeleteQueueItem] {SqlHelper.NoLockText()}");
                            sbList.AppendLine($"where [ParentRowId] = {newParam.ParameterName}");
                            sbList.AppendLine(")");
                            sbList.AppendLine($"DELETE FROM [{listTable}]");
                            sbList.AppendLine($"FROM S inner join [{listTable}] on S.[{SqlHelper.RecordIdxField}] = [{listTable}].[{SqlHelper.RecordIdxField}];");
                            var lastCount = 0;
                            do
                            {
                                lastCount = SqlHelper.ExecuteSql(ConfigHelper.ConnectionString, sbList.ToString(), new[] { newParam });
                            } while (lastCount >= CHUNKSIZE);
                            timer.Stop();
                        } //Dimension
                    }
                }

                //Remove from queue
                context.DeleteQueueItem.Where(x => x.ParentRowId == dItem.RowId).Delete();
                context.DeleteQueue.Where(x => x.RowId == dItem.RowId).Delete();
                context.SaveChanges();
            }
        }
    }
    catch (Exception ex)
    {
        //Log before rethrowing so the failure is not silent
        LoggerCQ.LogError(ex);
        throw;
    }
}
public void Set(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id, DataQueryResults results)
{
    if (!ConfigHelper.AllowCaching) return;
    if (results == null) return;
    //Do not cache big items
    if (results.RecordList.Count > 100) return;
    if (!string.IsNullOrEmpty(query.Keyword) && !this.FTSReadyCache.IsReady(id)) return;
    //if (!string.IsNullOrEmpty(query.Keyword) && !ConfigHelper.AllowCacheWithKeyword) return;

    var timer = Stopwatch.StartNew();
    var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
    long lockTime = 0;
    var changeStamp = 0;
    var queryHash = 0;
    var subCacheKey = GetSubKey(schema, query);
    try
    {
        //Some queries should be cached a long time
        var longCache = !query.FieldFilters.Any() &&
                        !query.FieldSorts.Any() &&
                        string.IsNullOrEmpty(query.Keyword) &&
                        !query.SkipDimensions.Any();
        var extraMinutes = longCache ? 480 : 0;

        var coreHash = 0;
        CacheResultsQuery item;
        using (var q = new AcquireReaderLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            lockTime += q.LockTime;
            queryHash = query.GetHashCode();
            if (!query.ExcludeCount && query.IncludeDimensions && !query.IncludeEmptyDimensions)
            {
                coreHash = query.CoreHashCode();
            }
            changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);
            lock (cache)
            {
                item = cache?.FirstOrDefault(x => x.QueryHash == queryHash && x.ChangeStamp == changeStamp);
            }
            //If data has not changed and results are in cache then do nothing except mark as accessed
            if (item != null)
            {
                item.Results = results;
                item.Timestamp = DateTime.Now.AddMinutes(extraMinutes);
                item.SubKey = subCacheKey;
                return;
            }
        }

        lock (cache)
        {
            using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
            {
                lockTime += q.LockTime;
                //Create a new cache item
                item = new CacheResultsQuery()
                {
                    QueryHash = queryHash,
                    QueryCoreHash = coreHash,
                    RepositoryId = repositoryId,
                    ChangeStamp = changeStamp,
                    Results = results,
                    QueryString = query.ToString(),
                    ParentId = RepositoryManager.GetSchemaParentId(repositoryId),
                    Timestamp = DateTime.Now.AddMinutes(extraMinutes),
                    SubKey = subCacheKey,
                };
                cache.Add(item);
            }
        }
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogError(ex, $"RepositoryId={id}, Elapsed={timer.ElapsedMilliseconds}, ID={id}, LockTime={lockTime}, Count={cache.Count}, QueryHash={queryHash}, ChangeStamp={changeStamp}");
        throw;
    }
    finally
    {
        timer.Stop();
        if (timer.ElapsedMilliseconds > 50)
        {
            LoggerCQ.LogWarning($"Slow cache set: Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, ID={id}, Query=\"{query.ToString()}\"");
        }
        LoggerCQ.LogTrace($"QueryCache: Set: SubCacheKey={subCacheKey}");
    }
}
public DataQueryResults Get(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id, out bool isCore)
{
    isCore = false;
    if (!ConfigHelper.AllowCaching) return null;
    long lockTime = 0;
    int queryHash = 0;
    int coreHash = 0;
    int changeStamp = 0;

    //Compute the hashes and change stamp in parallel with acquiring the lock
    var task1 = Task.Factory.StartNew(() =>
    {
        queryHash = query.GetHashCode();
        coreHash = query.CoreHashCode();
        changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);
    });

    var timer = new Stopwatch();
    var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
    try
    {
        using (var q = new AcquireReaderLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            lockTime = q.LockTime;
            timer.Start();

            //Ensure that the pre-calculations are complete
            task1.Wait();

            CacheResultsQuery item = null;
            lock (cache)
            {
                item = cache?.FirstOrDefault(x => x.QueryHash == queryHash && x.ChangeStamp == changeStamp);
            }
            if (item == null)
            {
                if (ConfigHelper.AllowCoreCache)
                {
                    //TODO: OPTIMIZE: this is a linear search of thousands of items!!!!
                    //If no exact match was found then see if the core properties match.
                    //If so we can use the dimension and count values and just replace the records collection
                    lock (cache)
                    {
                        item = cache?.FirstOrDefault(x => x.QueryCoreHash == coreHash && x.ChangeStamp == changeStamp);
                    }
                }
                if (item == null) return null;
                isCore = true;
                item.HitCount++;
                return item.Results;
            }
            item.Timestamp = DateTime.Now;
            item.HitCount++;
            return item.Results;
        }
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogError(ex, $"RepositoryId={id}, Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, QueryHash={queryHash}, ChangeStamp={changeStamp}, ID={id}");
        throw;
    }
    finally
    {
        timer.Stop();
        if (timer.ElapsedMilliseconds > 50)
        {
            LoggerCQ.LogWarning($"Slow cache get: Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, ID={id}, QueryString=\"{query.ToString()}\"");
        }
    }
}
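//A hedged sketch (not in the original source) of how Get and Set above pair up in
//a caller. RunQuery is a hypothetical stand-in for the real SQL execution path;
//on a core hit (isCore == true) a real caller could keep the cached dimensions
//and counts and re-materialize only the record list.
public DataQueryResults GetOrExecuteSketch(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id)
{
    var cached = this.Get(context, schema, query, repositoryId, id, out bool isCore);
    if (cached != null && !isCore) return cached; //exact hit: hash and change stamp both match

    var fresh = RunQuery(context, schema, query, repositoryId); //hypothetical helper
    this.Set(context, schema, query, repositoryId, id, fresh);  //cache for the next caller
    return fresh;
}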
private static void TimerTick(object sender, System.Timers.ElapsedEventArgs e)
{
    if (!_ready) return;
    _timer.Stop();
    try
    {
        //Lock the stats list and build queries
        Dictionary<Guid, List<RepositorySummmaryStats>> copyCache = null;
        lock (_statCache)
        {
            copyCache = _statCache.ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
            _statCache.Clear();
        }

        var repositoryCache = new Dictionary<Guid, Repository>();
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            var typeValues = Enum.GetValues(typeof(RepositoryActionTypeConstants)).Cast<int>().ToList();
            foreach (var typeId in typeValues)
            {
                foreach (var key in copyCache.Keys)
                {
                    var q = (RepositoryActionConstants)typeId;
                    var queryList = copyCache[key].Where(x => x.ActionType == q).ToList();
                    var elapsed = queryList.Sum(x => x.Elapsed);                  //Total elapsed time
                    var lockTime = queryList.Sum(x => x.LockTime);                //Total lock time
                    var waitingLocks = queryList.Sum(x => x.WaitingLocksOnEntry); //Total write locks on entry
                    var readLockCount = queryList.Sum(x => x.ReadLockCount);      //Total read locks on entry
                    var count = queryList.Count;                                  //Number of queries
                    var itemCount = 0;
                    if (queryList.Count > 0)
                    {
                        itemCount = queryList.Sum(x => x.ItemCount);
                    }

                    //Ensure repository still exists (may have been removed in interim)
                    Repository repository = null;
                    if (repositoryCache.ContainsKey(key))
                    {
                        repository = repositoryCache[key];
                    }
                    else
                    {
                        repository = context.Repository.FirstOrDefault(x => x.UniqueKey == key);
                        repositoryCache[key] = repository;
                    }

                    if (repository != null && (count > 0 || elapsed > 0 || itemCount > 0))
                    {
                        var newItem = new RepositoryStat()
                        {
                            Count = count,
                            Elapsed = elapsed,
                            LockTime = lockTime,
                            ItemCount = itemCount,
                            RepositoryActionTypeId = typeId,
                            RepositoryId = repository.RepositoryId,
                            WaitingLocks = waitingLocks,
                            ReadLockCount = readLockCount,
                        };
                        context.AddItem(newItem);
                    }
                }
            }
            context.SaveChanges();
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
    }
    finally
    {
        _timer.Start();
    }
}
public List<DimensionItem> Get(DatastoreEntities context, RepositorySchema schema, int id, IEnumerable<DataFieldUpdate> list = null)
{
    if (schema == null) throw new Exception("The schema is null");
    try
    {
        var dimensionValueTableName = SqlHelper.GetDimensionValueTableName(schema.ID);
        var dimensionValueTableNameParent = string.Empty;
        var parameters = new List<SqlParameter>();
        var didxParam = 0;
        lock (_cache)
        {
            var retval = GetCache(context, id, schema);

            #region Do this after the "GetCache" call as it will flush the cache if need be
            //If there is a parent repository then get the parent schema, as we will need to know
            //which dimension table to use for the different fields
            RepositorySchema diff = null;
            if (schema.ParentID != null)
            {
                if (!_parentSchemaCache.ContainsKey(schema.ID))
                {
                    var parentSchema = RepositoryManager.GetSchema(schema.ParentID.Value);
                    _parentSchemaCache.Add(schema.ID, schema.Subtract(parentSchema));
                }
                diff = _parentSchemaCache[schema.ID];
                dimensionValueTableNameParent = SqlHelper.GetDimensionValueTableName(schema.ParentID.Value);
            }
            #endregion

            var sb = new StringBuilder();
            parameters = new List<SqlParameter>();
            didxParam = 0;
            var dvidxParam = 0;

            #region Find new refinements in list
            //Create a cache of all next keys
            var _nextKeys = new Dictionary<DimensionItem, long>();
            //TODO: this is taking too long on every request (~1%)
            retval.Results.ForEach(z => _nextKeys.Add(z, z.RefinementList.OrderByDescending(x => x.DVIdx).Select(x => x.DVIdx).FirstOrDefault() + 1));

            var paramIndex = 0;
            var needSave = false;
            if (list != null)
            {
                foreach (var item in list.Where(x => x.FieldValue != null))
                {
                    var values = new HashSet<string>();
                    var dimension = schema.FieldList.FirstOrDefault(x => x.Name == item.FieldName) as DimensionDefinition;
                    if (dimension != null)
                    {
                        if (dimension.DataType == RepositorySchema.DataTypeConstants.List)
                        {
                            var l = (string[])item.FieldValue;
                            foreach (var v in l)
                            {
                                if (!values.Contains(v))
                                {
                                    values.Add(v);
                                }
                            }
                        }
                        else
                        {
                            if ((dimension.DataType == RepositorySchema.DataTypeConstants.Int || dimension.DataType == RepositorySchema.DataTypeConstants.Int64) && dimension.NumericBreak != null && dimension.NumericBreak > 0)
                            {
                                //Scale the value down to its numeric-break bucket
                                var v = Convert.ToInt64(item.FieldValue);
                                var scaled = ((v / dimension.NumericBreak) * dimension.NumericBreak).ToString();
                                if (!values.Contains(scaled))
                                {
                                    values.Add(scaled);
                                }
                            }
                            else
                            {
                                var v = SqlHelper.GetTypedDimValue(dimension.DataType, item.FieldValue);
                                if (!values.Contains(v))
                                {
                                    values.Add(v);
                                }
                            }
                        }
                    }

                    //For each unique value, insert it if it does not already exist
                    foreach (var v in values?.Where(x => x != null).ToList())
                    {
                        long baseDVIdx;
                        if (schema.ParentID != null && diff.DimensionList.Any(x => x.DIdx == dimension.DIdx))
                        {
                            baseDVIdx = ((dimension.DIdx - Constants.DGROUPEXT) + 1) * Constants.DVALUEGROUPEXT; //Child repository
                        }
                        else
                        {
                            baseDVIdx = ((dimension.DIdx - Constants.DGROUP) + 1) * Constants.DVALUEGROUP; //Normal
                        }

                        var dbDimension = retval.Results.FirstOrDefault(x => x.DIdx == dimension.DIdx);
                        if (!dbDimension.RefinementList.Any(x => x.FieldValue == v))
                        {
                            if (!_nextKeys.ContainsKey(dbDimension)) //If it was empty then default to the base index
                            {
                                _nextKeys.Add(dbDimension, baseDVIdx);
                            }
                            if (_nextKeys[dbDimension] == 1) //If it was empty then default to the base index
                            {
                                _nextKeys[dbDimension] = baseDVIdx;
                            }
                            var nextDVIdx = _nextKeys[dbDimension];
                            _nextKeys[dbDimension]++;

                            var newParam = new SqlParameter
                            {
                                DbType = DbType.String,
                                IsNullable = false,
                                ParameterName = $"@__z{paramIndex}",
                                Value = v
                            };
                            parameters.Add(newParam);
                            paramIndex++;

                            if (diff == null)
                            {
                                //This is for stand-alone tables. There is only one dimension table
                                var paramDIdx = new SqlParameter { DbType = DbType.Int64, IsNullable = false, ParameterName = $"@__didx{didxParam}", Value = dimension.DIdx };
                                parameters.Add(paramDIdx);
                                var paramDVIdx = new SqlParameter { DbType = DbType.Int64, IsNullable = false, ParameterName = $"@__dvidx{dvidxParam}", Value = nextDVIdx };
                                parameters.Add(paramDVIdx);
                                didxParam++;
                                dvidxParam++;
                                sb.AppendLine($"if not exists(select * from [{dimensionValueTableName}] where [DIdx] = {paramDIdx.ParameterName} and [DVIdx] = {paramDVIdx.ParameterName})");
                                sb.AppendLine($"insert into [{dimensionValueTableName}] ([DIdx], [DVIdx], [Value]) values ({paramDIdx.ParameterName}, {paramDVIdx.ParameterName}, {newParam.ParameterName})");
                            }
                            else
                            {
                                //This is for inherited tables. Figure out which dimension table to use
                                var tempTable = dimensionValueTableNameParent;
                                if (diff.DimensionList.Any(x => x.DIdx == dimension.DIdx))
                                {
                                    tempTable = dimensionValueTableName;
                                }
                                var paramDIdx = new SqlParameter { DbType = DbType.Int64, IsNullable = false, ParameterName = $"@__didx{didxParam}", Value = dimension.DIdx };
                                parameters.Add(paramDIdx);
                                var paramDVIdx = new SqlParameter { DbType = DbType.Int64, IsNullable = false, ParameterName = $"@__dvidx{dvidxParam}", Value = nextDVIdx };
                                parameters.Add(paramDVIdx);
                                didxParam++;
                                dvidxParam++;
                                sb.AppendLine($"if not exists(select * from [{tempTable}] where [DIdx] = {paramDIdx.ParameterName} and [DVIdx] = {paramDVIdx.ParameterName})");
                                sb.AppendLine($"insert into [{tempTable}] ([DIdx], [DVIdx], [Value]) values ({paramDIdx.ParameterName}, {paramDVIdx.ParameterName}, {newParam.ParameterName})");
                            }
                            needSave = true;
                        }
                    }
                }

                if (needSave)
                {
                    SqlHelper.ExecuteSql(ConfigHelper.ConnectionString, sb.ToString(), parameters, false);
                    Clear(id);
                    SqlHelper.MarkDimensionsChanged(id);
                    retval = GetCache(context, id, schema);
                    needSave = false;
                }
            }
            #endregion

            return retval.Results;
        }
    }
    catch (Exception ex)
    {
        //Log before rethrowing so the failure is not silent
        LoggerCQ.LogError(ex);
        throw;
    }
}
private CacheResults GetCache(DatastoreEntities context, int id, RepositorySchema schema)
{
    try
    {
        var dimensionValueTableName = SqlHelper.GetDimensionValueTableName(schema.ID);
        var dimensionValueTableNameParent = string.Empty;
        lock (_cache)
        {
            var dimensionStamp = RepositoryManager.GetDimensionChanged(context, id);
            var retval = _cache.FirstOrDefault(x => x.RepositoryId == id);

            //Check the repository DimensionStamp and if it has changed then reload the dimensions
            if (retval != null && retval.DimensionStamp != dimensionStamp)
            {
                Clear(id);
                retval = null;
            }

            if (retval == null)
            {
                #region Parent table stuff
                if (schema.ParentID != null)
                {
                    if (!_parentSchemaCache.ContainsKey(schema.ID))
                    {
                        var parentSchema = RepositoryManager.GetSchema(schema.ParentID.Value);
                        _parentSchemaCache.Add(schema.ID, schema.Subtract(parentSchema));
                    }
                    dimensionValueTableNameParent = SqlHelper.GetDimensionValueTableName(schema.ParentID.Value);
                }
                #endregion

                retval = new CacheResults()
                {
                    RepositoryId = id,
                    ParentId = RepositoryManager.GetSchemaParentId(id)
                };
                _cache.Add(retval);

                var sb = new StringBuilder();
                sb.AppendLine($"select v.DIdx, v.DVIdx, v.Value from [{dimensionValueTableName}] v");
                //If there is a parent schema then UNION its dimension tables
                if (schema.ParentID != null)
                {
                    sb.AppendLine($"union select v.DIdx, v.DVIdx, v.Value from [{dimensionValueTableNameParent}] v");
                }
                sb.AppendLine("order by DIdx, DVIdx");
                var ds = SqlHelper.GetDataset(ConfigHelper.ConnectionString, sb.ToString(), null);

                retval.Results = new List<DimensionItem>();
                //Load all dimensions
                foreach (var dimension in schema.DimensionList)
                {
                    retval.Results.Add(new DimensionItem
                    {
                        DIdx = dimension.DIdx,
                        Name = dimension.Name,
                    });
                }

                foreach (DataRow dr in ds.Tables[0].Rows)
                {
                    var didx = (long)dr["DIdx"];
                    long dvidx = 0;
                    string v = null;
                    if (dr["DVIdx"] != System.DBNull.Value)
                    {
                        dvidx = (long)dr["DVIdx"];
                        v = (string)dr["Value"];
                    }
                    var d = retval.Results.FirstOrDefault(x => x.DIdx == didx);
                    if (d == null)
                    {
                        d = new DimensionItem
                        {
                            DIdx = (int)didx,
                            Name = schema.DimensionList.Where(x => x.DIdx == didx).Select(x => x.Name).FirstOrDefault()
                        };
                        retval.Results.Add(d);
                    }
                    if (dvidx != 0)
                    {
                        d.RefinementList.Add(new RefinementItem { DVIdx = dvidx, FieldValue = v, DIdx = didx });
                    }
                    //Rearrange all refinements alpha (for debugging and such)
                    //retval.Results.ForEach(ditem => ditem.RefinementList = ditem.RefinementList.OrderBy(x => x.FieldValue).ToList());
                }
            }

            retval.DimensionStamp = dimensionStamp;
            retval.Timestamp = DateTime.Now; //Accessed
            return retval;
        }
    }
    catch (Exception ex)
    {
        //Log before rethrowing so the failure is not silent
        LoggerCQ.LogError(ex);
        throw;
    }
}
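//A hedged illustration (not in the original source) of the SQL shape GetCache
//emits for an inherited repository. The bracketed table names come from
//SqlHelper.GetDimensionValueTableName and are hypothetical here; the UNION folds
//the parent's dimension values into the child's view of the schema:
//
//  select v.DIdx, v.DVIdx, v.Value from [ChildDimensionValueTable] v
//  union select v.DIdx, v.DVIdx, v.Value from [ParentDimensionValueTable] v
//  order by DIdx, DVIdx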