private void _timerStats_Elapsed(object sender, System.Timers.ElapsedEventArgs e)
{
    try
    {
        //Only refresh statistics every 12 hours
        if (DateTime.Now.Subtract(_lastTimerStats).TotalHours <= 12) return;
        _lastTimerStats = DateTime.Now;

        //Copy the pending set and clear it so other threads can keep queueing
        HashSet<Guid> copy = null;
        lock (_statisticsCache)
        {
            copy = new HashSet<Guid>(_statisticsCache);
            _statisticsCache.Clear();
        }

        var timer = Stopwatch.StartNew();
        foreach (var g in copy)
        {
            SqlHelper.UpdateStatistics(g);
        }
        timer.Stop();

        if (copy.Count > 0)
        {
            LoggerCQ.LogDebug($"Update table statistics: Count={copy.Count}, Elapsed={timer.ElapsedMilliseconds}");
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogWarning(ex);
    }
}

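//Hedged usage sketch (not part of the original source): callers are assumed to
//register a repository for a statistics refresh by adding its ID to the shared
//set, which the elapsed handler above drains every 12 hours. The method name
//QueueStatisticsUpdate is hypothetical; the lock discipline matches the handler.
private void QueueStatisticsUpdate(Guid repositoryId)
{
    lock (_statisticsCache)
    {
        //HashSet semantics dedupe repeated requests for the same repository
        _statisticsCache.Add(repositoryId);
    }
}
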
private void _timer_Elapsed(object sender, System.Timers.ElapsedEventArgs e)
{
    List<Gravitybox.Datastore.EFDAL.Entity.RepositoryLog> work;
    try
    {
        //Copy the cache list and empty it so other threads can continue to use it
        using (var q = new AcquireReaderLock(QueryLogID, "QueryLog"))
        {
            work = _cache.ToList();
            _cache.Clear();
        }

        //Save all of these to disk
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            foreach (var item in work)
            {
                context.AddItem(item);
            }
            context.SaveChanges();
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogWarning($"QueryLogManager: Error={ex.Message}");
    }
}

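//Hedged sketch (illustration only): producers are assumed to append log rows to
//_cache under the same lock the flush handler above uses; the method name Log
//is hypothetical and not part of the original class.
public void Log(Gravitybox.Datastore.EFDAL.Entity.RepositoryLog item)
{
    using (var q = new AcquireReaderLock(QueryLogID, "QueryLog"))
    {
        _cache.Add(item);
    }
}
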
public Task Execute()
{
    return Task.Factory.StartNew(() =>
    {
        //Nothing to do
        if (!_configuration.query.IncludeDimensions && !_configuration.query.IncludeRecords) return;

        var timer = Stopwatch.StartNew();
        try
        {
            _dsList = SqlHelper.GetDataset(ConfigHelper.ConnectionString, _sql, _listParameters);

            //Log long running
            timer.Stop();
            if (timer.ElapsedMilliseconds > 10000)
            {
                LoggerCQ.LogWarning($"ListDimensionBuilderDelay: ID={_configuration.schema.ID}, DIdx={_newDimension?.DIdx}, Elapsed={timer.ElapsedMilliseconds}, Query=\"{_configuration.query.ToString()}\"");
            }
        }
        catch (Exception ex)
        {
            RepositoryHealthMonitor.HealthCheck(_configuration.schema.ID);
            DataManager.AddSkipItem(_configuration.schema.ID);
            var message = ex.Message;
            if (message.Contains("Timeout Expired"))
            {
                message = "Timeout Expired"; //Do not show whole message, no value
            }
            LoggerCQ.LogError($"ListDimensionBuilder: ID={_configuration.schema.ID}, DIdx={_newDimension?.DIdx}, Elapsed={timer.ElapsedMilliseconds}, Query=\"{_configuration.query.ToString()}\", Error={message}");
        }
    });
}

private void _timer_Elapsed(object sender, System.Timers.ElapsedEventArgs e)
{
    _timer.Stop();
    try
    {
        lock (_cache)
        {
            //Keep only the last N items
            _cache.OrderByDescending(x => x.Timestamp)
                .Skip(MAXITEMS)
                .ToList()
                .ForEach(x => _cache.Remove(x));

            //Purge anything not used in an hour
            _cache.Where(x => DateTime.Now.Subtract(x.Timestamp).TotalMinutes >= 60)
                .ToList()
                .ForEach(x => _cache.Remove(x));
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogWarning(ex, "DimensionCache housekeeping failed");
    }
    finally
    {
        _timer.Start();
    }
}

/// <summary />
protected virtual void Dispose(bool disposing)
{
    if (!m_Disposed)
    {
        if (disposing && m_Lock != null)
        {
            var traceInfo = m_Lock.TraceInfo;
            var elapsed = (int)DateTime.Now.Subtract(_initTime).TotalMilliseconds;
            if (!_inError)
            {
                if (_lockIndex != 0)
                {
                    DateTime dt;
                    if (!m_Lock.HeldReads.TryRemove(_lockIndex, out dt))
                    {
                        LoggerCQ.LogWarning($"HeldReads was not released. ObjectId={m_Lock.ObjectId}, Index={_lockIndex}, TraceInfo={m_Lock.TraceInfo}, Elapsed={elapsed}");
                    }
                    _lockIndex = 0;
                }
                m_Lock.TraceInfo = null;
                m_Lock.HoldingThreadId = null;
            }
            if (ConfigHelper.AllowReadLocking)
            {
                m_Lock.ExitReadLock();
                if (elapsed > 60000)
                {
                    LoggerCQ.LogWarning($"ReaderLock Long: Elapsed={elapsed}, ID={_id}");
                }
            }
        }
    }
    m_Disposed = true;
}

public Task Execute()
{
    return Task.Factory.StartNew(() =>
    {
        //Nothing to do
        if (!_configuration.query.IncludeDimensions) return;
        if (_doExecute)
        {
            var timer = Stopwatch.StartNew();
            try
            {
                _datset = SqlHelper.GetDataset(ConfigHelper.ConnectionString, _sql, _configuration.parameters);

                //Log long running
                timer.Stop();
                if (timer.ElapsedMilliseconds > 10000)
                {
                    LoggerCQ.LogWarning($"NormalDimensionBuilderDelay: ID={_configuration.schema.ID}, Elapsed={timer.ElapsedMilliseconds}, Query=\"{_configuration.query.ToString()}\"");
                }
            }
            catch (Exception ex)
            {
                RepositoryHealthMonitor.HealthCheck(_configuration.schema.ID);
                DataManager.AddSkipItem(_configuration.schema.ID);
                LoggerCQ.LogError(ex, $"NormalDimensionBuilder: ID={_configuration.schema.ID}, Query=\"{_configuration.query.ToString()}\", Error={ex.Message}");
            }
        }
    });
}

private void TimerElapsed(object sender, System.Timers.ElapsedEventArgs e)
{
    var lockTime = 0;
    var cacheCount = 0;
    _timer.Stop();
    try
    {
        var timer = Stopwatch.StartNew();
        var count = 0;
        var allCaches = RepositoryCacheManager.All;
        cacheCount = allCaches.Count;
        foreach (var cache in allCaches)
        {
            using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
            {
                lockTime += q.LockTime;
                _maxItems = System.Math.Max(0, ConfigHelper.QueryCacheCount);

                //Purge anything not used in the last N minutes
                count += cache.RemoveAll(x => DateTime.Now.Subtract(x.Timestamp).TotalMinutes >= CacheExpireMinutes);

                //Keep only the last N items
                cache.OrderByDescending(x => x.Timestamp)
                    .Skip(_maxItems)
                    .ToList()
                    .ForEach(x => { cache.Remove(x); count++; });
            }
        }

        #region Now do the Slices
        //Keep only the last N items
        _cacheSlice.OrderByDescending(x => x.Timestamp)
            .Skip(_maxItems)
            .ToList()
            .ForEach(x => { _cacheSlice.Remove(x); count++; });

        //Purge anything not used in the last N minutes
        count += _cacheSlice.RemoveAll(x => DateTime.Now.Subtract(x.Timestamp).TotalMinutes >= CacheExpireMinutes);
        #endregion

        timer.Stop();

        //Log it if too long
        if (timer.ElapsedMilliseconds > 2000)
        {
            LoggerCQ.LogWarning($"QueryCache housekeeping: Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, ItemsRemoved={count}, CacheCount={cacheCount}");
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogWarning(ex, "QueryCache housekeeping failed");
    }
    finally
    {
        _timer.Start();
    }
}

private static void SyncInternal(List<DataItem> list, RepositorySchema schema)
{
    var timer = Stopwatch.StartNew();
    var count = 0;
    var processed = 0;
    long lastRIdx = 0;
    try
    {
        if (list == null) return;
        if (schema == null) return;

        //Only process items that have a record index but no hash yet
        var l = list.Where(x => x.__Hash == 0 && x.__RecordIndex > 0).ToList();
        if (!l.Any()) return;
        count = l.Count;

        var dataTable = SqlHelper.GetTableName(schema.ID);
        foreach (var item in l)
        {
            var sb = new StringBuilder();
            var parameters = new List<SqlParameter>();
            parameters.Add(new SqlParameter
            {
                DbType = DbType.Int64,
                IsNullable = false,
                ParameterName = $"@{SqlHelper.HashField}",
                Value = item.Hash(),
            });
            parameters.Add(new SqlParameter
            {
                DbType = DbType.Int64,
                IsNullable = false,
                ParameterName = $"@{SqlHelper.RecordIdxField}",
                Value = item.__RecordIndex,
            });
            lastRIdx = item.__RecordIndex;
            sb.AppendLine($"UPDATE [{dataTable}] SET [{SqlHelper.HashField}] = @{SqlHelper.HashField} WHERE [{SqlHelper.RecordIdxField}] = @{SqlHelper.RecordIdxField} AND [{SqlHelper.HashField}] = 0");
            SqlHelper.ExecuteSql(ConfigHelper.ConnectionString, sb.ToString(), parameters, false, false, 5);
            Interlocked.Increment(ref _counter);
            processed++;
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogWarning($"DataManager.SyncInternal: ID={schema.ID}, Count={count}, Processed={processed}, RIdx={lastRIdx}, Elapsed={timer.ElapsedMilliseconds}, Error={ex.Message}");
    }
}

public bool Run()
{
    var timer = Stopwatch.StartNew();
    try
    {
        var sb = new StringBuilder();
        sb.AppendLine("SET ROWCOUNT 5000;");
        if (this.PivotDate == null)
        {
            sb.AppendLine($"DELETE FROM [RepositoryLog] WHERE [RepositoryId] = {this.RepositoryId};");
        }
        else
        {
            sb.AppendLine($"DELETE FROM [RepositoryLog] WHERE [RepositoryId] = {this.RepositoryId} AND [CreatedDate] <= '{this.PivotDate.Value.ToString(DimensionItem.DateTimeFormat)}';");
        }

        var count = 0;
        var tempCount = 0;
        do
        {
            RetryHelper.DefaultRetryPolicy(5)
                .Execute(() =>
                {
                    tempCount = SqlHelper.ExecuteSql(ConfigHelper.ConnectionString, sb.ToString(), null, false);
                    count += tempCount;
                });
        } while (tempCount > 0);

        timer.Stop();
        LoggerCQ.LogInfo($"HkClearRepositoryLog: Count={count}, RepositoryId={this.RepositoryId}, Elapsed={timer.ElapsedMilliseconds}");
        return true;
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogWarning(ex);
        return false;
    }
}

private static Dictionary<string, string> GetSettings()
{
    try
    {
        using (var context = new DatastoreEntities(ConnectionString))
        {
            return context.ConfigurationSetting.ToList().ToDictionary(x => x.Name.ToLower(), x => x.Value);
        }
    }
    catch (Exception ex)
    {
        if (ex.ToString().Contains("Timeout expired"))
        {
            LoggerCQ.LogWarning(ex, "ConfigHelper.GetSettings timeout expired");
        }
        else
        {
            LoggerCQ.LogError(ex);
        }
        return null;
    }
}

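//Hedged sketch (illustration only): a lookup helper over the map returned by
//GetSettings(). The helper name and default-value handling are assumptions,
//not part of the original API; keys are lower-cased to match GetSettings().
private static string GetSetting(string name, string defaultValue = null)
{
    var settings = GetSettings();
    string value;
    if (settings != null && settings.TryGetValue(name.ToLower(), out value))
        return value;
    return defaultValue;
}
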
/// <summary />
public AcquireReaderLock(Guid id, string traceInfo)
{
    this.LockTime = 0;
    m_Lock = LockingManager.GetLocker(id);
    _id = id;
    if (!ConfigHelper.AllowReadLocking) return;

    this.ReadLockCount = m_Lock.CurrentReadCount;
    this.WaitingLocksOnEntry = m_Lock.WaitingWriteCount;
    if (this.WaitingLocksOnEntry > 10)
    {
        LoggerCQ.LogWarning($"AcquireReaderLock Waiting Writer Locks: Count={this.WaitingLocksOnEntry}, RepositoryId={id}, TraceInfo={traceInfo}");
    }

    if (!m_Lock.TryEnterReadLock(TimeOut))
    {
        _inError = true;
        throw new Exception("Could not get reader lock: " +
            ((m_Lock.ObjectId == Guid.Empty) ? string.Empty : $"ID={m_Lock.ObjectId}") +
            $", CurrentReadCount={m_Lock.CurrentReadCount}" +
            $", WaitingReadCount={m_Lock.WaitingReadCount}" +
            $", WaitingWriteCount={m_Lock.WaitingWriteCount}" +
            $", HoldingThread={m_Lock.HoldingThreadId}" +
            $", TraceInfo={m_Lock.TraceInfo}" +
            $", LockFailTime={(int)DateTime.Now.Subtract(_initTime).TotalMilliseconds}" +
            $", WriteHeldTime={m_Lock.WriteHeldTime}");
    }

    this.LockTime = (int)DateTime.Now.Subtract(_initTime).TotalMilliseconds;
    //Use the return value of Increment so concurrent readers cannot observe the same index
    _lockIndex = Interlocked.Increment(ref _counter);
    m_Lock.HeldReads.AddOrUpdate(_lockIndex, DateTime.Now, (key, value) => DateTime.Now);
    m_Lock.TraceInfo = traceInfo;
    m_Lock.HoldingThreadId = System.Threading.Thread.CurrentThread.ManagedThreadId;
}

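//Hedged usage sketch (illustration only): AcquireReaderLock enters the read
//lock in its constructor and releases it in Dispose, so the expected pattern
//is a using block, as seen elsewhere in this codebase. The method name and
//trace string below are hypothetical.
private static void ReadRepositoryExample(Guid repositoryId)
{
    using (var readLock = new AcquireReaderLock(repositoryId, "Example.ReadPath"))
    {
        //All repository reads happen while the lock is held.
        //LockTime reports how many milliseconds acquisition waited.
        if (readLock.LockTime > 1000)
            LoggerCQ.LogWarning($"Slow read lock: LockTime={readLock.LockTime}");
    }
}
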
private void DoHouseKeeping()
{
    using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
    {
        var list = context.Housekeeping.ToList();
        foreach (var item in list)
        {
            if (item.TypeValue == HousekeepingTaskType.ClearRepositoryLog)
            {
                var obj = ServerUtilities.DeserializeObject<HkClearRepositoryLog>(item.Data);
                if (obj.Run())
                {
                    context.DeleteItem(item);
                    context.SaveChanges();
                }
            }
            else
            {
                LoggerCQ.LogWarning($"Unknown housekeeping type: {item.Type}");
            }
        }
    }
}

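//Hedged sketch (illustration only): enqueueing a task for DoHouseKeeping to
//pick up. Assumes a SerializeObject counterpart to the DeserializeObject
//helper used above; the method name and entity property shapes are assumptions.
private void QueueClearRepositoryLog(int repositoryId)
{
    using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
    {
        var task = new HkClearRepositoryLog { RepositoryId = repositoryId };
        var item = new Gravitybox.Datastore.EFDAL.Entity.Housekeeping
        {
            TypeValue = HousekeepingTaskType.ClearRepositoryLog,
            Data = ServerUtilities.SerializeObject(task), //assumed helper
        };
        context.AddItem(item);
        context.SaveChanges();
    }
}
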
/// <summary />
protected virtual void Dispose(bool disposing)
{
    if (!m_Disposed && disposing && m_Lock != null)
    {
        var traceInfo = m_Lock.TraceInfo;
        if (!_inError)
        {
            m_Lock.WriteLockHeldTime = null;
            m_Lock.TraceInfo = null;
            m_Lock.HoldingThreadId = null;
        }
        if (ConfigHelper.AllowWriteLocking)
        {
            m_Lock.ExitWriteLock();
            var totalTime = (int)DateTime.Now.Subtract(_initTime).TotalMilliseconds;
            if (totalTime > 60000)
            {
                LoggerCQ.LogWarning($"WriterLock Long: Elapsed={totalTime}");
            }
            RepositoryManager.SystemCore.LogLockStat(new LockInfoItem
            {
                CurrentReadCount = m_Lock.CurrentReadCount,
                Elapsed = totalTime,
                Failure = false,
                IsWriteLockHeld = m_Lock.IsWriteLockHeld,
                ThreadId = m_Lock.HoldingThreadId.GetValueOrDefault(),
                WaitingReadCount = m_Lock.WaitingReadCount,
                WaitingWriteCount = m_Lock.WaitingWriteCount,
                TraceInfo = traceInfo
            });
        }
    }
    m_Disposed = true;
}

private void StartupEndpoint()
{
    var config = new SetupConfig();
    try
    {
        LoggerCQ.LogInfo("Attempting to upgrade database.");
        var connectionStringSettings = ConfigurationManager.ConnectionStrings["DatastoreEntities"];
        var connectionStringBuilder = new SqlConnectionStringBuilder(connectionStringSettings.ConnectionString)
        {
            InitialCatalog = "Master"
        };

        //Make sure there are no other nHydrate installations on this database
        if (DbMaintenanceHelper.ContainsOtherInstalls(connectionStringSettings.ConnectionString))
        {
            LoggerCQ.LogError($"The database contains another installation. This is an error condition. Database={connectionStringBuilder.InitialCatalog}");
            throw new Exception($"The database contains another installation. This is an error condition. Database={connectionStringBuilder.InitialCatalog}");
        }

        //Even a blank database gets updated below, so record whether the DB was blank at startup
        var isBlank = DbMaintenanceHelper.IsBlank(connectionStringSettings.ConnectionString);

        var installer = new DatabaseInstaller();
        if (installer.NeedsUpdate(connectionStringSettings.ConnectionString))
        {
            var setup = new InstallSetup
            {
                AcceptVersionWarningsChangedScripts = true,
                AcceptVersionWarningsNewScripts = true,
                ConnectionString = connectionStringSettings.ConnectionString,
                InstallStatus = InstallStatusConstants.Upgrade,
                MasterConnectionString = connectionStringBuilder.ToString(),
                SuppressUI = true,
            };
            installer.Install(setup);
        }

        //If this is a new database then add split data files to reduce file locking
        if (isBlank)
        {
            try
            {
                DbMaintenanceHelper.SplitDbFiles(connectionStringSettings.ConnectionString);
                LoggerCQ.LogInfo("New database has split data files.");
            }
            catch
            {
                LoggerCQ.LogWarning("New database could not split data files.");
            }

            try
            {
                var configFile = Path.Combine(Path.GetDirectoryName(System.Reflection.Assembly.GetExecutingAssembly().Location), "setup.config");
                if (File.Exists(configFile))
                {
                    var barr = File.ReadAllBytes(configFile);
                    config = ServerUtilities.DeserializeObject<SetupConfig>(barr);
                }
            }
            catch (Exception ex)
            {
                throw new Exception("Setup configuration file is not valid.", ex);
            }

            if (config != null)
            {
                if (!string.IsNullOrEmpty(config.ListDataPath) && !Directory.Exists(config.ListDataPath))
                {
                    throw new Exception("The setup configuration file value 'ListDataPath' is not valid");
                }
                if (!string.IsNullOrEmpty(config.IndexPath) && !Directory.Exists(config.IndexPath))
                {
                    throw new Exception("The setup configuration file value 'IndexPath' is not valid");
                }

                //Create a file group for List tables
                config.ListDataPath = DbMaintenanceHelper.CreateFileGroup(connectionStringSettings.ConnectionString, config.ListDataPath, SetupConfig.YFileGroup);

                //Create a file group for Indexes
                config.IndexPath = DbMaintenanceHelper.CreateFileGroup(connectionStringSettings.ConnectionString, config.IndexPath, SetupConfig.IndexFileGroup);
            }
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, "Failed on database upgrade.");
        throw new Exception("Failed on database upgrade.");
    }

    LoggerCQ.LogInfo("Service started begin");
    try
    {
        #region Primary Endpoint
        var service = new Gravitybox.Datastore.Server.Core.SystemCore(ConfigurationManager.ConnectionStrings["DatastoreEntities"].ConnectionString, _enableHouseKeeping);
        if (config != null)
        {
            ConfigHelper.SetupConfig = config;
        }

        #region Determine if configured port is free
        var isPortFree = false;
        do
        {
            try
            {
                //Determine if we can connect to the port
                using (var p1 = new System.Net.Sockets.TcpClient("localhost", ConfigHelper.Port))
                {
                }
                //If the connection succeeded then something is already listening on this port
                isPortFree = false;
                LoggerCQ.LogInfo($"Port {ConfigHelper.Port} is in use...");
                System.Threading.Thread.Sleep(3000); //wait...
            }
            catch (Exception)
            {
                //If the connection failed then nothing is listening on that port, so it is free
                isPortFree = true;
            }
        } while (!isPortFree);
        #endregion

        var primaryAddress = new Uri($"net.tcp://localhost:{ConfigHelper.Port}/__datastore_core");
        var primaryHost = new ServiceHost(service, primaryAddress);

        //Initialize the service
        var netTcpBinding = new NetTcpBinding();
        netTcpBinding.MaxConnections = ThrottleMax;
        netTcpBinding.Security.Mode = SecurityMode.None;
        primaryHost.AddServiceEndpoint(typeof(Gravitybox.Datastore.Common.ISystemCore), netTcpBinding, string.Empty);

        //Add more threads
        var stb = new ServiceThrottlingBehavior
        {
            MaxConcurrentSessions = ThrottleMax,
            MaxConcurrentCalls = ThrottleMax,
            MaxConcurrentInstances = ThrottleMax,
        };
        primaryHost.Description.Behaviors.Add(stb);
        primaryHost.Open();

        //Create Core Listener
        var primaryEndpoint = new EndpointAddress(primaryHost.BaseAddresses.First().AbsoluteUri);
        var primaryClient = new ChannelFactory<Gravitybox.Datastore.Common.ISystemCore>(netTcpBinding, primaryEndpoint);
        _core = primaryClient.CreateChannel();
        (_core as IContextChannel).OperationTimeout = new TimeSpan(0, 0, 120); //Timeout=2m
        #endregion

        LoadEngine(service);
        service.Manager.ResetMaster();
        LoggerCQ.LogInfo("Service started complete");
        ConfigHelper.StartUp();
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
        throw;
    }
}

public Task Load()
{
    return Task.Factory.StartNew(() =>
    {
        //Nothing to do
        if (!_configuration.query.IncludeDimensions && !_configuration.query.IncludeRecords) return;
        if (_dsList == null) return;
        if (_dsList.Tables.Count > 0)
        {
            //Put these in a lookup dictionary as there may be thousands or more.
            //It makes the lookups below much faster for these large sets.
            if (!this.IsCachehit || (_lookupRefinement == null) || (_lookupRefinement?.Count == 0 && _newDimension.RefinementList.Any()))
            {
                _lookupRefinement = _newDimension.RefinementList.ToDictionary(x => x.DVIdx, z => z);
            }

            var recordCache = new Dictionary<long, List<string>>();
            if (_configuration.query.IncludeRecords)
            {
                foreach (DataRow dr in _dsList.Tables[0].Rows)
                {
                    var recordIdx = (long)dr[0];
                    var dvidx = (long)dr[1];
                    if (!recordCache.ContainsKey(recordIdx))
                    {
                        recordCache.Add(recordIdx, new List<string>());
                    }
                    recordCache[recordIdx].Add(_lookupRefinement[dvidx].FieldValue);
                }
            }

            if (!this.IsCachehit)
            {
                foreach (DataRow dr in _dsList.Tables[1].Rows)
                {
                    var dvidx = (long)dr[0];
                    var count = (int)dr[1];
                    if (_lookupRefinement.ContainsKey(dvidx))
                    {
                        _lookupRefinement[dvidx].Count = count;
                    }
                    else
                    {
                        LoggerCQ.LogWarning($"Cannot find DVIdx={dvidx}");
                    }
                }
                ListDimensionCache.Add(_configuration.repositoryId, _newDimension.DIdx, _configuration.query, _lookupRefinement);
            }

            //Only look in here if there are items
            if (_lookupRefinement.Keys.Count != 0)
            {
                //This lambda has been replaced. There was some instance where the
                //dictionary did NOT have the DVIdx value in it, so it blew up.
                //Try to catch this case and log it.
                //_newDimension.RefinementList.ForEach(x => x.Count = _lookupRefinement[x.DVIdx].Count);
                foreach (var ritem in _newDimension.RefinementList)
                {
                    if (_lookupRefinement.ContainsKey(ritem.DVIdx))
                    {
                        ritem.Count = _lookupRefinement[ritem.DVIdx].Count;
                    }
                    else
                    {
                        LoggerCQ.LogWarning($"Missing Dictionary Value: ID={_configuration.schema.ID}, DIdx={ritem.DIdx}, DVIdx={ritem.DVIdx}, Value={ritem.FieldValue}");
                    }
                }
            }

            //Now setup values for records
            if (_configuration.query.IncludeRecords)
            {
                var fieldIndex = _configuration.schema.FieldList.Select(x => x.Name).ToList().IndexOf(_newDimension.Name);
                //NOTE: there is a lock around "retval" as this is the only external object that is modified
                lock (_configuration.retval)
                {
                    foreach (var record in _configuration.retval.RecordList)
                    {
                        if (recordCache.ContainsKey(record.__RecordIndex))
                        {
                            record.ItemArray[fieldIndex] = recordCache[record.__RecordIndex].ToArray();
                        }
                        else
                        {
                            record.ItemArray[fieldIndex] = new string[] { };
                        }
                    }
                }
            }

            if (!_configuration.query.IncludeEmptyDimensions)
            {
                _newDimension.RefinementList.RemoveAll(x => x.Count == 0);
            }
            _newDimension.RefinementList.RemoveAll(x => _configuration.query.DimensionValueList.Contains(x.DVIdx));

            if (_configuration.query.IncludeDimensions)
            {
                lock (_configuration.retval)
                {
                    _configuration.retval.DimensionList.Add(_newDimension);
                }
            }

            lock (_configuration)
            {
                _configuration.PerfLoadLDim++;
            }
        }
    });
}

public AcquireWriterLock(Guid id, string traceInfo)
{
    if (id == Guid.Empty) return;
    m_Lock = LockingManager.GetLocker(id);
    if (!ConfigHelper.AllowWriteLocking) return;

    this.ReadLockCount = m_Lock.CurrentReadCount;
    this.WaitingLocksOnEntry = m_Lock.WaitingWriteCount;
    if (this.WaitingLocksOnEntry > 10)
    {
        LoggerCQ.LogWarning($"AcquireWriterLock Waiting Writer Locks: Count={this.WaitingLocksOnEntry}, RepositoryId={id}, TraceInfo={traceInfo}");
    }

    //If another write lock is held then wait to enter, giving read locks a chance to run.
    //This is a hack to address an issue with the lock object: it prioritizes writes and starves reads.
    var lockHeld = false;
    if (traceInfo == RepositoryManager.TraceInfoUpdateData)
    {
        while (m_Lock.IsWriteLockHeld && DateTime.Now.Subtract(_initTime).TotalMilliseconds < 10000)
        {
            System.Threading.Thread.Sleep(20);
            lockHeld = true;
        }
    }

    var delay = (int)DateTime.Now.Subtract(_initTime).TotalMilliseconds;
    if (lockHeld)
    {
        LoggerCQ.LogDebug($"AcquireWriterLock: Held={delay}, RepositoryId={id}, TraceInfo={traceInfo}");
    }

    var timeoutValue = TimeOut - delay;
    if (!m_Lock.TryEnterWriteLock(timeoutValue))
    {
        _inError = true;
        RepositoryManager.SystemCore.LogLockStat(new LockInfoItem
        {
            CurrentReadCount = m_Lock.CurrentReadCount,
            Elapsed = (int)DateTime.Now.Subtract(_initTime).TotalMilliseconds,
            Failure = true,
            IsWriteLockHeld = m_Lock.IsWriteLockHeld,
            ThreadId = (m_Lock.HoldingThreadId == null) ? 0 : m_Lock.HoldingThreadId.Value,
            WaitingReadCount = m_Lock.WaitingReadCount,
            WaitingWriteCount = m_Lock.WaitingWriteCount,
            TraceInfo = traceInfo,
        });
        var lapses = string.Join("-", m_Lock.HeldReads.Values.ToList().Select(x => (int)DateTime.Now.Subtract(x).TotalSeconds).ToList());
        throw new Exception("Could not get writer lock: " +
            ((m_Lock.ObjectId == Guid.Empty) ? string.Empty : $"ID={m_Lock.ObjectId}") +
            $", CurrentReadCount={m_Lock.CurrentReadCount}" +
            $", WaitingReadCount={m_Lock.WaitingReadCount}" +
            $", WaitingWriteCount={m_Lock.WaitingWriteCount}" +
            $", HoldingThread={m_Lock.HoldingThreadId}" +
            $", TraceInfo={m_Lock.TraceInfo}" +
            $", WriteHeldTime={m_Lock.WriteHeldTime}" +
            $", LockFailTime={(int)DateTime.Now.Subtract(_initTime).TotalMilliseconds}" +
            $", Lapses={lapses}");
    }

    this.LockTime = (int)DateTime.Now.Subtract(_initTime).TotalMilliseconds;
    m_Lock.TraceInfo = traceInfo;
    m_Lock.WriteLockHeldTime = DateTime.Now;
    m_Lock.HoldingThreadId = System.Threading.Thread.CurrentThread.ManagedThreadId;
}

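//Hedged usage sketch (illustration only): like the reader lock, AcquireWriterLock
//is a constructor-acquire/Dispose-release wrapper, so writers wrap mutations in a
//using block. The method name below is hypothetical; passing
//RepositoryManager.TraceInfoUpdateData engages the starvation workaround above.
private static void UpdateRepositoryExample(Guid repositoryId)
{
    using (var writeLock = new AcquireWriterLock(repositoryId, RepositoryManager.TraceInfoUpdateData))
    {
        //All repository mutations happen while the write lock is held.
        //A failed acquisition throws, so this block only runs on success.
    }
}
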
public void Set(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id, DataQueryResults results)
{
    if (!ConfigHelper.AllowCaching) return;
    if (results == null) return;

    //Do not cache big items
    if (results.RecordList.Count > 100) return;

    if (!string.IsNullOrEmpty(query.Keyword) && !this.FTSReadyCache.IsReady(id)) return;
    //if (!string.IsNullOrEmpty(query.Keyword) && !ConfigHelper.AllowCacheWithKeyword) return;

    var timer = Stopwatch.StartNew();
    var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
    long lockTime = 0;
    var changeStamp = 0;
    var queryHash = 0;
    var subCacheKey = GetSubKey(schema, query);
    try
    {
        //Some queries should be cached a long time
        var longCache = !query.FieldFilters.Any() &&
                        !query.FieldSorts.Any() &&
                        string.IsNullOrEmpty(query.Keyword) &&
                        !query.SkipDimensions.Any();
        var extraMinutes = longCache ? 480 : 0;

        var coreHash = 0;
        CacheResultsQuery item;
        using (var q = new AcquireReaderLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            lockTime += q.LockTime;
            queryHash = query.GetHashCode();
            if (!query.ExcludeCount && query.IncludeDimensions && !query.IncludeEmptyDimensions)
            {
                coreHash = query.CoreHashCode();
            }
            changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);
            lock (cache)
            {
                item = cache?.FirstOrDefault(x => x.QueryHash == queryHash && x.ChangeStamp == changeStamp);
            }

            //If data has not changed and results are in cache then do nothing except mark as accessed
            if (item != null)
            {
                item.Results = results;
                item.Timestamp = DateTime.Now.AddMinutes(extraMinutes);
                item.SubKey = subCacheKey;
                return;
            }
        }

        lock (cache)
        {
            using (var q = new AcquireWriterLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
            {
                lockTime += q.LockTime;
                //Create a new cache item
                item = new CacheResultsQuery()
                {
                    QueryHash = queryHash,
                    QueryCoreHash = coreHash,
                    RepositoryId = repositoryId,
                    ChangeStamp = changeStamp,
                    Results = results,
                    QueryString = query.ToString(),
                    ParentId = RepositoryManager.GetSchemaParentId(repositoryId),
                    Timestamp = DateTime.Now.AddMinutes(extraMinutes),
                    SubKey = subCacheKey,
                };
                cache.Add(item);
            }
        }
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogError(ex, $"RepositoryId={id}, Elapsed={timer.ElapsedMilliseconds}, ID={id}, LockTime={lockTime}, Count={cache.Count}, QueryHash={queryHash}, ChangeStamp={changeStamp}");
        throw;
    }
    finally
    {
        timer.Stop();
        if (timer.ElapsedMilliseconds > 50)
        {
            LoggerCQ.LogWarning($"Slow cache set: Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, ID={id}, Query=\"{query.ToString()}\"");
        }
        LoggerCQ.LogTrace($"QueryCache: Set: SubCacheKey={subCacheKey}");
    }
}

public DataQueryResults Get(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id, out bool isCore)
{
    isCore = false;
    if (!ConfigHelper.AllowCaching) return null;

    long lockTime = 0;
    int queryHash = 0;
    int coreHash = 0;
    int changeStamp = 0;

    //Calculate the hashes in parallel with lock acquisition
    var task1 = Task.Factory.StartNew(() =>
    {
        queryHash = query.GetHashCode();
        coreHash = query.CoreHashCode();
        changeStamp = RepositoryManager.GetRepositoryChangeStamp(context, repositoryId);
    });

    var timer = new Stopwatch();
    var cache = RepositoryCacheManager.GetCache(id, RepositoryManager.GetSchemaParentId(repositoryId));
    try
    {
        using (var q = new AcquireReaderLock(ServerUtilities.RandomizeGuid(cache.ID, RSeed), "QueryCache"))
        {
            lockTime = q.LockTime;
            timer.Start();

            //Ensure that the pre-calculations are complete
            task1.Wait();

            CacheResultsQuery item = null;
            lock (cache)
            {
                item = cache?.FirstOrDefault(x => x.QueryHash == queryHash && x.ChangeStamp == changeStamp);
            }
            if (item == null)
            {
                if (ConfigHelper.AllowCoreCache)
                {
                    //TODO: OPTIMIZE: this is a linear search of thousands of items!!!!
                    //If we did not find a match then see if the core properties match.
                    //If so we can use the dimension and count values and just replace the records collection.
                    lock (cache)
                    {
                        item = cache?.FirstOrDefault(x => x.QueryCoreHash == coreHash && x.ChangeStamp == changeStamp);
                    }
                }
                if (item == null) return null;
                isCore = true;
                item.HitCount++;
                return item.Results;
            }
            item.Timestamp = DateTime.Now;
            item.HitCount++;
            return item.Results;
        }
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogError(ex, $"RepositoryId={id}, Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, QueryHash={queryHash}, ChangeStamp={changeStamp}, ID={id}");
        throw;
    }
    finally
    {
        timer.Stop();
        if (timer.ElapsedMilliseconds > 50)
        {
            LoggerCQ.LogWarning($"Slow cache get: Elapsed={timer.ElapsedMilliseconds}, LockTime={lockTime}, Count={cache.Count}, ID={id}, QueryString=\"{query.ToString()}\"");
        }
    }
}
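
//Hedged sketch (illustration only) of the read-through pattern Get/Set support.
//ExecuteQuery is a hypothetical stand-in for the real query execution path and
//is not part of the original class; a real caller reuses the dimension data on
//a core hit rather than re-running the whole query as this simplification does.
public DataQueryResults GetOrExecute(DatastoreEntities context, RepositorySchema schema, DataQuery query, int repositoryId, Guid id)
{
    bool isCore;
    var results = Get(context, schema, query, repositoryId, id, out isCore);
    if (results == null || isCore)
    {
        //Miss, or a core hit whose record list belongs to a different query:
        //run the query and refresh the cache entry.
        results = ExecuteQuery(schema, query); //hypothetical executor
        Set(context, schema, query, repositoryId, id, results);
    }
    return results;
}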