public void MarkRefreshStats(Guid repositoryId)
{
    lock (_statisticsCache)
    {
        if (!_statisticsCache.Contains(repositoryId))
        {
            _statisticsCache.Add(repositoryId);
            LoggerCQ.LogInfo($"TableStatsMaintenance MarkRefreshStats: ID={repositoryId}");
        }
    }
}
protected override void OnShutdown()
{
    try
    {
        base.OnShutdown();
        _core.ShutDown();
        LoggerCQ.LogInfo("Services ShutDown");
        //KillTimer();
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, "Error 0x2401: Shutdown Failed");
        throw;
    }
}
protected override void OnStop()
{
    //KillTimer();
    try
    {
        ConfigHelper.ShutDown();
        if (_core != null)
        {
            _core.ShutDown();
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, "Error 0x2400: Shutdown Failed");
    }
    LoggerCQ.LogInfo("Services Stopped");
}
public bool Run()
{
    var timer = Stopwatch.StartNew();
    try
    {
        //Delete in batches of 5000 rows to limit lock escalation and transaction log growth
        var sb = new StringBuilder();
        sb.AppendLine("SET ROWCOUNT 5000;");
        if (this.PivotDate == null)
        {
            sb.AppendLine($"DELETE FROM [RepositoryLog] WHERE [RepositoryId] = {this.RepositoryId};");
        }
        else
        {
            sb.AppendLine($"DELETE FROM [RepositoryLog] WHERE [RepositoryId] = {this.RepositoryId} AND [CreatedDate] <= '{this.PivotDate.Value.ToString(DimensionItem.DateTimeFormat)}';");
        }

        var count = 0;
        var tempCount = 0;
        do
        {
            //Retry each batch on failure; keep looping until a batch deletes no rows
            RetryHelper.DefaultRetryPolicy(5)
                .Execute(() =>
                {
                    tempCount = SqlHelper.ExecuteSql(ConfigHelper.ConnectionString, sb.ToString(), null, false);
                    count += tempCount;
                });
        } while (tempCount > 0);

        timer.Stop();
        LoggerCQ.LogInfo($"HkClearRepositoryLog: Count={count}, RepositoryId={this.RepositoryId}, Elapsed={timer.ElapsedMilliseconds}");
        return true;
    }
    catch (Exception ex)
    {
        timer.Stop();
        LoggerCQ.LogWarning(ex);
        return false;
    }
}
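//The housekeeping delete above builds its statement with string interpolation. Below is a minimal
//sketch of the same batched-delete pattern using parameterized ADO.NET; the method name, the int
//type for RepositoryId, and the 5000-row batch size are assumptions for illustration only, not the
//product's actual API.
private static int ClearRepositoryLogBatched(string connectionString, int repositoryId, DateTime? pivotDate)
{
    var total = 0;
    int batchCount;
    do
    {
        using (var connection = new SqlConnection(connectionString))
        using (var command = new SqlCommand(
            "DELETE TOP (5000) FROM [RepositoryLog] " +
            "WHERE [RepositoryId] = @repositoryId " +
            "AND (@pivotDate IS NULL OR [CreatedDate] <= @pivotDate);", connection))
        {
            command.Parameters.Add("@repositoryId", System.Data.SqlDbType.Int).Value = repositoryId;
            command.Parameters.Add("@pivotDate", System.Data.SqlDbType.DateTime).Value = (object)pivotDate ?? DBNull.Value;
            connection.Open();
            batchCount = command.ExecuteNonQuery(); //rows removed in this batch
            total += batchCount;
        }
    } while (batchCount > 0); //stop once a batch deletes nothing
    return total;
}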
private static void HitHard()
{
    try
    {
        long index = 0;
        while (true)
        {
            using (var repo = new DatastoreRepository<MyItem>(repoID, SERVER, PORT))
            {
                index++;
                var id = _rnd.Next(1, 9999999);
                LoggerCQ.LogInfo("HitHard: ID=" + id + ", Index=" + index);
                var results = repo.Query
                    .Where(x => x.ID == id)
                    .Results();
            }
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
        throw;
    }
}
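//A hypothetical harness for the stress loop above: run several HitHard loops concurrently so the
//endpoint is exercised from multiple connections at once. The method name and the default thread
//count are assumptions, not part of the original test code.
private static void HitHardParallel(int threads = 4)
{
    var tasks = new List<System.Threading.Tasks.Task>();
    for (var i = 0; i < threads; i++)
    {
        //Each task opens its own repository connections inside HitHard
        tasks.Add(System.Threading.Tasks.Task.Run(() => HitHard()));
    }
    //HitHard only returns by throwing, so this blocks until one of the loops fails
    System.Threading.Tasks.Task.WaitAll(tasks.ToArray());
}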
private void StartupEndpoint()
{
    var config = new SetupConfig();
    try
    {
        LoggerCQ.LogInfo("Attempting to upgrade database.");
        var connectionStringSettings = ConfigurationManager.ConnectionStrings["DatastoreEntities"];
        var connectionStringBuilder = new SqlConnectionStringBuilder(connectionStringSettings.ConnectionString) { InitialCatalog = "Master" };

        //Make sure there are no other nHydrate installations on this database
        if (DbMaintenanceHelper.ContainsOtherInstalls(connectionStringSettings.ConnectionString))
        {
            LoggerCQ.LogError($"The database contains another installation. This is an error condition. Database={connectionStringBuilder.InitialCatalog}");
            throw new Exception($"The database contains another installation. This is an error condition. Database={connectionStringBuilder.InitialCatalog}");
        }

        //Even a blank database gets updated below, so record now whether the database started out blank
        var isBlank = DbMaintenanceHelper.IsBlank(connectionStringSettings.ConnectionString);

        var installer = new DatabaseInstaller();
        if (installer.NeedsUpdate(connectionStringSettings.ConnectionString))
        {
            var setup = new InstallSetup
            {
                AcceptVersionWarningsChangedScripts = true,
                AcceptVersionWarningsNewScripts = true,
                ConnectionString = connectionStringSettings.ConnectionString,
                InstallStatus = InstallStatusConstants.Upgrade,
                MasterConnectionString = connectionStringBuilder.ToString(),
                SuppressUI = true,
            };
            installer.Install(setup);
        }

        //If this is a new database then split the data files to reduce file locking
        if (isBlank)
        {
            try
            {
                DbMaintenanceHelper.SplitDbFiles(connectionStringSettings.ConnectionString);
                LoggerCQ.LogInfo("New database has split data files.");
            }
            catch
            {
                LoggerCQ.LogWarning("New database could not split data files.");
            }

            try
            {
                var configFile = Path.Combine(Path.GetDirectoryName(System.Reflection.Assembly.GetExecutingAssembly().Location), "setup.config");
                if (File.Exists(configFile))
                {
                    var barr = File.ReadAllBytes(configFile);
                    config = ServerUtilities.DeserializeObject<SetupConfig>(barr);
                }
            }
            catch (Exception ex)
            {
                throw new Exception("Setup configuration file is not valid.", ex);
            }

            if (config != null)
            {
                if (!string.IsNullOrEmpty(config.ListDataPath) && !Directory.Exists(config.ListDataPath))
                {
                    throw new Exception("The setup configuration file value 'ListDataPath' is not valid");
                }
                if (!string.IsNullOrEmpty(config.IndexPath) && !Directory.Exists(config.IndexPath))
                {
                    throw new Exception("The setup configuration file value 'IndexPath' is not valid");
                }

                //Create a file group for List tables
                config.ListDataPath = DbMaintenanceHelper.CreateFileGroup(connectionStringSettings.ConnectionString, config.ListDataPath, SetupConfig.YFileGroup);

                //Create a file group for Indexes
                config.IndexPath = DbMaintenanceHelper.CreateFileGroup(connectionStringSettings.ConnectionString, config.IndexPath, SetupConfig.IndexFileGroup);
            }
        }
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, "Failed on database upgrade.");
        throw new Exception("Failed on database upgrade.");
    }

    LoggerCQ.LogInfo("Service started begin");
    try
    {
        #region Primary Endpoint

        var service = new Gravitybox.Datastore.Server.Core.SystemCore(ConfigurationManager.ConnectionStrings["DatastoreEntities"].ConnectionString, _enableHouseKeeping);
        if (config != null)
        {
            ConfigHelper.SetupConfig = config;
        }

        #region Determine if configured port is free
        var isPortFree = false;
        do
        {
            try
            {
                //Determine if we can connect to the port
                using (var p1 = new System.Net.Sockets.TcpClient("localhost", ConfigHelper.Port)) { }
                //If the connection succeeded then something is already listening on this port
                isPortFree = false;
                LoggerCQ.LogInfo($"Port {ConfigHelper.Port} is in use...");
                System.Threading.Thread.Sleep(3000); //wait...
            }
            catch (Exception)
            {
                //If the connection failed then nothing is listening on that port, so it is free
                isPortFree = true;
            }
        } while (!isPortFree);
        #endregion

        var primaryAddress = new Uri($"net.tcp://localhost:{ConfigHelper.Port}/__datastore_core");
        var primaryHost = new ServiceHost(service, primaryAddress);

        //Initialize the service
        var netTcpBinding = new NetTcpBinding();
        netTcpBinding.MaxConnections = ThrottleMax;
        netTcpBinding.Security.Mode = SecurityMode.None;
        primaryHost.AddServiceEndpoint(typeof(Gravitybox.Datastore.Common.ISystemCore), netTcpBinding, string.Empty);

        //Add more threads
        var stb = new ServiceThrottlingBehavior
        {
            MaxConcurrentSessions = ThrottleMax,
            MaxConcurrentCalls = ThrottleMax,
            MaxConcurrentInstances = ThrottleMax,
        };
        primaryHost.Description.Behaviors.Add(stb);
        primaryHost.Open();

        //Create Core Listener
        var primaryEndpoint = new EndpointAddress(primaryHost.BaseAddresses.First().AbsoluteUri);
        var primaryClient = new ChannelFactory<Gravitybox.Datastore.Common.ISystemCore>(netTcpBinding, primaryEndpoint);
        _core = primaryClient.CreateChannel();
        (_core as IContextChannel).OperationTimeout = new TimeSpan(0, 0, 120); //Timeout=2m

        #endregion

        LoadEngine(service);
        service.Manager.ResetMaster();
        LoggerCQ.LogInfo("Service started complete");
        ConfigHelper.StartUp();
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex);
        throw;
    }
}
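//A minimal sketch of the port probe used in the startup loop above, extracted into a helper and
//assuming the same convention: a successful TCP connect means the port is already occupied. The
//method name is hypothetical.
private static bool IsPortInUse(int port)
{
    try
    {
        //If we can open a TCP connection, something is already listening on the port
        using (var client = new System.Net.Sockets.TcpClient("localhost", port)) { }
        return true;
    }
    catch (System.Net.Sockets.SocketException)
    {
        //Connection refused (or otherwise failed), so nothing is listening
        return false;
    }
}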
public void Run()
{
    //Only have 1 running async query for a repository
    while (!_runningList.TryAdd(_schema.ID))
    {
        System.Threading.Thread.Sleep(1000);
    }
    try
    {
        var timer = Stopwatch.StartNew();
        List<DimensionItem> dimensionList = null;
        using (var context = new DatastoreEntities(ConfigHelper.ConnectionString))
        {
            dimensionList = _dimensionCache.Get(context, _schema, _schema.InternalID, new List<DataItem>());
        }

        //There is no such thing as a list field that is not a dimension
        var dataTableFields = _schema.FieldList.Where(x => x.DataType != RepositorySchema.DataTypeConstants.List).ToList();
        var nonListDimensionFields = _schema.DimensionList.Where(x => x.DataType != RepositorySchema.DataTypeConstants.List).ToList();
        var listDimensionFields = _schema.DimensionList.Where(x => x.DimensionType == RepositorySchema.DimensionTypeConstants.List).ToList();

        var parameters = new List<SqlParameter>();
        var sql = SqlHelper.QueryAsync(_schema, _schema.InternalID, this.Query, dimensionList, parameters, ConfigHelper.ConnectionString);

        #region Get all the list dimensions for those fields
        var dimensionMapper = new ConcurrentDictionary<long, Dictionary<long, List<long>>>();
        var timerList = Stopwatch.StartNew();
        Parallel.ForEach(listDimensionFields, new ParallelOptions { MaxDegreeOfParallelism = 4 }, (ditem) =>
        {
            try
            {
                var valueMapper = new Dictionary<long, List<long>>();
                dimensionMapper.TryAdd(ditem.DIdx, valueMapper);
                var dTable = SqlHelper.GetListTableName(_schema.ID, ditem.DIdx);
                //This is the fastest way I could find to load this data
                using (var connection = new SqlConnection(ConfigHelper.ConnectionString))
                {
                    connection.Open();
                    using (var command = new SqlCommand($"SELECT Y.[{SqlHelper.RecordIdxField}], Y.[DVIdx] FROM [{dTable}] Y {SqlHelper.NoLockText()} ORDER BY Y.[{SqlHelper.RecordIdxField}], Y.[DVIdx]", connection))
                    {
                        using (var reader = command.ExecuteReader())
                        {
                            while (reader.Read())
                            {
                                var recordIndex = (long)reader[0];
                                var dvidx = (long)reader[1];
                                if (!valueMapper.ContainsKey(recordIndex))
                                {
                                    valueMapper.Add(recordIndex, new List<long>());
                                }
                                valueMapper[recordIndex].Add(dvidx);
                            }
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                LoggerCQ.LogError(ex);
                throw;
            }
        });
        timerList.Stop();
        #endregion

        var fileName = Path.Combine(ConfigHelper.AsyncCachePath, this.Key.ToString());
        var rowCount = 0;
        using (var tempFile = XmlTextWriter.Create(fileName))
        {
            tempFile.WriteStartDocument();
            tempFile.WriteStartElement("root");
            using (var connection = new SqlConnection(ConfigHelper.ConnectionString))
            {
                var command = new SqlCommand(sql, connection);
                command.CommandTimeout = 3600;
                command.Parameters.AddRange(parameters.ToArray());
                connection.Open();
                using (var reader = command.ExecuteReader())
                {
                    if (reader.HasRows)
                    {
                        #region Write headers
                        tempFile.WriteStartElement("headers");
                        foreach (var h in dataTableFields)
                        {
                            var d = nonListDimensionFields.FirstOrDefault(x => x.Name == h.Name);
                            if (d == null)
                            {
                                tempFile.WriteElementString("h", h.Name);
                            }
                            else
                            {
                                tempFile.WriteStartElement("h");
                                tempFile.WriteAttributeString("didx", d.DIdx.ToString());
                                tempFile.WriteValue(d.Name);
                                tempFile.WriteEndElement(); //h
                            }
                        }
                        foreach (var d in listDimensionFields)
                        {
                            tempFile.WriteStartElement("h");
                            tempFile.WriteAttributeString("didx", d.DIdx.ToString());
                            tempFile.WriteValue(d.Name);
                            tempFile.WriteEndElement(); //h
                        }
                        tempFile.WriteEndElement(); //headers
                        #endregion

                        #region Write Dimension Defs
                        tempFile.WriteStartElement("dimensions");
                        foreach (var d in dimensionList)
                        {
                            tempFile.WriteStartElement("d");
                            tempFile.WriteAttributeString("didx", d.DIdx.ToString());
                            tempFile.WriteAttributeString("name", d.Name);
                            foreach (var r in d.RefinementList)
                            {
                                tempFile.WriteStartElement("r");
                                tempFile.WriteAttributeString("dvidx", r.DVIdx.ToString());
                                tempFile.WriteValue(r.FieldValue);
                                tempFile.WriteEndElement(); //r
                            }
                            tempFile.WriteEndElement(); //d
                        }
                        tempFile.WriteEndElement(); //dimensions
                        #endregion

                        #region Write Items
                        tempFile.WriteStartElement("items");
                        while (reader.Read())
                        {
                            var index = 0;
                            tempFile.WriteStartElement("i");

                            //Write static fields
                            var recordIndex = reader.GetInt64(dataTableFields.Count);
                            var timestamp = reader.GetInt32(dataTableFields.Count + 1);
                            tempFile.WriteAttributeString("ri", recordIndex.ToString());
                            tempFile.WriteAttributeString("ts", timestamp.ToString());

                            #region Write all data table (Z) fields
                            foreach (var field in dataTableFields)
                            {
                                if (reader.IsDBNull(index))
                                {
                                    tempFile.WriteElementString("v", "~■!N");
                                }
                                else
                                {
                                    switch (field.DataType)
                                    {
                                        case RepositorySchema.DataTypeConstants.Bool:
                                            tempFile.WriteElementString("v", reader.GetBoolean(index) ? "1" : "0");
                                            break;
                                        case RepositorySchema.DataTypeConstants.DateTime:
                                            tempFile.WriteElementString("v", reader.GetDateTime(index).Ticks.ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.Float:
                                            tempFile.WriteElementString("v", reader.GetDouble(index).ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.GeoCode:
                                            var geo = (Microsoft.SqlServer.Types.SqlGeography)reader.GetValue(index);
                                            tempFile.WriteElementString("v", $"{geo.Lat}|{geo.Long}");
                                            break;
                                        case RepositorySchema.DataTypeConstants.Int:
                                            tempFile.WriteElementString("v", reader.GetInt32(index).ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.Int64:
                                            tempFile.WriteElementString("v", reader.GetInt64(index).ToString());
                                            break;
                                        case RepositorySchema.DataTypeConstants.String:
                                            tempFile.WriteElementString("v", StripNonValidXMLCharacters(reader.GetString(index)));
                                            break;
                                        default:
                                            break;
                                    }
                                }
                                index++;
                            }
                            #endregion

                            #region Write List fields
                            foreach (var field in listDimensionFields)
                            {
                                if (dimensionMapper.ContainsKey(field.DIdx) && dimensionMapper[field.DIdx].ContainsKey(recordIndex))
                                {
                                    tempFile.WriteElementString("v", dimensionMapper[field.DIdx][recordIndex].ToList().ToStringList("|"));
                                }
                            }
                            #endregion

                            tempFile.WriteEndElement(); //i
                            rowCount++;
                        }
                        tempFile.WriteEndElement(); //items
                        #endregion
                    }
                    reader.Close();
                }
            }
            tempFile.WriteEndElement(); //root
        }

        //Write file that signifies we are done
        var zipFile = Extensions.ZipFile(fileName);
        var outFile = fileName + ".zzz";
        File.Move(zipFile, outFile);
        var size = (new FileInfo(outFile)).Length;
        System.Threading.Thread.Sleep(300);
        File.Delete(fileName);
        System.Threading.Thread.Sleep(300);
        timer.Stop();
        LoggerCQ.LogInfo($"QueryThreaded Complete: ID={_schema.ID}, File={outFile}, Size={size}, Count={rowCount}, ListElapsed={timerList.ElapsedMilliseconds}, Elapsed={timer.ElapsedMilliseconds}");
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, $"ID={_schema.ID}, Query=\"{this.Query.ToString()}\"");
        File.WriteAllText(Path.Combine(ConfigHelper.AsyncCachePath, this.Key.ToString() + ".error"), "error");
    }
    finally
    {
        this.IsComplete = true;
        _runningList.Remove(_schema.ID);
    }
}
private static void Main(string[] args)
{
    NLog.Targets.Target.Register<Logging.ExceptionalErrorStoreTarget>("ErrorStore");
    LoggerCQ.LogInfo("Initializing Service...");
#if DEBUG
    LoggerCQ.LogInfo("(Debug Build)");
#endif

    //Try to connect to the database; if successful then assume the service will start
    try
    {
        var installer = new DatabaseInstaller();
        var connectionStringSettings = ConfigurationManager.ConnectionStrings["DatastoreEntities"];
        //Just wait a few seconds to determine if the database is there
        var cb = new System.Data.SqlClient.SqlConnectionStringBuilder(connectionStringSettings.ConnectionString);
        cb.ConnectTimeout = 12;
        var b = installer.NeedsUpdate(cb.ToString());
    }
    catch (Exception ex)
    {
        LoggerCQ.LogError(ex, "Failed to connect to database.");
        throw new Exception("Failed to connect to database.");
    }
    LoggerCQ.LogInfo("Database connection verified.");

    if (args.Any(x => x == "-console" || x == "/console"))
    {
        //Run interactively from the command line
        try
        {
            var enableHouseKeeping = true;
            if (args.Any(x => x == "-nohousekeeping" || x == "/nohousekeeping"))
            {
                enableHouseKeeping = false;
            }
            var service = new PersistentService(enableHouseKeeping);
            service.Start();
            Console.WriteLine("Press <ENTER> to stop...");
            Console.ReadLine();
            service.Cleanup();
            service.Stop();
        }
        catch (Exception ex)
        {
            LoggerCQ.LogError(ex, "Failed to start service from console.");
            throw;
        }
    }
    else
    {
        //Run as a Windows service
        try
        {
            var servicesToRun = new ServiceBase[] { new PersistentService() };
            ServiceBase.Run(servicesToRun);
        }
        catch (Exception ex)
        {
            LoggerCQ.LogError(ex, "Failed to start service.");
        }
    }
}