/// <summary>
/// Creates a deep copy of the passed object.
/// </summary>
/// <param name="old">A <b>Server Instance</b> object to create the deep copy from.</param>
private void CopyMembers(ServerInstance old)
{
    this.instanceName = old.instanceName;
    this.integratedSecurity = old.integratedSecurity;
    this.adminUser = old.adminUser;
    this.adminPassword = old.adminPassword;
}
public DatabaseInstance GenerateDatabaseInstance(ServerInstance serverInstance, Slice slice, DatabaseVersion databaseVersion)
{
    return GenerateDatabaseInstance(
        serverInstance,
        slice,
        databaseVersion,
        databaseDefinition.DatabaseInstanceNamePattern,
        databaseDefinition.DatabaseNamePattern,
        databaseVersion.SizeMultiplier,
        true);
}
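// Illustrative sketch only (not part of the installer code): calling the overload
// above with the database definition's default name patterns. 'installer',
// 'serverInstance', 'slice' and 'version' are hypothetical, already-loaded
// registry objects; the actual call site may differ.
var di = installer.GenerateDatabaseInstance(serverInstance, slice, version);
Console.WriteLine(di.DatabaseName);   // name resolved from DatabaseNamePattern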
protected override void Execute(CodeActivityContext activityContext)
{
    QueryPartitionBase queryPartition = QueryPartition.Get(activityContext);

    switch (queryPartition.Query.ExecutionMode)
    {
        case ExecutionMode.SingleServer:
            queryPartition.InitializeQueryObject(null, null, true);
            break;
        case ExecutionMode.Graywulf:
            using (var context = ContextManager.Instance.CreateContext(this, activityContext, ConnectionMode.AutoOpen, TransactionMode.AutoCommit))
            {
                var scheduler = activityContext.GetExtension<IScheduler>();

                //queryPartition.DatabaseVersionName = queryPartition.Query.SourceDatabaseVersionName; TODO: delete

                queryPartition.InitializeQueryObject(context, scheduler, false);

                var dss = queryPartition.FindRequiredDatasets();

                // Check if there are any Graywulf datasets referenced in the query
                var assignmydb = (dss.Values.FirstOrDefault(ds => !ds.IsSpecificInstanceRequired) == null);

                // *** TODO: replace this whole thing to use JOIN graphs
                // If no graywulf datasets are used, use the server containing myDB,
                // otherwise ask the scheduler for an appropriate server
                if (dss.Count == 0 || assignmydb)
                {
                    // use MyDB's server
                    var ef = new EntityFactory(context);
                    var federation = queryPartition.FederationReference.Value;
                    var user = ef.LoadEntity<User>(context.UserGuid);
                    var di = user.GetUserDatabaseInstance(federation.MyDBDatabaseVersion);

                    queryPartition.AssignedServerInstance = di.ServerInstance;
                }
                else
                {
                    // Assign new server instance
                    var si = new ServerInstance(context);
                    si.Guid = scheduler.GetNextServerInstance(
                        dss.Values.Where(x => !x.IsSpecificInstanceRequired).Select(x => x.DatabaseDefinition.Guid).ToArray(),
                        queryPartition.Query.SourceDatabaseVersionName,
                        null);
                    si.Load();

                    queryPartition.AssignedServerInstance = si;
                }

                queryPartition.InitializeQueryObject(context, scheduler, true);

                EntityGuid.Set(activityContext, queryPartition.AssignedServerInstance.Guid);
            }
            break;
    }
}
private DiagnosticMessage TestSqlConnection()
{
    DiagnosticMessage msg = new DiagnosticMessage()
    {
        EntityName = GetFullyQualifiedName(),
        NetworkName = Federation.SchemaSourceServerInstance.GetCompositeName(),
        ServiceName = "SQL Connection to Schema Source Server"
    };

    ServerInstance.RunDiagnostics(GetConnectionString().ConnectionString, msg);

    return msg;
}
public Guid GetNextDatabaseInstance(Guid databaseDefinition, string databaseVersion)
{
    var q = from di in queueManager.Cluster.DatabaseDefinitions[databaseDefinition].DatabaseInstances[databaseVersion].Values
            where di.ServerInstance.IsAvailable
            select di;

    var dis = q.ToArray();
    var sis = new ServerInstance[dis.Length];

    for (int i = 0; i < sis.Length; i++)
    {
        sis[i] = dis[i].ServerInstance;
    }

    return sis[GetNextServerIndex(sis)].Guid;
}
public DatabaseInstance GenerateDatabaseInstance(ServerInstance serverInstance, Slice slice, DatabaseVersion databaseVersion, string namePattern, string databaseNamePattern, double sizeFactor, bool generateFileGroups)
{
    return GenerateDatabaseInstance(serverInstance, null, null, slice, databaseVersion, namePattern, databaseNamePattern, sizeFactor, generateFileGroups);
}
public List<DatabaseInstance> GenerateDatabaseInstances(ServerInstance[][] serverInstances, string namePattern, string databaseNamePattern, double sizeFactor, bool generateFileGroups)
{
    List<DatabaseInstance> instances = new List<DatabaseInstance>();

    databaseDefinition.LoadDatabaseVersions(false);
    databaseDefinition.LoadSlices(false);

    List<Slice> slices = new List<Slice>(databaseDefinition.Slices.Values.OrderBy(i => i.Number));

    for (int si = 0; si < slices.Count; si++)
    {
        Slice slice = slices[si];

        // **** TODO review this part and add [$Number] to pattern if mirrored
        // to avoid name collision under databaseinstance
        foreach (DatabaseVersion rs in databaseDefinition.DatabaseVersions.Values)
        {
            // TODO: do not use rs.Number here!!!
            DatabaseInstance ndi = GenerateDatabaseInstance(serverInstances[si][rs.Number], slices[si], rs, namePattern, databaseNamePattern, sizeFactor, generateFileGroups);
            instances.Add(ndi);
        }
    }

    return instances;
}
public List<DatabaseInstance> GenerateDatabaseInstances(ServerInstance[][] serverInstances)
{
    return GenerateDatabaseInstances(
        serverInstances,
        databaseDefinition.DatabaseInstanceNamePattern,
        databaseDefinition.DatabaseNamePattern,
        1.0,
        true);
}
/// <summary>
/// Returns the server instances that all contain an instance
/// of the database definition.
/// </summary>
/// <param name="databaseDefinitions"></param>
/// <param name="databaseVersionName"></param>
/// <returns></returns>
private ServerInstance[] GetServerInstancesInternal(Guid[] databaseDefinitions, string databaseVersionName, Guid[] databaseInstances)
{
    // TODO: lock?
    // TODO: for some reason it might return wrong results when the database version is non-existing, check this

    // Start with all server instances
    var sis = new HashSet<Guid>(queueManager.Cluster.ServerInstances.Keys);

    // If any database instances are specified, a specific server instance will be required
    if (databaseInstances != null && databaseInstances.Length > 0)
    {
        var disis = new HashSet<Guid>();

        foreach (var di in databaseInstances)
        {
            var si = queueManager.Cluster.DatabaseInstances[di].ServerInstance;

            if (si.IsAvailable)
            {
                disis.Add(si.Guid);
            }
        }

        sis.IntersectWith(disis);
    }

    foreach (var dd in databaseDefinitions)
    {
        if (queueManager.Cluster.DatabaseDefinitions[dd].DatabaseInstances.ContainsKey(databaseVersionName))
        {
            var disis = new HashSet<Guid>();

            foreach (var di in queueManager.Cluster.DatabaseDefinitions[dd].DatabaseInstances[databaseVersionName].Values)
            {
                var si = di.ServerInstance;

                if (si.IsAvailable)
                {
                    disis.Add(si.Guid);
                }
            }

            sis.IntersectWith(disis);
        }
    }

    var res = new ServerInstance[sis.Count];
    int q = 0;

    foreach (var si in sis)
    {
        res[q] = queueManager.Cluster.ServerInstances[si];
        q++;
    }

    return res;
}
/// <summary>
/// Copy constructor for making a deep copy of <b>Server Instance</b> objects.
/// </summary>
/// <param name="old">The <b>Server Instance</b> to copy from.</param>
public ServerInstance(ServerInstance old)
    : base(old)
{
    CopyMembers(old);
}
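// Illustrative sketch only: the copy constructor above makes a deep copy, so
// changes to the copy do not affect the original. 'original' is a hypothetical,
// already-loaded ServerInstance, and the property name used here is assumed to
// be the public counterpart of the copied private field.
var copy = new ServerInstance(original);
copy.AdminUser = "sa";                // does not change original.AdminUser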
public void Install(bool system, string clusterName, string username, string email, string password)
{
    var cluster = new Cluster(Context)
    {
        Name = clusterName,
        System = system,
    };
    cluster.Save();

    // Create administrator group and user
    var ug = new UserGroup(cluster)
    {
        Name = Constants.ClusterAdministratorUserGroupName,
        System = system,
    };
    ug.Save();

    var u = new User(cluster)
    {
        Name = username,
        System = system,
        Email = email,
        DeploymentState = Registry.DeploymentState.Deployed,
    };
    u.SetPassword(password);
    u.Save();
    u.MakeMemberOf(ug.Guid);

    // Create machine roles and machines

    // -- controller role
    var mrcont = new MachineRole(cluster)
    {
        Name = Constants.ControllerMachineRoleName,
        System = system,
        MachineRoleType = MachineRoleType.StandAlone,
    };
    mrcont.Save();

    var sv = new ServerVersion(mrcont)
    {
        Name = Constants.ServerVersionName,
    };
    sv.Save();

    var mcont = new Machine(mrcont)
    {
        Name = Constants.ControllerMachineName,
    };
    mcont.Save();

    var si = new ServerInstance(mcont)
    {
        Name = Constants.ServerInstanceName,
        ServerVersion = sv,
    };
    si.Save();

    // -- node role
    var mrnode = new MachineRole(cluster)
    {
        Name = Constants.NodeMachineRoleName,
        MachineRoleType = MachineRoleType.MirroredSet,
    };
    mrnode.Save();

    sv = new ServerVersion(mrnode)
    {
        Name = Constants.ServerVersionName,
    };
    sv.Save();

    // -- Create a node
    /*
    Machine mnode = new Machine(Context, mrnode);
    mnode.Name = Constants.NodeMachineName;
    mnode.Save();

    si = new ServerInstance(Context, mnode);
    si.Name = Constants.ServerInstanceName;
    si.ServerVersionReference.Value = sv;
    si.Save();
    */

    // Temp database definition
    var tempdd = new DatabaseDefinition(cluster)
    {
        Name = Constants.TempDbName,
        System = system,
        LayoutType = DatabaseLayoutType.Monolithic,
        DatabaseInstanceNamePattern = Constants.TempDbInstanceNamePattern,
        DatabaseNamePattern = Constants.TempDbNamePattern,
        SliceCount = 1,
        PartitionCount = 1,
    };
    tempdd.Save();

    var tempddi = new DatabaseDefinitionInstaller(tempdd);
    tempddi.GenerateDefaultChildren(sv, Constants.TempDbName);

    // Create cluster level jobs and queues

    // -- admin queue definition
    QueueDefinition qd = new QueueDefinition(cluster)
    {
        Name = Constants.MaintenanceQueueDefinitionName,
        System = system,
    };
    qd.Save();

    QueueInstance qi = new QueueInstance(mcont)
    {
        Name = Constants.MaintenanceQueueName,
        RunningState = Registry.RunningState.Running,
    };
    qi.QueueDefinitionReference.Value = qd;
    qi.Save();

    // -- long queue definition
    qd = new QueueDefinition(cluster)
    {
        Name = Constants.LongQueueDefinitionName
    };
    qd.Save();

    qi = new QueueInstance(mcont)
    {
        Name = Constants.LongQueueName,
        RunningState = Registry.RunningState.Running,
    };
    qi.QueueDefinitionReference.Value = qd;
    qi.Save();

    // -- quick queue definition
    qd = new QueueDefinition(cluster)
    {
        Name = Constants.QuickQueueDefinitionName,
    };
    qd.Save();

    qi = new QueueInstance(mcont)
    {
        Name = Constants.QuickQueueName,
        RunningState = Registry.RunningState.Running,
    };
    qi.QueueDefinitionReference.Value = qd;
    qi.Save();

    // -- database mirror job
    var jd = new JobDefinition(cluster)
    {
        Name = typeof(Jhu.Graywulf.Jobs.MirrorDatabase.MirrorDatabaseJob).Name,
        System = system,
        WorkflowTypeName = typeof(Jhu.Graywulf.Jobs.MirrorDatabase.MirrorDatabaseJob).AssemblyQualifiedName,
    };
    jd.Save();

    // -- test job
    jd = new JobDefinition(cluster)
    {
        Name = typeof(Jhu.Graywulf.Jobs.Test.TestJob).Name,
        System = system,
        WorkflowTypeName = typeof(Jhu.Graywulf.Jobs.Test.TestJob).AssemblyQualifiedName,
    };
    jd.Save();
}
protected override void Execute(CodeActivityContext activityContext)
{
    QueryBase query = Query.Get(activityContext);

    int pcount = 1;

    // Single server mode will run on one partition by definition,
    // Graywulf mode has to look at the registry for available machines
    switch (query.ExecutionMode)
    {
        case ExecutionMode.SingleServer:
            query.InitializeQueryObject(null);
            break;
        case ExecutionMode.Graywulf:
            using (Context context = ContextManager.Instance.CreateContext(this, activityContext, ConnectionMode.AutoOpen, TransactionMode.AutoCommit))
            {
                var scheduler = activityContext.GetExtension<IScheduler>();

                query.InitializeQueryObject(context, scheduler);

                // TODO: move this code inside the QueryBase class

                // If query is partitioned, statistics must be gathered
                if (query.IsPartitioned)
                {
                    // Assign a server that will run the statistics queries
                    // Try to find a server that contains all required datasets. This is true right now for
                    // SkyQuery where all databases are mirrored but will have to be updated later

                    // Collect all datasets that are required to answer the query
                    var dss = query.FindRequiredDatasets();

                    // Datasets that are mirrored and can be on any server
                    var reqds = (from ds in dss.Values
                                 where !ds.IsSpecificInstanceRequired
                                 select ds.DatabaseDefinition.Guid).ToArray();

                    // Datasets that are only available at a specific server instance
                    /*var spds = (from ds in dss.Values
                                where ds.IsSpecificInstanceRequired && !ds.DatabaseDefinition.IsEmpty
                                select ds.DatabaseDefinition.Guid).ToArray();*/

                    var spds = (from ds in dss.Values
                                where ds.IsSpecificInstanceRequired && !ds.DatabaseInstance.IsEmpty
                                select ds.DatabaseInstance.Guid).ToArray();

                    var si = new ServerInstance(context);
                    si.Guid = scheduler.GetNextServerInstance(reqds, query.StatDatabaseVersionName, spds);
                    si.Load();

                    query.AssignedServerInstance = si;

                    //query.DatabaseVersionName = query.StatDatabaseVersionName;   //*** TODO: delete

                    // *** TODO: find optimal number of partitions
                    // TODO: replace "2" with a value from settings
                    pcount = 2 * scheduler.GetServerInstances(reqds, query.SourceDatabaseVersionName, spds).Length;

                    // Now have to reinitialize to load the assigned server instances
                    query.InitializeQueryObject(context, scheduler, true);

                    EntityGuid.Set(activityContext, query.AssignedServerInstance.Guid);
                }
            }
            break;
        default:
            throw new NotImplementedException();
    }

    query.GeneratePartitions(pcount);
}
public DatabaseInstance GenerateDatabaseInstance(ServerInstance serverInstance, List<DiskVolume> dataDiskVolumes, List<DiskVolume> logDiskVolumes, Slice slice, DatabaseVersion databaseVersion, string namePattern, string databaseNamePattern, double sizeFactor, bool generateFileGroups)
{
    // --- Create the new database instance and set name
    DatabaseInstance ndi = new DatabaseInstance(databaseDefinition);

    ndi.ServerInstanceReference.Guid = serverInstance.Guid;
    ndi.SliceReference.Guid = slice.Guid;
    ndi.DatabaseVersionReference.Guid = databaseVersion.Guid;

    ndi.Name = ExpressionProperty.ResolveExpression(ndi, namePattern);
    ndi.DatabaseName = ExpressionProperty.ResolveExpression(ndi, databaseNamePattern);

    ndi.Save();

    if (generateFileGroups)
    {
        ndi.ServerInstance.Machine.LoadDiskVolumes(false);
        databaseDefinition.LoadFileGroups(false);
        slice.LoadPartitions(false);

        List<Partition> partitions = new List<Partition>(slice.Partitions.Values.OrderBy(i => i.Number));
        List<FileGroup> filegroups = new List<FileGroup>(databaseDefinition.FileGroups.Values.OrderBy(i => i.Number));

        for (int fi = 0; fi < filegroups.Count; fi++)
        {
            // --- Create data and "log" file groups ---
            if (filegroups[fi].LayoutType == FileGroupLayoutType.Monolithic ||
                filegroups[fi].FileGroupType == FileGroupType.Log)
            {
                DatabaseInstanceFileGroup nfg = new DatabaseInstanceFileGroup(ndi);

                nfg.FileGroupType = filegroups[fi].FileGroupType;
                nfg.FileGroupName = nfg.Name = filegroups[fi].FileGroupName;
                nfg.FileGroupReference.Guid = filegroups[fi].Guid;
                nfg.PartitionReference.Guid = Guid.Empty;
                nfg.AllocatedSpace = (long)(filegroups[fi].AllocatedSpace * sizeFactor);
                nfg.Save();

                nfg.GenerateInstanceFiles(dataDiskVolumes, sizeFactor);
            }
            else if (filegroups[fi].LayoutType == FileGroupLayoutType.Sliced)
            {
                for (int pi = 0; pi < partitions.Count; pi++)
                {
                    DatabaseInstanceFileGroup nfg = new DatabaseInstanceFileGroup(ndi);

                    nfg.FileGroupType = filegroups[fi].FileGroupType;
                    nfg.FileGroupName = nfg.Name = string.Format("{0}_{1}", filegroups[fi].FileGroupName, pi);
                    nfg.FileGroupReference.Guid = filegroups[fi].Guid;
                    nfg.PartitionReference.Guid = partitions[pi].Guid;
                    nfg.AllocatedSpace = (long)(filegroups[fi].AllocatedSpace * sizeFactor);
                    nfg.Save();

                    nfg.GenerateInstanceFiles(dataDiskVolumes, sizeFactor);
                }
            }
            else
            {
                throw new NotImplementedException();
            }
        }
    }

    return ndi;
}
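// Illustrative sketch only: invoking the full overload above directly with explicit
// name patterns. 'installer', 'serverInstance', 'slice' and 'version' are hypothetical,
// already-loaded registry objects, and the patterns shown are examples rather than
// the project's actual defaults.
var ndi = installer.GenerateDatabaseInstance(
    serverInstance,
    null,                                            // data disk volumes: let the installer choose
    null,                                            // log disk volumes: let the installer choose
    slice,
    version,
    "[$DatabaseDefinition.Name]_[$Slice.Name]",      // example instance name pattern
    "[$DatabaseDefinition.Name]",                    // example database name pattern
    1.0,                                             // size factor
    true);                                           // also generate file groups and files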
public Cluster Install(bool system, string clusterName, string username, string email, string password)
{
    cluster = new Cluster(Context)
    {
        Name = clusterName,
        System = system,
    };
    cluster.Save();

    // Create machine roles and machines

    // -- controller role
    var mrcont = new MachineRole(cluster)
    {
        Name = Constants.ControllerMachineRoleName,
        System = system,
        MachineRoleType = MachineRoleType.StandAlone,
    };
    mrcont.Save();

    var sv = new ServerVersion(mrcont)
    {
        Name = Constants.ServerVersionName,
        System = system,
    };
    sv.Save();

    var mcont = new Machine(mrcont)
    {
        Name = Constants.ControllerMachineName,
    };
    mcont.Save();

    var sicont = new ServerInstance(mcont)
    {
        Name = Constants.ServerInstanceName,
        ServerVersion = sv,
    };
    sicont.Save();

    // -- node role
    var mrnode = new MachineRole(cluster)
    {
        Name = Constants.NodeMachineRoleName,
        MachineRoleType = MachineRoleType.MirroredSet,
    };
    mrnode.Save();

    var nodesv = new ServerVersion(mrnode)
    {
        Name = Constants.ServerVersionName,
    };
    nodesv.Save();

    // -- Create a node
    /*
    Machine mnode = new Machine(Context, mrnode);
    mnode.Name = Constants.NodeMachineName;
    mnode.Save();

    si = new ServerInstance(Context, mnode);
    si.Name = Constants.ServerInstanceName;
    si.ServerVersionReference.Value = sv;
    si.Save();
    */

    // Create the shared domain for cluster level databases and users
    var domain = new Domain(cluster)
    {
        Name = Constants.SharedDomainName,
        Email = email,
        System = system,
    };
    domain.Save();

    // Create administrator group and user
    GenerateAdminGroup(system);
    GenerateAdmin(system, username, email, password);

    // Create the shared federation
    var federation = new Federation(domain)
    {
        Name = Constants.SharedFederationName,
        Email = email,
        System = system,
        ControllerMachine = mcont,
        SchemaSourceServerInstance = sicont,
    };
    federation.Save();

    // Temp database definition
    var tempdd = new DatabaseDefinition(federation)
    {
        Name = Constants.TempDbName,
        System = system,
        LayoutType = DatabaseLayoutType.Monolithic,
        DatabaseInstanceNamePattern = Constants.TempDbInstanceNamePattern,
        DatabaseNamePattern = Constants.TempDbNamePattern,
        SliceCount = 1,
        PartitionCount = 1,
    };
    tempdd.Save();

    var tempddi = new DatabaseDefinitionInstaller(tempdd);
    tempddi.GenerateDefaultChildren(nodesv, Constants.TempDbName);

    // Create cluster level jobs and queues

    // -- admin queue definition
    QueueDefinition qd = new QueueDefinition(cluster)
    {
        Name = Constants.MaintenanceQueueDefinitionName,
        System = system,
    };
    qd.Save();

    QueueInstance qi = new QueueInstance(mcont)
    {
        Name = Constants.MaintenanceQueueName,
        RunningState = Registry.RunningState.Running,
    };
    qi.QueueDefinitionReference.Value = qd;
    qi.Save();

    // -- long queue definition
    qd = new QueueDefinition(cluster)
    {
        Name = Constants.LongQueueDefinitionName
    };
    qd.Save();

    qi = new QueueInstance(mcont)
    {
        Name = Constants.LongQueueName,
        RunningState = Registry.RunningState.Running,
    };
    qi.QueueDefinitionReference.Value = qd;
    qi.Save();

    // -- quick queue definition
    qd = new QueueDefinition(cluster)
    {
        Name = Constants.QuickQueueDefinitionName,
    };
    qd.Save();

    qi = new QueueInstance(mcont)
    {
        Name = Constants.QuickQueueName,
        RunningState = Registry.RunningState.Running,
    };
    qi.QueueDefinitionReference.Value = qd;
    qi.Save();

    // -- database mirror job
    var jd = new JobDefinition(federation)
    {
        Name = typeof(Jhu.Graywulf.Jobs.MirrorDatabase.MirrorDatabaseJob).Name,
        System = system,
        WorkflowTypeName = typeof(Jhu.Graywulf.Jobs.MirrorDatabase.MirrorDatabaseJob).AssemblyQualifiedName,
    };
    jd.DiscoverWorkflowParameters();
    jd.Save();

    // -- test job
    jd = new JobDefinition(federation)
    {
        Name = typeof(Jhu.Graywulf.Jobs.Test.TestJob).Name,
        System = system,
        WorkflowTypeName = typeof(Jhu.Graywulf.Jobs.Test.TestJob).AssemblyQualifiedName,
    };
    jd.DiscoverWorkflowParameters();
    jd.Save();

    return cluster;
}
public void PrepareComputeTableStatistics(Context context, TableReference tr, out string connectionString, out string sql)
{
    // Assign a database server to the query
    // TODO: maybe make this function generic
    // TODO: check this part to use appropriate server and database

    var sm = GetSchemaManager(false);
    var ds = sm.Datasets[tr.DatasetName];

    if (ds is GraywulfDataset && !((GraywulfDataset)ds).IsSpecificInstanceRequired)
    {
        var gds = (GraywulfDataset)ds;

        var dd = new DatabaseDefinition(context);
        dd.Guid = gds.DatabaseDefinition.Guid;
        dd.Load();

        // Get a server from the scheduler
        var si = new ServerInstance(Context);
        si.Guid = Scheduler.GetNextServerInstance(new Guid[] { dd.Guid }, StatDatabaseVersionName, null);
        si.Load();

        connectionString = si.GetConnectionString().ConnectionString;

        SubstituteDatabaseName(tr, si.Guid, StatDatabaseVersionName);
        tr.DatabaseObject = null;
    }
    else
    {
        // Run it on the specific database
        connectionString = ds.ConnectionString;
    }

    // Generate statistics query
    var cg = new SqlServerCodeGenerator();
    cg.ResolveNames = true;

    sql = cg.GenerateTableStatisticsQuery(tr);
}
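// Illustrative sketch only: executing the statistics query prepared above using
// standard ADO.NET. 'queryObject', 'tableReference' and 'registryContext' are
// hypothetical, already-initialized variables; the actual pipeline may execute
// the query differently.
string connectionString, sql;
queryObject.PrepareComputeTableStatistics(registryContext, tableReference, out connectionString, out sql);

using (var cn = new System.Data.SqlClient.SqlConnection(connectionString))
{
    cn.Open();

    using (var cmd = new System.Data.SqlClient.SqlCommand(sql, cn))
    {
        cmd.CommandTimeout = 0;     // statistics queries can run long
        cmd.ExecuteNonQuery();
    }
}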
public void Load(string clusterName)
{
    using (Context context = ContextManager.Instance.CreateContext(ConnectionMode.AutoOpen, TransactionMode.AutoCommit))
    {
        var ef = new EntityFactory(context);
        var cluster = ef.LoadEntity<Jhu.Graywulf.Registry.Cluster>(clusterName);

        machines = new Dictionary<Guid, Machine>();
        serverInstances = new Dictionary<Guid, ServerInstance>();
        databaseInstances = new Dictionary<Guid, DatabaseInstance>();
        databaseDefinitions = new Dictionary<Guid, DatabaseDefinition>();
        queues = new Dictionary<Guid, Queue>();

        cluster.LoadMachineRoles(true);

        // *** TODO: handle machines that are down
        foreach (var mr in cluster.MachineRoles.Values)
        {
            mr.LoadMachines(true);

            foreach (var mm in mr.Machines.Values)
            {
                var mmi = new Machine(mm);
                machines.Add(mm.Guid, mmi);

                mm.LoadServerInstances(true);

                foreach (var si in mm.ServerInstances.Values)
                {
                    var ssi = new ServerInstance(si);
                    ssi.Machine = mmi;
                    serverInstances.Add(si.Guid, ssi);
                }

                mm.LoadQueueInstances(true);

                foreach (var qi in mm.QueueInstances.Values)
                {
                    var q = new Queue();
                    q.Update(qi);
                    queues.Add(qi.Guid, q);
                }
            }
        }

        cluster.LoadDomains(true);

        foreach (var dom in cluster.Domains.Values)
        {
            dom.LoadFederations(true);

            foreach (var ff in dom.Federations.Values)
            {
                ff.LoadDatabaseDefinitions(true);

                foreach (var dd in ff.DatabaseDefinitions.Values)
                {
                    databaseDefinitions.Add(dd.Guid, new DatabaseDefinition(dd));

                    dd.LoadDatabaseInstances(true);

                    foreach (var di in dd.DatabaseInstances.Values)
                    {
                        var ddi = new DatabaseInstance(di);

                        // add to global list
                        databaseInstances.Add(di.Guid, ddi);

                        // add to database definition lists
                        Dictionary<Guid, DatabaseInstance> databaseinstances;
                        if (databaseDefinitions[dd.Guid].DatabaseInstances.ContainsKey(di.DatabaseVersion.Name))
                        {
                            databaseinstances = databaseDefinitions[dd.Guid].DatabaseInstances[di.DatabaseVersion.Name];
                        }
                        else
                        {
                            databaseinstances = new Dictionary<Guid, DatabaseInstance>();
                            databaseDefinitions[dd.Guid].DatabaseInstances.Add(di.DatabaseVersion.Name, databaseinstances);
                        }
                        databaseinstances.Add(di.Guid, ddi);

                        ddi.ServerInstance = serverInstances[di.ServerInstanceReference.Guid];
                        ddi.DatabaseDefinition = databaseDefinitions[dd.Guid];
                    }
                }
            }
        }
    }
}
private int GetNextServerIndex(ServerInstance[] serverInstances)
{
    lock (syncRoot)
    {
        // Find server with the earliest time stamp
        DateTime min = DateTime.MaxValue;
        int m = -1;

        for (int i = 0; i < serverInstances.Length; i++)
        {
            ServerInstance si = serverInstances[i];

            if (si.LastAssigned < min)
            {
                min = si.LastAssigned;
                m = i;
            }
        }

        if (m == -1)
        {
            throw new SchedulerException(ExceptionMessages.NoServerForDatabaseFound);
        }

        serverInstances[m].LastAssigned = DateTime.Now;

        return m;
    }
}
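// Illustrative sketch only: because the method above always picks the server with
// the oldest LastAssigned time stamp and then refreshes it, repeated calls cycle
// through the available servers. 'servers' is a hypothetical array of available
// ServerInstance objects.
var first = servers[GetNextServerIndex(servers)];
var second = servers[GetNextServerIndex(servers)];   // a different server, unless only one is available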