/// <summary>
/// Populates <paramref name="database"/>.PartitionSchemes from the server, one
/// <see cref="PartitionScheme"/> per distinct ID row, accumulating file-group names
/// for consecutive rows that share an ID (query is expected ordered by ID).
/// </summary>
/// <param name="database">Model database whose PartitionSchemes collection is filled.</param>
/// <param name="connectioString">Connection string for the source server (parameter name kept, typo and all, for caller compatibility).</param>
public void Fill(Database database, string connectioString)
{
    // NOTE(review): FilterPartitionScheme == true appears to mean "include partition
    // schemes" here — confirm against the Options.Ignore semantics.
    if (!database.Options.Ignore.FilterPartitionScheme)
    {
        return;
    }

    using (SqlConnection conn = new SqlConnection(connectioString))
    using (SqlCommand command = new SqlCommand(GetSQL(), conn))
    {
        conn.Open();
        command.CommandTimeout = 0; // schema queries on large servers can be slow; no timeout

        using (SqlDataReader reader = command.ExecuteReader())
        {
            // FIX: original seeded lastObjectId with 0, so a first row whose ID is 0
            // would never create an item and item.FileGroups would NRE. Use a nullable
            // sentinel so any real ID (including 0) starts a new scheme.
            int? lastObjectId = null;
            PartitionScheme item = null;

            while (reader.Read())
            {
                int id = (int)reader["ID"];
                if (lastObjectId != id)
                {
                    lastObjectId = id;
                    item = new PartitionScheme(database);
                    item.Id = id;
                    item.Name = reader["name"].ToString();
                    item.PartitionFunction = reader["FunctionName"].ToString();
                    database.PartitionSchemes.Add(item);
                }
                // Every row carries one file group belonging to the current scheme.
                item.FileGroups.Add(reader["FileGroupName"].ToString());
            }
        }
    }
}
/// <summary>
/// Maps a <see cref="PartitionScheme"/> kind to the concrete
/// <c>PartitionSchemeDescription</c> subtype that models it; returns null
/// when the kind is not one of the three known schemes.
/// </summary>
internal static Type ResolveDerivedClass(PartitionScheme scheme)
{
    if (scheme == PartitionScheme.Singleton)
    {
        return typeof(SingletonPartitionSchemeDescription);
    }

    if (scheme == PartitionScheme.UniformInt64Range)
    {
        return typeof(UniformInt64RangePartitionSchemeDescription);
    }

    if (scheme == PartitionScheme.Named)
    {
        return typeof(NamedPartitionSchemeDescription);
    }

    return null;
}
/// <summary>
/// Provisions SQL Server storage for a new organization: ensures a dedicated file group
/// and secondary data file exist, then extends the organization partition scheme/function
/// so the new organization code gets its own partition. All failures are logged, never thrown.
/// </summary>
/// <param name="currOrganizationCode">Current organization code (not used by this method; kept for caller compatibility).</param>
/// <param name="intOrganizationCode">Organization code used as the new partition boundary and in storage object names.</param>
private void CreateDatabasePartition4Organization(Int32 currOrganizationCode, Int32 intOrganizationCode)
{
    try
    {
        // (Removed an unused local that read txtDomainName — it was never referenced.)
        String strOrganizationCode = MakeZeroString(intOrganizationCode);
        String strDataFileName = "datafile_" + strOrganizationCode;
        String strFileGroupName = "filegrp_" + strOrganizationCode;

        // SECURITY(review): hard-coded "sa" credentials in source control. Move these to
        // protected configuration or use integrated security — do not ship literal passwords.
        ServerConnection cnnServer = new ServerConnection(
            System.Configuration.ConfigurationManager.AppSettings["DataCentreHost"], "sa", "q2c4b7m1");
        Server myServer = new Server(cnnServer);
        Database lapbaseDatabase = myServer.Databases[System.Configuration.ConfigurationManager.AppSettings["LapbaseDatabase"]];

        // Reuse the organization's file group when it already exists, otherwise create it.
        FileGroup orgFileGroup;
        if (lapbaseDatabase.FileGroups.Contains(strFileGroupName))
        {
            orgFileGroup = lapbaseDatabase.FileGroups[strFileGroupName];
        }
        else
        {
            orgFileGroup = new FileGroup(lapbaseDatabase, strFileGroupName);
            orgFileGroup.Create();
        }

        try
        {
            // One secondary (.ndf) data file per organization, placed beside the primary file.
            DataFile orgDataFile = new DataFile(orgFileGroup, strDataFileName,
                lapbaseDatabase.PrimaryFilePath + @"\" + strDataFileName + ".ndf");
            orgDataFile.IsPrimaryFile = false;
            orgDataFile.Size = 10240;   //Initial size: 10 MB
            orgDataFile.Growth = 10240; //Growth by   : 10 MB
            orgDataFile.Create();

            // Point the scheme's NEXT USED file group at the new file group so the
            // upcoming SPLIT lands the new partition there.
            PartitionScheme schemePartition = lapbaseDatabase.PartitionSchemes["scheme_OrganizationCodePartition"];
            try
            {
                schemePartition.NextUsedFileGroup = orgFileGroup.Name;
                schemePartition.Alter();
            }
            catch (Exception err)
            {
                gClass.AddErrorLogData("0", Request.Url.Host, "", "CreateDatabasePartition4Organization", "schemePartiton.NextUsedFileGroup", err.ToString());
            }

            // Split the partition function at the new organization code.
            PartitionFunction fnPartition = lapbaseDatabase.PartitionFunctions["fn_OrganizationCodePartition"];
            try
            {
                fnPartition.SplitRangePartition(intOrganizationCode);
                fnPartition.Alter();
            }
            catch (Exception err)
            {
                gClass.AddErrorLogData("0", Request.Url.Host, "", "CreateDatabasePartition4Organization", "fnPartition.SplitRangePartition", err.ToString());
            }
        }
        catch (Exception err)
        {
            gClass.AddErrorLogData("0", Request.Url.Host, "", "CreateDatabasePartition4Organization", "Add new organization", err.ToString());
        }

        lapbaseDatabase.Refresh();
        cnnServer.Disconnect();
    }
    catch (Exception ex)
    {
        gClass.AddErrorLogData("0", Request.Url.Host, "", "linkbtnSaveOrganization_onclick", "Add new organization", ex.ToString());
    }
}
/// <summary>
/// SMO sample: creates a database with six extra file groups (one data file each),
/// a LEFT range partition function/scheme over an int column (7 partitions), and a
/// table whose partition 5 uses PAGE compression and partitions 6-7 use ROW compression.
/// </summary>
static void Main()
{
    Server server = new Server(@".");
    string dbName = @"SMOPartitionAndCompressionSampleDB";

    Database db = new Database(server, dbName);
    db.Create();

    // Six secondary file groups with one data file each (db.FileGroups[0] is PRIMARY).
    // The original repeated these statements six times; a loop produces identical names/state.
    string temp_dir = server.Information.MasterDBPath;
    for (int i = 1; i <= 6; i++)
    {
        db.FileGroups.Add(new FileGroup(db,
            string.Format(CultureInfo.InvariantCulture, "TABLE_PART{0}_FG", i)));
        db.FileGroups[i].Files.Add(new DataFile(db.FileGroups[i],
            string.Format(CultureInfo.InvariantCulture, "tbl_datafile{0}", i),
            string.Format(CultureInfo.InvariantCulture, "{0}\\PPSampledb_{1}.mdf", temp_dir, i)));
    }
    db.Alter();

    // LEFT range function: each boundary value belongs to the partition on its left,
    // so 6 boundary values yield 7 partitions.
    PartitionFunction tbl_pf = new PartitionFunction(db, "MyTablePartitionFunction");
    tbl_pf.PartitionFunctionParameters.Add(new PartitionFunctionParameter(tbl_pf, DataType.Int));
    tbl_pf.RangeType = RangeType.Left;
    tbl_pf.RangeValues = new object[] { 5000, 10000, 15000, 20000, 25000, 30000 };
    tbl_pf.Create();

    // Scheme maps the 7 partitions onto PRIMARY plus the six file groups, in order.
    PartitionScheme tbl_ps = new PartitionScheme(db, "MyTablePartitionScheme");
    tbl_ps.PartitionFunction = "MyTablePartitionFunction";
    tbl_ps.FileGroups.Add("PRIMARY");
    for (int i = 1; i <= 6; i++)
    {
        tbl_ps.FileGroups.Add(string.Format(CultureInfo.InvariantCulture, "TABLE_PART{0}_FG", i));
    }
    tbl_ps.Create();

    Table table = new Table(db, "MyTable");
    table.Columns.Add(new Column(table, "col1", DataType.Int));
    table.Columns.Add(new Column(table, "col2", DataType.Int));
    table.PartitionScheme = "MyTablePartitionScheme";
    table.PartitionSchemeParameters.Add(new PartitionSchemeParameter(table, "col1"));

    //Add PhysicalPartition objects for the partitions on which you like to apply compression.
    //Make Partition 5 as Page compressed
    table.PhysicalPartitions.Add(new PhysicalPartition(table, 5, DataCompressionType.Page));
    //Make Partition 6 and 7 Row compressed
    table.PhysicalPartitions.Add(new PhysicalPartition(table, 6, DataCompressionType.Row));
    table.PhysicalPartitions.Add(new PhysicalPartition(table, 7, DataCompressionType.Row));
    table.Create();
}
// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
// Sets up swagger UIs for each downstream service, then installs a terminal proxy
// (app.Run) that routes requests to the OrderBook / Fulfillment / Logger services
// based on path, resolving partition keys where the target service is partitioned.
public void Configure(IApplicationBuilder app, IHostingEnvironment env)
{
    if (env.IsDevelopment())
    {
        app.UseDeveloperExceptionPage();
    }

    app.UseSwagger();

    // Enable middleware to serve swagger-ui (HTML, JS, CSS, etc.),
    // specifying the Swagger JSON endpoint.
    // NOTE(review): this first UI is mounted at the site root (RoutePrefix = "") and
    // points at the gateway's own swagger.json; the three below serve per-service UIs
    // under their own prefixes — confirm the root mount is intentional.
    app.UseSwaggerUI(c =>
    {
        c.SwaggerEndpoint($"/swagger/v1/swagger.json", "OrderBook API");
        c.RoutePrefix = "";
        //c.SupportedSubmitMethods(new SubmitMethod[] { });
    });
    // Empty SubmitMethod array disables the "Try it out" buttons — these UIs are read-only.
    app.UseSwaggerUI(c =>
    {
        c.SwaggerEndpoint($"OrderBook/swagger/v1/swagger.json", "OrderBook API");
        c.RoutePrefix = "orderbook";
        c.SupportedSubmitMethods(new SubmitMethod[] { });
    });
    app.UseSwaggerUI(c =>
    {
        c.SwaggerEndpoint($"Fulfillment/swagger/v1/swagger.json", "Fulfillment API");
        c.RoutePrefix = "fulfillment";
        c.SupportedSubmitMethods(new SubmitMethod[] { });
    });
    app.UseSwaggerUI(c =>
    {
        c.SwaggerEndpoint($"Logger/swagger/v1/swagger.json", "Logger API");
        c.RoutePrefix = "logger";
        c.SupportedSubmitMethods(new SubmitMethod[] { });
    });

    // This is a simple proxy middleware that will detect whether a service
    // is partitioned or not and forward the request accordingly. This is
    // intended as the entry point for the load tests, please do not edit.
    app.Run(async(context) =>
    {
        // REQUIRED, DO NOT REMOVE.
        FraudCheck.Check();

        // If no path is provided, error.
        // NOTE(review): 502 Bad Gateway is unusual for an empty path (400 would be
        // conventional); left as-is since the load tests may depend on it.
        if (context.Request.Path == "/")
        {
            context.Response.StatusCode = 502;
            await context.Response.WriteAsync($"Invalid Input");
            return;
        }

        // If the path matches the OrderBook api
        if (IsOrderBookServiceRequest(context))
        {
            // The OrderBook's partition scheme is resolved at request time, so the
            // gateway adapts if the service is redeployed with different partitioning.
            PartitionScheme partitioningScheme = await GetOrderBookPartitioningScheme();
            // Strips the currency segment out of the request path (by ref) and returns it.
            var currency = GetAndRemoveCurrencyFromRequest(ref context); // Expects paths of the form '/api/orders/bid/GBPUSD'

            // If the OrderBook is a singleton
            if (partitioningScheme == PartitionScheme.Singleton)
            {
                string forwardingUrl = GetForwardingUrl(context, "OrderBook");
                await ProxyRequestHelper(context, forwardingUrl);
                return;
            }

            // If the OrderBook is using named partitions
            // (the currency extracted above becomes the partition key).
            if (partitioningScheme == PartitionScheme.Named && currency != string.Empty)
            {
                string forwardingUrl = GetForwardingUrl(context, "OrderBook");
                var partitionedEndpoint = $"{forwardingUrl}?PartitionKey={currency}&PartitionKind=Named&Timeout={requestTimeout}";
                await ProxyRequestHelper(context, partitionedEndpoint);
                return;
            }

            // Named scheme with no currency, or any other scheme, is unsupported.
            throw new InvalidOperationException("OrderBook must use either singleton or named partition scheme");
        }

        // If the path matches the Fulfillment api
        if (IsFulfillmentServiceRequest(context))
        {
            // The Fulfillment service is Int64Range partitioned by default
            // so we don't handle the singleton case.
            string forwardingUrl = GetForwardingUrl(context, "Fulfillment");
            var partitionedEndpoint = new Uri($"{forwardingUrl}?PartitionKey=1&PartitionKind=Int64Range&Timeout={requestTimeout}");

            // All requests through the gateway will hit a single partition of the
            // fulfillment service. This is because we only use it to create
            // our test users.
            using (var requestMessage = context.CreateProxyHttpRequest(partitionedEndpoint))
            {
                using (var responseMessage = await context.SendProxyHttpRequest(requestMessage))
                {
                    await context.CopyProxyHttpResponse(responseMessage);
                    return;
                }
            }
        }

        // If the path matches the Logger api
        if (IsLoggerServiceRequest(context))
        {
            string forwardingUrl = GetForwardingUrl(context, "Logger");

            // All requests through the gateway will hit a single partition of the
            // logger service. This is because we only use it to query the DB count.
            var partitionedEndpoint = new Uri($"{forwardingUrl}?PartitionKey=1&PartitionKind=Int64Range&Timeout={requestTimeout}");

            using (var requestMessage = context.CreateProxyHttpRequest(partitionedEndpoint))
            {
                using (var responseMessage = await context.SendProxyHttpRequest(requestMessage))
                {
                    await context.CopyProxyHttpResponse(responseMessage);
                    return;
                }
            }
        }

        // Unmatched paths fall through and produce an empty response.
        return;
    });
}
/// <summary>
/// <para>Creates a <see cref="System.Fabric.Description.PartitionSchemeDescription" /> of the specified kind.</para>
/// </summary>
/// <param name="scheme">
/// <para>The <see cref="System.Fabric.Description.PartitionScheme" /> value identifying which
/// partitioning kind this description represents; stored in <c>Scheme</c>.</para>
/// </param>
protected PartitionSchemeDescription(PartitionScheme scheme)
{
    Scheme = scheme;
}
/// <summary>
/// <para>Initializes a new instance of this class.</para>
/// </summary>
/// <param name="kind">
/// <para>The <see cref="System.Fabric.Description.PartitionScheme" /> value identifying the
/// derived type of this instance; stored in <c>PartitionKind</c>.</para>
/// </param>
protected RepartitionDescription(PartitionScheme kind)
{
    PartitionKind = kind;
}
// Based on Microsoft's example https://docs.microsoft.com/en-us/sql/t-sql/statements/create-partition-scheme-transact-sql?view=sql-server-2017
/// <summary>
/// Demo: recreates a TestPartition database with four file groups, a LEFT range
/// partition function/scheme over an int column, a partitioned table, inserts sample
/// rows, and prints per-partition row counts from sys.dm_db_partition_stats.
/// </summary>
static void Main(string[] args)
{
    var connectionString = ConfigurationManager.ConnectionStrings["ConnStr"].ConnectionString;

    // BUG FIX: the original wrapped the SqlConnection in a `using` block and called
    // cnn.Connect() *after* the connection was disposed. Keep it alive for the whole
    // session and let the `using` dispose it at the end.
    using (var sqlConnection = new SqlConnection(connectionString))
    {
        var cnn = new ServerConnection(sqlConnection);
        cnn.Connect();
        Console.WriteLine("Connected");

        //Create the server object
        var server = new Server(cnn);
        Console.WriteLine("Create the server object");

        const string dataBaseName = @"TestPartition";
        try
        {
            //Drop the database if exists
            server.Databases[dataBaseName]?.Drop();

            //Create TestPartition database
            Console.WriteLine($"Creating the database {dataBaseName}"); // fixed missing space in message
            var db = new Database(server, dataBaseName);
            db.Create();

            // Adding four file groups and corresponding files
            ConsoleEx.WriteLine("Adding four file groups", ConsoleColor.Cyan);
            for (var i = 1; i < 5; i++)
            {
                db.FileGroups.Add(new FileGroup(db, $"test{i}fg"));
                db.FileGroups[i].Files.Add(new DataFile(db.FileGroups[i], $"test{i}datafile",
                    $"{server.Information.MasterDBPath}\\{dataBaseName}_{i}.mdf"));
            }
            // Actually added
            db.Alter();

            ConsoleEx.WriteLine("Creating the partition function", ConsoleColor.Yellow);
            //Create the partition function
            var partitionFunction = new PartitionFunction(db, "myRangePF1");
            partitionFunction.PartitionFunctionParameters.Add(
                new PartitionFunctionParameter(partitionFunction, DataType.Int));
            partitionFunction.RangeType = RangeType.Left;
            partitionFunction.RangeValues = new object[] { 1, 100, 1000 };
            partitionFunction.Create();

            // Filegroup test1fg   | test2fg                  | test3fg                     | test4fg
            // Partition 1         | 2                        | 3                           | 4
            // Values    col1 <= 1 | col1 > 1 AND col1 <= 100 | col1 > 100 AND col1 <= 1000 | col1 > 1000
            ConsoleEx.WriteLine("Creating the partition scheme", ConsoleColor.Red);
            var partitionScheme = new PartitionScheme(db, "myRangePS1")
            {
                PartitionFunction = "myRangePF1"
            };
            for (var i = 1; i < 5; i++)
            {
                partitionScheme.FileGroups.Add($"test{i}fg");
            }
            partitionScheme.Create();

            ConsoleEx.WriteLine("Creating the table", ConsoleColor.Cyan);
            var table = new Table(db, "TestTable");
            table.Columns.Add(new Column(table, "col1", DataType.Int));
            table.PartitionScheme = "myRangePS1";
            table.PartitionSchemeParameters.Add(new PartitionSchemeParameter(table, "col1"));
            table.Create();

            //Insert a few records into newly create table
            db.ExecuteNonQuery(@"INSERT INTO DBO.TESTTABLE VALUES (0), (1), (100), (200), (3000);");

            // Examine sys.dm_db_partition_stats
            var dataset = db.ExecuteWithResults(
                @"SELECT partition_number,row_count FROM sys.dm_db_partition_stats WHERE object_id = OBJECT_ID('DBO.TESTTABLE');");
            foreach (DataRow row in dataset.Tables[0].Rows)
            {
                ConsoleEx.WriteLine($"Partition {row["partition_number"]} has {row["row_count"]} rows",
                    ConsoleColor.Magenta);
            }
        }
        catch (Exception ex)
        {
            // Flatten the inner-exception chain into one message per line.
            Console.WriteLine(string.Join(Environment.NewLine + "\t",
                ex.CollectThemAll(ex1 => ex1.InnerException).Select(ex1 => ex1.Message)));
        }

        if (cnn.IsOpen)
        {
            cnn.Disconnect();
        }
        // (Removed the pointless `db/cnn/server = null` assignments — the GC does not need them.)
    }

    Console.WriteLine("Press any key to exit...");
    Console.ReadLine();
}