コード例 #1
0
        /// <summary>
        /// Splits a SARIF log into a collection of smaller logs according to the
        /// specified splitting strategy.
        /// </summary>
        /// <param name="sarifLog">The log to split.</param>
        /// <param name="splittingStrategy">How to group results into partitioned logs.</param>
        /// <returns>One SARIF log per partition key produced by the strategy.</returns>
        /// <exception cref="NotImplementedException">Thrown for strategies not handled here.</exception>
        public static IEnumerable <SarifLog> Split(this SarifLog sarifLog, SplittingStrategy splittingStrategy)
        {
            PartitionFunction <string> partitionFunction = null;

            switch (splittingStrategy)
            {
            case SplittingStrategy.PerResult:
            {
                // NOTE(review): this keys on RuleId, which yields one log per *rule*,
                // not one log per result. Other splitters implement PerResult with a
                // fresh GUID per result — confirm this grouping is intentional.
                partitionFunction = (result) => result.RuleId;
                break;
            }

            case SplittingStrategy.PerRun:
            {
                // Attach each run to its results so the partition key below can reach it.
                foreach (Run run in sarifLog.Runs)
                {
                    run.SetRunOnResults();
                }

                // One partition per owning run, keyed by the run's hash code.
                partitionFunction = (result) => result.Run.GetHashCode().ToString();
                break;
            }

            default:
            {
                throw new NotImplementedException($"SplittingStrategy: {splittingStrategy}");
            }
            }

            // deepClone: false — partitioned logs share objects with the input log,
            // so neither should be mutated afterwards.
            var partitioningVisitor = new PartitioningVisitor <string>(partitionFunction, deepClone: false);

            partitioningVisitor.VisitSarifLog(sarifLog);

            return(partitioningVisitor.GetPartitionLogs().Values);
        }
コード例 #2
0
 /// <summary>
 /// Creates a <see cref="SqlAlterPartitionFunction"/> node for the given partition
 /// function, boundary value, and alteration option.
 /// </summary>
 /// <param name="partitionFunction">The partition function to alter. Must not be null.</param>
 /// <param name="booundary">The boundary value. (The parameter name contains a typo, but
 /// renaming it would break callers that use named arguments.)</param>
 /// <param name="option">The alteration option to apply.</param>
 /// <returns>A new <see cref="SqlAlterPartitionFunction"/> node.</returns>
 public static SqlAlterPartitionFunction Alter(
     PartitionFunction partitionFunction, string booundary, SqlAlterPartitionFunctionOption option)
 {
     // nameof keeps the reported argument names in sync with the declaration
     // (the produced strings are identical to the previous literals).
     ArgumentValidator.EnsureArgumentNotNull(partitionFunction, nameof(partitionFunction));
     ArgumentValidator.EnsureArgumentNotNull(booundary, nameof(booundary));
     return new SqlAlterPartitionFunction(partitionFunction, booundary, option);
 }
コード例 #3
0
        /// <summary>
        /// Extracts the results selected in the error list from the original SARIF log.
        /// Returns null when the selection cannot be mapped back to a cached run/log,
        /// or when none of the selected results survive partitioning.
        /// </summary>
        internal SarifLog GetPartitionedLog(IEnumerable <SarifErrorListItem> listItems)
        {
            int    runIndex = -1;
            string guid     = Guid.NewGuid().ToString();

            // Tag every selected result with one fresh GUID so the partitioning
            // visitor groups exactly these results into a single partition.
            foreach (SarifErrorListItem item in listItems)
            {
                if (item.SarifResult != null)
                {
                    item.SarifResult.Guid = guid;
                    if (runIndex == -1)
                    {
                        // Remember the first item's run; the cached log is looked up from it.
                        runIndex = item.RunIndex;
                    }
                }
            }

            if (runIndex == -1 || !this.RunIndexToRunDataCache.TryGetValue(runIndex, out RunDataCache dataCache) || dataCache.SarifLog == null)
            {
                return null;
            }

            // Partition results in the log. Results whose Guid was not set above map to
            // their own (or null) keys and are simply not part of the partition we want.
            PartitionFunction <string> partitionFunction = (result) => result.Guid;
            var partitioningVisitor = new PartitioningVisitor <string>(partitionFunction, deepClone: false);

            partitioningVisitor.VisitSarifLog(dataCache.SarifLog);
            Dictionary <string, SarifLog> partitions = partitioningVisitor.GetPartitionLogs();

            // TryGetValue avoids a KeyNotFoundException when no tagged result made it
            // into the partition map; null is this method's failure convention.
            return partitions.TryGetValue(guid, out SarifLog partition) ? partition : null;
        }
コード例 #4
0
 /// <summary>
 /// Initializes an ALTER node targeting <paramref name="partitionFunction"/> at the
 /// given boundary, with the supplied alteration option.
 /// </summary>
 internal SqlAlterPartitionFunction(PartitionFunction partitionFunction, string boundary,
                                    SqlAlterPartitionFunctionOption option)
     : base(SqlNodeType.Alter)
 {
     this.partitionFunction = partitionFunction;
     this.boundary = boundary;
     this.option = option;
 }
コード例 #5
0
        /// <summary>
        /// Loads partition functions and their boundary values from SQL Server metadata
        /// into <paramref name="database"/>.PartitionFunctions.
        /// </summary>
        /// <param name="database">Model object the partition functions are added to.</param>
        /// <param name="connectioString">Connection string. (Parameter name contains a typo;
        /// kept as-is to preserve the signature for named-argument callers.)</param>
        public void Fill(Database database, string connectioString)
        {
            int lastObjectId       = 0;
            PartitionFunction item = null;

            if (database.Options.Ignore.FilterPartitionFunction)
            {
                using (SqlConnection conn = new SqlConnection(connectioString))
                {
                    using (SqlCommand command = new SqlCommand(GetSQL(), conn))
                    {
                        conn.Open();
                        // No timeout: the metadata query may be slow on large databases.
                        command.CommandTimeout = 0;
                        using (SqlDataReader reader = command.ExecuteReader())
                        {
                            // Rows are assumed grouped by function_id: the first row of each
                            // function creates the item, subsequent rows add boundary values.
                            // NOTE(review): if the first row ever had function_id == 0, item
                            // would still be null at the switch below — verify GetSQL()'s ordering.
                            while (reader.Read())
                            {
                                if (lastObjectId != (int)reader["function_id"])
                                {
                                    lastObjectId         = (int)reader["function_id"];
                                    item                 = new PartitionFunction(database);
                                    item.Id              = (int)reader["function_id"];
                                    item.Name            = reader["name"].ToString();
                                    item.IsBoundaryRight = (bool)reader["IsRight"];
                                    item.Precision       = (byte)reader["precision"];
                                    item.Scale           = (byte)reader["scale"];
                                    item.Size            = (short)reader["max_length"];
                                    item.Type            = reader["TypeName"].ToString();
                                    database.PartitionFunctions.Add(item);
                                }

                                // Render the boundary value as a T-SQL literal appropriate to
                                // the partition function's parameter type.
                                switch (item.Type)
                                {
                                case "binary":
                                case "varbinary":
                                    item.Values.Add(ToHex((byte[])reader["value"]));
                                    break;

                                case "date":
                                    item.Values.Add(String.Format("'{0:yyyy/MM/dd}'", (DateTime)reader["value"]));
                                    break;

                                case "smalldatetime":
                                case "datetime":
                                    item.Values.Add(String.Format("'{0:yyyy/MM/dd HH:mm:ss.fff}'", (DateTime)reader["value"]));
                                    break;

                                default:
                                    item.Values.Add(reader["value"].ToString());
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
コード例 #6
0
        /// <summary>
        /// Partitions the specified SARIF log into a set of "partitioned logs" using the
        /// supplied partitioning function. Each partitioned log carries only the elements
        /// of run-level collections (e.g. Run.Artifacts) relevant to its subset of results.
        /// </summary>
        /// <typeparam name="T">
        /// Type of the partition key. Must be a reference type (so null is a valid value)
        /// and must override bool Equals(T) so distinct instances can compare equal.
        /// </typeparam>
        /// <param name="log">The SARIF log to partition.</param>
        /// <param name="partitionFunction">
        /// Maps each result to the key of the partitioned log it belongs in; a null key
        /// discards the result from every partitioned log.
        /// </param>
        /// <param name="deepClone">
        /// true: build each partitioned log from a deep clone of the original (no shared
        /// objects, safe to modify, but slower and more memory-hungry). false: build from
        /// a shallow copy (faster and leaner, but no log — original or partitioned — may
        /// be safely modified, since object sharing is unspecified).
        /// </param>
        /// <returns>
        /// A dictionary from partition key to the SARIF log holding the results mapped
        /// to that key.
        /// </returns>
        public static IDictionary <T, SarifLog> Partition <T>(
            SarifLog log,
            PartitionFunction <T> partitionFunction,
            bool deepClone)
            where T : class, IEquatable <T>
        {
            var partitioningVisitor = new PartitioningVisitor<T>(partitionFunction, deepClone);
            partitioningVisitor.VisitSarifLog(log);
            return partitioningVisitor.GetPartitionLogs();
        }
コード例 #7
0
        /// <summary>
        /// Loads partition functions and their boundary values from SQL Server metadata
        /// into <paramref name="database"/>.PartitionFunctions. No-op for SQL Server 2000,
        /// which has no partition functions.
        /// </summary>
        /// <param name="database">Model object the partition functions are added to.</param>
        /// <param name="connectioString">Connection string. (Parameter name contains a typo;
        /// kept as-is to preserve the signature for named-argument callers.)</param>
        public void Fill(Database database, string connectioString)
        {
            int lastObjectId       = 0;
            PartitionFunction item = null;

            // Partition functions were introduced after SQL Server 2000 — nothing to load.
            if (database.Info.Version == DatabaseInfo.VersionNumber.SQLServer2000)
            {
                return;
            }
            if (database.Options.Ignore.FilterPartitionFunction)
            {
                using (SqlConnection conn = new SqlConnection(connectioString))
                {
                    using (SqlCommand command = new SqlCommand(GetSQL(), conn))
                    {
                        conn.Open();
                        // No timeout: the metadata query may be slow on large databases.
                        command.CommandTimeout = 0;
                        using (SqlDataReader reader = command.ExecuteReader())
                        {
                            // Rows are assumed grouped by function_id: the first row of each
                            // function creates the item, subsequent rows add boundary values.
                            while (reader.Read())
                            {
                                if (lastObjectId != (int)reader["function_id"])
                                {
                                    lastObjectId         = (int)reader["function_id"];
                                    item                 = new PartitionFunction(database);
                                    item.Id              = (int)reader["function_id"];
                                    item.Name            = reader["name"].ToString();
                                    item.IsBoundaryRight = (bool)reader["IsRight"];
                                    item.Precision       = (byte)reader["precision"];
                                    item.Scale           = (byte)reader["scale"];
                                    item.Size            = (short)reader["max_length"];
                                    item.Type            = reader["TypeName"].ToString();
                                    database.PartitionFunctions.Add(item);
                                }
                                // Binary boundary values are rendered as hex literals;
                                // everything else uses the raw string representation.
                                if (item.Type.Equals("binary") || item.Type.Equals("varbinary"))
                                {
                                    item.Values.Add(ToHex((byte[])reader["value"]));
                                }
                                else
                                {
                                    item.Values.Add(reader["value"].ToString());
                                }
                            }
                        }
                    }
                }
            }
        }
コード例 #8
0
    /// <summary>
    /// Provisions database storage for a new organization: ensures a dedicated filegroup
    /// and data file exist, points the partition scheme's NEXT USED filegroup at it, and
    /// splits the partition function at the organization code.
    /// </summary>
    /// <param name="currOrganizationCode">Current organization code (unused in the visible body).</param>
    /// <param name="intOrganizationCode">New organization code; used as the split boundary.</param>
    private void CreateDatabasePartition4Organization(Int32 currOrganizationCode, Int32 intOrganizationCode)
    {
        try
        {
            String           strOrganizationName = txtDomainName.Text.Trim().ToLower().Replace("http://", String.Empty);
            String           strOranizationCode  = MakeZeroString(intOrganizationCode);
            String           strDataFileName     = "datafile_" + strOranizationCode;
            String           strFileGroupName    = "filegrp_" + strOranizationCode;
            // SECURITY(review): hard-coded "sa" credentials in source — move to protected
            // configuration / integrated auth and rotate this password.
            ServerConnection cnnServer           = new ServerConnection(System.Configuration.ConfigurationManager.AppSettings["DataCentreHost"], "sa", "q2c4b7m1");
            Server           myServer            = new Server(cnnServer);
            Database         LapbaseDatabase     = myServer.Databases[System.Configuration.ConfigurationManager.AppSettings["LapbaseDatabase"]];
            FileGroup        orgFileGroup;

            // Reuse the organization's filegroup if it already exists, otherwise create it.
            if (LapbaseDatabase.FileGroups.Contains(strFileGroupName))
            {
                orgFileGroup = LapbaseDatabase.FileGroups[strFileGroupName];
            }
            else
            {
                orgFileGroup = new FileGroup(LapbaseDatabase, strFileGroupName);
                orgFileGroup.Create();
            }

            try
            {
                // Secondary data file (.ndf) inside the organization's filegroup.
                DataFile orgDataFile = new DataFile(orgFileGroup, strDataFileName, LapbaseDatabase.PrimaryFilePath + @"\" + strDataFileName + ".ndf");
                orgDataFile.IsPrimaryFile = false;
                orgDataFile.Size          = 10240; //Initial size: 10 MB
                orgDataFile.Growth        = 10240; //Growth by : 10 MB
                orgDataFile.Create();
                // NEXT USED must be set before splitting the partition function, so the new
                // partition lands in this organization's filegroup. Failures are logged but
                // deliberately do not abort the remaining steps (best-effort provisioning).
                PartitionScheme schemePartiton = LapbaseDatabase.PartitionSchemes["scheme_OrganizationCodePartition"];
                try { schemePartiton.NextUsedFileGroup = orgFileGroup.Name; schemePartiton.Alter(); }
                catch (Exception err) { gClass.AddErrorLogData("0", Request.Url.Host, "", "CreateDatabasePartition4Organization", "schemePartiton.NextUsedFileGroup", err.ToString()); }
                PartitionFunction fnPartition = LapbaseDatabase.PartitionFunctions["fn_OrganizationCodePartition"];
                try { fnPartition.SplitRangePartition(intOrganizationCode); fnPartition.Alter(); }
                catch (Exception err) { gClass.AddErrorLogData("0", Request.Url.Host, "", "CreateDatabasePartition4Organization", "fnPartition.SplitRangePartition", err.ToString()); }
            }
            catch (Exception err) { gClass.AddErrorLogData("0", Request.Url.Host, "", "CreateDatabasePartition4Organization", "Add new organization", err.ToString()); }


            LapbaseDatabase.Refresh();
            cnnServer.Disconnect();
        }
        catch (Exception ex) { gClass.AddErrorLogData("0", Request.Url.Host, "", "linkbtnSaveOrganization_onclick", "Add new organization", ex.ToString()); }
    }
コード例 #9
0
        /// <summary>
        /// Creates a PartitionManager for an explicitly specified partition number.
        /// </summary>
        /// <param name="conn">Open server connection used for all SMO operations.</param>
        /// <param name="dbName">Database containing the partitioned table.</param>
        /// <param name="schName">Schema of the partitioned and staging tables.</param>
        /// <param name="partitionTblName">Name of the partitioned table.</param>
        /// <param name="stgTblName">Name of the staging table (reused if it exists).</param>
        /// <param name="partitionNumber">1-based partition number to operate on.</param>
        /// <param name="scriptWriter">Destination for generated scripts.</param>
        /// <param name="executeCommands">Whether commands are executed or only scripted.</param>
        /// <exception cref="System.ArgumentException">
        /// The table is missing, is not partitioned, or the partition number is out of range.
        /// </exception>
        public PartitionManager(ServerConnection conn, String dbName, String schName,
                                String partitionTblName, String stgTblName, int partitionNumber, StreamWriter scriptWriter,
                                bool executeCommands)
        {
            this.conn            = conn;
            this.partitionNumber = partitionNumber;
            this.executeCommands = executeCommands;
            this.scriptWriter    = scriptWriter;

            srv = new Server(conn);
            db  = srv.Databases[dbName];

            scriptChunks = new List <System.Collections.Specialized.StringCollection>();

            // Validate the table exists.
            if ((partitionTable = db.Tables[partitionTblName, schName]) == null)
            {
                throw new System.ArgumentException("Table [" + schName + "].[" + partitionTblName + "] not found in database [" + dbName + "]");
            }

            // Validate it is partitioned; guard clause keeps the happy path unindented
            // (the previous else-after-throw was redundant).
            if (String.IsNullOrEmpty(partitionTable.PartitionScheme))
            {
                throw new System.ArgumentException("Table [" + schName + "].[" + partitionTblName + "] is not partitioned");
            }

            // Resolve the partition function through the table's partition scheme.
            pf = db.PartitionFunctions[db.PartitionSchemes[partitionTable.PartitionScheme].PartitionFunction];

            // Validate the partition number is within [1, NumberOfPartitions].
            if ((pf.NumberOfPartitions < partitionNumber) || (partitionNumber <= 0))
            {
                throw new System.ArgumentException("Invalid Partition Number");
            }

            // Reuse an existing staging table of the same name, otherwise create a new
            // (not yet persisted) table definition.
            stgTable = db.Tables.Contains(stgTblName, schName)
                ? db.Tables[stgTblName, schName]
                : new Table(db, stgTblName, schName);
        }
コード例 #10
0
ファイル: PartitionTests.cs プロジェクト: tosmolka/sarif-sdk
        private void Partition_WithTrivialPartitionFunction_ReturnsLogWithAllResultsAndRunLevelArrayContentsFromAllResults(bool deepClone)
        {
            // Every result maps to the same key, so partitioning yields a single log
            // that must contain all results and all referenced run-level array contents.
            PartitionFunction <string> partitionFunction = result => "default";

            var inputs = new List <string> { "Partition.sarif" };
            var expectedOutputs = new Dictionary <string, string>
            {
                ["default"] = "TrivialPartitionFunction.sarif"
            };
            var parameters = new TestParameters
            {
                PartitionFunction = partitionFunction,
                DeepClone         = deepClone
            };

            RunTest(
                inputResourceNames: inputs,
                expectedOutputResourceNames: expectedOutputs,
                parameter: parameters);
        }
コード例 #11
0
ファイル: PartitionTests.cs プロジェクト: tosmolka/sarif-sdk
        private void Partition_ByRuleId_ProducesOneLogFilePerRule(bool deepClone)
        {
            // Keying on RuleId groups results so each distinct rule gets its own log.
            PartitionFunction <string> partitionFunction = result => result.RuleId;

            var inputs = new List <string> { "Partition.sarif" };
            var expectedOutputs = new Dictionary <string, string>
            {
                ["TST0001"] = "TST0001.sarif",
                ["TST0002"] = "TST0002.sarif",
                ["TST9999"] = "TST9999.sarif"
            };
            var parameters = new TestParameters
            {
                PartitionFunction = partitionFunction,
                DeepClone         = deepClone
            };

            RunTest(
                inputResourceNames: inputs,
                expectedOutputResourceNames: expectedOutputs,
                parameter: parameters);
        }
コード例 #12
0
 /// <summary>
 /// Initializes a DROP node for the given partition function.
 /// </summary>
 internal SqlDropPartitionFunction(PartitionFunction partitionFunction)
     : base(SqlNodeType.Drop) => this.partitionFunction = partitionFunction;
コード例 #13
0
        /// <summary>
        /// SMO sample: creates a database with six secondary filegroups, a LEFT range
        /// partition function/scheme over them, and a partitioned table with per-partition
        /// page and row compression.
        /// </summary>
        static void Main()
        {
            Server   server = new Server(@".");
            string   dbName = @"SMOPartitionAndCompressionSampleDB";
            Database db     = new Database(server, dbName);

            db.Create();

            // Six secondary filegroups, one per table partition (FileGroups[0] is PRIMARY).
            for (int i = 1; i <= 6; i++)
            {
                db.FileGroups.Add(new FileGroup(db, "TABLE_PART" + i + "_FG"));
            }

            string temp_dir = server.Information.MasterDBPath;

            // One data file per secondary filegroup, placed next to master.
            for (int i = 1; i <= 6; i++)
            {
                db.FileGroups[i].Files.Add(new DataFile(
                    db.FileGroups[i],
                    "tbl_datafile" + i,
                    string.Format(CultureInfo.InvariantCulture, "{0}\\PPSampledb_{1}.mdf", temp_dir, i)));
            }

            db.Alter();

            // Partition function: 6 LEFT boundaries => 7 partitions.
            PartitionFunction tbl_pf = new PartitionFunction(db, "MyTablePartitionFunction");
            tbl_pf.PartitionFunctionParameters.Add(new PartitionFunctionParameter(tbl_pf, DataType.Int));
            tbl_pf.RangeType   = RangeType.Left;
            tbl_pf.RangeValues = new object[] { 5000, 10000, 15000, 20000, 25000, 30000 };
            tbl_pf.Create();

            // Partition scheme maps the 7 partitions onto PRIMARY + the 6 filegroups.
            PartitionScheme tbl_ps = new PartitionScheme(db, "MyTablePartitionScheme");
            tbl_ps.PartitionFunction = "MyTablePartitionFunction";
            tbl_ps.FileGroups.Add("PRIMARY");
            for (int i = 1; i <= 6; i++)
            {
                tbl_ps.FileGroups.Add("TABLE_PART" + i + "_FG");
            }
            tbl_ps.Create();

            // Partitioned table, partitioned on col1.
            Table table = new Table(db, "MyTable");
            table.Columns.Add(new Column(table, "col1", DataType.Int));
            table.Columns.Add(new Column(table, "col2", DataType.Int));
            table.PartitionScheme = "MyTablePartitionScheme";
            table.PartitionSchemeParameters.Add(new PartitionSchemeParameter(table, "col1"));

            // Compression: partition 5 page-compressed; partitions 6 and 7 row-compressed.
            table.PhysicalPartitions.Add(new PhysicalPartition(table, 5, DataCompressionType.Page));
            table.PhysicalPartitions.Add(new PhysicalPartition(table, 6, DataCompressionType.Row));
            table.PhysicalPartitions.Add(new PhysicalPartition(table, 7, DataCompressionType.Row));

            table.Create();
        }
        /// <summary>
        /// Creates a PartitionManager for an explicitly specified partition number,
        /// validating the target table, its partitioning, and the partition number.
        /// </summary>
        public PartitionManager(ServerConnection conn, String dbName, String schName,
                                String partitionTblName, String stgTblName, int partitionNumber, StreamWriter scriptWriter,
                                bool executeCommands)
        {
            this.conn = conn;
            this.partitionNumber = partitionNumber;
            this.executeCommands = executeCommands;
            this.scriptWriter = scriptWriter;

            srv = new Server(conn);
            db = srv.Databases[dbName];
            scriptChunks = new List<System.Collections.Specialized.StringCollection>();

            // The target table must exist...
            if ((partitionTable = db.Tables[partitionTblName, schName]) == null)
            {
                throw new System.ArgumentException("Table [" + schName + "].[" + partitionTblName + "] not found in database [" + dbName + "]");
            }

            // ...and must actually be partitioned.
            if (String.IsNullOrEmpty(partitionTable.PartitionScheme))
            {
                throw new System.ArgumentException("Table [" + schName + "].[" + partitionTblName + "] is not partitioned");
            }

            // Resolve the partition function via the table's partition scheme.
            pf = db.PartitionFunctions[db.PartitionSchemes[partitionTable.PartitionScheme].PartitionFunction];

            // The partition number must lie in [1, NumberOfPartitions].
            if ((pf.NumberOfPartitions < partitionNumber) || (partitionNumber <= 0))
            {
                throw new System.ArgumentException("Invalid Partition Number");
            }

            // Reuse an existing staging table of the same name, otherwise create a new definition.
            stgTable = db.Tables.Contains(stgTblName, schName)
                ? db.Tables[stgTblName, schName]
                : new Table(db, stgTblName, schName);
        }
コード例 #15
0
 /// <inheritdoc/>
 /// <exception cref="NotSupportedException">Always thrown; this visitor does not support partition functions.</exception>
 protected override IPathNode VisitPartitionFunction(PartitionFunction partitionFunction) =>
     throw new NotSupportedException();
コード例 #16
0
        /// <summary>
        /// SMO sample: builds a database with six secondary filegroups, a LEFT range
        /// partition function and scheme, and a partitioned table using page compression
        /// on partition 5 and row compression on partitions 6 and 7.
        /// </summary>
        static void Main()
        {
            Server server = new Server(@".");
            string dbName = @"SMOPartitionAndCompressionSampleDB";
            Database db = new Database(server, dbName);
            db.Create();

            // Secondary filegroups 1..6 (index 0 is PRIMARY).
            for (int part = 1; part <= 6; part++)
            {
                db.FileGroups.Add(new FileGroup(db, "TABLE_PART" + part + "_FG"));
            }

            string temp_dir = server.Information.MasterDBPath;

            // One data file per secondary filegroup, stored alongside master.
            for (int part = 1; part <= 6; part++)
            {
                db.FileGroups[part].Files.Add(new DataFile(
                    db.FileGroups[part],
                    "tbl_datafile" + part,
                    string.Format(CultureInfo.InvariantCulture, "{0}\\PPSampledb_{1}.mdf", temp_dir, part)));
            }

            db.Alter();

            // Six LEFT boundaries on an int column => seven partitions.
            PartitionFunction tbl_pf = new PartitionFunction(db, "MyTablePartitionFunction");
            tbl_pf.PartitionFunctionParameters.Add(new PartitionFunctionParameter(tbl_pf, DataType.Int));
            tbl_pf.RangeType = RangeType.Left;
            tbl_pf.RangeValues = new object[] { 5000, 10000, 15000, 20000, 25000, 30000 };
            tbl_pf.Create();

            // Scheme maps the seven partitions onto PRIMARY plus the six filegroups.
            PartitionScheme tbl_ps = new PartitionScheme(db, "MyTablePartitionScheme");
            tbl_ps.PartitionFunction = "MyTablePartitionFunction";
            tbl_ps.FileGroups.Add("PRIMARY");
            for (int part = 1; part <= 6; part++)
            {
                tbl_ps.FileGroups.Add("TABLE_PART" + part + "_FG");
            }
            tbl_ps.Create();

            // Two-column table, partitioned on col1.
            Table table = new Table(db, "MyTable");
            table.Columns.Add(new Column(table, "col1", DataType.Int));
            table.Columns.Add(new Column(table, "col2", DataType.Int));
            table.PartitionScheme = "MyTablePartitionScheme";
            table.PartitionSchemeParameters.Add(new PartitionSchemeParameter(table, "col1"));

            // Per-partition compression settings.
            table.PhysicalPartitions.Add(new PhysicalPartition(table, 5, DataCompressionType.Page));
            table.PhysicalPartitions.Add(new PhysicalPartition(table, 6, DataCompressionType.Row));
            table.PhysicalPartitions.Add(new PhysicalPartition(table, 7, DataCompressionType.Row));

            table.Create();
        }
コード例 #17
0
 /// <summary>
 /// Initializes a CREATE node for the given partition function.
 /// </summary>
 internal SqlCreatePartitionFunction(PartitionFunction partitionFunction)
     : base(SqlNodeType.Create) => this.partitionFunction = partitionFunction;
コード例 #18
0
        /// <summary>
        /// Prepares a SARIF log for work-item filing: connects the filing client, removes
        /// and inserts optional data as configured, then splits the log according to the
        /// configured splitting strategy.
        /// </summary>
        /// <param name="sarifLog">The log to split. Must not be null.</param>
        /// <returns>
        /// The split logs; when <c>ShouldFileUnchanged</c> is false, logs with no New/None
        /// baseline-state results are filtered out.
        /// </returns>
        /// <exception cref="ArgumentNullException"><paramref name="sarifLog"/> is null.</exception>
        /// <exception cref="ArgumentOutOfRangeException">The splitting strategy is unrecognized.</exception>
        public virtual IReadOnlyList <SarifLog> SplitLogFile(SarifLog sarifLog)
        {
            IList <SarifLog> logsToProcess;

            using (Logger.BeginScopeContext(nameof(SplitLogFile)))
            {
                // Single null check (the original repeated it a few lines later).
                sarifLog = sarifLog ?? throw new ArgumentNullException(nameof(sarifLog));
                sarifLog.SetProperty("guid", Guid.NewGuid());

                this.FilingResult   = FilingResult.None;
                this.FiledWorkItems = new List <WorkItemModel>();

                Logger.LogInformation("Connecting to filing client: {accountOrOrganization}", this.FilingClient.AccountOrOrganization);
                this.FilingClient.Connect(this.FilingContext.PersonalAccessToken).Wait();

                OptionallyEmittedData optionallyEmittedData = this.FilingContext.DataToRemove;
                if (optionallyEmittedData != OptionallyEmittedData.None)
                {
                    Logger.LogDebug("Removing optional data.");
                    var dataRemovingVisitor = new RemoveOptionalDataVisitor(optionallyEmittedData);
                    dataRemovingVisitor.Visit(sarifLog);
                }

                optionallyEmittedData = this.FilingContext.DataToInsert;
                if (optionallyEmittedData != OptionallyEmittedData.None)
                {
                    Logger.LogDebug("Inserting optional data.");
                    var dataInsertingVisitor = new InsertOptionalDataVisitor(optionallyEmittedData);
                    dataInsertingVisitor.Visit(sarifLog);
                }

                using (Logger.BeginScopeContext("Splitting visitor"))
                {
                    SplittingStrategy splittingStrategy = this.FilingContext.SplittingStrategy;

                    Logger.LogInformation($"Splitting strategy - {splittingStrategy}");

                    if (splittingStrategy == SplittingStrategy.None)
                    {
                        return new[] { sarifLog };
                    }

                    PartitionFunction <string> partitionFunction = null;

                    Stopwatch splittingStopwatch = Stopwatch.StartNew();

                    // In every case, results that should not be filed map to null and are
                    // discarded by the partitioning visitor.
                    switch (splittingStrategy)
                    {
                    case SplittingStrategy.PerRun:
                    {
                        partitionFunction = (result) => result.ShouldBeFiled() ? "Include" : null;
                        break;
                    }

                    case SplittingStrategy.PerResult:
                    {
                        // A fresh GUID per result puts each filed result in its own log.
                        partitionFunction = (result) => result.ShouldBeFiled() ? Guid.NewGuid().ToString() : null;
                        break;
                    }

                    case SplittingStrategy.PerRunPerOrgPerEntityTypePerPartialFingerprint:
                    {
                        partitionFunction = (result) => result.ShouldBeFiled() ? result.GetFingerprintSplittingStrategyId() : null;
                        break;
                    }

                    case SplittingStrategy.PerRunPerOrgPerEntityTypePerRepositoryPerPartialFingerprint:
                    {
                        partitionFunction = (result) => result.ShouldBeFiled() ? result.GetPerRepositoryFingerprintSplittingStrategyId() : null;
                        break;
                    }

                    default:
                    {
                        throw new ArgumentOutOfRangeException($"SplittingStrategy: {splittingStrategy}");
                    }
                    }

                    Logger.LogDebug("Begin splitting logs");
                    var partitioningVisitor = new PartitioningVisitor <string>(partitionFunction, deepClone: false);
                    partitioningVisitor.VisitSarifLog(sarifLog);

                    Logger.LogDebug("Begin retrieving split logs");
                    logsToProcess = new List <SarifLog>(partitioningVisitor.GetPartitionLogs().Values);

                    Logger.LogDebug("End retrieving split logs");

                    // Stop the stopwatch before reading its elapsed time for the metric.
                    splittingStopwatch.Stop();

                    var logsToProcessMetrics = new Dictionary <string, object>
                    {
                        { "splittingStrategy", splittingStrategy },
                        { "logsToProcessCount", logsToProcess.Count },
                        { "splittingDurationInMilliseconds", splittingStopwatch.ElapsedMilliseconds },
                    };

                    this.Logger.LogMetrics(EventIds.LogsToProcessMetrics, logsToProcessMetrics);
                }
            }

            if (logsToProcess != null && !this.FilingContext.ShouldFileUnchanged)
            {
                // Remove any logs that do not contain at least one result with a New or None baselinestate.
                logsToProcess = logsToProcess.Where(log => log?.Runs?.Any(run => run.Results?.Any(result => result.BaselineState == BaselineState.New || result.BaselineState == BaselineState.None) == true) == true).ToList();
            }

            return logsToProcess.ToArray();
        }
コード例 #19
0
        // Based on Microsoft's example https://docs.microsoft.com/en-us/sql/t-sql/statements/create-partition-scheme-transact-sql?view=sql-server-2017

        // Demonstrates creating a partitioned table through SMO:
        // database -> filegroups/files -> partition function -> partition scheme -> table.
        static void Main(string[] args)
        {
            var connectionString = ConfigurationManager.ConnectionStrings["ConnStr"].ConnectionString;

            // FIX: the original wrapped the SqlConnection in a 'using' block and disposed
            // it before calling Connect(), leaving ServerConnection holding an already
            // disposed connection. Keep the connection alive for the whole session and
            // dispose it in 'finally' instead.
            var sqlConnection = new SqlConnection(connectionString);
            var cnn = new ServerConnection(sqlConnection);

            cnn.Connect();
            Console.WriteLine("Connected");

            //Create the server object
            var server = new Server(cnn);
            Console.WriteLine("Create the server object");

            const string dataBaseName = @"TestPartition";

            try
            {
                //Drop the database if exists
                server.Databases[dataBaseName]?.Drop();

                //Create TestPartition database
                Console.WriteLine($"Creating the database{dataBaseName}");
                var db = new Database(server, dataBaseName);
                db.Create();

                // Add four file groups and a corresponding data file to each.
                // Index 0 is the PRIMARY filegroup, so the new groups land at indices 1..4.
                ConsoleEx.WriteLine("Adding four file groups", ConsoleColor.Cyan);
                for (var i = 1; i < 5; i++)
                {
                    db.FileGroups.Add(new FileGroup(db, $"test{i}fg"));
                    db.FileGroups[i].Files.Add(new DataFile(db.FileGroups[i], $"test{i}datafile",
                                                            $"{server.Information.MasterDBPath}\\{dataBaseName}_{i}.mdf"));
                }

                // Push the filegroup/file additions to the server.
                db.Alter();

                // Create the partition function: RANGE LEFT FOR VALUES (1, 100, 1000).
                ConsoleEx.WriteLine("Creating the partition function", ConsoleColor.Yellow);
                var partitionFunction = new PartitionFunction(db, "myRangePF1");
                partitionFunction.PartitionFunctionParameters.Add(
                    new PartitionFunctionParameter(partitionFunction, DataType.Int));
                partitionFunction.RangeType = RangeType.Left;
                partitionFunction.RangeValues = new object[] { 1, 100, 1000 };
                partitionFunction.Create();

                // Filegroup  test1fg   |  test2fg                  |  test3fg                     |  test4fg
                // Partition  1         |  2                        |  3                           |  4
                // Values     col1 <= 1 |  col1 > 1 AND col1 <= 100 |  col1 > 100 AND col1 <= 1000 |  col1 > 1000

                // Map each of the four partitions onto its filegroup.
                ConsoleEx.WriteLine("Creating the partition scheme", ConsoleColor.Red);
                var partitionScheme = new PartitionScheme(db, "myRangePS1")
                {
                    PartitionFunction = "myRangePF1"
                };
                for (var i = 1; i < 5; i++)
                {
                    partitionScheme.FileGroups.Add($"test{i}fg");
                }
                partitionScheme.Create();

                // Create a single-column table partitioned on col1.
                ConsoleEx.WriteLine("Creating the table", ConsoleColor.Cyan);
                var table = new Table(db, "TestTable");
                table.Columns.Add(new Column(table, "col1", DataType.Int));
                table.PartitionScheme = "myRangePS1";
                table.PartitionSchemeParameters.Add(new PartitionSchemeParameter(table, "col1"));
                table.Create();

                //Insert a few records into newly create table
                db.ExecuteNonQuery(@"INSERT INTO DBO.TESTTABLE
                        VALUES (0), (1), (100), (200), (3000);");

                // Examine sys.dm_db_partition_stats to see how the rows were distributed.
                var dataset = db.ExecuteWithResults(
                    @"SELECT partition_number,row_count
                       FROM sys.dm_db_partition_stats
                       WHERE object_id = OBJECT_ID('DBO.TESTTABLE');");

                foreach (DataRow row in dataset.Tables[0].Rows)
                {
                    ConsoleEx.WriteLine($"Partition {row["partition_number"]} has {row["row_count"]} rows", ConsoleColor.Magenta);
                }
            }
            catch (Exception ex)
            {
                // Flatten the inner-exception chain into a single readable message.
                Console.WriteLine(string.Join(Environment.NewLine + "\t", ex.CollectThemAll(ex1 => ex1.InnerException)
                                              .Select(ex1 => ex1.Message)));
            }
            finally
            {
                // FIX: guarantee cleanup even when an exception is thrown above.
                if (cnn.IsOpen)
                {
                    cnn.Disconnect();
                }
                sqlConnection.Dispose();
            }

            Console.WriteLine("Press any key to exit...");
            Console.ReadLine();
        }
コード例 #20
0
 /// <summary>
 /// Builds a CREATE PARTITION FUNCTION statement node for the given partition function.
 /// </summary>
 /// <param name="partitionFunction">The partition function to create; must not be null.</param>
 /// <returns>A new <see cref="SqlCreatePartitionFunction"/> wrapping <paramref name="partitionFunction"/>.</returns>
 public static SqlCreatePartitionFunction Create(PartitionFunction partitionFunction)
 {
     // Use nameof instead of a string literal so the parameter name survives renames.
     ArgumentValidator.EnsureArgumentNotNull(partitionFunction, nameof(partitionFunction));
     return new SqlCreatePartitionFunction(partitionFunction);
 }
コード例 #21
0
        /// <summary>
        /// Files work items for the results in <paramref name="sarifLog"/>, optionally
        /// splitting the log per run or per result before filing each partition.
        /// </summary>
        /// <param name="sarifLog">The SARIF log whose results should be filed.</param>
        /// <returns>The input log (tagged with a correlation guid).</returns>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="sarifLog"/> is null.</exception>
        /// <exception cref="ArgumentOutOfRangeException">Thrown for an unsupported splitting strategy.</exception>
        public virtual SarifLog FileWorkItems(SarifLog sarifLog)
        {
            sarifLog = sarifLog ?? throw new ArgumentNullException(nameof(sarifLog));

            // Tag the log so all work items filed from it can be correlated later.
            sarifLog.SetProperty("guid", Guid.NewGuid());

            using (Logger.BeginScope(nameof(FileWorkItems)))
            {
                this.FilingResult = FilingResult.None;
                this.FiledWorkItems = new List<WorkItemModel>();

                // FIX: removed a duplicate null-coalescing throw; the argument is
                // already validated at the top of the method.

                Logger.LogInformation("Connecting to filing client: {accountOrOrganization}", this.FilingClient.AccountOrOrganization);
                // NOTE(review): synchronous wait on an async call; kept as-is to preserve
                // the existing failure surface (AggregateException) for callers.
                this.FilingClient.Connect(this.FilingContext.PersonalAccessToken).Wait();

                OptionallyEmittedData optionallyEmittedData = this.FilingContext.DataToRemove;
                if (optionallyEmittedData != OptionallyEmittedData.None)
                {
                    var dataRemovingVisitor = new RemoveOptionalDataVisitor(optionallyEmittedData);
                    dataRemovingVisitor.Visit(sarifLog);
                }

                optionallyEmittedData = this.FilingContext.DataToInsert;
                if (optionallyEmittedData != OptionallyEmittedData.None)
                {
                    var dataInsertingVisitor = new InsertOptionalDataVisitor(optionallyEmittedData);
                    dataInsertingVisitor.Visit(sarifLog);
                }

                SplittingStrategy splittingStrategy = this.FilingContext.SplittingStrategy;

                // No splitting requested: file the whole log in one pass.
                if (splittingStrategy == SplittingStrategy.None)
                {
                    FileWorkItemsHelper(sarifLog, this.FilingContext, this.FilingClient);
                    return sarifLog;
                }

                PartitionFunction<string> partitionFunction = null;

                Stopwatch splittingStopwatch = Stopwatch.StartNew();

                switch (splittingStrategy)
                {
                    case SplittingStrategy.PerRun:
                    {
                        // All filable results share one partition key => one log per run set.
                        partitionFunction = (result) => result.ShouldBeFiled() ? "Include" : null;
                        break;
                    }
                    case SplittingStrategy.PerResult:
                    {
                        // Unique key per result => one log per result.
                        partitionFunction = (result) => result.ShouldBeFiled() ? Guid.NewGuid().ToString() : null;
                        break;
                    }
                    default:
                    {
                        // FIX: the original passed the message into the paramName slot of
                        // ArgumentOutOfRangeException; supply name, value and message explicitly.
                        throw new ArgumentOutOfRangeException(
                            nameof(splittingStrategy),
                            splittingStrategy,
                            $"SplittingStrategy: {splittingStrategy}");
                    }
                }

                var partitioningVisitor = new PartitioningVisitor<string>(partitionFunction, deepClone: false);
                partitioningVisitor.VisitSarifLog(sarifLog);

                IList<SarifLog> logsToProcess = new List<SarifLog>(partitioningVisitor.GetPartitionLogs().Values);

                // FIX: stop timing before reading ElapsedMilliseconds so the metric
                // reflects only the splitting work (original stopped it after logging).
                splittingStopwatch.Stop();

                var logsToProcessMetrics = new Dictionary<string, object>
                {
                    { "splittingStrategy", splittingStrategy },
                    { "logsToProcessCount", logsToProcess.Count },
                    { "splittingDurationInMilliseconds", splittingStopwatch.ElapsedMilliseconds },
                };

                this.Logger.LogMetrics(EventIds.LogsToProcessMetrics, logsToProcessMetrics);

                // File each split log independently.
                for (int splitFileIndex = 0; splitFileIndex < logsToProcess.Count; splitFileIndex++)
                {
                    SarifLog splitLog = logsToProcess[splitFileIndex];
                    FileWorkItemsHelper(splitLog, this.FilingContext, this.FilingClient);
                }
            }

            return sarifLog;
        }
コード例 #22
0
 /// <summary>
 /// Initializes a new instance of the <see cref="PartitioningVisitor"/> class.
 /// </summary>
 /// <param name="partitionFunction">
 /// Delegate that maps each result to the key of the partition it belongs to.
 /// </param>
 /// <param name="deepClone">
 /// Controls how the partitioned logs are built from the original log. When
 /// <c>true</c>, every partitioned log is produced from a deep clone of the
 /// original, so the original and the partitions share no objects and all of
 /// them can be modified safely — at the price of extra partitioning time and
 /// working set. When <c>false</c> (the default), partitioned logs are shallow
 /// copies: faster and smaller, but none of the resulting logs should be
 /// modified, because which objects they share is unspecified.
 /// </param>
 public PartitioningVisitor(PartitionFunction <T> partitionFunction, bool deepClone)
 {
     // The two assignments are independent; store the cheap flag first.
     this.deepClone = deepClone;
     this.partitionFunction = partitionFunction;
 }