Example #1
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End of model evaluation ===============");
            SaveModelAsFile(mlContext, model);
        }
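The SaveModelAsFile helper called above is not shown in this example. A minimal sketch, mirroring the inline save in Example #4 and assuming a static _modelPath field, might look like this:

        private static void SaveModelAsFile(MLContext mlContext, ITransformer model)
        {
            // Hypothetical helper: _modelPath is an assumed field (e.g. "Data/Model.zip").
            // The Save(model, stream) call matches Example #4 (older ML.NET API);
            // newer versions also take the input schema.
            using (var fs = new FileStream(_modelPath, FileMode.Create, FileAccess.Write, FileShare.Write))
                mlContext.Model.Save(model, fs);
            Console.WriteLine("The model is saved to {0}", _modelPath);
        }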
Example #2
        public void Test(string testDL, string modelLocation)
        {
            Console.WriteLine("Testing...");
            Load(modelLocation);
            testDataView = mlContext.Data.LoadFromTextFile<SentimentIssue>(testDL, hasHeader: true);
            var predictions = trainedModel.Transform(testDataView);

            metrics = mlContext.BinaryClassification.Evaluate(data: predictions, labelColumnName: "Label", scoreColumnName: "Score");
            Console.WriteLine("Accuracy: {0}\n" +
                              "TestDataFile: {1}\n" +
                              "TrainDataFile: {2}\n" +
                              "TrainedModelLocation: {3}", metrics.Accuracy, testDataLocation, trainDataLocation, modelLocation);
            Console.WriteLine("Done Testing, Press Any Key To Exit...");
            Console.ReadKey();
        }
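The Load helper is not shown here. A minimal sketch, assuming mlContext and trainedModel are fields of the same class, could be:

        private void Load(string modelLocation)
        {
            // Load the saved model from disk; the out parameter receives the schema
            // the model was trained with.
            trainedModel = mlContext.Model.Load(modelLocation, out DataViewSchema inputSchema);
        }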
Example #3
 public static void PrintBinaryClassificationMetrics(string name, CalibratedBinaryClassificationMetrics metrics)
 {
     Console.WriteLine($"************************************************************");
     Console.WriteLine($"*       Metrics for {name} binary classification model      ");
     Console.WriteLine($"*-----------------------------------------------------------");
     Console.WriteLine($"*       Accuracy: {metrics.Accuracy:P2}");
     Console.WriteLine($"*       F1Score:  {metrics.F1Score:P2}");
     Console.WriteLine($"*       LogLoss:  {metrics.LogLoss:#.##}");
     Console.WriteLine($"*       LogLossReduction:  {metrics.LogLossReduction:#.##}");
     Console.WriteLine($"*       PositivePrecision:  {metrics.PositivePrecision:#.##}");
     Console.WriteLine($"*       PositiveRecall:  {metrics.PositiveRecall:#.##}");
     Console.WriteLine($"*       NegativePrecision:  {metrics.NegativePrecision:#.##}");
     Console.WriteLine($"*       NegativeRecall:  {metrics.NegativeRecall:P2}");
     Console.WriteLine($"************************************************************");
 }
Example #4
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End of model evaluation ===============");
            using (var fs = new FileStream(_modelPath, FileMode.Create, FileAccess.Write, FileShare.Write))
                mlContext.Model.Save(model, fs);
        }
Example #5
 public static void PrintBinaryClassificationMetrics(string name, CalibratedBinaryClassificationMetrics metrics)
 {
     s_log.Debug($"************************************************************");
     s_log.Debug($"*       Metrics for {name} binary classification model      ");
     s_log.Debug($"*-----------------------------------------------------------");
     s_log.Debug($"*       Accuracy: {metrics.Accuracy:P2}");
     s_log.Debug($"*       Area Under Curve:      {metrics.AreaUnderRocCurve:P2}");
     s_log.Debug($"*       Area under Precision recall Curve:  {metrics.AreaUnderPrecisionRecallCurve:P2}");
     s_log.Debug($"*       F1Score:  {metrics.F1Score:P2}");
     s_log.Debug($"*       LogLoss:  {metrics.LogLoss:#.##}");
     s_log.Debug($"*       LogLossReduction:  {metrics.LogLossReduction:#.##}");
     s_log.Debug($"*       PositivePrecision:  {metrics.PositivePrecision:#.##}");
     s_log.Debug($"*       PositiveRecall:  {metrics.PositiveRecall:#.##}");
     s_log.Debug($"*       NegativePrecision:  {metrics.NegativePrecision:#.##}");
     s_log.Debug($"*       NegativeRecall:  {metrics.NegativeRecall:P2}");
     s_log.Debug($"************************************************************");
 }
Example #6
 public static void PrintBinaryClassificationMetrics(string name, CalibratedBinaryClassificationMetrics metrics)
 {
     Console.WriteLine($"************************************************************");
      Console.WriteLine($"*       Metrics for {name} binary classification model      ");
     Console.WriteLine($"*-----------------------------------------------------------");
     Console.WriteLine($"*       Accuracy: {metrics.Accuracy:P2}");
     Console.WriteLine($"*       Area Under Curve:      {metrics.AreaUnderRocCurve:P2}");
     Console.WriteLine($"*       Area under Precision recall Curve:  {metrics.AreaUnderPrecisionRecallCurve:P2}");
     Console.WriteLine($"*       F1Score:  {metrics.F1Score:P2}");
     Console.WriteLine($"*       LogLoss:  {metrics.LogLoss:#.##}");
     Console.WriteLine($"*       LogLossReduction:  {metrics.LogLossReduction:#.##}");
     Console.WriteLine($"*       PositivePrecision:  {metrics.PositivePrecision:#.##}");
     Console.WriteLine($"*       PositiveRecall:  {metrics.PositiveRecall:#.##}");
     Console.WriteLine($"*       NegativePrecision:  {metrics.NegativePrecision:#.##}");
     Console.WriteLine($"*       NegativeRecall:  {metrics.NegativeRecall:P2}");
     Console.WriteLine($"************************************************************");
 }
Example #7
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            Console.WriteLine("========== Evaluating Model accuracy with Test data ==========");

            // Transform the splitTestSet data by adding the following code to Evaluate():
            IDataView predictions = model.Transform(splitTestSet);

            // Evaluate the model by adding the following as the next line of code in the Evaluate() method:
            // Once you have the prediction set, Evaluate() assesses the model, which compares the predicted values with the actual Labels in the test dataset
            // It returns a CalibratedBinaryClassificationMetrics object on how the model is performing.
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            Console.WriteLine("Model quality metrics evaluation:");
            Console.WriteLine($"- Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"- Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"- F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("========== End of model evaluation ==========");
        }
Example #8
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            // Evaluate the model and show accuracy stats

            //Take the data in, make transformations, output the data.
            // <SnippetTransformData>
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            // </SnippetTransformData>

            // BinaryClassification.Evaluate returns a CalibratedBinaryClassificationMetrics object
            // that contains the computed overall metrics.
            // <SnippetEvaluate>
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            // </SnippetEvaluate>

            // The Accuracy metric gets the accuracy of a classifier, which is the proportion
            // of correct predictions in the test set.

            // The Auc metric gets the area under the ROC curve.
            // The area under the ROC curve is equal to the probability that the classifier ranks
            // a randomly chosen positive instance higher than a randomly chosen negative one
            // (assuming 'positive' ranks higher than 'negative').

            // The F1Score metric gets the classifier's F1 score.
            // The F1 score is the harmonic mean of precision and recall:
            //  2 * precision * recall / (precision + recall).

            // <SnippetDisplayMetrics>
            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End of model evaluation ===============");
            //</SnippetDisplayMetrics>

            // Save the new model to .ZIP file
            // <SnippetCallSaveModel>
            SaveModelAsFile(mlContext, model);
            // </SnippetCallSaveModel>
        }
Example #9
        public static void Evaluate(ITransformer model, IDataView testSet)
        {
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            CalibratedBinaryClassificationMetrics metrics = GetMetrics(model, testSet);

            Console.WriteLine("Sensitivity: {0}", metrics.PositiveRecall);
            Console.WriteLine("Specificity: {0}", metrics.NegativeRecall);
            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Output("AUC: " + metrics.AreaUnderRocCurve);
            Output("Accuracy: " + metrics.Accuracy);
            Output("Specificity: " + metrics.NegativeRecall);
            Console.WriteLine("=============== End of model evaluation ===============");
        }
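GetMetrics is not part of this snippet. A plausible sketch, assuming a static mlContext field, is:

        private static CalibratedBinaryClassificationMetrics GetMetrics(ITransformer model, IDataView testSet)
        {
            // Score the test set, then compute calibrated binary classification
            // metrics against the "Label" column.
            IDataView predictions = model.Transform(testSet);
            return mlContext.BinaryClassification.Evaluate(predictions, "Label");
        }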
Example #10
        // Function used to show evaluation metrics such as accuracy of predictions.
        private static void PrintMetrics(
            CalibratedBinaryClassificationMetrics metrics)

        {
            Console.WriteLine($"Accuracy: {metrics.Accuracy:F2}");
            Console.WriteLine($"AUC: {metrics.AreaUnderRocCurve:F2}");
            Console.WriteLine($"F1 Score: {metrics.F1Score:F2}");
            Console.WriteLine($"Negative Precision: " +
                              $"{metrics.NegativePrecision:F2}");

            Console.WriteLine($"Negative Recall: {metrics.NegativeRecall:F2}");
            Console.WriteLine($"Positive Precision: " +
                              $"{metrics.PositivePrecision:F2}");

            Console.WriteLine($"Positive Recall: {metrics.PositiveRecall:F2}");
            Console.WriteLine($"Log Loss: {metrics.LogLoss:F2}");
            Console.WriteLine($"Log Loss Reduction: {metrics.LogLossReduction:F2}");
            Console.WriteLine($"Entropy: {metrics.Entropy:F2}");
        }
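A possible call site for PrintMetrics, assuming mlContext, model, and testSet are in scope:

            // Score the held-out data, evaluate, then print the metrics.
            IDataView predictions = model.Transform(testSet);
            CalibratedBinaryClassificationMetrics metrics =
                mlContext.BinaryClassification.Evaluate(predictions, "Label");
            PrintMetrics(metrics);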
Example #11
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet); // Transform() method to make predictions for multiple provided input rows of a test dataset.

            /*
             *  Evaluate() method assesses the model, which compares the predicted values with the actual Labels in the test dataset
             *  and returns a CalibratedBinaryClassificationMetrics object on how the model is performing.
             */
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");     //accuracy of a model, which is the proportion of correct predictions in the test set.
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}"); //how confident the model is correctly classifying the positive and negative classes. You want the AreaUnderRocCurve to be as close to one as possible.
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");       //measure of balance between precision and recall. You want the F1Score to be as close to one as possible.
            Console.WriteLine("=============== End of model evaluation ===============");
        }
Example #12
        /// <summary>
        /// Loads the test dataset.
        /// Creates the BinaryClassification evaluator.
        /// Evaluates the model and creates metrics.
        /// Displays the metrics.
        /// </summary>
        /// <param name="mlContext">The ML.NET environment</param>
        /// <param name="model">The trained model</param>
        /// <param name="splitTestSet">The test set</param>
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            // Evaluate the model and show accuracy stats
            // Take the data in, make transformations, output the data.
            // The transform is lazy: the test set is only read when the predictions are actually consumed.
            // <SnippetTransformData>
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            // </SnippetTransformData>

            // BinaryClassification.Evaluate returns a CalibratedBinaryClassificationMetrics object
            // that contains the computed overall metrics.
            // Pass in the predictions and the name of the label column; the evaluator compares
            // the predicted values with the actual labels and returns its scores.
            // <SnippetEvaluate>
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            // </SnippetEvaluate>

            // The Accuracy metric gets the accuracy of a model, which is the proportion
            // of correct predictions in the test set.

            // The AreaUnderROCCurve metric is equal to the probability that the algorithm ranks
            // a randomly chosen positive instance higher than a randomly chosen negative one
            // (assuming 'positive' ranks higher than 'negative').

            // The F1Score metric gets the model's F1 score.
            // The F1 score is the harmonic mean of precision and recall:
            //  2 * precision * recall / (precision + recall).

            // <SnippetDisplayMetrics>
            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            // The Accuracy metric gets the accuracy of the model: the proportion of correct predictions in the test set.
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            // The AreaUnderRocCurve metric indicates how confidently the model separates the positive and negative classes; it should be as close to 1 as possible.
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            // The F1Score metric is a measure of the balance between precision and recall; it should be as close to 1 as possible.
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End of model evaluation ===============");
            //</SnippetDisplayMetrics>
        }
Example #13
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            // Evaluate the model and show accuracy stats
            Console.WriteLine("=== Evaluating model accuracy with test data ===");
            //Take the data in, make transformations, output the data.
            IDataView predictions = model.Transform(splitTestSet);
            // BinaryClassificationContext.Evaluate returns computed overall metrics.
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=== End of model evaluation ===");

            // Save the new model to .ZIP file
            SaveModelAsFile(mlContext, model);
        }
        /// <summary>
        /// When ~10% of the data is chosen for training, another 10% is chosen for evaluation. After training,
        /// the effectiveness of the model is evaluated on the test set. The results of that evaluation are
        /// converted to text as binary classification metrics and written to results.tsv.
        /// </summary>
        /// <param name="name"></param>
        /// <param name="metrics"></param>
        /// <returns></returns>
        public static string PrintBinaryClassificationMetrics(string name, CalibratedBinaryClassificationMetrics metrics)
        {
            StringBuilder s = new StringBuilder();

            s.AppendLine("************************************************************");
            s.AppendLine("*       Metrics for Determination of PEP Using Binary Classification      ");
            s.AppendLine("*-----------------------------------------------------------");
            s.AppendLine("*       Accuracy:  " + metrics.Accuracy.ToString());
            s.AppendLine("*       Area Under Curve:  " + metrics.AreaUnderRocCurve.ToString());
            s.AppendLine("*       Area under Precision recall Curve:  " + metrics.AreaUnderPrecisionRecallCurve.ToString());
            s.AppendLine("*       F1Score:  " + metrics.F1Score.ToString());
            s.AppendLine("*       LogLoss:  " + metrics.LogLoss.ToString());
            s.AppendLine("*       LogLossReduction:  " + metrics.LogLossReduction.ToString());
            s.AppendLine("*       PositivePrecision:  " + metrics.PositivePrecision.ToString());
            s.AppendLine("*       PositiveRecall:  " + metrics.PositiveRecall.ToString());
            s.AppendLine("*       NegativePrecision:  " + metrics.NegativePrecision.ToString());
            s.AppendLine("*       NegativeRecall:  " + metrics.NegativeRecall.ToString());
            s.AppendLine("************************************************************");
            return s.ToString();
        }
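The doc comment above mentions results.tsv; a plausible call site, assuming 'metrics' comes from an Evaluate call like the one shown first in this example, would append the formatted block to that file:

            // Hypothetical usage: format the metrics and append them to results.tsv (System.IO).
            File.AppendAllText("results.tsv", PrintBinaryClassificationMetrics("PEP", metrics));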
Example #15
 public void Evaluate()
 {
     if (initalized)
     {
         Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
         IDataView predictions = model.Transform(splitDataView.TestSet);
         CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");
         Console.WriteLine();
         Console.WriteLine("Model quality metrics evaluation");
         Console.WriteLine("--------------------------------");
         Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
         Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
         Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
         Console.WriteLine("=============== End of model evaluation ===============");
     }
     else
     {
         Console.WriteLine("Error! Sentiment Analyser not initialized.");
     }
 }
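The fields this class relies on (mlContext, splitDataView, model, initalized) are set elsewhere. A sketch of such an initializer, following the standard ML.NET sentiment-analysis pipeline used in Example #21 and assuming a SentimentData input class, might be:

 public void Initialize(string dataPath)
 {
     mlContext = new MLContext();
     IDataView dataView = mlContext.Data.LoadFromTextFile<SentimentData>(dataPath, hasHeader: false);
     splitDataView = mlContext.Data.TrainTestSplit(dataView, testFraction: 0.2);
     var estimator = mlContext.Transforms.Text
         .FeaturizeText(outputColumnName: "Features", inputColumnName: nameof(SentimentData.SentimentText))
         .Append(mlContext.BinaryClassification.Trainers.SdcaLogisticRegression(labelColumnName: "Label", featureColumnName: "Features"));
     model = estimator.Fit(splitDataView.TrainSet);
     initalized = true; // field name as spelled in Evaluate() above
 }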
Example #16
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView testSet)
        {
            // Transform the test set in order to make predictions
            Console.WriteLine("========== Evaluating Model Accuracy with Test Data ==========");
            IDataView predictions = model.Transform(testSet);

            // Compare predicted values with actual labels to assess model's performance
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            // Display metrics
            // Accuracy proportion of correct predictions in the test set
            // AUC is model confidence in classifying positive/negative classes. The closer to 1, the better
            // F1 Score is measure of balance between precision and recall. The closer to 1, the better
            Console.WriteLine();
            Console.WriteLine("Model Quality Metrics Evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Area Under Roc Curve: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("========== End of Model Evaluation ==========");
        }
Example #17
        /// <summary>
        /// Evaluates the model on the test set and computes its quality metrics.
        /// </summary>
        /// <param name="mlContext">The ML.NET context</param>
        /// <param name="model">The trained model</param>
        /// <param name="splitTestSet">The test set</param>
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            IDataView predictions = null;

            try
            {
                predictions = model.Transform(splitTestSet);
                CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");
                //Console.WriteLine();
                //Console.WriteLine("Model quality metrics evaluation");
                //Console.WriteLine("--------------------------------");
                //Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
                //Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
                //Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
                //Console.WriteLine("=============== End of model evaluation ===============");
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.StackTrace);
            }
        }
Example #18
        public void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet, string labelColumn)
        {
            // This code was taken from the linear regression demo on the ML.NET github page

            // Evaluate the model and show accuracy stats

            //Take the data in, make transformations, output the data.
            // <SnippetTransformData>
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            // </SnippetTransformData>

            // BinaryClassification.Evaluate returns a CalibratedBinaryClassificationMetrics object
            // that contains the computed overall metrics.
            // <SnippetEvaluate>
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, labelColumn);

            // </SnippetEvaluate>

            // The Accuracy metric gets the accuracy of a model, which is the proportion
            // of correct predictions in the test set.

            // The AreaUnderROCCurve metric is equal to the probability that the algorithm ranks
            // a randomly chosen positive instance higher than a randomly chosen negative one
            // (assuming 'positive' ranks higher than 'negative').

            // The F1Score metric gets the model's F1 score.
            // The F1 score is the harmonic mean of precision and recall:
            //  2 * precision * recall / (precision + recall).

            // <SnippetDisplayMetrics>
            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("\n=============== End of model evaluation ===============");
            //</SnippetDisplayMetrics>
        }
Example #19
        private BinaryPredictionTransformer<Microsoft.ML.Calibrators.CalibratedModelParametersBase<LinearBinaryModelParameters, Microsoft.ML.Calibrators.PlattCalibrator>>
            SetThreshold(BinaryPredictionTransformer<Microsoft.ML.Calibrators.CalibratedModelParametersBase<LinearBinaryModelParameters, Microsoft.ML.Calibrators.PlattCalibrator>> lrModel, IDataView testSet)
        {
            float  threshold          = 1.0F;
            double currentSpecificity = 1.0;

            do
            {
                threshold -= tick;

                CalibratedBinaryClassificationMetrics metrics = GetMetrics((ITransformer)lrModel, testSet);
                currentSpecificity = metrics.NegativeRecall;

                double AUC = metrics.AreaUnderRocCurve;

                Console.WriteLine("Threshold: {0:0.00}; Specificity: {1:0.00}; AUC: {2:0.00}", threshold, currentSpecificity, AUC);


                lrModel = mlContext.BinaryClassification.ChangeModelThreshold(lrModel, threshold);

                Thread.Sleep(5);
            } while (currentSpecificity > minimumSpecificity);
            return lrModel;
        }
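SetThreshold depends on several members that the snippet does not show. Plausible declarations, labeled as assumptions, would be the following; GetMetrics could be the same helper sketched after Example #9.

        // Hypothetical fields assumed by SetThreshold above:
        private static MLContext mlContext = new MLContext();
        private static float tick = 0.01f;                // step by which the threshold is lowered each pass
        private static double minimumSpecificity = 0.9;   // the loop stops once specificity falls to this level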
Example #20
        public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            // Evaluate the model and show accuracy stats

            //Take the data in, make transformations, output the data.
            // <SnippetTransformData>
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            // </SnippetTransformData>

            // BinaryClassification.Evaluate returns a CalibratedBinaryClassificationMetrics object
            // that contains the computed overall metrics.
            // <SnippetEvaluate>
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            // </SnippetEvaluate>

            // The Accuracy metric gets the accuracy of a model, which is the proportion
            // of correct predictions in the test set.

            // The AreaUnderRocCurve metric is an indicator of how confident the model is
            // correctly classifying the positive and negative classes as such.

            // The F1Score metric gets the model's F1 score.
            //  F1 is a measure of tradeoff between precision and recall.
            //  2 * precision * recall / (precision + recall).

            // <SnippetDisplayMetrics>
            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"            Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Area Under Roc Curve: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"             F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End of model evaluation ===============");
            //</SnippetDisplayMetrics>
        }
Example #21
        public StringBuilder Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            StringBuilder builder = new StringBuilder();

            builder.AppendLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            //Transform() method makes predictions for multiple provided input rows of a test dataset.

            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            #region Comments#2
            //Once you have the prediction set(predictions), the Evaluate() method assesses the model,
            //which compares the predicted values with the actual Labels in the test dataset and
            //returns a CalibratedBinaryClassificationMetrics object on how the model is performing.
            #endregion


            builder.AppendLine("Model quality metrics evaluation");
            builder.AppendLine("--------------------------------");
            builder.AppendLine($"Accuracy: {metrics.Accuracy:P2}");
            builder.AppendLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            builder.AppendLine($"F1Score: {metrics.F1Score:P2}");
            builder.AppendLine("=============== End of model evaluation ===============");

            #region Comments#3
            //The Accuracy metric gets the accuracy of a model, which is the proportion of correct
            //predictions in the test set.

            //The AreaUnderRocCurve metric indicates how confident the model is correctly
            //classifying the positive and negative classes. You want the AreaUnderRocCurve to
            //be as close to one as possible.

            //The F1Score metric gets the model's F1 score, which is a measure of balance
            //between precision and recall. You want the F1Score to be as close to one as possible.
            #endregion
            return builder;
        }
        static void Main(string[] args)
        {
            MLContext mlContext = new MLContext();

            // Load Data
            IDataView     dataView      = mlContext.Data.LoadFromTextFile<SentimentData>(_dataPath, hasHeader: false);
            TrainTestData splitDataView = mlContext.Data.TrainTestSplit(dataView, testFraction: 0.2);

            // BuildAndTrainModel
            var estimator = mlContext.Transforms.Text
                            .FeaturizeText(outputColumnName: "Features", inputColumnName: nameof(SentimentData.SentimentText))
                            .Append(mlContext.BinaryClassification.Trainers.SdcaLogisticRegression(labelColumnName: "Label", featureColumnName: "Features", maximumNumberOfIterations: 100));

            Console.WriteLine("=============== Create and Train the Model ===============");
            var model = estimator.Fit(splitDataView.TrainSet);

            Console.WriteLine("=============== End of training ===============");
            Console.WriteLine();

            // Evaluate
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitDataView.TestSet);

            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");

            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End of model evaluation ===============");

            // UseModelWithSingleItem
            PredictionEngine<SentimentData, SentimentPrediction> predictionFunction = mlContext.Model.CreatePredictionEngine<SentimentData, SentimentPrediction>(model);

            SentimentData sampleStatement = new SentimentData
            {
                SentimentText = "이 영화 정말 재미없어요" // "This movie is really boring."
            };

            var resultPrediction = predictionFunction.Predict(sampleStatement);

            Console.WriteLine();
            Console.WriteLine("=============== Prediction Test of model with a single sample and test dataset===============");
            Console.WriteLine();
            Console.WriteLine($"Sentiment: {resultPrediction.SentimentText} | Prediction: {(Convert.ToBoolean(resultPrediction.Prediction) ? "Positive" : "Negative")} | Probability: {resultPrediction.Probability} ");
            Console.WriteLine("=============== End of Predictions ===============");
            Console.WriteLine();

            // UseModelWithBatchItems
            IEnumerable<SentimentData> sentiments = new[]
            {
                new SentimentData {
                    SentimentText = "지루한 영화에요" // "It's a boring movie."
                },
                new SentimentData {
                    SentimentText = "이거 정말 최고에요!" // "This is really the best!"
                },
                new SentimentData {
                    SentimentText = "올해의 영화로 손꼽고 싶군요" // "I'd call it the movie of the year."
                }
            };

            IDataView batchComments = mlContext.Data.LoadFromEnumerable(sentiments);

            predictions = model.Transform(batchComments);

            // Use model to predict whether comment data is Positive (1) or Negative (0).
            IEnumerable<SentimentPrediction> predictedResults = mlContext.Data.CreateEnumerable<SentimentPrediction>(predictions, reuseRowObject: false);

            Console.WriteLine();
            Console.WriteLine("=============== Prediction Test of loaded model with multiple samples ===============");
            foreach (SentimentPrediction prediction in predictedResults)
            {
                Console.WriteLine($"Sentiment: {prediction.SentimentText} | Prediction: {(Convert.ToBoolean(prediction.Prediction) ? "Positive" : "Negative")} | Probability: {prediction.Probability}");
            }
            Console.WriteLine("=============== End of predictions ===============");
        }
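The SentimentData and SentimentPrediction classes are not shown. A sketch following the shape used by the ML.NET sentiment-analysis tutorial (attributes from Microsoft.ML.Data) would be:

        public class SentimentData
        {
            [LoadColumn(0)]
            public string SentimentText;

            [LoadColumn(1), ColumnName("Label")]
            public bool Sentiment;
        }

        public class SentimentPrediction : SentimentData
        {
            // Mapped from the trainer's PredictedLabel output column.
            [ColumnName("PredictedLabel")]
            public bool Prediction { get; set; }

            public float Probability { get; set; }

            public float Score { get; set; }
        }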
Example #23
        private void Evaluate(IDataView splitTestSet)
        {
            IDataView predictions = model.Transform(splitTestSet);

            metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");
        }
        static void Main(string[] args)
        {
            MLContext ml = new MLContext(seed: 1);

            var trainData = ml.Data.LoadFromTextFile<MlInput>("learningdata.csv", ',', hasHeader: true);

            //var select = ml.Transforms.SelectColumns("Airlines", "Architecture", "Art & Museums", "Automotive", "Banking & Financial", "Cannabis", "Casinos & Lottery");
            //var selectTransform = select.Fit(trainData).Transform(trainData);

            var inputcolumns = new List<string>();

            double f1sum = 0;
            int    count = 0;

            var techlist = new List<string> {
                "360 Photo/Video", "3D Environments", "3D Modelling", "AR", "AR Authoring", "AR Cloud", "AR Headsets", "AR Kit", "AR: Hololens", "AR: Magic Leap", "ARCore", "Avatar Creation", "Distribution & Device Management", "Eye Tracking", "Haptics & Peripherals", "Motion Capture",
                "Motion Simulators", "Networking", "Photogrammetry", "Spatial Audio", "Unity", "Unreal", "VR", "VR Authoring", "VR Headsets", "VR: Mobile", "VR: PC", "Volumetric Capture", "Web XR"
            };

            foreach (var technology in techlist.ToList())
            {
                var removedlist = techlist.ToList();
                removedlist.Remove(technology);
                string[] removedarray = removedlist.ToArray();

                var prepedData          = ml.Transforms.DropColumns(removedarray);
                var prepedDataTransform = prepedData.Fit(trainData).Transform(trainData);

                TrainTestData trainTestData = ml.Data.TrainTestSplit(prepedDataTransform, testFraction: 0.2);

                IEstimator<ITransformer> dataPipe = ml.Transforms.Concatenate("Features", new[] { "Airlines", "Architecture", "Art & Museums", "Automotive", "Banking & Financial", "Cannabis", "Casinos & Lottery", "Charities", "Education & Training", "Education (K-12)", "Emergency Response", "Health & Medical", "Industrial",
                                                                                                   "Legal & Insurance", "Media & News & Entertainment", "Military", "Music", "Real Estate", "Restaurant & Food", "Retail", "Sales & Marketing", "Sports", "Telecommunications", "Travel & Tourism", "Collaboration & Social", "Communications", "Data Analytics", "Design", "Gaming", "General Training", "Health & Safety",
                                                                                                   "LBE", "Leadership", "Marketing", "Rehabilitation", "Simulation", "Tools", "Training: Hands On", "Training: Hard Skills", "Training: Soft Skills" });

                var options = new LbfgsLogisticRegressionBinaryTrainer.Options()
                {
                    LabelColumnName           = technology,
                    FeatureColumnName         = "Features",
                    MaximumNumberOfIterations = 100,
                    OptimizationTolerance     = 1e-8f
                };

                var lbfsgLogistic = ml.BinaryClassification.Trainers.LbfgsLogisticRegression(options);
                var trainPipe     = dataPipe.Append(lbfsgLogistic);

                Display(trainTestData.TrainSet);
                var model = trainPipe.Fit(trainTestData.TrainSet);

                IDataView predictionView = model.Transform(trainTestData.TestSet);
                CalibratedBinaryClassificationMetrics metrics = ml.BinaryClassification.Evaluate(predictionView, labelColumnName: technology);

                //Console.WriteLine($"F1 {metrics.ConfusionMatrix.GetFormattedConfusionTable().ToString()} ");
                Console.WriteLine($"F1 {metrics.F1Score} ");
                if (metrics.F1Score > 0)
                {
                    f1sum += metrics.F1Score;
                    count++;
                }
            }

            Console.WriteLine($"Average: {f1sum/count}");
        }
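The Display helper is not included. A minimal sketch using Microsoft.ML's Preview extension (and System.Linq) could be:

        // Hypothetical helper: print the first few rows of an IDataView for inspection.
        private static void Display(IDataView dataView)
        {
            var preview = dataView.Preview(maxRows: 3);
            foreach (var row in preview.RowView)
                Console.WriteLine(string.Join(" | ", row.Values.Select(kv => $"{kv.Key}: {kv.Value}")));
        }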
Example #25
 public static void Evaluate(MLContext mlContext, ITransformer model, IDataView splitTestSet)
 {
     IDataView predictions = model.Transform(splitTestSet);
     CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");
 }
        public async Task<IActionResult> OnPostAsync()
        {
            if (await _context.AccountDatasets.FirstOrDefaultAsync(x => x.DatasetId == DatasetId && x.AccountId == HttpContext.Session.GetInt32("id")) == null)
            {
                return RedirectToPage("Error");
            }

            Models.Dataset dataset = await _context.Datasets.FirstOrDefaultAsync(x => x.Id == DatasetId);

            List<Models.Article> articles = await _context.Articles.Where(x => x.DatasetId == dataset.Id).ToListAsync();

            if (articles.Count(x => x.Classification) < 5 || articles.Count(x => !x.Classification) < 5)
            {
                ModelState.AddModelError("DatasetId", "Dataset must have at least 5 articles of each classification.");
            }

            if (!ModelState.IsValid)
            {
                Datasets = await _context.Datasets.Where(x => x.AccountDatasets.Any(y => y.DatasetId == x.Id && y.AccountId == HttpContext.Session.GetInt32("id"))).ToListAsync();

                return Page();
            }

            List<ArticleData> data = articles.Select(x => new ArticleData()
            {
                Text = x.Title + " " + x.Abstract, Classification = x.Classification
            }).ToList();
            MLContext     mlContext     = new MLContext();
            IDataView     dataView      = mlContext.Data.LoadFromEnumerable(data);
            TrainTestData splitDataView = mlContext.Data.TrainTestSplit(dataView, 0.2);
            var           estimator     = mlContext.Transforms.Text.FeaturizeText(outputColumnName: "Features", inputColumnName: nameof(ArticleData.Text))
                                          .Append(mlContext.BinaryClassification.Trainers.SdcaLogisticRegression(labelColumnName: "Label", featureColumnName: "Features"));
            ITransformer model       = estimator.Fit(splitDataView.TrainSet);
            IDataView    predictions = model.Transform(splitDataView.TestSet);
            CalibratedBinaryClassificationMetrics metrics = mlContext.BinaryClassification.Evaluate(predictions, "Label");
            var guid = Guid.NewGuid().ToString();
            var path = Path.Combine(_environment.WebRootPath, "classifiers", $"{guid}.zip");

            mlContext.Model.Save(model, dataView.Schema, path);

            Models.Classifier classifier = new Models.Classifier()
            {
                Name      = Name,
                Accuracy  = metrics.Accuracy,
                Precision = metrics.PositivePrecision,
                Recall    = metrics.PositiveRecall,
                Date      = DateTime.Now,
                Model     = guid
            };

            Models.Account account = await _context.Accounts.FirstOrDefaultAsync(x => x.Id == HttpContext.Session.GetInt32("id"));

            _context.Classifiers.Add(classifier);
            _context.AccountClassifiers.Add(new Models.AccountClassifier()
            {
                Account = account, Classifier = classifier
            });
            await _context.SaveChangesAsync();

            return RedirectToPage("Classifiers");
        }
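The ArticleData class used above is not shown. A plausible definition, with Classification mapped to the "Label" column that the trainer and evaluator expect (ColumnName attribute from Microsoft.ML.Data), is:

        public class ArticleData
        {
            public string Text { get; set; }

            [ColumnName("Label")]
            public bool Classification { get; set; }
        }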