        /// <summary>
        /// Produces a set of metrics for the model by running the test data against it.
        /// </summary>
        public BinaryClassificationMetrics Evaluate()
        {
            var testData = Model.Transform(SplitDataView.TestSet);
            BinaryClassificationMetrics bcmMetrics = objContext.BinaryClassification.Evaluate(
                data: testData,
                labelColumnName: "Sentiment",
                predictedLabelColumnName: "PredictedLabel",
                scoreColumnName: "Score",
                probabilityColumnName: "Probability");

            return bcmMetrics;
        }
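For context, a minimal sketch of how the members this snippet relies on (objContext, SplitDataView, Model) might be initialized; the data path, pipeline, and trainer below are assumptions, not code from the original project:

        // Hypothetical setup (not from the original project): load data, split it,
        // and train the model that Evaluate() above scores against the test set.
        MLContext objContext = new MLContext(seed: 1);
        IDataView rawData = objContext.Data.LoadFromTextFile<SentimentData>("sentiment.tsv", hasHeader: true);
        DataOperationsCatalog.TrainTestData SplitDataView = objContext.Data.TrainTestSplit(rawData, testFraction: 0.2);
        ITransformer Model = objContext.Transforms.Text
            .FeaturizeText("Features", nameof(SentimentData.SentimentText))
            .Append(objContext.BinaryClassification.Trainers.SdcaLogisticRegression(labelColumnName: "Sentiment"))
            .Fit(SplitDataView.TrainSet);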
Example #2
File: Program.cs  Project: sjison/ML
        /// <summary>
        /// Evaluates the trained model for quality assurance against a second data set.
        ///
        /// Loads the test dataset.
        /// Creates the binary evaluator.
        /// Evaluates the model and creates metrics.
        ///
        /// Displays the metrics.
        /// </summary>
        /// <param name="model">The trained prediction model to evaluate.</param>
        /// <param name="input">Supplies the path to the test dataset.</param>
        internal static void Evaluate(
            PredictionModel<ClassificationData, ClassPrediction> model,
            InputData input)
        {
            // Loads the new test dataset with the same schema.
            // You can evaluate the model using this dataset as a quality check.

            //var testData = new TextLoader(_testDataPath).CreateFrom<SentimentData>();
            var testData = new TextLoader(input.TestData).CreateFrom<ClassificationData>();

            // Computes the quality metrics for the PredictionModel using the specified dataset.
            var evaluator = new BinaryClassificationEvaluator();

            // The BinaryClassificationMetrics contains the overall metrics computed by binary
            // classification evaluators. To display these to determine the quality of the model,
            // you need to get the metrics first.
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            // Displaying the metrics for model validation
            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"     Auc: {metrics.Auc:P2}");
            Console.WriteLine($" F1Score: {metrics.F1Score:P2}");
        }
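Example #3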
 public BinaryClassificationIterationResult(ITransformer model, BinaryClassificationMetrics metrics, IDataView scoredValidationData, Pipeline pipeline = null)
 {
     Model = model;
     ScoredValidationData = scoredValidationData;
     Metrics  = metrics;
     Pipeline = pipeline;
 }
Example #4
        public static void Evaluate(PredictionModel<SentimentData, SentimentPrediction> model)
        {
            var testData = new List<SentimentData>()
            {
                new SentimentData {
                    Sentiment     = 6f,
                    SentimentText = "such good thing"
                },
                new SentimentData {
                    Sentiment     = -9.3f,
                    SentimentText = "f*****g article"
                }
            };

            var collection = CollectionDataSource.Create(testData);
            var evaluator  = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, collection);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
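Example #5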
        private void ValidateBinaryMetrics(BinaryClassificationMetrics metrics)
        {
            Assert.Equal(0.6111, metrics.Accuracy, 4);
            Assert.Equal(0.6667, metrics.Auc, 4);
            Assert.Equal(0.8621, metrics.Auprc, 4);
            Assert.Equal(1, metrics.Entropy, 3);
            Assert.Equal(0.72, metrics.F1Score, 2);
            Assert.Equal(0.9689, metrics.LogLoss, 4);
            Assert.Equal(3.1122, metrics.LogLossReduction, 4);
            Assert.Equal(1, metrics.NegativePrecision, 1);
            Assert.Equal(0.2222, metrics.NegativeRecall, 4);
            Assert.Equal(0.5625, metrics.PositivePrecision, 4);
            Assert.Equal(1, metrics.PositiveRecall);

            var matrix = metrics.ConfusionMatrix;

            Assert.Equal(2, matrix.Order);
            Assert.Equal(2, matrix.ClassNames.Count);
            Assert.Equal("positive", matrix.ClassNames[0]);
            Assert.Equal("negative", matrix.ClassNames[1]);

            Assert.Equal(9, matrix[0, 0]);
            Assert.Equal(9, matrix["positive", "positive"]);
            Assert.Equal(0, matrix[0, 1]);
            Assert.Equal(0, matrix["positive", "negative"]);

            Assert.Equal(7, matrix[1, 0]);
            Assert.Equal(7, matrix["negative", "positive"]);
            Assert.Equal(2, matrix[1, 1]);
            Assert.Equal(2, matrix["negative", "negative"]);
        }
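Example #6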
        private void ValidateBinaryMetricsLightGBM(BinaryClassificationMetrics metrics)
        {
            Assert.Equal(.6111, metrics.Accuracy, 4);
            Assert.Equal(.8, metrics.Auc, 1);
            Assert.Equal(0.88, metrics.Auprc, 2);
            Assert.Equal(1, metrics.Entropy, 3);
            Assert.Equal(.72, metrics.F1Score, 4);
            Assert.Equal(0.96456100297125325, metrics.LogLoss, 4);
            Assert.Equal(3.5438997028746755, metrics.LogLossReduction, 4);
            Assert.Equal(1, metrics.NegativePrecision, 3);
            Assert.Equal(.222, metrics.NegativeRecall, 3);
            Assert.Equal(.562, metrics.PositivePrecision, 3);
            Assert.Equal(1, metrics.PositiveRecall);

            var matrix = metrics.ConfusionMatrix;

            Assert.Equal(2, matrix.Order);
            Assert.Equal(2, matrix.ClassNames.Count);
            Assert.Equal("positive", matrix.ClassNames[0]);
            Assert.Equal("negative", matrix.ClassNames[1]);

            Assert.Equal(9, matrix[0, 0]);
            Assert.Equal(9, matrix["positive", "positive"]);
            Assert.Equal(0, matrix[0, 1]);
            Assert.Equal(0, matrix["positive", "negative"]);

            Assert.Equal(7, matrix[1, 0]);
            Assert.Equal(7, matrix["negative", "positive"]);
            Assert.Equal(2, matrix[1, 1]);
            Assert.Equal(2, matrix["negative", "negative"]);
        }
Example #7
        public static void CalcularModelo()
        {
            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader<SentimentData>(_dataPath, useHeader: false, separator: "tab"));

            pipeline.Add(new TextFeaturizer("Features", "SentimentText"));

            pipeline.Add(new FastTreeBinaryClassifier()
            {
                NumLeaves = 5, NumTrees = 5, MinDocumentsInLeafs = 2
            });

            PredictionModel<SentimentData, SentimentPrediction> model = pipeline.Train<SentimentData, SentimentPrediction>();

            var testData  = new TextLoader<SentimentData>(_testDataPath, useHeader: false, separator: "tab");
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine();

            IEnumerable<SentimentData> sentiments = new[] {
                new SentimentData
                {
                    SentimentText = "Contoso's 11 is a wonderful experience",
                    Sentiment     = 0
                },
                new SentimentData
                {
                    SentimentText = "The acting in this movie is very bad",
                    Sentiment     = 0
                },
                new SentimentData
                {
                    SentimentText = "Joe versus the Volcano Coffee Company is a great film.",
                    Sentiment     = 0
                }
            };

            IEnumerable<SentimentPrediction> predictions = model.Predict(sentiments);

            Console.WriteLine();
            Console.WriteLine("Sentiment Predictions");
            Console.WriteLine("---------------------");

            var sentimentsAndPredictions = sentiments.Zip(predictions, (sentiment, prediction) => (sentiment, prediction));

            foreach (var item in sentimentsAndPredictions)
            {
                Console.WriteLine($"Sentiment: {item.sentiment.SentimentText} | Prediction: {(item.prediction.Sentiment ? "Positive" : "Negative")}");
            }
            Console.WriteLine();
        }
Example #8
        //   public static ITransformer BuildAndTrainModel(MLContext mlContext, IDataView splitTrainSet)
        //   {

        //       var estimator = mlContext.Transforms.Text.FeaturizeText(outputColumnName: "Features", inputColumnName: nameof(SentimentData.SentimentText))
        //       .Append(mlContext.BinaryClassification.Trainers.SdcaLogisticRegression(labelColumnName: "Label", featureColumnName: "Features"));
        //      // var svmEstimator =  mlContext.Transforms.NormalizeBinning("Price", maximumBinCount: 2);
        //     //  var svmEstimator = mlContext.Transforms.Text.FeaturizeText(outputColumnName: "Features", inputColumnName: nameof(SentimentData.SentimentText))
        ////.Append(mlContext.BinaryClassification.Trainers.LinearSvm(labelColumnName: "Label", featureColumnName: "Features"));
        //       Console.WriteLine("=============== Create and Train the Model ===============");
        //       var model = estimator.Fit(splitTrainSet);
        //       Console.WriteLine("=============== End of training ===============");
        //       Console.WriteLine();

        //       return model;
        //   }
        public static void EvaluateNonCalibrated(MLContext mlContext, ITransformer model, IDataView splitTestSet)
        {
            Console.WriteLine("=============== Evaluating Model accuracy with Test data===============");
            IDataView predictions = model.Transform(splitTestSet);
            BinaryClassificationMetrics metrics = mlContext.BinaryClassification.EvaluateNonCalibrated(predictions, "Label");

            Console.WriteLine();
            Console.WriteLine("Model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End of model evaluation ===============");
            StringBuilder writeFile = new StringBuilder();

            writeFile.AppendLine("Model quality metrics evaluation");
            writeFile.AppendLine($"Accuracy: {metrics.Accuracy:P2}");
            writeFile.AppendLine($"Auc: {metrics.AreaUnderRocCurve:P2}");
            writeFile.AppendLine($"F1Score: {metrics.F1Score:P2}");
            writeFile.AppendLine("=============== End of model evaluation ===============");
            using (System.IO.StreamWriter file =
                       new System.IO.StreamWriter(@"C:\Users\siust\OneDrive\Desktop\test.txt", true))
            {
                file.WriteLine(writeFile);
            }
        }
Example #9
        public static void Evaluate(PredictionModel<SentimentData, SentimentPrediction> model)
        {
            // Evaluates.
            var testData = new TextLoader(_testDataPath).CreateFrom<SentimentData>();

            // BinaryClassificationEvaluator computes the quality metrics for the PredictionModel
            // using the specified data set.
            var evaluator = new BinaryClassificationEvaluator();

            // BinaryClassificationMetrics contains the overall metrics computed by binary classification evaluators.
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            // The Accuracy metric gets the accuracy of a classifier, which is the proportion
            // of correct predictions in the test set.

            // The Auc metric gets the area under the ROC curve.
            // The area under the ROC curve is equal to the probability that the classifier ranks
            // a randomly chosen positive instance higher than a randomly chosen negative one
            // (assuming 'positive' ranks higher than 'negative').

            // The F1Score metric gets the classifier's F1 score.
            // The F1 score is the harmonic mean of precision and recall:
            //  2 * precision * recall / (precision + recall).

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
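To tie those definitions together, here is a short illustrative sketch that recomputes the headline metrics by hand from confusion-matrix counts; the counts below are the ones asserted in the ValidateBinaryMetrics test earlier in this listing (9 TP, 0 FN, 7 FP, 2 TN):

        // Illustrative only: deriving Accuracy, precision, recall, and F1 by hand.
        double tp = 9, fn = 0, fp = 7, tn = 2;
        double accuracy  = (tp + tn) / (tp + tn + fp + fn);               // 0.6111
        double precision = tp / (tp + fp);                                // 0.5625 (PositivePrecision)
        double recall    = tp / (tp + fn);                                // 1.0    (PositiveRecall)
        double f1        = 2 * precision * recall / (precision + recall); // 0.72
        Console.WriteLine($"Accuracy: {accuracy:P2}  F1: {f1:P2}");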
Example #10
        private void ValidateBinaryMetrics(BinaryClassificationMetrics metrics)
        {
            Assert.Equal(.5556, metrics.Accuracy, 4);
            Assert.Equal(.8, metrics.Auc, 1);
            Assert.Equal(.87, metrics.Auprc, 2);
            Assert.Equal(1, metrics.Entropy, 3);
            Assert.Equal(.6923, metrics.F1Score, 4);
            Assert.Equal(.969, metrics.LogLoss, 3);
            Assert.Equal(3.083, metrics.LogLossReduction, 3);
            Assert.Equal(1, metrics.NegativePrecision, 3);
            Assert.Equal(.111, metrics.NegativeRecall, 3);
            Assert.Equal(.529, metrics.PositivePrecision, 3);
            Assert.Equal(1, metrics.PositiveRecall);

            var matrix = metrics.ConfusionMatrix;

            Assert.Equal(2, matrix.Order);
            Assert.Equal(2, matrix.ClassNames.Count);
            Assert.Equal("positive", matrix.ClassNames[0]);
            Assert.Equal("negative", matrix.ClassNames[1]);

            Assert.Equal(9, matrix[0, 0]);
            Assert.Equal(9, matrix["positive", "positive"]);
            Assert.Equal(0, matrix[0, 1]);
            Assert.Equal(0, matrix["positive", "negative"]);

            Assert.Equal(8, matrix[1, 0]);
            Assert.Equal(8, matrix["negative", "positive"]);
            Assert.Equal(1, matrix[1, 1]);
            Assert.Equal(1, matrix["negative", "negative"]);
        }
Example #11
        public static void Print(string name, BinaryClassificationMetrics metrics, double? fBeta = null)
        {
            var counts = metrics.ConfusionMatrix.Counts;
            var tp     = counts[0][0];
            var fn     = counts[0][1];
            var fp     = counts[1][0];
            var tn     = counts[1][1];

            Console.WriteLine($"*************************************************************************************************************");
            Console.WriteLine($"*       Metrics for {name} binary classification model");
            Console.WriteLine($"*------------------------------------------------------------------------------------------------------------");
            Console.WriteLine($"*       Accuracy:          {metrics.Accuracy:P2}");
            //Console.WriteLine($"*       AUC:               {metrics.AreaUnderRocCurve:P2}");
            //Console.WriteLine($"*       AUC recall Curve:  {metrics.AreaUnderPrecisionRecallCurve:P2}");
            Console.WriteLine($"*       F1Score:           {metrics.F1Score:P2}");
            Console.WriteLine($"*       FBeta:             {(fBeta.HasValue ? metrics.FBeta(fBeta.Value) : metrics.FBeta()):P2}");
            Console.WriteLine($"*       PositivePrecision: {metrics.PositivePrecision:P2}");
            Console.WriteLine($"*       PositiveRecall:    {metrics.PositiveRecall:P2}      ");
            Console.WriteLine($"*       NegativePrecision: {metrics.NegativePrecision:P2}");
            Console.WriteLine($"*       NegativeRecall:    {metrics.NegativeRecall:P2}");
            Console.WriteLine($"*       True Matrix:       TP: {tp,6:N0}   TN: {tn,6:N0}  All: {tp + tn:N0}");
            Console.WriteLine($"*       False Matrix:      FP: {fp,6:N0}   FN: {fn,6:N0}  All: {fp + fn:N0}");

            Console.WriteLine($"*************************************************************************************************************");
            Console.WriteLine();
        }
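BinaryClassificationMetrics has no FBeta member in ML.NET, so the snippet above evidently relies on a project-local extension. A plausible sketch, assuming the standard F-beta definition:

        // Hypothetical extension backing the metrics.FBeta(...) calls above (assumption).
        public static class BinaryClassificationMetricsExtensions
        {
            public static double FBeta(this BinaryClassificationMetrics m, double beta = 1.0)
            {
                double p = m.PositivePrecision, r = m.PositiveRecall, b2 = beta * beta;
                return (1 + b2) * p * r / (b2 * p + r); // beta > 1 favors recall, beta < 1 favors precision
            }
        }

Example #12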
        public static void Execute()
        {
            Console.WriteLine("Executing Diabetes Experiment");
            Console.WriteLine("Creating new model");
            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader<DiabetesData>(dataPath, separator: ","));

            var features = new string[] { "BMI", "Age", "Pregnancies", "PlasmaGlucoseConcentration", "TricepsSkinFoldThickness" };

            pipeline.Add(new ColumnConcatenator("Features", features));

            var algorithm = new BinaryLogisticRegressor();

            pipeline.Add(algorithm);

            model = pipeline.Train<DiabetesData, DiabetesPrediction>();

            var testData  = new TextLoader<DiabetesData>(testDataPath, separator: ",");
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");

            var    score             = metrics.Accuracy + metrics.Auc + metrics.F1Score;
            double previousHighScore = 0;

            if (File.Exists(modelStatsPath))
            {
                var previousModelData = File.ReadAllLines(modelStatsPath);
                previousHighScore = double.Parse(previousModelData[0]);
            }

            if (score > previousHighScore)
            {
                File.WriteAllText(modelStatsPath, score.ToString() + Environment.NewLine);
                File.AppendAllLines(modelStatsPath, new List<string>
                {
                    $"Accuracy: {metrics.Accuracy:P2}",
                    $"Auc: {metrics.Auc:P2}",
                    $"F1Score: {metrics.F1Score:P2}"
                });
                File.AppendAllText(modelStatsPath, "Features:" + Environment.NewLine);
                File.AppendAllLines(modelStatsPath, features);
                File.AppendAllText(modelStatsPath, "Algorithm: " + algorithm.GetType().Name);
                model.WriteAsync(modelPath).Wait();  // block until the model file is fully written
                Console.WriteLine("New model is better");
            }
            else
            {
                Console.WriteLine("Old model is better");
            }
            Console.ReadLine();
        }
Example #13
 private void LogResult(string algorithm, BinaryClassificationMetrics binaryClassificationMetrics)
 {
     Console.WriteLine($"------------- {algorithm} - EVALUATION RESULTS -------------");
     Console.WriteLine($"Accurancy = {binaryClassificationMetrics.Accuracy}");
     Console.WriteLine($"AUC = {binaryClassificationMetrics.AreaUnderRocCurve}");
     Console.WriteLine($"F1Score = {binaryClassificationMetrics.F1Score}");
     Console.WriteLine($"------------- {algorithm} - END EVALUATION -------------");
 }
Example #14
        public BinaryClassificationMetrics Evaluate()
        {
            var testData  = new TextLoader(TestDataPath).CreateFrom<SentimentData>();
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(_model, testData);

            return metrics;
        }
Example #15
 public static void PrintBinaryClassificationMetrics(string name, BinaryClassificationMetrics metrics)
 {
     Console.WriteLine($"************************************************************");
     Console.WriteLine($"*       Metrics for {name} binary classification model      ");
     Console.WriteLine($"*-----------------------------------------------------------");
     Console.WriteLine($"*       Accuracy: {metrics.Accuracy:P2}");
     Console.WriteLine($"*       Auc:      {metrics.Auc:P2}");
     Console.WriteLine($"************************************************************");
 }
Example #16
 private static void PrintMetrics(string name, BinaryClassificationMetrics metrics)
 {
     Console.WriteLine($"*************************************************");
     Console.WriteLine($"*       Metrics for {name}          ");
     Console.WriteLine($"*------------------------------------------------");
     Console.WriteLine($"*       Accuracy: {metrics.Accuracy}");
     Console.WriteLine($"*       Entropy: {metrics.Entropy}");
     Console.WriteLine($"*************************************************");
 }
Example #17
 public static void PrintBinaryClassificationMetrics(BinaryClassificationMetrics metrics)
 {
     Console.WriteLine($"************************************************************");
     Console.WriteLine($"*       Metrics for binary classification model      ");
     Console.WriteLine($"*-----------------------------------------------------------");
     Console.WriteLine($"*       Accuracy: {metrics.Accuracy:P2}");
     Console.WriteLine($"*       Auc:      {metrics.AreaUnderRocCurve:P2}");
     Console.WriteLine($"************************************************************");
 }
Example #18
 // Pretty-print BinaryClassificationMetrics objects.
 private static void PrintMetrics(BinaryClassificationMetrics metrics)
 {
     Console.WriteLine($"Accuracy: {metrics.Accuracy:F2}");
     Console.WriteLine($"AUC: {metrics.AreaUnderRocCurve:F2}");
     Console.WriteLine($"F1 Score: {metrics.F1Score:F2}");
     Console.WriteLine($"Negative Precision: {metrics.NegativePrecision:F2}");
     Console.WriteLine($"Negative Recall: {metrics.NegativeRecall:F2}");
     Console.WriteLine($"Positive Precision: {metrics.PositivePrecision:F2}");
     Console.WriteLine($"Positive Recall: {metrics.PositiveRecall:F2}");
 }
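Example #19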
        private static TMetrics GetAverageMetrics(IEnumerable<TMetrics> metrics, TMetrics metricsClosestToAvg)
        {
            if (typeof(TMetrics) == typeof(BinaryClassificationMetrics))
            {
                var newMetrics = metrics.Select(x => x as BinaryClassificationMetrics);
                Contracts.Assert(newMetrics != null);

                var result = new BinaryClassificationMetrics(
                    auc: GetAverageOfNonNaNScores(newMetrics.Select(x => x.AreaUnderRocCurve)),
                    accuracy: GetAverageOfNonNaNScores(newMetrics.Select(x => x.Accuracy)),
                    positivePrecision: GetAverageOfNonNaNScores(newMetrics.Select(x => x.PositivePrecision)),
                    positiveRecall: GetAverageOfNonNaNScores(newMetrics.Select(x => x.PositiveRecall)),
                    negativePrecision: GetAverageOfNonNaNScores(newMetrics.Select(x => x.NegativePrecision)),
                    negativeRecall: GetAverageOfNonNaNScores(newMetrics.Select(x => x.NegativeRecall)),
                    f1Score: GetAverageOfNonNaNScores(newMetrics.Select(x => x.F1Score)),
                    auprc: GetAverageOfNonNaNScores(newMetrics.Select(x => x.AreaUnderPrecisionRecallCurve)),
                    // Return ConfusionMatrix from the fold closest to average score
                    confusionMatrix: (metricsClosestToAvg as BinaryClassificationMetrics).ConfusionMatrix);
                return result as TMetrics;
            }

            if (typeof(TMetrics) == typeof(MulticlassClassificationMetrics))
            {
                var newMetrics = metrics.Select(x => x as MulticlassClassificationMetrics);
                Contracts.Assert(newMetrics != null);

                var result = new MulticlassClassificationMetrics(
                    accuracyMicro: GetAverageOfNonNaNScores(newMetrics.Select(x => x.MicroAccuracy)),
                    accuracyMacro: GetAverageOfNonNaNScores(newMetrics.Select(x => x.MacroAccuracy)),
                    logLoss: GetAverageOfNonNaNScores(newMetrics.Select(x => x.LogLoss)),
                    logLossReduction: GetAverageOfNonNaNScores(newMetrics.Select(x => x.LogLossReduction)),
                    topKPredictionCount: newMetrics.ElementAt(0).TopKPredictionCount,
                    topKAccuracy: GetAverageOfNonNaNScores(newMetrics.Select(x => x.TopKAccuracy)),
                    // Return PerClassLogLoss and ConfusionMatrix from the fold closest to average score
                    perClassLogLoss: (metricsClosestToAvg as MulticlassClassificationMetrics).PerClassLogLoss.ToArray(),
                    confusionMatrix: (metricsClosestToAvg as MulticlassClassificationMetrics).ConfusionMatrix);
                return result as TMetrics;
            }

            if (typeof(TMetrics) == typeof(RegressionMetrics))
            {
                var newMetrics = metrics.Select(x => x as RegressionMetrics);
                Contracts.Assert(newMetrics != null);

                var result = new RegressionMetrics(
                    l1: GetAverageOfNonNaNScores(newMetrics.Select(x => x.MeanAbsoluteError)),
                    l2: GetAverageOfNonNaNScores(newMetrics.Select(x => x.MeanSquaredError)),
                    rms: GetAverageOfNonNaNScores(newMetrics.Select(x => x.RootMeanSquaredError)),
                    lossFunction: GetAverageOfNonNaNScores(newMetrics.Select(x => x.LossFunction)),
                    rSquared: GetAverageOfNonNaNScores(newMetrics.Select(x => x.RSquared)));
                return result as TMetrics;
            }

            throw new NotImplementedException($"Metric {typeof(TMetrics)} not implemented");
        }
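GetAverageOfNonNaNScores is not shown in this listing; judging by its name and how it is called, it presumably averages the per-fold scores while skipping NaN entries. A minimal sketch under that assumption:

        // Hypothetical helper (assumption): averages fold scores, ignoring NaN results.
        private static double GetAverageOfNonNaNScores(IEnumerable<double> results)
        {
            var scores = results.Where(r => !double.IsNaN(r)).ToList();
            return scores.Count == 0 ? double.NaN : scores.Average();
        }

Example #20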
        public void TrainAndPredictSentimentModelTest()
        {
            var pipeline  = PreparePipeline();
            var model     = pipeline.Train<SentimentData, SentimentPrediction>();
            var testData  = PrepareTextLoaderTestData();
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            ValidateExamples(model);
            ValidateBinaryMetrics(metrics);
        }
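Example #21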
        void ReconfigurablePrediction()
        {
            var dataPath     = GetDataPath(SentimentDataPath);
            var testDataPath = GetDataPath(SentimentTestPath);

            using (var env = new TlcEnvironment(seed: 1, conc: 1))
            {
                // Pipeline
                var loader = new TextLoader(env, MakeSentimentTextLoaderArgs(), new MultiFileSource(dataPath));

                var trans = TextTransform.Create(env, MakeSentimentTextTransformArgs(), loader);

                // Train
                var trainer = new LinearClassificationTrainer(env, new LinearClassificationTrainer.Arguments
                {
                    NumThreads = 1
                });

                var        cached     = new CacheDataView(env, trans, prefetch: null);
                var        trainRoles = new RoleMappedData(cached, label: "Label", feature: "Features");
                IPredictor predictor  = trainer.Train(new Runtime.TrainContext(trainRoles));
                using (var ch = env.Start("Calibrator training"))
                {
                    predictor = CalibratorUtils.TrainCalibrator(env, ch, new PlattCalibratorTrainer(env), int.MaxValue, predictor, trainRoles);
                }

                var scoreRoles = new RoleMappedData(trans, label: "Label", feature: "Features");
                IDataScorerTransform scorer = ScoreUtils.GetScorer(predictor, scoreRoles, env, trainRoles.Schema);

                var dataEval = new RoleMappedData(scorer, label: "Label", feature: "Features", opt: true);

                var evaluator = new BinaryClassifierMamlEvaluator(env, new BinaryClassifierMamlEvaluator.Arguments()
                {
                });
                var metricsDict = evaluator.Evaluate(dataEval);

                var metrics = BinaryClassificationMetrics.FromMetrics(env, metricsDict["OverallMetrics"], metricsDict["ConfusionMatrix"])[0];

                var bindable  = ScoreUtils.GetSchemaBindableMapper(env, predictor, null);
                var mapper    = bindable.Bind(env, trainRoles.Schema);
                var newScorer = new BinaryClassifierScorer(env, new BinaryClassifierScorer.Arguments {
                    Threshold = 0.01f, ThresholdColumn = DefaultColumnNames.Probability
                },
                                                           scoreRoles.Data, mapper, trainRoles.Schema);

                dataEval = new RoleMappedData(newScorer, label: "Label", feature: "Features", opt: true);
                var newEvaluator = new BinaryClassifierMamlEvaluator(env, new BinaryClassifierMamlEvaluator.Arguments()
                {
                    Threshold = 0.01f, UseRawScoreThreshold = false
                });
                metricsDict = newEvaluator.Evaluate(dataEval);
                var newMetrics = BinaryClassificationMetrics.FromMetrics(env, metricsDict["OverallMetrics"], metricsDict["ConfusionMatrix"])[0];
            }
        }
Example #22
 /// <summary>
 /// Check that a <see cref="BinaryClassificationMetrics"/> object is valid.
 /// </summary>
 /// <param name="metrics">The metrics object.</param>
 public static void AssertMetrics(BinaryClassificationMetrics metrics)
 {
     Assert.InRange(metrics.Accuracy, 0, 1);
     Assert.InRange(metrics.AreaUnderRocCurve, 0, 1);
     Assert.InRange(metrics.AreaUnderPrecisionRecallCurve, 0, 1);
     Assert.InRange(metrics.F1Score, 0, 1);
     Assert.InRange(metrics.NegativePrecision, 0, 1);
     Assert.InRange(metrics.NegativeRecall, 0, 1);
     Assert.InRange(metrics.PositivePrecision, 0, 1);
     Assert.InRange(metrics.PositiveRecall, 0, 1);
 }
Example #23
        //evaluate the model's performance
        //load the test data
        //TextLoader

        //create a binary evaluator
        //BinaryClassificationEvaluator


        //Evaluate the model and generate metrics for evaluation
        //BinaryClassificationMetrics


        //output metrics
        //cw + Tab (Visual Studio's Console.WriteLine snippet)

        #endregion

        public static void Evaluate(PredictionModel<SentimentData, SentimentPrediction> model)
        {
            var testData  = new TextLoader(_testDataPath).CreateFrom<SentimentData>();
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #24
        public static void Evalua(PredictionModel<DatosSentimiento, PredictSentimiento> modelo)
        {
            var datosPrueba = new TextLoader<DatosSentimiento>(_rutaDatosEntrenamiento, useHeader: false, separator: "tab");
            var evaluador   = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metricas = evaluador.Evaluate(modelo, datosPrueba);

            Console.WriteLine();
            Console.WriteLine("Prediction model quality metrics evaluation");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Accuracy: {metricas.Accuracy:P2}");
            Console.WriteLine($"AUC: {metricas.Auc:P2}");
        }
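Example #25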
 private static BinaryClassificationMetrics BinaryClassifierDelta(
     BinaryClassificationMetrics a, BinaryClassificationMetrics b)
 {
     return new BinaryClassificationMetrics(
                auc: a.AreaUnderRocCurve - b.AreaUnderRocCurve,
                accuracy: a.Accuracy - b.Accuracy,
                positivePrecision: a.PositivePrecision - b.PositivePrecision,
                positiveRecall: a.PositiveRecall - b.PositiveRecall,
                negativePrecision: a.NegativePrecision - b.NegativePrecision,
                negativeRecall: a.NegativeRecall - b.NegativeRecall,
                f1Score: a.F1Score - b.F1Score,
                auprc: a.AreaUnderPrecisionRecallCurve - b.AreaUnderPrecisionRecallCurve);
 }
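Example #26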
        private BinaryClassificationMetrics EvaluateBinary(IHostEnvironment env, IDataView scoredData)
        {
            var dataEval = new RoleMappedData(scoredData, label: "Label", feature: "Features", opt: true);

            // Evaluate.
            // Note: BinaryClassifierEvaluator does not work here; it throws "Failed to find 'Score' column" when Evaluate is called.
            //var evaluator = new BinaryClassifierEvaluator(env, new BinaryClassifierEvaluator.Arguments());

            var evaluator  = new BinaryClassifierMamlEvaluator(env, new BinaryClassifierMamlEvaluator.Arguments());
            var metricsDic = evaluator.Evaluate(dataEval);

            return BinaryClassificationMetrics.FromMetrics(env, metricsDic["OverallMetrics"], metricsDic["ConfusionMatrix"])[0];
        }
Example #27
        public static void Evaluate(PredictionModel<SentimentData, SentimentPrediction> model, string _testDataPath)
        {
            var testData  = new TextLoader<SentimentData>(_testDataPath, useHeader: false, separator: "tab");
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #28
        static void Main(string[] args)
        {
            var pipeline = new LearningPipeline();

            var loader = new TextLoader(dataPath).CreateFrom<SentimentData>(useHeader: true, '\t');

            pipeline.Add(loader);

            pipeline.Add(new TextFeaturizer("Features", "SentimentText")
            {
                StopWordsRemover = new PredefinedStopWordsRemover(),
                KeepPunctuations = false,
                TextCase         = TextNormalizerTransformCaseNormalizationMode.Lower,
                VectorNormalizer = TextTransformTextNormKind.L2
            });

            pipeline.Add(new StochasticDualCoordinateAscentBinaryClassifier()
            {
                NumThreads = 8, Shuffle = true, NormalizeFeatures = NormalizeOption.Yes
            });

            PredictionModel<SentimentData, SentimentPrediction> model = pipeline.Train<SentimentData, SentimentPrediction>();

            IEnumerable<SentimentData> sentiments = new[]
            {
                new SentimentData
                {
                    SentimentText = "I hated the movie."
                },
                new SentimentData
                {
                    SentimentText = "The movie was entertaining the whole time, i really enjoyed it."
                }
            };

            IEnumerable<SentimentPrediction> predictions = model.Predict(sentiments);

            foreach (var item in predictions)
            {
                Console.WriteLine($"Prediction: {(item.Sentiment ? "Positive" : "Negative")}");
            }

            var evaluatorTrained = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metricsTrained = evaluatorTrained.Evaluate(model, loader);

            Console.WriteLine("ACCURACY OF MODEL ON TRAINED DATA: " + metricsTrained.Accuracy);

            model.WriteAsync(trainedModelPath);

            Console.Read();
        }
Example #29
 public static void PrintBinaryClassificationMetrics(string name, BinaryClassificationMetrics metrics)
 {
     Console.WriteLine($"************************************************************");
     Console.WriteLine($"*       Metrics for {name} binary classification model      ");
     Console.WriteLine($"*-----------------------------------------------------------");
     Console.WriteLine($"*       Accuracy: {metrics.Accuracy:P2}");
     Console.WriteLine($"*       Area Under Curve:      {metrics.AreaUnderRocCurve:P2}");
     Console.WriteLine($"*       Area under Precision recall Curve:  {metrics.AreaUnderPrecisionRecallCurve:P2}");
     Console.WriteLine($"*       F1Score:  {metrics.F1Score:P2}");
     Console.WriteLine($"*       PositivePrecision:  {metrics.PositivePrecision:#.##}");
     Console.WriteLine($"*       PositiveRecall:  {metrics.PositiveRecall:#.##}");
     Console.WriteLine($"*       NegativePrecision:  {metrics.NegativePrecision:#.##}");
     Console.WriteLine($"*       NegativeRecall:  {metrics.NegativeRecall:P2}");
     Console.WriteLine($"************************************************************");
 }
Example #30
        public BinaryClassificationMetrics Evaluate(IDataView data, string labelColumn = DefaultColumnNames.Label,
                                                    string probabilityColumn = DefaultColumnNames.Probability)
        {
            var ci  = EvaluateUtils.GetScoreColumnInfo(_env, data.Schema, null, DefaultColumnNames.Score, MetadataUtils.Const.ScoreColumnKind.BinaryClassification);
            var map = new KeyValuePair<RoleMappedSchema.ColumnRole, string>[]
            {
                RoleMappedSchema.CreatePair(MetadataUtils.Const.ScoreValueKind.Probability, probabilityColumn),
                RoleMappedSchema.CreatePair(MetadataUtils.Const.ScoreValueKind.Score, ci.Name)
            };
            var rmd = new RoleMappedData(data, labelColumn, DefaultColumnNames.Features, opt: true, custom: map);

            var metricsDict = _evaluator.Evaluate(rmd);

            return BinaryClassificationMetrics.FromMetrics(_env, metricsDict["OverallMetrics"], metricsDict["ConfusionMatrix"]).Single();
        }