Example #1
        private static void Evaluate(PredictionModel <TitanicData, TitanicPrediction> model)
        {
            // To evaluate how well the model predicts values, the model is run against a new set
            // of data (test data) that was not involved in training.
            var testData = new TextLoader(TestDataPath).CreateFrom <TitanicData>(useHeader: true, separator: ',');

            // BinaryClassificationEvaluator performs evaluation for Binary Classification type of ML problems.
            var evaluator = new BinaryClassificationEvaluator();

            Console.WriteLine("=============== Evaluating model ===============");

            var metrics = evaluator.Evaluate(model, testData);

            // BinaryClassificationMetrics contains the overall metrics computed by binary classification evaluators.
            // The Accuracy metric gets the accuracy of a classifier, which is the proportion
            // of correct predictions in the test set.

            // The Auc metric gets the area under the ROC curve.
            // The area under the ROC curve is equal to the probability that the classifier ranks
            // a randomly chosen positive instance higher than a randomly chosen negative one
            // (assuming 'positive' ranks higher than 'negative').

            // The F1Score metric gets the classifier's F1 score.
            // The F1 score is the harmonic mean of precision and recall:
            //  2 * precision * recall / (precision + recall).

            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End evaluating ===============");
            Console.WriteLine();
        }
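The comments in the example above describe the metrics in words. As a minimal standalone sketch (not part of the sample; the confusion-matrix counts are made up for illustration), Accuracy and F1Score can be computed by hand from their definitions:

        using System;

        class MetricsByHand
        {
            static void Main()
            {
                // Hypothetical confusion-matrix counts, for illustration only.
                double tp = 70, tn = 80, fp = 20, fn = 30;

                // Accuracy: proportion of correct predictions in the test set.
                double accuracy = (tp + tn) / (tp + tn + fp + fn);

                // F1 score: harmonic mean of precision and recall.
                double precision = tp / (tp + fp);
                double recall    = tp / (tp + fn);
                double f1        = 2 * precision * recall / (precision + recall);

                Console.WriteLine($"Accuracy: {accuracy:P2}");
                Console.WriteLine($"F1Score: {f1:P2}");
            }
        }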
Example #2
        public static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model)
        {
            // Evaluates the model against the test dataset.
            var testData = new TextLoader(_testDataPath).CreateFrom <SentimentData>();

            // BinaryClassificationEvaluator computes the quality metrics for the PredictionModel
            // using the specified data set.
            var evaluator = new BinaryClassificationEvaluator();

            // BinaryClassificationMetrics contains the overall metrics computed by binary classification evaluators.
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            // The Accuracy metric gets the accuracy of a classifier, which is the proportion
            // of correct predictions in the test set.

            // The Auc metric gets the area under the ROC curve.
            // The area under the ROC curve is equal to the probability that the classifier ranks
            // a randomly chosen positive instance higher than a randomly chosen negative one
            // (assuming 'positive' ranks higher than 'negative').

            // The F1Score metric gets the classifier's F1 score.
            // The F1 score is the harmonic mean of precision and recall:
            //  2 * precision * recall / (precision + recall).

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #3
        public BinaryClassificationMetrics Evaluate(PredictionModel <BinaryClassificationData, BinaryClassificationPrediction> model, string testDataLocation)
        {
            var testData = new TextLoader(testDataLocation).CreateFrom <BinaryClassificationData>(useHeader: true, separator: ';');
            var metrics  = new BinaryClassificationEvaluator().Evaluate(model, testData);

            return(metrics);
        }
Example #4
File: Program.cs Project: sjison/ML
        /// <summary>
        /// Evaluates the trained model for quality assurance against a second data set.
        ///
        /// Loads the test dataset.
        /// Creates the binary evaluator.
        /// Evaluates the model and creates metrics.
        ///
        /// Displays the metrics.
        /// </summary>
        /// <param name="model">The trained prediction model to evaluate.</param>
        /// <param name="input">Input settings providing the path to the test dataset.</param>
        internal static void Evaluate(
            PredictionModel <ClassificationData, ClassPrediction> model,
            InputData input)
        {
            // Loads the test dataset with the same schema as the training data.
            // You can evaluate the model using this dataset as a quality check.
            var testData = new TextLoader(input.TestData).CreateFrom <ClassificationData>();

            // Computes the quality metrics for the PredictionModel using the specified dataset.
            var evaluator = new BinaryClassificationEvaluator();

            // The BinaryClassificationMetrics contains the overall metrics computed by binary
            // classification evaluators. To display these to determine the quality of the model,
            // you need to get the metrics first.
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            // Displaying the metrics for model validation
            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"     Auc: {metrics.Auc:P2}");
            Console.WriteLine($" F1Score: {metrics.F1Score:P2}");
        }
Example #5
        public static void CalcularModelo()
        {
            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader <SentimentData>(_dataPath, useHeader: false, separator: "tab"));

            pipeline.Add(new TextFeaturizer("Features", "SentimentText"));

            pipeline.Add(new FastTreeBinaryClassifier()
            {
                NumLeaves = 5, NumTrees = 5, MinDocumentsInLeafs = 2
            });

            PredictionModel <SentimentData, SentimentPrediction> model = pipeline.Train <SentimentData, SentimentPrediction>();


            var testData  = new TextLoader <SentimentData>(_testDataPath, useHeader: false, separator: "tab");
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine();

            IEnumerable <SentimentData> sentiments = new[] {
                new SentimentData
                {
                    SentimentText = "Contoso's 11 is a wonderful experience",
                    Sentiment     = 0
                },
                new SentimentData
                {
                    SentimentText = "The acting in this movie is very bad",
                    Sentiment     = 0
                },
                new SentimentData
                {
                    SentimentText = "Joe versus the Volcano Coffee Company is a great film.",
                    Sentiment     = 0
                }
            };

            IEnumerable <SentimentPrediction> predictions = model.Predict(sentiments);

            Console.WriteLine();
            Console.WriteLine("Sentiment Predictions");
            Console.WriteLine("---------------------");

            var sentimentsAndPredictions = sentiments.Zip(predictions, (sentiment, prediction) => (sentiment, prediction));

            foreach (var item in sentimentsAndPredictions)
            {
                Console.WriteLine($"Sentiment: {item.sentiment.SentimentText} | Prediction: {(item.prediction.Sentiment ? "Positive" : "Negative")}");
            }
            Console.WriteLine();
        }
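Several of these samples assume SentimentData and SentimentPrediction classes for the input and output schemas. A rough sketch of what such classes typically look like with the legacy LearningPipeline API follows; the column ordinals and the mapping of Sentiment to the "Label" column are assumptions and must match the actual data file:

        using Microsoft.ML.Runtime.Api;

        // Input schema; the ordinals and the "Label" mapping are assumptions, not taken from the sample.
        public class SentimentData
        {
            [Column(ordinal: "0", name: "Label")]
            public float Sentiment;

            [Column(ordinal: "1")]
            public string SentimentText;
        }

        // Output schema: the predicted label produced by the trained model.
        public class SentimentPrediction
        {
            [ColumnName("PredictedLabel")]
            public bool Sentiment;
        }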
Example #6
        public LotteryPredictionResult PredictionOneToFile(string webRootPath, string noSite, string noType, TrainingData data, string lotteryCode)
        {
            var    pipeline = new LearningPipeline();
            string dataPath = webRootPath + $"/TrainingGround/{noSite}{noType}.txt";

            pipeline.Add(new TextLoader(dataPath).CreateFrom <TrainingData>(separator: ','));
            pipeline.Add(new Dictionarizer("Label"));
            pipeline.Add(new ColumnConcatenator("Features", TrainingData.GetColumns()));
            pipeline.Add(new LogisticRegressionBinaryClassifier());
            pipeline.Add(new PredictedLabelColumnOriginalValueConverter()
            {
                PredictedLabelColumn = "PredictedLabel"
            });
            _logger.LogInformation("Start PredictionOne :" + lotteryCode + "—" + noSite + noType);
            var               model      = pipeline.Train <TrainingData, LotteryPrediction>();
            var               testData   = new TextLoader(dataPath).CreateFrom <TrainingData>(separator: ',');
            var               evaluator  = new BinaryClassificationEvaluator();
            var               metrics    = evaluator.Evaluate(model, testData);
            TrainingData      newPoint   = data;
            LotteryPrediction prediction = model.Predict(newPoint);
            string            result     = prediction.PredictedLabels;

            _logger.LogInformation("End PredictionOne :" + lotteryCode + "—" + noSite + noType);
            return(new LotteryPredictionResult()
            {
                PredictionType = noType,
                PredictionSite = noSite,
                PredictionResult = result,
                LotteryCode = lotteryCode
            });
        }
Example #7
        public static void PrintPredictionsAndEvaluate(FeatureVector predictions)
        {
            for (int i = 0; i < predictions.ColumnName.Count; i++)
            {
                Console.Write(predictions.ColumnName[i] + "\t");
            }
            Console.WriteLine();

            for (int i = 0; i < predictions.Values[0].Length; i++)
            {
                for (int j = 0; j < predictions.Values.Count; j++)
                {
                    Console.Write(predictions.Values[j][i] + "\t");
                }
                Console.WriteLine();
            }

            BinaryClassificationEvaluator bce = new BinaryClassificationEvaluator();

            bce.evaluate(predictions);
            Console.WriteLine("TN: " + bce.confusionMatrix.TN);
            Console.WriteLine("TP: " + bce.confusionMatrix.TP);
            Console.WriteLine("FN: " + bce.confusionMatrix.FN);
            Console.WriteLine("FP: " + bce.confusionMatrix.FP);
            Console.WriteLine("ACCURACY = " + bce.Accuracy);
        }
Example #8
File: MyML.cs Project: longphui/LH.Lottery
        public static void GetMyPrediction()
        {
            var    pipeline = new LearningPipeline();
            string dataPath = AppDomain.CurrentDomain.BaseDirectory + "/datamodel/myMLData.txt";

            pipeline.Add(new TextLoader(dataPath).CreateFrom <myData>(separator: ' '));
            pipeline.Add(new Dictionarizer("Label"));
            pipeline.Add(new ColumnConcatenator("Features", "XCoord", "YCoord", "ZCoord"));
            pipeline.Add(new LogisticRegressionBinaryClassifier());
            pipeline.Add(new PredictedLabelColumnOriginalValueConverter()
            {
                PredictedLabelColumn = "PredictedLabel"
            });
            Console.WriteLine("\nStarting training\n");
            var    model     = pipeline.Train <myData, myPrediction>();
            var    testData  = new TextLoader(dataPath).CreateFrom <myData>(separator: ' ');
            var    evaluator = new BinaryClassificationEvaluator();
            var    metrics   = evaluator.Evaluate(model, testData);
            double acc       = metrics.Accuracy * 100;

            Console.WriteLine("Model accuracy = " + acc.ToString("F2") + "%");
            myData newPoint = new myData()
            {
                x = 9,
                y = 8,
                z = 10
            };
            myPrediction prediction = model.Predict(newPoint);
            string       result     = prediction.PredictedLabels;

            Console.WriteLine("Prediction = " + result);
            Console.WriteLine("\nEnd ML.NET demo");
            Console.ReadLine();
        }
Example #9
        public static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model)
        {
            var testData = new List <SentimentData>()
            {
                new SentimentData {
                    Sentiment     = 6f,
                    SentimentText = "such good thing"
                },
                new SentimentData {
                    Sentiment     = -9.3f,
                    SentimentText = "f*****g article"
                }
            };

            var collection = CollectionDataSource.Create(testData);
            var evaluator  = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, collection);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #10
        public void Evaluation()
        {
            var dataPath     = GetDataPath(SentimentDataPath);
            var testDataPath = GetDataPath(SentimentDataPath);
            var pipeline     = new Legacy.LearningPipeline();

            var loader = new TextLoader(dataPath).CreateFrom <SentimentData>();

            loader.Arguments.HasHeader = true;
            pipeline.Add(loader);
            pipeline.Add(MakeSentimentTextTransform());
            pipeline.Add(new FastTreeBinaryClassifier()
            {
                NumLeaves = 5, NumTrees = 5, MinDocumentsInLeafs = 2
            });
            pipeline.Add(new PredictedLabelColumnOriginalValueConverter()
            {
                PredictedLabelColumn = "PredictedLabel"
            });
            var model = pipeline.Train <SentimentData, SentimentPrediction>();
            var testLearningPipelineItem = new TextLoader(testDataPath).CreateFrom <SentimentData>();

            testLearningPipelineItem.Arguments.HasHeader = true;
            var evaluator = new BinaryClassificationEvaluator();
            var metrics   = evaluator.Evaluate(model, testLearningPipelineItem);
        }
Example #11
        public static void Execute()
        {
            Console.WriteLine("Executing Diabetes Experiment");
            Console.WriteLine("Creating new model");
            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader <DiabetesData>(dataPath, separator: ","));

            var features = new string[] { "BMI", "Age", "Pregnancies", "PlasmaGlucoseConcentration", "TricepsSkinFoldThickness" };

            pipeline.Add(new ColumnConcatenator("Features", features));

            var algorithm = new BinaryLogisticRegressor();

            pipeline.Add(algorithm);

            model = pipeline.Train <DiabetesData, DiabetesPrediction>();

            var testData  = new TextLoader <DiabetesData>(testDataPath, separator: ",");
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");

            var    score             = metrics.Accuracy + metrics.Auc + metrics.F1Score;
            double previousHighScore = 0;

            if (File.Exists(modelStatsPath))
            {
                var previousModelData = File.ReadAllLines(modelStatsPath);
                previousHighScore = double.Parse(previousModelData[0]);
            }

            if (score > previousHighScore)
            {
                File.WriteAllText(modelStatsPath, score.ToString() + Environment.NewLine);
                File.AppendAllLines(modelStatsPath, new List <string>
                {
                    $"Accuracy: {metrics.Accuracy:P2}",
                    $"Auc: {metrics.Auc:P2}",
                    $"F1Score: {metrics.F1Score:P2}"
                });
                File.AppendAllText(modelStatsPath, "Features:" + Environment.NewLine);
                File.AppendAllLines(modelStatsPath, features);
                File.AppendAllText(modelStatsPath, "Algorithm: " + algorithm.GetType().Name);
                model.WriteAsync(modelPath);
                Console.WriteLine("New model is better");
            }
            else
            {
                Console.WriteLine("Old model is better");
            }
            Console.ReadLine();
        }
Example #12
        public BinaryClassificationMetrics Evaluate()
        {
            var testData  = new TextLoader(TestDataPath).CreateFrom <SentimentData>();
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(_model, testData);

            return(metrics);
        }
Example #13
        public BinaryClassificationMetrics Test(string testDataPath, PredictionModel <Data, Prediction> model)
        {
            var testData  = new TextLoader(testDataPath).CreateFrom <Data>();
            var evaluator = new BinaryClassificationEvaluator();
            var metrics   = evaluator.Evaluate(model, testData);

            return(metrics);
        }
Example #14
        private void TestModel()
        {
            var evaluator = new BinaryClassificationEvaluator();
            var testData  = new TextLoader <ManifestDataTraining>(testPath, useHeader: true, separator: ";");
            var metrics   = evaluator.Evaluate(model, testData);

            Console.WriteLine($"Accuracy = {metrics.Accuracy}");
        }
Example #15
        public static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model)
        {
            var testData  = new TextLoader(_testDataPath).CreateFrom <SentimentData>();
            var evaluator = new BinaryClassificationEvaluator();
            var metrics   = evaluator.Evaluate(model, testData);

            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #16
        public void TrainAndPredictSentimentModelTest()
        {
            var pipeline  = PreparePipeline();
            var model     = pipeline.Train <SentimentData, SentimentPrediction>();
            var testData  = PrepareTextLoaderTestData();
            var evaluator = new BinaryClassificationEvaluator();
            var metrics   = evaluator.Evaluate(model, testData);

            ValidateExamples(model);
            ValidateBinaryMetrics(metrics);
        }
Example #17
        public static void Evalua(PredictionModel <DatosSentimiento, PredictSentimiento> modelo)
        {
            var datosPrueba = (new TextLoader <DatosSentimiento>(_rutaDatosEntrenamiento, useHeader: false, separator: "tab"));
            var evaluador   = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metricas = evaluador.Evaluate(modelo, datosPrueba);

            Console.WriteLine();
            Console.WriteLine("Evaluación de métricas de calidad del modelo de Predicción");
            Console.WriteLine("--------------------------------");
            Console.WriteLine($"Precisión: {metricas.Accuracy:P2}");
            Console.WriteLine($"AUC: {metricas.Auc:P2}");
        }
Example #18
        // Returns the accuracy, AUC, and F1 score on the test data, plus the trained model
        // (return type inferred from the tuple returned below).
        (double Accuracy, double Auc, double F1Score, PredictionModel<MLNetData, MLNetPredict> Model)
        TrainAndGetMetrics(ILearningPipelineLoader dataTrain, ILearningPipelineLoader dataTest, ILearningPipelineItem trainer)
        {
            var pipeline = new LearningPipeline();

            pipeline.Add(dataTrain);
            pipeline.Add(trainer);
            var model     = pipeline.Train <MLNetData, MLNetPredict>();
            var evaluator = new BinaryClassificationEvaluator();
            var metrics   = evaluator.Evaluate(model, dataTest);

            return(metrics.Accuracy, metrics.Auc, metrics.F1Score, model);
        }
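A hypothetical call site for this helper, deconstructing the returned tuple; the file paths, separators, and the FastTreeBinaryClassifier trainer are assumptions for illustration:

        // Hypothetical usage sketch; paths and trainer choice are assumptions.
        var trainLoader = new TextLoader("train.csv").CreateFrom<MLNetData>(useHeader: true, separator: ',');
        var testLoader  = new TextLoader("test.csv").CreateFrom<MLNetData>(useHeader: true, separator: ',');

        var (accuracy, auc, f1, model) = TrainAndGetMetrics(trainLoader, testLoader, new FastTreeBinaryClassifier());
        Console.WriteLine($"Accuracy: {accuracy:P2}  Auc: {auc:P2}  F1Score: {f1:P2}");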
Example #19
        public static void Evaluate(PredictionModel <DiabetesData, DiabetesPrediction> model)
        {
            var testData  = new TextLoader(_testDataPath).CreateFrom <DiabetesData>(separator: ',');
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #20
        public static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model)
        {
            var testData  = new TextLoader <SentimentData>(_testDataPath, useHeader: false, separator: "tab");
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #21
        static void Main(string[] args)
        {
            var pipeline = new LearningPipeline();

            var loader = new TextLoader(dataPath).CreateFrom <SentimentData>(useHeader: true, '\t');

            pipeline.Add(loader);

            pipeline.Add(new TextFeaturizer("Features", "SentimentText")
            {
                StopWordsRemover = new PredefinedStopWordsRemover(),
                KeepPunctuations = false,
                TextCase         = TextNormalizerTransformCaseNormalizationMode.Lower,
                VectorNormalizer = TextTransformTextNormKind.L2
            });

            pipeline.Add(new StochasticDualCoordinateAscentBinaryClassifier()
            {
                NumThreads = 8, Shuffle = true, NormalizeFeatures = NormalizeOption.Yes
            });

            PredictionModel <SentimentData, SentimentPrediction> model = pipeline.Train <SentimentData, SentimentPrediction>();

            IEnumerable <SentimentData> sentiments = new[]
            {
                new SentimentData
                {
                    SentimentText = "I hated the movie."
                },
                new SentimentData
                {
                    SentimentText = "The movie was entertaining the whole time, i really enjoyed it."
                }
            };

            IEnumerable <SentimentPrediction> predictions = model.Predict(sentiments);

            foreach (var item in predictions)
            {
                Console.WriteLine($"Prediction: {(item.Sentiment ? "Positive" : "Negative")}");
            }

            var evaluatorTrained = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metricsTrained = evaluatorTrained.Evaluate(model, loader);

            Console.WriteLine("ACCURACY OF MODEL ON TRAINED DATA: " + metricsTrained.Accuracy);

            model.WriteAsync(trainedModelPath);

            Console.Read();
        }
Example #22
        static void Main(string[] args)
        {
            var trainDataPath = Path.Combine(Environment.CurrentDirectory, "Data", "requestClassifier-trainData.tsv");
            var testDataPath  = Path.Combine(Environment.CurrentDirectory, "Data", "requestClassifier-testData.tsv");
            var modelPath     = Path.Combine(Environment.CurrentDirectory, "Data", "Model.zip");

            Console.WriteLine("Welcome! Let's predict which department to forward each requests to. As of now we have 2 departments: Administration and Registration");

            Console.WriteLine("Initialize pipeline by loading training data, editing metadata, and selecting ML algorithm");
            var pipeline = new LearningPipeline()
            {
                new TextLoader(trainDataPath).CreateFrom <UserRequest>(useHeader: true),
                new TextFeaturizer("Features", "Question"),
                new FastTreeBinaryClassifier()
                {
                    NumLeaves = 5, NumTrees = 5, MinDocumentsInLeafs = 2
                }
            };

            Console.WriteLine("Let's train our model with all the specs in our learning pipeline and we'll write it to model to disk");
            var model = pipeline.Train <UserRequest, DepartmentPrepiction>();

            model.WriteAsync(modelPath).Wait();

            Console.WriteLine("Let's test our model with test data to see exactly how it performs");
            var testData  = new TextLoader(testDataPath).CreateFrom <UserRequest>(useHeader: true);
            var evaluator = new BinaryClassificationEvaluator();
            var metrics   = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");

            Console.WriteLine("Now let's try to use our model to with live data");
            do
            {
                Console.WriteLine("Ask a question now");
                string question = Console.ReadLine();

                var prediction = model.Predict(new UserRequest {
                    Question = question
                });
                model.TryGetScoreLabelNames(out string[] data);
                Console.WriteLine($"Predicted Department: {prediction}");
                Console.WriteLine("Press <ENTER> to continue");
            }while (Console.ReadKey().Key == ConsoleKey.Enter);
        }
Example #23
        private static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model, string name)
        {
            var testData  = new TextLoader(TestDataPath).CreateFrom <SentimentData>();
            var evaluator = new BinaryClassificationEvaluator();

            Console.WriteLine("=============== Evaluating model {0} ===============", name);
            var metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End evaluating ===============");
            Console.WriteLine();
        }
Example #24
        public static void Evalua(PredictionModel <DatosSentimiento, PrediccSentimiento> modelo)
        {
            var datosPrueba = new TextLoader <DatosSentimiento>(_rutaDatosPrueba, useHeader: false, separator: "tab");
            var evaluador   = new BinaryClassificationEvaluator();                          // Creates the evaluator that computes the metrics.
            BinaryClassificationMetrics metricas = evaluador.Evaluate(modelo, datosPrueba); // modelo is the trained prediction model being evaluated.

            Console.WriteLine();
            Console.WriteLine("Evaluación de métricas de calidad del Modelo de Predicción");
            Console.WriteLine("---------------------------------");
            Console.WriteLine($"Precisión: {metricas.Accuracy:P2}"); //La presicion indica que tan acertado ha sido el algoritmo durante la prediccion
            Console.WriteLine($"AUC: {metricas.Auc:P2}");            //Medida del rendimiento para´problemas de clasificacion binaria (1.0 correcto)
            Console.WriteLine($"Log-loss: {metricas.LogLoss:P2}");
            Console.WriteLine($"F1SCore: {metricas.F1Score:P2}");
            // An AUC of 78% means a randomly chosen positive example is ranked above a randomly chosen negative one about 78% of the time.
        }
Example #25
        public void Evaluate(PredictionModel <NrlResult, ClusterPrediction> model,
                             IEnumerable <NrlResult> nrlResults)
        {
            var testData = CollectionDataSource.Create(nrlResults);

            var evaluator = new BinaryClassificationEvaluator();

            Console.WriteLine("=============== Evaluating model ===============");

            var metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            Console.WriteLine("=============== End evaluating ===============");
            Console.WriteLine();
        }
Example #26
        /// <summary>
        /// Evaluates trained machine learning model for text sentiment analysis.
        /// </summary>
        /// <param name="trainedModel">Trained machine learning model for text sentiment analysis.</param>
        /// <returns>Overall metrics of trained machine learning model for text sentiment analysis.</returns>
        public BinaryClassificationMetrics Evaluate(PredictionModel <DataModel, PredictionModel> trainedModel)
        {
            // 1) Load test data.
            var testModels = new TextLoader(Constants.TestDataFileLocation).CreateFrom <DataModel>();

            // 2) Evaluate trained model.
            var modelMetrics = new BinaryClassificationEvaluator().Evaluate(trainedModel, testModels);

            Console.WriteLine($"*************************************************");
            Console.WriteLine("Prediction model quality metrics after evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {modelMetrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {modelMetrics.Auc:P2}");
            Console.WriteLine($"F1Score: {modelMetrics.F1Score:P2}");
            Console.WriteLine($"*************************************************");

            return(modelMetrics);
        }
Example #27
        static void Main(string[] args)
        {
            //1. Build an ML.NET pipeline for training a sentiment analysis model
            Console.WriteLine("Training a model for Sentiment Analysis using ML.NET");
            var pipeline = new LearningPipeline();

            // 1a. Load the training data using a TextLoader.
            pipeline.Add(new TextLoader(@"..\..\..\Data\wikipedia-detox-250-line-data.tsv").CreateFrom <SentimentData>(useHeader: true));

            // 1b. Featurize the text into a numeric vector that can be used by the machine learning algorithm.
            pipeline.Add(new TextFeaturizer("Features", "SentimentText"));

            // 1c. Add AveragedPerceptron (a linear learner) to the pipeline.
            pipeline.Add(new AveragedPerceptronBinaryClassifier()
            {
                NumIterations = 10
            });

            // 1d. Get a model by training the pipeline that was built.
            PredictionModel <SentimentData, SentimentPrediction> model =
                pipeline.Train <SentimentData, SentimentPrediction>();

            // 2. Evaluate the model to see how well it performs on different data (output the percent of examples classified correctly).
            Console.WriteLine("Training of model is complete \nTesting the model with test data");
            var testData  = new TextLoader(@"..\..\..\Data\wikipedia-detox-250-line-test.tsv").CreateFrom <SentimentData>(useHeader: true);
            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine($"Accuracy of trained model for test data is: {metrics.Accuracy:P2}");

            // 3. Save the model to file so it can be used in another app.
            model.WriteAsync("sentiment_model.zip");

            // 4. Use the model for a single prediction.
            SentimentData testInput = new SentimentData {
                SentimentText = "ML.NET is fun, more samples at https://github.com/dotnet/machinelearning-samples"
            };
            var sentiment = (model.Predict(testInput).Sentiment == true) ? "Positive" : "Negative";

            /* This template uses a minimal dataset to build a sentiment analysis model which leads to relatively low accuracy.
             * In order to build a sentiment analysis model with higher accuracy please follow the walkthrough at https://aka.ms/mlnetsentimentanalysis*/
            Console.WriteLine("Predicted sentiment for \"" + testInput.SentimentText + "\" is:" + sentiment);
            Console.ReadKey();
        }
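Step 3 above saves the model so it can be used in another app. A minimal sketch of reloading it with the same legacy API (the .Result call just keeps the sketch synchronous; the file name matches the WriteAsync call above):

        // Reload the saved model and score a single example.
        var loadedModel = PredictionModel.ReadAsync<SentimentData, SentimentPrediction>("sentiment_model.zip").Result;
        var input  = new SentimentData { SentimentText = "This is a great sample." };
        var output = loadedModel.Predict(input);
        Console.WriteLine(output.Sentiment ? "Positive" : "Negative");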
Example #28
        public static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model, IMongoDatabase db)
        {
            // Evaluates the model against test data loaded from MongoDB.
            // <Snippet13>
            var collection = db.GetCollection <SentimentData>("review_test");
            var documents  = collection.Find <SentimentData>(new BsonDocument()).ToEnumerable();
            var testData   = CollectionDataSource.Create(documents);
            // </Snippet13>

            // BinaryClassificationEvaluator computes the quality metrics for the PredictionModel
            // using the specified data set.
            // <Snippet14>
            var evaluator = new BinaryClassificationEvaluator();
            // </Snippet14>

            // BinaryClassificationMetrics contains the overall metrics computed by binary classification evaluators.
            // <Snippet15>
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            // </Snippet15>

            // The Accuracy metric gets the accuracy of a classifier, which is the proportion
            // of correct predictions in the test set.

            // The Auc metric gets the area under the ROC curve.
            // The area under the ROC curve is equal to the probability that the classifier ranks
            // a randomly chosen positive instance higher than a randomly chosen negative one
            // (assuming 'positive' ranks higher than 'negative').

            // The F1Score metric gets the classifier's F1 score.
            // The F1 score is the harmonic mean of precision and recall:
            //  2 * precision * recall / (precision + recall).

            // <Snippet16>
            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
            // </Snippet16>
        }
Example #29
        public static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model)
        {
            // Loads the test dataset.
            var testData = new TextLoader(_testDataPath).CreateFrom <SentimentData>();

            // Creates the binary evaluator.
            var evaluator = new BinaryClassificationEvaluator();

            // Evaluates the model and creates the metrics, which are displayed below.
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc: {metrics.Auc:P2}");
            Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
Example #30
        static void Main(string[] args)
        {
            string trainSetPath = "train_data.csv";
            string testSetPath  = "test_data.csv";

            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader <Passenger>(trainSetPath, useHeader: true, separator: ","));

            pipeline.Add(new ColumnDropper()
            {
                Column = new string[] { "Cabin", "Ticket" }
            });

            pipeline.Add(new MissingValueSubstitutor(new string[] { "Age" })
            {
                ReplacementKind = NAReplaceTransformReplacementKind.Mean
            });

            pipeline.Add(new CategoricalOneHotVectorizer("Sex", "Embarked"));

            pipeline.Add(new ColumnConcatenator(
                             "Features", "Age", "Pclass", "SibSp", "Parch", "Sex", "Embarked"));

            pipeline.Add(new FastTreeBinaryClassifier());

            var model = pipeline.Train <Passenger, PredictedData>();

            var testLoader = new TextLoader <Passenger>(testSetPath, useHeader: true, separator: ",");

            var evaluator = new BinaryClassificationEvaluator();

            var metrics = evaluator.Evaluate(model, testLoader);

            Console.WriteLine($"Accuracy: {metrics.Accuracy} F1 Score: {metrics.F1Score}");

            Console.WriteLine($"True Positive: {metrics.ConfusionMatrix[0, 0]} False Positive: {metrics.ConfusionMatrix[0, 1]}");
            Console.WriteLine($"False Negative: {metrics.ConfusionMatrix[1, 0]} True Negative: {metrics.ConfusionMatrix[1, 1]}");
        }