/// <summary>
/// Trains a Naive Bayes multiclass model on the 2018 Bills data set and
/// predicts the game outcome for the conditions the user entered on the form.
/// </summary>
/// <param name="bvm">User-supplied game conditions (quarterback, location, points scored, top receiver/runner).</param>
/// <returns>A plain-text response naming the most likely outcome.</returns>
public IActionResult Index(BillsViewModel bvm)
{
    // Fixed seed keeps training deterministic across requests.
    MLContext mlContext = new MLContext(seed: 9997);
    BillsModelTrainer bmt = new BillsModelTrainer();
    var data = bmt.GetRawData(mlContext, "2018Bills.csv");
    var trainer = mlContext.MulticlassClassification.Trainers.NaiveBayes(labelColumnName: "Label", featureColumnName: "Features");

    // NOTE(review): training a model on every request is expensive; consider
    // training once and caching the model/engine in a singleton service.
    var model = bmt.TrainModel(mlContext, data, trainer);

    // PredictionEngineBase is IDisposable — dispose it so its internal
    // transformer resources are released when the request completes.
    using (PredictionEngineBase<RawInput, Prediction> predictor =
        mlContext.Model.CreatePredictionEngine<RawInput, Prediction>(model))
    {
        var outcome = predictor.Predict(new RawInput
        {
            Game = 0,
            Quarterback = bvm.Quarterback,
            Location = bvm.Location.ToString(),
            NumberOfPointsScored = bvm.NumberOfPointsScored,
            TopReceiver = bvm.TopReceiver,
            TopRunner = bvm.TopRunner,
            // Post-game stats are unknown at prediction time, so zero them out;
            // the label is a placeholder the model ignores during Predict.
            NumberOfSacks = 0,
            NumberOfDefensiveTurnovers = 0,
            MinutesPossession = 0,
            Outcome = "WHO KNOWS?"
        });

        return Content($"Under these conditions, the most likely outcome is a {outcome.Outcome.ToLower()}.");
    }
}
/// <summary>
/// Per-test setup: creates a seeded MLContext, loads the raw 2018 Bills data,
/// splits it 75/25 into train/test partitions, trains a Naive Bayes model on
/// the training partition, and builds a prediction engine from that model.
/// </summary>
public void Setup()
{
    mlContext = new MLContext(seed: 9997);
    bmt = new BillsModelTrainer();

    var rawData = bmt.GetRawData(mlContext, "Resources\\2018Bills.csv");
    var partitions = mlContext.Data.TrainTestSplit(rawData, testFraction: 0.25);

    trainer = mlContext.MulticlassClassification.Trainers.NaiveBayes(labelColumnName: "Label", featureColumnName: "Features");
    model = bmt.TrainModel(mlContext, partitions.TrainSet, trainer);
    predictor = mlContext.Model.CreatePredictionEngine<RawInput, Prediction>(model);
}
/// <summary>
/// Trains a model with the named trainer on a 60/40 train/test split of the
/// 2018 Bills data, evaluates it on the test partition, prints accuracy and
/// the confusion matrix, and asserts that macro accuracy is non-zero.
/// </summary>
/// <param name="trainerToUse">One of "Naive Bayes", "L-BFGS", or "SDCA Non-Calibrated"; any other value falls back to Naive Bayes.</param>
public void BasicEvaluationTest(string trainerToUse)
{
    mlContext = new MLContext(seed: 9997);
    bmt = new BillsModelTrainer();
    var data = bmt.GetRawData(mlContext, "Resources\\2018Bills.csv");
    var split = mlContext.Data.TrainTestSplit(data, testFraction: 0.4);

    // To inspect the split rows, materialize them with
    // mlContext.Data.CreateEnumerable<RawInput>(split.TrainSet, reuseRowObject: false).

    // "Naive Bayes" and the fallback share one arm; the duplicate case in the
    // original was merged here.
    IEstimator<ITransformer> newTrainer;
    switch (trainerToUse)
    {
        case "L-BFGS":
            newTrainer = mlContext.MulticlassClassification.Trainers.LbfgsMaximumEntropy(labelColumnName: "Label", featureColumnName: "Features");
            break;
        case "SDCA Non-Calibrated":
            newTrainer = mlContext.MulticlassClassification.Trainers.SdcaNonCalibrated(labelColumnName: "Label", featureColumnName: "Features");
            break;
        case "Naive Bayes":
        default:
            newTrainer = mlContext.MulticlassClassification.Trainers.NaiveBayes(labelColumnName: "Label", featureColumnName: "Features");
            break;
    }

    var newModel = bmt.TrainModel(mlContext, split.TrainSet, newTrainer);
    var metrics = mlContext.MulticlassClassification.Evaluate(newModel.Transform(split.TestSet));

    Console.WriteLine($"Macro Accuracy = {metrics.MacroAccuracy}; Micro Accuracy = {metrics.MicroAccuracy}");
    Console.WriteLine($"Confusion Matrix with {metrics.ConfusionMatrix.NumberOfClasses} classes.");
    Console.WriteLine($"{metrics.ConfusionMatrix.GetFormattedConfusionTable()}");

    Assert.AreNotEqual(0, metrics.MacroAccuracy);
}