Example #1
        private ClassificationMetrics Evaluate(IHostEnvironment env, IDataView scoredData)
        {
            var dataEval = new RoleMappedData(scoredData, label: "Label", feature: "Features", opt: true);

            // Evaluate.
            // This does not work: it throws "Failed to find 'Score' column" when Evaluate is called.
            //var evaluator = new MultiClassClassifierEvaluator(env, new MultiClassClassifierEvaluator.Arguments() { OutputTopKAcc = 3 });

            IMamlEvaluator evaluator = new MultiClassMamlEvaluator(env, new MultiClassMamlEvaluator.Arguments()
            {
                OutputTopKAcc = 3
            });
            var metricsDic = evaluator.Evaluate(dataEval);

            return(ClassificationMetrics.FromMetrics(env, metricsDic["OverallMetrics"], metricsDic["ConfusionMatrix"])[0]);
        }
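The helper above returns the multiclass metrics for an already-scored IDataView. A minimal sketch of a caller, assuming the legacy (pre-1.0) ML.NET API where an IHostEnvironment can be created via ConsoleEnvironment and `scoredData` already carries the Score and PredictedLabel columns produced by a trained multiclass scorer:

        using (var env = new ConsoleEnvironment())
        {
            ClassificationMetrics metrics = Evaluate(env, scoredData);

            Console.WriteLine($"Micro-accuracy: {metrics.AccuracyMicro:0.###}");
            Console.WriteLine($"Macro-accuracy: {metrics.AccuracyMacro:0.###}");
            Console.WriteLine($"Top-3 accuracy: {metrics.TopKAccuracy:0.###}");
        }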
Example #2
        private static async Task TrainAsync()
        {
            Console.WriteLine("============ 準備訓練資料 ==============");

            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader(DataPath).CreateFrom <GitHubIssue>(useHeader: true));

            // Turn the two training text columns into numeric feature vectors
            pipeline.Add(new TextFeaturizer("Title", "Title"));
            pipeline.Add(new TextFeaturizer("Description", "Description"));

            // Map the Area column to a dictionary key and use it as the Label
            pipeline.Add(new Dictionarizer(("Area", "Label")));

            // Concatenate Title and Description into the Features column used for training
            pipeline.Add(new ColumnConcatenator("Features", "Title", "Description"));

            // Use the StochasticDualCoordinateAscent algorithm
            pipeline.Add(new StochasticDualCoordinateAscentClassifier());

            // Convert the predicted numeric label back to its original text value
            pipeline.Add(new PredictedLabelColumnOriginalValueConverter()
            {
                PredictedLabelColumn = "PredictedLabel"
            });


            Console.WriteLine("=============== 訓練模型 ===============");

            var model = pipeline.Train <GitHubIssue, GithubIssueLabelPrediction>();

            var testData = new TextLoader(TestPath).CreateFrom <GitHubIssue>(useHeader: true);

            var evaluator = new ClassificationEvaluator();

            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("Micro-Accuracy: {0}", metrics.AccuracyMicro);

            await model.WriteAsync(ModelPath);

            Console.WriteLine("=============== 訓練完成 ===============");
            Console.WriteLine("Model路徑: {0}", ModelPath);
        }
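A minimal sketch of consuming the model saved by TrainAsync, using the legacy PredictionModel.ReadAsync API. The GitHubIssue fields match the columns used above; the name of the predicted-label property on GithubIssueLabelPrediction (assumed here to be "Area") depends on how that class is declared:

        private static async Task PredictAsync()
        {
            var model = await PredictionModel.ReadAsync<GitHubIssue, GithubIssueLabelPrediction>(ModelPath);

            var prediction = model.Predict(new GitHubIssue
            {
                Title       = "WebView control crashes on navigation",
                Description = "The app throws a NullReferenceException when navigating back."
            });

            Console.WriteLine("Predicted area: {0}", prediction.Area); // assumed property name
        }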
Example #3
        /// <param name="dataSet"> training set used for error estimation </param>
        /// <returns> neural network model with optimized architecture for provided data set </returns>
        public virtual NeuralNetwork createOptimalModel(DataSet dataSet)
        {
            List <int> neurons = new List <int>();

            neurons.Add(minNeuronsPerLayer);
            findArchitectures(1, minNeuronsPerLayer, neurons);

            LOG.info("Total [{}] different network topologies found", allArchitectures.Count);

            foreach (List <int> architecture in allArchitectures)
            {
                architecture.Insert(0, dataSet.InputSize);
                architecture.Add(dataSet.OutputSize);

                LOG.info("Architecture: [{}]", architecture);

                MultiLayerPerceptron network  = new MultiLayerPerceptron(architecture);
                LearningListener     listener = new LearningListener(10, learningRule.MaxIterations);
                learningRule.addListener(listener);
                network.LearningRule = learningRule;

                errorEstimationMethod = new CrossValidation(network, dataSet, 10);
                errorEstimationMethod.run();
                // FIX
                var evaluator = errorEstimationMethod.getEvaluator <ClassifierEvaluator.MultiClass>(typeof(ClassifierEvaluator.MultiClass));

                ClassificationMetrics[] result = ClassificationMetrics.createFromMatrix(evaluator.Result);

                // keep the architecture with the highest F-measure seen so far
                if (optimalResult == null || optimalResult.FMeasure < result[0].FMeasure)
                {
                    LOG.info("Architecture [{}] became the optimal architecture with metrics {}", architecture, result);
                    optimalResult      = result[0];
                    optimalClassifier  = network;
                    optimalArchitecure = architecture;
                }

                LOG.info("#################################################################");
            }


            LOG.info("Optimal Architecture: {}", optimalArchitecure);
            return(optimalClassifier);
        }
Example #4
        public static void Evaluate(PredictionModel <MNISTData, MNISTPrediction> model)
        {
            // Create a loader for the test data file
            var testData = new TextLoader(_testDataPath)
            {
                Arguments = new TextLoaderArguments
                {
                    Separator = new[] { ',' },
                    HasHeader = true,
                    Column    = new[]
                    {
                        new TextLoaderColumn()
                        {
                            Name   = "Label",
                            Source = new [] { new TextLoaderRange(0) },
                            Type   = DataKind.Num
                        },

                        new TextLoaderColumn()
                        {
                            Name   = "Features",
                            Source = new [] { new TextLoaderRange(1, 784) },
                            Type   = DataKind.Num
                        }
                    }
                }
            };

            var evaluator = new ClassificationEvaluator();

            // Evaluate the model on the test data; the resulting statistics are stored in metrics
            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");

            // Two examples of useful statistics: macro-accuracy and the confusion matrix
            Console.WriteLine("Macro Acc : {0}", metrics.AccuracyMacro);
            Console.WriteLine("----------------------------------");
            Console.WriteLine("Confusion Matrix");
            PrintMatrix(metrics.ConfusionMatrix);
        }
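The Evaluate method above calls a PrintMatrix helper that is not shown. A minimal sketch of what it might look like, using only the ConfusionMatrix members exercised by the test methods below (Order, ClassNames, and the [row, column] indexer):

        private static void PrintMatrix(ConfusionMatrix matrix)
        {
            // Header row with the class names.
            Console.Write("          ");
            for (int j = 0; j < matrix.Order; j++)
            {
                Console.Write("{0,10}", matrix.ClassNames[j]);
            }
            Console.WriteLine();

            // One row per actual class, one column per predicted class.
            for (int i = 0; i < matrix.Order; i++)
            {
                Console.Write("{0,10}", matrix.ClassNames[i]);
                for (int j = 0; j < matrix.Order; j++)
                {
                    Console.Write("{0,10}", matrix[i, j]);
                }
                Console.WriteLine();
            }
        }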
Example #5
        private void CompareMatrics(ClassificationMetrics metrics)
        {
            Assert.Equal(.98, metrics.AccuracyMacro);
            Assert.Equal(.98, metrics.AccuracyMicro, 2);
            Assert.Equal(.06, metrics.LogLoss, 2);
            Assert.InRange(metrics.LogLossReduction, 94, 96);
            Assert.Equal(1, metrics.TopKAccuracy);

            Assert.Equal(3, metrics.PerClassLogLoss.Length);
            Assert.Equal(0, metrics.PerClassLogLoss[0], 1);
            Assert.Equal(.1, metrics.PerClassLogLoss[1], 1);
            Assert.Equal(.1, metrics.PerClassLogLoss[2], 1);

            ConfusionMatrix matrix = metrics.ConfusionMatrix;

            Assert.Equal(3, matrix.Order);
            Assert.Equal(3, matrix.ClassNames.Count);
            Assert.Equal("0", matrix.ClassNames[0]);
            Assert.Equal("1", matrix.ClassNames[1]);
            Assert.Equal("2", matrix.ClassNames[2]);

            Assert.Equal(50, matrix[0, 0]);
            Assert.Equal(50, matrix["0", "0"]);
            Assert.Equal(0, matrix[0, 1]);
            Assert.Equal(0, matrix["0", "1"]);
            Assert.Equal(0, matrix[0, 2]);
            Assert.Equal(0, matrix["0", "2"]);

            Assert.Equal(0, matrix[1, 0]);
            Assert.Equal(0, matrix["1", "0"]);
            Assert.Equal(48, matrix[1, 1]);
            Assert.Equal(48, matrix["1", "1"]);
            Assert.Equal(2, matrix[1, 2]);
            Assert.Equal(2, matrix["1", "2"]);

            Assert.Equal(0, matrix[2, 0]);
            Assert.Equal(0, matrix["2", "0"]);
            Assert.Equal(1, matrix[2, 1]);
            Assert.Equal(1, matrix["2", "1"]);
            Assert.Equal(49, matrix[2, 2]);
            Assert.Equal(49, matrix["2", "2"]);
        }
Example #6
        public static async Task Main(List <User> patient)
        {
            //call Train method
            PredictionModel <FoodCustomClass, ClassPrediction> model = Train();
            await model.WriteAsync(_modelPath);

            //Call FoodClassificationTest class for prediction
            var prediction = model.Predict(FoodClassificationTest.FoodClassification());

            //Store each food in a list
            List <FoodCustomClass> foods = FoodClassificationTest.FoodClassification();

            var testData  = new TextLoader(_testdataPath).CreateFrom <FoodCustomClass>(separator: '*');
            var evaluator = new ClassificationEvaluator();
            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            // Displaying the metrics for model validation
            System.Diagnostics.Debug.WriteLine("PredictionModel quality metrics evaluation");
            System.Diagnostics.Debug.WriteLine("------------------------------------------");
            System.Diagnostics.Debug.WriteLine($"*       MicroAccuracy:    {metrics.AccuracyMicro:0.###}");
            System.Diagnostics.Debug.WriteLine($"*       MacroAccuracy:    {metrics.AccuracyMacro:0.###}");
            System.Diagnostics.Debug.WriteLine($"*       LogLoss:          {metrics.LogLoss:#.###}");
            System.Diagnostics.Debug.WriteLine($"*       LogLossReduction: {metrics.LogLossReduction:#.###}");


            //Combine List foods with their respective results after classification

            /*
             * foodsAndClassification contains each food from the FoodDB database, classified by
             * the quantity of each macronutrient present
             */
            var foodsAndClassification = foods.Zip(prediction, (n, w) => new { Food = n, Category = w });

            // Build the decision tree by passing foodsAndClassification and the patient data to ID3Tree.ID3
            var function = new ID3Tree().ID3(foodsAndClassification, patient);

            //foreach (var nw in foodsAndClassification)
            //{
            //    System.Diagnostics.Debug.WriteLine($"{nw.Food.Food}: {nw.Category.Predicted}" +"\n" + $"{float.Parse(nw.Category.Score.First().ToString(), CultureInfo.InvariantCulture.NumberFormat)}");
            //}
        }
Example #7
        public void SetupPredictBenchmarks()
        {
            _trainedModel = Train(_dataPath);
            _consumer.Consume(_trainedModel.Predict(_example));

            var testData  = new Legacy.Data.TextLoader(_dataPath).CreateFrom <IrisData>(useHeader: true);
            var evaluator = new ClassificationEvaluator();

            _metrics = evaluator.Evaluate(_trainedModel, testData);

            _batches = new IrisData[_batchSizes.Length][];
            for (int i = 0; i < _batches.Length; i++)
            {
                var batch = new IrisData[_batchSizes[i]];
                _batches[i] = batch;
                for (int bi = 0; bi < batch.Length; bi++)
                {
                    batch[bi] = _example;
                }
            }
        }
Example #8
        //test the model
        public static void Evaluate(PredictionModel <SentimentData, SentimentPrediction> model)
        {
            //load the test data
            var testData = new TextLoader(_testDataPath).CreateFrom <SentimentData>(useHeader: true);

            //computes the quality metrics for the PredictionModel
            var evaluator = new ClassificationEvaluator();

            // Get the metrics computed by the multiclass classification evaluator
            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine();
            Console.WriteLine("PredictionModel quality metrics evaluation");
            Console.WriteLine("------------------------------------------");
            Console.WriteLine($"LogLoss: { metrics.LogLoss:P2}");
            //Console.WriteLine($"ConfusionMatrix: { metrics.ConfusionMatrix:P2}");
            Console.WriteLine($"AccuracyMicro: { metrics.AccuracyMicro:P2}");
            Console.WriteLine($"Accuracy: {metrics.AccuracyMacro:P2}");
            //Console.WriteLine($"Auc: {metrics.Auc:P2}");
            //Console.WriteLine($"F1Score: {metrics.F1Score:P2}");
        }
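SentimentData is a binary (positive/negative) problem, which is why the Auc and F1Score lines above are commented out: the multiclass ClassificationMetrics type does not expose them. A minimal sketch using the legacy BinaryClassificationEvaluator instead, assuming the model was trained with a binary classification learner:

        public static void EvaluateBinary(PredictionModel<SentimentData, SentimentPrediction> model)
        {
            var testData = new TextLoader(_testDataPath).CreateFrom<SentimentData>(useHeader: true);

            var evaluator = new BinaryClassificationEvaluator();
            BinaryClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine($"Accuracy: {metrics.Accuracy:P2}");
            Console.WriteLine($"Auc:      {metrics.Auc:P2}");
            Console.WriteLine($"F1Score:  {metrics.F1Score:P2}");
        }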
Example #9
        public void Setup()
        {
            s_dataPath     = Program.GetDataPath("iris.txt");
            s_trainedModel = TrainCore();
            IrisPrediction prediction = s_trainedModel.Predict(s_example);

            var testData  = new TextLoader(s_dataPath).CreateFrom <IrisData>(useHeader: true);
            var evaluator = new ClassificationEvaluator();

            s_metrics = evaluator.Evaluate(s_trainedModel, testData);

            s_batches = new IrisData[s_batchSizes.Length][];
            for (int i = 0; i < s_batches.Length; i++)
            {
                var batch = new IrisData[s_batchSizes[i]];
                s_batches[i] = batch;
                for (int bi = 0; bi < batch.Length; bi++)
                {
                    batch[bi] = s_example;
                }
            }
        }
Example #10
        void Metacomponents()
        {
            var dataPath = GetDataPath(IrisDataPath);
            var pipeline = new LearningPipeline(seed: 1, conc: 1);

            pipeline.Add(new TextLoader(dataPath).CreateFrom <IrisData>(useHeader: false));
            pipeline.Add(new Dictionarizer(new[] { "Label" }));
            pipeline.Add(new ColumnConcatenator(outputColumn: "Features",
                                                "SepalLength", "SepalWidth", "PetalLength", "PetalWidth"));

            // This will throw an exception at training time if you specify anything other than a binary classifier.
            pipeline.Add(OneVersusAll.With(new StochasticDualCoordinateAscentBinaryClassifier()));

            var model = pipeline.Train <IrisData, IrisPrediction>();

            var testData  = new TextLoader(dataPath).CreateFrom <IrisData>(useHeader: false);
            var evaluator = new ClassificationEvaluator();
            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            var prediction = model.Predict(new IrisData {
                PetalLength = 1, PetalWidth = 2, SepalLength = 1.4f, SepalWidth = 1.6f
            });
        }
Example #11
        public void TrainAndPredictIrisModelTest()
        {
            string dataPath = GetDataPath("iris.txt");

            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader <IrisData>(dataPath, useHeader: false, separator: "tab"));
            pipeline.Add(new ColumnConcatenator(outputColumn: "Features",
                                                "SepalLength", "SepalWidth", "PetalLength", "PetalWidth"));

            pipeline.Add(new StochasticDualCoordinateAscentClassifier());

            PredictionModel <IrisData, IrisPrediction> model = pipeline.Train <IrisData, IrisPrediction>();

            IrisPrediction prediction = model.Predict(new IrisData()
            {
                SepalLength = 3.3f,
                SepalWidth  = 1.6f,
                PetalLength = 0.2f,
                PetalWidth  = 5.1f,
            });

            Assert.Equal(1, prediction.PredictedLabels[0], 2);
            Assert.Equal(0, prediction.PredictedLabels[1], 2);
            Assert.Equal(0, prediction.PredictedLabels[2], 2);

            prediction = model.Predict(new IrisData()
            {
                SepalLength = 3.1f,
                SepalWidth  = 5.5f,
                PetalLength = 2.2f,
                PetalWidth  = 6.4f,
            });

            Assert.Equal(0, prediction.PredictedLabels[0], 2);
            Assert.Equal(0, prediction.PredictedLabels[1], 2);
            Assert.Equal(1, prediction.PredictedLabels[2], 2);

            prediction = model.Predict(new IrisData()
            {
                SepalLength = 3.1f,
                SepalWidth  = 2.5f,
                PetalLength = 1.2f,
                PetalWidth  = 4.4f,
            });

            Assert.Equal(.2, prediction.PredictedLabels[0], 1);
            Assert.Equal(.8, prediction.PredictedLabels[1], 1);
            Assert.Equal(0, prediction.PredictedLabels[2], 2);

            // Note: Testing against the same data set as a simple way to test evaluation.
            // This isn't appropriate in real-world scenarios.
            string testDataPath = GetDataPath("iris.txt");
            var    testData     = new TextLoader <IrisData>(testDataPath, useHeader: false, separator: "tab");

            var evaluator = new ClassificationEvaluator();

            evaluator.OutputTopKAcc = 3;
            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Assert.Equal(.98, metrics.AccuracyMacro);
            Assert.Equal(.98, metrics.AccuracyMicro, 2);
            Assert.Equal(.06, metrics.LogLoss, 2);
            Assert.InRange(metrics.LogLossReduction, 94, 96);
            Assert.Equal(1, metrics.TopKAccuracy);

            Assert.Equal(3, metrics.PerClassLogLoss.Length);
            Assert.Equal(0, metrics.PerClassLogLoss[0], 1);
            Assert.Equal(.1, metrics.PerClassLogLoss[1], 1);
            Assert.Equal(.1, metrics.PerClassLogLoss[2], 1);

            ConfusionMatrix matrix = metrics.ConfusionMatrix;

            Assert.Equal(3, matrix.Order);
            Assert.Equal(3, matrix.ClassNames.Count);
            Assert.Equal("0", matrix.ClassNames[0]);
            Assert.Equal("1", matrix.ClassNames[1]);
            Assert.Equal("2", matrix.ClassNames[2]);

            Assert.Equal(50, matrix[0, 0]);
            Assert.Equal(50, matrix["0", "0"]);
            Assert.Equal(0, matrix[0, 1]);
            Assert.Equal(0, matrix["0", "1"]);
            Assert.Equal(0, matrix[0, 2]);
            Assert.Equal(0, matrix["0", "2"]);

            Assert.Equal(0, matrix[1, 0]);
            Assert.Equal(0, matrix["1", "0"]);
            Assert.Equal(48, matrix[1, 1]);
            Assert.Equal(48, matrix["1", "1"]);
            Assert.Equal(2, matrix[1, 2]);
            Assert.Equal(2, matrix["1", "2"]);

            Assert.Equal(0, matrix[2, 0]);
            Assert.Equal(0, matrix["2", "0"]);
            Assert.Equal(1, matrix[2, 1]);
            Assert.Equal(1, matrix["2", "1"]);
            Assert.Equal(49, matrix[2, 2]);
            Assert.Equal(49, matrix["2", "2"]);
        }
Example #12
        public void TrainAndPredictIrisModelWithStringLabelTest()
        {
            string dataPath = GetDataPath("iris.data");

            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader(dataPath).CreateFrom <IrisDataWithStringLabel>(useHeader: false, separator: ','));

            pipeline.Add(new Dictionarizer("Label"));  // "IrisPlantType" is used as "Label" because of column attribute name on the field.

            pipeline.Add(new ColumnConcatenator(outputColumn: "Features",
                                                "SepalLength", "SepalWidth", "PetalLength", "PetalWidth"));

            pipeline.Add(new StochasticDualCoordinateAscentClassifier());

            PredictionModel <IrisDataWithStringLabel, IrisPrediction> model = pipeline.Train <IrisDataWithStringLabel, IrisPrediction>();

            IrisPrediction prediction = model.Predict(new IrisDataWithStringLabel()
            {
                SepalLength = 3.3f,
                SepalWidth  = 1.6f,
                PetalLength = 0.2f,
                PetalWidth  = 5.1f,
            });

            Assert.Equal(1, prediction.PredictedLabels[0], 2);
            Assert.Equal(0, prediction.PredictedLabels[1], 2);
            Assert.Equal(0, prediction.PredictedLabels[2], 2);

            prediction = model.Predict(new IrisDataWithStringLabel()
            {
                SepalLength = 3.1f,
                SepalWidth  = 5.5f,
                PetalLength = 2.2f,
                PetalWidth  = 6.4f,
            });

            Assert.Equal(0, prediction.PredictedLabels[0], 2);
            Assert.Equal(0, prediction.PredictedLabels[1], 2);
            Assert.Equal(1, prediction.PredictedLabels[2], 2);

            prediction = model.Predict(new IrisDataWithStringLabel()
            {
                SepalLength = 3.1f,
                SepalWidth  = 2.5f,
                PetalLength = 1.2f,
                PetalWidth  = 4.4f,
            });

            Assert.Equal(.2, prediction.PredictedLabels[0], 1);
            Assert.Equal(.8, prediction.PredictedLabels[1], 1);
            Assert.Equal(0, prediction.PredictedLabels[2], 2);

            // Note: Testing against the same data set as a simple way to test evaluation.
            // This isn't appropriate in real-world scenarios.
            string testDataPath = GetDataPath("iris.data");
            var    testData     = new TextLoader(testDataPath).CreateFrom <IrisDataWithStringLabel>(useHeader: false, separator: ',');

            var evaluator = new ClassificationEvaluator();

            evaluator.OutputTopKAcc = 3;
            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Assert.Equal(.98, metrics.AccuracyMacro);
            Assert.Equal(.98, metrics.AccuracyMicro, 2);
            Assert.Equal(.06, metrics.LogLoss, 2);
            Assert.InRange(metrics.LogLossReduction, 94, 96);
            Assert.Equal(1, metrics.TopKAccuracy);

            Assert.Equal(3, metrics.PerClassLogLoss.Length);
            Assert.Equal(0, metrics.PerClassLogLoss[0], 1);
            Assert.Equal(.1, metrics.PerClassLogLoss[1], 1);
            Assert.Equal(.1, metrics.PerClassLogLoss[2], 1);

            ConfusionMatrix matrix = metrics.ConfusionMatrix;

            Assert.Equal(3, matrix.Order);
            Assert.Equal(3, matrix.ClassNames.Count);
            Assert.Equal("Iris-setosa", matrix.ClassNames[0]);
            Assert.Equal("Iris-versicolor", matrix.ClassNames[1]);
            Assert.Equal("Iris-virginica", matrix.ClassNames[2]);

            Assert.Equal(50, matrix[0, 0]);
            Assert.Equal(50, matrix["Iris-setosa", "Iris-setosa"]);
            Assert.Equal(0, matrix[0, 1]);
            Assert.Equal(0, matrix["Iris-setosa", "Iris-versicolor"]);
            Assert.Equal(0, matrix[0, 2]);
            Assert.Equal(0, matrix["Iris-setosa", "Iris-virginica"]);

            Assert.Equal(0, matrix[1, 0]);
            Assert.Equal(0, matrix["Iris-versicolor", "Iris-setosa"]);
            Assert.Equal(48, matrix[1, 1]);
            Assert.Equal(48, matrix["Iris-versicolor", "Iris-versicolor"]);
            Assert.Equal(2, matrix[1, 2]);
            Assert.Equal(2, matrix["Iris-versicolor", "Iris-virginica"]);

            Assert.Equal(0, matrix[2, 0]);
            Assert.Equal(0, matrix["Iris-virginica", "Iris-setosa"]);
            Assert.Equal(1, matrix[2, 1]);
            Assert.Equal(1, matrix["Iris-virginica", "Iris-versicolor"]);
            Assert.Equal(49, matrix[2, 2]);
            Assert.Equal(49, matrix["Iris-virginica", "Iris-virginica"]);
        }
Example #13
        private static void Problem2()
        {
            // Define pipeline
            var pipeline = new LearningPipeline();

            pipeline.Add(new TextLoader("problem2_train.csv").CreateFrom <DrinkData>(useHeader: true, separator: ','));

            pipeline.Add(new TextFeaturizer("FullName", "FullName"));
            pipeline.Add(new TextFeaturizer("Country", "Country"));
            pipeline.Add(new ColumnConcatenator("Features", "FullName", "Country"));

            pipeline.Add(new Dictionarizer(("Type", "Label")));

            pipeline.Add(new StochasticDualCoordinateAscentClassifier());

            pipeline.Add(new PredictedLabelColumnOriginalValueConverter()
            {
                PredictedLabelColumn = "PredictedLabel"
            });

            // Train model
            var stopWatch = new Stopwatch();

            stopWatch.Start();
            var model = pipeline.Train <DrinkData, DrinkPrediction>();

            stopWatch.Stop();
            Console.WriteLine($"Trained the model in: {stopWatch.ElapsedMilliseconds / 1000} seconds.");

            // Evaluate model
            var testData = new TextLoader("problem2_validate.csv").CreateFrom <DrinkData>(useHeader: true, separator: ',');

            var evaluator = new ClassificationEvaluator {
                OutputTopKAcc = 1
            };
            ClassificationMetrics metrics = evaluator.Evaluate(model, testData);

            Console.WriteLine(metrics.TopKAccuracy.ToString("P"));

            // Use model
            IEnumerable <DrinkData> drinks = new[]
            {
                new DrinkData {
                    FullName = "Weird Stout"
                },
                new DrinkData {
                    FullName = "Folkes Röda IPA"
                },
                new DrinkData {
                    FullName = "Fryken Havre Ale"
                },
                new DrinkData {
                    FullName = "Barolo Gramolere"
                },
                new DrinkData {
                    FullName = "Château de Lavison"
                },
                new DrinkData {
                    FullName = "Korlat Cabernet Sauvignon"
                },
                new DrinkData {
                    FullName = "Glengoyne 25 Years"
                },
                new DrinkData {
                    FullName = "Oremus Late Harvest Tokaji Cuvée"
                },
                new DrinkData {
                    FullName = "Izadi Blanco"
                },
                new DrinkData {
                    FullName = "Ca'Montini Prosecco Extra Dry"
                }
            };

            string[] names;
            model.TryGetScoreLabelNames(out names);

            var predictions = model.Predict(drinks).ToList();
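
            // Sketch (assumptions): DrinkPrediction is assumed to expose the per-class scores as
            // a float[] named Score; TryGetScoreLabelNames above returned the label names in the
            // same order, so the index of the highest score gives the predicted drink type.
            foreach (var p in predictions)
            {
                int best = 0;
                for (int i = 1; i < p.Score.Length; i++)
                {
                    if (p.Score[i] > p.Score[best])
                    {
                        best = i;
                    }
                }
                Console.WriteLine($"{names[best]} ({p.Score[best]:P1})");
            }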

            Console.ReadLine();
        }