private static void BuildLogitModel(double[][] trainInput, int[] trainOutput, double[][] testInput, int[] testOutput)
        {
            var logit = new MultinomialLogisticLearning<GradientDescent>()
            {
                MiniBatchSize = 500
            };
            var logitModel = logit.Learn(trainInput, trainOutput);

            int[] inSamplePreds  = logitModel.Decide(trainInput);
            int[] outSamplePreds = logitModel.Decide(testInput);

            // Accuracy
            double inSampleAccuracy  = 1 - new ZeroOneLoss(trainOutput).Loss(inSamplePreds);
            double outSampleAccuracy = 1 - new ZeroOneLoss(testOutput).Loss(outSamplePreds);

            Console.WriteLine("* In-Sample Accuracy: {0:0.0000}", inSampleAccuracy);
            Console.WriteLine("* Out-of-Sample Accuracy: {0:0.0000}", outSampleAccuracy);

            // Build confusion matrix
            int[][] confMatrix = BuildConfusionMatrix(
                testOutput, outSamplePreds, 10
                );
            System.IO.File.WriteAllLines(
                Path.Combine(
                    @"\\Mac\Home\Documents\c-sharp-machine-learning\ch.8\input-data",
                    "logit-conf-matrix.csv"
                    ),
                confMatrix.Select(x => String.Join(",", x))
                );

            // Precision Recall
            PrintPrecisionRecall(confMatrix);
            DrawROCCurve(testOutput, outSamplePreds, 10, "Logit");
        }
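The helpers BuildConfusionMatrix, PrintPrecisionRecall, and DrawROCCurve are not included in this snippet. A minimal sketch of the first, assuming 0-based integer class labels (rows index the actual class, columns the predicted class):

        private static int[][] BuildConfusionMatrix(int[] actual, int[] preds, int numClass)
        {
            // Allocate a numClass-by-numClass matrix of zeros
            int[][] matrix = new int[numClass][];
            for (int i = 0; i < numClass; i++)
            {
                matrix[i] = new int[numClass];
            }

            // Tally each (actual, predicted) pair
            for (int i = 0; i < actual.Length; i++)
            {
                matrix[actual[i]][preds[i]]++;
            }

            return matrix;
        }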
Example 2
        public MultinomialLogisticRegression MachineLearning()
        {
            // Use gradient descent as the optimization method
            var mll = new MultinomialLogisticLearning<GradientDescent>();

            return mll.Learn(DataTestInput, DataTestOutput);
        }
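A hypothetical call site for this wrapper (DataTestInput and DataTestOutput are instance fields assumed to be populated elsewhere; despite the field names, Learn fits the model on them):

            // Train and keep the fitted model for later predictions
            MultinomialLogisticRegression fitted = MachineLearning();
            int[] fittedPreds = fitted.Decide(DataTestInput); // sanity check on the same inputs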
Example 3
        public static int[] MultiNomialLogisticRegressionBFGS(double[][] input, int[] labels, string fName)
        {
            /* The L-BFGS algorithm is a member of the broad family of quasi-Newton optimization methods.
             * L-BFGS stands for 'Limited-memory BFGS'. Indeed, L-BFGS uses a limited-memory variation of
             * the Broyden–Fletcher–Goldfarb–Shanno (BFGS) update to approximate the inverse Hessian matrix
             * (denoted by Hk). Unlike the original BFGS method, which stores a dense approximation, L-BFGS
             * stores only a few vectors that represent the approximation implicitly. Due to its moderate
             * memory requirements, the L-BFGS method is particularly well suited for optimization problems
             * with a large number of variables.
             */

            // Create an L-BFGS learner
            var mlbfgs = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();

            // Estimate using the data against a logistic regression
            MultinomialLogisticRegression mlr = mlbfgs.Learn(input, labels);

            //
            // Create a cross validation model derived from the training set to measure the performance of this
            // predictive model and estimate how well we expect the model will generalize. The algorithm executes
            // multiple rounds of cross validation on different partitions and averages the results.
            //
            int folds = 4; // could play around with this later
            var cv    = CrossValidation.Create(k: folds, learner: (p) => new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>(),
                                               loss: (actual, expected, p) => new ZeroOneLoss(expected).Loss(actual),
                                               fit: (teacher, x, y, w) => teacher.Learn(x, y, w),
                                               x: input, y: labels);
            var result = cv.Learn(input, labels);
            GeneralConfusionMatrix gcm = result.ToConfusionMatrix(input, labels);
            ConfusionMatrix        cm  = ConfusionMatrix.Estimate(mlr, input, labels);

            //
            // Output relevant statistics
            //
            Funcs.Utility.OutPutStats(result.NumberOfSamples, result.NumberOfInputs,
                                      result.Training.Mean, gcm.Accuracy, cm.FalsePositives, cm.FalseNegatives, cm.FScore);

            // Compute the model predictions and return the values
            int[] answers = mlr.Decide(input);

            // And also the probability of each of the answers
            double[][] probabilities = mlr.Probabilities(input);

            // Now we can check how good our model is at predicting
            double error = new Accord.Math.Optimization.Losses.ZeroOneLoss(labels).Loss(answers);

            mlr.Save(fName, compression: SerializerCompression.None);

            return answers;
        }
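The model is persisted with Save at the end of the method; a minimal sketch of restoring it later, assuming Accord.IO's Serializer is available:

            // Load the persisted model back from disk and reuse it for predictions
            MultinomialLogisticRegression restored =
                Serializer.Load<MultinomialLogisticRegression>(fName);
            int[] roundTripAnswers = restored.Decide(input);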
Example 4
        public void RegressTest2()
        {
            Accord.Math.Random.Generator.Seed = 0;

            double[][] inputs;
            int[]      outputs;

            MultinomialLogisticRegressionTest.CreateInputOutputsExample1(out inputs, out outputs);

            // Create an algorithm to estimate the regression
            var msgd = new MultinomialLogisticLearning<ConjugateGradient>();

            // Now, we can iteratively estimate our model
            MultinomialLogisticRegression mlr = msgd.Learn(inputs, outputs);

            int[] predicted = mlr.Decide(inputs);

            // ZeroOneLoss measures the fraction of misclassified samples
            double error = new ZeroOneLoss(outputs).Loss(predicted);

            Assert.AreEqual(0.61088435374149663, error, 1e-8);
        }
Example 5
        public void GradientTest()
        {
            double[][] inputs;
            int[]      outputs;

            MultinomialLogisticRegressionTest.CreateInputOutputsExample1(out inputs, out outputs);

            // Create an algorithm to estimate the regression
            var msgd = new MultinomialLogisticLearning<ConjugateGradient>();

            msgd.Method.MaxIterations = 1;

            msgd.Learn(inputs, outputs);

            int variables = inputs.Columns() * outputs.DistinctCount();
            var fd        = new FiniteDifferences(variables, msgd.crossEntropy);

            double[] probe    = { 0.1, 0.2, 0.5, 0.6, 0.2, 0.1 };
            double[] expected = fd.Compute(probe);
            double[] actual   = msgd.crossEntropyGradient(probe);

            Assert.IsTrue(expected.IsEqual(actual, 1e-5));
        }
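FiniteDifferences numerically approximates a gradient, which is what makes this check possible. A self-contained sketch on a function with a known analytic gradient (the function f and probe point below are illustrative, not from the test):

            // f(x, y) = x^2 + 3y has analytic gradient (2x, 3)
            Func<double[], double> f = x => x[0] * x[0] + 3 * x[1];
            var check = new FiniteDifferences(2, f);
            double[] numericalGradient = check.Compute(new[] { 1.0, 2.0 }); // approx. { 2.0, 3.0 }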
Example 6
        static void Main(string[] args)
        {
            // sample input
            double[][] inputs =
            {
                new double[] { 0, 0 },
                new double[] { 1, 0 },
                new double[] { 0, 1 },
                new double[] { 1, 1 },
            };

            // sample binary output
            int[] outputs =
            {
                0,
                1,
                1,
                0,
            };

            // sample binary output for Neural Network
            double[][] nnOutputs =
            {
                new double[] { 1, 0 },
                new double[] { 0, 1 },
                new double[] { 0, 1 },
                new double[] { 1, 0 },
            };

            // sample multinomial output
            int[] multiOutputs =
            {
                0,
                1,
                1,
                2,
            };

            // 1. Binary Logistic Regression
            var learner = new IterativeReweightedLeastSquares<LogisticRegression>()
            {
                MaxIterations = 100
            };
            var model = learner.Learn(inputs, outputs);

            var preds = model.Decide(inputs);

            Console.WriteLine("\n\n*Binary Logistic Regression Predictions: {0}", String.Join(", ", preds));

            // 2. Multinomial Logistic Regression
            var learner2 = new MultinomialLogisticLearning<GradientDescent>()
            {
                MiniBatchSize = 4
            };
            var model2 = learner2.Learn(inputs, multiOutputs);

            var preds2 = model2.Decide(inputs);

            Console.WriteLine("\n\n*Multinomial Logistic Regression Predictions: {0}", String.Join(", ", preds2));

            // 3. Binary Naive Bayes Classifier
            var learner3 = new NaiveBayesLearning<NormalDistribution>();
            var model3   = learner3.Learn(inputs, outputs);

            var preds3 = model3.Decide(inputs);

            Console.WriteLine("\n\n*Binary Naive Bayes Predictions: {0}", String.Join(", ", preds3));

            // 4. RandomForest
            var learner4 = new RandomForestLearning()
            {
                NumberOfTrees = 3,
                CoverageRatio = 0.9,
                SampleRatio = 0.9
            };
            var model4 = learner4.Learn(inputs, outputs);

            var preds4 = model4.Decide(inputs);

            Console.WriteLine("\n\n*Binary RandomForest Classifier Predictions: {0}", String.Join(", ", preds4));

            // 5. SVM
            var learner5 = new SequentialMinimalOptimization<Gaussian>();
            var model5   = learner5.Learn(inputs, outputs);

            var preds5 = model5.Decide(inputs);

            Console.WriteLine("\n\n*Binary SVM Predictions: {0}", String.Join(", ", preds5));

            // 6. Neural Network
            var network = new ActivationNetwork(
                new BipolarSigmoidFunction(2), // activation function (alpha = 2)
                2,                             // number of inputs
                1,                             // neurons in the hidden layer
                2                              // neurons in the output layer
                );

            var teacher = new LevenbergMarquardtLearning(network);

            Console.WriteLine("\n-- Training Neural Network");
            int    numEpoch = 3;
            double error    = Double.PositiveInfinity;

            for (int i = 0; i < numEpoch; i++)
            {
                error = teacher.RunEpoch(inputs, nnOutputs);
                Console.WriteLine("* Epoch {0} - error: {1:0.0000}", i + 1, error);
            }

            double[][] nnPreds = inputs.Select(
                x => network.Compute(x)
                ).ToArray();

            int[] preds6 = nnPreds.Select(
                x => x.ToList().IndexOf(x.Max())
                ).ToArray();

            Console.WriteLine("\n\n*Binary Neural Network Predictions: {0}", String.Join(", ", preds6));


            Console.WriteLine("\n\n\n\nDONE!!");
            Console.ReadKey();
        }
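The predictions above are printed but never scored. A minimal sketch of scoring one of them with ZeroOneLoss (from Accord.Math.Optimization.Losses, as used in the other examples in this listing):

            // Fraction of misclassified samples for the random forest predictions
            double rfError = new ZeroOneLoss(outputs).Loss(preds4);
            Console.WriteLine("* RandomForest error rate: {0:0.0000}", rfError);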
Example 7
        public void LearnTest1()
        {
            #region doc_learn_0
            // Declare a simple classification/regression
            // problem with 5 input variables (a,b,c,d,e):
            double[][] inputs =
            {
                new double[] { 1, 4, 2, 0, 1 },
                new double[] { 1, 3, 2, 0, 1 },
                new double[] { 3, 0, 1, 1, 1 },
                new double[] { 3, 0, 1, 0, 1 },
                new double[] { 0, 5, 5, 5, 5 },
                new double[] { 1, 5, 5, 5, 5 },
                new double[] { 1, 0, 0, 0, 0 },
                new double[] { 1, 0, 0, 0, 0 },
                new double[] { 2, 4, 2, 0, 1 },
                new double[] { 2, 4, 2, 0, 1 },
                new double[] { 2, 6, 2, 0, 1 },
                new double[] { 2, 7, 5, 0, 1 },
            };

            // Class labels for each of the inputs
            int[] outputs =
            {
                0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 0, 0
            };
            #endregion

            {
                #region doc_learn_cg
                // Create a Conjugate Gradient algorithm to estimate the regression
                var mcg = new MultinomialLogisticLearning<ConjugateGradient>();

                // Now, we can estimate our model using Conjugate Gradient
                MultinomialLogisticRegression mlr = mcg.Learn(inputs, outputs);

                // We can compute the model answers
                int[] answers = mlr.Decide(inputs);

                // And also the probability of each of the answers
                double[][] probabilities = mlr.Probabilities(inputs);

                // Now we can check how good our model is at predicting
                double error = new ZeroOneLoss(outputs).Loss(answers);
                #endregion

                Assert.AreEqual(0, error, 1e-5);
            }

            {
                #region doc_learn_gd
                // Create a Gradient Descent algorithm to estimate the regression
                var mgd = new MultinomialLogisticLearning<GradientDescent>();

                // Now, we can estimate our model using Gradient Descent
                MultinomialLogisticRegression mlr = mgd.Learn(inputs, outputs);

                // We can compute the model answers
                int[] answers = mlr.Decide(inputs);

                // And also the probability of each of the answers
                double[][] probabilities = mlr.Probabilities(inputs);

                // Now we can check how good our model is at predicting
                double error = new ZeroOneLoss(outputs).Loss(answers);
                #endregion

                Assert.AreEqual(0, error, 1e-5);
            }

            {
                #region doc_learn_bfgs
                // Create an L-BFGS algorithm to estimate the regression
                var mlbfgs = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();

                // Now, we can estimate our model using BFGS
                MultinomialLogisticRegression mlr = mlbfgs.Learn(inputs, outputs);

                // We can compute the model answers
                int[] answers = mlr.Decide(inputs);

                // And also the probability of each of the answers
                double[][] probabilities = mlr.Probabilities(inputs);

                // Now we can check how good our model is at predicting
                double error = new ZeroOneLoss(outputs).Loss(answers);
                #endregion

                Assert.AreEqual(0, error, 1e-5);
            }
        }
Example 8
        public void LearnTest1()
        {
            double[][] inputs =
            {
                new double[] { 1, 4, 2, 0, 1 },
                new double[] { 1, 3, 2, 0, 1 },
                new double[] { 3, 0, 1, 1, 1 },
                new double[] { 3, 0, 1, 0, 1 },
                new double[] { 0, 5, 5, 5, 5 },
                new double[] { 1, 5, 5, 5, 5 },
                new double[] { 1, 0, 0, 0, 0 },
                new double[] { 1, 0, 0, 0, 0 },
                new double[] { 2, 4, 2, 0, 1 },
                new double[] { 2, 4, 2, 0, 1 },
                new double[] { 2, 6, 2, 0, 1 },
                new double[] { 2, 7, 5, 0, 1 },
            };

            int[] outputs =
            {
                0, 0,
                1, 1,
                2, 2,
                3, 3,
                0, 0, 0, 0
            };

            // Create an algorithm to estimate the regression
            var mcg = new MultinomialLogisticLearning<ConjugateGradient>();

            // Now, we can iteratively estimate our model
            MultinomialLogisticRegression mlr = mcg.Learn(inputs, outputs);

            int[] predicted = mlr.Decide(inputs);

            double error = new ZeroOneLoss(outputs).Loss(predicted);

            Assert.AreEqual(0, error);


            // Create an algorithm to estimate the regression
            var mgd = new MultinomialLogisticLearning<GradientDescent>();

            // Now, we can iteratively estimate our model
            mlr = mgd.Learn(inputs, outputs);

            predicted = mlr.Decide(inputs);

            error = new ZeroOneLoss(outputs).Loss(predicted);
            Assert.AreEqual(0, error, 1e-5);


            // Create an algorithm to estimate the regression
            var mlbfgs = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();

            // Now, we can iteratively estimate our model
            mlr = mlbfgs.Learn(inputs, outputs);

            predicted = mlr.Decide(inputs);

            error = new ZeroOneLoss(outputs).Loss(predicted);
            Assert.AreEqual(0, error, 1e-5);
        }
Example 9
        private MultinomialLogisticRegression LearnLogReg(double[][] XKnownTrainSet, int[] YKnownTrainSet)
        {
            var LogRegLearning = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();

            return LogRegLearning.Learn(XKnownTrainSet, YKnownTrainSet);
        }
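A hypothetical usage sketch for this wrapper, with xTrain/yTrain/xTest/yTest as assumed, pre-split arrays:

            MultinomialLogisticRegression reg = LearnLogReg(xTrain, yTrain);
            int[] yPred = reg.Decide(xTest);                        // predict held-out labels
            double testError = new ZeroOneLoss(yTest).Loss(yPred);  // fraction misclassified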
Example 10
        public static void TrainClassifiers()
        {
            // -------------------------- Logistic Regression ----------------------------------

            var MLRG = new MultinomialLogisticLearning<GradientDescent>();

            Predictor.MultinomialLogisticRegression = MLRG.Learn(PredictorPointsTrain, FrequencyLabelsInt);



            // -------------------------- Random Forest ----------------------------------

            var teacher = new RandomForestLearning()
            {
                NumberOfTrees = NumTrees,
            };

            Predictor.RandomForest = teacher.Learn(PredictorPointsTrain, FrequencyLabelsInt);


            // -------------------------- Minimum Mean Distance ----------------------------------

            Predictor.MinimumMeanDistance = new MinimumMeanDistanceClassifier();

            // Compute the analysis and create a classifier
            Predictor.MinimumMeanDistance.Learn(PredictorPointsTrain, FrequencyLabelsInt);


            // -------------------------- Support Vector Machine ----------------------------------
            // Declare the parameters and ranges to be searched

            /*GridSearchRange[] ranges =
             * {
             *  new GridSearchRange("complexity", new double[] { 0.00000001, 5.20, 0.30, 0.50 } ),
             * };*/


            // Instantiate a new Grid Search algorithm for Kernel Support Vector Machines

            /*            var gridsearch = new GridSearch<SupportVectorMachine>(ranges);
             *
             *          // Set the fitting function for the algorithm
             *          gridsearch.Fitting = delegate (GridSearchParameterCollection parameters, out double error)
             *          {
             *              // The parameters to be tried will be passed as a function parameter.
             *              double complexity = parameters["complexity"].Value;
             *
             *              // Use the parameters to build the SVM model
             *              SupportVectorMachine ksvm = new SupportVectorMachine( 2);
             *
             *
             *              // Create a new learning algorithm for SVMs
             *              SequentialMinimalOptimization smo = new SequentialMinimalOptimization(ksvm, PredictorPointsTrain, FrequencyLabelsInt);
             *              smo.Complexity = complexity;
             *
             *              // Measure the model performance to return as an out parameter
             *              error = smo.Run();
             *
             *              return ksvm; // Return the current model
             *          };
             *
             *
             *          // Declare some out variables to pass to the grid search algorithm
             *          GridSearchParameterCollection bestParameters; double minError;
             *
             *          // Compute the grid search to find the best Support Vector Machine
             *          Predictor.SVM = gridsearch.Compute(out bestParameters, out minError);*/
        }
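The grid search for the SVM remains commented out above. A minimal, non-tuned sketch under the same assumptions (PredictorPointsTrain and FrequencyLabelsInt as defined above, binary labels; the Complexity value is an illustrative placeholder, not a tuned result):

            // Train a Gaussian-kernel SVM with a fixed complexity instead of a grid search
            var smo = new SequentialMinimalOptimization<Gaussian>()
            {
                Complexity = 0.5 // placeholder; the commented-out grid search would tune this
            };
            var svm = smo.Learn(PredictorPointsTrain, FrequencyLabelsInt);
            bool[] svmPreds = svm.Decide(PredictorPointsTrain);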