/// <summary>
/// Trains the classifier and computes the training error if the option is enabled.
/// </summary>
/// <param name="trainingData">The training data used to train the classifier.</param>
/// <param name="trainingLabels">The training labels corresponding to the provided training data.</param>
/// <param name="calculateError">Whether the training error should be calculated.</param>
public override void Train(List<double[]> trainingData, List<int> trainingLabels, bool calculateError = true)
{
    if (LearningAlgorithmName == LogisticRegressionOptimizationAlgorithm.ConjugateGradient)
    {
        LearningAlgorithm = new MultinomialLogisticLearning<ConjugateGradient>();
    }
    else if (LearningAlgorithmName == LogisticRegressionOptimizationAlgorithm.GradientDescent)
    {
        LearningAlgorithm = new MultinomialLogisticLearning<GradientDescent>();
    }
    else if (LearningAlgorithmName == LogisticRegressionOptimizationAlgorithm.BroydenFletcherGoldfarbShanno)
    {
        LearningAlgorithm = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();
    }
    else
    {
        // Default to Lower-Bound Newton-Raphson when no gradient-based
        // optimizer was requested.
        LearningAlgorithm = new LowerBoundNewtonRaphson()
        {
            MaxIterations = 100,
            Tolerance = 1e-6
        };
    }

    Model = LearningAlgorithm.Learn(trainingData.ToArray(), trainingLabels.ToArray());
    Probabilities = Model.Probabilities(trainingData.ToArray());

    if (calculateError)
    {
        CalculateTrainingError(trainingData, trainingLabels);
    }
}
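The class surrounding Train is not shown in this section. As a minimal call-site sketch, assuming a wrapper class (hypothetically named LogisticRegressionClassifier) that exposes the LearningAlgorithmName property consumed above, with trainingData and trainingLabels prepared as in the signature:

// Hypothetical call site: "LogisticRegressionClassifier" is an assumed
// name for the class that declares Train(); it is not shown here.
var classifier = new LogisticRegressionClassifier
{
    LearningAlgorithmName = LogisticRegressionOptimizationAlgorithm.BroydenFletcherGoldfarbShanno
};
classifier.Train(trainingData, trainingLabels, calculateError: true);

Selecting the optimizer through an enum keeps the calling code free of Accord's generic type parameters.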
/// <summary>
/// Prints the probability of each sample belonging to each class.
/// The testOutputs parameter (the expected values) must be set before calling.
/// </summary>
public void PrintProbabilities(MultinomialLogisticRegression mlr)
{
    double[][] probabilities = mlr.Probabilities(TestInputs);

    Console.WriteLine("Probabilities for {0}", Сlassifier);
    for (int m = 0; m < probabilities.Length; m++)
    {
        for (int n = 0; n < probabilities[m].Length; n++)
        {
            Console.WriteLine("([{0}, {1}]: {2})", m, n, probabilities[m][n]);
        }
    }
}
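A companion sketch, assuming the same TestInputs field: rather than dumping the full probability matrix, Decide reports only the winning class per sample.

// Print only the most probable class per sample (the argmax of each
// row of the probability matrix printed above).
int[] predicted = mlr.Decide(TestInputs);
for (int m = 0; m < predicted.Length; m++)
{
    Console.WriteLine("sample {0} -> class {1}", m, predicted[m]);
}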
public static int[] MultiNomialLogisticRegressionBFGS(double[][] input, int[] labels, string fName)
{
    /* The L-BFGS algorithm is a member of the broad family of quasi-Newton optimization methods.
     * L-BFGS stands for 'Limited-memory BFGS'. Indeed, L-BFGS uses a limited-memory variation of
     * the Broyden-Fletcher-Goldfarb-Shanno (BFGS) update to approximate the inverse Hessian matrix
     * (denoted by Hk). Unlike the original BFGS method, which stores a dense approximation, L-BFGS
     * stores only a few vectors that represent the approximation implicitly. Due to its moderate
     * memory requirements, the L-BFGS method is particularly well suited for optimization problems
     * with a large number of variables.
     */

    // Create an L-BFGS learning algorithm
    var mlbfgs = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();

    // Estimate a multinomial logistic regression from the data
    MultinomialLogisticRegression mlr = mlbfgs.Learn(input, labels);

    // Create a cross-validation model derived from the training set to measure the performance of
    // this predictive model and estimate how well we expect the model will generalize. The algorithm
    // executes multiple rounds of cross-validation on different partitions and averages the results.
    int folds = 4; // could play around with this later
    var cv = CrossValidation.Create(
        k: folds,
        learner: (p) => new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>(),
        loss: (actual, expected, p) => new ZeroOneLoss(expected).Loss(actual),
        fit: (teacher, x, y, w) => teacher.Learn(x, y, w),
        x: input,
        y: labels);

    var result = cv.Learn(input, labels);

    GeneralConfusionMatrix gcm = result.ToConfusionMatrix(input, labels);
    ConfusionMatrix cm = ConfusionMatrix.Estimate(mlr, input, labels);

    // Output relevant statistics
    Funcs.Utility.OutPutStats(result.NumberOfSamples, result.NumberOfInputs,
        result.Training.Mean, gcm.Accuracy, cm.FalsePositives, cm.FalseNegatives, cm.FScore);

    // Compute the model predictions and return the values
    int[] answers = mlr.Decide(input);

    // And also the probability of each of the answers
    double[][] probabilities = mlr.Probabilities(input);

    // Now we can check how good our model is at predicting
    double error = new ZeroOneLoss(labels).Loss(answers);

    // Persist the trained model so it can be reloaded later
    mlr.Save(fName, compression: SerializerCompression.None);

    return answers;
}
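Since the method persists the trained model with Save, a matching sketch of the reload path, using Accord's Serializer.Load against the same fName:

using Accord.IO;
using Accord.Statistics.Models.Regression;

// Reload the model written by mlr.Save(...) above and reuse it,
// assuming 'fName' still points at that file.
MultinomialLogisticRegression restored =
    Serializer.Load<MultinomialLogisticRegression>(fName);
int[] predictions = restored.Decide(input);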
public void doc_learn()
{
    #region doc_learn
    // Declare a very simple classification/regression
    // problem with only 2 input variables (x and y):
    double[][] inputs =
    {
        new[] { 3.0, 1.0 },
        new[] { 7.0, 1.0 },
        new[] { 3.0, 1.1 },
        new[] { 3.0, 2.0 },
        new[] { 6.0, 1.0 },
    };

    // Class labels for each of the inputs
    int[] outputs = { 0, 2, 0, 1, 2 };

    // Create an estimation algorithm to estimate the regression
    LowerBoundNewtonRaphson lbnr = new LowerBoundNewtonRaphson()
    {
        MaxIterations = 100,
        Tolerance = 1e-6
    };

    // Now, we will iteratively estimate our model:
    MultinomialLogisticRegression mlr = lbnr.Learn(inputs, outputs);

    // We can compute the model answers
    int[] answers = mlr.Decide(inputs);

    // And also the probability of each of the answers
    double[][] probabilities = mlr.Probabilities(inputs);

    // Now we can check how good our model is at predicting
    double error = new ZeroOneLoss(outputs).Loss(answers);

    // We can also verify that the classes with the highest
    // probability are the ones being decided for:
    int[] argmax = probabilities.ArgMax(dimension: 1); // should be same as 'answers'
    #endregion

    Assert.AreEqual(0, error);
    Assert.AreEqual(answers, argmax);
}
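As a side check (not part of the original test), each row of probabilities is a per-sample distribution over the classes and should sum to approximately one; a plain-LINQ sketch, continuing from the snippet above:

using System;
using System.Linq;

// Each row of 'probabilities' is a distribution over the classes,
// so its entries should sum to ~1.
foreach (double[] row in probabilities)
{
    if (Math.Abs(row.Sum() - 1.0) > 1e-6)
        throw new InvalidOperationException("Row does not sum to 1.");
}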
public void SaveProbabilities(MultinomialLogisticRegression mlr,
    string path = @"H:\Documents\Visual Studio 2015\Projects\ML\ML\SaveResults\")
{
    string timeAfter = InitialTime();
    double[][] probabilities = mlr.Probabilities(TestInputs);

    // Open the output file once instead of re-opening it for every value
    string fileName = path + timeAfter + "_Probabilities" + Сlassifier + ".txt";
    using (FileStream fs = new FileStream(fileName, FileMode.Append))
    using (StreamWriter writer = new StreamWriter(fs))
    {
        for (int m = 0; m < probabilities.Length; m++)
        {
            for (int n = 0; n < probabilities[m].Length; n++)
            {
                writer.WriteLine("([{0}, {1}]: {2})", m, n, probabilities[m][n]);
            }
        }
    }
}
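A hypothetical call site; the default directory is the hard-coded path above, and the second argument overrides it:

// Hypothetical usage: write the test-set probabilities either to the
// default directory or to a caller-supplied one.
SaveProbabilities(mlr);
SaveProbabilities(mlr, @"C:\temp\ml-results\");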
private static void multinomial(double[][] inputs, int[] outputs)
{
    var lbnr = new LowerBoundNewtonRaphson()
    {
        MaxIterations = 100,
        Tolerance = 1e-6
    };

    // Learn a multinomial logistic regression using the teacher:
    MultinomialLogisticRegression mlr = lbnr.Learn(inputs, outputs);

    // We can compute the model answers
    int[] answers = mlr.Decide(inputs);

    // And also the probability of each of the answers
    double[][] probabilities = mlr.Probabilities(inputs);

    // Now we can check how good our model is at predicting
    double error = new AccuracyLoss(outputs).Loss(answers);

    // We can also verify the classes with highest
    // probability are the ones being decided for:
    int[] argmax = probabilities.ArgMax(dimension: 1); // should be same as 'answers'
}
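Note that this sample measures error with AccuracyLoss, while the rest of this section uses ZeroOneLoss; if the plain misclassification rate is wanted here too, the equivalent line is:

// Misclassification rate, as measured by the other samples in this section:
double zeroOneError = new ZeroOneLoss(outputs).Loss(answers);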
public void LearnTest1()
{
    #region doc_learn_0
    // Declare a simple classification/regression
    // problem with 5 input variables (a,b,c,d,e):
    double[][] inputs =
    {
        new double[] { 1, 4, 2, 0, 1 },
        new double[] { 1, 3, 2, 0, 1 },
        new double[] { 3, 0, 1, 1, 1 },
        new double[] { 3, 0, 1, 0, 1 },
        new double[] { 0, 5, 5, 5, 5 },
        new double[] { 1, 5, 5, 5, 5 },
        new double[] { 1, 0, 0, 0, 0 },
        new double[] { 1, 0, 0, 0, 0 },
        new double[] { 2, 4, 2, 0, 1 },
        new double[] { 2, 4, 2, 0, 1 },
        new double[] { 2, 6, 2, 0, 1 },
        new double[] { 2, 7, 5, 0, 1 },
    };

    // Class labels for each of the inputs
    int[] outputs = { 0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 0, 0 };
    #endregion

    {
        #region doc_learn_cg
        // Create a Conjugate Gradient algorithm to estimate the regression
        var mcg = new MultinomialLogisticLearning<ConjugateGradient>();

        // Now, we can estimate our model using Conjugate Gradient
        MultinomialLogisticRegression mlr = mcg.Learn(inputs, outputs);

        // We can compute the model answers
        int[] answers = mlr.Decide(inputs);

        // And also the probability of each of the answers
        double[][] probabilities = mlr.Probabilities(inputs);

        // Now we can check how good our model is at predicting
        double error = new ZeroOneLoss(outputs).Loss(answers);
        #endregion

        Assert.AreEqual(0, error, 1e-5);
    }

    {
        #region doc_learn_gd
        // Create a Gradient Descent algorithm to estimate the regression
        var mgd = new MultinomialLogisticLearning<GradientDescent>();

        // Now, we can estimate our model using Gradient Descent
        MultinomialLogisticRegression mlr = mgd.Learn(inputs, outputs);

        // We can compute the model answers
        int[] answers = mlr.Decide(inputs);

        // And also the probability of each of the answers
        double[][] probabilities = mlr.Probabilities(inputs);

        // Now we can check how good our model is at predicting
        double error = new ZeroOneLoss(outputs).Loss(answers);
        #endregion

        Assert.AreEqual(0, error, 1e-5);
    }

    {
        #region doc_learn_bfgs
        // Create a BFGS algorithm to estimate the regression
        var mlbfgs = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();

        // Now, we can estimate our model using BFGS
        MultinomialLogisticRegression mlr = mlbfgs.Learn(inputs, outputs);

        // We can compute the model answers
        int[] answers = mlr.Decide(inputs);

        // And also the probability of each of the answers
        double[][] probabilities = mlr.Probabilities(inputs);

        // Now we can check how good our model is at predicting
        double error = new ZeroOneLoss(outputs).Loss(answers);
        #endregion

        Assert.AreEqual(0, error, 1e-5);
    }
}