static public int[] MultiNomialLogRegressionLowerBoundNewtonRaphson(double[][] input1, int[] labels, string SaveFile)
{
    // http://accord-framework.net/docs/html/T_Accord_Statistics_Models_Regression_MultinomialLogisticRegression.htm

    // Create an estimation algorithm to estimate the regression
    LowerBoundNewtonRaphson lbnr = new LowerBoundNewtonRaphson()
    {
        MaxIterations = 10,
        Tolerance = 1e-6
    };

    // *******************************************************************************
    var cv = CrossValidation.Create(
        k: 10, // We will be using 10-fold cross validation

        // First we define the learning algorithm:
        learner: (p) => new LowerBoundNewtonRaphson(),

        // Now we have to specify how the learner's performance should be measured:
        loss: (actual, expected, p) => new ZeroOneLoss(expected).Loss(actual),

        // This function can be used to perform any special
        // operations before the actual learning is done, but
        // here we will just leave it as simple as it can be:
        fit: (teach, x, y, w) => teach.Learn(x, y, w),

        // Finally, we have to pass the input and output data
        // that will be used in cross-validation.
        x: input1, y: labels
    );

    // Generate a cross-validation of the data
    var cvresult = cv.Learn(input1, labels);

    // Iteratively estimate the model
    MultinomialLogisticRegression mlr = lbnr.Learn(input1, labels);

    // Generate statistics from the confusion matrices
    ConfusionMatrix cm = ConfusionMatrix.Estimate(mlr, input1, labels);
    GeneralConfusionMatrix gcm = cvresult.ToConfusionMatrix(input1, labels);
    Funcs.Utility.OutPutStats(cvresult.NumberOfSamples, cvresult.NumberOfInputs,
        cvresult.Training.Mean, gcm.Accuracy, cm.FalsePositives, cm.FalseNegatives, cm.FScore);

    // We can compute the model answers
    int[] answers = mlr.Decide(input1);

    // Persist the trained model next to the input file
    string modelsavefile = SaveFile.Replace(".csv", ".MLR.save");
    mlr.Save(modelsavefile, compression: SerializerCompression.None);

    return answers;
}
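// Illustrative usage sketch (not part of the original code): shows how the Newton-Raphson
// method above might be called. The synthetic feature values, labels and the "example.csv"
// path are hypothetical placeholders. Note that the ".csv" suffix is replaced with ".MLR.save"
// when the model is written to disk, and that at least 10 samples are needed for the
// 10-fold cross-validation inside the method to run.
static public void ExampleLowerBoundNewtonRaphsonUsage()
{
    // Purely synthetic data: 20 samples, 2 features, 2 classes.
    double[][] features = new double[20][];
    int[] classLabels = new int[20];
    for (int i = 0; i < 20; i++)
    {
        int c = i < 10 ? 0 : 1; // first half class 0, second half class 1
        features[i] = new double[] { c * 2.0 + 0.1 * (i % 10), c * 2.0 - 0.1 * (i % 10) };
        classLabels[i] = c;
    }

    // Train, cross-validate, print statistics and save the model to "example.MLR.save".
    int[] predicted = MultiNomialLogRegressionLowerBoundNewtonRaphson(features, classLabels, "example.csv");
}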
static public int[] MultiNomialLogisticRegressionBFGS(double[][] input, int[] labels, string fName)
{
    /* The L-BFGS algorithm is a member of the broad family of quasi-Newton optimization methods.
     * L-BFGS stands for 'Limited-memory BFGS'. Indeed, L-BFGS uses a limited-memory variation of
     * the Broyden–Fletcher–Goldfarb–Shanno (BFGS) update to approximate the inverse Hessian matrix
     * (denoted by Hk). Unlike the original BFGS method, which stores a dense approximation, L-BFGS
     * stores only a few vectors that represent the approximation implicitly. Due to its moderate
     * memory requirement, the L-BFGS method is particularly well suited for optimization problems
     * with a large number of variables.
     */

    // Create an L-BFGS learner
    var mlbfgs = new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>();

    // Estimate a logistic regression from the data
    MultinomialLogisticRegression mlr = mlbfgs.Learn(input, labels);

    //
    // Create a cross-validation model derived from the training set to measure the performance of this
    // predictive model and estimate how well we expect the model will generalize. The algorithm executes
    // multiple rounds of cross-validation on different partitions and averages the results.
    //
    int folds = 4; // could play around with this later
    var cv = CrossValidation.Create(
        k: folds,
        learner: (p) => new MultinomialLogisticLearning<BroydenFletcherGoldfarbShanno>(),
        loss: (actual, expected, p) => new ZeroOneLoss(expected).Loss(actual),
        fit: (teacher, x, y, w) => teacher.Learn(x, y, w),
        x: input, y: labels);

    var result = cv.Learn(input, labels);

    // Generate statistics from the confusion matrices
    GeneralConfusionMatrix gcm = result.ToConfusionMatrix(input, labels);
    ConfusionMatrix cm = ConfusionMatrix.Estimate(mlr, input, labels);

    //
    // Output relevant statistics
    //
    Funcs.Utility.OutPutStats(result.NumberOfSamples, result.NumberOfInputs,
        result.Training.Mean, gcm.Accuracy, cm.FalsePositives, cm.FalseNegatives, cm.FScore);

    // Compute the model predictions and return the values
    int[] answers = mlr.Decide(input);

    // And also the probability of each of the answers
    double[][] probabilities = mlr.Probabilities(input);

    // Now we can check how good our model is at predicting
    double error = new Accord.Math.Optimization.Losses.ZeroOneLoss(labels).Loss(answers);

    // Persist the trained model
    mlr.Save(fName, compression: SerializerCompression.None);

    return answers;
}
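// Illustrative usage sketch (not part of the original code): shows how the L-BFGS method above
// might be called and how the saved model could be reloaded. The file name "model.bfgs.save"
// is a hypothetical placeholder, and Serializer.Load<T> from Accord.IO is assumed to be the
// counterpart of the Save extension method used above.
static public void ExampleBFGSUsage(double[][] input, int[] labels)
{
    // Train, cross-validate, print statistics and persist the model.
    int[] predicted = MultiNomialLogisticRegressionBFGS(input, labels, "model.bfgs.save");

    // Reload the persisted model and verify it reproduces the same predictions.
    var reloaded = Accord.IO.Serializer.Load<MultinomialLogisticRegression>("model.bfgs.save");
    int[] predictedAgain = reloaded.Decide(input);
}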