Example #1
        public void FindMinimum_BigRosenbrock_Hard()
        {
            var obj    = ObjectiveFunction.Gradient(BigRosenbrockFunction.Value, BigRosenbrockFunction.Gradient);
            var solver = new LimitedMemoryBfgsMinimizer(1e-5, 1e-5, 1e-5, 5, 1000);
            var result = solver.FindMinimum(obj, new DenseVector(new[] { -1.2 * 100.0, 1.0 * 100.0 }));

            Assert.That(Math.Abs(result.MinimizingPoint[0] - BigRosenbrockFunction.Minimum[0]), Is.LessThan(1e-3));
            Assert.That(Math.Abs(result.MinimizingPoint[1] - BigRosenbrockFunction.Minimum[1]), Is.LessThan(1e-3));
        }
Example #2
        public void FindMinimum_Rosenbrock_Overton()
        {
            var obj    = ObjectiveFunction.Gradient(RosenbrockFunction.Value, RosenbrockFunction.Gradient);
            var solver = new LimitedMemoryBfgsMinimizer(1e-5, 1e-5, 1e-5, 5, 100);
            var result = solver.FindMinimum(obj, new DenseVector(new[] { -0.9, -0.5 }));

            Assert.That(Math.Abs(result.MinimizingPoint[0] - RosenbrockFunction.Minimum[0]), Is.LessThan(1e-3));
            Assert.That(Math.Abs(result.MinimizingPoint[1] - RosenbrockFunction.Minimum[1]), Is.LessThan(1e-3));
        }
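Both tests configure the minimizer the same way. Going by the Math.NET Numerics sources, the five arguments of LimitedMemoryBfgsMinimizer appear to be, in order, the gradient tolerance, the parameter tolerance, the function-progress tolerance, the number of correction pairs kept in memory, and the maximum iteration count. A minimal standalone sketch (the quadratic objective is illustrative, not part of the test suite):

        using System;
        using MathNet.Numerics.LinearAlgebra;
        using MathNet.Numerics.LinearAlgebra.Double;
        using MathNet.Numerics.Optimization;

        static class LbfgsSketch
        {
            static void Main()
            {
                // f(x, y) = (x - 3)^2 + (y + 1)^2 has its minimum at (3, -1)
                Func<Vector<double>, double> f =
                    p => Math.Pow(p[0] - 3.0, 2) + Math.Pow(p[1] + 1.0, 2);
                Func<Vector<double>, Vector<double>> grad =
                    p => DenseVector.OfArray(new[] { 2.0 * (p[0] - 3.0), 2.0 * (p[1] + 1.0) });

                var obj    = ObjectiveFunction.Gradient(f, grad);
                var solver = new LimitedMemoryBfgsMinimizer(1e-5, 1e-5, 1e-5, 5, 1000);
                var result = solver.FindMinimum(obj, DenseVector.OfArray(new[] { 0.0, 0.0 }));

                Console.WriteLine(result.MinimizingPoint); // ~ (3, -1)
            }
        }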
Example #3
        public void Thurber_LBfgs_Dif()
        {
            var obj    = ObjectiveFunction.NonlinearFunction(ThurberModel, ThurberX, ThurberY, accuracyOrder: 6);
            var solver = new LimitedMemoryBfgsMinimizer(1e-10, 1e-10, 1e-10, 1000);
            var result = solver.FindMinimum(obj, ThurberStart);

            for (int i = 0; i < result.MinimizingPoint.Count; i++)
            {
                AssertHelpers.AlmostEqualRelative(ThurberPbest[i], result.MinimizingPoint[i], 6);
            }
        }
Example #4
        public void Rat43_LBfgs_Dif()
        {
            var obj    = ObjectiveFunction.NonlinearFunction(Rat43Model, Rat43X, Rat43Y, accuracyOrder: 6);
            var solver = new LimitedMemoryBfgsMinimizer(1e-10, 1e-10, 1e-10, 1000);
            var result = solver.FindMinimum(obj, Rat43Start2);

            for (int i = 0; i < result.MinimizingPoint.Count; i++)
            {
                AssertHelpers.AlmostEqualRelative(Rat43Pbest[i], result.MinimizingPoint[i], 2);
            }
        }
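The "_Dif" suffix in these two tests indicates that no analytic gradient is supplied: ObjectiveFunction.NonlinearFunction builds a sum-of-squared-residuals objective from the model and data, and accuracyOrder: 6 presumably selects a higher-order finite-difference stencil for the numerical derivatives. A sketch with a made-up exponential model (only the API shape follows the tests; the model, data, and names are illustrative):

        using MathNet.Numerics.LinearAlgebra;
        using MathNet.Numerics.LinearAlgebra.Double;
        using MathNet.Numerics.Optimization;

        static class CurveFitSketch
        {
            // model y = p0 * exp(p1 * x), evaluated at every point of x
            static Vector<double> Model(Vector<double> p, Vector<double> x)
                => x.Map(t => p[0] * System.Math.Exp(p[1] * t));

            static void Main()
            {
                var x = DenseVector.OfArray(new[] { 0.0, 1.0, 2.0, 3.0 });
                var y = DenseVector.OfArray(new[] { 1.0, 2.7, 7.4, 20.1 }); // roughly e^x

                var obj    = ObjectiveFunction.NonlinearFunction(Model, x, y, accuracyOrder: 6);
                var solver = new LimitedMemoryBfgsMinimizer(1e-10, 1e-10, 1e-10, 1000);
                var result = solver.FindMinimum(obj, DenseVector.OfArray(new[] { 0.5, 0.5 }));

                System.Console.WriteLine(result.MinimizingPoint); // ~ (1, 1)
            }
        }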
Example #5
        public void Mgh_Tests(TestFunctions.TestCase test_case)
        {
            var obj    = new MghObjectiveFunction(test_case.Function, true, true);
            var solver = new LimitedMemoryBfgsMinimizer(1e-8, 1e-8, 1e-8, 5, 1000);

            var result = solver.FindMinimum(obj, test_case.InitialGuess);

            if (test_case.MinimizingPoint != null)
            {
                Assert.That((result.MinimizingPoint - test_case.MinimizingPoint).L2Norm(), Is.LessThan(1e-3));
            }

            var val1    = result.FunctionInfoAtMinimum.Value;
            var val2    = test_case.MinimalValue;
            var abs_min = Math.Min(Math.Abs(val1), Math.Abs(val2));
            var abs_err = Math.Abs(val1 - val2);
            var rel_err = abs_err / abs_min;
            var success = (abs_min <= 1 && abs_err < 1e-3) || (abs_min > 1 && rel_err < 1e-3);

            Assert.That(success, "Minimal function value is not as expected.");
        }
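The acceptance check mixes absolute and relative error: near zero a relative error is uninformative, so the test falls back to absolute error whenever the smaller of the two magnitudes is at most 1. Pulled out as a standalone helper (a sketch, not part of the test suite):

        // true when val1 and val2 agree to tol: absolutely near zero, relatively otherwise
        static bool AgreesWithin(double val1, double val2, double tol = 1e-3)
        {
            double absMin = Math.Min(Math.Abs(val1), Math.Abs(val2));
            double absErr = Math.Abs(val1 - val2);
            return absMin <= 1 ? absErr < tol : absErr / absMin < tol;
        }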
Example #6
        static void Main(string[] args)
        {
            if (!System.Console.IsOutputRedirected)
            {
                System.Console.Clear();
            }

            CultureInfo.CurrentCulture = CultureInfo.CreateSpecificCulture("en-US");

            System.Console.WriteLine("Multi-class Classification and Neural Networks ex.4");
            System.Console.WriteLine("===================================================\n");

            var M = Matrix<double>.Build;
            var V = Vector<double>.Build;

            // Setup the parameters you will use for this exercise
            int input_layer_size  = 400;  // 20x20 Input Images of Digits
            int hidden_layer_size = 25;   // 25 hidden units
            int num_labels        = 10;   // 10 labels, from 1 to 10
                                          // (note that we have mapped "0" to label 10)

            //  =========== Part 1: Loading and Visualizing Data =============
            //  We start the exercise by first loading and visualizing the dataset.
            //  You will be working with a dataset that contains handwritten digits.
            //

            // read all matrices of a file by name into a dictionary
            Dictionary<string, Matrix<double>> ms = MatlabReader.ReadAll<double>("data\\ex3data1.mat");

            Matrix<double> X = ms["X"];
            Vector<double> y = ms["y"].Column(0);

            // draw a random sequence of 100 indices
            var srs = new MathNet.Numerics.Random.SystemRandomSource();
            var seq = srs.NextInt32Sequence(0, 5000).Take(100).ToList();

            // Randomly select 100 data points to display
            Vector<double>[] sel = new Vector<double>[100];
            int idx = 0;

            foreach (int i in seq)
            {
                sel[idx++] = X.Row(i);
            }

            // display
            DisplayData(sel);

            Pause();

            // ================ Part 2: Loading Parameters ================
            // In this part of the exercise, we load some pre-initialized
            // neural network parameters.

            System.Console.WriteLine("\nLoading Saved Neural Network Parameters ...\n");

            // read all matrices of a file by name into a dictionary
            Dictionary<string, Matrix<double>> mr = MatlabReader.ReadAll<double>("data\\ex3weights.mat");

            Matrix<double> theta1 = mr["Theta1"];      // 25 X 401
            Matrix<double> theta2 = mr["Theta2"];      // 10 X 26

            // Unroll parameters
            Vector<double> nn_params = NeuralNetwork.UnrollParameters(theta1, theta2);

            Pause();

            //  ================ Part 3: Compute Cost (Feedforward) ================
            //  For the neural network, you should first implement the feedforward
            //  part that returns the cost only (the code in nnCostFunction.m in the
            //  original exercise). After implementing the feedforward pass to compute
            //  the cost, you can verify that your implementation is correct by
            //  checking that you get the same cost as ours for the fixed debugging
            //  parameters.
            //
            //  We suggest implementing the feedforward cost *without* regularization
            //  first so that it will be easier for you to debug. Later, in part 4, you
            //  will get to implement the regularized cost.


            System.Console.WriteLine("\nFeedforward Using Neural Network ...\n");

            // Weight regularization parameter (we set this to 0 here).

            NeuralNetwork nn = new NeuralNetwork(X, y, input_layer_size, hidden_layer_size, num_labels);

            nn.Lambda = 0.0;
            double J = nn.Cost(nn_params);

            System.Console.WriteLine("Cost at parameters (loaded from ex4weights): {0:f6}\n(this value should be about 0.287629)\n", J);
            Pause();

            // =============== Part 4: Implement Regularization ===============
            // Once your cost function implementation is correct, you should now
            // continue to implement the regularization with the cost.
            //

            System.Console.WriteLine("\nChecking Cost Function (w/ Regularization) ... \n");

            // Weight regularization parameter (we set this to 1 here).
            nn.Lambda = 1.0;

            J = nn.Cost(nn_params);

            System.Console.WriteLine("Cost at parameters (loaded from ex4weights): {0:f6} \n(this value should be about 0.383770)\n", J);
            Pause();

            // ================ Part 5: Sigmoid Gradient  ================
            //  Before you start implementing the neural network, you will first
            //  implement the gradient for the sigmoid function. You should complete the
            //  code in the sigmoidGradient.m file.

            System.Console.WriteLine("\nEvaluating sigmoid gradient...\n");

            var g = nn.SigmoidGradient(V.DenseOfArray(new[] { -1.0, -0.5, 0, 0.5, 1 }));

            System.Console.WriteLine("Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n  ");
            System.Console.WriteLine("{0:f5} ", g);
            System.Console.WriteLine("\n\n");

            Pause();


            // ================ Part 6: Initializing Parameters ================
            // In this part of the exercise, you will start to implement a two-layer
            // neural network that classifies digits. You will start by implementing
            // a function to initialize the weights of the neural network
            // (randInitializeWeights.m)

            System.Console.WriteLine("\nInitializing Neural Network Parameters ...\n");

            Matrix<double> initial_Theta1 = RandInitializeWeights(input_layer_size + 1, hidden_layer_size);
            Matrix<double> initial_Theta2 = RandInitializeWeights(hidden_layer_size + 1, num_labels);
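            // RandInitializeWeights is expected to break symmetry by sampling each
            // weight uniformly from [-eps, eps]; the course suggests
            // eps = sqrt(6) / sqrt(L_in + L_out). A sketch (the returned shape is an
            // assumption; it must match what UnrollParameters expects):
            //
            //   static Matrix<double> RandInitializeWeights(int fanIn, int fanOut)
            //   {
            //       double eps = Math.Sqrt(6.0) / Math.Sqrt(fanIn + fanOut);
            //       return Matrix<double>.Build.Random(fanOut, fanIn,
            //           new MathNet.Numerics.Distributions.ContinuousUniform(-eps, eps));
            //   }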

            // Unroll parameters
            Vector<double> initial_nn_params = NeuralNetwork.UnrollParameters(initial_Theta1, initial_Theta2);

            Pause();

            // =============== Part 7: Implement Backpropagation ===============
            // Once your cost matches up with ours, you should proceed to implement the
            // backpropagation algorithm for the neural network. You should add to the
            // code you've written in nnCostFunction.m to return the partial
            // derivatives of the parameters.

            System.Console.WriteLine("\nChecking Backpropagation... \n");
            CheckGradient(0);
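            // CheckGradient presumably validates backpropagation against a central
            // finite-difference estimate on a small debug network:
            //
            //   dJ/dtheta_i  ~=  (J(theta + eps*e_i) - J(theta - eps*e_i)) / (2*eps)
            //
            // with eps on the order of 1e-4; the two gradients should agree to
            // several significant digits.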

            Pause();

            // =============== Part 8: Implement Regularization ===============
            //  Once your backpropagation implementation is correct, you should now
            //  continue to implement the regularization with the cost and gradient.

            System.Console.WriteLine("\nChecking Backpropagation (w/ Regularization) ... \n");

            //  Check gradients by running checkNNGradients
            double lambda = 3;

            CheckGradient(lambda);

            // Also output the costFunction debugging values
            nn.Lambda = lambda;
            double debug_J = nn.Cost(nn_params);

            System.Console.WriteLine("\n\nCost at (fixed) debugging parameters (w/ lambda = {0:f1}): {1:f6} " +
                                     "\n(for lambda = 3, this value should be about 0.576051)\n\n", lambda, debug_J);

            Pause();

            // =================== Part 9: Training NN ===================
            //  You have now implemented all the code necessary to train a neural
            //  network. Here we train it with an L-BFGS minimizer, which plays the
            //  role of Octave's "fmincg" (itself similar to "fminunc"). Recall that
            //  these advanced optimizers can train our cost functions efficiently as
            //  long as we provide them with the gradient computations.

            System.Console.WriteLine("\nTraining Neural Network... \n");

            //  After you have completed the assignment, change the MaxIter to a larger
            //  value to see how more training helps.
            int maxIter = 40;

            //  You should also try different values of lambda
            lambda    = 1;
            nn.Lambda = lambda;


            var obj    = ObjectiveFunction.Gradient(nn.Cost, nn.Gradient);
            var solver = new LimitedMemoryBfgsMinimizer(1e-5, 1e-5, 1e-5, 5, maxIter);
            var result = solver.FindMinimum(obj, initial_nn_params);

            System.Console.WriteLine("Reason For Exit: {0}", result.ReasonForExit);
            System.Console.WriteLine("Iterations: {0}", result.Iterations);
            System.Console.WriteLine("Cost: {0:e}", result.FunctionInfoAtMinimum.Value);


            Pause();

            // ================= Part 10: Implement Predict =================
            //  After training the neural network, we would like to use it to predict
            //  the labels. You will now implement the "predict" function to use the
            //  neural network to predict the labels of the training set. This lets
            //  you compute the training set accuracy.

            Vector<double> pred = nn.Predict(result.MinimizingPoint, X);
            Vector<double> comp = V.Dense(y.Count);

            for (int i = 0; i < y.Count; i++)
            {
                comp[i] = pred[i] == y[i] ? 1.0 : 0.0;
            }
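            // Equivalently, with System.Linq (a one-line alternative, not the
            // original code):
            //
            //   double accuracy2 = pred.Zip(y, (p, t) => p == t ? 1.0 : 0.0).Average() * 100;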


            double accuracy = comp.Mean() * 100;

            System.Console.WriteLine("\nTraining Set Accuracy: {0:f5}\n", accuracy);

            Pause();
        }