public void Rosenbrock()
        {
            // starting point for the search
            var initialTheta = Vector<double>.Build.DenseOfArray(new[] { -1D, 1D });

            // define the hypothesis with the default Rosenbrock parameters
            var rosenbrockParameter = Vector<double>.Build.DenseOfArray(new[] { 1D, 100D });

            var hypothesis = new RosenbrockHypothesis();

            // cost function is the function value itself
            var costFunction = new FunctionValueOptimization<double>(hypothesis, rosenbrockParameter);

            // define the optimization problem
            var problem = new OptimizationProblem<double, IDifferentiableCostFunction<double>>(costFunction, initialTheta);

            // optimize!
            var gd = new HagerZhangCG()
            {
                MaxIterations  = 10000,
                ErrorTolerance = 1E-8D
            };
            var result = gd.Minimize(problem);

            // assert!
            var coefficients = result.Coefficients;

            coefficients[0].Should().BeApproximately(rosenbrockParameter[0], 1E-5D, "because the Rosenbrock function has a minimum at x={0}, y={1}", rosenbrockParameter[0], Math.Sqrt(rosenbrockParameter[0]));
            coefficients[1].Should().BeApproximately(Math.Sqrt(rosenbrockParameter[0]), 1E-5D, "because the Rosenbrock function has a minimum at x={0}, y={1}", rosenbrockParameter[0], Math.Sqrt(rosenbrockParameter[0]));
        }
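
        // Hedged sketch (not the library's RosenbrockHypothesis): the assertions
        // above expect the minimum at x = a, y = sqrt(a), which matches the
        // parameterization f(x, y) = (a - x)^2 + b * (x - y^2)^2 rather than the
        // textbook form with its minimum at (a, a^2). The helpers below are
        // illustrative stand-ins under that assumption.
        private static double RosenbrockValue(double a, double b, double x, double y)
        {
            var u = a - x;     // first residual: distance of x from a
            var v = x - y * y; // second residual: coupling between x and y^2
            return u * u + b * v * v;
        }

        private static (double Dx, double Dy) RosenbrockGradient(double a, double b, double x, double y)
        {
            var v = x - y * y;
            return (-2D * (a - x) + 2D * b * v, // d f / d x
                    -4D * b * y * v);           // d f / d y; both vanish at (a, sqrt(a))
        }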
        public void FletcherReevesRosenbrock()
        {
            // starting point for the search
            var initialTheta = Vector<double>.Build.DenseOfArray(new[] { -1D, 1D });

            // define the hypothesis with the default Rosenbrock parameters
            var rosenbrockParameter = Vector<double>.Build.DenseOfArray(new[] { 1D, 100D });
            var hypothesis = new RosenbrockHypothesis();

            // cost function is the function value itself
            var costFunction = new FunctionValueOptimization<double>(hypothesis, rosenbrockParameter);

            // define the optimization problem
            var problem = new OptimizationProblem<double, IDifferentiableCostFunction<double>>(costFunction, initialTheta);

            // define the line search algorithm
            var lineSearch = new SecantMethod()
            {
                ErrorTolerance = 1E-6D
            };

            // optimize!
            var gd = new FletcherReevesCG(lineSearch)
            {
                MaxIterations = 10000,
                ErrorTolerance = 1E-8D
            };
            var result = gd.Minimize(problem);

            // assert!
            var coefficients = result.Coefficients;
            coefficients[0].Should().BeApproximately(rosenbrockParameter[0], 1E-5D, "because the Rosenbrock function has a minimum at x={0}, y={1}", rosenbrockParameter[0], Math.Sqrt(rosenbrockParameter[0]));
            coefficients[1].Should().BeApproximately(Math.Sqrt(rosenbrockParameter[0]), 1E-5D, "because the Rosenbrock function has a minimum at x={0}, y={1}", rosenbrockParameter[0], Math.Sqrt(rosenbrockParameter[0]));
        }
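
        // Hedged sketch of the direction update that distinguishes the CG variants
        // in these tests: Fletcher-Reeves and Polak-Ribiere differ only in the beta
        // used to mix the previous direction into the new one. Illustrative helpers
        // only; they are not part of the library under test.
        private static double FletcherReevesBeta(Vector<double> gNew, Vector<double> gOld)
        {
            // beta = ||g_new||^2 / ||g_old||^2
            return gNew.DotProduct(gNew) / gOld.DotProduct(gOld);
        }

        private static double PolakRibiereBeta(Vector<double> gNew, Vector<double> gOld)
        {
            // beta = g_new . (g_new - g_old) / ||g_old||^2, clamped at zero so the
            // resulting direction stays a descent direction
            return Math.Max(0D, gNew.DotProduct(gNew - gOld) / gOld.DotProduct(gOld));
        }

        private static Vector<double> NextSearchDirection(Vector<double> gNew, double beta, Vector<double> dOld)
        {
            // d_new = -g_new + beta * d_old
            return -gNew + beta * dOld;
        }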
        public void PolakRibiereRosenbrockParameterFitWithResidualSumOfSquares()
        {
            // the true parameters to recover; close to, but not exactly, the default Rosenbrock parameters
            var realTheta = Vector<double>.Build.DenseOfArray(new[] { 1D, 105D });

            var initialTheta = Vector<double>.Build.DenseOfArray(new[] { 2D, 200D });

            // define the hypothesis
            var hypothesis = new RosenbrockHypothesis();

            // define a probability distribution
            var distribution = new ContinuousUniform(-10D, 10D);

            // obtain the test data
            const int dataPoints = 10;
            var trainingSet = new List<DataPoint<double>>(dataPoints);

            for (int i = 0; i < dataPoints; ++i)
            {
                var inputs = Vector<double>.Build.Random(2, distribution);
                var output = hypothesis.Evaluate(realTheta, inputs);
                trainingSet.Add(new DataPoint<double>(inputs, output));
            }

            // cost function is sum of squared errors
            var costFunction = new ResidualSumOfSquaresCostFunction(hypothesis, trainingSet);

            // define the optimization problem
            var problem = new OptimizationProblem<double, IDifferentiableCostFunction<double>>(costFunction, initialTheta);

            // define the line search algorithm
            var lineSearch = new SecantMethod();

            // optimize!
            var gd = new PolakRibiereCG(lineSearch)
            {
                ErrorTolerance = 1E-8D
            };
            var result = gd.Minimize(problem);

            // assert!
            var coefficients = result.Coefficients;

            coefficients[0].Should().BeApproximately(realTheta[0], 1D, "because that's the function's [a] parameter");
            coefficients[1].Should().BeApproximately(realTheta[1], 1D, "because that's the function's [b] parameter");
        }
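
        // Hedged sketch of what a residual-sum-of-squares cost computes over the
        // training set built above: J(theta) = sum_i (h(theta, x_i) - y_i)^2. The
        // outputs were generated from realTheta without noise, so J(realTheta) = 0,
        // which is why the fit can recover the parameters. Illustrative only; the
        // tuple shape here stands in for the library's DataPoint type.
        private static double ResidualSumOfSquares(
            Func<Vector<double>, Vector<double>, double> hypothesis,
            Vector<double> theta,
            IEnumerable<(Vector<double> Inputs, double Output)> trainingSet)
        {
            var sum = 0D;
            foreach (var point in trainingSet)
            {
                var residual = hypothesis(theta, point.Inputs) - point.Output;
                sum += residual * residual;
            }
            return sum;
        }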
        public void AlphaOfMinimizerIsFound()
        {
            var theta = Vector<double>.Build.DenseOfArray(new[] {1.0D, 100.0D});
            var x0 = Vector<double>.Build.DenseOfArray(new[] {-1.5D, 0.6D});
            var rosenbrock = new RosenbrockHypothesis();

            // evaluate the function value and gradient at the starting point
            var value = rosenbrock.Evaluate(theta, x0);
            var gradient = rosenbrock.Jacobian(theta, x0);

            // determine the initial search direction
            var direction = -gradient.Normalize(2);

            // create a wrapper cost function
            var wrapper = new FunctionValueOptimization<double>(rosenbrock, theta);

            // perform a line search
            var lineSearch = new HagerZhangLineSearch();
            var alpha = lineSearch.Minimize(wrapper, x0, direction, 0.0D);

            alpha.Should().BeApproximately(0.235552763819095D, 1E-5D, "because that is the alpha value of the minimum along the search direction");
        }
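
        // Hedged sketch of what the asserted alpha means: a line search minimizes
        // the one-dimensional restriction phi(alpha) = f(x0 + alpha * direction),
        // and the test above checks the minimizer of that restriction along the
        // normalized steepest-descent direction. Illustrative helper only.
        private static double Phi(
            Func<Vector<double>, double> f,
            Vector<double> x0,
            Vector<double> direction,
            double alpha)
        {
            // walk alpha units along the search direction and evaluate there
            return f(x0 + alpha * direction);
        }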
        public void RosenbrockParameterFitWithResidualSumOfSquares()
        {
            // the true parameters to recover; close to, but not exactly, the default Rosenbrock parameters
            var realTheta = Vector<double>.Build.DenseOfArray(new[] { 1D, 105D });
            var initialTheta = Vector<double>.Build.DenseOfArray(new[] { 2D, 200D });

            // define the hypothesis
            var hypothesis = new RosenbrockHypothesis();

            // define a probability distribution
            var distribution = new ContinuousUniform(-10D, 10D);

            // obtain the test data
            const int dataPoints = 10;
            var trainingSet = new List<DataPoint<double>>(dataPoints);
            for (int i = 0; i < dataPoints; ++i)
            {
                var inputs = Vector<double>.Build.Random(2, distribution);
                var output = hypothesis.Evaluate(realTheta, inputs);
                trainingSet.Add(new DataPoint<double>(inputs, output));
            }

            // cost function is sum of squared errors
            var costFunction = new ResidualSumOfSquaresCostFunction(hypothesis, trainingSet);

            // define the optimization problem
            var problem = new OptimizationProblem<double, IDifferentiableCostFunction<double>>(costFunction, initialTheta);

            // optimize!
            var gd = new ResilientErrorGD
            {
                ErrorTolerance = 0.0D // TODO: actually use it
            };
            var result = gd.Minimize(problem);

            // assert!
            var coefficients = result.Coefficients;
            coefficients[0].Should().BeApproximately(realTheta[0], 1D, "because that's the function's [a] parameter");
            coefficients[1].Should().BeApproximately(realTheta[1], 1D, "because that's the function's [b] parameter");
        }
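
        // Hedged sketch, inferred from the class name only: ResilientErrorGD
        // plausibly performs Rprop-style updates, where each coordinate keeps its
        // own step size that grows while the gradient sign is stable and shrinks
        // after a sign flip, and only the sign of the gradient is used. Not the
        // library's verified algorithm; etaPlus/etaMinus are the usual Rprop defaults.
        private static void RpropStep(
            Vector<double> theta,
            Vector<double> gradient,
            Vector<double> previousGradient,
            Vector<double> stepSizes,
            double etaPlus = 1.2D,
            double etaMinus = 0.5D)
        {
            for (int i = 0; i < theta.Count; ++i)
            {
                var signChange = gradient[i] * previousGradient[i];
                if (signChange > 0D) stepSizes[i] *= etaPlus;       // same sign: accelerate
                else if (signChange < 0D) stepSizes[i] *= etaMinus; // sign flip: back off
                theta[i] -= Math.Sign(gradient[i]) * stepSizes[i];  // move by sign only
            }
        }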