Example #1
        // Coefficients: build A and the right-hand side b from the augmented matrix and solve A*x = b with CG
        private Vector GetCoefficient()
        {
            var n = matrix.GetLength(0) - 1;
            var a = new double[n][];

            for (var i = 0; i < n; i++)
            {
                a[i] = new double[n];
            }
            for (var i = 0; i < n; i++)
            {
                for (var j = 0; j < n; j++)
                {
                    a[i][j] = matrix[i, j];
                }
            }
            var b = new double[n];

            for (var i = 0; i < n; i++)
            {
                b[i] = matrix[i, n];
            }

            var sys = new ConjugateGradient(new Matrix(a), new Matrix(b));

            return sys.Iteration();
        }
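A note on the layout GetCoefficient assumes, inferred from the indexing above rather than from any accompanying documentation: `matrix` is a square double[,] field whose first n rows hold the augmented system [A | b], with the right-hand side in the last column. A minimal hypothetical instance:

        // Illustration only; the 2x2 size and the values are made up.
        //   4*x0 + 1*x1 = 1
        //   1*x0 + 3*x1 = 2
        matrix = new double[3, 3]
        {
            { 4, 1, 1 },
            { 1, 3, 2 },
            { 0, 0, 0 },   // last row is never read, since n = GetLength(0) - 1
        };
        // GetCoefficient() then returns the CG solution of A*x = b for this system.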
Example #2
        public void TestRosenbrock()
        {
            Rosenbrock        cf    = new Rosenbrock();
            EndCriteria       ec    = new EndCriteria();
            ConjugateGradient optim = new ConjugateGradient(cf, ec);
            //  new SecantLineSearch(cf,ec));

            DoubleVector x0 = new DoubleVector(new double[5] {
                1.3, 0.7, 0.8, 1.9, 1.2
            });

            optim.Minimize(x0);

            //Console.WriteLine(optim.IterationVectors[0].ToString());
            //Console.WriteLine(optim.IterationVectors[1].ToString());
            //Console.WriteLine(optim.IterationVectors[2].ToString());

            //Console.WriteLine(optim.SolutionVector.ToString());

            Assert.AreEqual(optim.SolutionValue, 0.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[0], 1.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[1], 1.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[2], 1.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[3], 1.0, 0.2);
            Assert.AreEqual(optim.SolutionVector[4], 1.0, 0.4);
        }
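For reference, the optimum asserted above comes from the standard Rosenbrock test function; its definition is not shown in this snippet, so the usual textbook form is quoted here:

            // f(x) = Σ_{i=1}^{n-1} [ 100 * (x_{i+1} - x_i²)² + (1 - x_i)² ]
            // has its global minimum f = 0 at x = (1, 1, ..., 1), which is what the
            // SolutionValue and SolutionVector assertions check, within loose tolerances.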
Example #3
 /// <summary>
 ///   Constructs a new Conjugate Gradient learning algorithm.
 /// </summary>
 ///
 public ConjugateGradientHiddenLearning(HiddenConditionalRandomField <T> model)
     : base(model)
 {
     cg           = new ConjugateGradient(model.Function.Weights.Length);
     cg.Progress += new EventHandler <OptimizationProgressEventArgs>(cg_Progress);
     cg.Function  = base.Objective;
     cg.Gradient  = base.Gradient;
 }
Example #4
 /// <summary>
 ///   Constructs a new Conjugate Gradient learning algorithm.
 /// </summary>
 ///
 public ConjugateGradientHiddenLearning(HiddenConditionalRandomField <T> model)
     : base(model)
 {
     cg           = new ConjugateGradient(model.Function.Weights.Length);
     cg.Tolerance = 1e-3;
     cg.Function  = base.Objective;
     cg.Gradient  = base.Gradient;
 }
Example #5
        /// <summary>
        /// Inheritors of this class should create the optimization algorithm in this
        /// method, using the current <see cref="P:Accord.Statistics.Models.Fields.Learning.BaseHiddenGradientOptimizationLearning`2.MaxIterations" /> and <see cref="P:Accord.Statistics.Models.Fields.Learning.BaseHiddenGradientOptimizationLearning`2.Tolerance" />
        /// settings.
        /// </summary>
        /// <returns>ConjugateGradient.</returns>
        protected override ConjugateGradient CreateOptimizer()
        {
            var cg = new ConjugateGradient(Model.Function.Weights.Length)
            {
                Tolerance     = Tolerance,
                MaxIterations = MaxIterations,
            };

            cg.Progress += new EventHandler <OptimizationProgressEventArgs>(progressChanged);
            return cg;
        }
Example #6
        /// <summary>
        ///   Constructs a new Conjugate Gradient learning algorithm.
        /// </summary>
        ///
        public HiddenConjugateGradientLearning(HiddenConditionalRandomField <T> model)
        {
            Model = model;

            calculator = new ForwardBackwardGradient <T>(model);

            optimizer           = new ConjugateGradient(model.Function.Weights.Length);
            optimizer.Progress += new EventHandler <OptimizationProgressEventArgs>(progressChanged);
            optimizer.Function  = calculator.Objective;
            optimizer.Gradient  = calculator.Gradient;
        }
Example #7
        public static void Run(string meshPath, string finiteElementType, double accuracy)
        {
            var totalTimer     = StartMeasuringTaskTime("Total");
            var readInputTimer = StartMeasuringTaskTime("Read input files");
            var mesh           = Mesh.ReadFromFile($"{meshPath}.mesh");

            ShowMeshParameters(mesh);
            StopAndShowTaskTime(readInputTimer);

            var calculationTimer = StartMeasuringTaskTime("Calculation");

            var feSpace    = CreateFiniteElementSpace(finiteElementType, mesh);
            var g          = new LambdaVectorField(v => 0);
            var conditions = new Dictionary <int, IVectorField>()
            {
                [1] = g, [2] = g, [3] = g, [4] = g
            };

            int    stepCount = 20;
            double t         = 0,
                   dt        = 1.0 / stepCount;
            var bilinearForm = new BilinearForm(
                (u, v, du, dv) => dt * Vector2.Dot(du, dv) + u * v);
            var solver = new ConjugateGradient(accuracy);

            IVectorField previous = new LambdaVectorField((x, y) => Sin(PI * x) * Sin(PI * y));

            for (int i = 0; i < stepCount; i++)
            {
                t += dt;
                Func <Vector2, double> f = v => (1 + 2 * PI * PI) * Exp(t) * Sin(PI * v.x) * Sin(PI * v.y);
                var rhs             = new LambdaVectorField(v => previous.GetValueAt(v, 0) + dt * f(v));
                var laplaceEquation = new Problem(feSpace, conditions, bilinearForm, rhs, solver);
                previous = laplaceEquation.Solve();
            }

            StopAndShowTaskTime(calculationTimer);
            var errorCalculationTimer = StartMeasuringTaskTime("Error calculation");

            var solution = (FiniteElementVectorField)previous;
            Func <double, double, double> uExact = (x, y) => Exp(t) * Sin(PI * x) * Sin(PI * y);
            var error = CalculateError(feSpace, uExact, solution);

            Console.WriteLine($"L2 Error = {error}");

            StopAndShowTaskTime(errorCalculationTimer);

            var outputTimer = StartMeasuringTaskTime("Output");

            InOut.WriteSolutionToFile($"{meshPath}.sol", mesh, solution);

            StopAndShowTaskTime(outputTimer);
            StopAndShowTaskTime(totalTimer);
        }
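The loop above appears to be a backward Euler time discretization of the heat equation; spelled out in the weak form that the assembled objects correspond to (a reading of the code, not taken from library documentation):

            // For u_t - Δu = f with homogeneous Dirichlet data, backward Euler with
            // step dt requires, at each step k -> k+1,
            //     ∫ ( dt * ∇u_{k+1} · ∇v + u_{k+1} * v )  =  ∫ ( u_k + dt * f(t_{k+1}) ) * v
            // which is exactly the BilinearForm (dt * Dot(du, dv) + u * v) and the
            // right-hand side (previous + dt * f) assembled inside the loop. The final
            // error is measured against the manufactured solution
            // u(x, y, t) = e^t * sin(πx) * sin(πy), whose f is the one defined in the loop.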
Example #8
        public void TrainBackPropagation(double[] features, int[] classes, int iterations)
        {
            training_features = Matrix.FromDoubleArray(features, input_layer);
            training_classes  = Matrix.Unroll(classes, output_layer);

            ConjugateGradient cg = new ConjugateGradient(
                ((input_layer + 1) * hidden_layer) + ((hidden_layer + 1) * output_layer),
                CostFunction, Gradient);

            cg.MaxIterations = iterations;
            cg.Progress     += ConjugateDescentProgress;
            cg.Minimize();
            double[] solution = cg.Solution;

            theta_1 = Matrix.FromDoubleArray(solution.Take((input_layer + 1) * hidden_layer).ToArray(), hidden_layer);
            theta_2 = Matrix.FromDoubleArray(solution.Skip((input_layer + 1) * hidden_layer).ToArray(), output_layer);
        }
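The first constructor argument is the total number of weights being optimized, i.e. both weight matrices unrolled into one vector. A quick check of that arithmetic with hypothetical layer sizes (the numbers are illustrative, not taken from the surrounding project):

            // Hypothetical sizes: input_layer = 400, hidden_layer = 25, output_layer = 10
            //   theta_1: (400 + 1) * 25 = 10025 parameters (the +1 is the bias term)
            //   theta_2: (25 + 1) * 10  =   260 parameters
            //   total vector length passed to ConjugateGradient: 10285
            // cg.Solution is then split with Take/Skip at the (input_layer + 1) * hidden_layer
            // boundary, exactly as in the last two lines of TrainBackPropagation.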
Example #9
        public static void Run(string meshPath, string finiteElementType, double accuracy)
        {
            var totalTimer     = StartMeasuringTaskTime("Total");
            var readInputTimer = StartMeasuringTaskTime("Read input files");
            var mesh           = Mesh.ReadFromFile($"{meshPath}.mesh");

            ShowMeshParameters(mesh);
            StopAndShowTaskTime(readInputTimer);

            var calculationTimer = StartMeasuringTaskTime("Calculation");

            var feSpace    = CreateFiniteElementSpace(finiteElementType, mesh);
            var g          = new LambdaVectorField(v => 0);
            var conditions = new Dictionary <int, IVectorField>()
            {
                [1] = g, [2] = g, [3] = g, [4] = g
            };

            var bilinearForm = new BilinearForm(
                (u, v, du, dv) => Vector2.Dot(du, dv) + u * v);
            var rhs = new LambdaVectorField((x, y) => (1 + 2 * PI * PI) * Sin(PI * x) * Sin(PI * y));

            var solver = new ConjugateGradient(accuracy);

            var poisson  = new Problem(feSpace, conditions, bilinearForm, rhs, solver);
            var solution = poisson.Solve();

            StopAndShowTaskTime(calculationTimer);
            var errorCalculationTimer = StartMeasuringTaskTime("Error calculation");

            Func <double, double, double> uExact = (x, y) => Sin(PI * x) * Sin(PI * y);
            var error = CalculateError(feSpace, uExact, solution);

            Console.WriteLine($"L2 Error = {error}");

            StopAndShowTaskTime(errorCalculationTimer);

            var outputTimer = StartMeasuringTaskTime("Output");

            InOut.WriteSolutionToFile($"{meshPath}.sol", mesh, solution);

            StopAndShowTaskTime(outputTimer);
            StopAndShowTaskTime(totalTimer);
        }
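A quick hand check of the manufactured solution behind the L2 error computed above:

            // With u(x, y) = sin(πx) * sin(πy):  -Δu = 2π² * u,
            // so the assembled problem  -Δu + u = f  (from Dot(du, dv) + u * v)
            // is satisfied exactly by  f = (1 + 2π²) * sin(πx) * sin(πy),
            // which is the right-hand side passed in as rhs. The reported L2 error
            // therefore measures only the discretization error.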
Example #10
        public void MinimizeTest2()
        {
            Func <double[], double>   f = BroydenFletcherGoldfarbShannoTest.rosenbrockFunction;
            Func <double[], double[]> g = BroydenFletcherGoldfarbShannoTest.rosenbrockGradient;

            Assert.AreEqual(104, f(new[] { -1.0, 2.0 }));


            int n = 2; // number of variables

            double[] initial = { -1.2, 1 };

            ConjugateGradient cg = new ConjugateGradient(n, f, g);

            cg.Method = ConjugateGradientMethod.PolakRibiere;

            Assert.IsTrue(cg.Minimize(initial));
            double actual   = cg.Value;
            double expected = 0;

            Assert.AreEqual(expected, actual, 1e-6);

            double[] result = cg.Solution;

            Assert.AreEqual(125, cg.Evaluations);
            Assert.AreEqual(32, cg.Iterations);
            Assert.AreEqual(1.0, result[0], 1e-3);
            Assert.AreEqual(1.0, result[1], 1e-3);
            Assert.IsFalse(double.IsNaN(result[0]));
            Assert.IsFalse(double.IsNaN(result[1]));

            double y = f(result);

            double[] d = g(result);

            Assert.AreEqual(0.0, y, 1e-6);
            Assert.AreEqual(0.0, d[0], 1e-3);
            Assert.AreEqual(0.0, d[1], 1e-3);

            Assert.IsFalse(double.IsNaN(y));
            Assert.IsFalse(double.IsNaN(d[0]));
            Assert.IsFalse(double.IsNaN(d[1]));
        }
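The test pins the Polak-Ribière update; switching the same optimizer to a different update formula is a one-line change. A minimal sketch, reusing n, f and g from the test above and assuming the ConjugateGradientMethod enum also exposes a FletcherReeves member (as it does in recent Accord.NET versions):

            // Illustration only: rerun the same problem with a different beta formula.
            var cg2 = new ConjugateGradient(n, f, g)
            {
                Method = ConjugateGradientMethod.FletcherReeves
            };
            cg2.Minimize(new double[] { -1.2, 1 });
            // cg2.Solution should again land near (1, 1), though the exact
            // Evaluations/Iterations counts asserted above are specific to Polak-Ribière.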
Example #11
        public void TestRosenbrock()
        {
            var cf    = new Rosenbrock();
            var ec    = new EndCriteria();
            var optim = new ConjugateGradient(cf, ec);
            //  new SecantLineSearch(cf,ec));

            var x0 = new DoubleVector(new double[5] {
                1.3, 0.7, 0.8, 1.9, 1.2
            });

            optim.Minimize(x0);

            Assert.AreEqual(optim.SolutionValue, 0.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[0], 1.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[1], 1.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[2], 1.0, 0.1);
            Assert.AreEqual(optim.SolutionVector[3], 1.0, 0.2);
            Assert.AreEqual(optim.SolutionVector[4], 1.0, 0.4);
        }
Example #12
        public void ConstructorTest2()
        {
            Accord.Math.Tools.SetupGenerator(0);

            var function = new NonlinearObjectiveFunction(2,
                                                          function: x => x[0] * x[1],
                                                          gradient: x => new[] { x[1], x[0] });

            NonlinearConstraint[] constraints =
            {
                new NonlinearConstraint(function,
                                        function: x => 1.0 - x[0] * x[0] - x[1] * x[1],
                                        gradient: x => new [] { -2 * x[0],         -2 * x[1] }),
                new NonlinearConstraint(function,
                                        function: x => x[0],
                                        gradient: x => new [] { 1.0,                     0.0 }),
            };

            var target = new ConjugateGradient(2);

            target.Tolerance = 0;
            AugmentedLagrangian solver = new AugmentedLagrangian(target, function, constraints);

            Assert.IsTrue(solver.Minimize());
            double minimum = solver.Value;

            double[] solution = solver.Solution;

            double sqrthalf = Math.Sqrt(0.5);

            Assert.AreEqual(-0.5, minimum, 1e-5);
            Assert.AreEqual(sqrthalf, solution[0], 1e-5);
            Assert.AreEqual(-sqrthalf, solution[1], 1e-5);

            double expectedMinimum = function.Function(solver.Solution);

            Assert.AreEqual(expectedMinimum, minimum);
        }
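A hand check of the assertions above, reading the two constraints as 1 - x² - y² ≥ 0 and x ≥ 0:

            // At x = √0.5, y = -√0.5 the objective x*y equals -0.5 and x² + y² = 1,
            // so the point sits on the boundary of the unit disk with x > 0,
            // consistent with the expected minimum of -0.5 and the asserted solution.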
Example #13
        public void AugmentedLagrangianSolverConstructorTest6()
        {
            // maximize 2x + 3y, s.t. 2x² + 2y² <= 50 and x+y = 1

            // Max x' * c
            //  x

            // s.t. x' * A * x <= k
            //      x' * i     = 1
            // lower_bound < x < upper_bound

            double[] c = { 2, 3 };
            double[,] A = { { 2, 0 }, { 0, 2 } };
            double k = 50;

            // Create the objective function
            var objective = new NonlinearObjectiveFunction(2,
                                                           function: (x) => x.InnerProduct(c),
                                                           gradient: (x) => c
                                                           );

            // Test objective
            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 10; j++)
                {
                    double expected = i * 2 + j * 3;
                    double actual   = objective.Function(new double[] { i, j });
                    Assert.AreEqual(expected, actual);
                }
            }


            // Create the optimization constraints
            var constraints = new List <NonlinearConstraint>();

            constraints.Add(new QuadraticConstraint(objective,
                                                    quadraticTerms: A,
                                                    shouldBe: ConstraintType.LesserThanOrEqualTo, value: k
                                                    ));

            constraints.Add(new NonlinearConstraint(objective,
                                                    function: (x) => x.Sum(),
                                                    gradient: (x) => new[] { 1.0, 1.0 },
                                                    shouldBe: ConstraintType.EqualTo, value: 1,
                                                    withinTolerance: 1e-10
                                                    ));


            // Test first constraint
            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 10; j++)
                {
                    double expected = i * (2 * i + 0 * j) + j * (0 * i + 2 * j);
                    double actual   = constraints[0].Function(new double[] { i, j });
                    Assert.AreEqual(expected, actual);
                }
            }


            // Test second constraint
            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 10; j++)
                {
                    double expected = i + j;
                    double actual   = constraints[1].Function(new double[] { i, j });
                    Assert.AreEqual(expected, actual);
                }
            }

            // Create the solver algorithm
            var inner = new ConjugateGradient(2);

            AugmentedLagrangianSolver solver =
                new AugmentedLagrangianSolver(inner, constraints);

            double maxValue = solver.Maximize(objective);

            Assert.AreEqual(6, maxValue, 0.01);
            Assert.AreEqual(-3, solver.Solution[0], 0.01);
            Assert.AreEqual(4, solver.Solution[1], 0.01);
        }
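And the corresponding hand check for the maximization problem above:

            // At the asserted optimum (x, y) = (-3, 4):
            //   quadratic constraint: 2*(-3)² + 2*4² = 18 + 32 = 50  (exactly at the bound)
            //   equality constraint:  -3 + 4 = 1
            //   objective:            2*(-3) + 3*4 = 6,  the asserted maximum.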
Example #14
 internal static global::System.Runtime.InteropServices.HandleRef getCPtr(ConjugateGradient obj)
 {
     return (obj == null) ? new global::System.Runtime.InteropServices.HandleRef(null, global::System.IntPtr.Zero) : obj.swigCPtr;
 }
Example #15
    private static void test11()

    //****************************************************************************80
    //
    //  Purpose:
    //
    //    TEST11 assembles, factors and solves using WATHEN_ST + CG_ST.
    //
    //  Licensing:
    //
    //    This code is distributed under the GNU LGPL license.
    //
    //  Modified:
    //
    //    08 June 2014
    //
    //  Author:
    //
    //    John Burkardt
    //
    {
        int i;

        Console.WriteLine("");
        Console.WriteLine("TEST11");
        Console.WriteLine("  Assemble, factor and solve a Wathen system");
        Console.WriteLine("  defined by WATHEN_ST and CG_ST.");
        Console.WriteLine("");

        const int nx = 1;
        const int ny = 1;

        Console.WriteLine("  Elements in X direction NX = " + nx + "");
        Console.WriteLine("  Elements in Y direction NY = " + ny + "");
        Console.WriteLine("  Number of elements = " + nx * ny + "");
        //
        //  Compute the number of unknowns.
        //
        int n = WathenMatrix.wathen_order(nx, ny);

        Console.WriteLine("  Number of nodes N = " + n + "");
        //
        //  Set up a random solution X1.
        //
        int seed = 123456789;

        double[] x1 = UniformRNG.r8vec_uniform_01_new(n, ref seed);
        //
        //  Compute the matrix size.
        //
        int nz_num = WathenMatrix.wathen_st_size(nx, ny);

        Console.WriteLine("  Number of nonzeros NZ_NUM = " + nz_num + "");
        //
        //  Compute the matrix.
        //
        seed = 123456789;
        int[]    row = new int[nz_num];
        int[]    col = new int[nz_num];
        double[] a   = WathenMatrix.wathen_st(nx, ny, nz_num, ref seed, ref row, ref col);
        //
        //  Compute the corresponding right hand side B.
        //
        double[] b = MatbyVector.mv_st(n, n, nz_num, row, col, a, x1);
        //
        //  Solve the linear system.
        //
        double[] x2 = new double[n];
        for (i = 0; i < n; i++)
        {
            x2[i] = 1.0;
        }

        ConjugateGradient.cg_st(n, nz_num, row, col, a, b, ref x2);
        //
        //  Compute the maximum solution error.
        //
        double e = typeMethods.r8vec_diff_norm_li(n, x1, x2);

        Console.WriteLine("  Maximum solution error is " + e + "");
    }
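For orientation, the _ST suffix refers to the sparse triplet storage that this test builds (an inference from the arrays involved):

        // The matrix is held as nz_num triplets (row[k], col[k], a[k]); mv_st multiplies
        // such a triplet matrix by x1 to manufacture b, and cg_st runs CG directly on the
        // triplet data, so x2 should recover x1 up to the reported maximum error e.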
Example #16
    public static IEnumerable <double> Conj(int[,] d, Vector2[] positions, double eps = 0.0001, int maxIter = 1000)
    {
        int n = positions.Length;

        // first find the laplacian for the left hand side
        var laplacian_w = new double[n, n];

        Majorization.WeightLaplacian(d, laplacian_w, n);

        // cut out the first row and column
        var Lw = new double[n - 1, n - 1];

        for (int i = 1; i < n; i++)
        {
            for (int j = 1; j < n; j++)
            {
                Lw[i - 1, j - 1] = laplacian_w[i, j];
            }
        }

        // delta = w_ij * d_ij as in Majorization, Gansner et al.
        var deltas = new double[n, n];

        for (int i = 0; i < n; i++)
        {
            for (int j = 0; j < n; j++)
            {
                double dist = d[i, j];
                deltas[i, j] = 1.0 / dist;
            }
        }

        var LXt    = new double[n, n];  // the laplacian for the right hand side
        var LXt_Xt = new double[n - 1]; // skip the first position as it's fixed to (0,0)
        var Xt1    = new double[n - 1]; // X(t+1)

        // temporary variables to speed up conjugate gradient
        var r  = new double[n - 1];
        var p  = new double[n - 1];
        var Ap = new double[n - 1];

        double prevStress = GraphIO.CalculateStress(d, positions, n);

        // majorize
        for (int k = 0; k < maxIter; k++)
        {
            PositionLaplacian(deltas, positions, LXt, n);

            // solve for x axis
            Multiply_x(LXt, positions, LXt_Xt);
            ConjugateGradient.Cg(Lw, Xt1, LXt_Xt, r, p, Ap, .1, 10);
            for (int i = 1; i < n; i++)
            {
                positions[i].x = Xt1[i - 1];
            }

            // solve for y axis
            Multiply_y(LXt, positions, LXt_Xt);
            ConjugateGradient.Cg(Lw, Xt1, LXt_Xt, r, p, Ap, .1, 10);
            for (int i = 1; i < n; i++)
            {
                positions[i].y = Xt1[i - 1];
            }

            double stress = GraphIO.CalculateStress(d, positions, n);
            yield return stress;

            if ((prevStress - stress) / prevStress < eps)
            {
                yield break;
            }
            prevStress = stress;
        }
    }
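A reading of the inner solver call used twice per iteration, inferred from the argument names and the surrounding comments rather than from separate documentation:

        // ConjugateGradient.Cg(Lw, Xt1, LXt_Xt, r, p, Ap, .1, 10) appears to solve
        //     Lw * Xt1 = LXt_Xt
        // in place, writing the result into Xt1, with r, p and Ap as preallocated
        // residual, search-direction and matrix-vector work buffers, a tolerance of
        // 0.1 and at most 10 CG iterations per majorization step. The loose tolerance
        // and small iteration cap suggest an intentionally approximate inner solve,
        // which is common in stress majorization, since each outer step re-linearizes
        // the stress around the current layout anyway.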
Example #17
    private static void test115()

    //****************************************************************************80
    //
    //  Purpose:
    //
    //    TEST115 assembles, factors and solves using WATHEN_GB and CG_GB.
    //
    //  Licensing:
    //
    //    This code is distributed under the GNU LGPL license.
    //
    //  Modified:
    //
    //    08 June 2014
    //
    //  Author:
    //
    //    John Burkardt
    //
    {
        int i;
        int md = 0;
        int ml = 0;
        int mu = 0;

        Console.WriteLine("");
        Console.WriteLine("TEST115");
        Console.WriteLine("  Assemble, factor and solve a Wathen system");
        Console.WriteLine("  using WATHEN_GB and CG_GB.");
        Console.WriteLine("");

        const int nx = 4;
        const int ny = 4;

        Console.WriteLine("  Elements in X direction NX = " + nx + "");
        Console.WriteLine("  Elements in Y direction NY = " + ny + "");
        Console.WriteLine("  Number of elements = " + nx * ny + "");
        //
        //  Compute the number of unknowns.
        //
        int n = WathenMatrix.wathen_order(nx, ny);

        Console.WriteLine("  Number of nodes N = " + n + "");
        //
        //  Compute the bandwidth.
        //
        WathenMatrix.wathen_bandwidth(nx, ny, ref ml, ref md, ref mu);
        Console.WriteLine("  Lower bandwidth ML = " + ml + "");
        Console.WriteLine("  Upper bandwidth MU = " + mu + "");
        //
        //  Set up a random solution X1.
        //
        int seed = 123456789;

        double[] x1 = UniformRNG.r8vec_uniform_01_new(n, ref seed);
        //
        //  Compute the matrix.
        //
        seed = 123456789;
        double[] a = WathenMatrix.wathen_gb(nx, ny, n, ref seed);
        //
        //  Compute the corresponding right hand side B.
        //
        double[] b = MatbyVector.mv_gb(n, n, ml, mu, a, x1);
        //
        //  Solve the linear system.
        //
        double[] x2 = new double[n];
        for (i = 0; i < n; i++)
        {
            x2[i] = 1.0;
        }

        ConjugateGradient.cg_gb(n, ml, mu, a, b, ref x2);
        //
        //  Compute the maximum solution error.
        //
        double e = typeMethods.r8vec_diff_norm_li(n, x1, x2);

        Console.WriteLine("  Maximum solution error is " + e + "");
    }
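Similarly, the _GB routines here work on general banded storage (again an inference from the bandwidth setup above):

        // wathen_bandwidth reports the lower/upper bandwidths ML and MU, wathen_gb packs the
        // matrix into banded form, mv_gb multiplies the banded matrix by x1 to build b, and
        // cg_gb solves the banded system, so x2 should again match x1 to within e.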
Example #18
 internal static global::System.Runtime.InteropServices.HandleRef getCPtr(ConjugateGradient obj) {
   return (obj == null) ? new global::System.Runtime.InteropServices.HandleRef(null, global::System.IntPtr.Zero) : obj.swigCPtr;
 }
Example #19
        private static void TestGmres()
        {
            var luSolver = new LUSolver();

            var solver = new MINRES();
            var cg     = new ConjugateGradient();

            SparseMatrix A = new SparseMatrix(6, 6);

            A.Rows[0] = SparseVector.GetSparseElement(new double[] { 10.0, 0.0, 0.0, 0.0, 0.0, 0.0 });
            A.Rows[1] = SparseVector.GetSparseElement(new double[] { 0.0, 10.0, -3.0, -1.0, 0.0, 0.0 });
            A.Rows[2] = SparseVector.GetSparseElement(new double[] { 0.0, 0.0, 15.0, 0.0, 0.0, 0.0 });
            A.Rows[3] = SparseVector.GetSparseElement(new double[] { -2.0, 0.0, 0.0, 10.0, -1.0, 0.0 });
            A.Rows[4] = SparseVector.GetSparseElement(new double[] { -1.0, 0.0, 0.0, -5.0, 1.0, -3.0 });
            A.Rows[5] = SparseVector.GetSparseElement(new double[] { -1.0, -2.0, 0.0, 0.0, 0.0, 6.0 });

            double[] b = new double[] { 10.0, 7.0, 45.0, 33.0, -34.0, 31.0 };
            double[] x = new double[b.Length];

            for (int i = 0; i < x.Length; i++)
            {
                x[i] = 0.0;
            }

            // Symmetrize the system via the normal equations: (A^T * A) x = A^T * b
            SparseMatrix At = SparseMatrix.Transpose(A);
            //SparseMatrix AA = SparseMatrix.Square(At);
            SparseMatrix AA = SparseMatrix.Multiply(At, A);

            double[] Ab = SparseMatrix.Multiply(At, b);

            var cgout = solver.Solve(AA, Ab, x, 10);
            var out1  = solver.Solve(AA, Ab, x, 30000);

            var solver1 = new GMRES();

            var out2 = solver1.Solve(A, b, x, 30, 6);
            //var lout = luSolver.Solve(A, b, out bool valid);

            HouseholderQR hs = new HouseholderQR();

            SparseMatrix B = new SparseMatrix(5, 3);

            B.Rows[0] = SparseVector.GetSparseElement(new double[] { 12.0, -51.0, 4.0 });
            B.Rows[1] = SparseVector.GetSparseElement(new double[] { 6.0, 167.0, -68.0 });
            B.Rows[2] = SparseVector.GetSparseElement(new double[] { -4.0, 24.0, -41.0 });
            B.Rows[3] = SparseVector.GetSparseElement(new double[] { -1.0, 1.0, 0.0 });
            B.Rows[4] = SparseVector.GetSparseElement(new double[] { 2.0, 0.0, 3.0 });

            hs.Householder(B);
            hs.Solve(A, b);

            Lemke lm = new Lemke(SharpEngineMathUtility.Solver.SolverType.HouseHolderQR);

            SparseMatrix M = new SparseMatrix(3, 3);

            M.Rows[0] = SparseVector.GetSparseElement(new double[] { 21.0, 0.0, 0.0 });
            M.Rows[1] = SparseVector.GetSparseElement(new double[] { 28.0, 14.0, 0.0 });
            M.Rows[2] = SparseVector.GetSparseElement(new double[] { 24.0, 24.0, 12.0 });

            double[] q = new double[] { -1.0, -1.0, -1.0 };

            lm.Solve(M, q, 10);

            SparseMatrix M1 = new SparseMatrix(2, 2);

            M1.Rows[0] = SparseVector.GetSparseElement(new double[] { 2.0, 1.0 });
            M1.Rows[1] = SparseVector.GetSparseElement(new double[] { 1.0, 2.0 });

            double[] q1 = new double[] { -5.0, -6.0 };

            lm.Solve(M1, q1, 10);

            double[] q2 = new double[] { 1.0, 2.0 };

            lm.Solve(M1, q2, 10);

            SparseMatrix M3 = new SparseMatrix(3, 3);

            M3.Rows[0] = SparseVector.GetSparseElement(new double[] { 1.0, 0.0, 2.0 });
            M3.Rows[1] = SparseVector.GetSparseElement(new double[] { 3.0, 2.0, -1.0 });
            M3.Rows[2] = SparseVector.GetSparseElement(new double[] { -2.0, 1.0, 0.0 });

            double[] q3 = new double[] { -1.0, 2.0, -3.0 };

            lm.Solve(M3, q3, 10);

            SparseMatrix M4 = new SparseMatrix(3, 3);

            M4.Rows[0] = SparseVector.GetSparseElement(new double[] { 0.0, 0.0, 1.0 });
            M4.Rows[1] = SparseVector.GetSparseElement(new double[] { 0.0, 2.0, 1.0 });
            M4.Rows[2] = SparseVector.GetSparseElement(new double[] { -1.0, -1.0, 0.0 });

            double[] q4 = new double[] { -6.0, 0.0, 4.0 };

            lm.Solve(M4, q4, 10);

            SparseMatrix M5 = new SparseMatrix(3, 3);

            M5.Rows[0] = SparseVector.GetSparseElement(new double[] { 1.0, 2.0, 0.0 });
            M5.Rows[1] = SparseVector.GetSparseElement(new double[] { 0.0, 1.0, 2.0 });
            M5.Rows[2] = SparseVector.GetSparseElement(new double[] { 2.0, 0.0, 1.0 });

            double[] q5 = new double[] { -1.0, -1.0, -1.0 };

            lm.Solve(M5, q5, 10);

            SparseMatrix M6 = new SparseMatrix(4, 4);

            M6.Rows[0] = SparseVector.GetSparseElement(new double[] { 1.0, 1.0, 3.0, 4.0 });
            M6.Rows[1] = SparseVector.GetSparseElement(new double[] { 5.0, 3.0, 1.0, 1.0 });
            M6.Rows[2] = SparseVector.GetSparseElement(new double[] { 2.0, 1.0, 2.0, 2.0 });
            M6.Rows[3] = SparseVector.GetSparseElement(new double[] { 1.0, 4.0, 1.0, 1.0 });

            double[] q6 = new double[] { -1.0, 2.0, 1.0, 3.0 };

            lm.Solve(M6, q6, 10);

            SparseMatrix M8 = new SparseMatrix(6, 6);

            M8.Rows[0] = SparseVector.GetSparseElement(new double[] { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 });
            M8.Rows[1] = SparseVector.GetSparseElement(new double[] { 2.0, 4.0, 8.0, 16.0, 32.0, 64.0 });
            M8.Rows[2] = SparseVector.GetSparseElement(new double[] { 3.0, 9.0, 27.0, 81.0, 243.0, 729.0 });
            M8.Rows[3] = SparseVector.GetSparseElement(new double[] { 4.0, 16.0, 64.0, 256.0, 1024.0, 4096.0 });
            M8.Rows[4] = SparseVector.GetSparseElement(new double[] { 5.0, 25.0, 125.0, 625.0, 3125.0, 15625.0 });
            M8.Rows[5] = SparseVector.GetSparseElement(new double[] { 6.0, 36.0, 216.0, 1296.0, 7776.0, 46656.0 });

            var ll1 = luSolver.Solve(M8, new double[6], out bool valid1);
        }