Example #1
        public void EigenSystem()
        {
            //double[,] A =
            //{
            //    {1, 2, 3},
            //    {6, 2, 0},
            //    {0, 0, 1}
            //};

            double[,] A =
            {
                { -1, -6 },
                {  2,  6 }
            };

            // B is left commented out like the alternative A above: its
            // 3x3 size would not match the 2x2 matrix A used here.
            //double[,] B =
            //{
            //    { 2, 0, 0 },
            //    { 0, 2, 0 },
            //    { 0, 0, 2 }
            //};

            // The identity matrix must match A's size (2x2):
            var eig = new GeneralizedEigenvalueDecomposition(A, Matrix.Identity(2));

            var V = eig.Eigenvectors;
            var D = eig.DiagonalMatrix;
        }
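
When B is the identity, det(A − λB) reduces to det(A − λI), so the generalized decomposition coincides with the ordinary eigendecomposition. A minimal sketch of that sanity check (not part of the original sample; it assumes Accord.NET's EigenvalueDecomposition class is available alongside the classes used above):

        public void CompareWithStandardEigendecomposition()
        {
            double[,] A =
            {
                { -1, -6 },
                {  2,  6 }
            };

            // Generalized problem with B = I versus the standard problem:
            var gevd = new GeneralizedEigenvalueDecomposition(A, Matrix.Identity(2));
            var evd  = new EigenvalueDecomposition(A);

            // Both should yield the eigenvalues 2 and 3 (up to ordering),
            // since det(A - λI) = λ² - 5λ + 6 = (λ - 2)(λ - 3).
            double[] fromGevd = gevd.RealEigenvalues;
            double[] fromEvd  = evd.RealEigenvalues;
        }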
Example #2
        /// <summary>Generalized eigenvalue decomposition.</summary>
        protected static double[] eig(double[,] a, double[,] b, out double[,] V)
        {
            var eig = new GeneralizedEigenvalueDecomposition(a, b);

            V = eig.Eigenvectors;
            return eig.RealEigenvalues;
        }
Example #3
        /// <summary>Generalized eigenvalue decomposition.</summary>
        protected static double[] eig(double[,] a, double[,] b, out double[,] V, out double[] alphar, out double[] beta)
        {
            var eig = new GeneralizedEigenvalueDecomposition(a, b);

            V      = eig.Eigenvectors;
            beta   = eig.Betas;
            alphar = eig.RealAlphas;
            return eig.RealEigenvalues;
        }
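
The alphar and beta outputs expose the eigenvalues as the ratios the QZ algorithm actually computes: λᵢ = αᵢ / βᵢ, where a βᵢ near zero signals an infinite or indeterminate eigenvalue. A minimal sketch of that reconstruction (an assumption about the convention used by GeneralizedEigenvalueDecomposition, not part of the original helpers):

        protected static double[] EigenvaluesFromRatios(double[] alphar, double[] beta)
        {
            double[] lambda = new double[alphar.Length];
            for (int i = 0; i < alphar.Length; i++)
            {
                // May yield Infinity or NaN when beta[i] == 0.
                lambda[i] = alphar[i] / beta[i];
            }
            return lambda;
        }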
Example #4
        public void GeneralizedEigenvalueDecompositionConstructorTest()
        {
            // Suppose we have the following
            // matrices A and B shown below:

            double[,] A =
            {
                { 1, 2, 3 },
                { 8, 1, 4 },
                { 3, 2, 3 }
            };

            double[,] B =
            {
                { 5, 1, 1 },
                { 1, 5, 1 },
                { 1, 1, 5 }
            };

            // Now, suppose we would like to find values for λ
            // that are solutions for the equation det(A - λB) = 0

            // For this, we can use a Generalized Eigendecomposition
            var gevd = new GeneralizedEigenvalueDecomposition(A, B);

            // Now, if A and B are Hermitian and B is positive-
            // definite, then the eigenvalues λ will be real:
            double[] lambda = gevd.RealEigenvalues;

            // Check if they are indeed a solution:
            for (int i = 0; i < lambda.Length; i++)
            {
                // Compute the determinant equation shown above
                double det = Matrix.Determinant(A.Subtract(lambda[i].Multiply(B))); // almost zero

                Assert.IsTrue(Math.Abs(det) < 1e-6);
            }


            double[,] expectedVectors =
            {
                { 0.427490473174445, -0.459244062074000, -0.206685960405416 },
                {                 1,                  1,                 -1 },
                { 0.615202547759401, -0.152331764458173,  0.779372135871111 }
            };

            double[,] expectedValues =
            {
                { 1.13868666711946,                  0,                  0 },
                {                0, -0.748168231839396,                  0 },
                {                0,                  0, -0.104804149565775 }
            };


            Assert.IsTrue(Matrix.IsEqual(gevd.Eigenvectors, expectedVectors, 0.00000000001));
            Assert.IsTrue(Matrix.IsEqual(gevd.DiagonalMatrix, expectedValues, 0.00000000001));
        }
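
Besides checking det(A − λB) ≈ 0, the decomposition can be validated through its defining identity A·V = B·V·D, exactly as the ConstructorTest4 example below does. A minimal sketch:

        public static void VerifyDefiningIdentity(double[,] A, double[,] B)
        {
            var gevd = new GeneralizedEigenvalueDecomposition(A, B);

            // A*V = B*V*D should hold up to numerical tolerance.
            var AV  = A.Multiply(gevd.Eigenvectors);
            var BVD = B.Multiply(gevd.Eigenvectors).Multiply(gevd.DiagonalMatrix);
            Assert.IsTrue(Matrix.IsEqual(AV, BVD, 1e-6));
        }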
Example #5
        public void GeneralizedEigenvalueDecompositionConstructorTest4()
        {
            var A = new double[3, 3];

            A[0, 0] = 2.6969840958234776;
            A[0, 1] = 3.0761868753825254;
            A[0, 2] = -1.9236284084262458;
            A[1, 0] = -0.09975623250927601;
            A[1, 1] = 3.1520214626342158;
            A[1, 2] = 2.3928828222643972;
            A[2, 0] = 5.2090689722490815;
            A[2, 1] = 2.32098631016956;
            A[2, 2] = 5.522974475996091;


            var B = new double[3, 3];

            B[0, 0] = -16.753710484948808;
            B[0, 1] = -14.715495544818925;
            B[0, 2] = -41.589502695291074;
            B[1, 0] = -31.78618974973736;
            B[1, 1] = -14.30788463834109;
            B[1, 2] = -18.388254830328865;
            B[2, 0] = -3.2512542741611838;
            B[2, 1] = -18.774698582838617;
            B[2, 2] = -1.5640121915210088;


            var gevd = new GeneralizedEigenvalueDecomposition(A, B);

            var V = gevd.Eigenvectors;
            var D = gevd.DiagonalMatrix;

            // A*V = B*V*D
            var AV  = A.Multiply(V);
            var BVD = B.Multiply(V).Multiply(D);

            Assert.IsTrue(Matrix.IsEqual(AV, BVD, 0.000001));

            double[,] expectedVectors =
            {
                {                   1,  -0.120763598920560, -0.636412048994645 },
                {  -0.942794724207834,                  -1, -0.363587951005355 },
                { -0.0572052757921662, -0.0606762790704327,                  1 },
            };

            double[,] expectedValues =
            {
                { 0.186046511627907,                  0,                 0 },
                {                 0, -0.170549605858232,                 0 },
                {                 0,                  0, 0.186046511627907 }
            };

            //Assert.IsTrue(Matrix.IsEqual(V, expectedVectors,0.001));
            Assert.IsTrue(Matrix.IsEqual(D, expectedValues, 0.00001));
        }
Example #6
        public void Test1()
        {
            double[,] A =
            {
                {  5, 6,  3 },
                { -1, 0,  1 },
                {  1, 2, -1 }
                //{ 0, 2 },
                //{ 3, 5 }
            };
            var ev = new GeneralizedEigenvalueDecomposition(A, Matrix.Identity(3));
        }
Example #7
        public void GeneralizedEigenvalueDecompositionConstructorTest2()
        {
            double[,] A = Matrix.Identity(100);
            double[,] B = Matrix.Identity(100);

            GeneralizedEigenvalueDecomposition gevd = new GeneralizedEigenvalueDecomposition(A, B);

            double[,] expectedVectors = Matrix.Identity(100);
            double[,] expectedValues  = Matrix.Identity(100);

            Assert.IsTrue(Matrix.IsEqual(gevd.Eigenvectors, expectedVectors));
            Assert.IsTrue(Matrix.IsEqual(gevd.DiagonalMatrix, expectedValues));
        }
Example #8
        private static GeneralizedEigenvalueDecomposition GetGeneralizedEigenvalueDecomposition(IMatrix<double> matrix)
        {
            double[,] m = new double[matrix.Size, matrix.Size];
            for (int i = 0; i < matrix.Size; i++)
            {
                for (int j = 0; j < matrix.Size; j++)
                {
                    m[i, j] = matrix[i, j];
                }
            }
            var decomposition = new GeneralizedEigenvalueDecomposition(m, Matrix.Identity(matrix.Size));

            return decomposition;
        }
Example #9
        public void GeneralizedEigenvalueDecompositionConstructorTest3()
        {
            for (int i = 0; i < 10000; i++)
            {
                for (int j = 1; j < 6; j++)
                {
                    var A = Matrix.Random(j, j, -1, 1);
                    var B = Matrix.Random(j, j, -1, 1);

                    var gevd = new GeneralizedEigenvalueDecomposition(A, B);

                    var V = gevd.Eigenvectors;
                    var D = gevd.DiagonalMatrix;

                    // A*V = B*V*D
                    var AV  = A.Multiply(V);
                    var BVD = B.Multiply(V).Multiply(D);

                    Assert.IsTrue(Matrix.IsEqual(AV, BVD, 0.0000001));
                }
            }

            for (int i = 0; i < 100; i++)
            {
                int j = 50;
                var A = Matrix.Random(j, j, -1, 1);
                var B = Matrix.Random(j, j, -1, 1);

                var gevd = new GeneralizedEigenvalueDecomposition(A, B);

                var V = gevd.Eigenvectors;
                var D = gevd.DiagonalMatrix;

                // A*V = B*V*D
                var AV  = A.Multiply(V);
                var BVD = B.Multiply(V).Multiply(D);

                Assert.IsTrue(Matrix.IsEqual(AV, BVD, 0.0000001));
            }
        }
Example #10
        public void GeneralizedEigenvalueDecompositionConstructorTest()
        {
            double[,] A =
            {
                { 1, 2, 3 },
                { 8, 1, 4 },
                { 3, 2, 3 }
            };

            double[,] B =
            {
                { 5, 1, 1 },
                { 1, 5, 1 },
                { 1, 1, 5 }
            };

            GeneralizedEigenvalueDecomposition gevd = new GeneralizedEigenvalueDecomposition(A, B);

            double[,] expectedVectors =
            {
                { 0.427490473174445, -0.459244062074000, -0.206685960405416 },
                {                 1,                  1,                 -1 },
                { 0.615202547759401, -0.152331764458173,  0.779372135871111 }
            };

            double[,] expectedValues =
            {
                { 1.13868666711946,                  0,                  0 },
                {                0, -0.748168231839396,                  0 },
                {                0,                  0, -0.104804149565775 }
            };


            Assert.IsTrue(Matrix.IsEqual(gevd.Eigenvectors, expectedVectors, 0.00000000001));
            Assert.IsTrue(Matrix.IsEqual(gevd.DiagonalMatrix, expectedValues, 0.00000000001));
        }
Example #11
        //---------------------------------------------


        #region Public Methods
        /// <summary>
        ///   Computes the Multi-Class Linear Discriminant Analysis algorithm.
        /// </summary>
        ///
        public virtual void Compute()
        {
            // Compute entire data set measures
            Means = Statistics.Tools.Mean(source);
            StandardDeviations = Statistics.Tools.StandardDeviation(source, Means);
            double total = dimension;

            // Initialize the scatter matrices
            this.Sw = new double[dimension, dimension];
            this.Sb = new double[dimension, dimension];


            // For each class
            for (int c = 0; c < Classes.Count; c++)
            {
                // Get the class subset
                double[,] subset = Classes[c].Subset;
                int count = subset.GetLength(0);

                // Get the class mean
                double[] mean = Statistics.Tools.Mean(subset);


                // Continue constructing the Within-Class Scatter Matrix
                double[,] Swi = Statistics.Tools.Scatter(subset, mean, (double)count);

                // Sw = Sw + Swi
                for (int i = 0; i < dimension; i++)
                {
                    for (int j = 0; j < dimension; j++)
                    {
                        Sw[i, j] += Swi[i, j];
                    }
                }


                // Continue constructing the Between-Class Scatter Matrix
                double[] d = mean.Subtract(totalMeans);
                double[,] Sbi = Matrix.OuterProduct(d, d).Multiply(total);

                // Sb = Sb + Sbi
                for (int i = 0; i < dimension; i++)
                {
                    for (int j = 0; j < dimension; j++)
                    {
                        Sb[i, j] += Sbi[i, j];
                    }
                }


                // Store some additional information
                this.classScatter[c] = Swi;
                this.classCount[c]   = count;
                this.classMeans[c]   = mean;
                this.classStdDevs[c] = Statistics.Tools.StandardDeviation(subset, mean);
            }


            // Compute the generalized eigenvalue decomposition
            GeneralizedEigenvalueDecomposition gevd = new GeneralizedEigenvalueDecomposition(Sb, Sw);

            // Get the eigenvalues and corresponding eigenvectors
            double[] evals = gevd.RealEigenvalues;
            double[,] eigs = gevd.Eigenvectors;

            // Sort eigenvalues and vectors in descending order
            eigs = Matrix.Sort(evals, eigs, new GeneralComparer(ComparerDirection.Descending, true));


            // Store information
            this.Eigenvalues        = evals;
            this.DiscriminantMatrix = eigs;

            // Create projections into latent space
            this.result = source.Multiply(eigs);


            // Compute feature space means for later classification
            for (int c = 0; c < Classes.Count; c++)
            {
                projectedMeans[c] = classMeans[c].Multiply(eigs);
            }


            // Computes additional information about the analysis and creates the
            //  object-oriented structure to hold the discriminants found.
            CreateDiscriminants();
        }
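
The core step above is the Fisher criterion: maximizing wᵀ·Sb·w / wᵀ·Sw·w leads to the generalized eigenproblem Sb·w = λ·Sw·w, whose leading eigenvectors are the most discriminative projection directions. A minimal standalone sketch of just that step, using only the calls already seen in this example:

        public static double[,] FisherDirections(double[,] Sb, double[,] Sw)
        {
            var gevd = new GeneralizedEigenvalueDecomposition(Sb, Sw);

            double[] evals = gevd.RealEigenvalues;
            double[,] eigs = gevd.Eigenvectors;

            // Sort by absolute eigenvalue, descending: the leading columns
            // carry the most between-class variance per unit of
            // within-class variance.
            return Matrix.Sort(evals, eigs,
                new GeneralComparer(ComparerDirection.Descending, true));
        }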
Example #12
        /// <summary>Calculates the Fredholm integral that provides the eigenvalues and eigenfunctions of the expansion.</summary>
        /// <param name="KarLoeveTerms">The number of Karhunen-Loève terms to retain.</param>
        /// <param name="domainBounds">The domain bounds.</param>
        /// <param name="sigmaSquare">The variance (sigma squared) of the random field.</param>
        /// <param name="partition">The number of nodal points in the partition.</param>
        /// <param name="correlationLength">The correlation length.</param>
        /// <returns>A tuple of the nodal coordinates, the retained eigenvalues, and the corresponding eigenvectors.</returns>
        public Tuple<double[], double[], double[,]> KarhunenLoeveFredholmWithFEM(int KarLoeveTerms, double[] domainBounds, double sigmaSquare, int partition, double correlationLength)
        {
            //FEM parameters
            int ned  = 1;         //number of dof per node
            int nen  = 2;         //number of nodes per element
            int nnp  = partition; //number of nodal points
            int nfe  = nnp - 1;   //number of finite elements
            int neq  = ned * nen; //number of element equations
            int ndof = ned * nnp; // number of degrees of freedom
            int GaussLegendreOrder = 3;

            double[] xCoordinates = new double[partition];
            int[,] IEN = new int[2, nfe];  //local to global
            int[] ID = new int[partition];

            for (int i = 0; i < partition; i++)
            {
                //LagrangianShapeFunctions();
                xCoordinates[i] = domainBounds[0] + (domainBounds[1] - domainBounds[0]) / nfe * i;
                ID[i]           = i;
            }

            for (int i = 0; i < nfe; i++)
            {
                IEN[0, i] = i;
                IEN[1, i] = i + 1;
            }

            // localization matrix
            int[,] LM = new int[partition - 1, 2];
            for (int i = 0; i < partition - 1; i++)
            {
                LM[i, 0] = i;
                LM[i, 1] = i + 1;
            }

            // computing B matrix
            var gaussLegendre = gauss_quad();
            double[] GaussLegendreCoordinates = gaussLegendre.Item1;
            double[] GaussLegendreWeights     = gaussLegendre.Item2;
            double[,] Bmatrix = new double[ndof, ndof];
            for (int i = 0; i < nfe; i++)
            {
                double[,] Be = new double[neq, neq];
                double det_Je = (xCoordinates[IEN[1, i]] - xCoordinates[IEN[0, i]]) / 2;
                for (int j = 0; j < GaussLegendreOrder; j++)
                {
                    double   xi_gl = GaussLegendreCoordinates[j];
                    double   w_gl  = GaussLegendreWeights[j];
                    double[] NN    = LagrangianShapeFunctions(xi_gl);
                    //Be=Be+NN'*NN*det_Je*w_gl(j)
                    Be[0, 0] = Be[0, 0] + NN[0] * NN[0] * det_Je * w_gl;
                    Be[1, 0] = Be[1, 0] + NN[0] * NN[1] * det_Je * w_gl;
                    Be[0, 1] = Be[0, 1] + NN[1] * NN[0] * det_Je * w_gl;
                    Be[1, 1] = Be[1, 1] + NN[1] * NN[1] * det_Je * w_gl;
                }
                Bmatrix[LM[i, 0], LM[i, 0]] = Bmatrix[LM[i, 0], LM[i, 0]] + Be[0, 0];
                Bmatrix[LM[i, 1], LM[i, 0]] = Bmatrix[LM[i, 1], LM[i, 0]] + Be[1, 0];
                Bmatrix[LM[i, 0], LM[i, 1]] = Bmatrix[LM[i, 0], LM[i, 1]] + Be[0, 1];
                Bmatrix[LM[i, 1], LM[i, 1]] = Bmatrix[LM[i, 1], LM[i, 1]] + Be[1, 1];
            }

            // computing C matrix
            double[,] Cmatrix = new double[ndof, ndof];
            for (int i = 0; i < nfe; i++)
            {
                double[] xe     = { xCoordinates[IEN[0, i]], xCoordinates[IEN[1, i]] };
                double   det_Je = (xCoordinates[IEN[1, i]] - xCoordinates[IEN[0, i]]) / 2;
                for (int j = 0; j < nfe; j++)
                {
                    double[,] Cef = new double[neq, neq];
                    double[] xf     = { xCoordinates[IEN[0, j]], xCoordinates[IEN[1, j]] };
                    double   det_Jf = (xCoordinates[IEN[1, j]] - xCoordinates[IEN[0, j]]) / 2;
                    for (int k = 0; k < GaussLegendreOrder; k++)
                    {
                        double   xi_gl_e = GaussLegendreCoordinates[k];
                        double[] NNe     = LagrangianShapeFunctions(xi_gl_e);
                        double   xpk     = NNe[0] * xe[0] + NNe[1] * xe[1];
                        for (int l = 0; l < GaussLegendreOrder; l++)
                        {
                            double   xi_gl_f = GaussLegendreCoordinates[l];
                            double[] NNf     = LagrangianShapeFunctions(xi_gl_f);
                            double   xpl     = NNf[0] * xf[0] + NNf[1] * xf[1];
                            //element C matrix
                            Cef[0, 0] = Cef[0, 0] + GaussianKernelCovarianceFunction(xpk, xpl, sigmaSquare, correlationLength) * NNe[0] * NNf[0] * det_Je * det_Jf * GaussLegendreWeights[k] * GaussLegendreWeights[l];
                            Cef[1, 0] = Cef[1, 0] + GaussianKernelCovarianceFunction(xpk, xpl, sigmaSquare, correlationLength) * NNe[0] * NNf[1] * det_Je * det_Jf * GaussLegendreWeights[k] * GaussLegendreWeights[l];
                            Cef[0, 1] = Cef[0, 1] + GaussianKernelCovarianceFunction(xpk, xpl, sigmaSquare, correlationLength) * NNe[1] * NNf[0] * det_Je * det_Jf * GaussLegendreWeights[k] * GaussLegendreWeights[l];
                            Cef[1, 1] = Cef[1, 1] + GaussianKernelCovarianceFunction(xpk, xpl, sigmaSquare, correlationLength) * NNe[1] * NNf[1] * det_Je * det_Jf * GaussLegendreWeights[k] * GaussLegendreWeights[l];
                        }
                    }
                    Cmatrix[LM[i, 0], LM[j, 0]] = Cmatrix[LM[i, 0], LM[j, 0]] + Cef[0, 0];
                    Cmatrix[LM[i, 0], LM[j, 1]] = Cmatrix[LM[i, 0], LM[j, 1]] + Cef[0, 1];
                    Cmatrix[LM[i, 1], LM[j, 0]] = Cmatrix[LM[i, 1], LM[j, 0]] + Cef[1, 0];
                    Cmatrix[LM[i, 1], LM[j, 1]] = Cmatrix[LM[i, 1], LM[j, 1]] + Cef[1, 1];
                }
            }
            bool sort = true;
            var  gevd = new GeneralizedEigenvalueDecomposition(Cmatrix, Bmatrix, sort);

            double[] lambdaAll = gevd.RealEigenvalues;
            double[] lambda    = lambdaAll.Take(KarLoeveTerms).ToArray();
            double[,] EigenvectorsAll = gevd.Eigenvectors;
            double[,] Eigenvectors    = new double[partition, KarLoeveTerms];
            for (int i = 0; i < partition; i++)
            {
                for (int j = 0; j < KarLoeveTerms; j++)
                {
                    Eigenvectors[i, j] = EigenvectorsAll[i, j];  //each column corresponds to an eigenvector
                }
            }
            return new Tuple<double[], double[], double[,]>(xCoordinates, lambda, Eigenvectors);
        }
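
The FEM assembly above is a Galerkin discretization of the Fredholm equation ∫ C(x, x′) φ(x′) dx′ = λ φ(x): the covariance operator becomes Cmatrix, the mass matrix Bmatrix plays the role of B, and the resulting generalized eigenproblem C·d = λ·B·d is handed to GeneralizedEigenvalueDecomposition. The covariance helper itself is not shown in the snippet; a plausible squared-exponential form it might take (an assumption, not the original implementation):

        private static double GaussianKernelCovarianceFunction(
            double x, double y, double sigmaSquare, double correlationLength)
        {
            // Assumed squared-exponential covariance:
            // C(x, y) = σ² · exp(-(x - y)² / ℓ²)
            double r = x - y;
            return sigmaSquare * Math.Exp(-(r * r) / (correlationLength * correlationLength));
        }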
Example #13
        //---------------------------------------------


        #region Public Methods
        /// <summary>
        ///   Computes the Multi-Class Kernel Discriminant Analysis algorithm.
        /// </summary>
        ///
        public override void Compute()
        {
            // Get some initial information
            int dimension = Source.GetLength(0);

            double[,] source = Source;
            double total = dimension;


            // Create the Gram (Kernel) Matrix
            double[,] K = new double[dimension, dimension];
            for (int i = 0; i < dimension; i++)
            {
                double[] row = source.GetRow(i);
                for (int j = i; j < dimension; j++)
                {
                    double s = kernel.Function(row, source.GetRow(j));
                    K[i, j] = s; // Assume K will be symmetric
                    K[j, i] = s;
                }
            }


            // Compute entire data set measures
            base.Means = Statistics.Tools.Mean(K);
            base.StandardDeviations = Statistics.Tools.StandardDeviation(K, Means);


            // Initialize the kernel analogous scatter matrices
            double[,] Sb = new double[dimension, dimension];
            double[,] Sw = new double[dimension, dimension];


            // For each class
            for (int c = 0; c < Classes.Count; c++)
            {
                // Get the Kernel matrix class subset
                double[,] Kc = K.Submatrix(Classes[c].Indices);
                int count = Kc.GetLength(0);

                // Get the Kernel matrix class mean
                double[] mean = Statistics.Tools.Mean(Kc);


                // Construct the Kernel equivalent of the Within-Class Scatter matrix
                double[,] Swi = Statistics.Tools.Scatter(Kc, mean, (double)count);

                // Sw = Sw + Swi
                for (int i = 0; i < dimension; i++)
                {
                    for (int j = 0; j < dimension; j++)
                    {
                        Sw[i, j] += Swi[i, j];
                    }
                }


                // Construct the Kernel equivalent of the Between-Class Scatter matrix
                double[] d = mean.Subtract(base.Means);
                double[,] Sbi = Matrix.OuterProduct(d, d).Multiply(total);

                // Sb = Sb + Sbi
                for (int i = 0; i < dimension; i++)
                {
                    for (int j = 0; j < dimension; j++)
                    {
                        Sb[i, j] += Sbi[i, j];
                    }
                }


                // Store additional information
                base.ClassScatter[c]            = Swi;
                base.ClassCount[c]              = count;
                base.ClassMeans[c]              = mean;
                base.ClassStandardDeviations[c] = Statistics.Tools.StandardDeviation(Kc, mean);
            }


            // Add regularization to avoid singularity
            for (int i = 0; i < dimension; i++)
            {
                Sw[i, i] += regularization;
            }


            // Compute the generalized eigenvalue decomposition
            GeneralizedEigenvalueDecomposition gevd = new GeneralizedEigenvalueDecomposition(Sb, Sw);

            if (gevd.IsSingular) // check validity of the results
            {
                throw new SingularMatrixException("One of the matrices is singular. Please retry " +
                                                  "the method with a higher regularization constant.");
            }


            // Get the eigenvalues and corresponding eigenvectors
            double[] evals = gevd.RealEigenvalues;
            double[,] eigs = gevd.Eigenvectors;

            // Sort eigenvalues and vectors in descending order
            eigs = Matrix.Sort <double, double>(evals, eigs, new GeneralComparer(ComparerDirection.Descending, true));


            if (threshold > 0)
            {
                // We will be discarding less important
                // eigenvectors to conserve memory.

                // Calculate component proportions
                double sum = 0.0; // total variance
                for (int i = 0; i < dimension; i++)
                {
                    sum += Math.Abs(evals[i]);
                }

                if (sum > 0)
                {
                    int keep = 0;

                    // Now we will detect how many components we have
                    //  have to keep in order to achieve the level of
                    //  explained variance specified by the threshold.

                    while (keep < dimension)
                    {
                        // Get the variance explained by the component
                        double explainedVariance = Math.Abs(evals[keep]);

                        // Check its proportion
                        double proportion = explainedVariance / sum;

                        // Now, if the component explains a large
                        // enough proportion of the variance,
                        if (proportion > threshold)
                        {
                            keep++; // We can keep it.
                        }
                        else
                        {
                            // Otherwise we can stop, since the
                            // components are ordered by variance.
                            break;
                        }
                    }

                    if (keep > 0)
                    {
                        // Resize the vectors keeping only needed components
                        eigs  = eigs.Submatrix(0, dimension - 1, 0, keep - 1);
                        evals = evals.Submatrix(0, keep - 1);
                    }
                    else
                    {
                        // No component will be kept.
                        eigs  = new double[dimension, 0];
                        evals = new double[0];
                    }
                }
            }


            // Store information
            base.Eigenvalues         = evals;
            base.DiscriminantMatrix  = eigs;
            base.ScatterBetweenClass = Sb;
            base.ScatterWithinClass  = Sw;


            // Project into the kernel discriminant space
            this.Result = K.Multiply(eigs);


            // Compute feature space means for later classification
            for (int c = 0; c < Classes.Count; c++)
            {
                ProjectionMeans[c] = ClassMeans[c].Multiply(eigs);
            }


            // Computes additional information about the analysis and creates the
            //  object-oriented structure to hold the discriminants found.
            CreateDiscriminants();
        }
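
In the method above, kernel.Function supplies the entries of the Gram matrix K, which lets the scatter matrices live in sample space rather than feature space. A minimal sketch of a Gaussian (RBF) kernel with the same (double[], double[]) shape (a generic example for illustration, not the library's own implementation):

        public static double GaussianKernel(double[] x, double[] y, double sigma)
        {
            // Squared Euclidean distance between the two sample vectors.
            double norm = 0.0;
            for (int i = 0; i < x.Length; i++)
            {
                double d = x[i] - y[i];
                norm += d * d;
            }
            return Math.Exp(-norm / (2.0 * sigma * sigma));
        }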
Example #14
        //---------------------------------------------


        #region Public Methods
        /// <summary>
        ///   Computes the Multi-Class Kernel Discriminant Analysis algorithm.
        /// </summary>
        public override void Compute()
        {
            // Get some initial information
            int dimension = Source.GetLength(0);

            double[,] source = Source;
            double total = dimension;


            // Create the Gram (Kernel) Matrix
            double[,] K = new double[dimension, dimension];
            for (int i = 0; i < dimension; i++)
            {
                for (int j = i; j < dimension; j++)
                {
                    double s = kernel.Function(source.GetRow(i), source.GetRow(j));
                    K[i, j] = s; // Assume K will be symmetric
                    K[j, i] = s;
                }
            }


            // Compute entire data set measures
            base.Means = GABIZ.Base.Statistics.Tools.Mean(K);
            base.StandardDeviations = GABIZ.Base.Statistics.Tools.StandardDeviation(K, Means);


            // Initialize the kernel analogous scatter matrices
            double[,] Sb = new double[dimension, dimension];
            double[,] Sw = new double[dimension, dimension];


            // For each class
            for (int c = 0; c < Classes.Count; c++)
            {
                // Get the Kernel matrix class subset
                double[,] Kc = K.Submatrix(Classes[c].Indexes);
                int count = Kc.GetLength(0);

                // Get the Kernel matrix class mean
                double[] mean = GABIZ.Base.Statistics.Tools.Mean(Kc);


                // Construct the Kernel equivalent of the Within-Class Scatter matrix
                double[,] Swi = GABIZ.Base.Statistics.Tools.Scatter(Kc, mean, (double)count);

                // Sw = Sw + Swi
                for (int i = 0; i < dimension; i++)
                {
                    for (int j = 0; j < dimension; j++)
                    {
                        Sw[i, j] += Swi[i, j];
                    }
                }


                // Construct the Kernel equivalent of the Between-Class Scatter matrix
                double[] d = mean.Subtract(base.Means);
                double[,] Sbi = Matrix.OuterProduct(d, d).Multiply(total);

                // Sb = Sb + Sbi
                for (int i = 0; i < dimension; i++)
                {
                    for (int j = 0; j < dimension; j++)
                    {
                        Sb[i, j] += Sbi[i, j];
                    }
                }


                // Store additional information
                base.ClassScatter[c]            = Swi;
                base.ClassCount[c]              = count;
                base.ClassMeans[c]              = mean;
                base.ClassStandardDeviations[c] = GABIZ.Base.Statistics.Tools.StandardDeviation(Kc, mean);
            }


            // Add regularization to avoid singularity
            for (int i = 0; i < dimension; i++)
            {
                Sw[i, i] += regularization;
            }


            // Compute the generalized eigenvalue decomposition
            GeneralizedEigenvalueDecomposition gevd = new GeneralizedEigenvalueDecomposition(Sb, Sw);

            if (gevd.IsSingular) // check validity of the results
            {
                throw new SingularMatrixException("One of the matrices is singular. Please retry " +
                                                  "the method with a higher regularization constant.");
            }


            // Get the eigenvalues and corresponding eigenvectors
            double[] evals = gevd.RealEigenvalues;
            double[,] eigs = gevd.Eigenvectors;

            // Sort eigenvalues and vectors in descending order
            eigs = Matrix.Sort(evals, eigs, new GeneralComparer(ComparerDirection.Descending, true));


            if (threshold > 0)
            {
                // Calculate proportions earlier
                double sum = 0.0;
                for (int i = 0; i < dimension; i++)
                {
                    sum += System.Math.Abs(evals[i]);
                }

                if (sum > 0)
                {
                    sum = 1.0 / sum;

                    // Discard less important eigenvectors to conserve memory
                    int keep = 0;
                    while (keep < dimension &&
                           System.Math.Abs(evals[keep]) * sum > threshold)
                    {
                        keep++;
                    }

                    eigs  = eigs.Submatrix(0, dimension - 1, 0, keep - 1);
                    evals = evals.Submatrix(0, keep - 1);
                }
            }


            // Store information
            base.Eigenvalues         = evals;
            base.DiscriminantMatrix  = eigs;
            base.ScatterBetweenClass = Sb;
            base.ScatterWithinClass  = Sw;


            // Project into the kernel discriminant space
            this.Result = K.Multiply(eigs);


            // Compute feature space means for later classification
            for (int c = 0; c < Classes.Count; c++)
            {
                double[] mean = new double[eigs.GetLength(1)];
                for (int i = 0; i < eigs.GetLength(0); i++)
                {
                    for (int j = 0; j < eigs.GetLength(1); j++)
                    {
                        mean[j] += ClassMeans[c][i] * eigs[i, j];
                    }
                }
                kernelClassMeans[c] = mean;
            }


            // Computes additional information about the analysis and creates the
            //  object-oriented structure to hold the discriminants found.
            CreateDiscriminants();
        }