/// <summary>
/// Assume a tensor has been flattened to a vector as
/// {A_{0,0}, A_{0,1},..., A_{0,m}, A_{1,0}, A_{1,1},..., A_{1,m},..., A_{n,0}, A_{n,1},..., A_{n,m}}
/// (see <seealso cref="#flattenMatrix"/>), that is, the <b>last</b> index changes most rapidly.
/// This produces the sum of penalty matrices (of order given by k) with each scaled by lambda.
/// </summary>
/// <param name="numElements"> The range of each index. In the example above, this would be {n,m} </param>
/// <param name="k"> The difference order for each dimension </param>
/// <param name="lambda"> The scaling for each dimension </param>
/// <returns> A penalty matrix </returns>
public static DoubleMatrix getPenaltyMatrix(int[] numElements, int[] k, double[] lambda)
{
  ArgChecker.notEmpty(numElements, "size");
  ArgChecker.notEmpty(k, "k");
  ArgChecker.notEmpty(lambda, "lambda");
  int dim = numElements.Length;
  ArgChecker.isTrue(dim == k.Length, "k different length to size");
  // fixed typo in the error message: "lenght" -> "length"
  ArgChecker.isTrue(dim == lambda.Length, "lambda different length to size");

  // Accumulate the per-dimension penalty matrices, each scaled by its lambda.
  DoubleMatrix p = (DoubleMatrix)MA.scale(getPenaltyMatrix(numElements, k[0], 0), lambda[0]);
  for (int i = 1; i < dim; i++)
  {
    DoubleMatrix temp = (DoubleMatrix)MA.scale(getPenaltyMatrix(numElements, k[i], i), lambda[i]);
    p = (DoubleMatrix)MA.add(p, temp);
  }
  return (p);
}
/// <summary>
/// Core generalized least squares implementation: solves for the basis-function weights that
/// best fit the observations, optionally adding a difference-based roughness penalty of
/// strength lambda (only applied when lambda &gt; 0).
/// </summary>
private GeneralizedLeastSquareResults<T> solveImp<T>(IList<T> x, IList<double> y, IList<double> sigma, IList<System.Func<T, double>> basisFunctions, double lambda, int differenceOrder)
{
  int n = x.Count;
  int m = basisFunctions.Count;

  // Inverse-variance weights; every sigma must be strictly positive.
  double[] invSigmaSqr = new double[n];
  for (int obs = 0; obs < n; obs++)
  {
    double s = sigma[obs];
    ArgChecker.isTrue(s > 0, "sigma must be greater than zero");
    invSigmaSqr[obs] = 1.0 / s / s;
  }

  // Design matrix: f[basis][obs] = basisFunctions[basis](x[obs]).
  double[][] f = new double[m][];
  for (int basis = 0; basis < m; basis++)
  {
    f[basis] = new double[n];
    for (int obs = 0; obs < n; obs++)
    {
      f[basis][obs] = basisFunctions[basis](x[obs]);
    }
  }

  // Right-hand side of the normal equations: b_i = sum_k y_k * f_ik / sigma_k^2.
  double[] b = new double[m];
  for (int basis = 0; basis < m; basis++)
  {
    double acc = 0;
    for (int obs = 0; obs < n; obs++)
    {
      acc += y[obs] * f[basis][obs] * invSigmaSqr[obs];
    }
    b[basis] = acc;
  }

  DoubleArray mb = DoubleArray.copyOf(b);
  DoubleMatrix ma = getAMatrix(f, invSigmaSqr);

  if (lambda > 0.0)
  {
    // Add the roughness penalty lambda * D to the normal-equations matrix.
    DoubleMatrix d = getDiffMatrix(m, differenceOrder);
    ma = (DoubleMatrix)_algebra.add(ma, _algebra.scale(d, lambda));
  }

  DecompositionResult decmp = _decomposition.apply(ma);
  DoubleArray w = decmp.solve(mb);
  // Covariance of the weights from the decomposed system.
  DoubleMatrix covar = decmp.solve(DoubleMatrix.identity(m));

  // Chi-squared of the fit evaluated at the solved weights.
  double chiSq = 0;
  for (int obs = 0; obs < n; obs++)
  {
    double fitted = 0;
    for (int basis = 0; basis < m; basis++)
    {
      fitted += w.get(basis) * f[basis][obs];
    }
    chiSq += FunctionUtils.square(y[obs] - fitted) * invSigmaSqr[obs];
  }

  return (new GeneralizedLeastSquareResults<T>(basisFunctions, chiSq, w, covar));
}