private void updateAlphaVec()
{
   // Function calculates the alpha vector with given
   // fixed pillars+values

   // Write Matrix M
   double tmp = 0.0;
   for (int rowIt = 0; rowIt < xSize_; ++rowIt)
   {
      yVec_[rowIt] = this.yBegin_[rowIt];
      tmp = 1.0 / gammaFunc(this.xBegin_[rowIt]);
      for (int colIt = 0; colIt < xSize_; ++colIt)
      {
         M_[rowIt, colIt] = kernelAbs(this.xBegin_[rowIt], this.xBegin_[colIt]) * tmp;
      }
   }

   // Solve y = M * alpha for alpha
   alphaVec_ = MatrixUtilities.qrSolve(M_, yVec_);

   // check if inversion worked up to a reasonable precision.
   // I've chosen not to check determinant(M_) != 0 before solving.
   Vector diffVec = Vector.Abs(M_ * alphaVec_ - yVec_);
   for (int i = 0; i < diffVec.size(); ++i)
   {
      Utils.QL_REQUIRE(diffVec[i] < invPrec_, () => "Inversion failed in 1d kernel interpolation");
   }
}
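// For intuition, a minimal standalone sketch of the same construction: a
// Gaussian kernel on a three-point grid, with gamma taken as the row sum of
// kernel values so that each row of M is normalized, and the small system
// solved by plain Gaussian elimination instead of qrSolve. Everything here
// (class and method names, grid values, bandwidth) is illustrative, not
// library code.
using System;

static class KernelInterp1dSketch
{
   // Gaussian kernel in |x - y|; the bandwidth 0.25 is an arbitrary choice
   static double KernelAbs(double x, double y)
   {
      double u = (x - y) / 0.25;
      return Math.Exp(-0.5 * u * u);
   }

   static void Main()
   {
      double[] xs = { 0.0, 1.0, 2.0 };   // pillars
      double[] ys = { 1.0, 4.0, 9.0 };   // values at the pillars
      int n = xs.Length;

      // M[r,c] = kernel(x_r, x_c) / gamma(x_r), gamma(x_r) = sum_c kernel(x_r, x_c)
      var M = new double[n, n];
      for (int r = 0; r < n; ++r)
      {
         double gamma = 0.0;
         for (int c = 0; c < n; ++c) gamma += KernelAbs(xs[r], xs[c]);
         for (int c = 0; c < n; ++c) M[r, c] = KernelAbs(xs[r], xs[c]) / gamma;
      }

      // solve M * alpha = y
      double[] alpha = Solve(M, (double[])ys.Clone(), n);

      // residual check, mirroring the invPrec_ test above
      for (int r = 0; r < n; ++r)
      {
         double acc = 0.0;
         for (int c = 0; c < n; ++c) acc += M[r, c] * alpha[c];
         Console.WriteLine($"row {r}: |M*alpha - y| = {Math.Abs(acc - ys[r]):E2}");
      }
   }

   // Gaussian elimination with partial pivoting; fine for a toy system
   static double[] Solve(double[,] a, double[] b, int n)
   {
      for (int p = 0; p < n; ++p)
      {
         int piv = p;
         for (int r = p + 1; r < n; ++r)
            if (Math.Abs(a[r, p]) > Math.Abs(a[piv, p])) piv = r;
         for (int c = 0; c < n; ++c) { double t = a[p, c]; a[p, c] = a[piv, c]; a[piv, c] = t; }
         { double t = b[p]; b[p] = b[piv]; b[piv] = t; }
         for (int r = p + 1; r < n; ++r)
         {
            double f = a[r, p] / a[p, p];
            for (int c = p; c < n; ++c) a[r, c] -= f * a[p, c];
            b[r] -= f * b[p];
         }
      }
      var x = new double[n];
      for (int r = n - 1; r >= 0; --r)   // back substitution
      {
         double s = b[r];
         for (int c = r + 1; c < n; ++c) s -= a[r, c] * x[c];
         x[r] = s / a[r, r];
      }
      return x;
   }
}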
private void updateAlphaVec()
{
   // Function calculates the alpha vector with given
   // fixed pillars+values

   Vector Xk = new Vector(2), Xn = new Vector(2);

   int rowCnt = 0, colCnt = 0;
   double tmpVar = 0.0;

   // write y-vector and M-Matrix
   for (int j = 0; j < ySize_; ++j)
   {
      for (int i = 0; i < xSize_; ++i)
      {
         yVec_[rowCnt] = this.zData_[i, j];

         // calculate X_k
         Xk[0] = this.xBegin_[i];
         Xk[1] = this.yBegin_[j];

         tmpVar = 1.0 / gammaFunc(Xk);
         colCnt = 0;

         for (int jM = 0; jM < ySize_; ++jM)
         {
            for (int iM = 0; iM < xSize_; ++iM)
            {
               Xn[0] = this.xBegin_[iM];
               Xn[1] = this.yBegin_[jM];
               M_[rowCnt, colCnt] = kernelAbs(Xk, Xn) * tmpVar;
               colCnt++; // increase column counter
            } // end iM
         } // end jM
         rowCnt++; // increase row counter
      } // end i
   } // end j

   alphaVec_ = MatrixUtilities.qrSolve(M_, yVec_);

   // check if inversion worked up to a reasonable precision.
   // I've chosen not to check determinant(M_) != 0 before solving.
   Vector diffVec = Vector.Abs(M_ * alphaVec_ - yVec_);
   for (int i = 0; i < diffVec.size(); ++i)
   {
      Utils.QL_REQUIRE(diffVec[i] < invPrec_, () => "inversion failed in 2d kernel interpolation");
   }
}
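// The 2d version differs from the 1d one only in how the xSize_-by-ySize_
// grid is flattened into a single index: both rowCnt and colCnt walk the
// grid in x-major order, so rowCnt == j * xSize_ + i at each step. A tiny
// standalone check of that closed form (names here are illustrative):
using System;

static class GridFlattenSketch
{
   static void Main()
   {
      int xSize = 3, ySize = 2;
      int rowCnt = 0;
      for (int j = 0; j < ySize; ++j)
      {
         for (int i = 0; i < xSize; ++i)
         {
            int closedForm = j * xSize + i;   // same walk as the running counter
            Console.WriteLine($"rowCnt={rowCnt} closedForm={closedForm} equal={rowCnt == closedForm}");
            rowCnt++;
         }
      }
   }
}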
//! QR Solve
/*! This implementation is based on MINPACK
 *  (<http://www.netlib.org/minpack>,
 *  <http://www.netlib.org/cephes/linalg.tgz>)
 *
 *  Given an m by n matrix A, an n by n diagonal matrix d,
 *  and an m-vector b, the problem is to determine an x which
 *  solves the system
 *
 *      A*x = b ,    d*x = 0 ,
 *
 *  in the least squares sense.
 *
 *  d is an input array of length n which must contain the
 *  diagonal elements of the matrix d.
 *
 *  See lmdiff.cpp for further details.
 */
public static Vector qrSolve(Matrix a, Vector b, bool pivot = true, Vector d = null)
{
   int m = a.rows();
   int n = a.columns();
   if (d == null)
   {
      d = new Vector();
   }
   Utils.QL_REQUIRE(b.Count == m, () => "dimensions of A and b don't match");
   Utils.QL_REQUIRE(d.Count == n || d.empty(), () => "dimensions of A and d don't match");

   Matrix q = new Matrix(m, n), r = new Matrix(n, n);

   List<int> ipvt = MatrixUtilities.qrDecomposition(a, ref q, ref r, pivot);

   Matrix rT = Matrix.transpose(r);

   Vector sdiag = new Vector(n);
   Vector wa = new Vector(n);

   // use the supplied diagonal if given, otherwise a zero diagonal
   Vector ld = new Vector(n, 0.0);
   if (!d.empty())
   {
      ld = d;
   }

   Vector x = new Vector(n);
   Vector qtb = Matrix.transpose(q) * b;

   MINPACK.qrsolv(n, rT, n, ipvt, ld, qtb, x, sdiag, wa);

   return x;
}
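// A hedged usage sketch: fitting a line through four points in the
// least-squares sense via qrSolve and checking the optimality condition
// A^T (A x - b) = 0. Only Matrix/Vector operations already used in this
// file appear; the wrapper class and the sample data are made up.
public static class QrSolveExample
{
   public static void Run()
   {
      // overdetermined system: y = c0 + c1 * t through four noisy points
      Matrix A = new Matrix(4, 2);
      Vector b = new Vector(4);
      double[] t = { 0.0, 1.0, 2.0, 3.0 };
      double[] y = { 0.1, 0.9, 2.1, 2.9 };
      for (int i = 0; i < 4; ++i)
      {
         A[i, 0] = 1.0;    // intercept column
         A[i, 1] = t[i];   // slope column
         b[i] = y[i];
      }

      Vector x = MatrixUtilities.qrSolve(A, b);   // pivoting on by default

      // at the least-squares solution the residual is orthogonal to the
      // columns of A, so this gradient should vanish up to round-off
      Vector grad = Matrix.transpose(A) * ((A * x) - b);
   }
}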
//! Returns the pseudo square root of a real symmetric matrix
/*! Given a matrix \f$ M \f$, the result \f$ S \f$ is defined
 *  as the matrix such that \f$ S S^T = M. \f$
 *  If the matrix is not positive semi definite, it can
 *  return an approximation of the pseudo square root
 *  using a (user selected) salvaging algorithm.
 *
 *  For more information see: "The most general methodology to create
 *  a valid correlation matrix for risk management and option pricing
 *  purposes", by R. Rebonato and P. Jäckel.
 *  The Journal of Risk, 2(2), Winter 1999/2000
 *  http://www.rebonato.com/correlationmatrix.pdf
 *
 *  Revised and extended in "Monte Carlo Methods in Finance",
 *  by Peter Jäckel, Chapter 6.
 *
 *  \pre the given matrix must be symmetric.
 *
 *  \relates Matrix
 *
 *  \warning Higham algorithm only works for correlation matrices.
 *
 *  \test
 *  - the correctness of the results is tested by reproducing
 *    known good data.
 *  - the correctness of the results is tested by checking
 *    returned values against numerical calculations.
 */
public static Matrix pseudoSqrt(Matrix matrix, SalvagingAlgorithm sa)
{
   int size = matrix.rows();

#if QL_EXTRA_SAFETY_CHECKS
   checkSymmetry(matrix);
#else
   if (size != matrix.columns())
   {
      throw new Exception("non square matrix: " + size + " rows, " + matrix.columns() + " columns");
   }
#endif

   // spectral (a.k.a. Principal Component) analysis
   SymmetricSchurDecomposition jd = new SymmetricSchurDecomposition(matrix);
   Matrix diagonal = new Matrix(size, size, 0.0);

   // salvaging algorithm
   Matrix result = new Matrix(size, size);
   bool negative;
   switch (sa)
   {
      case SalvagingAlgorithm.None:
         // eigenvalues are sorted in decreasing order
         if (!(jd.eigenvalues()[size - 1] >= -1e-16))
         {
            throw new Exception("negative eigenvalue(s) (" + jd.eigenvalues()[size - 1] + ")");
         }
         result = MatrixUtilities.CholeskyDecomposition(matrix, true);
         break;

      case SalvagingAlgorithm.Spectral:
         // negative eigenvalues set to zero
         for (int i = 0; i < size; i++)
         {
            diagonal[i, i] = Math.Sqrt(Math.Max(jd.eigenvalues()[i], 0.0));
         }
         result = jd.eigenvectors() * diagonal;
         normalizePseudoRoot(matrix, result);
         break;

      case SalvagingAlgorithm.Hypersphere:
         // negative eigenvalues set to zero
         negative = false;
         for (int i = 0; i < size; ++i)
         {
            diagonal[i, i] = Math.Sqrt(Math.Max(jd.eigenvalues()[i], 0.0));
            if (jd.eigenvalues()[i] < 0.0)
            {
               negative = true;
            }
         }
         result = jd.eigenvectors() * diagonal;
         normalizePseudoRoot(matrix, result);
         if (negative)
         {
            result = hypersphereOptimize(matrix, result, false);
         }
         break;

      case SalvagingAlgorithm.LowerDiagonal:
         // negative eigenvalues set to zero
         negative = false;
         for (int i = 0; i < size; ++i)
         {
            diagonal[i, i] = Math.Sqrt(Math.Max(jd.eigenvalues()[i], 0.0));
            if (jd.eigenvalues()[i] < 0.0)
            {
               negative = true;
            }
         }
         result = jd.eigenvectors() * diagonal;
         normalizePseudoRoot(matrix, result);
         if (negative)
         {
            result = hypersphereOptimize(matrix, result, true);
         }
         break;

      case SalvagingAlgorithm.Higham:
      {
         int maxIterations = 40;
         double tol = 1e-6;
         result = highamImplementation(matrix, maxIterations, tol);
         result = MatrixUtilities.CholeskyDecomposition(result, true);
         break;
      }

      default:
         throw new Exception("unknown salvaging algorithm");
   }

   return result;
}
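// A hedged usage sketch: a symmetric 3x3 "correlation" matrix with a small
// negative eigenvalue (its determinant is about -0.012), salvaged with the
// spectral method. The wrapper class and the numbers are made up.
public static class PseudoSqrtExample
{
   public static void Run()
   {
      Matrix m = new Matrix(3, 3);
      m[0, 0] = 1.0; m[0, 1] = 0.9; m[0, 2] = 0.7;
      m[1, 0] = 0.9; m[1, 1] = 1.0; m[1, 2] = 0.3;
      m[2, 0] = 0.7; m[2, 1] = 0.3; m[2, 2] = 1.0;

      // SalvagingAlgorithm.None would throw here because of the negative
      // eigenvalue; Spectral clips it to zero and renormalizes
      Matrix s = MatrixUtilities.pseudoSqrt(m, SalvagingAlgorithm.Spectral);

      // s * s^T is positive semidefinite by construction and, thanks to
      // normalizePseudoRoot, its diagonal matches that of m
      Matrix approx = s * Matrix.transpose(s);
   }
}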
// Optimization function for hypersphere and lower-diagonal algorithm
private static Matrix hypersphereOptimize(Matrix targetMatrix, Matrix currentRoot, bool lowerDiagonal)
{
   int i, j, k, size = targetMatrix.rows();
   Matrix result = new Matrix(currentRoot);
   Vector variance = new Vector(size);
   for (i = 0; i < size; i++)
   {
      variance[i] = Math.Sqrt(targetMatrix[i, i]);
   }

   // normalize the starting root so that every row has unit norm
   if (lowerDiagonal)
   {
      Matrix approxMatrix = result * Matrix.transpose(result);
      result = MatrixUtilities.CholeskyDecomposition(approxMatrix, true);
      for (i = 0; i < size; i++)
      {
         for (j = 0; j < size; j++)
         {
            result[i, j] /= Math.Sqrt(approxMatrix[i, i]);
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
         for (j = 0; j < size; j++)
         {
            result[i, j] /= variance[i];
         }
      }
   }

   ConjugateGradient optimize = new ConjugateGradient();
   EndCriteria endCriteria = new EndCriteria(100, 10, 1e-8, 1e-8, 1e-8);
   HypersphereCostFunction costFunction = new HypersphereCostFunction(targetMatrix, variance, lowerDiagonal);
   NoConstraint constraint = new NoConstraint();

   // hypersphere vector optimization
   if (lowerDiagonal)
   {
      Vector theta = new Vector(size * (size - 1) / 2);
      const double eps = 1e-16;

      // convert the lower-triangular root entries to spherical angles,
      // clamping each cosine into [-1+eps, 1-eps] before taking Acos
      for (i = 1; i < size; i++)
      {
         for (j = 0; j < i; j++)
         {
            theta[i * (i - 1) / 2 + j] = result[i, j];
            if (theta[i * (i - 1) / 2 + j] > 1 - eps)
            {
               theta[i * (i - 1) / 2 + j] = 1 - eps;
            }
            if (theta[i * (i - 1) / 2 + j] < -1 + eps)
            {
               theta[i * (i - 1) / 2 + j] = -1 + eps;
            }
            for (k = 0; k < j; k++)
            {
               theta[i * (i - 1) / 2 + j] /= Math.Sin(theta[i * (i - 1) / 2 + k]);
               if (theta[i * (i - 1) / 2 + j] > 1 - eps)
               {
                  theta[i * (i - 1) / 2 + j] = 1 - eps;
               }
               if (theta[i * (i - 1) / 2 + j] < -1 + eps)
               {
                  theta[i * (i - 1) / 2 + j] = -1 + eps;
               }
            }
            theta[i * (i - 1) / 2 + j] = Math.Acos(theta[i * (i - 1) / 2 + j]);
            if (j == i - 1)
            {
               if (result[i, i] < 0)
               {
                  theta[i * (i - 1) / 2 + j] = -theta[i * (i - 1) / 2 + j];
               }
            }
         }
      }

      Problem p = new Problem(costFunction, constraint, theta);
      optimize.minimize(p, endCriteria);
      theta = p.currentValue();

      // rebuild the lower-triangular root from the optimized angles
      result.fill(1);
      for (i = 0; i < size; i++)
      {
         for (k = 0; k < size; k++)
         {
            if (k > i)
            {
               result[i, k] = 0;
            }
            else
            {
               for (j = 0; j <= k; j++)
               {
                  if (j == k && k != i)
                  {
                     result[i, k] *= Math.Cos(theta[i * (i - 1) / 2 + j]);
                  }
                  else if (j != i)
                  {
                     result[i, k] *= Math.Sin(theta[i * (i - 1) / 2 + j]);
                  }
               }
            }
         }
      }
   }
   else
   {
      Vector theta = new Vector(size * (size - 1));
      const double eps = 1e-16;

      // convert the full root to spherical angles, column by column
      for (i = 0; i < size; i++)
      {
         for (j = 0; j < size - 1; j++)
         {
            theta[j * size + i] = result[i, j];
            if (theta[j * size + i] > 1 - eps)
            {
               theta[j * size + i] = 1 - eps;
            }
            if (theta[j * size + i] < -1 + eps)
            {
               theta[j * size + i] = -1 + eps;
            }
            for (k = 0; k < j; k++)
            {
               theta[j * size + i] /= Math.Sin(theta[k * size + i]);
               if (theta[j * size + i] > 1 - eps)
               {
                  theta[j * size + i] = 1 - eps;
               }
               if (theta[j * size + i] < -1 + eps)
               {
                  theta[j * size + i] = -1 + eps;
               }
            }
            theta[j * size + i] = Math.Acos(theta[j * size + i]);
            if (j == size - 2)
            {
               if (result[i, j + 1] < 0)
               {
                  theta[j * size + i] = -theta[j * size + i];
               }
            }
         }
      }

      Problem p = new Problem(costFunction, constraint, theta);
      optimize.minimize(p, endCriteria);
      theta = p.currentValue();

      // rebuild the root from the optimized angles
      result.fill(1);
      for (i = 0; i < size; i++)
      {
         for (k = 0; k < size; k++)
         {
            for (j = 0; j <= k; j++)
            {
               if (j == k && k != size - 1)
               {
                  result[i, k] *= Math.Cos(theta[j * size + i]);
               }
               else if (j != size - 1)
               {
                  result[i, k] *= Math.Sin(theta[j * size + i]);
               }
            }
         }
      }
   }

   // scale the rows back to the target variances
   for (i = 0; i < size; i++)
   {
      for (j = 0; j < size; j++)
      {
         result[i, j] *= variance[i];
      }
   }
   return result;
}
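// The angle parameterization above is easier to see in isolation: each row
// of the normalized root is a point on a unit hypersphere, so writing it in
// spherical coordinates keeps the row norm at exactly 1 (and hence the
// diagonal of B * B^T at 1) for any angle values the optimizer tries.
// A minimal standalone check for one 3-vector row (names illustrative):
using System;

static class HypersphereRowSketch
{
   static void Main()
   {
      // arbitrary angles; any values whatsoever give a unit-norm row
      double t1 = 0.7, t2 = -1.2;

      // spherical-coordinate row, as in the reconstruction loops above:
      // (cos t1, sin t1 * cos t2, sin t1 * sin t2)
      double[] row =
      {
         Math.Cos(t1),
         Math.Sin(t1) * Math.Cos(t2),
         Math.Sin(t1) * Math.Sin(t2)
      };

      double norm2 = 0.0;
      foreach (double v in row) norm2 += v * v;
      Console.WriteLine($"||row||^2 = {norm2}");   // 1 up to round-off
   }
}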