/// <summary>Creates an 'n x r' matrix B such that B * B' is a correlation matrix and 'near' to the specified symmetric, normalized matrix of dimension n. A rank reduction is applied if r is strictly less than n.
/// </summary>
/// <param name="rawCorrelationMatrix">The symmetric, normalized matrix for which to find the 'nearest' correlation matrix.</param>
/// <param name="state">The state of the operation in its <see cref="PseudoSqrtMatrixDecomposer.State"/> representation (output).</param>
/// <param name="triangularMatrixType">A value indicating which part of <paramref name="rawCorrelationMatrix"/> to take into account.</param>
/// <param name="outputEntries">This argument is used to store the matrix entries of the resulting matrix B, i.e. the returned matrix points to this array if it is not <c>null</c>; otherwise memory is allocated internally.</param>
/// <param name="worksspaceContainer">A specific <see cref="PseudoSqrtMatrixDecomposer.WorkspaceContainer"/> object to reduce memory allocation; ignored if <c>null</c>.</param>
/// <returns>A <see cref="DenseMatrix"/> object that represents a matrix B such that B * B' is the 'nearest' correlation matrix with respect to <paramref name="rawCorrelationMatrix"/>.</returns>
/// <remarks>In general the returned object does <b>not</b> represent the pseudo-root of <paramref name="rawCorrelationMatrix"/>, i.e. the output of the Cholesky decomposition.
/// <para>The parameters <paramref name="outputEntries"/> and <paramref name="worksspaceContainer"/> allow one to avoid memory allocation and to re-use arrays when correlation matrices are calculated frequently.</para></remarks>
public override DenseMatrix Create(DenseMatrix rawCorrelationMatrix, out State state, double[] outputEntries = null, PseudoSqrtMatrixDecomposer.WorkspaceContainer worksspaceContainer = null, BLAS.TriangularMatrixType triangularMatrixType = BLAS.TriangularMatrixType.LowerTriangularMatrix) { if (rawCorrelationMatrix.IsQuadratic == false) { throw new ArgumentException("The matrix must be quadratic.", nameof(rawCorrelationMatrix)); } int n = rawCorrelationMatrix.RowCount; if ((outputEntries == null) || (outputEntries.Length < n * n)) { outputEntries = new double[n * n]; } var ws = worksspaceContainer as Workspace; if ((ws == null) || (ws.Dimension < n)) { ws = new Workspace(n); } int m; BLAS.Level1.dcopy(n * n, rawCorrelationMatrix.Data, ws.data); LAPACK.EigenValues.Symmetric.driver_dsyevr(LapackEigenvalues.SymmetricGeneralJob.All, n, ws.data, out m, ws.eigenValues, outputEntries, ws.isuppz, ws.work, ws.iwork); var originalEigenValueDataTable = InfoOutputDetailLevel.IsAtLeastAsComprehensiveAs(InfoOutputDetailLevel.High) ? CreateDataTableWithEigenvalues("Eigenvalues.Original", m, ws.eigenValues) : null; int rank = n; int minNumberOfEigenvaluesToSetZero = n - Math.Min(MaximalRank ?? n, n); int i = 0; while ((i < minNumberOfEigenvaluesToSetZero) || (ws.eigenValues[i] < 0.0)) { ws.eigenValues[i] = 0.0; i++; rank--; } var adjustedEigenValueDataTable = InfoOutputDetailLevel.IsAtLeastAsComprehensiveAs(InfoOutputDetailLevel.High) ? CreateDataTableWithEigenvalues("Eigenvalues.Adjusted", m, ws.eigenValues) : null; VectorUnit.Basics.Sqrt(n, ws.eigenValues); // the array 'eigenValues' now contains the square roots of the (adjusted) eigenvalues
for (i = 0; i < n; i++) { var t_i = 0.0; for (int j = n - 1; j >= n - rank; j--) { t_i += outputEntries[i + j * n] * outputEntries[i + j * n] * ws.eigenValues[j] * ws.eigenValues[j]; outputEntries[i + j * n] *= ws.eigenValues[j]; } BLAS.Level1.dscal(rank, 1.0 / Math.Sqrt(t_i), outputEntries, -n, i + (n - 1) * n); // [i+j*n] *= 1/Math.Sqrt(t_i) for j = n-1, ..., n-rank
} /* The eigenvalues are in ascending order, i.e. the relevant eigenvectors are stored in the last 'rank' columns. Zero the first n - rank columns and swap the relevant part to the front: */ BLAS.Level1.dscal(n * (n - rank), 0.0, outputEntries); BLAS.Level1.dswap(n * rank, outputEntries, 1, outputEntries, 1, n * (n - rank), 0); state = State.Create(rank, detailProperties: new[] { InfoOutputProperty.Create("Eigenvalues set to 0.0", n - rank) }, detailDataTables: new[] { originalEigenValueDataTable, adjustedEigenValueDataTable }, iterationsNeeded: 1, infoOutputDetailLevel: InfoOutputDetailLevel); return(new DenseMatrix(n, rank, outputEntries)); }
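/* A minimal standalone sketch (not the library code) of the scaling and normalization step above:
   given the eigenvectors V (column-major, n x n, eigenvalues in ascending order) and the adjusted,
   non-negative eigenvalues, build the n x rank factor B = V_kept * diag(sqrt(lambda_kept)) and
   rescale each row so that B * B' has a unit diagonal. All names are illustrative; assumes 'using System;'. */
static double[] BuildNormalizedFactor(int n, int rank, double[] v, double[] lambda)
{
    var b = new double[n * rank];
    for (int i = 0; i < n; i++)
    {
        double t = 0.0;
        for (int r = 0; r < rank; r++)
        {
            int j = n - 1 - r;                                   // kept eigenvectors: the last 'rank' columns
            double entry = v[i + j * n] * Math.Sqrt(lambda[j]);
            b[i + r * n] = entry;
            t += entry * entry;
        }
        double scale = 1.0 / Math.Sqrt(t);                       // enforce (B * B')_{i,i} = 1
        for (int r = 0; r < rank; r++)
        {
            b[i + r * n] *= scale;
        }
    }
    return b;
}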
/// <summary>Finds the argmin of <see cref="IOneDimOptimizerAlgorithm.Function"/>.
/// </summary>
/// <param name="x">An initial guess for the algorithm (if applicable).</param>
/// <param name="argMin">The estimated argmin of the objective function (output).</param>
/// <param name="minimum">The minimum, i.e. the function value at <paramref name="argMin"/> (output).</param>
/// <returns>The state of the algorithm, i.e. an indication whether <paramref name="argMin"/> and <paramref name="minimum"/> contain valid data.</returns>
public OneDimOptimizer.State FindMinimum(double x, out double argMin, out double minimum) { if (m_Constraint.IntervalRepresentation.GetPointPosition(x) != PointRegionRelation.InsideOrBoundaryPoint) { throw new ArgumentException("The initial point is not a feasible point."); } var bracketingResultState = m_Optimizer.BracketingApproach.TryGetBracketingTriple(m_ObjectiveFunction.GetValue, m_Constraint.IntervalRepresentation.Infimum, m_Constraint.IntervalRepresentation.Supremum, x, out BracketingTriple triple, out double fa, out double fb, out double fc, out int evaluationsNeeded); switch (bracketingResultState) { case MinimumBracketingResultState.ProperResult: return(FindMinimum(triple.A, triple.B, triple.C, evaluationsNeeded, out argMin, out minimum)); case MinimumBracketingResultState.FlatBracketingTriple: argMin = triple.A; minimum = fa; return(State.Create(OptimizerErrorClassification.ProperResult, argMin, minimum, evaluationsNeeded: evaluationsNeeded, iterationsNeeded: 1, details: InfoOutputProperty.Create("Bracketing Result State", bracketingResultState))); default: // return the smallest of the three function values, but indicate that no bracketing triple has been found
if (fa < fb) { if (fa < fc) // fa = f(a) is the smallest value
{ argMin = triple.A; minimum = fa; } else // fc = f(c) is the smallest value
{ argMin = triple.C; minimum = fc; } } else // fb <= fa
{ if (fc < fb) { argMin = triple.C; minimum = fc; } else { argMin = triple.B; minimum = fb; } } double estimatedAbsoluteError = Math.Max(Math.Abs(triple.B - triple.A), Math.Max(Math.Abs(triple.C - triple.A), Math.Abs(triple.B - triple.C))); double estimatedRelativeError = estimatedAbsoluteError / (MachineConsts.Epsilon + Math.Abs(triple.A) + Math.Abs(triple.B) + Math.Abs(triple.C)); return(State.Create(OptimizerErrorClassification.Unknown, argMin, minimum, evaluationsNeeded, iterationsNeeded: 1, details: new[] { InfoOutputProperty.Create("Bracketing Result State", bracketingResultState), InfoOutputProperty.Create("estimatedAbsoluteError", estimatedAbsoluteError), InfoOutputProperty.Create("estimatedRelativeError", estimatedRelativeError) })); } }
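/* Illustrative usage of the method above; 'algorithm' stands for any object exposing the
   FindMinimum(double, out double, out double) overload shown here, and the initial guess is made up. */
double initialGuess = 0.5;
OneDimOptimizer.State state = algorithm.FindMinimum(initialGuess, out double argMin, out double minimum);
Console.WriteLine($"state: {state}, argmin: {argMin}, minimum: {minimum}");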
/// <summary>Creates a new <see cref="State"/> object.
/// </summary>
/// <param name="classification">The classification of the result.</param>
/// <param name="minimum">The estimated minimum reported by the specific algorithm.</param>
/// <param name="evaluationsNeeded">The number of function evaluations needed by the algorithm to reach the desired accuracy.</param>
/// <param name="iterationsNeeded">The number of iterations needed by the algorithm to reach the desired accuracy.</param>
/// <param name="details">Additional details in their <see cref="InfoOutputProperty"/> representation.</param>
/// <returns>A <see cref="State"/> object that represents the state of a specific calculation.</returns>
public static State Create(OptimizerErrorClassification classification, double minimum, int evaluationsNeeded = Int32.MaxValue, int iterationsNeeded = Int32.MaxValue, params InfoOutputProperty[] details) { var properties = new List<InfoOutputProperty>() { InfoOutputProperty.Create("Minimum", minimum) }; if (details != null) { properties.AddRange(details); } return(new State(classification, properties, evaluationsNeeded, iterationsNeeded)); }
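/* Illustrative call of the factory method above; all values are made up. */
var state = State.Create(OptimizerErrorClassification.ProperResult, minimum: 1.25, evaluationsNeeded: 17, iterationsNeeded: 6, InfoOutputProperty.Create("Tolerance", 1E-8));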
/// <summary>Creates a new <see cref="State"/> object.
/// </summary>
/// <param name="classification">The classification of the result.</param>
/// <param name="root">The estimated root of the specific algorithm.</param>
/// <param name="functionValue">The function value at <paramref name="root"/>; should be almost <c>0.0</c>.</param>
/// <param name="evaluationsNeeded">The number of function evaluations needed by the algorithm to reach the desired accuracy.</param>
/// <param name="iterationsNeeded">The number of iterations needed by the algorithm to reach the desired accuracy.</param>
/// <param name="details">Additional details in their <see cref="InfoOutputProperty"/> representation.</param>
/// <returns>A <see cref="State"/> object that represents the state of a specific calculation.</returns>
public static State Create(EquationSolverErrorClassification classification, double root, double functionValue, int evaluationsNeeded = Int32.MaxValue, int iterationsNeeded = Int32.MaxValue, params InfoOutputProperty[] details) { var properties = new List<InfoOutputProperty>() { InfoOutputProperty.Create("Root", root), InfoOutputProperty.Create("FunctionValue", functionValue) }; if (details != null) { properties.AddRange(details); } return(new State(classification, properties, evaluationsNeeded, iterationsNeeded)); }
/// <summary>Gets information about the current object as a specific <see cref="InfoOutput" /> instance.
/// </summary>
/// <param name="infoOutput">The <see cref="InfoOutput" /> object which is to be filled with information concerning the current instance.</param>
/// <param name="categoryName">The name of the category, i.e. all information will be added to this category.</param>
public void FillInfoOutput(InfoOutput infoOutput, string categoryName = InfoOutput.GeneralCategoryName) { var infoPackage = infoOutput.AcquirePackage(categoryName); if (m_PropertyCollectionParentNode.HasChildNodes == true) { for (int j = 0; j < m_PropertyCollectionParentNode.ChildNodes.Count; j++) { var node = m_PropertyCollectionParentNode.ChildNodes[j]; var infoProperty = new InfoOutputProperty(node.Name, node.InnerText); infoPackage.Add(infoProperty); } } }
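/* Standalone sketch of the traversal above using plain System.Xml types; the XML fragment and the
   property names are made up for illustration. Each child node contributes one (Name, InnerText)
   pair, which the method above wraps in an InfoOutputProperty. */
using System;
using System.Xml;

class PropertyNodeDemo
{
    static void Main()
    {
        var document = new XmlDocument();
        document.LoadXml("<Properties><Tolerance>1E-8</Tolerance><MaxIterations>100</MaxIterations></Properties>");

        XmlNode parentNode = document.DocumentElement;
        if (parentNode.HasChildNodes)
        {
            for (int j = 0; j < parentNode.ChildNodes.Count; j++)
            {
                XmlNode node = parentNode.ChildNodes[j];
                Console.WriteLine($"{node.Name} = {node.InnerText}");  // e.g. "Tolerance = 1E-8"
            }
        }
    }
}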
/// <summary>Finds the minimum of the objective function. This implementation does not take a possibly supplied derivative into account.
/// </summary>
/// <param name="a">The first point of the bracketing triple.</param>
/// <param name="b">The second point of the bracketing triple.</param>
/// <param name="c">The third point of the bracketing triple.</param>
/// <param name="evaluationsNeeded">The number of function evaluations that have already been performed.</param>
/// <param name="argMin">The estimated argmin of the objective function (output).</param>
/// <param name="minimum">The minimum, i.e. the function value at <paramref name="argMin"/> (output).</param>
/// <returns>The state of the algorithm.</returns>
/// <remarks>The implementation is based on Press, et al. (1992) "Numerical recipes in C", 2nd ed., p.401f.</remarks>
private State FindMinimum(double a, double b, double c, int evaluationsNeeded, out double argMin, out double minimum) { double x1, x2; double x0 = a; double x3 = c; if (Math.Abs(c - b) > Math.Abs(b - a)) { x1 = b; x2 = b + MathConsts.TwoMinusGoldenRatio * (c - b); } else { x2 = b; x1 = b - MathConsts.TwoMinusGoldenRatio * (b - a); } double f1 = m_ObjectiveFunction.GetValue(x1); double f2 = m_ObjectiveFunction.GetValue(x2); evaluationsNeeded += 2; var resultStatus = OptimizerErrorClassification.NoResult; int k = 1; while (k <= AbortCondition.MaxIterations) { if (Math.Abs(x3 - x0) <= AbortCondition.Tolerance * (Math.Abs(x1) + Math.Abs(x2))) { resultStatus = OptimizerErrorClassification.ProperResult; } if (evaluationsNeeded >= m_Optimizer.AbortCondition.MaxEvaluations) { resultStatus = OptimizerErrorClassification.EvaluationLimitExceeded; } if (resultStatus != OptimizerErrorClassification.NoResult) { break; // exit loop
} /* do some iteration step */ if (f2 < f1) { x0 = x1; x1 = x2; x2 = MathConsts.GoldenRatioMinusOne * x1 + MathConsts.TwoMinusGoldenRatio * x3; f1 = f2; f2 = m_ObjectiveFunction.GetValue(x2); } else { x3 = x2; x2 = x1; x1 = MathConsts.GoldenRatioMinusOne * x2 + MathConsts.TwoMinusGoldenRatio * x0; f2 = f1; f1 = m_ObjectiveFunction.GetValue(x1); } evaluationsNeeded++; k++; } /* prepare output and state: */ if (f1 < f2) { argMin = x1; minimum = f1; } else { argMin = x2; minimum = f2; } if (resultStatus == OptimizerErrorClassification.NoResult) { resultStatus = OptimizerErrorClassification.IterationLimitExceeded; /* the loop ended without a break, i.e. the iteration limit was reached */ } return(State.Create(resultStatus, argMin, minimum, evaluationsNeeded, k, InfoOutputProperty.Create("x0", x0), InfoOutputProperty.Create("x1", x1), InfoOutputProperty.Create("x2", x2), InfoOutputProperty.Create("f1", f1), InfoOutputProperty.Create("f2", f2))); }
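/* Self-contained sketch of the golden-section scheme implemented above, operating on a bracketing
   triple (a, b, c) with f(b) < f(a) and f(b) < f(c). The constants below are the numerical values
   assumed for MathConsts.TwoMinusGoldenRatio and MathConsts.GoldenRatioMinusOne; everything else is
   illustrative, not the library implementation. */
using System;

static class GoldenSectionSketch
{
    const double C = 0.3819660112501051;   // 2 - golden ratio
    const double R = 1.0 - C;              // golden ratio - 1

    public static double FindMinimum(Func<double, double> f, double a, double b, double c, double tolerance = 1E-8, int maxIterations = 200)
    {
        double x0 = a, x3 = c, x1, x2;
        if (Math.Abs(c - b) > Math.Abs(b - a)) { x1 = b; x2 = b + C * (c - b); }
        else { x2 = b; x1 = b - C * (b - a); }
        double f1 = f(x1), f2 = f(x2);

        for (int k = 0; k < maxIterations && Math.Abs(x3 - x0) > tolerance * (Math.Abs(x1) + Math.Abs(x2)); k++)
        {
            if (f2 < f1) { x0 = x1; x1 = x2; x2 = R * x1 + C * x3; f1 = f2; f2 = f(x2); }  // keep the bracket (x1, x2, x3)
            else { x3 = x2; x2 = x1; x1 = R * x2 + C * x0; f2 = f1; f1 = f(x1); }          // keep the bracket (x0, x1, x2)
        }
        return f1 < f2 ? x1 : x2;
    }
}
// Example: GoldenSectionSketch.FindMinimum(x => (x - 2.0) * (x - 2.0), 0.0, 1.0, 5.0) is approximately 2.0.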
/// <summary>Finds the minimum of the objective function. This implementation takes the supplied derivative into account.
/// </summary>
/// <param name="a">The first point of the bracketing triple.</param>
/// <param name="b">The second point of the bracketing triple.</param>
/// <param name="c">The third point of the bracketing triple.</param>
/// <param name="evaluationsNeeded">The number of function evaluations that have already been performed.</param>
/// <param name="argMin">The estimated argmin of the objective function (output).</param>
/// <param name="minimum">The minimum, i.e. the function value at <paramref name="argMin"/> (output).</param>
/// <returns>The state of the algorithm.</returns>
/// <remarks>The implementation is based on Press, et al. (1992) "Numerical recipes in C", 2nd ed., p.402ff.</remarks>
private State OptimizeWithDerivative(double a, double b, double c, int evaluationsNeeded, out double argMin, out double minimum) { var objectiveFunction = (OneDimOptimizerDifferentiableFunction)m_ObjectiveFunction; double tolerance, twoTimesTolerance; double d = 0.0, e = 0.0; double xm = 0.0, fb, fu, fv, fw, fx, u, v, w, x; double dw, dv; double aTemp = a < c ? a : c; double bTemp = a > c ? a : c; x = w = v = b; fw = fv = fx = fb = objectiveFunction.GetValue(b, out double dx); dw = dv = dx; var resultStatus = OptimizerErrorClassification.NoResult; for (int k = 1; k <= AbortCondition.MaxIterations; k++) { xm = 0.5 * (aTemp + bTemp); tolerance = AbortCondition.Tolerance * Math.Abs(x) + MachineConsts.ExtremeTinyEpsilon; twoTimesTolerance = 2.0 * tolerance; if (Math.Abs(x - xm) <= (twoTimesTolerance - 0.5 * (bTemp - aTemp))) { resultStatus = OptimizerErrorClassification.ProperResult; } if (evaluationsNeeded >= AbortCondition.MaxEvaluations) { resultStatus = OptimizerErrorClassification.EvaluationLimitExceeded; } if (resultStatus != OptimizerErrorClassification.NoResult) { argMin = x; minimum = fx; return(State.Create(resultStatus, argMin, minimum, evaluationsNeeded, k, InfoOutputProperty.Create("x", x), InfoOutputProperty.Create("fx", fx), InfoOutputProperty.Create("w", w), InfoOutputProperty.Create("fw", fw), InfoOutputProperty.Create("v", v), InfoOutputProperty.Create("fv", fv), InfoOutputProperty.Create("b", b), InfoOutputProperty.Create("fb", fb))); } /* do some iteration step */ if (Math.Abs(e) > tolerance) { /* the derivative may be unavailable because of the given constraints: */ if (!Double.IsNaN(dw) && !Double.IsNaN(dv) && !Double.IsNaN(dx)) { double d1 = 2.0 * (bTemp - aTemp); double d2 = d1; if (dw != dx) { d1 = (w - x) * dx / (dx - dw); } if (dv != dx) { d2 = (v - x) * dx / (dx - dv); } double u1 = x + d1; double u2 = x + d2; bool ok1 = (aTemp - u1) * (u1 - bTemp) > 0.0 && dx * d1 <= 0.0; bool ok2 = (aTemp - u2) * (u2 - bTemp) > 0.0 && dx * d2 <= 0.0; double olde = e; e = d; if (ok1 || ok2) { if (ok1 && ok2) { d = (Math.Abs(d1) < Math.Abs(d2) ? d1 : d2); } else if (ok1) { d = d1; } else { d = d2; } if (Math.Abs(d) <= Math.Abs(0.5 * olde)) { u = x + d; if (u - aTemp < twoTimesTolerance || bTemp - u < twoTimesTolerance) { d = (xm - x >= 0.0) ? tolerance : -tolerance; } } else { e = dx >= 0.0 ? aTemp - x : bTemp - x; d = 0.5 * e; } } else { e = dx >= 0.0 ?
aTemp - x : bTemp - x; d = 0.5 * e; } } else { double r = (x - w) * (fx - fv); double q = (x - v) * (fx - fw); double p = (x - v) * q - (x - w) * r; q = 2.0 * (q - r); if (q > 0.0) { p = -p; } q = Math.Abs(q); double etemp = e; e = d; if (Math.Abs(p) >= Math.Abs(0.5 * q * etemp) || p <= q * (aTemp - x) || p >= q * (bTemp - x)) { e = x >= xm ? aTemp - x : bTemp - x; d = MathConsts.TwoMinusGoldenRatio * e; } else { d = p / q; u = x + d; if (u - aTemp < twoTimesTolerance || bTemp - u < twoTimesTolerance) { d = (xm - x >= 0) ? tolerance : -tolerance; } } } } else { if (Double.IsNaN(dx)) { e = x >= xm ? aTemp - x : bTemp - x; d = MathConsts.TwoMinusGoldenRatio * e; } else { e = dx >= 0.0 ? aTemp - x : bTemp - x; d = 0.5 * e; } } double du; if (Math.Abs(d) >= tolerance) { u = x + d; fu = objectiveFunction.GetValue(u, out du); evaluationsNeeded++; } else { u = x + (d >= 0.0 ? tolerance : -tolerance); fu = objectiveFunction.GetValue(u, out du); evaluationsNeeded++; if (fu > fx) { argMin = x; minimum = fx; return(State.Create(OptimizerErrorClassification.ProperResult, argMin, minimum, evaluationsNeeded, k, InfoOutputProperty.Create("x", x), InfoOutputProperty.Create("fx", fx), InfoOutputProperty.Create("w", w), InfoOutputProperty.Create("fw", fw), InfoOutputProperty.Create("v", v), InfoOutputProperty.Create("fv", fv), InfoOutputProperty.Create("b", b), InfoOutputProperty.Create("fb", fb))); } } if (fu <= fx) { if (u >= x) { aTemp = x; } else { bTemp = x; } Copy(ref v, ref fv, ref dv, w, fw, dw); Copy(ref w, ref fw, ref dw, x, fx, dx); Copy(ref x, ref fx, ref dx, u, fu, du); } else { if (u < x) { aTemp = u; } else { bTemp = u; } if (fu <= fw || w == x) { Copy(ref v, ref fv, ref dv, w, fw, dw); Copy(ref w, ref fw, ref dw, u, fu, du); } else if (fu < fv || v == x || v == w) { Copy(ref v, ref fv, ref dv, u, fu, du); } } } argMin = x; minimum = fx; return(State.Create(OptimizerErrorClassification.IterationLimitExceeded, argMin, minimum, evaluationsNeeded, AbortCondition.MaxIterations, InfoOutputProperty.Create("x", x), InfoOutputProperty.Create("fx", fx), InfoOutputProperty.Create("w", w), InfoOutputProperty.Create("fw", fw), InfoOutputProperty.Create("v", v), InfoOutputProperty.Create("fv", fv), InfoOutputProperty.Create("b", b), InfoOutputProperty.Create("fb", fb))); }
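/* Standalone sketch of the secant trial step used in the derivative-based branch above: extrapolate
   the derivative through (x, dx) and (w, dw) to its zero and accept the step only if it stays strictly
   inside the bracket and points against the slope at x. The helper name and parameters are illustrative;
   assumes 'using System;'. */
static double? SecantStep(double x, double dx, double w, double dw, double lower, double upper)
{
    if (dw == dx) { return null; }                            // the secant through equal slopes is undefined
    double d = (w - x) * dx / (dx - dw);                      // step toward the zero of the derivative
    double u = x + d;
    bool insideBracket = (lower - u) * (u - upper) > 0.0;     // u lies strictly between lower and upper
    bool downhill = dx * d <= 0.0;                            // the step opposes the slope at x
    return (insideBracket && downhill) ? d : (double?)null;
}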
/// <summary>Finds the minimum of the objective function. This implementation does not take a possibly supplied derivative into account.
/// </summary>
/// <param name="a">The first point of the bracketing triple.</param>
/// <param name="b">The second point of the bracketing triple.</param>
/// <param name="c">The third point of the bracketing triple.</param>
/// <param name="evaluationsNeeded">The number of function evaluations that have already been performed.</param>
/// <param name="argMin">The estimated argmin of the objective function (output).</param>
/// <param name="minimum">The minimum, i.e. the function value at <paramref name="argMin"/> (output).</param>
/// <returns>The state of the algorithm.</returns>
/// <remarks>The implementation is based on Press, et al. (1992) "Numerical recipes in C", 2nd ed., p.402ff.</remarks>
private State OptimizeWithoutDerivative(double a, double b, double c, int evaluationsNeeded, out double argMin, out double minimum) { double tolerance, twoTimesTolerance; double d = 0.0, e = 0.0; double xm = 0.0, fb, fu, fv, fw, fx, u, v, w, x; double aTemp = a < c ? a : c; double bTemp = a > c ? a : c; x = w = v = b; fw = fv = fx = fb = m_ObjectiveFunction.GetValue(b); var resultStatus = OptimizerErrorClassification.NoResult; for (int k = 1; k <= AbortCondition.MaxIterations; k++) { xm = 0.5 * (aTemp + bTemp); tolerance = AbortCondition.Tolerance * Math.Abs(x) + MachineConsts.ExtremeTinyEpsilon; twoTimesTolerance = 2.0 * tolerance; if (Math.Abs(x - xm) <= (twoTimesTolerance - 0.5 * (bTemp - aTemp))) { resultStatus = OptimizerErrorClassification.ProperResult; } if (evaluationsNeeded >= AbortCondition.MaxEvaluations) { resultStatus = OptimizerErrorClassification.EvaluationLimitExceeded; } if (resultStatus != OptimizerErrorClassification.NoResult) { argMin = x; minimum = fx; return(State.Create(resultStatus, argMin, minimum, evaluationsNeeded, k, InfoOutputProperty.Create("x", x), InfoOutputProperty.Create("fx", fx), InfoOutputProperty.Create("w", w), InfoOutputProperty.Create("fw", fw), InfoOutputProperty.Create("v", v), InfoOutputProperty.Create("fv", fv), InfoOutputProperty.Create("b", b), InfoOutputProperty.Create("fb", fb))); } /* do some iteration step */ if (Math.Abs(e) > tolerance) { double r = (x - w) * (fx - fv); double q = (x - v) * (fx - fw); double p = (x - v) * q - (x - w) * r; q = 2.0 * (q - r); if (q > 0.0) { p = -p; } q = Math.Abs(q); double etemp = e; e = d; if (Math.Abs(p) >= Math.Abs(0.5 * q * etemp) || p <= q * (aTemp - x) || p >= q * (bTemp - x)) { e = x >= xm ? aTemp - x : bTemp - x; d = MathConsts.TwoMinusGoldenRatio * e; } else { d = p / q; u = x + d; if (u - aTemp < twoTimesTolerance || bTemp - u < twoTimesTolerance) { d = (xm - x >= 0) ? tolerance : -tolerance; } } } else { e = x >= xm ? aTemp - x : bTemp - x; d = MathConsts.TwoMinusGoldenRatio * e; } u = Math.Abs(d) >= tolerance ? x + d : x + ((d >= 0) ?
tolerance : -tolerance); fu = m_ObjectiveFunction.GetValue(u); evaluationsNeeded++; if (fu <= fx) { if (u >= x) { aTemp = x; } else { bTemp = x; } v = w; w = x; x = u; fv = fw; fw = fx; fx = fu; } else { if (u < x) { aTemp = u; } else { bTemp = u; } if (fu <= fw || w == x) { v = w; w = u; fv = fw; fw = fu; } else if (fu <= fv || v == x || v == w) { v = u; fv = fu; } } } argMin = x; minimum = fx; return(State.Create(OptimizerErrorClassification.IterationLimitExceeded, argMin, minimum, evaluationsNeeded, AbortCondition.MaxIterations, InfoOutputProperty.Create("x", x), InfoOutputProperty.Create("fx", fx), InfoOutputProperty.Create("w", w), InfoOutputProperty.Create("fw", fw), InfoOutputProperty.Create("v", v), InfoOutputProperty.Create("fv", fv), InfoOutputProperty.Create("b", b), InfoOutputProperty.Create("fb", fb))); }
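/* Standalone sketch of the parabolic-interpolation step used above (Brent's method): the minimum of
   the parabola through (v, fv), (w, fw), (x, fx) lies at x + p / q with p and q computed as in the
   method body; the caller falls back to a golden-section step if the result is unusable. The helper
   name is illustrative and assumes 'using System;'. */
static double? ParabolicStep(double v, double fv, double w, double fw, double x, double fx)
{
    double r = (x - w) * (fx - fv);
    double q = (x - v) * (fx - fw);
    double p = (x - v) * q - (x - w) * r;
    q = 2.0 * (q - r);
    if (q > 0.0) { p = -p; }
    q = Math.Abs(q);
    return q == 0.0 ? (double?)null : x + p / q;   // degenerate (collinear) points: no parabolic step
}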
/// <summary>Creates an 'n x r' matrix B such that B * B' is a correlation matrix and 'near' to the specified symmetric, normalized matrix of dimension n. A rank reduction is applied if r is strictly less than n.
/// </summary>
/// <param name="rawCorrelationMatrix">The symmetric, normalized matrix for which to find the 'nearest' correlation matrix.</param>
/// <param name="state">The state of the operation in its <see cref="PseudoSqrtMatrixDecomposer.State"/> representation (output).</param>
/// <param name="triangularMatrixType">A value indicating which part of <paramref name="rawCorrelationMatrix"/> to take into account.</param>
/// <param name="outputEntries">This argument is used to store the matrix entries of the resulting matrix B, i.e. the returned matrix points to this array if it is not <c>null</c>; otherwise memory is allocated internally.</param>
/// <param name="worksspaceContainer">A specific <see cref="PseudoSqrtMatrixDecomposer.WorkspaceContainer"/> object to reduce memory allocation; ignored if <c>null</c>.</param>
/// <returns>A <see cref="DenseMatrix"/> object that represents a matrix B such that B * B' is the 'nearest' correlation matrix with respect to <paramref name="rawCorrelationMatrix"/>.</returns>
/// <remarks>In general the returned object does <b>not</b> represent the pseudo-root of <paramref name="rawCorrelationMatrix"/>, i.e. the output of the Cholesky decomposition.
/// <para>The parameters <paramref name="outputEntries"/> and <paramref name="worksspaceContainer"/> allow one to avoid memory allocation and to re-use arrays when correlation matrices are calculated frequently.</para></remarks>
public override DenseMatrix Create(DenseMatrix rawCorrelationMatrix, out State state, double[] outputEntries = null, WorkspaceContainer worksspaceContainer = null, BLAS.TriangularMatrixType triangularMatrixType = BLAS.TriangularMatrixType.LowerTriangularMatrix) { if (rawCorrelationMatrix.IsQuadratic == false) { throw new ArgumentException("The matrix must be quadratic.", nameof(rawCorrelationMatrix)); } int n = rawCorrelationMatrix.RowCount; if ((outputEntries == null) || (outputEntries.Length < n * n)) { outputEntries = new double[n * n]; } var ws = worksspaceContainer as Workspace; if ((ws == null) || (ws.Dimension < n)) { ws = new Workspace(n); } /* calculate an initial value for the EZI approach, i.e. apply the eigenvalue zeroing (without normalization): */ int initialRank; BLAS.Level1.dcopy(n * n, rawCorrelationMatrix.Data, ws.p); f1(n, ws.p, 0, ws, outputEntries, out initialRank); var detailDataTables = new List<DataTable>(); if (InfoOutputDetailLevel.IsAtLeastAsComprehensiveAs(InfoOutputDetailLevel.High)) { detailDataTables.Add(CreateDataTableWithEigenvalues("Eigenvalues.Adjusted[0]", initialRank, ws.eigenValues)); } BLAS.Level3.dgemm(n, n, n, 1.0, outputEntries, outputEntries, 0.0, ws.p, transposeB: BLAS.MatrixTransposeState.Transpose); BLAS.Level1.dcopy(n * n, ws.p, ws.q); int minNumberOfEigenvaluesToSetZero = n - Math.Min(MaximalRank ?? n, initialRank); var a = 1.0; int rank = initialRank; for (int k = 1; k < AbortCondition.MaxIterations; k++) { f1(n, ws.q, minNumberOfEigenvaluesToSetZero, ws, outputEntries, out rank); if (InfoOutputDetailLevel.IsAtLeastAsComprehensiveAs(InfoOutputDetailLevel.Full)) { detailDataTables.Add(CreateDataTableWithEigenvalues(String.Format("Eigenvalues.Adjusted[{0}]", k), rank, ws.eigenValues)); } BLAS.Level3.dgemm(n, n, n, 1.0, outputEntries, outputEntries, 0.0, ws.q, transposeB: BLAS.MatrixTransposeState.Transpose); /* normalize the correlation matrix, i.e.
calculate <q> */ for (int i = 0; i < n; i++) { var t_i = 0.0; for (int j = n - 1; j >= n - rank; j--) { t_i += outputEntries[i + j * n] * outputEntries[i + j * n]; } BLAS.Level1.dscal(rank, 1.0 / Math.Sqrt(t_i), outputEntries, -n, i + (n - 1) * n); // [i+j*n] *= 1/Math.Sqrt(t_i) for j = n-1, ..., n-rank
} BLAS.Level3.dgemm(n, n, n, 1.0, outputEntries, outputEntries, 0.0, ws.qNormalized, transposeB: BLAS.MatrixTransposeState.Transpose); if (AbortCondition.IsSatisfied(n, ws.p, ws.q, ws.qNormalized, ref a) == true) { /* The eigenvalues are in ascending order, i.e. the relevant eigenvectors are stored in the last 'rank' columns; swap them to the front: */ BLAS.Level1.dswap(n * rank, outputEntries, 1, outputEntries, 1, n * (n - rank), 0); state = State.Create(rank, new InfoOutputProperty[] { InfoOutputProperty.Create("Eigenvalues set to 0.0", n - rank) }, detailDataTables, iterationsNeeded: k, infoOutputDetailLevel: InfoOutputDetailLevel); return(new DenseMatrix(n, rank, outputEntries)); } BLAS.Level1.dcopy(n * n, ws.q, ws.p); // set p := q
for (int j = 0; j < n; j++) { ws.q[j * n + j] = 1.0; } } /* The eigenvalues are in ascending order, i.e. the relevant eigenvectors are stored in the last 'rank' columns; swap them to the front: */ BLAS.Level1.dswap(n * rank, outputEntries, 1, outputEntries, 1, n * (n - rank), 0); state = State.Create(rank, new InfoOutputProperty[] { InfoOutputProperty.Create("Eigenvalues set to 0.0", n - rank) }, detailDataTables, AbortCondition.MaxIterations, infoOutputDetailLevel: InfoOutputDetailLevel); return(new DenseMatrix(n, rank, outputEntries)); }
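/* Illustrative usage of the public Create overload above; 'decomposer' stands for an instance of the
   class that defines it and 'rawCorrelationMatrix' for a symmetric, normalized DenseMatrix. */
DenseMatrix b = decomposer.Create(rawCorrelationMatrix, out State state);
// b * b' approximates the 'nearest' correlation matrix; the number of columns of b is the (possibly reduced) rank.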
/// <summary>Finds the minimum and argmin of <see cref="IMultiDimOptimizerAlgorithm.Function" />.
/// </summary>
/// <param name="x">An array with at least <see cref="IMultiDimOptimizerAlgorithm.Dimension" /> elements which may be used as an initial guess of the algorithm; on exit this argument contains the argmin.</param>
/// <param name="minimum">The minimum, i.e. the function value at <paramref name="x" />, which represents the argmin (output).</param>
/// <returns>The state of the algorithm, i.e. an indication whether <paramref name="x" /> and <paramref name="minimum" /> contain valid data.</returns>
/// <exception cref="NotOperableException">Thrown if no objective function is specified.</exception>
public State FindMinimum(double[] x, out double minimum) { if (m_NLoptFunction == null) { throw new NotOperableException("No objective function specified."); } var nloptResultCode = m_NLoptPtr.FindMinimum(x, out minimum); switch (nloptResultCode) { case NLoptResultCode.Success: case NLoptResultCode.FToleranceReached: case NLoptResultCode.XToleranceReached: return(MultiDimOptimizer.State.Create(OptimizerErrorClassification.ProperResult, minimum, details: InfoOutputProperty.Create("NLopt result code", nloptResultCode))); case NLoptResultCode.MaximalNumberOfFunctionEvaluationsReached: return(MultiDimOptimizer.State.Create(OptimizerErrorClassification.EvaluationLimitExceeded, minimum)); case NLoptResultCode.MaximalTimeReached: return(MultiDimOptimizer.State.Create(OptimizerErrorClassification.EvaluationTimeExceeded, minimum)); default: return(MultiDimOptimizer.State.Create(OptimizerErrorClassification.Unknown, minimum, details: InfoOutputProperty.Create("NLopt result code", nloptResultCode))); } }
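/* Illustrative usage of the method above; 'algorithm' stands for an instance whose objective function
   has already been specified and whose dimension matches the length of the initial guess. */
var x = new double[] { 0.0, 0.0 };                       // initial guess; overwritten with the argmin on exit
State state = algorithm.FindMinimum(x, out double minimum);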