public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x_ = P.currentValue();
    currentProblem_ = P;
    initCostValues_ = P.costFunction().values(x_);
    int m = initCostValues_.size();
    int n = x_.size();
    Vector xx = new Vector(x_);
    Vector fvec = new Vector(m), diag = new Vector(n);
    int mode = 1;
    double factor = 1;
    int nprint = 0;
    int info = 0;
    int nfev = 0;
    Matrix fjac = new Matrix(m, n);
    int ldfjac = m;
    List<int> ipvt = new InitializedList<int>(n);
    Vector qtf = new Vector(n),
           wa1 = new Vector(n), wa2 = new Vector(n), wa3 = new Vector(n),
           wa4 = new Vector(m);

    // call lmdif to minimize the sum of the squares of m functions
    // in n variables by the Levenberg-Marquardt algorithm.
    MINPACK.lmdif(m, n, xx, ref fvec,
                  endCriteria.functionEpsilon(), xtol_, gtol_,
                  endCriteria.maxIterations(), epsfcn_,
                  diag, mode, factor, nprint,
                  ref info, ref nfev, ref fjac, ldfjac,
                  ref ipvt, ref qtf, wa1, wa2, wa3, wa4, fcn);
    info_ = info;

    // check requirements & endCriteria evaluation
    if (info == 0)
        throw new ApplicationException("MINPACK: improper input parameters");
    //if (info == 6) throw new ApplicationException("MINPACK: ftol is too small. no further " +
    //                                              "reduction in the sum of squares is possible.");
    if (info != 6)
        ecType = EndCriteria.Type.StationaryFunctionValue;
    //QL_REQUIRE(info != 5, "MINPACK: number of calls to fcn has reached or exceeded maxfev.");
    endCriteria.checkMaxIterations(nfev, ref ecType);
    if (info == 7)
        throw new ApplicationException("MINPACK: xtol is too small. no further " +
                                       "improvement in the approximate " +
                                       "solution x is possible.");
    if (info == 8)
        throw new ApplicationException("MINPACK: gtol is too small. fvec is " +
                                       "orthogonal to the columns of the " +
                                       "jacobian to machine precision.");

    // set problem
    x_ = new Vector(xx.GetRange(0, n));
    P.setCurrentValue(x_);
    P.setFunctionValue(P.costFunction().value(x_));

    return ecType;
}
public void OptimizersTest() { //("Testing optimizers..."); setup(); // Loop over problems (currently there is only 1 problem) for (int i=0; i<costFunctions_.Count; ++i) { Problem problem = new Problem(costFunctions_[i], constraints_[i], initialValues_[i]); Vector initialValues = problem.currentValue(); // Loop over optimizers for (int j = 0; j < (optimizationMethods_[i]).Count; ++j) { double rootEpsilon = endCriterias_[i].rootEpsilon(); int endCriteriaTests = 1; // Loop over rootEpsilon for(int k=0; k<endCriteriaTests; ++k) { problem.setCurrentValue(initialValues); EndCriteria endCriteria = new EndCriteria(endCriterias_[i].maxIterations(), endCriterias_[i].maxStationaryStateIterations(), rootEpsilon, endCriterias_[i].functionEpsilon(), endCriterias_[i].gradientNormEpsilon()); rootEpsilon *= .1; EndCriteria.Type endCriteriaResult = optimizationMethods_[i][j].optimizationMethod.minimize(problem, endCriteria); Vector xMinCalculated = problem.currentValue(); Vector yMinCalculated = problem.values(xMinCalculated); // Check optimization results vs known solution if (endCriteriaResult==EndCriteria.Type.None || endCriteriaResult==EndCriteria.Type.MaxIterations || endCriteriaResult==EndCriteria.Type.Unknown) Assert.Fail("function evaluations: " + problem.functionEvaluation() + " gradient evaluations: " + problem.gradientEvaluation() + " x expected: " + xMinExpected_[i] + " x calculated: " + xMinCalculated + " x difference: " + (xMinExpected_[i]- xMinCalculated) + " rootEpsilon: " + endCriteria.rootEpsilon() + " y expected: " + yMinExpected_[i] + " y calculated: " + yMinCalculated + " y difference: " + (yMinExpected_[i]- yMinCalculated) + " functionEpsilon: " + endCriteria.functionEpsilon() + " endCriteriaResult: " + endCriteriaResult); } } } }
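// A minimal, self-contained sketch of the pattern the test above exercises:
// build a cost function, wrap it in a Problem with a constraint and a start
// point, and hand it to an optimizer together with an EndCriteria.  The
// RosenbrockCostFunction name and its value()/values() overrides are
// illustrative assumptions; Problem, EndCriteria, NoConstraint and
// LevenbergMarquardt mirror the QLNet API used in the snippets above.
class RosenbrockCostFunction : CostFunction
{
    // f(x, y) = (1 - x)^2 + 100 (y - x^2)^2, minimum at (1, 1)
    public override double value(Vector x)
    {
        Vector r = values(x);
        return Vector.DotProduct(r, r);
    }

    public override Vector values(Vector x)
    {
        // residuals whose squared sum is the Rosenbrock function
        Vector r = new Vector(2);
        r[0] = 1.0 - x[0];
        r[1] = 10.0 * (x[1] - x[0] * x[0]);
        return r;
    }
}

public static void MinimizeRosenbrock()
{
    Vector start = new Vector(2);
    start[0] = -1.0;
    start[1] = 1.0;

    Problem problem = new Problem(new RosenbrockCostFunction(), new NoConstraint(), start);
    EndCriteria endCriteria = new EndCriteria(1000,    // maxIterations
                                              100,     // maxStationaryStateIterations
                                              1e-8,    // rootEpsilon
                                              1e-8,    // functionEpsilon
                                              1e-8);   // gradientNormEpsilon

    OptimizationMethod method = new LevenbergMarquardt();
    EndCriteria.Type result = method.minimize(problem, endCriteria);

    Vector xMin = problem.currentValue();   // should be close to (1, 1)
    Console.WriteLine(result + ": " + xMin);
}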
//! solve the optimization problem P
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    // Initializations
    double ftol = endCriteria.functionEpsilon();
    int maxStationaryStateIterations_ = endCriteria.maxStationaryStateIterations();
    EndCriteria.Type ecType = EndCriteria.Type.None;   // reset end criteria
    P.reset();                                         // reset problem
    Vector x_ = P.currentValue();                      // store the starting point
    int iterationNumber_ = 0;
    // stationaryStateIterationNumber_=0
    lineSearch_.searchDirection = new Vector(x_.Count); // dimension line search
    bool done = false;

    // function and squared norm of gradient values
    double fnew;
    double fold;
    double gold2;
    double c;
    double fdiff;
    double normdiff;
    // classical initial value for line-search step
    double t = 1.0;
    // Set gradient g at the size of the optimization problem search direction
    int sz = lineSearch_.searchDirection.Count;
    Vector g = new Vector(sz);
    Vector d = new Vector(sz);
    Vector sddiff = new Vector(sz);

    // Initialize cost function, gradient g and search direction
    P.setFunctionValue(P.valueAndGradient(g, x_));
    P.setGradientNormValue(Vector.DotProduct(g, g));
    lineSearch_.searchDirection = g * -1.0;

    // Loop over iterations
    do
    {
        // Linesearch
        t = lineSearch_.value(P, ref ecType, endCriteria, t);
        // don't throw: it can fail just because maxIterations exceeded
        //QL_REQUIRE(lineSearch_->succeed(), "line-search failed!");
        if (lineSearch_.succeed())
        {
            // Updates
            d = lineSearch_.searchDirection;
            // New point
            x_ = lineSearch_.lastX();
            // New function value
            fold = P.functionValue();
            P.setFunctionValue(lineSearch_.lastFunctionValue());
            // New gradient and search direction vectors
            g = lineSearch_.lastGradient();
            // orthogonalization coef
            gold2 = P.gradientNormValue();
            P.setGradientNormValue(lineSearch_.lastGradientNorm2());
            c = P.gradientNormValue() / gold2;
            // conjugate gradient search direction
            sddiff = ((g * -1.0) + c * d) - lineSearch_.searchDirection;
            normdiff = Math.Sqrt(Vector.DotProduct(sddiff, sddiff));
            lineSearch_.searchDirection = (g * -1.0) + c * d;
            // Now compute accuracy and check end criteria
            // Numerical Recipes exit strategy on fx (see NR in C++, p.423)
            fnew = P.functionValue();
            fdiff = 2.0 * Math.Abs(fnew - fold) /
                    (Math.Abs(fnew) + Math.Abs(fold) + Double.Epsilon);
            if (fdiff < ftol || endCriteria.checkMaxIterations(iterationNumber_, ref ecType))
            {
                endCriteria.checkStationaryFunctionValue(0.0, 0.0, ref maxStationaryStateIterations_, ref ecType);
                endCriteria.checkMaxIterations(iterationNumber_, ref ecType);
                return ecType;
            }
            //done = endCriteria(iterationNumber_,
            //                   stationaryStateIterationNumber_,
            //                   true,  //FIXME: it should be in the problem
            //                   fold,
            //                   std::sqrt(gold2),
            //                   P.functionValue(),
            //                   std::sqrt(P.gradientNormValue()),
            //                   ecType);
            P.setCurrentValue(x_);   // update problem current value
            ++iterationNumber_;      // Increase iteration number
        }
        else
        {
            done = true;
        }
    } while (!done);

    P.setCurrentValue(x_);
    return ecType;
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    // Initializations
    double ftol = endCriteria.functionEpsilon();
    int maxStationaryStateIterations_ = endCriteria.maxStationaryStateIterations();
    EndCriteria.Type ecType = EndCriteria.Type.None;   // reset end criteria
    P.reset();                                         // reset problem
    Vector x_ = P.currentValue();                      // store the starting point
    int iterationNumber_ = 0;
    // dimension line search
    lineSearch_.searchDirection = new Vector(x_.size());
    bool done = false;

    // function and squared norm of gradient values
    double fnew, fold, gold2;
    double fdiff;
    // classical initial value for line-search step
    double t = 1.0;
    // Set gradient g at the size of the optimization problem
    // search direction
    int sz = lineSearch_.searchDirection.size();
    Vector prevGradient = new Vector(sz), d = new Vector(sz),
           sddiff = new Vector(sz), direction = new Vector(sz);
    // Initialize cost function, gradient prevGradient and search direction
    P.setFunctionValue(P.valueAndGradient(prevGradient, x_));
    P.setGradientNormValue(Vector.DotProduct(prevGradient, prevGradient));
    lineSearch_.searchDirection = prevGradient * -1;

    bool first_time = true;
    // Loop over iterations
    do
    {
        // Linesearch
        if (!first_time)
            prevGradient = lineSearch_.lastGradient();
        t = lineSearch_.value(P, ref ecType, endCriteria, t);
        // don't throw: it can fail just because maxIterations exceeded
        if (lineSearch_.succeed())
        {
            // Updates
            // New point
            x_ = lineSearch_.lastX();
            // New function value
            fold = P.functionValue();
            P.setFunctionValue(lineSearch_.lastFunctionValue());
            // New gradient and search direction vectors
            // orthogonalization coef
            gold2 = P.gradientNormValue();
            P.setGradientNormValue(lineSearch_.lastGradientNorm2());
            // conjugate gradient search direction
            direction = getUpdatedDirection(P, gold2, prevGradient);
            sddiff = direction - lineSearch_.searchDirection;
            lineSearch_.searchDirection = direction;
            // Now compute accuracy and check end criteria
            // Numerical Recipes exit strategy on fx (see NR in C++, p.423)
            fnew = P.functionValue();
            fdiff = 2.0 * Math.Abs(fnew - fold) /
                    (Math.Abs(fnew) + Math.Abs(fold) + Const.QL_EPSILON);
            if (fdiff < ftol || endCriteria.checkMaxIterations(iterationNumber_, ref ecType))
            {
                endCriteria.checkStationaryFunctionValue(0.0, 0.0, ref maxStationaryStateIterations_, ref ecType);
                endCriteria.checkMaxIterations(iterationNumber_, ref ecType);
                return ecType;
            }
            P.setCurrentValue(x_);   // update problem current value
            ++iterationNumber_;      // Increase iteration number
            first_time = false;
        }
        else
        {
            done = true;
        }
    } while (!done);

    P.setCurrentValue(x_);
    return ecType;
}
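// The loop above delegates the direction update to getUpdatedDirection().
// A minimal sketch of a conjugate-gradient (Fletcher-Reeves) implementation of
// that hook, matching the inline update "-g + c*d" with c = |g_new|^2 / |g_old|^2
// used in the older version further up; the exact signature and the protected
// virtual declaration are assumptions inferred from how the method is called here.
// A Polak-Ribiere variant would instead use oldGradient to form
// c = g_new . (g_new - g_old) / |g_old|^2.
protected virtual Vector getUpdatedDirection(Problem P, double gold2, Vector oldGradient)
{
    // Fletcher-Reeves coefficient: ratio of new to old squared gradient norms
    double c = P.gradientNormValue() / gold2;
    // new search direction: steepest-descent step plus c times the previous direction
    // (lineSearch_.searchDirection has not yet been overwritten at the call site)
    return lineSearch_.lastGradient() * -1.0 + c * lineSearch_.searchDirection;
}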
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x_ = P.currentValue();
    currentProblem_ = P;
    initCostValues_ = P.costFunction().values(x_);
    int m = initCostValues_.size();
    int n = x_.size();
    if (useCostFunctionsJacobian_)
    {
        initJacobian_ = new Matrix(m, n);
        P.costFunction().jacobian(initJacobian_, x_);
    }
    Vector xx = new Vector(x_);
    Vector fvec = new Vector(m), diag = new Vector(n);
    int mode = 1;
    double factor = 1;
    int nprint = 0;
    int info = 0;
    int nfev = 0;
    Matrix fjac = new Matrix(m, n);
    int ldfjac = m;
    List<int> ipvt = new InitializedList<int>(n);
    Vector qtf = new Vector(n),
           wa1 = new Vector(n), wa2 = new Vector(n), wa3 = new Vector(n),
           wa4 = new Vector(m);

    // call lmdif to minimize the sum of the squares of m functions
    // in n variables by the Levenberg-Marquardt algorithm.
    Func<int, int, Vector, int, Matrix> j = null;
    if (useCostFunctionsJacobian_)
        j = jacFcn;

    // requirements; check here to get more detailed error messages.
    Utils.QL_REQUIRE(n > 0, () => "no variables given");
    Utils.QL_REQUIRE(m >= n, () => $"less functions ({m}) than available variables ({n})");
    Utils.QL_REQUIRE(endCriteria.functionEpsilon() >= 0.0, () => "negative f tolerance");
    Utils.QL_REQUIRE(xtol_ >= 0.0, () => "negative x tolerance");
    Utils.QL_REQUIRE(gtol_ >= 0.0, () => "negative g tolerance");
    Utils.QL_REQUIRE(endCriteria.maxIterations() > 0, () => "null number of evaluations");

    MINPACK.lmdif(m, n, xx, ref fvec,
                  endCriteria.functionEpsilon(), xtol_, gtol_,
                  endCriteria.maxIterations(), epsfcn_,
                  diag, mode, factor, nprint,
                  ref info, ref nfev, ref fjac, ldfjac,
                  ref ipvt, ref qtf, wa1, wa2, wa3, wa4, fcn, j);
    info_ = info;

    // check requirements & endCriteria evaluation
    Utils.QL_REQUIRE(info != 0, () => "MINPACK: improper input parameters");
    if (info != 6)
        ecType = EndCriteria.Type.StationaryFunctionValue;
    endCriteria.checkMaxIterations(nfev, ref ecType);
    Utils.QL_REQUIRE(info != 7, () => "MINPACK: xtol is too small. no further " +
                                      "improvement in the approximate " +
                                      "solution x is possible.");
    Utils.QL_REQUIRE(info != 8, () => "MINPACK: gtol is too small. fvec is " +
                                      "orthogonal to the columns of the " +
                                      "jacobian to machine precision.");

    // set problem
    x_ = new Vector(xx.GetRange(0, n));
    P.setCurrentValue(x_);
    P.setFunctionValue(P.costFunction().value(x_));

    return ecType;
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x_ = P.currentValue();
    currentProblem_ = P;
    initCostValues_ = P.costFunction().values(x_);
    int m = initCostValues_.size();
    int n = x_.size();
    if (useCostFunctionsJacobian_)
    {
        initJacobian_ = new Matrix(m, n);
        P.costFunction().jacobian(initJacobian_, x_);
    }
    Vector xx = new Vector(x_);
    Vector fvec = new Vector(m), diag = new Vector(n);
    int mode = 1;
    double factor = 1;
    int nprint = 0;
    int info = 0;
    int nfev = 0;
    Matrix fjac = new Matrix(m, n);
    int ldfjac = m;
    List<int> ipvt = new InitializedList<int>(n);
    Vector qtf = new Vector(n),
           wa1 = new Vector(n), wa2 = new Vector(n), wa3 = new Vector(n),
           wa4 = new Vector(m);

    // call lmdif to minimize the sum of the squares of m functions
    // in n variables by the Levenberg-Marquardt algorithm.
    Func<int, int, Vector, int, Matrix> j = null;
    if (useCostFunctionsJacobian_)
        j = jacFcn;

    MINPACK.lmdif(m, n, xx, ref fvec,
                  endCriteria.functionEpsilon(), xtol_, gtol_,
                  endCriteria.maxIterations(), epsfcn_,
                  diag, mode, factor, nprint,
                  ref info, ref nfev, ref fjac, ldfjac,
                  ref ipvt, ref qtf, wa1, wa2, wa3, wa4, fcn, j);
    info_ = info;

    // check requirements & endCriteria evaluation
    if (info == 0)
        throw new ApplicationException("MINPACK: improper input parameters");
    //if (info == 6) throw new ApplicationException("MINPACK: ftol is too small. no further " +
    //                                              "reduction in the sum of squares is possible.");
    if (info != 6)
        ecType = EndCriteria.Type.StationaryFunctionValue;
    //QL_REQUIRE(info != 5, "MINPACK: number of calls to fcn has reached or exceeded maxfev.");
    endCriteria.checkMaxIterations(nfev, ref ecType);
    if (info == 7)
        throw new ApplicationException("MINPACK: xtol is too small. no further " +
                                       "improvement in the approximate " +
                                       "solution x is possible.");
    if (info == 8)
        throw new ApplicationException("MINPACK: gtol is too small. fvec is " +
                                       "orthogonal to the columns of the " +
                                       "jacobian to machine precision.");

    // set problem
    x_ = new Vector(xx.GetRange(0, n));
    P.setCurrentValue(x_);
    P.setFunctionValue(P.costFunction().value(x_));

    return ecType;
}
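// Sketch of the analytic-Jacobian path guarded by useCostFunctionsJacobian_
// above: a least-squares cost function that overrides jacobian(Matrix, Vector),
// the signature used by P.costFunction().jacobian(initJacobian_, x_).  The
// LinearFitCostFunction name, the sample data, and the assumption that
// CostFunction exposes jacobian() as a virtual hook are illustrative.
class LinearFitCostFunction : CostFunction
{
    // residuals r_i = a + b*t_i - y_i for a straight-line fit, x = (a, b)
    private readonly double[] t_ = { 0.0, 1.0, 2.0, 3.0 };
    private readonly double[] y_ = { 1.1, 2.9, 5.2, 6.8 };

    public override double value(Vector x)
    {
        Vector r = values(x);
        return Vector.DotProduct(r, r);
    }

    public override Vector values(Vector x)
    {
        Vector r = new Vector(t_.Length);
        for (int i = 0; i < t_.Length; ++i)
            r[i] = x[0] + x[1] * t_[i] - y_[i];
        return r;
    }

    public override void jacobian(Matrix jac, Vector x)
    {
        // dr_i/da = 1, dr_i/db = t_i (constant for a linear model)
        for (int i = 0; i < t_.Length; ++i)
        {
            jac[i, 0] = 1.0;
            jac[i, 1] = t_[i];
        }
    }
}

// With a cost function like this, the Jacobian-enabled variant of the optimizer
// (enabled via its useCostFunctionsJacobian flag) passes jacFcn to MINPACK.lmdif
// instead of letting lmdif difference fcn numerically.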