//! Calibrate to a set of market instruments (caps/swaptions)
/*! An additional constraint can be passed which must be
    satisfied in addition to the constraints of the model. */
//public void calibrate(List<CalibrationHelper> instruments, OptimizationMethod method, EndCriteria endCriteria,
//                      Constraint constraint = new Constraint(), List<double> weights = new List<double>()) {
public void calibrate(List<CalibrationHelper> instruments, OptimizationMethod method, EndCriteria endCriteria,
                      Constraint additionalConstraint, List<double> weights)
{
    if (!(weights.Count == 0 || weights.Count == instruments.Count))
        throw new ApplicationException("mismatch between number of instruments and weights");

    Constraint c;
    if (additionalConstraint.empty())
        c = constraint_;
    else
        c = new CompositeConstraint(constraint_, additionalConstraint);

    List<double> w = weights.Count == 0
                     ? new InitializedList<double>(instruments.Count, 1.0)
                     : weights;

    CalibrationFunction f = new CalibrationFunction(this, instruments, w);

    Problem prob = new Problem(f, c, parameters());
    shortRateEndCriteria_ = method.minimize(prob, endCriteria);
    Vector result = new Vector(prob.currentValue());
    setParams(result);
    // recheck
    Vector shortRateProblemValues_ = prob.values(result);

    notifyObservers();
}
public DVPLI.EstimationResult Estimate(List<object> data, DVPLI.IEstimationSettings settings = null,
                                       DVPLI.IController controller = null,
                                       Dictionary<string, object> properties = null)
{
    DVPLI.InterestRateMarketData irmd = data[0] as DVPLI.InterestRateMarketData;

    //Date today = new Date(15, Month.February, 2002);
    //Date settlement = new Date(19, Month.February, 2002);
    Settings.setEvaluationDate(irmd.Date);
    Handle<YieldTermStructure> termStructure =
        new Handle<YieldTermStructure>(new Utilities.ZeroRateFunction(irmd.Date, irmd.ZRMarketDates, irmd.ZRMarket));
    //termStructure.link

    HullWhite model = new HullWhite(termStructure);
    IborIndex index = new Euribor6M(termStructure);
    IPricingEngine engine = new JamshidianSwaptionEngine(model);

    List<CalibrationHelper> swaptions = new List<CalibrationHelper>();
    for (int i = 0; i < irmd.SwapDates.Length; i++)
    {
        for (int j = 0; j < irmd.SwapDuration.Length; j++)
        {
            Quote vol = new SimpleQuote(irmd.SwaptionsVolatility[j, i]);
            CalibrationHelper helper =
                new SwaptionHelper(new Period((int)irmd.SwapDates[i], TimeUnit.Years),
                                   new Period((int)irmd.SwapDuration[j], TimeUnit.Years),
                                   new Handle<Quote>(vol), index,
                                   new Period(1, TimeUnit.Years), new Thirty360(), new Actual360(),
                                   termStructure, false);
            helper.setPricingEngine(engine);
            swaptions.Add(helper);
        }
    }

    // Set up the optimization problem
    LevenbergMarquardt optimizationMethod = new LevenbergMarquardt(1.0e-8, 1.0e-8, 1.0e-8);
    EndCriteria endCriteria = new EndCriteria(10000, 100, 1e-6, 1e-8, 1e-8);

    // Optimize
    model.calibrate(swaptions, optimizationMethod, endCriteria, new Constraint(), new List<double>());
    EndCriteria.Type ecType = model.endCriteria();

    Vector xMinCalculated = model.parameters();
    double yMinCalculated = model.value(xMinCalculated, swaptions);
    Vector xMinExpected = new Vector(2);
    double yMinExpected = model.value(xMinExpected, swaptions);

    DVPLI.EstimationResult r =
        new DVPLI.EstimationResult(new string[] { "Alpha", "Sigma" },
                                   new double[] { xMinCalculated[0], xMinCalculated[1] });
    return r;
}
//! Perform line search
public override double value(Problem P, ref EndCriteria.Type ecType, EndCriteria endCriteria, double t_ini)
{
    Constraint constraint = P.constraint();
    succeed_ = true;
    bool maxIter = false;
    double qtold;
    double t = t_ini;
    int loopNumber = 0;

    double q0 = P.functionValue();
    double qp0 = P.gradientNormValue();

    qt_ = q0;
    qpt_ = (gradient_.Count == 0) ? qp0 : -Vector.DotProduct(gradient_, searchDirection_);

    // Initialize gradient
    gradient_ = new Vector(P.currentValue().Count);
    // Compute new point
    xtd_ = P.currentValue().Clone();
    t = update(ref xtd_, searchDirection_, t, constraint);
    // Compute function value at the new point
    qt_ = P.value(xtd_);

    // Enter the loop if the criterion is not satisfied
    if ((qt_ - q0) > -alpha_ * t * qpt_)
    {
        do
        {
            loopNumber++;
            // Decrease step
            t *= beta_;
            // Store old value of the function
            qtold = qt_;
            // New point value
            xtd_ = P.currentValue();
            t = update(ref xtd_, searchDirection_, t, constraint);

            // Compute function value at the new point
            qt_ = P.value(xtd_);
            P.gradient(ref gradient_, xtd_);
            // and its squared norm
            maxIter = endCriteria.checkMaxIterations(loopNumber, ref ecType);
        }
        while ((((qt_ - q0) > (-alpha_ * t * qpt_)) ||
                ((qtold - q0) <= (-alpha_ * t * qpt_ / beta_))) &&
               (!maxIter));
    }

    if (maxIter)
        succeed_ = false;

    // Compute new gradient
    P.gradient(ref gradient_, xtd_);
    // and its squared norm
    qpt_ = Vector.DotProduct(gradient_, gradient_);

    // Return new step value
    return t;
}
//! minimize the optimization problem P
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x_ = P.currentValue();
    int iterationNumber_ = 0;
    int stationaryStateIterationNumber_ = 0;

    lineSearch_.searchDirection = new Vector(x_.Count);

    bool end;

    // function and squared norm of gradient values
    double normdiff;
    // classical initial value for line-search step
    double t = 1.0;
    // Set gold at the size of the optimization problem search direction
    Vector gold = new Vector(lineSearch_.searchDirection.Count);
    Vector gdiff = new Vector(lineSearch_.searchDirection.Count);

    P.setFunctionValue(P.valueAndGradient(gold, x_));
    lineSearch_.searchDirection = gold * -1.0;
    P.setGradientNormValue(Vector.DotProduct(gold, gold));
    normdiff = Math.Sqrt(P.gradientNormValue());

    do
    {
        // Linesearch
        t = lineSearch_.value(P, ref ecType, endCriteria, t);

        if (!lineSearch_.succeed())
            throw new ApplicationException("line-search failed!");

        // End criteria
        // FIXME: it's never been used! ??? (normdiff)
        end = endCriteria.value(iterationNumber_, ref stationaryStateIterationNumber_, true,
                                P.functionValue(), Math.Sqrt(P.gradientNormValue()),
                                lineSearch_.lastFunctionValue(), Math.Sqrt(lineSearch_.lastGradientNorm2()),
                                ref ecType);

        // Updates
        // New point
        x_ = lineSearch_.lastX();
        // New function value
        P.setFunctionValue(lineSearch_.lastFunctionValue());
        // New gradient and search direction vectors
        gdiff = gold - lineSearch_.lastGradient();
        normdiff = Math.Sqrt(Vector.DotProduct(gdiff, gdiff));
        gold = lineSearch_.lastGradient();
        lineSearch_.searchDirection = gold * -1.0;
        // New gradient squared norm
        P.setGradientNormValue(lineSearch_.lastGradientNorm2());

        // Increase iteration number
        ++iterationNumber_;
    } while (end == false);

    P.setCurrentValue(x_);
    return ecType;
}
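// A hedged usage sketch of the minimize() contract above: wrap a cost function,
// a constraint and a start vector in a Problem, then hand it to the optimizer
// together with an EndCriteria — the same pattern used by OptimizersTest below.
// `MyCostFunction` is a hypothetical user-defined CostFunction subclass (a
// concrete example follows the Levenberg-Marquardt minimize snippet below), and
// the class name SteepestDescent is an assumption, since the snippet above does
// not show its enclosing class.
Problem problem = new Problem(new MyCostFunction(), new NoConstraint(), new Vector(2));
EndCriteria endCriteria = new EndCriteria(10000, 100, 1e-8, 1e-8, 1e-8);
EndCriteria.Type ecType = new SteepestDescent().minimize(problem, endCriteria);
Vector xMin = problem.currentValue();     // minimizer found by the loop above
double fMin = problem.functionValue();    // objective value at that point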
//! Calibrate to a set of market instruments (caps/swaptions)
/*! An additional constraint can be passed which must be
    satisfied in addition to the constraints of the model. */
public void calibrate(List<CalibrationHelper> instruments, OptimizationMethod method, EndCriteria endCriteria,
                      Constraint additionalConstraint = null, List<double> weights = null,
                      List<bool> fixParameters = null)
{
    if (weights == null)
        weights = new List<double>();
    if (additionalConstraint == null)
        additionalConstraint = new Constraint();

    Utils.QL_REQUIRE(weights.empty() || weights.Count == instruments.Count, () =>
                     "mismatch between number of instruments (" + instruments.Count +
                     ") and weights (" + weights.Count + ")");

    Constraint c;
    if (additionalConstraint.empty())
        c = constraint_;
    else
        c = new CompositeConstraint(constraint_, additionalConstraint);

    List<double> w = weights.Count == 0
                     ? new InitializedList<double>(instruments.Count, 1.0)
                     : weights;

    Vector prms = parameters();
    List<bool> all = new InitializedList<bool>(prms.size(), false);
    Projection proj = new Projection(prms, fixParameters ?? all);
    CalibrationFunction f = new CalibrationFunction(this, instruments, w, proj);
    ProjectedConstraint pc = new ProjectedConstraint(c, proj);

    Problem prob = new Problem(f, pc, proj.project(prms));
    shortRateEndCriteria_ = method.minimize(prob, endCriteria);
    Vector result = new Vector(prob.currentValue());
    setParams(proj.include(result));
    Vector shortRateProblemValues_ = prob.values(result);

    notifyObservers();
}
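// A hedged usage sketch of the projected overload above: calibrate a Hull-White
// model as in the Estimate/testCachedHullWhite snippets, but hold the first
// parameter (a) fixed so that only sigma is optimized. Assumes `termStructure`
// and the `swaptions` helper list were built as shown elsewhere in this section.
HullWhite model = new HullWhite(termStructure);
LevenbergMarquardt lm = new LevenbergMarquardt(1.0e-8, 1.0e-8, 1.0e-8);
EndCriteria ec = new EndCriteria(10000, 100, 1e-6, 1e-8, 1e-8);
List<bool> fixParameters = new List<bool> { true, false };   // fix a, leave sigma free
model.calibrate(swaptions, lm, ec, new Constraint(), new List<double>(), fixParameters);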
public EndCriteria.Type endCriteria()
{
    EndCriteria.Type ret = (EndCriteria.Type)NQuantLibcPINVOKE.ShortRateModelHandle_endCriteria(swigCPtr);
    if (NQuantLibcPINVOKE.SWIGPendingException.Pending)
        throw NQuantLibcPINVOKE.SWIGPendingException.Retrieve();
    return ret;
}
public EndCriteria.Type endCriteria()
{
    EndCriteria.Type ret = (EndCriteria.Type)NQuantLibcPINVOKE.NoArbSabrInterpolatedSmileSection_endCriteria(swigCPtr);
    if (NQuantLibcPINVOKE.SWIGPendingException.Pending)
        throw NQuantLibcPINVOKE.SWIGPendingException.Retrieve();
    return ret;
}
public bool checkZeroGradientNorm(double gradientNorm, ref EndCriteria.Type ecType)
{
    if (gradientNorm >= gradientNormEpsilon_)
        return false;
    ecType = Type.ZeroGradientNorm;
    return true;
}
public EndCriteria.Type endCriteria()
{
    EndCriteria.Type ret = (EndCriteria.Type)NQuantLibcPINVOKE.MarkovFunctional_endCriteria(swigCPtr);
    if (NQuantLibcPINVOKE.SWIGPendingException.Pending)
        throw NQuantLibcPINVOKE.SWIGPendingException.Retrieve();
    return ret;
}
//! Test if the number of iterations is below maxIterations
public bool checkMaxIterations(int iteration, ref EndCriteria.Type ecType)
{
    if (iteration < maxIterations_)
        return false;
    ecType = Type.MaxIterations;
    return true;
}
public void OptimizersTest()
{
    //("Testing optimizers...");

    setup();

    // Loop over problems (currently there is only 1 problem)
    for (int i = 0; i < costFunctions_.Count; ++i)
    {
        Problem problem = new Problem(costFunctions_[i], constraints_[i], initialValues_[i]);
        Vector initialValues = problem.currentValue();
        // Loop over optimizers
        for (int j = 0; j < optimizationMethods_[i].Count; ++j)
        {
            double rootEpsilon = endCriterias_[i].rootEpsilon();
            int endCriteriaTests = 1;
            // Loop over rootEpsilon
            for (int k = 0; k < endCriteriaTests; ++k)
            {
                problem.setCurrentValue(initialValues);
                EndCriteria endCriteria = new EndCriteria(endCriterias_[i].maxIterations(),
                                                          endCriterias_[i].maxStationaryStateIterations(),
                                                          rootEpsilon,
                                                          endCriterias_[i].functionEpsilon(),
                                                          endCriterias_[i].gradientNormEpsilon());
                rootEpsilon *= .1;
                EndCriteria.Type endCriteriaResult =
                    optimizationMethods_[i][j].optimizationMethod.minimize(problem, endCriteria);
                Vector xMinCalculated = problem.currentValue();
                Vector yMinCalculated = problem.values(xMinCalculated);

                // Check optimization results vs known solution
                if (endCriteriaResult == EndCriteria.Type.None ||
                    endCriteriaResult == EndCriteria.Type.MaxIterations ||
                    endCriteriaResult == EndCriteria.Type.Unknown)
                    Assert.Fail("function evaluations: " + problem.functionEvaluation() +
                                " gradient evaluations: " + problem.gradientEvaluation() +
                                " x expected: " + xMinExpected_[i] +
                                " x calculated: " + xMinCalculated +
                                " x difference: " + (xMinExpected_[i] - xMinCalculated) +
                                " rootEpsilon: " + endCriteria.rootEpsilon() +
                                " y expected: " + yMinExpected_[i] +
                                " y calculated: " + yMinCalculated +
                                " y difference: " + (yMinExpected_[i] - yMinCalculated) +
                                " functionEpsilon: " + endCriteria.functionEpsilon() +
                                " endCriteriaResult: " + endCriteriaResult);
            }
        }
    }
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    EndCriteria.Type ecType = EndCriteria.Type.None;

    upperBound_ = P.constraint().upperBound(P.currentValue());
    lowerBound_ = P.constraint().lowerBound(P.currentValue());
    currGenSizeWeights_ = new Vector(configuration().populationMembers, configuration().stepsizeWeight);
    currGenCrossover_ = new Vector(configuration().populationMembers, configuration().crossoverProbability);

    List<Candidate> population = new InitializedList<Candidate>(configuration().populationMembers);
    population.ForEach((ii, vv) => population[ii] = new Candidate(P.currentValue().size()));
    fillInitialPopulation(population, P);

    // original QuantLib uses partial_sort, since only the first element is needed
    double fxOld = population.Min(x => x.cost);
    bestMemberEver_ = (Candidate)population.First(x => x.cost.IsEqual(fxOld)).Clone();
    int iteration = 0, stationaryPointIteration = 0;

    // main loop - calculate consecutive emerging populations
    while (!endCriteria.checkMaxIterations(iteration++, ref ecType))
    {
        calculateNextGeneration(population, P.costFunction());
        double fxNew = population.Min(x => x.cost);
        Candidate tmp = (Candidate)population.First(x => x.cost.IsEqual(fxNew)).Clone();
        if (fxNew < bestMemberEver_.cost)
            bestMemberEver_ = tmp;
        if (endCriteria.checkStationaryFunctionValue(fxOld, fxNew, ref stationaryPointIteration, ref ecType))
            break;
        fxOld = fxNew;
    }

    P.setCurrentValue(bestMemberEver_.values);
    P.setFunctionValue(bestMemberEver_.cost);
    return ecType;
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    int stationaryStateIterations_ = 0;
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x = P.currentValue();
    iteration_ = 0;
    n_ = x.size();
    ptry_ = new Vector(n_, 0.0);

    // build vertices
    vertices_ = new InitializedList<Vector>(n_ + 1, x);
    for (i_ = 0; i_ < n_; i_++)
    {
        Vector direction = new Vector(n_, 0.0);
        direction[i_] = 1.0;
        Vector tmp = vertices_[i_ + 1];
        P.constraint().update(ref tmp, direction, lambda_);
        vertices_[i_ + 1] = tmp;
    }

    values_ = new Vector(n_ + 1, 0.0);
    for (i_ = 0; i_ <= n_; i_++)
    {
        if (!P.constraint().test(vertices_[i_]))
            values_[i_] = Double.MaxValue;
        else
            values_[i_] = P.value(vertices_[i_]);
        if (Double.IsNaN(ytry_))
        {
            // handle NaN
            values_[i_] = Double.MaxValue;
        }
    }

    // minimize
    T_ = T0_;
    yb_ = Double.MaxValue;
    pb_ = new Vector(n_, 0.0);
    do
    {
        iterationT_ = iteration_;
        do
        {
            sum_ = new Vector(n_, 0.0);
            for (i_ = 0; i_ <= n_; i_++)
                sum_ += vertices_[i_];
            tt_ = -T_;
            ilo_ = 0;
            ihi_ = 1;
            ynhi_ = values_[0] + tt_ * Math.Log(rng_.next().value);
            ylo_ = ynhi_;
            yhi_ = values_[1] + tt_ * Math.Log(rng_.next().value);
            if (ylo_ > yhi_)
            {
                ihi_ = 0;
                ilo_ = 1;
                ynhi_ = yhi_;
                yhi_ = ylo_;
                ylo_ = ynhi_;
            }
            for (i_ = 2; i_ < n_ + 1; i_++)
            {
                yt_ = values_[i_] + tt_ * Math.Log(rng_.next().value);
                if (yt_ <= ylo_)
                {
                    ilo_ = i_;
                    ylo_ = yt_;
                }
                if (yt_ > yhi_)
                {
                    ynhi_ = yhi_;
                    ihi_ = i_;
                    yhi_ = yt_;
                }
                else if (yt_ > ynhi_)
                {
                    ynhi_ = yt_;
                }
            }

            // GSL end criterion in x (cf. above)
            if (endCriteria.checkStationaryPoint(simplexSize(), 0.0, ref stationaryStateIterations_, ref ecType) ||
                endCriteria.checkMaxIterations(iteration_, ref ecType))
            {
                // no matter what, we return the best ever point!
                P.setCurrentValue(pb_);
                P.setFunctionValue(yb_);
                return ecType;
            }

            iteration_ += 2;
            amotsa(P, -1.0);
            if (ytry_ <= ylo_)
            {
                amotsa(P, 2.0);
            }
            else if (ytry_ >= ynhi_)
            {
                ysave_ = yhi_;
                amotsa(P, 0.5);
                if (ytry_ >= ysave_)
                {
                    for (i_ = 0; i_ < n_ + 1; i_++)
                    {
                        if (i_ != ilo_)
                        {
                            for (j_ = 0; j_ < n_; j_++)
                            {
                                sum_[j_] = 0.5 * (vertices_[i_][j_] + vertices_[ilo_][j_]);
                                vertices_[i_][j_] = sum_[j_];
                            }
                            values_[i_] = P.value(sum_);
                        }
                    }
                    iteration_ += n_;
                    for (i_ = 0; i_ < n_; i_++)
                        sum_[i_] = 0.0;
                    for (i_ = 0; i_ <= n_; i_++)
                        sum_ += vertices_[i_];
                }
            }
            else
            {
                iteration_ += 1;
            }
        } while (iteration_ < iterationT_ + (scheme_ == Scheme.ConstantFactor ? m_ : 1));

        switch (scheme_)
        {
            case Scheme.ConstantFactor:
                T_ *= (1.0 - epsilon_);
                break;
            case Scheme.ConstantBudget:
                if (iteration_ <= K_)
                    T_ = T0_ * Math.Pow(1.0 - Convert.ToDouble(iteration_) / Convert.ToDouble(K_), alpha_);
                else
                    T_ = 0.0;
                break;
        }
    } while (true);
}
//! solve the optimization problem P
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    // Initializations
    double ftol = endCriteria.functionEpsilon();
    int maxStationaryStateIterations_ = endCriteria.maxStationaryStateIterations();
    EndCriteria.Type ecType = EndCriteria.Type.None;      // reset end criteria
    P.reset();                                            // reset problem
    Vector x_ = P.currentValue();                         // store the starting point
    int iterationNumber_ = 0;                             // stationaryStateIterationNumber_ = 0
    lineSearch_.searchDirection = new Vector(x_.Count);   // dimension line search
    bool done = false;

    // function and squared norm of gradient values
    double fnew, fold, gold2;
    double c;
    double fdiff, normdiff;
    // classical initial value for line-search step
    double t = 1.0;
    // Set gradient g at the size of the optimization problem search direction
    int sz = lineSearch_.searchDirection.Count;
    Vector g = new Vector(sz);
    Vector d = new Vector(sz);
    Vector sddiff = new Vector(sz);

    // Initialize cost function, gradient g and search direction
    P.setFunctionValue(P.valueAndGradient(g, x_));
    P.setGradientNormValue(Vector.DotProduct(g, g));
    lineSearch_.searchDirection = g * -1.0;

    // Loop over iterations
    do
    {
        // Linesearch
        t = lineSearch_.value(P, ref ecType, endCriteria, t);
        // don't throw: it can fail just because maxIterations exceeded
        //QL_REQUIRE(lineSearch_->succeed(), "line-search failed!");
        if (lineSearch_.succeed())
        {
            // Updates
            d = lineSearch_.searchDirection;
            // New point
            x_ = lineSearch_.lastX();
            // New function value
            fold = P.functionValue();
            P.setFunctionValue(lineSearch_.lastFunctionValue());
            // New gradient and search direction vectors
            g = lineSearch_.lastGradient();
            // orthogonalization coef
            gold2 = P.gradientNormValue();
            P.setGradientNormValue(lineSearch_.lastGradientNorm2());
            c = P.gradientNormValue() / gold2;
            // conjugate gradient search direction
            sddiff = ((g * -1.0) + c * d) - lineSearch_.searchDirection;
            normdiff = Math.Sqrt(Vector.DotProduct(sddiff, sddiff));
            lineSearch_.searchDirection = (g * -1.0) + c * d;
            // Now compute accuracy and check end criteria
            // Numerical Recipes exit strategy on fx (see NR in C++, p.423)
            fnew = P.functionValue();
            fdiff = 2.0 * Math.Abs(fnew - fold) /
                    (Math.Abs(fnew) + Math.Abs(fold) + Double.Epsilon);
            if (fdiff < ftol || endCriteria.checkMaxIterations(iterationNumber_, ref ecType))
            {
                endCriteria.checkStationaryFunctionValue(0.0, 0.0, ref maxStationaryStateIterations_, ref ecType);
                endCriteria.checkMaxIterations(iterationNumber_, ref ecType);
                return ecType;
            }
            //done = endCriteria(iterationNumber_,
            //                   stationaryStateIterationNumber_,
            //                   true,  //FIXME: it should be in the problem
            //                   fold,
            //                   std::sqrt(gold2),
            //                   P.functionValue(),
            //                   std::sqrt(P.gradientNormValue()),
            //                   ecType);
            P.setCurrentValue(x_);   // update problem current value
            ++iterationNumber_;      // Increase iteration number
        }
        else
        {
            done = true;
        }
    } while (!done);

    P.setCurrentValue(x_);
    return ecType;
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x_ = P.currentValue();
    currentProblem_ = P;
    initCostValues_ = P.costFunction().values(x_);
    int m = initCostValues_.size();
    int n = x_.size();
    if (useCostFunctionsJacobian_)
    {
        initJacobian_ = new Matrix(m, n);
        P.costFunction().jacobian(initJacobian_, x_);
    }

    Vector xx = new Vector(x_);
    Vector fvec = new Vector(m), diag = new Vector(n);

    int mode = 1;
    double factor = 1;
    int nprint = 0;
    int info = 0;
    int nfev = 0;

    Matrix fjac = new Matrix(m, n);
    int ldfjac = m;

    List<int> ipvt = new InitializedList<int>(n);
    Vector qtf = new Vector(n), wa1 = new Vector(n), wa2 = new Vector(n), wa3 = new Vector(n), wa4 = new Vector(m);

    // call lmdif to minimize the sum of the squares of m functions
    // in n variables by the Levenberg-Marquardt algorithm.
    Func<int, int, Vector, int, Matrix> j = null;
    if (useCostFunctionsJacobian_)
        j = jacFcn;

    // requirements; check here to get more detailed error messages.
    Utils.QL_REQUIRE(n > 0, () => "no variables given");
    Utils.QL_REQUIRE(m >= n, () => $"less functions ({m}) than available variables ({n})");
    Utils.QL_REQUIRE(endCriteria.functionEpsilon() >= 0.0, () => "negative f tolerance");
    Utils.QL_REQUIRE(xtol_ >= 0.0, () => "negative x tolerance");
    Utils.QL_REQUIRE(gtol_ >= 0.0, () => "negative g tolerance");
    Utils.QL_REQUIRE(endCriteria.maxIterations() > 0, () => "null number of evaluations");

    MINPACK.lmdif(m, n, xx, ref fvec,
                  endCriteria.functionEpsilon(),
                  xtol_,
                  gtol_,
                  endCriteria.maxIterations(),
                  epsfcn_,
                  diag, mode, factor,
                  nprint, ref info, ref nfev, ref fjac,
                  ldfjac, ref ipvt, ref qtf,
                  wa1, wa2, wa3, wa4,
                  fcn, j);
    info_ = info;
    // check requirements & endCriteria evaluation
    Utils.QL_REQUIRE(info != 0, () => "MINPACK: improper input parameters");
    if (info != 6)
        ecType = EndCriteria.Type.StationaryFunctionValue;
    endCriteria.checkMaxIterations(nfev, ref ecType);
    Utils.QL_REQUIRE(info != 7, () => "MINPACK: xtol is too small. no further " +
                                      "improvement in the approximate " +
                                      "solution x is possible.");
    Utils.QL_REQUIRE(info != 8, () => "MINPACK: gtol is too small. fvec is " +
                                      "orthogonal to the columns of the " +
                                      "jacobian to machine precision.");
    // set problem
    x_ = new Vector(xx.GetRange(0, n));
    P.setCurrentValue(x_);
    P.setFunctionValue(P.costFunction().value(x_));

    return ecType;
}
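// A hedged, self-contained least-squares example for the lmdif driver above:
// fit y = a*x + b to three exact points of y = 2x + 1. The overridden members
// (values/value) are inferred from the calls P.costFunction().values(x_) and
// P.costFunction().value(x_) visible in the snippet; the exact base-class
// signatures are an assumption, not taken from this section.
class LineFitCost : CostFunction
{
    static readonly double[] xs = { 0.0, 1.0, 2.0 };
    static readonly double[] ys = { 1.0, 3.0, 5.0 };

    public override Vector values(Vector p)
    {
        // one residual per data point
        Vector r = new Vector(xs.Length);
        for (int i = 0; i < xs.Length; i++)
            r[i] = p[0] * xs[i] + p[1] - ys[i];
        return r;
    }

    public override double value(Vector p)
    {
        // root-mean-square of the residuals
        Vector r = values(p);
        return Math.Sqrt(Vector.DotProduct(r, r) / r.size());
    }
}

// usage, following the Problem/minimize pattern shown elsewhere in this section:
Problem p = new Problem(new LineFitCost(), new NoConstraint(), new Vector(2));
new LevenbergMarquardt(1.0e-8, 1.0e-8, 1.0e-8).minimize(p, new EndCriteria(400, 40, 1.0e-8, 1.0e-8, 1.0e-8));
// p.currentValue() should now be close to (a, b) = (2, 1)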
public CalibratedModel(int nArguments)
{
    arguments_ = new InitializedList<Parameter>(nArguments);
    constraint_ = new PrivateConstraint(arguments_);
    shortRateEndCriteria_ = EndCriteria.Type.None;
}
//! Test if the function value is below functionEpsilon
public bool checkStationaryFunctionAccuracy(double f, bool positiveOptimization, ref EndCriteria.Type ecType)
{
    if (!positiveOptimization)
        return false;
    if (f >= functionEpsilon_)
        return false;
    ecType = Type.StationaryFunctionAccuracy;
    return true;
}
//! Test if the function variation is below functionEpsilon
public bool checkStationaryFunctionValue(double fxOld, double fxNew, ref int statStateIterations,
                                         ref EndCriteria.Type ecType)
{
    if (Math.Abs(fxNew - fxOld) >= functionEpsilon_)
    {
        statStateIterations = 0;
        return false;
    }
    ++statStateIterations;
    if (statStateIterations <= maxStationaryStateIterations_)
        return false;
    ecType = Type.StationaryFunctionValue;
    return true;
}
//! Test if the root variation is below rootEpsilon
public bool checkStationaryPoint(double xOld, double xNew, ref int statStateIterations,
                                 ref EndCriteria.Type ecType)
{
    if (Math.Abs(xNew - xOld) >= rootEpsilon_)
    {
        statStateIterations = 0;
        return false;
    }
    ++statStateIterations;
    if (statStateIterations <= maxStationaryStateIterations_)
        return false;
    ecType = Type.StationaryPoint;
    return true;
}
//! Test if the number of iterations is not too big
//  and if a minimum point is not reached
public bool value(int iteration, ref int statStateIterations, bool positiveOptimization,
                  double fold, double UnnamedParameter1, double fnew, double normgnew,
                  ref EndCriteria.Type ecType)
{
    return checkMaxIterations(iteration, ref ecType) ||
           checkStationaryFunctionValue(fold, fnew, ref statStateIterations, ref ecType) ||
           checkStationaryFunctionAccuracy(fnew, positiveOptimization, ref ecType) ||
           checkZeroGradientNorm(normgnew, ref ecType);
}
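// The check* predicates above share one convention: they return false while the
// search should continue and, on the first criterion that fires, set ecType and
// return true, so the || chain in value() short-circuits on the first criterion
// hit. A minimal, self-contained sketch of a driver loop using that convention
// (the quadratic objective and the toy update step are illustrative only):
Func<double, double> evaluate = x => (x - 1.0) * (x - 1.0);
EndCriteria ec = new EndCriteria(100, 10, 1e-8, 1e-8, 1e-8);
EndCriteria.Type ecType = EndCriteria.Type.None;

double xCur = 0.0, fOld = evaluate(xCur);
int iteration = 0, statState = 0;
for (;;)
{
    xCur += 0.5 * (1.0 - xCur);    // toy update step towards the minimum at x = 1
    double fNew = evaluate(xCur);
    if (ec.checkStationaryFunctionValue(fOld, fNew, ref statState, ref ecType) ||
        ec.checkMaxIterations(++iteration, ref ecType))
        break;                      // ecType now names the criterion that fired
    fOld = fNew;
}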
public void calculate()
{
    validCurve_ = false;
    int nInsts = ts_.instruments_.Count, i;

    // ensure rate helpers are sorted
    ts_.instruments_.Sort((x, y) => x.latestDate().CompareTo(y.latestDate()));

    // check that there are no instruments with the same maturity
    for (i = 1; i < nInsts; ++i)
    {
        Date m1 = ts_.instruments_[i - 1].latestDate(),
             m2 = ts_.instruments_[i].latestDate();
        Utils.QL_REQUIRE(m1 != m2, () => "two instruments have the same maturity (" + m1 + ")");
    }

    // check that there are no instruments with an invalid quote
    Utils.QL_REQUIRE((i = ts_.instruments_.FindIndex(x => !x.quoteIsValid())) == -1, () =>
                     "instrument " + i + " (maturity: " + ts_.instruments_[i].latestDate() +
                     ") has an invalid quote");

    // setup instruments and register with them
    ts_.instruments_.ForEach((x, j) => ts_.setTermStructure(j));

    // set initial guess only if the current curve cannot be used as guess
    if (validCurve_)
        Utils.QL_REQUIRE(ts_.data_.Count == nInsts + 1, () =>
                         "dimension mismatch: expected " + (nInsts + 1) + ", actual " + ts_.data_.Count);
    else
    {
        ts_.data_ = new InitializedList<double>(nInsts + 1);
        ts_.data_[0] = ts_.initialValue();
    }

    // calculate dates and times
    ts_.dates_ = new InitializedList<Date>(nInsts + 1);
    ts_.times_ = new InitializedList<double>(nInsts + 1);
    ts_.dates_[0] = ts_.initialDate();
    ts_.times_[0] = ts_.timeFromReference(ts_.dates_[0]);
    for (i = 0; i < nInsts; ++i)
    {
        ts_.dates_[i + 1] = ts_.instruments_[i].latestDate();
        ts_.times_[i + 1] = ts_.timeFromReference(ts_.dates_[i + 1]);
        if (!validCurve_)
            ts_.data_[i + 1] = ts_.data_[i];
    }

    LevenbergMarquardt solver = new LevenbergMarquardt(ts_.accuracy_, ts_.accuracy_, ts_.accuracy_);
    EndCriteria endCriteria = new EndCriteria(100, 10, 0.00, ts_.accuracy_, 0.00);
    PositiveConstraint posConstraint = new PositiveConstraint();
    NoConstraint noConstraint = new NoConstraint();
    Constraint solverConstraint = forcePositive_ ? (Constraint)posConstraint : (Constraint)noConstraint;

    // now start the bootstrapping.
    int iInst = localisation_ - 1;

    int dataAdjust = (ts_.interpolator_ as ConvexMonotone).dataSizeAdjustment;

    do
    {
        int initialDataPt = iInst + 1 - localisation_ + dataAdjust;
        Vector startArray = new Vector(localisation_ + 1 - dataAdjust);
        for (int j = 0; j < startArray.size() - 1; ++j)
            startArray[j] = ts_.data_[initialDataPt + j];

        // here we are extending the interpolation a point at a
        // time... but the local interpolator can make an
        // approximation for the final localisation period.
        // e.g. if the localisation is 2, then the first section
        // of the curve will be solved using the first 2
        // instruments... with the local interpolator making
        // suitable boundary conditions.
        ts_.interpolation_ = (ts_.interpolator_ as ConvexMonotone).localInterpolate(
            ts_.times_, iInst + 2, ts_.data_, localisation_,
            ts_.interpolation_ as ConvexMonotoneInterpolation, nInsts + 1);

        if (iInst >= localisation_)
            startArray[localisation_ - dataAdjust] = ts_.guess(iInst, ts_, false, 0);
        else
            startArray[localisation_ - dataAdjust] = ts_.data_[0];

        var currentCost = new PenaltyFunction<T, U>(ts_, initialDataPt, ts_.instruments_,
                                                    iInst - localisation_ + 1, iInst + 1);
        Problem toSolve = new Problem(currentCost, solverConstraint, startArray);
        EndCriteria.Type endType = solver.minimize(toSolve, endCriteria);

        // check the end criteria
        Utils.QL_REQUIRE(endType == EndCriteria.Type.StationaryFunctionAccuracy ||
                         endType == EndCriteria.Type.StationaryFunctionValue, () =>
                         "Unable to strip yield curve to required accuracy");
        ++iInst;
    } while (iInst < nInsts);

    validCurve_ = true;
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x_ = P.currentValue();
    currentProblem_ = P;
    initCostValues_ = P.costFunction().values(x_);
    int m = initCostValues_.size();
    int n = x_.size();
    if (useCostFunctionsJacobian_)
    {
        initJacobian_ = new Matrix(m, n);
        P.costFunction().jacobian(initJacobian_, x_);
    }

    Vector xx = new Vector(x_);
    Vector fvec = new Vector(m), diag = new Vector(n);

    int mode = 1;
    double factor = 1;
    int nprint = 0;
    int info = 0;
    int nfev = 0;

    Matrix fjac = new Matrix(m, n);
    int ldfjac = m;

    List<int> ipvt = new InitializedList<int>(n);
    Vector qtf = new Vector(n), wa1 = new Vector(n), wa2 = new Vector(n), wa3 = new Vector(n), wa4 = new Vector(m);

    // call lmdif to minimize the sum of the squares of m functions
    // in n variables by the Levenberg-Marquardt algorithm.
    Func<int, int, Vector, int, Matrix> j = null;
    if (useCostFunctionsJacobian_)
        j = jacFcn;

    MINPACK.lmdif(m, n, xx, ref fvec,
                  endCriteria.functionEpsilon(),
                  xtol_,
                  gtol_,
                  endCriteria.maxIterations(),
                  epsfcn_,
                  diag, mode, factor,
                  nprint, ref info, ref nfev, ref fjac,
                  ldfjac, ref ipvt, ref qtf,
                  wa1, wa2, wa3, wa4,
                  fcn, j);
    info_ = info;
    // check requirements & endCriteria evaluation
    if (info == 0)
        throw new ApplicationException("MINPACK: improper input parameters");
    //if (info == 6) throw new ApplicationException("MINPACK: ftol is too small. no further " +
    //                                              "reduction in the sum of squares is possible.");
    if (info != 6)
        ecType = EndCriteria.Type.StationaryFunctionValue;
    //QL_REQUIRE(info != 5, "MINPACK: number of calls to fcn has reached or exceeded maxfev.");
    endCriteria.checkMaxIterations(nfev, ref ecType);
    if (info == 7)
        throw new ApplicationException("MINPACK: xtol is too small. no further " +
                                       "improvement in the approximate " +
                                       "solution x is possible.");
    if (info == 8)
        throw new ApplicationException("MINPACK: gtol is too small. fvec is " +
                                       "orthogonal to the columns of the " +
                                       "jacobian to machine precision.");
    // set problem
    x_ = new Vector(xx.GetRange(0, n));
    P.setCurrentValue(x_);
    P.setFunctionValue(P.costFunction().value(x_));

    return ecType;
}
//! Perform line search
public override double value(Problem P,                  // Optimization problem
                             ref EndCriteria.Type ecType,
                             EndCriteria endCriteria,
                             double t_ini)                // initial value of line-search step
{
    Constraint constraint = P.constraint();
    succeed_ = true;
    bool maxIter = false;
    double t = t_ini;
    int loopNumber = 0;

    double q0 = P.functionValue();
    double qp0 = P.gradientNormValue();

    double tl = 0.0;
    double tr = 0.0;

    qt_ = q0;
    qpt_ = (gradient_.empty()) ? qp0 : -Vector.DotProduct(gradient_, searchDirection_);

    // Initialize gradient
    gradient_ = new Vector(P.currentValue().size());
    // Compute new point
    xtd_ = P.currentValue();
    t = update(ref xtd_, searchDirection_, t, constraint);
    // Compute function value at the new point
    qt_ = P.value(xtd_);

    while ((qt_ - q0) < -beta_ * t * qpt_ || (qt_ - q0) > -alpha_ * t * qpt_)
    {
        if ((qt_ - q0) > -alpha_ * t * qpt_)
            tr = t;
        else
            tl = t;
        ++loopNumber;

        // calculate the new step
        if (Utils.close_enough(tr, 0.0))
            t *= extrapolation_;
        else
            t = (tl + tr) / 2.0;

        // New point value
        xtd_ = P.currentValue();
        t = update(ref xtd_, searchDirection_, t, constraint);

        // Compute function value at the new point
        qt_ = P.value(xtd_);
        P.gradient(gradient_, xtd_);
        // and its squared norm
        maxIter = endCriteria.checkMaxIterations(loopNumber, ref ecType);

        if (maxIter)
            break;
    }

    if (maxIter)
        succeed_ = false;

    // Compute new gradient
    P.gradient(gradient_, xtd_);
    // and its squared norm
    qpt_ = Vector.DotProduct(gradient_, gradient_);

    // Return new step value
    return t;
}
public void testCachedHullWhite()
{
    //("Testing Hull-White calibration against cached values...");

    Date today = new Date(15, Month.February, 2002);
    Date settlement = new Date(19, Month.February, 2002);
    Settings.setEvaluationDate(today);
    Handle<YieldTermStructure> termStructure =
        new Handle<YieldTermStructure>(Utilities.flatRate(settlement, 0.04875825, new Actual365Fixed()));
    //termStructure.link
    HullWhite model = new HullWhite(termStructure);

    CalibrationData[] data =
    {
        new CalibrationData(1, 5, 0.1148),
        new CalibrationData(2, 4, 0.1108),
        new CalibrationData(3, 3, 0.1070),
        new CalibrationData(4, 2, 0.1021),
        new CalibrationData(5, 1, 0.1000)
    };
    IborIndex index = new Euribor6M(termStructure);

    IPricingEngine engine = new JamshidianSwaptionEngine(model);

    List<CalibrationHelper> swaptions = new List<CalibrationHelper>();
    for (int i = 0; i < data.Length; i++)
    {
        Quote vol = new SimpleQuote(data[i].volatility);
        CalibrationHelper helper =
            new SwaptionHelper(new Period(data[i].start, TimeUnit.Years),
                               new Period(data[i].length, TimeUnit.Years),
                               new Handle<Quote>(vol), index,
                               new Period(1, TimeUnit.Years), new Thirty360(), new Actual360(),
                               termStructure, false);
        helper.setPricingEngine(engine);
        swaptions.Add(helper);
    }

    // Set up the optimization problem
    // Real simplexLambda = 0.1;
    // Simplex optimizationMethod(simplexLambda);
    LevenbergMarquardt optimizationMethod = new LevenbergMarquardt(1.0e-8, 1.0e-8, 1.0e-8);
    EndCriteria endCriteria = new EndCriteria(10000, 100, 1e-6, 1e-8, 1e-8);

    // Optimize
    model.calibrate(swaptions, optimizationMethod, endCriteria, new Constraint(), new List<double>());
    EndCriteria.Type ecType = model.endCriteria();

    // Check and print out results
#if QL_USE_INDEXED_COUPON
    double cachedA = 0.0488199, cachedSigma = 0.00593579;
#else
    double cachedA = 0.0488565, cachedSigma = 0.00593662;
#endif
    double tolerance = 1.120e-5;
    //double tolerance = 1.0e-6;
    Vector xMinCalculated = model.parameters();
    double yMinCalculated = model.value(xMinCalculated, swaptions);
    Vector xMinExpected = new Vector(2);
    xMinExpected[0] = cachedA;
    xMinExpected[1] = cachedSigma;
    double yMinExpected = model.value(xMinExpected, swaptions);
    if (Math.Abs(xMinCalculated[0] - cachedA) > tolerance ||
        Math.Abs(xMinCalculated[1] - cachedSigma) > tolerance)
    {
        Assert.Fail("Failed to reproduce cached calibration results:\n" +
                    "calculated: a = " + xMinCalculated[0] + ", " +
                    "sigma = " + xMinCalculated[1] + ", " +
                    "f(a) = " + yMinCalculated + ",\n" +
                    "expected:   a = " + xMinExpected[0] + ", " +
                    "sigma = " + xMinExpected[1] + ", " +
                    "f(a) = " + yMinExpected + ",\n" +
                    "difference: a = " + (xMinCalculated[0] - xMinExpected[0]) + ", " +
                    "sigma = " + (xMinCalculated[1] - xMinExpected[1]) + ", " +
                    "f(a) = " + (yMinCalculated - yMinExpected) + ",\n" +
                    "end criteria = " + ecType);
    }
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    // set up of the problem
    //double ftol = endCriteria.functionEpsilon();    // end criteria on f(x) (see Numerical Recipes in C++, p.410)
    double xtol = endCriteria.rootEpsilon();          // end criteria on x (see GSL v. 1.9, http://www.gnu.org/software/gsl/)
    int maxStationaryStateIterations_ = endCriteria.maxStationaryStateIterations();
    EndCriteria.Type ecType = EndCriteria.Type.None;
    P.reset();
    Vector x_ = P.currentValue();
    int iterationNumber_ = 0;

    // Initialize vertices of the simplex
    bool end = false;
    int n = x_.Count;
    vertices_ = new InitializedList<Vector>(n + 1, x_);
    for (int i = 0; i < n; i++)
    {
        Vector direction = new Vector(n, 0.0);
        Vector vertice = vertices_[i + 1];
        direction[i] = 1.0;
        P.constraint().update(ref vertice, direction, lambda_);
        vertices_[i + 1] = vertice;
    }
    // Initialize function values at the vertices of the simplex
    values_ = new Vector(n + 1, 0.0);
    for (int i = 0; i <= n; i++)
        values_[i] = P.value(vertices_[i]);

    // Loop looking for minimum
    do
    {
        sum_ = new Vector(n, 0.0);
        for (int i = 0; i <= n; i++)
            sum_ += vertices_[i];
        // Determine the best (iLowest), worst (iHighest)
        // and 2nd worst (iNextHighest) vertices
        int iLowest = 0;
        int iHighest;
        int iNextHighest;
        if (values_[0] < values_[1])
        {
            iHighest = 1;
            iNextHighest = 0;
        }
        else
        {
            iHighest = 0;
            iNextHighest = 1;
        }
        for (int i = 1; i <= n; i++)
        {
            if (values_[i] > values_[iHighest])
            {
                iNextHighest = iHighest;
                iHighest = i;
            }
            else
            {
                if ((values_[i] > values_[iNextHighest]) && i != iHighest)
                    iNextHighest = i;
            }
            if (values_[i] < values_[iLowest])
                iLowest = i;
        }

        // Now compute accuracy, update iteration number and check end criteria
        //// Numerical Recipes exit strategy on fx (see NR in C++, p.410)
        //double low = values_[iLowest];
        //double high = values_[iHighest];
        //double rtol = 2.0*std::fabs(high - low)/
        //    (std::fabs(high) + std::fabs(low) + QL_EPSILON);
        //++iterationNumber_;
        //if (rtol < ftol ||
        //    endCriteria.checkMaxIterations(iterationNumber_, ecType)) {
        // GSL exit strategy on x (see GSL v. 1.9, http://www.gnu.org/software/gsl)
        double simplexSize = Utils.computeSimplexSize(vertices_);
        ++iterationNumber_;
        if (simplexSize < xtol || endCriteria.checkMaxIterations(iterationNumber_, ref ecType))
        {
            endCriteria.checkStationaryPoint(0.0, 0.0, ref maxStationaryStateIterations_, ref ecType);
            endCriteria.checkMaxIterations(iterationNumber_, ref ecType);
            x_ = vertices_[iLowest];
            double low = values_[iLowest];
            P.setFunctionValue(low);
            P.setCurrentValue(x_);
            return ecType;
        }
        // If end criteria are not met, continue
        double factor = -1.0;
        double vTry = extrapolate(ref P, iHighest, ref factor);
        if ((vTry <= values_[iLowest]) && (factor == -1.0))
        {
            factor = 2.0;
            extrapolate(ref P, iHighest, ref factor);
        }
        else if (Math.Abs(factor) > Const.QL_EPSILON)
        {
            if (vTry >= values_[iNextHighest])
            {
                double vSave = values_[iHighest];
                factor = 0.5;
                vTry = extrapolate(ref P, iHighest, ref factor);
                if (vTry >= vSave && Math.Abs(factor) > Const.QL_EPSILON)
                {
                    for (int i = 0; i <= n; i++)
                    {
                        if (i != iLowest)
                        {
#if QL_ARRAY_EXPRESSIONS
                            vertices_[i] = 0.5 * (vertices_[i] + vertices_[iLowest]);
#else
                            vertices_[i] += vertices_[iLowest];
                            vertices_[i] *= 0.5;
#endif
                            values_[i] = P.value(vertices_[i]);
                        }
                    }
                }
            }
        }
        // If we can't extrapolate given the constraints, exit
        if (Math.Abs(factor) <= Const.QL_EPSILON)
        {
            x_ = vertices_[iLowest];
            double low = values_[iLowest];
            P.setFunctionValue(low);
            P.setCurrentValue(x_);
            return EndCriteria.Type.StationaryFunctionValue;
        }
    } while (end == false);
    throw new Exception("optimization failed: unexpected behaviour");
}
//! Perform line search
public abstract double value(Problem P,                  // Optimization problem
                             ref EndCriteria.Type ecType,
                             EndCriteria endCriteria,
                             double t_ini);               // initial value of line-search step
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
    // Initializations
    double ftol = endCriteria.functionEpsilon();
    int maxStationaryStateIterations_ = endCriteria.maxStationaryStateIterations();
    EndCriteria.Type ecType = EndCriteria.Type.None;   // reset end criteria
    P.reset();                                         // reset problem
    Vector x_ = P.currentValue();                      // store the starting point
    int iterationNumber_ = 0;
    // dimension line search
    lineSearch_.searchDirection = new Vector(x_.size());
    bool done = false;

    // function and squared norm of gradient values
    double fnew, fold, gold2;
    double fdiff;
    // classical initial value for line-search step
    double t = 1.0;
    // Set gradient g at the size of the optimization problem
    // search direction
    int sz = lineSearch_.searchDirection.size();
    Vector prevGradient = new Vector(sz), d = new Vector(sz), sddiff = new Vector(sz), direction = new Vector(sz);
    // Initialize cost function, gradient prevGradient and search direction
    P.setFunctionValue(P.valueAndGradient(prevGradient, x_));
    P.setGradientNormValue(Vector.DotProduct(prevGradient, prevGradient));
    lineSearch_.searchDirection = prevGradient * -1;

    bool first_time = true;
    // Loop over iterations
    do
    {
        // Linesearch
        if (!first_time)
            prevGradient = lineSearch_.lastGradient();
        t = lineSearch_.value(P, ref ecType, endCriteria, t);
        // don't throw: it can fail just because maxIterations exceeded
        if (lineSearch_.succeed())
        {
            // Updates
            // New point
            x_ = lineSearch_.lastX();
            // New function value
            fold = P.functionValue();
            P.setFunctionValue(lineSearch_.lastFunctionValue());
            // New gradient and search direction vectors

            // orthogonalization coef
            gold2 = P.gradientNormValue();
            P.setGradientNormValue(lineSearch_.lastGradientNorm2());

            // conjugate gradient search direction
            direction = getUpdatedDirection(P, gold2, prevGradient);

            sddiff = direction - lineSearch_.searchDirection;
            lineSearch_.searchDirection = direction;
            // Now compute accuracy and check end criteria
            // Numerical Recipes exit strategy on fx (see NR in C++, p.423)
            fnew = P.functionValue();
            fdiff = 2.0 * Math.Abs(fnew - fold) /
                    (Math.Abs(fnew) + Math.Abs(fold) + Const.QL_EPSILON);
            if (fdiff < ftol || endCriteria.checkMaxIterations(iterationNumber_, ref ecType))
            {
                endCriteria.checkStationaryFunctionValue(0.0, 0.0, ref maxStationaryStateIterations_, ref ecType);
                endCriteria.checkMaxIterations(iterationNumber_, ref ecType);
                return ecType;
            }
            P.setCurrentValue(x_);   // update problem current value
            ++iterationNumber_;      // Increase iteration number
            first_time = false;
        }
        else
        {
            done = true;
        }
    } while (!done);

    P.setCurrentValue(x_);
    return ecType;
}