public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
   EndCriteria.Type ecType = EndCriteria.Type.None;
   upperBound_ = P.constraint().upperBound(P.currentValue());
   lowerBound_ = P.constraint().lowerBound(P.currentValue());
   currGenSizeWeights_ = new Vector(configuration().populationMembers, configuration().stepsizeWeight);
   currGenCrossover_ = new Vector(configuration().populationMembers, configuration().crossoverProbability);

   List<Candidate> population = new InitializedList<Candidate>(configuration().populationMembers);
   population.ForEach((ii, vv) => population[ii] = new Candidate(P.currentValue().size()));
   fillInitialPopulation(population, P);

   // the original QuantLib code uses partial_sort, since only the first element is needed
   double fxOld = population.Min(x => x.cost);
   bestMemberEver_ = (Candidate)population.First(x => x.cost.IsEqual(fxOld)).Clone();
   int iteration = 0, stationaryPointIteration = 0;

   // main loop - calculate consecutive emerging populations
   while (!endCriteria.checkMaxIterations(iteration++, ref ecType))
   {
      calculateNextGeneration(population, P.costFunction());
      double fxNew = population.Min(x => x.cost);
      Candidate tmp = (Candidate)population.First(x => x.cost.IsEqual(fxNew)).Clone();
      if (fxNew < bestMemberEver_.cost)
         bestMemberEver_ = tmp;
      if (endCriteria.checkStationaryFunctionValue(fxOld, fxNew, ref stationaryPointIteration, ref ecType))
         break;
      fxOld = fxNew;
   }

   P.setCurrentValue(bestMemberEver_.values);
   P.setFunctionValue(bestMemberEver_.cost);
   return ecType;
}
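// --- Usage sketch (added illustration, not library source) -----------------------------------
// Minimal example of driving the DifferentialEvolution.minimize method above on a simple
// quadratic cost. CostFunction, Problem, BoundaryConstraint, EndCriteria and Vector, and the
// constructor arguments used here, are assumed from the QLNet API and may need adjusting to
// the version in use. Assumes: using System; using QLNet;
class SphereCostFunction : CostFunction
{
   // f(x) = x . x, minimum at the origin
   public override double value(Vector x) { return Vector.DotProduct(x, x); }
   public override Vector values(Vector x) { return new Vector(1, value(x)); }
}

static void DifferentialEvolutionSketch()
{
   Constraint constraint = new BoundaryConstraint(-10.0, 10.0);   // supplies the bounds read above
   Problem problem = new Problem(new SphereCostFunction(), constraint, new Vector(5, 5.0));
   EndCriteria endCriteria = new EndCriteria(1000, 100, 1e-8, 1e-8, 1e-8);
   EndCriteria.Type reason = new DifferentialEvolution().minimize(problem, endCriteria);
   Console.WriteLine(reason + ": f(x*) = " + problem.functionValue());
}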
private double extrapolate(ref Problem P, int iHighest, ref double factor)
{
   Vector pTry;
   do
   {
      int dimensions = values_.Count - 1;
      double factor1 = (1.0 - factor) / dimensions;
      double factor2 = factor1 - factor;
      pTry = sum_ * factor1 - vertices_[iHighest] * factor2;
      factor *= 0.5;
   }
   while (!P.constraint().test(pTry) && Math.Abs(factor) > Const.QL_EPSILON);

   if (Math.Abs(factor) <= Const.QL_EPSILON)
      return values_[iHighest];

   factor *= 2.0;
   double vTry = P.value(pTry);
   if (vTry < values_[iHighest])
   {
      values_[iHighest] = vTry;
      sum_ += pTry - vertices_[iHighest];
      vertices_[iHighest] = pTry;
   }
   return vTry;
}
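// Note (added for clarity, not in the library source): with n = values_.Count - 1 parameters and
// sum_ the sum of all n + 1 vertices, the trial point above can be rewritten as
//    pTry = centroid + factor * (vertices_[iHighest] - centroid),
// where centroid = (sum_ - vertices_[iHighest]) / n is the centre of the other vertices.
// factor = -1 therefore reflects the worst vertex through the centroid, factor = 0.5 contracts it
// halfway towards the centroid, and factor = 2 (used after a successful reflection has already
// replaced the worst vertex) extends further along the same direction. The do/while loop keeps
// halving factor until the trial point satisfies the constraint or factor underflows below
// QL_EPSILON, in which case the current worst value is returned unchanged.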
//! Perform line search
public override double value(Problem P, ref EndCriteria.Type ecType, EndCriteria endCriteria, double t_ini)
{
   //OptimizationMethod& method = P.method();
   Constraint constraint = P.constraint();
   succeed_ = true;
   bool maxIter = false;
   double qtold;
   double t = t_ini;
   int loopNumber = 0;

   double q0 = P.functionValue();
   double qp0 = P.gradientNormValue();

   qt_ = q0;
   qpt_ = (gradient_.Count == 0) ? qp0 : -Vector.DotProduct(gradient_, searchDirection_);

   // Initialize gradient
   gradient_ = new Vector(P.currentValue().Count);
   // Compute new point
   xtd_ = (Vector)P.currentValue().Clone();
   t = update(ref xtd_, searchDirection_, t, constraint);
   // Compute function value at the new point
   qt_ = P.value(xtd_);

   // Enter in the loop if the criterion is not satisfied
   if ((qt_ - q0) > -alpha_ * t * qpt_)
   {
      do
      {
         loopNumber++;
         // Decrease step
         t *= beta_;
         // Store old value of the function
         qtold = qt_;
         // New point value
         xtd_ = P.currentValue();
         t = update(ref xtd_, searchDirection_, t, constraint);

         // Compute function value at the new point
         qt_ = P.value(xtd_);
         P.gradient(gradient_, xtd_);
         // and its squared norm
         maxIter = endCriteria.checkMaxIterations(loopNumber, ref ecType);
      }
      while ((((qt_ - q0) > (-alpha_ * t * qpt_)) ||
              ((qtold - q0) <= (-alpha_ * t * qpt_ / beta_))) &&
             (!maxIter));
   }

   if (maxIter)
   {
      succeed_ = false;
   }

   // Compute new gradient
   P.gradient(gradient_, xtd_);
   // and its squared norm
   qpt_ = Vector.DotProduct(gradient_, gradient_);

   // Return new step value
   return t;
}
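// Note (added for clarity, not in the library source): writing q(t) = f(x + t*d) for step length t
// along the search direction d, q0 = q(0), and qpt_ = -d . grad f(x) (positive for a descent
// direction), the backtracking loop above shrinks t by beta_ and accepts the first step satisfying
//    q(t)       - q0 <= -alpha_ * t        * qpt_   (sufficient decrease at t, the Armijo rule)
//    q(t/beta_) - q0 >  -alpha_ * (t/beta_) * qpt_  (the previous, larger step failed it)
// so the step is reduced only as far as necessary; the search gives up and sets succeed_ = false
// if the EndCriteria iteration limit is reached first. The update() call may additionally clip t
// so that the new point stays feasible under the constraint.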
public Vector fcn(int m, int n, Vector x, int iflag)
{
   Vector xt = new Vector(x);
   Vector fvec;
   // constraint handling needs some improvement in the future:
   // starting point should not be close to a constraint violation
   if (currentProblem_.constraint().test(xt))
      fvec = new Vector(currentProblem_.values(xt));
   else
      fvec = new Vector(initCostValues_);
   return fvec;
}
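// Note (added for clarity): fcn above appears to be the residual callback for the MINPACK-style
// lmdif driver that QLNet's LevenbergMarquardt is built on; m and n would be the number of
// residuals and parameters, and iflag is unused in this body. Returning the initial residuals
// initCostValues_ for an infeasible trial point acts as a crude penalty that steers the driver
// back inside the constraint. The sketch below (an added illustration, not library source, with
// the CostFunction overrides assumed from the QLNet API) shows the kind of cost function whose
// values() such a least-squares optimizer consumes: one residual per data point.
class LineFitResiduals : CostFunction
{
   // residuals of a two-parameter line fit p[0] + p[1]*x against three fixed samples
   private readonly double[] xs = { 0.0, 1.0, 2.0 };
   private readonly double[] ys = { 1.0, 3.0, 5.0 };

   public override Vector values(Vector p)
   {
      Vector r = new Vector(xs.Length);
      for (int i = 0; i < xs.Length; i++)
         r[i] = p[0] + p[1] * xs[i] - ys[i];
      return r;
   }

   public override double value(Vector p)
   {
      Vector r = values(p);
      return Vector.DotProduct(r, r);   // sum of squared residuals
   }
}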
//! Perform line search
public override double value(Problem P, ref EndCriteria.Type ecType, EndCriteria endCriteria, double t_ini)
{
   //OptimizationMethod& method = P.method();
   Constraint constraint = P.constraint();
   succeed_ = true;
   bool maxIter = false;
   double qtold;
   double t = t_ini;
   int loopNumber = 0;

   double q0 = P.functionValue();
   double qp0 = P.gradientNormValue();

   qt_ = q0;
   qpt_ = (gradient_.Count == 0) ? qp0 : -Vector.DotProduct(gradient_, searchDirection_);

   // Initialize gradient
   gradient_ = new Vector(P.currentValue().Count);
   // Compute new point
   xtd_ = (Vector)P.currentValue().Clone();
   t = update(ref xtd_, searchDirection_, t, constraint);
   // Compute function value at the new point
   qt_ = P.value(xtd_);

   // Enter in the loop if the criterion is not satisfied
   if ((qt_ - q0) > -alpha_ * t * qpt_)
   {
      do
      {
         loopNumber++;
         // Decrease step
         t *= beta_;
         // Store old value of the function
         qtold = qt_;
         // New point value
         xtd_ = P.currentValue();
         t = update(ref xtd_, searchDirection_, t, constraint);

         // Compute function value at the new point
         qt_ = P.value(xtd_);
         P.gradient(gradient_, xtd_);
         // and its squared norm
         maxIter = endCriteria.checkMaxIterations(loopNumber, ref ecType);
      }
      while ((((qt_ - q0) > (-alpha_ * t * qpt_)) ||
              ((qtold - q0) <= (-alpha_ * t * qpt_ / beta_))) &&
             (!maxIter));
   }

   if (maxIter)
      succeed_ = false;

   // Compute new gradient
   P.gradient(gradient_, xtd_);
   // and its squared norm
   qpt_ = Vector.DotProduct(gradient_, gradient_);

   // Return new step value
   return t;
}
protected void amotsa(Problem P, double fac)
{
   fac1_ = (1.0 - fac) / Convert.ToDouble(n_);
   fac2_ = fac1_ - fac;

   for (j_ = 0; j_ < n_; j_++)
      ptry_[j_] = sum_[j_] * fac1_ - vertices_[ihi_][j_] * fac2_;

   if (!P.constraint().test(ptry_))
      ytry_ = Double.MaxValue;
   else
      ytry_ = P.value(ptry_);

   if (Double.IsNaN(ytry_))
      ytry_ = Double.MaxValue;

   if (ytry_ <= yb_)
   {
      yb_ = ytry_;
      pb_ = ptry_;
   }

   yflu_ = ytry_ - tt_ * Math.Log(rng_.next().value);
   if (yflu_ < yhi_)
   {
      values_[ihi_] = ytry_;
      yhi_ = yflu_;
      for (j_ = 0; j_ < n_; j_++)
      {
         sum_[j_] += ptry_[j_] - vertices_[ihi_][j_];
         vertices_[ihi_][j_] = ptry_[j_];
      }
   }
   ytry_ = yflu_;
}
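// Note (added for clarity, not in the library source): amotsa is the thermalised variant of the
// Nelder-Mead trial step (Numerical Recipes' amotsa/amebsa). With tt_ = -T_ set in the caller and
// u drawn uniformly from (0, 1), the trial value is judged through
//    yflu_ = ytry_ + T_ * log(u)   (a random downward "bonus", since log(u) < 0),
// while the stored vertex values yhi_/ynhi_/ylo_ carry a random upward penalty -T_ * log(u) added
// in the outer loop. At high temperature T_ this lets uphill moves replace the worst vertex; as
// T_ -> 0 the comparison reduces to the deterministic downhill simplex rule. The best point ever
// seen is always tracked in (pb_, yb_) using the un-fluctuated value.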
private double extrapolate(ref Problem P, int iHighest, ref double factor)
{
   Vector pTry;
   do
   {
      int dimensions = values_.Count - 1;
      double factor1 = (1.0 - factor) / dimensions;
      double factor2 = factor1 - factor;

      // #if QL_ARRAY_EXPRESSIONS
      pTry = sum_ * factor1 - vertices_[iHighest] * factor2;
      //#else
      //   // composite expressions fail to compile with gcc 3.4 on windows
      //   pTry = sum_ * factor1;
      //   pTry -= vertices_[iHighest] * factor2;
      //#endif
      factor *= 0.5;
   }
   while (!P.constraint().test(pTry) && Math.Abs(factor) > Const.QL_EPSILON);

   if (Math.Abs(factor) <= Const.QL_EPSILON)
      return values_[iHighest];

   factor *= 2.0;
   double vTry = P.value(pTry);
   if (vTry < values_[iHighest])
   {
      values_[iHighest] = vTry;
      //#if QL_ARRAY_EXPRESSIONS
      sum_ += pTry - vertices_[iHighest];
      //#else
      //   sum_ += pTry;
      //   sum_ -= vertices_[iHighest];
      //#endif
      vertices_[iHighest] = pTry;
   }
   return vTry;
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
   // set up of the problem
   //double ftol = endCriteria.functionEpsilon();    // end criteria on f(x) (see Numerical Recipes in C++, p.410)
   double xtol = endCriteria.rootEpsilon();          // end criteria on x (see GSL v. 1.9, http://www.gnu.org/software/gsl/)
   int maxStationaryStateIterations_ = endCriteria.maxStationaryStateIterations();
   EndCriteria.Type ecType = EndCriteria.Type.None;
   P.reset();
   Vector x_ = P.currentValue();
   int iterationNumber_ = 0;

   // Initialize vertices of the simplex
   bool end = false;
   int n = x_.Count;
   vertices_ = new InitializedList<Vector>(n + 1, x_);
   for (int i = 0; i < n; i++)
   {
      Vector direction = new Vector(n, 0.0);
      Vector vertice = vertices_[i + 1];
      direction[i] = 1.0;
      P.constraint().update(ref vertice, direction, lambda_);
      vertices_[i + 1] = vertice;
   }

   // Initialize function values at the vertices of the simplex
   values_ = new Vector(n + 1, 0.0);
   for (int i = 0; i <= n; i++)
      values_[i] = P.value(vertices_[i]);

   // Loop looking for minimum
   do
   {
      sum_ = new Vector(n, 0.0);
      for (int i = 0; i <= n; i++)
         sum_ += vertices_[i];

      // Determine the best (iLowest), worst (iHighest)
      // and 2nd worst (iNextHighest) vertices
      int iLowest = 0;
      int iHighest;
      int iNextHighest;
      if (values_[0] < values_[1])
      {
         iHighest = 1;
         iNextHighest = 0;
      }
      else
      {
         iHighest = 0;
         iNextHighest = 1;
      }
      for (int i = 1; i <= n; i++)
      {
         if (values_[i] > values_[iHighest])
         {
            iNextHighest = iHighest;
            iHighest = i;
         }
         else
         {
            if ((values_[i] > values_[iNextHighest]) && i != iHighest)
               iNextHighest = i;
         }
         if (values_[i] < values_[iLowest])
            iLowest = i;
      }

      // Now compute accuracy, update iteration number and check end criteria
      //// Numerical Recipes exit strategy on fx (see NR in C++, p.410)
      //double low = values_[iLowest];
      //double high = values_[iHighest];
      //double rtol = 2.0*std::fabs(high - low)/
      //    (std::fabs(high) + std::fabs(low) + QL_EPSILON);
      //++iterationNumber_;
      //if (rtol < ftol ||
      //    endCriteria.checkMaxIterations(iterationNumber_, ecType)) {
      // GSL exit strategy on x (see GSL v. 1.9, http://www.gnu.org/software/gsl)
      double simplexSize = Utils.computeSimplexSize(vertices_);
      ++iterationNumber_;
      if (simplexSize < xtol || endCriteria.checkMaxIterations(iterationNumber_, ref ecType))
      {
         endCriteria.checkStationaryPoint(0.0, 0.0, ref maxStationaryStateIterations_, ref ecType);
         endCriteria.checkMaxIterations(iterationNumber_, ref ecType);
         x_ = vertices_[iLowest];
         double low = values_[iLowest];
         P.setFunctionValue(low);
         P.setCurrentValue(x_);
         return ecType;
      }

      // If end criteria is not met, continue
      double factor = -1.0;
      double vTry = extrapolate(ref P, iHighest, ref factor);
      if ((vTry <= values_[iLowest]) && (factor == -1.0))
      {
         factor = 2.0;
         extrapolate(ref P, iHighest, ref factor);
      }
      else if (Math.Abs(factor) > Const.QL_EPSILON)
      {
         if (vTry >= values_[iNextHighest])
         {
            double vSave = values_[iHighest];
            factor = 0.5;
            vTry = extrapolate(ref P, iHighest, ref factor);
            if (vTry >= vSave && Math.Abs(factor) > Const.QL_EPSILON)
            {
               for (int i = 0; i <= n; i++)
               {
                  if (i != iLowest)
                  {
#if QL_ARRAY_EXPRESSIONS
                     vertices_[i] = 0.5 * (vertices_[i] + vertices_[iLowest]);
#else
                     vertices_[i] += vertices_[iLowest];
                     vertices_[i] *= 0.5;
#endif
                     values_[i] = P.value(vertices_[i]);
                  }
               }
            }
         }
      }

      // If can't extrapolate given the constraints, exit
      if (Math.Abs(factor) <= Const.QL_EPSILON)
      {
         x_ = vertices_[iLowest];
         double low = values_[iLowest];
         P.setFunctionValue(low);
         P.setCurrentValue(x_);
         return EndCriteria.Type.StationaryFunctionValue;
      }
   }
   while (end == false);

   throw new Exception("optimization failed: unexpected behaviour");
}
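// Note (added for clarity, not in the library source): each pass of the loop above is one
// Nelder-Mead step. The worst vertex is first reflected through the centroid of the others
// (factor = -1); if the reflected point beats the current best vertex, an expansion (factor = 2)
// is tried next; if it is no better than the second-worst vertex, a contraction (factor = 0.5) is
// tried instead, and when even that fails every vertex except the best is shrunk halfway towards
// the best vertex. Termination follows the GSL-style criterion: stop when the simplex size
// returned by Utils.computeSimplexSize falls below endCriteria.rootEpsilon(), when the iteration
// limit is reached, or when the constraint prevents any further extrapolation.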
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
   // set up of the problem
   //double ftol = endCriteria.functionEpsilon();    // end criteria on f(x) (see Numerical Recipes in C++, p.410)
   double xtol = endCriteria.rootEpsilon();          // end criteria on x (see GSL v. 1.9, http://www.gnu.org/software/gsl/)
   int maxStationaryStateIterations_ = endCriteria.maxStationaryStateIterations();
   EndCriteria.Type ecType = EndCriteria.Type.None;
   P.reset();
   Vector x_ = P.currentValue();
   int iterationNumber_ = 0;

   // Initialize vertices of the simplex
   bool end = false;
   int n = x_.Count;
   vertices_ = new InitializedList<Vector>(n + 1, x_);
   for (int i = 0; i < n; i++)
   {
      Vector direction = new Vector(n, 0.0);
      direction[i] = 1.0;
      P.constraint().update(vertices_[i + 1], direction, lambda_);
   }

   // Initialize function values at the vertices of the simplex
   values_ = new Vector(n + 1, 0.0);
   for (int i = 0; i <= n; i++)
      values_[i] = P.value(vertices_[i]);

   // Loop looking for minimum
   do
   {
      sum_ = new Vector(n, 0.0);
      for (int i = 0; i <= n; i++)
         sum_ += vertices_[i];

      // Determine the best (iLowest), worst (iHighest)
      // and 2nd worst (iNextHighest) vertices
      int iLowest = 0;
      int iHighest;
      int iNextHighest;
      if (values_[0] < values_[1])
      {
         iHighest = 1;
         iNextHighest = 0;
      }
      else
      {
         iHighest = 0;
         iNextHighest = 1;
      }
      for (int i = 1; i <= n; i++)
      {
         if (values_[i] > values_[iHighest])
         {
            iNextHighest = iHighest;
            iHighest = i;
         }
         else
         {
            if ((values_[i] > values_[iNextHighest]) && i != iHighest)
               iNextHighest = i;
         }
         if (values_[i] < values_[iLowest])
            iLowest = i;
      }

      // Now compute accuracy, update iteration number and check end criteria
      //// Numerical Recipes exit strategy on fx (see NR in C++, p.410)
      //double low = values_[iLowest];
      //double high = values_[iHighest];
      //double rtol = 2.0*std::fabs(high - low)/
      //    (std::fabs(high) + std::fabs(low) + QL_EPSILON);
      //++iterationNumber_;
      //if (rtol < ftol ||
      //    endCriteria.checkMaxIterations(iterationNumber_, ecType)) {
      // GSL exit strategy on x (see GSL v. 1.9, http://www.gnu.org/software/gsl)
      double simplexSize = Utils.computeSimplexSize(vertices_);
      ++iterationNumber_;
      if (simplexSize < xtol || endCriteria.checkMaxIterations(iterationNumber_, ref ecType))
      {
         endCriteria.checkStationaryPoint(0.0, 0.0, ref maxStationaryStateIterations_, ref ecType);
         endCriteria.checkMaxIterations(iterationNumber_, ref ecType);
         x_ = vertices_[iLowest];
         double low = values_[iLowest];
         P.setFunctionValue(low);
         P.setCurrentValue(x_);
         return ecType;
      }

      // If end criteria is not met, continue
      double factor = -1.0;
      double vTry = extrapolate(ref P, iHighest, ref factor);
      if ((vTry <= values_[iLowest]) && (factor == -1.0))
      {
         factor = 2.0;
         extrapolate(ref P, iHighest, ref factor);
      }
      else if (Math.Abs(factor) > Const.QL_Epsilon)
      {
         if (vTry >= values_[iNextHighest])
         {
            double vSave = values_[iHighest];
            factor = 0.5;
            vTry = extrapolate(ref P, iHighest, ref factor);
            if (vTry >= vSave && Math.Abs(factor) > Const.QL_Epsilon)
            {
               for (int i = 0; i <= n; i++)
               {
                  if (i != iLowest)
                  {
#if QL_ARRAY_EXPRESSIONS
                     vertices_[i] = 0.5 * (vertices_[i] + vertices_[iLowest]);
#else
                     vertices_[i] += vertices_[iLowest];
                     vertices_[i] *= 0.5;
#endif
                     values_[i] = P.value(vertices_[i]);
                  }
               }
            }
         }
      }

      // If can't extrapolate given the constraints, exit
      if (Math.Abs(factor) <= Const.QL_Epsilon)
      {
         x_ = vertices_[iLowest];
         double low = values_[iLowest];
         P.setFunctionValue(low);
         P.setCurrentValue(x_);
         return EndCriteria.Type.StationaryFunctionValue;
      }
   }
   while (end == false);

   throw new ApplicationException("optimization failed: unexpected behaviour");
}
private double extrapolate(ref Problem P, int iHighest, ref double factor)
{
   Vector pTry;
   do
   {
      int dimensions = values_.Count - 1;
      double factor1 = (1.0 - factor) / dimensions;
      double factor2 = factor1 - factor;

      // #if QL_ARRAY_EXPRESSIONS
      pTry = sum_ * factor1 - vertices_[iHighest] * factor2;
      //#else
      //   // composite expressions fail to compile with gcc 3.4 on windows
      //   pTry = sum_ * factor1;
      //   pTry -= vertices_[iHighest] * factor2;
      //#endif
      factor *= 0.5;
   }
   while (!P.constraint().test(pTry) && Math.Abs(factor) > Const.QL_Epsilon);

   if (Math.Abs(factor) <= Const.QL_Epsilon)
      return values_[iHighest];

   factor *= 2.0;
   double vTry = P.value(pTry);
   if (vTry < values_[iHighest])
   {
      values_[iHighest] = vTry;
      //#if QL_ARRAY_EXPRESSIONS
      sum_ += pTry - vertices_[iHighest];
      //#else
      //   sum_ += pTry;
      //   sum_ -= vertices_[iHighest];
      //#endif
      vertices_[iHighest] = pTry;
   }
   return vTry;
}
public override EndCriteria.Type minimize(Problem P, EndCriteria endCriteria)
{
   int stationaryStateIterations_ = 0;
   EndCriteria.Type ecType = EndCriteria.Type.None;
   P.reset();
   Vector x = P.currentValue();
   iteration_ = 0;
   n_ = x.size();
   ptry_ = new Vector(n_, 0.0);

   // build vertices
   vertices_ = new InitializedList<Vector>(n_ + 1, x);
   for (i_ = 0; i_ < n_; i_++)
   {
      Vector direction = new Vector(n_, 0.0);
      direction[i_] = 1.0;
      Vector tmp = vertices_[i_ + 1];
      P.constraint().update(ref tmp, direction, lambda_);
      vertices_[i_ + 1] = tmp;
   }

   values_ = new Vector(n_ + 1, 0.0);
   for (i_ = 0; i_ <= n_; i_++)
   {
      if (!P.constraint().test(vertices_[i_]))
         values_[i_] = Double.MaxValue;
      else
         values_[i_] = P.value(vertices_[i_]);
      if (Double.IsNaN(values_[i_]))
      {
         // handle NaN in the freshly computed vertex value
         values_[i_] = Double.MaxValue;
      }
   }

   // minimize
   T_ = T0_;
   yb_ = Double.MaxValue;
   pb_ = new Vector(n_, 0.0);
   do
   {
      iterationT_ = iteration_;
      do
      {
         sum_ = new Vector(n_, 0.0);
         for (i_ = 0; i_ <= n_; i_++)
            sum_ += vertices_[i_];
         tt_ = -T_;
         ilo_ = 0;
         ihi_ = 1;
         ynhi_ = values_[0] + tt_ * Math.Log(rng_.next().value);
         ylo_ = ynhi_;
         yhi_ = values_[1] + tt_ * Math.Log(rng_.next().value);
         if (ylo_ > yhi_)
         {
            ihi_ = 0;
            ilo_ = 1;
            ynhi_ = yhi_;
            yhi_ = ylo_;
            ylo_ = ynhi_;
         }
         for (i_ = 2; i_ < n_ + 1; i_++)
         {
            yt_ = values_[i_] + tt_ * Math.Log(rng_.next().value);
            if (yt_ <= ylo_)
            {
               ilo_ = i_;
               ylo_ = yt_;
            }
            if (yt_ > yhi_)
            {
               ynhi_ = yhi_;
               ihi_ = i_;
               yhi_ = yt_;
            }
            else
            {
               if (yt_ > ynhi_)
                  ynhi_ = yt_;
            }
         }

         // GSL end criterion in x (cf. above)
         if (endCriteria.checkStationaryPoint(simplexSize(), 0.0, ref stationaryStateIterations_, ref ecType) ||
             endCriteria.checkMaxIterations(iteration_, ref ecType))
         {
            // no matter what, we return the best ever point !
            P.setCurrentValue(pb_);
            P.setFunctionValue(yb_);
            return ecType;
         }

         iteration_ += 2;
         amotsa(P, -1.0);
         if (ytry_ <= ylo_)
         {
            amotsa(P, 2.0);
         }
         else
         {
            if (ytry_ >= ynhi_)
            {
               ysave_ = yhi_;
               amotsa(P, 0.5);
               if (ytry_ >= ysave_)
               {
                  for (i_ = 0; i_ < n_ + 1; i_++)
                  {
                     if (i_ != ilo_)
                     {
                        for (j_ = 0; j_ < n_; j_++)
                        {
                           sum_[j_] = 0.5 * (vertices_[i_][j_] + vertices_[ilo_][j_]);
                           vertices_[i_][j_] = sum_[j_];
                        }
                        values_[i_] = P.value(sum_);
                     }
                  }
                  iteration_ += n_;
                  for (i_ = 0; i_ < n_; i_++)
                     sum_[i_] = 0.0;
                  for (i_ = 0; i_ <= n_; i_++)
                     sum_ += vertices_[i_];
               }
            }
            else
            {
               iteration_ += 1;
            }
         }
      }
      while (iteration_ < iterationT_ + (scheme_ == Scheme.ConstantFactor ? m_ : 1));

      switch (scheme_)
      {
         case Scheme.ConstantFactor:
            T_ *= (1.0 - epsilon_);
            break;
         case Scheme.ConstantBudget:
            if (iteration_ <= K_)
               T_ = T0_ * Math.Pow(1.0 - Convert.ToDouble(iteration_) / Convert.ToDouble(K_), alpha_);
            else
               T_ = 0.0;
            break;
      }
   }
   while (true);
}
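// Note (added for clarity, not in the library source): the outer loop above runs one temperature
// block of the inner simplex loop (until the iteration counter has advanced by m_ moves under
// Scheme.ConstantFactor, or by a single move otherwise) and then cools the temperature according
// to the configured scheme:
//    ConstantFactor:  T_ <- T_ * (1 - epsilon_)                                   (geometric cooling)
//    ConstantBudget:  T_ = T0_ * (1 - iteration_ / K_)^alpha_  while iteration_ <= K_, then T_ = 0
// Once T_ reaches zero the thermal fluctuations vanish and the method behaves like the plain
// downhill simplex; the loop exits only through the stationary-point or max-iterations checks,
// which always report the best point ever visited, (pb_, yb_).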