// Default constructor: wraps a freshly created internal optimizer state.
public minlmstate()
{
    _innerobj = new minlm.minlmstate();
}
// Wrapping constructor: adopts an already existing internal state object
// (no copy is made - the wrapper aliases the supplied instance).
public minlmstate(minlm.minlmstate obj)
{
    _innerobj = obj;
}
/*************************************************************************
Unit test for the Levenberg-Marquardt optimizer (minlm).

Runs a battery of sub-tests, each accumulating into its own error flag:
* reference problems (unconstrained and bound constrained)
* a 1-D trigonometric problem
* random linear systems, solved normally and after MinLMRestartFrom()
* convergence-criteria checks (EpsF/EpsX/MaxIts/EpsG)
* report consistency (monotone F, first/last reported points) and StpMax

Parameters:
    silent - when false, a human-readable PASS/FAIL summary is printed
             to the console.
Returns:
    true when ALL sub-tests passed, false otherwise.

RKind encodes the optimizer creation mode (see RKindVsStateCheck()):
negative values select "inexact" schemes with finite-difference Jacobian.
*************************************************************************/
public static bool testminlm(bool silent)
{
    // result/aggregate flags; one boolean per sub-test family
    bool result = new bool(); bool waserrors = new bool(); bool referror = new bool(); bool lin1error = new bool(); bool lin2error = new bool();
    bool eqerror = new bool(); bool converror = new bool(); bool scerror = new bool(); bool restartserror = new bool(); bool othererrors = new bool();
    int rkind = 0; int ckind = 0; int tmpkind = 0;
    double epsf = 0; double epsx = 0; double epsg = 0; int maxits = 0;
    int n = 0; int m = 0;
    double[] x = new double[0]; double[] xe = new double[0]; double[] b = new double[0];
    double[] bl = new double[0]; double[] bu = new double[0]; double[] xlast = new double[0];
    int i = 0; int j = 0; double v = 0; double s = 0; double stpmax = 0; double h = 0;
    double[,] a = new double[0,0]; double fprev = 0; double xprev = 0;
    minlm.minlmstate state = new minlm.minlmstate();
    minlm.minlmreport rep = new minlm.minlmreport();
    int i_ = 0;

    waserrors = false; referror = false; lin1error = false; lin2error = false; eqerror = false;
    converror = false; scerror = false; othererrors = false; restartserror = false;
    // NOTE(review): lin2error is initialized here but never set by any sub-test in
    // this file, so the "1-D PROBLEM #2" report line below always prints OK - confirm
    // whether that sub-test was intentionally removed.

    //
    // Reference problem: minimize (x-2)^2 + y^2 + (z-x)^2 from a random start.
    // See comments for RKindVsStateCheck() for more info about RKind.
    //
    // NOTES: we also test negative RKind's corresponding to "inexact" schemes
    // which use approximate finite difference Jacobian.
    //
    x = new double[3];
    n = 3; m = 3; h = 0.0001;
    for(rkind=-2; rkind<=5; rkind++)
    {
        x[0] = 100*math.randomreal()-50;
        x[1] = 100*math.randomreal()-50;
        x[2] = 100*math.randomreal()-50;
        // creation ladder: each RKind maps to one creation routine + acceleration type
        if( rkind==-2 ) { minlm.minlmcreatev(n, m, x, h, state); minlm.minlmsetacctype(state, 1); }
        if( rkind==-1 ) { minlm.minlmcreatev(n, m, x, h, state); minlm.minlmsetacctype(state, 0); }
        if( rkind==0 ) { minlm.minlmcreatefj(n, m, x, state); }
        if( rkind==1 ) { minlm.minlmcreatefgj(n, m, x, state); }
        if( rkind==2 ) { minlm.minlmcreatefgh(n, x, state); }
        if( rkind==3 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 0); }
        if( rkind==4 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 1); }
        if( rkind==5 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 2); }
        while( minlm.minlmiteration(state) )
        {
            //
            // F = (x-2)^2 + y^2 + (z-x)^2, expressed as sum of squared residuals fi
            //
            if( state.needfi )
            {
                state.fi[0] = state.x[0]-2;
                state.fi[1] = state.x[1];
                state.fi[2] = state.x[2]-state.x[0];
            }
            if( state.needfij )
            {
                state.fi[0] = state.x[0]-2;
                state.fi[1] = state.x[1];
                state.fi[2] = state.x[2]-state.x[0];
                state.j[0,0] = 1;  state.j[0,1] = 0; state.j[0,2] = 0;
                state.j[1,0] = 0;  state.j[1,1] = 1; state.j[1,2] = 0;
                state.j[2,0] = -1; state.j[2,1] = 0; state.j[2,2] = 1;
            }
            if( (state.needf | state.needfg) | state.needfgh )
            {
                state.f = math.sqr(state.x[0]-2)+math.sqr(state.x[1])+math.sqr(state.x[2]-state.x[0]);
            }
            if( state.needfg | state.needfgh )
            {
                state.g[0] = 2*(state.x[0]-2)+2*(state.x[0]-state.x[2]);
                state.g[1] = 2*state.x[1];
                state.g[2] = 2*(state.x[2]-state.x[0]);
            }
            if( state.needfgh )
            {
                state.h[0,0] = 4;  state.h[0,1] = 0; state.h[0,2] = -2;
                state.h[1,0] = 0;  state.h[1,1] = 2; state.h[1,2] = 0;
                state.h[2,0] = -2; state.h[2,1] = 0; state.h[2,2] = 2;
            }
            // verify that request flags match what this RKind may legally ask for
            scerror = scerror | !rkindvsstatecheck(rkind, state);
        }
        minlm.minlmresults(state, ref x, rep);
        // exact minimum is (2,0,2); allow 1E-3 tolerance
        referror = (((referror | rep.terminationtype<=0) | (double)(Math.Abs(x[0]-2))>(double)(0.001)) | (double)(Math.Abs(x[1]))>(double)(0.001)) |
            (double)(Math.Abs(x[2]-2))>(double)(0.001);
    }

    //
    // Reference bound constrained problem:
    //
    //     min sum((x[i]-xe[i])^4) subject to 0<=x[i]<=1
    //
    // NOTES:
    // 1. we test only two optimization modes - V and FGH,
    //    because from algorithm internals we can assume that actual
    //    mode being used doesn't matter for bound constrained optimization
    //    process.
    //
    for(tmpkind=0; tmpkind<=1; tmpkind++)
    {
        for(n=1; n<=5; n++)
        {
            bl = new double[n]; bu = new double[n]; xe = new double[n]; x = new double[n];
            for(i=0; i<=n-1; i++)
            {
                bl[i] = 0;
                bu[i] = 1;
                xe[i] = 3*math.randomreal()-1;   // target may lie outside [0,1] - solution is its projection
                x[i] = math.randomreal();
            }
            if( tmpkind==0 ) { minlm.minlmcreatefgh(n, x, state); }
            if( tmpkind==1 ) { minlm.minlmcreatev(n, n, x, 1.0E-3, state); }
            minlm.minlmsetcond(state, 1.0E-6, 0, 0, 0);
            minlm.minlmsetbc(state, bl, bu);
            while( minlm.minlmiteration(state) )
            {
                // fi[i] = (x[i]-xe[i])^2, so sum(fi^2) equals the quartic target F
                if( state.needfi )
                {
                    for(i=0; i<=n-1; i++) { state.fi[i] = Math.Pow(state.x[i]-xe[i], 2); }
                }
                if( (state.needf | state.needfg) | state.needfgh )
                {
                    state.f = 0;
                    for(i=0; i<=n-1; i++) { state.f = state.f+Math.Pow(state.x[i]-xe[i], 4); }
                }
                if( state.needfg | state.needfgh )
                {
                    for(i=0; i<=n-1; i++) { state.g[i] = 4*Math.Pow(state.x[i]-xe[i], 3); }
                }
                if( state.needfgh )
                {
                    for(i=0; i<=n-1; i++)
                    {
                        for(j=0; j<=n-1; j++) { state.h[i,j] = 0; }
                    }
                    for(i=0; i<=n-1; i++) { state.h[i,i] = 12*Math.Pow(state.x[i]-xe[i], 2); }
                }
            }
            minlm.minlmresults(state, ref x, rep);
            if( rep.terminationtype==4 )
            {
                // solution must match xe clamped to the box, with loose 5E-2 tolerance
                for(i=0; i<=n-1; i++)
                {
                    referror = referror | (double)(Math.Abs(x[i]-apserv.boundval(xe[i], bl[i], bu[i])))>(double)(5.0E-2);
                }
            }
            else
            {
                referror = true;
            }
        }
    }

    //
    // 1D problem #1: minimize sin(x)^2; any multiple of PI is a solution.
    //
    // NOTES: we also test negative RKind's corresponding to "inexact" schemes
    // which use approximate finite difference Jacobian.
    //
    for(rkind=-2; rkind<=5; rkind++)
    {
        x = new double[1];
        n = 1; m = 1; h = 0.00001;
        x[0] = 100*math.randomreal()-50;
        if( rkind==-2 ) { minlm.minlmcreatev(n, m, x, h, state); minlm.minlmsetacctype(state, 1); }
        if( rkind==-1 ) { minlm.minlmcreatev(n, m, x, h, state); minlm.minlmsetacctype(state, 0); }
        if( rkind==0 ) { minlm.minlmcreatefj(n, m, x, state); }
        if( rkind==1 ) { minlm.minlmcreatefgj(n, m, x, state); }
        if( rkind==2 ) { minlm.minlmcreatefgh(n, x, state); }
        if( rkind==3 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 0); }
        if( rkind==4 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 1); }
        if( rkind==5 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 2); }
        while( minlm.minlmiteration(state) )
        {
            if( state.needfi ) { state.fi[0] = Math.Sin(state.x[0]); }
            if( state.needfij )
            {
                state.fi[0] = Math.Sin(state.x[0]);
                state.j[0,0] = Math.Cos(state.x[0]);
            }
            if( (state.needf | state.needfg) | state.needfgh ) { state.f = math.sqr(Math.Sin(state.x[0])); }
            if( state.needfg | state.needfgh ) { state.g[0] = 2*Math.Sin(state.x[0])*Math.Cos(state.x[0]); }
            if( state.needfgh ) { state.h[0,0] = 2*(Math.Cos(state.x[0])*Math.Cos(state.x[0])-Math.Sin(state.x[0])*Math.Sin(state.x[0])); }
            scerror = scerror | !rkindvsstatecheck(rkind, state);
        }
        minlm.minlmresults(state, ref x, rep);
        // NOTE(review): plain assignment (not |=) overwrites results of previous
        // RKind passes, so only the last optimizer mode is actually checked -
        // confirm whether "lin1error = lin1error | ..." was intended.
        lin1error = rep.terminationtype<=0 | (double)(Math.Abs(x[0]/Math.PI-(int)Math.Round(x[0]/Math.PI)))>(double)(0.001);
    }

    //
    // Linear equations: test normal optimization and optimization with restarts
    //
    for(n=1; n<=10; n++)
    {
        //
        // Prepare task: random well-conditioned A, random exact solution xe, b = A*xe
        //
        h = 0.00001;
        matgen.rmatrixrndcond(n, 100, ref a);
        x = new double[n]; xe = new double[n]; b = new double[n];
        for(i=0; i<=n-1; i++) { xe[i] = 2*math.randomreal()-1; }
        for(i=0; i<=n-1; i++)
        {
            v = 0.0;
            for(i_=0; i_<=n-1;i_++) { v += a[i,i_]*xe[i_]; }
            b[i] = v;
        }
        //
        // Test different RKind
        //
        // NOTES: we also test negative RKind's corresponding to "inexact" schemes
        // which use approximate finite difference Jacobian.
        //
        for(rkind=-2; rkind<=5; rkind++)
        {
            //
            // Solve task (first attempt)
            //
            for(i=0; i<=n-1; i++) { x[i] = 2*math.randomreal()-1; }
            if( rkind==-2 ) { minlm.minlmcreatev(n, n, x, h, state); minlm.minlmsetacctype(state, 1); }
            if( rkind==-1 ) { minlm.minlmcreatev(n, n, x, h, state); minlm.minlmsetacctype(state, 0); }
            if( rkind==0 ) { minlm.minlmcreatefj(n, n, x, state); }
            if( rkind==1 ) { minlm.minlmcreatefgj(n, n, x, state); }
            if( rkind==2 ) { minlm.minlmcreatefgh(n, x, state); }
            if( rkind==3 ) { minlm.minlmcreatevj(n, n, x, state); minlm.minlmsetacctype(state, 0); }
            if( rkind==4 ) { minlm.minlmcreatevj(n, n, x, state); minlm.minlmsetacctype(state, 1); }
            if( rkind==5 ) { minlm.minlmcreatevj(n, n, x, state); minlm.minlmsetacctype(state, 2); }
            while( minlm.minlmiteration(state) )
            {
                // axmb() fills requested fields for F = |A*x-b|^2
                axmb(state, a, b, n);
                scerror = scerror | !rkindvsstatecheck(rkind, state);
            }
            minlm.minlmresults(state, ref x, rep);
            eqerror = eqerror | rep.terminationtype<=0;
            for(i=0; i<=n-1; i++) { eqerror = eqerror | (double)(Math.Abs(x[i]-xe[i]))>(double)(0.001); }
            //
            // Now we try to restart algorithm from new point
            //
            for(i=0; i<=n-1; i++) { x[i] = 2*math.randomreal()-1; }
            minlm.minlmrestartfrom(state, x);
            while( minlm.minlmiteration(state) )
            {
                axmb(state, a, b, n);
                scerror = scerror | !rkindvsstatecheck(rkind, state);
            }
            minlm.minlmresults(state, ref x, rep);
            restartserror = restartserror | rep.terminationtype<=0;
            for(i=0; i<=n-1; i++) { restartserror = restartserror | (double)(Math.Abs(x[i]-xe[i]))>(double)(0.001); }
        }
    }

    //
    // Testing convergence properties using
    // different optimizer types and different conditions.
    //
    // Only limited subset of optimizers is tested because some
    // optimizers converge too quickly.
    //
    s = 100;
    for(rkind=0; rkind<=5; rkind++)
    {
        //
        // Skip FGH optimizer - it converges too quickly
        //
        if( rkind==2 ) { continue; }
        //
        // Test each stopping criterion (ckind) in isolation
        //
        for(ckind=0; ckind<=3; ckind++)
        {
            epsg = 0; epsf = 0; epsx = 0; maxits = 0;
            if( ckind==0 ) { epsf = 0.000001; }
            if( ckind==1 ) { epsx = 0.000001; }
            if( ckind==2 ) { maxits = 2; }
            if( ckind==3 ) { epsg = 0.0001; }
            x = new double[3];
            n = 3; m = 3;
            for(i=0; i<=2; i++) { x[i] = 6; }
            if( rkind==0 ) { minlm.minlmcreatefj(n, m, x, state); }
            if( rkind==1 ) { minlm.minlmcreatefgj(n, m, x, state); }
            ap.assert(rkind!=2);  // guarded above by 'continue'
            if( rkind==3 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 0); }
            if( rkind==4 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 1); }
            if( rkind==5 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 2); }
            minlm.minlmsetcond(state, epsg, epsf, epsx, maxits);
            while( minlm.minlmiteration(state) )
            {
                // F = s*(exp(x0)-2)^2 + (x1^2+1)^2 + (x2-x0)^2, minimum near (ln 2, 0, ln 2)
                if( state.needfi | state.needfij )
                {
                    state.fi[0] = s*(Math.Exp(state.x[0])-2);
                    state.fi[1] = math.sqr(state.x[1])+1;
                    state.fi[2] = state.x[2]-state.x[0];
                }
                if( state.needfij )
                {
                    state.j[0,0] = s*Math.Exp(state.x[0]); state.j[0,1] = 0;              state.j[0,2] = 0;
                    state.j[1,0] = 0;                      state.j[1,1] = 2*state.x[1];   state.j[1,2] = 0;
                    state.j[2,0] = -1;                     state.j[2,1] = 0;              state.j[2,2] = 1;
                }
                if( (state.needf | state.needfg) | state.needfgh )
                {
                    state.f = s*math.sqr(Math.Exp(state.x[0])-2)+math.sqr(math.sqr(state.x[1])+1)+math.sqr(state.x[2]-state.x[0]);
                }
                if( state.needfg | state.needfgh )
                {
                    state.g[0] = s*2*(Math.Exp(state.x[0])-2)*Math.Exp(state.x[0])+2*(state.x[0]-state.x[2]);
                    state.g[1] = 2*(math.sqr(state.x[1])+1)*2*state.x[1];
                    state.g[2] = 2*(state.x[2]-state.x[0]);
                }
                if( state.needfgh )
                {
                    state.h[0,0] = s*(4*math.sqr(Math.Exp(state.x[0]))-4*Math.Exp(state.x[0]))+2;
                    state.h[0,1] = 0;
                    state.h[0,2] = -2;
                    state.h[1,0] = 0;
                    state.h[1,1] = 12*math.sqr(state.x[1])+4;
                    state.h[1,2] = 0;
                    state.h[2,0] = -2;
                    state.h[2,1] = 0;
                    state.h[2,2] = 2;
                }
                scerror = scerror | !rkindvsstatecheck(rkind,
                    state);
            }
            minlm.minlmresults(state, ref x, rep);
            // each ckind must terminate with its own termination code:
            // 1=EpsF, 2=EpsX, 5=MaxIts (and exactly MaxIts iterations), 4=EpsG
            if( ckind==0 )
            {
                converror = converror | (double)(Math.Abs(x[0]-Math.Log(2)))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[1]))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[2]-Math.Log(2)))>(double)(0.05);
                converror = converror | rep.terminationtype!=1;
            }
            if( ckind==1 )
            {
                converror = converror | (double)(Math.Abs(x[0]-Math.Log(2)))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[1]))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[2]-Math.Log(2)))>(double)(0.05);
                converror = converror | rep.terminationtype!=2;
            }
            if( ckind==2 )
            {
                converror = (converror | rep.terminationtype!=5) | rep.iterationscount!=maxits;
            }
            if( ckind==3 )
            {
                converror = converror | (double)(Math.Abs(x[0]-Math.Log(2)))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[1]))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[2]-Math.Log(2)))>(double)(0.05);
                converror = converror | rep.terminationtype!=4;
            }
        }
    }

    //
    // Other properties:
    // 1. test reports (F should form monotone sequence)
    // 2. test maximum step
    //
    for(rkind=0; rkind<=5; rkind++)
    {
        //
        // reports:
        // * check that first report is initial point
        // * check that F is monotone decreasing
        // * check that last report is final result
        //
        n = 3; m = 3; s = 100;
        x = new double[n]; xlast = new double[n];
        for(i=0; i<=n-1; i++) { x[i] = 6; }
        if( rkind==0 ) { minlm.minlmcreatefj(n, m, x, state); }
        if( rkind==1 ) { minlm.minlmcreatefgj(n, m, x, state); }
        if( rkind==2 ) { minlm.minlmcreatefgh(n, x, state); }
        if( rkind==3 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 0); }
        if( rkind==4 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 1); }
        if( rkind==5 ) { minlm.minlmcreatevj(n, m, x, state); minlm.minlmsetacctype(state, 2); }
        minlm.minlmsetcond(state, 0, 0, 0, 4);
        minlm.minlmsetxrep(state, true);
        fprev = math.maxrealnumber;   // sentinel: "no report seen yet"
        while( minlm.minlmiteration(state) )
        {
            if( state.needfi | state.needfij )
            {
                state.fi[0] = Math.Sqrt(s)*(Math.Exp(state.x[0])-2);
                state.fi[1] = state.x[1];
                state.fi[2] = state.x[2]-state.x[0];
            }
            if( state.needfij )
            {
                state.j[0,0] = Math.Sqrt(s)*Math.Exp(state.x[0]); state.j[0,1] = 0; state.j[0,2] = 0;
                state.j[1,0] = 0;                                 state.j[1,1] = 1; state.j[1,2] = 0;
                state.j[2,0] = -1;                                state.j[2,1] = 0; state.j[2,2] = 1;
            }
            if( (state.needf | state.needfg) | state.needfgh )
            {
                state.f = s*math.sqr(Math.Exp(state.x[0])-2)+math.sqr(state.x[1])+math.sqr(state.x[2]-state.x[0]);
            }
            if( state.needfg | state.needfgh )
            {
                state.g[0] = s*2*(Math.Exp(state.x[0])-2)*Math.Exp(state.x[0])+2*(state.x[0]-state.x[2]);
                state.g[1] = 2*state.x[1];
                state.g[2] = 2*(state.x[2]-state.x[0]);
            }
            if( state.needfgh )
            {
                state.h[0,0] = s*(4*math.sqr(Math.Exp(state.x[0]))-4*Math.Exp(state.x[0]))+2;
                state.h[0,1] = 0;
                state.h[0,2] = -2;
                state.h[1,0] = 0;
                state.h[1,1] = 2;
                state.h[1,2] = 0;
                state.h[2,0] = -2;
                state.h[2,1] = 0;
                state.h[2,2] = 2;
            }
            scerror = scerror | !rkindvsstatecheck(rkind, state);
            if( state.xupdated )
            {
                // F must never increase between consecutive reports
                othererrors = othererrors | (double)(state.f)>(double)(fprev);
                if( (double)(fprev)==(double)(math.maxrealnumber) )
                {
                    // very first report must echo the initial point exactly
                    for(i=0; i<=n-1; i++) { othererrors = othererrors | (double)(state.x[i])!=(double)(x[i]); }
                }
                fprev = state.f;
                for(i_=0; i_<=n-1;i_++) { xlast[i_] = state.x[i_]; }
            }
        }
        minlm.minlmresults(state, ref x, rep);
        // final result must coincide with the last reported point
        for(i=0; i<=n-1; i++) { othererrors = othererrors | (double)(x[i])!=(double)(xlast[i]); }
    }
    //
    // maximum-step test: minimize cosh-like e^x + e^-x from x=100 with StpMax set;
    // consecutive trial points may not be farther apart than StpMax (plus rounding slack)
    //
    n = 1;
    x = new double[n];
    x[0] = 100;
    stpmax = 0.05+0.05*math.randomreal();
    minlm.minlmcreatefgh(n, x, state);
    minlm.minlmsetcond(state, 1.0E-9, 0, 0, 0);
    minlm.minlmsetstpmax(state, stpmax);
    minlm.minlmsetxrep(state, true);
    xprev = x[0];
    while( minlm.minlmiteration(state) )
    {
        if( (state.needf | state.needfg) | state.needfgh ) { state.f = Math.Exp(state.x[0])+Math.Exp(-state.x[0]); }
        if( state.needfg | state.needfgh ) { state.g[0] = Math.Exp(state.x[0])-Math.Exp(-state.x[0]); }
        if( state.needfgh ) { state.h[0,0] = Math.Exp(state.x[0])+Math.Exp(-state.x[0]); }
        othererrors = othererrors | (double)(Math.Abs(state.x[0]-xprev))>(double)((1+Math.Sqrt(math.machineepsilon))*stpmax);
        if( state.xupdated ) { xprev = state.x[0]; }
    }

    //
    // end: aggregate flags and (optionally) print a summary
    //
    waserrors = ((((((referror | lin1error) | lin2error) | eqerror) | converror) | scerror) | othererrors) | restartserror;
    if( !silent )
    {
        System.Console.Write("TESTING LEVENBERG-MARQUARDT OPTIMIZATION");
        System.Console.WriteLine();
        System.Console.Write("REFERENCE PROBLEMS: ");
        if( referror ) { System.Console.Write("FAILED"); System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        System.Console.Write("1-D PROBLEM #1: ");
        if( lin1error ) { System.Console.Write("FAILED"); System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        System.Console.Write("1-D PROBLEM #2: ");
        if( lin2error ) { System.Console.Write("FAILED"); System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        System.Console.Write("LINEAR EQUATIONS: ");
        if( eqerror ) { System.Console.Write("FAILED");
            System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        System.Console.Write("RESTARTS: ");
        if( restartserror ) { System.Console.Write("FAILED"); System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        System.Console.Write("CONVERGENCE PROPERTIES: ");
        if( converror ) { System.Console.Write("FAILED"); System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        System.Console.Write("STATE FIELDS CONSISTENCY: ");
        if( scerror ) { System.Console.Write("FAILED"); System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        System.Console.Write("OTHER PROPERTIES: ");
        if( othererrors ) { System.Console.Write("FAILED"); System.Console.WriteLine(); } else { System.Console.Write("OK"); System.Console.WriteLine(); }
        if( waserrors ) { System.Console.Write("TEST FAILED"); System.Console.WriteLine(); } else { System.Console.Write("TEST PASSED"); System.Console.WriteLine(); }
        System.Console.WriteLine();
        System.Console.WriteLine();
    }
    result = !waserrors;
    return result;
}
/*************************************************************************
Demo: minimize F = (x-2*y)^2 + (x-2)^2 + (y-1)^2 with the FGH scheme
(function + gradient + Hessian supplied by the caller).
Exact solution is (2,1). Prints the result and report counters; returns 0.

NOTE(review): this entry point uses the older ALGLIB API style (ref-passed
state, AP.Math helpers), unlike other methods in this file - presumably the
file aggregates chunks from different ALGLIB releases; confirm which API
version is actually linked.
*************************************************************************/
public static int Main(string[] args)
{
    minlm.minlmstate state = new minlm.minlmstate();
    minlm.minlmreport rep = new minlm.minlmreport();
    double[] s = new double[0];
    double x = 0;
    double y = 0;

    //
    // Example of solving simple task using FGH scheme.
    //
    // Function minimized:
    //     F = (x-2*y)^2 + (x-2)^2 + (y-1)^2
    // exact solution is (2,1).
    //
    // Start from a random point near the origin.
    s = new double[2];
    s[0] = AP.Math.RandomReal() - 0.5;
    s[1] = AP.Math.RandomReal() - 0.5;
    minlm.minlmcreatefgh(2, ref s, ref state);
    minlm.minlmsetcond(ref state, 0.0, 0.0, 0.001, 0);   // stop on small step (EpsX=0.001)
    while (minlm.minlmiteration(ref state))
    {
        x = state.x[0];
        y = state.x[1];
        if (state.needf)
        {
            state.f = AP.Math.Sqr(x - 2 * y) + AP.Math.Sqr(x - 2) + AP.Math.Sqr(y - 1);
        }
        if (state.needfg)
        {
            state.f = AP.Math.Sqr(x - 2 * y) + AP.Math.Sqr(x - 2) + AP.Math.Sqr(y - 1);
            state.g[0] = 2 * (x - 2 * y) + 2 * (x - 2) + 0;
            state.g[1] = -(4 * (x - 2 * y)) + 0 + 2 * (y - 1);
        }
        if (state.needfgh)
        {
            state.f = AP.Math.Sqr(x - 2 * y) + AP.Math.Sqr(x - 2) + AP.Math.Sqr(y - 1);
            state.g[0] = 2 * (x - 2 * y) + 2 * (x - 2) + 0;
            state.g[1] = -(4 * (x - 2 * y)) + 0 + 2 * (y - 1);
            // constant Hessian of the quadratic F
            state.h[0, 0] = 4;
            state.h[1, 0] = -4;
            state.h[0, 1] = -4;
            state.h[1, 1] = 10;
        }
    }
    minlm.minlmresults(ref state, ref s, ref rep);
    //
    // output results
    //
    System.Console.Write("X = ");
    System.Console.Write("{0,4:F2}", s[0]);
    System.Console.Write(" (correct value - 2.00)");
    System.Console.WriteLine();
    System.Console.Write("Y = ");
    System.Console.Write("{0,4:F2}", s[1]);
    System.Console.Write(" (correct value - 1.00)");
    System.Console.WriteLine();
    System.Console.Write("TerminationType = ");
    System.Console.Write("{0,0:d}", rep.terminationtype);
    System.Console.Write(" (should be 2 - stopping when step is small enough)");
    System.Console.WriteLine();
    System.Console.Write("NFunc = ");
    System.Console.Write("{0,0:d}", rep.nfunc);
    System.Console.WriteLine();
    System.Console.Write("NJac = ");
    System.Console.Write("{0,0:d}", rep.njac);
    System.Console.WriteLine();
    System.Console.Write("NGrad = ");
    System.Console.Write("{0,0:d}", rep.ngrad);
    System.Console.WriteLine();
    System.Console.Write("NHess = ");
    System.Console.Write("{0,0:d}", rep.nhess);
    System.Console.WriteLine();
    return(0);
}
// Default constructor: every array-valued field starts as an empty
// (zero-length) array and every sub-object as a fresh instance, so the
// state is safe to use before the first real initialization.
public lsfitstate()
{
    // scalar/vector workspaces
    s = new double[0];
    x = new double[0];
    c = new double[0];
    g = new double[0];
    tmp = new double[0];
    // box constraints
    bndl = new double[0];
    bndu = new double[0];
    // fitting task data: points, targets, weights
    taskx = new double[0,0];
    tasky = new double[0];
    w = new double[0];
    // Hessian buffer
    h = new double[0,0];
    // embedded optimizer state/report and reverse-communication state
    optstate = new minlm.minlmstate();
    optrep = new minlm.minlmreport();
    rstate = new rcommstate();
}
// Resets the object to a freshly-constructed state: all array fields become
// empty arrays and all nested helper objects are re-created.
public override void init()
{
    // scalar/vector workspaces
    s = new double[0];
    x = new double[0];
    c = new double[0];
    g = new double[0];
    tmp = new double[0];
    tmpf = new double[0];
    // box constraints
    bndl = new double[0];
    bndu = new double[0];
    // fitting task data: points, targets, weights (requested and current)
    taskx = new double[0,0];
    tasky = new double[0];
    taskw = new double[0];
    wcur = new double[0];
    // Jacobian buffers (plain and weighted)
    h = new double[0,0];
    tmpjac = new double[0,0];
    tmpjacw = new double[0,0];
    // nested reports, optimizer state and reverse-communication state
    invrep = new matinv.matinvreport();
    rep = new lsfitreport();
    optstate = new minlm.minlmstate();
    optrep = new minlm.minlmreport();
    rstate = new rcommstate();
}
/*************************************************************************
Demo: least-squares polynomial fitting with the FJ scheme
(function vector + Jacobian supplied by the caller).

Fits y = a + b*x + c*x^2 to 100 samples of exp(x)-sin(x)-x^3/3 on [-1,1]
by minimizing the sum of squared residuals. Prints a, b, c and the
termination code; returns 0.

NOTE(review): uses the older ALGLIB API style (ref-passed state,
AP.Math helpers) - see the matching note on the other Main in this file.
*************************************************************************/
public static int Main(string[] args)
{
    minlm.minlmstate state = new minlm.minlmstate();
    minlm.minlmreport rep = new minlm.minlmreport();
    int i = 0;
    double[] s = new double[0];   // parameter vector (a,b,c)
    double[] x = new double[0];   // sample abscissas
    double[] y = new double[0];   // sample values
    double fi = 0;                // current residual
    int n = 0;                    // number of parameters
    int m = 0;                    // number of data points

    //
    // Example of solving polynomial approximation task using FJ scheme.
    //
    // Data points:
    //     xi are evenly spaced over [-1,+1]
    //
    // Function being fitted:
    //     yi = exp(xi) - sin(xi) - xi^3/3
    //
    // Function being minimized:
    //     F(a,b,c) =
    //         (a + b*x0 + c*x0^2 - y0)^2 +
    //         (a + b*x1 + c*x1^2 - y1)^2 + ...
    //
    n = 3;
    s = new double[n];
    for(i=0; i<=n-1; i++) { s[i] = AP.Math.RandomReal()-0.5; }   // random start
    m = 100;
    x = new double[m];
    y = new double[m];
    for(i=0; i<=m-1; i++)
    {
        x[i] = (double)(2*i)/((double)(m-1))-1;
        y[i] = Math.Exp(x[i])-Math.Sin(x[i])-x[i]*x[i]*x[i]/3;
    }
    //
    // Now S stores starting point, X and Y store points being fitted.
    //
    minlm.minlmcreatefj(n, m, ref s, ref state);
    minlm.minlmsetcond(ref state, 0.0, 0.0, 0.001, 0);   // stop on small step
    while( minlm.minlmiteration(ref state) )
    {
        if( state.needf ) { state.f = 0; }   // reset accumulator; summed in the loop below
        for(i=0; i<=m-1; i++)
        {
            //
            // "a" is stored in State.X[0]
            // "b" - State.X[1]
            // "c" - State.X[2]
            //
            fi = state.x[0]+state.x[1]*x[i]+state.x[2]*AP.Math.Sqr(x[i])-y[i];
            if( state.needf )
            {
                //
                // F is equal to sum of fi squared.
                //
                state.f = state.f+AP.Math.Sqr(fi);
            }
            if( state.needfij )
            {
                //
                // Fi
                //
                state.fi[i] = fi;
                //
                // dFi/da
                //
                state.j[i,0] = 1;
                //
                // dFi/db
                //
                state.j[i,1] = x[i];
                //
                // dFi/dc
                //
                state.j[i,2] = AP.Math.Sqr(x[i]);
            }
        }
    }
    minlm.minlmresults(ref state, ref s, ref rep);
    //
    // output results
    //
    System.Console.Write("A = ");
    System.Console.Write("{0,4:F2}",s[0]);
    System.Console.WriteLine();
    System.Console.Write("B = ");
    System.Console.Write("{0,4:F2}",s[1]);
    System.Console.WriteLine();
    System.Console.Write("C = ");
    System.Console.Write("{0,4:F2}",s[2]);
    System.Console.WriteLine();
    System.Console.Write("TerminationType = ");
    System.Console.Write("{0,0:d}",rep.terminationtype);
    System.Console.Write(" (should be 2 - stopping when step is small enough)");
    System.Console.WriteLine();
    return 0;
}
/*************************************************************************
This is "expert" 4PL/5PL fitting function, which can be used if you need
better control over fitting process than provided by LogisticFit4() or
LogisticFit5().

This function fits model of the form

    F(x|A,B,C,D)   = D+(A-D)/(1+Power(x/C,B))           (4PL model)

or

    F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G)    (5PL model)

Here:
    * A, D - unconstrained
    * B>=0 for 4PL, unconstrained for 5PL
    * C>0
    * G>0 (if present)

INPUT PARAMETERS:
    X       -   array[N], stores X-values.
                MUST include only non-negative numbers (but may include
                zero values). Can be unsorted.
    Y       -   array[N], values to fit.
    N       -   number of points. If N is less than length of X/Y, only
                leading N elements are used.
    CnstrLeft-  optional equality constraint for model value at the left
                boundary (at X=0). Specify NAN (Not-a-Number) if you do
                not need constraint on the model value at X=0 (in C++ you
                can pass alglib::fp_nan as parameter, in C# it will be
                Double.NaN).
                See below, section "EQUALITY CONSTRAINTS" for more
                information about constraints.
    CnstrRight- optional equality constraint for model value at X=infinity.
                Specify NAN (Not-a-Number) if you do not need constraint
                on the model value (in C++ you can pass alglib::fp_nan as
                parameter, in C# it will be Double.NaN).
                See below, section "EQUALITY CONSTRAINTS" for more
                information about constraints.
    Is4PL   -   whether 4PL or 5PL models are fitted
    LambdaV -   regularization coefficient, LambdaV>=0.
                Set it to zero unless you know what you are doing.
    EpsX    -   stopping condition (step size), EpsX>=0.
                Zero value means that small step is automatically chosen.
                See notes below for more information.
    RsCnt   -   number of repeated restarts from random points. 4PL/5PL
                models are prone to problem of bad local extrema. Utilizing
                multiple random restarts allows us to improve algorithm
                convergence. RsCnt>=0. Zero value means that function
                automatically choose small amount of restarts (recommended).

OUTPUT PARAMETERS:
    A,B,C,D -   parameters of 4PL model
    G       -   parameter of 5PL model; for Is4PL=True, G=1 is returned.
    Rep     -   fitting report. This structure has many fields, but ONLY
                ONES LISTED BELOW ARE SET:
                * Rep.IterationsCount - number of iterations performed
                * Rep.RMSError - root-mean-square error
                * Rep.AvgError - average absolute error
                * Rep.AvgRelError - average relative error (calculated for
                  non-zero Y-values)
                * Rep.MaxError - maximum absolute error
                * Rep.R2 - coefficient of determination, R-squared. This
                  coefficient is calculated as R2=1-RSS/TSS (in case of
                  nonlinear regression there are multiple ways to define
                  R2, each of them giving different results).

NOTE: after you obtained coefficients, you can evaluate model with
      LogisticCalc5() function.

NOTE: step is automatically scaled according to scale of parameters being
      fitted before we compare its length with EpsX. Thus, this function
      can be used to fit data with very small or very large values without
      changing EpsX.

EQUALITY CONSTRAINTS ON PARAMETERS

4PL/5PL solver supports equality constraints on model values at the left
boundary (X=0) and right boundary (X=infinity). These constraints are
completely optional and you can specify both of them, only one - or no
constraints at all.

Parameter CnstrLeft contains left constraint (or NAN for unconstrained
fitting), and CnstrRight contains right one. For 4PL, left constraint
ALWAYS corresponds to parameter A, and right one is ALWAYS constraint on
D. That's because 4PL model is normalized in such way that B>=0.

For 5PL model things are different. Unlike 4PL one, 5PL model is NOT
symmetric with respect to change in sign of B. Thus, negative B's are
possible, and left constraint may constrain parameter A (for positive B's)
- or parameter D (for negative B's). Similarly changes meaning of right
constraint. You do not have to decide what parameter to constrain -
algorithm will automatically determine correct parameters as fitting
progresses. However, question highlighted above is important when you
interpret fitting results.

  -- ALGLIB PROJECT --
     Copyright 14.02.2014 by Bochkanov Sergey
*************************************************************************/
public static void logisticfit45x(double[] x,
    double[] y,
    int n,
    double cnstrleft,
    double cnstrright,
    bool is4pl,
    double lambdav,
    double epsx,
    int rscnt,
    ref double a,
    ref double b,
    ref double c,
    ref double d,
    ref double g,
    lsfitreport rep)
{
    int i = 0; int k = 0; int innerit = 0; int outerit = 0; int nz = 0;
    double v = 0;
    // saved bound-constraint values for A (b0*), B (b1*), D (b3*)
    double b00 = 0; double b01 = 0; double b10 = 0; double b11 = 0; double b30 = 0; double b31 = 0;
    double[] p0 = new double[0]; double[] p1 = new double[0]; double[] p2 = new double[0];
    double[] bndl = new double[0]; double[] bndu = new double[0];
    double[] s = new double[0];
    double[,] z = new double[0,0];
    hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
    minlm.minlmstate state = new minlm.minlmstate();
    minlm.minlmreport replm = new minlm.minlmreport();
    int maxits = 0;
    double fbest = 0; double flast = 0; double flast2 = 0;
    double scalex = 0; double scaley = 0;
    double[] bufx = new double[0]; double[] bufy = new double[0];
    double rss = 0; double tss = 0; double meany = 0;

    // work on private copies - X/Y are sorted in place below
    x = (double[])x.Clone();
    y = (double[])y.Clone();
    a = 0; b = 0; c = 0; d = 0; g = 0;
    alglib.ap.assert(math.isfinite(epsx), "LogisticFitX: EpsX is infinite/NAN");
    alglib.ap.assert(math.isfinite(lambdav), "LogisticFitX: LambdaV is infinite/NAN");
    alglib.ap.assert(math.isfinite(cnstrleft) || Double.IsNaN(cnstrleft), "LogisticFitX: CnstrLeft is NOT finite or NAN");
    alglib.ap.assert(math.isfinite(cnstrright) || Double.IsNaN(cnstrright), "LogisticFitX: CnstrRight is NOT finite or NAN");
    alglib.ap.assert((double)(lambdav)>=(double)(0), "LogisticFitX: negative LambdaV");
    alglib.ap.assert(n>0, "LogisticFitX: N<=0");
    alglib.ap.assert(rscnt>=0, "LogisticFitX: RsCnt<0");
    alglib.ap.assert((double)(epsx)>=(double)(0), "LogisticFitX: EpsX<0");
    alglib.ap.assert(alglib.ap.len(x)>=n,
        "LogisticFitX: Length(X)<N");
    alglib.ap.assert(alglib.ap.len(y)>=n, "LogisticFitX: Length(Y)<N");
    alglib.ap.assert(apserv.isfinitevector(x, n), "LogisticFitX: X contains infinite/NAN values");
    // NOTE(review): message below says "X contains ..." although it checks Y -
    // looks like a copy-paste slip in the assertion text.
    alglib.ap.assert(apserv.isfinitevector(y, n), "LogisticFitX: X contains infinite/NAN values");
    // fixed seed -> deterministic restarts
    hqrnd.hqrndseed(2211, 1033044, rs);
    clearreport(rep);
    if( (double)(epsx)==(double)(0) ) { epsx = 1.0E-10; }
    if( rscnt==0 ) { rscnt = 4; }
    maxits = 1000;
    //
    // Sort points by X.
    // Determine number of zero and non-zero values.
    //
    tsort.tagsortfastr(ref x, ref y, ref bufx, ref bufy, n);
    alglib.ap.assert((double)(x[0])>=(double)(0), "LogisticFitX: some X[] are negative");
    nz = n;
    for(i=0; i<=n-1; i++)
    {
        if( (double)(x[i])>(double)(0) )
        {
            nz = i;   // index of first strictly positive X; X[0..nz-1] are zero
            break;
        }
    }
    //
    // For NZ=N (all X[] are zero) special code is used.
    // For NZ<N we use general-purpose code.
    //
    rep.iterationscount = 0;
    if( nz==n )
    {
        //
        // NZ=N, degenerate problem.
        // No need to run optimizer: fit a flat model through mean(Y),
        // honoring the left constraint if present.
        //
        v = 0.0;
        for(i=0; i<=n-1; i++) { v = v+y[i]; }
        v = v/n;
        if( math.isfinite(cnstrleft) ) { a = cnstrleft; } else { a = v; }
        b = 1;
        c = 1;
        if( math.isfinite(cnstrright) ) { d = cnstrright; } else { d = a; }
        g = 1;
    }
    else
    {
        //
        // Non-degenerate problem.
        // Determine scale of data: ScaleX = median positive X, ScaleY = stddev of Y.
        //
        scalex = x[nz+(n-nz)/2];
        alglib.ap.assert((double)(scalex)>(double)(0), "LogisticFitX: internal error");
        v = 0.0;
        for(i=0; i<=n-1; i++) { v = v+y[i]; }
        v = v/n;
        scaley = 0.0;
        for(i=0; i<=n-1; i++) { scaley = scaley+math.sqr(y[i]-v); }
        scaley = Math.Sqrt(scaley/n);
        if( (double)(scaley)==(double)(0) ) { scaley = 1.0; }
        // per-parameter scales for the optimizer: (A, B, C, D, G)
        s = new double[5];
        s[0] = scaley;
        s[1] = 0.1;
        s[2] = scalex;
        s[3] = scaley;
        s[4] = 0.1;
        p0 = new double[5];
        p0[0] = 0; p0[1] = 0; p0[2] = 0; p0[3] = 0; p0[4] = 0;
        bndl = new double[5];
        bndu = new double[5];
        // 5 parameters, N residuals + 5 regularization terms
        minlm.minlmcreatevj(5, n+5, p0, state);
        minlm.minlmsetscale(state, s);
        minlm.minlmsetcond(state, 0.0, 0.0, epsx, maxits);
        minlm.minlmsetxrep(state, true);
        //
        // Main loop - includes THREE (!) nested iterations:
        //
        // 1. Inner iteration is minimization of target function from
        //    the current initial point P1 subject to boundary constraints
        //    given by arrays BndL and BndU.
        //
        // 2. Middle iteration changes boundary constraints from tight to
        //    relaxed ones:
        //    * at the first middle iteration we optimize with "tight"
        //      constraints on parameters B and C (P[1] and P[2]). It
        //      allows us to find good initial point for the next middle
        //      iteration without risk of running into "hard" points (B=0, C=0).
        //      Initial point is initialized by outer iteration.
        //      Solution is placed to P1.
        //    * at the second middle iteration we relax boundary constraints
        //      on B and C. Solution P1 from the first middle iteration is
        //      used as initial point for the second one.
        //    * both first and second iterations are 4PL models, even when
        //      we fit 5PL.
        //    * additionally, for 5PL models, we use results from the second
        //      middle iteration as initial guess for 5PL fit.
        //    * after middle iteration is over we compare quality of the
        //      solution stored in P1 and offload it to A/B/C/D/G, if it
        //      is better.
        //
        // 3. Outer iteration (starts below) changes following parameters:
        //    * initial point
        //    * "tight" constraints BndL/BndU
        //    * "relaxed" constraints BndL/BndU
        //
        // Below we prepare combined matrix Z of optimization settings for
        // outer/middle iterations:
        //
        //     [ P00 BndL00 BndU00 BndL01 BndU01 ]
        //     [                                 ]
        //     [ P10 BndL10 BndU10 BndL11 BndU11 ]
        //
        // Here:
        // * Pi0 is initial point for I-th outer iteration
        // * BndLij is lower boundary for I-th outer iteration, J-th inner iteration
        // * BndUij - same as BndLij
        //
        z = new double[rscnt, 5+4*5];
        for(i=0; i<=rscnt-1; i++)
        {
            // initial point: A near Y[0], B in [0.5,1.5], C = random positive X, D near Y[N-1], G=1
            if( math.isfinite(cnstrleft) ) { z[i,0] = cnstrleft; } else { z[i,0] = y[0]+0.25*scaley*(hqrnd.hqrnduniformr(rs)-0.5); }
            z[i,1] = 0.5+hqrnd.hqrnduniformr(rs);
            z[i,2] = x[nz+hqrnd.hqrnduniformi(rs, n-nz)];
            if( math.isfinite(cnstrright) ) { z[i,3] = cnstrright; } else { z[i,3] = y[n-1]+0.25*scaley*(hqrnd.hqrnduniformr(rs)-0.5); }
            z[i,4] = 1.0;
            // "tight" constraints (columns 5..14): B in [0.5,2], C in [0.5,2]*ScaleX
            if( math.isfinite(cnstrleft) ) { z[i,5+0] = cnstrleft; z[i,10+0] = cnstrleft; } else { z[i,5+0] = Double.NegativeInfinity; z[i,10+0] = Double.PositiveInfinity; }
            z[i,5+1] = 0.5;
            z[i,10+1] = 2.0;
            z[i,5+2] = 0.5*scalex;
            z[i,10+2] = 2.0*scalex;
            if( math.isfinite(cnstrright) ) { z[i,5+3] = cnstrright; z[i,10+3] = cnstrright; } else { z[i,5+3] = Double.NegativeInfinity; z[i,10+3] = Double.PositiveInfinity; }
            z[i,5+4] = 1.0;
            z[i,10+4] = 1.0;
            // "relaxed" constraints (columns 15..24): B>=0.01, C>eps, G still pinned to 1
            if( math.isfinite(cnstrleft) ) { z[i,15+0] = cnstrleft; z[i,20+0] = cnstrleft; } else { z[i,15+0] = Double.NegativeInfinity; z[i,20+0] = Double.PositiveInfinity; }
            z[i,15+1] = 0.01;
            z[i,20+1] = Double.PositiveInfinity;
            z[i,15+2] = math.machineepsilon*scalex;
            z[i,20+2] = Double.PositiveInfinity;
            if( math.isfinite(cnstrright) ) { z[i,15+3] = cnstrright; z[i,20+3] = cnstrright; } else { z[i,15+3] = Double.NegativeInfinity; z[i,20+3] = Double.PositiveInfinity; }
            z[i,15+4] = 1.0;
            z[i,20+4] = 1.0;
        }
        //
        // Run outer iterations
        //
        a = 0; b = 1; c = 1; d = 1; g = 1;
        fbest = math.maxrealnumber;
        p1 = new double[5];
        p2 = new double[5];
        for(outerit=0;
            outerit<=alglib.ap.rows(z)-1; outerit++)
        {
            //
            // Beginning of the middle iterations.
            // Prepare initial point P1.
            //
            for(i=0; i<=4; i++) { p1[i] = z[outerit,i]; }
            flast = math.maxrealnumber;
            for(innerit=0; innerit<=1; innerit++)
            {
                //
                // Set current boundary constraints (tight at innerit=0, relaxed at innerit=1).
                // Run inner iteration.
                //
                for(i=0; i<=4; i++)
                {
                    bndl[i] = z[outerit,5+innerit*10+0+i];
                    bndu[i] = z[outerit,5+innerit*10+5+i];
                }
                minlm.minlmsetbc(state, bndl, bndu);
                logisticfitinternal(x, y, n, true, lambdav, state, replm, ref p1, ref flast);
                rep.iterationscount = rep.iterationscount+replm.iterationscount;
            }
            //
            // Middle iteration: try to fit with 5-parameter logistic model (if needed).
            //
            // We perform two attempts to fit: one with B>0, another one with B<0.
            // For PL4, these are equivalent up to transposition of A/D, but for 5PL
            // sign of B is very important.
            //
            // NOTE: results of 4PL fit are used as initial point for 5PL.
            //
            if( !is4pl )
            {
                //
                // Loosen constraints on G,
                // save constraints on A/B/D to B0/B1
                //
                bndl[4] = 0.1;
                bndu[4] = 10.0;
                b00 = bndl[0]; b01 = bndu[0];
                b10 = bndl[1]; b11 = bndu[1];
                b30 = bndl[3]; b31 = bndu[3];
                //
                // First attempt: fitting with positive B
                //
                p2[0] = p1[0]; p2[1] = p1[1]; p2[2] = p1[2]; p2[3] = p1[3]; p2[4] = p1[4];
                bndl[0] = b00; bndu[0] = b01;
                bndl[1] = b10; bndu[1] = b11;
                bndl[3] = b30; bndu[3] = b31;
                minlm.minlmsetbc(state, bndl, bndu);
                logisticfitinternal(x, y, n, false, lambdav, state, replm, ref p2, ref flast2);
                rep.iterationscount = rep.iterationscount+replm.iterationscount;
                if( (double)(flast2)<(double)(flast) )
                {
                    for(i=0; i<=4; i++) { p1[i] = p2[i]; }
                    flast = flast2;
                }
                //
                // Second attempt: fitting with negative B
                // (A/D are swapped, B negated, constraints transformed accordingly)
                //
                p2[0] = p1[3]; p2[1] = -p1[1]; p2[2] = p1[2]; p2[3] = p1[0]; p2[4] = p1[4];
                bndl[0] = b30; bndu[0] = b31;
                bndl[1] = -b11; bndu[1] = -b10;
                bndl[3] = b00; bndu[3] = b01;
                minlm.minlmsetbc(state, bndl, bndu);
                logisticfitinternal(x, y, n, false, lambdav, state, replm, ref p2, ref flast2);
                rep.iterationscount =
                    rep.iterationscount+replm.iterationscount;
                if( (double)(flast2)<(double)(flast) )
                {
                    for(i=0; i<=4; i++) { p1[i] = p2[i]; }
                    flast = flast2;
                }
            }
            //
            // Middle iteration is done, compare its results with best value
            // found so far.
            //
            if( (double)(flast)<(double)(fbest) )
            {
                a = p1[0]; b = p1[1]; c = p1[2]; d = p1[3]; g = p1[4];
                fbest = flast;
            }
        }
    }
    //
    // Calculate errors
    //
    rep.rmserror = 0;
    rep.avgerror = 0;
    rep.avgrelerror = 0;
    rep.maxerror = 0;
    k = 0;
    rss = 0.0;
    tss = 0.0;
    meany = 0.0;
    for(i=0; i<=n-1; i++) { meany = meany+y[i]; }
    meany = meany/n;
    for(i=0; i<=n-1; i++)
    {
        //
        // Calculate residual from regression
        // (at X=0 the model value is A for B>=0 and D for B<0)
        //
        if( (double)(x[i])>(double)(0) )
        {
            v = d+(a-d)/Math.Pow(1.0+Math.Pow(x[i]/c, b), g)-y[i];
        }
        else
        {
            if( (double)(b)>=(double)(0) ) { v = a-y[i]; } else { v = d-y[i]; }
        }
        //
        // Update RSS (residual sum of squares) and TSS (total sum of squares)
        // which are used to calculate coefficient of determination.
        //
        // NOTE: we use formula R2 = 1-RSS/TSS because it has nice property of
        //       being equal to 0.0 if and only if model perfectly fits data.
        //
        //       When we fit nonlinear models, there exist multiple ways of
        //       determining R2, each of them giving different results. Formula
        //       above is the most intuitive one.
        //
        rss = rss+v*v;
        tss = tss+math.sqr(y[i]-meany);
        //
        // Update errors
        //
        rep.rmserror = rep.rmserror+math.sqr(v);
        rep.avgerror = rep.avgerror+Math.Abs(v);
        if( (double)(y[i])!=(double)(0) )
        {
            rep.avgrelerror = rep.avgrelerror+Math.Abs(v/y[i]);
            k = k+1;
        }
        rep.maxerror = Math.Max(rep.maxerror, Math.Abs(v));
    }
    rep.rmserror = Math.Sqrt(rep.rmserror/n);
    rep.avgerror = rep.avgerror/n;
    if( k>0 ) { rep.avgrelerror = rep.avgrelerror/k; }
    // NOTE(review): if all Y are equal, TSS=0 and this division yields Inf/NaN -
    // confirm whether callers guarantee non-constant Y.
    rep.r2 = 1.0-rss/tss;
}
public static int Main(string[] args)
{
    // Demonstrates the FJ scheme of the Levenberg-Marquardt optimizer
    // (ALGLIB v2 API: state is passed by ref, targets given as residuals).
    //
    // Function minimized:
    //     F = (x-2*y)^2 + (x-2)^2 + (y-1)^2
    // written as three squared residuals; the exact solution is (2,1).
    minlm.minlmstate state = new minlm.minlmstate();
    minlm.minlmreport rep = new minlm.minlmreport();

    // Random starting point in [-0.5,+0.5]^2.
    double[] s = new double[2];
    s[0] = AP.Math.RandomReal()-0.5;
    s[1] = AP.Math.RandomReal()-0.5;

    // 2 variables, 3 residuals; terminate when the step becomes smaller than 0.001.
    minlm.minlmcreatefj(2, 3, ref s, ref state);
    minlm.minlmsetcond(ref state, 0.0, 0.0, 0.001, 0);

    // Reverse-communication loop: on each pass fill in whichever of
    // F (needf) or residuals+Jacobian (needfij) the optimizer requests.
    while( minlm.minlmiteration(ref state) )
    {
        double u = state.x[0];
        double v = state.x[1];
        if( state.needf )
        {
            state.f = AP.Math.Sqr(u-2*v)+AP.Math.Sqr(u-2)+AP.Math.Sqr(v-1);
        }
        if( state.needfij )
        {
            // Residual vector and its constant Jacobian.
            state.fi[0] = u-2*v;
            state.fi[1] = u-2;
            state.fi[2] = v-1;
            state.j[0,0] = 1;
            state.j[0,1] = -2;
            state.j[1,0] = 1;
            state.j[1,1] = 0;
            state.j[2,0] = 0;
            state.j[2,1] = 1;
        }
    }
    minlm.minlmresults(ref state, ref s, ref rep);

    //
    // output results
    //
    System.Console.Write("X = ");
    System.Console.Write("{0,4:F2}",s[0]);
    System.Console.Write(" (correct value - 2.00)");
    System.Console.WriteLine();
    System.Console.Write("Y = ");
    System.Console.Write("{0,4:F2}",s[1]);
    System.Console.Write(" (correct value - 1.00)");
    System.Console.WriteLine();
    System.Console.Write("TerminationType = ");
    System.Console.Write("{0,0:d}",rep.terminationtype);
    System.Console.Write(" (should be 2 - stopping when step is small enough)");
    System.Console.WriteLine();
    System.Console.Write("NFunc = ");
    System.Console.Write("{0,0:d}",rep.nfunc);
    System.Console.WriteLine();
    System.Console.Write("NJac = ");
    System.Console.Write("{0,0:d}",rep.njac);
    System.Console.WriteLine();
    System.Console.Write("NGrad = ");
    System.Console.Write("{0,0:d}",rep.ngrad);
    System.Console.WriteLine();
    System.Console.Write("NHess = ");
    System.Console.Write("{0,0:d}",rep.nhess);
    System.Console.WriteLine();
    return 0;
}
// Unit test for the Levenberg-Marquardt optimizer (minlm).
//
// Exercises the three creation schemes (RKind selector: 0 = FJ, 1 = FGJ,
// 2 = FGH) on several problems, plus stopping-condition, restart, report
// and step-limit behavior.  Returns true when all checks pass; prints a
// per-section report unless Silent is requested.
public static bool testminlm(bool silent)
{
    bool result = new bool();
    bool waserrors = new bool();
    bool referror = new bool();
    bool lin1error = new bool();
    bool lin2error = new bool();
    bool eqerror = new bool();
    bool converror = new bool();
    bool scerror = new bool();
    bool restartserror = new bool();
    bool othererrors = new bool();
    int rkind = 0;
    int ckind = 0;
    double epsf = 0;
    double epsx = 0;
    double epsg = 0;
    int maxits = 0;
    int n = 0;
    int m = 0;
    double[] x = new double[0];
    double[] xe = new double[0];
    double[] b = new double[0];
    double[] xlast = new double[0];
    int i = 0;
    int j = 0;
    int k = 0;
    double v = 0;
    double s = 0;
    double stpmax = 0;
    double[,] a = new double[0,0];
    double fprev = 0;
    double xprev = 0;
    minlm.minlmstate state = new minlm.minlmstate();
    minlm.minlmreport rep = new minlm.minlmreport();
    int i_ = 0;
    waserrors = false;
    referror = false;
    lin1error = false;
    lin2error = false;
    eqerror = false;
    converror = false;
    scerror = false;
    othererrors = false;
    restartserror = false;

    //
    // Reference problem.
    // RKind is an algorithm selector:
    // * 0 = FJ
    // * 1 = FGJ
    // * 2 = FGH
    //
    x = new double[2+1];
    n = 3;
    m = 3;
    for(rkind=0; rkind<=2; rkind++)
    {
        // random start in [-50,+50]^3
        x[0] = 100*math.randomreal()-50;
        x[1] = 100*math.randomreal()-50;
        x[2] = 100*math.randomreal()-50;
        if( rkind==0 )
        {
            minlm.minlmcreatefj(n, m, x, state);
        }
        if( rkind==1 )
        {
            minlm.minlmcreatefgj(n, m, x, state);
        }
        if( rkind==2 )
        {
            minlm.minlmcreatefgh(n, x, state);
        }
        while( minlm.minlmiteration(state) )
        {
            //
            // (x-2)^2 + y^2 + (z-x)^2
            //
            // NOTE(review): F is assigned unconditionally here (no Need* guard),
            // unlike the guarded gradient/Jacobian/Hessian assignments below.
            state.f = math.sqr(state.x[0]-2)+math.sqr(state.x[1])+math.sqr(state.x[2]-state.x[0]);
            if( state.needfg | state.needfgh )
            {
                state.g[0] = 2*(state.x[0]-2)+2*(state.x[0]-state.x[2]);
                state.g[1] = 2*state.x[1];
                state.g[2] = 2*(state.x[2]-state.x[0]);
            }
            if( state.needfij )
            {
                state.fi[0] = state.x[0]-2;
                state.fi[1] = state.x[1];
                state.fi[2] = state.x[2]-state.x[0];
                state.j[0,0] = 1;
                state.j[0,1] = 0;
                state.j[0,2] = 0;
                state.j[1,0] = 0;
                state.j[1,1] = 1;
                state.j[1,2] = 0;
                state.j[2,0] = -1;
                state.j[2,1] = 0;
                state.j[2,2] = 1;
            }
            if( state.needfgh )
            {
                state.h[0,0] = 4;
                state.h[0,1] = 0;
                state.h[0,2] = -2;
                state.h[1,0] = 0;
                state.h[1,1] = 2;
                state.h[1,2] = 0;
                state.h[2,0] = -2;
                state.h[2,1] = 0;
                state.h[2,2] = 2;
            }
            // verify that request flags are consistent with the chosen scheme
            scerror = scerror | !rkindvsstatecheck(rkind, state);
        }
        minlm.minlmresults(state, ref x, rep);
        // solution of the reference problem is (2,0,2)
        referror = (((referror | rep.terminationtype<=0) | (double)(Math.Abs(x[0]-2))>(double)(0.001)) | (double)(Math.Abs(x[1]))>(double)(0.001)) | (double)(Math.Abs(x[2]-2))>(double)(0.001);
    }

    //
    // 1D problem #1
    //
    // minimize sin(x)^2; any multiple of Pi is acceptable.
    for(rkind=0; rkind<=2; rkind++)
    {
        x = new double[1];
        n = 1;
        m = 1;
        x[0] = 100*math.randomreal()-50;
        if( rkind==0 )
        {
            minlm.minlmcreatefj(n, m, x, state);
        }
        if( rkind==1 )
        {
            minlm.minlmcreatefgj(n, m, x, state);
        }
        if( rkind==2 )
        {
            minlm.minlmcreatefgh(n, x, state);
        }
        while( minlm.minlmiteration(state) )
        {
            state.f = math.sqr(Math.Sin(state.x[0]));
            if( state.needfg | state.needfgh )
            {
                state.g[0] = 2*Math.Sin(state.x[0])*Math.Cos(state.x[0]);
            }
            if( state.needfij )
            {
                state.fi[0] = Math.Sin(state.x[0]);
                state.j[0,0] = Math.Cos(state.x[0]);
            }
            if( state.needfgh )
            {
                state.h[0,0] = 2*(Math.Cos(state.x[0])*Math.Cos(state.x[0])-Math.Sin(state.x[0])*Math.Sin(state.x[0]));
            }
            scerror = scerror | !rkindvsstatecheck(rkind, state);
        }
        minlm.minlmresults(state, ref x, rep);
        // NOTE(review): plain assignment (not OR-accumulation) — only the last
        // RKind's outcome is retained; verify whether accumulation was intended.
        lin1error = rep.terminationtype<=0 | (double)(Math.Abs(x[0]/Math.PI-(int)Math.Round(x[0]/Math.PI)))>(double)(0.001);
    }

    //
    // Linear equations: test normal optimization and optimization with restarts
    //
    for(n=1; n<=10; n++)
    {
        //
        // Prepare task: random well-conditioned A, random exact solution XE, B = A*XE
        //
        matgen.rmatrixrndcond(n, 100, ref a);
        x = new double[n];
        xe = new double[n];
        b = new double[n];
        for(i=0; i<=n-1; i++)
        {
            xe[i] = 2*math.randomreal()-1;
        }
        for(i=0; i<=n-1; i++)
        {
            v = 0.0;
            for(i_=0; i_<=n-1;i_++)
            {
                v += a[i,i_]*xe[i_];
            }
            b[i] = v;
        }
        //
        // Test different RKind
        //
        for(rkind=0; rkind<=2; rkind++)
        {
            //
            // Solve task (first attempt)
            //
            for(i=0; i<=n-1; i++)
            {
                x[i] = 2*math.randomreal()-1;
            }
            if( rkind==0 )
            {
                minlm.minlmcreatefj(n, n, x, state);
            }
            if( rkind==1 )
            {
                minlm.minlmcreatefgj(n, n, x, state);
            }
            if( rkind==2 )
            {
                minlm.minlmcreatefgh(n, x, state);
            }
            while( minlm.minlmiteration(state) )
            {
                // axmb() fills the requested F/G/H/Fi/J for |A*x-b|^2
                axmb(state, a, b, n);
                scerror = scerror | !rkindvsstatecheck(rkind, state);
            }
            minlm.minlmresults(state, ref x, rep);
            eqerror = eqerror | rep.terminationtype<=0;
            for(i=0; i<=n-1; i++)
            {
                eqerror = eqerror | (double)(Math.Abs(x[i]-xe[i]))>(double)(0.001);
            }
            //
            // Now we try to restart algorithm from new point
            //
            for(i=0; i<=n-1; i++)
            {
                x[i] = 2*math.randomreal()-1;
            }
            minlm.minlmrestartfrom(state, x);
            while( minlm.minlmiteration(state) )
            {
                axmb(state, a, b, n);
                scerror = scerror | !rkindvsstatecheck(rkind, state);
            }
            minlm.minlmresults(state, ref x, rep);
            restartserror = restartserror | rep.terminationtype<=0;
            for(i=0; i<=n-1; i++)
            {
                restartserror = restartserror | (double)(Math.Abs(x[i]-xe[i]))>(double)(0.001);
            }
        }
    }

    //
    // Testing convergence properties using
    // different optimizer types and different conditions
    //
    // CKind selects the single active stopping criterion:
    // 0 = EpsF, 1 = EpsX, 2 = MaxIts, 3 = EpsG; termination type must match.
    s = 100;
    for(rkind=0; rkind<=2; rkind++)
    {
        for(ckind=0; ckind<=3; ckind++)
        {
            epsg = 0;
            epsf = 0;
            epsx = 0;
            maxits = 0;
            if( ckind==0 )
            {
                epsf = 0.0001;
            }
            if( ckind==1 )
            {
                epsx = 0.0001;
            }
            if( ckind==2 )
            {
                maxits = 2;
            }
            if( ckind==3 )
            {
                epsg = 0.0001;
            }
            x = new double[3];
            n = 3;
            m = 3;
            for(i=0; i<=2; i++)
            {
                x[i] = 6;
            }
            if( rkind==0 )
            {
                minlm.minlmcreatefj(n, m, x, state);
            }
            if( rkind==1 )
            {
                minlm.minlmcreatefgj(n, m, x, state);
            }
            if( rkind==2 )
            {
                minlm.minlmcreatefgh(n, x, state);
            }
            minlm.minlmsetcond(state, epsg, epsf, epsx, maxits);
            while( minlm.minlmiteration(state) )
            {
                // F = s*(exp(x)-2)^2 + (y^2+1)^2 + (z-x)^2, minimum near (ln 2, 0, ln 2)
                if( (state.needf | state.needfg) | state.needfgh )
                {
                    state.f = s*math.sqr(Math.Exp(state.x[0])-2)+math.sqr(math.sqr(state.x[1])+1)+math.sqr(state.x[2]-state.x[0]);
                }
                if( state.needfg | state.needfgh )
                {
                    state.g[0] = s*2*(Math.Exp(state.x[0])-2)*Math.Exp(state.x[0])+2*(state.x[0]-state.x[2]);
                    state.g[1] = 2*(math.sqr(state.x[1])+1)*2*state.x[1];
                    state.g[2] = 2*(state.x[2]-state.x[0]);
                }
                if( state.needfgh )
                {
                    state.h[0,0] = s*(4*math.sqr(Math.Exp(state.x[0]))-4*Math.Exp(state.x[0]))+2;
                    state.h[0,1] = 0;
                    state.h[0,2] = -2;
                    state.h[1,0] = 0;
                    state.h[1,1] = 12*math.sqr(state.x[1])+4;
                    state.h[1,2] = 0;
                    state.h[2,0] = -2;
                    state.h[2,1] = 0;
                    state.h[2,2] = 2;
                }
                if( state.needfij )
                {
                    state.fi[0] = s*(Math.Exp(state.x[0])-2);
                    state.j[0,0] = s*Math.Exp(state.x[0]);
                    state.j[0,1] = 0;
                    state.j[0,2] = 0;
                    state.fi[1] = math.sqr(state.x[1])+1;
                    state.j[1,0] = 0;
                    state.j[1,1] = 2*state.x[1];
                    state.j[1,2] = 0;
                    state.fi[2] = state.x[2]-state.x[0];
                    state.j[2,0] = -1;
                    state.j[2,1] = 0;
                    state.j[2,2] = 1;
                }
                scerror = scerror | !rkindvsstatecheck(rkind, state);
            }
            minlm.minlmresults(state, ref x, rep);
            if( ckind==0 )
            {
                // EpsF-based stopping: TerminationType must be 1
                converror = converror | (double)(Math.Abs(x[0]-Math.Log(2)))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[1]))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[2]-Math.Log(2)))>(double)(0.05);
                converror = converror | rep.terminationtype!=1;
            }
            if( ckind==1 )
            {
                // EpsX-based stopping: TerminationType must be 2
                converror = converror | (double)(Math.Abs(x[0]-Math.Log(2)))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[1]))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[2]-Math.Log(2)))>(double)(0.05);
                converror = converror | rep.terminationtype!=2;
            }
            if( ckind==2 )
            {
                // MaxIts-based stopping: TerminationType 5, exactly MaxIts iterations
                converror = (converror | rep.terminationtype!=5) | rep.iterationscount!=maxits;
            }
            if( ckind==3 )
            {
                // EpsG-based stopping: TerminationType must be 4
                converror = converror | (double)(Math.Abs(x[0]-Math.Log(2)))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[1]))>(double)(0.05);
                converror = converror | (double)(Math.Abs(x[2]-Math.Log(2)))>(double)(0.05);
                converror = converror | rep.terminationtype!=4;
            }
        }
    }

    //
    // Other properties:
    // 1. test reports (F should form monotone sequence)
    // 2. test maximum step
    //
    for(rkind=0; rkind<=2; rkind++)
    {
        //
        // reports:
        // * check that first report is initial point
        // * check that F is monotone decreasing
        // * check that last report is final result
        //
        n = 3;
        m = 3;
        s = 100;
        x = new double[n];
        xlast = new double[n];
        for(i=0; i<=n-1; i++)
        {
            x[i] = 6;
        }
        if( rkind==0 )
        {
            minlm.minlmcreatefj(n, m, x, state);
        }
        if( rkind==1 )
        {
            minlm.minlmcreatefgj(n, m, x, state);
        }
        if( rkind==2 )
        {
            minlm.minlmcreatefgh(n, x, state);
        }
        // limit to 4 iterations and request X-reports
        minlm.minlmsetcond(state, 0, 0, 0, 4);
        minlm.minlmsetxrep(state, true);
        fprev = math.maxrealnumber;
        while( minlm.minlmiteration(state) )
        {
            if( (state.needf | state.needfg) | state.needfgh )
            {
                state.f = s*math.sqr(Math.Exp(state.x[0])-2)+math.sqr(state.x[1])+math.sqr(state.x[2]-state.x[0]);
            }
            if( state.needfg | state.needfgh )
            {
                state.g[0] = s*2*(Math.Exp(state.x[0])-2)*Math.Exp(state.x[0])+2*(state.x[0]-state.x[2]);
                state.g[1] = 2*state.x[1];
                state.g[2] = 2*(state.x[2]-state.x[0]);
            }
            if( state.needfgh )
            {
                state.h[0,0] = s*(4*math.sqr(Math.Exp(state.x[0]))-4*Math.Exp(state.x[0]))+2;
                state.h[0,1] = 0;
                state.h[0,2] = -2;
                state.h[1,0] = 0;
                state.h[1,1] = 2;
                state.h[1,2] = 0;
                state.h[2,0] = -2;
                state.h[2,1] = 0;
                state.h[2,2] = 2;
            }
            if( state.needfij )
            {
                // residuals scaled by sqrt(s) so that sum of squares equals F above
                state.fi[0] = Math.Sqrt(s)*(Math.Exp(state.x[0])-2);
                state.j[0,0] = Math.Sqrt(s)*Math.Exp(state.x[0]);
                state.j[0,1] = 0;
                state.j[0,2] = 0;
                state.fi[1] = state.x[1];
                state.j[1,0] = 0;
                state.j[1,1] = 1;
                state.j[1,2] = 0;
                state.fi[2] = state.x[2]-state.x[0];
                state.j[2,0] = -1;
                state.j[2,1] = 0;
                state.j[2,2] = 1;
            }
            scerror = scerror | !rkindvsstatecheck(rkind, state);
            if( state.xupdated )
            {
                // reported F must not increase
                othererrors = othererrors | (double)(state.f)>(double)(fprev);
                if( (double)(fprev)==(double)(math.maxrealnumber) )
                {
                    // very first report must coincide with the initial point
                    for(i=0; i<=n-1; i++)
                    {
                        othererrors = othererrors | (double)(state.x[i])!=(double)(x[i]);
                    }
                }
                fprev = state.f;
                for(i_=0; i_<=n-1;i_++)
                {
                    xlast[i_] = state.x[i_];
                }
            }
        }
        minlm.minlmresults(state, ref x, rep);
        // last report must coincide with the final result
        for(i=0; i<=n-1; i++)
        {
            othererrors = othererrors | (double)(x[i])!=(double)(xlast[i]);
        }
    }
    //
    // maximum step: minimize cosh-like function starting far from minimum,
    // every step must respect StpMax (up to rounding)
    //
    n = 1;
    x = new double[n];
    x[0] = 100;
    stpmax = 0.05+0.05*math.randomreal();
    minlm.minlmcreatefgh(n, x, state);
    minlm.minlmsetcond(state, 1.0E-9, 0, 0, 0);
    minlm.minlmsetstpmax(state, stpmax);
    minlm.minlmsetxrep(state, true);
    xprev = x[0];
    while( minlm.minlmiteration(state) )
    {
        if( (state.needf | state.needfg) | state.needfgh )
        {
            state.f = Math.Exp(state.x[0])+Math.Exp(-state.x[0]);
        }
        if( state.needfg | state.needfgh )
        {
            state.g[0] = Math.Exp(state.x[0])-Math.Exp(-state.x[0]);
        }
        if( state.needfgh )
        {
            state.h[0,0] = Math.Exp(state.x[0])+Math.Exp(-state.x[0]);
        }
        othererrors = othererrors | (double)(Math.Abs(state.x[0]-xprev))>(double)((1+Math.Sqrt(math.machineepsilon))*stpmax);
        if( state.xupdated )
        {
            xprev = state.x[0];
        }
    }

    //
    // end
    //
    // NOTE(review): lin2error is initialized but never set by any test above,
    // although it participates in WasErrors and is reported below — confirm
    // whether "1-D PROBLEM #2" was removed intentionally.
    waserrors = ((((((referror | lin1error) | lin2error) | eqerror) | converror) | scerror) | othererrors) | restartserror;
    if( !silent )
    {
        System.Console.Write("TESTING LEVENBERG-MARQUARDT OPTIMIZATION");
        System.Console.WriteLine();
        System.Console.Write("REFERENCE PROBLEM: ");
        if( referror )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        System.Console.Write("1-D PROBLEM #1: ");
        if( lin1error )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        System.Console.Write("1-D PROBLEM #2: ");
        if( lin2error )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        System.Console.Write("LINEAR EQUATIONS: ");
        if( eqerror )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        System.Console.Write("RESTARTS: ");
        if( restartserror )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        System.Console.Write("CONVERGENCE PROPERTIES: ");
        if( converror )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        System.Console.Write("STATE FIELDS CONSISTENCY: ");
        if( scerror )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        System.Console.Write("OTHER PROPERTIES: ");
        if( othererrors )
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        if( waserrors )
        {
            System.Console.Write("TEST FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("TEST PASSED");
            System.Console.WriteLine();
        }
        System.Console.WriteLine();
        System.Console.WriteLine();
    }
    result = !waserrors;
    return result;
}
public static int Main(string[] args)
{
    // Demonstrates polynomial least-squares fitting with the FJ scheme.
    //
    // Data points:
    //     xi are 100 equidistant points in [-1,+1]
    //     yi = exp(xi) - sin(xi) - xi^3/3
    //
    // Function minimized over (a,b,c):
    //     F(a,b,c) = sum_i (a + b*xi + c*xi^2 - yi)^2
    minlm.minlmstate state = new minlm.minlmstate();
    minlm.minlmreport rep = new minlm.minlmreport();

    // Random starting coefficients in [-0.5,+0.5].
    int n = 3;
    double[] s = new double[n];
    for (int k = 0; k < n; k++)
    {
        s[k] = AP.Math.RandomReal() - 0.5;
    }

    // Build the data set being fitted.
    int m = 100;
    double[] x = new double[m];
    double[] y = new double[m];
    for (int k = 0; k < m; k++)
    {
        x[k] = (double)(2 * k) / ((double)(m - 1)) - 1;
        y[k] = Math.Exp(x[k]) - Math.Sin(x[k]) - x[k] * x[k] * x[k] / 3;
    }

    // S holds the starting point; stop when the step drops below 0.001.
    minlm.minlmcreatefj(n, m, ref s, ref state);
    minlm.minlmsetcond(ref state, 0.0, 0.0, 0.001, 0);

    // Reverse-communication loop: supply F and/or residuals+Jacobian on demand.
    // Coefficients live in State.X: a = X[0], b = X[1], c = X[2].
    while (minlm.minlmiteration(ref state))
    {
        if (state.needf)
        {
            state.f = 0;
        }
        for (int k = 0; k < m; k++)
        {
            // k-th residual: model value minus target.
            double r = state.x[0] + state.x[1] * x[k] + state.x[2] * AP.Math.Sqr(x[k]) - y[k];
            if (state.needf)
            {
                // F is the sum of squared residuals.
                state.f += AP.Math.Sqr(r);
            }
            if (state.needfij)
            {
                // Residual and its partials dr/da, dr/db, dr/dc.
                state.fi[k] = r;
                state.j[k, 0] = 1;
                state.j[k, 1] = x[k];
                state.j[k, 2] = AP.Math.Sqr(x[k]);
            }
        }
    }
    minlm.minlmresults(ref state, ref s, ref rep);

    //
    // output results
    //
    System.Console.Write("A = ");
    System.Console.Write("{0,4:F2}", s[0]);
    System.Console.WriteLine();
    System.Console.Write("B = ");
    System.Console.Write("{0,4:F2}", s[1]);
    System.Console.WriteLine();
    System.Console.Write("C = ");
    System.Console.Write("{0,4:F2}", s[2]);
    System.Console.WriteLine();
    System.Console.Write("TerminationType = ");
    System.Console.Write("{0,0:d}", rep.terminationtype);
    System.Console.Write(" (should be 2 - stopping when step is small enough)");
    System.Console.WriteLine();
    return 0;
}