public static bool testmlpe(bool silent)
{
    bool result = false;
    bool waserrors = false;
    int passcount = 0;
    int maxn = 0;
    int maxhid = 0;
    int nf = 0;
    int nhid = 0;
    int nl = 0;
    int nhid1 = 0;
    int nhid2 = 0;
    int ec = 0;
    int nkind = 0;
    int algtype = 0;
    int tasktype = 0;
    int pass = 0;
    mlpe.mlpensemble ensemble = new mlpe.mlpensemble();
    mlptrain.mlpreport rep = new mlptrain.mlpreport();
    mlptrain.mlpcvreport oobrep = new mlptrain.mlpcvreport();
    double[,] xy = new double[0,0];
    int i = 0;
    int j = 0;
    int nin = 0;
    int nout = 0;
    int npoints = 0;
    double e = 0;
    int info = 0;
    int nless = 0;
    int nall = 0;
    int nclasses = 0;
    bool allsame = false;
    bool inferrors = false;
    bool procerrors = false;
    bool trnerrors = false;
    
    passcount = 10;
    maxn = 4;
    maxhid = 4;
    
    //
    // General MLP ensembles tests
    //
    for(nf=1; nf<=maxn; nf++)
    {
        for(nl=1; nl<=maxn; nl++)
        {
            for(nhid1=0; nhid1<=maxhid; nhid1++)
            {
                for(nhid2=0; nhid2<=0; nhid2++)
                {
                    for(nkind=0; nkind<=3; nkind++)
                    {
                        for(ec=1; ec<=3; ec++)
                        {
                            //
                            // Skip meaningless parameter combinations
                            //
                            if( nkind==1 & nl<2 )
                            {
                                continue;
                            }
                            if( nhid1==0 & nhid2!=0 )
                            {
                                continue;
                            }
                            
                            //
                            // Tests
                            //
                            testinformational(nkind, nf, nhid1, nhid2, nl, ec, passcount, ref inferrors);
                            testprocessing(nkind, nf, nhid1, nhid2, nl, ec, passcount, ref procerrors);
                        }
                    }
                }
            }
        }
    }
    
    //
    // Network training must reduce error.
    // Tested on random regression and classification tasks.
    //
    nin = 3;
    nout = 2;
    nhid = 5;
    npoints = 100;
    nless = 0;
    nall = 0;
    for(pass=1; pass<=10; pass++)
    {
        for(algtype=0; algtype<=1; algtype++)
        {
            for(tasktype=0; tasktype<=1; tasktype++)
            {
                if( tasktype==0 )
                {
                    //
                    // Random regression task
                    //
                    xy = new double[npoints, nin+nout];
                    for(i=0; i<=npoints-1; i++)
                    {
                        for(j=0; j<=nin+nout-1; j++)
                        {
                            xy[i,j] = 2*AP.Math.RandomReal()-1;
                        }
                    }
                    mlpe.mlpecreate1(nin, nhid, nout, 1+AP.Math.RandomInteger(3), ref ensemble);
                }
                else
                {
                    //
                    // Random classification task
                    //
                    xy = new double[npoints, nin+1];
                    nclasses = 2+AP.Math.RandomInteger(2);
                    for(i=0; i<=npoints-1; i++)
                    {
                        for(j=0; j<=nin-1; j++)
                        {
                            xy[i,j] = 2*AP.Math.RandomReal()-1;
                        }
                        xy[i,nin] = AP.Math.RandomInteger(nclasses);
                    }
                    mlpe.mlpecreatec1(nin, nhid, nclasses, 1+AP.Math.RandomInteger(3), ref ensemble);
                }
                e = mlpe.mlpermserror(ref ensemble, ref xy, npoints);
                if( algtype==0 )
                {
                    mlpe.mlpebagginglm(ref ensemble, ref xy, npoints, 0.001, 1, ref info, ref rep, ref oobrep);
                }
                else
                {
                    mlpe.mlpebagginglbfgs(ref ensemble, ref xy, npoints, 0.001, 1, 0.01, 0, ref info, ref rep, ref oobrep);
                }
                if( info<0 )
                {
                    trnerrors = true;
                }
                else
                {
                    if( (double)(mlpe.mlpermserror(ref ensemble, ref xy, npoints))<(double)(e) )
                    {
                        nless = nless+1;
                    }
                }
                nall = nall+1;
            }
        }
    }
    
    //
    // Training must reduce error in at least 70% of the cases
    //
    trnerrors = trnerrors | ((double)(nall-nless)>(double)(0.3*nall));
    
    //
    // Final report
    //
    waserrors = inferrors | procerrors | trnerrors;
    if( !silent )
    {
        System.Console.Write("MLP ENSEMBLE TEST");
        System.Console.WriteLine();
        System.Console.Write("INFORMATIONAL FUNCTIONS: ");
        if( !inferrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        System.Console.Write("BASIC PROCESSING: ");
        if( !procerrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        System.Console.Write("TRAINING: ");
        if( !trnerrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        if( waserrors )
        {
            System.Console.Write("TEST SUMMARY: FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("TEST SUMMARY: PASSED");
            System.Console.WriteLine();
        }
        System.Console.WriteLine();
        System.Console.WriteLine();
    }
    result = !waserrors;
    return result;
}
/*************************************************************************
Internal bagging subroutine.

  -- ALGLIB --
     Copyright 19.02.2009 by Bochkanov Sergey
*************************************************************************/
private static void mlpebagginginternal(ref mlpensemble ensemble,
    ref double[,] xy,
    int npoints,
    double decay,
    int restarts,
    double wstep,
    int maxits,
    bool lmalgorithm,
    ref int info,
    ref mlptrain.mlpreport rep,
    ref mlptrain.mlpcvreport ooberrors)
{
    double[,] xys = new double[0,0];
    bool[] s = new bool[0];
    double[,] oobbuf = new double[0,0];
    int[] oobcntbuf = new int[0];
    double[] x = new double[0];
    double[] y = new double[0];
    double[] dy = new double[0];
    double[] dsbuf = new double[0];
    int nin = 0;
    int nout = 0;
    int ccnt = 0;
    int pcnt = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    double v = 0;
    mlptrain.mlpreport tmprep = new mlptrain.mlpreport();
    mlpbase.multilayerperceptron network = new mlpbase.multilayerperceptron();
    int i_ = 0;
    int i1_ = 0;
    
    //
    // Test inputs
    //
    if( !lmalgorithm & (double)(wstep)==(double)(0) & maxits==0 )
    {
        info = -8;
        return;
    }
    if( npoints<=0 | restarts<1 | (double)(wstep)<(double)(0) | maxits<0 )
    {
        info = -1;
        return;
    }
    if( ensemble.issoftmax )
    {
        for(i=0; i<=npoints-1; i++)
        {
            if( (int)Math.Round(xy[i,ensemble.nin])<0 | (int)Math.Round(xy[i,ensemble.nin])>=ensemble.nout )
            {
                info = -2;
                return;
            }
        }
    }
    
    //
    // Allocate temporaries
    //
    info = 2;
    rep.ngrad = 0;
    rep.nhess = 0;
    rep.ncholesky = 0;
    ooberrors.relclserror = 0;
    ooberrors.avgce = 0;
    ooberrors.rmserror = 0;
    ooberrors.avgerror = 0;
    ooberrors.avgrelerror = 0;
    nin = ensemble.nin;
    nout = ensemble.nout;
    if( ensemble.issoftmax )
    {
        ccnt = nin+1;
        pcnt = nin;
    }
    else
    {
        ccnt = nin+nout;
        pcnt = nin+nout;
    }
    xys = new double[npoints, ccnt];
    s = new bool[npoints];
    oobbuf = new double[npoints, nout];
    oobcntbuf = new int[npoints];
    x = new double[nin];
    y = new double[nout];
    if( ensemble.issoftmax )
    {
        dy = new double[1];
    }
    else
    {
        dy = new double[nout];
    }
    for(i=0; i<=npoints-1; i++)
    {
        for(j=0; j<=nout-1; j++)
        {
            oobbuf[i,j] = 0;
        }
    }
    for(i=0; i<=npoints-1; i++)
    {
        oobcntbuf[i] = 0;
    }
    mlpbase.mlpunserialize(ref ensemble.serializedmlp, ref network);
    
    //
    // Main bagging cycle
    //
    for(k=0; k<=ensemble.ensemblesize-1; k++)
    {
        //
        // Prepare dataset: bootstrap sample drawn with replacement;
        // S[j] marks points that made it into the sample.
        //
        for(i=0; i<=npoints-1; i++)
        {
            s[i] = false;
        }
        for(i=0; i<=npoints-1; i++)
        {
            j = AP.Math.RandomInteger(npoints);
            s[j] = true;
            for(i_=0; i_<=ccnt-1; i_++)
            {
                xys[i,i_] = xy[j,i_];
            }
        }
        
        //
        // Train
        //
        if( lmalgorithm )
        {
            mlptrain.mlptrainlm(ref network, ref xys, npoints, decay, restarts, ref info, ref tmprep);
        }
        else
        {
            mlptrain.mlptrainlbfgs(ref network, ref xys, npoints, decay, restarts, wstep, maxits, ref info, ref tmprep);
        }
        if( info<0 )
        {
            return;
        }
        
        //
        // Save results
        //
        rep.ngrad = rep.ngrad+tmprep.ngrad;
        rep.nhess = rep.nhess+tmprep.nhess;
        rep.ncholesky = rep.ncholesky+tmprep.ncholesky;
        i1_ = (0) - (k*ensemble.wcount);
        for(i_=k*ensemble.wcount; i_<=(k+1)*ensemble.wcount-1; i_++)
        {
            ensemble.weights[i_] = network.weights[i_+i1_];
        }
        i1_ = (0) - (k*pcnt);
        for(i_=k*pcnt; i_<=(k+1)*pcnt-1; i_++)
        {
            ensemble.columnmeans[i_] = network.columnmeans[i_+i1_];
        }
        i1_ = (0) - (k*pcnt);
        for(i_=k*pcnt; i_<=(k+1)*pcnt-1; i_++)
        {
            ensemble.columnsigmas[i_] = network.columnsigmas[i_+i1_];
        }
        
        //
        // Accumulate OOB predictions: every point left out of this
        // network's bootstrap sample is processed by it.
        //
        for(i=0; i<=npoints-1; i++)
        {
            if( !s[i] )
            {
                for(i_=0; i_<=nin-1; i_++)
                {
                    x[i_] = xy[i,i_];
                }
                mlpbase.mlpprocess(ref network, ref x, ref y);
                for(i_=0; i_<=nout-1; i_++)
                {
                    oobbuf[i,i_] = oobbuf[i,i_]+y[i_];
                }
                oobcntbuf[i] = oobcntbuf[i]+1;
            }
        }
    }
    
    //
    // Compute OOB error estimates: compare averaged OOB predictions
    // with the true targets.
    //
    if( ensemble.issoftmax )
    {
        bdss.dserrallocate(nout, ref dsbuf);
    }
    else
    {
        bdss.dserrallocate(-nout, ref dsbuf);
    }
    for(i=0; i<=npoints-1; i++)
    {
        if( oobcntbuf[i]!=0 )
        {
            v = (double)(1)/(double)(oobcntbuf[i]);
            for(i_=0; i_<=nout-1; i_++)
            {
                y[i_] = v*oobbuf[i,i_];
            }
            if( ensemble.issoftmax )
            {
                dy[0] = xy[i,nin];
            }
            else
            {
                //
                // DY := true target values (targets are not averaged)
                //
                i1_ = (nin) - (0);
                for(i_=0; i_<=nout-1; i_++)
                {
                    dy[i_] = xy[i,i_+i1_];
                }
            }
            bdss.dserraccumulate(ref dsbuf, ref y, ref dy);
        }
    }
    bdss.dserrfinish(ref dsbuf);
    ooberrors.relclserror = dsbuf[0];
    ooberrors.avgce = dsbuf[1];
    ooberrors.rmserror = dsbuf[2];
    ooberrors.avgerror = dsbuf[3];
    ooberrors.avgrelerror = dsbuf[4];
}
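/*************************************************************************
Usage sketch for the bagging trainers built on the subroutine above. This
method is NOT part of the original ALGLIB sources: its name and parameter
values (ensemble size 5, Decay=0.001, Restarts=1) are illustrative
choices, and the calls are written as if made from outside the mlpe
class. Only routines already used elsewhere in this file are referenced
(mlpecreate1, mlpebagginglm, mlpermserror).
*************************************************************************/
private static void mlpebaggingusagesketch()
{
    mlpe.mlpensemble ensemble = new mlpe.mlpensemble();
    mlptrain.mlpreport rep = new mlptrain.mlpreport();
    mlptrain.mlpcvreport ooberrors = new mlptrain.mlpcvreport();
    double[,] xy;
    int nin = 3;
    int nhid = 5;
    int nout = 2;
    int npoints = 100;
    int info = 0;
    int i = 0;
    int j = 0;
    
    //
    // Random regression dataset: first NIn columns are inputs,
    // last NOut columns are targets.
    //
    xy = new double[npoints, nin+nout];
    for(i=0; i<=npoints-1; i++)
    {
        for(j=0; j<=nin+nout-1; j++)
        {
            xy[i,j] = 2*AP.Math.RandomReal()-1;
        }
    }
    
    //
    // Ensemble of 5 one-hidden-layer networks, trained by bagging with
    // Levenberg-Marquardt as the base algorithm. OOBErrors receives the
    // out-of-bag estimates computed by mlpebagginginternal.
    //
    mlpe.mlpecreate1(nin, nhid, nout, 5, ref ensemble);
    mlpe.mlpebagginglm(ref ensemble, ref xy, npoints, 0.001, 1, ref info, ref rep, ref ooberrors);
    if( info<0 )
    {
        return;
    }
    System.Console.Write("Train RMS: "+mlpe.mlpermserror(ref ensemble, ref xy, npoints)+", OOB RMS: "+ooberrors.rmserror);
    System.Console.WriteLine();
}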
public static bool testmlp(bool silent)
{
    bool result = false;
    bool waserrors = false;
    int passcount = 0;
    int maxn = 0;
    int maxhid = 0;
    int info = 0;
    int nf = 0;
    int nhid = 0;
    int nl = 0;
    int nhid1 = 0;
    int nhid2 = 0;
    int nkind = 0;
    int i = 0;
    int j = 0;
    mlpbase.multilayerperceptron network = new mlpbase.multilayerperceptron();
    mlpbase.multilayerperceptron network2 = new mlpbase.multilayerperceptron();
    mlptrain.mlpreport rep = new mlptrain.mlpreport();
    mlptrain.mlpcvreport cvrep = new mlptrain.mlpcvreport();
    int ncount = 0;
    double[,] xy = new double[0,0];
    double[,] valxy = new double[0,0];
    int ssize = 0;
    int valsize = 0;
    bool allsame = false;
    bool inferrors = false;
    bool procerrors = false;
    bool graderrors = false;
    bool hesserrors = false;
    bool trnerrors = false;
    
    passcount = 10;
    maxn = 4;
    maxhid = 4;
    
    //
    // General multilayer network tests
    //
    for(nf=1; nf<=maxn; nf++)
    {
        for(nl=1; nl<=maxn; nl++)
        {
            for(nhid1=0; nhid1<=maxhid; nhid1++)
            {
                for(nhid2=0; nhid2<=0; nhid2++)
                {
                    for(nkind=0; nkind<=3; nkind++)
                    {
                        //
                        // Skip meaningless parameter combinations
                        //
                        if( nkind==1 & nl<2 )
                        {
                            continue;
                        }
                        if( nhid1==0 & nhid2!=0 )
                        {
                            continue;
                        }
                        
                        //
                        // Tests
                        //
                        testinformational(nkind, nf, nhid1, nhid2, nl, passcount, ref inferrors);
                        testprocessing(nkind, nf, nhid1, nhid2, nl, passcount, ref procerrors);
                        testgradient(nkind, nf, nhid1, nhid2, nl, passcount, ref graderrors);
                        testhessian(nkind, nf, nhid1, nhid2, nl, passcount, ref hesserrors);
                    }
                }
            }
        }
    }
    
    //
    // Test network training on simple XOR problem
    //
    xy = new double[4, 3];
    xy[0,0] = -1;
    xy[0,1] = -1;
    xy[0,2] = -1;
    xy[1,0] = +1;
    xy[1,1] = -1;
    xy[1,2] = +1;
    xy[2,0] = -1;
    xy[2,1] = +1;
    xy[2,2] = +1;
    xy[3,0] = +1;
    xy[3,1] = +1;
    xy[3,2] = -1;
    mlpbase.mlpcreate1(2, 2, 1, ref network);
    mlptrain.mlptrainlm(ref network, ref xy, 4, 0.001, 10, ref info, ref rep);
    trnerrors = trnerrors | ((double)(mlpbase.mlprmserror(ref network, ref xy, 4))>(double)(0.1));
    
    //
    // Test CV on random noisy problem
    // (the call is only checked for successful execution)
    //
    ncount = 100;
    xy = new double[ncount, 2];
    for(i=0; i<=ncount-1; i++)
    {
        xy[i,0] = 2*AP.Math.RandomReal()-1;
        xy[i,1] = AP.Math.RandomInteger(4);
    }
    mlpbase.mlpcreatec0(1, 4, ref network);
    mlptrain.mlpkfoldcvlm(ref network, ref xy, ncount, 0.001, 5, 10, ref info, ref rep, ref cvrep);
    
    //
    // Final report
    //
    waserrors = inferrors | procerrors | graderrors | hesserrors | trnerrors;
    if( !silent )
    {
        System.Console.Write("MLP TEST");
        System.Console.WriteLine();
        System.Console.Write("INFORMATIONAL FUNCTIONS: ");
        if( !inferrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        System.Console.Write("BASIC PROCESSING: ");
        if( !procerrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        System.Console.Write("GRADIENT CALCULATION: ");
        if( !graderrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        System.Console.Write("HESSIAN CALCULATION: ");
        if( !hesserrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        System.Console.Write("TRAINING: ");
        if( !trnerrors )
        {
            System.Console.Write("OK");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("FAILED");
            System.Console.WriteLine();
        }
        if( waserrors )
        {
            System.Console.Write("TEST SUMMARY: FAILED");
            System.Console.WriteLine();
        }
        else
        {
            System.Console.Write("TEST SUMMARY: PASSED");
            System.Console.WriteLine();
        }
        System.Console.WriteLine();
        System.Console.WriteLine();
    }
    result = !waserrors;
    return result;
}
/*************************************************************************
Training neural networks ensemble using early stopping.

INPUT PARAMETERS:
    Ensemble    -   model with initialized geometry
    XY          -   training set
    NPoints     -   training set size
    Decay       -   weight decay coefficient, >=0.001
    Restarts    -   restarts, >0

OUTPUT PARAMETERS:
    Ensemble    -   trained model
    Info        -   return code:
                    * -2, if there is a point with class number
                          outside of [0..NClasses-1]
                    * -1, if incorrect parameters were passed
                          (NPoints<2, Restarts<1, Decay<0)
                    *  6, if task has been solved
    Rep         -   training report

  -- ALGLIB --
     Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
public static void mlpetraines(ref mlpensemble ensemble,
    ref double[,] xy,
    int npoints,
    double decay,
    int restarts,
    ref int info,
    ref mlptrain.mlpreport rep)
{
    int i = 0;
    int k = 0;
    int ccount = 0;
    int pcount = 0;
    double[,] trnxy = new double[0,0];
    double[,] valxy = new double[0,0];
    int trnsize = 0;
    int valsize = 0;
    mlpbase.multilayerperceptron network = new mlpbase.multilayerperceptron();
    int tmpinfo = 0;
    mlptrain.mlpreport tmprep = new mlptrain.mlpreport();
    int i_ = 0;
    int i1_ = 0;
    
    //
    // Test inputs
    //
    if( npoints<2 | restarts<1 | (double)(decay)<(double)(0) )
    {
        info = -1;
        return;
    }
    if( ensemble.issoftmax )
    {
        for(i=0; i<=npoints-1; i++)
        {
            if( (int)Math.Round(xy[i,ensemble.nin])<0 | (int)Math.Round(xy[i,ensemble.nin])>=ensemble.nout )
            {
                info = -2;
                return;
            }
        }
    }
    info = 6;
    
    //
    // Allocate
    //
    if( ensemble.issoftmax )
    {
        ccount = ensemble.nin+1;
        pcount = ensemble.nin;
    }
    else
    {
        ccount = ensemble.nin+ensemble.nout;
        pcount = ensemble.nin+ensemble.nout;
    }
    trnxy = new double[npoints, ccount];
    valxy = new double[npoints, ccount];
    mlpbase.mlpunserialize(ref ensemble.serializedmlp, ref network);
    rep.ngrad = 0;
    rep.nhess = 0;
    rep.ncholesky = 0;
    
    //
    // Train networks
    //
    for(k=0; k<=ensemble.ensemblesize-1; k++)
    {
        //
        // Split set: each point goes to the training set with
        // probability 0.66, to the validation set otherwise.
        // Repeat until both sets are non-empty.
        //
        do
        {
            trnsize = 0;
            valsize = 0;
            for(i=0; i<=npoints-1; i++)
            {
                if( (double)(AP.Math.RandomReal())<(double)(0.66) )
                {
                    //
                    // Assign sample to training set
                    //
                    for(i_=0; i_<=ccount-1; i_++)
                    {
                        trnxy[trnsize,i_] = xy[i,i_];
                    }
                    trnsize = trnsize+1;
                }
                else
                {
                    //
                    // Assign sample to validation set
                    //
                    for(i_=0; i_<=ccount-1; i_++)
                    {
                        valxy[valsize,i_] = xy[i,i_];
                    }
                    valsize = valsize+1;
                }
            }
        }
        while( !(trnsize!=0 & valsize!=0) );
        
        //
        // Train with early stopping
        //
        mlptrain.mlptraines(ref network, ref trnxy, trnsize, ref valxy, valsize, decay, restarts, ref tmpinfo, ref tmprep);
        if( tmpinfo<0 )
        {
            info = tmpinfo;
            return;
        }
        
        //
        // Save results
        //
        i1_ = (0) - (k*ensemble.wcount);
        for(i_=k*ensemble.wcount; i_<=(k+1)*ensemble.wcount-1; i_++)
        {
            ensemble.weights[i_] = network.weights[i_+i1_];
        }
        i1_ = (0) - (k*pcount);
        for(i_=k*pcount; i_<=(k+1)*pcount-1; i_++)
        {
            ensemble.columnmeans[i_] = network.columnmeans[i_+i1_];
        }
        i1_ = (0) - (k*pcount);
        for(i_=k*pcount; i_<=(k+1)*pcount-1; i_++)
        {
            ensemble.columnsigmas[i_] = network.columnsigmas[i_+i1_];
        }
        rep.ngrad = rep.ngrad+tmprep.ngrad;
        rep.nhess = rep.nhess+tmprep.nhess;
        rep.ncholesky = rep.ncholesky+tmprep.ncholesky;
    }
}
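/*************************************************************************
Usage sketch for MLPETrainES above: a classification ensemble trained
with early stopping. This method is NOT part of the original ALGLIB
sources; its name and parameter values (3 classes, ensemble size 5,
Decay=0.001, Restarts=2) are illustrative, and the calls are written as
if made from outside the mlpe class. The dataset layout (NIn inputs
followed by one class-label column) matches the ISSoftmax convention
checked by MLPETrainES.
*************************************************************************/
private static void mlpetrainesusagesketch()
{
    mlpe.mlpensemble ensemble = new mlpe.mlpensemble();
    mlptrain.mlpreport rep = new mlptrain.mlpreport();
    double[,] xy;
    int nin = 3;
    int nhid = 5;
    int nclasses = 3;
    int npoints = 100;
    int info = 0;
    int i = 0;
    int j = 0;
    
    //
    // Random classification dataset: NIn real inputs, then the class
    // label stored as a real number in [0..NClasses-1].
    //
    xy = new double[npoints, nin+1];
    for(i=0; i<=npoints-1; i++)
    {
        for(j=0; j<=nin-1; j++)
        {
            xy[i,j] = 2*AP.Math.RandomReal()-1;
        }
        xy[i,nin] = AP.Math.RandomInteger(nclasses);
    }
    
    //
    // Classification ensemble (softmax outputs); each member network is
    // fitted on a random ~2/3 split and validated on the remaining
    // ~1/3, as implemented in MLPETrainES. Info=6 signals success.
    //
    mlpe.mlpecreatec1(nin, nhid, nclasses, 5, ref ensemble);
    mlpe.mlpetraines(ref ensemble, ref xy, npoints, 0.001, 2, ref info, ref rep);
    if( info!=6 )
    {
        return;
    }
}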