Example #1
 /*************************************************************************
 *  Cross-validation estimate of generalization error.
 *
 *  Base algorithm - L-BFGS.
 *
 *  INPUT PARAMETERS:
 *   Network     -   neural network with initialized geometry.   Network is
 *                   not changed during cross-validation -  it is used only
 *                   as a representative of its architecture.
 *   XY          -   training set.
 *   SSize       -   training set size
 *   Decay       -   weight  decay, same as in MLPTrainLBFGS
 *   Restarts    -   number of restarts, >0.
 *                   restarts are counted for each partition separately, so
 *                   total number of restarts will be Restarts*FoldsCount.
 *   WStep       -   stopping criterion, same as in MLPTrainLBFGS
 *   MaxIts      -   stopping criterion, same as in MLPTrainLBFGS
 *   FoldsCount  -   number of folds in k-fold cross-validation,
 *                   2<=FoldsCount<=SSize.
 *                   recommended value: 10.
 *
 *  OUTPUT PARAMETERS:
 *   Info        -   return code, same as in MLPTrainLBFGS
 *   Rep         -   report, same as in MLPTrainLM/MLPTrainLBFGS
 *   CVRep       -   generalization error estimates
 *
 *  -- ALGLIB --
 *    Copyright 09.12.2007 by Bochkanov Sergey
 *************************************************************************/
 public static void mlpkfoldcvlbfgs(ref mlpbase.multilayerperceptron network,
                                    ref double[,] xy,
                                    int npoints,
                                    double decay,
                                    int restarts,
                                    double wstep,
                                    int maxits,
                                    int foldscount,
                                    ref int info,
                                    ref mlpreport rep,
                                    ref mlpcvreport cvrep)
 {
     mlpkfoldcvgeneral(ref network, ref xy, npoints, decay, restarts, foldscount, false, wstep, maxits, ref info, ref rep, ref cvrep);
 }
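A minimal usage sketch for this ref-style overload, assuming an ALGLIB 2.x-style build where mlpbase.mlpcreate1(nin, nhid, nout, ref network) initializes the architecture; the network geometry, data shapes, and training constants below are illustrative assumptions, not part of the example above.

 mlpbase.multilayerperceptron net = new mlpbase.multilayerperceptron();
 mlpbase.mlpcreate1(2, 10, 1, ref net);     // assumed constructor: 2 inputs, 10 hidden neurons, 1 output

 double[,] xy = new double[100, 3];         // 100 rows: 2 inputs + 1 target per row
 // ... fill xy with training data ...

 int info = 0;
 mlpreport rep = new mlpreport();
 mlpcvreport cvrep = new mlpcvreport();
 mlpkfoldcvlbfgs(ref net, ref xy, 100, 0.001, 3, 0.01, 0, 10, ref info, ref rep, ref cvrep);
 if( info>0 )
 {
     System.Console.WriteLine("10-fold CV RMS error: " + cvrep.rmserror);
 }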
Example #2
        /*************************************************************************
        Internal cross-validation subroutine
        *************************************************************************/
        private static void mlpkfoldcvgeneral(mlpbase.multilayerperceptron n,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            int foldscount,
            bool lmalgorithm,
            double wstep,
            int maxits,
            ref int info,
            mlpreport rep,
            mlpcvreport cvrep)
        {
            int i = 0;
            int fold = 0;
            int j = 0;
            int k = 0;
            mlpbase.multilayerperceptron network = new mlpbase.multilayerperceptron();
            int nin = 0;
            int nout = 0;
            int rowlen = 0;
            int wcount = 0;
            int nclasses = 0;
            int tssize = 0;
            int cvssize = 0;
            double[,] cvset = new double[0,0];
            double[,] testset = new double[0,0];
            int[] folds = new int[0];
            int relcnt = 0;
            mlpreport internalrep = new mlpreport();
            double[] x = new double[0];
            double[] y = new double[0];
            int i_ = 0;

            info = 0;

            
            //
            // Read network geometry, test parameters
            //
            mlpbase.mlpproperties(n, ref nin, ref nout, ref wcount);
            if( mlpbase.mlpissoftmax(n) )
            {
                nclasses = nout;
                rowlen = nin+1;
            }
            else
            {
                nclasses = -nout;
                rowlen = nin+nout;
            }
            if( (npoints<=0 || foldscount<2) || foldscount>npoints )
            {
                info = -1;
                return;
            }
            mlpbase.mlpcopy(n, network);
            
            //
            // K-fold cross-validation.
            // First, estimate generalization error
            //
            testset = new double[npoints, rowlen];
            cvset = new double[npoints, rowlen];
            x = new double[nin];
            y = new double[nout];
            mlpkfoldsplit(xy, npoints, nclasses, foldscount, false, ref folds);
            cvrep.relclserror = 0;
            cvrep.avgce = 0;
            cvrep.rmserror = 0;
            cvrep.avgerror = 0;
            cvrep.avgrelerror = 0;
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            relcnt = 0;
            for(fold=0; fold<=foldscount-1; fold++)
            {
                
                //
                // Separate set
                //
                tssize = 0;
                cvssize = 0;
                for(i=0; i<=npoints-1; i++)
                {
                    if( folds[i]==fold )
                    {
                        for(i_=0; i_<=rowlen-1;i_++)
                        {
                            testset[tssize,i_] = xy[i,i_];
                        }
                        tssize = tssize+1;
                    }
                    else
                    {
                        for(i_=0; i_<=rowlen-1;i_++)
                        {
                            cvset[cvssize,i_] = xy[i,i_];
                        }
                        cvssize = cvssize+1;
                    }
                }
                
                //
                // Train on CV training set
                //
                if( lmalgorithm )
                {
                    mlptrainlm(network, cvset, cvssize, decay, restarts, ref info, internalrep);
                }
                else
                {
                    mlptrainlbfgs(network, cvset, cvssize, decay, restarts, wstep, maxits, ref info, internalrep);
                }
                if( info<0 )
                {
                    cvrep.relclserror = 0;
                    cvrep.avgce = 0;
                    cvrep.rmserror = 0;
                    cvrep.avgerror = 0;
                    cvrep.avgrelerror = 0;
                    return;
                }
                rep.ngrad = rep.ngrad+internalrep.ngrad;
                rep.nhess = rep.nhess+internalrep.nhess;
                rep.ncholesky = rep.ncholesky+internalrep.ncholesky;
                
                //
                // Estimate error using CV test set
                //
                if( mlpbase.mlpissoftmax(network) )
                {
                    
                    //
                    // classification-only code
                    //
                    cvrep.relclserror = cvrep.relclserror+mlpbase.mlpclserror(network, testset, tssize);
                    cvrep.avgce = cvrep.avgce+mlpbase.mlperrorn(network, testset, tssize);
                }
                for(i=0; i<=tssize-1; i++)
                {
                    for(i_=0; i_<=nin-1;i_++)
                    {
                        x[i_] = testset[i,i_];
                    }
                    mlpbase.mlpprocess(network, x, ref y);
                    if( mlpbase.mlpissoftmax(network) )
                    {
                        
                        //
                        // Classification-specific code
                        //
                        k = (int)Math.Round(testset[i,nin]);
                        for(j=0; j<=nout-1; j++)
                        {
                            if( j==k )
                            {
                                cvrep.rmserror = cvrep.rmserror+math.sqr(y[j]-1);
                                cvrep.avgerror = cvrep.avgerror+Math.Abs(y[j]-1);
                                cvrep.avgrelerror = cvrep.avgrelerror+Math.Abs(y[j]-1);
                                relcnt = relcnt+1;
                            }
                            else
                            {
                                cvrep.rmserror = cvrep.rmserror+math.sqr(y[j]);
                                cvrep.avgerror = cvrep.avgerror+Math.Abs(y[j]);
                            }
                        }
                    }
                    else
                    {
                        
                        //
                        // Regression-specific code
                        //
                        for(j=0; j<=nout-1; j++)
                        {
                            cvrep.rmserror = cvrep.rmserror+math.sqr(y[j]-testset[i,nin+j]);
                            cvrep.avgerror = cvrep.avgerror+Math.Abs(y[j]-testset[i,nin+j]);
                            if( (double)(testset[i,nin+j])!=(double)(0) )
                            {
                                cvrep.avgrelerror = cvrep.avgrelerror+Math.Abs((y[j]-testset[i,nin+j])/testset[i,nin+j]);
                                relcnt = relcnt+1;
                            }
                        }
                    }
                }
            }
            if( mlpbase.mlpissoftmax(network) )
            {
                cvrep.relclserror = cvrep.relclserror/npoints;
                cvrep.avgce = cvrep.avgce/(Math.Log(2)*npoints);
            }
            cvrep.rmserror = Math.Sqrt(cvrep.rmserror/(npoints*nout));
            cvrep.avgerror = cvrep.avgerror/(npoints*nout);
            cvrep.avgrelerror = cvrep.avgrelerror/relcnt;
            info = 1;
        }
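The fold-driven partitioning above is the core of the routine: mlpkfoldsplit labels each row with a fold index in [0, FoldsCount), and each pass routes rows carrying the current label into the test set and all other rows into the CV training set. A self-contained sketch of that routing step (illustrative only, not ALGLIB code):

        private static void splitfold(double[,] xy, int[] folds, int fold, int rowlen,
            double[,] testset, double[,] cvset, out int tssize, out int cvssize)
        {
            tssize = 0;
            cvssize = 0;
            for(int i=0; i<folds.Length; i++)
            {
                // rows labelled with the current fold index become test rows,
                // all remaining rows become CV training rows
                double[,] dst = folds[i]==fold ? testset : cvset;
                int row = folds[i]==fold ? tssize++ : cvssize++;
                for(int c=0; c<rowlen; c++)
                {
                    dst[row,c] = xy[i,c];
                }
            }
        }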
Example #3
        /*************************************************************************
        Cross-validation estimate of generalization error.

        Base algorithm - Levenberg-Marquardt.

        INPUT PARAMETERS:
            Network     -   neural network with initialized geometry.   Network is
                            not changed during cross-validation -  it is used only
                            as a representative of its architecture.
            XY          -   training set.
            SSize       -   training set size
            Decay       -   weight  decay, same as in MLPTrainLBFGS
            Restarts    -   number of restarts, >0.
                            restarts are counted for each partition separately, so
                            total number of restarts will be Restarts*FoldsCount.
            FoldsCount  -   number of folds in k-fold cross-validation,
                            2<=FoldsCount<=SSize.
                            recommended value: 10.

        OUTPUT PARAMETERS:
            Info        -   return code, same as in MLPTrainLBFGS
            Rep         -   report, same as in MLPTrainLM/MLPTrainLBFGS
            CVRep       -   generalization error estimates

          -- ALGLIB --
             Copyright 09.12.2007 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpkfoldcvlm(mlpbase.multilayerperceptron network,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            int foldscount,
            ref int info,
            mlpreport rep,
            mlpcvreport cvrep)
        {
            info = 0;

            mlpkfoldcvgeneral(network, xy, npoints, decay, restarts, foldscount, true, 0.0, 0, ref info, rep, cvrep);
        }
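A minimal usage sketch for the Levenberg-Marquardt variant, assuming mlpbase.mlpcreate1(nin, nhid, nout, network) from the same ALGLIB build; all sizes and constants are illustrative assumptions.

        mlpbase.multilayerperceptron net = new mlpbase.multilayerperceptron();
        mlpbase.mlpcreate1(2, 10, 1, net);       // assumed constructor call

        double[,] xy = new double[100, 3];       // 100 rows: 2 inputs + 1 target
        // ... fill xy with training data ...

        int info = 0;
        mlpreport rep = new mlpreport();
        mlpcvreport cvrep = new mlpcvreport();
        mlpkfoldcvlm(net, xy, 100, 0.001, 3, 10, ref info, rep, cvrep);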
Example #4
        /*************************************************************************
        Cross-validation estimate of generalization error.

        Base algorithm - L-BFGS.

        INPUT PARAMETERS:
            Network     -   neural network with initialized geometry.   Network is
                            not changed during cross-validation -  it is used only
                            as a representative of its architecture.
            XY          -   training set.
            SSize       -   training set size
            Decay       -   weight  decay, same as in MLPTrainLBFGS
            Restarts    -   number of restarts, >0.
                            restarts are counted for each partition separately, so
                            total number of restarts will be Restarts*FoldsCount.
            WStep       -   stopping criterion, same as in MLPTrainLBFGS
            MaxIts      -   stopping criterion, same as in MLPTrainLBFGS
            FoldsCount  -   number of folds in k-fold cross-validation,
                            2<=FoldsCount<=SSize.
                            recommended value: 10.

        OUTPUT PARAMETERS:
            Info        -   return code, same as in MLPTrainLBFGS
            Rep         -   report, same as in MLPTrainLM/MLPTrainLBFGS
            CVRep       -   generalization error estimates

          -- ALGLIB --
             Copyright 09.12.2007 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpkfoldcvlbfgs(mlpbase.multilayerperceptron network,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            double wstep,
            int maxits,
            int foldscount,
            ref int info,
            mlpreport rep,
            mlpcvreport cvrep)
        {
            info = 0;

            mlpkfoldcvgeneral(network, xy, npoints, decay, restarts, foldscount, false, wstep, maxits, ref info, rep, cvrep);
        }
Example #5
        /*************************************************************************
        Internal bagging subroutine.

          -- ALGLIB --
             Copyright 19.02.2009 by Bochkanov Sergey
        *************************************************************************/
        private static void mlpebagginginternal(mlpe.mlpensemble ensemble,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            double wstep,
            int maxits,
            bool lmalgorithm,
            ref int info,
            mlpreport rep,
            mlpcvreport ooberrors)
        {
            double[,] xys = new double[0,0];
            bool[] s = new bool[0];
            double[,] oobbuf = new double[0,0];
            int[] oobcntbuf = new int[0];
            double[] x = new double[0];
            double[] y = new double[0];
            double[] dy = new double[0];
            double[] dsbuf = new double[0];
            int ccnt = 0;
            int pcnt = 0;
            int i = 0;
            int j = 0;
            int k = 0;
            double v = 0;
            mlpreport tmprep = new mlpreport();
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;
            int i1_ = 0;

            info = 0;

            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            wcount = mlpbase.mlpgetweightscount(ensemble.network);
            
            //
            // Test for inputs
            //
            if( (!lmalgorithm && (double)(wstep)==(double)(0)) && maxits==0 )
            {
                info = -8;
                return;
            }
            if( ((npoints<=0 || restarts<1) || (double)(wstep)<(double)(0)) || maxits<0 )
            {
                info = -1;
                return;
            }
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                for(i=0; i<=npoints-1; i++)
                {
                    if( (int)Math.Round(xy[i,nin])<0 || (int)Math.Round(xy[i,nin])>=nout )
                    {
                        info = -2;
                        return;
                    }
                }
            }
            
            //
            // allocate temporaries
            //
            info = 2;
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            ooberrors.relclserror = 0;
            ooberrors.avgce = 0;
            ooberrors.rmserror = 0;
            ooberrors.avgerror = 0;
            ooberrors.avgrelerror = 0;
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                ccnt = nin+1;
                pcnt = nin;
            }
            else
            {
                ccnt = nin+nout;
                pcnt = nin+nout;
            }
            xys = new double[npoints, ccnt];
            s = new bool[npoints];
            oobbuf = new double[npoints, nout];
            oobcntbuf = new int[npoints];
            x = new double[nin];
            y = new double[nout];
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                dy = new double[1];
            }
            else
            {
                dy = new double[nout];
            }
            for(i=0; i<=npoints-1; i++)
            {
                for(j=0; j<=nout-1; j++)
                {
                    oobbuf[i,j] = 0;
                }
            }
            for(i=0; i<=npoints-1; i++)
            {
                oobcntbuf[i] = 0;
            }
            
            //
            // main bagging cycle
            //
            hqrnd.hqrndrandomize(rs);
            for(k=0; k<=ensemble.ensemblesize-1; k++)
            {
                
                //
                // prepare dataset
                //
                for(i=0; i<=npoints-1; i++)
                {
                    s[i] = false;
                }
                for(i=0; i<=npoints-1; i++)
                {
                    j = hqrnd.hqrnduniformi(rs, npoints);
                    s[j] = true;
                    for(i_=0; i_<=ccnt-1;i_++)
                    {
                        xys[i,i_] = xy[j,i_];
                    }
                }
                
                //
                // train
                //
                if( lmalgorithm )
                {
                    mlptrainlm(ensemble.network, xys, npoints, decay, restarts, ref info, tmprep);
                }
                else
                {
                    mlptrainlbfgs(ensemble.network, xys, npoints, decay, restarts, wstep, maxits, ref info, tmprep);
                }
                if( info<0 )
                {
                    return;
                }
                
                //
                // save results
                //
                rep.ngrad = rep.ngrad+tmprep.ngrad;
                rep.nhess = rep.nhess+tmprep.nhess;
                rep.ncholesky = rep.ncholesky+tmprep.ncholesky;
                i1_ = (0) - (k*wcount);
                for(i_=k*wcount; i_<=(k+1)*wcount-1;i_++)
                {
                    ensemble.weights[i_] = ensemble.network.weights[i_+i1_];
                }
                i1_ = (0) - (k*pcnt);
                for(i_=k*pcnt; i_<=(k+1)*pcnt-1;i_++)
                {
                    ensemble.columnmeans[i_] = ensemble.network.columnmeans[i_+i1_];
                }
                i1_ = (0) - (k*pcnt);
                for(i_=k*pcnt; i_<=(k+1)*pcnt-1;i_++)
                {
                    ensemble.columnsigmas[i_] = ensemble.network.columnsigmas[i_+i1_];
                }
                
                //
                // OOB estimates
                //
                for(i=0; i<=npoints-1; i++)
                {
                    if( !s[i] )
                    {
                        for(i_=0; i_<=nin-1;i_++)
                        {
                            x[i_] = xy[i,i_];
                        }
                        mlpbase.mlpprocess(ensemble.network, x, ref y);
                        for(i_=0; i_<=nout-1;i_++)
                        {
                            oobbuf[i,i_] = oobbuf[i,i_] + y[i_];
                        }
                        oobcntbuf[i] = oobcntbuf[i]+1;
                    }
                }
            }
            
            //
            // OOB estimates
            //
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                bdss.dserrallocate(nout, ref dsbuf);
            }
            else
            {
                bdss.dserrallocate(-nout, ref dsbuf);
            }
            for(i=0; i<=npoints-1; i++)
            {
                if( oobcntbuf[i]!=0 )
                {
                    v = (double)1/(double)oobcntbuf[i];
                    for(i_=0; i_<=nout-1;i_++)
                    {
                        y[i_] = v*oobbuf[i,i_];
                    }
                    if( mlpbase.mlpissoftmax(ensemble.network) )
                    {
                        dy[0] = xy[i,nin];
                    }
                    else
                    {
                        //
                        // copy desired outputs as-is: unlike the averaged
                        // predictions in y, they must not be scaled by 1/OOBCnt
                        //
                        i1_ = (nin) - (0);
                        for(i_=0; i_<=nout-1;i_++)
                        {
                            dy[i_] = xy[i,i_+i1_];
                        }
                    }
                    bdss.dserraccumulate(ref dsbuf, y, dy);
                }
            }
            bdss.dserrfinish(ref dsbuf);
            ooberrors.relclserror = dsbuf[0];
            ooberrors.avgce = dsbuf[1];
            ooberrors.rmserror = dsbuf[2];
            ooberrors.avgerror = dsbuf[3];
            ooberrors.avgrelerror = dsbuf[4];
        }
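The bootstrap step inside the main bagging cycle can be read in isolation: draw NPoints rows with replacement into the working set and flag each drawn original; unflagged rows form the out-of-bag set used for the error estimate. A self-contained sketch (using System.Random instead of ALGLIB's hqrnd generator; illustrative only):

        private static bool[] bootstrapsample(double[,] xy, double[,] xys,
            int npoints, int ccnt, System.Random rng)
        {
            bool[] inbag = new bool[npoints];
            for(int i=0; i<npoints; i++)
            {
                int j = rng.Next(npoints);       // row index drawn with replacement
                inbag[j] = true;
                for(int c=0; c<ccnt; c++)
                {
                    xys[i,c] = xy[j,c];
                }
            }
            return inbag;                        // !inbag[i] => row i is out-of-bag
        }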
Example #6
        /*************************************************************************
        Training neural networks ensemble using  bootstrap  aggregating (bagging).
        L-BFGS algorithm is used as base training method.

        INPUT PARAMETERS:
            Ensemble    -   model with initialized geometry
            XY          -   training set
            NPoints     -   training set size
            Decay       -   weight decay coefficient, >=0.001
            Restarts    -   restarts, >0.
            WStep       -   stopping criterion, same as in MLPTrainLBFGS
            MaxIts      -   stopping criterion, same as in MLPTrainLBFGS

        OUTPUT PARAMETERS:
            Ensemble    -   trained model
            Info        -   return code:
                            * -8, if both WStep=0 and MaxIts=0
                            * -2, if there is a point with class number
                                  outside of [0..NClasses-1].
                            * -1, if incorrect parameters were passed
                                  (NPoints<0, Restarts<1).
                            *  2, if task has been solved.
            Rep         -   training report.
            OOBErrors   -   out-of-bag generalization error estimate

          -- ALGLIB --
             Copyright 17.02.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpebagginglbfgs(mlpe.mlpensemble ensemble,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            double wstep,
            int maxits,
            ref int info,
            mlpreport rep,
            mlpcvreport ooberrors)
        {
            info = 0;

            mlpebagginginternal(ensemble, xy, npoints, decay, restarts, wstep, maxits, false, ref info, rep, ooberrors);
        }
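A minimal usage sketch, assuming mlpe.mlpecreate1(nin, nhid, nout, ensemblesize, ensemble) from the same ALGLIB build creates an ensemble of identically shaped networks; all sizes and constants are illustrative assumptions.

        mlpe.mlpensemble ens = new mlpe.mlpensemble();
        mlpe.mlpecreate1(2, 10, 1, 20, ens);     // assumed constructor: ensemble of 20 networks

        double[,] xy = new double[100, 3];       // 100 rows: 2 inputs + 1 target
        // ... fill xy with training data ...

        int info = 0;
        mlpreport rep = new mlpreport();
        mlpcvreport oob = new mlpcvreport();
        mlpebagginglbfgs(ens, xy, 100, 0.001, 2, 0.01, 0, ref info, rep, oob);
        if( info==2 )
        {
            System.Console.WriteLine("OOB RMS error: " + oob.rmserror);
        }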
Example #7
        /*************************************************************************
        Training neural networks ensemble using  bootstrap  aggregating (bagging).
        Modified Levenberg-Marquardt algorithm is used as base training method.

        INPUT PARAMETERS:
            Ensemble    -   model with initialized geometry
            XY          -   training set
            NPoints     -   training set size
            Decay       -   weight decay coefficient, >=0.001
            Restarts    -   restarts, >0.

        OUTPUT PARAMETERS:
            Ensemble    -   trained model
            Info        -   return code:
                            * -2, if there is a point with class number
                                  outside of [0..NClasses-1].
                            * -1, if incorrect parameters were passed
                                  (NPoints<0, Restarts<1).
                            *  2, if task has been solved.
            Rep         -   training report.
            OOBErrors   -   out-of-bag generalization error estimate

          -- ALGLIB --
             Copyright 17.02.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpebagginglm(mlpe.mlpensemble ensemble,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            ref int info,
            mlpreport rep,
            mlpcvreport ooberrors)
        {
            info = 0;

            mlpebagginginternal(ensemble, xy, npoints, decay, restarts, 0.0, 0, true, ref info, rep, ooberrors);
        }
Example #8
 public override alglib.apobject make_copy()
 {
     mlpcvreport _result = new mlpcvreport();
     _result.relclserror = relclserror;
     _result.avgce = avgce;
     _result.rmserror = rmserror;
     _result.avgerror = avgerror;
     _result.avgrelerror = avgrelerror;
     return _result;
 }
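A brief usage sketch: make_copy returns an independent snapshot of the report, so later changes to the source object do not propagate to the copy.

 mlpcvreport a = new mlpcvreport();
 a.rmserror = 0.25;
 mlpcvreport b = (mlpcvreport)a.make_copy();     // field-by-field copy
 a.rmserror = 0.50;
 System.Console.WriteLine(b.rmserror);           // still prints 0.25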
Example #9
        /*************************************************************************
        *  Internal cross-validation subroutine
        *************************************************************************/
        private static void mlpkfoldcvgeneral(ref mlpbase.multilayerperceptron n,
                                              ref double[,] xy,
                                              int npoints,
                                              double decay,
                                              int restarts,
                                              int foldscount,
                                              bool lmalgorithm,
                                              double wstep,
                                              int maxits,
                                              ref int info,
                                              ref mlpreport rep,
                                              ref mlpcvreport cvrep)
        {
            int i    = 0;
            int fold = 0;
            int j    = 0;
            int k    = 0;

            mlpbase.multilayerperceptron network = new mlpbase.multilayerperceptron();
            int nin      = 0;
            int nout     = 0;
            int rowlen   = 0;
            int wcount   = 0;
            int nclasses = 0;
            int tssize   = 0;
            int cvssize  = 0;

            double[,] cvset   = new double[0, 0];
            double[,] testset = new double[0, 0];
            int[]     folds       = new int[0];
            int       relcnt      = 0;
            mlpreport internalrep = new mlpreport();

            double[] x  = new double[0];
            double[] y  = new double[0];
            int      i_ = 0;


            //
            // Read network geometry, test parameters
            //
            mlpbase.mlpproperties(ref n, ref nin, ref nout, ref wcount);
            if (mlpbase.mlpissoftmax(ref n))
            {
                nclasses = nout;
                rowlen   = nin + 1;
            }
            else
            {
                nclasses = -nout;
                rowlen   = nin + nout;
            }
            if (npoints <= 0 || foldscount < 2 || foldscount > npoints)
            {
                info = -1;
                return;
            }
            mlpbase.mlpcopy(ref n, ref network);

            //
            // K-fold cross-validation.
            // First, estimate generalization error
            //
            testset = new double[npoints, rowlen];
            cvset   = new double[npoints, rowlen];
            x       = new double[nin];
            y       = new double[nout];
            mlpkfoldsplit(ref xy, npoints, nclasses, foldscount, false, ref folds);
            cvrep.relclserror = 0;
            cvrep.avgce       = 0;
            cvrep.rmserror    = 0;
            cvrep.avgerror    = 0;
            cvrep.avgrelerror = 0;
            rep.ngrad         = 0;
            rep.nhess         = 0;
            rep.ncholesky     = 0;
            relcnt            = 0;
            for (fold = 0; fold <= foldscount - 1; fold++)
            {
                //
                // Separate set
                //
                tssize  = 0;
                cvssize = 0;
                for (i = 0; i <= npoints - 1; i++)
                {
                    if (folds[i] == fold)
                    {
                        for (i_ = 0; i_ <= rowlen - 1; i_++)
                        {
                            testset[tssize, i_] = xy[i, i_];
                        }
                        tssize = tssize + 1;
                    }
                    else
                    {
                        for (i_ = 0; i_ <= rowlen - 1; i_++)
                        {
                            cvset[cvssize, i_] = xy[i, i_];
                        }
                        cvssize = cvssize + 1;
                    }
                }

                //
                // Train on CV training set
                //
                if (lmalgorithm)
                {
                    mlptrainlm(ref network, ref cvset, cvssize, decay, restarts, ref info, ref internalrep);
                }
                else
                {
                    mlptrainlbfgs(ref network, ref cvset, cvssize, decay, restarts, wstep, maxits, ref info, ref internalrep);
                }
                if (info < 0)
                {
                    cvrep.relclserror = 0;
                    cvrep.avgce       = 0;
                    cvrep.rmserror    = 0;
                    cvrep.avgerror    = 0;
                    cvrep.avgrelerror = 0;
                    return;
                }
                rep.ngrad     = rep.ngrad + internalrep.ngrad;
                rep.nhess     = rep.nhess + internalrep.nhess;
                rep.ncholesky = rep.ncholesky + internalrep.ncholesky;

                //
                // Estimate error using CV test set
                //
                if (mlpbase.mlpissoftmax(ref network))
                {
                    //
                    // classification-only code
                    //
                    cvrep.relclserror = cvrep.relclserror + mlpbase.mlpclserror(ref network, ref testset, tssize);
                    cvrep.avgce       = cvrep.avgce + mlpbase.mlperrorn(ref network, ref testset, tssize);
                }
                for (i = 0; i <= tssize - 1; i++)
                {
                    for (i_ = 0; i_ <= nin - 1; i_++)
                    {
                        x[i_] = testset[i, i_];
                    }
                    mlpbase.mlpprocess(ref network, ref x, ref y);
                    if (mlpbase.mlpissoftmax(ref network))
                    {
                        //
                        // Classification-specific code
                        //
                        k = (int)Math.Round(testset[i, nin]);
                        for (j = 0; j <= nout - 1; j++)
                        {
                            if (j == k)
                            {
                                cvrep.rmserror    = cvrep.rmserror + AP.Math.Sqr(y[j] - 1);
                                cvrep.avgerror    = cvrep.avgerror + Math.Abs(y[j] - 1);
                                cvrep.avgrelerror = cvrep.avgrelerror + Math.Abs(y[j] - 1);
                                relcnt            = relcnt + 1;
                            }
                            else
                            {
                                cvrep.rmserror = cvrep.rmserror + AP.Math.Sqr(y[j]);
                                cvrep.avgerror = cvrep.avgerror + Math.Abs(y[j]);
                            }
                        }
                    }
                    else
                    {
                        //
                        // Regression-specific code
                        //
                        for (j = 0; j <= nout - 1; j++)
                        {
                            cvrep.rmserror = cvrep.rmserror + AP.Math.Sqr(y[j] - testset[i, nin + j]);
                            cvrep.avgerror = cvrep.avgerror + Math.Abs(y[j] - testset[i, nin + j]);
                            if ((double)(testset[i, nin + j]) != (double)(0))
                            {
                                cvrep.avgrelerror = cvrep.avgrelerror + Math.Abs((y[j] - testset[i, nin + j]) / testset[i, nin + j]);
                                relcnt            = relcnt + 1;
                            }
                        }
                    }
                }
            }
            if (mlpbase.mlpissoftmax(ref network))
            {
                cvrep.relclserror = cvrep.relclserror / npoints;
                cvrep.avgce       = cvrep.avgce / (Math.Log(2) * npoints);
            }
            cvrep.rmserror    = Math.Sqrt(cvrep.rmserror / (npoints * nout));
            cvrep.avgerror    = cvrep.avgerror / (npoints * nout);
            cvrep.avgrelerror = cvrep.avgrelerror / relcnt;
            info = 1;
        }