Example #1
        /*************************************************************************
        This function trains the neural network passed to it, using the current
        dataset (the one passed to MLPSetDataset() or MLPSetSparseDataset()) and
        the current training settings. Training from NRestarts random starting
        positions is performed, and the best network is chosen.

        This function is intended to be used internally. It may be used in several
        settings:
        * training with ValSubsetSize=0 corresponds to "normal" training with
          termination criteria based on S.MaxIts (step count) and S.WStep (step
          size). The training sample is given by TrnSubset/TrnSubsetSize.
        * training with ValSubsetSize>0 corresponds to early stopping training
          with additional MaxIts/WStep stopping criteria. The training sample is
          given by TrnSubset/TrnSubsetSize, the validation sample by ValSubset/
          ValSubsetSize.

          -- ALGLIB --
             Copyright 13.08.2012 by Bochkanov Sergey
        *************************************************************************/
        private static void mlptrainnetworkx(mlptrainer s,
            int nrestarts,
            int algokind,
            int[] trnsubset,
            int trnsubsetsize,
            int[] valsubset,
            int valsubsetsize,
            mlpbase.multilayerperceptron network,
            mlpreport rep,
            bool isrootcall,
            alglib.smp.shared_pool sessions)
        {
            mlpbase.modelerrors modrep = new mlpbase.modelerrors();
            double eval = 0;
            double ebest = 0;
            int ngradbatch = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int pcount = 0;
            int itbest = 0;
            int itcnt = 0;
            int ntype = 0;
            int ttype = 0;
            bool rndstart = new bool();
            int i = 0;
            int nr0 = 0;
            int nr1 = 0;
            mlpreport rep0 = new mlpreport();
            mlpreport rep1 = new mlpreport();
            bool randomizenetwork = new bool();
            double bestrmserror = 0;
            smlptrnsession psession = null;
            int i_ = 0;

            mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
            
            //
            // Process root call
            //
            if( isrootcall )
            {
                
                //
                // Check correctness of parameters
                //
                alglib.ap.assert(algokind==0 || algokind==-1, "MLPTrainNetworkX: unexpected AlgoKind");
                alglib.ap.assert(s.npoints>=0, "MLPTrainNetworkX: internal error - parameter S is not initialized or is spoiled(S.NPoints<0)");
                if( s.rcpar )
                {
                    ttype = 0;
                }
                else
                {
                    ttype = 1;
                }
                if( !mlpbase.mlpissoftmax(network) )
                {
                    ntype = 0;
                }
                else
                {
                    ntype = 1;
                }
                alglib.ap.assert(ntype==ttype, "MLPTrainNetworkX: internal error - type of the training network is not similar to network type in trainer object");
                alglib.ap.assert(s.nin==nin, "MLPTrainNetworkX: internal error - number of inputs in trainer is not equal to number of inputs in the training network.");
                alglib.ap.assert(s.nout==nout, "MLPTrainNetworkX: internal error - number of outputs in trainer is not equal to number of outputs in the training network.");
                alglib.ap.assert(nrestarts>=0, "MLPTrainNetworkX: internal error - NRestarts<0.");
                alglib.ap.assert(alglib.ap.len(trnsubset)>=trnsubsetsize, "MLPTrainNetworkX: internal error - parameter TrnSubsetSize more than input subset size(Length(TrnSubset)<TrnSubsetSize)");
                for(i=0; i<=trnsubsetsize-1; i++)
                {
                    alglib.ap.assert(trnsubset[i]>=0 && trnsubset[i]<=s.npoints-1, "MLPTrainNetworkX: internal error - parameter TrnSubset contains incorrect index(TrnSubset[I]<0 or TrnSubset[I]>S.NPoints-1)");
                }
                alglib.ap.assert(alglib.ap.len(valsubset)>=valsubsetsize, "MLPTrainNetworkX: internal error - parameter ValSubsetSize more than input subset size(Length(ValSubset)<ValSubsetSize)");
                for(i=0; i<=valsubsetsize-1; i++)
                {
                    alglib.ap.assert(valsubset[i]>=0 && valsubset[i]<=s.npoints-1, "MLPTrainNetworkX: internal error - parameter ValSubset contains incorrect index(ValSubset[I]<0 or ValSubset[I]>S.NPoints-1)");
                }
                
                //
                // Train
                //
                randomizenetwork = nrestarts>0;
                initmlptrnsessions(network, randomizenetwork, s, sessions);
                mlptrainnetworkx(s, nrestarts, algokind, trnsubset, trnsubsetsize, valsubset, valsubsetsize, network, rep, false, sessions);
                
                //
                // Choose best network
                //
                bestrmserror = math.maxrealnumber;
                alglib.smp.ae_shared_pool_first_recycled(sessions, ref psession);
                while( psession!=null )
                {
                    if( (double)(psession.bestrmserror)<(double)(bestrmserror) )
                    {
                        mlpbase.mlpimporttunableparameters(network, psession.bestparameters);
                        bestrmserror = psession.bestrmserror;
                    }
                    alglib.smp.ae_shared_pool_next_recycled(sessions, ref psession);
                }
                
                //
                // Calculate errors
                //
                if( s.datatype==0 )
                {
                    mlpbase.mlpallerrorssubset(network, s.densexy, s.npoints, trnsubset, trnsubsetsize, modrep);
                }
                if( s.datatype==1 )
                {
                    mlpbase.mlpallerrorssparsesubset(network, s.sparsexy, s.npoints, trnsubset, trnsubsetsize, modrep);
                }
                rep.relclserror = modrep.relclserror;
                rep.avgce = modrep.avgce;
                rep.rmserror = modrep.rmserror;
                rep.avgerror = modrep.avgerror;
                rep.avgrelerror = modrep.avgrelerror;
                
                //
                // Done
                //
                return;
            }
            
            //
            // Split problem, if we have more than 1 restart
            //
            if( nrestarts>=2 )
            {
                
                //
                // Divide problem with NRestarts into two: NR0 and NR1.
                //
                nr0 = nrestarts/2;
                nr1 = nrestarts-nr0;
                mlptrainnetworkx(s, nr0, algokind, trnsubset, trnsubsetsize, valsubset, valsubsetsize, network, rep0, false, sessions);
                mlptrainnetworkx(s, nr1, algokind, trnsubset, trnsubsetsize, valsubset, valsubsetsize, network, rep1, false, sessions);
                
                //
                // Aggregate results
                //
                rep.ngrad = rep0.ngrad+rep1.ngrad;
                rep.nhess = rep0.nhess+rep1.nhess;
                rep.ncholesky = rep0.ncholesky+rep1.ncholesky;
                
                //
                // Done :)
                //
                return;
            }
            
            //
            // Execution with NRestarts=1 or NRestarts=0:
            // * NRestarts=1 means that network is restarted from random position
            // * NRestarts=0 means that network is not randomized
            //
            alglib.ap.assert(nrestarts==0 || nrestarts==1, "MLPTrainNetworkX: internal error");
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            alglib.smp.ae_shared_pool_retrieve(sessions, ref psession);
            if( ((s.datatype==0 || s.datatype==1) && s.npoints>0) && trnsubsetsize!=0 )
            {
                
                //
                // Train network using a combination of early stopping and
                // step-size / step-count based criteria. The network state with
                // the best validation set error is stored in WBuf0. When the
                // validation set is empty, the most recent network state is
                // stored.
                //
                rndstart = nrestarts!=0;
                ngradbatch = 0;
                eval = 0;
                ebest = 0;
                itbest = 0;
                itcnt = 0;
                mlpstarttrainingx(s, rndstart, algokind, trnsubset, trnsubsetsize, psession);
                if( s.datatype==0 )
                {
                    ebest = mlpbase.mlperrorsubset(psession.network, s.densexy, s.npoints, valsubset, valsubsetsize);
                }
                if( s.datatype==1 )
                {
                    ebest = mlpbase.mlperrorsparsesubset(psession.network, s.sparsexy, s.npoints, valsubset, valsubsetsize);
                }
                for(i_=0; i_<=wcount-1;i_++)
                {
                    psession.wbuf0[i_] = psession.network.weights[i_];
                }
                while( mlpcontinuetrainingx(s, trnsubset, trnsubsetsize, ref ngradbatch, psession) )
                {
                    if( s.datatype==0 )
                    {
                        eval = mlpbase.mlperrorsubset(psession.network, s.densexy, s.npoints, valsubset, valsubsetsize);
                    }
                    if( s.datatype==1 )
                    {
                        eval = mlpbase.mlperrorsparsesubset(psession.network, s.sparsexy, s.npoints, valsubset, valsubsetsize);
                    }
                    if( (double)(eval)<=(double)(ebest) || valsubsetsize==0 )
                    {
                        for(i_=0; i_<=wcount-1;i_++)
                        {
                            psession.wbuf0[i_] = psession.network.weights[i_];
                        }
                        ebest = eval;
                        itbest = itcnt;
                    }
                    if( itcnt>30 && (double)(itcnt)>(double)(1.5*itbest) )
                    {
                        break;
                    }
                    itcnt = itcnt+1;
                }
                for(i_=0; i_<=wcount-1;i_++)
                {
                    psession.network.weights[i_] = psession.wbuf0[i_];
                }
                rep.ngrad = ngradbatch;
            }
            else
            {
                for(i=0; i<=wcount-1; i++)
                {
                    psession.network.weights[i] = 0;
                }
            }
            
            //
            // Evaluate network performance and update PSession.BestParameters/BestRMSError
            // (if needed).
            //
            if( s.datatype==0 )
            {
                mlpbase.mlpallerrorssubset(psession.network, s.densexy, s.npoints, trnsubset, trnsubsetsize, modrep);
            }
            if( s.datatype==1 )
            {
                mlpbase.mlpallerrorssparsesubset(psession.network, s.sparsexy, s.npoints, trnsubset, trnsubsetsize, modrep);
            }
            if( (double)(modrep.rmserror)<(double)(psession.bestrmserror) )
            {
                mlpbase.mlpexporttunableparameters(psession.network, ref psession.bestparameters, ref pcount);
                psession.bestrmserror = modrep.rmserror;
            }
            
            //
            // Move session back to pool
            //
            alglib.smp.ae_shared_pool_recycle(sessions, ref psession);
        }
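
The loop above encodes the stopping rule used throughout this unit: training continues while the optimizer reports progress, and is cut off once more than 30 iterations have passed and the last validation improvement lies further back than a third of the total iteration count (ItCnt>1.5*ItBest). Below is a minimal standalone sketch of that criterion; the names EarlyStopper and ShouldStop are illustrative and not part of the ALGLIB API.

        // Minimal sketch of the early-stopping criterion used above; the
        // class and method names are illustrative, not part of ALGLIB.
        public class EarlyStopper
        {
            private double ebest = double.MaxValue; // best validation error so far
            private int itbest = 0;                 // iteration of last improvement
            private int itcnt = 0;                  // iterations performed

            // Feed one validation error per iteration; returns true once
            // itcnt>30 and no improvement was seen since iteration itcnt/1.5.
            public bool ShouldStop(double validationError)
            {
                if( validationError<=ebest )
                {
                    ebest = validationError;
                    itbest = itcnt;
                }
                bool stop = itcnt>30 && itcnt>1.5*itbest;
                itcnt = itcnt+1;
                return stop;
            }
        }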
Example #2
        /*************************************************************************
        Trains a neural network ensemble using early stopping.

        INPUT PARAMETERS:
            Ensemble    -   model with initialized geometry
            XY          -   training set
            NPoints     -   training set size
            Decay       -   weight decay coefficient, >=0.001
            Restarts    -   restarts, >0.

        OUTPUT PARAMETERS:
            Ensemble    -   trained model
            Info        -   return code:
                            * -2, if there is a point with class number
                                  outside of [0..NClasses-1].
                            * -1, if incorrect parameters were passed
                                  (NPoints<2, Restarts<1 or Decay<0).
                            *  6, if task has been solved.
            Rep         -   training report.

          -- ALGLIB --
             Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpetraines(mlpe.mlpensemble ensemble,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            ref int info,
            mlpreport rep)
        {
            int i = 0;
            int k = 0;
            int ccount = 0;
            int pcount = 0;
            double[,] trnxy = new double[0,0];
            double[,] valxy = new double[0,0];
            int trnsize = 0;
            int valsize = 0;
            int tmpinfo = 0;
            mlpreport tmprep = new mlpreport();
            mlpbase.modelerrors moderr = new mlpbase.modelerrors();
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int i_ = 0;
            int i1_ = 0;

            info = 0;

            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            wcount = mlpbase.mlpgetweightscount(ensemble.network);
            if( (npoints<2 || restarts<1) || (double)(decay)<(double)(0) )
            {
                info = -1;
                return;
            }
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                for(i=0; i<=npoints-1; i++)
                {
                    if( (int)Math.Round(xy[i,nin])<0 || (int)Math.Round(xy[i,nin])>=nout )
                    {
                        info = -2;
                        return;
                    }
                }
            }
            info = 6;
            
            //
            // allocate
            //
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                ccount = nin+1;
                pcount = nin;
            }
            else
            {
                ccount = nin+nout;
                pcount = nin+nout;
            }
            trnxy = new double[npoints, ccount];
            valxy = new double[npoints, ccount];
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            
            //
            // train networks
            //
            for(k=0; k<=ensemble.ensemblesize-1; k++)
            {
                
                //
                // Split set
                //
                do
                {
                    trnsize = 0;
                    valsize = 0;
                    for(i=0; i<=npoints-1; i++)
                    {
                        if( (double)(math.randomreal())<(double)(0.66) )
                        {
                            
                            //
                            // Assign sample to training set
                            //
                            for(i_=0; i_<=ccount-1;i_++)
                            {
                                trnxy[trnsize,i_] = xy[i,i_];
                            }
                            trnsize = trnsize+1;
                        }
                        else
                        {
                            
                            //
                            // Assign sample to validation set
                            //
                            for(i_=0; i_<=ccount-1;i_++)
                            {
                                valxy[valsize,i_] = xy[i,i_];
                            }
                            valsize = valsize+1;
                        }
                    }
                }
                while( !(trnsize!=0 && valsize!=0) );
                
                //
                // Train
                //
                mlptraines(ensemble.network, trnxy, trnsize, valxy, valsize, decay, restarts, ref tmpinfo, tmprep);
                if( tmpinfo<0 )
                {
                    info = tmpinfo;
                    return;
                }
                
                //
                // save results
                //
                i1_ = (0) - (k*wcount);
                for(i_=k*wcount; i_<=(k+1)*wcount-1;i_++)
                {
                    ensemble.weights[i_] = ensemble.network.weights[i_+i1_];
                }
                i1_ = (0) - (k*pcount);
                for(i_=k*pcount; i_<=(k+1)*pcount-1;i_++)
                {
                    ensemble.columnmeans[i_] = ensemble.network.columnmeans[i_+i1_];
                }
                i1_ = (0) - (k*pcount);
                for(i_=k*pcount; i_<=(k+1)*pcount-1;i_++)
                {
                    ensemble.columnsigmas[i_] = ensemble.network.columnsigmas[i_+i1_];
                }
                rep.ngrad = rep.ngrad+tmprep.ngrad;
                rep.nhess = rep.nhess+tmprep.nhess;
                rep.ncholesky = rep.ncholesky+tmprep.ncholesky;
            }
            mlpe.mlpeallerrorsx(ensemble, xy, ensemble.network.dummysxy, npoints, 0, ensemble.network.dummyidx, 0, npoints, 0, ensemble.network.buf, moderr);
            rep.relclserror = moderr.relclserror;
            rep.avgce = moderr.avgce;
            rep.rmserror = moderr.rmserror;
            rep.avgerror = moderr.avgerror;
            rep.avgrelerror = moderr.avgrelerror;
        }
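
A usage sketch for this routine through the public ALGLIB C# wrappers is shown below. The wrapper names (alglib.mlpecreate1, alglib.mlpetraines) and their out-parameter signatures are assumed to match the ALGLIB reference manual; verify against your version before relying on them.

        // Hedged usage sketch: train a small ensemble with early stopping.
        // Assumes the public ALGLIB C# wrappers; verify signatures against
        // the reference manual of your ALGLIB version.
        public static void TrainEnsembleExample()
        {
            // Toy regression dataset: rows store 2 inputs followed by 1 output.
            double[,] xy = new double[,]
            {
                { 0.0, 0.0, 0.0 },
                { 0.5, 0.5, 1.0 },
                { 1.0, 0.0, 1.0 },
                { 0.0, 1.0, 1.0 }
            };
            alglib.mlpensemble ensemble;
            alglib.mlpecreate1(2, 5, 1, 3, out ensemble); // 2-5-1 nets, 3 members
            int info;
            alglib.mlpreport rep;
            alglib.mlpetraines(ensemble, xy, 4, 0.001, 2, out info, out rep);
            // info==6 on success; rep.rmserror etc. hold training-set errors.
        }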
Example #3
        /*************************************************************************
        This function trains the neural network ensemble passed to it using the
        current dataset and early stopping training algorithm. Each early stopping
        round performs NRestarts random restarts (thus, EnsembleSize*NRestarts
        training rounds are performed in total).

        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support (C++ computational core)
          !
          ! Second improvement gives constant  speedup (2-3X).  First  improvement
          ! gives  close-to-linear  speedup  on   multicore   systems.   Following
          ! operations can be executed in parallel:
          ! * EnsembleSize  training  sessions  performed  for  each  of  ensemble
          !   members (always parallelized)
          ! * NRestarts  training  sessions  performed  within  each  of  training
          !   sessions (if NRestarts>1)
          ! * gradient calculation over large dataset (if dataset is large enough)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you will still be able to call the smp-version of this
          ! function, but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.

        INPUT PARAMETERS:
            S           -   trainer object;
            Ensemble    -   neural network ensemble. It must have same  number  of
                            inputs and outputs/classes  as  was  specified  during
                            creation of the trainer object.
            NRestarts   -   number of restarts, >=0:
                            * NRestarts>0 means that specified  number  of  random
                              restarts are performed during each ES round;
                            * NRestarts=0 is silently replaced by 1.

        OUTPUT PARAMETERS:
            Ensemble    -   trained ensemble;
            Rep         -   training report; it contains all types of errors.
            
        NOTE: this training method uses BOTH early stopping and weight decay!  So,
              you should select weight decay before starting training just as  you
              select it before training "conventional" networks.

        NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
              or a single-point dataset was passed, the ensemble is filled with
              zero values.

        NOTE: this method uses sum-of-squares error function for training.

          -- ALGLIB --
             Copyright 22.08.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlptrainensemblees(mlptrainer s,
            mlpe.mlpensemble ensemble,
            int nrestarts,
            mlpreport rep)
        {
            int nin = 0;
            int nout = 0;
            int ntype = 0;
            int ttype = 0;
            alglib.smp.shared_pool esessions = new alglib.smp.shared_pool();
            apserv.sinteger sgrad = new apserv.sinteger();
            mlpbase.modelerrors tmprep = new mlpbase.modelerrors();

            alglib.ap.assert(s.npoints>=0, "MLPTrainEnsembleES: parameter S is not initialized or is spoiled(S.NPoints<0)");
            if( !mlpe.mlpeissoftmax(ensemble) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            if( s.rcpar )
            {
                ttype = 0;
            }
            else
            {
                ttype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPTrainEnsembleES: internal error - type of input network is not similar to network type in trainer object");
            nin = mlpbase.mlpgetinputscount(ensemble.network);
            alglib.ap.assert(s.nin==nin, "MLPTrainEnsembleES: number of inputs in trainer is not equal to number of inputs in ensemble network");
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            alglib.ap.assert(s.nout==nout, "MLPTrainEnsembleES: number of outputs in trainer is not equal to number of outputs in ensemble network");
            alglib.ap.assert(nrestarts>=0, "MLPTrainEnsembleES: NRestarts<0.");
            
            //
            // Initialize parameter Rep
            //
            rep.relclserror = 0;
            rep.avgce = 0;
            rep.rmserror = 0;
            rep.avgerror = 0;
            rep.avgrelerror = 0;
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            
            //
            // Allocate
            //
            apserv.ivectorsetlengthatleast(ref s.subset, s.npoints);
            apserv.ivectorsetlengthatleast(ref s.valsubset, s.npoints);
            
            //
            // Start training
            //
            // NOTE: ESessions is not initialized because MLPTrainEnsembleX
            //       needs uninitialized pool.
            //
            sgrad.val = 0;
            mlptrainensemblex(s, ensemble, 0, ensemble.ensemblesize, nrestarts, 0, sgrad, true, esessions);
            rep.ngrad = sgrad.val;
            
            //
            // Calculate errors.
            //
            if( s.datatype==0 )
            {
                mlpe.mlpeallerrorsx(ensemble, s.densexy, s.sparsexy, s.npoints, 0, ensemble.network.dummyidx, 0, s.npoints, 0, ensemble.network.buf, tmprep);
            }
            if( s.datatype==1 )
            {
                mlpe.mlpeallerrorsx(ensemble, s.densexy, s.sparsexy, s.npoints, 1, ensemble.network.dummyidx, 0, s.npoints, 0, ensemble.network.buf, tmprep);
            }
            rep.relclserror = tmprep.relclserror;
            rep.avgce = tmprep.avgce;
            rep.rmserror = tmprep.rmserror;
            rep.avgerror = tmprep.avgerror;
            rep.avgrelerror = tmprep.avgrelerror;
        }
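
The trainer-based entry point is typically driven as in the following hedged sketch, assuming the public ALGLIB C# wrappers (alglib.mlpcreatetrainer, alglib.mlpsetdataset, alglib.mlpecreate1, alglib.mlptrainensemblees); consult your version's reference manual for exact signatures.

        // Hedged usage sketch for trainer-based ensemble training.
        public static void TrainEnsembleWithTrainer(double[,] xy, int npoints)
        {
            alglib.mlptrainer trn;
            alglib.mlpcreatetrainer(2, 1, out trn);  // 2 inputs, 1 output (regression)
            alglib.mlpsetdataset(trn, xy, npoints);  // xy: npoints x (2+1) dense rows
            alglib.mlpensemble ens;
            alglib.mlpecreate1(2, 5, 1, 4, out ens); // 4-member ensemble of 2-5-1 nets
            alglib.mlpreport rep;
            alglib.mlptrainensemblees(trn, ens, 2, out rep); // 2 restarts per ES round
            // rep now holds all error metrics computed on the training set.
        }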
Example #4
        /*************************************************************************
        Average relative error on the test set

        INPUT PARAMETERS:
            Ensemble-   ensemble
            XY      -   test set
            NPoints -   test set size

        RESULT:
            Its meaning for regression tasks is obvious. For classification tasks,
        it means the average relative error when estimating posterior probabilities.

          -- ALGLIB --
             Copyright 17.02.2009 by Bochkanov Sergey
        *************************************************************************/
        public static double mlpeavgrelerror(mlpensemble ensemble,
            double[,] xy,
            int npoints)
        {
            double result = 0;
            mlpbase.modelerrors rep = new mlpbase.modelerrors();

            mlpeallerrorsx(ensemble, xy, ensemble.network.dummysxy, npoints, 0, ensemble.network.dummyidx, 0, npoints, 0, ensemble.network.buf, rep);
            result = rep.avgrelerror;
            return result;
        }
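
Once an ensemble has been trained, this metric is usually queried on a held-out set. A small hedged sketch follows, assuming the public alglib.mlpeavgrelerror wrapper mirrors the internal routine above.

        // Hedged usage sketch: evaluate a trained ensemble on a held-out
        // test set. Assumes the public alglib.mlpeavgrelerror wrapper.
        public static double TestError(alglib.mlpensemble ensemble,
            double[,] xytest)
        {
            int ntest = xytest.GetLength(0); // rows: inputs followed by targets
            return alglib.mlpeavgrelerror(ensemble, xytest, ntest);
        }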
Example #5
        /*************************************************************************
        Calculation of all types of errors

          -- ALGLIB --
             Copyright 17.02.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpeallerrorsx(mlpensemble ensemble,
            double[,] densexy,
            sparse.sparsematrix sparsexy,
            int datasetsize,
            int datasettype,
            int[] idx,
            int subset0,
            int subset1,
            int subsettype,
            alglib.smp.shared_pool buf,
            mlpbase.modelerrors rep)
        {
            int i = 0;
            int j = 0;
            int nin = 0;
            int nout = 0;
            bool iscls = new bool();
            int srcidx = 0;
            hpccores.mlpbuffers pbuf = null;
            mlpbase.modelerrors rep0 = new mlpbase.modelerrors();
            mlpbase.modelerrors rep1 = new mlpbase.modelerrors();
            int i_ = 0;
            int i1_ = 0;

            
            //
            // Get network information
            //
            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            iscls = mlpbase.mlpissoftmax(ensemble.network);
            
            //
            // Retrieve buffer, prepare, process data, recycle buffer
            //
            alglib.smp.ae_shared_pool_retrieve(buf, ref pbuf);
            if( iscls )
            {
                bdss.dserrallocate(nout, ref pbuf.tmp0);
            }
            else
            {
                bdss.dserrallocate(-nout, ref pbuf.tmp0);
            }
            apserv.rvectorsetlengthatleast(ref pbuf.x, nin);
            apserv.rvectorsetlengthatleast(ref pbuf.y, nout);
            apserv.rvectorsetlengthatleast(ref pbuf.desiredy, nout);
            for(i=subset0; i<=subset1-1; i++)
            {
                srcidx = -1;
                if( subsettype==0 )
                {
                    srcidx = i;
                }
                if( subsettype==1 )
                {
                    srcidx = idx[i];
                }
                alglib.ap.assert(srcidx>=0, "MLPEAllErrorsX: internal error");
                if( datasettype==0 )
                {
                    for(i_=0; i_<=nin-1;i_++)
                    {
                        pbuf.x[i_] = densexy[srcidx,i_];
                    }
                }
                if( datasettype==1 )
                {
                    sparse.sparsegetrow(sparsexy, srcidx, ref pbuf.x);
                }
                mlpeprocess(ensemble, pbuf.x, ref pbuf.y);
                if( mlpbase.mlpissoftmax(ensemble.network) )
                {
                    if( datasettype==0 )
                    {
                        pbuf.desiredy[0] = densexy[srcidx,nin];
                    }
                    if( datasettype==1 )
                    {
                        pbuf.desiredy[0] = sparse.sparseget(sparsexy, srcidx, nin);
                    }
                }
                else
                {
                    if( datasettype==0 )
                    {
                        i1_ = (nin) - (0);
                        for(i_=0; i_<=nout-1;i_++)
                        {
                            pbuf.desiredy[i_] = densexy[srcidx,i_+i1_];
                        }
                    }
                    if( datasettype==1 )
                    {
                        for(j=0; j<=nout-1; j++)
                        {
                            pbuf.desiredy[j] = sparse.sparseget(sparsexy, srcidx, nin+j);
                        }
                    }
                }
                bdss.dserraccumulate(ref pbuf.tmp0, pbuf.y, pbuf.desiredy);
            }
            bdss.dserrfinish(ref pbuf.tmp0);
            rep.relclserror = pbuf.tmp0[0];
            rep.avgce = pbuf.tmp0[1]/Math.Log(2);
            rep.rmserror = pbuf.tmp0[2];
            rep.avgerror = pbuf.tmp0[3];
            rep.avgrelerror = pbuf.tmp0[4];
            alglib.smp.ae_shared_pool_recycle(buf, ref pbuf);
        }
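
Two details above are easy to miss: DSErrAllocate receives +NOut for classifiers and -NOut for regression networks, which switches the accumulator between class-label and real-valued targets, and SubsetType selects between direct row addressing (rows Subset0..Subset1-1) and indirect addressing through Idx. A minimal illustrative sketch of the row dispatch; ResolveRow is a hypothetical helper, not ALGLIB code.

        // Illustrative sketch of the row-addressing dispatch used above.
        // ResolveRow is a hypothetical helper, not part of ALGLIB.
        private static int ResolveRow(int i, int subsettype, int[] idx)
        {
            // subsettype==0: iterate rows subset0..subset1-1 directly;
            // subsettype==1: map the loop index through the Idx array.
            return subsettype==0 ? i : idx[i];
        }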
Example #6
 public modelerrors(mlpbase.modelerrors obj)
 {
     _innerobj = obj;
 }
Example #7
 public modelerrors()
 {
     _innerobj = new mlpbase.modelerrors();
 }
Example #8
        /*************************************************************************
        This function trains the neural network passed to it, using the current
        dataset (the one passed to MLPSetDataset() or MLPSetSparseDataset()) and
        the current training settings. Training from NRestarts random starting
        positions is performed, and the best network is chosen.

        Training is performed using current training algorithm.

        INPUT PARAMETERS:
            S           -   trainer object;
            Network     -   neural network. It must have same number of inputs and
                            output/classes as was specified during creation of the
                            trainer object;
            TNetwork    -   the training neural network.
                            The user may inspect the weights in parameter Network
                            while the training process continues.
                            It has the same architecture as Network; you have to
                            copy Network or create a new network with the same
                            architecture.
            State       -   created LBFGS optimizer;
            NRestarts   -   number of restarts, >=0:
                            * NRestarts>0 means that specified  number  of  random
                              restarts are performed, best network is chosen after
                              training
                            * NRestarts=0 means that current state of the  network
                              is used for training.
            TrnSubset   -   subset of the training set (it stores row numbers),
                            used as the training set;
           TrnSubsetSize-   size of the subset (if TrnSubsetSize<0, the full
                            dataset is used); when TrnSubsetSize=0, the network
                            is filled with zero values, and the ValSubset
                            parameter is IGNORED;
            ValSubset   -   subset of the training set (it stores row numbers),
                            used as the validation set;
           ValSubsetSize-   size of the subset (if ValSubsetSize<0, the full
                            dataset is used); when ValSubsetSize<>0, the early
                            stopping training algorithm is used;
            BufWBest    -   buffer for storing interim results (BufWBest[0:WCount-1];
                            it must be allocated by the caller);
            BufWFinal   -   buffer for storing interim results (BufWFinal[0:WCount-1];
                            it must be allocated by the caller).

        OUTPUT PARAMETERS:
            Network     -   trained network;
            Rep         -   training report.

        NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
              the network is filled with zero values. The same behavior applies to
              the functions MLPStartTraining and MLPContinueTraining.

        NOTE: this method uses sum-of-squares error function for training.

          -- ALGLIB --
             Copyright 13.08.2012 by Bochkanov Sergey
        *************************************************************************/
        private static void mlptrainnetworkx(mlptrainer s,
            mlpbase.multilayerperceptron network,
            mlpbase.multilayerperceptron tnetwork,
            minlbfgs.minlbfgsstate state,
            int nrestarts,
            int[] trnsubset,
            int trnsubsetsize,
            int[] valsubset,
            int valsubsetsize,
            double[] bufwbest,
            double[] bufwfinal,
            mlpreport rep)
        {
            mlpbase.modelerrors modrep = new mlpbase.modelerrors();
            double eval = 0;
            double v = 0;
            double ebestcur = 0;
            double efinal = 0;
            int ngradbatch = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int twcount = 0;
            int itbest = 0;
            int itcnt = 0;
            int ntype = 0;
            int ttype = 0;
            bool rndstart = new bool();
            int pass = 0;
            int i = 0;
            int i_ = 0;

            alglib.ap.assert(s.npoints>=0, "MLPTrainNetworkX: internal error - parameter S is not initialized or is spoiled(S.NPoints<0)");
            if( s.rcpar )
            {
                ttype = 0;
            }
            else
            {
                ttype = 1;
            }
            if( !mlpbase.mlpissoftmax(network) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPTrainNetworkX: internal error - type of the resulting network is not similar to network type in trainer object");
            if( !mlpbase.mlpissoftmax(tnetwork) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPTrainNetworkX: internal error - type of the training network is not similar to network type in trainer object");
            mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
            alglib.ap.assert(s.nin==nin, "MLPTrainNetworkX: internal error - number of inputs in trainer is not equal to number of inputs in the network.");
            alglib.ap.assert(s.nout==nout, "MLPTrainNetworkX: internal error - number of outputs in trainer is not equal to number of outputs in the network.");
            mlpbase.mlpproperties(tnetwork, ref nin, ref nout, ref twcount);
            alglib.ap.assert(s.nin==nin, "MLPTrainNetworkX: internal error - number of inputs in trainer is not equal to number of inputs in the training network.");
            alglib.ap.assert(s.nout==nout, "MLPTrainNetworkX: internal error - number of outputs in trainer is not equal to number of outputs in the training network.");
            alglib.ap.assert(twcount==wcount, "MLPTrainNetworkX: internal error - number of weights in the resulting network is not equal to number of weights in the training network.");
            alglib.ap.assert(nrestarts>=0, "MLPTrainNetworkX: internal error - NRestarts<0.");
            alglib.ap.assert(alglib.ap.len(trnsubset)>=trnsubsetsize, "MLPTrainNetworkX: internal error - parameter TrnSubsetSize more than input subset size(Length(TrnSubset)<TrnSubsetSize)");
            for(i=0; i<=trnsubsetsize-1; i++)
            {
                alglib.ap.assert(trnsubset[i]>=0 && trnsubset[i]<=s.npoints-1, "MLPTrainNetworkX: internal error - parameter TrnSubset contains incorrect index(TrnSubset[I]<0 or TrnSubset[I]>S.NPoints-1)");
            }
            alglib.ap.assert(alglib.ap.len(valsubset)>=valsubsetsize, "MLPTrainNetworkX: internal error - parameter ValSubsetSize more than input subset size(Length(ValSubset)<ValSubsetSize)");
            for(i=0; i<=valsubsetsize-1; i++)
            {
                alglib.ap.assert(valsubset[i]>=0 && valsubset[i]<=s.npoints-1, "MLPTrainNetworkX: internal error - parameter ValSubset contains incorrect index(ValSubset[I]<0 or ValSubset[I]>S.NPoints-1)");
            }
            
            //
            // Initialize parameter Rep
            //
            rep.relclserror = 0;
            rep.avgce = 0;
            rep.rmserror = 0;
            rep.avgerror = 0;
            rep.avgrelerror = 0;
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            if( ((s.datatype==0 || s.datatype==1) && s.npoints>0) && trnsubsetsize!=0 )
            {
                
                //
                // Prepare
                //
                efinal = math.maxrealnumber;
                if( nrestarts!=0 )
                {
                    rndstart = true;
                }
                else
                {
                    rndstart = false;
                    nrestarts = 1;
                }
                ngradbatch = 0;
                eval = 0;
                ebestcur = 0;
                for(pass=1; pass<=nrestarts; pass++)
                {
                    mlpstarttrainingx(s, network, tnetwork, state, rndstart, trnsubset, trnsubsetsize);
                    itbest = 0;
                    itcnt = 0;
                    if( s.datatype==0 )
                    {
                        ebestcur = mlpbase.mlperrorsubset(network, s.densexy, s.npoints, valsubset, valsubsetsize);
                    }
                    if( s.datatype==1 )
                    {
                        ebestcur = mlpbase.mlperrorsparsesubset(network, s.sparsexy, s.npoints, valsubset, valsubsetsize);
                    }
                    for(i_=0; i_<=wcount-1;i_++)
                    {
                        bufwbest[i_] = network.weights[i_];
                    }
                    while( mlpcontinuetrainingx(s, network, tnetwork, state, trnsubset, trnsubsetsize, ref ngradbatch) )
                    {
                        if( s.datatype==0 )
                        {
                            eval = mlpbase.mlperrorsubset(network, s.densexy, s.npoints, valsubset, valsubsetsize);
                        }
                        if( s.datatype==1 )
                        {
                            eval = mlpbase.mlperrorsparsesubset(network, s.sparsexy, s.npoints, valsubset, valsubsetsize);
                        }
                        if( (double)(eval)<=(double)(ebestcur) )
                        {
                            for(i_=0; i_<=wcount-1;i_++)
                            {
                                bufwbest[i_] = network.weights[i_];
                            }
                            ebestcur = eval;
                            itbest = itcnt;
                        }
                        if( itcnt>30 && (double)(itcnt)>(double)(1.5*itbest) )
                        {
                            break;
                        }
                        itcnt = itcnt+1;
                    }
                    for(i_=0; i_<=wcount-1;i_++)
                    {
                        network.weights[i_] = bufwbest[i_];
                    }
                    
                    //
                    // Compare with the final (best) answer.
                    //
                    v = 0.0;
                    for(i_=0; i_<=wcount-1;i_++)
                    {
                        v += bufwbest[i_]*bufwbest[i_];
                    }
                    if( s.datatype==0 )
                    {
                        ebestcur = mlpbase.mlperrorsubset(network, s.densexy, s.npoints, trnsubset, trnsubsetsize)+0.5*s.decay*v;
                    }
                    if( s.datatype==1 )
                    {
                        ebestcur = mlpbase.mlperrorsparsesubset(network, s.sparsexy, s.npoints, trnsubset, trnsubsetsize)+0.5*s.decay*v;
                    }
                    if( (double)(ebestcur)<(double)(efinal) )
                    {
                        for(i_=0; i_<=wcount-1;i_++)
                        {
                            bufwfinal[i_] = bufwbest[i_];
                        }
                        efinal = ebestcur;
                    }
                }
                
                //
                // Final network
                //
                for(i_=0; i_<=wcount-1;i_++)
                {
                    network.weights[i_] = bufwfinal[i_];
                }
                rep.ngrad = ngradbatch;
            }
            else
            {
                for(i=0; i<=wcount-1; i++)
                {
                    network.weights[i] = 0;
                }
            }
            
            //
            // Calculate errors.
            //
            if( s.datatype==0 )
            {
                mlpbase.mlpallerrorssubset(network, s.densexy, s.npoints, trnsubset, trnsubsetsize, modrep);
            }
            if( s.datatype==1 )
            {
                mlpbase.mlpallerrorssparsesubset(network, s.sparsexy, s.npoints, trnsubset, trnsubsetsize, modrep);
            }
            rep.relclserror = modrep.relclserror;
            rep.avgce = modrep.avgce;
            rep.rmserror = modrep.rmserror;
            rep.avgerror = modrep.avgerror;
            rep.avgrelerror = modrep.avgrelerror;
        }
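
Note how restarts are compared above: not by raw training error but by the regularized score TrainError + 0.5*Decay*||W||^2, so weight decay influences model selection as well as optimization. A standalone sketch of that score follows; RestartScore is an illustrative name, not part of ALGLIB.

        // Hedged sketch of the restart-selection score used above:
        // training error plus an L2 weight-decay penalty.
        private static double RestartScore(double trainerror,
            double[] weights,
            double decay)
        {
            double v = 0.0;
            for(int i=0; i<weights.Length; i++)
            {
                v += weights[i]*weights[i]; // squared weight norm
            }
            return trainerror+0.5*decay*v;  // regularized selection criterion
        }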