Code Example #1
File: alglibinternal.cs Project: B-Rich/Compass
 public override void init()
 {
     entries = new int[0, 0];
     buffer = new double[0];
     precr = new double[0];
     preci = new double[0];
     bluesteinpool = new alglib.smp.shared_pool();
 }
Code Example #2
File: statistics.cs Project: orlovk/PtProject
        /*************************************************************************
        This function replaces data in XY by their CENTERED ranks:
        * XY is processed row-by-row
        * rows are processed separately
        * tied data are correctly handled (tied ranks are calculated)
        * centered ranks are just usual ranks, but centered in such a way that the
          sum of within-row values is equal to 0.0.
        * centering is performed by subtracting the mean from each row, i.e. it
          changes the mean value, but does NOT change higher moments

        SMP EDITION OF ALGLIB:

          ! This function can utilize multicore capabilities of  your system.  In
          ! order to do this you have to call version with "smp_" prefix,   which
          ! indicates that multicore code will be used.
          ! 
          ! This note is given for users of SMP edition; if you use GPL  edition,
          ! or commercial edition of ALGLIB without SMP support, you  still  will
          ! be able to call smp-version of this function,  but  all  computations
          ! will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.
          !
          ! You should remember that starting/stopping worker threads always  has
          ! non-zero cost. Although the multicore version is  pretty  efficient  on
          ! large problems, we do not recommend using it on small problems -
          ! ones where the expected operation count is less than 100,000.

        INPUT PARAMETERS:
            XY      -   array[NPoints,NFeatures], dataset
            NPoints -   number of points
            NFeatures-  number of features

        OUTPUT PARAMETERS:
            XY      -   data are replaced by their within-row ranks;
                        ranking starts from 0, ends at NFeatures-1

          -- ALGLIB --
             Copyright 18.04.2013 by Bochkanov Sergey
        *************************************************************************/
        public static void rankdatacentered(double[,] xy,
            int npoints,
            int nfeatures)
        {
            apserv.apbuffers buf0 = new apserv.apbuffers();
            apserv.apbuffers buf1 = new apserv.apbuffers();
            int basecasecost = 0;
            alglib.smp.shared_pool pool = new alglib.smp.shared_pool();

            alglib.ap.assert(npoints>=0, "RankData: NPoints<0");
            alglib.ap.assert(nfeatures>=1, "RankData: NFeatures<1");
            alglib.ap.assert(alglib.ap.rows(xy)>=npoints, "RankData: Rows(XY)<NPoints");
            alglib.ap.assert(alglib.ap.cols(xy)>=nfeatures || npoints==0, "RankData: Cols(XY)<NFeatures");
            alglib.ap.assert(apserv.apservisfinitematrix(xy, npoints, nfeatures), "RankData: XY contains infinite/NAN elements");
            
            //
            // Basecase cost is a maximum cost of basecase problems.
            // Problems harder than that cost will be split.
            //
            // Problem cost is assumed to be NPoints*NFeatures*log2(NFeatures),
            // which is proportional, but NOT equal to number of FLOPs required
            // to solve problem.
            //
            basecasecost = 10000;
            
            //
            // Try to use serial code, no SMP functionality, no shared pools.
            //
            if( (double)(apserv.inttoreal(npoints)*apserv.inttoreal(nfeatures)*apserv.logbase2(nfeatures))<(double)(basecasecost) )
            {
                rankdatabasecase(xy, 0, npoints, nfeatures, true, buf0, buf1);
                return;
            }
            
            //
            // Parallel code
            //
            alglib.smp.ae_shared_pool_set_seed(pool, buf0);
            rankdatarec(xy, 0, npoints, nfeatures, true, pool, basecasecost);
        }
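
Usage note (not part of the original listing): a centered rank is the ordinary within-row rank minus the row's mean rank, so each row sums to 0. For the row {10, 30, 20} the plain ranks are {0, 2, 1} and the centered ranks are {-1, 1, 0}; for the tied row {5, 5, 9} the tied ranks {0.5, 0.5, 2} become {-0.5, -0.5, 1}. The sketch below calls the internal routine shown above; it assumes the code is compiled together with the ALGLIB C# sources so that the statistics class is reachable as alglib.statistics, and the demo class name is illustrative only.

// Minimal sketch (assumption: built alongside the ALGLIB C# sources, so the
// statistics class shown above is visible as alglib.statistics).
public static class RankDataCenteredDemo
{
    public static void Run()
    {
        // 2 points, 3 features; rows are ranked independently.
        double[,] xy = new double[,]
        {
            { 10.0, 30.0, 20.0 },   // expected centered ranks: -1.0,  1.0, 0.0
            {  5.0,  5.0,  9.0 }    // tied values:             -0.5, -0.5, 1.0
        };
        alglib.statistics.rankdatacentered(xy, 2, 3);
        for(int i=0; i<2; i++)
        {
            // each printed row sums to 0
            System.Console.WriteLine(xy[i,0] + " " + xy[i,1] + " " + xy[i,2]);
        }
    }
}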
Code Example #3
        /*************************************************************************
        This function trains neural network ensemble passed to this function using
        current dataset and early stopping training algorithm. Each early stopping
        round performs NRestarts  random  restarts  (thus,  EnsembleSize*NRestarts
        training rounds are performed in total).

        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support (C++ computational core)
          !
          ! Second improvement gives constant  speedup (2-3X).  First  improvement
          ! gives  close-to-linear  speedup  on   multicore   systems.   Following
          ! operations can be executed in parallel:
          ! * EnsembleSize  training  sessions  performed  for  each  of  ensemble
          !   members (always parallelized)
          ! * NRestarts  training  sessions  performed  within  each  of  training
          !   sessions (if NRestarts>1)
          ! * gradient calculation over large dataset (if dataset is large enough)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.

        INPUT PARAMETERS:
            S           -   trainer object;
            Ensemble    -   neural network ensemble. It must have same  number  of
                            inputs and outputs/classes  as  was  specified  during
                            creation of the trainer object.
            NRestarts   -   number of restarts, >=0:
                            * NRestarts>0 means that specified  number  of  random
                              restarts are performed during each ES round;
                            * NRestarts=0 is silently replaced by 1.

        OUTPUT PARAMETERS:
            Ensemble    -   trained ensemble;
            Rep         -   it contains all types of errors.
            
        NOTE: this training method uses BOTH early stopping and weight decay!  So,
              you should select weight decay before starting training just as  you
              select it before training "conventional" networks.

        NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
              or  single-point  dataset  was  passed,  ensemble  is filled by zero
              values.

        NOTE: this method uses sum-of-squares error function for training.

          -- ALGLIB --
             Copyright 22.08.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlptrainensemblees(mlptrainer s,
            mlpe.mlpensemble ensemble,
            int nrestarts,
            mlpreport rep)
        {
            int nin = 0;
            int nout = 0;
            int ntype = 0;
            int ttype = 0;
            alglib.smp.shared_pool esessions = new alglib.smp.shared_pool();
            apserv.sinteger sgrad = new apserv.sinteger();
            mlpbase.modelerrors tmprep = new mlpbase.modelerrors();

            alglib.ap.assert(s.npoints>=0, "MLPTrainEnsembleES: parameter S is not initialized or is spoiled(S.NPoints<0)");
            if( !mlpe.mlpeissoftmax(ensemble) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            if( s.rcpar )
            {
                ttype = 0;
            }
            else
            {
                ttype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPTrainEnsembleES: internal error - type of input network is not similar to network type in trainer object");
            nin = mlpbase.mlpgetinputscount(ensemble.network);
            alglib.ap.assert(s.nin==nin, "MLPTrainEnsembleES: number of inputs in trainer is not equal to number of inputs in ensemble network");
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            alglib.ap.assert(s.nout==nout, "MLPTrainEnsembleES: number of outputs in trainer is not equal to number of outputs in ensemble network");
            alglib.ap.assert(nrestarts>=0, "MLPTrainEnsembleES: NRestarts<0.");
            
            //
            // Initialize parameter Rep
            //
            rep.relclserror = 0;
            rep.avgce = 0;
            rep.rmserror = 0;
            rep.avgerror = 0;
            rep.avgrelerror = 0;
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            
            //
            // Allocate
            //
            apserv.ivectorsetlengthatleast(ref s.subset, s.npoints);
            apserv.ivectorsetlengthatleast(ref s.valsubset, s.npoints);
            
            //
            // Start training
            //
            // NOTE: ESessions is not initialized because MLPTrainEnsembleX
            //       needs uninitialized pool.
            //
            sgrad.val = 0;
            mlptrainensemblex(s, ensemble, 0, ensemble.ensemblesize, nrestarts, 0, sgrad, true, esessions);
            rep.ngrad = sgrad.val;
            
            //
            // Calculate errors.
            //
            if( s.datatype==0 )
            {
                mlpe.mlpeallerrorsx(ensemble, s.densexy, s.sparsexy, s.npoints, 0, ensemble.network.dummyidx, 0, s.npoints, 0, ensemble.network.buf, tmprep);
            }
            if( s.datatype==1 )
            {
                mlpe.mlpeallerrorsx(ensemble, s.densexy, s.sparsexy, s.npoints, 1, ensemble.network.dummyidx, 0, s.npoints, 0, ensemble.network.buf, tmprep);
            }
            rep.relclserror = tmprep.relclserror;
            rep.avgce = tmprep.avgce;
            rep.rmserror = tmprep.rmserror;
            rep.avgerror = tmprep.avgerror;
            rep.avgrelerror = tmprep.avgrelerror;
        }
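
Usage note (a hedged sketch, not from the original listing): the call below goes through ALGLIB's public C# wrapper rather than the internal routine above. The wrapper names mlpcreatetrainer, mlpsetdataset, mlpecreate1 and the wrapper-level mlptrainensemblees(trainer, ensemble, nrestarts, out rep) are not shown in this listing and are assumed to match the standard ALGLIB 3.x API; adjust to your ALGLIB version if they differ.

// Hedged sketch: train a 10-member ensemble with early stopping, 3 random
// restarts per ES round (assumed ALGLIB 3.x public wrapper API).
double[,] xy = new double[,]
{
    // x1,  x2,  target
    { 0.0, 0.0, 0.0 },
    { 0.0, 1.0, 1.0 },
    { 1.0, 0.0, 1.0 },
    { 1.0, 1.0, 0.0 }
};
alglib.mlptrainer trn;
alglib.mlpensemble ensemble;
alglib.mlpreport rep;
alglib.mlpcreatetrainer(2, 1, out trn);        // 2 inputs, 1 output (regression)
alglib.mlpsetdataset(trn, xy, 4);              // 4 training points
alglib.mlpecreate1(2, 5, 1, 10, out ensemble); // ensemble of ten 2-5-1 networks
alglib.mlptrainensemblees(trn, ensemble, 3, out rep);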
Code Example #4
 public override void init()
 {
     ct = new double[0,0];
     ctbest = new double[0,0];
     xycbest = new int[0];
     xycprev = new int[0];
     d2 = new double[0];
     csizes = new int[0];
     initbuf = new apserv.apbuffers();
     updatepool = new alglib.smp.shared_pool();
 }
Code Example #5
        /*************************************************************************
        This function trains neural network passed to this function, using current
        dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset())
        and current training settings. Training  from  NRestarts  random  starting
        positions is performed, best network is chosen.

        Training is performed using current training algorithm.

        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support (C++ computational core)
          !
          ! Second improvement gives constant  speedup (2-3X).  First  improvement
          ! gives  close-to-linear  speedup  on   multicore   systems.   Following
          ! operations can be executed in parallel:
          ! * NRestarts training sessions performed within each of
          !   cross-validation rounds (if NRestarts>1)
          ! * gradient calculation over large dataset (if dataset is large enough)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.

        INPUT PARAMETERS:
            S           -   trainer object
            Network     -   neural network. It must have same number of inputs and
                            output/classes as was specified during creation of the
                            trainer object.
            NRestarts   -   number of restarts, >=0:
                            * NRestarts>0 means that specified  number  of  random
                              restarts are performed, best network is chosen after
                              training
                            * NRestarts=0 means that current state of the  network
                              is used for training.

        OUTPUT PARAMETERS:
            Network     -   trained network

        NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
              network  is  filled  by zero  values.  Same  behavior  for functions
              MLPStartTraining and MLPContinueTraining.

        NOTE: this method uses sum-of-squares error function for training.

          -- ALGLIB --
             Copyright 23.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlptrainnetwork(mlptrainer s,
            mlpbase.multilayerperceptron network,
            int nrestarts,
            mlpreport rep)
        {
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int ntype = 0;
            int ttype = 0;
            alglib.smp.shared_pool trnpool = new alglib.smp.shared_pool();

            alglib.ap.assert(s.npoints>=0, "MLPTrainNetwork: parameter S is not initialized or is spoiled(S.NPoints<0)");
            if( !mlpbase.mlpissoftmax(network) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            if( s.rcpar )
            {
                ttype = 0;
            }
            else
            {
                ttype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPTrainNetwork: type of input network is not similar to network type in trainer object");
            mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
            alglib.ap.assert(s.nin==nin, "MLPTrainNetwork: number of inputs in trainer is not equal to number of inputs in network");
            alglib.ap.assert(s.nout==nout, "MLPTrainNetwork: number of outputs in trainer is not equal to number of outputs in network");
            alglib.ap.assert(nrestarts>=0, "MLPTrainNetwork: NRestarts<0.");
            
            //
            // Train
            //
            mlptrainnetworkx(s, nrestarts, -1, s.subset, -1, s.subset, 0, network, rep, true, trnpool);
        }
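
Usage note (hedged sketch, not from the listing): a typical call sequence through the public wrapper. mlpcreatetrainer, mlpsetdataset, mlpsetdecay, mlpcreate1 and the wrapper-level mlptrainnetwork(trainer, network, nrestarts, out rep) are assumed ALGLIB 3.x API names and are not shown above.

// Hedged sketch: train one 2-5-1 network from 5 random restarts and keep the best
// (assumed ALGLIB 3.x public wrapper API; xy as in the ensemble sketch above).
alglib.mlptrainer trn;
alglib.multilayerperceptron net;
alglib.mlpreport rep;
alglib.mlpcreatetrainer(2, 1, out trn);   // 2 inputs, 1 output
alglib.mlpsetdataset(trn, xy, 4);         // attach the dataset
alglib.mlpsetdecay(trn, 0.001);           // mild weight decay
alglib.mlpcreate1(2, 5, 1, out net);      // 2-5-1 architecture
alglib.mlptrainnetwork(trn, net, 5, out rep);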
Code Example #6
        /*************************************************************************
        This function estimates generalization error using cross-validation on the
        current dataset with current training settings.

        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support (C++ computational core)
          !
          ! Second improvement gives constant  speedup (2-3X).  First  improvement
          ! gives  close-to-linear  speedup  on   multicore   systems.   Following
          ! operations can be executed in parallel:
          ! * FoldsCount cross-validation rounds (always)
          ! * NRestarts training sessions performed within each of
          !   cross-validation rounds (if NRestarts>1)
          ! * gradient calculation over large dataset (if dataset is large enough)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.

        INPUT PARAMETERS:
            S           -   trainer object
            Network     -   neural network. It must have same number of inputs and
                            output/classes as was specified during creation of the
                            trainer object. Network is not changed  during  cross-
                            validation and is not trained - it  is  used  only  as
                            representative of its architecture. I.e., we  estimate
                            generalization properties of  ARCHITECTURE,  not  some
                            specific network.
            NRestarts   -   number of restarts, >=0:
                            * NRestarts>0  means  that  for  each cross-validation
                              round   specified  number   of  random  restarts  is
                              performed,  with  best  network  being  chosen after
                              training.
                            * NRestarts=0 is same as NRestarts=1
            FoldsCount  -   number of folds in k-fold cross-validation:
                            * 2<=FoldsCount<=size of dataset
                            * recommended value: 10.
                            * values larger than dataset size will be silently
                              truncated down to dataset size

        OUTPUT PARAMETERS:
            Rep         -   structure which contains cross-validation estimates:
                            * Rep.RelCLSError - fraction of misclassified cases.
                            * Rep.AvgCE - average cross-entropy
                            * Rep.RMSError - root-mean-square error
                            * Rep.AvgError - average error
                            * Rep.AvgRelError - average relative error
                            
        NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
              or subset with only one point  was  given,  zeros  are  returned  as
              estimates.

        NOTE: this method performs FoldsCount cross-validation  rounds,  each  one
              with NRestarts random starts.  Thus,  FoldsCount*NRestarts  networks
              are trained in total.

        NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.

        NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
              contain errors in prediction of posterior probabilities.
                
          -- ALGLIB --
             Copyright 23.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpkfoldcv(mlptrainer s,
            mlpbase.multilayerperceptron network,
            int nrestarts,
            int foldscount,
            mlpreport rep)
        {
            alglib.smp.shared_pool pooldatacv = new alglib.smp.shared_pool();
            mlpparallelizationcv datacv = new mlpparallelizationcv();
            mlpparallelizationcv sdatacv = null;
            double[,] cvy = new double[0,0];
            int[] folds = new int[0];
            double[] buf = new double[0];
            double[] dy = new double[0];
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int rowsize = 0;
            int ntype = 0;
            int ttype = 0;
            int i = 0;
            int j = 0;
            int k = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;
            int i1_ = 0;

            if( !mlpbase.mlpissoftmax(network) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            if( s.rcpar )
            {
                ttype = 0;
            }
            else
            {
                ttype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPKFoldCV: type of input network is not similar to network type in trainer object");
            alglib.ap.assert(s.npoints>=0, "MLPKFoldCV: possible trainer S is not initialized(S.NPoints<0)");
            mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
            alglib.ap.assert(s.nin==nin, "MLPKFoldCV:  number of inputs in trainer is not equal to number of inputs in network");
            alglib.ap.assert(s.nout==nout, "MLPKFoldCV:  number of outputs in trainer is not equal to number of outputs in network");
            alglib.ap.assert(nrestarts>=0, "MLPKFoldCV: NRestarts<0");
            alglib.ap.assert(foldscount>=2, "MLPKFoldCV: FoldsCount<2");
            if( foldscount>s.npoints )
            {
                foldscount = s.npoints;
            }
            rep.relclserror = 0;
            rep.avgce = 0;
            rep.rmserror = 0;
            rep.avgerror = 0;
            rep.avgrelerror = 0;
            hqrnd.hqrndrandomize(rs);
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            if( s.npoints==0 || s.npoints==1 )
            {
                return;
            }
            
            //
            // Read network geometry, test parameters
            //
            if( s.rcpar )
            {
                rowsize = nin+nout;
                dy = new double[nout];
                bdss.dserrallocate(-nout, ref buf);
            }
            else
            {
                rowsize = nin+1;
                dy = new double[1];
                bdss.dserrallocate(nout, ref buf);
            }
            
            //
            // Folds
            //
            folds = new int[s.npoints];
            for(i=0; i<=s.npoints-1; i++)
            {
                folds[i] = i*foldscount/s.npoints;
            }
            for(i=0; i<=s.npoints-2; i++)
            {
                j = i+hqrnd.hqrnduniformi(rs, s.npoints-i);
                if( j!=i )
                {
                    k = folds[i];
                    folds[i] = folds[j];
                    folds[j] = k;
                }
            }
            cvy = new double[s.npoints, nout];
            
            //
            // Initialize SEED-value for shared pool
            //
            datacv.ngrad = 0;
            mlpbase.mlpcopy(network, datacv.network);
            datacv.subset = new int[s.npoints];
            datacv.xyrow = new double[rowsize];
            datacv.y = new double[nout];
            
            //
            // Create shared pool
            //
            alglib.smp.ae_shared_pool_set_seed(pooldatacv, datacv);
            
            //
            // Parallelization
            //
            mthreadcv(s, rowsize, nrestarts, folds, 0, foldscount, cvy, pooldatacv);
            
            //
            // Calculate value for NGrad
            //
            alglib.smp.ae_shared_pool_first_recycled(pooldatacv, ref sdatacv);
            while( sdatacv!=null )
            {
                rep.ngrad = rep.ngrad+sdatacv.ngrad;
                alglib.smp.ae_shared_pool_next_recycled(pooldatacv, ref sdatacv);
            }
            
            //
            // Combine results and calculate cross-validation error
            //
            for(i=0; i<=s.npoints-1; i++)
            {
                if( s.datatype==0 )
                {
                    for(i_=0; i_<=rowsize-1;i_++)
                    {
                        datacv.xyrow[i_] = s.densexy[i,i_];
                    }
                }
                if( s.datatype==1 )
                {
                    sparse.sparsegetrow(s.sparsexy, i, ref datacv.xyrow);
                }
                for(i_=0; i_<=nout-1;i_++)
                {
                    datacv.y[i_] = cvy[i,i_];
                }
                if( s.rcpar )
                {
                    i1_ = (nin) - (0);
                    for(i_=0; i_<=nout-1;i_++)
                    {
                        dy[i_] = datacv.xyrow[i_+i1_];
                    }
                }
                else
                {
                    dy[0] = datacv.xyrow[nin];
                }
                bdss.dserraccumulate(ref buf, datacv.y, dy);
            }
            bdss.dserrfinish(ref buf);
            rep.relclserror = buf[0];
            rep.avgce = buf[1];
            rep.rmserror = buf[2];
            rep.avgerror = buf[3];
            rep.avgrelerror = buf[4];
        }
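
Usage note (hedged sketch, not from the listing): k-fold cross-validation only needs the network as a representative of its architecture; the network itself is not trained in place. The wrapper-level mlpkfoldcv(trainer, network, nrestarts, foldscount, out rep) is assumed to match the ALGLIB 3.x API and is not shown in this listing.

// Hedged sketch: 10-fold CV estimate with 2 restarts per fold, reusing the
// trainer and network from the previous sketch (assumed ALGLIB 3.x wrapper API).
alglib.mlpreport cvrep;
alglib.mlpkfoldcv(trn, net, 2, 10, out cvrep);
System.Console.WriteLine("CV RMS error: " + cvrep.rmserror);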
Code Example #7
 public override void init()
 {
     network = new mlpbase.multilayerperceptron();
     rep = new mlpreport();
     subset = new int[0];
     xyrow = new double[0];
     y = new double[0];
     trnpool = new alglib.smp.shared_pool();
 }
Code Example #8
 public override void init()
 {
     trnsubset = new int[0];
     valsubset = new int[0];
     mlpsessions = new alglib.smp.shared_pool();
     mlprep = new mlpreport();
     network = new mlpbase.multilayerperceptron();
 }
Code Example #9
 public override void init()
 {
     hllayersizes = new int[0];
     hlconnections = new int[0];
     hlneurons = new int[0];
     structinfo = new int[0];
     weights = new double[0];
     columnmeans = new double[0];
     columnsigmas = new double[0];
     neurons = new double[0];
     dfdnet = new double[0];
     derror = new double[0];
     x = new double[0];
     y = new double[0];
     xy = new double[0,0];
     xyrow = new double[0];
     nwbuf = new double[0];
     integerbuf = new int[0];
     err = new modelerrors();
     rndbuf = new double[0];
     buf = new alglib.smp.shared_pool();
     gradbuf = new alglib.smp.shared_pool();
     dummydxy = new double[0,0];
     dummysxy = new sparse.sparsematrix();
     dummyidx = new int[0];
     dummypool = new alglib.smp.shared_pool();
 }