Example #1
        /*************************************************************************
        Recursive subroutine for SPD inversion.

        NOTE: this function expects that the matrix is strictly positive-definite.

          -- ALGLIB routine --
             10.02.2010
             Bochkanov Sergey
        *************************************************************************/
        public static void spdmatrixcholeskyinverserec(ref double[,] a,
            int offs,
            int n,
            bool isupper,
            ref double[] tmp)
        {
            int i = 0;
            int j = 0;
            double v = 0;
            int n1 = 0;
            int n2 = 0;
            apserv.sinteger sinfo2 = new apserv.sinteger();
            matinvreport rep2 = new matinvreport();
            int i_ = 0;
            int i1_ = 0;

            if( n<1 )
            {
                return;
            }
            
            //
            // Base case
            //
            if( n<=ablas.ablasblocksize(a) )
            {
                sinfo2.val = 1;
                rmatrixtrinverserec(a, offs, n, isupper, false, tmp, sinfo2, rep2);
                alglib.ap.assert(sinfo2.val>0, "SPDMatrixCholeskyInverseRec: integrity check failed");
                if( isupper )
                {
                    
                    //
                    // Compute the product U * U'.
                    // NOTE: we never assume that diagonal of U is real
                    //
                    for(i=0; i<=n-1; i++)
                    {
                        if( i==0 )
                        {
                            
                            //
                            // 1x1 matrix
                            //
                            a[offs+i,offs+i] = math.sqr(a[offs+i,offs+i]);
                        }
                        else
                        {
                            
                            //
                            // (I+1)x(I+1) matrix,
                            //
                            // ( A11  A12 )   ( A11^H        )   ( A11*A11^H+A12*A12^H  A12*A22^H )
                            // (          ) * (              ) = (                                )
                            // (      A22 )   ( A12^H  A22^H )   ( A22*A12^H            A22*A22^H )
                            //
                            // A11 is IxI, A22 is 1x1.
                            //
                            i1_ = (offs) - (0);
                            for(i_=0; i_<=i-1;i_++)
                            {
                                tmp[i_] = a[i_+i1_,offs+i];
                            }
                            for(j=0; j<=i-1; j++)
                            {
                                v = a[offs+j,offs+i];
                                i1_ = (j) - (offs+j);
                                for(i_=offs+j; i_<=offs+i-1;i_++)
                                {
                                    a[offs+j,i_] = a[offs+j,i_] + v*tmp[i_+i1_];
                                }
                            }
                            v = a[offs+i,offs+i];
                            for(i_=offs; i_<=offs+i-1;i_++)
                            {
                                a[i_,offs+i] = v*a[i_,offs+i];
                            }
                            a[offs+i,offs+i] = math.sqr(a[offs+i,offs+i]);
                        }
                    }
                }
                else
                {
                    
                    //
                    // Compute the product L' * L
                    // NOTE: we never assume that diagonal of L is real
                    //
                    for(i=0; i<=n-1; i++)
                    {
                        if( i==0 )
                        {
                            
                            //
                            // 1x1 matrix
                            //
                            a[offs+i,offs+i] = math.sqr(a[offs+i,offs+i]);
                        }
                        else
                        {
                            
                            //
                            // (I+1)x(I+1) matrix,
                            //
                            // ( A11^H  A21^H )   ( A11      )   ( A11^H*A11+A21^H*A21  A21^H*A22 )
                            // (              ) * (          ) = (                                )
                            // (        A22^H )   ( A21  A22 )   ( A22^H*A21            A22^H*A22 )
                            //
                            // A11 is IxI, A22 is 1x1.
                            //
                            i1_ = (offs) - (0);
                            for(i_=0; i_<=i-1;i_++)
                            {
                                tmp[i_] = a[offs+i,i_+i1_];
                            }
                            for(j=0; j<=i-1; j++)
                            {
                                v = a[offs+i,offs+j];
                                i1_ = (0) - (offs);
                                for(i_=offs; i_<=offs+j;i_++)
                                {
                                    a[offs+j,i_] = a[offs+j,i_] + v*tmp[i_+i1_];
                                }
                            }
                            v = a[offs+i,offs+i];
                            for(i_=offs; i_<=offs+i-1;i_++)
                            {
                                a[offs+i,i_] = v*a[offs+i,i_];
                            }
                            a[offs+i,offs+i] = math.sqr(a[offs+i,offs+i]);
                        }
                    }
                }
                return;
            }
            
            //
            // Recursive code: triangular factor inversion merged with
            // UU' or L'L multiplication
            //
            ablas.ablassplitlength(a, n, ref n1, ref n2);
            
            //
            // form off-diagonal block of triangular inverse
            //
            if( isupper )
            {
                for(i=0; i<=n1-1; i++)
                {
                    for(i_=offs+n1; i_<=offs+n-1;i_++)
                    {
                        a[offs+i,i_] = -1*a[offs+i,i_];
                    }
                }
                ablas.rmatrixlefttrsm(n1, n2, a, offs, offs, isupper, false, 0, a, offs, offs+n1);
                ablas.rmatrixrighttrsm(n1, n2, a, offs+n1, offs+n1, isupper, false, 0, a, offs, offs+n1);
            }
            else
            {
                for(i=0; i<=n2-1; i++)
                {
                    for(i_=offs; i_<=offs+n1-1;i_++)
                    {
                        a[offs+n1+i,i_] = -1*a[offs+n1+i,i_];
                    }
                }
                ablas.rmatrixrighttrsm(n2, n1, a, offs, offs, isupper, false, 0, a, offs+n1, offs);
                ablas.rmatrixlefttrsm(n2, n1, a, offs+n1, offs+n1, isupper, false, 0, a, offs+n1, offs);
            }
            
            //
            // invert first diagonal block
            //
            spdmatrixcholeskyinverserec(ref a, offs, n1, isupper, ref tmp);
            
            //
            // update first diagonal block with off-diagonal block,
            // update off-diagonal block
            //
            if( isupper )
            {
                ablas.rmatrixsyrk(n1, n2, 1.0, a, offs, offs+n1, 0, 1.0, a, offs, offs, isupper);
                ablas.rmatrixrighttrsm(n1, n2, a, offs+n1, offs+n1, isupper, false, 1, a, offs, offs+n1);
            }
            else
            {
                ablas.rmatrixsyrk(n1, n2, 1.0, a, offs+n1, offs, 1, 1.0, a, offs, offs, isupper);
                ablas.rmatrixlefttrsm(n2, n1, a, offs+n1, offs+n1, isupper, false, 1, a, offs+n1, offs);
            }
            
            //
            // invert second diagonal block
            //
            spdmatrixcholeskyinverserec(ref a, offs+n1, n2, isupper, ref tmp);
        }
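Usage sketch (not from the original source): given an upper Cholesky factor U with A = U'*U, the routine overwrites the upper triangle with inv(A). The 2x2 values below are worked out by hand, the call is written as if made from inside the same matinv unit, and the scratch buffer is sized generously because its exact required length is an internal detail of the callers.

        // Invert A = [ 4 2 ; 2 2 ] given its upper Cholesky factor
        // U = [ 2 1 ; 0 1 ], so that A = U'*U.
        double[,] u = new double[,] { { 2.0, 1.0 }, { 0.0, 1.0 } };
        double[] tmp = new double[2 * 2];   // scratch buffer, sized generously
        spdmatrixcholeskyinverserec(ref u, 0, 2, true, ref tmp);
        // The upper triangle of u now holds inv(A):
        // [  0.5  -0.5 ]
        // [   .    1.0 ]   (elements below the diagonal are left untouched)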
Example #2
        /*************************************************************************
        Triangular matrix inverse (real)

        The subroutine inverts the following types of matrices:
            * upper triangular
            * upper triangular with unit diagonal
            * lower triangular
            * lower triangular with unit diagonal

        In case of an upper (lower) triangular matrix,  the  inverse  matrix  will
        also be upper (lower) triangular, and after the end of the algorithm,  the
        inverse matrix replaces the source matrix. The elements  below (above) the
        main diagonal are not changed by the algorithm.

        If  the matrix  has a unit diagonal, the inverse matrix also  has  a  unit
        diagonal, and the diagonal elements are not passed to the algorithm.

        COMMERCIAL EDITION OF ALGLIB:

          ! The commercial version of ALGLIB includes two important improvements
          ! of this function, which can be used from C++ and C#:
          ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
          ! * multicore support
          !
          ! Intel MKL gives an approximately constant (with respect to the number
          ! of worker threads) acceleration factor, which depends on the CPU being
          ! used, the problem size and the "baseline" ALGLIB edition used for
          ! comparison.
          !
          ! For example, on an SSE2-capable CPU with N=1024, HPC ALGLIB will be:
          ! * about 2-3x faster than ALGLIB for C++ without MKL
          ! * about 7-10x faster than the "pure C#" edition of ALGLIB
          ! The difference in performance is more striking on newer CPUs with
          ! support for newer SIMD instructions. Generally, MKL accelerates any
          ! problem whose size is at least 128, with the best efficiency achieved
          ! for N larger than 512.
          !
          ! The commercial edition of ALGLIB also supports multithreaded
          ! acceleration of this function. We should note that triangular
          ! inversion is harder to parallelize than, say, a matrix-matrix product:
          ! this algorithm has many internal synchronization points which cannot
          ! be avoided. However, parallelism becomes profitable starting from
          ! N=1024, achieving near-linear speedup for N=4096 or higher.
          !
          ! In order to use multicore features you have to:
          ! * use the commercial version of ALGLIB
          ! * call this function with the "smp_" prefix, which indicates that
          !   multicore code will be used
          !
          ! We recommend that you read the 'Working with commercial version'
          ! section of the ALGLIB Reference Manual to find out how to use the
          ! performance-related features provided by the commercial edition of
          ! ALGLIB.
          
        Input parameters:
            A       -   matrix, array[0..N-1, 0..N-1].
            N       -   size of matrix A (optional):
                        * if given, only the principal NxN submatrix is processed
                          and overwritten; other elements are unchanged.
                        * if not given, the size is automatically determined from
                          the matrix size (A must be a square matrix)
            IsUpper -   True, if the matrix is upper triangular.
            IsUnit  -   diagonal type (optional):
                        * if True, matrix has unit diagonal (a[i,i] are NOT used)
                        * if False, matrix diagonal is arbitrary
                        * if not given, False is assumed

        Output parameters:
            Info    -   same as for RMatrixLUInverse
            Rep     -   same as for RMatrixLUInverse
            A       -   same as for RMatrixLUInverse.

          -- ALGLIB --
             Copyright 05.02.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void rmatrixtrinverse(ref double[,] a,
            int n,
            bool isupper,
            bool isunit,
            ref int info,
            matinvreport rep)
        {
            int i = 0;
            int j = 0;
            double[] tmp = new double[0];
            apserv.sinteger sinfo = new apserv.sinteger();

            info = 0;

            alglib.ap.assert(n>0, "RMatrixTRInverse: N<=0!");
            alglib.ap.assert(alglib.ap.cols(a)>=n, "RMatrixTRInverse: cols(A)<N!");
            alglib.ap.assert(alglib.ap.rows(a)>=n, "RMatrixTRInverse: rows(A)<N!");
            alglib.ap.assert(apserv.isfinitertrmatrix(a, n, isupper), "RMatrixTRInverse: A contains infinite or NaN values!");
            
            //
            // calculate condition numbers
            //
            rep.r1 = rcond.rmatrixtrrcond1(a, n, isupper, isunit);
            rep.rinf = rcond.rmatrixtrrcondinf(a, n, isupper, isunit);
            if( (double)(rep.r1)<(double)(rcond.rcondthreshold()) || (double)(rep.rinf)<(double)(rcond.rcondthreshold()) )
            {
                for(i=0; i<=n-1; i++)
                {
                    for(j=0; j<=n-1; j++)
                    {
                        a[i,j] = 0;
                    }
                }
                rep.r1 = 0;
                rep.rinf = 0;
                info = -3;
                return;
            }
            
            //
            // Invert
            //
            tmp = new double[n];
            sinfo.val = 1;
            rmatrixtrinverserec(a, 0, n, isupper, isunit, tmp, sinfo, rep);
            info = sinfo.val;
        }
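Usage sketch (not from the original source; the inverse values are worked out by hand, and the call is written as if made from inside the same matinv unit):

        // Invert an upper triangular 2x2 matrix in place.
        double[,] a = new double[,] { { 2.0, 1.0 }, { 0.0, 4.0 } };
        int info = 0;
        matinvreport rep = new matinvreport();
        rmatrixtrinverse(ref a, 2, true, false, ref info, rep);
        // On success info==1 and the upper triangle of a holds the inverse:
        // [ 0.5  -0.125 ]
        // [ 0.0   0.25  ]
        // rep.r1 and rep.rinf hold the reciprocal condition numbers.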
Example #3
        /*************************************************************************
        This function trains the neural network ensemble passed to it using the
        current dataset and the early stopping training algorithm. Each early
        stopping round performs NRestarts random restarts (thus,
        EnsembleSize*NRestarts training rounds are performed in total).


          -- ALGLIB --
             Copyright 22.08.2012 by Bochkanov Sergey
        *************************************************************************/
        private static void mlptrainensemblex(mlptrainer s,
            mlpe.mlpensemble ensemble,
            int idx0,
            int idx1,
            int nrestarts,
            int trainingmethod,
            apserv.sinteger ngrad,
            bool isrootcall,
            alglib.smp.shared_pool esessions)
        {
            int pcount = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int i = 0;
            int j = 0;
            int k = 0;
            int trnsubsetsize = 0;
            int valsubsetsize = 0;
            int k0 = 0;
            apserv.sinteger ngrad0 = new apserv.sinteger();
            apserv.sinteger ngrad1 = new apserv.sinteger();
            mlpetrnsession psession = null;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;
            int i1_ = 0;

            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            wcount = mlpbase.mlpgetweightscount(ensemble.network);
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                pcount = nin;
            }
            else
            {
                pcount = nin+nout;
            }
            if( nrestarts<=0 )
            {
                nrestarts = 1;
            }
            
            //
            // Handle degenerate case
            //
            if( s.npoints<2 )
            {
                for(i=idx0; i<=idx1-1; i++)
                {
                    for(j=0; j<=wcount-1; j++)
                    {
                        ensemble.weights[i*wcount+j] = 0.0;
                    }
                    for(j=0; j<=pcount-1; j++)
                    {
                        ensemble.columnmeans[i*pcount+j] = 0.0;
                        ensemble.columnsigmas[i*pcount+j] = 1.0;
                    }
                }
                return;
            }
            
            //
            // Process root call
            //
            if( isrootcall )
            {
                
                //
                // Prepare:
                // * prepare MLPETrnSessions
                // * fill ensemble by zeros (helps to detect errors)
                //
                initmlpetrnsessions(ensemble.network, s, esessions);
                for(i=idx0; i<=idx1-1; i++)
                {
                    for(j=0; j<=wcount-1; j++)
                    {
                        ensemble.weights[i*wcount+j] = 0.0;
                    }
                    for(j=0; j<=pcount-1; j++)
                    {
                        ensemble.columnmeans[i*pcount+j] = 0.0;
                        ensemble.columnsigmas[i*pcount+j] = 0.0;
                    }
                }
                
                //
                // Train in non-root mode and exit
                //
                mlptrainensemblex(s, ensemble, idx0, idx1, nrestarts, trainingmethod, ngrad, false, esessions);
                return;
            }
            
            //
            // Split problem
            //
            if( idx1-idx0>=2 )
            {
                k0 = (idx1-idx0)/2;
                ngrad0.val = 0;
                ngrad1.val = 0;
                mlptrainensemblex(s, ensemble, idx0, idx0+k0, nrestarts, trainingmethod, ngrad0, false, esessions);
                mlptrainensemblex(s, ensemble, idx0+k0, idx1, nrestarts, trainingmethod, ngrad1, false, esessions);
                ngrad.val = ngrad0.val+ngrad1.val;
                return;
            }
            
            //
            // Retrieve and prepare session
            //
            alglib.smp.ae_shared_pool_retrieve(esessions, ref psession);
            
            //
            // Train
            //
            hqrnd.hqrndrandomize(rs);
            for(k=idx0; k<=idx1-1; k++)
            {
                
                //
                // Split set
                //
                trnsubsetsize = 0;
                valsubsetsize = 0;
                if( trainingmethod==0 )
                {
                    do
                    {
                        trnsubsetsize = 0;
                        valsubsetsize = 0;
                        for(i=0; i<=s.npoints-1; i++)
                        {
                            if( (double)(math.randomreal())<(double)(0.66) )
                            {
                                
                                //
                                // Assign sample to training set
                                //
                                psession.trnsubset[trnsubsetsize] = i;
                                trnsubsetsize = trnsubsetsize+1;
                            }
                            else
                            {
                                
                                //
                                // Assign sample to validation set
                                //
                                psession.valsubset[valsubsetsize] = i;
                                valsubsetsize = valsubsetsize+1;
                            }
                        }
                    }
                    while( !(trnsubsetsize!=0 && valsubsetsize!=0) );
                }
                if( trainingmethod==1 )
                {
                    valsubsetsize = 0;
                    trnsubsetsize = s.npoints;
                    for(i=0; i<=s.npoints-1; i++)
                    {
                        psession.trnsubset[i] = hqrnd.hqrnduniformi(rs, s.npoints);
                    }
                }
                
                //
                // Train
                //
                mlptrainnetworkx(s, nrestarts, -1, psession.trnsubset, trnsubsetsize, psession.valsubset, valsubsetsize, psession.network, psession.mlprep, true, psession.mlpsessions);
                ngrad.val = ngrad.val+psession.mlprep.ngrad;
                
                //
                // Save results
                //
                i1_ = (0) - (k*wcount);
                for(i_=k*wcount; i_<=(k+1)*wcount-1;i_++)
                {
                    ensemble.weights[i_] = psession.network.weights[i_+i1_];
                }
                i1_ = (0) - (k*pcount);
                for(i_=k*pcount; i_<=(k+1)*pcount-1;i_++)
                {
                    ensemble.columnmeans[i_] = psession.network.columnmeans[i_+i1_];
                }
                i1_ = (0) - (k*pcount);
                for(i_=k*pcount; i_<=(k+1)*pcount-1;i_++)
                {
                    ensemble.columnsigmas[i_] = psession.network.columnsigmas[i_+i1_];
                }
            }
            
            //
            // Recycle session
            //
            alglib.smp.ae_shared_pool_recycle(esessions, ref psession);
        }
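The trainingmethod==0 branch above performs the early stopping data split. As a standalone sketch (using System.Random in place of ALGLIB's internal math.randomreal), the same roughly 2:1 train/validation partition looks like this:

        var rng = new System.Random();
        int npoints = 10;
        var trn = new System.Collections.Generic.List<int>();
        var val = new System.Collections.Generic.List<int>();
        do
        {
            // each point goes to the training subset with probability 0.66;
            // repeat until both subsets are non-empty
            trn.Clear();
            val.Clear();
            for (int i = 0; i < npoints; i++)
            {
                if (rng.NextDouble() < 0.66)
                    trn.Add(i);
                else
                    val.Add(i);
            }
        }
        while (trn.Count == 0 || val.Count == 0);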
Example #4
        /*************************************************************************
        Inversion of a matrix given by its LU decomposition.

        COMMERCIAL EDITION OF ALGLIB:

          ! The commercial version of ALGLIB includes two important improvements
          ! of this function, which can be used from C++ and C#:
          ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
          ! * multicore support
          !
          ! Intel MKL gives an approximately constant (with respect to the number
          ! of worker threads) acceleration factor, which depends on the CPU being
          ! used, the problem size and the "baseline" ALGLIB edition used for
          ! comparison.
          !
          ! For example, on an SSE2-capable CPU with N=1024, HPC ALGLIB will be:
          ! * about 2-3x faster than ALGLIB for C++ without MKL
          ! * about 7-10x faster than the "pure C#" edition of ALGLIB
          ! The difference in performance is more striking on newer CPUs with
          ! support for newer SIMD instructions. Generally, MKL accelerates any
          ! problem whose size is at least 128, with the best efficiency achieved
          ! for N larger than 512.
          !
          ! The commercial edition of ALGLIB also supports multithreaded
          ! acceleration of this function. We should note that matrix inversion
          ! is harder to parallelize than, say, a matrix-matrix product: this
          ! algorithm has many internal synchronization points which cannot be
          ! avoided. However, parallelism becomes profitable starting from N=1024,
          ! achieving near-linear speedup for N=4096 or higher.
          !
          ! In order to use multicore features you have to:
          ! * use the commercial version of ALGLIB
          ! * call this function with the "smp_" prefix, which indicates that
          !   multicore code will be used
          !
          ! We recommend that you read the 'Working with commercial version'
          ! section of the ALGLIB Reference Manual to find out how to use the
          ! performance-related features provided by the commercial edition of
          ! ALGLIB.

        INPUT PARAMETERS:
            A       -   LU decomposition of the matrix
                        (output of RMatrixLU subroutine).
            Pivots  -   table of permutations
                        (the output of RMatrixLU subroutine).
            N       -   size of matrix A (optional):
                        * if given, only the principal NxN submatrix is processed
                          and overwritten; other elements are unchanged.
                        * if not given, the size is automatically determined from
                          the matrix size (A must be a square matrix)

        OUTPUT PARAMETERS:
            Info    -   return code:
                        * -3    A is singular, or VERY close to singular.
                                It is filled with zeros in such cases.
                        *  1    task is solved (but the matrix A may be
                                ill-conditioned; check the R1/RInf parameters
                                for condition numbers).
            Rep     -   solver report, see below for more info
            A       -   inverse of matrix A.
                        Array whose indexes range within [0..N-1, 0..N-1].

        SOLVER REPORT

        Subroutine sets following fields of the Rep structure:
        * R1        reciprocal of condition number: 1/cond(A), 1-norm.
        * RInf      reciprocal of condition number: 1/cond(A), inf-norm.

          -- ALGLIB routine --
             05.02.2010
             Bochkanov Sergey
        *************************************************************************/
        public static void rmatrixluinverse(ref double[,] a,
            int[] pivots,
            int n,
            ref int info,
            matinvreport rep)
        {
            double[] work = new double[0];
            int i = 0;
            int j = 0;
            int k = 0;
            double v = 0;
            apserv.sinteger sinfo = new apserv.sinteger();

            info = 0;

            alglib.ap.assert(n>0, "RMatrixLUInverse: N<=0!");
            alglib.ap.assert(alglib.ap.cols(a)>=n, "RMatrixLUInverse: cols(A)<N!");
            alglib.ap.assert(alglib.ap.rows(a)>=n, "RMatrixLUInverse: rows(A)<N!");
            alglib.ap.assert(alglib.ap.len(pivots)>=n, "RMatrixLUInverse: len(Pivots)<N!");
            alglib.ap.assert(apserv.apservisfinitematrix(a, n, n), "RMatrixLUInverse: A contains infinite or NaN values!");
            info = 1;
            for(i=0; i<=n-1; i++)
            {
                if( pivots[i]>n-1 || pivots[i]<i )
                {
                    info = -1;
                }
            }
            alglib.ap.assert(info>0, "RMatrixLUInverse: incorrect Pivots array!");
            
            //
            // calculate condition numbers
            //
            rep.r1 = rcond.rmatrixlurcond1(a, n);
            rep.rinf = rcond.rmatrixlurcondinf(a, n);
            if( (double)(rep.r1)<(double)(rcond.rcondthreshold()) || (double)(rep.rinf)<(double)(rcond.rcondthreshold()) )
            {
                for(i=0; i<=n-1; i++)
                {
                    for(j=0; j<=n-1; j++)
                    {
                        a[i,j] = 0;
                    }
                }
                rep.r1 = 0;
                rep.rinf = 0;
                info = -3;
                return;
            }
            
            //
            // Call cache-oblivious code
            //
            work = new double[n];
            sinfo.val = 1;
            rmatrixluinverserec(ref a, 0, n, ref work, sinfo, rep);
            info = sinfo.val;
            
            //
            // apply permutations
            //
            for(i=0; i<=n-1; i++)
            {
                for(j=n-2; j>=0; j--)
                {
                    k = pivots[j];
                    v = a[i,j];
                    a[i,j] = a[i,k];
                    a[i,k] = v;
                }
            }
        }
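Usage sketch (not from the original source): it assumes the companion factorization routine trfac.rmatrixlu with its usual (ref a, m, n, ref pivots) signature, and the inverse values are worked out by hand.

        // Invert A = [ 4 3 ; 6 3 ] via its LU decomposition.
        double[,] a = new double[,] { { 4.0, 3.0 }, { 6.0, 3.0 } };
        int[] pivots = new int[2];
        trfac.rmatrixlu(ref a, 2, 2, ref pivots);   // A -> LU factors + pivots
        int info = 0;
        matinvreport rep = new matinvreport();
        rmatrixluinverse(ref a, pivots, 2, ref info, rep);
        // On success info==1 and a holds inv(A):
        // [ -0.5    0.5    ]
        // [  1.0   -0.6667 ]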
Example #5
        /*************************************************************************
        This function trains the neural network ensemble passed to it using the
        current dataset and the early stopping training algorithm. Each early
        stopping round performs NRestarts random restarts (thus,
        EnsembleSize*NRestarts training rounds are performed in total).

        FOR USERS OF COMMERCIAL EDITION:

          ! The commercial version of ALGLIB includes two important improvements
          ! of this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support (C++ computational core)
          !
          ! The second improvement gives a constant speedup (2-3X). The first
          ! improvement gives close-to-linear speedup on multicore systems. The
          ! following operations can be executed in parallel:
          ! * EnsembleSize training sessions, one for each ensemble member
          !   (always parallelized)
          ! * NRestarts training sessions performed within each training session
          !   (if NRestarts>1)
          ! * gradient calculation over a large dataset (if the dataset is large
          !   enough)
          !
          ! In order to use multicore features you have to:
          ! * use the commercial version of ALGLIB
          ! * call this function with the "smp_" prefix, which indicates that
          !   multicore code will be used
          !
          ! In order to use SSE features you have to:
          ! * use the commercial version of ALGLIB on Intel processors
          ! * use the C++ computational core
          !
          ! This note is given for users of the commercial edition; if you use
          ! the GPL edition, you will still be able to call the smp-version of
          ! this function, but all computations will be done serially.
          !
          ! We recommend that you carefully read the ALGLIB Reference Manual
          ! section called 'SMP support' before using the parallel version of
          ! this function.

        INPUT PARAMETERS:
            S           -   trainer object;
            Ensemble    -   neural network ensemble. It must have same  number  of
                            inputs and outputs/classes  as  was  specified  during
                            creation of the trainer object.
            NRestarts   -   number of restarts, >=0:
                            * NRestarts>0 means that the specified number of
                              random restarts is performed during each ES round;
                            * NRestarts=0 is silently replaced by 1.

        OUTPUT PARAMETERS:
            Ensemble    -   trained ensemble;
            Rep         -   it contains all types of errors.
            
        NOTE: this training method uses BOTH early stopping and weight decay!  So,
              you should select weight decay before starting training just as  you
              select it before training "conventional" networks.

        NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
              or a single-point dataset was passed, the ensemble is filled with
              zero values.

        NOTE: this method uses sum-of-squares error function for training.

          -- ALGLIB --
             Copyright 22.08.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlptrainensemblees(mlptrainer s,
            mlpe.mlpensemble ensemble,
            int nrestarts,
            mlpreport rep)
        {
            int nin = 0;
            int nout = 0;
            int ntype = 0;
            int ttype = 0;
            alglib.smp.shared_pool esessions = new alglib.smp.shared_pool();
            apserv.sinteger sgrad = new apserv.sinteger();
            mlpbase.modelerrors tmprep = new mlpbase.modelerrors();

            alglib.ap.assert(s.npoints>=0, "MLPTrainEnsembleES: parameter S is not initialized or is spoiled (S.NPoints<0)");
            if( !mlpe.mlpeissoftmax(ensemble) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            if( s.rcpar )
            {
                ttype = 0;
            }
            else
            {
                ttype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPTrainEnsembleES: internal error - type of input network is not similar to network type in trainer object");
            nin = mlpbase.mlpgetinputscount(ensemble.network);
            alglib.ap.assert(s.nin==nin, "MLPTrainEnsembleES: number of inputs in trainer is not equal to number of inputs in ensemble network");
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            alglib.ap.assert(s.nout==nout, "MLPTrainEnsembleES: number of outputs in trainer is not equal to number of outputs in ensemble network");
            alglib.ap.assert(nrestarts>=0, "MLPTrainEnsembleES: NRestarts<0.");
            
            //
            // Initialize parameter Rep
            //
            rep.relclserror = 0;
            rep.avgce = 0;
            rep.rmserror = 0;
            rep.avgerror = 0;
            rep.avgrelerror = 0;
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            
            //
            // Allocate
            //
            apserv.ivectorsetlengthatleast(ref s.subset, s.npoints);
            apserv.ivectorsetlengthatleast(ref s.valsubset, s.npoints);
            
            //
            // Start training
            //
            // NOTE: ESessions is not initialized because MLPTrainEnsembleX
            //       needs uninitialized pool.
            //
            sgrad.val = 0;
            mlptrainensemblex(s, ensemble, 0, ensemble.ensemblesize, nrestarts, 0, sgrad, true, esessions);
            rep.ngrad = sgrad.val;
            
            //
            // Calculate errors.
            //
            if( s.datatype==0 )
            {
                mlpe.mlpeallerrorsx(ensemble, s.densexy, s.sparsexy, s.npoints, 0, ensemble.network.dummyidx, 0, s.npoints, 0, ensemble.network.buf, tmprep);
            }
            if( s.datatype==1 )
            {
                mlpe.mlpeallerrorsx(ensemble, s.densexy, s.sparsexy, s.npoints, 1, ensemble.network.dummyidx, 0, s.npoints, 0, ensemble.network.buf, tmprep);
            }
            rep.relclserror = tmprep.relclserror;
            rep.avgce = tmprep.avgce;
            rep.rmserror = tmprep.rmserror;
            rep.avgerror = tmprep.avgerror;
            rep.avgrelerror = tmprep.avgrelerror;
        }
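End-to-end sketch using the public ALGLIB wrappers; the wrapper names and signatures (mlpcreatetrainer, mlpsetdataset, mlpecreate1, mlptrainensemblees) are assumed to follow the usual public ALGLIB C# API, and the toy dataset is made up for illustration.

        // Train a 3-member ensemble on a toy 1-input regression dataset.
        double[,] xy = new double[,] {
            { 0.0, 0.00 },
            { 0.5, 0.25 },
            { 1.0, 1.00 }
        };
        alglib.mlptrainer trn;
        alglib.mlpensemble ens;
        alglib.mlpreport rep;
        alglib.mlpcreatetrainer(1, 1, out trn);    // 1 input, 1 output
        alglib.mlpsetdataset(trn, xy, 3);          // 3 points
        alglib.mlpecreate1(1, 5, 1, 3, out ens);   // 5 hidden neurons, 3 members
        alglib.mlptrainensemblees(trn, ens, 2, out rep);
        // rep.rmserror etc. now hold the ensemble's errors on the dataset.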