/*************************************************************************
        This is "expert" 4PL/5PL fitting function, which can be used if  you  need
        better control over fitting process than provided  by  LogisticFit4()  or
        LogisticFit5().

        This function fits a model of the form

            F(x|A,B,C,D)   = D+(A-D)/(1+Power(x/C,B))           (4PL model)

        or

            F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G)    (5PL model)
            
        Here:
            * A, D - unconstrained
            * B>=0 for 4PL, unconstrained for 5PL
            * C>0
            * G>0 (if present)

        INPUT PARAMETERS:
            X       -   array[N], stores X-values.
                        MUST include only non-negative numbers  (but  may  include
                        zero values). Can be unsorted.
            Y       -   array[N], values to fit.
            N       -   number of points. If N is less than  length  of  X/Y, only
                        leading N elements are used.
            CnstrLeft-  optional equality constraint for model value at the   left
                        boundary (at X=0). Specify NAN (Not-a-Number)  if  you  do
                        not need constraint on the model value at X=0 (in C++  you
                        can pass alglib::fp_nan as parameter, in  C#  it  will  be
                        Double.NaN).
                        See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
                        information about constraints.
            CnstrRight- optional equality constraint for model value at X=infinity.
                        Specify NAN (Not-a-Number) if you do not  need  constraint
                        on the model value (in C++  you can pass alglib::fp_nan as
                        parameter, in  C# it will  be Double.NaN).
                        See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
                        information about constraints.
            Is4PL   -   whether the 4PL (True) or the 5PL (False) model is fitted
            LambdaV -   regularization coefficient, LambdaV>=0.
                        Set it to zero unless you know what you are doing.
            EpsX    -   stopping condition (step size), EpsX>=0.
                        Zero value means that small step is automatically chosen.
                        See notes below for more information.
            RsCnt   -   number of repeated restarts from random points. 4PL/5PL
                        models are prone to the problem of bad local extrema, and
                        multiple random restarts improve the chances of converging
                        to the global one.
                        RsCnt>=0.
                        Zero value means that the function automatically chooses a
                        small number of restarts (recommended).
                        
        OUTPUT PARAMETERS:
            A, B, C, D- parameters of 4PL model
            G       -   parameter of 5PL model; for Is4PL=True, G=1 is returned.
            Rep     -   fitting report. This structure has many fields, but ONLY
                        THE ONES LISTED BELOW ARE SET:
                        * Rep.IterationsCount - number of iterations performed
                        * Rep.RMSError - root-mean-square error
                        * Rep.AvgError - average absolute error
                        * Rep.AvgRelError - average relative error (calculated for
                          non-zero Y-values)
                        * Rep.MaxError - maximum absolute error
                        * Rep.R2 - coefficient of determination,  R-squared.  This
                          coefficient   is  calculated  as  R2=1-RSS/TSS  (in case
                          of nonlinear  regression  there  are  multiple  ways  to
                          define R2, each of them giving different results).
                        
        NOTE: after you have obtained the coefficients, you can evaluate the model
              with the LogisticCalc5() function.

        NOTE: the step is automatically scaled according to the scale of the
              parameters being fitted before its length is compared with EpsX.
              Thus, this function can be used to fit data with very small or very
              large values without changing EpsX.

        EQUALITY CONSTRAINTS ON PARAMETERS

        The 4PL/5PL solver supports equality constraints on model values at the
        left boundary (X=0) and the right boundary (X=infinity). These constraints
        are completely optional: you can specify both of them, only one, or no
        constraints at all.

        Parameter CnstrLeft contains the left constraint (or NAN for unconstrained
        fitting), and CnstrRight contains the right one. For 4PL, the left
        constraint ALWAYS corresponds to parameter A, and the right one is ALWAYS
        a constraint on D. That's because the 4PL model is normalized in such a
        way that B>=0.

        For the 5PL model things are different. Unlike the 4PL one, the 5PL model
        is NOT symmetric with respect to a change in the sign of B. Thus, negative
        B's are possible, and the left constraint may constrain parameter A (for
        positive B's) or parameter D (for negative B's). The meaning of the right
        constraint changes similarly.

        You do not have to decide which parameter to constrain - the algorithm
        will automatically pick the correct one as fitting progresses. However,
        the distinction highlighted above is important when you interpret fitting
        results.
            

          -- ALGLIB PROJECT --
             Copyright 14.02.2014 by Bochkanov Sergey
        *************************************************************************/
        public static void logisticfit45x(double[] x,
            double[] y,
            int n,
            double cnstrleft,
            double cnstrright,
            bool is4pl,
            double lambdav,
            double epsx,
            int rscnt,
            ref double a,
            ref double b,
            ref double c,
            ref double d,
            ref double g,
            lsfitreport rep)
        {
            int i = 0;
            int k = 0;
            int innerit = 0;
            int outerit = 0;
            int nz = 0;
            double v = 0;
            double b00 = 0;
            double b01 = 0;
            double b10 = 0;
            double b11 = 0;
            double b30 = 0;
            double b31 = 0;
            double[] p0 = new double[0];
            double[] p1 = new double[0];
            double[] p2 = new double[0];
            double[] bndl = new double[0];
            double[] bndu = new double[0];
            double[] s = new double[0];
            double[,] z = new double[0,0];
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            minlm.minlmstate state = new minlm.minlmstate();
            minlm.minlmreport replm = new minlm.minlmreport();
            int maxits = 0;
            double fbest = 0;
            double flast = 0;
            double flast2 = 0;
            double scalex = 0;
            double scaley = 0;
            double[] bufx = new double[0];
            double[] bufy = new double[0];
            double rss = 0;
            double tss = 0;
            double meany = 0;

            x = (double[])x.Clone();
            y = (double[])y.Clone();
            a = 0;
            b = 0;
            c = 0;
            d = 0;
            g = 0;

            alglib.ap.assert(math.isfinite(epsx), "LogisticFitX: EpsX is infinite/NAN");
            alglib.ap.assert(math.isfinite(lambdav), "LogisticFitX: LambdaV is infinite/NAN");
            alglib.ap.assert(math.isfinite(cnstrleft) || Double.IsNaN(cnstrleft), "LogisticFitX: CnstrLeft is NOT finite or NAN");
            alglib.ap.assert(math.isfinite(cnstrright) || Double.IsNaN(cnstrright), "LogisticFitX: CnstrRight is NOT finite or NAN");
            alglib.ap.assert((double)(lambdav)>=(double)(0), "LogisticFitX: negative LambdaV");
            alglib.ap.assert(n>0, "LogisticFitX: N<=0");
            alglib.ap.assert(rscnt>=0, "LogisticFitX: RsCnt<0");
            alglib.ap.assert((double)(epsx)>=(double)(0), "LogisticFitX: EpsX<0");
            alglib.ap.assert(alglib.ap.len(x)>=n, "LogisticFitX: Length(X)<N");
            alglib.ap.assert(alglib.ap.len(y)>=n, "LogisticFitX: Length(Y)<N");
            alglib.ap.assert(apserv.isfinitevector(x, n), "LogisticFitX: X contains infinite/NAN values");
            alglib.ap.assert(apserv.isfinitevector(y, n), "LogisticFitX: Y contains infinite/NAN values");
            hqrnd.hqrndseed(2211, 1033044, rs);
            clearreport(rep);
            if( (double)(epsx)==(double)(0) )
            {
                epsx = 1.0E-10;
            }
            if( rscnt==0 )
            {
                rscnt = 4;
            }
            maxits = 1000;
            
            //
            // Sort points by X.
            // Determine number of zero and non-zero values.
            //
            tsort.tagsortfastr(ref x, ref y, ref bufx, ref bufy, n);
            alglib.ap.assert((double)(x[0])>=(double)(0), "LogisticFitX: some X[] are negative");
            nz = n;
            for(i=0; i<=n-1; i++)
            {
                if( (double)(x[i])>(double)(0) )
                {
                    nz = i;
                    break;
                }
            }
            
            //
            // For NZ=N (all X[] are zero) special code is used.
            // For NZ<N we use general-purpose code.
            //
            rep.iterationscount = 0;
            if( nz==n )
            {
                
                //
                // NZ=N, degenerate problem.
                // No need to run optimizer.
                //
                v = 0.0;
                for(i=0; i<=n-1; i++)
                {
                    v = v+y[i];
                }
                v = v/n;
                if( math.isfinite(cnstrleft) )
                {
                    a = cnstrleft;
                }
                else
                {
                    a = v;
                }
                b = 1;
                c = 1;
                if( math.isfinite(cnstrright) )
                {
                    d = cnstrright;
                }
                else
                {
                    d = a;
                }
                g = 1;
            }
            else
            {
                
                //
                // Non-degenerate problem.
                // Determine scale of data.
                //
                scalex = x[nz+(n-nz)/2];
                alglib.ap.assert((double)(scalex)>(double)(0), "LogisticFitX: internal error");
                v = 0.0;
                for(i=0; i<=n-1; i++)
                {
                    v = v+y[i];
                }
                v = v/n;
                scaley = 0.0;
                for(i=0; i<=n-1; i++)
                {
                    scaley = scaley+math.sqr(y[i]-v);
                }
                scaley = Math.Sqrt(scaley/n);
                if( (double)(scaley)==(double)(0) )
                {
                    scaley = 1.0;
                }
                s = new double[5];
                s[0] = scaley;
                s[1] = 0.1;
                s[2] = scalex;
                s[3] = scaley;
                s[4] = 0.1;
                p0 = new double[5];
                p0[0] = 0;
                p0[1] = 0;
                p0[2] = 0;
                p0[3] = 0;
                p0[4] = 0;
                bndl = new double[5];
                bndu = new double[5];
                minlm.minlmcreatevj(5, n+5, p0, state);
                minlm.minlmsetscale(state, s);
                minlm.minlmsetcond(state, 0.0, 0.0, epsx, maxits);
                minlm.minlmsetxrep(state, true);
                
                //
                // Main loop - includes THREE (!) nested iterations:
                //
                // 1. Inner iteration is minimization of target function from
                //    the current initial point P1 subject to boundary constraints
                //    given by arrays BndL and BndU.
                //
                // 2. Middle iteration changes boundary constraints from tight to
                //    relaxed ones:
                //    * at the first middle iteration we optimize with "tight"
                //      constraints on parameters B and C (P[1] and P[2]). It
                //      allows us to find good initial point for the next middle
                //      iteration without risk of running into "hard" points (B=0, C=0).
                //      Initial point is initialized by outer iteration.
                //      Solution is placed to P1.
                //    * at the second middle iteration we relax boundary constraints
                //      on B and C. Solution P1 from the first middle iteration is
                //      used as initial point for the second one.
                //    * both the first and the second middle iterations fit 4PL
                //      models, even when we are fitting 5PL.
                //    * additionally, for 5PL models, we use the results of the
                //      second middle iteration as the initial guess for the 5PL fit.
                //    * after the middle iteration is over, we compare the quality
                //      of the solution stored in P1 with the best one found so far,
                //      and offload it to A/B/C/D/G if it is better.
                //
                // 3. Outer iteration (starts below) changes the following parameters:
                //    * initial point
                //    * "tight" constraints BndL/BndU
                //    * "relaxed" constraints BndL/BndU
                //
                // Below we prepare combined matrix Z of optimization settings for
                // outer/middle iterations:
                //
                //     [ P00 BndL00 BndU00 BndL01 BndU01 ]
                //     [                                 ]
                //     [ P10 BndL10 BndU10 BndL11 BndU11 ]
                //
                // Here:
                // * Pi0 is initial point for I-th outer iteration
                // * BndLij is lower boundary for I-th outer iteration, J-th inner iteration
                // * BndUij is the corresponding upper boundary
                //
                z = new double[rscnt, 5+4*5];
                for(i=0; i<=rscnt-1; i++)
                {
                    if( math.isfinite(cnstrleft) )
                    {
                        z[i,0] = cnstrleft;
                    }
                    else
                    {
                        z[i,0] = y[0]+0.25*scaley*(hqrnd.hqrnduniformr(rs)-0.5);
                    }
                    z[i,1] = 0.5+hqrnd.hqrnduniformr(rs);
                    z[i,2] = x[nz+hqrnd.hqrnduniformi(rs, n-nz)];
                    if( math.isfinite(cnstrright) )
                    {
                        z[i,3] = cnstrright;
                    }
                    else
                    {
                        z[i,3] = y[n-1]+0.25*scaley*(hqrnd.hqrnduniformr(rs)-0.5);
                    }
                    z[i,4] = 1.0;
                    if( math.isfinite(cnstrleft) )
                    {
                        z[i,5+0] = cnstrleft;
                        z[i,10+0] = cnstrleft;
                    }
                    else
                    {
                        z[i,5+0] = Double.NegativeInfinity;
                        z[i,10+0] = Double.PositiveInfinity;
                    }
                    z[i,5+1] = 0.5;
                    z[i,10+1] = 2.0;
                    z[i,5+2] = 0.5*scalex;
                    z[i,10+2] = 2.0*scalex;
                    if( math.isfinite(cnstrright) )
                    {
                        z[i,5+3] = cnstrright;
                        z[i,10+3] = cnstrright;
                    }
                    else
                    {
                        z[i,5+3] = Double.NegativeInfinity;
                        z[i,10+3] = Double.PositiveInfinity;
                    }
                    z[i,5+4] = 1.0;
                    z[i,10+4] = 1.0;
                    if( math.isfinite(cnstrleft) )
                    {
                        z[i,15+0] = cnstrleft;
                        z[i,20+0] = cnstrleft;
                    }
                    else
                    {
                        z[i,15+0] = Double.NegativeInfinity;
                        z[i,20+0] = Double.PositiveInfinity;
                    }
                    z[i,15+1] = 0.01;
                    z[i,20+1] = Double.PositiveInfinity;
                    z[i,15+2] = math.machineepsilon*scalex;
                    z[i,20+2] = Double.PositiveInfinity;
                    if( math.isfinite(cnstrright) )
                    {
                        z[i,15+3] = cnstrright;
                        z[i,20+3] = cnstrright;
                    }
                    else
                    {
                        z[i,15+3] = Double.NegativeInfinity;
                        z[i,20+3] = Double.PositiveInfinity;
                    }
                    z[i,15+4] = 1.0;
                    z[i,20+4] = 1.0;
                }
                
                //
                // Run outer iterations
                //
                a = 0;
                b = 1;
                c = 1;
                d = 1;
                g = 1;
                fbest = math.maxrealnumber;
                p1 = new double[5];
                p2 = new double[5];
                for(outerit=0; outerit<=alglib.ap.rows(z)-1; outerit++)
                {
                    
                    //
                    // Beginning of the middle iterations.
                    // Prepare initial point P1.
                    //
                    for(i=0; i<=4; i++)
                    {
                        p1[i] = z[outerit,i];
                    }
                    flast = math.maxrealnumber;
                    for(innerit=0; innerit<=1; innerit++)
                    {
                        
                        //
                        // Set current boundary constraints.
                        // Run inner iteration.
                        //
                        for(i=0; i<=4; i++)
                        {
                            bndl[i] = z[outerit,5+innerit*10+0+i];
                            bndu[i] = z[outerit,5+innerit*10+5+i];
                        }
                        minlm.minlmsetbc(state, bndl, bndu);
                        logisticfitinternal(x, y, n, true, lambdav, state, replm, ref p1, ref flast);
                        rep.iterationscount = rep.iterationscount+replm.iterationscount;
                    }
                    
                    //
                    // Middle iteration: try to fit with the 5-parameter logistic model (if needed).
                    //
                    // We perform two attempts to fit: one with B>0, another one with B<0.
                    // For 4PL these are equivalent up to a transposition of A/D, but for
                    // 5PL the sign of B is very important.
                    //
                    // NOTE: results of the 4PL fit are used as the initial point for 5PL.
                    //
                    if( !is4pl )
                    {
                        
                        //
                        // Loosen constraints on G,
                        // save constraints on A/B/D to B0/B1
                        //
                        bndl[4] = 0.1;
                        bndu[4] = 10.0;
                        b00 = bndl[0];
                        b01 = bndu[0];
                        b10 = bndl[1];
                        b11 = bndu[1];
                        b30 = bndl[3];
                        b31 = bndu[3];
                        
                        //
                        // First attempt: fitting with positive B
                        //
                        p2[0] = p1[0];
                        p2[1] = p1[1];
                        p2[2] = p1[2];
                        p2[3] = p1[3];
                        p2[4] = p1[4];
                        bndl[0] = b00;
                        bndu[0] = b01;
                        bndl[1] = b10;
                        bndu[1] = b11;
                        bndl[3] = b30;
                        bndu[3] = b31;
                        minlm.minlmsetbc(state, bndl, bndu);
                        logisticfitinternal(x, y, n, false, lambdav, state, replm, ref p2, ref flast2);
                        rep.iterationscount = rep.iterationscount+replm.iterationscount;
                        if( (double)(flast2)<(double)(flast) )
                        {
                            for(i=0; i<=4; i++)
                            {
                                p1[i] = p2[i];
                            }
                            flast = flast2;
                        }
                        
                        //
                        // Second attempt: fitting with negative B
                        //
                        p2[0] = p1[3];
                        p2[1] = -p1[1];
                        p2[2] = p1[2];
                        p2[3] = p1[0];
                        p2[4] = p1[4];
                        bndl[0] = b30;
                        bndu[0] = b31;
                        bndl[1] = -b11;
                        bndu[1] = -b10;
                        bndl[3] = b00;
                        bndu[3] = b01;
                        minlm.minlmsetbc(state, bndl, bndu);
                        logisticfitinternal(x, y, n, false, lambdav, state, replm, ref p2, ref flast2);
                        rep.iterationscount = rep.iterationscount+replm.iterationscount;
                        if( (double)(flast2)<(double)(flast) )
                        {
                            for(i=0; i<=4; i++)
                            {
                                p1[i] = p2[i];
                            }
                            flast = flast2;
                        }
                    }
                    
                    //
                    // Middle iteration is done, compare its results with best value
                    // found so far.
                    //
                    if( (double)(flast)<(double)(fbest) )
                    {
                        a = p1[0];
                        b = p1[1];
                        c = p1[2];
                        d = p1[3];
                        g = p1[4];
                        fbest = flast;
                    }
                }
            }
            
            //
            // Calculate errors
            //
            rep.rmserror = 0;
            rep.avgerror = 0;
            rep.avgrelerror = 0;
            rep.maxerror = 0;
            k = 0;
            rss = 0.0;
            tss = 0.0;
            meany = 0.0;
            for(i=0; i<=n-1; i++)
            {
                meany = meany+y[i];
            }
            meany = meany/n;
            for(i=0; i<=n-1; i++)
            {
                
                //
                // Calculate residual from regression
                //
                if( (double)(x[i])>(double)(0) )
                {
                    v = d+(a-d)/Math.Pow(1.0+Math.Pow(x[i]/c, b), g)-y[i];
                }
                else
                {
                    if( (double)(b)>=(double)(0) )
                    {
                        v = a-y[i];
                    }
                    else
                    {
                        v = d-y[i];
                    }
                }
                
                //
                // Update RSS (residual sum of squares) and TSS (total sum of squares)
                // which are used to calculate coefficient of determination.
                //
                // NOTE: we use the formula R2 = 1-RSS/TSS because it has the nice
                //       property of being equal to 1.0 if and only if the model
                //       perfectly fits the data.
                //
                //       When we fit nonlinear models, there exist multiple ways of
                //       defining R2, each of them giving different results. The
                //       formula above is the most intuitive one.
                //
                rss = rss+v*v;
                tss = tss+math.sqr(y[i]-meany);
                
                //
                // Update errors
                //
                rep.rmserror = rep.rmserror+math.sqr(v);
                rep.avgerror = rep.avgerror+Math.Abs(v);
                if( (double)(y[i])!=(double)(0) )
                {
                    rep.avgrelerror = rep.avgrelerror+Math.Abs(v/y[i]);
                    k = k+1;
                }
                rep.maxerror = Math.Max(rep.maxerror, Math.Abs(v));
            }
            rep.rmserror = Math.Sqrt(rep.rmserror/n);
            rep.avgerror = rep.avgerror/n;
            if( k>0 )
            {
                rep.avgrelerror = rep.avgrelerror/k;
            }
            rep.r2 = 1.0-rss/tss;
        }
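        /*************************************************************************
        Usage sketch (illustration, not part of ALGLIB): fitting an unconstrained
        4PL model with the internal API above. The public ALGLIB wrappers differ
        slightly, so treat this only as a sketch of the calling convention; the
        data and the helper name are hypothetical.
        *************************************************************************/
        public static void logisticfit45xexample()
        {
            double[] x = new double[]{ 0.0, 0.5, 1.0, 2.0, 4.0, 8.0 };
            double[] y = new double[]{ 0.1, 0.3, 0.7, 1.4, 1.8, 2.0 };
            double a = 0;
            double b = 0;
            double c = 0;
            double d = 0;
            double g = 0;
            lsfitreport rep = new lsfitreport();

            //
            // NAN constraints mean "unconstrained"; LambdaV=0, EpsX=0 and RsCnt=0
            // select the recommended defaults.
            //
            logisticfit45x(x, y, 6, Double.NaN, Double.NaN, true, 0.0, 0.0, 0, ref a, ref b, ref c, ref d, ref g, rep);

            //
            // Evaluate the fitted model at T>0 using the same formula as the
            // error-calculation loop above (for Is4PL=True, G=1).
            //
            double t = 3.0;
            double f = d+(a-d)/Math.Pow(1.0+Math.Pow(t/c, b), g);
        }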
Example #2
        /*************************************************************************
        Generation of random NxN Hermitian positive definite matrix with given
        condition number and norm2(A)=1

        INPUT PARAMETERS:
            N   -   matrix size
            C   -   condition number (in 2-norm), C>=1

        OUTPUT PARAMETERS:
            A   -   random HPD matrix with norm2(A)=1 and cond(A)=C

          -- ALGLIB routine --
             04.12.2009
             Bochkanov Sergey
        *************************************************************************/
        public static void hpdmatrixrndcond(int n,
            double c,
            ref complex[,] a)
        {
            int i = 0;
            int j = 0;
            double l1 = 0;
            double l2 = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();

            a = new complex[0,0];

            
            //
            // Special cases
            //
            if( n<=0 || (double)(c)<(double)(1) )
            {
                return;
            }
            a = new complex[n, n];
            if( n==1 )
            {
                a[0,0] = 1;
                return;
            }
            
            //
            // Prepare matrix
            //
            hqrnd.hqrndrandomize(rs);
            l1 = 0;
            l2 = Math.Log(1/c);
            for(i=0; i<=n-1; i++)
            {
                for(j=0; j<=n-1; j++)
                {
                    a[i,j] = 0;
                }
            }
            a[0,0] = Math.Exp(l1);
            for(i=1; i<=n-2; i++)
            {
                a[i,i] = Math.Exp(hqrnd.hqrnduniformr(rs)*(l2-l1)+l1);
            }
            a[n-1,n-1] = Math.Exp(l2);
            
            //
            // Multiply
            //
            hmatrixrndmultiply(ref a, n);
            
            //
            // post-process to ensure that matrix diagonal is real
            //
            for(i=0; i<=n-1; i++)
            {
                a[i,i].y = 0;
            }
        }
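        /*************************************************************************
        Usage sketch (illustration, not part of ALGLIB): generating a 5x5
        Hermitian positive definite matrix with 2-norm condition number 100.
        After the call, A has a real diagonal, norm2(A)=1 and cond(A)=100.
        *************************************************************************/
        public static void hpdmatrixrndcondexample()
        {
            complex[,] a = new complex[0,0];
            hpdmatrixrndcond(5, 100.0, ref a);
        }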
Example #3
File: linalg.cs Project: Ring-r/opt
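        /*************************************************************************
        Header added for clarity; the summary below is inferred from the code:
        applies a random unitary similarity transformation to a Hermitian NxN
        matrix, A := Q^H*A*Q, which preserves eigenvalues while randomizing
        eigenvectors.
        *************************************************************************/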
        public static void hmatrixrndmultiply(ref complex[,] a,
            int n)
        {
            complex tau = 0;
            complex lambdav = 0;
            int s = 0;
            int i = 0;
            complex[] w = new complex[0];
            complex[] v = new complex[0];
            hqrnd.hqrndstate state = new hqrnd.hqrndstate();
            int i_ = 0;

            
            //
            // General case.
            //
            w = new complex[n];
            v = new complex[n+1];
            hqrnd.hqrndrandomize(state);
            for(s=2; s<=n; s++)
            {
                
                //
                // Prepare random normal v
                //
                do
                {
                    for(i=1; i<=s; i++)
                    {
                        hqrnd.hqrndnormal2(state, ref tau.x, ref tau.y);
                        v[i] = tau;
                    }
                    lambdav = 0.0;
                    for(i_=1; i_<=s;i_++)
                    {
                        lambdav += v[i_]*math.conj(v[i_]);
                    }
                }
                while( lambdav==0 );
                
                //
                // Prepare and apply reflection
                //
                creflections.complexgeneratereflection(ref v, s, ref tau);
                v[1] = 1;
                creflections.complexapplyreflectionfromtheright(ref a, tau, ref v, 0, n-1, n-s, n-1, ref w);
                creflections.complexapplyreflectionfromtheleft(ref a, math.conj(tau), v, n-s, n-1, 0, n-1, ref w);
            }
            
            //
            // Second pass.
            //
            for(i=0; i<=n-1; i++)
            {
                hqrnd.hqrndunit2(state, ref tau.x, ref tau.y);
                for(i_=0; i_<=n-1;i_++)
                {
                    a[i_,i] = tau*a[i_,i];
                }
                tau = math.conj(tau);
                for(i_=0; i_<=n-1;i_++)
                {
                    a[i,i_] = tau*a[i,i_];
                }
            }
        }
Example #4
        //
        // Public declarations
        //

        public hqrndstate()
        {
            _innerobj = new hqrnd.hqrndstate();
        }
Example #5
File: linalg.cs Project: Ring-r/opt
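        /*************************************************************************
        Header added for clarity; the summary below is inferred from the code:
        generation of a random NxN complex matrix with 2-norm condition number C
        and norm2(A)=1. A diagonal of singular values spanning [1/C,1] is
        multiplied by random unitary matrices from the left and from the right.
        *************************************************************************/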
        public static void cmatrixrndcond(int n,
            double c,
            ref complex[,] a)
        {
            int i = 0;
            int j = 0;
            double l1 = 0;
            double l2 = 0;
            hqrnd.hqrndstate state = new hqrnd.hqrndstate();
            complex v = 0;

            a = new complex[0,0];

            ap.assert(n>=1 & (double)(c)>=(double)(1), "CMatrixRndCond: N<1 or C<1!");
            a = new complex[n, n];
            if( n==1 )
            {
                
                //
                // special case
                //
                hqrnd.hqrndrandomize(state);
                hqrnd.hqrndunit2(state, ref v.x, ref v.y);
                a[0,0] = v;
                return;
            }
            l1 = 0;
            l2 = Math.Log(1/c);
            for(i=0; i<=n-1; i++)
            {
                for(j=0; j<=n-1; j++)
                {
                    a[i,j] = 0;
                }
            }
            a[0,0] = Math.Exp(l1);
            for(i=1; i<=n-2; i++)
            {
                a[i,i] = Math.Exp(math.randomreal()*(l2-l1)+l1);
            }
            a[n-1,n-1] = Math.Exp(l2);
            cmatrixrndorthogonalfromtheleft(ref a, n, n);
            cmatrixrndorthogonalfromtheright(ref a, n, n);
        }
Example #6
File: linalg.cs Project: Ring-r/opt
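        /*************************************************************************
        Header added for clarity; the summary below is inferred from the code:
        multiplication of an MxN complex matrix by a random unitary MxM matrix
        from the left, A := Q*A, built from a sequence of random complex
        Householder reflections plus random unit diagonal scaling.
        *************************************************************************/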
        public static void cmatrixrndorthogonalfromtheleft(ref complex[,] a,
            int m,
            int n)
        {
            complex tau = 0;
            complex lambdav = 0;
            int s = 0;
            int i = 0;
            int j = 0;
            complex[] w = new complex[0];
            complex[] v = new complex[0];
            hqrnd.hqrndstate state = new hqrnd.hqrndstate();
            int i_ = 0;

            ap.assert(n>=1 & m>=1, "CMatrixRndOrthogonalFromTheLeft: N<1 or M<1!");
            if( m==1 )
            {
                
                //
                // special case
                //
                hqrnd.hqrndrandomize(state);
                hqrnd.hqrndunit2(state, ref tau.x, ref tau.y);
                for(j=0; j<=n-1; j++)
                {
                    a[0,j] = a[0,j]*tau;
                }
                return;
            }
            
            //
            // General case.
            // First pass.
            //
            w = new complex[n];
            v = new complex[m+1];
            hqrnd.hqrndrandomize(state);
            for(s=2; s<=m; s++)
            {
                
                //
                // Prepare random normal v
                //
                do
                {
                    for(i=1; i<=s; i++)
                    {
                        hqrnd.hqrndnormal2(state, ref tau.x, ref tau.y);
                        v[i] = tau;
                    }
                    lambdav = 0.0;
                    for(i_=1; i_<=s;i_++)
                    {
                        lambdav += v[i_]*math.conj(v[i_]);
                    }
                }
                while( lambdav==0 );
                
                //
                // Prepare and apply reflection
                //
                creflections.complexgeneratereflection(ref v, s, ref tau);
                v[1] = 1;
                creflections.complexapplyreflectionfromtheleft(ref a, tau, v, m-s, m-1, 0, n-1, ref w);
            }
            
            //
            // Second pass.
            //
            for(i=0; i<=m-1; i++)
            {
                hqrnd.hqrndunit2(state, ref tau.x, ref tau.y);
                for(i_=0; i_<=n-1;i_++)
                {
                    a[i,i_] = tau*a[i,i_];
                }
            }
        }
Example #7
 public override void init()
 {
     bestparameters = new double[0];
     network = new mlpbase.multilayerperceptron();
     optimizer = new minlbfgs.minlbfgsstate();
     optimizerrep = new minlbfgs.minlbfgsreport();
     wbuf0 = new double[0];
     wbuf1 = new double[0];
     allminibatches = new int[0];
     currentminibatch = new int[0];
     rstate = new rcommstate();
     generator = new hqrnd.hqrndstate();
 }
Example #8
            /*************************************************************************
            This is a hash function: it deterministically maps the pair (I,J)
            into [0,TabSize).

              -- ALGLIB PROJECT --
                 Copyright 14.10.2011 by Bochkanov Sergey
            *************************************************************************/
            private static int hash(int i,
                int j,
                int tabsize)
            {
                int result = 0;
                hqrnd.hqrndstate r = new hqrnd.hqrndstate();

                hqrnd.hqrndseed(i, j, r);
                result = hqrnd.hqrnduniformi(r, tabsize);
                return result;
            }
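            //
            // Usage note (illustration): because the generator is re-seeded from
            // (I,J) on every call, the mapping is deterministic, e.g.
            // hash(3, 7, 64) always returns the same bucket in [0,64).
            //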
Example #9
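        /*************************************************************************
        Header added for clarity; the summary below is inferred from the code:
        internal random forest construction routine. Builds NTrees trees, each on
        a random subsample of SampleSize points, stores the forest in DF, and
        computes training-set and out-of-bag error estimates in Rep. Info is set
        to 1 on success, to -1 on bad parameters and to -2 on out-of-range class
        labels.
        *************************************************************************/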
        public static void dfbuildinternal(double[,] xy,
            int npoints,
            int nvars,
            int nclasses,
            int ntrees,
            int samplesize,
            int nfeatures,
            int flags,
            ref int info,
            decisionforest df,
            dfreport rep)
        {
            int i = 0;
            int j = 0;
            int k = 0;
            int tmpi = 0;
            int lasttreeoffs = 0;
            int offs = 0;
            int ooboffs = 0;
            int treesize = 0;
            int nvarsinpool = 0;
            bool useevs = new bool();
            dfinternalbuffers bufs = new dfinternalbuffers();
            int[] permbuf = new int[0];
            double[] oobbuf = new double[0];
            int[] oobcntbuf = new int[0];
            double[,] xys = new double[0,0];
            double[] x = new double[0];
            double[] y = new double[0];
            int oobcnt = 0;
            int oobrelcnt = 0;
            double v = 0;
            double vmin = 0;
            double vmax = 0;
            bool bflag = new bool();
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;
            int i1_ = 0;

            info = 0;

            
            //
            // Test for inputs
            //
            if( (((((npoints<1 || samplesize<1) || samplesize>npoints) || nvars<1) || nclasses<1) || ntrees<1) || nfeatures<1 )
            {
                info = -1;
                return;
            }
            if( nclasses>1 )
            {
                for(i=0; i<=npoints-1; i++)
                {
                    if( (int)Math.Round(xy[i,nvars])<0 || (int)Math.Round(xy[i,nvars])>=nclasses )
                    {
                        info = -2;
                        return;
                    }
                }
            }
            info = 1;
            
            //
            // Flags
            //
            useevs = flags/dfuseevs%2!=0;
            
            //
            // Allocate data, prepare header
            //
            treesize = 1+innernodewidth*(samplesize-1)+leafnodewidth*samplesize;
            permbuf = new int[npoints-1+1];
            bufs.treebuf = new double[treesize-1+1];
            bufs.idxbuf = new int[npoints-1+1];
            bufs.tmpbufr = new double[npoints-1+1];
            bufs.tmpbufr2 = new double[npoints-1+1];
            bufs.tmpbufi = new int[npoints-1+1];
            bufs.sortrbuf = new double[npoints];
            bufs.sortrbuf2 = new double[npoints];
            bufs.sortibuf = new int[npoints];
            bufs.varpool = new int[nvars-1+1];
            bufs.evsbin = new bool[nvars-1+1];
            bufs.evssplits = new double[nvars-1+1];
            bufs.classibuf = new int[2*nclasses-1+1];
            oobbuf = new double[nclasses*npoints-1+1];
            oobcntbuf = new int[npoints-1+1];
            df.trees = new double[ntrees*treesize-1+1];
            xys = new double[samplesize-1+1, nvars+1];
            x = new double[nvars-1+1];
            y = new double[nclasses-1+1];
            for(i=0; i<=npoints-1; i++)
            {
                permbuf[i] = i;
            }
            for(i=0; i<=npoints*nclasses-1; i++)
            {
                oobbuf[i] = 0;
            }
            for(i=0; i<=npoints-1; i++)
            {
                oobcntbuf[i] = 0;
            }
            
            //
            // Prepare variable pool and EVS (extended variable selection/splitting) buffers
            // (whether EVS is turned on or not):
            // 1. detect binary variables and pre-calculate splits for them
            // 2. detect variables with non-distinct values and exclude them from pool
            //
            for(i=0; i<=nvars-1; i++)
            {
                bufs.varpool[i] = i;
            }
            nvarsinpool = nvars;
            if( useevs )
            {
                for(j=0; j<=nvars-1; j++)
                {
                    vmin = xy[0,j];
                    vmax = vmin;
                    for(i=0; i<=npoints-1; i++)
                    {
                        v = xy[i,j];
                        vmin = Math.Min(vmin, v);
                        vmax = Math.Max(vmax, v);
                    }
                    if( (double)(vmin)==(double)(vmax) )
                    {
                        
                        //
                        // exclude variable from pool
                        //
                        bufs.varpool[j] = bufs.varpool[nvarsinpool-1];
                        bufs.varpool[nvarsinpool-1] = -1;
                        nvarsinpool = nvarsinpool-1;
                        continue;
                    }
                    bflag = false;
                    for(i=0; i<=npoints-1; i++)
                    {
                        v = xy[i,j];
                        if( (double)(v)!=(double)(vmin) && (double)(v)!=(double)(vmax) )
                        {
                            bflag = true;
                            break;
                        }
                    }
                    if( bflag )
                    {
                        
                        //
                        // non-binary variable
                        //
                        bufs.evsbin[j] = false;
                    }
                    else
                    {
                        
                        //
                        // Prepare
                        //
                        bufs.evsbin[j] = true;
                        bufs.evssplits[j] = 0.5*(vmin+vmax);
                        if( (double)(bufs.evssplits[j])<=(double)(vmin) )
                        {
                            bufs.evssplits[j] = vmax;
                        }
                    }
                }
            }
            
            //
            // RANDOM FOREST FORMAT
            // W[0]         -   size of array
            // W[1]         -   version number
            // W[2]         -   NVars
            // W[3]         -   NClasses (1 for regression)
            // W[4]         -   NTrees
            // W[5]         -   trees offset
            //
            //
            // TREE FORMAT
            // W[Offs]      -   size of sub-array
            //     node info:
            // W[K+0]       -   variable number        (-1 for leaf node)
            // W[K+1]       -   threshold              (class/value for leaf node)
            // W[K+2]       -   ">=" branch index      (absent for leaf node)
            //
            //
            df.nvars = nvars;
            df.nclasses = nclasses;
            df.ntrees = ntrees;
            
            //
            // Build forest
            //
            hqrnd.hqrndrandomize(rs);
            offs = 0;
            for(i=0; i<=ntrees-1; i++)
            {
                
                //
                // Prepare sample
                //
                for(k=0; k<=samplesize-1; k++)
                {
                    j = k+hqrnd.hqrnduniformi(rs, npoints-k);
                    tmpi = permbuf[k];
                    permbuf[k] = permbuf[j];
                    permbuf[j] = tmpi;
                    j = permbuf[k];
                    for(i_=0; i_<=nvars;i_++)
                    {
                        xys[k,i_] = xy[j,i_];
                    }
                }
                
                //
                // build tree, copy
                //
                dfbuildtree(xys, samplesize, nvars, nclasses, nfeatures, nvarsinpool, flags, bufs, rs);
                j = (int)Math.Round(bufs.treebuf[0]);
                i1_ = (0) - (offs);
                for(i_=offs; i_<=offs+j-1;i_++)
                {
                    df.trees[i_] = bufs.treebuf[i_+i1_];
                }
                lasttreeoffs = offs;
                offs = offs+j;
                
                //
                // OOB estimates
                //
                for(k=samplesize; k<=npoints-1; k++)
                {
                    for(j=0; j<=nclasses-1; j++)
                    {
                        y[j] = 0;
                    }
                    j = permbuf[k];
                    for(i_=0; i_<=nvars-1;i_++)
                    {
                        x[i_] = xy[j,i_];
                    }
                    dfprocessinternal(df, lasttreeoffs, x, ref y);
                    i1_ = (0) - (j*nclasses);
                    for(i_=j*nclasses; i_<=(j+1)*nclasses-1;i_++)
                    {
                        oobbuf[i_] = oobbuf[i_] + y[i_+i1_];
                    }
                    oobcntbuf[j] = oobcntbuf[j]+1;
                }
            }
            df.bufsize = offs;
            
            //
            // Normalize OOB results
            //
            for(i=0; i<=npoints-1; i++)
            {
                if( oobcntbuf[i]!=0 )
                {
                    v = (double)1/(double)oobcntbuf[i];
                    for(i_=i*nclasses; i_<=i*nclasses+nclasses-1;i_++)
                    {
                        oobbuf[i_] = v*oobbuf[i_];
                    }
                }
            }
            
            //
            // Calculate training set estimates
            //
            rep.relclserror = dfrelclserror(df, xy, npoints);
            rep.avgce = dfavgce(df, xy, npoints);
            rep.rmserror = dfrmserror(df, xy, npoints);
            rep.avgerror = dfavgerror(df, xy, npoints);
            rep.avgrelerror = dfavgrelerror(df, xy, npoints);
            
            //
            // Calculate OOB estimates.
            //
            rep.oobrelclserror = 0;
            rep.oobavgce = 0;
            rep.oobrmserror = 0;
            rep.oobavgerror = 0;
            rep.oobavgrelerror = 0;
            oobcnt = 0;
            oobrelcnt = 0;
            for(i=0; i<=npoints-1; i++)
            {
                if( oobcntbuf[i]!=0 )
                {
                    ooboffs = i*nclasses;
                    if( nclasses>1 )
                    {
                        
                        //
                        // classification-specific code
                        //
                        k = (int)Math.Round(xy[i,nvars]);
                        tmpi = 0;
                        for(j=1; j<=nclasses-1; j++)
                        {
                            if( (double)(oobbuf[ooboffs+j])>(double)(oobbuf[ooboffs+tmpi]) )
                            {
                                tmpi = j;
                            }
                        }
                        if( tmpi!=k )
                        {
                            rep.oobrelclserror = rep.oobrelclserror+1;
                        }
                        if( (double)(oobbuf[ooboffs+k])!=(double)(0) )
                        {
                            rep.oobavgce = rep.oobavgce-Math.Log(oobbuf[ooboffs+k]);
                        }
                        else
                        {
                            rep.oobavgce = rep.oobavgce-Math.Log(math.minrealnumber);
                        }
                        for(j=0; j<=nclasses-1; j++)
                        {
                            if( j==k )
                            {
                                rep.oobrmserror = rep.oobrmserror+math.sqr(oobbuf[ooboffs+j]-1);
                                rep.oobavgerror = rep.oobavgerror+Math.Abs(oobbuf[ooboffs+j]-1);
                                rep.oobavgrelerror = rep.oobavgrelerror+Math.Abs(oobbuf[ooboffs+j]-1);
                                oobrelcnt = oobrelcnt+1;
                            }
                            else
                            {
                                rep.oobrmserror = rep.oobrmserror+math.sqr(oobbuf[ooboffs+j]);
                                rep.oobavgerror = rep.oobavgerror+Math.Abs(oobbuf[ooboffs+j]);
                            }
                        }
                    }
                    else
                    {
                        
                        //
                        // regression-specific code
                        //
                        rep.oobrmserror = rep.oobrmserror+math.sqr(oobbuf[ooboffs]-xy[i,nvars]);
                        rep.oobavgerror = rep.oobavgerror+Math.Abs(oobbuf[ooboffs]-xy[i,nvars]);
                        if( (double)(xy[i,nvars])!=(double)(0) )
                        {
                            rep.oobavgrelerror = rep.oobavgrelerror+Math.Abs((oobbuf[ooboffs]-xy[i,nvars])/xy[i,nvars]);
                            oobrelcnt = oobrelcnt+1;
                        }
                    }
                    
                    //
                    // update OOB estimates count.
                    //
                    oobcnt = oobcnt+1;
                }
            }
            if( oobcnt>0 )
            {
                rep.oobrelclserror = rep.oobrelclserror/oobcnt;
                rep.oobavgce = rep.oobavgce/oobcnt;
                rep.oobrmserror = Math.Sqrt(rep.oobrmserror/(oobcnt*nclasses));
                rep.oobavgerror = rep.oobavgerror/(oobcnt*nclasses);
                if( oobrelcnt>0 )
                {
                    rep.oobavgrelerror = rep.oobavgrelerror/oobrelcnt;
                }
            }
        }
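        /*************************************************************************
        Usage sketch (illustration, not part of ALGLIB): building a tiny
        classification forest with the internal API above. The public entry
        points differ slightly, so treat this only as a sketch of the parameter
        layout; the data and the helper name are hypothetical.
        *************************************************************************/
        public static void dfbuildinternalexample()
        {
            //
            // Two variables, two classes; the class label is stored in the last
            // column of XY.
            //
            double[,] xy = new double[,]{
                {0.0, 0.0, 0},
                {0.0, 1.0, 0},
                {1.0, 0.0, 1},
                {1.0, 1.0, 1}};
            int info = 0;
            decisionforest df = new decisionforest();
            dfreport rep = new dfreport();

            //
            // 10 trees, random samples of 3 points, 1 feature per split,
            // Flags=0 (extended variable selection disabled).
            //
            dfbuildinternal(xy, 4, 2, 2, 10, 3, 1, 0, ref info, df, rep);

            //
            // On success Info=1; Rep holds both training-set errors and
            // out-of-bag estimates such as Rep.OOBRelClsError.
            //
        }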
Example #10
        /*************************************************************************
        Randomization of neural network weights

          -- ALGLIB --
             Copyright 06.11.2007 by Bochkanov Sergey
        *************************************************************************/
        public static void mlprandomize(multilayerperceptron network)
        {
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int ntotal = 0;
            int istart = 0;
            hqrnd.hqrndstate r = new hqrnd.hqrndstate();
            int entrysize = 0;
            int entryoffs = 0;
            int neuronidx = 0;
            int neurontype = 0;
            double vmean = 0;
            double vvar = 0;
            int i = 0;
            int n1 = 0;
            int n2 = 0;
            double desiredsigma = 0;
            int montecarlocnt = 0;
            double ef = 0;
            double ef2 = 0;
            double v = 0;
            double wscale = 0;

            hqrnd.hqrndrandomize(r);
            mlpproperties(network, ref nin, ref nout, ref wcount);
            ntotal = network.structinfo[3];
            istart = network.structinfo[5];
            desiredsigma = 0.5;
            montecarlocnt = 20;
            
            //
            // Stage 1:
            // * Network.Weights is filled by standard deviation of weights
            // * default values: sigma=1
            //
            for(i=0; i<=wcount-1; i++)
            {
                network.weights[i] = 1.0;
            }
            
            //
            // Stage 2:
            // * assume that input neurons have zero mean and unit standard deviation
            // * assume that constant neurons have zero standard deviation
            // * perform forward pass along neurons
            // * for each non-input non-constant neuron:
            //   * calculate mean and standard deviation of the neuron's output,
            //     assuming that we know the means/deviations of the neurons which
            //     feed it and that the weights have unit variance and zero mean.
            // * for each nonlinear neuron we additionally perform a backward pass:
            //   * scale the variances of the weights which feed it in such a way
            //     that the neuron's input has unit standard deviation
            //
            // NOTE: this algorithm assumes that each connection feeds at most one
            //       non-linear neuron. This assumption can be incorrect in upcoming
            //       architectures with strong neurons. However, the algorithm
            //       should work smoothly even in this case.
            //
            // During this stage we use Network.RndBuf, which is grouped into NTotal
            // entries, each of them having the following format:
            //
            // Buf[Offset+0]        mean value of neuron's output
            // Buf[Offset+1]        standard deviation of neuron's output
            // 
            //
            //
            entrysize = 2;
            apserv.rvectorsetlengthatleast(ref network.rndbuf, entrysize*ntotal);
            for(neuronidx=0; neuronidx<=ntotal-1; neuronidx++)
            {
                neurontype = network.structinfo[istart+neuronidx*nfieldwidth+0];
                entryoffs = entrysize*neuronidx;
                if( neurontype==-2 )
                {
                    
                    //
                    // Input neuron: zero mean, unit variance.
                    //
                    network.rndbuf[entryoffs+0] = 0.0;
                    network.rndbuf[entryoffs+1] = 1.0;
                    continue;
                }
                if( neurontype==-3 )
                {
                    
                    //
                    // "-1" neuron: mean=-1, zero variance.
                    //
                    network.rndbuf[entryoffs+0] = -1.0;
                    network.rndbuf[entryoffs+1] = 0.0;
                    continue;
                }
                if( neurontype==-4 )
                {
                    
                    //
                    // "0" neuron: mean=0, zero variance.
                    //
                    network.rndbuf[entryoffs+0] = 0.0;
                    network.rndbuf[entryoffs+1] = 0.0;
                    continue;
                }
                if( neurontype==0 )
                {
                    
                    //
                    // Adaptive summator neuron:
                    // * calculate its mean and variance.
                    // * we assume that the weights of this neuron have unit variance
                    //   and zero mean.
                    // * thus, the neuron's output always has zero mean.
                    // * as for the variance, it is a bit more interesting:
                    //   * let n[i] be the i-th input neuron
                    //   * let w[i] be the i-th weight
                    //   * we assume that n[i] and w[i] are independently distributed
                    //   * Var(n0*w0+n1*w1+...) = Var(n0*w0)+Var(n1*w1)+...
                    //   * Var(X*Y) = mean(X)^2*Var(Y) + mean(Y)^2*Var(X) + Var(X)*Var(Y)
                    //   * mean(w[i])=0, Var(w[i])=1
                    //   * Var(n[i]*w[i]) = mean(n[i])^2 + Var(n[i])
                    //
                    n1 = network.structinfo[istart+neuronidx*nfieldwidth+2];
                    n2 = n1+network.structinfo[istart+neuronidx*nfieldwidth+1]-1;
                    vmean = 0.0;
                    vvar = 0.0;
                    for(i=n1; i<=n2; i++)
                    {
                        vvar = vvar+math.sqr(network.rndbuf[entrysize*i+0])+math.sqr(network.rndbuf[entrysize*i+1]);
                    }
                    network.rndbuf[entryoffs+0] = vmean;
                    network.rndbuf[entryoffs+1] = Math.Sqrt(vvar);
                    continue;
                }
                if( neurontype==-5 )
                {
                    
                    //
                    // Linear activation function
                    //
                    i = network.structinfo[istart+neuronidx*nfieldwidth+2];
                    vmean = network.rndbuf[entrysize*i+0];
                    vvar = math.sqr(network.rndbuf[entrysize*i+1]);
                    if( (double)(vvar)>(double)(0) )
                    {
                        wscale = desiredsigma/Math.Sqrt(vvar);
                    }
                    else
                    {
                        wscale = 1.0;
                    }
                    randomizebackwardpass(network, i, wscale);
                    network.rndbuf[entryoffs+0] = vmean*wscale;
                    network.rndbuf[entryoffs+1] = desiredsigma;
                    continue;
                }
                if( neurontype>0 )
                {
                    
                    //
                    // Nonlinear activation function:
                    // * scale its inputs
                    // * estimate mean/sigma of its output using Monte-Carlo method
                    //   (we simulate different inputs with unit deviation and
                    //   sample activation function output on such inputs)
                    //
                    i = network.structinfo[istart+neuronidx*nfieldwidth+2];
                    vmean = network.rndbuf[entrysize*i+0];
                    vvar = math.sqr(network.rndbuf[entrysize*i+1]);
                    if( (double)(vvar)>(double)(0) )
                    {
                        wscale = desiredsigma/Math.Sqrt(vvar);
                    }
                    else
                    {
                        wscale = 1.0;
                    }
                    randomizebackwardpass(network, i, wscale);
                    ef = 0.0;
                    ef2 = 0.0;
                    vmean = vmean*wscale;
                    for(i=0; i<=montecarlocnt-1; i++)
                    {
                        v = vmean+desiredsigma*hqrnd.hqrndnormal(r);
                        ef = ef+v;
                        ef2 = ef2+v*v;
                    }
                    ef = ef/montecarlocnt;
                    ef2 = ef2/montecarlocnt;
                    network.rndbuf[entryoffs+0] = ef;
                    // The entry format above stores a standard deviation, hence
                    // the square root of the (clamped) sample variance.
                    network.rndbuf[entryoffs+1] = Math.Sqrt(Math.Max(ef2-ef*ef, 0.0));
                    continue;
                }
                alglib.ap.assert(false, "MLPRandomize: unexpected neuron type");
            }
            
            //
            // Stage 3: generate weights.
            //
            for(i=0; i<=wcount-1; i++)
            {
                network.weights[i] = network.weights[i]*hqrnd.hqrndnormal(r);
            }
        }
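        /*************************************************************************
        Side note (illustration only, not part of ALGLIB): the variance identity
        used in Stage 2 of MLPRandomize above, Var(n*w) = mean(n)^2+Var(n) for
        independent n and w with mean(w)=0 and Var(w)=1, can be sanity-checked
        with a quick Monte-Carlo simulation.
        *************************************************************************/
        public static double mlprandomizevarcheck()
        {
            Random rnd = new Random(1);
            int cnt = 1000000;
            double mean = 0;
            double meansq = 0;
            for(int t=0; t<=cnt-1; t++)
            {
                //
                // Box-Muller draws: n ~ N(2,3^2), w ~ N(0,1)
                //
                double n = 2+3*Math.Sqrt(-2*Math.Log(1-rnd.NextDouble()))*Math.Cos(2*Math.PI*rnd.NextDouble());
                double w = Math.Sqrt(-2*Math.Log(1-rnd.NextDouble()))*Math.Cos(2*Math.PI*rnd.NextDouble());
                mean = mean+n*w;
                meansq = meansq+math.sqr(n*w);
            }
            mean = mean/cnt;
            meansq = meansq/cnt;

            //
            // Sample variance of n*w; should be close to 2^2+3^2 = 13.
            //
            return meansq-mean*mean;
        }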
Example #11
        /*************************************************************************
        This function selects initial centers according to specified initialization
        algorithm.

        IMPORTANT: this function provides no  guarantees  regarding  selection  of
                   DIFFERENT  centers.  Centers  returned  by  this  function  may
                   include duplicates (say, when random sampling is  used). It  is
                   also possible that some centers are empty.
                   The algorithm which uses this function must be able to deal
                   with such degenerate sets; say, you may call FixCenters() in
                   order to fix empty centers.

        INPUT PARAMETERS:
            XY          -   dataset, array [0..NPoints-1,0..NVars-1].
            NPoints     -   points count
            NVars       -   number of variables, NVars>=1
            InitAlgo    -   initialization algorithm:
                            * 0 - automatic selection of best algorithm
                            * 1 - random selection
                            * 2 - k-means++
                            * 3 - fast-greedy init
                            * -1 - first K rows of dataset are used (debug algorithm)
            K           -   number of centers, K>=1
            CT          -   possibly preallocated output buffer, resized if needed
            InitBuf     -   internal buffer, possibly uninitialized instance of
                            APBuffers. It is recommended to use this instance only
                            with SelectInitialCenters() and FixCenters() functions,
                            because these functions may allocate really large storage.
            UpdatePool  -   shared pool seeded with instance of APBuffers structure
                            (seed instance can be uninitialized). Used internally by
                            the KMeansUpdateDistances() function. It is recommended
                            to use this pool ONLY with KMeansUpdateDistances()
                            function.

        OUTPUT PARAMETERS:
            CT          -   set of K centers, one per row
            
          -- ALGLIB --
             Copyright 21.01.2015 by Bochkanov Sergey
        *************************************************************************/
        private static void selectinitialcenters(double[,] xy,
            int npoints,
            int nvars,
            int initalgo,
            int k,
            ref double[,] ct,
            apserv.apbuffers initbuf,
            alglib.smp.shared_pool updatepool)
        {
            int cidx = 0;
            int i = 0;
            int j = 0;
            double v = 0;
            double vv = 0;
            double s = 0;
            int lastnz = 0;
            int ptidx = 0;
            int samplesize = 0;
            int samplescntnew = 0;
            int samplescntall = 0;
            double samplescale = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;

            hqrnd.hqrndrandomize(rs);
            
            //
            // Check parameters
            //
            alglib.ap.assert(npoints>0, "SelectInitialCenters: internal error");
            alglib.ap.assert(nvars>0, "SelectInitialCenters: internal error");
            alglib.ap.assert(k>0, "SelectInitialCenters: internal error");
            if( initalgo==0 )
            {
                initalgo = 3;
            }
            apserv.rmatrixsetlengthatleast(ref ct, k, nvars);
            
            //
            // Debug initialization: use first K rows of dataset (wrapping around if K>NPoints)
            //
            if( initalgo==-1 )
            {
                for(i=0; i<=k-1; i++)
                {
                    for(i_=0; i_<=nvars-1;i_++)
                    {
                        ct[i,i_] = xy[i%npoints,i_];
                    }
                }
                return;
            }
            
            //
            // Random initialization
            //
            if( initalgo==1 )
            {
                for(i=0; i<=k-1; i++)
                {
                    j = hqrnd.hqrnduniformi(rs, npoints);
                    for(i_=0; i_<=nvars-1;i_++)
                    {
                        ct[i,i_] = xy[j,i_];
                    }
                }
                return;
            }
            
            //
            // k-means++ initialization
            //
            if( initalgo==2 )
            {
                
                //
                // Prepare distances array.
                // Select initial center at random.
                //
                apserv.rvectorsetlengthatleast(ref initbuf.ra0, npoints);
                for(i=0; i<=npoints-1; i++)
                {
                    initbuf.ra0[i] = math.maxrealnumber;
                }
                ptidx = hqrnd.hqrnduniformi(rs, npoints);
                for(i_=0; i_<=nvars-1;i_++)
                {
                    ct[0,i_] = xy[ptidx,i_];
                }
                
                //
                // For each newly added center repeat:
                // * reevaluate distances from points to best centers
                // * sample points with probability dependent on distance
                // * add new center
                //
                for(cidx=0; cidx<=k-2; cidx++)
                {
                    
                    //
                    // Reevaluate distances
                    //
                    s = 0.0;
                    for(i=0; i<=npoints-1; i++)
                    {
                        v = 0.0;
                        for(j=0; j<=nvars-1; j++)
                        {
                            vv = xy[i,j]-ct[cidx,j];
                            v = v+vv*vv;
                        }
                        if( (double)(v)<(double)(initbuf.ra0[i]) )
                        {
                            initbuf.ra0[i] = v;
                        }
                        s = s+initbuf.ra0[i];
                    }
                    
                    //
                    // If all distances are zero, it means that we can not find enough
                    // distinct points. In this case we just select non-distinct center
                    // at random and continue iterations. This issue will be handled
                    // later in the FixCenters() function.
                    //
                    if( (double)(s)==(double)(0.0) )
                    {
                        ptidx = hqrnd.hqrnduniformi(rs, npoints);
                        for(i_=0; i_<=nvars-1;i_++)
                        {
                            ct[cidx+1,i_] = xy[ptidx,i_];
                        }
                        continue;
                    }
                    
                    //
                    // Select point as center using its distance.
                    // We also handle situation when because of rounding errors
                    // no point was selected - in this case, last non-zero one
                    // will be used.
                    //
                    v = hqrnd.hqrnduniformr(rs);
                    vv = 0.0;
                    lastnz = -1;
                    ptidx = -1;
                    for(i=0; i<=npoints-1; i++)
                    {
                        if( (double)(initbuf.ra0[i])==(double)(0.0) )
                        {
                            continue;
                        }
                        lastnz = i;
                        vv = vv+initbuf.ra0[i];
                        if( (double)(v)<=(double)(vv/s) )
                        {
                            ptidx = i;
                            break;
                        }
                    }
                    alglib.ap.assert(lastnz>=0, "SelectInitialCenters: integrity error");
                    if( ptidx<0 )
                    {
                        ptidx = lastnz;
                    }
                    for(i_=0; i_<=nvars-1;i_++)
                    {
                        ct[cidx+1,i_] = xy[ptidx,i_];
                    }
                }
                return;
            }
            
            //
            // "Fast-greedy" algorithm based on "Scalable k-means++".
            //
            // We perform several rounds, within each round we sample about 0.5*K points
            // (not exactly 0.5*K) until we have 2*K points sampled. Before each round
            // we calculate distances from dataset points to closest points sampled so far.
            // We sample dataset points independently, using 0.5*K*distance/total
            // as sampling probability (similar to k-means++, but each point is
            // sampled independently; after each round we have roughly 0.5*K
            // points added to the sample).
            //
            // After sampling is done, we run "greedy" version of k-means++ on this subsample
            // which selects most distant point on every round.
            //
            if( initalgo==3 )
            {
                
                //
                // Prepare arrays.
                // Select initial center at random, add it to "new" part of sample,
                // which is stored at the beginning of the array
                //
                samplesize = 2*k;
                samplescale = 0.5*k;
                apserv.rmatrixsetlengthatleast(ref initbuf.rm0, samplesize, nvars);
                ptidx = hqrnd.hqrnduniformi(rs, npoints);
                for(i_=0; i_<=nvars-1;i_++)
                {
                    initbuf.rm0[0,i_] = xy[ptidx,i_];
                }
                samplescntnew = 1;
                samplescntall = 1;
                apserv.rvectorsetlengthatleast(ref initbuf.ra0, npoints);
                apserv.rvectorsetlengthatleast(ref initbuf.ra1, npoints);
                apserv.ivectorsetlengthatleast(ref initbuf.ia1, npoints);
                for(i=0; i<=npoints-1; i++)
                {
                    initbuf.ra0[i] = math.maxrealnumber;
                }
                
                //
                // Repeat until samples count is 2*K
                //
                while( samplescntall<samplesize )
                {
                    
                    //
                    // Evaluate distances from points to NEW centers, store to RA1.
                    // Reset counter of "new" centers.
                    //
                    kmeansupdatedistances(xy, 0, npoints, nvars, initbuf.rm0, samplescntall-samplescntnew, samplescntall, initbuf.ia1, initbuf.ra1, updatepool);
                    samplescntnew = 0;
                    
                    //
                    // Merge new distances with old ones.
                    // Calculate sum of distances, if sum is exactly zero - fill sample
                    // by randomly selected points and terminate.
                    //
                    s = 0.0;
                    for(i=0; i<=npoints-1; i++)
                    {
                        initbuf.ra0[i] = Math.Min(initbuf.ra0[i], initbuf.ra1[i]);
                        s = s+initbuf.ra0[i];
                    }
                    if( (double)(s)==(double)(0.0) )
                    {
                        while( samplescntall<samplesize )
                        {
                            ptidx = hqrnd.hqrnduniformi(rs, npoints);
                            for(i_=0; i_<=nvars-1;i_++)
                            {
                                initbuf.rm0[samplescntall,i_] = xy[ptidx,i_];
                            }
                            apserv.inc(ref samplescntall);
                            apserv.inc(ref samplescntnew);
                        }
                        break;
                    }
                    
                    //
                    // Sample points independently.
                    //
                    for(i=0; i<=npoints-1; i++)
                    {
                        if( samplescntall==samplesize )
                        {
                            break;
                        }
                        if( (double)(initbuf.ra0[i])==(double)(0.0) )
                        {
                            continue;
                        }
                        if( (double)(hqrnd.hqrnduniformr(rs))<=(double)(samplescale*initbuf.ra0[i]/s) )
                        {
                            for(i_=0; i_<=nvars-1;i_++)
                            {
                                initbuf.rm0[samplescntall,i_] = xy[i,i_];
                            }
                            apserv.inc(ref samplescntall);
                            apserv.inc(ref samplescntnew);
                        }
                    }
                }
                
                //
                // Run greedy version of k-means on sampled points
                //
                apserv.rvectorsetlengthatleast(ref initbuf.ra0, samplescntall);
                for(i=0; i<=samplescntall-1; i++)
                {
                    initbuf.ra0[i] = math.maxrealnumber;
                }
                ptidx = hqrnd.hqrnduniformi(rs, samplescntall);
                for(i_=0; i_<=nvars-1;i_++)
                {
                    ct[0,i_] = initbuf.rm0[ptidx,i_];
                }
                for(cidx=0; cidx<=k-2; cidx++)
                {
                    
                    //
                    // Reevaluate distances
                    //
                    for(i=0; i<=samplescntall-1; i++)
                    {
                        v = 0.0;
                        for(j=0; j<=nvars-1; j++)
                        {
                            vv = initbuf.rm0[i,j]-ct[cidx,j];
                            v = v+vv*vv;
                        }
                        if( (double)(v)<(double)(initbuf.ra0[i]) )
                        {
                            initbuf.ra0[i] = v;
                        }
                    }
                    
                    //
                    // Select point as center in greedy manner - most distant
                    // point is selected.
                    //
                    ptidx = 0;
                    for(i=0; i<=samplescntall-1; i++)
                    {
                        if( (double)(initbuf.ra0[i])>(double)(initbuf.ra0[ptidx]) )
                        {
                            ptidx = i;
                        }
                    }
                    for(i_=0; i_<=nvars-1;i_++)
                    {
                        ct[cidx+1,i_] = initbuf.rm0[ptidx,i_];
                    }
                }
                return;
            }
            
            //
            // Internal error
            //
            alglib.ap.assert(false, "SelectInitialCenters: internal error");
        }
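
The InitAlgo=2 branch above implements the classic k-means++ D^2-sampling rule:
each candidate point is selected with probability proportional to its squared
distance to the nearest already-chosen center. A self-contained sketch of the
same rule, including the last-nonzero fallback against rounding errors
(hypothetical helper, not part of ALGLIB):

            // Pick index i with probability minDistSq[i]/sum(minDistSq);
            // fall back to the last nonzero entry on rounding errors.
            private static int samplenextcenter(double[] mindistsq, Random rng)
            {
                double s = 0;
                for(int i=0; i<mindistsq.Length; i++)
                {
                    s += mindistsq[i];
                }
                double v = rng.NextDouble()*s;
                double acc = 0;
                int lastnz = -1;
                for(int i=0; i<mindistsq.Length; i++)
                {
                    if( mindistsq[i]==0 )
                    {
                        continue;
                    }
                    lastnz = i;
                    acc += mindistsq[i];
                    if( v<=acc )
                    {
                        return i;
                    }
                }
                return lastnz; // -1 only if all distances are zero
            }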
Example #12
        /*************************************************************************
        K-means++ clusterization

        INPUT PARAMETERS:
            XY          -   dataset, array [0..NPoints-1,0..NVars-1].
            NPoints     -   dataset size, NPoints>=K
            NVars       -   number of variables, NVars>=1
            K           -   desired number of clusters, K>=1
            InitAlgo    -   initialization algorithm:
                            * 0 - automatic selection of best algorithm
                            * 1 - random selection of centers
                            * 2 - k-means++
                            * 3 - fast-greedy init
                            * -1 - first K rows of dataset are used
                                  (special debug algorithm)
            MaxIts      -   iterations limit or zero for no limit
            Restarts    -   number of restarts, Restarts>=1
            KMeansDbgNoIts- debug flag; if set, Lloyd's iteration is not performed,
                            only initialization phase.
            Buf         -   special reusable structure which stores previously allocated
                            memory, intended to avoid memory fragmentation when solving
                            multiple subsequent problems:
                            * MUST BE INITIALIZED WITH KMeansInitBuffers() CALL BEFORE
                              FIRST PASS TO THIS FUNCTION!
                            * subsequent passes must be made without re-initialization

        OUTPUT PARAMETERS:
            Info        -   return code:
                            * -3, if task is degenerate (number of distinct points is
                                  less than K)
                            * -1, if incorrect NPoints/NVars/K/Restarts was passed
                            *  1, if subroutine finished successfully
            IterationsCount- actual number of iterations performed by clusterizer
            CCol        -   array[0..NVars-1,0..K-1], matrix whose columns store
                            cluster centers
            NeedCCol    -   True in case caller requires to store result in CCol
            CRow        -   array[0..K-1,0..NVars-1], same as CCol, but centers are
                            stored in rows
            NeedCRow    -   True in case caller requires to store result in CRow
            XYC         -   array[NPoints], which contains cluster indexes
            Energy      -   merit function of clusterization

          -- ALGLIB --
             Copyright 21.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void kmeansgenerateinternal(double[,] xy,
            int npoints,
            int nvars,
            int k,
            int initalgo,
            int maxits,
            int restarts,
            bool kmeansdbgnoits,
            ref int info,
            ref int iterationscount,
            ref double[,] ccol,
            bool needccol,
            ref double[,] crow,
            bool needcrow,
            ref int[] xyc,
            ref double energy,
            kmeansbuffers buf)
        {
            int i = 0;
            int j = 0;
            int i1 = 0;
            double e = 0;
            double eprev = 0;
            double v = 0;
            double vv = 0;
            bool waschanges = new bool();
            bool zerosizeclusters = new bool();
            int pass = 0;
            int itcnt = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;

            info = 0;
            iterationscount = 0;
            ccol = new double[0,0];
            crow = new double[0,0];
            xyc = new int[0];
            energy = 0;

            
            //
            // Test parameters
            //
            if( ((npoints<k || nvars<1) || k<1) || restarts<1 )
            {
                info = -1;
                iterationscount = 0;
                return;
            }
            
            //
            // TODO: special case K=1
            // TODO: special case K=NPoints
            //
            info = 1;
            iterationscount = 0;
            
            //
            // Multiple passes of k-means++ algorithm
            //
            xyc = new int[npoints];
            apserv.rmatrixsetlengthatleast(ref buf.ct, k, nvars);
            apserv.rmatrixsetlengthatleast(ref buf.ctbest, k, nvars);
            apserv.ivectorsetlengthatleast(ref buf.xycprev, npoints);
            apserv.ivectorsetlengthatleast(ref buf.xycbest, npoints);
            apserv.rvectorsetlengthatleast(ref buf.d2, npoints);
            apserv.ivectorsetlengthatleast(ref buf.csizes, k);
            energy = math.maxrealnumber;
            hqrnd.hqrndrandomize(rs);
            for(pass=1; pass<=restarts; pass++)
            {
                
                //
                // Select initial centers.
                //
                // Note that for performance reasons centers are stored in ROWS of CT, not
                // in columns. We'll transpose CT in the end and store it in the C.
                //
                // Also note that SelectInitialCenters() may return degenerate set of centers
                // (some of them have no corresponding points in dataset, some are non-distinct).
                // Algorithm below is robust enough to deal with such set.
                //
                selectinitialcenters(xy, npoints, nvars, initalgo, k, ref buf.ct, buf.initbuf, buf.updatepool);
                
                //
                // Lloyd's iteration
                //
                if( !kmeansdbgnoits )
                {
                    
                    //
                    // Perform iteration as usual, in normal mode
                    //
                    for(i=0; i<=npoints-1; i++)
                    {
                        xyc[i] = -1;
                    }
                    eprev = math.maxrealnumber;
                    e = math.maxrealnumber;
                    itcnt = 0;
                    while( maxits==0 || itcnt<maxits )
                    {
                        
                        //
                        // Update iteration counter
                        //
                        itcnt = itcnt+1;
                        apserv.inc(ref iterationscount);
                        
                        //
                        // Call KMeansUpdateDistances(), fill XYC with center numbers,
                        // D2 with center distances.
                        //
                        for(i=0; i<=npoints-1; i++)
                        {
                            buf.xycprev[i] = xyc[i];
                        }
                        kmeansupdatedistances(xy, 0, npoints, nvars, buf.ct, 0, k, xyc, buf.d2, buf.updatepool);
                        waschanges = false;
                        for(i=0; i<=npoints-1; i++)
                        {
                            waschanges = waschanges || xyc[i]!=buf.xycprev[i];
                        }
                        
                        //
                        // Update centers
                        //
                        for(j=0; j<=k-1; j++)
                        {
                            buf.csizes[j] = 0;
                        }
                        for(i=0; i<=k-1; i++)
                        {
                            for(j=0; j<=nvars-1; j++)
                            {
                                buf.ct[i,j] = 0;
                            }
                        }
                        for(i=0; i<=npoints-1; i++)
                        {
                            buf.csizes[xyc[i]] = buf.csizes[xyc[i]]+1;
                            for(i_=0; i_<=nvars-1;i_++)
                            {
                                buf.ct[xyc[i],i_] = buf.ct[xyc[i],i_] + xy[i,i_];
                            }
                        }
                        zerosizeclusters = false;
                        for(j=0; j<=k-1; j++)
                        {
                            if( buf.csizes[j]!=0 )
                            {
                                v = (double)1/(double)buf.csizes[j];
                                for(i_=0; i_<=nvars-1;i_++)
                                {
                                    buf.ct[j,i_] = v*buf.ct[j,i_];
                                }
                            }
                            zerosizeclusters = zerosizeclusters || buf.csizes[j]==0;
                        }
                        if( zerosizeclusters )
                        {
                            
                            //
                            // Some clusters have zero size - rare, but possible.
                            // We'll choose new centers for such clusters using k-means++ rule
                            // and restart algorithm
                            //
                            if( !fixcenters(xy, npoints, nvars, buf.ct, k, buf.initbuf, buf.updatepool) )
                            {
                                info = -3;
                                return;
                            }
                            continue;
                        }
                        
                        //
                        // Stop if one of two conditions is met:
                        // 1. nothing has changed during iteration
                        // 2. energy function increased after recalculation on new centers
                        //
                        e = 0;
                        for(i=0; i<=npoints-1; i++)
                        {
                            v = 0.0;
                            i1 = xyc[i];
                            for(j=0; j<=nvars-1; j++)
                            {
                                vv = xy[i,j]-buf.ct[i1,j];
                                v = v+vv*vv;
                            }
                            e = e+v;
                        }
                        if( !waschanges || (double)(e)>=(double)(eprev) )
                        {
                            break;
                        }
                        
                        //
                        // Update EPrev
                        //
                        eprev = e;
                    }
                }
                else
                {
                    
                    //
                    // Debug mode: no Lloyd's iteration.
                    // We just calculate potential E.
                    //
                    kmeansupdatedistances(xy, 0, npoints, nvars, buf.ct, 0, k, xyc, buf.d2, buf.updatepool);
                    e = 0;
                    for(i=0; i<=npoints-1; i++)
                    {
                        e = e+buf.d2[i];
                    }
                }
                
                //
                // Compare E with best centers found so far
                //
                if( (double)(e)<(double)(energy) )
                {
                    
                    //
                    // store partition.
                    //
                    energy = e;
                    blas.copymatrix(buf.ct, 0, k-1, 0, nvars-1, ref buf.ctbest, 0, k-1, 0, nvars-1);
                    for(i=0; i<=npoints-1; i++)
                    {
                        buf.xycbest[i] = xyc[i];
                    }
                }
            }
            
            //
            // Copy and transpose
            //
            if( needccol )
            {
                ccol = new double[nvars, k];
                blas.copyandtranspose(buf.ctbest, 0, k-1, 0, nvars-1, ref ccol, 0, nvars-1, 0, k-1);
            }
            if( needcrow )
            {
                crow = new double[k, nvars];
                ablas.rmatrixcopy(k, nvars, buf.ctbest, 0, 0, ref crow, 0, 0);
            }
            for(i=0; i<=npoints-1; i++)
            {
                xyc[i] = buf.xycbest[i];
            }
        }
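
A hedged usage sketch for the clusterizer above, assuming xy, npoints, nvars and
k are already defined. The buffer-initialization call name (kmeansinitbuf) is an
assumption based on the KMeansInitBuffers() requirement stated in the comment
block; verify it against your ALGLIB version:

            // Cluster an NPoints x NVars dataset XY into K clusters using
            // automatic initialization, no iteration limit and 3 restarts;
            // centers are returned in the rows of CRow.
            kmeansbuffers buf = new kmeansbuffers();
            kmeansinitbuf(buf); // assumed initializer, see the docs above
            int info = 0;
            int itscnt = 0;
            int[] xyc = new int[0];
            double[,] ccol = new double[0,0];
            double[,] crow = new double[0,0];
            double energy = 0;
            kmeansgenerateinternal(xy, npoints, nvars, k, 0, 0, 3, false,
                ref info, ref itscnt, ref ccol, false, ref crow, true,
                ref xyc, ref energy, buf);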
Example #13
            /*************************************************************************
            K-means++ clusterization

            INPUT PARAMETERS:
                XY          -   dataset, array [0..NPoints-1,0..NVars-1].
                NPoints     -   dataset size, NPoints>=K
                NVars       -   number of variables, NVars>=1
                K           -   desired number of clusters, K>=1
                MaxIts      -   iterations limit or zero for no limit
                Restarts    -   number of restarts, Restarts>=1

            OUTPUT PARAMETERS:
                Info        -   return code:
                                * -3, if task is degenerate (number of distinct points is
                                      less than K)
                                * -1, if incorrect NPoints/NVars/K/Restarts was passed
                                *  1, if subroutine finished successfully
                CCol        -   array[0..NVars-1,0..K-1], matrix whose columns store
                                cluster centers
                NeedCCol    -   True in case caller requires to store result in CCol
                CRow        -   array[0..K-1,0..NVars-1], same as CCol, but centers are
                                stored in rows
                NeedCRow    -   True in case caller requires to store result in CRow
                XYC         -   array[NPoints], which contains cluster indexes

              -- ALGLIB --
                 Copyright 21.03.2009 by Bochkanov Sergey
            *************************************************************************/
            public static void kmeansgenerateinternal(double[,] xy,
                int npoints,
                int nvars,
                int k,
                int maxits,
                int restarts,
                ref int info,
                ref double[,] ccol,
                bool needccol,
                ref double[,] crow,
                bool needcrow,
                ref int[] xyc)
            {
                int i = 0;
                int j = 0;
                double[,] ct = new double[0, 0];
                double[,] ctbest = new double[0, 0];
                int[] xycbest = new int[0];
                double e = 0;
                double eprev = 0;
                double ebest = 0;
                double[] x = new double[0];
                double[] tmp = new double[0];
                double[] d2 = new double[0];
                double[] p = new double[0];
                int[] csizes = new int[0];
                bool[] cbusy = new bool[0];
                double v = 0;
                int cclosest = 0;
                double dclosest = 0;
                double[] work = new double[0];
                bool waschanges = new bool();
                bool zerosizeclusters = new bool();
                int pass = 0;
                int itcnt = 0;
                hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
                int i_ = 0;

                info = 0;
                ccol = new double[0, 0];
                crow = new double[0, 0];
                xyc = new int[0];


                //
                // Test parameters
                //
                if (((npoints < k || nvars < 1) || k < 1) || restarts < 1)
                {
                    info = -1;
                    return;
                }

                //
                // TODO: special case K=1
                // TODO: special case K=NPoints
                //
                info = 1;

                //
                // Multiple passes of k-means++ algorithm
                //
                ct = new double[k, nvars];
                ctbest = new double[k, nvars];
                xyc = new int[npoints];
                xycbest = new int[npoints];
                d2 = new double[npoints];
                p = new double[npoints];
                tmp = new double[nvars];
                csizes = new int[k];
                cbusy = new bool[k];
                ebest = math.maxrealnumber;
                hqrnd.hqrndrandomize(rs);
                for (pass = 1; pass <= restarts; pass++)
                {

                    //
                    // Select initial centers  using k-means++ algorithm
                    // 1. Choose first center at random
                    // 2. Choose next centers using their distance from centers already chosen
                    //
                    // Note that for performance reasons centers are stored in ROWS of CT, not
                    // in columns. We'll transpose CT in the end and store it in the C.
                    //
                    i = hqrnd.hqrnduniformi(rs, npoints);
                    for (i_ = 0; i_ <= nvars - 1; i_++)
                    {
                        ct[0, i_] = xy[i, i_];
                    }
                    cbusy[0] = true;
                    for (i = 1; i <= k - 1; i++)
                    {
                        cbusy[i] = false;
                    }
                    if (!selectcenterpp(xy, npoints, nvars, ref ct, ref cbusy, k, ref d2, ref p, ref tmp))
                    {
                        info = -3;
                        return;
                    }

                    //
                    // 2. Perform Lloyd's iteration: fill XYC with indexes of the
                    //    closest centers, then update center positions
                    //
                    for (i = 0; i <= npoints - 1; i++)
                    {
                        xyc[i] = -1;
                    }
                    eprev = math.maxrealnumber;
                    itcnt = 0;
                    e = 0;
                    while (maxits == 0 || itcnt < maxits)
                    {

                        //
                        // Update iteration counter
                        //
                        itcnt = itcnt + 1;

                        //
                        // fill XYC with center numbers
                        //
                        waschanges = false;
                        for (i = 0; i <= npoints - 1; i++)
                        {
                            cclosest = -1;
                            dclosest = math.maxrealnumber;
                            for (j = 0; j <= k - 1; j++)
                            {
                                for (i_ = 0; i_ <= nvars - 1; i_++)
                                {
                                    tmp[i_] = xy[i, i_];
                                }
                                for (i_ = 0; i_ <= nvars - 1; i_++)
                                {
                                    tmp[i_] = tmp[i_] - ct[j, i_];
                                }
                                v = 0.0;
                                for (i_ = 0; i_ <= nvars - 1; i_++)
                                {
                                    v += tmp[i_] * tmp[i_];
                                }
                                if ((double)(v) < (double)(dclosest))
                                {
                                    cclosest = j;
                                    dclosest = v;
                                }
                            }
                            if (xyc[i] != cclosest)
                            {
                                waschanges = true;
                            }
                            xyc[i] = cclosest;
                        }

                        //
                        // Update centers
                        //
                        for (j = 0; j <= k - 1; j++)
                        {
                            csizes[j] = 0;
                        }
                        for (i = 0; i <= k - 1; i++)
                        {
                            for (j = 0; j <= nvars - 1; j++)
                            {
                                ct[i, j] = 0;
                            }
                        }
                        for (i = 0; i <= npoints - 1; i++)
                        {
                            csizes[xyc[i]] = csizes[xyc[i]] + 1;
                            for (i_ = 0; i_ <= nvars - 1; i_++)
                            {
                                ct[xyc[i], i_] = ct[xyc[i], i_] + xy[i, i_];
                            }
                        }
                        zerosizeclusters = false;
                        for (j = 0; j <= k - 1; j++)
                        {
                            if (csizes[j] != 0)
                            {
                                v = (double)1 / (double)csizes[j];
                                for (i_ = 0; i_ <= nvars - 1; i_++)
                                {
                                    ct[j, i_] = v * ct[j, i_];
                                }
                            }
                            cbusy[j] = csizes[j] != 0;
                            zerosizeclusters = zerosizeclusters || csizes[j] == 0;
                        }
                        if (zerosizeclusters)
                        {

                            //
                            // Some clusters have zero size - rare, but possible.
                            // We'll choose new centers for such clusters using k-means++ rule
                            // and restart algorithm
                            //
                            if (!selectcenterpp(xy, npoints, nvars, ref ct, ref cbusy, k, ref d2, ref p, ref tmp))
                            {
                                info = -3;
                                return;
                            }
                            continue;
                        }

                        //
                        // Stop if one of two conditions is met:
                        // 1. nothing has changed during iteration
                        // 2. energy function increased 
                        //
                        e = 0;
                        for (i = 0; i <= npoints - 1; i++)
                        {
                            for (i_ = 0; i_ <= nvars - 1; i_++)
                            {
                                tmp[i_] = xy[i, i_];
                            }
                            for (i_ = 0; i_ <= nvars - 1; i_++)
                            {
                                tmp[i_] = tmp[i_] - ct[xyc[i], i_];
                            }
                            v = 0.0;
                            for (i_ = 0; i_ <= nvars - 1; i_++)
                            {
                                v += tmp[i_] * tmp[i_];
                            }
                            e = e + v;
                        }
                        if (!waschanges || (double)(e) >= (double)(eprev))
                        {
                            break;
                        }

                        //
                        // Update EPrev
                        //
                        eprev = e;
                    }

                    //
                    // 3. Calculate E, compare with the best centers found so far
                    //
                    if ((double)(e) < (double)(ebest))
                    {

                        //
                        // store partition.
                        //
                        ebest = e;
                        blas.copymatrix(ct, 0, k - 1, 0, nvars - 1, ref ctbest, 0, k - 1, 0, nvars - 1);
                        for (i = 0; i <= npoints - 1; i++)
                        {
                            xycbest[i] = xyc[i];
                        }
                    }
                }

                //
                // Copy and transpose
                //
                if (needccol)
                {
                    ccol = new double[nvars, k];
                    blas.copyandtranspose(ctbest, 0, k - 1, 0, nvars - 1, ref ccol, 0, nvars - 1, 0, k - 1);
                }
                if (needcrow)
                {
                    crow = new double[k, nvars];
                    ablas.rmatrixcopy(k, nvars, ctbest, 0, 0, ref crow, 0, 0);
                }
                for (i = 0; i <= npoints - 1; i++)
                {
                    xyc[i] = xycbest[i];
                }
            }
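
The assignment step in the loop above is a direct nearest-center scan. The same
logic as a compact standalone helper (hypothetical, for illustration only):

                // Return index of the center (row of CT) closest to row Row of XY.
                private static int nearestcenter(double[,] xy, int row, double[,] ct, int k, int nvars)
                {
                    int best = 0;
                    double bestdist = double.MaxValue;
                    for(int j=0; j<k; j++)
                    {
                        double v = 0;
                        for(int t=0; t<nvars; t++)
                        {
                            double d = xy[row,t]-ct[j,t];
                            v += d*d;
                        }
                        if( v<bestdist )
                        {
                            bestdist = v;
                            best = j;
                        }
                    }
                    return best;
                }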
Example #14
        /*************************************************************************
        Sparse Cholesky decomposition: "expert" function.

        The algorithm computes Cholesky decomposition  of  a  symmetric  positive-
        definite sparse matrix. The result is representation of A  as  A=U^T*U  or
        A=L*L^T

        Triangular factor L or U is written to separate SparseMatrix structure. If
        output buffer already contains enough memory to store L/U, this memory is
        reused.

        INPUT PARAMETERS:
            A       -   upper or lower triangle of sparse matrix.
                        Matrix can be in any sparse storage format.
            N       -   size of matrix A (can be smaller than actual size of A)
            IsUpper -   if IsUpper=True, then A contains an upper triangle of
                        a symmetric matrix, otherwise A contains a lower one.
                        Another triangle is ignored.
            P0, P1  -   integer arrays:
                        * for Ordering=-3  -  user-supplied permutation  of  rows/
                          columns, which complies  to  requirements stated  in the
                          "OUTPUT PARAMETERS" section.  Both  P0 and  P1  must  be
                          initialized by user.
                        * for other values of  Ordering  -  possibly  preallocated
                          buffer,  which   is   filled   by  internally  generated
                          permutation. Automatically resized if its  size  is  too
                          small to store data.
            Ordering-   sparse matrix reordering algorithm which is used to reduce
                        fill-in amount:
                        * -3    use ordering supplied by user in P0/P1
                        * -2    use random ordering
                        * -1    use original order
                        * 0     use best algorithm implemented so far
                        If input matrix is  given  in  SKS  format,  factorization
                        function ignores Ordering and uses original order  of  the
                        columns. The idea is that if you already store  matrix  in
                        SKS format, it is better not to perform costly reordering.
            Algo    -   type of algorithm which is used during factorization:
                        * 0     use best  algorithm  (for  SKS  input  or   output
                                matrices Algo=2 is used; otherwise Algo=1 is used)
                        * 1     use CRS-based algorithm
                        * 2     use skyline-based factorization algorithm.
                                This algorithm is the fastest one for low-profile
                                matrices,  but  requires  too  much  memory   for
                                matrices with large bandwidth.
            Fmt     -   desired storage format  of  the  output,  as  returned  by
                        SparseGetMatrixType() function:
                        * 0 for hash-based storage
                        * 1 for CRS
                        * 2 for SKS
                        If you do not know what format to choose, use 1 (CRS).
            Buf     -   SparseBuffers structure which is used to store temporaries.
                        This function may reuse previously allocated  storage,  so
                        if you perform repeated factorizations it is beneficial to
                        reuse Buf.
            C       -   SparseMatrix structure  which  can  be  just  some  random
                        garbage. In case it  contains  enough  memory  to   store
                        triangular factors, this memory will be reused. Otherwise,
                        algorithm will automatically allocate enough memory.
            

        OUTPUT PARAMETERS:
            C       -   the result of factorization, stored in desired format.  If
                        IsUpper=True, then the upper triangle  contains  matrix U,
                        such  that  (P'*A*P) = U^T*U,  where  P  is  a permutation
                        matrix (see below). The elements below the  main  diagonal
                        are zero.
                        Similarly, if IsUpper = False. In this case L is returned,
                        and we have (P'*A*P) = L*(L^T).
            P0      -   permutation  (according   to   Ordering  parameter)  which
                        minimizes amount of fill-in:
                        * P0 is array[N]
                        * permutation is applied to A before  factorization  takes
                          place, i.e. we have U'*U = L*L' = P'*A*P
                        * P0[k]=j means that column/row j of A  is  moved  to k-th
                          position before starting factorization.
            P1      -   permutation P in another format, array[N]:
                        * P1[k]=j means that k-th column/row of A is moved to j-th
                          position

        RESULT:
            If  the  matrix  is  positive-definite,  the  function  returns  True.
            Otherwise, the function returns False. Contents of C is not determined
            in such case.

        NOTE: for  performance  reasons  this  function  does NOT check that input
              matrix  includes  only  finite  values. It is your responsibility to
              make sure that there are no infinite or NAN values in the matrix.

          -- ALGLIB routine --
             16.01.2014
             Bochkanov Sergey
        *************************************************************************/
        public static bool sparsecholeskyx(sparse.sparsematrix a,
            int n,
            bool isupper,
            ref int[] p0,
            ref int[] p1,
            int ordering,
            int algo,
            int fmt,
            sparse.sparsebuffers buf,
            sparse.sparsematrix c)
        {
            bool result = new bool();
            int i = 0;
            int j = 0;
            int k = 0;
            int t0 = 0;
            int t1 = 0;
            double v = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();

            alglib.ap.assert(n>=0, "SparseCholeskyX: N<0");
            alglib.ap.assert(sparse.sparsegetnrows(a)>=n, "SparseCholeskyX: rows(A)<N");
            alglib.ap.assert(sparse.sparsegetncols(a)>=n, "SparseCholeskyX: cols(A)<N");
            alglib.ap.assert(ordering>=-3 && ordering<=0, "SparseCholeskyX: invalid Ordering parameter");
            alglib.ap.assert(algo>=0 && algo<=2, "SparseCholeskyX: invalid Algo parameter");
            hqrnd.hqrndrandomize(rs);
            
            //
            // Perform some quick checks.
            // Because sparse matrices are expensive data structures, it is
            // better to perform these checks before the factorization starts
            // (a positive-definite matrix must have a strictly positive diagonal).
            //
            result = false;
            if( n<1 )
            {
                return result;
            }
            for(i=0; i<=n-1; i++)
            {
                if( (double)(sparse.sparsegetdiagonal(a, i))<=(double)(0) )
                {
                    return result;
                }
            }
            
            //
            // First, determine appropriate ordering:
            // * for SKS inputs, Ordering=-1 is automatically chosen (overrides user settings)
            //
            if( ordering==0 )
            {
                ordering = -1;
            }
            if( sparse.sparseissks(a) )
            {
                ordering = -1;
            }
            if( ordering==-3 )
            {
                
                //
                // User-supplied ordering.
                // Check its correctness.
                //
                alglib.ap.assert(alglib.ap.len(p0)>=n, "SparseCholeskyX: user-supplied permutation is too short");
                alglib.ap.assert(alglib.ap.len(p1)>=n, "SparseCholeskyX: user-supplied permutation is too short");
                for(i=0; i<=n-1; i++)
                {
                    alglib.ap.assert(p0[i]>=0 && p0[i]<n, "SparseCholeskyX: user-supplied permutation includes values outside of [0,N)");
                    alglib.ap.assert(p1[i]>=0 && p1[i]<n, "SparseCholeskyX: user-supplied permutation includes values outside of [0,N)");
                    alglib.ap.assert(p1[p0[i]]==i, "SparseCholeskyX: user-supplied permutation is inconsistent - P1 is not inverse of P0");
                }
            }
            if( ordering==-2 )
            {
                
                //
                // Use random ordering
                //
                apserv.ivectorsetlengthatleast(ref p0, n);
                apserv.ivectorsetlengthatleast(ref p1, n);
                for(i=0; i<=n-1; i++)
                {
                    p0[i] = i;
                }
                for(i=0; i<=n-1; i++)
                {
                    j = i+hqrnd.hqrnduniformi(rs, n-i);
                    if( j!=i )
                    {
                        k = p0[i];
                        p0[i] = p0[j];
                        p0[j] = k;
                    }
                }
                for(i=0; i<=n-1; i++)
                {
                    p1[p0[i]] = i;
                }
            }
            if( ordering==-1 )
            {
                
                //
                // Use initial ordering
                //
                apserv.ivectorsetlengthatleast(ref p0, n);
                apserv.ivectorsetlengthatleast(ref p1, n);
                for(i=0; i<=n-1; i++)
                {
                    p0[i] = i;
                    p1[i] = i;
                }
            }
            
            //
            // Determine algorithm to use:
            // * for SKS input or output - use SKS solver (overrides user settings)
            // * default is to use Algo=1
            //
            if( algo==0 )
            {
                algo = 1;
            }
            if( sparse.sparseissks(a) || fmt==2 )
            {
                algo = 2;
            }
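            
            //
            // NOTE: Algo is forced to 2 (skyline) below, so the CRS-based
            //       branch (Algo=1) is never reached in this version.
            //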
            algo = 2;
            if( algo==2 )
            {
                
                //
                // Skyline Cholesky with non-skyline output.
                //
                // Call CholeskyX() recursively with Buf.S as output matrix,
                // then perform conversion from SKS to desired format. We can
                // use Buf.S in the recursive call because SKS-to-SKS CholeskyX()
                // does not use this field.
                //
                if( fmt!=2 )
                {
                    result = sparsecholeskyx(a, n, isupper, ref p0, ref p1, -3, algo, 2, buf, buf.s);
                    if( result )
                    {
                        sparse.sparsecopytobuf(buf.s, fmt, c);
                    }
                    return result;
                }
                
                //
                // Skyline Cholesky with skyline output
                //
                if( sparse.sparseissks(a) && ordering==-1 )
                {
                    
                    //
                    // Non-permuted skyline matrix.
                    //
                    // Quickly copy matrix to output buffer without permutation.
                    //
                    // NOTE: Buf.D is used as dummy vector filled with zeros.
                    //
                    apserv.ivectorsetlengthatleast(ref buf.d, n);
                    for(i=0; i<=n-1; i++)
                    {
                        buf.d[i] = 0;
                    }
                    if( isupper )
                    {
                        
                        //
                        // Create strictly upper-triangular matrix,
                        // copy upper triangle of input.
                        //
                        sparse.sparsecreatesksbuf(n, n, buf.d, a.uidx, c);
                        for(i=0; i<=n-1; i++)
                        {
                            t0 = a.ridx[i+1]-a.uidx[i]-1;
                            t1 = a.ridx[i+1]-1;
                            k = c.ridx[i+1]-c.uidx[i]-1;
                            for(j=t0; j<=t1; j++)
                            {
                                c.vals[k] = a.vals[j];
                                k = k+1;
                            }
                        }
                    }
                    else
                    {
                        
                        //
                        // Create strictly lower-triangular matrix,
                        // copy lower triangle of input.
                        //
                        sparse.sparsecreatesksbuf(n, n, a.didx, buf.d, c);
                        for(i=0; i<=n-1; i++)
                        {
                            t0 = a.ridx[i];
                            t1 = a.ridx[i]+a.didx[i];
                            k = c.ridx[i];
                            for(j=t0; j<=t1; j++)
                            {
                                c.vals[k] = a.vals[j];
                                k = k+1;
                            }
                        }
                    }
                }
                else
                {
                    
                    //
                    // Non-identity permutations OR non-skyline input:
                    // * investigate profile of permuted A
                    // * create skyline matrix in output buffer
                    // * copy input with permutation
                    //
                    apserv.ivectorsetlengthatleast(ref buf.d, n);
                    apserv.ivectorsetlengthatleast(ref buf.u, n);
                    for(i=0; i<=n-1; i++)
                    {
                        buf.d[i] = 0;
                        buf.u[i] = 0;
                    }
                    t0 = 0;
                    t1 = 0;
                    while( sparse.sparseenumerate(a, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( (isupper && j>=i) || (!isupper && j<=i) )
                        {
                            i = p1[i];
                            j = p1[j];
                            if( (j<i && isupper) || (j>i && !isupper) )
                            {
                                apserv.swapi(ref i, ref j);
                            }
                            if( i>j )
                            {
                                buf.d[i] = Math.Max(buf.d[i], i-j);
                            }
                            else
                            {
                                buf.u[j] = Math.Max(buf.u[j], j-i);
                            }
                        }
                    }
                    sparse.sparsecreatesksbuf(n, n, buf.d, buf.u, c);
                    t0 = 0;
                    t1 = 0;
                    while( sparse.sparseenumerate(a, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( (isupper && j>=i) || (!isupper && j<=i) )
                        {
                            i = p1[i];
                            j = p1[j];
                            if( (j<i && isupper) || (j>i && !isupper) )
                            {
                                apserv.swapi(ref j, ref i);
                            }
                            sparse.sparserewriteexisting(c, i, j, v);
                        }
                    }
                }
                result = sparsecholeskyskyline(c, n, isupper);
                return result;
            }
            alglib.ap.assert(false, "SparseCholeskyX: internal error - unexpected algorithm");
            return result;
        }
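
The Ordering=-3 validation above boils down to checking that P1 is the inverse
permutation of P0. The same invariant as a standalone helper (hypothetical, for
illustration only):

            // True iff p1 is the inverse of permutation p0 over [0,n).
            private static bool isinversepermutation(int[] p0, int[] p1, int n)
            {
                for(int i=0; i<n; i++)
                {
                    if( p0[i]<0 || p0[i]>=n || p1[p0[i]]!=i )
                    {
                        return false;
                    }
                }
                return true;
            }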
Example #15
        /*************************************************************************
        This function estimates generalization error using cross-validation on the
        current dataset with current training settings.

        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support (C++ computational core)
          !
          ! Second improvement gives constant  speedup (2-3X).  First  improvement
          ! gives  close-to-linear  speedup  on   multicore   systems.   Following
          ! operations can be executed in parallel:
          ! * FoldsCount cross-validation rounds (always)
          ! * NRestarts training sessions performed within each of
          !   cross-validation rounds (if NRestarts>1)
          ! * gradient calculation over large dataset (if dataset is large enough)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.

        INPUT PARAMETERS:
            S           -   trainer object
            Network     -   neural network. It must have the same number of inputs
                            and outputs/classes as was specified during creation of the
                            trainer object. Network is not changed  during  cross-
                            validation and is not trained - it  is  used  only  as
                            representative of its architecture. I.e., we  estimate
                            generalization properties of  ARCHITECTURE,  not  some
                            specific network.
            NRestarts   -   number of restarts, >=0:
                            * NRestarts>0 means that for each cross-validation
                              round the specified number of random restarts is
                              performed, with the best network being chosen after
                              training.
                            * NRestarts=0 is same as NRestarts=1
            FoldsCount  -   number of folds in k-fold cross-validation:
                            * 2<=FoldsCount<=size of dataset
                            * recommended value: 10.
                            * values larger than dataset size will be silently
                              truncated down to dataset size

        OUTPUT PARAMETERS:
            Rep         -   structure which contains cross-validation estimates:
                            * Rep.RelCLSError - fraction of misclassified cases.
                            * Rep.AvgCE - average cross-entropy
                            * Rep.RMSError - root-mean-square error
                            * Rep.AvgError - average error
                            * Rep.AvgRelError - average relative error
                            
        NOTE: when no dataset was specified with MLPSetDataset/MLPSetSparseDataset(),
              or a subset with only one point was given,  zeros  are  returned  as
              estimates.

        NOTE: this method performs FoldsCount cross-validation  rounds,  each  one
              with NRestarts random starts.  Thus,  FoldsCount*NRestarts  networks
              are trained in total.

        NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.

        NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
              contain errors in prediction of posterior probabilities.
                
          -- ALGLIB --
             Copyright 23.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpkfoldcv(mlptrainer s,
            mlpbase.multilayerperceptron network,
            int nrestarts,
            int foldscount,
            mlpreport rep)
        {
            alglib.smp.shared_pool pooldatacv = new alglib.smp.shared_pool();
            mlpparallelizationcv datacv = new mlpparallelizationcv();
            mlpparallelizationcv sdatacv = null;
            double[,] cvy = new double[0,0];
            int[] folds = new int[0];
            double[] buf = new double[0];
            double[] dy = new double[0];
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int rowsize = 0;
            int ntype = 0;
            int ttype = 0;
            int i = 0;
            int j = 0;
            int k = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;
            int i1_ = 0;

            if( !mlpbase.mlpissoftmax(network) )
            {
                ntype = 0;
            }
            else
            {
                ntype = 1;
            }
            if( s.rcpar )
            {
                ttype = 0;
            }
            else
            {
                ttype = 1;
            }
            alglib.ap.assert(ntype==ttype, "MLPKFoldCV: type of input network is not similar to network type in trainer object");
            alglib.ap.assert(s.npoints>=0, "MLPKFoldCV: trainer object S is possibly not initialized (S.NPoints<0)");
            mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
            alglib.ap.assert(s.nin==nin, "MLPKFoldCV: number of inputs in trainer is not equal to number of inputs in network");
            alglib.ap.assert(s.nout==nout, "MLPKFoldCV: number of outputs in trainer is not equal to number of outputs in network");
            alglib.ap.assert(nrestarts>=0, "MLPKFoldCV: NRestarts<0");
            alglib.ap.assert(foldscount>=2, "MLPKFoldCV: FoldsCount<2");
            if( foldscount>s.npoints )
            {
                foldscount = s.npoints;
            }
            rep.relclserror = 0;
            rep.avgce = 0;
            rep.rmserror = 0;
            rep.avgerror = 0;
            rep.avgrelerror = 0;
            hqrnd.hqrndrandomize(rs);
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            if( s.npoints==0 || s.npoints==1 )
            {
                return;
            }
            
            //
            // Read network geometry, test parameters
            //
            if( s.rcpar )
            {
                rowsize = nin+nout;
                dy = new double[nout];
                bdss.dserrallocate(-nout, ref buf);
            }
            else
            {
                rowsize = nin+1;
                dy = new double[1];
                bdss.dserrallocate(nout, ref buf);
            }
            
            //
            // Folds
            //
            folds = new int[s.npoints];
            for(i=0; i<=s.npoints-1; i++)
            {
                folds[i] = i*foldscount/s.npoints;
            }
            for(i=0; i<=s.npoints-2; i++)
            {
                j = i+hqrnd.hqrnduniformi(rs, s.npoints-i);
                if( j!=i )
                {
                    k = folds[i];
                    folds[i] = folds[j];
                    folds[j] = k;
                }
            }
            cvy = new double[s.npoints, nout];
            
            //
            // Initialize SEED-value for shared pool
            //
            datacv.ngrad = 0;
            mlpbase.mlpcopy(network, datacv.network);
            datacv.subset = new int[s.npoints];
            datacv.xyrow = new double[rowsize];
            datacv.y = new double[nout];
            
            //
            // Create shared pool
            //
            alglib.smp.ae_shared_pool_set_seed(pooldatacv, datacv);
            
            //
            // Parallelization
            //
            mthreadcv(s, rowsize, nrestarts, folds, 0, foldscount, cvy, pooldatacv);
            
            //
            // Calculate value for NGrad
            //
            alglib.smp.ae_shared_pool_first_recycled(pooldatacv, ref sdatacv);
            while( sdatacv!=null )
            {
                rep.ngrad = rep.ngrad+sdatacv.ngrad;
                alglib.smp.ae_shared_pool_next_recycled(pooldatacv, ref sdatacv);
            }
            
            //
            // Combine results and calculate cross-validation error
            //
            for(i=0; i<=s.npoints-1; i++)
            {
                if( s.datatype==0 )
                {
                    for(i_=0; i_<=rowsize-1;i_++)
                    {
                        datacv.xyrow[i_] = s.densexy[i,i_];
                    }
                }
                if( s.datatype==1 )
                {
                    sparse.sparsegetrow(s.sparsexy, i, ref datacv.xyrow);
                }
                for(i_=0; i_<=nout-1;i_++)
                {
                    datacv.y[i_] = cvy[i,i_];
                }
                if( s.rcpar )
                {
                    i1_ = (nin) - (0);
                    for(i_=0; i_<=nout-1;i_++)
                    {
                        dy[i_] = datacv.xyrow[i_+i1_];
                    }
                }
                else
                {
                    dy[0] = datacv.xyrow[nin];
                }
                bdss.dserraccumulate(ref buf, datacv.y, dy);
            }
            bdss.dserrfinish(ref buf);
            rep.relclserror = buf[0];
            rep.avgce = buf[1];
            rep.rmserror = buf[2];
            rep.avgerror = buf[3];
            rep.avgrelerror = buf[4];
        }
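For orientation, a minimal usage sketch of the public wrapper around this routine, assuming the standard ALGLIB C# signatures (alglib.mlpcreatetrainer, alglib.mlpsetdataset, alglib.mlpcreate1, alglib.mlpkfoldcv); the four-point dataset is purely illustrative:

        // k-fold CV sketch: estimate generalization error of a 1-5-1 architecture.
        double[,] xy = new double[,]{{0.0,0.1},{0.5,0.6},{1.0,1.1},{1.5,1.4}};
        alglib.mlptrainer trainer;
        alglib.multilayerperceptron net;
        alglib.mlpreport rep;
        alglib.mlpcreatetrainer(1, 1, out trainer);      // 1 input, 1 output (regression)
        alglib.mlpsetdataset(trainer, xy, 4);            // rows are [x, y], 4 points
        alglib.mlpcreate1(1, 5, 1, out net);             // representative of the architecture only
        alglib.mlpkfoldcv(trainer, net, 0, 4, out rep);  // 4 folds, NRestarts=0 (same as 1)
        System.Console.WriteLine("CV RMS error: {0}", rep.rmserror);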
Example #16
 public override void init()
 {
     x0 = new double[0];
     x1 = new double[0];
     t = new double[0];
     xbest = new double[0];
     r = new hqrnd.hqrndstate();
     x = new double[0];
     mv = new double[0];
     mtv = new double[0];
     rstate = new rcommstate();
 }
Example #17
        /*************************************************************************
        Subroutine prepares K-fold split of the training set.

        NOTES:
            "NClasses>0" means that we have classification task.
            "NClasses<0" means regression task with -NClasses real outputs.
        *************************************************************************/
        private static void mlpkfoldsplit(double[,] xy,
            int npoints,
            int nclasses,
            int foldscount,
            bool stratifiedsplits,
            ref int[] folds)
        {
            int i = 0;
            int j = 0;
            int k = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();

            folds = new int[0];

            
            //
            // test parameters
            //
            alglib.ap.assert(npoints>0, "MLPKFoldSplit: wrong NPoints!");
            alglib.ap.assert(nclasses>1 || nclasses<0, "MLPKFoldSplit: wrong NClasses!");
            alglib.ap.assert(foldscount>=2 && foldscount<=npoints, "MLPKFoldSplit: wrong FoldsCount!");
            alglib.ap.assert(!stratifiedsplits, "MLPKFoldSplit: stratified splits are not supported!");
            
            //
            // Folds
            //
            hqrnd.hqrndrandomize(rs);
            folds = new int[npoints];
            for(i=0; i<=npoints-1; i++)
            {
                folds[i] = i*foldscount/npoints;
            }
            for(i=0; i<=npoints-2; i++)
            {
                j = i+hqrnd.hqrnduniformi(rs, npoints-i);
                if( j!=i )
                {
                    k = folds[i];
                    folds[i] = folds[j];
                    folds[j] = k;
                }
            }
        }
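The fold-assignment pattern above is worth noting: folds[i]=i*FoldsCount/NPoints yields fold labels whose sizes differ by at most one, and the subsequent Fisher-Yates pass randomizes which point receives which label. A standalone sketch of the same idea (hypothetical helper, not part of ALGLIB):

        // Hypothetical helper: balanced fold labels followed by a Fisher-Yates shuffle.
        private static int[] BalancedFolds(int npoints, int foldscount, System.Random rng)
        {
            int[] folds = new int[npoints];
            for(int i=0; i<npoints; i++)
                folds[i] = i*foldscount/npoints;   // labels 0..foldscount-1, sizes differ by <=1
            for(int i=0; i<npoints-1; i++)
            {
                int j = i+rng.Next(npoints-i);     // uniform over [i, npoints-1]
                int k = folds[i]; folds[i] = folds[j]; folds[j] = k;
            }
            return folds;
        }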
Example #18
 public normestimatorstate()
 {
     x0 = new double[0];
     x1 = new double[0];
     t = new double[0];
     xbest = new double[0];
     r = new hqrnd.hqrndstate();
     x = new double[0];
     mv = new double[0];
     mtv = new double[0];
     rstate = new rcommstate();
 }
Example #19
        /*************************************************************************
        This function trains neural network ensemble passed to this function using
        current dataset and early stopping training algorithm. Each early stopping
        round performs NRestarts  random  restarts  (thus,  EnsembleSize*NRestarts
        training rounds are performed in total).


          -- ALGLIB --
             Copyright 22.08.2012 by Bochkanov Sergey
        *************************************************************************/
        private static void mlptrainensemblex(mlptrainer s,
            mlpe.mlpensemble ensemble,
            int idx0,
            int idx1,
            int nrestarts,
            int trainingmethod,
            apserv.sinteger ngrad,
            bool isrootcall,
            alglib.smp.shared_pool esessions)
        {
            int pcount = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int i = 0;
            int j = 0;
            int k = 0;
            int trnsubsetsize = 0;
            int valsubsetsize = 0;
            int k0 = 0;
            apserv.sinteger ngrad0 = new apserv.sinteger();
            apserv.sinteger ngrad1 = new apserv.sinteger();
            mlpetrnsession psession = null;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;
            int i1_ = 0;

            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            wcount = mlpbase.mlpgetweightscount(ensemble.network);
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                pcount = nin;
            }
            else
            {
                pcount = nin+nout;
            }
            if( nrestarts<=0 )
            {
                nrestarts = 1;
            }
            
            //
            // Handle degenerate case
            //
            if( s.npoints<2 )
            {
                for(i=idx0; i<=idx1-1; i++)
                {
                    for(j=0; j<=wcount-1; j++)
                    {
                        ensemble.weights[i*wcount+j] = 0.0;
                    }
                    for(j=0; j<=pcount-1; j++)
                    {
                        ensemble.columnmeans[i*pcount+j] = 0.0;
                        ensemble.columnsigmas[i*pcount+j] = 1.0;
                    }
                }
                return;
            }
            
            //
            // Process root call
            //
            if( isrootcall )
            {
                
                //
                // Prepare:
                // * prepare MLPETrnSessions
                // * fill ensemble by zeros (helps to detect errors)
                //
                initmlpetrnsessions(ensemble.network, s, esessions);
                for(i=idx0; i<=idx1-1; i++)
                {
                    for(j=0; j<=wcount-1; j++)
                    {
                        ensemble.weights[i*wcount+j] = 0.0;
                    }
                    for(j=0; j<=pcount-1; j++)
                    {
                        ensemble.columnmeans[i*pcount+j] = 0.0;
                        ensemble.columnsigmas[i*pcount+j] = 0.0;
                    }
                }
                
                //
                // Train in non-root mode and exit
                //
                mlptrainensemblex(s, ensemble, idx0, idx1, nrestarts, trainingmethod, ngrad, false, esessions);
                return;
            }
            
            //
            // Split problem
            //
            if( idx1-idx0>=2 )
            {
                k0 = (idx1-idx0)/2;
                ngrad0.val = 0;
                ngrad1.val = 0;
                mlptrainensemblex(s, ensemble, idx0, idx0+k0, nrestarts, trainingmethod, ngrad0, false, esessions);
                mlptrainensemblex(s, ensemble, idx0+k0, idx1, nrestarts, trainingmethod, ngrad1, false, esessions);
                ngrad.val = ngrad0.val+ngrad1.val;
                return;
            }
            
            //
            // Retrieve and prepare session
            //
            alglib.smp.ae_shared_pool_retrieve(esessions, ref psession);
            
            //
            // Train
            //
            hqrnd.hqrndrandomize(rs);
            for(k=idx0; k<=idx1-1; k++)
            {
                
                //
                // Split set
                //
                trnsubsetsize = 0;
                valsubsetsize = 0;
                if( trainingmethod==0 )
                {
                    do
                    {
                        trnsubsetsize = 0;
                        valsubsetsize = 0;
                        for(i=0; i<=s.npoints-1; i++)
                        {
                            if( (double)(math.randomreal())<(double)(0.66) )
                            {
                                
                                //
                                // Assign sample to training set
                                //
                                psession.trnsubset[trnsubsetsize] = i;
                                trnsubsetsize = trnsubsetsize+1;
                            }
                            else
                            {
                                
                                //
                                // Assign sample to validation set
                                //
                                psession.valsubset[valsubsetsize] = i;
                                valsubsetsize = valsubsetsize+1;
                            }
                        }
                    }
                    while( !(trnsubsetsize!=0 && valsubsetsize!=0) );
                }
                if( trainingmethod==1 )
                {
                    valsubsetsize = 0;
                    trnsubsetsize = s.npoints;
                    for(i=0; i<=s.npoints-1; i++)
                    {
                        psession.trnsubset[i] = hqrnd.hqrnduniformi(rs, s.npoints);
                    }
                }
                
                //
                // Train
                //
                mlptrainnetworkx(s, nrestarts, -1, psession.trnsubset, trnsubsetsize, psession.valsubset, valsubsetsize, psession.network, psession.mlprep, true, psession.mlpsessions);
                ngrad.val = ngrad.val+psession.mlprep.ngrad;
                
                //
                // Save results
                //
                i1_ = (0) - (k*wcount);
                for(i_=k*wcount; i_<=(k+1)*wcount-1;i_++)
                {
                    ensemble.weights[i_] = psession.network.weights[i_+i1_];
                }
                i1_ = (0) - (k*pcount);
                for(i_=k*pcount; i_<=(k+1)*pcount-1;i_++)
                {
                    ensemble.columnmeans[i_] = psession.network.columnmeans[i_+i1_];
                }
                i1_ = (0) - (k*pcount);
                for(i_=k*pcount; i_<=(k+1)*pcount-1;i_++)
                {
                    ensemble.columnsigmas[i_] = psession.network.columnsigmas[i_+i1_];
                }
            }
            
            //
            // Recycle session
            //
            alglib.smp.ae_shared_pool_recycle(esessions, ref psession);
        }
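In normal use this internal routine is reached through the public early-stopping ensemble trainer. A hedged usage sketch, assuming the standard alglib.mlpecreate1 and alglib.mlptrainensemblees wrapper signatures (toy data):

        double[,] xy = new double[,]{{0.0,0.1},{0.5,0.6},{1.0,1.1},{1.5,1.4},{2.0,2.1}};
        alglib.mlptrainer trainer;
        alglib.mlpensemble ensemble;
        alglib.mlpreport rep;
        alglib.mlpcreatetrainer(1, 1, out trainer);
        alglib.mlpsetdataset(trainer, xy, 5);
        alglib.mlpecreate1(1, 5, 1, 10, out ensemble);             // 10 networks, 1-5-1 each
        alglib.mlptrainensemblees(trainer, ensemble, 3, out rep);  // 10*3 training rounds in total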
Example #20
File: linalg.cs Project: Ring-r/opt
        public static void rmatrixrndorthogonalfromtheleft(ref double[,] a,
            int m,
            int n)
        {
            double tau = 0;
            double lambdav = 0;
            int s = 0;
            int i = 0;
            int j = 0;
            double u1 = 0;
            double u2 = 0;
            double[] w = new double[0];
            double[] v = new double[0];
            hqrnd.hqrndstate state = new hqrnd.hqrndstate();
            int i_ = 0;

            ap.assert(n>=1 && m>=1, "RMatrixRndOrthogonalFromTheLeft: N<1 or M<1!");
            if( m==1 )
            {
                
                //
                // special case
                //
                tau = 2*math.randominteger(2)-1;
                for(j=0; j<=n-1; j++)
                {
                    a[0,j] = a[0,j]*tau;
                }
                return;
            }
            
            //
            // General case.
            // First pass.
            //
            w = new double[n];
            v = new double[m+1];
            hqrnd.hqrndrandomize(state);
            for(s=2; s<=m; s++)
            {
                
                //
                // Prepare random normal v
                //
                do
                {
                    i = 1;
                    while( i<=s )
                    {
                        hqrnd.hqrndnormal2(state, ref u1, ref u2);
                        v[i] = u1;
                        if( i+1<=s )
                        {
                            v[i+1] = u2;
                        }
                        i = i+2;
                    }
                    lambdav = 0.0;
                    for(i_=1; i_<=s;i_++)
                    {
                        lambdav += v[i_]*v[i_];
                    }
                }
                while( (double)(lambdav)==(double)(0) );
                
                //
                // Prepare and apply reflection
                //
                reflections.generatereflection(ref v, s, ref tau);
                v[1] = 1;
                reflections.applyreflectionfromtheleft(ref a, tau, v, m-s, m-1, 0, n-1, ref w);
            }
            
            //
            // Second pass.
            //
            for(i=0; i<=m-1; i++)
            {
                tau = 2*math.randominteger(2)-1;
                for(i_=0; i_<=n-1;i_++)
                {
                    a[i,i_] = tau*a[i,i_];
                }
            }
        }
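Applied to an identity matrix this routine produces a random orthogonal matrix; in general it computes A := Q*A for a random orthogonal MxM matrix Q. A brief sketch via the public wrapper (assuming the standard alglib.rmatrixrndorthogonalfromtheleft signature):

        int m = 4, n = 4;
        double[,] a = new double[m, n];
        for(int i=0; i<m; i++)
            a[i,i] = 1.0;                                     // start from identity
        alglib.rmatrixrndorthogonalfromtheleft(ref a, m, n);  // A := Q*A, Q random orthogonal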
Example #21
        /*************************************************************************
        Internal bagging subroutine.

          -- ALGLIB --
             Copyright 19.02.2009 by Bochkanov Sergey
        *************************************************************************/
        private static void mlpebagginginternal(mlpe.mlpensemble ensemble,
            double[,] xy,
            int npoints,
            double decay,
            int restarts,
            double wstep,
            int maxits,
            bool lmalgorithm,
            ref int info,
            mlpreport rep,
            mlpcvreport ooberrors)
        {
            double[,] xys = new double[0,0];
            bool[] s = new bool[0];
            double[,] oobbuf = new double[0,0];
            int[] oobcntbuf = new int[0];
            double[] x = new double[0];
            double[] y = new double[0];
            double[] dy = new double[0];
            double[] dsbuf = new double[0];
            int ccnt = 0;
            int pcnt = 0;
            int i = 0;
            int j = 0;
            int k = 0;
            double v = 0;
            mlpreport tmprep = new mlpreport();
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
            int i_ = 0;
            int i1_ = 0;

            info = 0;

            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            wcount = mlpbase.mlpgetweightscount(ensemble.network);
            
            //
            // Test for inputs
            //
            if( (!lmalgorithm && (double)(wstep)==(double)(0)) && maxits==0 )
            {
                info = -8;
                return;
            }
            if( ((npoints<=0 || restarts<1) || (double)(wstep)<(double)(0)) || maxits<0 )
            {
                info = -1;
                return;
            }
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                for(i=0; i<=npoints-1; i++)
                {
                    if( (int)Math.Round(xy[i,nin])<0 || (int)Math.Round(xy[i,nin])>=nout )
                    {
                        info = -2;
                        return;
                    }
                }
            }
            
            //
            // allocate temporaries
            //
            info = 2;
            rep.ngrad = 0;
            rep.nhess = 0;
            rep.ncholesky = 0;
            ooberrors.relclserror = 0;
            ooberrors.avgce = 0;
            ooberrors.rmserror = 0;
            ooberrors.avgerror = 0;
            ooberrors.avgrelerror = 0;
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                ccnt = nin+1;
                pcnt = nin;
            }
            else
            {
                ccnt = nin+nout;
                pcnt = nin+nout;
            }
            xys = new double[npoints, ccnt];
            s = new bool[npoints];
            oobbuf = new double[npoints, nout];
            oobcntbuf = new int[npoints];
            x = new double[nin];
            y = new double[nout];
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                dy = new double[1];
            }
            else
            {
                dy = new double[nout];
            }
            for(i=0; i<=npoints-1; i++)
            {
                for(j=0; j<=nout-1; j++)
                {
                    oobbuf[i,j] = 0;
                }
            }
            for(i=0; i<=npoints-1; i++)
            {
                oobcntbuf[i] = 0;
            }
            
            //
            // main bagging cycle
            //
            hqrnd.hqrndrandomize(rs);
            for(k=0; k<=ensemble.ensemblesize-1; k++)
            {
                
                //
                // prepare dataset
                //
                for(i=0; i<=npoints-1; i++)
                {
                    s[i] = false;
                }
                for(i=0; i<=npoints-1; i++)
                {
                    j = hqrnd.hqrnduniformi(rs, npoints);
                    s[j] = true;
                    for(i_=0; i_<=ccnt-1;i_++)
                    {
                        xys[i,i_] = xy[j,i_];
                    }
                }
                
                //
                // train
                //
                if( lmalgorithm )
                {
                    mlptrainlm(ensemble.network, xys, npoints, decay, restarts, ref info, tmprep);
                }
                else
                {
                    mlptrainlbfgs(ensemble.network, xys, npoints, decay, restarts, wstep, maxits, ref info, tmprep);
                }
                if( info<0 )
                {
                    return;
                }
                
                //
                // save results
                //
                rep.ngrad = rep.ngrad+tmprep.ngrad;
                rep.nhess = rep.nhess+tmprep.nhess;
                rep.ncholesky = rep.ncholesky+tmprep.ncholesky;
                i1_ = (0) - (k*wcount);
                for(i_=k*wcount; i_<=(k+1)*wcount-1;i_++)
                {
                    ensemble.weights[i_] = ensemble.network.weights[i_+i1_];
                }
                i1_ = (0) - (k*pcnt);
                for(i_=k*pcnt; i_<=(k+1)*pcnt-1;i_++)
                {
                    ensemble.columnmeans[i_] = ensemble.network.columnmeans[i_+i1_];
                }
                i1_ = (0) - (k*pcnt);
                for(i_=k*pcnt; i_<=(k+1)*pcnt-1;i_++)
                {
                    ensemble.columnsigmas[i_] = ensemble.network.columnsigmas[i_+i1_];
                }
                
                //
                // OOB estimates
                //
                for(i=0; i<=npoints-1; i++)
                {
                    if( !s[i] )
                    {
                        for(i_=0; i_<=nin-1;i_++)
                        {
                            x[i_] = xy[i,i_];
                        }
                        mlpbase.mlpprocess(ensemble.network, x, ref y);
                        for(i_=0; i_<=nout-1;i_++)
                        {
                            oobbuf[i,i_] = oobbuf[i,i_] + y[i_];
                        }
                        oobcntbuf[i] = oobcntbuf[i]+1;
                    }
                }
            }
            
            //
            // OOB estimates
            //
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                bdss.dserrallocate(nout, ref dsbuf);
            }
            else
            {
                bdss.dserrallocate(-nout, ref dsbuf);
            }
            for(i=0; i<=npoints-1; i++)
            {
                if( oobcntbuf[i]!=0 )
                {
                    v = (double)1/(double)oobcntbuf[i];
                    for(i_=0; i_<=nout-1;i_++)
                    {
                        y[i_] = v*oobbuf[i,i_];
                    }
                    if( mlpbase.mlpissoftmax(ensemble.network) )
                    {
                        dy[0] = xy[i,nin];
                    }
                    else
                    {
                        i1_ = (nin) - (0);
                        for(i_=0; i_<=nout-1;i_++)
                        {
                            dy[i_] = xy[i,i_+i1_];
                        }
                    }
                    bdss.dserraccumulate(ref dsbuf, y, dy);
                }
            }
            bdss.dserrfinish(ref dsbuf);
            ooberrors.relclserror = dsbuf[0];
            ooberrors.avgce = dsbuf[1];
            ooberrors.rmserror = dsbuf[2];
            ooberrors.avgerror = dsbuf[3];
            ooberrors.avgrelerror = dsbuf[4];
        }
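This internal routine is presumably reached through the public bagging entry points. A hedged usage sketch, assuming the standard alglib.mlpecreate1 and alglib.mlpebagginglm wrapper signatures (toy data):

        double[,] xy = new double[,]{{0.0,0.1},{0.5,0.6},{1.0,1.1},{1.5,1.4},{2.0,2.1}};
        alglib.mlpensemble ensemble;
        alglib.mlpreport rep;
        alglib.mlpcvreport oob;
        int info;
        alglib.mlpecreate1(1, 3, 1, 10, out ensemble);   // 10-member regression ensemble
        alglib.mlpebagginglm(ensemble, xy, 5, 0.001, 3, out info, out rep, out oob);
        System.Console.WriteLine("OOB RMS error: {0}", oob.rmserror);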
Example #22
File: linalg.cs Project: Ring-r/opt
        public static void smatrixrndmultiply(ref double[,] a,
            int n)
        {
            double tau = 0;
            double lambdav = 0;
            int s = 0;
            int i = 0;
            double u1 = 0;
            double u2 = 0;
            double[] w = new double[0];
            double[] v = new double[0];
            hqrnd.hqrndstate state = new hqrnd.hqrndstate();
            int i_ = 0;

            
            //
            // General case.
            //
            w = new double[n];
            v = new double[n+1];
            hqrnd.hqrndrandomize(state);
            for(s=2; s<=n; s++)
            {
                
                //
                // Prepare random normal v
                //
                do
                {
                    i = 1;
                    while( i<=s )
                    {
                        hqrnd.hqrndnormal2(state, ref u1, ref u2);
                        v[i] = u1;
                        if( i+1<=s )
                        {
                            v[i+1] = u2;
                        }
                        i = i+2;
                    }
                    lambdav = 0.0;
                    for(i_=1; i_<=s;i_++)
                    {
                        lambdav += v[i_]*v[i_];
                    }
                }
                while( (double)(lambdav)==(double)(0) );
                
                //
                // Prepare and apply reflection
                //
                reflections.generatereflection(ref v, s, ref tau);
                v[1] = 1;
                reflections.applyreflectionfromtheright(ref a, tau, v, 0, n-1, n-s, n-1, ref w);
                reflections.applyreflectionfromtheleft(ref a, tau, v, n-s, n-1, 0, n-1, ref w);
            }
            
            //
            // Second pass.
            //
            for(i=0; i<=n-1; i++)
            {
                tau = 2*math.randominteger(2)-1;
                for(i_=0; i_<=n-1;i_++)
                {
                    a[i_,i] = tau*a[i_,i];
                }
                for(i_=0; i_<=n-1;i_++)
                {
                    a[i,i_] = tau*a[i,i_];
                }
            }
        }
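Since the same reflections are applied from the right and from the left (and the final sign pass scales row i and column i together), the routine effectively computes A := Q'*A*Q for a random orthogonal Q, so symmetry and the spectrum of A are preserved. A quick sketch via the public wrapper (assuming the standard alglib.smatrixrndmultiply signature):

        double[,] a = new double[,]{{2,0,0},{0,1,0},{0,0,-1}};  // symmetric, eigenvalues {2,1,-1}
        alglib.smatrixrndmultiply(ref a, 3);                    // still symmetric, same eigenvalues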
Example #23
        /*************************************************************************
        Generation of random NxN matrix with given condition number and norm2(A)=1

        INPUT PARAMETERS:
            N   -   matrix size
            C   -   condition number (in 2-norm)

        OUTPUT PARAMETERS:
            A   -   random matrix with norm2(A)=1 and cond(A)=C

          -- ALGLIB routine --
             04.12.2009
             Bochkanov Sergey
        *************************************************************************/
        public static void rmatrixrndcond(int n,
            double c,
            ref double[,] a)
        {
            int i = 0;
            int j = 0;
            double l1 = 0;
            double l2 = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();

            a = new double[0,0];

            alglib.ap.assert(n>=1 && (double)(c)>=(double)(1), "RMatrixRndCond: N<1 or C<1!");
            a = new double[n, n];
            if( n==1 )
            {
                
                //
                // special case
                //
                a[0,0] = 2*math.randominteger(2)-1;
                return;
            }
            hqrnd.hqrndrandomize(rs);
            l1 = 0;
            l2 = Math.Log(1/c);
            for(i=0; i<=n-1; i++)
            {
                for(j=0; j<=n-1; j++)
                {
                    a[i,j] = 0;
                }
            }
            a[0,0] = Math.Exp(l1);
            for(i=1; i<=n-2; i++)
            {
                a[i,i] = Math.Exp(hqrnd.hqrnduniformr(rs)*(l2-l1)+l1);
            }
            a[n-1,n-1] = Math.Exp(l2);
            rmatrixrndorthogonalfromtheleft(ref a, n, n);
            rmatrixrndorthogonalfromtheright(ref a, n, n);
        }
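A one-line usage sketch via the public wrapper (assuming the standard alglib.rmatrixrndcond signature):

        double[,] a;
        alglib.rmatrixrndcond(5, 1.0E3, out a);  // random 5x5 matrix, norm2(A)=1, cond2(A)=1000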
Example #24
        public static bool testhqrnd(bool silent)
        {
            bool result = new bool();
            bool waserrors = new bool();
            int samplesize = 0;
            double sigmathreshold = 0;
            int passcount = 0;
            int n = 0;
            int i = 0;
            int pass = 0;
            int s1 = 0;
            int s2 = 0;
            int i1 = 0;
            int i2 = 0;
            double r1 = 0;
            double r2 = 0;
            double[] x = new double[0];
            double mean = 0;
            double means = 0;
            double stddev = 0;
            double stddevs = 0;
            double lambdav = 0;
            bool seederrors = new bool();
            bool urerrors = new bool();
            double ursigmaerr = 0;
            bool uierrors = new bool();
            double uisigmaerr = 0;
            bool normerrors = new bool();
            double normsigmaerr = 0;
            bool experrors = new bool();
            double expsigmaerr = 0;
            hqrnd.hqrndstate state = new hqrnd.hqrndstate();

            waserrors = false;
            sigmathreshold = 7;
            samplesize = 100000;
            passcount = 50;
            seederrors = false;
            urerrors = false;
            uierrors = false;
            normerrors = false;
            experrors = false;
            x = new double[samplesize];
            
            //
            // Test seed errors
            //
            for(pass=1; pass<=passcount; pass++)
            {
                s1 = 1+math.randominteger(32000);
                s2 = 1+math.randominteger(32000);
                unsetstate(state);
                hqrnd.hqrndseed(s1, s2, state);
                i1 = hqrnd.hqrnduniformi(state, 100);
                unsetstate(state);
                hqrnd.hqrndseed(s1, s2, state);
                i2 = hqrnd.hqrnduniformi(state, 100);
                seederrors = seederrors | i1!=i2;
                unsetstate(state);
                hqrnd.hqrndseed(s1, s2, state);
                r1 = hqrnd.hqrnduniformr(state);
                unsetstate(state);
                hqrnd.hqrndseed(s1, s2, state);
                r2 = hqrnd.hqrnduniformr(state);
                seederrors = seederrors | (double)(r1)!=(double)(r2);
            }
            
            //
            // Test HQRNDRandomize() and real uniform generator
            //
            unsetstate(state);
            hqrnd.hqrndrandomize(state);
            ursigmaerr = 0;
            for(i=0; i<=samplesize-1; i++)
            {
                x[i] = hqrnd.hqrnduniformr(state);
            }
            for(i=0; i<=samplesize-1; i++)
            {
                urerrors = (urerrors | (double)(x[i])<=(double)(0)) | (double)(x[i])>=(double)(1);
            }
            calculatemv(x, samplesize, ref mean, ref means, ref stddev, ref stddevs);
            if( (double)(means)!=(double)(0) )
            {
                ursigmaerr = Math.Max(ursigmaerr, Math.Abs((mean-0.5)/means));
            }
            else
            {
                urerrors = true;
            }
            if( (double)(stddevs)!=(double)(0) )
            {
                ursigmaerr = Math.Max(ursigmaerr, Math.Abs((stddev-Math.Sqrt((double)1/(double)12))/stddevs));
            }
            else
            {
                urerrors = true;
            }
            urerrors = urerrors | (double)(ursigmaerr)>(double)(sigmathreshold);
            
            //
            // Test HQRNDRandomize() and integer uniform
            //
            unsetstate(state);
            hqrnd.hqrndrandomize(state);
            uisigmaerr = 0;
            for(n=2; n<=10; n++)
            {
                for(i=0; i<=samplesize-1; i++)
                {
                    x[i] = hqrnd.hqrnduniformi(state, n);
                }
                for(i=0; i<=samplesize-1; i++)
                {
                    uierrors = (uierrors | (double)(x[i])<(double)(0)) | (double)(x[i])>=(double)(n);
                }
                calculatemv(x, samplesize, ref mean, ref means, ref stddev, ref stddevs);
                if( (double)(means)!=(double)(0) )
                {
                    uisigmaerr = Math.Max(uisigmaerr, Math.Abs((mean-0.5*(n-1))/means));
                }
                else
                {
                    uierrors = true;
                }
                if( (double)(stddevs)!=(double)(0) )
                {
                    uisigmaerr = Math.Max(uisigmaerr, Math.Abs((stddev-Math.Sqrt((math.sqr(n)-1)/12))/stddevs));
                }
                else
                {
                    uierrors = true;
                }
            }
            uierrors = uierrors | (double)(uisigmaerr)>(double)(sigmathreshold);
            
            //
            // Special 'close-to-limit' test on uniformity of integers
            // (straightforward implementation like 'RND mod N' will return
            //  non-uniform numbers for N=2/3*LIMIT)
            //
            unsetstate(state);
            hqrnd.hqrndrandomize(state);
            uisigmaerr = 0;
            n = 1431655708;
            for(i=0; i<=samplesize-1; i++)
            {
                x[i] = hqrnd.hqrnduniformi(state, n);
            }
            for(i=0; i<=samplesize-1; i++)
            {
                uierrors = (uierrors | (double)(x[i])<(double)(0)) | (double)(x[i])>=(double)(n);
            }
            calculatemv(x, samplesize, ref mean, ref means, ref stddev, ref stddevs);
            if( (double)(means)!=(double)(0) )
            {
                uisigmaerr = Math.Max(uisigmaerr, Math.Abs((mean-0.5*(n-1))/means));
            }
            else
            {
                uierrors = true;
            }
            if( (double)(stddevs)!=(double)(0) )
            {
                uisigmaerr = Math.Max(uisigmaerr, Math.Abs((stddev-Math.Sqrt((math.sqr(n)-1)/12))/stddevs));
            }
            else
            {
                uierrors = true;
            }
            uierrors = uierrors | (double)(uisigmaerr)>(double)(sigmathreshold);
            
            //
            // Test normal
            //
            unsetstate(state);
            hqrnd.hqrndrandomize(state);
            normsigmaerr = 0;
            i = 0;
            while( i<samplesize )
            {
                hqrnd.hqrndnormal2(state, ref r1, ref r2);
                x[i] = r1;
                if( i+1<samplesize )
                {
                    x[i+1] = r2;
                }
                i = i+2;
            }
            calculatemv(x, samplesize, ref mean, ref means, ref stddev, ref stddevs);
            if( (double)(means)!=(double)(0) )
            {
                normsigmaerr = Math.Max(normsigmaerr, Math.Abs((mean-0)/means));
            }
            else
            {
                normerrors = true;
            }
            if( (double)(stddevs)!=(double)(0) )
            {
                normsigmaerr = Math.Max(normsigmaerr, Math.Abs((stddev-1)/stddevs));
            }
            else
            {
                normerrors = true;
            }
            normerrors = normerrors | (double)(normsigmaerr)>(double)(sigmathreshold);
            
            //
            // Test exponential
            //
            unsetstate(state);
            hqrnd.hqrndrandomize(state);
            expsigmaerr = 0;
            lambdav = 2+5*math.randomreal();
            for(i=0; i<=samplesize-1; i++)
            {
                x[i] = hqrnd.hqrndexponential(state, lambdav);
            }
            for(i=0; i<=samplesize-1; i++)
            {
                experrors = experrors | (double)(x[i])<(double)(0);
            }
            calculatemv(x, samplesize, ref mean, ref means, ref stddev, ref stddevs);
            if( (double)(means)!=(double)(0) )
            {
                expsigmaerr = Math.Max(expsigmaerr, Math.Abs((mean-1.0/lambdav)/means));
            }
            else
            {
                experrors = true;
            }
            if( (double)(stddevs)!=(double)(0) )
            {
                expsigmaerr = Math.Max(expsigmaerr, Math.Abs((stddev-1.0/lambdav)/stddevs));
            }
            else
            {
                experrors = true;
            }
            experrors = experrors | (double)(expsigmaerr)>(double)(sigmathreshold);
            
            //
            // Final report
            //
            waserrors = (((seederrors | urerrors) | uierrors) | normerrors) | experrors;
            if( !silent )
            {
                System.Console.Write("RNG TEST");
                System.Console.WriteLine();
                System.Console.Write("SEED TEST:                               ");
                if( !seederrors )
                {
                    System.Console.Write("OK");
                    System.Console.WriteLine();
                }
                else
                {
                    System.Console.Write("FAILED");
                    System.Console.WriteLine();
                }
                System.Console.Write("UNIFORM CONTINUOUS:                      ");
                if( !urerrors )
                {
                    System.Console.Write("OK");
                    System.Console.WriteLine();
                }
                else
                {
                    System.Console.Write("FAILED");
                    System.Console.WriteLine();
                }
                System.Console.Write("UNIFORM INTEGER:                         ");
                if( !uierrors )
                {
                    System.Console.Write("OK");
                    System.Console.WriteLine();
                }
                else
                {
                    System.Console.Write("FAILED");
                    System.Console.WriteLine();
                }
                System.Console.Write("NORMAL:                                  ");
                if( !normerrors )
                {
                    System.Console.Write("OK");
                    System.Console.WriteLine();
                }
                else
                {
                    System.Console.Write("FAILED");
                    System.Console.WriteLine();
                }
                System.Console.Write("EXPONENTIAL:                             ");
                if( !experrors )
                {
                    System.Console.Write("OK");
                    System.Console.WriteLine();
                }
                else
                {
                    System.Console.Write("FAILED");
                    System.Console.WriteLine();
                }
                if( waserrors )
                {
                    System.Console.Write("TEST SUMMARY: FAILED");
                    System.Console.WriteLine();
                }
                else
                {
                    System.Console.Write("TEST SUMMARY: PASSED");
                    System.Console.WriteLine();
                }
                System.Console.WriteLine();
                System.Console.WriteLine();
            }
            result = !waserrors;
            return result;
        }
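The statistical criterion used throughout this test is a z-score: a sample statistic fails when it deviates from its expected value by more than SigmaThreshold estimated standard errors. A minimal sketch of that criterion (hypothetical helper name):

        // Hypothetical helper: true when the observed statistic deviates from its
        // expected value by more than 'threshold' standard errors.
        private static bool SigmaTestFails(double observed, double expected, double stderror, double threshold)
        {
            return System.Math.Abs((observed-expected)/stderror) > threshold;
        }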
Example #25
        /*************************************************************************
        Generation of random NxN Hermitian matrix with given condition number  and
        norm2(A)=1

        INPUT PARAMETERS:
            N   -   matrix size
            C   -   condition number (in 2-norm)

        OUTPUT PARAMETERS:
            A   -   random matrix with norm2(A)=1 and cond(A)=C

          -- ALGLIB routine --
             04.12.2009
             Bochkanov Sergey
        *************************************************************************/
        public static void hmatrixrndcond(int n,
            double c,
            ref complex[,] a)
        {
            int i = 0;
            int j = 0;
            double l1 = 0;
            double l2 = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();

            a = new complex[0,0];

            alglib.ap.assert(n>=1 && (double)(c)>=(double)(1), "HMatrixRndCond: N<1 or C<1!");
            a = new complex[n, n];
            if( n==1 )
            {
                
                //
                // special case
                //
                a[0,0] = 2*math.randominteger(2)-1;
                return;
            }
            
            //
            // Prepare matrix
            //
            hqrnd.hqrndrandomize(rs);
            l1 = 0;
            l2 = Math.Log(1/c);
            for(i=0; i<=n-1; i++)
            {
                for(j=0; j<=n-1; j++)
                {
                    a[i,j] = 0;
                }
            }
            a[0,0] = Math.Exp(l1);
            for(i=1; i<=n-2; i++)
            {
                a[i,i] = (2*hqrnd.hqrnduniformi(rs, 2)-1)*Math.Exp(hqrnd.hqrnduniformr(rs)*(l2-l1)+l1);
            }
            a[n-1,n-1] = Math.Exp(l2);
            
            //
            // Multiply
            //
            hmatrixrndmultiply(ref a, n);
            
            //
            // post-process to ensure that matrix diagonal is real
            //
            for(i=0; i<=n-1; i++)
            {
                a[i,i].y = 0;
            }
        }
Example #26
 public hqrndstate(hqrnd.hqrndstate obj)
 {
     _innerobj = obj;
 }
Example #27
 public override void init()
 {
     s = new double[0];
     bndl = new double[0];
     bndu = new double[0];
     hasbndl = new bool[0];
     hasbndu = new bool[0];
     cleic = new double[0,0];
     x = new double[0];
     fi = new double[0];
     j = new double[0,0];
     rstate = new rcommstate();
     rstateags = new rcommstate();
     agsrs = new hqrnd.hqrndstate();
     xstart = new double[0];
     xc = new double[0];
     xn = new double[0];
     grs = new double[0];
     d = new double[0];
     colmax = new double[0];
     diagh = new double[0];
     signmin = new double[0];
     signmax = new double[0];
     scaledbndl = new double[0];
     scaledbndu = new double[0];
     scaledcleic = new double[0,0];
     rholinear = new double[0];
     samplex = new double[0,0];
     samplegm = new double[0,0];
     samplegmbc = new double[0,0];
     samplef = new double[0];
     samplef0 = new double[0];
     nsqp = new minnsqp();
     tmp0 = new double[0];
     tmp1 = new double[0];
     tmp2 = new double[0,0];
     tmp3 = new int[0];
     xbase = new double[0];
     fp = new double[0];
     fm = new double[0];
 }