Example #1
 // P/Invoke declaration. Note: "blah.dll" and the [DllImport] attribute are
 // editorial placeholders; "union", "sparse" and "bytes" stand for
 // user-defined value types that are not part of this snippet.
 [DllImport("blah.dll")]
 static extern int Blah(
     [MarshalAs(UnmanagedType.I4)]
     int i,
     string s,
     union u,
     sparse sp,
     [In, Out] string inout,
     StringBuilder sb,
     IntPtr ip,
     out bool ob,
     ref bytes bs);
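A minimal sketch of the placeholder types, so the declaration above can compile; these are hypothetical stand-ins, not types from the original source:

 using System.Runtime.InteropServices;

 // Empty Sequential structs are enough for the declaration to compile;
 // real code would declare fields that match the native layout.
 [StructLayout(LayoutKind.Sequential)] struct union { }
 [StructLayout(LayoutKind.Sequential)] struct sparse { }
 [StructLayout(LayoutKind.Sequential)] struct bytes { }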
Example #2
        /*************************************************************************
        Internal subroutine.
        Initialization for preprocessor based on a sample.

        INPUT
            Network -   initialized neural network;
            XY      -   sample, given by sparse matrix;
            SSize   -   sample size.

        OUTPUT
            Network -   neural network with initialized preprocessor.

          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpinitpreprocessorsparse(multilayerperceptron network,
            sparse.sparsematrix xy,
            int ssize)
        {
            int jmax = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int ntotal = 0;
            int istart = 0;
            int offs = 0;
            int ntype = 0;
            double[] means = new double[0];
            double[] sigmas = new double[0];
            double s = 0;
            int i = 0;
            int j = 0;

            mlpproperties(network, ref nin, ref nout, ref wcount);
            ntotal = network.structinfo[3];
            istart = network.structinfo[5];
            
            //
            // Means/Sigmas
            //
            if( mlpissoftmax(network) )
            {
                jmax = nin-1;
            }
            else
            {
                jmax = nin+nout-1;
            }
            means = new double[jmax+1];
            sigmas = new double[jmax+1];
            for(i=0; i<=jmax; i++)
            {
                means[i] = 0;
                sigmas[i] = 0;
            }
            for(i=0; i<=ssize-1; i++)
            {
                sparse.sparsegetrow(xy, i, ref network.xyrow);
                for(j=0; j<=jmax; j++)
                {
                    means[j] = means[j]+network.xyrow[j];
                }
            }
            for(i=0; i<=jmax; i++)
            {
                means[i] = means[i]/ssize;
            }
            for(i=0; i<=ssize-1; i++)
            {
                sparse.sparsegetrow(xy, i, ref network.xyrow);
                for(j=0; j<=jmax; j++)
                {
                    sigmas[j] = sigmas[j]+math.sqr(network.xyrow[j]-means[j]);
                }
            }
            for(i=0; i<=jmax; i++)
            {
                sigmas[i] = Math.Sqrt(sigmas[i]/ssize);
            }
            
            //
            // Inputs
            //
            for(i=0; i<=nin-1; i++)
            {
                network.columnmeans[i] = means[i];
                network.columnsigmas[i] = sigmas[i];
                if( (double)(network.columnsigmas[i])==(double)(0) )
                {
                    network.columnsigmas[i] = 1;
                }
            }
            
            //
            // Outputs
            //
            if( !mlpissoftmax(network) )
            {
                for(i=0; i<=nout-1; i++)
                {
                    offs = istart+(ntotal-nout+i)*nfieldwidth;
                    ntype = network.structinfo[offs+0];
                    
                    //
                    // Linear outputs
                    //
                    if( ntype==0 )
                    {
                        network.columnmeans[nin+i] = means[nin+i];
                        network.columnsigmas[nin+i] = sigmas[nin+i];
                        if( (double)(network.columnsigmas[nin+i])==(double)(0) )
                        {
                            network.columnsigmas[nin+i] = 1;
                        }
                    }
                    
                    //
                    // Bounded outputs (half-interval)
                    //
                    if( ntype==3 )
                    {
                        s = means[nin+i]-network.columnmeans[nin+i];
                        if( (double)(s)==(double)(0) )
                        {
                            s = Math.Sign(network.columnsigmas[nin+i]);
                        }
                        if( (double)(s)==(double)(0) )
                        {
                            s = 1.0;
                        }
                        network.columnsigmas[nin+i] = Math.Sign(network.columnsigmas[nin+i])*Math.Abs(s);
                        if( (double)(network.columnsigmas[nin+i])==(double)(0) )
                        {
                            network.columnsigmas[nin+i] = 1;
                        }
                    }
                }
            }
        }
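The loops above standardize every input (and, for non-softmax networks, output) column to zero mean and unit sigma. A minimal self-contained sketch of the same per-column statistics on a dense sample, with the same zero-sigma guard (editorial example, not ALGLIB code):

    using System;

    static class ColumnStatsDemo
    {
        // Mirrors the accumulation loops of mlpinitpreprocessorsparse above:
        // one pass for means, one pass for sigmas, then a zero-sigma guard.
        static void ColumnStats(double[,] xy, out double[] means, out double[] sigmas)
        {
            int rows = xy.GetLength(0), cols = xy.GetLength(1);
            means = new double[cols];
            sigmas = new double[cols];
            for (int i = 0; i < rows; i++)
                for (int j = 0; j < cols; j++)
                    means[j] += xy[i, j];
            for (int j = 0; j < cols; j++)
                means[j] /= rows;
            for (int i = 0; i < rows; i++)
                for (int j = 0; j < cols; j++)
                    sigmas[j] += Math.Pow(xy[i, j] - means[j], 2);
            for (int j = 0; j < cols; j++)
            {
                sigmas[j] = Math.Sqrt(sigmas[j] / rows);
                if (sigmas[j] == 0)
                    sigmas[j] = 1; // same guard as the ColumnSigmas[i]==0 check above
            }
        }
    }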
Example #3
        /*************************************************************************
        Procedure for solution of A*x=b with sparse A.

        INPUT PARAMETERS:
            State   -   algorithm state
            A       -   sparse M*N matrix in the CRS format (you MUST convert  it 
                        to CRS format  by  calling  SparseConvertToCRS()  function
                        BEFORE you pass it to this function).
            B       -   right part, array[M]

        RESULT:
            This function returns no result.
            You can get solution by calling LinLSQRResults()
            
        NOTE: this function uses lightweight preconditioning -  multiplication  by
              inverse of diag(A). If you want, you can turn preconditioning off by
              calling LinLSQRSetPrecUnit(). However, preconditioning cost is   low
              and preconditioner is very important for solution  of  badly  scaled
              problems.

          -- ALGLIB --
             Copyright 30.11.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void linlsqrsolvesparse(linlsqrstate state,
            sparse.sparsematrix a,
            double[] b)
        {
            int n = 0;
            int i = 0;
            int j = 0;
            int t0 = 0;
            int t1 = 0;
            double v = 0;

            n = state.n;
            alglib.ap.assert(!state.running, "LinLSQRSolveSparse: you can not call this function when LinLSQRIteration is running");
            alglib.ap.assert(alglib.ap.len(b)>=state.m, "LinLSQRSolveSparse: Length(B)<M");
            alglib.ap.assert(apserv.isfinitevector(b, state.m), "LinLSQRSolveSparse: B contains infinite or NaN values");
            
            //
            // Allocate temporaries
            //
            apserv.rvectorsetlengthatleast(ref state.tmpd, n);
            apserv.rvectorsetlengthatleast(ref state.tmpx, n);
            
            //
            // Compute diagonal scaling matrix D
            //
            if( state.prectype==0 )
            {
                
                //
                // Default preconditioner - inverse of column norms
                //
                for(i=0; i<=n-1; i++)
                {
                    state.tmpd[i] = 0;
                }
                t0 = 0;
                t1 = 0;
                while( sparse.sparseenumerate(a, ref t0, ref t1, ref i, ref j, ref v) )
                {
                    state.tmpd[j] = state.tmpd[j]+math.sqr(v);
                }
                for(i=0; i<=n-1; i++)
                {
                    if( (double)(state.tmpd[i])>(double)(0) )
                    {
                        state.tmpd[i] = 1/Math.Sqrt(state.tmpd[i]);
                    }
                    else
                    {
                        state.tmpd[i] = 1;
                    }
                }
            }
            else
            {
                
                //
                // No diagonal scaling
                //
                for(i=0; i<=n-1; i++)
                {
                    state.tmpd[i] = 1;
                }
            }
            
            //
            // Solve.
            //
            // Instead of solving A*x=b we solve preconditioned system (A*D)*(inv(D)*x)=b.
            // Transformed A is not calculated explicitly, we just modify multiplication
            // by A or A'. After solution we modify State.RX so it will store untransformed
            // variables
            //
            linlsqrsetb(state, b);
            linlsqrrestart(state);
            while( linlsqriteration(state) )
            {
                if( state.needmv )
                {
                    for(i=0; i<=n-1; i++)
                    {
                        state.tmpx[i] = state.tmpd[i]*state.x[i];
                    }
                    sparse.sparsemv(a, state.tmpx, ref state.mv);
                }
                if( state.needmtv )
                {
                    sparse.sparsemtv(a, state.x, ref state.mtv);
                    for(i=0; i<=n-1; i++)
                    {
                        state.mtv[i] = state.tmpd[i]*state.mtv[i];
                    }
                }
            }
            for(i=0; i<=n-1; i++)
            {
                state.rx[i] = state.tmpd[i]*state.rx[i];
            }
        }
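A hedged usage sketch for the routine above, assuming the public ALGLIB C# wrapper (alglib.sparsecreate, alglib.sparseset, alglib.sparseconverttocrs, alglib.linlsqrcreate, alglib.linlsqrsolvesparse, alglib.linlsqrresults); the 3x2 system is made up for illustration:

    alglib.sparsematrix a;
    alglib.sparsecreate(3, 2, out a);      // 3x2 least-squares system
    alglib.sparseset(a, 0, 0, 1.0);
    alglib.sparseset(a, 1, 1, 2.0);
    alglib.sparseset(a, 2, 0, 1.0);
    alglib.sparseconverttocrs(a);          // CRS is mandatory, see the note above
    double[] b = new double[] { 1, 4, 3 };
    alglib.linlsqrstate s;
    alglib.linlsqrreport rep;
    double[] x;
    alglib.linlsqrcreate(3, 2, out s);
    alglib.linlsqrsolvesparse(s, a, b);    // runs preconditioned LSQR iterations
    alglib.linlsqrresults(s, out x, out rep);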
Example #4
        /*************************************************************************
        This function runs QPBLEIC solver; it returns after  the  optimization
        process is completed. The following QP problem is solved:

            min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
            
        subject to boundary constraints.

        INPUT PARAMETERS:
            AC          -   for dense problems (AKind=0), A-term of CQM object
                            contains system matrix. Other terms are unspecified
                            and should not be referenced.
            SparseAC    -   for sparse problems (AKind=1), sparse system matrix;
                            only the triangle given by SparseUpper is referenced
            AKind       -   format of the A-term:
                            * 0 for dense matrix
                            * 1 for sparse matrix
            SparseUpper -   which triangle of SparseAC stores matrix  -  upper  or
                            lower one (for dense matrices this  parameter  is
                            ignored).
            AbsASum     -   SUM(|A[i,j]|)
            AbsASum2    -   SUM(A[i,j]^2)
            BC          -   linear term, array[NC]
            BndLC       -   lower bound, array[NC]
            BndUC       -   upper bound, array[NC]
            SC          -   scale vector, array[NC]:
                            * I-th element contains scale of I-th variable,
                            * SC[I]>0
            XOriginC    -   origin term, array[NC]. Can be zero.
            NC          -   number of variables in the  original  formulation  (no
                            slack variables).
            CLEICC      -   linear equality/inequality constraints. Present version
                            of this function does NOT provide  publicly  available
                            support for linear constraints. This feature  will  be
                            introduced in future versions of the function.
            NEC, NIC    -   number of equality/inequality constraints.
                            MUST BE ZERO IN THE CURRENT VERSION!!!
            Settings    -   QPBLEICSettings object initialized by one of the initialization
                            functions.
            SState      -   object which stores temporaries:
                            * if uninitialized object was passed, FirstCall parameter MUST
                              be set to True; object will be automatically initialized by the
                              function, and FirstCall will be set to False.
                            * if FirstCall=False, it is assumed that this parameter was already
                              initialized by previous call to this function with same
                              problem dimensions (variable count N).
            FirstCall   -   whether it is first call of this function for this specific
                            instance of SState, with this number of variables N specified.
            XS          -   initial point, array[NC]
            
            
        OUTPUT PARAMETERS:
            XS          -   last point
            FirstCall   -   unconditionally set to False
            TerminationType-termination type:
                            * -4    function is unbounded from below (direction
                                    of negative curvature with unbounded step
                                    was detected)
                            *  1    relative function improvement is no more
                                    than Settings.EpsF
                            *  2    scaled step is no more than Settings.EpsX
                            *  4    scaled gradient norm is no more than
                                    Settings.EpsG, or a non-descent direction
                                    was encountered
                            (when the solver stops on one of BLEIC's inner
                            criteria, BLEIC's own termination code is returned)

          -- ALGLIB --
             Copyright 14.05.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void qpbleicoptimize(cqmodels.convexquadraticmodel a,
            sparse.sparsematrix sparsea,
            int akind,
            bool sparseaupper,
            double absasum,
            double absasum2,
            double[] b,
            double[] bndl,
            double[] bndu,
            double[] s,
            double[] xorigin,
            int n,
            double[,] cleic,
            int nec,
            int nic,
            qpbleicsettings settings,
            qpbleicbuffers sstate,
            ref bool firstcall,
            ref double[] xs,
            ref int terminationtype)
        {
            int i = 0;
            double d2 = 0;
            double d1 = 0;
            double d0 = 0;
            double v = 0;
            double v0 = 0;
            double v1 = 0;
            double md = 0;
            double mx = 0;
            double mb = 0;
            int d1est = 0;
            int d2est = 0;
            int i_ = 0;

            terminationtype = 0;

            alglib.ap.assert(akind==0 || akind==1, "QPBLEICOptimize: unexpected AKind");
            sstate.repinneriterationscount = 0;
            sstate.repouteriterationscount = 0;
            terminationtype = 0;
            
            //
            // Prepare solver object, if needed
            //
            if( firstcall )
            {
                minbleic.minbleiccreate(n, xs, sstate.solver);
                firstcall = false;
            }
            
            //
            // Prepare max(|B|)
            //
            mb = 0.0;
            for(i=0; i<=n-1; i++)
            {
                mb = Math.Max(mb, Math.Abs(b[i]));
            }
            
            //
            // Temporaries
            //
            apserv.ivectorsetlengthatleast(ref sstate.tmpi, nec+nic);
            apserv.rvectorsetlengthatleast(ref sstate.tmp0, n);
            apserv.rvectorsetlengthatleast(ref sstate.tmp1, n);
            for(i=0; i<=nec-1; i++)
            {
                sstate.tmpi[i] = 0;
            }
            for(i=0; i<=nic-1; i++)
            {
                sstate.tmpi[nec+i] = -1;
            }
            minbleic.minbleicsetlc(sstate.solver, cleic, sstate.tmpi, nec+nic);
            minbleic.minbleicsetbc(sstate.solver, bndl, bndu);
            minbleic.minbleicsetdrep(sstate.solver, true);
            minbleic.minbleicsetcond(sstate.solver, math.minrealnumber, 0.0, 0.0, settings.maxits);
            minbleic.minbleicsetscale(sstate.solver, s);
            minbleic.minbleicsetprecscale(sstate.solver);
            minbleic.minbleicrestartfrom(sstate.solver, xs);
            while( minbleic.minbleiciteration(sstate.solver) )
            {
                
                //
                // Line search started
                //
                if( sstate.solver.lsstart )
                {
                    
                    //
                    // Iteration counters:
                    // * inner iterations count is increased on every line search
                    // * outer iterations count is increased only at steepest descent line search
                    //
                    apserv.inc(ref sstate.repinneriterationscount);
                    if( sstate.solver.steepestdescentstep )
                    {
                        apserv.inc(ref sstate.repouteriterationscount);
                    }
                    
                    //
                    // Build quadratic model of F along descent direction:
                    //
                    //     F(x+alpha*d) = D2*alpha^2 + D1*alpha + D0
                    //
                    // Calculate estimates of linear and quadratic term
                    // (term magnitude is compared with magnitude of numerical errors)
                    //
                    d0 = sstate.solver.f;
                    d1 = 0.0;
                    for(i_=0; i_<=n-1;i_++)
                    {
                        d1 += sstate.solver.d[i_]*sstate.solver.g[i_];
                    }
                    d2 = 0;
                    if( akind==0 )
                    {
                        d2 = cqmodels.cqmxtadx2(a, sstate.solver.d);
                    }
                    if( akind==1 )
                    {
                        sparse.sparsesmv(sparsea, sparseaupper, sstate.solver.d, ref sstate.tmp0);
                        d2 = 0.0;
                        for(i=0; i<=n-1; i++)
                        {
                            d2 = d2+sstate.solver.d[i]*sstate.tmp0[i];
                        }
                        d2 = 0.5*d2;
                    }
                    mx = 0.0;
                    md = 0.0;
                    for(i=0; i<=n-1; i++)
                    {
                        mx = Math.Max(mx, Math.Abs(sstate.solver.x[i]));
                        md = Math.Max(md, Math.Abs(sstate.solver.d[i]));
                    }
                    optserv.estimateparabolicmodel(absasum, absasum2, mx, mb, md, d1, d2, ref d1est, ref d2est);
                    
                    //
                    // Tests for "normal" convergence.
                    //
                    // This line search may be started from steepest descent
                    // stage (stage 2) or from L-BFGS stage (stage 3) of the
                    // BLEIC algorithm. Depending on stage type, different
                    // checks are performed.
                    //
                    // Say, L-BFGS stage is an equality-constrained refinement
                    // stage of BLEIC. This stage refines current iterate
                    // under "frozen" equality constraints. We can terminate
                    // iterations at this stage only when we encounter
                    // unconstrained direction of negative curvature. In all
                    // other cases (say, when constrained gradient is zero)
                    // we should not terminate algorithm because everything may
                    // change after de-activating presently active constraints.
                    //
                    // Tests for convergence are performed only at "steepest descent" stage
                    // of the BLEIC algorithm, and only when function is non-concave
                    // (D2 is positive or approximately zero) along direction D.
                    //
                    // NOTE: we do not test iteration count (MaxIts) here, because
                    //       this stopping condition is tested by BLEIC itself.
                    //
                    if( sstate.solver.steepestdescentstep && d2est>=0 )
                    {
                        if( d1est>=0 )
                        {
                            
                            //
                            // "Emergency" stopping condition: D is non-descent direction.
                            // Sometimes it is possible because of numerical noise in the
                            // target function.
                            //
                            terminationtype = 4;
                            for(i=0; i<=n-1; i++)
                            {
                                xs[i] = sstate.solver.x[i];
                            }
                            break;
                        }
                        if( d2est>0 )
                        {
                            
                            //
                            // Stopping condition #4 - gradient norm is small:
                            //
                            // 1. rescale State.Solver.D and State.Solver.G according to
                            //    current scaling, store results to Tmp0 and Tmp1.
                            // 2. Normalize Tmp0 (scaled direction vector).
                            // 3. compute directional derivative (in scaled variables),
                            //    which is equal to DOTPRODUCT(Tmp0,Tmp1).
                            //
                            v = 0;
                            for(i=0; i<=n-1; i++)
                            {
                                sstate.tmp0[i] = sstate.solver.d[i]/s[i];
                                sstate.tmp1[i] = sstate.solver.g[i]*s[i];
                                v = v+math.sqr(sstate.tmp0[i]);
                            }
                            alglib.ap.assert((double)(v)>(double)(0), "QPBLEICOptimize: internal error (scaled direction is zero)");
                            v = 1/Math.Sqrt(v);
                            for(i_=0; i_<=n-1;i_++)
                            {
                                sstate.tmp0[i_] = v*sstate.tmp0[i_];
                            }
                            v = 0.0;
                            for(i_=0; i_<=n-1;i_++)
                            {
                                v += sstate.tmp0[i_]*sstate.tmp1[i_];
                            }
                            if( (double)(Math.Abs(v))<=(double)(settings.epsg) )
                            {
                                terminationtype = 4;
                                for(i=0; i<=n-1; i++)
                                {
                                    xs[i] = sstate.solver.x[i];
                                }
                                break;
                            }
                            
                            //
                            // Stopping condition #1 - relative function improvement is small:
                            //
                            // 1. calculate steepest descent step:   V = -D1/(2*D2)
                            // 2. calculate function change:         V1= D2*V^2 + D1*V
                            // 3. stop if function change is small enough
                            //
                            v = -(d1/(2*d2));
                            v1 = d2*v*v+d1*v;
                            if( (double)(Math.Abs(v1))<=(double)(settings.epsf*Math.Max(d0, 1.0)) )
                            {
                                terminationtype = 1;
                                for(i=0; i<=n-1; i++)
                                {
                                    xs[i] = sstate.solver.x[i];
                                }
                                break;
                            }
                            
                            //
                            // Stopping condition #2 - scaled step is small:
                            //
                            // 1. calculate step multiplier V0 (step itself is D*V0)
                            // 2. calculate scaled step length V
                            // 3. stop if step is small enough
                            //
                            v0 = -(d1/(2*d2));
                            v = 0;
                            for(i=0; i<=n-1; i++)
                            {
                                v = v+math.sqr(v0*sstate.solver.d[i]/s[i]);
                            }
                            if( (double)(Math.Sqrt(v))<=(double)(settings.epsx) )
                            {
                                terminationtype = 2;
                                for(i=0; i<=n-1; i++)
                                {
                                    xs[i] = sstate.solver.x[i];
                                }
                                break;
                            }
                        }
                    }
                    
                    //
                    // Test for unconstrained direction of negative curvature
                    //
                    if( (d2est<0 || (d2est==0 && d1est<0)) && !sstate.solver.boundedstep )
                    {
                        
                        //
                        // Function is unbounded from below:
                        // * function will decrease along D, i.e. either:
                        //   * D2<0
                        //   * D2=0 and D1<0
                        // * step is unconstrained
                        //
                        // If these conditions are true, we abnormally terminate QP
                        // algorithm with return code -4 (we can do so at any stage
                        // of BLEIC - whether it is L-BFGS or steepest descent one).
                        //
                        terminationtype = -4;
                        for(i=0; i<=n-1; i++)
                        {
                            xs[i] = sstate.solver.x[i];
                        }
                        break;
                    }
                    
                    //
                    // Suggest new step (only if D1 is negative far away from zero,
                    // D2 is positive far away from zero).
                    //
                    if( d1est<0 && d2est>0 )
                    {
                        sstate.solver.stp = apserv.safeminposrv(-d1, 2*d2, sstate.solver.curstpmax);
                    }
                }
                
                //
                // Gradient evaluation
                //
                if( sstate.solver.needfg )
                {
                    for(i=0; i<=n-1; i++)
                    {
                        sstate.tmp0[i] = sstate.solver.x[i]-xorigin[i];
                    }
                    if( akind==0 )
                    {
                        cqmodels.cqmadx(a, sstate.tmp0, ref sstate.tmp1);
                    }
                    if( akind==1 )
                    {
                        sparse.sparsesmv(sparsea, sparseaupper, sstate.tmp0, ref sstate.tmp1);
                    }
                    v0 = 0.0;
                    for(i_=0; i_<=n-1;i_++)
                    {
                        v0 += sstate.tmp0[i_]*sstate.tmp1[i_];
                    }
                    v1 = 0.0;
                    for(i_=0; i_<=n-1;i_++)
                    {
                        v1 += sstate.tmp0[i_]*b[i_];
                    }
                    sstate.solver.f = 0.5*v0+v1;
                    for(i_=0; i_<=n-1;i_++)
                    {
                        sstate.solver.g[i_] = sstate.tmp1[i_];
                    }
                    for(i_=0; i_<=n-1;i_++)
                    {
                        sstate.solver.g[i_] = sstate.solver.g[i_] + b[i_];
                    }
                }
            }
            if( terminationtype==0 )
            {
                
                //
                // BLEIC optimizer was terminated by one of its inner stopping
                // conditions. Usually it is iteration counter (if such
                // stopping condition was specified by user).
                //
                minbleic.minbleicresultsbuf(sstate.solver, ref xs, sstate.solverrep);
                terminationtype = sstate.solverrep.terminationtype;
            }
            else
            {
                
                //
                // BLEIC optimizer was terminated in "emergency" mode by QP
                // solver.
                //
                // NOTE: such termination is "emergency" only when viewed from
                //       BLEIC's position. QP solver sees such termination as
                //       routine one, triggered by QP's stopping criteria.
                //
                minbleic.minbleicemergencytermination(sstate.solver);
            }
        }
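The convergence tests above are arithmetic on the one-dimensional model F(alpha) = D2*alpha^2 + D1*alpha + D0 built along the descent direction. A minimal editorial sketch of stopping conditions #1 and #2 under that model (hypothetical standalone helper, not ALGLIB code):

    using System;

    static class ParabolicStopDemo
    {
        // For F(alpha) = d2*alpha^2 + d1*alpha + d0 with d2>0 the minimizer is
        // alpha* = -d1/(2*d2). The predicted function change at alpha* drives
        // condition #1 (EpsF); the scaled step length drives condition #2 (EpsX).
        static void StoppingTests(double d0, double d1, double d2,
                                  double[] d, double[] s,
                                  double epsf, double epsx)
        {
            double alpha = -(d1 / (2 * d2));
            double fchange = d2 * alpha * alpha + d1 * alpha;
            bool stopOnEpsF = Math.Abs(fchange) <= epsf * Math.Max(d0, 1.0);
            double v = 0;
            for (int i = 0; i < d.Length; i++)
                v += Math.Pow(alpha * d[i] / s[i], 2);  // scaled step components
            bool stopOnEpsX = Math.Sqrt(v) <= epsx;
            Console.WriteLine("EpsF stop: {0}, EpsX stop: {1}", stopOnEpsF, stopOnEpsX);
        }
    }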
Example #5
        /*************************************************************************
        Procedure for solution of A*x=b with sparse A.

        INPUT PARAMETERS:
            State   -   algorithm state
            A       -   sparse matrix in the CRS format (you MUST convert  it  to 
                        CRS format by calling SparseConvertToCRS() function).
            IsUpper -   whether upper or lower triangle of A is used:
                        * IsUpper=True  => only upper triangle is used and lower
                                           triangle is not referenced at all 
                        * IsUpper=False => only lower triangle is used and upper
                                           triangle is not referenced at all
            B       -   right part, array[N]

        RESULT:
            This function returns no result.
            You can get solution by calling LinCGResults()

          -- ALGLIB --
             Copyright 14.11.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void lincgsolvesparse(lincgstate state,
            sparse.sparsematrix a,
            bool isupper,
            double[] b)
        {
            double vmv = 0;
            int i_ = 0;

            alglib.ap.assert(alglib.ap.len(b)>=state.n, "LinCGSetB: Length(B)<N");
            alglib.ap.assert(apserv.isfinitevector(b, state.n), "LinCGSetB: B contains infinite or NaN values!");
            lincgrestart(state);
            lincgsetb(state, b);
            while( lincgiteration(state) )
            {
                if( state.needmv )
                {
                    sparse.sparsesmv(a, isupper, state.x, ref state.mv);
                }
                if( state.needvmv )
                {
                    sparse.sparsesmv(a, isupper, state.x, ref state.mv);
                    vmv = 0.0;
                    for(i_=0; i_<=state.n-1;i_++)
                    {
                        vmv += state.x[i_]*state.mv[i_];
                    }
                    state.vmv = vmv;
                }
                if( state.needprec )
                {
                    for(i_=0; i_<=state.n-1;i_++)
                    {
                        state.pv[i_] = state.x[i_];
                    }
                }
            }
        }
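A hedged usage sketch, assuming the public ALGLIB C# wrapper (alglib.lincgcreate, alglib.lincgsolvesparse, alglib.lincgresults); the 2x2 SPD system is made up for illustration:

    alglib.sparsematrix a;
    alglib.sparsecreate(2, 2, out a);
    alglib.sparseset(a, 0, 0, 2.0);        // SPD matrix [[2,1],[1,3]],
    alglib.sparseset(a, 0, 1, 1.0);        // only the upper triangle is stored
    alglib.sparseset(a, 1, 1, 3.0);
    alglib.sparseconverttocrs(a);          // CRS is mandatory, see the note above
    double[] b = new double[] { 1, 2 };
    alglib.lincgstate s;
    alglib.lincgreport rep;
    double[] x;
    alglib.lincgcreate(2, out s);
    alglib.lincgsolvesparse(s, a, true, b); // IsUpper=true matches the storage
    alglib.lincgresults(s, out x, out rep);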
Example #6
 public sparsematrix(sparse.sparsematrix obj)
 {
     _innerobj = obj;
 }
Example #7
 public sparsebuffers(sparse.sparsebuffers obj)
 {
     _innerobj = obj;
 }
Example #8
        /*************************************************************************
        Sparse Cholesky decomposition for skyline matrix, using in-place algorithm
        without allocating additional storage.

        The algorithm computes Cholesky decomposition  of  a  symmetric  positive-
        definite sparse matrix. The result of the algorithm is a  representation
        of A as A=U^T*U or A=L*L^T.

        This  function  is  a  more  efficient alternative to general, but  slower
        SparseCholeskyX(), because it does not  create  temporary  copies  of  the
        target. It performs factorization in-place, which gives  best  performance
        on low-profile matrices. Its drawback, however, is that it cannot perform
        profile-reducing permutation of input matrix.

        INPUT PARAMETERS:
            A       -   sparse matrix in skyline storage (SKS) format.
            N       -   size of matrix A (can be smaller than actual size of A)
            IsUpper -   if IsUpper=True, then factorization is performed on  upper
                        triangle. Another triangle is ignored (it may contain some
                        data, but it is not changed).
            

        OUTPUT PARAMETERS:
            A       -   the result of factorization, stored in SKS. If IsUpper=True,
                        then the upper  triangle  contains  matrix  U,  such  that
                        A = U^T*U. Lower triangle is not changed.
                        Similarly, if IsUpper = False. In this case L is returned,
                        and we have A = L*(L^T).
                        Note that THIS function does not  perform  permutation  of
                        rows to reduce bandwidth.

        RESULT:
            If  the  matrix  is  positive-definite,  the  function  returns  True.
            Otherwise, the function returns False. Contents of A are not
            determined in such a case.

        NOTE: for  performance  reasons  this  function  does NOT check that input
              matrix  includes  only  finite  values. It is your responsibility to
              make sure that there are no infinite or NAN values in the matrix.

          -- ALGLIB routine --
             16.01.2014
             Bochkanov Sergey
        *************************************************************************/
        public static bool sparsecholeskyskyline(sparse.sparsematrix a,
            int n,
            bool isupper)
        {
            bool result = new bool();
            int i = 0;
            int j = 0;
            int k = 0;
            int jnz = 0;
            int jnza = 0;
            int jnzl = 0;
            double v = 0;
            double vv = 0;
            double a12 = 0;
            int nready = 0;
            int nadd = 0;
            int banda = 0;
            int offsa = 0;
            int offsl = 0;

            alglib.ap.assert(n>=0, "SparseCholeskySkyline: N<0");
            alglib.ap.assert(sparse.sparsegetnrows(a)>=n, "SparseCholeskySkyline: rows(A)<N");
            alglib.ap.assert(sparse.sparsegetncols(a)>=n, "SparseCholeskySkyline: cols(A)<N");
            alglib.ap.assert(sparse.sparseissks(a), "SparseCholeskySkyline: A is not stored in SKS format");
            result = false;
            
            //
            // transpose if needed
            //
            if( isupper )
            {
                sparse.sparsetransposesks(a);
            }
            
            //
            // Perform Cholesky decomposition:
            // * we assume that leading NReady*NReady submatrix is done
            // * having Cholesky decomposition of NReady*NReady submatrix we
            //   obtain decomposition of larger (NReady+NAdd)*(NReady+NAdd) one.
            //
            // Here is algorithm. At the start we have
            //
            //     (      |   )
            //     (  L   |   )
            // S = (      |   )
            //     (----------)
            //     (  A   | B )
            //
            // with L being already computed Cholesky factor, A and B being
            // unprocessed parts of the matrix. Of course, L/A/B are stored
            // in SKS format.
            //
            // Then, we calculate A1:=(inv(L)*A')' and replace A with A1.
            // Then, we calculate B1:=B-A1*A1'     and replace B with B1
            //
            // Finally, we calculate small NAdd*NAdd Cholesky of B1 with
            // dense solver. Now, L/A1/B1 are Cholesky decomposition of the
            // larger (NReady+NAdd)*(NReady+NAdd) matrix.
            //
            nready = 0;
            nadd = 1;
            while( nready<n )
            {
                alglib.ap.assert(nadd==1, "SkylineCholesky: internal error");
                
                //
                // Calculate A1:=(inv(L)*A')'
                //
                // Elements are calculated row by row (example below is given
                // for NAdd=1):
                // * first, we solve L[0,0]*A1[0]=A[0]
                // * then, we solve  L[1,0]*A1[0]+L[1,1]*A1[1]=A[1]
                // * then, we move to next row and so on
                // * during calculation of A1 we update A12 - squared norm of A1
                //
                // We extensively use sparsity of both A/A1 and L:
                // * first, equations from 0 to BANDWIDTH(A1)-1 are completely zero
                // * second, for I>=BANDWIDTH(A1), I-th equation is reduced from
                //     L[I,0]*A1[0] + L[I,1]*A1[1] + ... + L[I,I]*A1[I] = A[I]
                //   to
                //     L[I,JNZ]*A1[JNZ] + ... + L[I,I]*A1[I] = A[I]
                //   where JNZ = max(NReady-BANDWIDTH(A1),I-BANDWIDTH(L[i]))
                //   (JNZ is an index of the first column where both A and L become
                //   nonzero).
                //
                // NOTE: we rely on details of SparseMatrix internal storage format.
                //       This is allowed by SparseMatrix specification.
                //
                a12 = 0.0;
                if( a.didx[nready]>0 )
                {
                    banda = a.didx[nready];
                    for(i=nready-banda; i<=nready-1; i++)
                    {
                        
                        //
                        // Elements of A1[0:I-1] were computed:
                        // * A1[0:NReady-BandA-1] are zero (sparse)
                        // * A1[NReady-BandA:I-1] replaced corresponding elements of A
                        //
                        // Now it is time to get I-th one.
                        //
                        // First, we calculate:
                        // * JNZA  - index of the first column where A become nonzero
                        // * JNZL  - index of the first column where L become nonzero
                        // * JNZ   - index of the first column where both A and L become nonzero
                        // * OffsA - offset of A[JNZ] in A.Vals
                        // * OffsL - offset of L[I,JNZ] in A.Vals
                        //
                        // Then, we solve SUM(A1[j]*L[I,j],j=JNZ..I-1) + A1[I]*L[I,I] = A[I],
                        // with A1[JNZ..I-1] already known, and A1[I] unknown.
                        //
                        jnza = nready-banda;
                        jnzl = i-a.didx[i];
                        jnz = Math.Max(jnza, jnzl);
                        offsa = a.ridx[nready]+(jnz-jnza);
                        offsl = a.ridx[i]+(jnz-jnzl);
                        v = 0.0;
                        k = i-1-jnz;
                        for(j=0; j<=k; j++)
                        {
                            v = v+a.vals[offsa+j]*a.vals[offsl+j];
                        }
                        vv = (a.vals[offsa+k+1]-v)/a.vals[offsl+k+1];
                        a.vals[offsa+k+1] = vv;
                        a12 = a12+vv*vv;
                    }
                }
                
                //
                // Calculate CHOLESKY(B-A1*A1')
                //
                offsa = a.ridx[nready]+a.didx[nready];
                v = a.vals[offsa];
                if( (double)(v)<=(double)(a12) )
                {
                    result = false;
                    return result;
                }
                a.vals[offsa] = Math.Sqrt(v-a12);
                
                //
                // Increase size of the updated matrix
                //
                apserv.inc(ref nready);
            }
            
            //
            // transpose if needed
            //
            if( isupper )
            {
                sparse.sparsetransposesks(a);
            }
            result = true;
            return result;
        }
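A hedged usage sketch, assuming public wrappers alglib.sparseconverttosks and alglib.sparsecholeskyskyline are available alongside the internal routine; the 2x2 SPD matrix is made up for illustration:

    alglib.sparsematrix a;
    alglib.sparsecreate(2, 2, out a);
    alglib.sparseset(a, 0, 0, 4.0);        // SPD matrix [[4,1],[1,3]],
    alglib.sparseset(a, 0, 1, 1.0);        // upper triangle stored
    alglib.sparseset(a, 1, 1, 3.0);
    alglib.sparseconverttosks(a);          // factorization requires SKS storage
    bool ok = alglib.sparsecholeskyskyline(a, 2, true);
    // on success the upper triangle of A now holds U such that A = U^T*U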
Example #9
 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it with multithreaded code.
 *************************************************************************/
 public static void _pexec_mlpgradbatchsparsesubset(multilayerperceptron network,
     sparse.sparsematrix xy,
     int setsize,
     int[] idx,
     int subsetsize,
     ref double e,
     ref double[] grad)
 {
     mlpgradbatchsparsesubset(network,xy,setsize,idx,subsetsize,ref e,ref grad);
 }
Example #10
        /*************************************************************************
        Batch gradient calculation for a set of inputs/outputs  for  a  subset  of
        the dataset given by a set of indexes.


        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support 
          !
          ! First improvement gives close-to-linear speedup on multicore  systems.
          ! Second improvement gives constant speedup (2-3x depending on your CPU)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.


        INPUT PARAMETERS:
            Network -   network initialized with one of the network creation funcs
            XY      -   original dataset in sparse format; one sample = one row:
                        * MATRIX MUST BE STORED IN CRS FORMAT
                        * first NIn columns contain inputs,
                        * for regression problem, next NOut columns store
                          desired outputs.
                        * for classification problem, next column (just one!)
                          stores class number.
            SetSize -   real size of XY, SetSize>=0;
            Idx     -   subset of SubsetSize elements, array[SubsetSize]:
                        * Idx[I] stores row index in the original dataset which is
                          given by XY. Gradient is calculated with respect to rows
                          whose indexes are stored in Idx[].
                        * Idx[]  must store correct indexes; this function  throws
                          an  exception  in  case  incorrect index (less than 0 or
                          larger than rows(XY)) is given
                        * Idx[]  may  store  indexes  in  any  order and even with
                          repetitions.
            SubsetSize- number of elements in Idx[] array:
                        * positive value means that subset given by Idx[] is processed
                        * zero value results in zero gradient
                        * negative value means that full dataset is processed
            Grad      - possibly  preallocated array. If size of array is  smaller
                        than WCount, it will be reallocated. It is  recommended to
                        reuse  previously  allocated  array  to  reduce allocation
                        overhead.

        OUTPUT PARAMETERS:
            E       -   error function, SUM(sqr(y[i]-desiredy[i])/2,i)
            Grad    -   gradient  of  E  with  respect   to  weights  of  network,
                        array[WCount]

        NOTE: when SubsetSize<0 is used, the full dataset is processed, as  by  a
              call to the MLPGradBatchSparse function.
            
          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpgradbatchsparsesubset(multilayerperceptron network,
            sparse.sparsematrix xy,
            int setsize,
            int[] idx,
            int subsetsize,
            ref double e,
            ref double[] grad)
        {
            int i = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int npoints = 0;
            int subset0 = 0;
            int subset1 = 0;
            int subsettype = 0;
            smlpgrad sgrad = null;

            e = 0;

            alglib.ap.assert(setsize>=0, "MLPGradBatchSparseSubset: SetSize<0");
            alglib.ap.assert(subsetsize<=alglib.ap.len(idx), "MLPGradBatchSparseSubset: SubsetSize>Length(Idx)");
            alglib.ap.assert(sparse.sparseiscrs(xy), "MLPGradBatchSparseSubset: sparse matrix XY must be in CRS format.");
            npoints = setsize;
            if( subsetsize<0 )
            {
                subset0 = 0;
                subset1 = setsize;
                subsettype = 0;
            }
            else
            {
                subset0 = 0;
                subset1 = subsetsize;
                subsettype = 1;
                for(i=0; i<=subsetsize-1; i++)
                {
                    alglib.ap.assert(idx[i]>=0, "MLPGradBatchSparseSubset: incorrect index of XY row(Idx[I]<0)");
                    alglib.ap.assert(idx[i]<=npoints-1, "MLPGradBatchSparseSubset: incorrect index of XY row(Idx[I]>Rows(XY)-1)");
                }
            }
            mlpproperties(network, ref nin, ref nout, ref wcount);
            apserv.rvectorsetlengthatleast(ref grad, wcount);
            alglib.smp.ae_shared_pool_first_recycled(network.gradbuf, ref sgrad);
            while( sgrad!=null )
            {
                sgrad.f = 0.0;
                for(i=0; i<=wcount-1; i++)
                {
                    sgrad.g[i] = 0.0;
                }
                alglib.smp.ae_shared_pool_next_recycled(network.gradbuf, ref sgrad);
            }
            mlpgradbatchx(network, network.dummydxy, xy, setsize, 1, idx, subset0, subset1, subsettype, network.buf, network.gradbuf);
            e = 0.0;
            for(i=0; i<=wcount-1; i++)
            {
                grad[i] = 0.0;
            }
            alglib.smp.ae_shared_pool_first_recycled(network.gradbuf, ref sgrad);
            while( sgrad!=null )
            {
                e = e+sgrad.f;
                for(i=0; i<=wcount-1; i++)
                {
                    grad[i] = grad[i]+sgrad.g[i];
                }
                alglib.smp.ae_shared_pool_next_recycled(network.gradbuf, ref sgrad);
            }
        }
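A hedged usage sketch, assuming the public ALGLIB C# wrapper (alglib.mlpcreate1, alglib.mlpgradbatchsparsesubset); the tiny 4-row regression dataset and network shape are made up for illustration:

    alglib.multilayerperceptron net;
    alglib.mlpcreate1(2, 5, 1, out net);   // 2 inputs, 5 hidden, 1 output
    alglib.sparsematrix xy;
    alglib.sparsecreate(4, 3, out xy);     // NIn+NOut = 3 columns, 4 samples
    alglib.sparseset(xy, 0, 0, 1.0); alglib.sparseset(xy, 0, 2, 0.5);
    alglib.sparseset(xy, 2, 1, 2.0); alglib.sparseset(xy, 2, 2, 1.5);
    alglib.sparseconverttocrs(xy);         // CRS is mandatory, see above
    int[] idx = new int[] { 0, 2 };        // gradient over rows 0 and 2 only
    double e = 0;
    double[] grad = new double[0];         // reallocated to WCount by the call
    alglib.mlpgradbatchsparsesubset(net, xy, 4, idx, 2, ref e, ref grad);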
Example #11
 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it with multithreaded code.
 *************************************************************************/
 public static void _pexec_mlpgradbatchsparse(multilayerperceptron network,
     sparse.sparsematrix xy,
     int ssize,
     ref double e,
     ref double[] grad)
 {
     mlpgradbatchsparse(network,xy,ssize,ref e,ref grad);
 }
Example #12
        /*************************************************************************
        Batch gradient calculation for a set  of inputs/outputs  given  by  sparse
        matrices


        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support 
          !
          ! First improvement gives close-to-linear speedup on multicore  systems.
          ! Second improvement gives constant speedup (2-3x depending on your CPU)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.


        INPUT PARAMETERS:
            Network -   network initialized with one of the network creation funcs
            XY      -   original dataset in sparse format; one sample = one row:
                        * MATRIX MUST BE STORED IN CRS FORMAT
                        * first NIn columns contain inputs.
                        * for regression problem, next NOut columns store
                          desired outputs.
                        * for classification problem, next column (just one!)
                          stores class number.
            SSize   -   number of elements in XY
            Grad    -   possibly preallocated array. If size of array is smaller
                        than WCount, it will be reallocated. It is recommended to
                        reuse previously allocated array to reduce allocation
                        overhead.

        OUTPUT PARAMETERS:
            E       -   error function, SUM(sqr(y[i]-desiredy[i])/2,i)
            Grad    -   gradient of E with respect to weights of network, array[WCount]

          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpgradbatchsparse(multilayerperceptron network,
            sparse.sparsematrix xy,
            int ssize,
            ref double e,
            ref double[] grad)
        {
            int i = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int subset0 = 0;
            int subset1 = 0;
            int subsettype = 0;
            smlpgrad sgrad = null;

            e = 0;

            alglib.ap.assert(ssize>=0, "MLPGradBatchSparse: SSize<0");
            alglib.ap.assert(sparse.sparseiscrs(xy), "MLPGradBatchSparse: sparse matrix XY must be in CRS format.");
            subset0 = 0;
            subset1 = ssize;
            subsettype = 0;
            mlpproperties(network, ref nin, ref nout, ref wcount);
            apserv.rvectorsetlengthatleast(ref grad, wcount);
            alglib.smp.ae_shared_pool_first_recycled(network.gradbuf, ref sgrad);
            while( sgrad!=null )
            {
                sgrad.f = 0.0;
                for(i=0; i<=wcount-1; i++)
                {
                    sgrad.g[i] = 0.0;
                }
                alglib.smp.ae_shared_pool_next_recycled(network.gradbuf, ref sgrad);
            }
            mlpgradbatchx(network, network.dummydxy, xy, ssize, 1, network.dummyidx, subset0, subset1, subsettype, network.buf, network.gradbuf);
            e = 0.0;
            for(i=0; i<=wcount-1; i++)
            {
                grad[i] = 0.0;
            }
            alglib.smp.ae_shared_pool_first_recycled(network.gradbuf, ref sgrad);
            while( sgrad!=null )
            {
                e = e+sgrad.f;
                for(i=0; i<=wcount-1; i++)
                {
                    grad[i] = grad[i]+sgrad.g[i];
                }
                alglib.smp.ae_shared_pool_next_recycled(network.gradbuf, ref sgrad);
            }
        }
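Continuing the subset sketch from Example #10: the full-dataset variant shown above needs no index array (again assuming the public wrapper alglib.mlpgradbatchsparse):

    alglib.mlpgradbatchsparse(net, xy, 4, ref e, ref grad);
    // e and grad now cover all 4 rows instead of the {0, 2} subset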
Example #13
 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it with multithreaded code.
 *************************************************************************/
 public static double _pexec_mlpavgrelerrorsparse(multilayerperceptron network,
     sparse.sparsematrix xy,
     int npoints)
 {
     return mlpavgrelerrorsparse(network,xy,npoints);
 }
Example #14
        /*************************************************************************
        Average relative error on the test set given by sparse matrix.


        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support 
          !
          ! First improvement gives close-to-linear speedup on multicore  systems.
          ! Second improvement gives constant speedup (2-3x depending on your CPU)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.


        INPUT PARAMETERS:
            Network     -   neural network;
            XY          -   training  set,  see  below  for  information  on   the
                            training set format. This function checks  correctness
                            of  the  dataset  (no  NANs/INFs,  class  numbers  are
                            correct) and throws exception when  incorrect  dataset
                            is passed.  Sparse  matrix  must  use  CRS  format for
                            storage.
            NPoints     -   points count, >=0.

        RESULT:
        Its meaning for a regression task is obvious. As for a classification task, it
        means  average  relative  error  when  estimating posterior probability of
        belonging to the correct class.

        DATASET FORMAT:

        This  function  uses  two  different  dataset formats - one for regression
        networks, another one for classification networks.

        For regression networks with NIn inputs and NOut outputs following dataset
        format is used:
        * dataset is given by NPoints*(NIn+NOut) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, next NOut columns are outputs

        For classification networks with NIn inputs and NClasses classes  following
        dataset format is used:
        * dataset is given by NPoints*(NIn+1) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, last column stores class number (from 0 to
          NClasses-1).
          
          -- ALGLIB --
             Copyright 09.08.2012 by Bochkanov Sergey
        *************************************************************************/
        public static double mlpavgrelerrorsparse(multilayerperceptron network,
            sparse.sparsematrix xy,
            int npoints)
        {
            double result = 0;

            alglib.ap.assert(sparse.sparseiscrs(xy), "MLPAvgRelErrorSparse: XY is not in CRS format.");
            alglib.ap.assert(sparse.sparsegetnrows(xy)>=npoints, "MLPAvgRelErrorSparse: XY has less than NPoints rows");
            if( npoints>0 )
            {
                if( mlpissoftmax(network) )
                {
                    alglib.ap.assert(sparse.sparsegetncols(xy)>=mlpgetinputscount(network)+1, "MLPAvgRelErrorSparse: XY has less than NIn+1 columns");
                }
                else
                {
                    alglib.ap.assert(sparse.sparsegetncols(xy)>=mlpgetinputscount(network)+mlpgetoutputscount(network), "MLPAvgRelErrorSparse: XY has less than NIn+NOut columns");
                }
            }
            mlpallerrorsx(network, network.dummydxy, xy, npoints, 1, network.dummyidx, 0, npoints, 0, network.buf, network.err);
            result = network.err.avgrelerror;
            return result;
        }
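A hedged sketch of the classification dataset format described above (NIn input columns plus one class-number column, CRS storage), assuming the public wrappers alglib.mlpcreatec1 and alglib.mlpavgrelerrorsparse; the numbers are made up:

    alglib.multilayerperceptron net;
    alglib.mlpcreatec1(2, 5, 3, out net);  // classifier: 2 inputs, 3 classes
    alglib.sparsematrix xy;
    alglib.sparsecreate(2, 3, out xy);     // NIn+1 = 3 columns, 2 samples
    alglib.sparseset(xy, 0, 0, 0.1); alglib.sparseset(xy, 0, 2, 2); // class 2
    alglib.sparseset(xy, 1, 1, 0.7);       // class 0 is an implicit zero entry
    alglib.sparseconverttocrs(xy);
    double err = alglib.mlpavgrelerrorsparse(net, xy, 2);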
Example #15
        /*************************************************************************
        Internal subroutine.
        Initialization for preprocessor based on a subsample.

        INPUT PARAMETERS:
            Network -   network initialized with one of the network creation funcs
            XY      -   original dataset, given by sparse matrix;
                        one sample = one row;
                        first NIn columns contain inputs,
                        next NOut columns - desired outputs.
            SetSize -   real size of XY, SetSize>=0;
            Idx     -   subset of SubsetSize elements, array[SubsetSize]:
                        * Idx[I] stores row index in the original dataset which is
                          given by XY. The preprocessor is initialized  using  the
                          rows whose indexes are stored in Idx[].
                        * Idx[]  must store correct indexes; this function  throws
                          an  exception  in  case  incorrect index (less than 0 or
                          larger than rows(XY)) is given
                        * Idx[]  may  store  indexes  in  any  order and even with
                          repetitions.
            SubsetSize- number of elements in Idx[] array.
            
        OUTPUT:
            Network -   neural network with initialised preprocessor.
            
        NOTE: when SubsetSize<0, the preprocessor is initialized using the  full
              dataset via a call to the MLPInitPreprocessorSparse function.
              
          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpinitpreprocessorsparsesubset(multilayerperceptron network,
            sparse.sparsematrix xy,
            int setsize,
            int[] idx,
            int subsetsize)
        {
            int jmax = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int ntotal = 0;
            int istart = 0;
            int offs = 0;
            int ntype = 0;
            double[] means = new double[0];
            double[] sigmas = new double[0];
            double s = 0;
            int npoints = 0;
            int i = 0;
            int j = 0;

            alglib.ap.assert(setsize>=0, "MLPInitPreprocessorSparseSubset: SetSize<0");
            if( subsetsize<0 )
            {
                mlpinitpreprocessorsparse(network, xy, setsize);
                return;
            }
            alglib.ap.assert(subsetsize<=alglib.ap.len(idx), "MLPInitPreprocessorSparseSubset: SubsetSize>Length(Idx)");
            npoints = setsize;
            for(i=0; i<=subsetsize-1; i++)
            {
                alglib.ap.assert(idx[i]>=0, "MLPInitPreprocessorSparseSubset: incorrect index of XY row(Idx[I]<0)");
                alglib.ap.assert(idx[i]<=npoints-1, "MLPInitPreprocessorSparseSubset: incorrect index of XY row(Idx[I]>Rows(XY)-1)");
            }
            mlpproperties(network, ref nin, ref nout, ref wcount);
            ntotal = network.structinfo[3];
            istart = network.structinfo[5];
            
            //
            // Means/Sigmas
            //
            if( mlpissoftmax(network) )
            {
                jmax = nin-1;
            }
            else
            {
                jmax = nin+nout-1;
            }
            means = new double[jmax+1];
            sigmas = new double[jmax+1];
            for(i=0; i<=jmax; i++)
            {
                means[i] = 0;
                sigmas[i] = 0;
            }
            for(i=0; i<=subsetsize-1; i++)
            {
                sparse.sparsegetrow(xy, idx[i], ref network.xyrow);
                for(j=0; j<=jmax; j++)
                {
                    means[j] = means[j]+network.xyrow[j];
                }
            }
            for(i=0; i<=jmax; i++)
            {
                means[i] = means[i]/subsetsize;
            }
            for(i=0; i<=subsetsize-1; i++)
            {
                sparse.sparsegetrow(xy, idx[i], ref network.xyrow);
                for(j=0; j<=jmax; j++)
                {
                    sigmas[j] = sigmas[j]+math.sqr(network.xyrow[j]-means[j]);
                }
            }
            for(i=0; i<=jmax; i++)
            {
                sigmas[i] = Math.Sqrt(sigmas[i]/subsetsize);
            }
            
            //
            // Inputs
            //
            for(i=0; i<=nin-1; i++)
            {
                network.columnmeans[i] = means[i];
                network.columnsigmas[i] = sigmas[i];
                if( (double)(network.columnsigmas[i])==(double)(0) )
                {
                    network.columnsigmas[i] = 1;
                }
            }
            
            //
            // Outputs
            //
            if( !mlpissoftmax(network) )
            {
                for(i=0; i<=nout-1; i++)
                {
                    offs = istart+(ntotal-nout+i)*nfieldwidth;
                    ntype = network.structinfo[offs+0];
                    
                    //
                    // Linear outputs
                    //
                    if( ntype==0 )
                    {
                        network.columnmeans[nin+i] = means[nin+i];
                        network.columnsigmas[nin+i] = sigmas[nin+i];
                        if( (double)(network.columnsigmas[nin+i])==(double)(0) )
                        {
                            network.columnsigmas[nin+i] = 1;
                        }
                    }
                    
                    //
                    // Bounded outputs (half-interval)
                    //
                    if( ntype==3 )
                    {
                        s = means[nin+i]-network.columnmeans[nin+i];
                        if( (double)(s)==(double)(0) )
                        {
                            s = Math.Sign(network.columnsigmas[nin+i]);
                        }
                        if( (double)(s)==(double)(0) )
                        {
                            s = 1.0;
                        }
                        network.columnsigmas[nin+i] = Math.Sign(network.columnsigmas[nin+i])*Math.Abs(s);
                        if( (double)(network.columnsigmas[nin+i])==(double)(0) )
                        {
                            network.columnsigmas[nin+i] = 1;
                        }
                    }
                }
            }
        }
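
The subroutine above standardizes each column to zero mean and unit sigma over the selected rows, replacing zero sigmas by 1 so that constant columns pass through unscaled. A hypothetical standalone helper (not part of ALGLIB) showing the equivalent per-sample transformation:

    // Hypothetical helper (not part of ALGLIB) mirroring the statistics above:
    // z[j] = (x[j]-mean[j])/sigma[j], with sigma[j]==0 replaced by 1 so that
    // constant columns pass through unscaled.
    public static class PreprocessorSketch
    {
        public static double[] Standardize(double[] x, double[] means, double[] sigmas)
        {
            double[] z = new double[x.Length];
            for(int j=0; j<x.Length; j++)
            {
                double s = sigmas[j]==0 ? 1 : sigmas[j];
                z[j] = (x[j]-means[j])/s;
            }
            return z;
        }
    }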
Exemplo n.º 16
0
        /*************************************************************************
        Calculation of all types of errors on dataset given by sparse matrix

          -- ALGLIB --
             Copyright 10.09.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpeallerrorssparse(mlpensemble ensemble,
            sparse.sparsematrix xy,
            int npoints,
            ref double relcls,
            ref double avgce,
            ref double rms,
            ref double avg,
            ref double avgrel)
        {
            int i = 0;
            double[] buf = new double[0];
            double[] workx = new double[0];
            double[] y = new double[0];
            double[] dy = new double[0];
            int nin = 0;
            int nout = 0;
            int i_ = 0;
            int i1_ = 0;

            relcls = 0;
            avgce = 0;
            rms = 0;
            avg = 0;
            avgrel = 0;

            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                dy = new double[1];
                bdss.dserrallocate(nout, ref buf);
            }
            else
            {
                dy = new double[nout];
                bdss.dserrallocate(-nout, ref buf);
            }
            for(i=0; i<=npoints-1; i++)
            {
                sparse.sparsegetrow(xy, i, ref workx);
                mlpeprocess(ensemble, workx, ref y);
                if( mlpbase.mlpissoftmax(ensemble.network) )
                {
                    dy[0] = workx[nin];
                }
                else
                {
                    i1_ = (nin) - (0);
                    for(i_=0; i_<=nout-1;i_++)
                    {
                        dy[i_] = workx[i_+i1_];
                    }
                }
                bdss.dserraccumulate(ref buf, y, dy);
            }
            bdss.dserrfinish(ref buf);
            relcls = buf[0];
            avgce = buf[1];
            rms = buf[2];
            avg = buf[3];
            avgrel = buf[4];
        }
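
The loop above follows a simple accumulate-then-finish protocol: allocate a buffer (positive argument for classification, negative for regression), feed it one (output, desired) pair per sample, finish, and read the five error measures from buf[0..4]. A self-contained sketch of the same idea for regression-style errors (the exact weighting inside ALGLIB's internal dserr* helpers may differ; relative error here is averaged over nonzero desired values only, by assumption):

    using System;

    public static class ErrorAccumulatorSketch
    {
        public static void Main()
        {
            double[][] y  = { new[]{1.1, 2.0}, new[]{0.9, 2.2} };  // network outputs
            double[][] dy = { new[]{1.0, 2.0}, new[]{1.0, 2.0} };  // desired outputs
            double sse = 0, sae = 0, sre = 0; int n = 0, nrel = 0;
            for(int i=0; i<y.Length; i++)
                for(int j=0; j<y[i].Length; j++)
                {
                    double e = y[i][j]-dy[i][j];
                    sse += e*e; sae += Math.Abs(e); n++;
                    if( dy[i][j]!=0 ) { sre += Math.Abs(e/dy[i][j]); nrel++; }
                }
            // "Finish" step: convert accumulated sums into the error measures.
            Console.WriteLine("rms={0} avg={1} avgrel={2}",
                Math.Sqrt(sse/n), sae/n, nrel>0 ? sre/nrel : 0);
        }
    }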
Exemplo n.º 17
0
        /*************************************************************************
        This function sets "current dataset" of the trainer object to  one  passed
        by user (sparse matrix is used to store dataset).

        INPUT PARAMETERS:
            S           -   trainer object
            XY          -   training  set,  see  below  for  information  on   the
                            training set format. This function checks  correctness
                            of  the  dataset  (no  NANs/INFs,  class  numbers  are
                            correct) and throws exception when  incorrect  dataset
                            is passed. Any  sparse  storage  format  can be  used:
                            Hash-table, CRS...
            NPoints     -   points count, >=0

        DATASET FORMAT:

        This  function  uses  two  different  dataset formats - one for regression
        networks, another one for classification networks.

        For regression networks with NIn inputs and NOut outputs following dataset
        format is used:
        * dataset is given by NPoints*(NIn+NOut) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, next NOut columns are outputs

        For classification networks with NIn inputs and NClasses classes following
        dataset format is used:
        * dataset is given by NPoints*(NIn+1) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, last column stores class number (from 0 to
          NClasses-1).
          
          -- ALGLIB --
             Copyright 23.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpsetsparsedataset(mlptrainer s,
            sparse.sparsematrix xy,
            int npoints)
        {
            double v = 0;
            int t0 = 0;
            int t1 = 0;
            int i = 0;
            int j = 0;

            
            //
            // Check correctness of the data
            //
            alglib.ap.assert(s.nin>0, "MLPSetSparseDataset: possibly parameter S is not initialized or spoiled(S.NIn<=0).");
            alglib.ap.assert(npoints>=0, "MLPSetSparseDataset: NPoints<0");
            alglib.ap.assert(npoints<=sparse.sparsegetnrows(xy), "MLPSetSparseDataset: invalid size of sparse matrix XY(NPoints is larger than the number of rows of XY)");
            if( npoints>0 )
            {
                t0 = 0;
                t1 = 0;
                if( s.rcpar )
                {
                    alglib.ap.assert(s.nout>=1, "MLPSetSparseDataset: possibly parameter S is not initialized or is spoiled(NOut<1 for regression).");
                    alglib.ap.assert(s.nin+s.nout<=sparse.sparsegetncols(xy), "MLPSetSparseDataset: invalid size of sparse matrix XY(too few columns in sparse matrix XY).");
                    while( sparse.sparseenumerate(xy, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( i<npoints && j<s.nin+s.nout )
                        {
                            alglib.ap.assert(math.isfinite(v), "MLPSetSparseDataset: sparse matrix XY contains Infinite or NaN.");
                        }
                    }
                }
                else
                {
                    alglib.ap.assert(s.nout>=2, "MLPSetSparseDataset: possibly parameter S is not initialized or is spoiled(NClasses<2 for classifier).");
                    alglib.ap.assert(s.nin+1<=sparse.sparsegetncols(xy), "MLPSetSparseDataset: invalid size of sparse matrix XY(too few columns in sparse matrix XY).");
                    while( sparse.sparseenumerate(xy, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( i<npoints && j<=s.nin )
                        {
                            if( j!=s.nin )
                            {
                                alglib.ap.assert(math.isfinite(v), "MLPSetSparseDataset: sparse matrix XY contains Infinite or NaN.");
                            }
                            else
                            {
                                alglib.ap.assert((math.isfinite(v) && (int)Math.Round(v)>=0) && (int)Math.Round(v)<s.nout, "MLPSetSparseDataset: invalid sparse matrix XY(in classifier used nonexistent class number: either XY[.,NIn]<0 or XY[.,NIn]>=NClasses).");
                            }
                        }
                    }
                }
            }
            
            //
            // Set dataset
            //
            s.datatype = 1;
            s.npoints = npoints;
            sparse.sparsecopytocrs(xy, s.sparsexy);
        }
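
A hedged end-to-end sketch of the trainer workflow built around this function, assuming the public wrapper exposes mlpcreatetrainer, mlpsetsparsedataset, mlpcreate1 and mlptrainnetwork as in recent ALGLIB releases. Note that, unlike the CRS-only error functions above, any sparse storage format is accepted here because the dataset is copied to CRS internally (see the sparsecopytocrs call at the end of the function):

    using System;

    public static class SparseDatasetDemo
    {
        public static void Main()
        {
            // Regression problem: 2 inputs, 1 output => dataset has 3 columns.
            // A hash-table sparse matrix is fine here, no CRS conversion needed.
            alglib.sparsematrix xy;
            alglib.sparsecreate(3, 3, out xy);
            alglib.sparseset(xy, 0, 0, 1.0); alglib.sparseset(xy, 0, 2, 2.0);
            alglib.sparseset(xy, 1, 1, 1.0); alglib.sparseset(xy, 1, 2, 3.0);
            alglib.sparseset(xy, 2, 0, 2.0); alglib.sparseset(xy, 2, 2, 4.0);

            alglib.mlptrainer trn;
            alglib.mlpcreatetrainer(2, 1, out trn);      // NIn=2, NOut=1
            alglib.mlpsetsparsedataset(trn, xy, 3);      // attach dataset, NPoints=3

            alglib.multilayerperceptron net;
            alglib.mlpcreate1(2, 5, 1, out net);         // one hidden layer of 5 neurons
            alglib.mlpreport rep;
            alglib.mlptrainnetwork(trn, net, 3, out rep);
            Console.WriteLine("training done");
        }
    }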
Exemplo n.º 18
0
        /*************************************************************************
        Internal function which actually calculates batch gradient for a subset or
        full dataset, which can be represented in different formats.

        THIS FUNCTION IS NOT INTENDED TO BE USED BY ALGLIB USERS!

          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpgradbatchx(multilayerperceptron network,
            double[,] densexy,
            sparse.sparsematrix sparsexy,
            int datasetsize,
            int datasettype,
            int[] idx,
            int subset0,
            int subset1,
            int subsettype,
            alglib.smp.shared_pool buf,
            alglib.smp.shared_pool gradbuf)
        {
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int rowsize = 0;
            int srcidx = 0;
            int cstart = 0;
            int csize = 0;
            int j = 0;
            double problemcost = 0;
            hpccores.mlpbuffers buf2 = null;
            int len0 = 0;
            int len1 = 0;
            hpccores.mlpbuffers pbuf = null;
            smlpgrad sgrad = null;
            int i_ = 0;

            alglib.ap.assert(datasetsize>=0, "MLPGradBatchX: SetSize<0");
            alglib.ap.assert(datasettype==0 || datasettype==1, "MLPGradBatchX: DatasetType is incorrect");
            alglib.ap.assert(subsettype==0 || subsettype==1, "MLPGradBatchX: SubsetType is incorrect");
            
            //
            // Determine network and dataset properties
            //
            mlpproperties(network, ref nin, ref nout, ref wcount);
            if( mlpissoftmax(network) )
            {
                rowsize = nin+1;
            }
            else
            {
                rowsize = nin+nout;
            }
            
            //
            // Split problem.
            //
            // Splitting the problem allows us to reduce the effect of single-precision
            // arithmetic (the SSE-optimized version of MLPChunkedGradient uses single
            // precision internally, but converts results to double precision after
            // they are exported from the HPC buffer to the network). Small batches are
            // calculated in single precision, results are aggregated  in  double
            // precision, and this allows us to avoid accumulation of errors when
            // we process very large batches (tens of thousands of items).
            //
            // NOTE: it is important to use real arithmetic for ProblemCost
            //       because ProblemCost may be larger than MAXINT.
            //
            problemcost = subset1-subset0;
            problemcost = problemcost*wcount;
            if( subset1-subset0>=2*microbatchsize && (double)(problemcost)>(double)(gradbasecasecost) )
            {
                apserv.splitlength(subset1-subset0, microbatchsize, ref len0, ref len1);
                mlpgradbatchx(network, densexy, sparsexy, datasetsize, datasettype, idx, subset0, subset0+len0, subsettype, buf, gradbuf);
                mlpgradbatchx(network, densexy, sparsexy, datasetsize, datasettype, idx, subset0+len0, subset1, subsettype, buf, gradbuf);
                return;
            }
            
            //
            // Chunked processing
            //
            alglib.smp.ae_shared_pool_retrieve(gradbuf, ref sgrad);
            alglib.smp.ae_shared_pool_retrieve(buf, ref pbuf);
            hpccores.hpcpreparechunkedgradient(network.weights, wcount, mlpntotal(network), nin, nout, pbuf);
            cstart = subset0;
            while( cstart<subset1 )
            {
                
                //
                // Determine size of current chunk and copy it to PBuf.XY
                //
                csize = Math.Min(subset1, cstart+pbuf.chunksize)-cstart;
                for(j=0; j<=csize-1; j++)
                {
                    srcidx = -1;
                    if( subsettype==0 )
                    {
                        srcidx = cstart+j;
                    }
                    if( subsettype==1 )
                    {
                        srcidx = idx[cstart+j];
                    }
                    alglib.ap.assert(srcidx>=0, "MLPGradBatchX: internal error");
                    if( datasettype==0 )
                    {
                        for(i_=0; i_<=rowsize-1;i_++)
                        {
                            pbuf.xy[j,i_] = densexy[srcidx,i_];
                        }
                    }
                    if( datasettype==1 )
                    {
                        sparse.sparsegetrow(sparsexy, srcidx, ref pbuf.xyrow);
                        for(i_=0; i_<=rowsize-1;i_++)
                        {
                            pbuf.xy[j,i_] = pbuf.xyrow[i_];
                        }
                    }
                }
                
                //
                // Process chunk and advance line pointer
                //
                mlpchunkedgradient(network, pbuf.xy, 0, csize, pbuf.batch4buf, pbuf.hpcbuf, ref sgrad.f, false);
                cstart = cstart+pbuf.chunksize;
            }
            hpccores.hpcfinalizechunkedgradient(pbuf, sgrad.g);
            alglib.smp.ae_shared_pool_recycle(buf, ref pbuf);
            alglib.smp.ae_shared_pool_recycle(gradbuf, ref sgrad);
        }
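
The recursion above is a divide-and-conquer accumulation: ranges larger than 2*MicroBatchSize are split (via SplitLength) and processed independently, so each single-precision chunk stays small while totals are carried in double precision. A generic, self-contained sketch of the same pattern with plain halving instead of SplitLength (all names are hypothetical, not ALGLIB's):

    using System;

    public static class BatchSplitSketch
    {
        const int MicroBatchSize = 64;   // hypothetical stand-in for microbatchsize

        // Recursively split [lo,hi) until pieces are small, then process them.
        // Mirrors the structure of MLPGradBatchX above: results of small batches
        // are accumulated into a double-precision total.
        public static double ProcessRange(double[] data, int lo, int hi)
        {
            if( hi-lo>=2*MicroBatchSize )
            {
                int mid = lo+(hi-lo)/2;
                return ProcessRange(data, lo, mid)+ProcessRange(data, mid, hi);
            }
            float partial = 0;                       // "chunk" in single precision
            for(int i=lo; i<hi; i++)
                partial += (float)data[i];
            return partial;                          // aggregated in double precision
        }

        public static void Main()
        {
            double[] data = new double[100000];
            for(int i=0; i<data.Length; i++) data[i] = 1e-4;
            Console.WriteLine(ProcessRange(data, 0, data.Length));  // close to 10
        }
    }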
Exemplo n.º 19
0
        /*************************************************************************
        Sparse Cholesky decomposition: "expert" function.

        The algorithm computes Cholesky decomposition  of  a  symmetric  positive-
        definite sparse matrix. The result is representation of A  as  A=U^T*U  or
        A=L*L^T

        Triangular factor L or U is written to a separate SparseMatrix  structure.
        If the output buffer already contains enough memory to  store  L/U,  this
        memory is reused.

        INPUT PARAMETERS:
            A       -   upper or lower triangle of sparse matrix.
                        Matrix can be in any sparse storage format.
            N       -   size of matrix A (can be smaller than actual size of A)
            IsUpper -   if IsUpper=True, then A contains an upper triangle of
                        a symmetric matrix, otherwise A contains a lower one.
                        Another triangle is ignored.
            P0, P1  -   integer arrays:
                        * for Ordering=-3  -  user-supplied permutation  of  rows/
                          columns, which complies  to  requirements stated  in the
                          "OUTPUT PARAMETERS" section.  Both  P0 and  P1  must  be
                          initialized by user.
                        * for other values of  Ordering  -  possibly  preallocated
                          buffer,  which   is   filled   by  internally  generated
                          permutation. Automatically resized if its  size  is  too
                          small to store data.
            Ordering-   sparse matrix reordering algorithm which is used to reduce
                        fill-in amount:
                        * -3    use ordering supplied by user in P0/P1
                        * -2    use random ordering
                        * -1    use original order
                        * 0     use best algorithm implemented so far
                        If input matrix is  given  in  SKS  format,  factorization
                        function ignores Ordering and uses original order  of  the
                        columns. The idea is that if you already store  matrix  in
                        SKS format, it is better not to perform costly reordering.
            Algo    -   type of algorithm which is used during factorization:
                        * 0     use best  algorithm  (for  SKS  input  or   output
                                matrices Algo=2 is used; otherwise Algo=1 is used)
                        * 1     use CRS-based algorithm
                        * 2     use skyline-based factorization algorithm.
                                This algorithm is a  fastest  one  for low-profile
                                matrices,  but  requires  too  much of memory  for
                                matrices with large bandwidth.
            Fmt     -   desired storage format  of  the  output,  as  returned  by
                        SparseGetMatrixType() function:
                        * 0 for hash-based storage
                        * 1 for CRS
                        * 2 for SKS
                        If you do not know what format to choose, use 1 (CRS).
            Buf     -   SparseBuffers structure which is used to store temporaries.
                        This function may reuse previously allocated  storage,  so
                        if you perform repeated factorizations it is beneficial to
                        reuse Buf.
            C       -   SparseMatrix structure  which  can  be  just  some  random
                        garbage. In case it  contains  enough  memory  to   store
                        triangular factors, this memory will be reused. Otherwise,
                        the algorithm will automatically allocate enough memory.
            

        OUTPUT PARAMETERS:
            C       -   the result of factorization, stored in desired format.  If
                        IsUpper=True, then the upper triangle  contains  matrix U,
                        such  that  (P'*A*P) = U^T*U,  where  P  is  a permutation
                        matrix (see below). The elements below the  main  diagonal
                        are zero.
                        Similarly, if IsUpper=False, then the lower triangle
                        contains L such that (P'*A*P) = L*(L^T), and the elements
                        above the main diagonal are zero.
            P0      -   permutation  (according   to   Ordering  parameter)  which
                        minimizes amount of fill-in:
                        * P0 is array[N]
                        * permutation is applied to A before  factorization  takes
                          place, i.e. we have U'*U = L*L' = P'*A*P
                        * P0[k]=j means that column/row j of A  is  moved  to k-th
                          position before starting factorization.
            P1      -   permutation P in another format, array[N]:
                        * P1[k]=j means that k-th column/row of A is moved to j-th
                          position

        RESULT:
            If  the  matrix  is  positive-definite,  the  function  returns  True.
            Otherwise, the function returns False. Contents of C are not determined
            in such case.

        NOTE: for  performance  reasons  this  function  does NOT check that input
              matrix  includes  only  finite  values. It is your responsibility to
              make sure that there are no infinite or NAN values in the matrix.

          -- ALGLIB routine --
             16.01.2014
             Bochkanov Sergey
        *************************************************************************/
        public static bool sparsecholeskyx(sparse.sparsematrix a,
            int n,
            bool isupper,
            ref int[] p0,
            ref int[] p1,
            int ordering,
            int algo,
            int fmt,
            sparse.sparsebuffers buf,
            sparse.sparsematrix c)
        {
            bool result = new bool();
            int i = 0;
            int j = 0;
            int k = 0;
            int t0 = 0;
            int t1 = 0;
            double v = 0;
            hqrnd.hqrndstate rs = new hqrnd.hqrndstate();

            alglib.ap.assert(n>=0, "SparseMatrixCholeskyBuf: N<0");
            alglib.ap.assert(sparse.sparsegetnrows(a)>=n, "SparseMatrixCholeskyBuf: rows(A)<N");
            alglib.ap.assert(sparse.sparsegetncols(a)>=n, "SparseMatrixCholeskyBuf: cols(A)<N");
            alglib.ap.assert(ordering>=-3 && ordering<=0, "SparseMatrixCholeskyBuf: invalid Ordering parameter");
            alglib.ap.assert(algo>=0 && algo<=2, "SparseMatrixCholeskyBuf: invalid Algo parameter");
            hqrnd.hqrndrandomize(rs);
            
            //
            // Perform some quick checks.
            // Because sparse matrices are expensive data structures, it is better
            // to perform these checks during the early stages of the factorization.
            //
            result = false;
            if( n<1 )
            {
                return result;
            }
            for(i=0; i<=n-1; i++)
            {
                if( (double)(sparse.sparsegetdiagonal(a, i))<=(double)(0) )
                {
                    return result;
                }
            }
            
            //
            // First, determine appropriate ordering:
            // * for SKS inputs, Ordering=-1 is automatically chosen (overrides user settings)
            //
            if( ordering==0 )
            {
                ordering = -1;
            }
            if( sparse.sparseissks(a) )
            {
                ordering = -1;
            }
            if( ordering==-3 )
            {
                
                //
                // User-supplied ordering.
                // Check its correctness.
                //
                alglib.ap.assert(alglib.ap.len(p0)>=n, "SparseCholeskyX: user-supplied permutation is too short");
                alglib.ap.assert(alglib.ap.len(p1)>=n, "SparseCholeskyX: user-supplied permutation is too short");
                for(i=0; i<=n-1; i++)
                {
                    alglib.ap.assert(p0[i]>=0 && p0[i]<n, "SparseCholeskyX: user-supplied permutation includes values outside of [0,N)");
                    alglib.ap.assert(p1[i]>=0 && p1[i]<n, "SparseCholeskyX: user-supplied permutation includes values outside of [0,N)");
                    alglib.ap.assert(p1[p0[i]]==i, "SparseCholeskyX: user-supplied permutation is inconsistent - P1 is not inverse of P0");
                }
            }
            if( ordering==-2 )
            {
                
                //
                // Use random ordering
                //
                apserv.ivectorsetlengthatleast(ref p0, n);
                apserv.ivectorsetlengthatleast(ref p1, n);
                for(i=0; i<=n-1; i++)
                {
                    p0[i] = i;
                }
                for(i=0; i<=n-1; i++)
                {
                    j = i+hqrnd.hqrnduniformi(rs, n-i);
                    if( j!=i )
                    {
                        k = p0[i];
                        p0[i] = p0[j];
                        p0[j] = k;
                    }
                }
                for(i=0; i<=n-1; i++)
                {
                    p1[p0[i]] = i;
                }
            }
            if( ordering==-1 )
            {
                
                //
                // Use initial ordering
                //
                apserv.ivectorsetlengthatleast(ref p0, n);
                apserv.ivectorsetlengthatleast(ref p1, n);
                for(i=0; i<=n-1; i++)
                {
                    p0[i] = i;
                    p1[i] = i;
                }
            }
            
            //
            // Determine algorithm to use:
            // * for SKS input or output - use SKS solver (overrides user settings)
            // * default is to use Algo=1
            //
            if( algo==0 )
            {
                algo = 1;
            }
            if( sparse.sparseissks(a) || fmt==2 )
            {
                algo = 2;
            }
            //
            // NOTE: only the skyline-based algorithm is implemented in this
            //       version, so Algo is forced to 2 regardless of the choice above.
            //
            algo = 2;
            if( algo==2 )
            {
                
                //
                // Skyline Cholesky with non-skyline output.
                //
                // Call CholeskyX() recursively with Buf.S as output matrix,
                // then perform conversion from SKS to desired format. We can
                // use Buf.S in the recursive call because SKS-to-SKS CholeskyX()
                // does not use this field.
                //
                if( fmt!=2 )
                {
                    result = sparsecholeskyx(a, n, isupper, ref p0, ref p1, -3, algo, 2, buf, buf.s);
                    if( result )
                    {
                        sparse.sparsecopytobuf(buf.s, fmt, c);
                    }
                    return result;
                }
                
                //
                // Skyline Cholesky with skyline output
                //
                if( sparse.sparseissks(a) && ordering==-1 )
                {
                    
                    //
                    // Non-permuted skyline matrix.
                    //
                    // Quickly copy matrix to output buffer without permutation.
                    //
                    // NOTE: Buf.D is used as dummy vector filled with zeros.
                    //
                    apserv.ivectorsetlengthatleast(ref buf.d, n);
                    for(i=0; i<=n-1; i++)
                    {
                        buf.d[i] = 0;
                    }
                    if( isupper )
                    {
                        
                        //
                        // Create strictly upper-triangular matrix,
                        // copy upper triangle of input.
                        //
                        sparse.sparsecreatesksbuf(n, n, buf.d, a.uidx, c);
                        for(i=0; i<=n-1; i++)
                        {
                            t0 = a.ridx[i+1]-a.uidx[i]-1;
                            t1 = a.ridx[i+1]-1;
                            k = c.ridx[i+1]-c.uidx[i]-1;
                            for(j=t0; j<=t1; j++)
                            {
                                c.vals[k] = a.vals[j];
                                k = k+1;
                            }
                        }
                    }
                    else
                    {
                        
                        //
                        // Create strictly lower-triangular matrix,
                        // copy lower triangle of input.
                        //
                        sparse.sparsecreatesksbuf(n, n, a.didx, buf.d, c);
                        for(i=0; i<=n-1; i++)
                        {
                            t0 = a.ridx[i];
                            t1 = a.ridx[i]+a.didx[i];
                            k = c.ridx[i];
                            for(j=t0; j<=t1; j++)
                            {
                                c.vals[k] = a.vals[j];
                                k = k+1;
                            }
                        }
                    }
                }
                else
                {
                    
                    //
                    // Non-identity permutations OR non-skyline input:
                    // * investigate profile of permuted A
                    // * create skyline matrix in output buffer
                    // * copy input with permutation
                    //
                    apserv.ivectorsetlengthatleast(ref buf.d, n);
                    apserv.ivectorsetlengthatleast(ref buf.u, n);
                    for(i=0; i<=n-1; i++)
                    {
                        buf.d[i] = 0;
                        buf.u[i] = 0;
                    }
                    t0 = 0;
                    t1 = 0;
                    while( sparse.sparseenumerate(a, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( (isupper && j>=i) || (!isupper && j<=i) )
                        {
                            i = p1[i];
                            j = p1[j];
                            if( (j<i && isupper) || (j>i && !isupper) )
                            {
                                apserv.swapi(ref i, ref j);
                            }
                            if( i>j )
                            {
                                buf.d[i] = Math.Max(buf.d[i], i-j);
                            }
                            else
                            {
                                buf.u[j] = Math.Max(buf.u[j], j-i);
                            }
                        }
                    }
                    sparse.sparsecreatesksbuf(n, n, buf.d, buf.u, c);
                    t0 = 0;
                    t1 = 0;
                    while( sparse.sparseenumerate(a, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( (isupper && j>=i) || (!isupper && j<=i) )
                        {
                            i = p1[i];
                            j = p1[j];
                            if( (j<i && isupper) || (j>i && !isupper) )
                            {
                                apserv.swapi(ref j, ref i);
                            }
                            sparse.sparserewriteexisting(c, i, j, v);
                        }
                    }
                }
                result = sparsecholeskyskyline(c, n, isupper);
                return result;
            }
            alglib.ap.assert(false, "SparseCholeskyX: internal error - unexpected algorithm");
            return result;
        }
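
For Ordering=-3 the caller must supply P0 together with its inverse P1 so that P1[P0[i]]==i, which the function verifies before factorizing. A small self-contained sketch that builds a valid pair using the same Fisher-Yates shuffle as the Ordering=-2 branch above:

    using System;

    public static class PermutationSketch
    {
        public static void Main()
        {
            int n = 6;
            var rng = new Random(42);
            int[] p0 = new int[n], p1 = new int[n];

            // Fisher-Yates shuffle of the identity, same scheme as the
            // Ordering=-2 branch above.
            for(int i=0; i<n; i++) p0[i] = i;
            for(int i=0; i<n; i++)
            {
                int j = i+rng.Next(n-i);
                int k = p0[i]; p0[i] = p0[j]; p0[j] = k;
            }

            // P1 is the inverse permutation: P1[P0[i]] == i, as required by
            // the Ordering=-3 validity checks in SparseCholeskyX.
            for(int i=0; i<n; i++) p1[p0[i]] = i;

            for(int i=0; i<n; i++)
                if( p1[p0[i]]!=i ) throw new Exception("inconsistent permutation");
            Console.WriteLine("P0/P1 pair is consistent");
        }
    }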
Exemplo n.º 20
0
        /*************************************************************************
        Calculation of all types of errors on subset of dataset.

        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support 
          !
          ! First improvement gives close-to-linear speedup on multicore  systems.
          ! Second improvement gives constant speedup (2-3x depending on your CPU)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend that you carefully read the ALGLIB Reference  Manual,
          ! section 'SMP support', before using the parallel  version  of  this
          ! function.


        INPUT PARAMETERS:
            Network -   network initialized with one of the network creation funcs
            XY      -   original dataset given by sparse matrix;
                        one sample = one row;
                        first NIn columns contain inputs,
                        next NOut columns - desired outputs.
            SetSize -   real size of XY, SetSize>=0;
            Subset  -   subset of SubsetSize elements, array[SubsetSize];
            SubsetSize- number of elements in Subset[] array:
                        * if SubsetSize>0, rows of XY with indices Subset[0]...
                          ...Subset[SubsetSize-1] are processed
                        * if SubsetSize=0, zeros are returned
                        * if SubsetSize<0, entire dataset is  processed;  Subset[]
                          array is ignored in this case.

        OUTPUT PARAMETERS:
            Rep     -   contains all types of errors.


          -- ALGLIB --
             Copyright 04.09.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpallerrorssparsesubset(multilayerperceptron network,
            sparse.sparsematrix xy,
            int setsize,
            int[] subset,
            int subsetsize,
            modelerrors rep)
        {
            int idx0 = 0;
            int idx1 = 0;
            int idxtype = 0;

            alglib.ap.assert(sparse.sparseiscrs(xy), "MLPAllErrorsSparseSubset: XY is not in CRS format.");
            alglib.ap.assert(sparse.sparsegetnrows(xy)>=setsize, "MLPAllErrorsSparseSubset: XY has less than SetSize rows");
            if( setsize>0 )
            {
                if( mlpissoftmax(network) )
                {
                    alglib.ap.assert(sparse.sparsegetncols(xy)>=mlpgetinputscount(network)+1, "MLPAllErrorsSparseSubset: XY has less than NIn+1 columns");
                }
                else
                {
                    alglib.ap.assert(sparse.sparsegetncols(xy)>=mlpgetinputscount(network)+mlpgetoutputscount(network), "MLPAllErrorsSparseSubset: XY has less than NIn+NOut columns");
                }
            }
            if( subsetsize>=0 )
            {
                idx0 = 0;
                idx1 = subsetsize;
                idxtype = 1;
            }
            else
            {
                idx0 = 0;
                idx1 = setsize;
                idxtype = 0;
            }
            mlpallerrorsx(network, network.dummydxy, xy, setsize, 1, subset, idx0, idx1, idxtype, network.buf, rep);
        }
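
A hedged usage sketch of the subset semantics described above (assuming the public wrapper exposes mlpallerrorssparsesubset with an out modelerrors parameter): SubsetSize>0 restricts evaluation to the listed rows, while SubsetSize<0 processes the whole dataset and ignores Subset[]:

    using System;

    public static class SubsetErrorsDemo
    {
        public static void Main()
        {
            // 3-point regression dataset (2 inputs + 1 output), CRS format required.
            alglib.sparsematrix xy;
            alglib.sparsecreate(3, 3, out xy);
            alglib.sparseset(xy, 0, 0, 1.0); alglib.sparseset(xy, 0, 2, 1.0);
            alglib.sparseset(xy, 1, 1, 1.0); alglib.sparseset(xy, 1, 2, 2.0);
            alglib.sparseset(xy, 2, 0, 2.0); alglib.sparseset(xy, 2, 2, 3.0);
            alglib.sparseconverttocrs(xy);

            alglib.multilayerperceptron net;
            alglib.mlpcreate0(2, 1, out net);

            alglib.modelerrors rep;
            int[] subset = { 0, 2 };
            // SubsetSize>0: only rows Subset[0..SubsetSize-1] are processed.
            alglib.mlpallerrorssparsesubset(net, xy, 3, subset, 2, out rep);
            Console.WriteLine("rms over subset = {0}", rep.rmserror);
            // SubsetSize<0: the entire dataset is processed, Subset[] is ignored.
            alglib.mlpallerrorssparsesubset(net, xy, 3, subset, -1, out rep);
            Console.WriteLine("rms over full set = {0}", rep.rmserror);
        }
    }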
Exemplo n.º 21
0
            /*************************************************************************
            This function estimates norm of the sparse M*N matrix A.

            INPUT PARAMETERS:
                State       -   norm estimator state, must be initialized with a  call
                                to NormEstimatorCreate()
                A           -   sparse M*N matrix, must be converted to CRS format
                                prior to calling this function.

            After this function is over you can call NormEstimatorResults() to get
            an estimate of norm(A).

              -- ALGLIB --
                 Copyright 06.12.2011 by Bochkanov Sergey
            *************************************************************************/
            public static void normestimatorestimatesparse(normestimatorstate state,
                sparse.sparsematrix a)
            {
                normestimatorrestart(state);
                while (normestimatoriteration(state))
                {
                    if (state.needmv)
                    {
                        sparse.sparsemv(a, state.x, ref state.mv);
                        continue;
                    }
                    if (state.needmtv)
                    {
                        sparse.sparsemtv(a, state.x, ref state.mtv);
                        continue;
                    }
                }
            }
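
A minimal usage sketch, assuming the public wrapper exposes normestimatorcreate/normestimatorestimatesparse/normestimatorresults with the signatures used below. The matrix must be converted to CRS before the call, as the comment above requires:

    using System;

    public static class NormEstimatorDemo
    {
        public static void Main()
        {
            // 2x2 diagonal sparse matrix A = [[2,0],[0,3]]; its 2-norm is 3.
            alglib.sparsematrix a;
            alglib.sparsecreate(2, 2, out a);
            alglib.sparseset(a, 0, 0, 2.0);
            alglib.sparseset(a, 1, 1, 3.0);
            alglib.sparseconverttocrs(a);          // CRS is required by the estimator

            alglib.normestimatorstate s;
            alglib.normestimatorcreate(2, 2, 15, 15, out s);  // 15 starts, 15 its
            alglib.normestimatorestimatesparse(s, a);
            double nrm;
            alglib.normestimatorresults(s, out nrm);
            Console.WriteLine("||A|| ~ {0}", nrm); // expect a value close to 3
        }
    }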
Exemplo n.º 22
0
 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
 *************************************************************************/
 public static void _pexec_mlpallerrorssparsesubset(multilayerperceptron network,
     sparse.sparsematrix xy,
     int setsize,
     int[] subset,
     int subsetsize,
     modelerrors rep)
 {
     mlpallerrorssparsesubset(network,xy,setsize,subset,subsetsize,rep);
 }
Exemplo n.º 23
0
        /*************************************************************************
        Procedure for solution of A*x=b with sparse A.

        INPUT PARAMETERS:
            State   -   algorithm state
            A       -   sparse M*N matrix in the CRS format (you MUST convert  it
                        to CRS format  by  calling  SparseConvertToCRS()  function
                        BEFORE you pass it to this function).
            B       -   right part, array[M]

        RESULT:
            This function returns no result.
            You can get the solution by calling LinLSQRResults()

          -- ALGLIB --
             Copyright 30.11.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void linlsqrsolvesparse(linlsqrstate state,
            sparse.sparsematrix a,
            double[] b)
        {
            alglib.ap.assert(!state.running, "LinLSQRSolveSparse: you can not call this function when LinLSQRIteration is running");
            alglib.ap.assert(alglib.ap.len(b)>=state.m, "LinLSQRSolveSparse: Length(B)<M");
            alglib.ap.assert(apserv.isfinitevector(b, state.m), "LinLSQRSolveSparse: B contains infinite or NaN values");
            linlsqrsetb(state, b);
            linlsqrrestart(state);
            while( linlsqriteration(state) )
            {
                if( state.needmv )
                {
                    sparse.sparsemv(a, state.x, ref state.mv);
                }
                if( state.needmtv )
                {
                    sparse.sparsemtv(a, state.x, ref state.mtv);
                }
            }
        }
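
A hedged end-to-end sketch of solving a sparse least-squares problem with this function, assuming the usual public wrapper entry points linlsqrcreate/linlsqrsolvesparse/linlsqrresults:

    using System;

    public static class LsqrSparseDemo
    {
        public static void Main()
        {
            // Solve A*x = b in the least-squares sense for a 3x2 sparse A.
            alglib.sparsematrix a;
            alglib.sparsecreate(3, 2, out a);
            alglib.sparseset(a, 0, 0, 1.0);
            alglib.sparseset(a, 1, 1, 1.0);
            alglib.sparseset(a, 2, 0, 1.0); alglib.sparseset(a, 2, 1, 1.0);
            alglib.sparseconverttocrs(a);          // CRS conversion is mandatory

            double[] b = { 1.0, 2.0, 3.0 };
            alglib.linlsqrstate s;
            alglib.linlsqrreport rep;
            double[] x;
            alglib.linlsqrcreate(3, 2, out s);
            alglib.linlsqrsolvesparse(s, a, b);
            alglib.linlsqrresults(s, out x, out rep);
            Console.WriteLine("x = [{0}, {1}]", x[0], x[1]);
        }
    }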
Exemplo n.º 24
0
        /*************************************************************************
        Error of the neural network on subset of sparse dataset.


        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support 
          !
          ! First improvement gives close-to-linear speedup on multicore  systems.
          ! Second improvement gives constant speedup (2-3x depending on your CPU)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend that you carefully read the ALGLIB Reference  Manual,
          ! section 'SMP support', before using the parallel  version  of  this
          ! function.


        INPUT PARAMETERS:
            Network   -     neural network;
            XY        -     training  set,  see  below  for  information  on   the
                            training set format. This function checks  correctness
                            of  the  dataset  (no  NANs/INFs,  class  numbers  are
                            correct) and throws exception when  incorrect  dataset
                            is passed.  Sparse  matrix  must  use  CRS  format for
                            storage.
            SetSize   -     real size of XY, SetSize>=0;
                            it is used when SubsetSize<0;
            Subset    -     subset of SubsetSize elements, array[SubsetSize];
            SubsetSize-     number of elements in Subset[] array:
                            * if SubsetSize>0, rows of XY with indices Subset[0]...
                              ...Subset[SubsetSize-1] are processed
                            * if SubsetSize=0, zeros are returned
                            * if SubsetSize<0, entire dataset is  processed;  Subset[]
                              array is ignored in this case.

        RESULT:
            sum-of-squares error, SUM(sqr(y[i]-desired_y[i]))/2

        DATASET FORMAT:

        This  function  uses  two  different  dataset formats - one for regression
        networks, another one for classification networks.

        For regression networks with NIn inputs and NOut outputs following dataset
        format is used:
        * dataset is given by NPoints*(NIn+NOut) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, next NOut columns are outputs

        For classification networks with NIn inputs and NClasses classes  following
        dataset format is used:
        * dataset is given by NPoints*(NIn+1) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, last column stores class number (from 0 to
          NClasses-1).

          -- ALGLIB --
             Copyright 04.09.2012 by Bochkanov Sergey
        *************************************************************************/
        public static double mlperrorsparsesubset(multilayerperceptron network,
            sparse.sparsematrix xy,
            int setsize,
            int[] subset,
            int subsetsize)
        {
            double result = 0;
            int idx0 = 0;
            int idx1 = 0;
            int idxtype = 0;

            alglib.ap.assert(sparse.sparseiscrs(xy), "MLPErrorSparseSubset: XY is not in CRS format.");
            alglib.ap.assert(sparse.sparsegetnrows(xy)>=setsize, "MLPErrorSparseSubset: XY has less than SetSize rows");
            if( setsize>0 )
            {
                if( mlpissoftmax(network) )
                {
                    alglib.ap.assert(sparse.sparsegetncols(xy)>=mlpgetinputscount(network)+1, "MLPErrorSparseSubset: XY has less than NIn+1 columns");
                }
                else
                {
                    alglib.ap.assert(sparse.sparsegetncols(xy)>=mlpgetinputscount(network)+mlpgetoutputscount(network), "MLPErrorSparseSubset: XY has less than NIn+NOut columns");
                }
            }
            if( subsetsize>=0 )
            {
                idx0 = 0;
                idx1 = subsetsize;
                idxtype = 1;
            }
            else
            {
                idx0 = 0;
                idx1 = setsize;
                idxtype = 0;
            }
            mlpallerrorsx(network, network.dummydxy, xy, setsize, 1, subset, idx0, idx1, idxtype, network.buf, network.err);
            result = math.sqr(network.err.rmserror)*(idx1-idx0)*mlpgetoutputscount(network)/2;
            return result;
        }
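
The return statement above reconstructs the sum-of-squares error from the RMS error via E = rms^2*count*NOut/2, since rms = sqrt(SSE/(count*NOut)) with SSE = SUM(sqr(y-desired)). A tiny self-contained check of that identity:

    using System;

    public static class SseFromRmsSketch
    {
        public static void Main()
        {
            // Residuals for 2 samples with 2 outputs each (count=2, nout=2).
            double[] r = { 0.5, -0.5, 1.0, 0.0 };
            int count = 2, nout = 2;

            double sse = 0;
            foreach(double e in r) sse += e*e;
            sse /= 2;                                 // E = SUM(sqr(residual))/2

            double rms = Math.Sqrt((r[0]*r[0]+r[1]*r[1]+r[2]*r[2]+r[3]*r[3])/(count*nout));
            double viaRms = rms*rms*count*nout/2;     // same formula as the return above

            Console.WriteLine("{0} == {1}", sse, viaRms);   // both print 0.75
        }
    }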
Exemplo n.º 25
0
        /*************************************************************************
        This function runs the QQP solver; it returns  after  the  optimization
        process is completed. The following QP problem is solved:

            min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
            
        subject to boundary constraints.

        IMPORTANT: UNLIKE MANY OTHER SOLVERS, THIS FUNCTION DOES NOT  REQUIRE  YOU
                   TO INITIALIZE STATE OBJECT. IT CAN BE AUTOMATICALLY INITIALIZED
                   DURING SOLUTION PROCESS.

        INPUT PARAMETERS:
            AC          -   for dense problems (AKind=0) A-term of CQM object
                            contains system matrix. Other terms are unspecified
                            and should not be referenced.
            SparseAC    -   for sparse problems (AKind=1)  the  system  matrix  is
                            given by this sparse matrix. Other representations are
                            unspecified and should not be referenced.
            AKind       -   format of the A-term:
                            * 0 for dense matrix (AC)
                            * 1 for sparse matrix (SparseAC)
            SparseUpper -   which triangle of SparseAC stores matrix  -  upper  or
                            lower one (for dense matrices  this  parameter  is
                            ignored).
            BC          -   linear term, array[NC]
            BndLC       -   lower bound, array[NC]
            BndUC       -   upper bound, array[NC]
            SC          -   scale vector, array[NC]:
                            * I-th element contains scale of I-th variable,
                            * SC[I]>0
            XOriginC    -   origin term, array[NC]. Can be zero.
            NC          -   number of variables in the  original  formulation  (no
                            slack variables).
            CLEICC      -   linear equality/inequality constraints. Present version
                            of this function does NOT provide  publicly  available
                            support for linear constraints. This feature  will  be
                            introduced in the future versions of the function.
            NEC, NIC    -   number of equality/inequality constraints.
                            MUST BE ZERO IN THE CURRENT VERSION!!!
            Settings    -   QQPSettings object initialized by one of the initialization
                            functions.
            SState      -   object which stores temporaries:
                            * uninitialized object is automatically initialized
                            * previously allocated memory is reused as much
                              as possible
            XS          -   initial point, array[NC]
            
            
        OUTPUT PARAMETERS:
            XS          -   last point
            TerminationType-termination type:
                            * negative values indicate failure  (for  example,
                              -3 is returned for inconsistent box constraints,
                              see the checks below)
                            * positive values indicate successful  completion
                              (see ALGLIB Reference Manual for specific codes)

          -- ALGLIB --
             Copyright 14.05.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void qqpoptimize(cqmodels.convexquadraticmodel ac,
            sparse.sparsematrix sparseac,
            int akind,
            bool sparseupper,
            double[] bc,
            double[] bndlc,
            double[] bnduc,
            double[] sc,
            double[] xoriginc,
            int nc,
            double[,] cleicc,
            int nec,
            int nic,
            qqpsettings settings,
            qqpbuffers sstate,
            double[] xs,
            ref int terminationtype)
        {
            int n = 0;
            int nmain = 0;
            int i = 0;
            int j = 0;
            int k = 0;
            double v = 0;
            double vv = 0;
            double d2 = 0;
            double d1 = 0;
            int d1est = 0;
            int d2est = 0;
            bool needact = new bool();
            double reststp = 0;
            double fullstp = 0;
            double stpmax = 0;
            double stp = 0;
            int stpcnt = 0;
            int cidx = 0;
            double cval = 0;
            int cgcnt = 0;
            int cgmax = 0;
            int newtcnt = 0;
            int sparsesolver = 0;
            double beta = 0;
            bool b = new bool();
            double fprev = 0;
            double fcur = 0;
            int i_ = 0;

            terminationtype = 0;

            
            //
            // Primary checks
            //
            alglib.ap.assert(akind==0 || akind==1, "QQPOptimize: incorrect AKind");
            sstate.nmain = nc;
            sstate.nslack = nic;
            sstate.n = nc+nic;
            sstate.nec = nec;
            sstate.nic = nic;
            sstate.akind = akind;
            n = sstate.n;
            nmain = sstate.nmain;
            terminationtype = 0;
            sstate.repinneriterationscount = 0;
            sstate.repouteriterationscount = 0;
            sstate.repncholesky = 0;
            sstate.repncupdates = 0;
            
            //
            // Several checks
            // * matrix size
            // * scale vector
            // * consistency of bound constraints
            // * consistency of settings
            //
            if( akind==1 )
            {
                alglib.ap.assert(sparse.sparsegetnrows(sparseac)==nmain, "QQPOptimize: rows(SparseAC)<>NMain");
                alglib.ap.assert(sparse.sparsegetncols(sparseac)==nmain, "QQPOptimize: cols(SparseAC)<>NMain");
            }
            for(i=0; i<=nmain-1; i++)
            {
                alglib.ap.assert(math.isfinite(sc[i]) && (double)(sc[i])>(double)(0), "QQPOptimize: incorrect scale");
            }
            for(i=0; i<=nmain-1; i++)
            {
                if( math.isfinite(bndlc[i]) && math.isfinite(bnduc[i]) )
                {
                    if( (double)(bndlc[i])>(double)(bnduc[i]) )
                    {
                        terminationtype = -3;
                        return;
                    }
                }
            }
            alglib.ap.assert(settings.cgphase || settings.cnphase, "QQPOptimize: both phases (CG and Newton) are inactive");
            
            //
            // Allocate data structures
            //
            apserv.rvectorsetlengthatleast(ref sstate.bndl, n);
            apserv.rvectorsetlengthatleast(ref sstate.bndu, n);
            apserv.bvectorsetlengthatleast(ref sstate.havebndl, n);
            apserv.bvectorsetlengthatleast(ref sstate.havebndu, n);
            apserv.rvectorsetlengthatleast(ref sstate.xs, n);
            apserv.rvectorsetlengthatleast(ref sstate.xp, n);
            apserv.rvectorsetlengthatleast(ref sstate.gc, n);
            apserv.rvectorsetlengthatleast(ref sstate.cgc, n);
            apserv.rvectorsetlengthatleast(ref sstate.cgp, n);
            apserv.rvectorsetlengthatleast(ref sstate.dc, n);
            apserv.rvectorsetlengthatleast(ref sstate.dp, n);
            apserv.rvectorsetlengthatleast(ref sstate.tmp0, n);
            apserv.rvectorsetlengthatleast(ref sstate.stpbuf, 15);
            sactivesets.sasinit(n, sstate.sas);
            
            //
            // Scale/shift problem coefficients:
            //
            //     min { 0.5*(x-x0)'*A*(x-x0) + b'*(x-x0) }
            //
            // becomes (after transformation "x = S*y+x0")
            //
            //     min { 0.5*y'*(S*A*S)*y + (S*b)'*y }
            //
            // Modified A_mod=S*A*S and b_mod=S*b are
            // stored into SState.DenseA and SState.B (the shift
            // x0 drops out because the objective is expressed in
            // terms of x-x0; see B[i]=SC[i]*BC[i] below).
            //
            // NOTE: DenseA/DenseB are arrays whose lengths are
            //       NMain, not N=NMain+NSlack! We store the reduced
            //       matrix and vector because the extended parts (last
            //       NSlack rows/columns) are exactly zero.
            //       
            //
            apserv.rvectorsetlengthatleast(ref sstate.b, nmain);
            for(i=0; i<=nmain-1; i++)
            {
                sstate.b[i] = sc[i]*bc[i];
            }
            if( akind==0 )
            {
                
                //
                // Dense QP problem - just copy and scale.
                //
                apserv.rmatrixsetlengthatleast(ref sstate.densea, nmain, nmain);
                cqmodels.cqmgeta(ac, ref sstate.densea);
                sstate.absamax = 0;
                sstate.absasum = 0;
                sstate.absasum2 = 0;
                for(i=0; i<=nmain-1; i++)
                {
                    for(j=0; j<=nmain-1; j++)
                    {
                        v = sc[i]*sstate.densea[i,j]*sc[j];
                        sstate.densea[i,j] = v;
                        sstate.absamax = Math.Max(sstate.absamax, v);
                        sstate.absasum = sstate.absasum+v;
                        sstate.absasum2 = sstate.absasum2+v*v;
                    }
                }
            }
            else
            {
                
                //
                // Sparse QP problem - a bit tricky. Depending on format of the
                // input we use different strategies for copying matrix:
                // * SKS matrices are copied to SKS format
                // * anything else is copied to CRS format
                //
                alglib.ap.assert(akind==1, "QQPOptimize: unexpected AKind (internal error)");
                sparse.sparsecopytosksbuf(sparseac, sstate.sparsea);
                if( sparseupper )
                {
                    sparse.sparsetransposesks(sstate.sparsea);
                }
                sstate.sparseupper = false;
                sstate.absamax = 0;
                sstate.absasum = 0;
                sstate.absasum2 = 0;
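                //
                // Traverse SKS storage of the lower triangle: row I keeps its
                // subdiagonal band for columns I-DIdx[I]..I, stored contiguously
                // in Vals[] starting at offset RIdx[I].
                //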
                for(i=0; i<=n-1; i++)
                {
                    k = sstate.sparsea.ridx[i];
                    for(j=i-sstate.sparsea.didx[i]; j<=i; j++)
                    {
                        v = sc[i]*sstate.sparsea.vals[k]*sc[j];
                        sstate.sparsea.vals[k] = v;
                        if( i==j )
                        {
                            
                            //
                            // Diagonal terms are counted only once
                            //
                            sstate.absamax = Math.Max(sstate.absamax, v);
                            sstate.absasum = sstate.absasum+v;
                            sstate.absasum2 = sstate.absasum2+v*v;
                        }
                        else
                        {
                            
                            //
                            // Offdiagonal terms are counted twice
                            //
                            sstate.absamax = Math.Max(sstate.absamax, v);
                            sstate.absasum = sstate.absasum+2*v;
                            sstate.absasum2 = sstate.absasum2+2*v*v;
                        }
                        k = k+1;
                    }
                }
            }
            
            //
            // Load box constraints into State structure.
            //
            // We apply transformation to variables: y=(x-x_origin)/s,
            // each of the constraints is appropriately shifted/scaled.
            //
            for(i=0; i<=nmain-1; i++)
            {
                sstate.havebndl[i] = math.isfinite(bndlc[i]);
                if( sstate.havebndl[i] )
                {
                    sstate.bndl[i] = (bndlc[i]-xoriginc[i])/sc[i];
                }
                else
                {
                    alglib.ap.assert(Double.IsNegativeInfinity(bndlc[i]), "QQPOptimize: incorrect lower bound");
                    sstate.bndl[i] = Double.NegativeInfinity;
                }
                sstate.havebndu[i] = math.isfinite(bnduc[i]);
                if( sstate.havebndu[i] )
                {
                    sstate.bndu[i] = (bnduc[i]-xoriginc[i])/sc[i];
                }
                else
                {
                    alglib.ap.assert(Double.IsPositiveInfinity(bnduc[i]), "QQPOptimize: incorrect upper bound");
                    sstate.bndu[i] = Double.PositiveInfinity;
                }
            }
            for(i=nmain; i<=n-1; i++)
            {
                sstate.havebndl[i] = true;
                sstate.bndl[i] = 0.0;
                sstate.havebndu[i] = false;
                sstate.bndu[i] = Double.PositiveInfinity;
            }
            
            //
            // Shift/scale linear constraints with transformation y=(x-x_origin)/s:
            // * constraint "c[i]'*x = b[i]" becomes "(S*c[i])'*y = b[i]-c[i]'*x_origin".
            // * after constraint is loaded into SState.CLEIC, it is additionally normalized
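            // * derivation: substituting x = S*y+x_origin into c[i]'*x = b[i]
            //   gives sum_j (s[j]*c[i,j])*y[j] = b[i] - c[i]'*x_origin, which
            //   is exactly what the loop below stores before row normalization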
            //
            apserv.rmatrixsetlengthatleast(ref sstate.cleic, nec+nic, n+1);
            for(i=0; i<=nec+nic-1; i++)
            {
                v = 0;
                vv = 0;
                for(j=0; j<=nmain-1; j++)
                {
                    sstate.cleic[i,j] = cleicc[i,j]*sc[j];
                    vv = vv+math.sqr(sstate.cleic[i,j]);
                    v = v+cleicc[i,j]*xoriginc[j];
                }
                vv = Math.Sqrt(vv);
                for(j=nmain; j<=n-1; j++)
                {
                    sstate.cleic[i,j] = 0.0;
                }
                sstate.cleic[i,n] = cleicc[i,nmain]-v;
                if( i>=nec )
                {
                    sstate.cleic[i,nmain+i-nec] = 1.0;
                }
                if( (double)(vv)>(double)(0) )
                {
                    for(j=0; j<=n; j++)
                    {
                        sstate.cleic[i,j] = sstate.cleic[i,j]/vv;
                    }
                }
            }
            
            //
            // Process initial point:
            // * first NMain components are equal to XS-XOriginC
            // * last NIC components are deduced from linear constraints
            // * make sure that boundary constraints are preserved by transformation
            //
            for(i=0; i<=nmain-1; i++)
            {
                sstate.xs[i] = (xs[i]-xoriginc[i])/sc[i];
                if( sstate.havebndl[i] && (double)(sstate.xs[i])<(double)(sstate.bndl[i]) )
                {
                    sstate.xs[i] = sstate.bndl[i];
                }
                if( sstate.havebndu[i] && (double)(sstate.xs[i])>(double)(sstate.bndu[i]) )
                {
                    sstate.xs[i] = sstate.bndu[i];
                }
                if( sstate.havebndl[i] && (double)(xs[i])==(double)(bndlc[i]) )
                {
                    sstate.xs[i] = sstate.bndl[i];
                }
                if( sstate.havebndu[i] && (double)(xs[i])==(double)(bnduc[i]) )
                {
                    sstate.xs[i] = sstate.bndu[i];
                }
            }
            for(i=0; i<=nic-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=nmain-1;i_++)
                {
                    v += sstate.xs[i_]*sstate.cleic[nec+i,i_];
                }
                sstate.xs[nmain+i] = Math.Max(sstate.cleic[nec+i,n]-v, 0.0);
            }
            
            //
            // Prepare "active set" structure
            //
            sactivesets.sassetbc(sstate.sas, sstate.bndl, sstate.bndu);
            sactivesets.sassetlcx(sstate.sas, sstate.cleic, 0, 0);
            if( !sactivesets.sasstartoptimization(sstate.sas, sstate.xs) )
            {
                terminationtype = -3;
                return;
            }
            
            //
            // Select sparse direct solver
            //
            if( akind==1 )
            {
                sparsesolver = settings.sparsesolver;
                if( sparsesolver==0 )
                {
                    sparsesolver = 1;
                }
                if( sparse.sparseissks(sstate.sparsea) )
                {
                    sparsesolver = 2;
                }
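                //
                // NOTE: the assignment below unconditionally forces the SKS-based
                //       solver. Since the matrix was copied into SKS storage above,
                //       SparseIsSKS() always holds here, so the selection logic
                //       above is effectively redundant.
                //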
                sparsesolver = 2;
                alglib.ap.assert(sparsesolver==1 || sparsesolver==2, "QQPOptimize: incorrect SparseSolver");
            }
            else
            {
                sparsesolver = 0;
            }
            
            //
            // Main loop.
            //
            // The following variables are used:
            // * GC stores current gradient (unconstrained)
            // * CGC stores current gradient (constrained)
            // * DC stores current search direction
            // * CGP stores constrained gradient at previous point
            //   (zero on initial entry)
            // * DP stores previous search direction
            //   (zero on initial entry)
            //
            cgmax = settings.cgminits;
            sstate.repinneriterationscount = 0;
            sstate.repouteriterationscount = 0;
            while( true )
            {
                if( settings.maxouterits>0 && sstate.repouteriterationscount>=settings.maxouterits )
                {
                    terminationtype = 5;
                    break;
                }
                if( sstate.repouteriterationscount>0 )
                {
                    
                    //
                    // Check EpsF- and EpsX-based stopping criteria.
                    // Because the problem was already scaled, we do not scale the step before checking its length.
                    // NOTE: these checks are performed only after at least one outer iteration was made.
                    //
                    if( (double)(settings.epsf)>(double)(0) )
                    {
                        
                        //
                        // NOTE 1: here we rely on the fact that ProjectedTargetFunction() ignores D when Stp=0
                        // NOTE 2: the code below handles the situation when an update increases the function value instead
                        //         of decreasing it.
                        //
                        fprev = projectedtargetfunction(sstate, sstate.xp, sstate.dc, 0.0, ref sstate.tmp0);
                        fcur = projectedtargetfunction(sstate, sstate.sas.xc, sstate.dc, 0.0, ref sstate.tmp0);
                        if( (double)(fprev-fcur)<=(double)(settings.epsf*Math.Max(Math.Abs(fprev), Math.Max(Math.Abs(fcur), 1.0))) )
                        {
                            terminationtype = 1;
                            break;
                        }
                    }
                    if( (double)(settings.epsx)>(double)(0) )
                    {
                        v = 0.0;
                        for(i=0; i<=n-1; i++)
                        {
                            v = v+math.sqr(sstate.xp[i]-sstate.sas.xc[i]);
                        }
                        if( (double)(Math.Sqrt(v))<=(double)(settings.epsx) )
                        {
                            terminationtype = 2;
                            break;
                        }
                    }
                }
                apserv.inc(ref sstate.repouteriterationscount);
                for(i_=0; i_<=n-1;i_++)
                {
                    sstate.xp[i_] = sstate.sas.xc[i_];
                }
                if( !settings.cgphase )
                {
                    cgmax = 0;
                }
                for(i=0; i<=n-1; i++)
                {
                    sstate.cgp[i] = 0.0;
                    sstate.dp[i] = 0.0;
                }
                for(cgcnt=0; cgcnt<=cgmax-1; cgcnt++)
                {
                    
                    //
                    // Calculate unconstrained gradient GC for "extended" QP problem
                    // Determine active set, current constrained gradient CGC.
                    // Check gradient-based stopping condition.
                    //
                    // NOTE: because problem was scaled, we do not have to apply scaling
                    //       to gradient before checking stopping condition.
                    //
                    targetgradient(sstate, sstate.sas.xc, ref sstate.gc);
                    sactivesets.sasreactivateconstraints(sstate.sas, sstate.gc);
                    for(i_=0; i_<=n-1;i_++)
                    {
                        sstate.cgc[i_] = sstate.gc[i_];
                    }
                    sactivesets.sasconstraineddirection(sstate.sas, ref sstate.cgc);
                    v = 0.0;
                    for(i_=0; i_<=n-1;i_++)
                    {
                        v += sstate.cgc[i_]*sstate.cgc[i_];
                    }
                    if( (double)(Math.Sqrt(v))<=(double)(settings.epsg) )
                    {
                        terminationtype = 4;
                        break;
                    }
                    
                    //
                    // Prepare search direction DC and explore it.
                    //
                    // We try to use CGP/DP to prepare a conjugate gradient step,
                    // but we resort to a steepest descent step (Beta=0) in case
                    // we are at the I-th boundary but DP[I]<>0.
                    //
                    // Such an approach allows us to ALWAYS have a feasible DC,
                    // with guaranteed compatibility with both the feasible area
                    // and the current active set.
                    //
                    // An automatic CG reset is performed every time DP is
                    // incompatible with the current active set and/or the
                    // feasible area. We also perform a reset every
                    // QuickQPRestartCG iterations.
                    //
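                    // NOTE: Beta computed below is the Fletcher-Reeves coefficient
                    //       |CGC|^2/|CGP|^2 - the ratio of the squared norms of the
                    //       current and previous constrained gradients.
                    //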
                    for(i_=0; i_<=n-1;i_++)
                    {
                        sstate.dc[i_] = -sstate.cgc[i_];
                    }
                    v = 0.0;
                    vv = 0.0;
                    b = false;
                    for(i=0; i<=n-1; i++)
                    {
                        v = v+sstate.cgc[i]*sstate.cgc[i];
                        vv = vv+sstate.cgp[i]*sstate.cgp[i];
                        b = b || ((sstate.havebndl[i] && (double)(sstate.sas.xc[i])==(double)(sstate.bndl[i])) && (double)(sstate.dp[i])!=(double)(0));
                        b = b || ((sstate.havebndu[i] && (double)(sstate.sas.xc[i])==(double)(sstate.bndu[i])) && (double)(sstate.dp[i])!=(double)(0));
                    }
                    b = b || (double)(vv)==(double)(0);
                    b = b || cgcnt%quickqprestartcg==0;
                    if( !b )
                    {
                        beta = v/vv;
                    }
                    else
                    {
                        beta = 0.0;
                    }
                    for(i_=0; i_<=n-1;i_++)
                    {
                        sstate.dc[i_] = sstate.dc[i_] + beta*sstate.dp[i_];
                    }
                    sactivesets.sasconstraineddirection(sstate.sas, ref sstate.dc);
                    sactivesets.sasexploredirection(sstate.sas, sstate.dc, ref stpmax, ref cidx, ref cval);
                    
                    //
                    // Build quadratic model of F along descent direction:
                    //
                    //     F(xc+alpha*D) = D2*alpha^2 + D1*alpha
                    //
                    // Terminate algorithm if needed.
                    //
                    // NOTE: we do not maintain constant term D0
                    //
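                    // NOTE: for D2>0 this model is minimized at Alpha = -D1/(2*D2),
                    //       which is the FullStp computed below.
                    //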
                    quadraticmodel(sstate, sstate.sas.xc, sstate.dc, sstate.gc, ref d1, ref d1est, ref d2, ref d2est);
                    if( (double)(d1)==(double)(0) && (double)(d2)==(double)(0) )
                    {
                        
                        //
                        // D1 and D2 are exactly zero, success.
                        // After this if-then we assume that D is non-zero.
                        //
                        terminationtype = 4;
                        break;
                    }
                    if( d1est>=0 )
                    {
                        
                        //
                        // Numerical noise is too large; it means that we are close
                        // to the minimum - and that further improvement is impossible.
                        //
                        // After this if-then we assume that D1 is definitely negative
                        // (even in the presence of numerical errors).
                        //
                        terminationtype = 7;
                        break;
                    }
                    if( d2est<=0 && cidx<0 )
                    {
                        
                        //
                        // Function is unbounded from below:
                        // * D1<0 (verified by previous block)
                        // * D2Est<=0, which means that either D2<0 - or it cannot
                        //   be reliably distinguished from zero.
                        // * step is unconstrained
                        //
                        // If these conditions hold, we abnormally terminate the QP
                        // algorithm with return code -4
                        //
                        terminationtype = -4;
                        break;
                    }
                    
                    //
                    // Perform step along DC.
                    //
                    // In this block of code we maintain two step lengths:
                    // * RestStp -  restricted step, the maximum step length along DC
                    //              which does not violate constraints
                    // * FullStp -  step length along DC which minimizes the quadratic
                    //              function without taking constraints into account.
                    //              If the problem is unbounded from below without
                    //              constraints, FullStp is forced to be RestStp.
                    //
                    // So, if function is convex (D2>0):
                    // * FullStp = -D1/(2*D2)
                    // * RestStp = restricted FullStp
                    // * 0<=RestStp<=FullStp
                    //
                    // If function is non-convex, but bounded from below under constraints:
                    // * RestStp = step length subject to constraints
                    // * FullStp = RestStp
                    //
                    // After RestStp and FullStp are initialized, we generate several trial
                    // steps which are different multiples of RestStp and FullStp.
                    //
                    if( d2est>0 )
                    {
                        alglib.ap.assert((double)(d1)<(double)(0), "QQPOptimize: internal error");
                        fullstp = -(d1/(2*d2));
                        needact = (double)(fullstp)>=(double)(stpmax);
                        if( needact )
                        {
                            alglib.ap.assert(alglib.ap.len(sstate.stpbuf)>=3, "QQPOptimize: StpBuf overflow");
                            reststp = stpmax;
                            stp = reststp;
                            sstate.stpbuf[0] = reststp*4;
                            sstate.stpbuf[1] = fullstp;
                            sstate.stpbuf[2] = fullstp/4;
                            stpcnt = 3;
                        }
                        else
                        {
                            reststp = fullstp;
                            stp = fullstp;
                            stpcnt = 0;
                        }
                    }
                    else
                    {
                        alglib.ap.assert(cidx>=0, "QQPOptimize: internal error");
                        alglib.ap.assert(alglib.ap.len(sstate.stpbuf)>=2, "QQPOptimize: StpBuf overflow");
                        reststp = stpmax;
                        fullstp = stpmax;
                        stp = reststp;
                        needact = true;
                        sstate.stpbuf[0] = 4*reststp;
                        stpcnt = 1;
                    }
                    findbeststepandmove(sstate, sstate.sas, sstate.dc, stp, needact, cidx, cval, sstate.stpbuf, stpcnt, ref sstate.activated, ref sstate.tmp0);
                    
                    //
                    // Update CG information.
                    //
                    for(i_=0; i_<=n-1;i_++)
                    {
                        sstate.dp[i_] = sstate.dc[i_];
                    }
                    for(i_=0; i_<=n-1;i_++)
                    {
                        sstate.cgp[i_] = sstate.cgc[i_];
                    }
                    
                    //
                    // Update iterations counter
                    //
                    sstate.repinneriterationscount = sstate.repinneriterationscount+1;
                }
                if( terminationtype!=0 )
                {
                    break;
                }
                cgmax = settings.cgmaxits;
                
                //
                // Generate YIdx - a reordering of variables for the constrained Newton phase.
                // Free variables come first, fixed ones are last.
                //
                newtcnt = 0;
                while( true )
                {
                    
                    //
                    // Skip iteration if constrained Newton is turned off.
                    //
                    if( !settings.cnphase )
                    {
                        break;
                    }
                    
                    //
                    // At the first iteration   - build Cholesky decomposition of Hessian.
                    // At subsequent iterations - refine Hessian by adding new constraints.
                    //
                    // The loop is terminated in the following cases:
                    // * the Hessian is not positive definite subject to the current
                    //   constraints (termination during the initial decomposition)
                    // * no new constraints were activated
                    //   (termination during update)
                    // * all constraints were activated during the last step
                    //   (termination during update)
                    // * CNMaxUpdates updates were performed on the matrix
                    //   (termination during update)
                    //
                    if( newtcnt==0 )
                    {
                        
                        //
                        // Perform the initial Newton step. If the Cholesky decomposition
                        // fails, increase the number of CG iterations to CGMaxIts - it
                        // should help us find a set of constraints which will make the
                        // matrix positive definite.
                        //
                        b = cnewtonbuild(sstate, sparsesolver, ref sstate.repncholesky);
                        if( b )
                        {
                            cgmax = settings.cgminits;
                        }
                    }
                    else
                    {
                        b = cnewtonupdate(sstate, settings, ref sstate.repncupdates);
                    }
                    if( !b )
                    {
                        break;
                    }
                    apserv.inc(ref newtcnt);
                    
                    //
                    // Calculate gradient GC.
                    //
                    targetgradient(sstate, sstate.sas.xc, ref sstate.gc);
                    
                    //
                    // Bound-constrained Newton step
                    //
                    for(i=0; i<=n-1; i++)
                    {
                        sstate.dc[i] = sstate.gc[i];
                    }
                    if( !cnewtonstep(sstate, settings, sstate.dc) )
                    {
                        break;
                    }
                    quadraticmodel(sstate, sstate.sas.xc, sstate.dc, sstate.gc, ref d1, ref d1est, ref d2, ref d2est);
                    if( d1est>=0 || d2est<=0 )
                    {
                        break;
                    }
                    alglib.ap.assert((double)(d1)<(double)(0), "QQPOptimize: internal error");
                    fullstp = -(d1/(2*d2));
                    sactivesets.sasexploredirection(sstate.sas, sstate.dc, ref stpmax, ref cidx, ref cval);
                    needact = (double)(fullstp)>=(double)(stpmax);
                    if( needact )
                    {
                        alglib.ap.assert(alglib.ap.len(sstate.stpbuf)>=3, "QQPOptimize: StpBuf overflow");
                        reststp = stpmax;
                        stp = reststp;
                        sstate.stpbuf[0] = reststp*4;
                        sstate.stpbuf[1] = fullstp;
                        sstate.stpbuf[2] = fullstp/4;
                        stpcnt = 3;
                    }
                    else
                    {
                        reststp = fullstp;
                        stp = fullstp;
                        stpcnt = 0;
                    }
                    findbeststepandmove(sstate, sstate.sas, sstate.dc, stp, needact, cidx, cval, sstate.stpbuf, stpcnt, ref sstate.activated, ref sstate.tmp0);
                }
                if( terminationtype!=0 )
                {
                    break;
                }
            }
            
            //
            // Stop optimization and unpack results.
            //
            // Add XOriginC to XS and make sure that boundary constraints are
            // both (a) satisfied and (b) preserved. The former means that the
            // "shifted" point is feasible, while the latter means that a point
            // which was exactly at the boundary before the shift will be exactly
            // at the boundary after it.
            //
            sactivesets.sasstopoptimization(sstate.sas);
            for(i=0; i<=nmain-1; i++)
            {
                xs[i] = sc[i]*sstate.sas.xc[i]+xoriginc[i];
                if( sstate.havebndl[i] && (double)(xs[i])<(double)(bndlc[i]) )
                {
                    xs[i] = bndlc[i];
                }
                if( sstate.havebndu[i] && (double)(xs[i])>(double)(bnduc[i]) )
                {
                    xs[i] = bnduc[i];
                }
                if( sstate.havebndl[i] && (double)(sstate.sas.xc[i])==(double)(sstate.bndl[i]) )
                {
                    xs[i] = bndlc[i];
                }
                if( sstate.havebndu[i] && (double)(sstate.sas.xc[i])==(double)(sstate.bndu[i]) )
                {
                    xs[i] = bnduc[i];
                }
            }
        }
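
        /*************************************************************************
        Usage sketch, for illustration only (not part of the original listing).
        QQPOptimize above is an internal routine; end users normally reach the
        QuickQP algorithm through the public MinQP interface. The sequence below
        assumes the standard alglib C# wrappers (MinQPCreate, MinQPSetAlgoQuickQP,
        etc.); the tolerance values are arbitrary.
        *************************************************************************/
        public static void quickqpusagesketch()
        {
            //
            // Minimize f(x) = 0.5*x'*A*x + b'*x subject to x>=0,
            // with A = [[2,0],[0,2]] and b = [-2,-6]; the solution is x = (1,3).
            //
            double[,] a = new double[,]{{2.0,0.0},{0.0,2.0}};
            double[] b = new double[]{-2.0,-6.0};
            double[] bndl = new double[]{0.0,0.0};
            double[] bndu = new double[]{Double.PositiveInfinity,Double.PositiveInfinity};
            double[] x;
            alglib.minqpstate state;
            alglib.minqpreport rep;
            alglib.minqpcreate(2, out state);
            alglib.minqpsetquadraticterm(state, a);
            alglib.minqpsetlinearterm(state, b);
            alglib.minqpsetbc(state, bndl, bndu);
            alglib.minqpsetscale(state, new double[]{1.0,1.0});
            alglib.minqpsetalgoquickqp(state, 0.0, 0.0, 1.0E-9, 0, true);
            alglib.minqpoptimize(state);
            alglib.minqpresults(state, out x, out rep);
        }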
Exemplo n.º 26
0
 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
 *************************************************************************/
 public static double _pexec_mlperrorsparsesubset(multilayerperceptron network,
     sparse.sparsematrix xy,
     int setsize,
     int[] subset,
     int subsetsize)
 {
     return mlperrorsparsesubset(network,xy,setsize,subset,subsetsize);
 }
Exemplo n.º 27
0
        /*************************************************************************
        This function sets the sparse quadratic term for the QP  solver.  By
        default, the quadratic term is zero.

        IMPORTANT:

        This solver minimizes the following function:
            f(x) = 0.5*x'*A*x + b'*x.
        Note that the quadratic term has a 0.5 factor in front of it.  So  if  you
        want to minimize
            f(x) = x^2 + x
        you should rewrite your problem as follows:
            f(x) = 0.5*(2*x^2) + x
        and your matrix A will be equal to [[2.0]], not to [[1.0]]

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            A       -   matrix, array[N,N]
            IsUpper -   (optional) storage type:
                        * if True, symmetric matrix  A  is  given  by  its  upper
                          triangle, and the lower triangle isn’t used
                        * if False, symmetric matrix  A  is  given  by  its lower
                          triangle, and the upper triangle isn’t used
                        * if not given, both lower and upper  triangles  must  be
                          filled.

          -- ALGLIB --
             Copyright 11.01.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void minqpsetquadratictermsparse(minqpstate state,
            sparse.sparsematrix a,
            bool isupper)
        {
            int n = 0;
            int t0 = 0;
            int t1 = 0;
            int i = 0;
            int j = 0;
            double v = 0;

            n = state.n;
            alglib.ap.assert(sparse.sparsegetnrows(a)==n, "MinQPSetQuadraticTermSparse: Rows(A)<>N");
            alglib.ap.assert(sparse.sparsegetncols(a)==n, "MinQPSetQuadraticTermSparse: Cols(A)<>N");
            sparse.sparsecopytocrsbuf(a, state.sparsea);
            state.sparseaupper = isupper;
            state.akind = 1;
            
            //
            // Estimate norm of A
            // (it will be used later in the quadratic penalty function)
            //
            state.absamax = 0;
            state.absasum = 0;
            state.absasum2 = 0;
            t0 = 0;
            t1 = 0;
            while( sparse.sparseenumerate(a, ref t0, ref t1, ref i, ref j, ref v) )
            {
                if( i==j )
                {
                    
                    //
                    // Diagonal terms are counted only once
                    //
                    state.absamax = Math.Max(state.absamax, v);
                    state.absasum = state.absasum+v;
                    state.absasum2 = state.absasum2+v*v;
                }
                if( (j>i && isupper) || (j<i && !isupper) )
                {
                    
                    //
                    // Offdiagonal terms are counted twice
                    //
                    state.absamax = Math.Max(state.absamax, v);
                    state.absasum = state.absasum+2*v;
                    state.absasum2 = state.absasum2+2*v*v;
                }
            }
        }
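
        /*************************************************************************
        Usage sketch, for illustration only (not part of the original listing):
        building a sparse quadratic term and passing it to the solver, assuming
        the standard alglib C# wrappers. For f(x) = x1^2 + x2^2 the matrix must
        be [[2,0],[0,2]] because of the 0.5 factor discussed above.
        *************************************************************************/
        public static void sparsequadratictermsketch(alglib.minqpstate state)
        {
            alglib.sparsematrix a;
            alglib.sparsecreate(2, 2, out a);   // hash-table storage, good for random writes
            alglib.sparseset(a, 0, 0, 2.0);     // diagonal of A; upper triangle used below
            alglib.sparseset(a, 1, 1, 2.0);
            alglib.sparseconverttocrs(a);       // convert to a solver-friendly format
            alglib.minqpsetquadratictermsparse(state, a, true);
        }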
Exemplo n.º 28
0
        /*************************************************************************
        Calculation of all types of errors at once for a subset or  full  dataset,
        which can be represented in different formats.

        THIS INTERNAL FUNCTION IS NOT INTENDED TO BE USED BY ALGLIB USERS!

          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpallerrorsx(multilayerperceptron network,
            double[,] densexy,
            sparse.sparsematrix sparsexy,
            int datasetsize,
            int datasettype,
            int[] idx,
            int subset0,
            int subset1,
            int subsettype,
            alglib.smp.shared_pool buf,
            modelerrors rep)
        {
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int rowsize = 0;
            bool iscls = new bool();
            int srcidx = 0;
            int cstart = 0;
            int csize = 0;
            int j = 0;
            hpccores.mlpbuffers pbuf = null;
            int len0 = 0;
            int len1 = 0;
            modelerrors rep0 = new modelerrors();
            modelerrors rep1 = new modelerrors();
            int i_ = 0;
            int i1_ = 0;

            alglib.ap.assert(datasetsize>=0, "MLPAllErrorsX: SetSize<0");
            alglib.ap.assert(datasettype==0 || datasettype==1, "MLPAllErrorsX: DatasetType is incorrect");
            alglib.ap.assert(subsettype==0 || subsettype==1, "MLPAllErrorsX: SubsetType is incorrect");
            
            //
            // Determine network properties
            //
            mlpproperties(network, ref nin, ref nout, ref wcount);
            iscls = mlpissoftmax(network);
            
            //
            // Split problem.
            //
            // Splitting the problem allows us to reduce the effect of single-
            // precision arithmetic (the SSE-optimized version of MLPChunkedProcess
            // uses single precision internally, but converts results to double
            // precision after they are exported from the HPC buffer to the
            // network). Small batches are calculated in single precision and the
            // results are aggregated in double precision, which allows us to avoid
            // accumulation of errors when we process very large batches (tens of
            // thousands of items).
            //
            // NOTE: it is important to use real arithmetics for ProblemCost
            //       because ProblemCost may be larger than MAXINT.
            //
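            // NOTE: when the two halves are merged below, mean-type errors are
            //       combined as weighted averages, while RMSError is combined
            //       through squares: RMS = Sqrt((L0*RMS0^2+L1*RMS1^2)/(L0+L1)).
            //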
            if( subset1-subset0>=2*microbatchsize && (double)(apserv.inttoreal(subset1-subset0)*apserv.inttoreal(wcount))>(double)(gradbasecasecost) )
            {
                apserv.splitlength(subset1-subset0, microbatchsize, ref len0, ref len1);
                mlpallerrorsx(network, densexy, sparsexy, datasetsize, datasettype, idx, subset0, subset0+len0, subsettype, buf, rep0);
                mlpallerrorsx(network, densexy, sparsexy, datasetsize, datasettype, idx, subset0+len0, subset1, subsettype, buf, rep1);
                rep.relclserror = (len0*rep0.relclserror+len1*rep1.relclserror)/(len0+len1);
                rep.avgce = (len0*rep0.avgce+len1*rep1.avgce)/(len0+len1);
                rep.rmserror = Math.Sqrt((len0*math.sqr(rep0.rmserror)+len1*math.sqr(rep1.rmserror))/(len0+len1));
                rep.avgerror = (len0*rep0.avgerror+len1*rep1.avgerror)/(len0+len1);
                rep.avgrelerror = (len0*rep0.avgrelerror+len1*rep1.avgrelerror)/(len0+len1);
                return;
            }
            
            //
            // Retrieve and prepare
            //
            alglib.smp.ae_shared_pool_retrieve(buf, ref pbuf);
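            //
            // NOTE: DSErrAllocate() uses a sign convention for its first argument:
            //       a positive value means "classification with NOut classes", a
            //       negative value means "regression with |NOut| outputs".
            //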
            if( iscls )
            {
                rowsize = nin+1;
                bdss.dserrallocate(nout, ref pbuf.tmp0);
            }
            else
            {
                rowsize = nin+nout;
                bdss.dserrallocate(-nout, ref pbuf.tmp0);
            }
            
            //
            // Processing
            //
            hpccores.hpcpreparechunkedgradient(network.weights, wcount, mlpntotal(network), nin, nout, pbuf);
            cstart = subset0;
            while( cstart<subset1 )
            {
                
                //
                // Determine size of current chunk and copy it to PBuf.XY
                //
                csize = Math.Min(subset1, cstart+pbuf.chunksize)-cstart;
                for(j=0; j<=csize-1; j++)
                {
                    srcidx = -1;
                    if( subsettype==0 )
                    {
                        srcidx = cstart+j;
                    }
                    if( subsettype==1 )
                    {
                        srcidx = idx[cstart+j];
                    }
                    alglib.ap.assert(srcidx>=0, "MLPAllErrorsX: internal error");
                    if( datasettype==0 )
                    {
                        for(i_=0; i_<=rowsize-1;i_++)
                        {
                            pbuf.xy[j,i_] = densexy[srcidx,i_];
                        }
                    }
                    if( datasettype==1 )
                    {
                        sparse.sparsegetrow(sparsexy, srcidx, ref pbuf.xyrow);
                        for(i_=0; i_<=rowsize-1;i_++)
                        {
                            pbuf.xy[j,i_] = pbuf.xyrow[i_];
                        }
                    }
                }
                
                //
                // Unpack XY and process (temporary code, to be replaced by chunked processing)
                //
                for(j=0; j<=csize-1; j++)
                {
                    for(i_=0; i_<=rowsize-1;i_++)
                    {
                        pbuf.xy2[j,i_] = pbuf.xy[j,i_];
                    }
                }
                mlpchunkedprocess(network, pbuf.xy2, 0, csize, pbuf.batch4buf, pbuf.hpcbuf);
                for(j=0; j<=csize-1; j++)
                {
                    for(i_=0; i_<=nin-1;i_++)
                    {
                        pbuf.x[i_] = pbuf.xy2[j,i_];
                    }
                    i1_ = (nin) - (0);
                    for(i_=0; i_<=nout-1;i_++)
                    {
                        pbuf.y[i_] = pbuf.xy2[j,i_+i1_];
                    }
                    if( iscls )
                    {
                        pbuf.desiredy[0] = pbuf.xy[j,nin];
                    }
                    else
                    {
                        i1_ = (nin) - (0);
                        for(i_=0; i_<=nout-1;i_++)
                        {
                            pbuf.desiredy[i_] = pbuf.xy[j,i_+i1_];
                        }
                    }
                    bdss.dserraccumulate(ref pbuf.tmp0, pbuf.y, pbuf.desiredy);
                }
                
                //
                // Process chunk and advance line pointer
                //
                cstart = cstart+pbuf.chunksize;
            }
            bdss.dserrfinish(ref pbuf.tmp0);
            rep.relclserror = pbuf.tmp0[0];
            rep.avgce = pbuf.tmp0[1]/Math.Log(2);
            rep.rmserror = pbuf.tmp0[2];
            rep.avgerror = pbuf.tmp0[3];
            rep.avgrelerror = pbuf.tmp0[4];
            
            //
            // Recycle
            //
            alglib.smp.ae_shared_pool_recycle(buf, ref pbuf);
        }
Exemplo n.º 29
0
        /*************************************************************************
        Procedure for solution of A*x=b with sparse A.

        INPUT PARAMETERS:
            State   -   algorithm state
            A       -   sparse matrix in the CRS format (you MUST convert  it  to
                        CRS format by calling the SparseConvertToCRS() function).
            IsUpper -   whether upper or lower triangle of A is used:
                        * IsUpper=True  => only upper triangle is used and lower
                                           triangle is not referenced at all 
                        * IsUpper=False => only lower triangle is used and upper
                                           triangle is not referenced at all
            B       -   right part, array[N]

        RESULT:
            This function returns no result.
            You can get the solution by calling LinCGResults()
            
        NOTE: this function uses lightweight preconditioning -  multiplication  by
              inverse of diag(A). If you want, you can turn preconditioning off by
              calling LinCGSetPrecUnit(). However, preconditioning cost is low and
              preconditioner  is  very  important  for  solution  of  badly scaled
              problems.

          -- ALGLIB --
             Copyright 14.11.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void lincgsolvesparse(lincgstate state,
            sparse.sparsematrix a,
            bool isupper,
            double[] b)
        {
            int n = 0;
            int i = 0;
            double v = 0;
            double vmv = 0;
            int i_ = 0;

            n = state.n;
            alglib.ap.assert(alglib.ap.len(b)>=state.n, "LinCGSolveSparse: Length(B)<N");
            alglib.ap.assert(apserv.isfinitevector(b, state.n), "LinCGSolveSparse: B contains infinite or NaN values!");
            
            //
            // Allocate temporaries
            //
            apserv.rvectorsetlengthatleast(ref state.tmpd, n);
            
            //
            // Compute diagonal scaling matrix D
            //
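            //
            // NOTE: TmpD stores 1/Sqrt(diag(A)); the NeedPrec branch below
            //       multiplies by TmpD[i]^2, so the preconditioner actually
            //       applied is inv(diag(A)).
            //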
            if( state.prectype==0 )
            {
                
                //
                // Default preconditioner - inverse of matrix diagonal
                //
                for(i=0; i<=n-1; i++)
                {
                    v = sparse.sparsegetdiagonal(a, i);
                    if( (double)(v)>(double)(0) )
                    {
                        state.tmpd[i] = 1/Math.Sqrt(v);
                    }
                    else
                    {
                        state.tmpd[i] = 1;
                    }
                }
            }
            else
            {
                
                //
                // No diagonal scaling
                //
                for(i=0; i<=n-1; i++)
                {
                    state.tmpd[i] = 1;
                }
            }
            
            //
            // Solve
            //
            lincgrestart(state);
            lincgsetb(state, b);
            while( lincgiteration(state) )
            {
                
                //
                // Process different requests from optimizer
                //
                if( state.needmv )
                {
                    sparse.sparsesmv(a, isupper, state.x, ref state.mv);
                }
                if( state.needvmv )
                {
                    sparse.sparsesmv(a, isupper, state.x, ref state.mv);
                    vmv = 0.0;
                    for(i_=0; i_<=state.n-1;i_++)
                    {
                        vmv += state.x[i_]*state.mv[i_];
                    }
                    state.vmv = vmv;
                }
                if( state.needprec )
                {
                    for(i=0; i<=n-1; i++)
                    {
                        state.pv[i] = state.x[i]*math.sqr(state.tmpd[i]);
                    }
                }
            }
        }
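
        /*************************************************************************
        Usage sketch, for illustration only (not part of the original listing):
        solving a small SPD system with the sparse CG solver, assuming the
        standard alglib C# wrappers (LinCGCreate/LinCGSolveSparse/LinCGResults).
        *************************************************************************/
        public static void lincgusagesketch()
        {
            //
            // System: A = [[4,1],[1,3]] (symmetric positive definite), b = [1,2];
            // the exact solution is x = (1/11, 7/11).
            //
            alglib.sparsematrix a;
            alglib.sparsecreate(2, 2, out a);
            alglib.sparseset(a, 0, 0, 4.0);
            alglib.sparseset(a, 0, 1, 1.0);   // store the upper triangle; IsUpper=True below
            alglib.sparseset(a, 1, 1, 3.0);
            alglib.sparseconverttocrs(a);     // CRS format is required by LinCGSolveSparse
            double[] b = new double[]{1.0,2.0};
            double[] x;
            alglib.lincgstate s;
            alglib.lincgreport rep;
            alglib.lincgcreate(2, out s);
            alglib.lincgsolvesparse(s, a, true, b);
            alglib.lincgresults(s, out x, out rep);
        }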
Exemplo n.º 30
0
        /*************************************************************************
        Calculation of all types of errors

          -- ALGLIB --
             Copyright 17.02.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpeallerrorsx(mlpensemble ensemble,
            double[,] densexy,
            sparse.sparsematrix sparsexy,
            int datasetsize,
            int datasettype,
            int[] idx,
            int subset0,
            int subset1,
            int subsettype,
            alglib.smp.shared_pool buf,
            mlpbase.modelerrors rep)
        {
            int i = 0;
            int j = 0;
            int nin = 0;
            int nout = 0;
            bool iscls = new bool();
            int srcidx = 0;
            hpccores.mlpbuffers pbuf = null;
            mlpbase.modelerrors rep0 = new mlpbase.modelerrors();
            mlpbase.modelerrors rep1 = new mlpbase.modelerrors();
            int i_ = 0;
            int i1_ = 0;

            
            //
            // Get network information
            //
            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            iscls = mlpbase.mlpissoftmax(ensemble.network);
            
            //
            // Retrieve buffer, prepare, process data, recycle buffer
            //
            alglib.smp.ae_shared_pool_retrieve(buf, ref pbuf);
            if( iscls )
            {
                bdss.dserrallocate(nout, ref pbuf.tmp0);
            }
            else
            {
                bdss.dserrallocate(-nout, ref pbuf.tmp0);
            }
            apserv.rvectorsetlengthatleast(ref pbuf.x, nin);
            apserv.rvectorsetlengthatleast(ref pbuf.y, nout);
            apserv.rvectorsetlengthatleast(ref pbuf.desiredy, nout);
            for(i=subset0; i<=subset1-1; i++)
            {
                srcidx = -1;
                if( subsettype==0 )
                {
                    srcidx = i;
                }
                if( subsettype==1 )
                {
                    srcidx = idx[i];
                }
                alglib.ap.assert(srcidx>=0, "MLPEAllErrorsX: internal error");
                if( datasettype==0 )
                {
                    for(i_=0; i_<=nin-1;i_++)
                    {
                        pbuf.x[i_] = densexy[srcidx,i_];
                    }
                }
                if( datasettype==1 )
                {
                    sparse.sparsegetrow(sparsexy, srcidx, ref pbuf.x);
                }
                mlpeprocess(ensemble, pbuf.x, ref pbuf.y);
                if( mlpbase.mlpissoftmax(ensemble.network) )
                {
                    if( datasettype==0 )
                    {
                        pbuf.desiredy[0] = densexy[srcidx,nin];
                    }
                    if( datasettype==1 )
                    {
                        pbuf.desiredy[0] = sparse.sparseget(sparsexy, srcidx, nin);
                    }
                }
                else
                {
                    if( datasettype==0 )
                    {
                        i1_ = (nin) - (0);
                        for(i_=0; i_<=nout-1;i_++)
                        {
                            pbuf.desiredy[i_] = densexy[srcidx,i_+i1_];
                        }
                    }
                    if( datasettype==1 )
                    {
                        for(j=0; j<=nout-1; j++)
                        {
                            pbuf.desiredy[j] = sparse.sparseget(sparsexy, srcidx, nin+j);
                        }
                    }
                }
                bdss.dserraccumulate(ref pbuf.tmp0, pbuf.y, pbuf.desiredy);
            }
            bdss.dserrfinish(ref pbuf.tmp0);
            rep.relclserror = pbuf.tmp0[0];
            rep.avgce = pbuf.tmp0[1]/Math.Log(2);
            rep.rmserror = pbuf.tmp0[2];
            rep.avgerror = pbuf.tmp0[3];
            rep.avgrelerror = pbuf.tmp0[4];
            alglib.smp.ae_shared_pool_recycle(buf, ref pbuf);
        }