Code example #1
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        This function scales and copies NMain elements of GUnscaled into GScaled.
        Other NSlack components of GScaled are set to zero.
        *************************************************************************/
        private static void scalegradientandexpand(minbleicstate state,
            double[] gunscaled,
            ref double[] gscaled)
        {
            int i = 0;

            for(i=0; i<=state.nmain-1; i++)
            {
                gscaled[i] = gunscaled[i]*state.transforms[i];
            }
            for(i=0; i<=state.nslack-1; i++)
            {
                gscaled[state.nmain+i] = 0;
            }
        }
Code example #2
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This subroutine finalizes internal structures after emergency  termination
        from State.LSStart report (see comments on MinBLEICState for more information).

        INPUT PARAMETERS:
            State   -   structure after exit from LSStart report

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicemergencytermination(minbleicstate state)
        {
            sactivesets.sasstopoptimization(state.sas);
        }
Code example #3
File: optimization.cs Project: orlovk/PtProject
 /*************************************************************************
 Clears request fields (to be sure that we don't forget to clear something)
 *************************************************************************/
 private static void clearrequestfields(minbleicstate state)
 {
     state.needf = false;
     state.needfg = false;
     state.xupdated = false;
     state.lsstart = false;
 }
Code example #4
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        BLEIC results

        INPUT PARAMETERS:
            State   -   algorithm state

        OUTPUT PARAMETERS:
            X       -   array[0..N-1], solution
            Rep     -   optimization report. You should check Rep.TerminationType
                        in  order  to  distinguish  successful  termination  from
                        unsuccessful one:
                        * -8    internal integrity control  detected  infinite or
                                NAN   values   in   function/gradient.   Abnormal
                                termination signalled.
                        * -7   gradient verification failed.
                               See MinBLEICSetGradientCheck() for more information.
                        * -3   inconsistent constraints. Feasible point is
                               either nonexistent or too hard to find. Try to
                               restart optimizer with better initial approximation
                        *  1   relative function improvement is no more than EpsF.
                        *  2   scaled step is no more than EpsX.
                        *  4   scaled gradient norm is no more than EpsG.
                        *  5   MaxIts steps were taken
                        *  8   terminated by user who called minbleicrequesttermination().
                               X contains point which was "current accepted"  when
                               termination request was submitted.
                        More information about fields of this  structure  can  be
                        found in the comments on MinBLEICReport datatype.
           
          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicresults(minbleicstate state,
            ref double[] x,
            minbleicreport rep)
        {
            x = new double[0];

            minbleicresultsbuf(state, ref x, rep);
        }
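The termination codes listed above are usually checked right after calling minbleicresults(). The following is a minimal sketch, not part of the library: it assumes it is placed where the minbleicstate/minbleicreport types and the static minbleicresults() shown above are visible, and the helper name is hypothetical.

        // Hypothetical helper: extract the solution and reduce TerminationType
        // to a success flag, following the codes documented above
        // (positive = stopped normally, negative = error such as -3/-7/-8).
        private static bool tryextractsolution(minbleicstate state, out double[] xsol)
        {
            xsol = new double[0];
            minbleicreport rep = new minbleicreport();
            minbleicresults(state, ref xsol, rep);
            return rep.terminationtype>0;
        }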
Code example #5
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This subroutine restarts algorithm from new point.
        All optimization parameters (including constraints) are left unchanged.

        This function allows you to solve multiple optimization problems (which
        must have the same number of dimensions) without an object reallocation penalty.

        INPUT PARAMETERS:
            State   -   structure previously allocated with MinBLEICCreate call.
            X       -   new starting point.

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicrestartfrom(minbleicstate state,
            double[] x)
        {
            int n = 0;
            int i_ = 0;

            n = state.nmain;
            
            //
            // First, check for errors in the inputs
            //
            alglib.ap.assert(alglib.ap.len(x)>=n, "MinBLEICRestartFrom: Length(X)<N");
            alglib.ap.assert(apserv.isfinitevector(x, n), "MinBLEICRestartFrom: X contains infinite or NaN values!");
            
            //
            // Set XC
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.xstart[i_] = x[i_];
            }
            
            //
            // prepare RComm facilities
            //
            state.rstate.ia = new int[6+1];
            state.rstate.ba = new bool[0+1];
            state.rstate.ra = new double[5+1];
            state.rstate.stage = -1;
            clearrequestfields(state);
            sactivesets.sasstopoptimization(state.sas);
        }
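As the comment notes, restarting reuses the already allocated structures. A minimal sketch of that calling pattern follows; runoptimizerloop() is a hypothetical placeholder for whatever reverse-communication driver you use around minbleiciteration().

        // Hypothetical sketch: solve several problems of the same dimension
        // (same constraints and settings) by reusing one state object.
        private static void solvefromseveralpoints(minbleicstate state, double[][] startingpoints)
        {
            foreach(double[] x0 in startingpoints)
            {
                minbleicrestartfrom(state, x0); // problem definition is left unchanged
                runoptimizerloop(state);        // hypothetical driver around minbleiciteration()
            }
        }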
Code example #6
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This function turns on/off reporting.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            NeedXRep-   whether iteration reports are needed or not

        If NeedXRep is True, algorithm will call rep() callback function if  it is
        provided to MinBLEICOptimize().

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetxrep(minbleicstate state,
            bool needxrep)
        {
            state.xrep = needxrep;
        }
Code example #7
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This function sets maximum step length

        IMPORTANT: this feature is hard to combine with preconditioning. You can't
        set an upper limit on step length when you solve an optimization problem with
        linear (non-boundary) constraints AND the preconditioner turned on.

        When non-boundary constraints are present, you have to either a) use a
        preconditioner, or b) use an upper limit on step length. YOU CAN'T USE BOTH!
        If you try to use both, the algorithm will terminate with an appropriate error code.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
                        want to limit step length.

        Use this subroutine when you optimize a target function which contains exp()
        or other fast-growing functions, and the optimization algorithm makes steps
        so large that they lead to overflow. This function allows us to reject steps
        that are too large (and therefore expose us to possible overflow) without
        actually calculating the function value at x+stp*d.

          -- ALGLIB --
             Copyright 02.04.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetstpmax(minbleicstate state,
            double stpmax)
        {
            alglib.ap.assert(math.isfinite(stpmax), "MinBLEICSetStpMax: StpMax is not finite!");
            alglib.ap.assert((double)(stpmax)>=(double)(0), "MinBLEICSetStpMax: StpMax<0!");
            state.stpmax = stpmax;
        }
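A short sketch of the intended use follows. It is illustrative only and assumes the caller already has a configured minbleicstate; remember the restriction above about combining StpMax with a preconditioner under linear constraints.

        // Hypothetical sketch: cap the step length for an exp()-heavy target,
        // then remove the limit again by passing 0.0.
        private static void limitsteplength(minbleicstate state)
        {
            minbleicsetstpmax(state, 10.0); // no trial point farther than 10 along the search direction
            // ...
            minbleicsetstpmax(state, 0.0);  // unlimited steps
        }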
Code example #8
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        Internal initialization subroutine
        *************************************************************************/
        private static void minbleicinitinternal(int n,
            double[] x,
            double diffstep,
            minbleicstate state)
        {
            int i = 0;
            double[,] c = new double[0,0];
            int[] ct = new int[0];

            state.nmain = n;
            state.optdim = 0;
            state.diffstep = diffstep;
            state.bndloriginal = new double[n];
            state.bndleffective = new double[n];
            state.hasbndl = new bool[n];
            state.bnduoriginal = new double[n];
            state.bndueffective = new double[n];
            state.hasbndu = new bool[n];
            state.xstart = new double[n];
            state.soriginal = new double[n];
            state.x = new double[n];
            state.g = new double[n];
            for(i=0; i<=n-1; i++)
            {
                state.bndloriginal[i] = Double.NegativeInfinity;
                state.hasbndl[i] = false;
                state.bnduoriginal[i] = Double.PositiveInfinity;
                state.hasbndu[i] = false;
                state.soriginal[i] = 1.0;
            }
            minbleicsetlc(state, c, ct, 0);
            minbleicsetinnercond(state, 0.0, 0.0, 0.0);
            minbleicsetoutercond(state, 1.0E-6, 1.0E-6);
            minbleicsetmaxits(state, 0);
            minbleicsetxrep(state, false);
            minbleicsetstpmax(state, 0.0);
            minbleicsetprecdefault(state);
            minbleicrestartfrom(state, x);
        }
Code example #9
File: optimization.cs Project: orlovk/PtProject
 public override alglib.apobject make_copy()
 {
     minbleicstate _result = new minbleicstate();
     _result.nmain = nmain;
     _result.nslack = nslack;
     _result.epsg = epsg;
     _result.epsf = epsf;
     _result.epsx = epsx;
     _result.maxits = maxits;
     _result.xrep = xrep;
     _result.drep = drep;
     _result.stpmax = stpmax;
     _result.diffstep = diffstep;
     _result.sas = (sactivesets.sactiveset)sas.make_copy();
     _result.s = (double[])s.Clone();
     _result.prectype = prectype;
     _result.diagh = (double[])diagh.Clone();
     _result.x = (double[])x.Clone();
     _result.f = f;
     _result.g = (double[])g.Clone();
     _result.needf = needf;
     _result.needfg = needfg;
     _result.xupdated = xupdated;
     _result.lsstart = lsstart;
     _result.steepestdescentstep = steepestdescentstep;
     _result.boundedstep = boundedstep;
     _result.userterminationneeded = userterminationneeded;
     _result.teststep = teststep;
     _result.rstate = (rcommstate)rstate.make_copy();
     _result.ugc = (double[])ugc.Clone();
     _result.cgc = (double[])cgc.Clone();
     _result.xn = (double[])xn.Clone();
     _result.ugn = (double[])ugn.Clone();
     _result.cgn = (double[])cgn.Clone();
     _result.xp = (double[])xp.Clone();
     _result.fc = fc;
     _result.fn = fn;
     _result.fp = fp;
     _result.d = (double[])d.Clone();
     _result.cleic = (double[,])cleic.Clone();
     _result.nec = nec;
     _result.nic = nic;
     _result.lastgoodstep = lastgoodstep;
     _result.lastscaledgoodstep = lastscaledgoodstep;
     _result.maxscaledgrad = maxscaledgrad;
     _result.hasbndl = (bool[])hasbndl.Clone();
     _result.hasbndu = (bool[])hasbndu.Clone();
     _result.bndl = (double[])bndl.Clone();
     _result.bndu = (double[])bndu.Clone();
     _result.repinneriterationscount = repinneriterationscount;
     _result.repouteriterationscount = repouteriterationscount;
     _result.repnfev = repnfev;
     _result.repvaridx = repvaridx;
     _result.repterminationtype = repterminationtype;
     _result.repdebugeqerr = repdebugeqerr;
     _result.repdebugfs = repdebugfs;
     _result.repdebugff = repdebugff;
     _result.repdebugdx = repdebugdx;
     _result.repdebugfeasqpits = repdebugfeasqpits;
     _result.repdebugfeasgpaits = repdebugfeasgpaits;
     _result.xstart = (double[])xstart.Clone();
     _result.solver = (snnls.snnlssolver)solver.make_copy();
     _result.fbase = fbase;
     _result.fm2 = fm2;
     _result.fm1 = fm1;
     _result.fp1 = fp1;
     _result.fp2 = fp2;
     _result.xm1 = xm1;
     _result.xp1 = xp1;
     _result.gm1 = gm1;
     _result.gp1 = gp1;
     _result.cidx = cidx;
     _result.cval = cval;
     _result.tmpprec = (double[])tmpprec.Clone();
     _result.tmp0 = (double[])tmp0.Clone();
     _result.nfev = nfev;
     _result.mcstage = mcstage;
     _result.stp = stp;
     _result.curstpmax = curstpmax;
     _result.activationstep = activationstep;
     _result.work = (double[])work.Clone();
     _result.lstate = (linmin.linminstate)lstate.make_copy();
     _result.trimthreshold = trimthreshold;
     _result.nonmonotoniccnt = nonmonotoniccnt;
     _result.bufyk = (double[,])bufyk.Clone();
     _result.bufsk = (double[,])bufsk.Clone();
     _result.bufrho = (double[])bufrho.Clone();
     _result.buftheta = (double[])buftheta.Clone();
     _result.bufsize = bufsize;
     return _result;
 }
Code example #10
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        This function projects gradient onto equality constrained subspace
        *************************************************************************/
        private static void makegradientprojection(minbleicstate state,
            ref double[] pg)
        {
            int i = 0;
            int nmain = 0;
            int nslack = 0;
            double v = 0;
            int i_ = 0;

            nmain = state.nmain;
            nslack = state.nslack;
            for(i=0; i<=nmain+nslack-1; i++)
            {
                if( state.activeconstraints[i] )
                {
                    pg[i] = 0;
                }
            }
            for(i=0; i<=state.cecnt-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    v += pg[i_]*state.cecurrent[i,i_];
                }
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    pg[i_] = pg[i_] - v*state.cecurrent[i,i_];
                }
            }
        }
Code example #11
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        This function prepares equality constrained subproblem:

        1. X is used to activate constraints (if there are constraints which are
           still inactive, but should be activated).
        2. constraints matrix CEOrt is copied to CECurrent and modified  according
           to the list of active bound constraints (corresponding elements are
           filled by zeros and reorthogonalized).
        3. XE - least squares solution of equality constraints - is recalculated
        4. X is copied to PX and projected onto equality constrained subspace
        5. inactive constraints are checked against PX - if there is at least one
           which should be activated, we activate it and move back to (2)
        6. as result, PX is feasible with respect to bound constraints - step (5)
           guarantees it. But PX can be infeasible with respect to equality ones,
           because step (2) is done without checks for consistency. As the final
           step, we check that PX is feasible. If not, we return False. True is
           returned otherwise.

        If this algorithm returned True, then:
        * X is not changed
        * PX contains projection of X onto constrained subspace
        * G is not changed
        * PG contains projection of G onto constrained subspace
        * PX is feasible with respect to all constraints
        * all constraints which are active at PX, are activated
        *************************************************************************/
        private static bool prepareconstraintmatrix(minbleicstate state,
            double[] x,
            double[] g,
            ref double[] px,
            ref double[] pg)
        {
            bool result = new bool();
            int i = 0;
            int nmain = 0;
            int nslack = 0;
            double v = 0;
            double ferr = 0;
            int i_ = 0;

            nmain = state.nmain;
            nslack = state.nslack;
            result = true;
            
            //
            // Step 1
            //
            additionalcheckforconstraints(state, x);
            
            //
            // Steps 2-5
            //
            do
            {
                
                //
                // Steps 2-3
                //
                rebuildcexe(state);
                
                //
                // Step 4
                //
                // Calculate PX, PG
                //
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    px[i_] = x[i_];
                }
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    px[i_] = px[i_] - state.xe[i_];
                }
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    pg[i_] = g[i_];
                }
                for(i=0; i<=nmain+nslack-1; i++)
                {
                    if( state.activeconstraints[i] )
                    {
                        px[i] = 0;
                        pg[i] = 0;
                    }
                }
                for(i=0; i<=state.cecnt-1; i++)
                {
                    v = 0.0;
                    for(i_=0; i_<=nmain+nslack-1;i_++)
                    {
                        v += px[i_]*state.cecurrent[i,i_];
                    }
                    for(i_=0; i_<=nmain+nslack-1;i_++)
                    {
                        px[i_] = px[i_] - v*state.cecurrent[i,i_];
                    }
                    v = 0.0;
                    for(i_=0; i_<=nmain+nslack-1;i_++)
                    {
                        v += pg[i_]*state.cecurrent[i,i_];
                    }
                    for(i_=0; i_<=nmain+nslack-1;i_++)
                    {
                        pg[i_] = pg[i_] - v*state.cecurrent[i,i_];
                    }
                }
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    px[i_] = px[i_] + state.xe[i_];
                }
                
                //
                // Step 5 (loop condition below)
                //
            }
            while( additionalcheckforconstraints(state, px) );
            
            //
            // Step 6
            //
            ferr = 0;
            for(i=0; i<=state.cecnt-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    v += px[i_]*state.ceeffective[i,i_];
                }
                v = v-state.ceeffective[i,nmain+nslack];
                ferr = Math.Max(ferr, Math.Abs(v));
            }
            result = (double)(ferr)<=(double)(state.outerepsi);
            return result;
        }
Code example #12
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        This function rebuilds CECurrent and XE according to current set of
        active bound constraints.
        *************************************************************************/
        private static void rebuildcexe(minbleicstate state)
        {
            int i = 0;
            int j = 0;
            int k = 0;
            int nmain = 0;
            int nslack = 0;
            double v = 0;
            int i_ = 0;

            nmain = state.nmain;
            nslack = state.nslack;
            ablas.rmatrixcopy(state.cecnt, nmain+nslack+1, state.ceeffective, 0, 0, ref state.cecurrent, 0, 0);
            for(i=0; i<=state.cecnt-1; i++)
            {
                
                //
                // "Subtract" active bound constraints from I-th linear constraint
                //
                for(j=0; j<=nmain+nslack-1; j++)
                {
                    if( state.activeconstraints[j] )
                    {
                        state.cecurrent[i,nmain+nslack] = state.cecurrent[i,nmain+nslack]-state.cecurrent[i,j]*state.constrainedvalues[j];
                        state.cecurrent[i,j] = 0.0;
                    }
                }
                
                //
                // Reorthogonalize I-th constraint with respect to previous ones
                // NOTE: we also update right part, which is CECurrent[...,NMain+NSlack].
                //
                for(k=0; k<=i-1; k++)
                {
                    v = 0.0;
                    for(i_=0; i_<=nmain+nslack-1;i_++)
                    {
                        v += state.cecurrent[k,i_]*state.cecurrent[i,i_];
                    }
                    for(i_=0; i_<=nmain+nslack;i_++)
                    {
                        state.cecurrent[i,i_] = state.cecurrent[i,i_] - v*state.cecurrent[k,i_];
                    }
                }
                
                //
                // Calculate norm of I-th row of CECurrent. Fill by zeros, if it is
                // too small. Normalize otherwise.
                //
                // NOTE: we also scale last column of CECurrent (right part)
                //
                v = 0.0;
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    v += state.cecurrent[i,i_]*state.cecurrent[i,i_];
                }
                v = Math.Sqrt(v);
                if( (double)(v)>(double)(10000*math.machineepsilon) )
                {
                    v = 1/v;
                    for(i_=0; i_<=nmain+nslack;i_++)
                    {
                        state.cecurrent[i,i_] = v*state.cecurrent[i,i_];
                    }
                }
                else
                {
                    for(j=0; j<=nmain+nslack; j++)
                    {
                        state.cecurrent[i,j] = 0;
                    }
                }
            }
            for(j=0; j<=nmain+nslack-1; j++)
            {
                state.xe[j] = 0;
            }
            for(i=0; i<=nmain+nslack-1; i++)
            {
                if( state.activeconstraints[i] )
                {
                    state.xe[i] = state.xe[i]+state.constrainedvalues[i];
                }
            }
            for(i=0; i<=state.cecnt-1; i++)
            {
                v = state.cecurrent[i,nmain+nslack];
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    state.xe[i_] = state.xe[i_] + v*state.cecurrent[i,i_];
                }
            }
        }
Code example #13
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        This function makes an additional check for constraints which can be activated.

        We try to activate constraints one by one, but it is possible that several
        constraints should be activated during one iteration. In this case only
        one of them (probably the last) will be activated. This function fixes that -
        it passes through the constraints and activates those which are at the
        boundary or beyond it.

        It returns True if at least one constraint was activated by this function.
        *************************************************************************/
        private static bool additionalcheckforconstraints(minbleicstate state,
            double[] x)
        {
            bool result = new bool();
            int i = 0;
            int nmain = 0;
            int nslack = 0;

            result = false;
            nmain = state.nmain;
            nslack = state.nslack;
            for(i=0; i<=nmain-1; i++)
            {
                if( !state.activeconstraints[i] )
                {
                    if( state.hasbndl[i] )
                    {
                        if( (double)(x[i])<=(double)(state.bndleffective[i]) )
                        {
                            state.activeconstraints[i] = true;
                            state.constrainedvalues[i] = state.bndleffective[i];
                            result = true;
                        }
                    }
                    if( state.hasbndu[i] )
                    {
                        if( (double)(x[i])>=(double)(state.bndueffective[i]) )
                        {
                            state.activeconstraints[i] = true;
                            state.constrainedvalues[i] = state.bndueffective[i];
                            result = true;
                        }
                    }
                }
            }
            for(i=0; i<=nslack-1; i++)
            {
                if( !state.activeconstraints[nmain+i] )
                {
                    if( (double)(x[nmain+i])<=(double)(0) )
                    {
                        state.activeconstraints[nmain+i] = true;
                        state.constrainedvalues[nmain+i] = 0;
                        result = true;
                    }
                }
            }
            return result;
        }
Code example #14
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        This subroutine applies modifications to the target function given by
        its value F and gradient G at the projected point X which lies in the
        equality constrained subspace.

        Following modifications are applied:
        * modified barrier functions to handle inequality constraints
          (both F and G are modified)
        * projection of gradient into equality constrained subspace
          (only G is modified)
        * quadratic penalty for deviations from equality constrained subspace
          (both F and G are modified)

        It also calculates gradient norm (three different norms for three
        different types of gradient), feasibility and complementary slackness
        errors.

        INPUT PARAMETERS:
            State   -   optimizer state (we use its fields to get information
                        about constraints)
            X       -   point (projected into equality constrained subspace)
            R       -   residual from projection
            RNorm2  -   residual norm squared
            F       -   function value at X
            G       -   function gradient at X

        OUTPUT PARAMETERS:
            F       -   modified function value at X
            G       -   modified function gradient at X
            GNorm   -   2-norm of unmodified G
            MPGNorm -   2-norm of modified G
            MBA     -   minimum argument of barrier functions.
                        If X is strictly feasible, it is greater than zero.
                        If X lies on a boundary, it is zero.
                        It is negative for infeasible X.
            FIErr   -   2-norm of feasibility error with respect to
                        inequality/bound constraints
            CSErr   -   2-norm of complementarity slackness error
        *************************************************************************/
        private static void modifytargetfunction(minbleicstate state,
            double[] x,
            double[] r,
            double rnorm2,
            ref double f,
            ref double[] g,
            ref double gnorm,
            ref double mpgnorm)
        {
            double v = 0;
            int i = 0;
            int nmain = 0;
            int nslack = 0;
            bool hasconstraints = new bool();
            int i_ = 0;

            gnorm = 0;
            mpgnorm = 0;

            nmain = state.nmain;
            nslack = state.nslack;
            hasconstraints = false;
            
            //
            // GNorm
            //
            v = 0.0;
            for(i_=0; i_<=nmain+nslack-1;i_++)
            {
                v += g[i_]*g[i_];
            }
            gnorm = Math.Sqrt(v);
            
            //
            // Process equality constraints:
            // * modify F to handle penalty term for equality constraints
            // * project gradient on null space of equality constraints
            // * add penalty term for equality constraints to gradient
            //
            f = f+rnorm2;
            for(i=0; i<=nmain+nslack-1; i++)
            {
                if( state.activeconstraints[i] )
                {
                    g[i] = 0;
                }
            }
            for(i=0; i<=state.cecnt-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    v += g[i_]*state.cecurrent[i,i_];
                }
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    g[i_] = g[i_] - v*state.cecurrent[i,i_];
                }
            }
            for(i_=0; i_<=nmain+nslack-1;i_++)
            {
                g[i_] = g[i_] + 2*r[i_];
            }
            
            //
            // MPGNorm
            //
            v = 0.0;
            for(i_=0; i_<=nmain+nslack-1;i_++)
            {
                v += g[i_]*g[i_];
            }
            mpgnorm = Math.Sqrt(v);
        }
Code example #15
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        Modification  of  the  preconditioner:  diagonal of approximate Hessian is
        used.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            D       -   diagonal of the approximate Hessian, array[0..N-1],
                        (if larger, only leading N elements are used).

        NOTE 1: D[i] should be positive. Exception will be thrown otherwise.

        NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.

          -- ALGLIB --
             Copyright 13.10.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetprecdiag(minbleicstate state,
            double[] d)
        {
            int i = 0;

            alglib.ap.assert(alglib.ap.len(d)>=state.nmain, "MinBLEICSetPrecDiag: D is too short");
            for(i=0; i<=state.nmain-1; i++)
            {
                alglib.ap.assert(math.isfinite(d[i]), "MinBLEICSetPrecDiag: D contains infinite or NAN elements");
                alglib.ap.assert((double)(d[i])>(double)(0), "MinBLEICSetPrecDiag: D contains non-positive elements");
            }
            apserv.rvectorsetlengthatleast(ref state.diagh, state.nmain);
            state.prectype = 2;
            for(i=0; i<=state.nmain-1; i++)
            {
                state.diagh[i] = d[i];
            }
        }
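A minimal sketch of the call follows; the diagonal values are made up for illustration and must, as the assertions above require, be finite, positive and cover at least NMain elements.

        // Hypothetical sketch: use the diagonal of an approximate Hessian
        // (NOT its inverse) as a preconditioner for a 3-variable problem.
        private static void useapproximatehessiandiagonal(minbleicstate state)
        {
            double[] d = new double[]{ 4.0, 0.5, 100.0 }; // illustrative values, length >= NMain assumed
            minbleicsetprecdiag(state, d);
        }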
Code example #16
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
                             BOUND CONSTRAINED OPTIMIZATION
               WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS

        DESCRIPTION:
        The  subroutine  minimizes  function   F(x)  of N arguments subject to any
        combination of:
        * bound constraints
        * linear inequality constraints
        * linear equality constraints

        REQUIREMENTS:
        * user must provide function value and gradient
        * starting point X0 must be feasible or
          not too far away from the feasible set
        * grad(f) must be Lipschitz continuous on a level set:
          L = { x : f(x)<=f(x0) }
        * function must be defined everywhere on the feasible set F

        USAGE:

        Constrained optimization is far more complex than unconstrained optimization.
        Here we give a very brief outline of the BLEIC optimizer. We strongly recommend
        that you read the examples in the ALGLIB Reference Manual and the ALGLIB User
        Guide on optimization, which is available at http://www.alglib.net/optimization/

        1. User initializes algorithm state with MinBLEICCreate() call

        2. User adds boundary and/or linear constraints by calling the
           MinBLEICSetBC() and MinBLEICSetLC() functions.

        3. User sets stopping conditions with MinBLEICSetCond().

        4. User calls MinBLEICOptimize() function which takes algorithm  state and
           pointer (delegate, etc.) to callback function which calculates F/G.

        5. User calls MinBLEICResults() to get solution

        6. Optionally, the user may call MinBLEICRestartFrom() to solve another problem
           with the same N but a different starting point.
           MinBLEICRestartFrom() allows reusing the already initialized structure.


        INPUT PARAMETERS:
            N       -   problem dimension, N>0:
                        * if given, only leading N elements of X are used
                        * if not given, automatically determined from size of X
            X       -   starting point, array[N]:
                        * it is better to set X to a feasible point
                        * but X can be infeasible, in which case algorithm will try
                          to find feasible point first, using X as initial
                          approximation.

        OUTPUT PARAMETERS:
            State   -   structure stores algorithm state

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleiccreate(int n,
            double[] x,
            minbleicstate state)
        {
            double[,] c = new double[0,0];
            int[] ct = new int[0];

            alglib.ap.assert(n>=1, "MinBLEICCreate: N<1");
            alglib.ap.assert(alglib.ap.len(x)>=n, "MinBLEICCreate: Length(X)<N");
            alglib.ap.assert(apserv.isfinitevector(x, n), "MinBLEICCreate: X contains infinite or NaN values!");
            minbleicinitinternal(n, x, 0.0, state);
        }
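The USAGE steps above translate into roughly the driver below. This is only a sketch written against the static functions shown in this listing (create, set bounds, iterate, read results); it assumes the code lives where those functions are visible, uses a toy target f(x) = x0^2 + x1^2, and assumes minbleicsetcond(State, EpsG, EpsF, EpsX, MaxIts) is available with that signature, which is not reproduced in this excerpt.

        // Hypothetical end-to-end sketch: minimize f(x) = x0^2 + x1^2
        // subject to x0 >= 1, supplying an analytic gradient.
        private static void minimizesumofsquares()
        {
            // 1. create optimizer, starting point (3,3)
            double[] x = new double[]{ 3.0, 3.0 };
            minbleicstate state = new minbleicstate();
            minbleiccreate(2, x, state);

            // 2. bound constraints: x0 >= 1, x1 unbounded
            minbleicsetbc(state,
                new double[]{ 1.0, Double.NegativeInfinity },
                new double[]{ Double.PositiveInfinity, Double.PositiveInfinity });

            // 3. stopping conditions (assumed signature, not shown in this excerpt)
            minbleicsetcond(state, 1.0E-6, 0, 0, 0);

            // 4. reverse-communication loop: supply F and G whenever requested
            while( minbleiciteration(state) )
            {
                if( state.needfg )
                {
                    state.f = state.x[0]*state.x[0]+state.x[1]*state.x[1];
                    state.g[0] = 2*state.x[0];
                    state.g[1] = 2*state.x[1];
                }
            }

            // 5. read solution and report
            double[] xsol = new double[0];
            minbleicreport rep = new minbleicreport();
            minbleicresults(state, ref xsol, rep);
        }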
Code example #17
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        Modification of the preconditioner: scale-based diagonal preconditioning.

        This preconditioning mode can be useful when you  don't  have  approximate
        diagonal of Hessian, but you know that your  variables  are  badly  scaled
        (for  example,  one  variable is in [1,10], and another in [1000,100000]),
        and most part of the ill-conditioning comes from different scales of vars.

        In this case simple  scale-based  preconditioner,  with H[i] = 1/(s[i]^2),
        can greatly improve convergence.

        IMPORTANT: you should set the scale of your variables with a MinBLEICSetScale()
        call (before or after the MinBLEICSetPrecScale() call). Without knowledge of
        the scale of your variables, the scale-based preconditioner will be just a
        unit matrix.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state

          -- ALGLIB --
             Copyright 13.10.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetprecscale(minbleicstate state)
        {
            state.prectype = 3;
        }
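A minimal sketch follows. MinBLEICSetScale() is referenced above but not reproduced in this excerpt, so its (State, S) signature here is an assumption, and the scale values are illustrative.

        // Hypothetical sketch: scale-based preconditioning with H[i] = 1/(s[i]^2).
        private static void usescalebasedpreconditioner(minbleicstate state)
        {
            double[] s = new double[]{ 10.0, 100000.0 }; // typical magnitudes of the variables (illustrative)
            minbleicsetscale(state, s);  // assumed signature; without scales the preconditioner is a unit matrix
            minbleicsetprecscale(state); // switch to the scale-based preconditioner
        }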
Code example #18
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        The subroutine is finite difference variant of MinBLEICCreate().  It  uses
        finite differences in order to differentiate target function.

        The description below contains information which is specific to this function
        only. We recommend reading the comments on MinBLEICCreate() in order to get
        more information about creation of the BLEIC optimizer.

        INPUT PARAMETERS:
            N       -   problem dimension, N>0:
                        * if given, only leading N elements of X are used
                        * if not given, automatically determined from size of X
            X       -   starting point, array[0..N-1].
            DiffStep-   differentiation step, >0

        OUTPUT PARAMETERS:
            State   -   structure which stores algorithm state

        NOTES:
        1. algorithm uses 4-point central formula for differentiation.
        2. differentiation step along I-th axis is equal to DiffStep*S[I] where
           S[] is scaling vector which can be set by MinBLEICSetScale() call.
        3. we recommend you to use moderate values of the differentiation step. Too
           large a step will result in too large truncation errors, while too small
           a step will result in too large numerical errors. 1.0E-6 can be a good
           value to start with.
        4. Numerical differentiation is very inefficient - one gradient calculation
           needs 4*N function evaluations. This function will work for any N - either
           small (1...10), moderate (10...100) or large (100...). However, the
           performance penalty will be too severe for any N except small ones.
           We should also say that code which relies on numerical differentiation is
           less robust and precise. The optimizer needs exact gradient values, and an
           imprecise gradient may slow down convergence, especially on highly
           nonlinear problems.
           Thus we recommend using this function for fast prototyping on small-
           dimensional problems only, and implementing an analytical gradient as
           soon as possible.

          -- ALGLIB --
             Copyright 16.05.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleiccreatef(int n,
            double[] x,
            double diffstep,
            minbleicstate state)
        {
            double[,] c = new double[0,0];
            int[] ct = new int[0];

            alglib.ap.assert(n>=1, "MinBLEICCreateF: N<1");
            alglib.ap.assert(alglib.ap.len(x)>=n, "MinBLEICCreateF: Length(X)<N");
            alglib.ap.assert(apserv.isfinitevector(x, n), "MinBLEICCreateF: X contains infinite or NaN values!");
            alglib.ap.assert(math.isfinite(diffstep), "MinBLEICCreateF: DiffStep is infinite or NaN!");
            alglib.ap.assert((double)(diffstep)>(double)(0), "MinBLEICCreateF: DiffStep is non-positive!");
            minbleicinitinternal(n, x, diffstep, state);
        }
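For contrast with the analytic-gradient variant, here is a minimal sketch of the finite-difference workflow: only the function value is supplied, and the gradient is built internally from the 4-point formula described in the notes above. The helper name and the toy target are illustrative only.

        // Hypothetical sketch: function-only optimization via minbleiccreatef().
        private static void minimizewithoutgradient()
        {
            double[] x = new double[]{ 3.0, 3.0 };
            minbleicstate state = new minbleicstate();
            minbleiccreatef(2, x, 1.0E-6, state); // DiffStep = 1.0E-6, as suggested in note 3

            while( minbleiciteration(state) )
            {
                if( state.needf )
                {
                    // only the function value is requested in this mode
                    state.f = state.x[0]*state.x[0]+state.x[1]*state.x[1];
                }
            }

            double[] xsol = new double[0];
            minbleicreport rep = new minbleicreport();
            minbleicresults(state, ref xsol, rep);
        }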
Code example #19
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This function turns on/off line search reports.
        These reports are described in more details in developer-only  comments on
        MinBLEICState object.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            NeedDRep-   whether line search reports are needed or not

        This function is intended for private use only. Turning it on artificially
        may cause program failure.

          -- ALGLIB --
             Copyright 02.04.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetdrep(minbleicstate state,
            bool needdrep)
        {
            state.drep = needdrep;
        }
Code example #20
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This function sets boundary constraints for BLEIC optimizer.

        Boundary constraints are inactive by default (after initial creation).
        They are preserved after algorithm restart with MinBLEICRestartFrom().

        INPUT PARAMETERS:
            State   -   structure stores algorithm state
            BndL    -   lower bounds, array[N].
                        If some (all) variables are unbounded, you may specify
                        very small number or -INF.
            BndU    -   upper bounds, array[N].
                        If some (all) variables are unbounded, you may specify
                        very large number or +INF.

        NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
        variable will be "frozen" at X[i]=BndL[i]=BndU[i].

        NOTE 2: this solver has following useful properties:
        * bound constraints are always satisfied exactly
        * function is evaluated only INSIDE area specified by  bound  constraints,
          even  when  numerical  differentiation is used (algorithm adjusts  nodes
          according to boundary constraints)

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetbc(minbleicstate state,
            double[] bndl,
            double[] bndu)
        {
            int i = 0;
            int n = 0;

            n = state.nmain;
            alglib.ap.assert(alglib.ap.len(bndl)>=n, "MinBLEICSetBC: Length(BndL)<N");
            alglib.ap.assert(alglib.ap.len(bndu)>=n, "MinBLEICSetBC: Length(BndU)<N");
            for(i=0; i<=n-1; i++)
            {
                alglib.ap.assert(math.isfinite(bndl[i]) || Double.IsNegativeInfinity(bndl[i]), "MinBLEICSetBC: BndL contains NAN or +INF");
                alglib.ap.assert(math.isfinite(bndu[i]) || Double.IsPositiveInfinity(bndu[i]), "MinBLEICSetBC: BndU contains NAN or -INF");
                state.bndl[i] = bndl[i];
                state.hasbndl[i] = math.isfinite(bndl[i]);
                state.bndu[i] = bndu[i];
                state.hasbndu[i] = math.isfinite(bndu[i]);
            }
            sactivesets.sassetbc(state.sas, bndl, bndu);
        }
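A short sketch of typical bound settings follows, illustrating unbounded directions via infinities and a variable frozen by BndL[i]=BndU[i] as in NOTE 1. The numbers are made up for illustration.

        // Hypothetical sketch: x0 in [0,1], x1 >= -5 (no upper bound), x2 frozen at 2.5.
        private static void setexampleboundconstraints(minbleicstate state)
        {
            double[] bndl = new double[]{ 0.0, -5.0, 2.5 };
            double[] bndu = new double[]{ 1.0, Double.PositiveInfinity, 2.5 };
            minbleicsetbc(state, bndl, bndu);
        }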
Code example #21
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        NOTES:

        1. This function has two different implementations: one which  uses  exact
           (analytical) user-supplied gradient,  and one which uses function value
           only  and  numerically  differentiates  function  in  order  to  obtain
           gradient.

           Depending  on  the  specific  function  used to create optimizer object
           (either  MinBLEICCreate() for analytical gradient or  MinBLEICCreateF()
           for numerical differentiation) you should choose appropriate variant of
           MinBLEICOptimize() - one  which  accepts  function  AND gradient or one
           which accepts function ONLY.

           Be careful to choose variant of MinBLEICOptimize() which corresponds to
           your optimization scheme! Table below lists different  combinations  of
           callback (function/gradient) passed to MinBLEICOptimize()  and specific
           function used to create optimizer.


                             |         USER PASSED TO MinBLEICOptimize()
           CREATED WITH      |  function only   |  function and gradient
           ------------------------------------------------------------
           MinBLEICCreateF() |     work                FAIL
           MinBLEICCreate()  |     FAIL                work

           Here "FAIL" denotes inappropriate combinations  of  optimizer  creation
           function  and  MinBLEICOptimize()  version.   Attemps   to   use   such
           combination (for  example,  to  create optimizer with MinBLEICCreateF()
           and  to  pass  gradient  information  to  MinCGOptimize()) will lead to
           exception being thrown. Either  you  did  not pass gradient when it WAS
           needed or you passed gradient when it was NOT needed.

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static bool minbleiciteration(minbleicstate state)
        {
            bool result = new bool();
            int n = 0;
            int m = 0;
            int i = 0;
            int j = 0;
            double v = 0;
            double vv = 0;
            double v0 = 0;
            bool b = new bool();
            int mcinfo = 0;
            int actstatus = 0;
            int itidx = 0;
            double penalty = 0;
            double ginit = 0;
            double gdecay = 0;
            int i_ = 0;

            
            //
            // Reverse communication preparations
            // I know it looks ugly, but it works the same way
            // anywhere from C++ to Python.
            //
            // This code initializes locals by:
            // * random values determined during code
            //   generation - on first subroutine call
            // * values from previous call - on subsequent calls
            //
            if( state.rstate.stage>=0 )
            {
                n = state.rstate.ia[0];
                m = state.rstate.ia[1];
                i = state.rstate.ia[2];
                j = state.rstate.ia[3];
                mcinfo = state.rstate.ia[4];
                actstatus = state.rstate.ia[5];
                itidx = state.rstate.ia[6];
                b = state.rstate.ba[0];
                v = state.rstate.ra[0];
                vv = state.rstate.ra[1];
                v0 = state.rstate.ra[2];
                penalty = state.rstate.ra[3];
                ginit = state.rstate.ra[4];
                gdecay = state.rstate.ra[5];
            }
            else
            {
                n = -983;
                m = -989;
                i = -834;
                j = 900;
                mcinfo = -287;
                actstatus = 364;
                itidx = 214;
                b = false;
                v = -686;
                vv = 912;
                v0 = 585;
                penalty = 497;
                ginit = -271;
                gdecay = -581;
            }
            if( state.rstate.stage==0 )
            {
                goto lbl_0;
            }
            if( state.rstate.stage==1 )
            {
                goto lbl_1;
            }
            if( state.rstate.stage==2 )
            {
                goto lbl_2;
            }
            if( state.rstate.stage==3 )
            {
                goto lbl_3;
            }
            if( state.rstate.stage==4 )
            {
                goto lbl_4;
            }
            if( state.rstate.stage==5 )
            {
                goto lbl_5;
            }
            if( state.rstate.stage==6 )
            {
                goto lbl_6;
            }
            if( state.rstate.stage==7 )
            {
                goto lbl_7;
            }
            if( state.rstate.stage==8 )
            {
                goto lbl_8;
            }
            if( state.rstate.stage==9 )
            {
                goto lbl_9;
            }
            if( state.rstate.stage==10 )
            {
                goto lbl_10;
            }
            if( state.rstate.stage==11 )
            {
                goto lbl_11;
            }
            if( state.rstate.stage==12 )
            {
                goto lbl_12;
            }
            if( state.rstate.stage==13 )
            {
                goto lbl_13;
            }
            if( state.rstate.stage==14 )
            {
                goto lbl_14;
            }
            if( state.rstate.stage==15 )
            {
                goto lbl_15;
            }
            if( state.rstate.stage==16 )
            {
                goto lbl_16;
            }
            if( state.rstate.stage==17 )
            {
                goto lbl_17;
            }
            if( state.rstate.stage==18 )
            {
                goto lbl_18;
            }
            if( state.rstate.stage==19 )
            {
                goto lbl_19;
            }
            if( state.rstate.stage==20 )
            {
                goto lbl_20;
            }
            if( state.rstate.stage==21 )
            {
                goto lbl_21;
            }
            if( state.rstate.stage==22 )
            {
                goto lbl_22;
            }
            if( state.rstate.stage==23 )
            {
                goto lbl_23;
            }
            
            //
            // Routine body
            //
            
            //
            // Algorithm parameters:
            // * M          number of L-BFGS corrections.
            //              This coefficient remains fixed during iterations.
            // * GDecay     desired decrease of constrained gradient during L-BFGS iterations.
            //              This coefficient is decreased after each L-BFGS round until
            //              it reaches minimum decay.
            //
            m = Math.Min(5, state.nmain);
            gdecay = initialdecay;
            
            //
            // Init
            //
            n = state.nmain;
            state.steepestdescentstep = false;
            state.userterminationneeded = false;
            state.repterminationtype = 0;
            state.repinneriterationscount = 0;
            state.repouteriterationscount = 0;
            state.repnfev = 0;
            state.repvaridx = -1;
            state.repdebugeqerr = 0.0;
            state.repdebugfs = Double.NaN;
            state.repdebugff = Double.NaN;
            state.repdebugdx = Double.NaN;
            if( (double)(state.stpmax)!=(double)(0) && state.prectype!=0 )
            {
                state.repterminationtype = -10;
                result = false;
                return result;
            }
            apserv.rmatrixsetlengthatleast(ref state.bufyk, m+1, n);
            apserv.rmatrixsetlengthatleast(ref state.bufsk, m+1, n);
            apserv.rvectorsetlengthatleast(ref state.bufrho, m);
            apserv.rvectorsetlengthatleast(ref state.buftheta, m);
            apserv.rvectorsetlengthatleast(ref state.tmp0, n);
            
            //
            // Fill TmpPrec with current preconditioner
            //
            apserv.rvectorsetlengthatleast(ref state.tmpprec, n);
            for(i=0; i<=n-1; i++)
            {
                if( state.prectype==2 )
                {
                    state.tmpprec[i] = state.diagh[i];
                    continue;
                }
                if( state.prectype==3 )
                {
                    state.tmpprec[i] = 1/math.sqr(state.s[i]);
                    continue;
                }
                state.tmpprec[i] = 1;
            }
            sactivesets.sassetprecdiag(state.sas, state.tmpprec);
            
            //
            // Start optimization
            //
            if( !sactivesets.sasstartoptimization(state.sas, state.xstart) )
            {
                state.repterminationtype = -3;
                result = false;
                return result;
            }
            
            //
            //  Check correctness of user-supplied gradient
            //
            if( !((double)(state.diffstep)==(double)(0) && (double)(state.teststep)>(double)(0)) )
            {
                goto lbl_24;
            }
            clearrequestfields(state);
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.sas.xc[i_];
            }
            state.needfg = true;
            i = 0;
        lbl_26:
            if( i>n-1 )
            {
                goto lbl_28;
            }
            alglib.ap.assert(!state.hasbndl[i] || (double)(state.sas.xc[i])>=(double)(state.bndl[i]), "MinBLEICIteration: internal error(State.X is out of bounds)");
            alglib.ap.assert(!state.hasbndu[i] || (double)(state.sas.xc[i])<=(double)(state.bndu[i]), "MinBLEICIteration: internal error(State.X is out of bounds)");
            v = state.x[i];
            state.x[i] = v-state.teststep*state.s[i];
            if( state.hasbndl[i] )
            {
                state.x[i] = Math.Max(state.x[i], state.bndl[i]);
            }
            state.xm1 = state.x[i];
            state.rstate.stage = 0;
            goto lbl_rcomm;
        lbl_0:
            state.fm1 = state.f;
            state.gm1 = state.g[i];
            state.x[i] = v+state.teststep*state.s[i];
            if( state.hasbndu[i] )
            {
                state.x[i] = Math.Min(state.x[i], state.bndu[i]);
            }
            state.xp1 = state.x[i];
            state.rstate.stage = 1;
            goto lbl_rcomm;
        lbl_1:
            state.fp1 = state.f;
            state.gp1 = state.g[i];
            state.x[i] = (state.xm1+state.xp1)/2;
            if( state.hasbndl[i] )
            {
                state.x[i] = Math.Max(state.x[i], state.bndl[i]);
            }
            if( state.hasbndu[i] )
            {
                state.x[i] = Math.Min(state.x[i], state.bndu[i]);
            }
            state.rstate.stage = 2;
            goto lbl_rcomm;
        lbl_2:
            state.x[i] = v;
            if( !optserv.derivativecheck(state.fm1, state.gm1, state.fp1, state.gp1, state.f, state.g[i], state.xp1-state.xm1) )
            {
                state.repvaridx = i;
                state.repterminationtype = -7;
                sactivesets.sasstopoptimization(state.sas);
                result = false;
                return result;
            }
            i = i+1;
            goto lbl_26;
        lbl_28:
            state.needfg = false;
        lbl_24:
            
            //
            // Main cycle of BLEIC-PG algorithm
            //
            state.repterminationtype = 0;
            state.lastgoodstep = 0;
            state.lastscaledgoodstep = 0;
            state.maxscaledgrad = 0;
            state.nonmonotoniccnt = (int)Math.Round(1.5*(n+state.nic))+5;
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.sas.xc[i_];
            }
            clearrequestfields(state);
            if( (double)(state.diffstep)!=(double)(0) )
            {
                goto lbl_29;
            }
            state.needfg = true;
            state.rstate.stage = 3;
            goto lbl_rcomm;
        lbl_3:
            state.needfg = false;
            goto lbl_30;
        lbl_29:
            state.needf = true;
            state.rstate.stage = 4;
            goto lbl_rcomm;
        lbl_4:
            state.needf = false;
        lbl_30:
            state.fc = state.f;
            optserv.trimprepare(state.f, ref state.trimthreshold);
            state.repnfev = state.repnfev+1;
            if( !state.xrep )
            {
                goto lbl_31;
            }
            
            //
            // Report current point
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.sas.xc[i_];
            }
            state.f = state.fc;
            state.xupdated = true;
            state.rstate.stage = 5;
            goto lbl_rcomm;
        lbl_5:
            state.xupdated = false;
        lbl_31:
            if( state.userterminationneeded )
            {
                
                //
                // User requested termination
                //
                sactivesets.sasstopoptimization(state.sas);
                state.repterminationtype = 8;
                result = false;
                return result;
            }
        lbl_33:
            if( false )
            {
                goto lbl_34;
            }
            
            //
            // Preparations
            //
            // (a) calculate unconstrained gradient
            // (b) determine initial active set
            // (c) update MaxScaledGrad
            // (d) check F/G for NAN/INF, abnormally terminate algorithm if needed
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.sas.xc[i_];
            }
            clearrequestfields(state);
            if( (double)(state.diffstep)!=(double)(0) )
            {
                goto lbl_35;
            }
            
            //
            // Analytic gradient
            //
            state.needfg = true;
            state.rstate.stage = 6;
            goto lbl_rcomm;
        lbl_6:
            state.needfg = false;
            goto lbl_36;
        lbl_35:
            
            //
            // Numerical differentiation
            //
            state.needf = true;
            state.rstate.stage = 7;
            goto lbl_rcomm;
        lbl_7:
            state.fbase = state.f;
            i = 0;
        lbl_37:
            if( i>n-1 )
            {
                goto lbl_39;
            }
            v = state.x[i];
            b = false;
            if( state.hasbndl[i] )
            {
                b = b || (double)(v-state.diffstep*state.s[i])<(double)(state.bndl[i]);
            }
            if( state.hasbndu[i] )
            {
                b = b || (double)(v+state.diffstep*state.s[i])>(double)(state.bndu[i]);
            }
            if( b )
            {
                goto lbl_40;
            }
            state.x[i] = v-state.diffstep*state.s[i];
            state.rstate.stage = 8;
            goto lbl_rcomm;
        lbl_8:
            state.fm2 = state.f;
            state.x[i] = v-0.5*state.diffstep*state.s[i];
            state.rstate.stage = 9;
            goto lbl_rcomm;
        lbl_9:
            state.fm1 = state.f;
            state.x[i] = v+0.5*state.diffstep*state.s[i];
            state.rstate.stage = 10;
            goto lbl_rcomm;
        lbl_10:
            state.fp1 = state.f;
            state.x[i] = v+state.diffstep*state.s[i];
            state.rstate.stage = 11;
            goto lbl_rcomm;
        lbl_11:
            state.fp2 = state.f;
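            //
            // NOTE: the expression below is a fourth-order central difference on the
            //       stencil {v-h, v-h/2, v+h/2, v+h} with h = DiffStep*S[i]:
            //           g[i] ~ ( 8*(f(v+h/2)-f(v-h/2)) - (f(v+h)-f(v-h)) ) / (6*h)
            //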
            state.g[i] = (8*(state.fp1-state.fm1)-(state.fp2-state.fm2))/(6*state.diffstep*state.s[i]);
            goto lbl_41;
        lbl_40:
            state.xm1 = v-state.diffstep*state.s[i];
            state.xp1 = v+state.diffstep*state.s[i];
            if( state.hasbndl[i] && (double)(state.xm1)<(double)(state.bndl[i]) )
            {
                state.xm1 = state.bndl[i];
            }
            if( state.hasbndu[i] && (double)(state.xp1)>(double)(state.bndu[i]) )
            {
                state.xp1 = state.bndu[i];
            }
            state.x[i] = state.xm1;
            state.rstate.stage = 12;
            goto lbl_rcomm;
        lbl_12:
            state.fm1 = state.f;
            state.x[i] = state.xp1;
            state.rstate.stage = 13;
            goto lbl_rcomm;
        lbl_13:
            state.fp1 = state.f;
            if( (double)(state.xm1)!=(double)(state.xp1) )
            {
                state.g[i] = (state.fp1-state.fm1)/(state.xp1-state.xm1);
            }
            else
            {
                state.g[i] = 0;
            }
        lbl_41:
            state.x[i] = v;
            i = i+1;
            goto lbl_37;
        lbl_39:
            state.f = state.fbase;
            state.needf = false;
        lbl_36:
            state.fc = state.f;
            for(i_=0; i_<=n-1;i_++)
            {
                state.ugc[i_] = state.g[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.cgc[i_] = state.g[i_];
            }
            sactivesets.sasreactivateconstraintsprec(state.sas, state.ugc);
            sactivesets.sasconstraineddirection(state.sas, ref state.cgc);
            ginit = 0.0;
            for(i=0; i<=n-1; i++)
            {
                ginit = ginit+math.sqr(state.cgc[i]*state.s[i]);
            }
            ginit = Math.Sqrt(ginit);
            state.maxscaledgrad = Math.Max(state.maxscaledgrad, ginit);
            if( !math.isfinite(ginit) || !math.isfinite(state.fc) )
            {
                
                //
                // Abnormal termination - infinities in function/gradient
                //
                sactivesets.sasstopoptimization(state.sas);
                state.repterminationtype = -8;
                result = false;
                return result;
            }
            if( state.userterminationneeded )
            {
                
                //
                // User requested termination
                //
                sactivesets.sasstopoptimization(state.sas);
                state.repterminationtype = 8;
                result = false;
                return result;
            }
            
            //
            // LBFGS stage:
            // * during LBFGS iterations we activate new constraints, but never
            //   deactivate already active ones.
            // * we perform at most N iterations of LBFGS before re-evaluating
            //   active set and restarting LBFGS.
            // * the first iteration of LBFGS is special - it is performed with the
            //   minimum set of active constraints, and algorithm termination can
            //   occur only at this stage. We call this iteration the
            //   "steepest descent step".
            //
            // About termination:
            // * LBFGS iterations can be terminated for one of two reasons:
            //   * "termination" - a non-zero termination code in RepTerminationType,
            //     which means that optimization is done
            //   * "restart" - a zero RepTerminationType, which means that we
            //     have to re-evaluate the active set and resume the LBFGS stage.
            // * one more option is "refresh" - LBFGS iterations continue,
            //   but all BFGS updates (Sk/Yk pairs) are dropped;
            //   this happens after changes in the active set
            //
            state.bufsize = 0;
            state.steepestdescentstep = true;
            itidx = 0;
        lbl_42:
            if( itidx>n-1 )
            {
                goto lbl_44;
            }
            
            //
            // At the beginning of each iteration:
            // * SAS.XC stores current point
            // * FC stores current function value
            // * UGC stores current unconstrained gradient
            // * CGC stores current constrained gradient
            // * D stores constrained step direction (calculated at this block)
            //
            //
            // Check gradient-based stopping criteria
            //
            // This stopping condition is tested only for step which is the
            // first step of LBFGS (subsequent steps may accumulate active
            // constraints thus they should NOT be used for stopping - gradient
            // may be small when constrained, but these constraints may be
            // deactivated by the subsequent steps)
            //
            if( state.steepestdescentstep && (double)(sactivesets.sasscaledconstrainednorm(state.sas, state.ugc))<=(double)(state.epsg) )
            {
                
                //
                // Gradient is small enough.
                // Optimization is terminated
                //
                state.repterminationtype = 4;
                goto lbl_44;
            }
            
            //
            // 1. Calculate search direction D according to L-BFGS algorithm
            //    using constrained preconditioner to perform inner multiplication.
            // 2. Evaluate scaled length of direction D; restart LBFGS if D is zero
            //    (it may be possible that we found minimum, but it is also possible
            //    that some constraints need deactivation)
            // 3. If D is non-zero, try to use previous scaled step length as initial estimate for new step.
            //
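            // NOTE: the loops below are the classic L-BFGS two-loop recursion applied to
            // the constrained gradient CGC: the backward pass stores Theta[i]=Sk[i]'*q and
            // updates q := q - Rho[i]*Theta[i]*Yk[i]; the constrained preconditioner plays
            // the role of the initial inverse Hessian approximation H0; the forward pass
            // then adds Rho[i]*(Theta[i]-Yk[i]'*r)*Sk[i]. The result is negated to obtain
            // the descent direction D.
            //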
            for(i_=0; i_<=n-1;i_++)
            {
                state.work[i_] = state.cgc[i_];
            }
            for(i=state.bufsize-1; i>=0; i--)
            {
                v = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    v += state.bufsk[i,i_]*state.work[i_];
                }
                state.buftheta[i] = v;
                vv = v*state.bufrho[i];
                for(i_=0; i_<=n-1;i_++)
                {
                    state.work[i_] = state.work[i_] - vv*state.bufyk[i,i_];
                }
            }
            sactivesets.sasconstraineddirectionprec(state.sas, ref state.work);
            for(i=0; i<=state.bufsize-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    v += state.bufyk[i,i_]*state.work[i_];
                }
                vv = state.bufrho[i]*(-v+state.buftheta[i]);
                for(i_=0; i_<=n-1;i_++)
                {
                    state.work[i_] = state.work[i_] + vv*state.bufsk[i,i_];
                }
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.d[i_] = -state.work[i_];
            }
            v = 0;
            for(i=0; i<=n-1; i++)
            {
                v = v+math.sqr(state.d[i]/state.s[i]);
            }
            v = Math.Sqrt(v);
            if( (double)(v)==(double)(0) )
            {
                
                //
                // Search direction is zero.
                // If we perform "steepest descent step", algorithm is terminated.
                // Otherwise we just restart LBFGS.
                //
                if( state.steepestdescentstep )
                {
                    state.repterminationtype = 4;
                }
                goto lbl_44;
            }
            alglib.ap.assert((double)(v)>(double)(0), "MinBLEIC: internal error");
            if( (double)(state.lastscaledgoodstep)>(double)(0) && (double)(v)>(double)(0) )
            {
                state.stp = state.lastscaledgoodstep/v;
            }
            else
            {
                state.stp = 1.0/v;
            }
            
            //
            // Calculate bound on step length.
            // Step direction is stored
            //
            sactivesets.sasexploredirection(state.sas, state.d, ref state.curstpmax, ref state.cidx, ref state.cval);
            state.activationstep = state.curstpmax;
            if( state.cidx>=0 && (double)(state.activationstep)==(double)(0) )
            {
                
                //
                // We are exactly at the boundary, immediate activation
                // of constraint is required. LBFGS stage is continued
                // with "refreshed" model.
                //
                // ! IMPORTANT: we do not clear SteepestDescent flag here,
                // !            it is very important for correct stopping
                // !            of algorithm.
                //
                sactivesets.sasimmediateactivation(state.sas, state.cidx, state.cval);
                state.bufsize = 0;
                goto lbl_43;
            }
            if( (double)(state.stpmax)>(double)(0) )
            {
                v = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    v += state.d[i_]*state.d[i_];
                }
                v = Math.Sqrt(v);
                if( (double)(v)>(double)(0) )
                {
                    state.curstpmax = Math.Min(state.curstpmax, state.stpmax/v);
                }
            }
            
            //
            // Report beginning of line search (if requested by caller).
            // See description of the MinBLEICState for more information
            // about fields accessible to caller.
            //
            // Caller may do following:
            // * change State.Stp and load better initial estimate of
            //   the step length.
            // Caller may not terminate algorithm.
            //
            if( !state.drep )
            {
                goto lbl_45;
            }
            clearrequestfields(state);
            state.lsstart = true;
            state.boundedstep = state.cidx>=0;
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.sas.xc[i_];
            }
            state.rstate.stage = 14;
            goto lbl_rcomm;
        lbl_14:
            state.lsstart = false;
        lbl_45:
            
            //
            // Minimize F(x+alpha*d)
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.xn[i_] = state.sas.xc[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.cgn[i_] = state.cgc[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.ugn[i_] = state.ugc[i_];
            }
            state.fn = state.fc;
            state.mcstage = 0;
            linmin.mcsrch(n, ref state.xn, ref state.fn, ref state.cgn, state.d, ref state.stp, state.curstpmax, gtol, ref mcinfo, ref state.nfev, ref state.work, state.lstate, ref state.mcstage);
        lbl_47:
            if( state.mcstage==0 )
            {
                goto lbl_48;
            }
            
            //
            // Perform correction (constraints are enforced)
            // Copy XN to X
            //
            sactivesets.sascorrection(state.sas, state.xn, ref penalty);
            for(i=0; i<=n-1; i++)
            {
                state.x[i] = state.xn[i];
            }
            
            //
            // Gradient, either user-provided or numerical differentiation
            //
            clearrequestfields(state);
            if( (double)(state.diffstep)!=(double)(0) )
            {
                goto lbl_49;
            }
            
            //
            // Analytic gradient
            //
            state.needfg = true;
            state.rstate.stage = 15;
            goto lbl_rcomm;
        lbl_15:
            state.needfg = false;
            state.repnfev = state.repnfev+1;
            goto lbl_50;
        lbl_49:
            
            //
            // Numerical differentiation
            //
            state.needf = true;
            state.rstate.stage = 16;
            goto lbl_rcomm;
        lbl_16:
            state.fbase = state.f;
            i = 0;
        lbl_51:
            if( i>n-1 )
            {
                goto lbl_53;
            }
            v = state.x[i];
            b = false;
            if( state.hasbndl[i] )
            {
                b = b || (double)(v-state.diffstep*state.s[i])<(double)(state.bndl[i]);
            }
            if( state.hasbndu[i] )
            {
                b = b || (double)(v+state.diffstep*state.s[i])>(double)(state.bndu[i]);
            }
            if( b )
            {
                goto lbl_54;
            }
            state.x[i] = v-state.diffstep*state.s[i];
            state.rstate.stage = 17;
            goto lbl_rcomm;
        lbl_17:
            state.fm2 = state.f;
            state.x[i] = v-0.5*state.diffstep*state.s[i];
            state.rstate.stage = 18;
            goto lbl_rcomm;
        lbl_18:
            state.fm1 = state.f;
            state.x[i] = v+0.5*state.diffstep*state.s[i];
            state.rstate.stage = 19;
            goto lbl_rcomm;
        lbl_19:
            state.fp1 = state.f;
            state.x[i] = v+state.diffstep*state.s[i];
            state.rstate.stage = 20;
            goto lbl_rcomm;
        lbl_20:
            state.fp2 = state.f;
            state.g[i] = (8*(state.fp1-state.fm1)-(state.fp2-state.fm2))/(6*state.diffstep*state.s[i]);
            state.repnfev = state.repnfev+4;
            goto lbl_55;
        lbl_54:
            state.xm1 = v-state.diffstep*state.s[i];
            state.xp1 = v+state.diffstep*state.s[i];
            if( state.hasbndl[i] && (double)(state.xm1)<(double)(state.bndl[i]) )
            {
                state.xm1 = state.bndl[i];
            }
            if( state.hasbndu[i] && (double)(state.xp1)>(double)(state.bndu[i]) )
            {
                state.xp1 = state.bndu[i];
            }
            state.x[i] = state.xm1;
            state.rstate.stage = 21;
            goto lbl_rcomm;
        lbl_21:
            state.fm1 = state.f;
            state.x[i] = state.xp1;
            state.rstate.stage = 22;
            goto lbl_rcomm;
        lbl_22:
            state.fp1 = state.f;
            if( (double)(state.xm1)!=(double)(state.xp1) )
            {
                state.g[i] = (state.fp1-state.fm1)/(state.xp1-state.xm1);
            }
            else
            {
                state.g[i] = 0;
            }
            state.repnfev = state.repnfev+2;
        lbl_55:
            state.x[i] = v;
            i = i+1;
            goto lbl_51;
        lbl_53:
            state.f = state.fbase;
            state.needf = false;
        lbl_50:
            
            //
            // Back to MCSRCH
            //
            // NOTE: penalty term from correction is added to FN in order
            //       to penalize increase in infeasibility.
            //
            state.fn = state.f+penaltyfactor*state.maxscaledgrad*penalty;
            for(i_=0; i_<=n-1;i_++)
            {
                state.cgn[i_] = state.g[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.ugn[i_] = state.g[i_];
            }
            sactivesets.sasconstraineddirection(state.sas, ref state.cgn);
            optserv.trimfunction(ref state.fn, ref state.cgn, n, state.trimthreshold);
            linmin.mcsrch(n, ref state.xn, ref state.fn, ref state.cgn, state.d, ref state.stp, state.curstpmax, gtol, ref mcinfo, ref state.nfev, ref state.work, state.lstate, ref state.mcstage);
            goto lbl_47;
        lbl_48:
            for(i_=0; i_<=n-1;i_++)
            {
                state.bufsk[state.bufsize,i_] = -state.sas.xc[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.bufyk[state.bufsize,i_] = -state.cgc[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.bufsk[state.bufsize,i_] = state.bufsk[state.bufsize,i_] + state.xn[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.bufyk[state.bufsize,i_] = state.bufyk[state.bufsize,i_] + state.cgn[i_];
            }
            
            //
            // Check for presence of NAN/INF in function/gradient
            //
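            // NOTE: the loop below folds FN and every component of UGN into a single
            //       scalar; any NaN or infinity propagates into V, so one isfinite(V)
            //       test detects all of them.
            //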
            v = state.fn;
            for(i=0; i<=n-1; i++)
            {
                v = 0.1*v+state.ugn[i];
            }
            if( !math.isfinite(v) )
            {
                
                //
                // Abnormal termination - infinities in function/gradient
                //
                state.repterminationtype = -8;
                goto lbl_44;
            }
            
            //
            // Handle possible failure of the line search or request for termination
            //
            if( mcinfo!=1 && mcinfo!=5 )
            {
                
                //
                // We cannot find a step which decreases the function value. There
                // are two possibilities:
                // (a) numerical properties of the function do not allow us to
                //     find a good step.
                // (b) we are so close to activation of some constraint that the
                //     step which activates it changes the target function by less
                //     than the numerical noise.
                //
                // The optimization algorithm must be able to handle case (b), because
                // failing to do so causes breakdown when the algorithm is started
                // very close to the boundary of the feasible area.
                //
                // In order to handle such cases correctly we allow a limited number
                // of small steps which increase the function value.
                //
                v = 0.0;
                for(i=0; i<=n-1; i++)
                {
                    v = v+math.sqr(state.d[i]*state.curstpmax/state.s[i]);
                }
                v = Math.Sqrt(v);
                if( (state.cidx>=0 && (double)(v)<=(double)(maxnonmonotoniclen)) && state.nonmonotoniccnt>0 )
                {
                    
                    //
                    // We enforce non-monotonic step:
                    // * Stp    := CurStpMax
                    // * MCINFO := 5
                    // * XN     := XC+CurStpMax*D
                    // * non-monotonic counter is decreased
                    //
                    // NOTE: UGN/CGN are not updated because step is so short that we assume that
                    //       GN is approximately equal to GC.
                    //
                    state.stp = state.curstpmax;
                    mcinfo = 5;
                    v = state.curstpmax;
                    for(i_=0; i_<=n-1;i_++)
                    {
                        state.xn[i_] = state.sas.xc[i_];
                    }
                    for(i_=0; i_<=n-1;i_++)
                    {
                        state.xn[i_] = state.xn[i_] + v*state.d[i_];
                    }
                    state.nonmonotoniccnt = state.nonmonotoniccnt-1;
                }
                else
                {
                    
                    //
                    // Numerical properties of the function do not allow
                    // us to solve the problem. Here we have two possibilities:
                    // * if it is a "steepest descent" step, we can terminate the
                    //   algorithm because we are close to a minimum
                    // * if it is NOT a "steepest descent" step, we should restart
                    //   LBFGS iterations.
                    //
                    if( state.steepestdescentstep )
                    {
                        
                        //
                        // Algorithm is terminated
                        //
                        state.repterminationtype = 7;
                        goto lbl_44;
                    }
                    else
                    {
                        
                        //
                        // Re-evaluate active set and restart LBFGS
                        //
                        goto lbl_44;
                    }
                }
            }
            if( state.userterminationneeded )
            {
                goto lbl_44;
            }
            
            //
            // Current point is updated:
            // * move XC/FC/GC to XP/FP/GP
            // * change current point remembered by SAS structure
            // * move XN/FN/GN to XC/FC/GC
            // * report current point and update iterations counter
            // * if MCINFO=1, push new pair SK/YK to LBFGS buffer
            //
            state.fp = state.fc;
            for(i_=0; i_<=n-1;i_++)
            {
                state.xp[i_] = state.sas.xc[i_];
            }
            state.fc = state.fn;
            for(i_=0; i_<=n-1;i_++)
            {
                state.cgc[i_] = state.cgn[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.ugc[i_] = state.ugn[i_];
            }
            actstatus = sactivesets.sasmoveto(state.sas, state.xn, state.cidx>=0 && (double)(state.stp)>=(double)(state.activationstep), state.cidx, state.cval);
            if( !state.xrep )
            {
                goto lbl_56;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.sas.xc[i_];
            }
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 23;
            goto lbl_rcomm;
        lbl_23:
            state.xupdated = false;
        lbl_56:
            state.repinneriterationscount = state.repinneriterationscount+1;
            if( mcinfo==1 )
            {
                
                //
                // Accept new LBFGS update given by Sk,Yk
                //
                if( state.bufsize==m )
                {
                    
                    //
                    // Buffer is full, shift contents by one row
                    //
                    for(i=0; i<=state.bufsize-1; i++)
                    {
                        for(i_=0; i_<=n-1;i_++)
                        {
                            state.bufsk[i,i_] = state.bufsk[i+1,i_];
                        }
                        for(i_=0; i_<=n-1;i_++)
                        {
                            state.bufyk[i,i_] = state.bufyk[i+1,i_];
                        }
                    }
                    for(i=0; i<=state.bufsize-2; i++)
                    {
                        state.bufrho[i] = state.bufrho[i+1];
                        state.buftheta[i] = state.buftheta[i+1];
                    }
                }
                else
                {
                    
                    //
                    // Buffer is not full, increase buffer size by 1
                    //
                    state.bufsize = state.bufsize+1;
                }
                v = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    v += state.bufyk[state.bufsize-1,i_]*state.bufsk[state.bufsize-1,i_];
                }
                vv = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    vv += state.bufyk[state.bufsize-1,i_]*state.bufyk[state.bufsize-1,i_];
                }
                if( (double)(v)==(double)(0) || (double)(vv)==(double)(0) )
                {
                    
                    //
                    // Strange internal error in LBFGS - either YK=0
                    // (which should not happen) or (SK,YK)=0 (again,
                    // unexpected). This should not occur because
                    // MCINFO=1 signals a "good" step. But just
                    // to be sure we have a special branch of code which
                    // restarts LBFGS
                    //
                    goto lbl_44;
                }
                state.bufrho[state.bufsize-1] = 1/v;
                alglib.ap.assert(state.bufsize<=m, "MinBLEIC: internal error");
                
                //
                // Update length of the good step
                //
                v = 0;
                vv = 0;
                for(i=0; i<=n-1; i++)
                {
                    v = v+math.sqr((state.sas.xc[i]-state.xp[i])/state.s[i]);
                    vv = vv+math.sqr(state.sas.xc[i]-state.xp[i]);
                }
                state.lastgoodstep = Math.Sqrt(vv);
                updateestimateofgoodstep(ref state.lastscaledgoodstep, Math.Sqrt(v));
            }
            
            //
            // Check stopping criteria
            //
            // Step size and function-based stopping criteria are tested only
            // for step which satisfies Wolfe conditions and is the first step of
            // LBFGS (subsequent steps may accumulate active constraints thus
            // they should NOT be used for stopping; step size or function change
            // may be small when constrained, but these constraints may be
            // deactivated by the subsequent steps).
            //
            // MaxIts-based stopping condition is checked for all kinds of steps.
            //
            if( mcinfo==1 && state.steepestdescentstep )
            {
                
                //
                // Step is small enough
                //
                v = 0;
                for(i=0; i<=n-1; i++)
                {
                    v = v+math.sqr((state.sas.xc[i]-state.xp[i])/state.s[i]);
                }
                v = Math.Sqrt(v);
                if( (double)(v)<=(double)(state.epsx) )
                {
                    state.repterminationtype = 2;
                    goto lbl_44;
                }
                
                //
                // Function change is small enough
                //
                if( (double)(Math.Abs(state.fp-state.fc))<=(double)(state.epsf*Math.Max(Math.Abs(state.fc), Math.Max(Math.Abs(state.fp), 1.0))) )
                {
                    state.repterminationtype = 1;
                    goto lbl_44;
                }
            }
            if( state.maxits>0 && state.repinneriterationscount>=state.maxits )
            {
                state.repterminationtype = 5;
                goto lbl_44;
            }
            
            //
            // Clear "steepest descent" flag.
            //
            state.steepestdescentstep = false;
            
            //
            // Smooth reset (LBFGS memory model is refreshed) or hard restart:
            // * LBFGS model is refreshed, if line search was performed with activation of constraints
            // * algorithm is restarted if scaled gradient decreased below GDecay
            //
            if( actstatus>=0 )
            {
                state.bufsize = 0;
                goto lbl_43;
            }
            v = 0.0;
            for(i=0; i<=n-1; i++)
            {
                v = v+math.sqr(state.cgc[i]*state.s[i]);
            }
            if( (double)(Math.Sqrt(v))<(double)(gdecay*ginit) )
            {
                goto lbl_44;
            }
        lbl_43:
            itidx = itidx+1;
            goto lbl_42;
        lbl_44:
            if( state.userterminationneeded )
            {
                
                //
                // User requested termination
                //
                state.repterminationtype = 8;
                goto lbl_34;
            }
            if( state.repterminationtype!=0 )
            {
                
                //
                // Algorithm terminated
                //
                goto lbl_34;
            }
            
            //
            // Decrease decay coefficient. Subsequent L-BFGS stages will
            // have more stringent stopping criteria.
            //
            gdecay = Math.Max(gdecay*decaycorrection, mindecay);
            goto lbl_33;
        lbl_34:
            sactivesets.sasstopoptimization(state.sas);
            state.repouteriterationscount = 1;
            result = false;
            return result;
            
            //
            // Saving state
            //
        lbl_rcomm:
            result = true;
            state.rstate.ia[0] = n;
            state.rstate.ia[1] = m;
            state.rstate.ia[2] = i;
            state.rstate.ia[3] = j;
            state.rstate.ia[4] = mcinfo;
            state.rstate.ia[5] = actstatus;
            state.rstate.ia[6] = itidx;
            state.rstate.ba[0] = b;
            state.rstate.ra[0] = v;
            state.rstate.ra[1] = vv;
            state.rstate.ra[2] = v0;
            state.rstate.ra[3] = penalty;
            state.rstate.ra[4] = ginit;
            state.rstate.ra[5] = gdecay;
            return result;
        }
Code Example #22
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This function sets linear constraints for BLEIC optimizer.

        Linear constraints are inactive by default (after initial creation).
        They are preserved after algorithm restart with MinBLEICRestartFrom().

        INPUT PARAMETERS:
            State   -   structure previously allocated with MinBLEICCreate call.
            C       -   linear constraints, array[K,N+1].
                        Each row of C represents one constraint, either equality
                        or inequality (see below):
                        * first N elements correspond to coefficients,
                        * last element corresponds to the right part.
                        All elements of C (including right part) must be finite.
            CT      -   type of constraints, array[K]:
                        * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
                        * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
                        * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
            K       -   number of equality/inequality constraints, K>=0:
                        * if given, only leading K elements of C/CT are used
                        * if not given, automatically determined from sizes of C/CT

        NOTE 1: linear (non-bound) constraints are satisfied only approximately:
        * there always exists some minor violation (about Epsilon in magnitude)
          due to rounding errors
        * numerical differentiation, if used, may  lead  to  function  evaluations
          outside  of the feasible  area,   because   algorithm  does  NOT  change
          numerical differentiation formula according to linear constraints.
        If you want constraints to be  satisfied  exactly, try to reformulate your
        problem  in  such  manner  that  all constraints will become boundary ones
        (this kind of constraints is always satisfied exactly, both in  the  final
        solution and in all intermediate points).

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetlc(minbleicstate state,
            double[,] c,
            int[] ct,
            int k)
        {
            int n = 0;
            int i = 0;
            int j = 0;
            double v = 0;
            int i_ = 0;

            n = state.nmain;
            
            //
            // First, check for errors in the inputs
            //
            alglib.ap.assert(k>=0, "MinBLEICSetLC: K<0");
            alglib.ap.assert(alglib.ap.cols(c)>=n+1 || k==0, "MinBLEICSetLC: Cols(C)<N+1");
            alglib.ap.assert(alglib.ap.rows(c)>=k, "MinBLEICSetLC: Rows(C)<K");
            alglib.ap.assert(alglib.ap.len(ct)>=k, "MinBLEICSetLC: Length(CT)<K");
            alglib.ap.assert(apserv.apservisfinitematrix(c, k, n+1), "MinBLEICSetLC: C contains infinite or NaN values!");
            
            //
            // Handle zero K
            //
            if( k==0 )
            {
                state.nec = 0;
                state.nic = 0;
                return;
            }
            
            //
            // Equality constraints are stored first, in the upper
            // NEC rows of State.CLEIC matrix. Inequality constraints
            // are stored in the next NIC rows.
            //
            // NOTE: we convert inequality constraints to the form
            // A*x<=b before copying them.
            //
            apserv.rmatrixsetlengthatleast(ref state.cleic, k, n+1);
            state.nec = 0;
            state.nic = 0;
            for(i=0; i<=k-1; i++)
            {
                if( ct[i]==0 )
                {
                    for(i_=0; i_<=n;i_++)
                    {
                        state.cleic[state.nec,i_] = c[i,i_];
                    }
                    state.nec = state.nec+1;
                }
            }
            for(i=0; i<=k-1; i++)
            {
                if( ct[i]!=0 )
                {
                    if( ct[i]>0 )
                    {
                        for(i_=0; i_<=n;i_++)
                        {
                            state.cleic[state.nec+state.nic,i_] = -c[i,i_];
                        }
                    }
                    else
                    {
                        for(i_=0; i_<=n;i_++)
                        {
                            state.cleic[state.nec+state.nic,i_] = c[i,i_];
                        }
                    }
                    state.nic = state.nic+1;
                }
            }
            
            //
            // Normalize rows of State.CLEIC: each row must have unit norm.
            // Norm is calculated using first N elements (i.e. right part is
            // not counted when we calculate norm).
            //
            for(i=0; i<=k-1; i++)
            {
                v = 0;
                for(j=0; j<=n-1; j++)
                {
                    v = v+math.sqr(state.cleic[i,j]);
                }
                if( (double)(v)==(double)(0) )
                {
                    continue;
                }
                v = 1/Math.Sqrt(v);
                for(i_=0; i_<=n;i_++)
                {
                    state.cleic[i,i_] = v*state.cleic[i,i_];
                }
            }
            sactivesets.sassetlc(state.sas, c, ct, k);
        }
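
A minimal usage sketch, not part of the scraped listing above. It assumes the public alglib C# wrapper entry points (minbleiccreate, minbleicsetlc, minbleicsetcond, minbleicoptimize, minbleicresults) and a made-up quadratic target; it shows how the C/CT convention maps to concrete constraints:

        using System;

        public static class MinBleicLcDemo
        {
            // Hypothetical target: f(x) = (x0-2)^2 + (x1-3)^2, with analytic gradient.
            private static void Grad(double[] x, ref double func, double[] grad, object obj)
            {
                func = Math.Pow(x[0] - 2, 2) + Math.Pow(x[1] - 3, 2);
                grad[0] = 2 * (x[0] - 2);
                grad[1] = 2 * (x[1] - 3);
            }

            public static void Run()
            {
                double[] start = new double[] { 0, 0 };
                alglib.minbleicstate state;
                alglib.minbleiccreate(start, out state);

                // Two linear constraints: x0 + x1 >= 1 (CT = +1) and x0 - x1 = 0 (CT = 0).
                // Each row of C holds the N coefficients followed by the right-hand side.
                double[,] c = new double[,] { { 1.0, 1.0, 1.0 }, { 1.0, -1.0, 0.0 } };
                int[] ct = new int[] { +1, 0 };
                alglib.minbleicsetlc(state, c, ct);

                alglib.minbleicsetcond(state, 0.0, 0.0, 1.0E-6, 0);
                alglib.minbleicoptimize(state, Grad, null, null);

                double[] xsol;
                alglib.minbleicreport rep;
                alglib.minbleicresults(state, out xsol, out rep);
                Console.WriteLine("termination={0}, x=({1},{2})", rep.terminationtype, xsol[0], xsol[1]);
            }
        }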
Code Example #23
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        BLEIC results

        Buffered implementation of MinBLEICResults() which uses pre-allocated buffer
        to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
        intended to be used in the inner cycles of performance critical algorithms
        where array reallocation penalty is too large to be ignored.

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicresultsbuf(minbleicstate state,
            ref double[] x,
            minbleicreport rep)
        {
            int i = 0;
            int i_ = 0;

            if( alglib.ap.len(x)<state.nmain )
            {
                x = new double[state.nmain];
            }
            rep.iterationscount = state.repinneriterationscount;
            rep.inneriterationscount = state.repinneriterationscount;
            rep.outeriterationscount = state.repouteriterationscount;
            rep.nfev = state.repnfev;
            rep.varidx = state.repvaridx;
            rep.terminationtype = state.repterminationtype;
            if( state.repterminationtype>0 )
            {
                for(i_=0; i_<=state.nmain-1;i_++)
                {
                    x[i_] = state.sas.xc[i_];
                }
            }
            else
            {
                for(i=0; i<=state.nmain-1; i++)
                {
                    x[i] = Double.NaN;
                }
            }
            rep.debugeqerr = state.repdebugeqerr;
            rep.debugfs = state.repdebugfs;
            rep.debugff = state.repdebugff;
            rep.debugdx = state.repdebugdx;
            rep.debugfeasqpits = state.repdebugfeasqpits;
            rep.debugfeasgpaits = state.repdebugfeasgpaits;
        }
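
A short sketch of the intended usage pattern for the buffered variant, assuming the alglib wrapper (minbleicresultsbuf, minbleicrestartfrom) and placeholder names (Grad, startingPoints, ntrials, n and state are defined elsewhere); the output array and the report are allocated once and reused across solves:

        double[] xbuf = new double[n];                       // preallocated once
        alglib.minbleicreport rep = new alglib.minbleicreport();
        for (int trial = 0; trial < ntrials; trial++)
        {
            alglib.minbleicrestartfrom(state, startingPoints[trial]);
            alglib.minbleicoptimize(state, Grad, null, null);
            // Reuses xbuf instead of allocating a fresh array on every call.
            alglib.minbleicresultsbuf(state, ref xbuf, rep);
        }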
Code Example #24
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This function sets stopping conditions for the optimizer.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            EpsG    -   >=0
                        The  subroutine  finishes  its  work   if   the  condition
                        |v|<EpsG is satisfied, where:
                        * |.| means Euclidian norm
                        * v - scaled gradient vector, v[i]=g[i]*s[i]
                        * g - gradient
                        * s - scaling coefficients set by MinBLEICSetScale()
            EpsF    -   >=0
                        The  subroutine  finishes  its work if on k+1-th iteration
                        the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
                        is satisfied.
            EpsX    -   >=0
                        The subroutine finishes its work if  on  k+1-th  iteration
                        the condition |v|<=EpsX is fulfilled, where:
                        * |.| means Euclidian norm
                        * v - scaled step vector, v[i]=dx[i]/s[i]
                        * dx - step vector, dx=X(k+1)-X(k)
                        * s - scaling coefficients set by MinBLEICSetScale()
            MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
                        iterations is unlimited.

        Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
        to automatic stopping criterion selection.

        NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform
              slightly more than MaxIts iterations. I.e., MaxIts  sets  non-strict
              limit on iterations count.

          -- ALGLIB --
             Copyright 28.11.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetcond(minbleicstate state,
            double epsg,
            double epsf,
            double epsx,
            int maxits)
        {
            alglib.ap.assert(math.isfinite(epsg), "MinBLEICSetCond: EpsG is not finite number");
            alglib.ap.assert((double)(epsg)>=(double)(0), "MinBLEICSetCond: negative EpsG");
            alglib.ap.assert(math.isfinite(epsf), "MinBLEICSetCond: EpsF is not finite number");
            alglib.ap.assert((double)(epsf)>=(double)(0), "MinBLEICSetCond: negative EpsF");
            alglib.ap.assert(math.isfinite(epsx), "MinBLEICSetCond: EpsX is not finite number");
            alglib.ap.assert((double)(epsx)>=(double)(0), "MinBLEICSetCond: negative EpsX");
            alglib.ap.assert(maxits>=0, "MinBLEICSetCond: negative MaxIts!");
            if( (((double)(epsg)==(double)(0) && (double)(epsf)==(double)(0)) && (double)(epsx)==(double)(0)) && maxits==0 )
            {
                epsx = 1.0E-6;
            }
            state.epsg = epsg;
            state.epsf = epsf;
            state.epsx = epsx;
            state.maxits = maxits;
        }
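
A brief illustration of the stopping-condition combinations described above; a sketch assuming the alglib wrapper and an already created state:

        // Stop when the scaled step length drops below 1.0E-6; iteration count unlimited.
        alglib.minbleicsetcond(state, 0.0, 0.0, 1.0E-6, 0);

        // Stop on a small scaled gradient norm OR after roughly 200 iterations
        // (MaxIts is a non-strict limit, see the NOTE above).
        alglib.minbleicsetcond(state, 1.0E-8, 0.0, 0.0, 200);

        // All zeros: automatic criterion selection (internally EpsX becomes 1.0E-6).
        alglib.minbleicsetcond(state, 0.0, 0.0, 0.0, 0);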
Code Example #25
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This subroutine submits request for termination of running  optimizer.  It
        should be called from user-supplied callback when user decides that it  is
        time to "smoothly" terminate optimization process.  As  result,  optimizer
        stops at point which was "current accepted" when termination  request  was
        submitted and returns error code 8 (successful termination).

        INPUT PARAMETERS:
            State   -   optimizer structure

        NOTE: after  request  for  termination  optimizer  may   perform   several
              additional calls to user-supplied callbacks. It does  NOT  guarantee
              to stop immediately - it just guarantees that these additional calls
              will be discarded later.

        NOTE: calling this function on optimizer which is NOT running will have no
              effect.
              
        NOTE: multiple calls to this function are possible. First call is counted,
              subsequent calls are silently ignored.

          -- ALGLIB --
             Copyright 08.10.2014 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicrequesttermination(minbleicstate state)
        {
            state.userterminationneeded = true;
        }
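
A hedged sketch of requesting termination from inside a reporting callback, assuming the alglib wrapper, a state and gradient callback created elsewhere, and progress reports enabled with minbleicsetxrep; the threshold 1.0E-3 is purely illustrative. The optimizer then finishes with termination code 8:

        alglib.minbleicsetxrep(state, true);
        alglib.minbleicoptimize(state, Grad,
            (double[] xcur, double fcur, object obj) =>
            {
                // Hypothetical stopping rule: good enough once F drops below the threshold.
                if (fcur < 1.0E-3)
                    alglib.minbleicrequesttermination(state);
            },
            null);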
Code Example #26
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This function sets scaling coefficients for BLEIC optimizer.

        ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
        size and gradient are scaled before comparison with tolerances).  Scale of
        the I-th variable is a translation invariant measure of:
        a) "how large" the variable is
        b) how large the step should be to make significant changes in the function

        Scaling is also used by finite difference variant of the optimizer  - step
        along I-th axis is equal to DiffStep*S[I].

        In  most  optimizers  (and  in  the  BLEIC  too)  scaling is NOT a form of
        preconditioning. It just  affects  stopping  conditions.  You  should  set
        preconditioner  by  separate  call  to  one  of  the  MinBLEICSetPrec...()
        functions.

        There is a special  preconditioning  mode, however,  which  uses   scaling
        coefficients to form diagonal preconditioning matrix. You  can  turn  this
        mode on, if you want.   But  you should understand that scaling is not the
        same thing as preconditioning - these are two different, although  related
        forms of tuning solver.

        INPUT PARAMETERS:
            State   -   structure stores algorithm state
            S       -   array[N], non-zero scaling coefficients
                        S[i] may be negative, sign doesn't matter.

          -- ALGLIB --
             Copyright 14.01.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetscale(minbleicstate state,
            double[] s)
        {
            int i = 0;

            alglib.ap.assert(alglib.ap.len(s)>=state.nmain, "MinBLEICSetScale: Length(S)<N");
            for(i=0; i<=state.nmain-1; i++)
            {
                alglib.ap.assert(math.isfinite(s[i]), "MinBLEICSetScale: S contains infinite or NAN elements");
                alglib.ap.assert((double)(s[i])!=(double)(0), "MinBLEICSetScale: S contains zero elements");
                state.s[i] = Math.Abs(s[i]);
            }
            sactivesets.sassetscale(state.sas, s);
        }
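
A small sketch, assuming the alglib wrapper, for a problem whose two variables live on very different scales (say, a rate around 1.0E-2 and a capacity around 1.0E+6). The scale vector feeds the stopping tests and, with numerical differentiation, the per-axis step DiffStep*S[i]:

        double[] s = new double[] { 1.0E-2, 1.0E+6 };
        alglib.minbleicsetscale(state, s);
        // Optionally reuse the same coefficients as a diagonal preconditioner
        // (the scale-based preconditioning mode mentioned above):
        alglib.minbleicsetprecscale(state);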
Code Example #27
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        This  subroutine  turns  on  verification  of  the  user-supplied analytic
        gradient:
        * user calls this subroutine before optimization begins
        * MinBLEICOptimize() is called
        * prior to  actual  optimization, for each component  of  parameters being
          optimized X[i] algorithm performs following steps:
          * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
            where X[i] is i-th component of the initial point and S[i] is a  scale
            of i-th parameter
          * if needed, steps are bounded with respect to constraints on X[]
          * F(X) is evaluated at these trial points
          * we perform one more evaluation in the middle point of the interval
          * we  build  cubic  model using function values and derivatives at trial
            points and we compare its prediction with actual value in  the  middle
            point
          * in case difference between prediction and actual value is higher  than
            some predetermined threshold, algorithm stops with completion code -7;
            Rep.VarIdx is set to index of the parameter with incorrect derivative.
        * after verification is over, algorithm proceeds to the actual optimization.

        NOTE 1: verification  needs  N (parameters count) gradient evaluations. It
                is very costly and you should use  it  only  for  low  dimensional
                problems,  when  you  want  to  be  sure  that  you've   correctly
                calculated  analytic  derivatives.  You  should  not use it in the
                production code (unless you want to check derivatives provided  by
                some third party).

        NOTE 2: you  should  carefully  choose  TestStep. Value which is too large
                (so large that function behaviour is significantly non-cubic) will
                lead to false alarms. You may use  different  step  for  different
                parameters by means of setting scale with MinBLEICSetScale().

        NOTE 3: this function may lead to false positives. In case it reports that
                I-th  derivative was calculated incorrectly, you may decrease test
                step  and  try  one  more  time  - maybe your function changes too
                sharply  and  your  step  is  too  large  for such a rapidly changing
                function.

        INPUT PARAMETERS:
            State       -   structure used to store algorithm state
            TestStep    -   verification step:
                            * TestStep=0 turns verification off
                            * TestStep>0 activates verification

          -- ALGLIB --
             Copyright 15.06.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetgradientcheck(minbleicstate state,
            double teststep)
        {
            alglib.ap.assert(math.isfinite(teststep), "MinBLEICSetGradientCheck: TestStep contains NaN or Infinite");
            alglib.ap.assert((double)(teststep)>=(double)(0), "MinBLEICSetGradientCheck: invalid argument TestStep(TestStep<0)");
            state.teststep = teststep;
        }
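
A sketch of the verification workflow described above, assuming the alglib wrapper and a state and gradient callback created elsewhere; TestStep=1.0E-3 is an arbitrary illustrative value:

        alglib.minbleicsetgradientcheck(state, 1.0E-3);
        alglib.minbleicoptimize(state, Grad, null, null);
        double[] xsol;
        alglib.minbleicreport rep;
        alglib.minbleicresults(state, out xsol, out rep);
        if (rep.terminationtype == -7)
        {
            // Verification failed: rep.varidx is the index of the suspect derivative.
            System.Console.WriteLine("suspicious derivative w.r.t. variable {0}", rep.varidx);
        }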
Code Example #28
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        Modification of the preconditioner: preconditioning is turned off.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state

          -- ALGLIB --
             Copyright 13.10.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minbleicsetprecdefault(minbleicstate state)
        {
            state.prectype = 0;
        }
Code Example #29
File: optimization.cs Project: orlovk/PtProject
        /*************************************************************************
        Internal initialization subroutine
        *************************************************************************/
        private static void minbleicinitinternal(int n,
            double[] x,
            double diffstep,
            minbleicstate state)
        {
            int i = 0;
            double[,] c = new double[0,0];
            int[] ct = new int[0];

            
            //
            // Initialize
            //
            state.teststep = 0;
            state.nmain = n;
            state.diffstep = diffstep;
            sactivesets.sasinit(n, state.sas);
            state.bndl = new double[n];
            state.hasbndl = new bool[n];
            state.bndu = new double[n];
            state.hasbndu = new bool[n];
            state.xstart = new double[n];
            state.cgc = new double[n];
            state.ugc = new double[n];
            state.xn = new double[n];
            state.cgn = new double[n];
            state.ugn = new double[n];
            state.xp = new double[n];
            state.d = new double[n];
            state.s = new double[n];
            state.x = new double[n];
            state.g = new double[n];
            state.work = new double[n];
            for(i=0; i<=n-1; i++)
            {
                state.bndl[i] = Double.NegativeInfinity;
                state.hasbndl[i] = false;
                state.bndu[i] = Double.PositiveInfinity;
                state.hasbndu[i] = false;
                state.s[i] = 1.0;
            }
            minbleicsetlc(state, c, ct, 0);
            minbleicsetcond(state, 0.0, 0.0, 0.0, 0);
            minbleicsetxrep(state, false);
            minbleicsetdrep(state, false);
            minbleicsetstpmax(state, 0.0);
            minbleicsetprecdefault(state);
            minbleicrestartfrom(state, x);
        }
Code Example #30
File: optimization.cs Project: Ring-r/opt
        /*************************************************************************
        This function:
        1. makes projection of XScaled into equality constrained subspace
           (X is modified in-place)
        2. stores residual from the projection into R
        3. unscales projected XScaled and stores result into XUnscaled with
           additional enforcement
        It calculates set of additional values which are used later for
        modification of the target function F.

        INPUT PARAMETERS:
            State   -   optimizer state (we use its fields to get information
                        about constraints)
            X       -   vector being projected
            R       -   preallocated buffer, used to store residual from projection

        OUTPUT PARAMETERS:
            X       -   projection of input X
            R       -   residual
            RNorm   -   residual norm squared, used later to modify target function
        *************************************************************************/
        private static void projectpointandunscale(minbleicstate state,
            ref double[] xscaled,
            ref double[] xunscaled,
            ref double[] rscaled,
            ref double rnorm2)
        {
            double v = 0;
            int i = 0;
            int nmain = 0;
            int nslack = 0;
            int i_ = 0;

            rnorm2 = 0;

            nmain = state.nmain;
            nslack = state.nslack;
            
            //
            // * subtract XE from XScaled
            // * project XScaled
            // * calculate norm of deviation from null space, store it in RNorm2
            // * calculate residual from projection, store it in R
            // * add XE to XScaled
            // * unscale variables
            //
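            // NOTE: assuming the rows of CECurrent are orthonormal, the loop over the
            //       equality constraints below implements x := x - sum_i (c_i'*x)*c_i
            //       (applied to the shifted point x-xe), while R accumulates the removed
            //       components (including the coordinates zeroed for active constraints)
            //       and RNorm2 accumulates their squared norm.
            //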
            for(i_=0; i_<=nmain+nslack-1;i_++)
            {
                xscaled[i_] = xscaled[i_] - state.xe[i_];
            }
            rnorm2 = 0;
            for(i=0; i<=nmain+nslack-1; i++)
            {
                rscaled[i] = 0;
            }
            for(i=0; i<=nmain+nslack-1; i++)
            {
                if( state.activeconstraints[i] )
                {
                    v = xscaled[i];
                    xscaled[i] = 0;
                    rscaled[i] = rscaled[i]+v;
                    rnorm2 = rnorm2+math.sqr(v);
                }
            }
            for(i=0; i<=state.cecnt-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    v += xscaled[i_]*state.cecurrent[i,i_];
                }
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    xscaled[i_] = xscaled[i_] - v*state.cecurrent[i,i_];
                }
                for(i_=0; i_<=nmain+nslack-1;i_++)
                {
                    rscaled[i_] = rscaled[i_] + v*state.cecurrent[i,i_];
                }
                rnorm2 = rnorm2+math.sqr(v);
            }
            for(i_=0; i_<=nmain+nslack-1;i_++)
            {
                xscaled[i_] = xscaled[i_] + state.xe[i_];
            }
            unscalepoint(state, xscaled, ref xunscaled);
        }