Пример #1
0
 /*************************************************************************
 *  Resets every request flag on the state so that a stale request from a
 *  previous reverse-communication round cannot leak into the next one.
 *************************************************************************/
 private static void lmclearrequestfields(ref minlmstate state)
 {
     state.xupdated = false;
     state.needfij = false;
     state.needfgh = false;
     state.needfg = false;
     state.needf = false;
 }
Пример #2
0
 /*************************************************************************
 *  This function sets stopping conditions for the Levenberg-Marquardt
 *  optimization algorithm.
 *
 *  INPUT PARAMETERS:
 *   State   -   structure which stores algorithm state between calls and
 *               which is used for reverse communication. Must be initialized
 *               with MinLMCreate???()
 *   EpsG    -   >=0. The subroutine finishes its work if the condition
 *               ||G||<EpsG is satisfied, where ||.|| means Euclidian norm,
 *               G - gradient.
 *   EpsF    -   >=0. The subroutine finishes its work if on k+1-th iteration
 *               the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
 *               is satisfied.
 *   EpsX    -   >=0. The subroutine finishes its work if on k+1-th iteration
 *               the condition |X(k+1)-X(k)| <= EpsX is fulfilled.
 *   MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
 *               iterations is unlimited. Only Levenberg-Marquardt iterations
 *               are counted (L-BFGS/CG iterations are NOT counted because
 *               their cost is very low compared to that of LM).
 *
 *  Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
 *  automatic stopping criterion selection (small EpsX).
 *
 *  -- ALGLIB --
 *    Copyright 02.04.2010 by Bochkanov Sergey
 *************************************************************************/
 public static void minlmsetcond(ref minlmstate state,
                                 double epsg,
                                 double epsf,
                                 double epsx,
                                 int maxits)
 {
     System.Diagnostics.Debug.Assert(epsg >= 0, "MinLMSetCond: negative EpsG!");
     System.Diagnostics.Debug.Assert(epsf >= 0, "MinLMSetCond: negative EpsF!");
     System.Diagnostics.Debug.Assert(epsx >= 0, "MinLMSetCond: negative EpsX!");
     System.Diagnostics.Debug.Assert(maxits >= 0, "MinLMSetCond: negative MaxIts!");
     // All-zero settings request automatic criterion selection:
     // fall back to a small step-length tolerance.
     if (epsg == 0 && epsf == 0 && epsx == 0 && maxits == 0)
     {
         epsx = 1.0E-6;
     }
     state.epsg   = epsg;
     state.epsf   = epsf;
     state.epsx   = epsx;
     state.maxits = maxits;
 }
Пример #3
0
        /*************************************************************************
        *  Levenberg-Marquardt algorithm results.
        *
        *  Call this after MinLMIteration has returned False.
        *
        *  Input parameters:
        *   State   -   algorithm state (used by MinLMIteration).
        *
        *  Output parameters:
        *   X       -   array[0..N-1], solution
        *   Rep     -   optimization report; Rep.TerminationType holds the
        *               completion code:
        *               * -1    incorrect parameters were specified
        *               *  1    relative function improvement is no more than EpsF
        *               *  2    relative step is no more than EpsX
        *               *  4    gradient is no more than EpsG
        *               *  5    MaxIts steps was taken
        *               *  7    stopping conditions are too stringent,
        *                       further improvement is impossible
        *               Rep also carries IterationsCount and the number of
        *               function (NFunc), Jacobi matrix (NJac), gradient (NGrad),
        *               Hessian (NHess) and Cholesky (NCholesky) calculations.
        *
        *  -- ALGLIB --
        *    Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmresults(ref minlmstate state,
                                        ref double[] x,
                                        ref minlmreport rep)
        {
            // Copy the current iterate into a freshly allocated solution array.
            x = new double[state.n - 1 + 1];
            for (int idx = 0; idx < state.n; idx++)
            {
                x[idx] = state.x[idx];
            }

            // Transfer the report counters out of the internal state.
            rep.terminationtype = state.repterminationtype;
            rep.iterationscount = state.repiterationscount;
            rep.nfunc = state.repnfunc;
            rep.njac = state.repnjac;
            rep.ngrad = state.repngrad;
            rep.nhess = state.repnhess;
            rep.ncholesky = state.repncholesky;
        }
Пример #4
0
        /*************************************************************************
        *   CLASSIC LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR OPTIMIZATION
        *
        *  Optimization using the Jacobi matrix; the algorithm is the classic
        *  Levenberg-Marquardt method.
        *
        *  Function F is represented as a sum of squares:
        *
        *   F = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
        *
        *  EXAMPLE
        *
        *  See HTML-documentation.
        *
        *  INPUT PARAMETERS:
        *   N       -   dimension, N>1
        *   M       -   number of functions f[i]
        *   X       -   initial solution, array[0..N-1]
        *
        *  OUTPUT PARAMETERS:
        *   State   -   structure which stores algorithm state between subsequent
        *               calls of MinLMIteration. Used for reverse communication.
        *               This structure should be passed to MinLMIteration.
        *
        *  See also MinLMIteration, MinLMResults.
        *
        *  NOTES:
        *  1. stopping conditions may be tuned with the MinLMSetCond() function
        *  2. if the target function contains exp() or other fast growing
        *     functions, and the optimization algorithm makes too large steps
        *     which lead to overflow, use MinLMSetStpMax() to bound the steps.
        *
        *  -- ALGLIB --
        *    Copyright 30.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmcreatefj(int n,
                                         int m,
                                         ref double[] x,
                                         ref minlmstate state)
        {
            //
            // Allocate reverse-communication bookkeeping arrays and mark the
            // state machine as not yet started.
            //
            state.rstate.ia    = new int[3 + 1];
            state.rstate.ba    = new bool[0 + 1];
            state.rstate.ra    = new double[7 + 1];
            state.rstate.stage = -1;

            //
            // Allocate internal structures (FJ mode: gradient storage needed).
            //
            lmprepare(n, m, true, ref state);

            //
            // Apply default settings, then record the problem description.
            //
            minlmsetcond(ref state, 0, 0, 0, 0);
            minlmsetxrep(ref state, false);
            minlmsetstpmax(ref state, 0);
            state.n = n;
            state.m = m;
            state.flags = 0;
            state.usermode = lmmodefj;
            state.wrongparams = false;
            if (n < 1)
            {
                // Bad dimension: flag the error and skip copying X.
                state.wrongparams = true;
                return;
            }
            for (int k = 0; k < n; k++)
            {
                state.x[k] = x[k];
            }
        }
Пример #5
0
 /*************************************************************************
 *  Prepare internal structures (except for RComm).
 *
 *  Resets all report counters, then allocates the working arrays whose
 *  shapes depend on the problem size and on the operating mode:
 *  * HaveGrad=true allocates gradient storage G[N]
 *  * M!=0 (FJ/FGJ mode) allocates the M-by-N Jacobian and Fi[M]
 *  * M=0  (FGH mode)    allocates the N-by-N Hessian instead
 *
 *  Note: M must be zero for FGH mode, non-zero for FJ/FGJ mode.
 *  Invalid sizes (N<=0 or M<0) leave the arrays unallocated; the caller is
 *  responsible for flagging wrong parameters.
 *************************************************************************/
 private static void lmprepare(int n,
                               int m,
                               bool havegrad,
                               ref minlmstate state)
 {
     // Reset the optimization report counters.
     state.repiterationscount = 0;
     state.repterminationtype = 0;
     state.repnfunc           = 0;
     state.repnjac            = 0;
     state.repngrad           = 0;
     state.repnhess           = 0;
     state.repncholesky       = 0;
     if (n <= 0 || m < 0)
     {
         return;
     }
     if (havegrad)
     {
         state.g = new double[n - 1 + 1];
     }
     if (m != 0)
     {
         // FJ/FGJ mode: Jacobian and function vector; Hessian unused.
         state.j  = new double[m - 1 + 1, n - 1 + 1];
         state.fi = new double[m - 1 + 1];
         state.h  = new double[0 + 1, 0 + 1];
     }
     else
     {
         // FGH mode: Hessian; Jacobian and function vector unused.
         state.j  = new double[0 + 1, 0 + 1];
         state.fi = new double[0 + 1];
         state.h  = new double[n - 1 + 1, n - 1 + 1];
     }
     state.x        = new double[n - 1 + 1];
     state.rawmodel = new double[n - 1 + 1, n - 1 + 1];
     state.model    = new double[n - 1 + 1, n - 1 + 1];
     state.xbase    = new double[n - 1 + 1];
     state.xprec    = new double[n - 1 + 1];
     state.gbase    = new double[n - 1 + 1];
     state.xdir     = new double[n - 1 + 1];
     state.xprev    = new double[n - 1 + 1];
     state.work     = new double[Math.Max(n, m) + 1];
 }
Пример #6
0
        /*************************************************************************
        Restarts the LM algorithm from a new point, leaving all optimization
        parameters unchanged.

        This makes it possible to solve multiple optimization problems (which
        must have the same number of dimensions) without paying the object
        reallocation penalty.

        INPUT PARAMETERS:
            State   -   structure used for reverse communication previously
                        allocated with MinLMCreateXXX call.
            X       -   new starting point.

          -- ALGLIB --
             Copyright 30.07.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmrestartfrom(minlmstate state,
            double[] x)
        {
            alglib.ap.assert(alglib.ap.len(x)>=state.n, "MinLMRestartFrom: Length(X)<N!");
            alglib.ap.assert(apserv.isfinitevector(x, state.n), "MinLMRestartFrom: X contains infinite or NaN values!");

            // Record the new starting point.
            for (int k = 0; k < state.n; k++)
            {
                state.xbase[k] = x[k];
            }

            // Rewind the reverse-communication state machine.
            state.rstate.ia = new int[4+1];
            state.rstate.ba = new bool[0+1];
            state.rstate.ra = new double[2+1];
            state.rstate.stage = -1;
            clearrequestfields(state);
        }
Пример #7
0
        /*************************************************************************
        Levenberg-Marquardt algorithm results

        INPUT PARAMETERS:
            State   -   algorithm state

        OUTPUT PARAMETERS:
            X       -   array[0..N-1], solution
            Rep     -   optimization report; includes termination codes and
                        additional information. The termination code is stored
                        in the rep.terminationtype field:
                        * -7    derivative correctness check failed;
                                see rep.wrongnum, rep.wrongi, rep.wrongj for
                                more information.
                        * -3    constraints are inconsistent
                        *  1    relative function improvement is no more than EpsF
                        *  2    relative step is no more than EpsX
                        *  4    gradient is no more than EpsG
                        *  5    MaxIts steps was taken
                        *  7    stopping conditions are too stringent,
                                further improvement is impossible
                        *  8    terminated by user who called
                                minlmrequesttermination(); X contains the point
                                which was "current accepted" when the
                                termination request was submitted.

          -- ALGLIB --
             Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmresults(minlmstate state,
            ref double[] x,
            minlmreport rep)
        {
            // Hand the buffered variant a fresh output array; it fills in
            // both X and Rep.
            x = new double[0];
            minlmresultsbuf(state, ref x, rep);
        }
Пример #8
0
        /*************************************************************************
        This function is used to change acceleration settings.

        You can choose between three acceleration strategies:
        * AccType=0, no acceleration.
        * AccType=1, secant updates are used to update the quadratic model
          after each iteration. After a fixed number of iterations (or after
          model breakdown) the quadratic model is recalculated using the
          analytic Jacobian or finite differences. The number of secant-based
          iterations depends on the optimization settings: about 3 iterations
          with an analytic Jacobian, up to 2*N iterations when the Jacobian is
          computed via finite differences.

        AccType=1 is recommended when Jacobian calculation cost is
        prohibitively high (several Mx1 function vector calculations followed
        by several NxN Cholesky factorizations are faster than calculation of
        one M*N Jacobian). It should also be used when no Jacobian is
        available, because the finite difference approximation takes too much
        time to compute.

        The table below lists optimization protocols (XYZ protocol corresponds
        to MinLMCreateXYZ) and the acceleration types they support (and use by
        default).

        ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:

        protocol    0   1   comment
        V           +   +
        VJ          +   +
        FGH         +

        DEFAULT VALUES:

        protocol    0   1   comment
        V               x   without acceleration it is very slow
        VJ          x
        FGH         x

        NOTE: this function should be called before optimization. Attempting
        to call it during algorithm iterations may result in unexpected
        behavior.

        NOTE: attempting to call this function with an unsupported
        protocol/acceleration combination will result in an exception.

          -- ALGLIB --
             Copyright 14.10.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetacctype(minlmstate state,
            int acctype)
        {
            alglib.ap.assert((acctype==0 || acctype==1) || acctype==2, "MinLMSetAccType: incorrect AccType!");

            // AccType=2 is accepted as an alias for "no acceleration".
            if( acctype==2 )
            {
                acctype = 0;
            }
            if( acctype==0 )
            {
                // No acceleration: rebuild the model on every iteration.
                state.maxmodelage = 0;
                state.makeadditers = false;
                return;
            }
            if( acctype==1 )
            {
                alglib.ap.assert(state.hasfi, "MinLMSetAccType: AccType=1 is incompatible with current protocol!");
                // Finite-difference mode (algomode 0) tolerates an older
                // model than the analytic-Jacobian modes.
                state.maxmodelage = state.algomode==0 ? 2*state.n : smallmodelage;
                state.makeadditers = false;
                return;
            }
        }
Пример #9
0
        /*************************************************************************
        This function sets scaling coefficients for the LM optimizer.

        ALGLIB optimizers use scaling matrices to test stopping conditions
        (step size and gradient are scaled before comparison with tolerances).
        The scale of the I-th variable is a translation invariant measure of:
        a) "how large" the variable is
        b) how large the step should be to make significant changes in the
           function

        Generally, scale is NOT considered to be a form of preconditioner.
        But the LM optimizer is unique in that it uses the scaling matrix both
        in the stopping condition tests and as the Marquardt damping factor.

        Proper scaling is very important for the algorithm performance. It is
        less important for the quality of results, but still has some
        influence (it is easier to converge when variables are properly
        scaled, so premature stopping is possible when very badly scaled
        variables are combined with relaxed stopping conditions).

        INPUT PARAMETERS:
            State   -   structure stores algorithm state
            S       -   array[N], non-zero scaling coefficients
                        S[i] may be negative, sign doesn't matter.

          -- ALGLIB --
             Copyright 14.01.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetscale(minlmstate state,
            double[] s)
        {
            alglib.ap.assert(alglib.ap.len(s)>=state.n, "MinLMSetScale: Length(S)<N");
            for(int k=0; k<state.n; k++)
            {
                alglib.ap.assert(math.isfinite(s[k]), "MinLMSetScale: S contains infinite or NAN elements");
                alglib.ap.assert((double)(s[k])!=(double)(0), "MinLMSetScale: S contains zero elements");
                // Only the magnitude matters; the sign is discarded.
                state.s[k] = Math.Abs(s[k]);
            }
        }
Пример #10
0
        /*************************************************************************
        This function turns iteration reporting on or off.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            NeedXRep-   whether iteration reports are needed or not

        When NeedXRep is True, the algorithm invokes the rep() callback passed
        to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS
        iterations are reported.

          -- ALGLIB --
             Copyright 02.04.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetxrep(minlmstate state,
            bool needxrep)
        {
            // Simply latch the flag; it is consulted by the iteration loop.
            state.xrep = needxrep;
        }
Пример #11
0
        /*************************************************************************
                        IMPROVED LEVENBERG-MARQUARDT METHOD FOR
                         NON-LINEAR LEAST SQUARES OPTIMIZATION

        DESCRIPTION:
        Finds a minimum of a function represented as a sum of squares:
            F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
        using values of the function vector f[] only. Finite differences are
        used to calculate the Jacobian.

        REQUIREMENTS:
        During its operation the algorithm will request:
        * the function vector f[] at a given point X

        Of the several overloaded versions of MinLMOptimize(), you should
        choose the one which accepts an fvec() callback. Initializing the
        MinLMState structure with this function and then using an incompatible
        MinLMOptimize() overload (for example, one which works with a general
        form function and does not accept a function vector) leads to an
        exception being thrown after the first attempt to calculate the
        Jacobian.

        USAGE:
        1. Initialize the algorithm state with MinLMCreateV()
        2. Tune solver parameters with MinLMSetCond(), MinLMSetStpMax() and
           other functions
        3. Call MinLMOptimize() with the algorithm state and callbacks
        4. Call MinLMResults() to get the solution
        5. Optionally call MinLMRestartFrom() to solve another problem with
           the same N/M but another starting point and/or another function,
           reusing the already initialized structure

        INPUT PARAMETERS:
            N       -   dimension, N>1
                        * if given, only leading N elements of X are used
                        * if not given, automatically determined from size of X
            M       -   number of functions f[i]
            X       -   initial solution, array[0..N-1]
            DiffStep-   differentiation step, >0

        OUTPUT PARAMETERS:
            State   -   structure which stores algorithm state

        See also MinLMIteration, MinLMResults.

        NOTES:
        1. stopping conditions may be tuned with the MinLMSetCond() function
        2. if the target function contains exp() or other fast growing
           functions, and the optimization algorithm makes too large steps
           which lead to overflow, use MinLMSetStpMax() to bound the steps.

          -- ALGLIB --
             Copyright 30.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmcreatev(int n,
            int m,
            double[] x,
            double diffstep,
            minlmstate state)
        {
            alglib.ap.assert(math.isfinite(diffstep), "MinLMCreateV: DiffStep is not finite!");
            alglib.ap.assert((double)(diffstep)>(double)(0), "MinLMCreateV: DiffStep<=0!");
            alglib.ap.assert(n>=1, "MinLMCreateV: N<1!");
            alglib.ap.assert(m>=1, "MinLMCreateV: M<1!");
            alglib.ap.assert(alglib.ap.len(x)>=n, "MinLMCreateV: Length(X)<N!");
            alglib.ap.assert(apserv.isfinitevector(x, n), "MinLMCreateV: X contains infinite or NaN values!");

            //
            // Stage 1: record the problem description. "V" protocol means the
            // user supplies the function vector only; the Jacobian comes from
            // finite differences (algomode 0).
            //
            state.n = n;
            state.m = m;
            state.diffstep = diffstep;
            state.teststep = 0;
            state.algomode = 0;
            state.hasf = false;
            state.hasfi = true;
            state.hasg = false;

            //
            // Stage 2: allocate internals and apply the default settings.
            //
            lmprepare(n, m, false, state);
            minlmsetacctype(state, 1);
            minlmsetcond(state, 0, 0, 0, 0);
            minlmsetxrep(state, false);
            minlmsetstpmax(state, 0);
            minlmrestartfrom(state, x);
        }
Пример #12
0
 /*************************************************************************
 Resets every request flag so that no stale request from a previous
 reverse-communication round survives into the next one.
 *************************************************************************/
 private static void clearrequestfields(minlmstate state)
 {
     state.xupdated = false;
     state.needfi = false;
     state.needfij = false;
     state.needfgh = false;
     state.needfg = false;
     state.needf = false;
 }
Пример #13
0
 /*************************************************************************
 *  This function sets the maximum step length.
 *
 *  INPUT PARAMETERS:
 *   State   -   structure which stores algorithm state between calls and
 *               which is used for reverse communication. Must be
 *               initialized with MinCGCreate???()
 *   StpMax  -   maximum step length, >=0. Set StpMax to 0.0 if you don't
 *               want to limit the step length.
 *
 *  Use this subroutine when the target function contains exp() or other
 *  fast growing functions, and the optimization algorithm makes too large
 *  steps which lead to overflow. It allows steps that are too large (and
 *  therefore expose us to possible overflow) to be rejected without
 *  actually calculating the function value at x+stp*d.
 *
 *  NOTE: a non-zero StpMax leads to moderate performance degradation
 *  because the intermediate step of preconditioned L-BFGS optimization is
 *  incompatible with limits on step size.
 *
 *  -- ALGLIB --
 *    Copyright 02.04.2010 by Bochkanov Sergey
 *************************************************************************/
 public static void minlmsetstpmax(ref minlmstate state,
                                   double stpmax)
 {
     // A zero limit means "unbounded"; negative limits are rejected.
     System.Diagnostics.Debug.Assert(stpmax >= 0, "MinLMSetStpMax: StpMax<0!");
     state.stpmax = stpmax;
 }
Пример #14
0
 /*************************************************************************
 *  This function turns iteration reporting on or off.
 *
 *  INPUT PARAMETERS:
 *   State   -   structure which stores algorithm state between calls and
 *               which is used for reverse communication. Must be
 *               initialized with MinLMCreate???()
 *   NeedXRep-   whether iteration reports are needed or not
 *
 *  Usually the algorithm returns from MinLMIteration() only when it needs
 *  a function/gradient/Hessian. With reporting enabled it also stops after
 *  each iteration (one iteration may include more than one function
 *  evaluation), which is indicated by the XUpdated field.
 *
 *  Both Levenberg-Marquardt and L-BFGS iterations are reported.
 *
 *  -- ALGLIB --
 *    Copyright 02.04.2010 by Bochkanov Sergey
 *************************************************************************/
 public static void minlmsetxrep(ref minlmstate state,
                                 bool needxrep)
 {
     // Simply latch the flag; it is consulted by the iteration loop.
     state.xrep = needxrep;
 }
Пример #15
0
        /*************************************************************************
        This is an obsolete function.

        Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ().

          -- ALGLIB --
             Copyright 30.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmcreatefgj(int n,
            int m,
            double[] x,
            minlmstate state)
        {
            // Kept for backward compatibility; forwards unchanged to the
            // non-obsolete creation routine.
            minlmcreatefj(n, m, x, state);
        }
Пример #16
0
        /*************************************************************************
        This subroutine turns on verification of the user-supplied analytic
        gradient:
        * the user calls this subroutine before optimization begins
        * MinLMOptimize() is called
        * prior to the actual optimization, for each function Fi and each
          component X[j] of the parameters being optimized the algorithm:
          * makes two trial steps, to X[j]-TestStep*S[j] and X[j]+TestStep*S[j],
            where X[j] is the j-th parameter and S[j] its scale
          * bounds the steps with respect to constraints on X[], if needed
          * evaluates Fi(X) at these trial points
          * performs one more evaluation in the middle point of the interval
          * builds a cubic model from the function values and derivatives at
            the trial points and compares its prediction with the actual value
            in the middle point
          * if the difference between prediction and actual value exceeds a
            predetermined threshold, the algorithm stops with completion code
            -7; Rep.VarIdx is set to the index of the parameter with the
            incorrect derivative, Rep.FuncIdx to the index of the function.
        * after verification is over, the algorithm proceeds to the actual
          optimization.

        NOTE 1: verification needs N (parameters count) Jacobian evaluations.
                It is very costly and should only be used for low dimensional
                problems, when you want to be sure that you've correctly
                calculated analytic derivatives. Do not use it in production
                code (unless you want to check derivatives provided by some
                third party).

        NOTE 2: choose TestStep carefully. A value so large that the function
                behaviour becomes significantly non-cubic will lead to false
                alarms. Different steps for different parameters can be
                arranged by setting scales with MinLMSetScale().

        NOTE 3: this function may produce false positives. If it reports that
                the I-th derivative was calculated incorrectly, try again with
                a smaller test step - maybe your function changes too sharply
                and your step is too large for such a rapidly changing
                function.

        INPUT PARAMETERS:
            State       -   structure used to store algorithm state
            TestStep    -   verification step:
                            * TestStep=0 turns verification off
                            * TestStep>0 activates verification

          -- ALGLIB --
             Copyright 15.06.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetgradientcheck(minlmstate state,
            double teststep)
        {
            alglib.ap.assert(math.isfinite(teststep), "MinLMSetGradientCheck: TestStep contains NaN or Infinite");
            alglib.ap.assert((double)(teststep)>=(double)(0), "MinLMSetGradientCheck: invalid argument TestStep(TestStep<0)");
            // Zero disables the check; any positive value sets the step size.
            state.teststep = teststep;
        }
Пример #17
0
        /*************************************************************************
        *  One Levenberg-Marquardt iteration.
        *
        *  Called after inialization of State structure with MinLMXXX subroutine.
        *  See HTML docs for examples.
        *
        *  Input parameters:
        *   State   -   structure which stores algorithm state between subsequent
        *               calls and which is used for reverse communication. Must be
        *               initialized with MinLMXXX call first.
        *
        *  If subroutine returned False, iterative algorithm has converged.
        *
        *  If subroutine returned True, then:
        * if State.NeedF=True,      -   function value F at State.X[0..N-1]
        *                               is required
        * if State.NeedFG=True      -   function value F and gradient G
        *                               are required
        * if State.NeedFiJ=True     -   function vector f[i] and Jacobi matrix J
        *                               are required
        * if State.NeedFGH=True     -   function value F, gradient G and Hesian H
        *                               are required
        * if State.XUpdated=True    -   algorithm reports about new iteration,
        *                               State.X contains current point,
        *                               State.F contains function value.
        *
        *  One and only one of this fields can be set at time.
        *
        *  Results are stored:
        * function value            -   in MinLMState.F
        * gradient                  -   in MinLMState.G[0..N-1]
        * Jacobi matrix             -   in MinLMState.J[0..M-1,0..N-1]
        * Hessian                   -   in MinLMState.H[0..N-1,0..N-1]
        *
        *  -- ALGLIB --
        *    Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        // NOTE(review): reverse-communication iteration driver; the user-facing
        // contract is described in the header comment above. The control flow
        // below is machine-generated (goto-based coroutine emulation): each
        // State.RState.Stage value maps to one lbl_N resume point, and locals
        // are persisted across calls via State.RState.ia/ba/ra. Do not
        // restructure by hand.
        public static bool minlmiteration(ref minlmstate state)
        {
            bool   result     = new bool();
            int    n          = 0;
            int    m          = 0;
            int    i          = 0;
            double stepnorm   = 0;
            bool   spd        = new bool();
            double fbase      = 0;
            double fnew       = 0;
            double lambda     = 0;
            double nu         = 0;
            double lambdaup   = 0;
            double lambdadown = 0;
            int    lbfgsflags = 0;
            double v          = 0;
            int    i_         = 0;


            //
            // Reverse communication preparations
            // I know it looks ugly, but it works the same way
            // anywhere from C++ to Python.
            //
            // This code initializes locals by:
            // * random values determined during code
            //   generation - on first subroutine call
            // * values from previous call - on subsequent calls
            //
            if (state.rstate.stage >= 0)
            {
                n          = state.rstate.ia[0];
                m          = state.rstate.ia[1];
                i          = state.rstate.ia[2];
                lbfgsflags = state.rstate.ia[3];
                spd        = state.rstate.ba[0];
                stepnorm   = state.rstate.ra[0];
                fbase      = state.rstate.ra[1];
                fnew       = state.rstate.ra[2];
                lambda     = state.rstate.ra[3];
                nu         = state.rstate.ra[4];
                lambdaup   = state.rstate.ra[5];
                lambdadown = state.rstate.ra[6];
                v          = state.rstate.ra[7];
            }
            else
            {
                // Stage<0: first call. The "random" values below are generator
                // sentinels; every one of them is overwritten in the routine
                // body before use.
                n          = -983;
                m          = -989;
                i          = -834;
                lbfgsflags = 900;
                spd        = true;
                stepnorm   = 364;
                fbase      = 214;
                fnew       = -338;
                lambda     = -686;
                nu         = 912;
                lambdaup   = 585;
                lambdadown = 497;
                v          = -271;
            }
            //
            // Dispatch: resume execution right after the RComm request that
            // suspended this routine (Stage was saved at lbl_rcomm below).
            //
            if (state.rstate.stage == 0)
            {
                goto lbl_0;
            }
            if (state.rstate.stage == 1)
            {
                goto lbl_1;
            }
            if (state.rstate.stage == 2)
            {
                goto lbl_2;
            }
            if (state.rstate.stage == 3)
            {
                goto lbl_3;
            }
            if (state.rstate.stage == 4)
            {
                goto lbl_4;
            }
            if (state.rstate.stage == 5)
            {
                goto lbl_5;
            }
            if (state.rstate.stage == 6)
            {
                goto lbl_6;
            }
            if (state.rstate.stage == 7)
            {
                goto lbl_7;
            }
            if (state.rstate.stage == 8)
            {
                goto lbl_8;
            }
            if (state.rstate.stage == 9)
            {
                goto lbl_9;
            }
            if (state.rstate.stage == 10)
            {
                goto lbl_10;
            }
            if (state.rstate.stage == 11)
            {
                goto lbl_11;
            }
            if (state.rstate.stage == 12)
            {
                goto lbl_12;
            }
            if (state.rstate.stage == 13)
            {
                goto lbl_13;
            }
            if (state.rstate.stage == 14)
            {
                goto lbl_14;
            }
            if (state.rstate.stage == 15)
            {
                goto lbl_15;
            }

            //
            // Routine body
            //
            System.Diagnostics.Debug.Assert(state.usermode == lmmodefj | state.usermode == lmmodefgj | state.usermode == lmmodefgh, "LM: internal error");
            if (state.wrongparams)
            {
                state.repterminationtype = -1;
                result = false;
                return(result);
            }

            //
            // prepare params
            //
            n          = state.n;
            m          = state.m;
            lambdaup   = 20;
            lambdadown = 0.5;
            nu         = 1;
            lbfgsflags = 0;

            //
            // if we have F and G
            //
            // Skip first (L-BFGS) stage unless a gradient is available
            // (FGJ/FGH modes) and pre-optimization was not disabled via the
            // LMFlagNoPreLBFGS flag.
            //
            if (!((state.usermode == lmmodefgj | state.usermode == lmmodefgh) & state.flags / lmflagnoprelbfgs % 2 == 0))
            {
                goto lbl_16;
            }

            //
            // First stage of the hybrid algorithm: LBFGS
            //
            minlbfgs.minlbfgscreate(n, Math.Min(n, lmprelbfgsm), ref state.x, ref state.internalstate);
            minlbfgs.minlbfgssetcond(ref state.internalstate, 0, 0, 0, Math.Max(5, n));
            minlbfgs.minlbfgssetxrep(ref state.internalstate, state.xrep);
            minlbfgs.minlbfgssetstpmax(ref state.internalstate, state.stpmax);
lbl_18:
            if (!minlbfgs.minlbfgsiteration(ref state.internalstate))
            {
                goto lbl_19;
            }
            if (!state.internalstate.needfg)
            {
                goto lbl_20;
            }

            //
            // RComm
            //
            // Forward inner L-BFGS request for F/G to our own caller.
            //
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.x[i_] = state.internalstate.x[i_];
            }
            lmclearrequestfields(ref state);
            state.needfg       = true;
            state.rstate.stage = 0;
            goto lbl_rcomm;
lbl_0:
            state.repnfunc = state.repnfunc + 1;
            state.repngrad = state.repngrad + 1;

            //
            // Call LBFGS
            //
            state.internalstate.f = state.f;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.internalstate.g[i_] = state.g[i_];
            }
lbl_20:
            if (!(state.internalstate.xupdated & state.xrep))
            {
                goto lbl_22;
            }
            // Relay inner L-BFGS progress report to the user (XRep mode).
            lmclearrequestfields(ref state);
            state.f = state.internalstate.f;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.x[i_] = state.internalstate.x[i_];
            }
            state.xupdated     = true;
            state.rstate.stage = 1;
            goto lbl_rcomm;
lbl_1:
lbl_22:
            goto lbl_18;
lbl_19:
            minlbfgs.minlbfgsresults(ref state.internalstate, ref state.x, ref state.internalrep);
            goto lbl_17;
lbl_16:

            //
            // No first stage.
            // However, we may need to report initial point
            //
            if (!state.xrep)
            {
                goto lbl_24;
            }
            lmclearrequestfields(ref state);
            state.needf        = true;
            state.rstate.stage = 2;
            goto lbl_rcomm;
lbl_2:
            lmclearrequestfields(ref state);
            state.xupdated     = true;
            state.rstate.stage = 3;
            goto lbl_rcomm;
lbl_3:
lbl_24:
lbl_17:

            //
            // Second stage of the hybrid algorithm: LM
            // Initialize quadratic model.
            //
            if (state.usermode != lmmodefgh)
            {
                goto lbl_26;
            }

            //
            // RComm
            //
            lmclearrequestfields(ref state);
            state.needfgh      = true;
            state.rstate.stage = 4;
            goto lbl_rcomm;
lbl_4:
            state.repnfunc = state.repnfunc + 1;
            state.repngrad = state.repngrad + 1;
            state.repnhess = state.repnhess + 1;

            //
            // generate raw quadratic model
            //
            // FGH mode: model comes straight from user-supplied Hessian/gradient.
            //
            ablas.rmatrixcopy(n, n, ref state.h, 0, 0, ref state.rawmodel, 0, 0);
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.gbase[i_] = state.g[i_];
            }
            fbase = state.f;
lbl_26:
            if (!(state.usermode == lmmodefgj | state.usermode == lmmodefj))
            {
                goto lbl_28;
            }

            //
            // RComm
            //
            lmclearrequestfields(ref state);
            state.needfij      = true;
            state.rstate.stage = 5;
            goto lbl_rcomm;
lbl_5:
            state.repnfunc = state.repnfunc + 1;
            state.repnjac  = state.repnjac + 1;

            //
            // generate raw quadratic model
            //
            // FJ/FGJ mode (Gauss-Newton): RawModel = 2*J^T*J, GBase = 2*J^T*f,
            // FBase = sum(f[i]^2).
            //
            ablas.rmatrixgemm(n, n, m, 2.0, ref state.j, 0, 0, 1, ref state.j, 0, 0, 0, 0.0, ref state.rawmodel, 0, 0);
            ablas.rmatrixmv(n, m, ref state.j, 0, 0, 1, ref state.fi, 0, ref state.gbase, 0);
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.gbase[i_] = 2 * state.gbase[i_];
            }
            fbase = 0.0;
            for (i_ = 0; i_ <= m - 1; i_++)
            {
                fbase += state.fi[i_] * state.fi[i_];
            }
lbl_28:
            lambda = 0.001;
lbl_30:
            // Main LM loop, emulated "while(true)"; exits only via goto lbl_31.
            if (false)
            {
                goto lbl_31;
            }

            //
            // 1. Model = RawModel+lambda*I
            // 2. Try to solve (RawModel+Lambda*I)*dx = -g.
            //    Increase lambda if left part is not positive definite.
            //
            for (i = 0; i <= n - 1; i++)
            {
                for (i_ = 0; i_ <= n - 1; i_++)
                {
                    state.model[i, i_] = state.rawmodel[i, i_];
                }
                state.model[i, i] = state.model[i, i] + lambda;
            }
            spd = trfac.spdmatrixcholesky(ref state.model, n, true);
            state.repncholesky = state.repncholesky + 1;
            if (spd)
            {
                goto lbl_32;
            }
            if (!increaselambda(ref lambda, ref nu, lambdaup))
            {
                // Lambda can't grow any further: conditions too stringent (code 7).
                goto lbl_34;
            }
            goto lbl_30;
            goto lbl_35;
lbl_34:
            state.repterminationtype = 7;
            lmclearrequestfields(ref state);
            state.needf        = true;
            state.rstate.stage = 6;
            goto lbl_rcomm;
lbl_6:
            goto lbl_31;
lbl_35:
lbl_32:
            densesolver.spdmatrixcholeskysolve(ref state.model, n, true, ref state.gbase, ref state.solverinfo, ref state.solverrep, ref state.xdir);
            if (state.solverinfo >= 0)
            {
                goto lbl_36;
            }
            if (!increaselambda(ref lambda, ref nu, lambdaup))
            {
                goto lbl_38;
            }
            goto lbl_30;
            goto lbl_39;
lbl_38:
            state.repterminationtype = 7;
            lmclearrequestfields(ref state);
            state.needf        = true;
            state.rstate.stage = 7;
            goto lbl_rcomm;
lbl_7:
            goto lbl_31;
lbl_39:
lbl_36:
            // Solver returned step for +g; negate to get descent direction.
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.xdir[i_] = -1 * state.xdir[i_];
            }

            //
            // Candidate lambda is found.
            // 1. Save old w in WBase
            // 1. Test some stopping criterions
            // 2. If error(w+wdir)>error(w), increase lambda
            //
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.xprev[i_] = state.x[i_];
            }
            state.fprev = state.f;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.xbase[i_] = state.x[i_];
            }
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.x[i_] = state.x[i_] + state.xdir[i_];
            }
            stepnorm = 0.0;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                stepnorm += state.xdir[i_] * state.xdir[i_];
            }
            stepnorm = Math.Sqrt(stepnorm);
            if (!((double)(state.stpmax) > (double)(0) & (double)(stepnorm) > (double)(state.stpmax)))
            {
                goto lbl_40;
            }

            //
            // Step is larger than the limit,
            // larger lambda is needed
            //
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            if (!increaselambda(ref lambda, ref nu, lambdaup))
            {
                goto lbl_42;
            }
            goto lbl_30;
            goto lbl_43;
lbl_42:
            state.repterminationtype = 7;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.x[i_] = state.xprev[i_];
            }
            lmclearrequestfields(ref state);
            state.needf        = true;
            state.rstate.stage = 8;
            goto lbl_rcomm;
lbl_8:
            goto lbl_31;
lbl_43:
lbl_40:
            // Evaluate F at the trial point X = XBase + XDir.
            lmclearrequestfields(ref state);
            state.needf        = true;
            state.rstate.stage = 9;
            goto lbl_rcomm;
lbl_9:
            state.repnfunc = state.repnfunc + 1;
            fnew           = state.f;
            if ((double)(fnew) <= (double)(fbase))
            {
                goto lbl_44;
            }

            //
            // restore state and continue search for lambda
            //
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            if (!increaselambda(ref lambda, ref nu, lambdaup))
            {
                goto lbl_46;
            }
            goto lbl_30;
            goto lbl_47;
lbl_46:
            state.repterminationtype = 7;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.x[i_] = state.xprev[i_];
            }
            lmclearrequestfields(ref state);
            state.needf        = true;
            state.rstate.stage = 10;
            goto lbl_rcomm;
lbl_10:
            goto lbl_31;
lbl_47:
lbl_44:
            // Optional internal L-BFGS refinement: only with gradient modes,
            // unbounded step, and LMFlagNoIntLBFGS not set.
            if (!((double)(state.stpmax) == (double)(0) & (state.usermode == lmmodefgj | state.usermode == lmmodefgh) & state.flags / lmflagnointlbfgs % 2 == 0))
            {
                goto lbl_48;
            }

            //
            // Optimize using LBFGS, with inv(cholesky(H)) as preconditioner.
            //
            // It is possible only when StpMax=0, because we can't guarantee
            // that step remains bounded when preconditioner is used (we need
            // SVD decomposition to do that, which is too slow).
            //
            matinv.rmatrixtrinverse(ref state.model, n, true, false, ref state.invinfo, ref state.invrep);
            if (state.invinfo <= 0)
            {
                goto lbl_50;
            }

            //
            // if matrix can be inverted, use it.
            // just silently move to next iteration otherwise.
            // (will be very, very rare, mostly for specially
            // designed near-degenerate tasks)
            //
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.xbase[i_] = state.x[i_];
            }
            for (i = 0; i <= n - 1; i++)
            {
                state.xprec[i] = 0;
            }
            minlbfgs.minlbfgscreatex(n, Math.Min(n, lmintlbfgsits), ref state.xprec, lbfgsflags, ref state.internalstate);
            minlbfgs.minlbfgssetcond(ref state.internalstate, 0, 0, 0, lmintlbfgsits);
lbl_52:
            if (!minlbfgs.minlbfgsiteration(ref state.internalstate))
            {
                goto lbl_53;
            }

            //
            // convert XPrec to unpreconditioned form, then call RComm.
            //
            for (i = 0; i <= n - 1; i++)
            {
                v = 0.0;
                for (i_ = i; i_ <= n - 1; i_++)
                {
                    v += state.internalstate.x[i_] * state.model[i, i_];
                }
                state.x[i] = state.xbase[i] + v;
            }
            lmclearrequestfields(ref state);
            state.needfg       = true;
            state.rstate.stage = 11;
            goto lbl_rcomm;
lbl_11:
            state.repnfunc = state.repnfunc + 1;
            state.repngrad = state.repngrad + 1;

            //
            // 1. pass State.F to State.InternalState.F
            // 2. convert gradient back to preconditioned form
            //
            state.internalstate.f = state.f;
            for (i = 0; i <= n - 1; i++)
            {
                state.internalstate.g[i] = 0;
            }
            for (i = 0; i <= n - 1; i++)
            {
                v = state.g[i];
                for (i_ = i; i_ <= n - 1; i_++)
                {
                    state.internalstate.g[i_] = state.internalstate.g[i_] + v * state.model[i, i_];
                }
            }

            //
            // next iteration
            //
            goto lbl_52;
lbl_53:

            //
            // change LBFGS flags to NoRealloc.
            // L-BFGS subroutine will use memory allocated from previous run.
            // it is possible since all subsequent calls will be with same N/M.
            //
            lbfgsflags = lbfgsnorealloc;

            //
            // back to unpreconditioned X
            //
            minlbfgs.minlbfgsresults(ref state.internalstate, ref state.xprec, ref state.internalrep);
            for (i = 0; i <= n - 1; i++)
            {
                v = 0.0;
                for (i_ = i; i_ <= n - 1; i_++)
                {
                    v += state.xprec[i_] * state.model[i, i_];
                }
                state.x[i] = state.xbase[i] + v;
            }
lbl_50:
lbl_48:

            //
            // Composite iteration is almost over:
            // * accept new position.
            // * rebuild quadratic model
            //
            state.repiterationscount = state.repiterationscount + 1;
            if (state.usermode != lmmodefgh)
            {
                goto lbl_54;
            }
            lmclearrequestfields(ref state);
            state.needfgh      = true;
            state.rstate.stage = 12;
            goto lbl_rcomm;
lbl_12:
            state.repnfunc = state.repnfunc + 1;
            state.repngrad = state.repngrad + 1;
            state.repnhess = state.repnhess + 1;
            ablas.rmatrixcopy(n, n, ref state.h, 0, 0, ref state.rawmodel, 0, 0);
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.gbase[i_] = state.g[i_];
            }
            fnew = state.f;
lbl_54:
            if (!(state.usermode == lmmodefgj | state.usermode == lmmodefj))
            {
                goto lbl_56;
            }
            lmclearrequestfields(ref state);
            state.needfij      = true;
            state.rstate.stage = 13;
            goto lbl_rcomm;
lbl_13:
            state.repnfunc = state.repnfunc + 1;
            state.repnjac  = state.repnjac + 1;
            ablas.rmatrixgemm(n, n, m, 2.0, ref state.j, 0, 0, 1, ref state.j, 0, 0, 0, 0.0, ref state.rawmodel, 0, 0);
            ablas.rmatrixmv(n, m, ref state.j, 0, 0, 1, ref state.fi, 0, ref state.gbase, 0);
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.gbase[i_] = 2 * state.gbase[i_];
            }
            fnew = 0.0;
            for (i_ = 0; i_ <= m - 1; i_++)
            {
                fnew += state.fi[i_] * state.fi[i_];
            }
lbl_56:

            //
            // Stopping conditions
            //
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.work[i_] = state.xprev[i_];
            }
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                state.work[i_] = state.work[i_] - state.x[i_];
            }
            stepnorm = 0.0;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                stepnorm += state.work[i_] * state.work[i_];
            }
            stepnorm = Math.Sqrt(stepnorm);
            if ((double)(stepnorm) <= (double)(state.epsx))
            {
                state.repterminationtype = 2;
                goto lbl_31;
            }
            if (state.repiterationscount >= state.maxits & state.maxits > 0)
            {
                state.repterminationtype = 5;
                goto lbl_31;
            }
            v = 0.0;
            for (i_ = 0; i_ <= n - 1; i_++)
            {
                v += state.gbase[i_] * state.gbase[i_];
            }
            v = Math.Sqrt(v);
            if ((double)(v) <= (double)(state.epsg))
            {
                state.repterminationtype = 4;
                goto lbl_31;
            }
            if ((double)(Math.Abs(fnew - fbase)) <= (double)(state.epsf * Math.Max(1, Math.Max(Math.Abs(fnew), Math.Abs(fbase)))))
            {
                state.repterminationtype = 1;
                goto lbl_31;
            }

            //
            // Now, iteration is finally over:
            // * update FBase
            // * decrease lambda
            // * report new iteration
            //
            if (!state.xrep)
            {
                goto lbl_58;
            }
            lmclearrequestfields(ref state);
            state.xupdated     = true;
            state.f            = fnew;
            state.rstate.stage = 14;
            goto lbl_rcomm;
lbl_14:
lbl_58:
            fbase = fnew;
            decreaselambda(ref lambda, ref nu, lambdadown);
            goto lbl_30;
lbl_31:

            //
            // final point is reported
            //
            if (!state.xrep)
            {
                goto lbl_60;
            }
            lmclearrequestfields(ref state);
            state.xupdated     = true;
            state.f            = fnew;
            state.rstate.stage = 15;
            goto lbl_rcomm;
lbl_15:
lbl_60:
            result = false;
            return(result);

            //
            // Saving state
            //
            // Persist locals and return True, signalling that the caller must
            // service the request flags and then call this routine again.
            //
lbl_rcomm:
            result             = true;
            state.rstate.ia[0] = n;
            state.rstate.ia[1] = m;
            state.rstate.ia[2] = i;
            state.rstate.ia[3] = lbfgsflags;
            state.rstate.ba[0] = spd;
            state.rstate.ra[0] = stepnorm;
            state.rstate.ra[1] = fbase;
            state.rstate.ra[2] = fnew;
            state.rstate.ra[3] = lambda;
            state.rstate.ra[4] = nu;
            state.rstate.ra[5] = lambdaup;
            state.rstate.ra[6] = lambdadown;
            state.rstate.ra[7] = v;
            return(result);
        }
Пример #18
0
        /*************************************************************************
            CLASSIC LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR OPTIMIZATION

        DESCRIPTION:
        This function is used to find minimum of function which is represented  as
        sum of squares:
            F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
        using  value  of  F(),  function  vector  f[] and Jacobian of f[]. Classic
        Levenberg-Marquardt method is used.


        REQUIREMENTS:
        This algorithm will request following information during its operation:

        * function value F at given point X
        * function vector f[] and Jacobian of f[] (simultaneously) at given point

        There are several overloaded versions of  MinLMOptimize()  function  which
        correspond  to  different LM-like optimization algorithms provided by this
        unit. You should choose version which accepts func()  and  jac()  function
        pointers. First pointer is used to calculate F at given point, second  one
        calculates f[] and Jacobian df[i]/dx[j].

        You can try to initialize MinLMState structure with FJ  function and  then
        use incorrect version  of  MinLMOptimize()  (for  example,  version  which
        works  with  general  form function and does not provide Jacobian), but it
        will  lead  to  exception  being  thrown  after first attempt to calculate
        Jacobian.


        USAGE:
        1. User initializes algorithm state with MinLMCreateFJ() call
        2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
           other functions
        3. User calls MinLMOptimize() function which  takes algorithm  state   and
           pointers (delegates, etc.) to callback functions.
        4. User calls MinLMResults() to get solution
        5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
           with same N/M but another starting point and/or another function.
           MinLMRestartFrom() allows to reuse already initialized structure.


        INPUT PARAMETERS:
            N       -   dimension, N>=1
                        * if given, only leading N elements of X are used
                        * if not given, automatically determined from size of X
            M       -   number of functions f[i]
            X       -   initial solution, array[0..N-1]

        OUTPUT PARAMETERS:
            State   -   structure which stores algorithm state

        See also MinLMIteration, MinLMResults.

        NOTES:
        1. you may tune stopping conditions with MinLMSetCond() function
        2. if target function contains exp() or other fast growing functions,  and
           optimization algorithm makes too large steps which leads  to  overflow,
           use MinLMSetStpMax() function to bound algorithm's steps.

          -- ALGLIB --
             Copyright 30.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmcreatefj(int n,
            int m,
            double[] x,
            minlmstate state)
        {
            // Validate user input before touching State.
            ap.assert(n>=1, "MinLMCreateFJ: N<1!");
            ap.assert(m>=1, "MinLMCreateFJ: M<1!");
            ap.assert(ap.len(x)>=n, "MinLMCreateFJ: Length(X)<N!");
            ap.assert(apserv.isfinitevector(x, n), "MinLMCreateFJ: X contains infinite or NaN values!");
            
            //
            // allocate internal arrays (FJ mode: Jacobian present, gradient too)
            //
            lmprepare(n, m, true, state);
            
            //
            // defaults: automatic stopping criteria, no reports, unbounded step
            //
            minlmsetcond(state, 0, 0, 0, 0);
            minlmsetxrep(state, false);
            minlmsetstpmax(state, 0);
            state.n = n;
            state.m = m;
            state.flags = 0;
            state.usermode = lmmodefj;
            state.wrongparams = false;
            if( n<1 )
            {
                // NOTE(review): unreachable after the ap.assert(n>=1) above;
                // kept for exact behavioral parity with legacy code.
                state.wrongparams = true;
                return;
            }
            for(int idx=0; idx<n; idx++)
            {
                state.x[idx] = x[idx];
            }
            
            //
            // set starting point (also resets reverse-communication state)
            //
            minlmrestartfrom(state, x);
        }
Пример #19
0
        /*************************************************************************
        Prepare internal structures (except for RComm).

        Note: M must be zero for FGH mode, non-zero for FJ/FGJ mode.
        *************************************************************************/
        /*************************************************************************
        Allocates all internal State arrays (except for RComm structures).

        N        -  problem dimension, must be positive
        M        -  number of functions: zero for FGH mode, positive for FJ/FGJ
        HaveGrad -  whether a gradient array must be allocated
        State    -  structure whose arrays are (re)allocated

        Invalid sizes leave State untouched; the caller is responsible for
        flagging the error.
        *************************************************************************/
        private static void lmprepare(int n,
            int m,
            bool havegrad,
            minlmstate state)
        {
            if( n<=0 || m<0 )
            {
                return;
            }
            if( havegrad )
            {
                state.g = new double[n];
            }
            if( m==0 )
            {
                // FGH mode: dense NxN Hessian, placeholder 1x1 Jacobian/vector
                state.j = new double[1, 1];
                state.fi = new double[1];
                state.h = new double[n, n];
            }
            else
            {
                // FJ/FGJ mode: MxN Jacobian, M-vector of functions, dummy Hessian
                state.j = new double[m, n];
                state.fi = new double[m];
                state.h = new double[1, 1];
            }
            state.x = new double[n];
            state.rawmodel = new double[n, n];
            state.model = new double[n, n];
            state.xbase = new double[n];
            state.xprec = new double[n];
            state.gbase = new double[n];
            state.xdir = new double[n];
            state.xprev = new double[n];
            state.work = new double[Math.Max(n, m)+1];
        }
Пример #20
0
        /*************************************************************************
        This function sets stopping conditions for Levenberg-Marquardt optimization
        algorithm.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            EpsG    -   >=0
                        The  subroutine  finishes  its  work   if   the  condition
                        |v|<EpsG is satisfied, where:
                        * |.| means Euclidian norm
                        * v - scaled gradient vector, v[i]=g[i]*s[i]
                        * g - gradient
                        * s - scaling coefficients set by MinLMSetScale()
            EpsF    -   >=0
                        The  subroutine  finishes  its work if on k+1-th iteration
                        the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
                        is satisfied.
            EpsX    -   >=0
                        The subroutine finishes its work if  on  k+1-th  iteration
                        the condition |v|<=EpsX is fulfilled, where:
                        * |.| means Euclidian norm
                        * v - scaled step vector, v[i]=dx[i]/s[i]
                        * dx - step vector, dx=X(k+1)-X(k)
                        * s - scaling coefficients set by MinLMSetScale()
            MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
                        iterations   is    unlimited.   Only   Levenberg-Marquardt
                        iterations  are  counted  (L-BFGS/CG  iterations  are  NOT
                        counted because their cost is very low compared to that of
                        LM).

        Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
        automatic stopping criterion selection (small EpsX).

          -- ALGLIB --
             Copyright 02.04.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetcond(minlmstate state,
            double epsg,
            double epsf,
            double epsx,
            int maxits)
        {
            // Each threshold must be a finite, non-negative number.
            alglib.ap.assert(math.isfinite(epsg), "MinLMSetCond: EpsG is not finite number!");
            alglib.ap.assert(epsg>=0.0, "MinLMSetCond: negative EpsG!");
            alglib.ap.assert(math.isfinite(epsf), "MinLMSetCond: EpsF is not finite number!");
            alglib.ap.assert(epsf>=0.0, "MinLMSetCond: negative EpsF!");
            alglib.ap.assert(math.isfinite(epsx), "MinLMSetCond: EpsX is not finite number!");
            alglib.ap.assert(epsx>=0.0, "MinLMSetCond: negative EpsX!");
            alglib.ap.assert(maxits>=0, "MinLMSetCond: negative MaxIts!");

            // All criteria disabled => fall back to a small step-length criterion.
            bool allzero = epsg==0.0 && epsf==0.0 && epsx==0.0 && maxits==0;
            if( allzero )
            {
                epsx = 1.0E-6;
            }
            state.epsg = epsg;
            state.epsf = epsf;
            state.epsx = epsx;
            state.maxits = maxits;
        }
Пример #21
0
        /*************************************************************************
        Levenberg-Marquardt algorithm results

        Called after MinLMIteration returned False.

        Input parameters:
            State   -   algorithm state (used by MinLMIteration).

        Output parameters:
            X       -   array[0..N-1], solution
            Rep     -   optimization report:
                        * Rep.TerminationType completetion code:
                            * -1    incorrect parameters were specified
                            *  1    relative function improvement is no more than
                                    EpsF.
                            *  2    relative step is no more than EpsX.
                            *  4    gradient is no more than EpsG.
                            *  5    MaxIts steps was taken
                            *  7    stopping conditions are too stringent,
                                    further improvement is impossible
                        * Rep.IterationsCount contains iterations count
                        * Rep.NFunc     - number of function calculations
                        * Rep.NJac      - number of Jacobi matrix calculations
                        * Rep.NGrad     - number of gradient calculations
                        * Rep.NHess     - number of Hessian calculations
                        * Rep.NCholesky - number of Cholesky decomposition calculations

          -- ALGLIB --
             Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmresults(ref minlmstate state,
            ref double[] x,
            ref minlmreport rep)
        {
            int n = state.n;

            // Export the current iterate into a freshly allocated array.
            x = new double[n];
            for(int idx=0; idx<n; idx++)
            {
                x[idx] = state.x[idx];
            }

            // Copy accumulated counters and the termination code into the report.
            rep.iterationscount = state.repiterationscount;
            rep.terminationtype = state.repterminationtype;
            rep.nfunc = state.repnfunc;
            rep.njac = state.repnjac;
            rep.ngrad = state.repngrad;
            rep.nhess = state.repnhess;
            rep.ncholesky = state.repncholesky;
        }
Пример #22
0
        /*************************************************************************
        This function sets maximum step length

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state
            StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
                        want to limit step length.

        Use this subroutine when you optimize target function which contains exp()
        or  other  fast  growing  functions,  and optimization algorithm makes too
        large  steps  which  leads  to overflow. This function allows us to reject
        steps  that  are  too  large  (and  therefore  expose  us  to the possible
        overflow) without actually calculating function value at the x+stp*d.

        NOTE: non-zero StpMax leads to moderate  performance  degradation  because
        intermediate  step  of  preconditioned L-BFGS optimization is incompatible
        with limits on step size.

          -- ALGLIB --
             Copyright 02.04.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetstpmax(minlmstate state,
            double stpmax)
        {
            // The step limit must be a finite, non-negative value
            // (zero disables the limit entirely).
            alglib.ap.assert(math.isfinite(stpmax), "MinLMSetStpMax: StpMax is not finite!");
            alglib.ap.assert(stpmax>=0.0, "MinLMSetStpMax: StpMax<0!");
            state.stpmax = stpmax;
        }
Пример #23
0
        /*************************************************************************
        Prepare internal structures (except for RComm).

        Resets the report counters and allocates the work arrays used by the
        optimizer:
        * N - problem dimension, must be positive
        * M - number of component functions; must be zero for FGH mode
              (Hessian-based), non-zero for FJ/FGJ mode (Jacobian-based)
        * HaveGrad - whether a gradient buffer is needed

        On invalid sizes (N<=0 or M<0) the function returns without allocating
        anything; the caller is expected to flag the error.
        *************************************************************************/
        private static void lmprepare(int n,
            int m,
            bool havegrad,
            ref minlmstate state)
        {
            state.repiterationscount = 0;
            state.repterminationtype = 0;
            state.repnfunc = 0;
            state.repnjac = 0;
            state.repngrad = 0;
            state.repnhess = 0;
            state.repncholesky = 0;

            // Short-circuit || (original used bitwise | on booleans; same
            // result, but || is the idiomatic form for logical conditions).
            if( n<=0 || m<0 )
            {
                return;
            }
            if( havegrad )
            {
                state.g = new double[n];
            }
            if( m!=0 )
            {
                // FJ/FGJ mode: M-component vector function, M x N Jacobian;
                // Hessian is allocated as a 1x1 placeholder.
                state.j = new double[m, n];
                state.fi = new double[m];
                state.h = new double[1, 1];
            }
            else
            {
                // FGH mode: scalar function, N x N Hessian;
                // Jacobian/FI are allocated as placeholders.
                state.j = new double[1, 1];
                state.fi = new double[1];
                state.h = new double[n, n];
            }
            state.x = new double[n];
            state.rawmodel = new double[n, n];
            state.model = new double[n, n];
            state.xbase = new double[n];
            state.xprec = new double[n];
            state.gbase = new double[n];
            state.xdir = new double[n];
            state.xprev = new double[n];
            state.work = new double[Math.Max(n, m)+1];
        }
Пример #24
0
        /*************************************************************************
        This function sets boundary constraints for LM optimizer

        Boundary constraints are inactive by default (after initial creation).
        They are preserved until explicitly turned off with another SetBC() call.

        INPUT PARAMETERS:
            State   -   structure stores algorithm state
            BndL    -   lower bounds, array[N].
                        If some (all) variables are unbounded, you may specify
                        very small number or -INF (latter is recommended because
                        it will allow solver to use better algorithm).
            BndU    -   upper bounds, array[N].
                        If some (all) variables are unbounded, you may specify
                        very large number or +INF (latter is recommended because
                        it will allow solver to use better algorithm).

        NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
        variable will be "frozen" at X[i]=BndL[i]=BndU[i].

        NOTE 2: this solver has following useful properties:
        * bound constraints are always satisfied exactly
        * function is evaluated only INSIDE area specified by bound constraints
          or at its boundary

          -- ALGLIB --
             Copyright 14.01.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetbc(minlmstate state,
            double[] bndl,
            double[] bndu)
        {
            int n = state.n;

            alglib.ap.assert(alglib.ap.len(bndl)>=n, "MinLMSetBC: Length(BndL)<N");
            alglib.ap.assert(alglib.ap.len(bndu)>=n, "MinLMSetBC: Length(BndU)<N");
            for(int i=0; i<n; i++)
            {
                // Each bound is either finite or an infinity of the correct
                // sign; NANs and wrong-signed infinities are rejected.
                bool lowerok = math.isfinite(bndl[i]) || Double.IsNegativeInfinity(bndl[i]);
                bool upperok = math.isfinite(bndu[i]) || Double.IsPositiveInfinity(bndu[i]);
                alglib.ap.assert(lowerok, "MinLMSetBC: BndL contains NAN or +INF");
                alglib.ap.assert(upperok, "MinLMSetBC: BndU contains NAN or -INF");

                // Infinite bounds are stored but marked as absent via the
                // HaveBndL/HaveBndU flags.
                state.bndl[i] = bndl[i];
                state.havebndl[i] = math.isfinite(bndl[i]);
                state.bndu[i] = bndu[i];
                state.havebndu[i] = math.isfinite(bndu[i]);
            }
        }
Пример #25
0
        /*************************************************************************
            CLASSIC LEVENBERG-MARQUARDT METHOD FOR NON-LINEAR OPTIMIZATION

        Optimization using Jacobi matrix. Algorithm  -  classic Levenberg-Marquardt
        method.

        Function F is represented as sum of squares:

            F = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])

        EXAMPLE

        See HTML-documentation.

        INPUT PARAMETERS:
            N       -   dimension, N>1
            M       -   number of functions f[i]
            X       -   initial solution, array[0..N-1]

        OUTPUT PARAMETERS:
            State   -   structure which stores algorithm state between subsequent
                        calls of MinLMIteration. Used for reverse communication.
                        This structure should be passed to MinLMIteration subroutine.

        See also MinLMIteration, MinLMResults.

        NOTES:

        1. you may tune stopping conditions with MinLMSetCond() function
        2. if target function contains exp() or other fast growing functions,  and
           optimization algorithm makes too large steps which leads  to  overflow,
           use MinLMSetStpMax() function to bound algorithm's steps.

          -- ALGLIB --
             Copyright 30.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmcreatefj(int n,
            int m,
            ref double[] x,
            ref minlmstate state)
        {
            
            //
            // Allocate reverse-communication bookkeeping
            //
            state.rstate.ia = new int[3+1];
            state.rstate.ba = new bool[0+1];
            state.rstate.ra = new double[7+1];
            state.rstate.stage = -1;
            
            //
            // Allocate internal work areas
            //
            lmprepare(n, m, true, ref state);
            
            //
            // Defaults: automatic stopping criteria, no iteration reports,
            // unlimited step length
            //
            minlmsetcond(ref state, 0, 0, 0, 0);
            minlmsetxrep(ref state, false);
            minlmsetstpmax(ref state, 0);
            
            //
            // Problem description; bail out on invalid dimension before
            // copying the starting point
            //
            state.n = n;
            state.m = m;
            state.flags = 0;
            state.usermode = lmmodefj;
            state.wrongparams = false;
            if( n<1 )
            {
                state.wrongparams = true;
                return;
            }
            for(int idx=0; idx<n; idx++)
            {
                state.x[idx] = x[idx];
            }
        }
Пример #26
0
        /*************************************************************************
        NOTES:

        1. Depending on function used to create state  structure,  this  algorithm
           may accept Jacobian and/or Hessian and/or gradient.  According  to  the
           said above, there ase several versions of this function,  which  accept
           different sets of callbacks.

           This flexibility opens way to subtle errors - you may create state with
           MinLMCreateFGH() (optimization using Hessian), but call function  which
           does not accept Hessian. So when algorithm will request Hessian,  there
           will be no callback to call. In this case exception will be thrown.

           Be careful to avoid such errors because there is no way to find them at
           compile time - you can see them at runtime only.

          -- ALGLIB --
             Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static bool minlmiteration(minlmstate state)
        {
            bool result = new bool();
            int n = 0;
            int m = 0;
            bool bflag = new bool();
            int iflag = 0;
            double v = 0;
            double s = 0;
            double t = 0;
            int i = 0;
            int k = 0;
            int i_ = 0;

            
            //
            // Reverse communication preparations
            // I know it looks ugly, but it works the same way
            // anywhere from C++ to Python.
            //
            // This code initializes locals by:
            // * random values determined during code
            //   generation - on first subroutine call
            // * values from previous call - on subsequent calls
            //
            if( state.rstate.stage>=0 )
            {
                n = state.rstate.ia[0];
                m = state.rstate.ia[1];
                iflag = state.rstate.ia[2];
                i = state.rstate.ia[3];
                k = state.rstate.ia[4];
                bflag = state.rstate.ba[0];
                v = state.rstate.ra[0];
                s = state.rstate.ra[1];
                t = state.rstate.ra[2];
            }
            else
            {
                n = -983;
                m = -989;
                iflag = -834;
                i = 900;
                k = -287;
                bflag = false;
                v = 214;
                s = -338;
                t = -686;
            }
            if( state.rstate.stage==0 )
            {
                goto lbl_0;
            }
            if( state.rstate.stage==1 )
            {
                goto lbl_1;
            }
            if( state.rstate.stage==2 )
            {
                goto lbl_2;
            }
            if( state.rstate.stage==3 )
            {
                goto lbl_3;
            }
            if( state.rstate.stage==4 )
            {
                goto lbl_4;
            }
            if( state.rstate.stage==5 )
            {
                goto lbl_5;
            }
            if( state.rstate.stage==6 )
            {
                goto lbl_6;
            }
            if( state.rstate.stage==7 )
            {
                goto lbl_7;
            }
            if( state.rstate.stage==8 )
            {
                goto lbl_8;
            }
            if( state.rstate.stage==9 )
            {
                goto lbl_9;
            }
            if( state.rstate.stage==10 )
            {
                goto lbl_10;
            }
            if( state.rstate.stage==11 )
            {
                goto lbl_11;
            }
            if( state.rstate.stage==12 )
            {
                goto lbl_12;
            }
            if( state.rstate.stage==13 )
            {
                goto lbl_13;
            }
            if( state.rstate.stage==14 )
            {
                goto lbl_14;
            }
            if( state.rstate.stage==15 )
            {
                goto lbl_15;
            }
            if( state.rstate.stage==16 )
            {
                goto lbl_16;
            }
            if( state.rstate.stage==17 )
            {
                goto lbl_17;
            }
            if( state.rstate.stage==18 )
            {
                goto lbl_18;
            }
            
            //
            // Routine body
            //
            
            //
            // prepare
            //
            n = state.n;
            m = state.m;
            state.repiterationscount = 0;
            state.repterminationtype = 0;
            state.repfuncidx = -1;
            state.repvaridx = -1;
            state.repnfunc = 0;
            state.repnjac = 0;
            state.repngrad = 0;
            state.repnhess = 0;
            state.repncholesky = 0;
            state.userterminationneeded = false;
            
            //
            // check consistency of constraints,
            // enforce feasibility of the solution
            // set constraints
            //
            if( !optserv.enforceboundaryconstraints(ref state.xbase, state.bndl, state.havebndl, state.bndu, state.havebndu, n, 0) )
            {
                state.repterminationtype = -3;
                result = false;
                return result;
            }
            minqp.minqpsetbc(state.qpstate, state.bndl, state.bndu);
            
            //
            //  Check, that transferred derivative value is right
            //
            clearrequestfields(state);
            if( !(state.algomode==1 && (double)(state.teststep)>(double)(0)) )
            {
                goto lbl_19;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            state.needfij = true;
            i = 0;
        lbl_21:
            if( i>n-1 )
            {
                goto lbl_23;
            }
            alglib.ap.assert((state.havebndl[i] && (double)(state.bndl[i])<=(double)(state.x[i])) || !state.havebndl[i], "MinLM: internal error(State.X is out of bounds)");
            alglib.ap.assert((state.havebndu[i] && (double)(state.x[i])<=(double)(state.bndu[i])) || !state.havebndu[i], "MinLMIteration: internal error(State.X is out of bounds)");
            v = state.x[i];
            state.x[i] = v-state.teststep*state.s[i];
            if( state.havebndl[i] )
            {
                state.x[i] = Math.Max(state.x[i], state.bndl[i]);
            }
            state.xm1 = state.x[i];
            state.rstate.stage = 0;
            goto lbl_rcomm;
        lbl_0:
            for(i_=0; i_<=m-1;i_++)
            {
                state.fm1[i_] = state.fi[i_];
            }
            for(i_=0; i_<=m-1;i_++)
            {
                state.gm1[i_] = state.j[i_,i];
            }
            state.x[i] = v+state.teststep*state.s[i];
            if( state.havebndu[i] )
            {
                state.x[i] = Math.Min(state.x[i], state.bndu[i]);
            }
            state.xp1 = state.x[i];
            state.rstate.stage = 1;
            goto lbl_rcomm;
        lbl_1:
            for(i_=0; i_<=m-1;i_++)
            {
                state.fp1[i_] = state.fi[i_];
            }
            for(i_=0; i_<=m-1;i_++)
            {
                state.gp1[i_] = state.j[i_,i];
            }
            state.x[i] = (state.xm1+state.xp1)/2;
            if( state.havebndl[i] )
            {
                state.x[i] = Math.Max(state.x[i], state.bndl[i]);
            }
            if( state.havebndu[i] )
            {
                state.x[i] = Math.Min(state.x[i], state.bndu[i]);
            }
            state.rstate.stage = 2;
            goto lbl_rcomm;
        lbl_2:
            for(i_=0; i_<=m-1;i_++)
            {
                state.fc1[i_] = state.fi[i_];
            }
            for(i_=0; i_<=m-1;i_++)
            {
                state.gc1[i_] = state.j[i_,i];
            }
            state.x[i] = v;
            for(k=0; k<=m-1; k++)
            {
                if( !optserv.derivativecheck(state.fm1[k], state.gm1[k], state.fp1[k], state.gp1[k], state.fc1[k], state.gc1[k], state.xp1-state.xm1) )
                {
                    state.repfuncidx = k;
                    state.repvaridx = i;
                    state.repterminationtype = -7;
                    result = false;
                    return result;
                }
            }
            i = i+1;
            goto lbl_21;
        lbl_23:
            state.needfij = false;
        lbl_19:
            
            //
            // Initial report of current point
            //
            // Note 1: we rewrite State.X twice because
            // user may accidentally change it after first call.
            //
            // Note 2: we set NeedF or NeedFI depending on what
            // information about function we have.
            //
            if( !state.xrep )
            {
                goto lbl_24;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            clearrequestfields(state);
            if( !state.hasf )
            {
                goto lbl_26;
            }
            state.needf = true;
            state.rstate.stage = 3;
            goto lbl_rcomm;
        lbl_3:
            state.needf = false;
            goto lbl_27;
        lbl_26:
            alglib.ap.assert(state.hasfi, "MinLM: internal error 2!");
            state.needfi = true;
            state.rstate.stage = 4;
            goto lbl_rcomm;
        lbl_4:
            state.needfi = false;
            v = 0.0;
            for(i_=0; i_<=m-1;i_++)
            {
                v += state.fi[i_]*state.fi[i_];
            }
            state.f = v;
        lbl_27:
            state.repnfunc = state.repnfunc+1;
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 5;
            goto lbl_rcomm;
        lbl_5:
            state.xupdated = false;
        lbl_24:
            if( state.userterminationneeded )
            {
                
                //
                // User requested termination
                //
                for(i_=0; i_<=n-1;i_++)
                {
                    state.x[i_] = state.xbase[i_];
                }
                state.repterminationtype = 8;
                result = false;
                return result;
            }
            
            //
            // Prepare control variables
            //
            state.nu = 1;
            state.lambdav = -math.maxrealnumber;
            state.modelage = state.maxmodelage+1;
            state.deltaxready = false;
            state.deltafready = false;
            
            //
            // Main cycle.
            //
            // We move through it until either:
            // * one of the stopping conditions is met
            // * we decide that stopping conditions are too stringent
            //   and break from cycle
            //
            //
        lbl_28:
            if( false )
            {
                goto lbl_29;
            }
            
            //
            // First, we have to prepare quadratic model for our function.
            // We use BFlag to ensure that model is prepared;
            // if it is false at the end of this block, something went wrong.
            //
            // We may either calculate brand new model or update old one.
            //
            // Before this block we have:
            // * State.XBase            - current position.
            // * State.DeltaX           - if DeltaXReady is True
            // * State.DeltaF           - if DeltaFReady is True
            //
            // After this block is over, we will have:
            // * State.XBase            - base point (unchanged)
            // * State.FBase            - F(XBase)
            // * State.GBase            - linear term
            // * State.QuadraticModel   - quadratic term
            // * State.LambdaV          - current estimate for lambda
            //
            // We also clear DeltaXReady/DeltaFReady flags
            // after initialization is done.
            //
            bflag = false;
            if( !(state.algomode==0 || state.algomode==1) )
            {
                goto lbl_30;
            }
            
            //
            // Calculate f[] and Jacobian
            //
            if( !(state.modelage>state.maxmodelage || !(state.deltaxready && state.deltafready)) )
            {
                goto lbl_32;
            }
            
            //
            // Refresh model (using either finite differences or analytic Jacobian)
            //
            if( state.algomode!=0 )
            {
                goto lbl_34;
            }
            
            //
            // Optimization using F values only.
            // Use finite differences to estimate Jacobian.
            //
            alglib.ap.assert(state.hasfi, "MinLMIteration: internal error when estimating Jacobian (no f[])");
            k = 0;
        lbl_36:
            if( k>n-1 )
            {
                goto lbl_38;
            }
            
            //
            // We guard X[k] from leaving [BndL,BndU].
            // In case BndL=BndU, we assume that derivative in this direction is zero.
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            state.x[k] = state.x[k]-state.s[k]*state.diffstep;
            if( state.havebndl[k] )
            {
                state.x[k] = Math.Max(state.x[k], state.bndl[k]);
            }
            if( state.havebndu[k] )
            {
                state.x[k] = Math.Min(state.x[k], state.bndu[k]);
            }
            state.xm1 = state.x[k];
            clearrequestfields(state);
            state.needfi = true;
            state.rstate.stage = 6;
            goto lbl_rcomm;
        lbl_6:
            state.repnfunc = state.repnfunc+1;
            for(i_=0; i_<=m-1;i_++)
            {
                state.fm1[i_] = state.fi[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            state.x[k] = state.x[k]+state.s[k]*state.diffstep;
            if( state.havebndl[k] )
            {
                state.x[k] = Math.Max(state.x[k], state.bndl[k]);
            }
            if( state.havebndu[k] )
            {
                state.x[k] = Math.Min(state.x[k], state.bndu[k]);
            }
            state.xp1 = state.x[k];
            clearrequestfields(state);
            state.needfi = true;
            state.rstate.stage = 7;
            goto lbl_rcomm;
        lbl_7:
            state.repnfunc = state.repnfunc+1;
            for(i_=0; i_<=m-1;i_++)
            {
                state.fp1[i_] = state.fi[i_];
            }
            v = state.xp1-state.xm1;
            if( (double)(v)!=(double)(0) )
            {
                v = 1/v;
                for(i_=0; i_<=m-1;i_++)
                {
                    state.j[i_,k] = v*state.fp1[i_];
                }
                for(i_=0; i_<=m-1;i_++)
                {
                    state.j[i_,k] = state.j[i_,k] - v*state.fm1[i_];
                }
            }
            else
            {
                for(i=0; i<=m-1; i++)
                {
                    state.j[i,k] = 0;
                }
            }
            k = k+1;
            goto lbl_36;
        lbl_38:
            
            //
            // Calculate F(XBase)
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            clearrequestfields(state);
            state.needfi = true;
            state.rstate.stage = 8;
            goto lbl_rcomm;
        lbl_8:
            state.needfi = false;
            state.repnfunc = state.repnfunc+1;
            state.repnjac = state.repnjac+1;
            
            //
            // New model
            //
            state.modelage = 0;
            goto lbl_35;
        lbl_34:
            
            //
            // Obtain f[] and Jacobian
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            clearrequestfields(state);
            state.needfij = true;
            state.rstate.stage = 9;
            goto lbl_rcomm;
        lbl_9:
            state.needfij = false;
            state.repnfunc = state.repnfunc+1;
            state.repnjac = state.repnjac+1;
            
            //
            // New model
            //
            state.modelage = 0;
        lbl_35:
            goto lbl_33;
        lbl_32:
            
            //
            // State.J contains Jacobian or its current approximation;
            // refresh it using secant updates:
            //
            // f(x0+dx) = f(x0) + J*dx,
            // J_new = J_old + u*h'
            // h = x_new-x_old
            // u = (f_new - f_old - J_old*h)/(h'h)
            //
            // We can explicitly generate h and u, but it is
            // preferential to do in-place calculations. Only
            // I-th row of J_old is needed to calculate u[I],
            // so we can update J row by row in one pass.
            //
            // NOTE: we expect that State.XBase contains new point,
            // State.FBase contains old point, State.DeltaX and
            // State.DeltaY contain updates from last step.
            //
            alglib.ap.assert(state.deltaxready && state.deltafready, "MinLMIteration: uninitialized DeltaX/DeltaF");
            t = 0.0;
            for(i_=0; i_<=n-1;i_++)
            {
                t += state.deltax[i_]*state.deltax[i_];
            }
            alglib.ap.assert((double)(t)!=(double)(0), "MinLM: internal error (T=0)");
            for(i=0; i<=m-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    v += state.j[i,i_]*state.deltax[i_];
                }
                v = (state.deltaf[i]-v)/t;
                for(i_=0; i_<=n-1;i_++)
                {
                    state.j[i,i_] = state.j[i,i_] + v*state.deltax[i_];
                }
            }
            for(i_=0; i_<=m-1;i_++)
            {
                state.fi[i_] = state.fibase[i_];
            }
            for(i_=0; i_<=m-1;i_++)
            {
                state.fi[i_] = state.fi[i_] + state.deltaf[i_];
            }
            
            //
            // Increase model age
            //
            state.modelage = state.modelage+1;
        lbl_33:
            
            //
            // Generate quadratic model:
            //     f(xbase+dx) =
            //       = (f0 + J*dx)'(f0 + J*dx)
            //       = f0^2 + dx'J'f0 + f0*J*dx + dx'J'J*dx
            //       = f0^2 + 2*f0*J*dx + dx'J'J*dx
            //
            // Note that we calculate 2*(J'J) instead of J'J because
            // our quadratic model is based on Tailor decomposition,
            // i.e. it has 0.5 before quadratic term.
            //
            ablas.rmatrixgemm(n, n, m, 2.0, state.j, 0, 0, 1, state.j, 0, 0, 0, 0.0, state.quadraticmodel, 0, 0);
            ablas.rmatrixmv(n, m, state.j, 0, 0, 1, state.fi, 0, ref state.gbase, 0);
            for(i_=0; i_<=n-1;i_++)
            {
                state.gbase[i_] = 2*state.gbase[i_];
            }
            v = 0.0;
            for(i_=0; i_<=m-1;i_++)
            {
                v += state.fi[i_]*state.fi[i_];
            }
            state.fbase = v;
            for(i_=0; i_<=m-1;i_++)
            {
                state.fibase[i_] = state.fi[i_];
            }
            
            //
            // set control variables
            //
            bflag = true;
        lbl_30:
            if( state.algomode!=2 )
            {
                goto lbl_39;
            }
            alglib.ap.assert(!state.hasfi, "MinLMIteration: internal error (HasFI is True in Hessian-based mode)");
            
            //
            // Obtain F, G, H
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            clearrequestfields(state);
            state.needfgh = true;
            state.rstate.stage = 10;
            goto lbl_rcomm;
        lbl_10:
            state.needfgh = false;
            state.repnfunc = state.repnfunc+1;
            state.repngrad = state.repngrad+1;
            state.repnhess = state.repnhess+1;
            ablas.rmatrixcopy(n, n, state.h, 0, 0, ref state.quadraticmodel, 0, 0);
            for(i_=0; i_<=n-1;i_++)
            {
                state.gbase[i_] = state.g[i_];
            }
            state.fbase = state.f;
            
            //
            // set control variables
            //
            bflag = true;
            state.modelage = 0;
        lbl_39:
            alglib.ap.assert(bflag, "MinLM: internal integrity check failed!");
            state.deltaxready = false;
            state.deltafready = false;
            
            //
            // If Lambda is not initialized, initialize it using quadratic model
            //
            if( (double)(state.lambdav)<(double)(0) )
            {
                state.lambdav = 0;
                for(i=0; i<=n-1; i++)
                {
                    state.lambdav = Math.Max(state.lambdav, Math.Abs(state.quadraticmodel[i,i])*math.sqr(state.s[i]));
                }
                state.lambdav = 0.001*state.lambdav;
                if( (double)(state.lambdav)==(double)(0) )
                {
                    state.lambdav = 1;
                }
            }
            
            //
            // Test stopping conditions for function gradient
            //
            if( (double)(boundedscaledantigradnorm(state, state.xbase, state.gbase))>(double)(state.epsg) )
            {
                goto lbl_41;
            }
            if( state.modelage!=0 )
            {
                goto lbl_43;
            }
            
            //
            // Model is fresh, we can rely on it and terminate algorithm
            //
            state.repterminationtype = 4;
            if( !state.xrep )
            {
                goto lbl_45;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            state.f = state.fbase;
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 11;
            goto lbl_rcomm;
        lbl_11:
            state.xupdated = false;
        lbl_45:
            result = false;
            return result;
            goto lbl_44;
        lbl_43:
            
            //
            // Model is not fresh, we should refresh it and test
            // conditions once more
            //
            state.modelage = state.maxmodelage+1;
            goto lbl_28;
        lbl_44:
        lbl_41:
            
            //
            // Find value of Levenberg-Marquardt damping parameter which:
            // * leads to positive definite damped model
            // * within bounds specified by StpMax
            // * generates step which decreases function value
            //
            // After this block IFlag is set to:
            // * -3, if constraints are infeasible
            // * -2, if model update is needed (either Lambda growth is too large
            //       or step is too short, but we can't rely on model and stop iterations)
            // * -1, if model is fresh, Lambda have grown too large, termination is needed
            // *  0, if everything is OK, continue iterations
            //
            // State.Nu can have any value on enter, but after exit it is set to 1.0
            //
            iflag = -99;
        lbl_47:
            if( false )
            {
                goto lbl_48;
            }
            
            //
            // Do we need model update?
            //
            if( state.modelage>0 && (double)(state.nu)>=(double)(suspiciousnu) )
            {
                iflag = -2;
                goto lbl_48;
            }
            
            //
            // Setup quadratic solver and solve quadratic programming problem.
            // After problem is solved we'll try to bound step by StpMax
            // (Lambda will be increased if step size is too large).
            //
            // We use BFlag variable to indicate that we have to increase Lambda.
            // If it is False, we will try to increase Lambda and move to new iteration.
            //
            bflag = true;
            minqp.minqpsetstartingpointfast(state.qpstate, state.xbase);
            minqp.minqpsetoriginfast(state.qpstate, state.xbase);
            minqp.minqpsetlineartermfast(state.qpstate, state.gbase);
            minqp.minqpsetquadratictermfast(state.qpstate, state.quadraticmodel, true, 0.0);
            for(i=0; i<=n-1; i++)
            {
                state.tmp0[i] = state.quadraticmodel[i,i]+state.lambdav/math.sqr(state.s[i]);
            }
            minqp.minqprewritediagonal(state.qpstate, state.tmp0);
            minqp.minqpoptimize(state.qpstate);
            minqp.minqpresultsbuf(state.qpstate, ref state.xdir, state.qprep);
            if( state.qprep.terminationtype>0 )
            {
                
                //
                // successful solution of QP problem
                //
                for(i_=0; i_<=n-1;i_++)
                {
                    state.xdir[i_] = state.xdir[i_] - state.xbase[i_];
                }
                v = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    v += state.xdir[i_]*state.xdir[i_];
                }
                if( math.isfinite(v) )
                {
                    v = Math.Sqrt(v);
                    if( (double)(state.stpmax)>(double)(0) && (double)(v)>(double)(state.stpmax) )
                    {
                        bflag = false;
                    }
                }
                else
                {
                    bflag = false;
                }
            }
            else
            {
                
                //
                // Either problem is non-convex (increase LambdaV) or constraints are inconsistent
                //
                alglib.ap.assert(state.qprep.terminationtype==-3 || state.qprep.terminationtype==-5, "MinLM: unexpected completion code from QP solver");
                if( state.qprep.terminationtype==-3 )
                {
                    iflag = -3;
                    goto lbl_48;
                }
                bflag = false;
            }
            if( !bflag )
            {
                
                //
                // Solution failed:
                // try to increase lambda to make matrix positive definite and continue.
                //
                if( !increaselambda(ref state.lambdav, ref state.nu) )
                {
                    iflag = -1;
                    goto lbl_48;
                }
                goto lbl_47;
            }
            
            //
            // Step in State.XDir and it is bounded by StpMax.
            //
            // We should check stopping conditions on step size here.
            // DeltaX, which is used for secant updates, is initialized here.
            //
            // This code is a bit tricky because sometimes XDir<>0, but
            // it is so small that XDir+XBase==XBase (in finite precision
            // arithmetics). So we set DeltaX to XBase, then
            // add XDir, and then subtract XBase to get exact value of
            // DeltaX.
            //
            // Step length is estimated using DeltaX.
            //
            // NOTE: stopping conditions are tested
            // for fresh models only (ModelAge=0)
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.deltax[i_] = state.xbase[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.deltax[i_] = state.deltax[i_] + state.xdir[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.deltax[i_] = state.deltax[i_] - state.xbase[i_];
            }
            state.deltaxready = true;
            v = 0.0;
            for(i=0; i<=n-1; i++)
            {
                v = v+math.sqr(state.deltax[i]/state.s[i]);
            }
            v = Math.Sqrt(v);
            if( (double)(v)>(double)(state.epsx) )
            {
                goto lbl_49;
            }
            if( state.modelage!=0 )
            {
                goto lbl_51;
            }
            
            //
            // Step is too short, model is fresh and we can rely on it.
            // Terminating.
            //
            state.repterminationtype = 2;
            if( !state.xrep )
            {
                goto lbl_53;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            state.f = state.fbase;
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 12;
            goto lbl_rcomm;
        lbl_12:
            state.xupdated = false;
        lbl_53:
            result = false;
            return result;
            goto lbl_52;
        lbl_51:
            
            //
            // Step is suspiciously short, but model is not fresh
            // and we can't rely on it.
            //
            iflag = -2;
            goto lbl_48;
        lbl_52:
        lbl_49:
            
            //
            // Let's evaluate new step:
            // a) if we have Fi vector, we evaluate it using rcomm, and
            //    then we manually calculate State.F as sum of squares of Fi[]
            // b) if we have F value, we just evaluate it through rcomm interface
            //
            // We prefer (a) because we may need Fi vector for additional
            // iterations
            //
            alglib.ap.assert(state.hasfi || state.hasf, "MinLM: internal error 2!");
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.x[i_] + state.xdir[i_];
            }
            clearrequestfields(state);
            if( !state.hasfi )
            {
                goto lbl_55;
            }
            state.needfi = true;
            state.rstate.stage = 13;
            goto lbl_rcomm;
        lbl_13:
            state.needfi = false;
            v = 0.0;
            for(i_=0; i_<=m-1;i_++)
            {
                v += state.fi[i_]*state.fi[i_];
            }
            state.f = v;
            for(i_=0; i_<=m-1;i_++)
            {
                state.deltaf[i_] = state.fi[i_];
            }
            for(i_=0; i_<=m-1;i_++)
            {
                state.deltaf[i_] = state.deltaf[i_] - state.fibase[i_];
            }
            state.deltafready = true;
            goto lbl_56;
        lbl_55:
            state.needf = true;
            state.rstate.stage = 14;
            goto lbl_rcomm;
        lbl_14:
            state.needf = false;
        lbl_56:
            state.repnfunc = state.repnfunc+1;
            if( (double)(state.f)>=(double)(state.fbase) )
            {
                
                //
                // Increase lambda and continue
                //
                if( !increaselambda(ref state.lambdav, ref state.nu) )
                {
                    iflag = -1;
                    goto lbl_48;
                }
                goto lbl_47;
            }
            
            //
            // We've found our step!
            //
            iflag = 0;
            goto lbl_48;
            goto lbl_47;
        lbl_48:
            if( state.userterminationneeded )
            {
                
                //
                // User requested termination
                //
                for(i_=0; i_<=n-1;i_++)
                {
                    state.x[i_] = state.xbase[i_];
                }
                state.repterminationtype = 8;
                result = false;
                return result;
            }
            state.nu = 1;
            alglib.ap.assert(iflag>=-3 && iflag<=0, "MinLM: internal integrity check failed!");
            if( iflag==-3 )
            {
                state.repterminationtype = -3;
                result = false;
                return result;
            }
            if( iflag==-2 )
            {
                state.modelage = state.maxmodelage+1;
                goto lbl_28;
            }
            if( iflag==-1 )
            {
                goto lbl_29;
            }
            
            //
            // Levenberg-Marquardt step is ready.
            // Compare predicted vs. actual decrease and decide what to do with lambda.
            //
            // NOTE: we expect that State.DeltaX contains direction of step,
            // State.F contains function value at new point.
            //
            alglib.ap.assert(state.deltaxready, "MinLM: deltaX is not ready");
            t = 0;
            for(i=0; i<=n-1; i++)
            {
                v = 0.0;
                for(i_=0; i_<=n-1;i_++)
                {
                    v += state.quadraticmodel[i,i_]*state.deltax[i_];
                }
                t = t+state.deltax[i]*state.gbase[i]+0.5*state.deltax[i]*v;
            }
            state.predicteddecrease = -t;
            state.actualdecrease = -(state.f-state.fbase);
            if( (double)(state.predicteddecrease)<=(double)(0) )
            {
                goto lbl_29;
            }
            v = state.actualdecrease/state.predicteddecrease;
            if( (double)(v)>=(double)(0.1) )
            {
                goto lbl_57;
            }
            if( increaselambda(ref state.lambdav, ref state.nu) )
            {
                goto lbl_59;
            }
            
            //
            // Lambda is too large, we have to break iterations.
            //
            state.repterminationtype = 7;
            if( !state.xrep )
            {
                goto lbl_61;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            state.f = state.fbase;
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 15;
            goto lbl_rcomm;
        lbl_15:
            state.xupdated = false;
        lbl_61:
            result = false;
            return result;
        lbl_59:
        lbl_57:
            if( (double)(v)>(double)(0.5) )
            {
                decreaselambda(ref state.lambdav, ref state.nu);
            }
            
            //
            // Accept step, report it and
            // test stopping conditions on iterations count and function decrease.
            //
            // NOTE: we expect that State.DeltaX contains direction of step,
            // State.F contains function value at new point.
            //
            // NOTE2: we should update XBase ONLY. In the beginning of the next
            // iteration we expect that State.FIBase is NOT updated and
            // contains old value of a function vector.
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.xbase[i_] = state.xbase[i_] + state.deltax[i_];
            }
            if( !state.xrep )
            {
                goto lbl_63;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 16;
            goto lbl_rcomm;
        lbl_16:
            state.xupdated = false;
        lbl_63:
            state.repiterationscount = state.repiterationscount+1;
            if( state.repiterationscount>=state.maxits && state.maxits>0 )
            {
                state.repterminationtype = 5;
            }
            if( state.modelage==0 )
            {
                if( (double)(Math.Abs(state.f-state.fbase))<=(double)(state.epsf*Math.Max(1, Math.Max(Math.Abs(state.f), Math.Abs(state.fbase)))) )
                {
                    state.repterminationtype = 1;
                }
            }
            if( state.repterminationtype<=0 )
            {
                goto lbl_65;
            }
            if( !state.xrep )
            {
                goto lbl_67;
            }
            
            //
            // Report: XBase contains new point, F contains function value at new point
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 17;
            goto lbl_rcomm;
        lbl_17:
            state.xupdated = false;
        lbl_67:
            result = false;
            return result;
        lbl_65:
            state.modelage = state.modelage+1;
            goto lbl_28;
        lbl_29:
            
            //
            // Lambda is too large, we have to break iterations.
            //
            state.repterminationtype = 7;
            if( !state.xrep )
            {
                goto lbl_69;
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            state.f = state.fbase;
            clearrequestfields(state);
            state.xupdated = true;
            state.rstate.stage = 18;
            goto lbl_rcomm;
        lbl_18:
            state.xupdated = false;
        lbl_69:
            result = false;
            return result;
            
            //
            // Saving state
            //
        lbl_rcomm:
            result = true;
            state.rstate.ia[0] = n;
            state.rstate.ia[1] = m;
            state.rstate.ia[2] = iflag;
            state.rstate.ia[3] = i;
            state.rstate.ia[4] = k;
            state.rstate.ba[0] = bflag;
            state.rstate.ra[0] = v;
            state.rstate.ra[1] = s;
            state.rstate.ra[2] = t;
            return result;
        }
Пример #27
0
        /*************************************************************************
        This function sets stopping conditions for Levenberg-Marquardt optimization
        algorithm.

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state between calls and
                        which is used for reverse communication. Must be initialized
                        with MinLMCreate???()
            EpsG    -   >=0
                        The  subroutine  finishes  its  work   if   the  condition
                        ||G||<EpsG is satisfied, where ||.|| means Euclidean norm,
                        G - gradient.
            EpsF    -   >=0
                        The  subroutine  finishes  its work if on k+1-th iteration
                        the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
                        is satisfied.
            EpsX    -   >=0
                        The subroutine finishes its work if  on  k+1-th  iteration
                        the condition |X(k+1)-X(k)| <= EpsX is fulfilled.
            MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
                        iterations   is    unlimited.   Only   Levenberg-Marquardt
                        iterations  are  counted  (L-BFGS/CG  iterations  are  NOT
                        counted  because their cost is very low compared to that of
                        LM).

        Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
        automatic stopping criterion selection (small EpsX).

          -- ALGLIB --
             Copyright 02.04.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetcond(ref minlmstate state,
            double epsg,
            double epsf,
            double epsx,
            int maxits)
        {
            
            //
            // Validate inputs: every tolerance and the iteration limit must be non-negative.
            //
            System.Diagnostics.Debug.Assert((double)(epsg)>=(double)(0), "MinLMSetCond: negative EpsG!");
            System.Diagnostics.Debug.Assert((double)(epsf)>=(double)(0), "MinLMSetCond: negative EpsF!");
            System.Diagnostics.Debug.Assert((double)(epsx)>=(double)(0), "MinLMSetCond: negative EpsX!");
            System.Diagnostics.Debug.Assert(maxits>=0, "MinLMSetCond: negative MaxIts!");
            
            //
            // All-zero criteria mean "select automatically": fall back to a small EpsX.
            // Short-circuit && replaces the original non-short-circuit & (idiomatic C#;
            // identical result here since all operands are side-effect-free booleans).
            //
            if( (double)(epsg)==(double)(0) && (double)(epsf)==(double)(0) && (double)(epsx)==(double)(0) && maxits==0 )
            {
                epsx = 1.0E-6;
            }
            
            //
            // Store (possibly adjusted) stopping criteria in the optimizer state.
            //
            state.epsg = epsg;
            state.epsf = epsf;
            state.epsx = epsx;
            state.maxits = maxits;
        }
Пример #28
0
        /*************************************************************************
        Levenberg-Marquardt algorithm results

        Buffered implementation of MinLMResults(), which uses pre-allocated buffer
        to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
        intended to be used in the inner cycles of performance critical algorithms
        where array reallocation penalty is too large to be ignored.

          -- ALGLIB --
             Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmresultsbuf(minlmstate state,
            ref double[] x,
            minlmreport rep)
        {
            int n = state.n;

            //
            // Grow the caller-supplied buffer only when it cannot hold N elements.
            //
            if( alglib.ap.len(x)<n )
            {
                x = new double[n];
            }
            
            //
            // Copy the current point into the buffer.
            //
            for(int k=0; k<n; k++)
            {
                x[k] = state.x[k];
            }
            
            //
            // Export iteration counters and the completion code into the report.
            //
            rep.iterationscount = state.repiterationscount;
            rep.terminationtype = state.repterminationtype;
            rep.funcidx = state.repfuncidx;
            rep.varidx = state.repvaridx;
            rep.nfunc = state.repnfunc;
            rep.njac = state.repnjac;
            rep.ngrad = state.repngrad;
            rep.nhess = state.repnhess;
            rep.ncholesky = state.repncholesky;
        }
Пример #29
0
        /*************************************************************************
        This function sets maximum step length

        INPUT PARAMETERS:
            State   -   structure which stores algorithm state between calls and
                        which is used for reverse communication. Must be
                        initialized with MinCGCreate???()
            StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
                        want to limit step length.

        Use this subroutine when you optimize target function which contains exp()
        or  other  fast  growing  functions,  and optimization algorithm makes too
        large  steps  which  leads  to overflow. This function allows us to reject
        steps  that  are  too  large  (and  therefore  expose  us  to the possible
        overflow) without actually calculating function value at the x+stp*d.

        NOTE: non-zero StpMax leads to moderate  performance  degradation  because
        intermediate  step  of  preconditioned L-BFGS optimization is incompatible
        with limits on step size.

          -- ALGLIB --
             Copyright 02.04.2010 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmsetstpmax(ref minlmstate state,
            double stpmax)
        {
            // Reject negative limits; StpMax=0 means "no limit on step length".
            // (>= is kept so a NaN argument still trips the assertion.)
            System.Diagnostics.Debug.Assert(stpmax>=0.0, "MinLMSetStpMax: StpMax<0!");
            state.stpmax = stpmax;
        }
Пример #30
0
        /*************************************************************************
        This subroutine submits request for termination of running  optimizer.  It
        should be called from user-supplied callback when user decides that it  is
        time to "smoothly" terminate optimization process.  As  result,  optimizer
        stops at point which was "current accepted" when termination  request  was
        submitted and returns error code 8 (successful termination).

        INPUT PARAMETERS:
            State   -   optimizer structure

        NOTE: after  request  for  termination  optimizer  may   perform   several
              additional calls to user-supplied callbacks. It does  NOT  guarantee
              to stop immediately - it just guarantees that these additional calls
              will be discarded later.

        NOTE: calling this function on optimizer which is NOT running will have no
              effect.
              
        NOTE: multiple calls to this function are possible. First call is counted,
              subsequent calls are silently ignored.

          -- ALGLIB --
             Copyright 08.10.2014 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmrequesttermination(minlmstate state)
        {
            // Raise the flag polled by the optimizer's iteration loop; when it is
            // observed, the solver stops at the last accepted point and reports
            // termination code 8 (user-requested stop). Setting it repeatedly is
            // harmless - subsequent calls are effectively ignored.
            state.userterminationneeded = true;
        }
Пример #31
0
        /*************************************************************************
        One Levenberg-Marquardt iteration.

        Called after initialization of State structure with MinLMXXX subroutine.
        See HTML docs for examples.

        Input parameters:
            State   -   structure which stores algorithm state between subsequent
                        calls and which is used for reverse communication. Must be
                        initialized with MinLMXXX call first.

        If subroutine returned False, iterative algorithm has converged.

        If subroutine returned True, then:
        * if State.NeedF=True,      -   function value F at State.X[0..N-1]
                                        is required
        * if State.NeedFG=True      -   function value F and gradient G
                                        are required
        * if State.NeedFiJ=True     -   function vector f[i] and Jacobi matrix J
                                        are required
        * if State.NeedFGH=True     -   function value F, gradient G and Hessian H
                                        are required
        * if State.XUpdated=True    -   algorithm reports about new iteration,
                                        State.X contains current point,
                                        State.F contains function value.

        One and only one of these fields can be set at a time.

        Results are stored:
        * function value            -   in MinLMState.F
        * gradient                  -   in MinLMState.G[0..N-1]
        * Jacobi matrix             -   in MinLMState.J[0..M-1,0..N-1]
        * Hessian                   -   in MinLMState.H[0..N-1,0..N-1]

          -- ALGLIB --
             Copyright 10.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static bool minlmiteration(ref minlmstate state)
        {
            bool result = new bool();
            int n = 0;
            int m = 0;
            int i = 0;
            double stepnorm = 0;
            bool spd = new bool();
            double fbase = 0;
            double fnew = 0;
            double lambda = 0;
            double nu = 0;
            double lambdaup = 0;
            double lambdadown = 0;
            int lbfgsflags = 0;
            double v = 0;
            int i_ = 0;

            
            //
            // Reverse communication preparations
            // I know it looks ugly, but it works the same way
            // anywhere from C++ to Python.
            //
            // This code initializes locals by:
            // * random values determined during code
            //   generation - on first subroutine call
            // * values from previous call - on subsequent calls
            //
            if( state.rstate.stage>=0 )
            {
                n = state.rstate.ia[0];
                m = state.rstate.ia[1];
                i = state.rstate.ia[2];
                lbfgsflags = state.rstate.ia[3];
                spd = state.rstate.ba[0];
                stepnorm = state.rstate.ra[0];
                fbase = state.rstate.ra[1];
                fnew = state.rstate.ra[2];
                lambda = state.rstate.ra[3];
                nu = state.rstate.ra[4];
                lambdaup = state.rstate.ra[5];
                lambdadown = state.rstate.ra[6];
                v = state.rstate.ra[7];
            }
            else
            {
                n = -983;
                m = -989;
                i = -834;
                lbfgsflags = 900;
                spd = true;
                stepnorm = 364;
                fbase = 214;
                fnew = -338;
                lambda = -686;
                nu = 912;
                lambdaup = 585;
                lambdadown = 497;
                v = -271;
            }
            if( state.rstate.stage==0 )
            {
                goto lbl_0;
            }
            if( state.rstate.stage==1 )
            {
                goto lbl_1;
            }
            if( state.rstate.stage==2 )
            {
                goto lbl_2;
            }
            if( state.rstate.stage==3 )
            {
                goto lbl_3;
            }
            if( state.rstate.stage==4 )
            {
                goto lbl_4;
            }
            if( state.rstate.stage==5 )
            {
                goto lbl_5;
            }
            if( state.rstate.stage==6 )
            {
                goto lbl_6;
            }
            if( state.rstate.stage==7 )
            {
                goto lbl_7;
            }
            if( state.rstate.stage==8 )
            {
                goto lbl_8;
            }
            if( state.rstate.stage==9 )
            {
                goto lbl_9;
            }
            if( state.rstate.stage==10 )
            {
                goto lbl_10;
            }
            if( state.rstate.stage==11 )
            {
                goto lbl_11;
            }
            if( state.rstate.stage==12 )
            {
                goto lbl_12;
            }
            if( state.rstate.stage==13 )
            {
                goto lbl_13;
            }
            if( state.rstate.stage==14 )
            {
                goto lbl_14;
            }
            if( state.rstate.stage==15 )
            {
                goto lbl_15;
            }
            
            //
            // Routine body
            //
            System.Diagnostics.Debug.Assert(state.usermode==lmmodefj | state.usermode==lmmodefgj | state.usermode==lmmodefgh, "LM: internal error");
            if( state.wrongparams )
            {
                state.repterminationtype = -1;
                result = false;
                return result;
            }
            
            //
            // prepare params
            //
            n = state.n;
            m = state.m;
            lambdaup = 20;
            lambdadown = 0.5;
            nu = 1;
            lbfgsflags = 0;
            
            //
            // if we have F and G
            //
            if( ! ((state.usermode==lmmodefgj | state.usermode==lmmodefgh) & state.flags/lmflagnoprelbfgs%2==0) )
            {
                goto lbl_16;
            }
            
            //
            // First stage of the hybrid algorithm: LBFGS
            //
            minlbfgs.minlbfgscreate(n, Math.Min(n, lmprelbfgsm), ref state.x, ref state.internalstate);
            minlbfgs.minlbfgssetcond(ref state.internalstate, 0, 0, 0, Math.Max(5, n));
            minlbfgs.minlbfgssetxrep(ref state.internalstate, state.xrep);
            minlbfgs.minlbfgssetstpmax(ref state.internalstate, state.stpmax);
        lbl_18:
            if( ! minlbfgs.minlbfgsiteration(ref state.internalstate) )
            {
                goto lbl_19;
            }
            if( ! state.internalstate.needfg )
            {
                goto lbl_20;
            }
            
            //
            // RComm
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.internalstate.x[i_];
            }
            lmclearrequestfields(ref state);
            state.needfg = true;
            state.rstate.stage = 0;
            goto lbl_rcomm;
        lbl_0:
            state.repnfunc = state.repnfunc+1;
            state.repngrad = state.repngrad+1;
            
            //
            // Call LBFGS
            //
            state.internalstate.f = state.f;
            for(i_=0; i_<=n-1;i_++)
            {
                state.internalstate.g[i_] = state.g[i_];
            }
        lbl_20:
            if( ! (state.internalstate.xupdated & state.xrep) )
            {
                goto lbl_22;
            }
            lmclearrequestfields(ref state);
            state.f = state.internalstate.f;
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.internalstate.x[i_];
            }
            state.xupdated = true;
            state.rstate.stage = 1;
            goto lbl_rcomm;
        lbl_1:
        lbl_22:
            goto lbl_18;
        lbl_19:
            minlbfgs.minlbfgsresults(ref state.internalstate, ref state.x, ref state.internalrep);
            goto lbl_17;
        lbl_16:
            
            //
            // No first stage.
            // However, we may need to report initial point
            //
            if( ! state.xrep )
            {
                goto lbl_24;
            }
            lmclearrequestfields(ref state);
            state.needf = true;
            state.rstate.stage = 2;
            goto lbl_rcomm;
        lbl_2:
            lmclearrequestfields(ref state);
            state.xupdated = true;
            state.rstate.stage = 3;
            goto lbl_rcomm;
        lbl_3:
        lbl_24:
        lbl_17:
            
            //
            // Second stage of the hybrid algorithm: LM
            // Initialize quadratic model.
            //
            if( state.usermode!=lmmodefgh )
            {
                goto lbl_26;
            }
            
            //
            // RComm
            //
            lmclearrequestfields(ref state);
            state.needfgh = true;
            state.rstate.stage = 4;
            goto lbl_rcomm;
        lbl_4:
            state.repnfunc = state.repnfunc+1;
            state.repngrad = state.repngrad+1;
            state.repnhess = state.repnhess+1;
            
            //
            // generate raw quadratic model
            //
            ablas.rmatrixcopy(n, n, ref state.h, 0, 0, ref state.rawmodel, 0, 0);
            for(i_=0; i_<=n-1;i_++)
            {
                state.gbase[i_] = state.g[i_];
            }
            fbase = state.f;
        lbl_26:
            if( ! (state.usermode==lmmodefgj | state.usermode==lmmodefj) )
            {
                goto lbl_28;
            }
            
            //
            // RComm
            //
            lmclearrequestfields(ref state);
            state.needfij = true;
            state.rstate.stage = 5;
            goto lbl_rcomm;
        lbl_5:
            state.repnfunc = state.repnfunc+1;
            state.repnjac = state.repnjac+1;
            
            //
            // generate raw quadratic model
            //
            ablas.rmatrixgemm(n, n, m, 2.0, ref state.j, 0, 0, 1, ref state.j, 0, 0, 0, 0.0, ref state.rawmodel, 0, 0);
            ablas.rmatrixmv(n, m, ref state.j, 0, 0, 1, ref state.fi, 0, ref state.gbase, 0);
            for(i_=0; i_<=n-1;i_++)
            {
                state.gbase[i_] = 2*state.gbase[i_];
            }
            fbase = 0.0;
            for(i_=0; i_<=m-1;i_++)
            {
                fbase += state.fi[i_]*state.fi[i_];
            }
        lbl_28:
            lambda = 0.001;
        lbl_30:
            if( false )
            {
                goto lbl_31;
            }
            
            //
            // 1. Model = RawModel+lambda*I
            // 2. Try to solve (RawModel+Lambda*I)*dx = -g.
            //    Increase lambda if left part is not positive definite.
            //
            for(i=0; i<=n-1; i++)
            {
                for(i_=0; i_<=n-1;i_++)
                {
                    state.model[i,i_] = state.rawmodel[i,i_];
                }
                state.model[i,i] = state.model[i,i]+lambda;
            }
            spd = trfac.spdmatrixcholesky(ref state.model, n, true);
            state.repncholesky = state.repncholesky+1;
            if( spd )
            {
                goto lbl_32;
            }
            if( ! increaselambda(ref lambda, ref nu, lambdaup) )
            {
                goto lbl_34;
            }
            goto lbl_30;
            goto lbl_35;
        lbl_34:
            state.repterminationtype = 7;
            lmclearrequestfields(ref state);
            state.needf = true;
            state.rstate.stage = 6;
            goto lbl_rcomm;
        lbl_6:
            goto lbl_31;
        lbl_35:
        lbl_32:
            densesolver.spdmatrixcholeskysolve(ref state.model, n, true, ref state.gbase, ref state.solverinfo, ref state.solverrep, ref state.xdir);
            if( state.solverinfo>=0 )
            {
                goto lbl_36;
            }
            if( ! increaselambda(ref lambda, ref nu, lambdaup) )
            {
                goto lbl_38;
            }
            goto lbl_30;
            goto lbl_39;
        lbl_38:
            state.repterminationtype = 7;
            lmclearrequestfields(ref state);
            state.needf = true;
            state.rstate.stage = 7;
            goto lbl_rcomm;
        lbl_7:
            goto lbl_31;
        lbl_39:
        lbl_36:
            for(i_=0; i_<=n-1;i_++)
            {
                state.xdir[i_] = -1*state.xdir[i_];
            }
            
            //
            // Candidate lambda is found.
            // 1. Save old w in WBase
            // 1. Test some stopping criterions
            // 2. If error(w+wdir)>error(w), increase lambda
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.xprev[i_] = state.x[i_];
            }
            state.fprev = state.f;
            for(i_=0; i_<=n-1;i_++)
            {
                state.xbase[i_] = state.x[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.x[i_] + state.xdir[i_];
            }
            stepnorm = 0.0;
            for(i_=0; i_<=n-1;i_++)
            {
                stepnorm += state.xdir[i_]*state.xdir[i_];
            }
            stepnorm = Math.Sqrt(stepnorm);
            if( ! ((double)(state.stpmax)>(double)(0) & (double)(stepnorm)>(double)(state.stpmax)) )
            {
                goto lbl_40;
            }
            
            //
            // Step is larger than the limit,
            // larger lambda is needed
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            if( ! increaselambda(ref lambda, ref nu, lambdaup) )
            {
                goto lbl_42;
            }
            goto lbl_30;
            goto lbl_43;
        lbl_42:
            state.repterminationtype = 7;
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xprev[i_];
            }
            lmclearrequestfields(ref state);
            state.needf = true;
            state.rstate.stage = 8;
            goto lbl_rcomm;
        lbl_8:
            goto lbl_31;
        lbl_43:
        lbl_40:
            lmclearrequestfields(ref state);
            state.needf = true;
            state.rstate.stage = 9;
            goto lbl_rcomm;
        lbl_9:
            state.repnfunc = state.repnfunc+1;
            fnew = state.f;
            if( (double)(fnew)<=(double)(fbase) )
            {
                goto lbl_44;
            }
            
            //
            // restore state and continue search for lambda
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xbase[i_];
            }
            if( ! increaselambda(ref lambda, ref nu, lambdaup) )
            {
                goto lbl_46;
            }
            goto lbl_30;
            goto lbl_47;
        lbl_46:
            state.repterminationtype = 7;
            for(i_=0; i_<=n-1;i_++)
            {
                state.x[i_] = state.xprev[i_];
            }
            lmclearrequestfields(ref state);
            state.needf = true;
            state.rstate.stage = 10;
            goto lbl_rcomm;
        lbl_10:
            goto lbl_31;
        lbl_47:
        lbl_44:
            if( ! ((double)(state.stpmax)==(double)(0) & (state.usermode==lmmodefgj | state.usermode==lmmodefgh) & state.flags/lmflagnointlbfgs%2==0) )
            {
                goto lbl_48;
            }
            
            //
            // Optimize using LBFGS, with inv(cholesky(H)) as preconditioner.
            //
            // It is possible only when StpMax=0, because we can't guarantee
            // that step remains bounded when preconditioner is used (we need
            // SVD decomposition to do that, which is too slow).
            //
            matinv.rmatrixtrinverse(ref state.model, n, true, false, ref state.invinfo, ref state.invrep);
            if( state.invinfo<=0 )
            {
                goto lbl_50;
            }
            
            //
            // if matrix can be inverted, use it.
            // just silently move to next iteration otherwise.
            // (will be very, very rare, mostly for specially
            // designed near-degenerate tasks)
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.xbase[i_] = state.x[i_];
            }
            for(i=0; i<=n-1; i++)
            {
                state.xprec[i] = 0;
            }
            minlbfgs.minlbfgscreatex(n, Math.Min(n, lmintlbfgsits), ref state.xprec, lbfgsflags, ref state.internalstate);
            minlbfgs.minlbfgssetcond(ref state.internalstate, 0, 0, 0, lmintlbfgsits);
        lbl_52:
            if( ! minlbfgs.minlbfgsiteration(ref state.internalstate) )
            {
                goto lbl_53;
            }
            
            //
            // convert XPrec to unpreconditioned form, then call RComm.
            //
            for(i=0; i<=n-1; i++)
            {
                v = 0.0;
                for(i_=i; i_<=n-1;i_++)
                {
                    v += state.internalstate.x[i_]*state.model[i,i_];
                }
                state.x[i] = state.xbase[i]+v;
            }
            lmclearrequestfields(ref state);
            state.needfg = true;
            state.rstate.stage = 11;
            goto lbl_rcomm;
        lbl_11:
            state.repnfunc = state.repnfunc+1;
            state.repngrad = state.repngrad+1;
            
            //
            // 1. pass State.F to State.InternalState.F
            // 2. convert gradient back to preconditioned form
            //
            state.internalstate.f = state.f;
            for(i=0; i<=n-1; i++)
            {
                state.internalstate.g[i] = 0;
            }
            for(i=0; i<=n-1; i++)
            {
                v = state.g[i];
                for(i_=i; i_<=n-1;i_++)
                {
                    state.internalstate.g[i_] = state.internalstate.g[i_] + v*state.model[i,i_];
                }
            }
            
            //
            // next iteration
            //
            goto lbl_52;
        lbl_53:
            
            //
            // change LBFGS flags to NoRealloc.
            // L-BFGS subroutine will use memory allocated from previous run.
            // it is possible since all subsequent calls will be with same N/M.
            //
            lbfgsflags = lbfgsnorealloc;
            
            //
            // back to unpreconditioned X
            //
            minlbfgs.minlbfgsresults(ref state.internalstate, ref state.xprec, ref state.internalrep);
            for(i=0; i<=n-1; i++)
            {
                v = 0.0;
                for(i_=i; i_<=n-1;i_++)
                {
                    v += state.xprec[i_]*state.model[i,i_];
                }
                state.x[i] = state.xbase[i]+v;
            }
        lbl_50:
        lbl_48:
            
            //
            // Composite iteration is almost over:
            // * accept new position.
            // * rebuild quadratic model
            //
            state.repiterationscount = state.repiterationscount+1;
            if( state.usermode!=lmmodefgh )
            {
                goto lbl_54;
            }
            lmclearrequestfields(ref state);
            state.needfgh = true;
            state.rstate.stage = 12;
            goto lbl_rcomm;
        lbl_12:
            state.repnfunc = state.repnfunc+1;
            state.repngrad = state.repngrad+1;
            state.repnhess = state.repnhess+1;
            ablas.rmatrixcopy(n, n, ref state.h, 0, 0, ref state.rawmodel, 0, 0);
            for(i_=0; i_<=n-1;i_++)
            {
                state.gbase[i_] = state.g[i_];
            }
            fnew = state.f;
        lbl_54:
            if( ! (state.usermode==lmmodefgj | state.usermode==lmmodefj) )
            {
                goto lbl_56;
            }
            lmclearrequestfields(ref state);
            state.needfij = true;
            state.rstate.stage = 13;
            goto lbl_rcomm;
        lbl_13:
            state.repnfunc = state.repnfunc+1;
            state.repnjac = state.repnjac+1;
            ablas.rmatrixgemm(n, n, m, 2.0, ref state.j, 0, 0, 1, ref state.j, 0, 0, 0, 0.0, ref state.rawmodel, 0, 0);
            ablas.rmatrixmv(n, m, ref state.j, 0, 0, 1, ref state.fi, 0, ref state.gbase, 0);
            for(i_=0; i_<=n-1;i_++)
            {
                state.gbase[i_] = 2*state.gbase[i_];
            }
            fnew = 0.0;
            for(i_=0; i_<=m-1;i_++)
            {
                fnew += state.fi[i_]*state.fi[i_];
            }
        lbl_56:
            
            //
            // Stopping conditions
            //
            for(i_=0; i_<=n-1;i_++)
            {
                state.work[i_] = state.xprev[i_];
            }
            for(i_=0; i_<=n-1;i_++)
            {
                state.work[i_] = state.work[i_] - state.x[i_];
            }
            stepnorm = 0.0;
            for(i_=0; i_<=n-1;i_++)
            {
                stepnorm += state.work[i_]*state.work[i_];
            }
            stepnorm = Math.Sqrt(stepnorm);
            if( (double)(stepnorm)<=(double)(state.epsx) )
            {
                state.repterminationtype = 2;
                goto lbl_31;
            }
            if( state.repiterationscount>=state.maxits & state.maxits>0 )
            {
                state.repterminationtype = 5;
                goto lbl_31;
            }
            v = 0.0;
            for(i_=0; i_<=n-1;i_++)
            {
                v += state.gbase[i_]*state.gbase[i_];
            }
            v = Math.Sqrt(v);
            if( (double)(v)<=(double)(state.epsg) )
            {
                state.repterminationtype = 4;
                goto lbl_31;
            }
            if( (double)(Math.Abs(fnew-fbase))<=(double)(state.epsf*Math.Max(1, Math.Max(Math.Abs(fnew), Math.Abs(fbase)))) )
            {
                state.repterminationtype = 1;
                goto lbl_31;
            }
            
            //
            // Now, iteration is finally over:
            // * update FBase
            // * decrease lambda
            // * report new iteration
            //
            if( ! state.xrep )
            {
                goto lbl_58;
            }
            lmclearrequestfields(ref state);
            state.xupdated = true;
            state.f = fnew;
            state.rstate.stage = 14;
            goto lbl_rcomm;
        lbl_14:
        lbl_58:
            fbase = fnew;
            decreaselambda(ref lambda, ref nu, lambdadown);
            goto lbl_30;
        lbl_31:
            
            //
            // final point is reported
            //
            if( ! state.xrep )
            {
                goto lbl_60;
            }
            lmclearrequestfields(ref state);
            state.xupdated = true;
            state.f = fnew;
            state.rstate.stage = 15;
            goto lbl_rcomm;
        lbl_15:
        lbl_60:
            result = false;
            return result;
            
            //
            // Saving state
            //
        lbl_rcomm:
            result = true;
            state.rstate.ia[0] = n;
            state.rstate.ia[1] = m;
            state.rstate.ia[2] = i;
            state.rstate.ia[3] = lbfgsflags;
            state.rstate.ba[0] = spd;
            state.rstate.ra[0] = stepnorm;
            state.rstate.ra[1] = fbase;
            state.rstate.ra[2] = fnew;
            state.rstate.ra[3] = lambda;
            state.rstate.ra[4] = nu;
            state.rstate.ra[5] = lambdaup;
            state.rstate.ra[6] = lambdadown;
            state.rstate.ra[7] = v;
            return result;
        }
Пример #32
0
        /*************************************************************************
        This function is considered obsolete since ALGLIB 3.1.0 and is present for
        backward  compatibility  only.  We  recommend  to use MinLMCreateVJ, which
        provides similar, but more consistent and feature-rich interface.

          -- ALGLIB --
             Copyright 30.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmcreatefj(int n,
            int m,
            double[] x,
            minlmstate state)
        {
            //
            // Validate user input before the state object is touched.
            //
            alglib.ap.assert(n >= 1, "MinLMCreateFJ: N<1!");
            alglib.ap.assert(m >= 1, "MinLMCreateFJ: M<1!");
            alglib.ap.assert(alglib.ap.len(x) >= n, "MinLMCreateFJ: Length(X)<N!");
            alglib.ap.assert(apserv.isfinitevector(x, n), "MinLMCreateFJ: X contains infinite or NaN values!");
            
            //
            // Stage 1: store problem size and the FJ calling protocol
            // (scalar target F is supplied by the user; no function vector,
            // no gradient).
            //
            state.teststep = 0;
            state.n = n;
            state.m = m;
            state.algomode = 1;
            state.hasf = true;
            state.hasfi = false;
            state.hasg = false;
            
            //
            // Stage 2: allocate internal structures, install default
            // settings, then position the solver at the user point.
            //
            lmprepare(n, m, true, state);
            minlmsetacctype(state, 0);
            minlmsetcond(state, 0, 0, 0, 0);
            minlmsetxrep(state, false);
            minlmsetstpmax(state, 0);
            minlmrestartfrom(state, x);
        }
Пример #33
0
        /*************************************************************************
                        IMPROVED LEVENBERG-MARQUARDT METHOD FOR
                         NON-LINEAR LEAST SQUARES OPTIMIZATION

        DESCRIPTION:
        This function is used to find minimum of function which is represented  as
        sum of squares:
            F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
        using value of function vector f[] and Jacobian of f[].


        REQUIREMENTS:
        This algorithm will request following information during its operation:

        * function vector f[] at given point X
        * function vector f[] and Jacobian of f[] (simultaneously) at given point

        There are several overloaded versions of  MinLMOptimize()  function  which
        correspond  to  different LM-like optimization algorithms provided by this
        unit. You should choose version which accepts fvec()  and jac() callbacks.
        First  one  is used to calculate f[] at given point, second one calculates
        f[] and Jacobian df[i]/dx[j].

        You can try to initialize MinLMState structure with VJ  function and  then
        use incorrect version  of  MinLMOptimize()  (for  example,  version  which
        works  with  general  form function and does not provide Jacobian), but it
        will  lead  to  exception  being  thrown  after first attempt to calculate
        Jacobian.


        USAGE:
        1. User initializes algorithm state with MinLMCreateVJ() call
        2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
           other functions
        3. User calls MinLMOptimize() function which  takes algorithm  state   and
           callback functions.
        4. User calls MinLMResults() to get solution
        5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
           with same N/M but another starting point and/or another function.
           MinLMRestartFrom() allows to reuse already initialized structure.


        INPUT PARAMETERS:
            N       -   dimension, N>=1
                        * if given, only leading N elements of X are used
                        * if not given, automatically determined from size of X
            M       -   number of functions f[i]
            X       -   initial solution, array[0..N-1]

        OUTPUT PARAMETERS:
            State   -   structure which stores algorithm state

        NOTES:
        1. you may tune stopping conditions with MinLMSetCond() function
        2. if target function contains exp() or other fast growing functions,  and
           optimization algorithm makes too large steps which leads  to  overflow,
           use MinLMSetStpMax() function to bound algorithm's steps.

          -- ALGLIB --
             Copyright 30.03.2009 by Bochkanov Sergey
        *************************************************************************/
        public static void minlmcreatevj(int n,
            int m,
            double[] x,
            minlmstate state)
        {
            //
            // Validate user input before the state object is touched.
            //
            ap.assert(n >= 1, "MinLMCreateVJ: N<1!");
            ap.assert(m >= 1, "MinLMCreateVJ: M<1!");
            ap.assert(ap.len(x) >= n, "MinLMCreateVJ: Length(X)<N!");
            ap.assert(apserv.isfinitevector(x, n), "MinLMCreateVJ: X contains infinite or NaN values!");
            
            //
            // Stage 1: store problem size and the VJ calling protocol
            // (function vector FI is supplied by the user; no scalar F,
            // no gradient).
            //
            state.n = n;
            state.m = m;
            state.algomode = 1;
            state.hasf = false;
            state.hasfi = true;
            state.hasg = false;
            
            //
            // Stage 2: allocate internal structures, install default
            // settings, then position the solver at the user point.
            //
            lmprepare(n, m, false, state);
            minlmsetacctype(state, 0);
            minlmsetcond(state, 0, 0, 0, 0);
            minlmsetxrep(state, false);
            minlmsetstpmax(state, 0);
            minlmrestartfrom(state, x);
        }
Пример #34
0
        /*************************************************************************
        Prepare internal structures (except for RComm).

        Note: M must be zero for FGH mode, non-zero for V/VJ/FJ/FGJ mode.
        *************************************************************************/
        private static void lmprepare(int n,
            int m,
            bool havegrad,
            minlmstate state)
        {
            //
            // Degenerate sizes: leave the state untouched.
            //
            if( n <= 0 || m < 0 )
            {
                return;
            }
            
            //
            // Mode-dependent storage: optional gradient buffer, then either
            // the Jacobian/function-vector machinery (M!=0) or a dense
            // Hessian (M=0, FGH mode).
            //
            if( havegrad )
            {
                state.g = new double[n];
            }
            if( m != 0 )
            {
                state.j = new double[m, n];
                state.fi = new double[m];
                state.fibase = new double[m];
                state.deltaf = new double[m];
                state.fm1 = new double[m];
                state.fp1 = new double[m];
                state.fc1 = new double[m];
                state.gm1 = new double[m];
                state.gp1 = new double[m];
                state.gc1 = new double[m];
            }
            else
            {
                state.h = new double[n, n];
            }
            
            //
            // N-sized work vectors and the quadratic model matrix.
            //
            state.x = new double[n];
            state.deltax = new double[n];
            state.quadraticmodel = new double[n, n];
            state.xbase = new double[n];
            state.gbase = new double[n];
            state.xdir = new double[n];
            state.tmp0 = new double[n];
            
            //
            // Internal L-BFGS optimizer, started from the origin; iteration
            // budget is capped by both AddIters and problem size.
            //
            for(int k = 0; k < n; k++)
            {
                state.x[k] = 0;
            }
            minlbfgs.minlbfgscreate(n, Math.Min(additers, n), state.x, state.internalstate);
            minlbfgs.minlbfgssetcond(state.internalstate, 0.0, 0.0, 0.0, Math.Min(additers, n));
            
            //
            // Internal QP solver (Cholesky-based algorithm).
            //
            minqp.minqpcreate(n, state.qpstate);
            minqp.minqpsetalgocholesky(state.qpstate);
            
            //
            // Boundary constraints (none by default) and unit scaling.
            //
            state.bndl = new double[n];
            state.bndu = new double[n];
            state.havebndl = new bool[n];
            state.havebndu = new bool[n];
            state.s = new double[n];
            for(int k = 0; k < n; k++)
            {
                state.bndl[k] = Double.NegativeInfinity;
                state.havebndl[k] = false;
                state.bndu[k] = Double.PositiveInfinity;
                state.havebndu[k] = false;
                state.s[k] = 1.0;
            }
        }
Пример #35
0
 /*************************************************************************
 Creates an independent copy of this state object: value-typed fields are
 copied by assignment, array fields via Clone(), and nested solver state
 objects via their own make_copy().
 *************************************************************************/
 public override alglib.apobject make_copy()
 {
     minlmstate _result = new minlmstate();
     // problem size and solver settings (value types)
     _result.n = n;
     _result.m = m;
     _result.diffstep = diffstep;
     _result.epsg = epsg;
     _result.epsf = epsf;
     _result.epsx = epsx;
     _result.maxits = maxits;
     _result.xrep = xrep;
     _result.stpmax = stpmax;
     _result.maxmodelage = maxmodelage;
     _result.makeadditers = makeadditers;
     // current point, target value(s) and derivatives (arrays cloned)
     _result.x = (double[])x.Clone();
     _result.f = f;
     _result.fi = (double[])fi.Clone();
     _result.j = (double[,])j.Clone();
     _result.h = (double[,])h.Clone();
     _result.g = (double[])g.Clone();
     // reverse-communication request flags
     _result.needf = needf;
     _result.needfg = needfg;
     _result.needfgh = needfgh;
     _result.needfij = needfij;
     _result.needfi = needfi;
     _result.xupdated = xupdated;
     _result.userterminationneeded = userterminationneeded;
     // problem description flags
     _result.algomode = algomode;
     _result.hasf = hasf;
     _result.hasfi = hasfi;
     _result.hasg = hasg;
     // base point / model data
     _result.xbase = (double[])xbase.Clone();
     _result.fbase = fbase;
     _result.fibase = (double[])fibase.Clone();
     _result.gbase = (double[])gbase.Clone();
     _result.quadraticmodel = (double[,])quadraticmodel.Clone();
     // boundary constraints and scaling
     _result.bndl = (double[])bndl.Clone();
     _result.bndu = (double[])bndu.Clone();
     _result.havebndl = (bool[])havebndl.Clone();
     _result.havebndu = (bool[])havebndu.Clone();
     _result.s = (double[])s.Clone();
     // LM damping state
     _result.lambdav = lambdav;
     _result.nu = nu;
     _result.modelage = modelage;
     _result.xdir = (double[])xdir.Clone();
     _result.deltax = (double[])deltax.Clone();
     _result.deltaf = (double[])deltaf.Clone();
     _result.deltaxready = deltaxready;
     _result.deltafready = deltafready;
     _result.teststep = teststep;
     // report counters
     _result.repiterationscount = repiterationscount;
     _result.repterminationtype = repterminationtype;
     _result.repfuncidx = repfuncidx;
     _result.repvaridx = repvaridx;
     _result.repnfunc = repnfunc;
     _result.repnjac = repnjac;
     _result.repngrad = repngrad;
     _result.repnhess = repnhess;
     _result.repncholesky = repncholesky;
     // nested state objects are deep-copied through make_copy()
     _result.rstate = (rcommstate)rstate.make_copy();
     _result.choleskybuf = (double[])choleskybuf.Clone();
     _result.tmp0 = (double[])tmp0.Clone();
     _result.actualdecrease = actualdecrease;
     _result.predicteddecrease = predicteddecrease;
     // NOTE(review): xm1/xp1 are assigned without Clone(), unlike every
     // other array-looking field here (cf. fm1/fp1 below). If they are
     // arrays, the copy shares storage with the original -- confirm this
     // is intended against the field declarations.
     _result.xm1 = xm1;
     _result.xp1 = xp1;
     _result.fm1 = (double[])fm1.Clone();
     _result.fp1 = (double[])fp1.Clone();
     _result.fc1 = (double[])fc1.Clone();
     _result.gm1 = (double[])gm1.Clone();
     _result.gp1 = (double[])gp1.Clone();
     _result.gc1 = (double[])gc1.Clone();
     _result.internalstate = (minlbfgs.minlbfgsstate)internalstate.make_copy();
     _result.internalrep = (minlbfgs.minlbfgsreport)internalrep.make_copy();
     _result.qpstate = (minqp.minqpstate)qpstate.make_copy();
     _result.qprep = (minqp.minqpreport)qprep.make_copy();
     return _result;
 }
Пример #36
0
        /*************************************************************************
        Returns norm of bounded scaled anti-gradient.

        Bounded antigradient is a vector obtained from  anti-gradient  by  zeroing
        components which point outwards:
            result = norm(v)
            v[i]=0     if ((-g[i]<0)and(x[i]=bndl[i])) or
                          ((-g[i]>0)and(x[i]=bndu[i]))
            v[i]=-g[i]*s[i] otherwise, where s[i] is a scale for I-th variable

        This function may be used to check a stopping criterion.

          -- ALGLIB --
             Copyright 14.01.2011 by Bochkanov Sergey
        *************************************************************************/
        private static double boundedscaledantigradnorm(minlmstate state,
            double[] x,
            double[] g)
        {
            //
            // Accumulate the sum of squares of the scaled anti-gradient,
            // with components pointing outside the active bounds zeroed out.
            //
            double sumsq = 0;
            int n = state.n;
            for(int k = 0; k < n; k++)
            {
                double comp = -(g[k]*state.s[k]);
                bool blockedbelow = state.havebndl[k] && (double)(x[k])<=(double)(state.bndl[k]) && (double)(-g[k])<(double)(0);
                bool blockedabove = state.havebndu[k] && (double)(x[k])>=(double)(state.bndu[k]) && (double)(-g[k])>(double)(0);
                if( blockedbelow || blockedabove )
                {
                    comp = 0;
                }
                sumsq = sumsq+math.sqr(comp);
            }
            return Math.Sqrt(sumsq);
        }
Пример #37
0
        /*************************************************************************
        Prepare internal structures (except for RComm).

        Allocates gradient/Jacobian/Hessian buffers according to the mode and
        initializes the internal L-BFGS optimizer at the origin.

        Note: M must be zero for FGH mode, non-zero for V/VJ/FJ/FGJ mode.
        *************************************************************************/
        private static void lmprepare(int n,
            int m,
            bool havegrad,
            minlmstate state)
        {
            int i = 0;

            //
            // Degenerate sizes: nothing to allocate.
            // (short-circuit || used here, consistent with the rest of the unit,
            // instead of the non-short-circuit | operator)
            //
            if( n<=0 || m<0 )
            {
                return;
            }
            if( havegrad )
            {
                // buffer for the gradient of the scalar target
                state.g = new double[n];
            }
            if( m!=0 )
            {
                // V/VJ/FJ/FGJ mode: Jacobian and function-vector buffers
                state.j = new double[m, n];
                state.fi = new double[m];
                state.fibase = new double[m];
                state.deltaf = new double[m];
                state.fm2 = new double[m];
                state.fm1 = new double[m];
                state.fp2 = new double[m];
                state.fp1 = new double[m];
            }
            else
            {
                // FGH mode: dense Hessian storage
                state.h = new double[n, n];
            }
            
            //
            // N-sized work vectors and model matrices
            //
            state.x = new double[n];
            state.deltax = new double[n];
            state.quadraticmodel = new double[n, n];
            state.dampedmodel = new double[n, n];
            state.xbase = new double[n];
            state.gbase = new double[n];
            state.xdir = new double[n];
            
            //
            // prepare internal L-BFGS, started from the origin; its iteration
            // budget is capped by both AddIters and problem size
            //
            for(i=0; i<=n-1; i++)
            {
                state.x[i] = 0;
            }
            minlbfgs.minlbfgscreate(n, Math.Min(additers, n), state.x, state.internalstate);
            minlbfgs.minlbfgssetcond(state.internalstate, 0.0, 0.0, 0.0, Math.Min(additers, n));
        }