Example #1
        /// <summary>
        /// Construct a Nelder Mead trainer with a definable step.
        /// </summary>
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use.</param>
        /// <param name="stepValue">The step value. This value defines, to some degree the range
        /// of different weights that will be tried.</param>
        public NelderMeadTraining(BasicNetwork network,
                                  IMLDataSet training, double stepValue) :
            base(TrainingImplementationType.OnePass)
        {
            this._network = network;
            Training      = training;

            _start          = NetworkCODEC.NetworkToArray(network);
            _trainedWeights = NetworkCODEC.NetworkToArray(network);

            int n = _start.Length;

            _p      = new double[n * (n + 1)];
            _pstar  = new double[n];
            _p2Star = new double[n];
            _pbar   = new double[n];
            _y      = new double[n + 1];

            _nn  = n + 1;
            _del = 1.0;
            _rq  = EncogFramework.DefaultDoubleEqual * n;

            _step   = new double[NetworkCODEC.NetworkSize(network)];
            _jcount = _konvge = 500;
            EngineArray.Fill(_step, stepValue);
        }
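        // Hypothetical usage sketch (not from the original source; the helper name,
        // the 100.0 step value, and the error printout are assumptions). Because the
        // trainer is built as TrainingImplementationType.OnePass, a single
        // Iteration() call runs the entire Nelder-Mead simplex search.
        private static void TrainWithNelderMead(BasicNetwork network, IMLDataSet trainingSet)
        {
            var train = new NelderMeadTraining(network, trainingSet, 100.0);
            train.Iteration(); // one pass performs the full simplex optimisation
            Console.WriteLine(@"Nelder-Mead error: " + train.Error);
        }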
Example #2
        /// <summary>
        /// Encode the network to a genome.
        /// </summary>
        public override void Encode()
        {
            double[] net = NetworkCODEC.NetworkToArray((BasicNetwork)Organism);

            for (int i = 0; i < net.Length; i++)
            {
                ((DoubleGene)networkChromosome.Genes[i]).Value = net[i];
            }
        }
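        // Hedged sketch of the inverse operation (Decode() is not part of this
        // excerpt; this mirrors Encode() above and reuses its gene cast, so treat it
        // as illustrative only): copy gene values back into the network's weights.
        public override void Decode()
        {
            var net = new double[networkChromosome.Genes.Count];

            for (int i = 0; i < net.Length; i++)
            {
                net[i] = ((DoubleGene)networkChromosome.Genes[i]).Value;
            }

            NetworkCODEC.ArrayToNetwork(net, (BasicNetwork)Organism);
        }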
        /// <inheritdoc />
        public override void Iteration()
        {
            if (!_initComplete)
            {
                _hessian.Init(_network, Training);
                _initComplete = true;
            }

            PreIteration();

            _hessian.Clear();
            _weights = NetworkCODEC.NetworkToArray(_network);

            _hessian.Compute();
            double currentError = _hessian.SSE;

            SaveDiagonal();

            double startingError = currentError;
            bool   done          = false;
            bool   nonSingular;

            while (!done)
            {
                ApplyLambda();
                var decomposition = new LUDecomposition(_hessian.HessianMatrix);

                nonSingular = decomposition.IsNonsingular;

                if (nonSingular)
                {
                    _deltas = decomposition.Solve(_hessian.Gradients);
                    UpdateWeights();
                    currentError = CalculateError();
                }

                if (!nonSingular || currentError >= startingError)
                {
                    _lambda *= ScaleLambda;
                    if (_lambda > LambdaMax)
                    {
                        _lambda = LambdaMax;
                        done    = true;
                    }
                }
                else
                {
                    _lambda /= ScaleLambda;
                    done     = true;
                }
            }

            Error = currentError;

            PostIteration();
        }
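        // Hedged usage sketch (assumes this Iteration() belongs to Encog's
        // LevenbergMarquardtTraining, as the ScaleLambda/LambdaMax constants suggest;
        // the epoch cap and error target are arbitrary). Each call performs one
        // damped Gauss-Newton step, growing lambda until the solve reduces the error.
        private static void TrainWithLm(BasicNetwork network, IMLDataSet trainingSet)
        {
            var train = new LevenbergMarquardtTraining(network, trainingSet);

            int epoch = 0;
            do
            {
                train.Iteration();
                epoch++;
            } while (epoch < 100 && train.Error > 0.01);
        }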
Example #4
        /// <summary>
        /// Called just before a training iteration.
        /// </summary>
        public void PreIteration()
        {
            BasicNetwork network = this.train.Network;

            if (network != null)
            {
                this.lastError   = this.train.Error;
                this.lastNetwork = NetworkCODEC.NetworkToArray(network);
                this.train.Error = this.lastError;
            }
        }
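        // Hedged sketch of the matching PostIteration (assumed, in the spirit of
        // Encog's greedy strategy): if the iteration made the error worse, restore
        // the weight snapshot that PreIteration() above captured via NetworkToArray.
        public void PostIteration()
        {
            if (this.train.Error > this.lastError)
            {
                // roll the network back to the saved weights
                NetworkCODEC.ArrayToNetwork(this.lastNetwork, this.train.Network);
                this.train.Error = this.lastError;
            }
        }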
        /// <summary>
        /// Perform one iteration.
        /// </summary>
        public override void Iteration()
        {
            LUDecomposition decomposition;

            PreIteration();

            _hessian.Clear();
            _weights = NetworkCODEC.NetworkToArray(_network);

            _hessian.Compute();
            double currentError = _hessian.SSE;

            SaveDiagonal();

            double startingError = currentError;
            bool   done          = false;

            while (!done)
            {
                ApplyLambda();
                decomposition = new LUDecomposition(_hessian.HessianMatrix);

                if (decomposition.IsNonsingular)
                {
                    _deltas = decomposition.Solve(_hessian.Gradients);

                    UpdateWeights();
                    currentError = CalculateError();

                    if (currentError < startingError)
                    {
                        _lambda /= LevenbergMarquardtTraining.ScaleLambda;
                        done     = true;
                    }
                }

                if (!done)
                {
                    _lambda *= LevenbergMarquardtTraining.ScaleLambda;
                    if (_lambda > LevenbergMarquardtTraining.LambdaMax)
                    {
                        _lambda = LevenbergMarquardtTraining.LambdaMax;
                        done    = true;
                    }
                }
            }

            Error = currentError;

            PostIteration();
        }
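        // Hedged sketch of the ApplyLambda() helper called above (its body is not
        // part of this excerpt, so this shape is assumed): Levenberg-Marquardt
        // damping adds lambda to the Hessian diagonal saved by SaveDiagonal().
        private void ApplyLambda()
        {
            for (int i = 0; i < _weights.Length; i++)
            {
                // _diagonal is assumed to hold the values stored by SaveDiagonal()
                _hessian.HessianMatrix[i, i] = _diagonal[i] + _lambda;
            }
        }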
        /// <summary>
        /// Randomize the weights and thresholds. This method does most of the
        /// work of the class. Each call randomizes the data according to the
        /// current temperature. The higher the temperature, the more randomness.
        /// </summary>
        public void Randomize()
        {
            double[] array = NetworkCODEC.NetworkToArray(network);

            for (int i = 0; i < array.Length; i++)
            {
                double add = NeuralSimulatedAnnealing.CUT - ThreadSafeRandom.NextDouble();
                add     /= this.anneal.StartTemperature;
                add     *= this.anneal.Temperature;
                array[i] = array[i] + add;
            }

            NetworkCODEC.ArrayToNetwork(array, network);
        }
        /// <summary>
        /// Randomize the weights and bias values. This method does most of the
        /// work of the class. Each call randomizes the data according to the
        /// current temperature. The higher the temperature, the more randomness.
        /// </summary>
        ///
        public void Randomize()
        {
            double[] array = NetworkCODEC.NetworkToArray(_network);

            for (int i = 0; i < array.Length; i++)
            {
                double add = Cut - ThreadSafeRandom.NextDouble();
                add     /= _anneal.StartTemperature;
                add     *= _anneal.Temperature;
                array[i] = array[i] + add;
            }

            NetworkCODEC.ArrayToNetwork(array, _network);
        }
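        // Worked illustration (hedged; assumes Cut == 0.5, as in Encog's constant):
        // (Cut - NextDouble()) is uniform in (-0.5, 0.5], so each weight moves by at
        // most +/-0.5 * (Temperature / StartTemperature). At the start of the anneal
        // the jitter spans +/-0.5; as the temperature decays the perturbation
        // shrinks toward zero. The helper name below is an assumption.
        private static double SamplePerturbation(double startTemperature, double temperature)
        {
            const double cut = 0.5; // assumed value of the Cut constant
            double add = cut - ThreadSafeRandom.NextDouble();
            return add / startTemperature * temperature;
        }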
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="network">an initialised Encog network.
        ///                          The networks in the swarm will be created with
        ///                          the same topology as this network.</param>
        /// <param name="randomizer">any type of Encog network weight initialisation
        ///                          object.</param>
        /// <param name="calculateScore">any type of Encog network scoring/fitness object.</param>
        /// <param name="populationSize">the swarm size.</param>
        public NeuralPSO(BasicNetwork network,
                         IRandomizer randomizer, ICalculateScore calculateScore,
                         int populationSize)
            : base(TrainingImplementationType.Iterative)
        {
            // initialisation of the member variables
            m_populationSize = populationSize;
            m_randomizer     = randomizer;
            m_calculateScore = calculateScore;
            m_bestNetwork    = network;

            m_networks        = new BasicNetwork[m_populationSize];
            m_velocities      = null;
            m_bestVectors     = new double[m_populationSize][];
            m_bestErrors      = new double[m_populationSize];
            m_bestVectorIndex = -1;

            // get a vector from the network.
            m_bestVector = NetworkCODEC.NetworkToArray(m_bestNetwork);

            m_va = new VectorAlgebra();
        }
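        // Hypothetical usage sketch (helper name and iteration counts are
        // assumptions; NguyenWidrowRandomizer and TrainingSetScore are standard
        // Encog types): build a swarm of 30 particles around a template network.
        private static void TrainWithPso(BasicNetwork network, IMLDataSet trainingSet)
        {
            IRandomizer randomizer = new NguyenWidrowRandomizer();
            ICalculateScore score = new TrainingSetScore(trainingSet);

            var pso = new NeuralPSO(network, randomizer, score, 30);
            for (int i = 0; i < 50; i++)
            {
                pso.Iteration();
            }
        }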
        /// <summary>
        /// Returns the state of a network in the swarm.
        /// </summary>
        /// <param name="particleIndex">Index of the network in the swarm.</param>
        /// <returns>An array of weights and biases for the given network.</returns>
        protected double[] GetNetworkState(int particleIndex)
        {
            return NetworkCODEC.NetworkToArray(m_networks[particleIndex]);
        }
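        // Hedged sketch of the inverse setter (assumed counterpart to
        // GetNetworkState above): write a weight/bias vector back into a swarm
        // member via ArrayToNetwork.
        protected void SetNetworkState(int particleIndex, double[] state)
        {
            NetworkCODEC.ArrayToNetwork(state, m_networks[particleIndex]);
        }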
        /// <summary>
        /// Get the network as an array of doubles.
        /// </summary>
        /// <returns>The network as an array of doubles.</returns>
        public double[] GetArray()
        {
            return NetworkCODEC.NetworkToArray(network);
        }
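        // Hedged sketch of the matching setter (assumed counterpart to GetArray
        // above): push an array of doubles back into the network.
        public void PutArray(double[] array)
        {
            NetworkCODEC.ArrayToNetwork(array, network);
        }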
Example #11
        /// <summary>
        /// Perform one iteration.
        /// </summary>
        public override void Iteration()
        {
            LUDecomposition decomposition = null;
            double          trace         = 0;

            PreIteration();

            this.weights = NetworkCODEC.NetworkToArray(this.network);

            IComputeJacobian j = new JacobianChainRule(this.network,
                                                       this.indexableTraining);

            double sumOfSquaredErrors  = j.Calculate(this.weights);
            double sumOfSquaredWeights = CalculateSumOfSquaredWeights();

            // this.setError(j.getError());
            CalculateHessian(j.Jacobian, j.RowErrors);

            // Define the objective function
            // (the Bayesian regularization objective)
            double objective = this.beta * sumOfSquaredErrors + this.alpha
                               * sumOfSquaredWeights;
            double current = objective + 1.0;

            // Start the main Levenberg-Marquardt method
            this.lambda /= LevenbergMarquardtTraining.SCALE_LAMBDA;

            // We'll try to find a direction with less error
            // (or where the objective function is smaller)
            while ((current >= objective) &&
                   (this.lambda < LevenbergMarquardtTraining.LAMBDA_MAX))
            {
                this.lambda *= LevenbergMarquardtTraining.SCALE_LAMBDA;

                // Update diagonal (Levenberg-Marquardt formula)
                for (int i = 0; i < this.parametersLength; i++)
                {
                    this.hessian[i][i] = this.diagonal[i]
                                         + (this.lambda + this.alpha);
                }

                // Decompose to solve the linear system
                decomposition = new LUDecomposition(
                    this.hessianMatrix);

                // Check if the approximated Hessian has become non-invertible
                if (!decomposition.IsNonsingular)
                {
                    continue;
                }

                // Solve using LU (or SVD) decomposition
                this.deltas = decomposition.Solve(this.gradient);

                // Update weights using the calculated deltas
                sumOfSquaredWeights = UpdateWeights();

                // Calculate the new error
                sumOfSquaredErrors = 0.0;
                for (int i = 0; i < this.trainingLength; i++)
                {
                    this.indexableTraining.GetRecord(i, this.pair);
                INeuralData actual = this.network.Compute(this.pair.Input);
                double e = this.pair.Ideal[0] - actual[0];
                    sumOfSquaredErrors += e * e;
                }
                sumOfSquaredErrors /= 2.0;

                // Update the objective function
                current = this.beta * sumOfSquaredErrors + this.alpha
                          * sumOfSquaredWeights;

                // If the objective function is bigger than before, the method
                // is tried again using a greater damping factor.
            }

            // If this iteration caused an error drop, then the next iteration
            // will use a smaller damping factor.
            this.lambda /= LevenbergMarquardtTraining.SCALE_LAMBDA;

            if (useBayesianRegularization && decomposition != null)
            {
                // Compute the trace for the inverse Hessian
                trace = Trace(decomposition.Inverse());

                // Poland's update formula:
                gamma = this.parametersLength - (alpha * trace);
                alpha = this.parametersLength / (2.0 * sumOfSquaredWeights + trace);
                beta  = Math.Abs((this.trainingLength - gamma) / (2.0 * sumOfSquaredErrors));
            }

            this.Error = sumOfSquaredErrors;

            PostIteration();
        }
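        // Hedged sketch of the Trace() helper used above (its body is not part of
        // this excerpt, so the double[][] parameter shape is assumed): sum the main
        // diagonal of the inverse Hessian, which Poland's update formula needs.
        private static double Trace(double[][] matrix)
        {
            double result = 0.0;
            for (int i = 0; i < matrix.Length; i++)
            {
                result += matrix[i][i];
            }
            return result;
        }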
Example #12
        /// <summary>
        /// Perform one iteration.
        /// </summary>
        ///
        public override void Iteration()
        {
            LUDecomposition decomposition = null;

            PreIteration();

            _weights = NetworkCODEC.NetworkToArray(_network);

            IComputeJacobian j = new JacobianChainRule(_network,
                                                       _indexableTraining);

            double sumOfSquaredErrors  = j.Calculate(_weights);
            double sumOfSquaredWeights = CalculateSumOfSquaredWeights();

            // this.setError(j.getError());
            CalculateHessian(j.Jacobian, j.RowErrors);

            // Define the objective function
            // (the Bayesian regularization objective)
            double objective = _beta * sumOfSquaredErrors + _alpha
                               * sumOfSquaredWeights;
            double current = objective + 1.0d;

            // Start the main Levenberg-Marquardt method
            _lambda /= ScaleLambda;

            // We'll try to find a direction with less error
            // (or where the objective function is smaller)
            while ((current >= objective) &&
                   (_lambda < LambdaMax))
            {
                _lambda *= ScaleLambda;

                // Update diagonal (Levenberg-Marquardt formula)
                for (int i = 0; i < _parametersLength; i++)
                {
                    _hessian[i][i] = _diagonal[i]
                                     + (_lambda + _alpha);
                }

                // Decompose to solve the linear system
                decomposition = new LUDecomposition(_hessianMatrix);

                // Check if the approximated Hessian has become non-invertible
                if (!decomposition.IsNonsingular)
                {
                    continue;
                }

                // Solve using LU (or SVD) decomposition
                _deltas = decomposition.Solve(_gradient);

                // Update weights using the calculated deltas
                sumOfSquaredWeights = UpdateWeights();

                // Calculate the new error
                sumOfSquaredErrors = 0.0d;
                for (int i = 0; i < _trainingLength; i++)
                {
                    _indexableTraining.GetRecord(i, _pair);
                IMLData actual = _network.Compute(_pair.Input);
                double e = _pair.Ideal[0] - actual[0];
                    sumOfSquaredErrors += e * e;
                }
                sumOfSquaredErrors /= 2.0d;

                // Update the objective function
                current = _beta * sumOfSquaredErrors + _alpha
                          * sumOfSquaredWeights;

                // If the objective function is bigger than before, the method
                // is tried again using a greater damping factor.
            }

            // If this iteration caused an error drop, then the next iteration
            // will use a smaller damping factor.
            _lambda /= ScaleLambda;

            if (_useBayesianRegularization && (decomposition != null))
            {
                // Compute the trace for the inverse Hessian
                double trace = Trace(decomposition.Inverse());

                // Poland's update formula:
                _gamma = _parametersLength - (_alpha * trace);
                _alpha = _parametersLength
                         / (2.0d * sumOfSquaredWeights + trace);
                _beta = Math.Abs((_trainingLength - _gamma)
                                 / (2.0d * sumOfSquaredErrors));
            }

            Error = sumOfSquaredErrors;

            PostIteration();
        }
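        // Hypothetical usage sketch (the UseBayesianRegularization property name is
        // an assumption based on the _useBayesianRegularization field above): enable
        // Bayesian regularization so alpha and beta are re-estimated each iteration
        // via Poland's update formula.
        private static void TrainWithBayesianLm(BasicNetwork network, IMLDataSet trainingSet)
        {
            var train = new LevenbergMarquardtTraining(network, trainingSet)
            {
                UseBayesianRegularization = true // assumed property name
            };
            train.Iteration();
        }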