Example #1
        /// <summary>
        ///     Update the weights in the neural network.
        /// </summary>
        public void UpdateWeights()
        {
            var w = (double[])_weights.Clone();

            for (int i = 0; i < w.Length; i++)
            {
                w[i] += _deltas[i];
            }

            NetworkCODEC.ArrayToNetwork(w, _network);
        }
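Every example here is built on the same round trip: NetworkCODEC.NetworkToArray flattens a network's weights and bias values into a double[], and NetworkCODEC.ArrayToNetwork writes such an array back into the network. A minimal sketch of the pattern (the network shape is arbitrary, chosen to mirror the tests below):

        BasicNetwork network = EncogUtility.SimpleFeedForward(2, 3, 0, 1, false);

        // Flatten all weights and biases into a single array...
        double[] w = NetworkCODEC.NetworkToArray(network);

        // ...adjust them in array form (here, a trivial nudge)...
        for (int i = 0; i < w.Length; i++)
        {
            w[i] += 0.01;
        }

        // ...and write the result back into the network.
        NetworkCODEC.ArrayToNetwork(w, network);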
Example #2
        /// <summary>
        /// Called just before a training iteration.
        /// </summary>
        public void PreIteration()
        {
            BasicNetwork network = this.train.Network;

            if (network != null)
            {
                this.lastError   = this.train.Error;
                this.lastNetwork = NetworkCODEC.NetworkToArray(network);
                this.train.Error = this.lastError;
            }
        }
Example #3
        /// <summary>
        /// Decode the genomes into a neural network.
        /// </summary>
        ///
        public override sealed void Decode()
        {
            var net = new double[_networkChromosome.Genes.Count];

            for (int i = 0; i < net.Length; i++)
            {
                var gene = (DoubleGene)_networkChromosome.Genes[i];
                net[i] = gene.Value;
            }
            NetworkCODEC.ArrayToNetwork(net, (BasicNetwork)Organism);
        }
Example #4
        public void TestRandomizeNeuronOutput()
        {
            double[]     d       = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
            BasicNetwork network = EncogUtility.SimpleFeedForward(2, 3, 0, 1, false);

            NetworkCODEC.ArrayToNetwork(d, network);
            PruneSelective prune = new PruneSelective(network);

            prune.RandomizeNeuron(100, 100, 2, 0);
            Assert.AreEqual("100,100,100,100,0,0,0,0,0,0,0,0,0", network.DumpWeights());
        }
Example #5
        /// <summary>
        /// Perform one iteration.
        /// </summary>
        public override void Iteration()
        {
            LUDecomposition decomposition;

            PreIteration();

            _hessian.Clear();
            _weights = NetworkCODEC.NetworkToArray(_network);

            _hessian.Compute();
            double currentError = _hessian.SSE;

            SaveDiagonal();

            double startingError = currentError;
            bool   done          = false;

            while (!done)
            {
                ApplyLambda();
                decomposition = new LUDecomposition(_hessian.HessianMatrix);

                if (decomposition.IsNonsingular)
                {
                    _deltas = decomposition.Solve(_hessian.Gradients);

                    UpdateWeights();
                    currentError = CalculateError();

                    if (currentError < startingError)
                    {
                        _lambda /= LevenbergMarquardtTraining.ScaleLambda;
                        done     = true;
                    }
                }

                if (!done)
                {
                    _lambda *= LevenbergMarquardtTraining.ScaleLambda;
                    if (_lambda > LevenbergMarquardtTraining.LambdaMax)
                    {
                        _lambda = LevenbergMarquardtTraining.LambdaMax;
                        done    = true;
                    }
                }
            }

            Error = currentError;

            PostIteration();
        }
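A hedged usage sketch for the trainer above, assuming the usual Encog (network, training set) constructor; trainingSet is an IMLDataSet supplied elsewhere and the names are illustrative:

        var train = new LevenbergMarquardtTraining(network, trainingSet);
        int epoch = 1;
        do
        {
            train.Iteration(); // one full lambda-adaptation cycle as shown above
            Console.WriteLine(@"Epoch #" + epoch + @" Error: " + train.Error);
            epoch++;
        } while (train.Error > 0.01 && epoch <= 100);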
Example #6
        /// <summary>
        /// Update the weights.
        /// </summary>
        ///
        /// <returns>Half the sum of the squared weights.</returns>
        public double UpdateWeights()
        {
            double result = 0;
            var    w      = (double[])_weights.Clone();

            for (int i = 0; i < w.Length; i++)
            {
                w[i]   += _deltas[i];
                result += w[i] * w[i];
            }

            NetworkCODEC.ArrayToNetwork(w, _network);

            return(result / 2.0d);
        }
Example #7
        /// <summary>
        /// Randomize the weights and thresholds. This method does most of the
        /// work of the class. Each call randomizes the weights according to the
        /// current temperature: the higher the temperature, the more randomness.
        /// </summary>
        public void Randomize()
        {
            double[] array = NetworkCODEC
                             .NetworkToArray(network);

            for (int i = 0; i < array.Length; i++)
            {
                double add = NeuralSimulatedAnnealing.CUT - ThreadSafeRandom.NextDouble();
                add     /= this.anneal.StartTemperature;
                add     *= this.anneal.Temperature;
                array[i] = array[i] + add;
            }

            NetworkCODEC.ArrayToNetwork(array, network);
        }
Example #8
        /// <summary>
        /// Randomize the weights and bias values. This method does most of the
        /// work of the class. Each call randomizes the weights according to the
        /// current temperature: the higher the temperature, the more randomness.
        /// </summary>
        ///
        public void Randomize()
        {
            double[] array = NetworkCODEC
                             .NetworkToArray(_network);

            for (int i = 0; i < array.Length; i++)
            {
                double add = Cut - ThreadSafeRandom.NextDouble();
                add     /= _anneal.StartTemperature;
                add     *= _anneal.Temperature;
                array[i] = array[i] + add;
            }

            NetworkCODEC.ArrayToNetwork(array,
                                        _network);
        }
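For reference, both versions of Randomize apply the same per-weight jitter: add = (Cut - U) * T / T0, with U uniform on [0, 1). Assuming Encog's cut constant is 0.5, the jitter is symmetric around zero and shrinks as the temperature cools. An isolated sketch of that arithmetic:

        var rng = new Random();
        double cut = 0.5;                 // assumed value of Cut / CUT
        double startTemperature = 10.0;   // T0, fixed for the run
        double temperature = 2.5;         // T, lowered as annealing proceeds

        // Uniform in (cut - 1, cut], scaled by the temperature ratio:
        double add = (cut - rng.NextDouble()) / startTemperature * temperature;
        // Here add lies in (-0.125, 0.125]; cooler temperatures mean smaller moves.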
Example #9
        /// <summary>
        /// Update the weights.
        /// </summary>
        /// <returns>Half the sum of the squared weights.</returns>
        public double UpdateWeights()
        {
            double result = 0;

            double[] w = (double[])this.weights.Clone();

            for (int i = 0; i < w.Length; i++)
            {
                w[i]   += this.deltas[i];
                result += w[i] * w[i];
            }

            NetworkCODEC.ArrayToNetwork(w, this.network);

            return(result / 2.0);
        }
Example #10
        private BasicNetwork ObtainNetwork()
        {
            BasicNetwork network = EncogUtility.SimpleFeedForward(2, 3, 0, 4, false);

            double[] weights = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 };
            NetworkCODEC.ArrayToNetwork(weights, network);

            Assert.AreEqual(1.0, network.GetWeight(1, 0, 0), 0.01);
            Assert.AreEqual(2.0, network.GetWeight(1, 1, 0), 0.01);
            Assert.AreEqual(3.0, network.GetWeight(1, 2, 0), 0.01);
            Assert.AreEqual(4.0, network.GetWeight(1, 3, 0), 0.01);

            Assert.AreEqual(5.0, network.GetWeight(1, 0, 1), 0.01);
            Assert.AreEqual(6.0, network.GetWeight(1, 1, 1), 0.01);
            Assert.AreEqual(7.0, network.GetWeight(1, 2, 1), 0.01);
            Assert.AreEqual(8.0, network.GetWeight(1, 3, 1), 0.01);

            Assert.AreEqual(9.0, network.GetWeight(1, 0, 2), 0.01);
            Assert.AreEqual(10.0, network.GetWeight(1, 1, 2), 0.01);
            Assert.AreEqual(11.0, network.GetWeight(1, 2, 2), 0.01);
            Assert.AreEqual(12.0, network.GetWeight(1, 3, 2), 0.01);

            Assert.AreEqual(13.0, network.GetWeight(1, 0, 3), 0.01);
            Assert.AreEqual(14.0, network.GetWeight(1, 1, 3), 0.01);
            Assert.AreEqual(15.0, network.GetWeight(1, 2, 3), 0.01);
            Assert.AreEqual(16.0, network.GetWeight(1, 3, 3), 0.01);

            Assert.AreEqual(17.0, network.GetWeight(0, 0, 0), 0.01);
            Assert.AreEqual(18.0, network.GetWeight(0, 1, 0), 0.01);
            Assert.AreEqual(19.0, network.GetWeight(0, 2, 0), 0.01);
            Assert.AreEqual(20.0, network.GetWeight(0, 0, 1), 0.01);
            Assert.AreEqual(21.0, network.GetWeight(0, 1, 1), 0.01);
            Assert.AreEqual(22.0, network.GetWeight(0, 2, 1), 0.01);


            Assert.AreEqual(23.0, network.GetWeight(0, 0, 2), 0.01);
            Assert.AreEqual(24.0, network.GetWeight(0, 1, 2), 0.01);
            Assert.AreEqual(25.0, network.GetWeight(0, 2, 2), 0.01);


            return(network);
        }
Example #11
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="network">an initialised Encog network.
        ///                          The networks in the swarm will be created with
        ///                          the same topology as this network.</param>
        /// <param name="randomizer">any type of Encog network weight initialisation
        ///                          object.</param>
        /// <param name="calculateScore">any type of Encog network scoring/fitness object.</param>
        /// <param name="populationSize">the swarm size.</param>
        public NeuralPSO(BasicNetwork network,
                         IRandomizer randomizer, ICalculateScore calculateScore,
                         int populationSize)
            : base(TrainingImplementationType.Iterative)
        {
            // initialisation of the member variables
            m_populationSize = populationSize;
            m_randomizer     = randomizer;
            m_calculateScore = calculateScore;
            m_bestNetwork    = network;

            m_networks        = new BasicNetwork[m_populationSize];
            m_velocities      = null;
            m_bestVectors     = new double[m_populationSize][];
            m_bestErrors      = new double[m_populationSize];
            m_bestVectorIndex = -1;

            // get a vector from the network.
            m_bestVector = NetworkCODEC.NetworkToArray(m_bestNetwork);

            m_va = new VectorAlgebra();
        }
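A sketch of how this constructor might be wired up; NguyenWidrowRandomizer and TrainingSetScore are typical Encog choices, but any IRandomizer/ICalculateScore pair works, and trainingSet is assumed to exist:

        IRandomizer randomizer = new NguyenWidrowRandomizer();
        ICalculateScore score  = new TrainingSetScore(trainingSet);
        var pso = new NeuralPSO(network, randomizer, score, 30); // swarm of 30
        pso.Iteration();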
Example #12
        /// <summary>
        /// Called just after a training iteration.
        /// </summary>
        public void PostIteration()
        {
            if (this.ready)
            {
                if (this.train.Error > this.lastError)
                {
#if logging
                    if (this.logger.IsDebugEnabled)
                    {
                        this.logger
                        .Debug("Greedy strategy dropped last iteration.");
                    }
#endif
                    this.train.Error = this.lastError;
                    NetworkCODEC.ArrayToNetwork(this.lastNetwork, this.train
                                                .Network);
                }
            }
            else
            {
                this.ready = true;
            }
        }
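PreIteration (Example #2) and PostIteration together implement a greedy rollback: snapshot the weights before each iteration, and restore them if the error got worse. A sketch of how such a strategy is typically attached to an Encog trainer (Backpropagation stands in for any trainer that accepts strategies; trainingSet is assumed):

        var train = new Backpropagation(network, trainingSet);
        train.AddStrategy(new Greedy()); // calls Pre/PostIteration around each Iteration()
        train.Iteration();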
Example #13
        /// <inheritdoc/>
        public override void Iteration()
        {
            if (_converged)
            {
                return;
            }

            int n = _start.Length;

            for (int i = 0; i < n; i++)
            {
                _p[i + n * n] = _start[i];
            }
            _y[n] = Fn(_start);
            for (int j = 0; j < n; j++)
            {
                double x = _start[j];
                _start[j] = _start[j] + _step[j] * _del;
                for (int i = 0; i < n; i++)
                {
                    _p[i + j * n] = _start[i];
                }
                _y[j]     = Fn(_start);
                _start[j] = x;
            }

            /*
             * The simplex construction is complete.
             *
             * Find highest and lowest Y values. YNEWLO = Y(IHI) indicates the
             * vertex of the simplex to be replaced.
             */
            _ylo = _y[0];
            _ilo = 0;

            for (int i = 1; i < _nn; i++)
            {
                if (_y[i] < _ylo)
                {
                    _ylo = _y[i];
                    _ilo = i;
                }
            }

            /*
             * Inner loop.
             */
            for (;;)
            {
                /*
                 * if (kcount <= icount) { break; }
                 */
                _ynewlo = _y[0];
                _ihi    = 0;

                for (int i = 1; i < _nn; i++)
                {
                    if (_ynewlo < _y[i])
                    {
                        _ynewlo = _y[i];
                        _ihi    = i;
                    }
                }

                /*
                 * Calculate PBAR, the centroid of the simplex vertices excepting
                 * the vertex with Y value YNEWLO.
                 */
                for (int i = 0; i < n; i++)
                {
                    _z = 0.0;
                    for (int j = 0; j < _nn; j++)
                    {
                        _z = _z + _p[i + j * n];
                    }
                    _z       = _z - _p[i + _ihi * n];
                    _pbar[i] = _z / n;
                }

                /*
                 * Reflection through the centroid.
                 */
                for (int i = 0; i < n; i++)
                {
                    _pstar[i] = _pbar[i] + rcoeff
                                * (_pbar[i] - _p[i + _ihi * n]);
                }
                _ystar = Fn(_pstar);

                /*
                 * Successful reflection, so extension.
                 */
                if (_ystar < _ylo)
                {
                    for (int i = 0; i < n; i++)
                    {
                        _p2Star[i] = _pbar[i] + ecoeff
                                     * (_pstar[i] - _pbar[i]);
                    }
                    _y2Star = Fn(_p2Star);

                    /*
                     * Check extension.
                     */
                    if (_ystar < _y2Star)
                    {
                        for (int i = 0; i < n; i++)
                        {
                            _p[i + _ihi * n] = _pstar[i];
                        }
                        _y[_ihi] = _ystar;
                    }

                    /*
                     * Retain extension or contraction.
                     */
                    else
                    {
                        for (int i = 0; i < n; i++)
                        {
                            _p[i + _ihi * n] = _p2Star[i];
                        }
                        _y[_ihi] = _y2Star;
                    }
                }

                /*
                 * No extension.
                 */
                else
                {
                    _l = 0;
                    for (int i = 0; i < _nn; i++)
                    {
                        if (_ystar < _y[i])
                        {
                            _l = _l + 1;
                        }
                    }

                    if (1 < _l)
                    {
                        for (int i = 0; i < n; i++)
                        {
                            _p[i + _ihi * n] = _pstar[i];
                        }
                        _y[_ihi] = _ystar;
                    }

                    /*
                     * Contraction on the Y(IHI) side of the centroid.
                     */
                    else if (_l == 0)
                    {
                        for (int i = 0; i < n; i++)
                        {
                            _p2Star[i] = _pbar[i] + ccoeff
                                         * (_p[i + _ihi * n] - _pbar[i]);
                        }
                        _y2Star = Fn(_p2Star);

                        /*
                         * Contract the whole simplex.
                         */
                        if (_y[_ihi] < _y2Star)
                        {
                            for (int j = 0; j < _nn; j++)
                            {
                                for (int i = 0; i < n; i++)
                                {
                                    _p[i + j * n] = (_p[i + j * n] + _p[i
                                                                        + _ilo * n]) * 0.5;
                                    _trainedWeights[i] = _p[i + j * n];
                                }
                                _y[j] = Fn(_trainedWeights);
                            }
                            _ylo = _y[0];
                            _ilo = 0;

                            for (int i = 1; i < _nn; i++)
                            {
                                if (_y[i] < _ylo)
                                {
                                    _ylo = _y[i];
                                    _ilo = i;
                                }
                            }
                            continue;
                        }

                        /*
                         * Retain contraction.
                         */
                        for (int i = 0; i < n; i++)
                        {
                            _p[i + _ihi * n] = _p2Star[i];
                        }
                        _y[_ihi] = _y2Star;
                    }

                    /*
                     * Contraction on the reflection side of the centroid.
                     */
                    else if (_l == 1)
                    {
                        for (int i = 0; i < n; i++)
                        {
                            _p2Star[i] = _pbar[i] + ccoeff
                                         * (_pstar[i] - _pbar[i]);
                        }
                        _y2Star = Fn(_p2Star);

                        /*
                         * Retain reflection?
                         */
                        if (_y2Star <= _ystar)
                        {
                            for (int i = 0; i < n; i++)
                            {
                                _p[i + _ihi * n] = _p2Star[i];
                            }
                            _y[_ihi] = _y2Star;
                        }
                        else
                        {
                            for (int i = 0; i < n; i++)
                            {
                                _p[i + _ihi * n] = _pstar[i];
                            }
                            _y[_ihi] = _ystar;
                        }
                    }
                }

                /*
                 * Check if YLO improved.
                 */
                if (_y[_ihi] < _ylo)
                {
                    _ylo = _y[_ihi];
                    _ilo = _ihi;
                }
                _jcount = _jcount - 1;

                if (0 < _jcount)
                {
                    continue;
                }

                /*
                 * Check to see if minimum reached.
                 */
                // if (icount <= kcount)
                {
                    _jcount = _konvge;

                    _z = 0.0;
                    for (int i = 0; i < _nn; i++)
                    {
                        _z = _z + _y[i];
                    }
                    double x = _z / _nn;

                    _z = 0.0;
                    for (int i = 0; i < _nn; i++)
                    {
                        var inner = _y[i] - x;
                        _z = _z + inner * inner;
                    }

                    if (_z <= _rq)
                    {
                        break;
                    }
                }
            }

            /*
             * Factorial tests to check that YNEWLO is a local minimum.
             */
            for (int i = 0; i < n; i++)
            {
                _trainedWeights[i] = _p[i + _ilo * n];
            }
            _ynewlo = _y[_ilo];

            bool fault = false;

            for (int i = 0; i < n; i++)
            {
                _del = _step[i] * eps;
                _trainedWeights[i] += _del;
                _z = Fn(_trainedWeights);
                if (_z < _ynewlo)
                {
                    fault = true;
                    break;
                }
                _trainedWeights[i] = _trainedWeights[i] - _del
                                     - _del;
                _z = Fn(_trainedWeights);
                if (_z < _ynewlo)
                {
                    fault = true;
                    break;
                }
                _trainedWeights[i] += _del;
            }

            if (!fault)
            {
                _converged = true;
            }
            else
            {
                /*
                 * Restart the procedure.
                 */
                for (int i = 0; i < n; i++)
                {
                    _start[i] = _trainedWeights[i];
                }
                _del = eps;
            }

            Error = _ynewlo;
            NetworkCODEC.ArrayToNetwork(_trainedWeights, _network);
        }
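A reading aid for the flattened arrays above (an explanatory sketch, not library code): _p stores the n + 1 simplex vertices column-major, so vertex j occupies _p[j * n] through _p[j * n + n - 1], and _y[j] holds Fn at vertex j. The setup loop seeds vertex n with _start and vertices 0..n-1 with _start stepped along one axis each:

        // Coordinate i of vertex j, using the same indexing as Iteration():
        double VertexCoordinate(double[] p, int n, int j, int i)
        {
            return p[i + j * n];
        }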
Example #14
 /// <summary>
 /// Calculate the error for the neural network with a given set of weights.
 /// </summary>
 /// <param name="weights">The weights to use.</param>
 /// <returns>The current error.</returns>
 public double Fn(double[] weights)
 {
     NetworkCODEC.ArrayToNetwork(weights, _network);
     return(_network.CalculateError(Training));
 }
Example #15
 /// <summary>
 /// Copy an array of doubles into the current best network.
 /// </summary>
 ///
 /// <param name="array">An array of weight and bias values.</param>
 public void PutArray(double[] array)
 {
     NetworkCODEC.ArrayToNetwork(array,
                                 _network);
 }
Example #16
 /// <summary>
 /// Sets the state of a network in the swarm.
 /// </summary>
 /// <param name="particleIndex">index of the network in the swarm</param>
 /// <param name="state">an array of weights and biases</param>
 protected void SetNetworkState(int particleIndex, double[] state)
 {
     NetworkCODEC.ArrayToNetwork(state, m_networks[particleIndex]);
 }
Example #17
 /// <summary>
 /// Returns the state of a network in the swarm.
 /// </summary>
 /// <param name="particleIndex">index of the network in the swarm</param>
 /// <returns>an array of weights and biases for the given network</returns>
 protected double[] GetNetworkState(int particleIndex)
 {
     return(NetworkCODEC.NetworkToArray(m_networks[particleIndex]));
 }
Example #18
 /// <summary>
 /// Determine if this neural network is equal to another. Equal neural
 /// networks have the same weight matrix and bias values, within a specified
 /// precision.
 /// </summary>
 ///
 /// <param name="other">The other neural network.</param>
 /// <param name="precision">The number of decimal places to compare to.</param>
 /// <returns>True if the two neural networks are equal.</returns>
 public bool Equals(BasicNetwork other, int precision)
 {
     return(NetworkCODEC.Equals(this, other, precision));
 }
Example #19
        /// <summary>
        /// Perform one iteration.
        /// </summary>
        ///
        public override void Iteration()
        {
            LUDecomposition decomposition = null;

            PreIteration();

            _weights = NetworkCODEC.NetworkToArray(_network);

            IComputeJacobian j = new JacobianChainRule(_network,
                                                       _indexableTraining);

            double sumOfSquaredErrors  = j.Calculate(_weights);
            double sumOfSquaredWeights = CalculateSumOfSquaredWeights();

            // this.setError(j.getError());
            CalculateHessian(j.Jacobian, j.RowErrors);

            // Define the objective function
            // Bayesian regularization objective function
            double objective = _beta * sumOfSquaredErrors + _alpha
                               * sumOfSquaredWeights;
            double current = objective + 1.0d;

            // Start the main Levenberg-Marquardt method
            _lambda /= ScaleLambda;

            // We'll try to find a direction with less error
            // (or where the objective function is smaller)
            while ((current >= objective) &&
                   (_lambda < LambdaMax))
            {
                _lambda *= ScaleLambda;

                // Update diagonal (Levenberg-Marquardt formula)
                for (int i = 0; i < _parametersLength; i++)
                {
                    _hessian[i][i] = _diagonal[i]
                                     + (_lambda + _alpha);
                }

                // Decompose to solve the linear system
                decomposition = new LUDecomposition(_hessianMatrix);

                // Check if the Jacobian has become non-invertible
                if (!decomposition.IsNonsingular)
                {
                    continue;
                }

                // Solve using LU (or SVD) decomposition
                _deltas = decomposition.Solve(_gradient);

                // Update weights using the calculated deltas
                sumOfSquaredWeights = UpdateWeights();

                // Calculate the new error
                sumOfSquaredErrors = 0.0d;
                for (int i = 0; i < _trainingLength; i++)
                {
                    _indexableTraining.GetRecord(i, _pair);
                    IMLData actual = _network
                                     .Compute(_pair.Input);
                    double e = _pair.Ideal[0]
                               - actual[0];
                    sumOfSquaredErrors += e * e;
                }
                sumOfSquaredErrors /= 2.0d;

                // Update the objective function
                current = _beta * sumOfSquaredErrors + _alpha
                          * sumOfSquaredWeights;

                // If the objective function is bigger than before, the method
                // is tried again using a greater damping factor.
            }

            // If this iteration caused an error drop, then the next iteration
            // will use a smaller damping factor.
            _lambda /= ScaleLambda;

            if (_useBayesianRegularization && (decomposition != null))
            {
                // Compute the trace for the inverse Hessian
                double trace = Trace(decomposition.Inverse());

                // Poland's update formula:
                _gamma = _parametersLength - (_alpha * trace);
                _alpha = _parametersLength
                         / (2.0d * sumOfSquaredWeights + trace);
                _beta = Math.Abs((_trainingLength - _gamma)
                                 / (2.0d * sumOfSquaredErrors));
            }

            Error = sumOfSquaredErrors;

            PostIteration();
        }
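The closing hyper-parameter update matches the Bayesian-regularization formulas as implemented here, with N = _parametersLength, P = _trainingLength, E_w = sumOfSquaredWeights, and E_d = sumOfSquaredErrors:

        gamma = N - alpha * tr(H^-1)
        alpha = N / (2 * E_w + tr(H^-1))
        beta  = |(P - gamma) / (2 * E_d)|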
Example #20
 /// <summary>
 /// Get the network as an array of doubles.
 /// </summary>
 /// <returns>The network as an array of doubles.</returns>
 public double[] GetArray()
 {
     return(NetworkCODEC
            .NetworkToArray(network));
 }
Example #21
        /// <summary>
        /// Perform one iteration.
        /// </summary>
        public override void Iteration()
        {
            LUDecomposition decomposition = null;
            double          trace         = 0;

            PreIteration();

            this.weights = NetworkCODEC.NetworkToArray(this.network);

            IComputeJacobian j = new JacobianChainRule(this.network,
                                                       this.indexableTraining);

            double sumOfSquaredErrors  = j.Calculate(this.weights);
            double sumOfSquaredWeights = CalculateSumOfSquaredWeights();

            // this.setError(j.getError());
            CalculateHessian(j.Jacobian, j.RowErrors);

            // Define the objective function
            // Bayesian regularization objective function
            double objective = this.beta * sumOfSquaredErrors + this.alpha
                               * sumOfSquaredWeights;
            double current = objective + 1.0;

            // Start the main Levenberg-Marquardt method
            this.lambda /= LevenbergMarquardtTraining.SCALE_LAMBDA;

            // We'll try to find a direction with less error
            // (or where the objective function is smaller)
            while ((current >= objective) &&
                   (this.lambda < LevenbergMarquardtTraining.LAMBDA_MAX))
            {
                this.lambda *= LevenbergMarquardtTraining.SCALE_LAMBDA;

                // Update diagonal (Levenberg-Marquardt formula)
                for (int i = 0; i < this.parametersLength; i++)
                {
                    this.hessian[i][i] = this.diagonal[i]
                                         + (this.lambda + this.alpha);
                }

                // Decompose to solve the linear system
                decomposition = new LUDecomposition(
                    this.hessianMatrix);

                // Check if the Jacobian has become non-invertible
                if (!decomposition.IsNonsingular)
                {
                    continue;
                }

                // Solve using LU (or SVD) decomposition
                this.deltas = decomposition.Solve(this.gradient);

                // Update weights using the calculated deltas
                sumOfSquaredWeights = UpdateWeights();

                // Calculate the new error
                sumOfSquaredErrors = 0.0;
                for (int i = 0; i < this.trainingLength; i++)
                {
                    this.indexableTraining.GetRecord(i, this.pair);
                    INeuralData actual = this.network.Compute(this.pair
                                                              .Input);
                    double e = this.pair.Ideal[0]
                               - actual[0];
                    sumOfSquaredErrors += e * e;
                }
                sumOfSquaredErrors /= 2.0;

                // Update the objective function
                current = this.beta * sumOfSquaredErrors + this.alpha
                          * sumOfSquaredWeights;

                // If the objective function is bigger than before, the method
                // is tried again using a greater damping factor.
            }

            // If this iteration caused an error drop, then the next iteration
            // will use a smaller damping factor.
            this.lambda /= LevenbergMarquardtTraining.SCALE_LAMBDA;

            if (useBayesianRegularization && decomposition != null)
            {
                // Compute the trace for the inverse Hessian
                trace = Trace(decomposition.Inverse());

                // Poland's update formula:
                gamma = this.parametersLength - (alpha * trace);
                alpha = this.parametersLength / (2.0 * sumOfSquaredWeights + trace);
                beta  = Math.Abs((this.trainingLength - gamma) / (2.0 * sumOfSquaredErrors));
            }

            this.Error = sumOfSquaredErrors;

            PostIteration();
        }