Esempio n. 1
0
        /// <inheritdoc />
        public void Iteration()
        {
            var layerOutput = new double[0];

            ErrorCalc.Clear();
            for (var sample = 0; sample < _trainingInput.Length; sample++)
            {
                // Feed the sample forward through every layer, sampling the
                // hidden activations from the visible ones at each step.
                for (var layerIdx = 0; layerIdx < _network.Layers.Length; layerIdx++)
                {
                    double[] layerFeed;

                    if (layerIdx == 0)
                    {
                        // The first layer consumes the raw training sample.
                        layerFeed = new double[_network.InputCount];
                        Array.Copy(_trainingInput[sample], 0, layerFeed, 0, _network.InputCount);
                    }
                    else
                    {
                        // Deeper layers consume the previous layer's sampled output.
                        layerFeed = new double[_network.Layers[layerIdx].InputCount];
                        Array.Copy(layerOutput, 0, layerFeed, 0, _network.Layers[layerIdx].InputCount);
                    }

                    layerOutput = new double[_network.Layers[layerIdx].OutputCount];
                    _network.Layers[layerIdx].SampleHgivenV(layerFeed, layerOutput);
                }

                // Train the logistic output layer on the top-level representation.
                TrainLogisticLayer(layerOutput, _trainingIdeal[sample]);
            }
        }
Esempio n. 2
0
        /// <summary>
        ///     Exercise an error calculation two ways — whole rows at a time and
        ///     one element at a time — and verify both paths agree.
        /// </summary>
        /// <param name="calc">The error calculation under test.</param>
        /// <param name="actual">Rows of actual output.</param>
        /// <param name="ideal">Rows of ideal output.</param>
        /// <returns>The error from the element-wise pass.</returns>
        public static double CalculateError(IErrorCalculation calc, double[][] actual, double[][] ideal)
        {
            // Pass 1: feed complete 1d rows into the calculation (the common case).
            calc.Clear();

            // A cleared calculation must report positive infinity.
            Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);

            for (int row = 0; row < actual.Length; row++)
            {
                calc.UpdateError(actual[row], ideal[row], 1.0);
            }
            Assert.AreEqual(20, calc.SetSize);
            double rowWiseError = calc.Calculate();

            // Pass 2: feed individual scalar pairs; less common, but the
            // calculation must arrive at the same result as pass 1.
            calc.Clear();

            Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);

            for (int row = 0; row < actual.Length; row++)
            {
                double[] actualRow = actual[row];
                double[] idealRow  = ideal[row];
                for (int col = 0; col < actualRow.Length; col++)
                {
                    calc.UpdateError(actualRow[col], idealRow[col]);
                }
            }
            Assert.AreEqual(20, calc.SetSize);
            double elementWiseError = calc.Calculate();

            // Both feeding styles must produce the same error.
            Assert.AreEqual(rowWiseError, elementWiseError, 0.0001);

            return elementWiseError;
        }
Esempio n. 3
0
        /// <summary>
        ///     Train.  Single iteration.  Fits the linear model with an ordinary
        ///     least-squares QR solve, then records SST, SSE and the training error.
        /// </summary>
        public void Iteration()
        {
            int rowCount      = _trainingData.Count;
            int inputColCount = _trainingData[0].Input.Length;

            // X carries a leading column of 1s for the intercept term.
            Matrix <double> xMatrix = new DenseMatrix(rowCount, inputColCount + 1);
            Matrix <double> yMatrix = new DenseMatrix(rowCount, 1);

            for (int row = 0; row < rowCount; row++)
            {
                BasicData dataRow = _trainingData[row];
                int       colSize = dataRow.Input.Length;

                xMatrix[row, 0] = 1;
                for (int col = 0; col < colSize; col++)
                {
                    xMatrix[row, col + 1] = dataRow.Input[col];
                }
                yMatrix[row, 0] = dataRow.Ideal[0];
            }

            // Calculate the least squares solution.
            QR qr = xMatrix.QR();
            Matrix <double> beta = qr.Solve(yMatrix);

            // Mean of the observed values, over ALL rows.
            // (BUG FIX: these statistics previously iterated only inputColCount
            // entries of yMatrix, which skewed the mean/SST and could index past
            // the matrix when there were fewer rows than input columns.)
            double sum = 0.0;

            for (int i = 0; i < rowCount; i++)
            {
                sum += yMatrix[i, 0];
            }
            double mean = sum / rowCount;

            // Total sum of squares around the mean, again over all rows.
            // NOTE(review): _sst accumulates across calls; assumes Iteration is
            // invoked once per fit — confirm if repeated iteration is supported.
            for (int i = 0; i < rowCount; i++)
            {
                double dev = yMatrix[i, 0] - mean;
                _sst += dev * dev;
            }

            // Residual sum of squares: ||X*beta - y||^2.
            Matrix <double> residuals = xMatrix.Multiply(beta).Subtract(yMatrix);

            _sse = residuals.L2Norm() * residuals.L2Norm();

            // Copy the fitted coefficients into the model's long-term memory.
            for (int i = 0; i < _algorithm.LongTermMemory.Length; i++)
            {
                _algorithm.LongTermMemory[i] = beta[i, 0];
            }

            // Calculate error of the fitted model over the training set.
            _errorCalculation.Clear();
            foreach (BasicData dataRow in _trainingData)
            {
                double[] output = _algorithm.ComputeRegression(dataRow.Input);
                _errorCalculation.UpdateError(output, dataRow.Ideal, 1.0);
            }
            _error = _errorCalculation.Calculate();
        }
Esempio n. 4
0
        /// <summary>
        ///     Verify an error calculation yields the same result whether it is
        ///     fed complete rows or one value at a time.
        /// </summary>
        /// <param name="calc">The error calculation to exercise.</param>
        /// <param name="actual">Rows of actual output.</param>
        /// <param name="ideal">Rows of ideal output.</param>
        /// <returns>The error from the element-wise pass.</returns>
        public static double CalculateError(IErrorCalculation calc, double[][] actual, double[][] ideal)
        {
            // Row-wise pass: hand complete 1d arrays to the calculation.
            calc.Clear();

            // A cleared calculation reports positive infinity.
            Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);

            int index = 0;
            while (index < actual.Length)
            {
                calc.UpdateError(actual[index], ideal[index], 1.0);
                index++;
            }
            Assert.AreEqual(20, calc.SetSize);
            double batchError = calc.Calculate();

            // Element-wise pass: hand over one scalar pair at a time; the
            // calculation must arrive at the same answer as the row-wise pass.
            calc.Clear();

            Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);

            index = 0;
            while (index < actual.Length)
            {
                double[] currentActual = actual[index];
                double[] currentIdeal  = ideal[index];
                int element = 0;
                while (element < currentActual.Length)
                {
                    calc.UpdateError(currentActual[element], currentIdeal[element]);
                    element++;
                }
                index++;
            }
            Assert.AreEqual(20, calc.SetSize);
            double elementError = calc.Calculate();

            // Both passes must agree.
            Assert.AreEqual(batchError, elementError, 0.0001);

            return(elementError);
        }
Esempio n. 5
0
        /// <summary>
        /// Calculate the error with the specified error calculation.
        /// </summary>
        /// <param name="calc">The error calculation.</param>
        /// <returns>The error.</returns>
        public double CalculateError(IErrorCalculation calc)
        {
            calc.Clear();

            // Accumulate the error contribution of every actual/ideal row pair.
            int rowCount = Actual.Length;
            for (int idx = 0; idx < rowCount; idx++)
            {
                calc.UpdateError(Actual[idx], Ideal[idx], 1.0);
            }

            return calc.Calculate();
        }
Esempio n. 6
0
        /// <summary>
        /// Calculate the error with the specified error calculation.
        /// </summary>
        /// <param name="calc">The error calculation.</param>
        /// <returns>The error.</returns>
        public double CalculateError(IErrorCalculation calc)
        {
            calc.Clear();

            // Feed each actual/ideal row pair into the calculation.
            int row = 0;
            while (row < Actual.Length)
            {
                calc.UpdateError(Actual[row], Ideal[row], 1.0);
                row++;
            }

            return(calc.Calculate());
        }
Esempio n. 7
0
        /// <summary>
        ///     Calculate error for regression.
        /// </summary>
        /// <param name="dataset">The dataset.</param>
        /// <param name="model">The model to evaluate.</param>
        /// <param name="calc">The error calculation.</param>
        /// <returns>The error.</returns>
        public static double CalculateRegressionError(IList <BasicData> dataset,
                                                      IRegressionAlgorithm model,
                                                      IErrorCalculation calc)
        {
            calc.Clear();

            // Score every dataset item and accumulate its error contribution.
            for (var i = 0; i < dataset.Count; i++)
            {
                var prediction = model.ComputeRegression(dataset[i].Input);
                calc.UpdateError(prediction, dataset[i].Ideal, 1.0);
            }

            return(calc.Calculate());
        }
Esempio n. 8
0
        /// <inheritdoc />
        public double CalculateScore(IMLMethod algo)
        {
            var regression = (IRegressionAlgorithm)algo;

            // Evaluate the model against the full training set.
            _errorCalc.Clear();
            foreach (var sample in _trainingData)
            {
                var prediction = regression.ComputeRegression(sample.Input);
                _errorCalc.UpdateError(prediction, sample.Ideal, 1.0);
            }

            return(_errorCalc.Calculate());
        }
Esempio n. 9
0
        /// <inheritdoc/>
        public double CalculateScore(IMLMethod algo)
        {
            // A fresh calculation per scoring call keeps scores independent.
            IErrorCalculation errorCalc = ErrorCalc.Create();

            var regression = (IRegressionAlgorithm)algo;

            // Evaluate the model over the training data.
            errorCalc.Clear();
            foreach (BasicData sample in _trainingData)
            {
                double[] prediction = regression.ComputeRegression(sample.Input);
                errorCalc.UpdateError(prediction, sample.Ideal, 1.0);
            }

            return(errorCalc.Calculate());
        }
Esempio n. 10
0
        /// <inheritdoc />
        public void Iteration()
        {
            _gradients.Reset();
            _errorCalc.Clear();

            // RPROP is a batch method: accumulate gradients over the whole
            // training set before touching any weight.
            foreach (var sample in _training)
            {
                _gradients.Process(_errorCalc, sample.Input, sample.Ideal);
            }
            LastError = _errorCalc.Calculate();

            // Apply RPROP weight updates from the accumulated gradients.
            var weightCount = _gradients.Gradients.Length;
            for (var w = 0; w < weightCount; w++)
            {
                var step = CalculateWeightDelta(_gradients.Gradients, _lastGradients, w);
                _lastGradients[w] = _gradients.Gradients[w];
                _lastDelta[w] = step;
                _network.Weights[w] += step;
            }
        }
Esempio n. 11
0
        /// <inheritdoc />
        public double CalculateScore(IMLMethod algo)
        {
            IErrorCalculation errorCalc = ErrorCalc.Create();

            var regression = (IRegressionAlgorithm)algo;
            var genome     = (IGenome)regression;

            // Oversized genomes are rejected outright with the worst score.
            if (genome.Count > _maxLength)
            {
                return double.PositiveInfinity;
            }

            // Evaluate the model over the training data.
            errorCalc.Clear();
            foreach (BasicData sample in _trainingData)
            {
                double[] prediction = regression.ComputeRegression(sample.Input);
                errorCalc.UpdateError(prediction, sample.Ideal, 1.0);
            }

            return errorCalc.Calculate();
        }
Esempio n. 12
0
        /// <inheritdoc />
        public void Iteration()
        {
            _network.NetworkTraining = true;

            // Alert the layers that a new batch is starting.
            foreach (var layer in _network.Layers)
            {
                layer.TrainingBatch(Stochastic);
            }

            // Begin the iteration.
            _gradients.Reset();
            _errorCalc.Clear();

            // BatchSize == 0 means train on the full set each iteration.
            var iterationSize = BatchSize == 0
                ? _training.Count
                : Math.Min(BatchSize, _training.Count);


            for (var i = 0; i < iterationSize; i++)
            {
                BasicData element;

                if (IsOnlineTraining)
                {
                    if (Stochastic != null)
                    {
                        // Pick a random element for stochastic training.
                        var stochasticIndex = Stochastic.NextInt(0, _training.Count);
                        element = _training[stochasticIndex];
                    }
                    else
                    {
                        // Sequential online training: wrap around BEFORE indexing.
                        // (BUG FIX: the index was previously reset only after the
                        // batch and only when strictly greater than Count, so an
                        // index equal to Count survived — or was reached mid-batch —
                        // and the next access threw IndexOutOfRangeException.)
                        if (_currentIndex >= _training.Count)
                        {
                            _currentIndex = 0;
                        }
                        element = _training[_currentIndex++];
                    }
                }
                else
                {
                    element = _training[i];
                }
                _gradients.Process(_errorCalc, element.Input, element.Ideal);
            }

            // Restart sequential scans after a full pass or a full-batch run.
            if (_currentIndex >= _training.Count || BatchSize == 0)
            {
                _currentIndex = 0;
            }

            _currentError = _errorCalc.Calculate();

            // Apply the accumulated gradients to the weights.
            for (var i = 0; i < _network.Weights.Length; i++)
            {
                double delta;

                if (NesterovUpdate)
                {
                    // Nesterov accelerated gradient: momentum with look-ahead.
                    var prevNesterov = _lastDelta[i];

                    _lastDelta[i] = Momentum * prevNesterov
                                    + _gradients.Gradients[i] * LearningRate;
                    delta = Momentum * prevNesterov - (1 + Momentum) * _lastDelta[i];
                }
                else
                {
                    // Classic momentum SGD; negated because we descend the gradient.
                    delta         = _gradients.Gradients[i] * -LearningRate + _lastDelta[i] * Momentum;
                    _lastDelta[i] = delta;
                }

                _network.Weights[i] += delta;
            }
            _network.NetworkTraining = false;
        }