Code example #1
        /// <inheritdoc />
        public void Iteration()
        {
            _gradients.Reset();
            _errorCalc.Clear();

            // Calculate gradients for entire training set, RPROP does not do online.
            foreach (var element in _training)
            {
                _gradients.Process(_errorCalc, element.Input, element.Ideal);
            }
            LastError = _errorCalc.Calculate();

            // Apply the gradients according to the RPROP algorithm.
            for (var i = 0; i < _gradients.Gradients.Length; i++)
            {
                var delta = CalculateWeightDelta(_gradients.Gradients, _lastGradients, i);
                _lastGradients[i]    = _gradients.Gradients[i];
                _lastDelta[i]        = delta;
                _network.Weights[i] += delta;
            }
        }
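Code example #1 delegates the per-weight step to CalculateWeightDelta, which is not part of the listing. The sketch below shows what such a helper could look like, assuming the classic RPROP+ sign-change rule; the _updateValues field and the EtaPlus, EtaMinus, DeltaMin and DeltaMax constants are illustrative assumptions rather than names from the original code.

        // Hypothetical sketch, not from the original source: classic RPROP+ step rule.
        // _updateValues holds one step size per weight; EtaPlus/EtaMinus grow or shrink
        // it within [DeltaMin, DeltaMax].
        private const double EtaPlus = 1.2;
        private const double EtaMinus = 0.5;
        private const double DeltaMin = 1e-6;
        private const double DeltaMax = 50.0;
        private readonly double[] _updateValues; // one entry per weight, e.g. initialised to 0.1

        private double CalculateWeightDelta(double[] gradients, double[] lastGradients, int index)
        {
            // The sign of the product tells whether the gradient kept its direction.
            var change = Math.Sign(gradients[index] * lastGradients[index]);

            if (change > 0)
            {
                // Same direction as last time: grow the step size and keep moving downhill.
                _updateValues[index] = Math.Min(_updateValues[index] * EtaPlus, DeltaMax);
                return -Math.Sign(gradients[index]) * _updateValues[index];
            }

            if (change < 0)
            {
                // Sign flip: the previous step overshot a minimum. Shrink the step size,
                // undo the last weight change, and zero the gradient so the next
                // iteration takes the neutral branch below.
                _updateValues[index] = Math.Max(_updateValues[index] * EtaMinus, DeltaMin);
                var revert = -_lastDelta[index];
                gradients[index] = 0;
                return revert;
            }

            // One of the gradients is zero (typically right after a sign flip):
            // plain step with the current, unchanged step size.
            return -Math.Sign(gradients[index]) * _updateValues[index];
        }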
Code example #2
        /// <inheritdoc />
        public void Iteration()
        {
            _network.NetworkTraining = true;

            // alert the layers that a new batch is starting.
            foreach (var layer in _network.Layers)
            {
                layer.TrainingBatch(Stochastic);
            }

            // begin the iteration
            _gradients.Reset();
            _errorCalc.Clear();

            var iterationSize = BatchSize == 0
                ? _training.Count
                : Math.Min(BatchSize, _training.Count);

            for (var i = 0; i < iterationSize; i++)
            {
                BasicData element;

                if (IsOnlineTraining)
                {
                    if (Stochastic != null)
                    {
                        var stochasticIndex = Stochastic.NextInt(0, _training.Count);
                        element = _training[stochasticIndex];
                    }
                    else
                    {
                        element = _training[_currentIndex++];
                    }
                }
                else
                {
                    element = _training[i];
                }
                _gradients.Process(_errorCalc, element.Input, element.Ideal);
            }

            // Wrap the online-training cursor once it passes the last element
            // (>= avoids reading past the end of the training set next iteration).
            if (_currentIndex >= _training.Count || BatchSize == 0)
            {
                _currentIndex = 0;
            }

            _currentError = _errorCalc.Calculate();

            for (var i = 0; i < _network.Weights.Length; i++)
            {
                double delta;

                if (NesterovUpdate)
                {
                    // Nesterov-style momentum: refresh the accumulated velocity,
                    // then take the look-ahead corrected step.
                    var prevNesterov = _lastDelta[i];

                    _lastDelta[i] = Momentum * prevNesterov
                                    + _gradients.Gradients[i] * LearningRate;
                    delta = Momentum * prevNesterov - (1 + Momentum) * _lastDelta[i];
                }
                else
                {
                    // Classical momentum: step against the gradient plus a
                    // fraction of the previous weight change.
                    delta         = _gradients.Gradients[i] * -LearningRate + _lastDelta[i] * Momentum;
                    _lastDelta[i] = delta;
                }

                _network.Weights[i] += delta;
            }
            _network.NetworkTraining = false;
        }
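The weight-update loop in code example #2 switches between classical momentum and a Nesterov-style look-ahead update. The standalone sketch below restates the same two rules with illustrative names that are not taken from the library, to make the sign conventions explicit: grad is the error gradient dE/dw and lastDelta is the velocity carried between iterations.

        // Illustrative restatement of the two update rules; names are not from the original code.
        static (double Delta, double NewLastDelta) ClassicalMomentum(
            double grad, double lastDelta, double learningRate, double momentum)
        {
            // Step against the gradient plus a fraction of the previous step.
            var delta = -learningRate * grad + momentum * lastDelta;
            return (delta, delta);
        }

        static (double Delta, double NewLastDelta) NesterovMomentum(
            double grad, double lastDelta, double learningRate, double momentum)
        {
            // Accumulate a positive velocity, then apply the look-ahead correction.
            // Expanding gives delta = -momentum^2 * lastDelta - (1 + momentum) * learningRate * grad,
            // which matches the common reformulation of Nesterov's accelerated gradient.
            var v = momentum * lastDelta + learningRate * grad;
            var delta = momentum * lastDelta - (1 + momentum) * v;
            return (delta, v);
        }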