Example #1
 public abstract void AdaptWeights(NeuralNetwork network, Vector<double> errors, double currentDataError, double previousDataError);
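The abstract declaration defines the contract every learning algorithm must implement: given the network, the back-propagated error vector for the current sample, and the current and previous sample errors, the method adjusts the weights in place. The outline below is a minimal sketch of a derived class, not code from the project; the LearningAlgorithm base type and the LearningRateHandler member are assumptions taken from Example #2.
// Minimal sketch of an implementation without momentum (hypothetical, not project code).
// Assumes MathNet.Numerics.LinearAlgebra.Vector<double> and the project's LearningAlgorithm base class.
public class SimpleBackPropagation : LearningAlgorithm
{
    public override void AdaptWeights(NeuralNetwork network, Vector<double> errors,
                                      double currentDataError, double previousDataError)
    {
        // 1. Propagate the output errors back through the hidden layers.
        // 2. For every weight, add LearningRateHandler.LearningRate * derivative * input signal * propagated error.
        // Example #2 below shows a complete implementation that also applies a momentum term.
    }
}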
Example #2
        public override void AdaptWeights(NeuralNetwork network, Vector<double> errors, double currentDataError, double previousDataError)
        {
            #region helper variables
            int numberOfLayers = network.Layers.Count();
            var layers         = network.Layers;
            int biasModifier   = network.IsBiasExisting ? 1 : 0; // TODO: check that
            #endregion
            if (LastWeightsChange is null)                       // initialize it with the proper size
            {
                LastWeightsChange = new Matrix<double>[numberOfLayers];
                for (int layerIndex = 0; layerIndex < numberOfLayers; layerIndex++) // create weight-change matrices
                {
                    LastWeightsChange[layerIndex] = Matrix<double>.Build.Dense(layers[layerIndex].Weights.RowCount, layers[layerIndex].Weights.ColumnCount);
                }
            }
            }

            #region calculate propagated errors
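            // Hidden-layer errors are computed by back-propagation: each neuron's error is the
            // weighted sum of the errors of the neurons it feeds in the next layer; the output
            // layer simply takes the error vector passed in.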
            Vector<double>[] propagatedErrors = new Vector<double>[numberOfLayers];
            for (int layerIndex = numberOfLayers - 1; layerIndex >= 0; layerIndex--) // begin with the last layer
            {
                propagatedErrors[layerIndex] = Vector<double>.Build.Dense(layers[layerIndex].Weights.ColumnCount);

                for (int neuronIndex = 0; neuronIndex < layers[layerIndex].Weights.ColumnCount; neuronIndex++)
                {
                    if (layerIndex == numberOfLayers - 1) // for the last layer the propagated error is simply the output error
                    {
                        propagatedErrors[layerIndex] = errors;
                    }
                    else
                    {
                        double propagatedError = 0;
                        for (int weightIndex = 0; weightIndex < layers[layerIndex + 1].Weights.ColumnCount; weightIndex++)
                        {
                            propagatedError += layers[layerIndex + 1].Weights[neuronIndex + biasModifier, weightIndex] * propagatedErrors[layerIndex + 1][weightIndex]; // weight * propagated error
                        }
                        propagatedErrors[layerIndex][neuronIndex] = propagatedError;
                    }
                }
            }
            #endregion
            #region adapt weights using propagated error, outputs and derivatives
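            // Update rule for every weight: change = LearningRate * derivative * input signal * propagated error;
            // if the current sample error has not grown beyond MaxErrorIncreaseCoefficient times the previous one,
            // the previous change scaled by MomentumCoefficient is added as a momentum term.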
            for (int layerIndex = numberOfLayers - 1; layerIndex >= 0; layerIndex--) // begin with the last layer
            {
                for (int neuronIndex = 0; neuronIndex < layers[layerIndex].Weights.ColumnCount; neuronIndex++)
                {
                    for (int weightIndex = 0; weightIndex < layers[layerIndex].Weights.RowCount; weightIndex++)
                    {
                        var signal                = network.LastOutputs[layerIndex][weightIndex];
                        var currentNeuronError    = propagatedErrors[layerIndex][neuronIndex];
                        var derivative            = network.LastDerivatives[layerIndex][neuronIndex];
                        var backPropagationImpact = derivative * signal * currentNeuronError * LearningRateHandler.LearningRate;
                        if (currentDataError < previousDataError * MaxErrorIncreaseCoefficient) // accept the step and include the momentum term
                        {
                            var momentumImpact = MomentumCoefficient * LastWeightsChange[layerIndex][weightIndex, neuronIndex];
                            layers[layerIndex].Weights[weightIndex, neuronIndex]   += backPropagationImpact + momentumImpact;
                            LastWeightsChange[layerIndex][weightIndex, neuronIndex] = backPropagationImpact + momentumImpact; // store this change for the next momentum term
                        }
                        }
                        else // ignore momentum
                        {
                            layers[layerIndex].Weights[weightIndex, neuronIndex]   += backPropagationImpact;
                            LastWeightsChange[layerIndex][weightIndex, neuronIndex] = backPropagationImpact;
                        }
                    }
                }
            }
            #endregion
        }
Example #3
File: OnlineTrainer.cs Project: 210183/IAD
        public void TrainNetwork(ref NeuralNetwork networkToTrain, int maxEpochs, double desiredErrorRate = 0)
        {
            var learnSet = DataProvider.LearnSet; // shorter alias

            var TempTestErrorHistory = Vector<double>.Build.Dense(maxEpochs + 1);

            Vector<double> TemporaryEpochErrorHistory = Vector<double>.Build.Dense(maxEpochs, 0); // temporarily stores the epoch error for every epoch

            TempTestErrorHistory[0]       = Double.MaxValue;                                      // assume the error at the beginning is maximal
            TemporaryEpochErrorHistory[0] = Double.MaxValue;                                      // assume the error at the beginning is maximal

            int             shuffleAmount = (int)Math.Round(Math.Log(learnSet.Length, 2));        // how many elements of the learn set to shuffle, based on its size
            Vector<double>  output;
            Vector<double>  errorVector;
            int             EpochIndex = 1; // epochs are counted starting from 1

            while (EpochIndex < maxEpochs)
            {
                DataProvider.ShuffleDataSet(learnSet, shuffleAmount);                       // shuffle some data in learn set
                CurrentEpochErrorVector = Vector<double>.Build.Dense(learnSet.Length, 0); // init with 0s

                #region calculate epoch
                for (int dataIndex = 0; dataIndex < learnSet.Length; dataIndex++)
                {
                    output      = networkToTrain.CalculateOutput(learnSet[dataIndex].X, CalculateMode.OutputsAndDerivatives);
                    errorVector = ErrorCalculator.CalculateErrorVector(output, learnSet[dataIndex].D);
                    CurrentEpochErrorVector[dataIndex] = ErrorCalculator.CalculateErrorSum(output, learnSet[dataIndex].D);
                    #region adapt weights
                    LearningAlgorithm.AdaptWeights(networkToTrain, errorVector, CurrentEpochErrorVector[dataIndex], CurrentEpochErrorVector[dataIndex.Previous()]);
                    #endregion
                }
                #endregion
                #region epoch error
                TemporaryEpochErrorHistory[EpochIndex] = ErrorCalculator.CalculateEpochError(CurrentEpochErrorVector);
                if (TemporaryEpochErrorHistory[EpochIndex] <= desiredErrorRate) //learning is done
                {
                    return;
                }
                #endregion
                #region Adapt Learning Rate
                LearningAlgorithm.AdaptLearningRate(TemporaryEpochErrorHistory[EpochIndex], TemporaryEpochErrorHistory[EpochIndex.Previous()]);
                #endregion
                #region create and store test results
                var testError = tester.TestNetwork(networkToTrain, DataProvider);
                TempTestErrorHistory[EpochIndex] = testError;
                #endregion
                #region update best network state
                if (TempTestErrorHistory[EpochIndex] < BestError)
                {
                    BestNetworkState = networkToTrain.DeepCopy();
                    BestError        = TempTestErrorHistory[EpochIndex];
                }
                #endregion

                EpochIndex++;
            }
            #region save errors for all epochs that were actually calculated
            TestErrorHistory = Vector<double>.Build.Dense(EpochIndex);

            EpochErrorHistory = Vector<double>.Build.Dense(EpochIndex);

            TemporaryEpochErrorHistory.CopySubVectorTo(EpochErrorHistory, 0, 0, EpochIndex);
            TempTestErrorHistory.CopySubVectorTo(TestErrorHistory, 0, 0, EpochIndex);
            #endregion
            // restore the best network state (chosen on the verification set)
            networkToTrain = BestNetworkState;
        }
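A possible way to call the trainer is sketched below; the constructor arguments and surrounding variable names are assumptions inferred from the members used above (DataProvider, ErrorCalculator, LearningAlgorithm, tester), not confirmed project API.
// Hypothetical usage sketch; the OnlineTrainer constructor signature is an assumption.
var trainer = new OnlineTrainer(dataProvider, errorCalculator, learningAlgorithm, tester);
var network = new NeuralNetwork(layers);
trainer.TrainNetwork(ref network, maxEpochs: 100, desiredErrorRate: 0.01);
// On return, network has been replaced by the best state observed during training,
// and EpochErrorHistory / TestErrorHistory hold the per-epoch errors.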
 /// <summary>
 /// A ctor to clone a given network.
 /// </summary>
 /// <param name="net">The given network.</param>
 public NeuralNetwork(NeuralNetwork net) : this(net.Layers)
 {
 }
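Note that this constructor forwards net.Layers to the layer-accepting constructor, so whether the clone is deep or shallow depends on how that constructor handles the layer collection (it is not shown here). A usage sketch with hypothetical variable names:
 // Sketch: snapshot a trained network before further experiments.
 var snapshot = new NeuralNetwork(trainedNetwork);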