Code example #1
    /// <summary>
    /// Runs one backpropagation pass for the current training sample:
    /// computes the output-layer error against <c>desiredOutputs</c>, derives each
    /// neuron's error gradient, propagates the gradients back through the hidden
    /// layers, and adjusts every neuron's weights and bias scaled by the learning
    /// rate <c>alpha</c>.
    /// </summary>
    void Backpropagation()
    {
        int outputLayer          = layers.Count - 1;
        int numberOfHiddenLayers = layers.Count > 2 ? layers.Count - 2 : 0;

        //Output layer
        for (int i = 0; i < layers[outputLayer].GetNeurons().Count; i++) //Iterate the neurons in the output layer
        {
            var neuron = layers[outputLayer].GetNeurons()[i];

            //Error for this neuron: desired output minus actual output
            double error = desiredOutputs[i][trainingIndex] - neuron.GetOutput();

            //errorGradient = f'(output) * error (used for the errorGradientSum in the hidden layer to follow)
            neuron.SetErrorGradient(ActivationFunctionHandler.TriggerDerativeFunction(neuron.GetActivationFunction(),
                                                                                      neuron.GetOutput()) * error);

            //Update the neuron's weights (delta rule: alpha * input * error)
            //NOTE(review): the weights here use the raw error while the bias below and
            //all hidden-layer updates use the errorGradient — confirm this asymmetry
            //is intentional and not a transcription slip.
            for (int j = 0; j < neuron.GetWeights().Count; j++)
            {
                neuron.GetWeights()[j] += alpha * neuron.GetInputs()[j] * error;
            }

            //Update the neuron's bias (the -1 factor suggests the bias input is modeled as -1)
            //NOTE(review): SetBias looks like it overwrites rather than accumulates —
            //verify SetBias adds to the stored bias internally.
            neuron.SetBias(alpha * -1 * neuron.GetErrorGradient());
        }

        //Hidden layers, walked from the back of the network towards the front.
        //BUG FIX: the loop previously used `i > 1`, which skipped the first hidden
        //layer (index 1) entirely and trained no hidden layer at all when the
        //network had exactly one (numberOfHiddenLayers == 1). Hidden layers occupy
        //indices 1 .. numberOfHiddenLayers, so the loop must run down to i == 1.
        for (int i = numberOfHiddenLayers; i > 0; i--)
        {
            for (int j = 0; j < layers[i].GetNeurons().Count; j++) //Iterate the layer's neurons
            {
                var neuron = layers[i].GetNeurons()[j];

                //Sum the downstream layer's gradients, each weighted by the
                //connection leading out of this neuron (weight index j).
                double errorGradientSum = 0;
                for (int k = 0; k < layers[i + 1].GetNeurons().Count; k++)
                {
                    errorGradientSum += layers[i + 1].GetNeurons()[k].GetErrorGradient() * layers[i + 1].GetNeurons()[k].GetWeights()[j];
                }

                //errorGradient = f'(output) * sum of downstream gradients
                neuron.SetErrorGradient(ActivationFunctionHandler.TriggerDerativeFunction(neuron.GetActivationFunction(),
                                                                                          neuron.GetOutput()) * errorGradientSum);

                //Update the neuron's weights
                for (int k = 0; k < neuron.GetWeights().Count; k++)
                {
                    neuron.GetWeights()[k] += alpha * neuron.GetInputs()[k] * neuron.GetErrorGradient();
                }

                //Update the neuron's bias (same overwrite caveat as the output layer)
                neuron.SetBias(alpha * -1 * neuron.GetErrorGradient());
            }
        }
    }