Пример #1
0
        /// <summary>
        /// Creates a new instance of the Perceptron class with caller-supplied initial weights.
        /// </summary>
        /// <param name="inputsNumber">Number of inputs the perceptron can accept.</param>
        /// <param name="activationFunction">Activation function used by this perceptron.</param>
        /// <param name="addBias">Indicates whether a bias term should be added.</param>
        /// <param name="weights">Initial weights for the perceptron; must contain exactly one weight per input.</param>
        /// <exception cref="System.ArgumentNullException">Thrown when <paramref name="weights"/> is null.</exception>
        /// <exception cref="System.ArgumentException">Thrown when the number of weights does not match <paramref name="inputsNumber"/>.</exception>
        public Perceptron(byte inputsNumber, ActivationFunctionHandler activationFunction, bool addBias, float[] weights)
        {
            // Fail fast: the sibling constructor allocates exactly one weight per input,
            // so a null or mismatched array here would only surface later as an index error.
            if (weights == null)
            {
                throw new System.ArgumentNullException(nameof(weights));
            }
            if (weights.Length != inputsNumber)
            {
                throw new System.ArgumentException($"Expected {inputsNumber} weights but got {weights.Length}.", nameof(weights));
            }

            this.inputsNumber       = inputsNumber;
            this.activationFunction = activationFunction;
            this.addBias            = addBias;

            // Same default learning rate as the random-weight overload, kept for consistency.
            LearningRate = 0.001f;
            this.weights = weights;
        }
Пример #2
0
        /// <summary>
        /// Creates a new instance of the Perceptron class with randomly initialised weights.
        /// </summary>
        /// <param name="inputsNumber">Number of inputs the perceptron can accept.</param>
        /// <param name="activationFunction">Activation function used by this perceptron.</param>
        /// <param name="addBias">Indicates whether a bias term should be added.</param>
        public Perceptron(byte inputsNumber, ActivationFunctionHandler activationFunction, bool addBias)
        {
            this.inputsNumber       = inputsNumber;
            this.activationFunction = activationFunction;
            this.addBias            = addBias;
            LearningRate            = 0.001f;

            // One weight per input, each drawn from RandomGenerator.GetFloat(-1, 1).
            var initialWeights = new float[inputsNumber];
            for (int index = 0; index < initialWeights.Length; index++)
            {
                initialWeights[index] = RandomGenerator.GetFloat(-1, 1);
            }
            weights = initialWeights;
        }
    /// <summary>
    /// Performs one backpropagation pass for the current training sample: computes
    /// the error gradients for the output layer, then walks the hidden layers
    /// back-to-front, adjusting each neuron's weights and bias as it goes.
    /// Getter calls (GetNeurons/GetWeights) must return live references, since the
    /// weight update mutates the returned list in place via `+=`.
    /// </summary>
    void Backpropagation()
    {
        int outputLayer          = layers.Count - 1;
        int numberOfHiddenLayers = layers.Count > 2 ? layers.Count - 2 : 0;

        //Output layer
        for (int i = 0; i < layers[outputLayer].GetNeurons().Count; i++)     //Iterate the neurons in the output layer
        //Calculate the error for the neuron by subtracting the actual output from the desired output  of this output neuron
        {
            double error = desiredOutputs[i][trainingIndex] - layers[outputLayer].GetNeurons()[i].GetOutput();
            //Calculate the errorGradient for the neuron (used for the errorGradientSum in the hidden layer to follow)
            layers[outputLayer].GetNeurons()[i].SetErrorGradient(ActivationFunctionHandler.TriggerDerativeFunction(layers[outputLayer].GetNeurons()[i].GetActivationFunction(),
                                                                                                                   layers[outputLayer].GetNeurons()[i].GetOutput()) * error);
            //Update the neuron's weights
            // NOTE(review): output weights are scaled by the raw `error`, while hidden
            // weights (below) are scaled by the error gradient — confirm this asymmetry is intended.
            for (int j = 0; j < layers[outputLayer].GetNeurons()[i].GetWeights().Count; j++)
            {
                layers[outputLayer].GetNeurons()[i].GetWeights()[j] += alpha * layers[outputLayer].GetNeurons()[i].GetInputs()[j] * error;
            }
            //Update the neuron's bias
            // NOTE(review): SetBias overwrites the bias with alpha * -errorGradient rather than
            // adjusting the existing value — confirm this is not meant to be an increment.
            layers[outputLayer].GetNeurons()[i].SetBias(alpha * -1 * layers[outputLayer].GetNeurons()[i].GetErrorGradient());
        }
        //Hidden layers, iterated back-to-front so each layer can consume the error
        //gradients already computed for the layer after it (layers[i + 1]).
        // NOTE(review): the condition `i > 1` stops before layer index 1; if layers[0] is the
        // input layer, the first hidden layer is never updated — verify the bound (likely `i > 0`).
        for (int i = numberOfHiddenLayers; i > 1; i--)             //Iterate the hidden layers
        {
            for (int j = 0; j < layers[i].GetNeurons().Count; j++) //Iterate the layer's neurons
            //Calculate the errorGradientSum for the previous layer
            {
                // Sum over the next layer's neurons: each contributes its error gradient
                // weighted by its j-th weight (the connection back to this neuron).
                double errorGradientSum = 0;
                for (int k = 0; k < layers[i + 1].GetNeurons().Count; k++)
                {
                    errorGradientSum += layers[i + 1].GetNeurons()[k].GetErrorGradient() * layers[i + 1].GetNeurons()[k].GetWeights()[j];
                }
                //Calculate the errorGradient for the neuron (used for the errorGradientSum in the hidden layer to follow)
                layers[i].GetNeurons()[j].SetErrorGradient(ActivationFunctionHandler.TriggerDerativeFunction(layers[i].GetNeurons()[j].GetActivationFunction(),
                                                                                                             layers[i].GetNeurons()[j].GetOutput()) * errorGradientSum);
                //Update the neuron's weights
                for (int k = 0; k < layers[i].GetNeurons()[j].GetWeights().Count; k++)
                {
                    layers[i].GetNeurons()[j].GetWeights()[k] += alpha * layers[i].GetNeurons()[j].GetInputs()[k] * layers[i].GetNeurons()[j].GetErrorGradient();
                }
                //Update the neuron's bias
                // NOTE(review): same overwrite-vs-increment question as the output layer above.
                layers[i].GetNeurons()[j].SetBias(alpha * -1 * layers[i].GetNeurons()[j].GetErrorGradient());
            }
        }
    }
Пример #4
0
        /// <summary>
        /// Creates a new 3-layer neural network with the given size for each layer and the
        /// given activation functions. All weights and biases are initialised with
        /// Randomize(-1, 1).
        /// </summary>
        /// <param name="inputsLayerSize">Number of inputs.</param>
        /// <param name="hiddenLayerSize">Number of hidden neurons.</param>
        /// <param name="outputsLayerSize">Number of outputs.</param>
        /// <param name="hiddenLayerActivationFunction">Activation function for the hidden-layer neurons.</param>
        /// <param name="outputLayerActivationFunction">Activation function for the output-layer neurons.</param>
        /// <param name="hiddenLayerDerivativeFunction">Derivative of the hidden layer's activation function.</param>
        /// <param name="outputLayerDerivativeFunction">Derivative of the output layer's activation function.</param>
        /// <exception cref="System.ArgumentOutOfRangeException">Thrown when any layer size is not positive.</exception>
        public SimpleNeuralNetwork(int inputsLayerSize, int hiddenLayerSize, int outputsLayerSize, ActivationFunctionHandler hiddenLayerActivationFunction, ActivationFunctionHandler outputLayerActivationFunction, FloatOperationHandler hiddenLayerDerivativeFunction, FloatOperationHandler outputLayerDerivativeFunction)
        {
            // Fail fast: the Matrix allocations below require positive dimensions.
            if (inputsLayerSize <= 0)
            {
                throw new System.ArgumentOutOfRangeException(nameof(inputsLayerSize), "Layer size must be positive.");
            }
            if (hiddenLayerSize <= 0)
            {
                throw new System.ArgumentOutOfRangeException(nameof(hiddenLayerSize), "Layer size must be positive.");
            }
            if (outputsLayerSize <= 0)
            {
                throw new System.ArgumentOutOfRangeException(nameof(outputsLayerSize), "Layer size must be positive.");
            }

            InputsLayerSize  = inputsLayerSize;
            HiddenLayerSize  = hiddenLayerSize;
            OutputsLayerSize = outputsLayerSize;
            HiddenLayerActivationFunction = hiddenLayerActivationFunction;
            OutputLayerActivationFunction = outputLayerActivationFunction;
            HiddenLayerDerivativeFunction = hiddenLayerDerivativeFunction;
            OutputLayerDerivativeFunction = outputLayerDerivativeFunction;

            // Input->hidden and hidden->output weight matrices: rows = destination layer,
            // columns = source layer.
            weightsIh = new Matrix(HiddenLayerSize, InputsLayerSize);
            weightsHo = new Matrix(OutputsLayerSize, HiddenLayerSize);
            weightsIh.Randomize(-1, 1);
            weightsHo.Randomize(-1, 1);

            // One bias per neuron in the hidden and output layers (column vectors).
            biasH = new Matrix(HiddenLayerSize, 1);
            biasO = new Matrix(OutputsLayerSize, 1);
            biasH.Randomize(-1, 1);
            biasO.Randomize(-1, 1);
        }