Example #1
        // main routine of the neural network: trains with feedforward and backpropagation, then stores the outputs
        public void compute()
        {
            int epoch = 0; // counts how many times the full training set has been presented

            // hidden layer of neurons
            Neuron hidden1 = new Neuron();
            Neuron hidden2 = new Neuron();
            Neuron hidden3 = new Neuron();
            Neuron hidden4 = new Neuron();

            //output neuron
            Neuron output = new Neuron();

            output.randomizeWeights();

            hidden1.randomizeWeights();
            hidden2.randomizeWeights();
            hidden3.randomizeWeights();
            hidden4.randomizeWeights();

            //double errorRate = 1;

            //output.error = 1;
            while (epoch < 100) // train for a fixed number of epochs (raise above 100 for more reliable results), or loop on the output error instead
            {
                //errorRate = 0;
                epoch++;
                for (int i = 0; i < 16; i++)
                {
                    // set each neuron's inputs to the current training row
                    hidden1.inputs = new double[] { inputs[i, 0], inputs[i, 1], inputs[i, 2], inputs[i, 3] };
                    hidden2.inputs = new double[] { inputs[i, 0], inputs[i, 1], inputs[i, 2], inputs[i, 3] };
                    hidden3.inputs = new double[] { inputs[i, 0], inputs[i, 1], inputs[i, 2], inputs[i, 3] };
                    hidden4.inputs = new double[] { inputs[i, 0], inputs[i, 1], inputs[i, 2], inputs[i, 3] };

                    // cache the hidden neurons' sigmoid outputs so each is computed once per training row
                    double h1 = hidden1.output();
                    double h2 = hidden2.output();
                    double h3 = hidden3.output();
                    double h4 = hidden4.output();

                    // set the output neuron's inputs to the four hidden neurons' outputs
                    output.inputs = new double[] { h1, h2, h3, h4 };

                    // display the current training row and the output neuron's result;
                    // output() applies the inputs * weights + bias formula to the values set above
                    double result = output.output();
                    textBox1.AppendText(inputs[i, 0] + " " + inputs[i, 1] + " " + inputs[i, 2] + " " + inputs[i, 3] + " = " + result + "\r\n");
                    outputResults.Add(result);
                    neuronOutputs.Add(new double[] { h1, h2, h3, h4 });

                    // output-layer error (delta rule): sigmoid derivative * (target - actual)
                    output.error = sigmoid.derivative(result) * (answers[i] - result);

                    // hidden-layer errors: the output error propagated back through the output neuron's
                    // weights, scaled by each hidden neuron's sigmoid derivative.
                    // These must be computed before tweakWeights() changes the output weights.
                    hidden1.error = sigmoid.derivative(h1) * output.error * output.weights[0];
                    hidden2.error = sigmoid.derivative(h2) * output.error * output.weights[1];
                    hidden3.error = sigmoid.derivative(h3) * output.error * output.weights[2];
                    hidden4.error = sigmoid.derivative(h4) * output.error * output.weights[3];

                    // the line below is redundant: the delta rule above already includes (target - actual)
                    //double error = answers[i] - output.output();

                    // adjust the weights: each weight moves by its error * input value
                    output.tweakWeights();
                    hidden1.tweakWeights();
                    hidden2.tweakWeights();
                    hidden3.tweakWeights();
                    hidden4.tweakWeights();

                    //errorRate += Math.Abs(error);
                }
            }
        }
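The snippet above references members defined elsewhere in the containing class: inputs (the 16×4 training table), answers (the 16 target values), outputResults and neuronOutputs (collections the results are stored in), textBox1 (presumably a WinForms TextBox used for display), plus a Neuron class and a sigmoid helper. A minimal sketch of what the Neuron and sigmoid pieces might look like is given below, assuming the weights are updated by error * input scaled by a learning rate and that the derivative is expressed in terms of the sigmoid's own output; the transfer method name and the learningRate value are illustrative assumptions, not taken from the original.

using System;

// minimal sketch of the helpers the example assumes; names and values here are illustrative
static class sigmoid
{
    // logistic transfer function
    public static double transfer(double x)
    {
        return 1.0 / (1.0 + Math.Exp(-x));
    }

    // derivative written in terms of the sigmoid's output y = transfer(x),
    // which matches how the example passes output() values straight into derivative()
    public static double derivative(double y)
    {
        return y * (1.0 - y);
    }
}

class Neuron
{
    public double[] inputs;
    public double[] weights;
    public double bias;
    public double error;

    private static readonly Random rng = new Random();
    private const double learningRate = 0.5; // assumed value, not from the original

    // initialize the weights and bias to random values in [-1, 1)
    public void randomizeWeights()
    {
        weights = new double[4]; // every neuron in the example has exactly four inputs
        for (int i = 0; i < weights.Length; i++)
        {
            weights[i] = rng.NextDouble() * 2.0 - 1.0;
        }
        bias = rng.NextDouble() * 2.0 - 1.0;
    }

    // weighted sum of the inputs plus the bias, squashed by the sigmoid
    public double output()
    {
        double sum = bias;
        for (int i = 0; i < inputs.Length; i++)
        {
            sum += inputs[i] * weights[i];
        }
        return sigmoid.transfer(sum);
    }

    // move each weight by learningRate * error * input (and the bias by learningRate * error)
    public void tweakWeights()
    {
        for (int i = 0; i < inputs.Length; i++)
        {
            weights[i] += learningRate * error * inputs[i];
        }
        bias += learningRate * error;
    }
}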