/// <summary>
/// Builds a two-input perceptron, trains it on the XOR truth table and
/// displays the learned weights, the final error and the error history chart.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    neuron = new Neuron("AND-Neuron", 0, 2);
    neuron.addNewInput("input1", 0, 0);
    neuron.addNewInput("input2", 0, 0);
    PerceptronNetwork pn = new PerceptronNetwork(neuron);

    // NOTE(review): the AND and OR templates are built but never used, and
    // templatesList stays empty — confirm whether they should be added to it.
    TrainingTemplate andTemplate = BuildGateTemplate("AND Template", 0, 0, 0, 1);
    TrainingTemplate orTemplate = BuildGateTemplate("OR Template", 0, 1, 1, 1);
    TrainingTemplate xorTemplate = BuildGateTemplate("XOR Template", 0, 1, 1, 0);

    templatesList = new List<TrainingTemplate>();

    ErrorHistory errorProg = new ErrorHistory();
    double error = pn.train(xorTemplate, 100, errorProg);

    labelWeight1.Text = neuron.inputs[0].weight.ToString("N3");
    labelWeight2.Text = neuron.inputs[1].weight.ToString("N3");
    labelError.Text = error.ToString("N3");

    // Plot the error history point by point.
    for (int X = 0; X < errorProg.errorPoints.Count; X++)
    {
        chart1.Series["Error"].Points.AddXY(X, errorProg.errorPoints[X]);
    }
    //chart1.DataBind(errorProg);
}

/// <summary>
/// Builds a two-input / one-output truth-table training template from the four
/// expected outputs for the inputs (0,0), (0,1), (1,0) and (1,1).
/// </summary>
private TrainingTemplate BuildGateTemplate(string name, double out00, double out01, double out10, double out11)
{
    TrainingTemplate template = new TrainingTemplate(name);
    template.addTrainingRow(new TrainingRow(new List<double> { 0, 0 }, new List<double> { out00 }));
    template.addTrainingRow(new TrainingRow(new List<double> { 0, 1 }, new List<double> { out01 }));
    template.addTrainingRow(new TrainingRow(new List<double> { 1, 0 }, new List<double> { out10 }));
    template.addTrainingRow(new TrainingRow(new List<double> { 1, 1 }, new List<double> { out11 }));
    return template;
}
/// <summary>
/// The constructor. Builds the XOR training template and a 2-2-1 network
/// (two input neurons, two hidden neurons, one output neuron), all using
/// sigmoid fire rules.
/// </summary>
public NNSim()
{
    // XOR truth table as the training template.
    this.xor_template = new TrainingTemplate();
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 0, 0 }, new List<double> { 0 }));
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 0, 1 }, new List<double> { 1 }));
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 1, 0 }, new List<double> { 1 }));
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 1, 1 }, new List<double> { 0 }));

    // Create the neurons.
    this.input1Neuron = new Neuron("input1", 0, 1);
    this.input2Neuron = new Neuron("input2", 0, 1);
    this.hidden1Neuron = new Neuron("hidden1", 0, 1);
    this.hidden2Neuron = new Neuron("hidden2", 0, 1);
    this.outputNeuron = new Neuron("output", 0, 1);

    // Give every neuron a sigmoid fire rule.
    input1Neuron.fireRule = new SigmoidFireRule();
    input2Neuron.fireRule = new SigmoidFireRule();
    hidden1Neuron.fireRule = new SigmoidFireRule();
    // BUGFIX: the original assigned hidden1Neuron.fireRule twice (copy-paste),
    // leaving hidden2Neuron with its default fire rule.
    hidden2Neuron.fireRule = new SigmoidFireRule();
    outputNeuron.fireRule = new SigmoidFireRule();

    // Wire the network: both inputs feed both hidden neurons, and both
    // hidden neurons feed the output neuron.
    input1Neuron.addNewInput("network_in_1", 1, 1);
    input2Neuron.addNewInput("network_in_2", 1, 1);
    hidden1Neuron.inputs.Add(input1Neuron.output);
    hidden1Neuron.inputs.Add(input2Neuron.output);
    hidden2Neuron.inputs.Add(input1Neuron.output);
    hidden2Neuron.inputs.Add(input2Neuron.output);
    outputNeuron.inputs.Add(hidden1Neuron.output);
    outputNeuron.inputs.Add(hidden2Neuron.output);

    // One error slot per training row.
    errors = new double[4];
}
/// <summary>
/// Template method for training a network; concrete trainers are expected
/// to override this base implementation.
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="neuron">The neuron to be trained.</param>
/// <param name="maxGenerations">Upper bound on the number of training generations.</param>
/// <returns>The final training error, once implemented by a subclass.</returns>
/// <exception cref="NotImplementedException">Always thrown by this base implementation.</exception>
public virtual double TrainNetwork(TrainingTemplate trainingTemplate, NeuronBase neuron, int maxGenerations)
{
    throw new NotImplementedException();
}
/// <summary>
/// Trains the layered network on the given template by delegating to the
/// configured trainer with all three layers.
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="extMaxGenerations">Upper bound on the number of training generations.</param>
/// <param name="errorProg">Receives the error history produced during training.</param>
/// <returns>The final training error reported by the trainer.</returns>
public double train(TrainingTemplate trainingTemplate, int extMaxGenerations, ErrorHistory errorProg)
{
    // 0.1 is an arbitrary acceptable-error threshold (per the original
    // author's own note) — confirm against the problem's tolerance.
    double acceptableError = 0.1;
    double error = trainer.trainNetwork(trainingTemplate, inputLayer, outputLayer, hiddenLayer, extMaxGenerations, acceptableError, errorProg);
    return error;
}
/// <summary>
/// Trains a single perceptron neuron using the perceptron learning rule:
/// each training row is presented repeatedly, adjusting every input weight by
/// (reading * learningRate * error) until the row produces no error, a full
/// stable run is reached, or the generation limit is hit.
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="neuron">The perceptron to train; its weights are randomized first.</param>
/// <param name="maxGenerations">Upper bound on the total number of generations.</param>
/// <param name="errorProg">Receives the history of non-zero errors seen during training.</param>
/// <returns>The error of the last evaluated row.</returns>
public double trainNetwork(TrainingTemplate trainingTemplate, AriesNeuroNet.Neurons.Neuron neuron, int maxGenerations, ErrorHistory errorProg)
{
    int stableLimit = trainingTemplate.rows.Count;
    int stableGenerations = 0;
    int currentGeneration = 0;
    double error = 0;
    List<double> errorHistory = new List<double>();
    bool adjustedWeights = false;

    // Step 1: start from random weights.
    neuron.randomizeWeights();

    for (int currentRow = 0; currentRow < trainingTemplate.rows.Count; currentRow++)
    {
        TrainingRow row = trainingTemplate.rows[currentRow];

        do
        {
            adjustedWeights = false;

            // Present the row and fire the neuron.
            neuron.setInputValues(row.inputs);
            neuron.fireNeuron();

            // Single-output perceptron: compare against the row's first output.
            double expectedOutput = row.outputs[0];
            double realOutput = neuron.output.weightedReading;
            Console.WriteLine("Output is " + realOutput);

            error = expectedOutput - realOutput;

            // Exact comparison is intentional here: with binary targets and a
            // step-style fire rule, a correct classification yields exactly 0.
            if (error == 0)
            {
                stableGenerations++;
            }
            else
            {
                stableGenerations = 0;
                adjustedWeights = true;

                // Perceptron learning rule: w += x * eta * error.
                foreach (NeuronPort input in neuron.inputs)
                {
                    input.weight += input.reading * learningRate * error;
                }
                Console.WriteLine("I corrected with " + (learningRate * error));

                errorHistory.Add(error);
            }

            currentGeneration++;

            // BUGFIX: publish the error history on every exit path — the
            // original only assigned errorProg.errorPoints on the fall-through
            // return, so early exits handed back a stale/empty ErrorHistory.
            if (stableGenerations >= stableLimit)
            {
                // Ended due to a full stable run of generations.
                errorProg.errorPoints = errorHistory;
                return error;
            }
            if (currentGeneration >= maxGenerations)
            {
                // Ended due to the generation limit.
                errorProg.errorPoints = errorHistory;
                return error;
            }
        } while (adjustedWeights);
    }

    errorProg.errorPoints = errorHistory;
    return error;
}
/// <summary>
/// Alternative perceptron trainer that cycles through the template rows in a
/// single loop, only advancing to the next row once the current one is
/// classified without error. Heavily instrumented with console output and a
/// key-press pause after every generation (debug aid).
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="neuron">The perceptron to train; its weights are randomized first.</param>
/// <param name="maxGenerations">Upper bound on the total number of generations.</param>
/// <returns>The error of the last evaluated row.</returns>
public double trainNetwork2(TrainingTemplate trainingTemplate, AriesNeuroNet.Neurons.Neuron neuron, int maxGenerations)
{
    int stableLimit = trainingTemplate.rows.Count;
    int stableGenerations = 0;
    int currentGeneration = 0;
    int currentRow = 0;

    // Step 1: start from random weights.
    neuron.randomizeWeights();

    // We take into account that we might not need to adjust the weights.
    bool adjustedWeights = false;
    bool stableGenFlag = true;
    bool genLimitFlag = true;
    bool templateFlag = false;
    double error = 0;

    do
    {
        Console.WriteLine("========================================================================");
        Console.WriteLine("Begining Generation: " + currentGeneration);

        // Assume this generation needs no correction until proven otherwise.
        adjustedWeights = false;

        // Present the current row's inputs and fire the neuron.
        List<double> inputs = trainingTemplate.rows[currentRow].inputs;
        neuron.setInputValues(inputs);

        neuron.fireNeuron();

        // Single-output perceptron: compare against the row's first output.
        double expectedOutput = trainingTemplate.rows[currentRow].outputs[0];
        double realOutput = neuron.output.weightedReading;
        Console.WriteLine("Output is " + realOutput);

        error = expectedOutput - realOutput;
        Console.WriteLine("Error is " + error);

        if (error == 0)
        {
            Console.WriteLine("I have not found an error");
            stableGenerations++;
            // Row learned: move on to the next training row, wrapping around.
            currentRow = (currentRow + 1) % trainingTemplate.rows.Count;
            adjustedWeights = false;
        }
        else
        {
            stableGenerations = 0;
            Console.WriteLine("I found an error");
            Console.WriteLine("The error is " + error);

            // Track old/new weights for the debug printout below.
            List<double> oldWeights = new List<double>();
            List<double> newWeights = new List<double>();

            adjustedWeights = true;

            // Perceptron learning rule: w += x * eta * error.
            foreach (NeuronPort input in neuron.inputs)
            {
                oldWeights.Add(input.weight);
                input.weight += input.reading * learningRate * error;
                newWeights.Add(input.weight);
            }
            Console.WriteLine("I corrected with " + (learningRate * error));

            Console.WriteLine("Old weights ");
            foreach (double weight in oldWeights)
            {
                Console.Write(weight + " ");
            }
            // BUGFIX: the original concatenated the list object itself
            // ("New weights " + newWeights), which prints the List type name
            // rather than the values; the loop below prints the actual weights.
            Console.WriteLine("New weights ");
            foreach (double weight in newWeights)
            {
                Console.Write(weight + " ");
            }
        }

        currentGeneration++;

        // Re-evaluate the loop conditions.
        stableGenFlag = stableGenerations < stableLimit;
        genLimitFlag = currentGeneration < maxGenerations;
        templateFlag = currentRow < trainingTemplate.rows.Count;
        Console.WriteLine("adjustedWeights " + adjustedWeights + " templateFlag " + templateFlag + " stableGenFlag " + stableGenFlag + " genLimitFlag " + genLimitFlag);
        Console.WriteLine("End of Generation: " + (currentGeneration - 1));
        Console.WriteLine("========================================================================");
        // Debug pause: wait for a key press between generations.
        Console.ReadKey();
    } while (adjustedWeights /*&& stableGenFlag*/ && genLimitFlag && templateFlag);

    return error;
}
/// <summary>
/// Entry point: builds a 2-2-1 sigmoid network, trains it on the XOR truth
/// table and prints the final error.
/// </summary>
static void Main(string[] args)
{
    // We create the training template (XOR truth table).
    TrainingTemplate xor_template = new TrainingTemplate();
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 0, 0 }, new List<double> { 0 }));
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 0, 1 }, new List<double> { 1 }));
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 1, 0 }, new List<double> { 1 }));
    xor_template.addTrainingRow(new TrainingRow(new List<double> { 1, 1 }, new List<double> { 0 }));

    // We create the network.
    SimpleNetwork sn = new SimpleNetwork();

    // We create the neurons.
    Neuron input1Neuron = new Neuron("input1", 0, 1);
    Neuron input2Neuron = new Neuron("input2", 0, 1);
    Neuron hidden1Neuron = new Neuron("hidden1", 0, 1);
    Neuron hidden2Neuron = new Neuron("hidden2", 0, 1);
    Neuron outputNeuron = new Neuron("output", 0, 1);

    // We assign them sigmoid fire functions.
    input1Neuron.fireRule = new SigmoidFireRule();
    input2Neuron.fireRule = new SigmoidFireRule();
    hidden1Neuron.fireRule = new SigmoidFireRule();
    // BUGFIX: the original assigned hidden1Neuron.fireRule twice (copy-paste),
    // leaving hidden2Neuron with its default fire rule.
    hidden2Neuron.fireRule = new SigmoidFireRule();
    outputNeuron.fireRule = new SigmoidFireRule();

    // We bind them together: inputs feed both hidden neurons, and both hidden
    // neurons feed the output neuron.
    input1Neuron.addNewInput("network_in_1", 1, 1);
    input2Neuron.addNewInput("network_in_2", 1, 1);
    hidden1Neuron.inputs.Add(input1Neuron.output);
    hidden1Neuron.inputs.Add(input2Neuron.output);
    hidden2Neuron.inputs.Add(input1Neuron.output);
    hidden2Neuron.inputs.Add(input2Neuron.output);
    outputNeuron.inputs.Add(hidden1Neuron.output);
    // BUGFIX: hidden2 was never connected to the output neuron, leaving it
    // dangling (compare with the identical wiring in the NNSim constructor).
    outputNeuron.inputs.Add(hidden2Neuron.output);

    // We put them into layers.
    sn.inputLayer.neurons.Add(input1Neuron);
    sn.inputLayer.neurons.Add(input2Neuron);
    sn.hiddenLayer.neurons.Add(hidden1Neuron);
    sn.hiddenLayer.neurons.Add(hidden2Neuron);
    sn.outputLayer.neurons.Add(outputNeuron);

    // We train and report the final error.
    double error = sn.train(xor_template, 5000, new ErrorHistory());
    Console.WriteLine(error);
    Console.ReadKey();
}
/// <summary>
/// Alternative perceptron trainer that cycles through the template rows in a
/// single loop, only advancing to the next row once the current one is
/// classified without error. Heavily instrumented with console output and a
/// key-press pause after every generation (debug aid).
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="neuron">The perceptron to train; its weights are randomized first.</param>
/// <param name="maxGenerations">Upper bound on the total number of generations.</param>
/// <returns>The error of the last evaluated row.</returns>
public double trainNetwork2(TrainingTemplate trainingTemplate, AriesNeuroNet.Neurons.Neuron neuron, int maxGenerations)
{
    int stableLimit = trainingTemplate.rows.Count;
    int stableGenerations = 0;
    int currentGeneration = 0;
    int currentRow = 0;

    // Step 1: start from random weights.
    neuron.randomizeWeights();

    // We take into account that we might not need to adjust the weights.
    bool adjustedWeights = false;
    bool stableGenFlag = true;
    bool genLimitFlag = true;
    bool templateFlag = false;
    double error = 0;

    do
    {
        Console.WriteLine("========================================================================");
        Console.WriteLine("Begining Generation: " + currentGeneration);

        // Assume this generation needs no correction until proven otherwise.
        adjustedWeights = false;

        // Present the current row's inputs and fire the neuron.
        List<double> inputs = trainingTemplate.rows[currentRow].inputs;
        neuron.setInputValues(inputs);

        neuron.fireNeuron();

        // Single-output perceptron: compare against the row's first output.
        double expectedOutput = trainingTemplate.rows[currentRow].outputs[0];
        double realOutput = neuron.output.weightedReading;
        Console.WriteLine("Output is " + realOutput);

        error = expectedOutput - realOutput;
        Console.WriteLine("Error is " + error);

        if (error == 0)
        {
            Console.WriteLine("I have not found an error");
            stableGenerations++;
            // Row learned: move on to the next training row, wrapping around.
            currentRow = (currentRow + 1) % trainingTemplate.rows.Count;
            adjustedWeights = false;
        }
        else
        {
            stableGenerations = 0;
            Console.WriteLine("I found an error");
            Console.WriteLine("The error is " + error);

            // Track old/new weights for the debug printout below.
            List<double> oldWeights = new List<double>();
            List<double> newWeights = new List<double>();

            adjustedWeights = true;

            // Perceptron learning rule: w += x * eta * error.
            foreach (NeuronPort input in neuron.inputs)
            {
                oldWeights.Add(input.weight);
                input.weight += input.reading * learningRate * error;
                newWeights.Add(input.weight);
            }
            Console.WriteLine("I corrected with " + (learningRate * error));

            Console.WriteLine("Old weights ");
            foreach (double weight in oldWeights)
            {
                Console.Write(weight + " ");
            }
            // BUGFIX: the original concatenated the list object itself
            // ("New weights " + newWeights), which prints the List type name
            // rather than the values; the loop below prints the actual weights.
            Console.WriteLine("New weights ");
            foreach (double weight in newWeights)
            {
                Console.Write(weight + " ");
            }
        }

        currentGeneration++;

        // Re-evaluate the loop conditions.
        stableGenFlag = stableGenerations < stableLimit;
        genLimitFlag = currentGeneration < maxGenerations;
        templateFlag = currentRow < trainingTemplate.rows.Count;
        Console.WriteLine("adjustedWeights " + adjustedWeights + " templateFlag " + templateFlag + " stableGenFlag " + stableGenFlag + " genLimitFlag " + genLimitFlag);
        Console.WriteLine("End of Generation: " + (currentGeneration - 1));
        Console.WriteLine("========================================================================");
        // Debug pause: wait for a key press between generations.
        Console.ReadKey();
    } while (adjustedWeights /*&& stableGenFlag*/ && genLimitFlag && templateFlag);

    return error;
}
/// <summary>
/// Trains a single perceptron neuron using the perceptron learning rule:
/// each training row is presented repeatedly, adjusting every input weight by
/// (reading * learningRate * error) until the row produces no error, a full
/// stable run is reached, or the generation limit is hit.
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="neuron">The perceptron to train; its weights are randomized first.</param>
/// <param name="maxGenerations">Upper bound on the total number of generations.</param>
/// <param name="errorProg">Receives the history of non-zero errors seen during training.</param>
/// <returns>The error of the last evaluated row.</returns>
public double trainNetwork(TrainingTemplate trainingTemplate, AriesNeuroNet.Neurons.Neuron neuron, int maxGenerations, ErrorHistory errorProg)
{
    int stableLimit = trainingTemplate.rows.Count;
    int stableGenerations = 0;
    int currentGeneration = 0;
    double error = 0;
    List<double> errorHistory = new List<double>();
    bool adjustedWeights = false;

    // Step 1: start from random weights.
    neuron.randomizeWeights();

    for (int currentRow = 0; currentRow < trainingTemplate.rows.Count; currentRow++)
    {
        TrainingRow row = trainingTemplate.rows[currentRow];

        do
        {
            adjustedWeights = false;

            // Present the row and fire the neuron.
            neuron.setInputValues(row.inputs);
            neuron.fireNeuron();

            // Single-output perceptron: compare against the row's first output.
            double expectedOutput = row.outputs[0];
            double realOutput = neuron.output.weightedReading;
            Console.WriteLine("Output is " + realOutput);

            error = expectedOutput - realOutput;

            // Exact comparison is intentional here: with binary targets and a
            // step-style fire rule, a correct classification yields exactly 0.
            if (error == 0)
            {
                stableGenerations++;
            }
            else
            {
                stableGenerations = 0;
                adjustedWeights = true;

                // Perceptron learning rule: w += x * eta * error.
                foreach (NeuronPort input in neuron.inputs)
                {
                    input.weight += input.reading * learningRate * error;
                }
                Console.WriteLine("I corrected with " + (learningRate * error));

                errorHistory.Add(error);
            }

            currentGeneration++;

            // BUGFIX: publish the error history on every exit path — the
            // original only assigned errorProg.errorPoints on the fall-through
            // return, so early exits handed back a stale/empty ErrorHistory.
            if (stableGenerations >= stableLimit)
            {
                // Ended due to a full stable run of generations.
                errorProg.errorPoints = errorHistory;
                return error;
            }
            if (currentGeneration >= maxGenerations)
            {
                // Ended due to the generation limit.
                errorProg.errorPoints = errorHistory;
                return error;
            }
        } while (adjustedWeights);
    }

    errorProg.errorPoints = errorHistory;
    return error;
}
/// <summary>
/// Trains the perceptron network against the given template. For this
/// single-neuron network the call simply delegates to the configured training
/// method; a larger network would iterate over its neurons here instead.
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="extMaxGenerations">Upper bound on the number of training generations.</param>
/// <param name="errorProg">Receives the error history produced during training.</param>
/// <returns>The final training error reported by the training method.</returns>
public double train(TrainingTemplate trainingTemplate, int extMaxGenerations, ErrorHistory errorProg)
{
    return this.trainingMethod.trainNetwork(trainingTemplate, perceptron, extMaxGenerations, errorProg);
}
/// <summary>
/// Trains the network through back propagation over a three-layer topology
/// with a single output neuron.
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="inputLayer">The input layer of the network.</param>
/// <param name="outputLayer">The output layer of the network (first neuron is used).</param>
/// <param name="hiddenLayer">The hidden layer of the network.</param>
/// <param name="maxGenerations">Upper bound on the total number of generations.</param>
/// <param name="acceptableError">Error magnitude below which a row counts as learned.</param>
/// <param name="errorProg">Receives the history of errors seen during training.</param>
/// <returns>The error of the last evaluated row.</returns>
public double trainNetwork(TrainingTemplate trainingTemplate, Layer inputLayer, Layer outputLayer, Layer hiddenLayer, int maxGenerations, double acceptableError, ErrorHistory errorProg)
{
    // Step 0: initialize.
    int stableLimit = trainingTemplate.rows.Count;
    // NOTE(review): stableGenerations is never incremented anywhere, so the
    // stable-limit exit below can never trigger — confirm intended behavior.
    int stableGenerations = 0;
    int currentGeneration = 0;
    double error = 0;
    List<double> errorHistory = new List<double>();

    // Randomize the input and hidden layers; the output neuron's weights are
    // left as created (open question in the original code).
    foreach (Neuron neuron in inputLayer.neurons)
    {
        neuron.randomizeWeights();
    }
    foreach (Neuron neuron in hiddenLayer.neurons)
    {
        neuron.randomizeWeights();
    }

    for (int currentRow = 0; currentRow < trainingTemplate.rows.Count; currentRow++)
    {
        // Extract the current row.
        TrainingRow row = trainingTemplate.rows[currentRow];

        do
        {
            // BUGFIX: copy the row's inputs instead of aliasing them — the
            // original reversed and popped row.inputs in place, permanently
            // destroying the training data after the first pass.
            List<double> templateInputs = new List<double>(row.inputs);
            // Reversed so we can pop from the end in original order.
            templateInputs.Reverse();

            // Distribute the inputs over the input layer and fire it;
            // missing inputs default to 0.
            foreach (Neuron neuron in inputLayer.neurons)
            {
                foreach (NeuronPort input in neuron.inputs)
                {
                    double var_input = 0;
                    if (templateInputs.Count > 0)
                    {
                        var_input = templateInputs[templateInputs.Count - 1];
                        templateInputs.RemoveAt(templateInputs.Count - 1);
                    }
                    input.reading = var_input;
                }
                neuron.fireNeuron();
            }

            // Fire the hidden layer.
            foreach (Neuron neuron in hiddenLayer.neurons)
            {
                neuron.fireNeuron();
            }

            // Fire the output layer (single output neuron assumed).
            outputLayer.neurons[0].fireNeuron();

            // BUGFIX: compare against the CURRENT row's expected output — the
            // original always used trainingTemplate.rows[0].
            error = outputLayer.neurons[0].output.weightedReading - row.outputs[0];
            errorHistory.Add(error);

            // BUGFIX: use the error magnitude; a large negative error also
            // needs correction (the original only corrected positive errors).
            if (Math.Abs(error) > acceptableError)
            {
                Neuron outputNeuron = outputLayer.neurons[0];

                // Node deltas: output layer first, then hidden layer; the
                // input-layer pass was marked experimental by the author.
                outputNeuron.nodeDelta = (-1 * error) * outputNeuron.fireRule.fireNeuronDerivative(outputNeuron.inputs, outputNeuron.bias);
                foreach (Neuron neuron in hiddenLayer.neurons)
                {
                    // NOTE(review): the formula ADDS the outgoing weight to the
                    // derivative instead of multiplying by it — kept as written
                    // pending a verified backprop derivation.
                    neuron.nodeDelta = outputNeuron.nodeDelta * (neuron.fireRule.fireNeuronDerivative(neuron.inputs, neuron.bias) + neuron.output.weight);
                }
                // Experimental: input-layer deltas.
                foreach (Neuron neuron in inputLayer.neurons)
                {
                    neuron.nodeDelta = outputNeuron.nodeDelta * (neuron.fireRule.fireNeuronDerivative(neuron.inputs, neuron.bias) + neuron.output.weight);
                }

                // Gradients for each layer.
                outputNeuron.output.gradient = outputNeuron.output.weightedReading * outputNeuron.nodeDelta;
                foreach (Neuron neuron in hiddenLayer.neurons)
                {
                    neuron.output.gradient = neuron.output.weightedReading * outputNeuron.nodeDelta;
                }
                // Experimental: input-layer gradients.
                foreach (Neuron neuron in inputLayer.neurons)
                {
                    neuron.output.gradient = neuron.output.weightedReading * outputNeuron.nodeDelta;
                }

                // Weight updates with momentum (field name 'moementum' kept
                // as declared elsewhere in the class).
                outputNeuron.deltaWeight = learningRate * outputNeuron.output.gradient + moementum * outputNeuron.deltaWeight;
                outputNeuron.output.weight += outputNeuron.deltaWeight;
                foreach (Neuron neuron in hiddenLayer.neurons)
                {
                    neuron.deltaWeight = learningRate * neuron.output.gradient + moementum * neuron.deltaWeight;
                    neuron.output.weight += neuron.deltaWeight;
                }
                foreach (Neuron neuron in inputLayer.neurons)
                {
                    neuron.deltaWeight = learningRate * neuron.output.gradient + moementum * neuron.deltaWeight;
                    neuron.output.weight += neuron.deltaWeight;
                }
            }

            currentGeneration++;

            // BUGFIX: publish the error history on every exit path — the
            // original never wrote errorProg at all.
            if (stableGenerations >= stableLimit)
            {
                errorProg.errorPoints = errorHistory;
                return error;
            }
            if (currentGeneration >= maxGenerations)
            {
                errorProg.errorPoints = errorHistory;
                return error;
            }
        } while (Math.Abs(error) > acceptableError);
    }

    errorProg.errorPoints = errorHistory;
    return error;
}
/// <summary>
/// Trains the network through back propagation over a three-layer topology
/// with a single output neuron.
/// </summary>
/// <param name="trainingTemplate">The rows of input/expected-output pairs to train on.</param>
/// <param name="inputLayer">The input layer of the network.</param>
/// <param name="outputLayer">The output layer of the network (first neuron is used).</param>
/// <param name="hiddenLayer">The hidden layer of the network.</param>
/// <param name="maxGenerations">Upper bound on the total number of generations.</param>
/// <param name="acceptableError">Error magnitude below which a row counts as learned.</param>
/// <param name="errorProg">Receives the history of errors seen during training.</param>
/// <returns>The error of the last evaluated row.</returns>
public double trainNetwork(TrainingTemplate trainingTemplate, Layer inputLayer, Layer outputLayer, Layer hiddenLayer, int maxGenerations, double acceptableError, ErrorHistory errorProg)
{
    // Step 0: initialize.
    int stableLimit = trainingTemplate.rows.Count;
    // NOTE(review): stableGenerations is never incremented anywhere, so the
    // stable-limit exit below can never trigger — confirm intended behavior.
    int stableGenerations = 0;
    int currentGeneration = 0;
    double error = 0;
    List<double> errorHistory = new List<double>();

    // Randomize the input and hidden layers; the output neuron's weights are
    // left as created (open question in the original code).
    foreach (Neuron neuron in inputLayer.neurons)
    {
        neuron.randomizeWeights();
    }
    foreach (Neuron neuron in hiddenLayer.neurons)
    {
        neuron.randomizeWeights();
    }

    for (int currentRow = 0; currentRow < trainingTemplate.rows.Count; currentRow++)
    {
        // Extract the current row.
        TrainingRow row = trainingTemplate.rows[currentRow];

        do
        {
            // BUGFIX: copy the row's inputs instead of aliasing them — the
            // original reversed and popped row.inputs in place, permanently
            // destroying the training data after the first pass.
            List<double> templateInputs = new List<double>(row.inputs);
            // Reversed so we can pop from the end in original order.
            templateInputs.Reverse();

            // Distribute the inputs over the input layer and fire it;
            // missing inputs default to 0.
            foreach (Neuron neuron in inputLayer.neurons)
            {
                foreach (NeuronPort input in neuron.inputs)
                {
                    double var_input = 0;
                    if (templateInputs.Count > 0)
                    {
                        var_input = templateInputs[templateInputs.Count - 1];
                        templateInputs.RemoveAt(templateInputs.Count - 1);
                    }
                    input.reading = var_input;
                }
                neuron.fireNeuron();
            }

            // Fire the hidden layer.
            foreach (Neuron neuron in hiddenLayer.neurons)
            {
                neuron.fireNeuron();
            }

            // Fire the output layer (single output neuron assumed).
            outputLayer.neurons[0].fireNeuron();

            // BUGFIX: compare against the CURRENT row's expected output — the
            // original always used trainingTemplate.rows[0].
            error = outputLayer.neurons[0].output.weightedReading - row.outputs[0];
            errorHistory.Add(error);

            // BUGFIX: use the error magnitude; a large negative error also
            // needs correction (the original only corrected positive errors).
            if (Math.Abs(error) > acceptableError)
            {
                Neuron outputNeuron = outputLayer.neurons[0];

                // Node deltas: output layer first, then hidden layer; the
                // input-layer pass was marked experimental by the author.
                outputNeuron.nodeDelta = (-1 * error) * outputNeuron.fireRule.fireNeuronDerivative(outputNeuron.inputs, outputNeuron.bias);
                foreach (Neuron neuron in hiddenLayer.neurons)
                {
                    // NOTE(review): the formula ADDS the outgoing weight to the
                    // derivative instead of multiplying by it — kept as written
                    // pending a verified backprop derivation.
                    neuron.nodeDelta = outputNeuron.nodeDelta * (neuron.fireRule.fireNeuronDerivative(neuron.inputs, neuron.bias) + neuron.output.weight);
                }
                // Experimental: input-layer deltas.
                foreach (Neuron neuron in inputLayer.neurons)
                {
                    neuron.nodeDelta = outputNeuron.nodeDelta * (neuron.fireRule.fireNeuronDerivative(neuron.inputs, neuron.bias) + neuron.output.weight);
                }

                // Gradients for each layer.
                outputNeuron.output.gradient = outputNeuron.output.weightedReading * outputNeuron.nodeDelta;
                foreach (Neuron neuron in hiddenLayer.neurons)
                {
                    neuron.output.gradient = neuron.output.weightedReading * outputNeuron.nodeDelta;
                }
                // Experimental: input-layer gradients.
                foreach (Neuron neuron in inputLayer.neurons)
                {
                    neuron.output.gradient = neuron.output.weightedReading * outputNeuron.nodeDelta;
                }

                // Weight updates with momentum (field name 'moementum' kept
                // as declared elsewhere in the class).
                outputNeuron.deltaWeight = learningRate * outputNeuron.output.gradient + moementum * outputNeuron.deltaWeight;
                outputNeuron.output.weight += outputNeuron.deltaWeight;
                foreach (Neuron neuron in hiddenLayer.neurons)
                {
                    neuron.deltaWeight = learningRate * neuron.output.gradient + moementum * neuron.deltaWeight;
                    neuron.output.weight += neuron.deltaWeight;
                }
                foreach (Neuron neuron in inputLayer.neurons)
                {
                    neuron.deltaWeight = learningRate * neuron.output.gradient + moementum * neuron.deltaWeight;
                    neuron.output.weight += neuron.deltaWeight;
                }
            }

            currentGeneration++;

            // BUGFIX: publish the error history on every exit path — the
            // original never wrote errorProg at all.
            if (stableGenerations >= stableLimit)
            {
                errorProg.errorPoints = errorHistory;
                return error;
            }
            if (currentGeneration >= maxGenerations)
            {
                errorProg.errorPoints = errorHistory;
                return error;
            }
        } while (Math.Abs(error) > acceptableError);
    }

    errorProg.errorPoints = errorHistory;
    return error;
}