public void testDataSetPopulation()
{
    DataSet irisDataSet = DataSetFactory.getIrisDataSet();
    INumerizer numerizer = new IrisDataSetNumerizer();
    NeuralNetworkDataSet innds = new IrisNeuralNetworkDataSet();
    innds.CreateExamplesFromDataSet(irisDataSet, numerizer);

    NeuralNetworkConfig config = new NeuralNetworkConfig();
    config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_INPUTS, 4);
    config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_OUTPUTS, 3);
    config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_HIDDEN_NEURONS, 6);
    config.SetConfig(FeedForwardNeuralNetwork.LOWER_LIMIT_WEIGHTS, -2.0);
    config.SetConfig(FeedForwardNeuralNetwork.UPPER_LIMIT_WEIGHTS, 2.0);

    FeedForwardNeuralNetwork ffnn = new FeedForwardNeuralNetwork(config);
    ffnn.SetTrainingScheme(new BackPropagationLearning(0.1, 0.9));
    ffnn.TrainOn(innds, 10);
    innds.RefreshDataset();
    ffnn.TestOnDataSet(innds);
}
public void testFeedForwardAndBackLoopWorksWithMomentum()
{
    // example 11.14 of Neural Network Design by Hagan, Demuth and Beale
    Matrix hiddenLayerWeightMatrix = new Matrix(2, 1);
    hiddenLayerWeightMatrix.Set(0, 0, -0.27);
    hiddenLayerWeightMatrix.Set(1, 0, -0.41);

    Vector hiddenLayerBiasVector = new Vector(2);
    hiddenLayerBiasVector.SetValue(0, -0.48);
    hiddenLayerBiasVector.SetValue(1, -0.13);

    Vector input = new Vector(1);
    input.SetValue(0, 1);

    Matrix outputLayerWeightMatrix = new Matrix(1, 2);
    outputLayerWeightMatrix.Set(0, 0, 0.09);
    outputLayerWeightMatrix.Set(0, 1, -0.17);

    Vector outputLayerBiasVector = new Vector(1);
    outputLayerBiasVector.SetValue(0, 0.48);

    Vector error = new Vector(1);
    error.SetValue(0, 1.261);

    double learningRate = 0.1;
    double momentumFactor = 0.5;

    FeedForwardNeuralNetwork ffnn = new FeedForwardNeuralNetwork(
        hiddenLayerWeightMatrix, hiddenLayerBiasVector,
        outputLayerWeightMatrix, outputLayerBiasVector);
    ffnn.SetTrainingScheme(new BackPropagationLearning(learningRate, momentumFactor));
    ffnn.ProcessInput(input);
    ffnn.ProcessError(error);

    Matrix finalHiddenLayerWeights = ffnn.GetHiddenLayerWeights();
    Assert.AreEqual(-0.2675, finalHiddenLayerWeights.Get(0, 0), 0.001);
    Assert.AreEqual(-0.4149, finalHiddenLayerWeights.Get(1, 0), 0.001);

    Vector hiddenLayerBias = ffnn.GetHiddenLayerBias();
    Assert.AreEqual(-0.4775, hiddenLayerBias.GetValue(0), 0.001);
    Assert.AreEqual(-0.1349, hiddenLayerBias.GetValue(1), 0.001);

    Matrix finalOutputLayerWeights = ffnn.GetOutputLayerWeights();
    Assert.AreEqual(0.1304, finalOutputLayerWeights.Get(0, 0), 0.001);
    Assert.AreEqual(-0.1235, finalOutputLayerWeights.Get(0, 1), 0.001);

    Vector outputLayerBias = ffnn.GetOutputLayerBias();
    Assert.AreEqual(0.6061, outputLayerBias.GetValue(0), 0.001);
}
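The expected values in this test follow the momentum-modified steepest descent rule from Hagan, Demuth and Beale, with momentum factor \(\gamma = 0.5\) and learning rate \(\alpha = 0.1\):

\[
\Delta W^{m}(k) = \gamma\,\Delta W^{m}(k-1) - (1-\gamma)\,\alpha\, s^{m}\,\bigl(a^{m-1}\bigr)^{T},
\qquad
\Delta b^{m}(k) = \gamma\,\Delta b^{m}(k-1) - (1-\gamma)\,\alpha\, s^{m},
\]

where \(s^{m}\) is the layer-\(m\) sensitivity and \(a^{m-1}\) the previous layer's output. Since the previous weight change is zero here, the first update is simply \((1-\gamma)\) times the plain backpropagation step: the plain step would move the output bias from 0.48 to 0.732, and halving it with \(\gamma = 0.5\) gives the asserted 0.6061.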
public void FeedForwardNeuralNetwork_LoadModelSparseMatrix()
{
    var target = new FeedForwardNeuralNetwork<double>(new[] { 2L, 3L, 2L });
    var parser = new DoubleParser<string>();
    var matrix = TestsHelper.ReadMatrix(
        5,
        5,
        "[[-1.0, 1.0, 0.5, 0, 0], [1.0, -1.0, 0.5, 0, 0], [0, 0, 0, -1.0, 2.0], [0, 0, 0, 0.5, -1.5], [0, 0, 0, 1.0, -0.5]]",
        (i, j) => new SparseDictionaryMatrix<double>(i, j, 0),
        parser,
        true);
    var vector = TestsHelper.ReadVector(
        5,
        "[-1.0, 0.0, 1.0, -0.5, 0.5]",
        new SparseDictionaryMathVectorFactory<double>(),
        parser,
        true);
    var model = new NeuralNetworkModel<double, SparseDictionaryMatrix<double>, IMathVector<double>>(
        matrix,
        vector);
    target.LoadModelSparse<SparseDictionaryMatrix<double>, ILongSparseMatrixLine<double>, IMathVector<double>>(
        model);
    this.AssertTargetFromMatrix(model, target);
}
internal static void backPropagationDemo()
{
    try
    {
        DataSet irisDataSet = DataSetFactory.getIrisDataSet();
        INumerizer numerizer = new IrisDataSetNumerizer();
        NeuralNetworkDataSet innds = new IrisNeuralNetworkDataSet();
        innds.CreateExamplesFromDataSet(irisDataSet, numerizer);

        NeuralNetworkConfig config = new NeuralNetworkConfig();
        config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_INPUTS, 4);
        config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_OUTPUTS, 3);
        config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_HIDDEN_NEURONS, 6);
        config.SetConfig(FeedForwardNeuralNetwork.LOWER_LIMIT_WEIGHTS, -2.0);
        config.SetConfig(FeedForwardNeuralNetwork.UPPER_LIMIT_WEIGHTS, 2.0);

        FeedForwardNeuralNetwork ffnn = new FeedForwardNeuralNetwork(config);
        ffnn.SetTrainingScheme(new BackPropagationLearning(0.1, 0.9));
        ffnn.TrainOn(innds, 1000);
        innds.RefreshDataset();

        // TestOnDataSet returns { number correct, number wrong }
        int[] result = ffnn.TestOnDataSet(innds);
        System.Console.WriteLine(result[0] + " right, " + result[1] + " wrong");
    }
    catch (Exception)
    {
        // rethrow without resetting the stack trace ("throw e;" would truncate it)
        throw;
    }
}
public void FeedForwardNeuralNetwork_LoadModelComplexDisconnectedSparseMatrix()
{
    var schema = new[] { 5L, 3L, 4L, 2L, 5L };
    var target = new FeedForwardNeuralNetwork<double>(schema);
    var model = this.GetComplexUnconnectedTestModel();
    target.LoadModelSparse<CoordinateSparseMathMatrix<double>, ILongSparseMatrixLine<double>, ArrayMathVector<double>>(
        model);

    // Verify the loaded model
    this.AssertTargetFromMatrix(model, target);
}
/// <summary>
/// Checks that the model matches what is stored in the network.
/// </summary>
/// <typeparam name="C">The type of the objects that make up the coefficients.</typeparam>
/// <typeparam name="M">The type of the objects that make up the matrices.</typeparam>
/// <typeparam name="V">The type of the objects that make up the vectors.</typeparam>
/// <param name="expected">The model with the expected values.</param>
/// <param name="actual">The network containing the values to compare.</param>
private void AssertTargetFromMatrix<C, M, V>(
    NeuralNetworkModel<C, M, V> expected,
    FeedForwardNeuralNetwork<C> actual)
    where M : ILongMatrix<C>
    where V : IVector<C>
{
    var actualTresholds = actual.InternalTresholds;
    var expectedTresholds = expected.Tresholds;
    Assert.AreEqual(expectedTresholds.LongLength, actualTresholds.LongLength);
    for (var i = 0; i < actualTresholds.LongLength; ++i)
    {
        Assert.AreEqual(expectedTresholds[i], actualTresholds[i]);
    }

    var expectedMatrix = expected.WeightsMatrix;
    var actualMatrix = actual.InternalWeights;
    Assert.AreEqual(expectedMatrix.GetLength(0), actualMatrix.LongLength);

    // Walk the internal weight lines layer by layer; each time a layer
    // boundary is crossed, shift the column offset into the model matrix
    // by the size of the layer feeding the one just finished.
    var pointer = 0;
    var currCol = 0L;
    var schema = actual.Schema;
    var currLine = schema[pointer + 1];
    for (var i = 0; i < actualMatrix.LongLength; ++i)
    {
        if (i == currLine)
        {
            currCol += schema[pointer++];
            currLine += schema[pointer + 1];
        }

        var actualLine = actualMatrix[i];
        for (var j = 0; j < actualLine.LongLength; ++j)
        {
            var actualVal = actualLine[j];
            var expectedVal = expectedMatrix[i, currCol + j];
            Assert.AreEqual(expectedVal, actualVal);
        }
    }
}
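In other words, for a schema \((n_0, n_1, \ldots, n_L)\) the internal weight lines come in consecutive blocks of \(n_1, n_2, \ldots, n_L\) lines, and a line \(i\) belonging to block \(\ell\) is compared against the model entries \(\text{expectedMatrix}[i,\, c_\ell + j]\), where the column offset accumulates the sizes of the earlier layers:

\[
c_\ell = \sum_{k=0}^{\ell-2} n_k, \qquad \ell = 1, \ldots, L .
\]

For the \((2, 3, 2)\) schema used above this gives offset 0 for the three hidden-layer lines and offset 2 for the two output-layer lines.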
internal static void backPropagationDemo()
{
    try
    {
        System.Console.WriteLine(Util.ntimes("*", 100));
        System.Console.WriteLine(
            "\n BackpropagationDemo - Running BackProp on the animal data set with {0} epochs of learning ",
            epochs);
        System.Console.WriteLine(Util.ntimes("*", 100));

        DataSet animalDataSet = DataSetFactory.getAnimalDataSet();
        INumerizer numerizer = new AnimalDataSetNumerizer();
        // the Iris data-set container is reused here; only the numerizer is animal-specific
        NeuralNetworkDataSet innds = new IrisNeuralNetworkDataSet();
        innds.CreateExamplesFromDataSet(animalDataSet, numerizer);

        NeuralNetworkConfig config = new NeuralNetworkConfig();
        config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_INPUTS, 20);
        config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_OUTPUTS, 3);
        config.SetConfig(FeedForwardNeuralNetwork.NUMBER_OF_HIDDEN_NEURONS, numNeuronsPerLayer);
        config.SetConfig(FeedForwardNeuralNetwork.LOWER_LIMIT_WEIGHTS, -2.0);
        config.SetConfig(FeedForwardNeuralNetwork.UPPER_LIMIT_WEIGHTS, 2.0);

        FeedForwardNeuralNetwork ffnn = new FeedForwardNeuralNetwork(config);
        ffnn.SetTrainingScheme(new BackPropagationLearning(0.1, 0.9));
        ffnn.TrainOn(innds, epochs);
        innds.RefreshDataset();

        int[] result = ffnn.TestOnDataSet(innds);
        System.Console.WriteLine(result[0] + " right, " + result[1] + " wrong");
    }
    catch (Exception)
    {
        // rethrow without resetting the stack trace ("throw e;" would truncate it)
        throw;
    }
}
public void FeedForwardNeuralNetwork_ThreeLayerTrainTest()
{
    var target = new FeedForwardNeuralNetwork<double>(new[] { 2L, 2L, 1L });

    // XOR training patterns
    var pattern = new NeuralNetworkTrainingPattern<double, ArrayMathVector<double>, ArrayMathVector<double>>[]
    {
        new NeuralNetworkTrainingPattern<double, ArrayMathVector<double>, ArrayMathVector<double>>(
            new ArrayMathVector<double>(new[] { 0.0, 0.0 }),
            new ArrayMathVector<double>(new[] { 0.0 })),
        new NeuralNetworkTrainingPattern<double, ArrayMathVector<double>, ArrayMathVector<double>>(
            new ArrayMathVector<double>(new[] { 1.0, 0.0 }),   // was 0.1; the XOR table clearly intends 1.0
            new ArrayMathVector<double>(new[] { 1.0 })),
        new NeuralNetworkTrainingPattern<double, ArrayMathVector<double>, ArrayMathVector<double>>(
            new ArrayMathVector<double>(new[] { 0.0, 1.0 }),
            new ArrayMathVector<double>(new[] { 1.0 })),
        new NeuralNetworkTrainingPattern<double, ArrayMathVector<double>, ArrayMathVector<double>>(
            new ArrayMathVector<double>(new[] { 1.0, 1.0 }),
            new ArrayMathVector<double>(new[] { 0.0 }))
    };

    var field = new DoubleField();
    target.Train(
        pattern,
        100,
        field,
        (d1, d2) => 1.0 / (1.0 + Math.Exp(-d2 + d1)),   // sigmoid of (net input - threshold)
        (u, v, l) =>
        {
            // inner product over the first l entries
            var result = 0.0;
            for (var i = 0L; i < l; ++i)
            {
                result += u[i] * v[i];
            }

            return result;
        },
        (y) => y * (1 - y),                             // sigmoid derivative in terms of the output
        (w, y, i) => w[i],
        (c, w) =>
        {
            // fixed initialisation (a random one, e.g. rand.NextDouble(), would also do)
            c[0] = 0.6;
            c[1] = 0.6;
            c[2] = 0.6;
            w[0][0] = 1.0;
            w[0][1] = -1.0;
            w[1][0] = -1.0;
            w[1][1] = 1.0;
            w[2][0] = 1.0;
            w[2][1] = 1.0;
        });

    var outputMatrix = target.InternalReserveOutput();
    Func<double, double, double> activationFunction = (d1, d2) => d2 > d1 ? 1.0 : 0.0;
    Func<double[], double[], long, double> propFunc = (u, v, l) =>
    {
        var result = 0.0;
        for (var i = 0L; i < l; ++i)
        {
            result += u[i] * v[i];
        }

        return result;
    };

    target.InternalComputeLayerOutputs(
        new ArrayMathVector<double>(new[] { 0.0, 1.0 }),
        outputMatrix,
        activationFunction,
        propFunc);
    Assert.Inconclusive("Test not yet completed.");
}
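The target function here is XOR, which motivates the 2-2-1 schema: no single-layer threshold unit can represent it, because the four cases are not linearly separable. With output 1 iff \(w_1 x_1 + w_2 x_2 > \theta\), the truth table demands

\[
0 \le \theta, \qquad w_1 > \theta, \qquad w_2 > \theta, \qquad w_1 + w_2 \le \theta,
\]

and the middle two give \(w_1 + w_2 > 2\theta\), which together with \(w_1 + w_2 \le \theta\) forces \(\theta < 0\), contradicting \(\theta \ge 0\). A hidden layer removes the obstruction.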
public void FeedForwardNeuralNetwork_InternalComputeOutputs()
{
    var target = new FeedForwardNeuralNetwork<double>(new[] { 2L, 3L, 2L });
    var parser = new DoubleParser<string>();
    var matrix = TestsHelper.ReadMatrix(
        5,
        5,
        "[[-1.0, 1.0, 0.5, 0, 0], [1.0, -1.0, 0.5, 0, 0], [0, 0, 0, -1.0, 2.0], [0, 0, 0, 0.5, -1.5], [0, 0, 0, 1.0, -0.5]]",
        (i, j) => new SparseDictionaryMatrix<double>(i, j, 0),
        parser,
        true);
    var vector = TestsHelper.ReadVector(
        5,
        "[0.5, 0.5, 0.5, 0.5, 0.5]",
        new SparseDictionaryMathVectorFactory<double>(),
        parser,
        true);
    var model = new NeuralNetworkModel<double, SparseDictionaryMatrix<double>, IMathVector<double>>(
        matrix,
        vector);
    target.LoadModel(model);

    var outputMatrix = target.InternalReserveOutput();
    target.InternalComputeLayerOutputs(
        new ArrayMathVector<double>(new[] { 1.0, -1.0 }),
        outputMatrix,
        (d1, d2) => d2 > d1 ? 1.0 : 0.0,   // step activation: fire when the net input exceeds the threshold
        (u, v, l) =>
        {
            var result = 0.0;
            for (var i = 0L; i < l; ++i)
            {
                result += u[i] * v[i];
            }

            return result;
        });

    Assert.AreEqual(target.Schema.LongCount() - 1L, outputMatrix.LongLength);
    var currOut = outputMatrix[0];
    Assert.AreEqual(0.0, currOut[0]);
    Assert.AreEqual(1.0, currOut[1]);
    Assert.AreEqual(0.0, currOut[2]);
    currOut = outputMatrix[1];
    Assert.AreEqual(0.0, currOut[0]);
    Assert.AreEqual(0.0, currOut[1]);
}
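Reading these assertions together with the 2-3-2 schema suggests the 5×5 model matrix stores the weights column-wise in two blocks: rows 0-1 × columns 0-2 hold the input-to-hidden weights and rows 2-4 × columns 3-4 the hidden-to-output weights, entry \((i, j)\) being the weight from neuron \(i\) into neuron \(j\). Under that reading, the expected outputs for the input \((1, -1)\) with every threshold at 0.5 check out by hand:

\[
\begin{aligned}
\text{hidden: } & (-1)(1) + (1)(-1) = -2,\quad (1)(1) + (-1)(-1) = 2,\quad (0.5)(1) + (0.5)(-1) = 0 \;\longrightarrow\; (0, 1, 0),\\
\text{output: } & (-1)(0) + (0.5)(1) + (1)(0) = 0.5 \not> 0.5,\quad (2)(0) + (-1.5)(1) + (-0.5)(0) = -1.5 \;\longrightarrow\; (0, 0).
\end{aligned}
\]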
public void FeedForwardNeuralNetwork_RunSimpleMatrixTest()
{
    var target = new FeedForwardNeuralNetwork<double>(new[] { 2L, 3L, 2L });
    var parser = new DoubleParser<string>();
    var matrix = TestsHelper.ReadMatrix(
        5,
        5,
        "[[-1.0, 1.0, 0.5, 0, 0], [1.0, -1.0, 0.5, 0, 0], [0, 0, 0, -1.0, 2.0], [0, 0, 0, 0.5, -1.5], [0, 0, 0, 1.0, -0.5]]",
        (i, j) => new SparseDictionaryMatrix<double>(i, j, 0),
        parser,
        true);
    var vector = TestsHelper.ReadVector(
        5,
        "[0.5, 0.5, 0.5, 0.5, 0.5]",
        new SparseDictionaryMathVectorFactory<double>(),
        parser,
        true);
    var model = new NeuralNetworkModel<double, SparseDictionaryMatrix<double>, IMathVector<double>>(
        matrix,
        vector);
    target.LoadModel(model);

    // shared propagation (inner product) and step-activation functions
    Func<double[], double[], long, double> propFunc = (u, v, l) =>
    {
        var result = 0.0;
        for (var i = 0L; i < l; ++i)
        {
            result += u[i] * v[i];
        }

        return result;
    };
    Func<double, double, double> activationFunction = (d1, d2) => d2 > d1 ? 1.0 : 0.0;

    var actual = target.Run(new[] { 1.0, 0.0 }, propFunc, activationFunction);
    var expected = new[] { 0.0, 0.0 };
    CollectionAssert.AreEqual(expected, actual);

    actual = target.Run(new[] { 0.0, 1.0 }, propFunc, activationFunction);
    expected = new[] { 0.0, 1.0 };
    CollectionAssert.AreEqual(expected, actual);

    actual = target.Run(new[] { 1.0, -1.0 }, propFunc, activationFunction);
    expected = new[] { 0.0, 0.0 };
    CollectionAssert.AreEqual(expected, actual);
}
public void Train(FeedForwardNeuralNetwork net, double[][] input, double[][] desiredOutput)
{
    int inputNeuronCount = net.InputSignal.Length;
    int outputNeuronCount = net.OutputSignal.Length;

    if (input[0].Length != inputNeuronCount || desiredOutput[0].Length != outputNeuronCount)
        throw new Exception("The number of input or output values is invalid.");

    // maximum allowed number of epochs
    const int maxEpochs = 500000000;

    bool go = true;

    // number of learning sequences
    int sequences = input.Length;

    double[][] signal = net.Signal;
    double[][] error = net.Error;
    double[][][] weight = net.Weight;
    double[][][] weightChange = net.WeightChange;
    int[] neurons = net.Neurons;
    int layerCount = net.Layers;

    double previousAverageError = 0;

    while (go)
    {
        // guard against an infinite loop
        if (net.Epoch >= maxEpochs)
            throw new Exception("Training takes too long. Try to change the network architecture.");

        double averageError = 0;
        for (int curr = 0; curr < sequences; curr++)
        {
            double[] currInput = input[curr];
            double[] currDesiredOutput = desiredOutput[curr];

            // set the input signals and execute the network
            net.InputSignal = currInput;
            net.Pulse();

            // compute the errors of the output neurons and accumulate the squared error
            for (int i = 0; i < outputNeuronCount; i++)
            {
                error[layerCount - 1][i] = net.ActivationFunction.Derivative(signal[layerCount - 1][i])
                    * (currDesiredOutput[i] - signal[layerCount - 1][i]);
                averageError += 0.5 * (currDesiredOutput[i] - signal[layerCount - 1][i])
                    * (currDesiredOutput[i] - signal[layerCount - 1][i]);
            }

            // back-propagate: compute all remaining errors and the weight changes
            for (int layer = layerCount - 1; layer > 0; layer--)
            {
                for (int i = 0; i < neurons[layer - 1]; i++)
                {
                    double temp = 0;
                    for (int j = 0; j < neurons[layer]; j++)
                    {
                        double delta = _learningRate * error[layer][j] * signal[layer - 1][i];

                        // the momentum term helps to avoid getting stuck in a local minimum
                        weight[layer - 1][i][j] += delta + _momentumRate * weightChange[layer - 1][i][j];
                        weightChange[layer - 1][i][j] = delta;

                        temp += error[layer][j] * weight[layer - 1][i][j];
                    }

                    error[layer - 1][i] = net.ActivationFunction.Derivative(signal[layer - 1][i]) * temp;
                }
            }
        }

        // adapt the learning rate once per full pass over the training set:
        // shrink it when the total error grew, expand it when the error fell
        _learningRate = _learningRate * (averageError >= previousAverageError ? 0.2 : 5);
        //_learningRate * (_adaptationRate * averageError * previousAverageError + 1);
        previousAverageError = averageError;

        if (averageError <= _precision)
            go = false;
    }
}
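In symbols, this routine implements the incremental delta rule with momentum. The local error of an output neuron comes from the activation derivative (expressed here in terms of the output itself), hidden-layer errors are accumulated through the weights (in this code, the freshly updated ones), and every weight moves by the current gradient step plus a fraction of its previous step:

\[
\delta_j^{(L)} = f'\!\bigl(y_j^{(L)}\bigr)\bigl(t_j - y_j^{(L)}\bigr),
\qquad
\delta_i^{(l-1)} = f'\!\bigl(y_i^{(l-1)}\bigr) \sum_j \delta_j^{(l)}\, w_{ij}^{(l)},
\]
\[
w_{ij}^{(l)} \leftarrow w_{ij}^{(l)} + \underbrace{\eta\, \delta_j^{(l)}\, y_i^{(l-1)}}_{\Delta w_{ij}(k)} + \mu\, \Delta w_{ij}(k-1),
\]

where \(\eta\) is `_learningRate`, \(\mu\) is `_momentumRate`, and training stops once the accumulated squared error \(\tfrac{1}{2}\sum (t - y)^2\) over an epoch drops below `_precision`.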