public void XOR(int iterations, double minimumAccuracy)
{
    // 2 inputs, two hidden layers of 10 units each, and 1 output.
    var mlp = new MultilayerPerceptron<Tanh>(2, new int[] { 10, 10 }, 1);

    double[][] inputs = new double[][]
    {
        new double[] { 0, 0 },
        new double[] { 1, 0 },
        new double[] { 0, 1 },
        new double[] { 1, 1 },
    };

    double[][] outputs = new double[][]
    {
        new double[] { 0 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { 0 },
    };

    // Online training: run a forward pass, then backpropagate the target.
    // The two constants passed to Train are presumably the learning rate and momentum.
    double[] output = new double[1];
    for (int i = 0; i < iterations; i++)
    {
        for (int j = 0; j < inputs.Length; j++)
        {
            mlp.Predict(inputs[j], output);
            mlp.Train(outputs[j], 0.4, 0.9);
        }
    }

    // Every prediction must be within the required accuracy of its target.
    for (int j = 0; j < inputs.Length; j++)
    {
        mlp.Predict(inputs[j], output);
        double diff = 1 - Math.Abs(output[0] - outputs[j][0]);
        Assert.GreaterOrEqual(diff, minimumAccuracy);
    }
}
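// The raw network output for the XOR task is a value near 0 or 1. If a hard class label
// is wanted rather than the accuracy check above, the output can simply be thresholded.
// A minimal sketch; the helper name and the 0.5 cut-off are illustrative assumptions,
// not taken from the source.
private static int ToLabel(double networkOutput)
{
    // Treat outputs of 0.5 and above as class 1, everything below as class 0.
    return networkOutput >= 0.5 ? 1 : 0;
}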
private static void Main()
{
    // The mlp takes column vectors as input and gives column vectors as output. The dlib::matrix
    // object is used to represent the column vectors. So the first thing we do here is declare
    // a convenient typedef for the matrix object we will be using.

    // This typedef declares a matrix with 2 rows and 1 column. It will be the
    // object that contains each of our 2 dimensional samples. (Note that if you wanted
    // more than 2 features in this vector you can simply change the 2 to something else)
    //typedef matrix<double, 2, 1 > sample_type;

    // make an instance of a sample matrix so we can use it below
    using (var sample = new SampleType(2, 1))
    {
        // Create a multi-layer perceptron network. This network has 2 nodes on the input layer
        // (which means it takes column vectors of length 2 as input) and 5 nodes in the first
        // hidden layer. Note that the other 4 variables in the mlp's constructor are left at
        // their default values.
        using (var net = new MultilayerPerceptron<Kernel1>(2, 5))
        {
            // Now let's put some data into our sample and train on it. We do this
            // by looping over 41*41 points and labeling them according to their
            // distance from the origin.
            for (var i = 0; i < 1000; ++i)
            {
                for (var r = -20; r <= 20; ++r)
                {
                    for (var c = -20; c <= 20; ++c)
                    {
                        sample[0] = r;
                        sample[1] = c;

                        // if this point is less than 10 from the origin
                        if (Math.Sqrt((double)r * r + c * c) <= 10)
                        {
                            net.Train(sample, 1);
                        }
                        else
                        {
                            net.Train(sample, 0);
                        }
                    }
                }
            }

            // Now we have trained our mlp. Let's see how well it did.
            // Note that if you run this program multiple times you will get different results. This
            // is because the mlp network is randomly initialized.

            // each of these statements prints out the output of the network given a particular sample.
            sample[0] = 3.123;
            sample[1] = 4;
            using (var ret = net.Operator(sample))
                Console.WriteLine($"This sample should be close to 1 and it is classified as a {ret}");

            sample[0] = 13.123;
            sample[1] = 9.3545;
            using (var ret = net.Operator(sample))
                Console.WriteLine($"This sample should be close to 0 and it is classified as a {ret}");

            sample[0] = 13.123;
            sample[1] = 0;
            using (var ret = net.Operator(sample))
                Console.WriteLine($"This sample should be close to 0 and it is classified as a {ret}");
        }
    }
}
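// For reference, the labeling rule used in the training loop above can be isolated into a
// small helper. This is a plain C# sketch of that rule only; the Label name is an
// illustrative choice and does not appear in the source.
private static double Label(double r, double c)
{
    // Points within a radius of 10 from the origin are labeled 1, all others 0.
    return Math.Sqrt(r * r + c * c) <= 10 ? 1.0 : 0.0;
}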
public void Iris(int iterations, double minimumAccuracy)
{
    string[] lines = File.ReadAllLines("./Data/iris.csv");

    double[][] inputs = new double[lines.Length][];
    double[][] outputs = new double[lines.Length][];

    // Parse the four feature columns and map the species name to a numeric target.
    for (int i = 0; i < lines.Length; i++)
    {
        inputs[i] = new double[4];
        outputs[i] = new double[1];

        string[] values = lines[i].Split(',');
        for (int j = 0; j < 4; j++)
        {
            inputs[i][j] = double.Parse(values[j]);
        }

        outputs[i][0] = Map(values[4]);
    }

    // Min-max scale each feature column, then pass it through Normalize.
    for (int i = 0; i < 4; i++)
    {
        double min = double.PositiveInfinity;
        double max = double.NegativeInfinity;
        for (int j = 0; j < inputs.Length; j++)
        {
            if (inputs[j][i] < min)
            {
                min = inputs[j][i];
            }

            if (inputs[j][i] > max)
            {
                max = inputs[j][i];
            }
        }

        for (int j = 0; j < inputs.Length; j++)
        {
            inputs[j][i] = Normalize((inputs[j][i] - min) / (max - min));
        }
    }

    // 4 inputs, two hidden layers of 10 units each, and 1 output.
    var mlp = new MultilayerPerceptron<Tanh>(4, new int[] { 10, 10 }, 1);

    double normalizedCorrect = 0;
    double[] output = new double[1];
    for (int i = 0; i < iterations; i++)
    {
        int correct = 0;
        for (int j = 0; j < inputs.Length; j++)
        {
            // Online training pass; the two constants are presumably the learning rate and momentum.
            mlp.Predict(inputs[j], output);
            mlp.Train(outputs[j], 0.1, 0.4);

            double expectedOutput = outputs[j][0];
            if (Map(expectedOutput) == Map(output[0]))
            {
                correct++;
            }
        }

        // Fraction of samples classified correctly during the last epoch.
        normalizedCorrect = (double)correct / inputs.Length;
    }

    Assert.GreaterOrEqual(normalizedCorrect, minimumAccuracy);
}
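// The Iris test above calls Map and Normalize helpers that are defined elsewhere and are
// not shown in this section. The sketch below is one plausible shape for them; the
// species encoding (-1, 0, 1, chosen to fit the tanh output range), the rounding rule,
// and the rescaling to [-1, 1] are assumptions, not taken from the source.

// Maps an iris species name to a numeric target value (assumed encoding).
private static double Map(string species)
{
    switch (species)
    {
        case "Iris-setosa": return -1;
        case "Iris-versicolor": return 0;
        case "Iris-virginica": return 1;
        default: throw new ArgumentException($"Unknown species: {species}");
    }
}

// Maps a network output back to the nearest class value (assumed rounding rule).
private static double Map(double output)
{
    return Math.Round(output);
}

// Rescales a min-max scaled value from [0, 1] to [-1, 1] (assumed scaling for the tanh units).
private static double Normalize(double value)
{
    return value * 2 - 1;
}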