public static void Run()
{
    // Four training samples of the XOR function
    double[][] input =
    {
        new double[] { 0, 0 },
        new double[] { 0, 1 },
        new double[] { 1, 0 },
        new double[] { 1, 1 }
    };

    double[][] output =
    {
        new double[] { 0 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { 0 }
    };

    // create a 2-4-1 network with a unipolar sigmoid activation
    var network = new ActivationNetwork(new SigmoidFunction(2), 2, 4, 1);

    var y = network.Compute(new double[] { 1, 0 });

    var teacher = new LevenbergMarquardtLearning(network);
    teacher.RunEpoch(input, output);

    var z = network.Compute(new double[] { 1, 0 });

    teacher.RunEpoch(input, output);
    teacher.RunEpoch(input, output);
    teacher.RunEpoch(input, output);
    teacher.RunEpoch(input, output);

    var test1 = network.Compute(new double[] { 1, 0 });
    var test2 = network.Compute(new double[] { 1, 1 });
}
public void JacobianByChainRuleTest2()
{
    // Network with two hidden layers: 2-4-3-1
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 4, 3, 1);

    var teacher1 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByFiniteDifferences);

    var teacher2 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByBackpropagation);

    // Set lambda to lambda max so no iterations are performed
    teacher1.LearningRate = 1e30f;
    teacher2.LearningRate = 1e30f;

    teacher1.RunEpoch(input, output);
    teacher2.RunEpoch(input, output);

    PrivateObject privateTeacher1 = new PrivateObject(teacher1);
    PrivateObject privateTeacher2 = new PrivateObject(teacher2);

    var jacobian1 = (float[][])privateTeacher1.GetField("jacobian");
    var jacobian2 = (float[][])privateTeacher2.GetField("jacobian");

    for (int i = 0; i < jacobian1.Length; i++)
    {
        for (int j = 0; j < jacobian1[i].Length; j++)
        {
            double j1 = jacobian1[i][j];
            double j2 = jacobian2[i][j];

            Assert.AreEqual(j1, j2, 1e-4);
            Assert.IsFalse(Double.IsNaN(j1));
            Assert.IsFalse(Double.IsNaN(j2));
        }
    }
}
public void RunEpochTest4()
{
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { 0, 0 },
    };

    double[][] output =
    {
        new double[] { 0 },
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 1);

    var teacher = new LevenbergMarquardtLearning(network,
        true, JacobianMethod.ByBackpropagation);

    double error = 1.0;
    for (int i = 0; i < 1000; i++)
        error = teacher.RunEpoch(input, output);

    for (int i = 0; i < input.Length; i++)
        Assert.AreEqual(network.Compute(input[i])[0], output[i][0], 0.1);
}
public AdvancedNeuralNetwork(string[] inputFieldNames, string outputFieldName,
    int[] neuronsCount, double learningRate = 0.1, double sigmoidAlphaValue = 2,
    bool useRegularization = false, bool useNguyenWidrow = false,
    bool useSameWeights = false, JacobianMethod method = JacobianMethod.ByBackpropagation)
{
    this.neuronsCount = neuronsCount;
    this.learningRate = learningRate;
    this.useRegularization = useRegularization;
    this.useNguyenWidrow = useNguyenWidrow;
    this.useSameWeights = useSameWeights;
    this.method = method;
    this.sigmoidAlphaValue = sigmoidAlphaValue;
    this.inputFieldNames = inputFieldNames;
    this.outputFieldName = outputFieldName;

    // create multi-layer neural network
    theNetwork = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue), // other activation functions are possible here
        inputFieldNames.Length,
        neuronsCount);

    if (useNguyenWidrow)
    {
        if (useSameWeights)
            Accord.Math.Random.Generator.Seed = 0;

        NguyenWidrow initializer = new NguyenWidrow(theNetwork);
        initializer.Randomize();
    }

    // create teacher
    teacher = new LevenbergMarquardtLearning(theNetwork, useRegularization, method);

    // set learning rate
    teacher.LearningRate = learningRate;
}
public void JacobianByChainRuleTest4()
{
    // Network with no hidden layers: 2-1
    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    // Neuron.RandGenerator = new ThreadSafeRandom(0);
    Accord.Math.Random.Generator.Seed = 0;

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 1);

    var teacher1 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByFiniteDifferences);

    var teacher2 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByBackpropagation);

    // Set lambda to lambda max so no iterations are performed
    teacher1.LearningRate = 1e30f;
    teacher2.LearningRate = 1e30f;

    teacher1.RunEpoch(input, output);
    teacher2.RunEpoch(input, output);

    var jacobian1 = teacher1.Jacobian;
    var jacobian2 = teacher2.Jacobian;

    for (int i = 0; i < jacobian1.Length; i++)
    {
        for (int j = 0; j < jacobian1[i].Length; j++)
        {
            double j1 = jacobian1[i][j];
            double j2 = jacobian2[i][j];

            Assert.AreEqual(j1, j2, 1e-5);
            Assert.IsFalse(Double.IsNaN(j1));
            Assert.IsFalse(Double.IsNaN(j2));
        }
    }
}
public void Train(TrainingData data)
{
    var inputs = data.Inputs.ToArray();
    var outputs = data.Outputs.ToArray();

    var teacher = new LevenbergMarquardtLearning(network);
    teacher.RunEpoch(inputs, outputs);
}
private void Train()
{
    stopTraining = false;
    //var errorsList = new ArrayList();
    ISupervisedLearning teacher;

    if (selectedMethod == Methods.Backpropagation)
    {
        teacher = new BackPropagationLearning(actNet)
        {
            LearningRate = double.Parse(txtbxLearningRate.Text),
            Momentum = double.Parse(txtbxMomentum.Text)
        };
    }
    else if (selectedMethod == Methods.LevenbergMarquardt)
    {
        teacher = new LevenbergMarquardtLearning(actNet)
        {
            LearningRate = double.Parse(txtbxLearningRate.Text),
        };
    }
    else
    {
        throw new Exception("No method is selected");
    }

    var iterations = epochs;
    int percentage;

    while (!stopTraining)
    {
        var error = teacher.RunEpoch(input, target);

        if (stopTraining || (error <= errorLimit) || (iterations == 0))
            break;

        this.Invoke((MethodInvoker)delegate
        {
            percentage = (int)Math.Round((double)(100 * (epochs - iterations)) / epochs);
            this.progbarTrainingProcess.Value = percentage;
            this.lblTrainingProcess.Text = "Training (" + percentage + " %)";
            chrtError.Series["Error"].Points.Add(new DataPoint(epochs - iterations, error));
        });

        iterations--;
    }

    stopTraining = true;

    this.Invoke((MethodInvoker)delegate
    {
        this.btnTrain.Text = "Train";
        this.progbarTrainingProcess.Value = 100;
        this.lblTrainingProcess.Text = "Done (100 %)";
        Save();
    });
}
public void ConstructorTest()
{
    // Four training samples of the xor function

    // two inputs (x and y)
    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    // one output (z = x ^ y)
    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), // use a bipolar sigmoid activation function
        2, // two inputs
        3, // three hidden neurons
        1  // one output neuron
        );

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(
        network,                         // the neural network
        false,                           // whether or not to use Bayesian regularization
        JacobianMethod.ByBackpropagation // Jacobian calculation method
        );

    // set learning rate
    teacher.LearningRate = 0.1f;

    // start the supervised learning
    for (int i = 0; i < 1000; i++)
    {
        double error = teacher.RunEpoch(input, output);
    }

    // If we reached here, the constructor test has passed.
}
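The constructor test above trains the network but never checks what it learned. As a hedged follow-up sketch (not part of the original test), the lines below could be appended before the method's closing brace to verify the learned XOR mapping; they reuse only the network, input, and output variables already in scope, plus Math.Sign and the Assert API used throughout these tests.

// verify the learned XOR mapping: for bipolar targets in {-1, +1},
// the sign of the network response should match the expected target sign
for (int i = 0; i < input.Length; i++)
{
    double predicted = network.Compute(input[i])[0];
    Assert.AreEqual(Math.Sign(output[i][0]), Math.Sign(predicted));
}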
private static void network(double[][] inputs, int[] outputs)
{
    // Since we would like to learn binary outputs in the form
    // [-1,+1], we can use a bipolar sigmoid activation function
    IActivationFunction function = new BipolarSigmoidFunction();

    // In our problem, we have 2 inputs (x, y pairs), and we will
    // be creating a network with 5 hidden neurons and 1 output:
    //
    var network = new ActivationNetwork(function,
        inputsCount: 2, neuronsCount: new[] { 5, 1 });

    // Create a Levenberg-Marquardt algorithm
    var teacher = new LevenbergMarquardtLearning(network)
    {
        UseRegularization = true
    };

    // Because the network is expecting multiple outputs,
    // we have to convert our single variable into arrays
    //
    var y = outputs.ToDouble().ToJagged();

    // Iterate until the stop criterion is met
    double error = double.PositiveInfinity;
    double previous;

    do
    {
        previous = error;

        // Compute one learning iteration
        error = teacher.RunEpoch(inputs, y);

    } while (Math.Abs(previous - error) > 1e-10 * Math.Abs(error));

    // Classify the samples using the model
    int[] answers = inputs.Apply(network.Compute).GetColumn(0).Apply(System.Math.Sign);

    // Plot the results
    ScatterplotBox.Show("Expected results", inputs, outputs);
    ScatterplotBox.Show("Network results", inputs, answers)
        .Hold();
}
public void LMBuild(List<train> datalist)
{
    double[][] inputs;
    double[][] outputs;
    double[][] matrix;
    GetData(out inputs, out outputs, out matrix, datalist);

    // create neural network
    network_lm = new ActivationNetwork(
        new BipolarSigmoidFunction(),
        9, // nine inputs in the network
        3, // three neurons in the first layer
        1  // one neuron in the second layer
        );

    // Randomly initialize the network
    new NguyenWidrow(network_lm).Randomize();

    // create teacher
    teacher_lm = new LevenbergMarquardtLearning(network_lm);

    int times = 0;

    // loop
    while (times++ < 50)
    {
        // run epoch of learning procedure
        double error = teacher_lm.RunEpoch(inputs, outputs);

        // check error value to see if we need to stop
        // ...
    }

    // Checks if the network has learned
    /*
    for (int i = 0; i < inputs.Length; i++)
    {
        double[] answer = network.Compute(inputs[i]);

        log(answer[0].ToString());

        // actual should be equal to expected
    }
    */
}
public void RunEpochTest1()
{
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 2, 1);

    var teacher = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByFiniteDifferences);

    double error = 1.0;
    while (error > 1e-5)
        error = teacher.RunEpoch(input, output);

    for (int i = 0; i < input.Length; i++)
        Assert.AreEqual(network.Compute(input[i])[0], output[i][0], 0.1);
}
public void Learn()
{
    const int hiddenNeurons = 5;

    var numberOfInputs = GetControllerOutputProperties().Length;
    var numberOfClasses = Enum.GetNames(typeof(OutputClass)).Length;

    var outputs = Accord.Statistics.Tools.Expand(GetOutputs(), numberOfClasses, -1, 1);
    var inputs = GetLearnInputs();

    var activationFunction = new BipolarSigmoidFunction(2);
    NeuralNetwork = new ActivationNetwork(activationFunction,
        numberOfInputs, hiddenNeurons, numberOfClasses);

    new NguyenWidrow(NeuralNetwork).Randomize();

    var teacher = new LevenbergMarquardtLearning(NeuralNetwork);

    for (var i = 0; i < 10; i++)
        teacher.RunEpoch(inputs, outputs);
}
public void RunEpochTest3()
{
    Accord.Math.Tools.SetupGenerator(0);

    double[,] dataset = yinyang;
    double[][] input = dataset.GetColumns(0, 1).ToArray();
    double[][] output = dataset.GetColumn(2).Transpose().ToArray();

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 5, 1);

    var teacher = new LevenbergMarquardtLearning(network,
        true, JacobianMethod.ByBackpropagation);

    Assert.IsTrue(teacher.UseRegularization);

    double error = 1.0;
    for (int i = 0; i < 500; i++)
        error = teacher.RunEpoch(input, output);

    double[][] actual = new double[output.Length][];

    for (int i = 0; i < input.Length; i++)
        actual[i] = network.Compute(input[i]);

    for (int i = 0; i < input.Length; i++)
        Assert.AreEqual(Math.Sign(output[i][0]), Math.Sign(actual[i][0]));
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = data.GetLength(0);

    // data transformation factor
    double yFactor = 1.7 / chart.RangeY.Length;
    double yMin = chart.RangeY.Min;
    double xFactor = 2.0 / chart.RangeX.Length;
    double xMin = chart.RangeX.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[1];
        output[i] = new double[1];

        // set input
        input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
        // set output
        output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
    }

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        1, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        NguyenWidrow initializer = new NguyenWidrow(network);
        initializer.Randomize();
    }

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(network, useRegularization);

    // set learning rate
    teacher.LearningRate = learningRate;

    // iterations
    int iteration = 1;

    // solution array
    double[,] solution = new double[50, 2];
    double[] networkInput = new double[1];

    // calculate X values to be used with solution function
    for (int j = 0; j < 50; j++)
        solution[j, 0] = chart.RangeX.Min + (double)j * chart.RangeX.Length / 49;

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution
        for (int j = 0; j < 50; j++)
        {
            networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
            solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
        }
        chart.UpdateDataSeries("solution", solution);

        // calculate error
        double learningError = 0.0;
        for (int j = 0, k = data.GetLength(0); j < k; j++)
        {
            networkInput[0] = input[j][0];
            learningError += Math.Abs(data[j, 1] - ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
        }

        // set current iteration's info
        SetText(currentIterationBox, iteration.ToString());
        SetText(currentErrorBox, learningError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    // enable settings controls
    EnableControls(true);
}
static void TestAnn2()
{
    // Here we will be creating a neural network to process 3-valued input
    // vectors and classify them into 4 possible classes. We will be using
    // a single hidden layer with 5 hidden neurons to accomplish this task.
    //
    int numberOfInputs = 3;
    int numberOfClasses = 4;
    int hiddenNeurons = 5;

    // Those are the input vectors and their expected class labels
    // that we expect our network to learn.
    //
    double[][] input =
    {
        new double[] { -1, -1, -1 }, // 0
        new double[] { -1,  1, -1 }, // 1
        new double[] {  1, -1, -1 }, // 1
        new double[] {  1,  1, -1 }, // 0
        new double[] { -1, -1,  1 }, // 2
        new double[] { -1,  1,  1 }, // 3
        new double[] {  1, -1,  1 }, // 3
        new double[] {  1,  1,  1 }  // 2
    };

    int[] labels =
    {
        0, 1, 1, 0, 2, 3, 3, 2
    };

    // In order to perform multi-class classification, we have to select a
    // decision strategy in order to be able to interpret neural network
    // outputs as labels. For this, we will be expanding our 4 possible class
    // labels into 4-dimensional output vectors where one single dimension
    // corresponding to a label will contain the value +1 and -1 otherwise.

    double[][] outputs = Accord.Statistics.Tools.Expand(labels, numberOfClasses, -1, 1);

    // Next we can proceed to create our network
    var function = new BipolarSigmoidFunction(2);
    var network = new ActivationNetwork(function,
        numberOfInputs, hiddenNeurons, numberOfClasses);

    // Heuristically randomize the network
    new NguyenWidrow(network).Randomize();

    // Create the learning algorithm
    var teacher = new LevenbergMarquardtLearning(network);

    // Teach the network for 10 iterations:
    double error = Double.PositiveInfinity;
    for (int i = 0; i < 10; i++)
        error = teacher.RunEpoch(input, outputs);

    // At this point, the network should be able to
    // perfectly classify the training input points.

    for (int i = 0; i < input.Length; i++)
    {
        int answer;
        double[] output = network.Compute(input[i]);
        double response = output.Max(out answer);

        int expected = labels[i];

        // at this point, the variables 'answer' and
        // 'expected' should contain the same value.
    }
}
static void TestAnn()
{
    // initialize input and output values
    double[][] input =
    {
        new double[] { 0.5, 0 },
        new double[] { 0.2, 0.1 },
        new double[] { 0.4, 0.5 },
        new double[] { 0.3, 0.7 },
        new double[] { 0.75, 0.25 },
        new double[] { 0.3, 0.1 },
        new double[] { 0.2, 0.5 },
        new double[] { 0.2, 0.6 }
    };

    double[][] output =
    {
        new double[] { 0.5 },
        new double[] { 0.3 },
        new double[] { 0.9 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { 0.4 },
        new double[] { 0.7 },
        new double[] { 0.8 }
    };

    int[] numbers = new int[] { 3, 3, 2, 1 };

    // create neural network: two inputs, then layers
    // of 3, 3, 2 and 1 neurons respectively
    ActivationNetwork network = new ActivationNetwork(new SigmoidFunction(1), 2, numbers);

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(network);

    bool needToStop = false;

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output);

        // check error value to see if we need to stop
        //Console.WriteLine(error);
        if (error <= 0.001)
            needToStop = true;
    }

    Write(network.Output, "network.Output");

    int index = 0;
    foreach (Layer layr in network.Layers)
    {
        index += 1;
        Write(layr.Output, string.Format("network.Layers> [{0}] <.Output", index));
    }
    //Write(network.Layers[1].Output, "network.Layers[1].Output");
    //Write(network.Layers[1].Neurons[0].Weights, "network.Layers[1].Neurons[0].Weights");

    double[] testData = { 0.25, 0.25 };
    Write(network.Compute(testData), "testData");

    Console.WriteLine("-------------------------------------");

    double[][] inputtest =
    {
        new double[] { 0.1, 0 },
        new double[] { 0.1, 0.1 },
        new double[] { 0.1, 0.2 },
        new double[] { 0.2, 0.2 }
    };

    for (int j = 0; j < 4; j++)
        Write(network.Compute(inputtest[j]), "result of inputtest");
}
public void BlockHessianTest1()
{
    // Network with no hidden layers: 2-1
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 1);

    var teacher1 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByFiniteDifferences);

    var teacher2 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByBackpropagation);

    teacher2.Blocks = 2;

    // Set lambda to lambda max so no iterations are performed
    teacher1.LearningRate = 1e30f;
    teacher2.LearningRate = 1e30f;

    teacher1.RunEpoch(input, output);
    teacher2.RunEpoch(input, output);

    var hessian1 = teacher1.Hessian;
    var hessian2 = teacher2.Hessian;

    for (int i = 0; i < hessian1.Length; i++)
    {
        for (int j = 0; j < hessian1[i].Length; j++)
        {
            double j1 = hessian1[i][j];
            double j2 = hessian2[i][j];

            Assert.AreEqual(j1, j2, 1e-4);
            Assert.IsFalse(Double.IsNaN(j1));
            Assert.IsFalse(Double.IsNaN(j2));
        }
    }

    Assert.IsTrue(hessian1.IsUpperTriangular());
    Assert.IsTrue(hessian2.IsUpperTriangular());

    var gradient1 = teacher1.Gradient;
    var gradient2 = teacher2.Gradient;

    for (int i = 0; i < gradient1.Length; i++)
    {
        double j1 = gradient1[i];
        double j2 = gradient2[i];

        Assert.AreEqual(j1, j2, 1e-5);
        Assert.IsFalse(Double.IsNaN(j1));
        Assert.IsFalse(Double.IsNaN(j2));
    }
}
public void JacobianByChainRuleTest()
{
    // Network with one hidden layer: 2-2-1
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 2, 1);

    var teacher1 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByFiniteDifferences);

    var teacher2 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByBackpropagation);

    // Set lambda to lambda max so no iterations are performed
    teacher1.LearningRate = 1e30f;
    teacher2.LearningRate = 1e30f;

    teacher1.RunEpoch(input, output);
    teacher2.RunEpoch(input, output);

    PrivateObject privateTeacher1 = new PrivateObject(teacher1);
    PrivateObject privateTeacher2 = new PrivateObject(teacher2);

    var jacobian1 = (float[][])privateTeacher1.GetField("jacobian");
    var jacobian2 = (float[][])privateTeacher2.GetField("jacobian");

    Assert.AreEqual(jacobian1[0][0], -0.47895513745387097, 1e-6);
    Assert.AreEqual(jacobian1[0][1], -0.05863886707282373, 1e-6);
    Assert.AreEqual(jacobian1[0][2], 0.057751100929897485, 1e-6);
    Assert.AreEqual(jacobian1[0][3], 0.0015185010717608583, 1e-6);

    Assert.AreEqual(jacobian1[7][0], -0.185400783651892, 1e-6);
    Assert.AreEqual(jacobian1[7][1], 0.025575161626462877, 1e-6);
    Assert.AreEqual(jacobian1[7][2], 0.070494677797224889, 1e-6);
    Assert.AreEqual(jacobian1[7][3], 0.037740463822781616, 1e-6);

    Assert.AreEqual(jacobian2[0][0], -0.4789595904719437, 1e-6);
    Assert.AreEqual(jacobian2[0][1], -0.058636153936941729, 1e-6);
    Assert.AreEqual(jacobian2[0][2], 0.057748435491340212, 1e-6);
    Assert.AreEqual(jacobian2[0][3], 0.0015184453425611988, 1e-6);

    Assert.AreEqual(jacobian2[7][0], -0.1854008206574258, 1e-6);
    Assert.AreEqual(jacobian2[7][1], 0.025575150379247645, 1e-6);
    Assert.AreEqual(jacobian2[7][2], 0.070494269423259301, 1e-6);
    Assert.AreEqual(jacobian2[7][3], 0.037740117733922635, 1e-6);

    for (int i = 0; i < jacobian1.Length; i++)
    {
        for (int j = 0; j < jacobian1[i].Length; j++)
        {
            double j1 = jacobian1[i][j];
            double j2 = jacobian2[i][j];

            Assert.AreEqual(j1, j2, 1e-4);
            Assert.IsFalse(Double.IsNaN(j1));
            Assert.IsFalse(Double.IsNaN(j2));
        }
    }
}
// Worker thread
void SearchSolution()
{
    // initialize input and output values
    double[][] input = null;
    double[][] output = null;

    if (sigmoidType == 0)
    {
        // unipolar data
        input = new double[4][]
        {
            new double[] { 0, 0 },
            new double[] { 0, 1 },
            new double[] { 1, 0 },
            new double[] { 1, 1 }
        };
        output = new double[4][]
        {
            new double[] { 0 },
            new double[] { 1 },
            new double[] { 1 },
            new double[] { 0 }
        };
    }
    else
    {
        // bipolar data
        input = new double[4][]
        {
            new double[] { -1, -1 },
            new double[] { -1,  1 },
            new double[] {  1, -1 },
            new double[] {  1,  1 }
        };
        output = new double[4][]
        {
            new double[] { -1 },
            new double[] {  1 },
            new double[] {  1 },
            new double[] { -1 }
        };
    }

    // create neural network
    ActivationNetwork network = new ActivationNetwork(
        (sigmoidType == 0) ?
            (IActivationFunction)new SigmoidFunction(sigmoidAlphaValue) :
            (IActivationFunction)new BipolarSigmoidFunction(sigmoidAlphaValue),
        2, 2, 1);

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(network);

    // set learning rate
    teacher.LearningRate = learningRate;

    // iterations
    int iteration = 0;

    // statistics files
    StreamWriter errorsFile = null;

    try
    {
        // check if we need to save statistics to files
        if (saveStatisticsToFiles)
        {
            // open files
            errorsFile = File.CreateText("errors.csv");
        }

        // errors list
        ArrayList errorsList = new ArrayList();

        // loop
        while (!needToStop)
        {
            // run epoch of learning procedure
            double error = teacher.RunEpoch(input, output);
            errorsList.Add(error);

            // save current error
            if (errorsFile != null)
                errorsFile.WriteLine(error);

            // show current iteration & error
            SetText(currentIterationBox, iteration.ToString());
            SetText(currentErrorBox, error.ToString());
            iteration++;

            // check if we need to stop
            if (error <= learningErrorLimit)
                break;
        }

        // show error dynamics
        double[,] errors = new double[errorsList.Count, 2];

        for (int i = 0, n = errorsList.Count; i < n; i++)
        {
            errors[i, 0] = i;
            errors[i, 1] = (double)errorsList[i];
        }

        errorChart.RangeX = new Range(0, errorsList.Count - 1);
        errorChart.UpdateDataSeries("error", errors);
    }
    catch (IOException)
    {
        MessageBox.Show("Failed writing file", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
    finally
    {
        // close files
        if (errorsFile != null)
            errorsFile.Close();
    }

    // enable settings controls
    EnableControls(true);
}
public void JacobianByChainRuleTest_MultipleOutput()
{
    // Network with no hidden layers: 3-4
    int numberOfInputs = 3;
    int numberOfClasses = 4;

    double[][] input =
    {
        new double[] { -1, -1, -1 }, // 0
        new double[] { -1,  1, -1 }, // 1
        new double[] {  1, -1, -1 }, // 1
        new double[] {  1,  1, -1 }, // 0
        new double[] { -1, -1,  1 }, // 2
        new double[] { -1,  1,  1 }, // 3
        new double[] {  1, -1,  1 }, // 3
        new double[] {  1,  1,  1 }  // 2
    };

    int[] labels =
    {
        0, 1, 1, 0, 2, 3, 3, 2,
    };

    double[][] output = Accord.Statistics.Tools
        .Expand(labels, numberOfClasses, -1, 1);

    // Neuron.RandGenerator = new ThreadSafeRandom(0);
    Accord.Math.Random.Generator.Seed = 0;

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), numberOfInputs, numberOfClasses);

    var teacher1 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByFiniteDifferences);

    var teacher2 = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByBackpropagation);

    // Set lambda to lambda max so no iterations are performed
    teacher1.LearningRate = 1e30f;
    teacher2.LearningRate = 1e30f;

    teacher1.RunEpoch(input, output);
    teacher2.RunEpoch(input, output);

    var jacobian1 = teacher1.Jacobian;
    var jacobian2 = teacher2.Jacobian;

    for (int i = 0; i < jacobian1.Length; i++)
    {
        for (int j = 0; j < jacobian1[i].Length; j++)
        {
            double j1 = jacobian1[i][j];
            double j2 = jacobian2[i][j];

            Assert.AreEqual(j1, j2, 1e-3);
            Assert.IsFalse(Double.IsNaN(j1));
            Assert.IsFalse(Double.IsNaN(j2));
        }
    }
}
public void ZeroLambdaTest()
{
    Accord.Math.Random.Generator.Seed = 0;

    double[,] data = null;

    // open selected file
    using (TextReader stream = new StringReader(Properties.Resources.ZeroLambda))
    using (CsvReader reader = new CsvReader(stream, false))
    {
        data = reader.ToTable().ToMatrix(System.Globalization.CultureInfo.InvariantCulture);
    }

    // number of learning samples
    int samples = data.GetLength(0);

    var ranges = data.GetRange(dimension: 0);
    Assert.AreEqual(2, ranges.Length);

    var rangeX = ranges[0];
    var rangeY = ranges[1];

    // data transformation factor
    double yFactor = 1.7 / rangeY.Length;
    double yMin = rangeY.Min;
    double xFactor = 2.0 / rangeX.Length;
    double xMin = rangeX.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[1];
        output[i] = new double[1];

        input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;   // set input
        output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85; // set output
    }

    // Neuron.RandGenerator = new ThreadSafeRandom(0);
    Accord.Math.Random.Generator.Seed = 0;

    // create multi-layer neural network
    var network = new ActivationNetwork(
        new BipolarSigmoidFunction(5), 1, 12, 1);

    // create teacher
    var teacher = new LevenbergMarquardtLearning(network, true);

#if MONO
    teacher.ParallelOptions.MaxDegreeOfParallelism = 1;
#endif

    teacher.LearningRate = 1;

    // iterations
    int iteration = 1;
    int iterations = 2000;

    // solution array
    double[,] solution = new double[samples, 2];
    double[] networkInput = new double[1];

    bool needToStop = false;
    double learningError = 0;

    // loop
    while (!needToStop)
    {
        Assert.AreNotEqual(0, teacher.LearningRate);

        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution
        for (int j = 0; j < samples; j++)
        {
            networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
            solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
        }

        // calculate error
        learningError = 0.0;
        for (int j = 0; j < samples; j++)
        {
            networkInput[0] = input[j][0];
            learningError += Math.Abs(data[j, 1] - ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
        }

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    Assert.IsTrue(learningError < 0.13);
}
public void SearchSolution()
{
    int length = tmp.Count;

    double[][] inputs;
    double[][] outputs;
    double[][] matrix;
    GetData(out inputs, out outputs, out matrix, tmp);

    // create multi-layer neural network
    this.ann = new ActivationNetwork(new BipolarSigmoidFunction(sigmoidAlphaValue),
        9, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        if (useSameWeights)
            Accord.Math.Random.Generator.Seed = 1;

        NguyenWidrow initializer = new NguyenWidrow(ann);
        initializer.Randomize();
    }

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization);

    // set learning rate
    teacher.LearningRate = learningRate;

    // iterations
    iteration = 1;

    var ranges = matrix.GetRange(0);
    double[][] map = Matrix.Mesh(ranges[0], 200, ranges[1], 200);

    //var sw = Stopwatch.StartNew();

    // loop
    while (true)
    {
        // run epoch of learning procedure
        error = teacher.RunEpoch(inputs, outputs) / length;

        var result = map.Apply(ann.Compute).GetColumn(0).Apply(Math.Sign);
        var graph = map.ToMatrix().InsertColumn(result.ToDouble());

        // increase current iteration
        iteration++;

        //elapsed = sw.Elapsed;
        //updateStatus();

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    ANN_END = true;
    //sw.Stop();
}
//Machine Learning
void IDataminingDatabase.doMachineLearning(string[] inputFields, string outcomeField, string instrument, string savePath)
{
    string name = "ANN";

    double learningRate = 0.1;
    double sigmoidAlphaValue = 2;
    int iterations = 100;
    bool useRegularization = false;
    bool useNguyenWidrow = false;
    bool useSameWeights = false;

    progress.setProgress(name, "Creating ANN...");

    // create multi-layer neural network
    ActivationNetwork ann = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        inputFields.Length, 20, 2); // how many neurons? the standard is 1

    if (useNguyenWidrow)
    {
        progress.setProgress(name, "Creating NguyenWidrow...");

        if (useSameWeights)
            Accord.Math.Random.Generator.Seed = 0;

        NguyenWidrow initializer = new NguyenWidrow(ann);
        initializer.Randomize();
    }

    progress.setProgress(name, "Creating LevenbergMarquardtLearning...");

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization); //, JacobianMethod.ByBackpropagation

    // set learning rate
    teacher.LearningRate = learningRate;

    IMongoQuery fieldsExistQuery = Query.And(Query.Exists(outcomeField + "_buy"), Query.Exists(outcomeField + "_sell"));
    foreach (string inputField in inputFields)
        fieldsExistQuery = Query.And(fieldsExistQuery, Query.Exists(inputField));

    progress.setProgress(name, "Importing...");

    // Load data
    long start = database.getFirstTimestamp();
    long end = database.getLastTimestamp();

    var collection = mongodb.getDB().GetCollection("prices");
    var docs = collection.FindAs<BsonDocument>(Query.And(fieldsExistQuery,
        Query.EQ("instrument", instrument),
        Query.LT("timestamp", end),
        Query.GTE("timestamp", start))).SetSortOrder(SortBy.Ascending("timestamp"));
    docs.SetFlags(QueryFlags.NoCursorTimeout);

    long resultCount = docs.Count();

    // Copy the documents into input and output arrays
    progress.setProgress(name, "Casting to array...");
    double[][] inputs = new double[resultCount][];  // [inputFields.Length]
    double[][] outputs = new double[resultCount][]; // [2]

    int row = 0;
    foreach (var doc in docs)
    {
        outputs[row] = new double[] { doc[outcomeField + "_buy"].AsInt32, doc[outcomeField + "_sell"].AsInt32 };

        double[] inputRow = new double[inputFields.Length];
        for (int i = 0; i < inputFields.Length; i++)
        {
            double value = doc[inputFields[i]].AsDouble;
            if (double.IsInfinity(value) || double.IsNegativeInfinity(value) || double.IsNaN(value))
                throw new Exception("Invalid value!");
            else
                inputRow[i] = value;
        }
        inputs[row] = inputRow; // check these values

        row++;
    }

    // Teach the ANN
    for (int iteration = 0; iteration < iterations; iteration++)
    {
        progress.setProgress(name, "Teaching... " + iteration + " of " + iterations);
        double error = teacher.RunEpoch(inputs, outputs);

        if (savePath != null)
            ann.Save(savePath);
    }

    // Compute the error
    progress.setProgress(name, "Calculating error...");

    int successes = 0;
    int fails = 0;

    for (int i = 0; i < inputs.Length; i++)
    {
        var realOutput = outputs[i];
        double[] calculated = ann.Compute(inputs[i]);

        // Buys
        if (calculated[0] == 0 || calculated[0] == realOutput[0])
            successes++;
        if (calculated[0] == 1 && realOutput[0] == 0)
            fails++;

        // Sells
        if (calculated[1] == 0 || calculated[1] == realOutput[1])
            successes++;
        if (calculated[1] == 1 && realOutput[1] == 0)
            fails++;
    }

    double successRate = (double)successes / (inputs.Length * 2);
    double failRate = (double)fails / (inputs.Length * 2);

    progress.setProgress(name, "Finished with successRate of " + successRate + " failRate of " + failRate);
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = sourceMatrix.GetLength(0);

    // prepare learning data
    double[][] inputs = sourceMatrix.Submatrix(null, 0, 1).ToArray();
    double[][] outputs = sourceMatrix.GetColumn(2).Transpose().ToArray();

    // create multi-layer neural network
    ann = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        2, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        if (useSameWeights)
            Accord.Math.Tools.SetupGenerator(0);

        NguyenWidrow initializer = new NguyenWidrow(ann);
        initializer.Randomize();
    }

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization);

    // set learning rate
    teacher.LearningRate = learningRate;

    // iterations
    iteration = 1;

    var ranges = Matrix.Range(sourceMatrix);
    double[][] map = Matrix.Mesh(ranges[0], ranges[1], 0.05, 0.05);

    var sw = Stopwatch.StartNew();

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        error = teacher.RunEpoch(inputs, outputs) / samples;

        var result = map.Apply(ann.Compute).GetColumn(0).Apply(Math.Sign);
        var graph = map.ToMatrix().InsertColumn(result.ToDouble());

        CreateScatterplot(zedGraphControl2, graph);

        // increase current iteration
        iteration++;

        elapsed = sw.Elapsed;
        updateStatus();

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    sw.Stop();

    // enable settings controls
    EnableControls(true);
}
public NeuralNetworkController()
{
    network = new ActivationNetwork(new SigmoidFunction(), 5, 4, 3);
    network.Randomize();
    teacher = new LevenbergMarquardtLearning(network);
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = data.GetLength(0);

    // prepare learning data
    DoubleRange unit = new DoubleRange(-1, 1);
    double[][] input = data.GetColumn(0).Scale(fromRange: xRange, toRange: unit).ToArray();
    double[][] output = data.GetColumn(1).Scale(fromRange: yRange, toRange: unit).ToArray();

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        1, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
        new NguyenWidrow(network).Randomize();

    // create teacher
    var teacher = new LevenbergMarquardtLearning(network, useRegularization);

    // set learning rate
    teacher.LearningRate = learningRate;

    // iterations
    int iteration = 1;

    // solution array
    double[,] solution = new double[samples, 2];

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution
        for (int j = 0; j < samples; j++)
        {
            double x = input[j][0];
            double y = network.Compute(new[] { x })[0];
            solution[j, 0] = x.Scale(fromRange: unit, toRange: xRange);
            solution[j, 1] = y.Scale(fromRange: unit, toRange: yRange);
        }

        chart.UpdateDataSeries("solution", solution);

        // calculate error
        double learningError = 0.0;
        for (int j = 0; j < samples; j++)
        {
            double x = input[j][0];
            double expected = data[j, 1];
            double actual = network.Compute(new[] { x })[0];
            learningError += Math.Abs(expected - actual);
        }

        // set current iteration's info
        SetText(currentIterationBox, iteration.ToString());
        SetText(currentErrorBox, learningError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    // enable settings controls
    EnableControls(true);
}
static void Main(string[] args)
{
    // sample input
    double[][] inputs =
    {
        new double[] { 0, 0 },
        new double[] { 1, 0 },
        new double[] { 0, 1 },
        new double[] { 1, 1 },
    };

    // sample binary output
    int[] outputs = { 0, 1, 1, 0, };

    // sample binary output for Neural Network
    double[][] nnOutputs =
    {
        new double[] { 1, 0 },
        new double[] { 0, 1 },
        new double[] { 0, 1 },
        new double[] { 1, 0 },
    };

    // sample multinomial output
    int[] multiOutputs = { 0, 1, 1, 2, };

    // 1. Binary Logistic Regression
    var learner = new IterativeReweightedLeastSquares<LogisticRegression>()
    {
        MaxIterations = 100
    };
    var model = learner.Learn(inputs, outputs);

    var preds = model.Decide(inputs);
    Console.WriteLine("\n\n*Binary Logistic Regression Predictions: {0}", String.Join(", ", preds));

    // 2. Multinomial Logistic Regression
    var learner2 = new MultinomialLogisticLearning<GradientDescent>()
    {
        MiniBatchSize = 4
    };
    var model2 = learner2.Learn(inputs, multiOutputs);

    var preds2 = model2.Decide(inputs);
    Console.WriteLine("\n\n*Multinomial Logistic Regression Predictions: {0}", String.Join(", ", preds2));

    // 3. Binary Naive Bayes Classifier
    var learner3 = new NaiveBayesLearning<NormalDistribution>();
    var model3 = learner3.Learn(inputs, outputs);

    var preds3 = model3.Decide(inputs);
    Console.WriteLine("\n\n*Binary Naive Bayes Predictions: {0}", String.Join(", ", preds3));

    // 4. RandomForest
    var learner4 = new RandomForestLearning()
    {
        NumberOfTrees = 3,
        CoverageRatio = 0.9,
        SampleRatio = 0.9
    };
    var model4 = learner4.Learn(inputs, outputs);

    var preds4 = model4.Decide(inputs);
    Console.WriteLine("\n\n*Binary RandomForest Classifier Predictions: {0}", String.Join(", ", preds4));

    // 5. SVM
    var learner5 = new SequentialMinimalOptimization<Gaussian>();
    var model5 = learner5.Learn(inputs, outputs);

    var preds5 = model5.Decide(inputs);
    Console.WriteLine("\n\n*Binary SVM Predictions: {0}", String.Join(", ", preds5));

    // 6. Neural Network
    var network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 1, 2);

    var teacher = new LevenbergMarquardtLearning(network);

    Console.WriteLine("\n-- Training Neural Network");
    int numEpoch = 3;
    double error = Double.PositiveInfinity;
    for (int i = 0; i < numEpoch; i++)
    {
        error = teacher.RunEpoch(inputs, nnOutputs);
        Console.WriteLine("* Epoch {0} - error: {1:0.0000}", i + 1, error);
    }

    double[][] nnPreds = inputs.Select(x => network.Compute(x)).ToArray();
    int[] preds6 = nnPreds.Select(x => x.ToList().IndexOf(x.Max())).ToArray();
    Console.WriteLine("\n\n*Binary Neural Network Predictions: {0}", String.Join(", ", preds6));

    Console.WriteLine("\n\n\n\nDONE!!");
    Console.ReadKey();
}
public void Train(Policy[] policies)
{
    // Feature retrieving
    foreach (Policy policy in policies)
    {
        foreach (string ruleName in policy.RuleNames)
        {
            string[] queries = _queriesRetriever.RetrieveQueriesFor(policy.Name, ruleName);
            string[] rawContents = _contentRetriever.RetrieveContents(queries);
            string[] rawContentsContext = _contextRetriever.RetrieveContentContext(rawContents);
            string[] preprocessedContents = _preprocessor.PreprocessContents(rawContentsContext);
            CountedWord[] interactionsWords = _wordsRetriever.RetrieveWords(preprocessedContents);

            string[] wordList = _wordListManager.Get(policy.Name, ruleName);
            if (wordList == null)
            {
                wordList = _wordListRetriever.RetrieveWordList(interactionsWords);
                _wordListManager.Save(policy.Name, ruleName, wordList);
            }

            double[][] inputs = _featuresRetriever.RetrieveFeatures(interactionsWords, wordList);
            double[] outputs = _labelsRetriever.RetrieveRuleLabes(policy.Name, ruleName);

            ActivationNetwork network = _nnManager.Get(policy.Name, ruleName);

            // Create a Levenberg-Marquardt algorithm
            var teacher = new LevenbergMarquardtLearning(network)
            {
                UseRegularization = true
            };

            // Because the network is expecting multiple outputs,
            // we have to convert our single variable into arrays.
            // We should make sure the outputs are in [0...1].
            double[][] y = outputs.ToJagged();

            // Iterate until the stop criterion is met
            double error = double.PositiveInfinity;
            double previous;

            Dictionary<int, double> epochError = new Dictionary<int, double>();
            int currentEpoch = 1;

            do
            {
                previous = error;

                // Compute one learning iteration
                error = teacher.RunEpoch(inputs, y);
                epochError.Add(currentEpoch++, error);

            } while (Math.Abs(previous - error) > 0.001);

            _nnManager.Save(network, policy.Name, ruleName);

            // Classify the samples using the model
            double[] decimalAnswers = inputs.Apply(network.Compute).GetColumn(0); // can be used as a probability
        }
    }
}
private static void BuildNNModel(double[][] trainInput, int[] trainOutput, double[][] testInput, int[] testOutput)
{
    // one-hot encode the training labels
    double[][] outputs = Accord.Math.Jagged.OneHot(trainOutput);

    var function = new BipolarSigmoidFunction(2);
    var network = new ActivationNetwork(function, 91, 20, 10);

    var teacher = new LevenbergMarquardtLearning(network);

    Console.WriteLine("\n-- Training Neural Network");
    int numEpoch = 10;
    double error = Double.PositiveInfinity;
    for (int i = 0; i < numEpoch; i++)
    {
        error = teacher.RunEpoch(trainInput, outputs);
        Console.WriteLine("* Epoch {0} - error: {1:0.0000}", i + 1, error);
    }
    Console.WriteLine("");

    List<int> inSamplePredsList = new List<int>();
    for (int i = 0; i < trainInput.Length; i++)
    {
        double[] output = network.Compute(trainInput[i]);
        int pred = output.ToList().IndexOf(output.Max());
        inSamplePredsList.Add(pred);
    }

    List<int> outSamplePredsList = new List<int>();
    for (int i = 0; i < testInput.Length; i++)
    {
        double[] output = network.Compute(testInput[i]);
        int pred = output.ToList().IndexOf(output.Max());
        outSamplePredsList.Add(pred);
    }

    int[] inSamplePreds = inSamplePredsList.ToArray();
    int[] outSamplePreds = outSamplePredsList.ToArray();

    // Accuracy
    double inSampleAccuracy = 1 - new ZeroOneLoss(trainOutput).Loss(inSamplePreds);
    double outSampleAccuracy = 1 - new ZeroOneLoss(testOutput).Loss(outSamplePreds);

    Console.WriteLine("* In-Sample Accuracy: {0:0.0000}", inSampleAccuracy);
    Console.WriteLine("* Out-of-Sample Accuracy: {0:0.0000}", outSampleAccuracy);

    // Build confusion matrix
    int[][] confMatrix = BuildConfusionMatrix(testOutput, outSamplePreds, 10);
    System.IO.File.WriteAllLines(
        Path.Combine(@"\\Mac\Home\Documents\c-sharp-machine-learning\ch.8\input-data", "nn-conf-matrix.csv"),
        confMatrix.Select(x => String.Join(",", x)));

    // Precision & recall
    PrintPrecisionRecall(confMatrix);
    DrawROCCurve(testOutput, outSamplePreds, 10, "NN");
}
public void Learn()
{
    try
    {
        //if (object.Equals(networkStruct, null)) { networkStruct = GetLayersStruct(LayersStruct); }
        if (Equals(networkStruct, null))
            return;

        if (ANN_InputsCount == -1 || ANN_OuputsCount == -1)
            return;

        if (Equals(ActiveFunction_Params, null))
            throw new Exception("No activation function parameters are specified!");

        if (ActiveFunction_Params.Length < 1)
            throw new Exception("No activation function parameters are specified!");

        if (Equals(LearningAlgorithm_Params, null))
            throw new Exception("No learning algorithm parameters are specified!");

        // create neural network (inputs count, then neurons per layer)
        //Network = new ActivationNetwork(new SigmoidFunction(1), mInputsCount, networkStruct);
        //Network = new ActivationNetwork(new BipolarSigmoidFunction(2), mInputsCount, networkStruct);
        switch (ActivationFunction)
        {
            case ActivationFunctionEnum.LinearFunction:
                Network = new ActivationNetwork(new LinearFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                break;

            case ActivationFunctionEnum.SigmoidFunction:
                Network = new ActivationNetwork(new SigmoidFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                break;

            case ActivationFunctionEnum.BipolarSigmoidFunction:
                Network = new ActivationNetwork(new BipolarSigmoidFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                break;

            default:
                Network = new ActivationNetwork(new SigmoidFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                break;
        }

        // create teacher
        ISupervisedLearning teacher = null;
        //LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(Network);
        //BackPropagationLearning teacher = new BackPropagationLearning(Network);
        //EvolutionaryLearning teacher = new EvolutionaryLearning(Network, 25);
        switch (LearningAlgorithm)
        {
            case LearningAlgorithmEnum.BackPropagationLearning:
                if (LearningAlgorithm_Params.Length < 2)
                    throw new Exception("Not enough learning algorithm parameters are specified!");

                teacher = new BackPropagationLearning(Network);
                var teacherBP = (BackPropagationLearning)teacher;
                teacherBP.LearningRate = LearningAlgorithm_Params[0];
                teacherBP.Momentum = LearningAlgorithm_Params[1];
                teacher = teacherBP;
                break;

            case LearningAlgorithmEnum.LevenbergMarquardtLearning:
                if (LearningAlgorithm_Params.Length < 2)
                    throw new Exception("Not enough learning algorithm parameters are specified!");

                teacher = new LevenbergMarquardtLearning(Network);
                var teacherLM = (LevenbergMarquardtLearning)teacher;
                teacherLM.LearningRate = LearningAlgorithm_Params[0];
                teacherLM.Adjustment = LearningAlgorithm_Params[1];
                teacherLM.UseRegularization = false;
                teacher = teacherLM;
                break;

            case LearningAlgorithmEnum.BayesianLevenbergMarquardtLearning:
                throw new NotImplementedException("The implementation is not finished yet.");

                // unreachable until the implementation is finished:
                if (LearningAlgorithm_Params.Length < 4)
                    throw new Exception("Not enough learning algorithm parameters are specified!");

                teacher = new LevenbergMarquardtLearning(Network);
                var teacherBLM = (LevenbergMarquardtLearning)teacher;
                teacherBLM.UseRegularization = true;
                teacherBLM.LearningRate = LearningAlgorithm_Params[0];
                teacherBLM.Adjustment = LearningAlgorithm_Params[1];
                teacherBLM.Alpha = LearningAlgorithm_Params[2];
                teacherBLM.Beta = LearningAlgorithm_Params[3];
                teacher = teacherBLM;
                break;

            case LearningAlgorithmEnum.EvolutionaryLearningGA:
                if (LearningAlgorithm_Params.Length < 1)
                    throw new Exception("Not enough learning algorithm parameters are specified!");

                teacher = new EvolutionaryLearning(Network, (int)LearningAlgorithm_Params[0]);
                var teacherEGA = (EvolutionaryLearning)teacher;
                break;

            case LearningAlgorithmEnum.RGA_Learning:
                throw new NotImplementedException();
                //teacher = new RGA_Learning(Network, EOA_PopulationSize, RGA_MutationPhrequency);
                break;

            case LearningAlgorithmEnum.GSA_Learning:
                throw new NotImplementedException();
                //teacher = new GSA_Learning(Network, EOA_PopulationSize, MaxIteration, GSA_Go, GSA_Alpha);
                break;

            case LearningAlgorithmEnum.GWO_Learning:
                throw new NotImplementedException();
                //teacher = new GWO_Learning(Network, EOA_PopulationSize, MaxIteration, GWO_Version, IGWO_uParameter);
                break;

            case LearningAlgorithmEnum.HPSOGWO_Learning:
                throw new NotImplementedException();
                //teacher = new HPSOGWO_Learning(Network, EOA_PopulationSize, MaxIteration, HPSOGWO_C1, HPSOGWO_C2, HPSOGWO_C3);
                break;

            case LearningAlgorithmEnum.mHPSOGWO_Learning:
                throw new NotImplementedException();
                //teacher = new HPSOGWO_Learning(Network, EOA_PopulationSize, MaxIteration, HPSOGWO_C1, HPSOGWO_C2, HPSOGWO_C3);
                break;

            case LearningAlgorithmEnum.PSOGSA_Learning:
                if (Equals(LearningAlgorithm_Params, null))
                    throw new Exception("No learning algorithm parameters are specified!");
                if (LearningAlgorithm_Params.Length < 6)
                    throw new Exception("Not enough learning algorithm parameters are specified!");

                teacher = new PSOGSA_Learning(Network,
                    (int)LearningAlgorithm_Params[0], (int)LearningAlgorithm_Params[1],
                    (int)LearningAlgorithm_Params[2], (int)LearningAlgorithm_Params[3],
                    (int)LearningAlgorithm_Params[4], (int)LearningAlgorithm_Params[5]);
                break;
        }

        bool needToStop = false;
        IterationCounter = 0;
        double error = double.NaN;

        // loop
        while (!needToStop)
        {
            // run epoch of learning procedure
            error = teacher.RunEpoch(mTraining_Inputs, mTraining_Outputs);
            IterationCounter += 1;

            // check error value to see if we need to stop
            //Console.WriteLine(error);
            if (error <= mTeachingError || IterationCounter >= MaxIteration)
                needToStop = true;
        }

        FinalTeachingErr = error;

        //----------------------------------
        switch (LearningAlgorithm)
        {
            case LearningAlgorithmEnum.GSA_Learning:
                throw new NotImplementedException();
                //GSA_Learning gsaL = (GSA_Learning)teacher;
                //this.BestChart = gsaL.Best_Chart;
                //this.BestWeights = gsaL.BestSolution;
                //Set best weights parameters to the network:
                //SetBestWeightsToTheNetwork();
                break;

            case LearningAlgorithmEnum.HPSOGWO_Learning:
                throw new NotImplementedException();
                //HPSOGWO_Learning hpgwoL = (HPSOGWO_Learning)teacher;
                //this.BestChart = hpgwoL.Best_Chart;
                //this.BestWeights = hpgwoL.BestSolution;
                //Set best weights parameters to the network:
                //SetBestWeightsToTheNetwork();
                break;

            case LearningAlgorithmEnum.mHPSOGWO_Learning:
                throw new NotImplementedException();
                //HPSOGWO_Learning hpsgwoL = (HPSOGWO_Learning)teacher;
                //this.BestChart = hpsgwoL.Best_Chart;
                //this.BestWeights = hpsgwoL.BestSolution;
                //Set best weights parameters to the network:
                //SetBestWeightsToTheNetwork();
                break;

            case LearningAlgorithmEnum.GWO_Learning:
                throw new NotImplementedException();
                //GWO_Learning gwoL = (GWO_Learning)teacher;
                //this.BestChart = gwoL.Best_Chart;
                //this.BestWeights = gwoL.BestSolution;
                //Set best weights parameters to the network:
                //SetBestWeightsToTheNetwork();
                break;

            case LearningAlgorithmEnum.RGA_Learning:
                throw new NotImplementedException();
                //RGA_Learning rgaL = (RGA_Learning)teacher;
                //this.BestChart = rgaL.Best_Chart;
                //this.BestWeights = rgaL.BestSolution;
                //Set best weights parameters to the network:
                //SetBestWeightsToTheNetwork();
                break;

            case LearningAlgorithmEnum.PSOGSA_Learning:
                PSOGSA_Learning psogsaL = (PSOGSA_Learning)teacher;
                BestChart = psogsaL.Best_Chart;
                BestWeights = psogsaL.BestSolution;

                // Set best weights parameters to the network:
                SetBestWeightsToTheNetwork();
                break;
        }
    }
    catch (Exception)
    {
        throw; // rethrow, preserving the original stack trace
    }
}
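Taken together, these samples share one workflow: build an ActivationNetwork, optionally initialize it with NguyenWidrow, create a LevenbergMarquardtLearning teacher, and call RunEpoch until the error stops improving. The following is a minimal self-contained sketch of that common pattern, using only APIs that appear in the samples above; the method name TrainUntilConverged, the default of 5 hidden neurons, and the 1e-10 tolerance are illustrative choices, not taken from any of the original snippets.

using System;
using Accord.Neuro;
using Accord.Neuro.Learning;

static class LevenbergMarquardtExample
{
    // Train a small bipolar-sigmoid network on the given data
    // and return the final epoch error.
    static double TrainUntilConverged(double[][] inputs, double[][] outputs, int hiddenNeurons = 5)
    {
        // network sized from the data: inputs - hidden - outputs
        var network = new ActivationNetwork(new BipolarSigmoidFunction(2),
            inputs[0].Length, hiddenNeurons, outputs[0].Length);

        // heuristic weight initialization, as used in several samples above
        new NguyenWidrow(network).Randomize();

        var teacher = new LevenbergMarquardtLearning(network);

        double error = double.PositiveInfinity;
        double previous;
        do
        {
            previous = error;
            error = teacher.RunEpoch(inputs, outputs); // one full pass over the data
        } while (Math.Abs(previous - error) > 1e-10 * Math.Abs(error));

        return error;
    }
}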