public void test()
{
    // initialize input and output values (OR truth table)
    var input = new double[4][]
    {
        new double[] { 0, 0 },
        new double[] { 0, 1 },
        new double[] { 1, 0 },
        new double[] { 1, 1 }
    };
    var output = new double[4][]
    {
        new double[] { 0 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { 1 }
    };

    // create neural network
    var network = new ActivationNetwork(
        new SigmoidFunction(2),
        2,  // two inputs in the network
        //2, // two neurons in the first layer
        1); // one neuron in the second layer

    // create teacher
    var teacher = new BackPropagationLearning(network);

    // loop
    while (true)
    {
        // run epoch of learning procedure
        var error = teacher.RunEpoch(input, output);

        // check error value to see if we need to stop
        if (error < 0.001)
        {
            break;
        }
    }

    Console.WriteLine(
        network.Compute(new double[] { 0, 0 })[0] + "," +
        network.Compute(new double[] { 0, 1 })[0] + "," +
        network.Compute(new double[] { 1, 0 })[0] + "," +
        network.Compute(new double[] { 1, 1 })[0]);
}
public void Test()
{
    ActivationNetwork network = new ActivationNetwork(
        new SigmoidFunction(),
        2,  // two inputs in the network
        2,  // two neurons in the first layer
        1); // one neuron in the second layer

    BackPropagationLearning teacher = new BackPropagationLearning(network);

    double lastError = double.MaxValue;
    int counter = 0;
    while (true)
    {
        counter++;
        var error = teacher.RunEpoch(input, output);
        // stop once the error has both converged and become small enough
        if (lastError - error < 0.0000001 && error < 0.001)
            break;
        lastError = error;
    }

    Assert.IsTrue(Math.Abs(network.Compute(input[0])[0] - output[0][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[1])[0] - output[1][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[2])[0] - output[2][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[3])[0] - output[3][0]) < 0.03);

    Console.WriteLine($"Loop counter = {counter}.");
}
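Both this test and TestGenetic below rely on input and output fields that are defined elsewhere in the test class and not shown. A minimal sketch of those fields, assuming the XOR truth table used by the other examples in this section:

// A minimal sketch of the fields assumed by Test() and TestGenetic();
// the XOR truth table is an assumption based on the other examples here.
private readonly double[][] input =
{
    new double[] { 0, 0 },
    new double[] { 0, 1 },
    new double[] { 1, 0 },
    new double[] { 1, 1 }
};

private readonly double[][] output =
{
    new double[] { 0 },
    new double[] { 1 },
    new double[] { 1 },
    new double[] { 0 }
};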
static void Main(string[] args)
{
    // initialize input and output values (XOR truth table)
    double[][] input = new double[4][]
    {
        new double[] { 0, 0 },
        new double[] { 0, 1 },
        new double[] { 1, 0 },
        new double[] { 1, 1 }
    };
    double[][] output = new double[4][]
    {
        new double[] { 0 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { 0 }
    };

    // create neural network
    ActivationNetwork network = new ActivationNetwork(
        new SigmoidFunction(1),
        2,  // two inputs in the network
        2,  // two neurons in the first layer
        1); // one neuron in the second layer

    // create teacher
    BackPropagationLearning teacher = new BackPropagationLearning(network);

    // loop for a fixed number of epochs
    for (int i = 0; i < 10000; i++)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output);
        Console.Out.WriteLine("#" + i + "\t" + error);
    }

    double[] ret1 = network.Compute(new double[] { 0, 0 });
    double[] ret2 = network.Compute(new double[] { 1, 0 });
    double[] ret3 = network.Compute(new double[] { 0, 1 });
    double[] ret4 = network.Compute(new double[] { 1, 1 });

    Console.Out.WriteLine();
    Console.Out.WriteLine("Eval(0, 0) = " + ret1[0]);
    Console.Out.WriteLine("Eval(1, 0) = " + ret2[0]);
    Console.Out.WriteLine("Eval(0, 1) = " + ret3[0]);
    Console.Out.WriteLine("Eval(1, 1) = " + ret4[0]);

    Console.ReadLine();
}
public void TestGenetic()
{
    ActivationNetwork network = new ActivationNetwork(new SigmoidFunction(), 2, 2, 1);
    EvolutionaryLearning superTeacher = new EvolutionaryLearning(network, 10);

    double lastError = double.MaxValue;
    int counter = 0;
    while (true)
    {
        counter++;
        var error = superTeacher.RunEpoch(input, output);
        if (lastError - error < 0.0000001 && error < 0.0001)
            break;
        lastError = error;
    }

    Assert.IsTrue(Math.Abs(network.Compute(input[0])[0] - output[0][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[1])[0] - output[1][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[2])[0] - output[2][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[3])[0] - output[3][0]) < 0.03);

    Console.WriteLine($"Loop counter = {counter}.");
}
public void Test()
{
    var network = new ActivationNetwork(
        new SigmoidFunction(),
        inputCount,
        firstLayerNeurons, secondLayerNeurons, thirdLayerNeurons, lastLayerNeurons);

    var teacher = new BackPropagationLearning(network);

    var lastError = double.MaxValue;
    var counter = 0;
    while (true)
    {
        counter++;
        var error = teacher.RunEpoch(input, output);
        if ((lastError - error < 0.00001 && error < 0.01) || counter > 1200000)
            break;
        lastError = error;
    }

    var result1 = network.Compute(new double[] { 1, 0, 1, 0, 1, 0, 1, 0 });
    Console.WriteLine($"2 + 2, 2 * 2 = {result1[0]}, {result1[1]}");

    var result2 = network.Compute(new double[] { 0, 1, 0, 1, 1, 0, 0, 1 });
    Console.WriteLine($"1 + 1, 2 * 1 = {result2[0]}, {result2[1]}");

    var result3 = network.Compute(new double[] { 1, 0, 1, 0, 0, 1, 0, 0 });
    Console.WriteLine($"2 + 2, 1 * 0 = {result3[0]}, {result3[1]}");

    var result4 = network.Compute(new double[] { 0, 1, 0, 0, 0, 1, 1, 0 });
    Console.WriteLine($"1 + 0, 1 * 2 = {result4[0]}, {result4[1]}");
}
public double Evaluate(IChromosome chromosome)
{
    // construct a neural network from the chromosome and evaluate it
    DoubleArrayChromosome dac = (DoubleArrayChromosome)chromosome;

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        mArchitecture[0],
        mArchitecture[1],
        mArchitecture[2]);

    int current = 0;

    // hidden layer weights
    for (int i = 0; i < mArchitecture[1]; i++)
    {
        for (int j = 0; j < mArchitecture[0]; j++)
        {
            network[0][i][j] = dac.Value[current++];
        }
    }

    // output layer weights
    for (int i = 0; i < mArchitecture[2]; i++)
    {
        for (int j = 0; j < mArchitecture[1]; j++)
        {
            network[1][i][j] = dac.Value[current++];
        }
    }

    // sum of squared errors over the training set
    double sum = 0.0;
    for (int cnt = 0; cnt < mInput.Length; cnt++)
    {
        double[] predictedOutput = network.Compute(mInput[cnt]);
        for (int l = 0; l < predictedOutput.Length; l++)
        {
            double diff = predictedOutput[l] - mOutput[cnt][l];
            sum += diff * diff;
        }
    }

    // higher fitness for lower error
    return 100 - sum;
}
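This Evaluate method has the shape of AForge.Genetic's IFitnessFunction.Evaluate. A minimal sketch of wiring such a fitness function into a genetic population follows; the class name NetworkFitness, the architecture values, and the population settings are assumptions, not part of the original code:

// A minimal sketch (not from the original source), assuming the Evaluate
// method above lives in a class NetworkFitness : IFitnessFunction and
// that 'architecture' holds { inputs, hidden, outputs }.
int[] architecture = { 2, 2, 1 };

// one gene per network weight (note: Evaluate above does not evolve thresholds)
int weightCount = architecture[1] * architecture[0]
                + architecture[2] * architecture[1];

var ancestor = new DoubleArrayChromosome(
    new AForge.Math.Random.UniformGenerator(new AForge.Range(-1f, 1f)),
    new AForge.Math.Random.UniformGenerator(new AForge.Range(-1f, 1f)),
    new AForge.Math.Random.UniformGenerator(new AForge.Range(-1f, 1f)),
    weightCount);

var population = new Population(100, ancestor,
    new NetworkFitness(/* architecture, training inputs and outputs */),
    new EliteSelection());

for (int epoch = 0; epoch < 1000; epoch++)
    population.RunEpoch();

// best weights found so far
var best = (DoubleArrayChromosome)population.BestChromosome;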
public void RunEpochTest1()
{
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 2, 1);

    var teacher = new ParallelResilientBackpropagationLearning(network);

    double error = 1.0;
    while (error > 1e-5)
        error = teacher.RunEpoch(input, output);

    for (int i = 0; i < input.Length; i++)
    {
        double actual = network.Compute(input[i])[0];
        double expected = output[i][0];

        Assert.AreEqual(expected, actual, 0.01);
        Assert.IsFalse(Double.IsNaN(actual));
    }
}
private double ComputeCVError(ActivationNetwork network, double[][] dataIn, double[][] dataOut)
{
    // half of the mean squared error over the given data set
    double error = 0;
    for (int i = 0; i < dataIn.Length; i++)
    {
        double[] output = network.Compute(dataIn[i]);
        for (int j = 0; j < output.Length; j++)
            error += (output[j] - dataOut[i][j]) * (output[j] - dataOut[i][j]);
    }
    return error / 2 / dataIn.Length;
}
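AForge's backpropagation RunEpoch accumulates the (halved) squared error over the epoch, so dividing it by the sample count should put it on roughly the same per-sample scale as ComputeCVError; this is an assumption worth verifying against the library version in use. A minimal usage sketch, where trainIn/trainOut and valIn/valOut are hypothetical, pre-split sample arrays:

// A minimal usage sketch; trainIn/trainOut and valIn/valOut are
// assumed, pre-split sample arrays (hypothetical names).
var teacher = new BackPropagationLearning(network);

double trainError = teacher.RunEpoch(trainIn, trainOut) / trainIn.Length;
double valError = ComputeCVError(network, valIn, valOut);

// a rising valError while trainError still falls suggests overfitting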
static void Main(string[] args)
{
    Console.WriteLine("This is a demo application that combines Linear Discriminant Analysis (LDA) and a Multilayer Perceptron (MLP).");

    double[,] inputs =
    {
        { 4,  1 }, { 2, 4 }, { 2, 3 }, { 3, 6 }, {  4, 4 },
        { 9, 10 }, { 6, 8 }, { 9, 5 }, { 8, 7 }, { 10, 8 }
    };

    int[] output = { 1, 1, 2, 1, 1, 2, 2, 2, 1, 2 };

    Console.WriteLine("\r\nProcessing sample data, please wait...");

    // 1.1 create the analysis
    var lda = new LinearDiscriminantAnalysis(inputs, output);

    // 1.2 compute the analysis
    lda.Compute();

    // 1.3 project the inputs
    double[,] projection = lda.Transform(inputs);

    // LDA and the MLP expect slightly different inputs,
    // e.g. double[,] vs. double[][]; LDA needs integer classes while
    // the MLP needs classes in the range [0..1]
    #region conversions
    int vector_count = projection.GetLength(0);
    int dimensions = projection.GetLength(1);

    // convert for the neural network
    double[][] input2 = new double[vector_count][];
    double[][] output2 = new double[vector_count][];
    for (int i = 0; i < input2.Length; i++)
    {
        input2[i] = new double[dimensions];
        for (int j = 0; j < dimensions; j++)
        {
            input2[i][j] = projection[i, j];
        }
        output2[i] = new double[1];
        // turn classes from ints into doubles in the range [0..1],
        // because the network uses a sigmoid output
        output2[i][0] = Convert.ToDouble(output[i]) / 10;
    }
    #endregion

    // 2.1 create neural network
    ActivationNetwork network = new ActivationNetwork(
        new SigmoidFunction(2),
        dimensions, // input neurons in the network
        dimensions, // neurons in the first layer
        1);         // one neuron in the second layer

    // 2.2 create teacher
    BackPropagationLearning teacher = new BackPropagationLearning(network);

    // 2.3 training loop
    int p = 0;
    while (true)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input2, output2);
        p++;
        if (p > 1000000)
            break; // instead of a fixed iteration count, the error value could be checked
    }

    // 3. classify a new sample
    double[,] sample = { { 10, 8 } };
    double[,] projectedSample = lda.Transform(sample);
    double[] projectedSample2 = { projectedSample[0, 0], projectedSample[0, 1] };

    double[] networkOutput = network.Compute(projectedSample2);

    Console.WriteLine("========================");
    // convert back to integer classes by rounding and multiplying by 10
    // (because we divided by 10 before); if the result is unexpected,
    // rounding might be the problem, or more training may be needed
    Console.WriteLine(Math.Round(networkOutput[0], 1, MidpointRounding.AwayFromZero) * 10);
    Console.ReadLine();
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = data.GetLength(0);

    // data transformation factors
    double yFactor = 1.7 / chart.RangeY.Length;
    double yMin = chart.RangeY.Min;
    double xFactor = 2.0 / chart.RangeX.Length;
    double xMin = chart.RangeX.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[1];
        output[i] = new double[1];

        // set input
        input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
        // set output
        output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
    }

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        1, neuronsInFirstLayer, 1);

    // create teacher
    BackPropagationLearning teacher = new BackPropagationLearning(network);

    // set learning rate and momentum
    teacher.LearningRate = learningRate;
    teacher.Momentum = momentum;

    // iterations
    int iteration = 1;

    // solution array
    double[,] solution = new double[50, 2];
    double[] networkInput = new double[1];

    // calculate X values to be used with solution function
    for (int j = 0; j < 50; j++)
    {
        solution[j, 0] = chart.RangeX.Min + (double)j * chart.RangeX.Length / 49;
    }

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution
        for (int j = 0; j < 50; j++)
        {
            networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
            solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
        }
        chart.UpdateDataSeries("solution", solution);

        // calculate error
        double learningError = 0.0;
        for (int j = 0, k = data.GetLength(0); j < k; j++)
        {
            networkInput[0] = input[j][0];
            learningError += Math.Abs(data[j, 1] -
                ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
        }

        // set current iteration's info
        UpdateTextbox(currentIterationBox, iteration.ToString());
        UpdateTextbox(currentErrorBox, learningError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    // enable settings controls
    EnableControls(true);
}
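The 1.7 factor and 0.85 offset in this and the following time-series snippets map the data's Y range onto [-0.85, 0.85], keeping targets safely inside the bipolar sigmoid's open (-1, 1) output range. A minimal round-trip sketch of that scaling; the helper names are illustrative, not part of the original sample:

// Illustrative helpers, not part of the original sample.
static double Scale(double y, double yMin, double rangeLength)
    => (y - yMin) * (1.7 / rangeLength) - 0.85;   // [yMin, yMin + rangeLength] -> [-0.85, 0.85]

static double Unscale(double s, double yMin, double rangeLength)
    => (s + 0.85) / (1.7 / rangeLength) + yMin;   // inverse mapping back to data units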
static void Learn()
{
    var network = new ActivationNetwork(
        new SigmoidFunction(),
        baseMaker.InputSize,
        arguments.NeuronsCount,
        baseMaker.OutputSize);
    network.Randomize();

    // re-randomize all weights in [-1, 1]
    foreach (var l in network.Layers)
        foreach (var n in l.Neurons)
            for (int i = 0; i < n.Weights.Length; i++)
                n.Weights[i] = rnd.NextDouble() * 2 - 1;

    var teacher = new BackPropagationLearning(network);
    teacher.LearningRate = 1;
    teacher.Momentum = 0;

    while (true)
    {
        // train for 500 ms
        var watch = new Stopwatch();
        watch.Start();
        while (watch.ElapsedMilliseconds < 500)
        {
            teacher.RunEpoch(baseMaker.Inputs, baseMaker.Answers);
        }
        watch.Stop();

        // evaluate on randomly generated tasks, building a confusion matrix
        var count = 0;
        percentage = new double[baseMaker.OutputSize, baseMaker.OutputSize];
        for (int i = 0; i < baseMaker.OutputSize; i++)
        {
            for (int j = 0; j < baseMaker.OutputSize * 5; j++)
            {
                var task = baseMaker.GenerateRandom(i);
                var output = network.Compute(task);
                var max = output.Max();
                var maxIndex = Enumerable.Range(0, output.Length).First(z => output[z] == max);
                percentage[i, maxIndex]++;
                if (i != maxIndex)
                    totalErrors++;
                count++;
            }
        }

        // normalize the confusion matrix for display
        var maxPercentage = percentage.Cast<double>().Max();
        for (int i = 0; i < baseMaker.OutputSize; i++)
            for (int j = 0; j < baseMaker.OutputSize; j++)
                percentage[i, j] /= maxPercentage;

        totalErrors /= count;
        form.BeginInvoke(new Action(Update));
    }
}
public EstimationResult Estimate(IEnumerable<IDateValue> dateValues)
{
    var data = dateValues.ToArray();
    var samplesCount = data.Length - LayerWidth;
    var factor = 1.7 / data.Length; // note: divides by the sample count, not the value range used by similar snippets
    var yMin = data.Min(x => x.Value);

    // prepare learning data: sliding windows of LayerWidth values
    var input = new double[samplesCount][];
    var output = new double[samplesCount][];
    for (var i = 0; i < samplesCount; i++)
    {
        input[i] = new double[LayerWidth];
        output[i] = new double[1];

        for (var j = 0; j < LayerWidth; j++)
            input[i][j] = (data[i + j].Value - yMin) * factor - 0.85;

        output[i][0] = (data[i + LayerWidth].Value - yMin) * factor - 0.85;
    }

    var network = new ActivationNetwork(
        new BipolarSigmoidFunction(SigmoidAlphaValue),
        LayerWidth, LayerWidth * 2, 1);

    var teacher = new BackPropagationLearning(network)
    {
        LearningRate = LearningRate,
        Momentum = Momentum
    };

    var solutionSize = data.Length - LayerWidth;
    var solution = new double[solutionSize, 2];
    var networkInput = new double[LayerWidth];

    for (var j = 0; j < solutionSize; j++)
        solution[j, 0] = j + LayerWidth;

    TimesLoop.Do(Iterations, () =>
    {
        teacher.RunEpoch(input, output);

        for (int i = 0, n = data.Length - LayerWidth; i < n; i++)
        {
            for (var j = 0; j < LayerWidth; j++)
                networkInput[j] = (data[i + j].Value - yMin) * factor - 0.85;

            solution[i, 1] = (network.Compute(networkInput)[0] + 0.85) / factor + yMin;
        }
    });

    return EstimationResult.Create(solution[0, 1], this);
}
public double[] Run(int[] numOfHiddenNeurals)
{
    // learning and prediction errors
    double[] errors = new double[2];

    // number of learning samples
    int samples = _data.Length - _windowSize;
    int trainingSamples = samples - _predictionSize;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    // sample indices
    int[] indices = new int[samples];
    int[] trainingIndices = new int[trainingSamples];

    // normalization function
    var normalizeFunc = new MinMaxNormalization(_xMax, _xMin, _data.Max(), _data.Min());

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[_windowSize];
        output[i] = new double[_outputNum];

        // set input
        for (int j = 0; j < _windowSize; j++)
        {
            input[i][j] = normalizeFunc.Compute(_data[i + j]);
        }
        // set output
        for (int j = 0; j < _outputNum; j++)
        {
            output[i][j] = normalizeFunc.Compute(_data[i + _windowSize + j]);
        }
        indices[i] = i;
    }

    // randomize the sample indices
    Utils.Shuffle<int>(indices);
    output.Swap(indices);
    input.Swap(indices);

    // get training samples
    double[][] trainingInput = new double[trainingSamples][];
    double[][] trainingOutput = new double[trainingSamples][];
    for (int i = 0; i < trainingSamples; i++)
    {
        trainingInput[i] = new double[_windowSize];
        trainingOutput[i] = new double[_outputNum];

        // set input
        for (int j = 0; j < _windowSize; j++)
        {
            trainingInput[i][j] = input[i][j];
        }
        // set output
        for (int j = 0; j < _outputNum; j++)
        {
            trainingOutput[i][j] = output[i][j];
        }
        trainingIndices[i] = i;
    }

    // create multi-layer neural network
    int[] neuronsCount = numOfHiddenNeurals.Concat(new int[] { _outputNum }).ToArray();
    ActivationNetwork network = new ActivationNetwork(_function, _windowSize, neuronsCount);

    // create teacher; a ParallelResilientBackpropagationLearning teacher could be used instead
    BackPropagationLearning teacher = new BackPropagationLearning(network);

    // set learning rate and momentum
    teacher.LearningRate = _learningRate;
    teacher.Momentum = 0.0;

    // iterations
    int iteration = 1;

    // solution array
    int solutionSize = _data.Length - _windowSize;
    double[,] solution = new double[solutionSize, 1 + _outputNum];

    // calculate X values to be used with solution function
    for (int j = 0; j < solutionSize; j++)
    {
        solution[j, 0] = j + _windowSize;
    }

    // loop
    var needToStop = false;
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(trainingInput, trainingOutput) / trainingSamples;

        // calculate solution and learning and prediction errors every 5 iterations
        double learningError = 0.0;
        double predictionError = 0.0;
        if (iteration % 5 == 0)
        {
            // go through all the data
            for (int i = 0; i < samples; i++)
            {
                double err = 0.0;
                for (int j = 0; j < _outputNum; j++)
                {
                    double y = output[i][j];
                    double o = network.Compute(input[i])[j];
                    err += (o - y) * (o - y) / _outputNum;

                    // evaluate the function
                    solution[i, j + 1] = normalizeFunc.Inverse(o);
                }

                // accumulate prediction or learning error (MSE)
                if (i >= trainingSamples)
                {
                    predictionError += err;
                }
                else
                {
                    learningError += err;
                }
            }
        }

        // adaptive learning - decrease the learning rate:
        // n(t) = n0 * a^(t / T), a = 1 / 10^x, x >= 1
        // (cast to double so the exponent is fractional rather than truncated)
        teacher.LearningRate = _learningRate * Math.Pow(0.1, (double)iteration / _iterations);

        // increase iteration
        iteration++;

        // check if we need to stop
        if ((_iterations != 0) && (iteration > _iterations))
        {
            errors[0] = learningError / trainingSamples;
            errors[1] = predictionError / _predictionSize;
            Console.WriteLine("Final Learning MSE Error: " + errors[0]);
            Console.WriteLine("Final Prediction MSE Error: " + errors[1]);
            Console.WriteLine("Final Learning Rate: " + teacher.LearningRate);
            Console.WriteLine("Window Size: " + _windowSize + "\n" +
                "Number of Hidden Neurons: " + neuronsCount[0] + "\n" +
                "Output Size: " + _outputNum);
            break;
        }
    }

    return errors;
}
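The MinMaxNormalization helper with its Compute and Inverse methods is not shown in the snippet above. A minimal sketch consistent with how it is constructed and called there; the real class may differ:

// A minimal sketch of the normalizer assumed above; an assumption, not the original class.
public class MinMaxNormalization
{
    private readonly double _xMax, _xMin, _dataMax, _dataMin;

    public MinMaxNormalization(double xMax, double xMin, double dataMax, double dataMin)
    {
        _xMax = xMax; _xMin = xMin;
        _dataMax = dataMax; _dataMin = dataMin;
    }

    // map a raw value from [dataMin, dataMax] into [xMin, xMax]
    public double Compute(double x)
        => (x - _dataMin) / (_dataMax - _dataMin) * (_xMax - _xMin) + _xMin;

    // map a network output back into the raw data range
    public double Inverse(double y)
        => (y - _xMin) / (_xMax - _xMin) * (_dataMax - _dataMin) + _dataMin;
}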
public static void ExactTrainingData()
{
    TrainData[] tdatas = traindatas.ToArray();

    double[][] output = new double[tdatas.Length - 1][];
    double[][] input = new double[tdatas.Length - 1][];

    for (int i = 1; i < tdatas.Length; i++)
    {
        int spddiff = 0, voldiff = 0, occdiff = 0,
            u_spddifft = 0, u_voldifft = 0, u_occdifft = 0,
            d_spddifft = 0, d_voldifft = 0, d_occdifft = 0,
            level = 0;

        tdatas[i].getTrainData(ref voldiff, ref spddiff, ref occdiff);

        // differences between consecutive upstream (vd1) and downstream (vd2) detector readings
        u_spddifft = tdatas[i].vd1.AvgSpd - tdatas[i - 1].vd1.AvgSpd;
        u_voldifft = tdatas[i].vd1.Volume - tdatas[i - 1].vd1.Volume;
        u_occdifft = tdatas[i].vd1.Occupancy - tdatas[i - 1].vd1.Occupancy;
        d_spddifft = tdatas[i].vd2.AvgSpd - tdatas[i - 1].vd2.AvgSpd;
        d_voldifft = tdatas[i].vd2.Volume - tdatas[i - 1].vd2.Volume;
        d_occdifft = tdatas[i].vd2.Occupancy - tdatas[i - 1].vd2.Occupancy;
        level = tdatas[i - 1].Level;

        output[i - 1] = new double[1];
        output[i - 1][0] = level;

        input[i - 1] = new double[9];
        input[i - 1][0] = spddiff;
        input[i - 1][1] = voldiff;
        input[i - 1][2] = occdiff;
        input[i - 1][3] = u_spddifft;
        input[i - 1][4] = u_voldifft;
        input[i - 1][5] = u_occdifft;
        input[i - 1][6] = d_spddifft;
        input[i - 1][7] = d_voldifft;
        input[i - 1][8] = d_occdifft;

        Console.WriteLine(spddiff + "," + voldiff + "," + occdiff + "," +
            u_spddifft + "," + u_voldifft + "," + u_occdifft + "," +
            d_spddifft + "," + d_voldifft + "," + d_occdifft + "," + level);
    }

    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(1.5), 9, 25, 1);

    // create teacher
    BackPropagationLearning teacher = new BackPropagationLearning(network);
    teacher.Momentum = 0.1;
    teacher.LearningRate = 0.01;

    // loop until the average error per sample is small enough
    double err = 100;
    int cnt = 0;
    while (err / tdatas.Length > 0.00079)
    {
        // run epoch of learning procedure
        err = teacher.RunEpoch(input, output);
        Console.WriteLine(err / tdatas.Length);
        cnt++;
    }

    // report samples the trained network misclassifies
    for (int i = 0; i < tdatas.Length - 1; i++)
    {
        if (Convert.ToInt32(output[i][0]) != Convert.ToInt32(network.Compute(input[i])[0]))
            Console.WriteLine("fail");
    }
}
// Worker thread
void SearchSolution()
{
    windowSize = _WordToID.Count;
    //windowSize = 3;

    // number of learning samples
    int samples = data.Length - predictionSize - windowSize;
    //int samples = 157;

    // data transformation factor
    double factor = 1.7 / chart.RangeY.Length;
    double yMin = chart.RangeY.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[windowSize];
        output[i] = new double[1];

        // set input
        for (int j = 0; j < windowSize; j++)
        {
            input[i][j] = (data[i + j] - yMin) * factor - 0.85;
        }
        // set output
        output[i][0] = (data[i + windowSize] - yMin) * factor - 0.85;
    }

    // create multi-layer neural network
    _network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        //new ThresholdFunction(),
        windowSize,
        windowSize * 2,
        1);

    // create teacher
    BackPropagationLearning teacher = new BackPropagationLearning(_network);

    // set learning rate and momentum
    teacher.LearningRate = learningRate;
    teacher.Momentum = momentum;

    // iterations
    int iteration = 1;

    // solution array
    int solutionSize = data.Length - windowSize;
    double[,] solution = new double[solutionSize, 2];
    double[] networkInput = new double[windowSize];

    // calculate X values to be used with solution function
    for (int j = 0; j < solutionSize; j++)
    {
        solution[j, 0] = j + windowSize;
    }

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution and learning and prediction errors
        double learningError = 0.0;
        double predictionError = 0.0;

        // go through all the data
        for (int i = 0, n = data.Length - windowSize; i < n; i++)
        {
            // put values from current window as network's input
            for (int j = 0; j < windowSize; j++)
            {
                networkInput[j] = (data[i + j] - yMin) * factor - 0.85;
            }

            // evaluate the function
            solution[i, 1] = (_network.Compute(networkInput)[0] + 0.85) / factor + yMin;

            if (iteration == iterations)
            {
                string format = string.Format("[{0}][{1}] = [{2}] + [{3}] = {4}",
                    i, networkInput[0], _network[0].Output[0], _network[0].Output[1], solution[i, 1]);
                //listBox1.Items.Add(format);
            }

            // calculate prediction error
            if (i >= n - predictionSize)
            {
                predictionError += Math.Abs(solution[i, 1] - data[windowSize + i]);
            }
            else
            {
                learningError += Math.Abs(solution[i, 1] - data[windowSize + i]);
            }
        }

        // update solution on the chart
        chart.UpdateDataSeries("solution", solution);

        // set current iteration's info
        //currentIterationBox.Text = iteration.ToString();
        //currentLearningErrorBox.Text = learningError.ToString("F3");
        //currentPredictionErrorBox.Text = predictionError.ToString("F3");

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    // show new solution
    for (int j = windowSize, k = 0, n = data.Length; j < n; j++, k++)
    {
        lock (this)
        {
            //dataList.Items[j].SubItems.Add(solution[k, 1].ToString());
        }
    }

    //listBox1.Items.Add(network.ToString());

    // enable settings controls
    //EnableControls(true);
}
private void SearchSolution()
{
    mNetwork = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        mInput[0].Length, mHiddenNeurons, mOutput[0].Length);

    // create teacher
    BackPropagationLearning teacher = new BackPropagationLearning(mNetwork);

    // set learning rate and momentum
    teacher.LearningRate = mLearningRate;
    teacher.Momentum = mMomentum;

    bool needToStop = false;
    int iteration = 0;

    while (!needToStop)
    {
        double error = teacher.RunEpoch(mInput, mOutput) / mInput.Length;
        mErrors[iteration] = error;

        // compute RMS error on the test set
        double test_error = 0.0;
        for (int i = 0; i < mTestInput.Length; i++)
        {
            double[] test_result = mNetwork.Compute(mTestInput[i]);
            test_error += (mTestOutput[i][0] - test_result[0]) * (mTestOutput[i][0] - test_result[0]);
        }
        mTestErrors[iteration] = Math.Sqrt(test_error);

        // save the network with the lowest test error so far
        if (min_test_error > mTestErrors[iteration])
        {
            min_test_error = mTestErrors[iteration];
            mNetwork.Save(this.Id + ".txt");
        }

        iteration++;
        if (iteration >= mIterations)
            needToStop = true;
    }

    // reload the best network found during training
    mTestBestNetwork = (ActivationNetwork)ActivationNetwork.Load(this.Id + ".txt");
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = data.Length - predictionSize - windowSize;

    // data transformation factor
    double factor = 1.7 / chart.RangeY.Length;
    double yMin = chart.RangeY.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[windowSize];
        output[i] = new double[1];

        // set input
        for (int j = 0; j < windowSize; j++)
        {
            input[i][j] = (data[i + j] - yMin) * factor - 0.85;
        }
        // set output
        output[i][0] = (data[i + windowSize] - yMin) * factor - 0.85;
    }

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        windowSize, windowSize * 2, 1);

    // create teacher
    var teacher = new ParallelResilientBackpropagationLearning(network);
    teacher.Reset(initialStep);

    // run at least one backpropagation epoch
    //teacher2.RunEpoch(input, output);

    // iterations
    int iteration = 1;

    // solution array
    int solutionSize = data.Length - windowSize;
    double[,] solution = new double[solutionSize, 2];
    double[] networkInput = new double[windowSize];

    // calculate X values to be used with solution function
    for (int j = 0; j < solutionSize; j++)
    {
        solution[j, 0] = j + windowSize;
    }

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution and learning and prediction errors
        double learningError = 0.0;
        double predictionError = 0.0;

        // go through all the data
        for (int i = 0, n = data.Length - windowSize; i < n; i++)
        {
            // put values from current window as network's input
            for (int j = 0; j < windowSize; j++)
            {
                networkInput[j] = (data[i + j] - yMin) * factor - 0.85;
            }

            // evaluate the function
            solution[i, 1] = (network.Compute(networkInput)[0] + 0.85) / factor + yMin;

            // calculate prediction error
            if (i >= n - predictionSize)
            {
                predictionError += Math.Abs(solution[i, 1] - data[windowSize + i]);
            }
            else
            {
                learningError += Math.Abs(solution[i, 1] - data[windowSize + i]);
            }
        }

        // update solution on the chart
        chart.UpdateDataSeries("solution", solution);

        // set current iteration's info
        SetText(currentIterationBox, iteration.ToString());
        SetText(currentLearningErrorBox, learningError.ToString("F3"));
        SetText(currentPredictionErrorBox, predictionError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
            break;
    }

    // show new solution
    for (int j = windowSize, k = 0, n = data.Length; j < n; j++, k++)
    {
        AddSubItem(dataList, j, solution[k, 1].ToString());
    }

    // enable settings controls
    EnableControls(true);
}
public void Learn()
{
    var network = new ActivationNetwork(new BipolarSigmoidFunction(), Constants.StoneCount, 1);
    var teacher = new BackPropagationLearning(network); // a PerceptronLearning teacher could be used instead

    var data = LoadData("4-6-2012-04-24.know");

    double error = 1.0;
    int index = 0;
    while (error > 0.001 && index < 100000)
    {
        error = teacher.RunEpoch(data.Item1, data.Item2);
        index++;
    }

    network.Save("4-6-2012-04-24.bp.net");

    var text = "□○○○●○○□○●●□□●□□";
    var input = ToDouble(text);
    var output = network.Compute(input);

    // scale the network output to a stone-count evaluation
    var eval = output[0] * 2 * Constants.StoneCount - Constants.StoneCount;
    Console.WriteLine("{0} {1}", text, eval);
}
public void LearnDemo()
{
    ActivationNetwork network = new ActivationNetwork(new ThresholdFunction(), 2, 1); //Constants.StoneCount
    PerceptronLearning teacher = new PerceptronLearning(network);

    // AND truth table
    double[][] input = new double[4][];
    double[][] output = new double[4][];
    input[0] = new double[] { 0, 0 }; output[0] = new double[] { 0 };
    input[1] = new double[] { 0, 1 }; output[1] = new double[] { 0 };
    input[2] = new double[] { 1, 0 }; output[2] = new double[] { 0 };
    input[3] = new double[] { 1, 1 }; output[3] = new double[] { 1 };

    double error = 1.0;
    while (error > 0.001)
    {
        error = teacher.RunEpoch(input, output);
    }

    var k = network.Compute(new double[] { 0.9, 0.7 });
    var o = network.Output;

    network.Save("a.txt");
}
static void NeuralNetworkAccompanimentTest()
{
    // initialize input and output values
    double[][] input = new double[4][]
    {
        new double[] { 0, 0 }, new double[] { 0, 1 },
        new double[] { 1, 0 }, new double[] { 1, 1 }
    };
    double[][] output = new double[4][]
    {
        new double[] { 0 }, new double[] { 1 },
        new double[] { 1 }, new double[] { 0 }
    };

    SigmoidFunction sig = new SigmoidFunction();

    Accord.Neuro.Networks.RestrictedBoltzmannMachine boltz =
        new Accord.Neuro.Networks.RestrictedBoltzmannMachine(200, 200);

    // create neural network
    ActivationNetwork network = new ActivationNetwork(
        new SigmoidFunction(2),
        200, 20, 200);

    //BackPropagationLearning teacher = new BackPropagationLearning(network);
    //LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(network);
    Accord.Neuro.Learning.ParallelResilientBackpropagationLearning teacher =
        new ParallelResilientBackpropagationLearning(network);

    Accord.Neuro.Networks.DeepBeliefNetwork dpn =
        new Accord.Neuro.Networks.DeepBeliefNetwork(200, 20);
    // teacher.IncreaseFactor = 1.01;

    Composition c = Composition.LoadFromMIDI("test/other/ff7tifa.mid");
    MusicPlayer player = new MusicPlayer();
    //player.Play(c.Tracks[0]);

    // train the network to map the melody track to the accompaniment track
    List<double[]> inputs = new List<double[]>();
    List<double[]> outputs = new List<double[]>();
    inputs.Add(GetDoublesFromNotes((c.Tracks[0].GetMainSequence() as MelodySequence).ToArray()));
    outputs.Add(GetDoublesFromNotes((c.Tracks[1].GetMainSequence() as MelodySequence).ToArray()));
    // inputs.Add(GetDoublesFromNotes((c.Tracks[1].GetMainSequence() as MelodySequence).ToArray()));
    // outputs.Add(GetDoublesFromNotes((c.Tracks[2].GetMainSequence() as MelodySequence).ToArray()));

    int its = 0;
    while (its++ < 10000)
    {
        double error = teacher.RunEpoch(inputs.ToArray(), outputs.ToArray());
        Console.WriteLine("{0}: Error - {1}", its, error);
    }

    var input_melody = (c.Tracks[0].GetMainSequence() as MelodySequence);
    var new_notes = network.Compute(GetDoublesFromNotes(input_melody.ToArray()));
    var new_mel = GetMelodyFromDoubles(new_notes);

    player.Play(new_mel);
    Console.ReadLine();
}
double[][] Compute(ActivationNetwork net, double[][] input)
{
    // compute the network's output for every input vector
    double[][] result = new double[input.Length][];
    for (int i = 0; i < input.Length; i++)
    {
        double[] tmp = net.Compute(input[i]);

        // copy into a fresh array so each stored result is independent
        result[i] = new double[tmp.Length];
        for (int j = 0; j < tmp.Length; j++)
        {
            result[i][j] = tmp[j];
        }
    }
    return result;
}
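A minimal usage sketch for the helper above, assuming network is a trained 2-2-1 ActivationNetwork like the XOR examples earlier in this section; xorInputs is an illustrative name:

// A minimal usage sketch; 'network' is assumed to come from one of the
// XOR training examples above.
double[][] xorInputs =
{
    new double[] { 0, 0 }, new double[] { 0, 1 },
    new double[] { 1, 0 }, new double[] { 1, 1 }
};

double[][] predictions = Compute(network, xorInputs);
for (int i = 0; i < predictions.Length; i++)
    Console.WriteLine("{0},{1} -> {2}", xorInputs[i][0], xorInputs[i][1], predictions[i][0]);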