/// <summary>
/// Trains a neural network on the training set and asserts that the total
/// squared prediction error on the held-out test set is below 0.01.
/// </summary>
private static void TestNeuralNetwork()
{
    // Min-max scaling keeps inputs inside the network's activation range.
    MinMaxNormalization normalization = new MinMaxNormalization();
    NeuralNetwork neuralNetwork = new NeuralNetwork(numHiddenLayers, trainingEpochs, normalization);
    neuralNetwork.Train(dataX, dataY);

    float[] results = neuralNetwork.Predict(testX);

    // Sum of squared errors over the test set. Accumulate in double for
    // precision; x * x instead of Math.Pow(x, 2); testY is one-dimensional
    // (it is indexed with a single subscript), so Length replaces GetLength(0).
    double totalError = 0.0;
    for (int i = 0; i < testY.Length; i++)
    {
        float diff = results[i] - testY[i];
        totalError += diff * diff;
    }

    Assert.True(totalError < 0.01);
}
/// <summary>
/// Trains a back-propagation neural network on sliding-window samples of
/// <c>_data</c> and returns the final learning and prediction mean-squared
/// errors. Samples are normalized, shuffled, and split: the first
/// <c>samples - _predictionSize</c> are used for training, the remainder as
/// a hold-out set for the prediction error.
/// </summary>
/// <param name="numOfHiddenNeurals">Neuron count for each hidden layer; the
/// output layer of size <c>_outputNum</c> is appended automatically.</param>
/// <returns>
/// A two-element array: [0] = learning (training) MSE, [1] = prediction MSE.
/// </returns>
public double[] Run(int[] numOfHiddenNeurals)
{
    // validation and testing error
    double[] errors = new double[2];

    // number of learning samples
    int samples = _data.Length - _windowSize;
    int trainingSamples = samples - _predictionSize;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    // sample indices
    int[] indices = new int[samples];

    // normalization function scaling raw data into [_xMin, _xMax]
    var normalizeFunc = new MinMaxNormalization(_xMax, _xMin, _data.Max(), _data.Min());

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[_windowSize];
        output[i] = new double[_outputNum];

        // set input: a window of _windowSize normalized values
        for (int j = 0; j < _windowSize; j++)
        {
            input[i][j] = normalizeFunc.Compute(_data[i + j]);
        }

        // set output: the _outputNum values immediately after the window
        for (int j = 0; j < _outputNum; j++)
        {
            output[i][j] = normalizeFunc.Compute(_data[i + _windowSize + j]);
        }

        indices[i] = i;
    }

    // randomize the sample order (same permutation for input and output)
    Utils.Shuffle<int>(indices);
    output.Swap(indices);
    input.Swap(indices);

    // training set = first trainingSamples entries of the shuffled data
    double[][] trainingInput = new double[trainingSamples][];
    double[][] trainingOutput = new double[trainingSamples][];

    for (int i = 0; i < trainingSamples; i++)
    {
        trainingInput[i] = new double[_windowSize];
        trainingOutput[i] = new double[_outputNum];

        for (int j = 0; j < _windowSize; j++)
        {
            trainingInput[i][j] = input[i][j];
        }

        for (int j = 0; j < _outputNum; j++)
        {
            trainingOutput[i][j] = output[i][j];
        }
    }

    // create multi-layer neural network: hidden layers plus one output layer
    int[] neuronsCount = numOfHiddenNeurals.Concat(new int[] { _outputNum }).ToArray();
    ActivationNetwork network = new ActivationNetwork(_function, _windowSize, neuronsCount);

    // create teacher; set learning rate and momentum
    BackPropagationLearning teacher = new BackPropagationLearning(network);
    teacher.LearningRate = _learningRate;
    teacher.Momentum = 0.0;

    // iterations
    int iteration = 1;

    // solution array: column 0 = x value, columns 1.._outputNum = predictions
    int solutionSize = _data.Length - _windowSize;
    double[,] solution = new double[solutionSize, 1 + _outputNum];

    // calculate X values to be used with solution function
    for (int j = 0; j < solutionSize; j++)
    {
        solution[j, 0] = j + _windowSize;
    }

    while (true)
    {
        // run epoch of learning procedure
        teacher.RunEpoch(trainingInput, trainingOutput);

        // Calculate solution and learning/prediction errors every 5
        // iterations — and ALWAYS on the final iteration, so the returned
        // errors are populated even when _iterations is not a multiple of 5.
        // (BUGFIX: previously the method returned all-zero errors in that
        // case, because the accumulators reset each pass.)
        double learningError = 0.0;
        double predictionError = 0.0;
        bool lastIteration = (_iterations != 0) && (iteration >= _iterations);
        if (iteration % 5 == 0 || lastIteration)
        {
            // go through all the data
            for (int i = 0; i < samples; i++)
            {
                double err = 0.0;
                for (int j = 0; j < _outputNum; j++)
                {
                    double y = output[i][j];
                    double o = network.Compute(input[i])[j];
                    err += (o - y) * (o - y) / _outputNum;

                    // evaluate the function (de-normalized prediction)
                    solution[i, j + 1] = normalizeFunc.Inverse(o);
                }

                // samples past the training split form the hold-out set
                if (i >= trainingSamples)
                {
                    predictionError += err;
                }
                else
                {
                    learningError += err;
                }
            }
        }

        // Adaptive Learning - decrease the learning rate
        // n(t) = n0 * a^(t/T), a = 1/10^x, x >= 1
        // BUGFIX: cast to double — the original integer division made t/T
        // evaluate to 0 for every t < T, so the rate never actually decayed
        // (and threw DivideByZeroException when _iterations == 0).
        if (_iterations != 0)
        {
            teacher.LearningRate = _learningRate * Math.Pow(0.1, (double)iteration / _iterations);
        }

        // increase iteration
        iteration++;

        // check if we need to stop
        if ((_iterations != 0) && (iteration > _iterations))
        {
            errors[0] = learningError / trainingSamples;
            errors[1] = predictionError / _predictionSize;
            Console.WriteLine("Final Learning MSE Error: " + errors[0]);
            Console.WriteLine("Final Prediction MSE Error: " + errors[1]);
            Console.WriteLine("Final Learning Rate: " + teacher.LearningRate);
            Console.WriteLine("Window Size: " + _windowSize + "\n" + "Number of Hidden Neurons: " + neuronsCount[0] + "\n" + "Output Size: " + _outputNum);
            break;
        }
    }

    return errors;
}