This class implements the backpropagation training algorithm for feedforward neural networks. It is used in the same manner as any other training class that implements the IMLTrain interface. Backpropagation is a common neural network training algorithm. It works by analyzing the error at the output of the neural network and determining, according to the weights, how much each neuron in the output layer contributed to that error. The weights are then adjusted to minimize the error, and the process continues backwards through the layers of the network. This implementation of backpropagation uses both a learning rate and momentum. The learning rate specifies the degree to which the weight matrices are modified on each iteration; the momentum specifies how much the previous learning iteration affects the current one (specify zero to use no momentum at all). One primary problem with backpropagation is that the magnitude of the partial derivative is often detrimental to training. The Manhattan and resilient propagation methods address this issue in different ways; in general, resilient propagation is recommended over backpropagation for most Encog training tasks.
Inheritance: Propagation, IMomentum, ILearningRate
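Before the community-contributed examples below, here is a minimal sketch of the typical call pattern. It is an illustration only, assuming the Encog C# API used throughout this page (BasicNetwork, BasicMLDataSet, and the four-argument Backpropagation constructor); XORInput and XORIdeal stand in for any double[][] training arrays.

        // Minimal sketch, not a complete program: network is an already
        // constructed and finalized BasicNetwork (see the examples below).
        IMLDataSet trainingSet = new BasicMLDataSet(XORInput, XORIdeal);

        // Learning rate 0.7 scales each weight update; momentum 0.3 carries over
        // a fraction of the previous update (pass 0.0 to use no momentum at all).
        IMLTrain train = new Backpropagation(network, trainingSet, 0.7, 0.3);

        do
        {
            train.Iteration(); // one full pass over the training set
        } while (train.Error > 0.01);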
Example #1
        public static long BenchmarkEncog(double[][] input, double[][] output)
        {
            BasicNetwork network = new BasicNetwork();
            network.AddLayer(new BasicLayer(null, true,
                    input[0].Length));
            network.AddLayer(new BasicLayer(new ActivationSigmoid(), true,
                    HIDDEN_COUNT));
            network.AddLayer(new BasicLayer(new ActivationSigmoid(), false,
                    output[0].Length));
            network.Structure.FinalizeStructure();
            network.Reset();

            IMLDataSet trainingSet = new BasicMLDataSet(input, output);

            // train the neural network
            IMLTrain train = new Backpropagation(network, trainingSet, 0.7, 0.7);

            Stopwatch sw = new Stopwatch();
            sw.Start();
            // run epoch of learning procedure
            for (int i = 0; i < ITERATIONS; i++)
            {
                train.Iteration();
            }
            sw.Stop();

            return sw.ElapsedMilliseconds;
        }
Example #2
        public virtual void TrainNetwork()
        {
            var train = new Backpropagation(Network.Model, TrainingSet, Parameters.LearingCoefficient, Parameters.InertiaCoefficient)
            {
                BatchSize = 1
            };

            var iteration = 1;

            var errors = new List<double[]>();

            do
            {
                train.Iteration();

                var validationError = Network.Model.CalculateError(ValidationSet);

                errors.Add(new[] { iteration, train.Error, validationError });

                Console.WriteLine(
                    @"Iteration #" + iteration++ +
                    @" Training error:" + String.Format("{0:N10}", train.Error) +
                    @", Validation error:" + String.Format("{0:N10}", validationError));

            } while ((iteration < Parameters.IterationsCount) && (train.Error > Parameters.AcceptedError));

            train.FinishTraining();

            ErrorSet = errors.ToArray();
        }
Example #3
        public void TestBPROP()
        {
            IMLDataSet trainingData = new BasicMLDataSet(XOR.XORInput, XOR.XORIdeal);

            BasicNetwork network = NetworkUtil.CreateXORNetworkUntrained();

            IMLTrain bprop = new Backpropagation(network, trainingData, 0.7, 0.9);
            NetworkUtil.TestTraining(bprop, 0.01);
        }
Example #4
        public Predictor(String fileName, TextBox txtOutput, CSVData data, double percentValidation)
        {
            m_network = (BasicNetwork)EncogDirectoryPersistence.LoadObject(new FileInfo(fileName));
            m_txtOutputWindow = txtOutput;
            m_data = data;

            // Populate the input and output arrays
            LoadData(percentValidation);

            m_train = new Backpropagation(m_network, new BasicMLDataSet(m_inputTraining, m_outputTraining));
        }
Example #5
        /// <inheritDoc/>
        public override void CreateTrainer(OpenCLTrainingProfile profile, Boolean singleThreaded)
        {
            Propagation.Propagation train = new Backpropagation(Network, Training,
                   profile, LearningRate, Momentum);

            if (singleThreaded)
                train.NumThreads = 1;

            foreach (IStrategy strategy in Strategies)
            {
                train.AddStrategy(strategy);
            }

            Train = train;
        }
Example #6
        public double[][] TrainModel(int iterations, double learningRate, double momentum, IMLDataSet trainingDataSet, IMLDataSet validationDataSet)
        {
            var training = new Backpropagation(Model, trainingDataSet, learningRate, momentum);
            training.BatchSize = 1;

            var errors = new List<double[]>();

            for (int i = 0; i < iterations; ++i)
            {
                training.Iteration(1);
                var validationError = Model.CalculateError(validationDataSet);

                errors.Add(new double[] { i, training.Error, validationError });
            }
            return errors.ToArray();
        }
Example #7
        /// <summary>
        /// Trains the network.
        /// </summary>
        /// <param name="learningRate">Learning rate (usually around 0.1).</param>
        /// <param name="epochNumber">How many randomly chosen examples to train the network on.</param>
        /// <param name="momentum">Momentum coefficient.</param>
        /// <param name="dataSet">Training data for the network.</param>
        public void Train(double learningRate, int epochNumber,
            double momentum, InputDataSet dataSet)
        {
            if (learningRate <= 0.0 || epochNumber <= 0)
                throw new PerceptronWrapperException("Invalid arguments");
            if (_network.InputCount != dataSet.InputDataSize ||
                _network.OutputCount != dataSet.OutputDataSize ||
                dataSet.InputDataCount <= 0)
                    throw new PerceptronWrapperException("Invalid data set size");

            IMLDataSet data = new BasicMLDataSet(dataSet.InputSet, dataSet.OutputSet);
            var backprop = new Backpropagation(_network, data, learningRate, momentum);

            // Train the network using backpropagation
            backprop.Iteration(epochNumber);
            // :) - it doesn't get simpler than this
        }
Example #8
            public void Update(State s, double value)
            {
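                // Wrap the single state/value pair in a one-item training set.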
                var pair        = new BasicMLDataPair(new BasicMLData(s.AsDoubles()), new BasicMLData(new double[] { value }));
                var trainingSet = new BasicMLDataSet(new[] { pair });

                var train = new Encog.Neural.Networks.Training.Propagation.Back.Backpropagation(_network, trainingSet);

                train.BatchSize = 1;

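                // Run a couple of online iterations; train a little longer if the
                // error is still high.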
                train.Iteration(2);
                if (train.Error > 0.01)
                {
                    train.Iteration(3);
                }

                var error = train.Error;
            }
Example #9
        public Predictor(TextBox txtOutput, CSVData data, int hiddenNodes, double percentValidation)
        {
            m_txtOutputWindow = txtOutput;
            m_data = data;

            // Populate the input and output arrays
            LoadData(percentValidation);

            // Create Neural Network
            m_network = new BasicNetwork();
            m_network.AddLayer(new BasicLayer(null, true, m_data.InputNodes));
            m_network.AddLayer(new BasicLayer(new ActivationSigmoid(), true, hiddenNodes));
            m_network.AddLayer(new BasicLayer(new ActivationSigmoid(), false, m_data.OutputNodes));
            m_network.Structure.FinalizeStructure();
            m_network.Reset();

            m_train = new Backpropagation(m_network, new BasicMLDataSet(m_inputTraining, m_outputTraining));
        }
Example #10
        public List<Tuple<int, double, double>> Train(int iterationCount, double learnRate, double momentum)
        {
            List<Tuple<int, double, double>> error = new List<Tuple<int, double, double>>();

            var training = new Backpropagation(network, trainingData, learnRate, momentum);
            training.BatchSize = 1;

            for (int i = 0; i < iterationCount; ++i)
            {
                training.Iteration(1);
                double val_error = network.CalculateError(validationData);
                error.Add(new Tuple<int, double, double>(i, training.Error, val_error));
                //if(i%100 == 0)
                    //Console.WriteLine("{0}: [{1}; {2}]", i, training.Error, val_error);
            }

            training.FinishTraining();

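            // Return the error history, skipping the first 100 iterations.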
            return error.Skip(100).ToList();
        }
Example #11
        private static void Main(string[] args)
        {
            // create a neural network, without using a factory
            var network = new BasicNetwork();
            network.AddLayer(new BasicLayer(null, true, 2));
            network.AddLayer(new BasicLayer(new ActivationSigmoid(), true, 3));
            network.AddLayer(new BasicLayer(new ActivationSigmoid(), false, 1));
            network.Structure.FinalizeStructure();
            network.Reset();

            // create training data
            IMLDataSet trainingSet = new BasicMLDataSet(XORInput, XORIdeal);

            // train the neural network
            IMLTrain train = new Backpropagation(network, trainingSet, 0.5, 0.2);

            int epoch = 1;

            do
            {
                train.Iteration();
                Console.WriteLine(@"Epoch #" + epoch + @" Error:" + train.Error);
                epoch++;
            }
            while (train.Error > 0.01);

            // test the neural network

            Console.WriteLine(@"Neural Network Results:");
            foreach (IMLDataPair pair in trainingSet)
            {
                IMLData output = network.Compute(pair.Input);
                Console.WriteLine(pair.Input[0] + @"," + pair.Input[1]
                                  + @", actual=" + output[0] + @",ideal=" + pair.Ideal[0]);
            }

            Console.Read();
        }
Example #12
        /// <summary>
        /// Program entry point.
        /// </summary>
        /// <param name="app">Holds arguments and other info.</param>
        public void Execute(IExampleInterface app)
        {
            // create a neural network, without using a factory
            var network = new BasicNetwork();
            network.AddLayer(new BasicLayer(null, true, 2));
            network.AddLayer(new BasicLayer(new ActivationSigmoid(), true, 3));
            network.AddLayer(new BasicLayer(new ActivationSigmoid(), false, 1));
            network.Structure.FinalizeStructure();
            network.Reset();

            // create training data
            IMLDataSet trainingSet = new BasicMLDataSet(XORInput, XORIdeal);

            // train the neural network using online (batch=1)
            Propagation train = new Backpropagation(network, trainingSet, 0.7, 0.3);
            train.BatchSize = 1;

            int epoch = 1;

            do
            {
                train.Iteration();
                Console.WriteLine(@"Epoch #" + epoch + @" Error:" + train.Error);
                epoch++;
            } while (train.Error > 0.01);

            // test the neural network
            Console.WriteLine(@"Neural Network Results:");
            foreach (IMLDataPair pair in trainingSet)
            {
                IMLData output = network.Compute(pair.Input);
                Console.WriteLine(pair.Input[0] + @"," + pair.Input[1]
                                  + @", actual=" + output[0] + @",ideal=" + pair.Ideal[0]);
            }
        }
Example #13
        private void trainNetworkBackprop()
        {
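            // Hybrid training: backpropagation is the main trainer, with simulated
            // annealing and NEAT attached via hybrid strategies.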
            // IMLTrain train = new Backpropagation(this.network, this.input,this.ideal, 0.000001, 0.1);

            IMLDataSet aset = new BasicMLDataSet(input, ideal);
            int epoch = 1;
            // train the neural network
            ICalculateScore score = new TrainingSetScore(aset);
            IMLTrain trainAlt = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
            IMLTrain trainMain = new Backpropagation(network, aset, 0.001, 0.0);
            StopTrainingStrategy stop = new StopTrainingStrategy();
            var pop = new NEATPopulation(INPUT_SIZE, OUTPUT_SIZE, 1000);
            // train the neural network
            var step = new ActivationStep();
            step.Center = 0.5;
            pop.OutputActivationFunction = step;
            var train = new NEATTraining(score, pop);
            trainMain.AddStrategy(new Greedy());
            trainMain.AddStrategy(new HybridStrategy(trainAlt));
            trainMain.AddStrategy(stop);
            trainMain.AddStrategy(new HybridStrategy(train));

            network.ClearContext();

            while (!stop.ShouldStop())
            {
                trainMain.Iteration();
                train.Iteration();
                Console.WriteLine(@"Training " + @"Epoch #" + epoch + @" Error:" + trainMain.Error + @" Genetic iteration:" + trainAlt.IterationNumber + @" NEAT iteration:" + train.IterationNumber);
                epoch++;
            }
        }
Example #14
        private double TrainNetwork(String what, BasicNetwork network, IMLDataSet trainingSet)
        {
            // train the neural network
            ICalculateScore score = new TrainingSetScore(trainingSet);
            IMLTrain trainAlt = new NeuralSimulatedAnnealing(
                network, score, 10, 2, 100);


            IMLTrain trainMain = new Backpropagation(network, trainingSet, 0.00001, 0.0);

            var stop = new StopTrainingStrategy();
            trainMain.AddStrategy(new Greedy());
            trainMain.AddStrategy(new HybridStrategy(trainAlt));
            trainMain.AddStrategy(stop);

            int epoch = 0;
            while (!stop.ShouldStop())
            {
                trainMain.Iteration();
                app.WriteLine("Training " + what + ", Epoch #" + epoch + " Error:" + trainMain.Error);
                epoch++;
            }
            return trainMain.Error;
        }
Example #15
        private double TrainNetwork(String what, BasicNetwork network, IMLDataSet trainingSet, string Method)
        {
            // train the neural network
            ICalculateScore score = new TrainingSetScore(trainingSet);
            IMLTrain trainAlt = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
            IMLTrain trainMain;
            if (Method.Equals("Leven"))
            {
                Console.WriteLine("Using LevenbergMarquardtTraining");
                trainMain = new LevenbergMarquardtTraining(network, trainingSet);
            }
            else
                 trainMain = new Backpropagation(network, trainingSet);

            var stop = new StopTrainingStrategy();
            trainMain.AddStrategy(new Greedy());
            trainMain.AddStrategy(new HybridStrategy(trainAlt));
            trainMain.AddStrategy(stop);

            int epoch = 0;
            while (!stop.ShouldStop())
            {
                trainMain.Iteration();
                app.WriteLine("Training " + what + ", Epoch #" + epoch + " Error:" + trainMain.Error);
                epoch++;
            }
            return trainMain.Error;
        }
Example #16
        public static double TrainNetworks(BasicNetwork network, IMLDataSet minis)
        {
            Backpropagation trainMain = new Backpropagation(network, minis, 0.0001, 0.6);
            // Set the number of threads (0 = determine automatically).
            trainMain.ThreadCount = 0;
            // train the neural network
            ICalculateScore score = new TrainingSetScore(minis);
            IMLTrain trainAlt = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
           // IMLTrain trainMain = new Backpropagation(network, minis, 0.0001, 0.01);
            
            StopTrainingStrategy stop = new StopTrainingStrategy(0.0001, 200);
            trainMain.AddStrategy(new Greedy());
            trainMain.AddStrategy(new HybridStrategy(trainAlt));
            trainMain.AddStrategy(stop);

            //prune strategy not in GIT!...Removing it.
            //PruneStrategy strategypruning = new PruneStrategy(0.91d, 0.001d, 10, network,minis, 0, 20);
            //trainMain.AddStrategy(strategypruning);

            EncogUtility.TrainConsole(trainMain, network, minis, 15.2);


            var sw = new Stopwatch();
            sw.Start();
            while (!stop.ShouldStop())
            {
                trainMain.Iteration();
                
                Console.WriteLine(@"Iteration #:" + trainMain.IterationNumber + @" Error:" + trainMain.Error + @" Genetic Iteration:" + trainAlt.IterationNumber);
            }
            sw.Stop();
            Console.WriteLine(@"Total elapsed time in seconds:" + TimeSpan.FromMilliseconds(sw.ElapsedMilliseconds).Seconds);

            return trainMain.Error;
        }
Example #17
        private void CanItLearnRulesWith(IList<IMLDataPair> inputData, IList<IMLDataPair> verfData, int hiddenLayerCount, int neuronCount, IActivationFunction actFunc, double learnRate, double momentum, int batchSize, int maxEpochs)
        {
            var model = new DbModel();
            var funcName = actFunc.GetType().Name;
            var tdCount = inputData.Count();
            if (model.TicTacToeResult.Any(r => r.HiddenLayerCount == hiddenLayerCount &&
                r.NeuronPerLayercount == neuronCount &&
                r.ActivationFunction == funcName &&
                r.LearningRate == learnRate &&
                r.BatchSize == batchSize &&
                r.Momentum == momentum &&
                r.Name == Name &&
                r.Epochs == maxEpochs &&
                r.TrainingDataCount == tdCount))
                return;

            var nn = CreateNetwork(inputData, hiddenLayerCount, neuronCount, actFunc);
            var train = new Backpropagation(nn, new BasicMLDataSet(inputData), learnRate, momentum);
            train.BatchSize = batchSize;
            int epoch = 1;
            do
            {
                train.Iteration();
                epoch++;
            } while (epoch < maxEpochs);

            int good = verfData.Count(verf => { var output = nn.Compute(verf.Input); return Enumerable.Range(0, 9).All(i => Math.Round(output[i]) == Math.Round(verf.Ideal[i])); });
            int bad = VerfDataCount - good;

            var result = new TicTacToeResult()
            {
                HiddenLayerCount = hiddenLayerCount,
                NeuronPerLayercount = neuronCount,
                ActivationFunction = funcName,
                Bad = bad,
                Good = good,
                TrainingDataCount = tdCount,
                Momentum = momentum,
                LearningRate = learnRate,
                BatchSize = batchSize,
                Epochs = epoch,
                Error = train.Error,
                Name = Name,
            };

            model.TicTacToeResult.Add(result);
            model.SaveChanges();
        }
Example #18
        public static double TrainNetworks(BasicNetwork network, IMLDataSet minis)
        {
            // train the neural network
            ICalculateScore score = new TrainingSetScore(minis);
            IMLTrain trainAlt = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
            IMLTrain trainMain = new Backpropagation(network, minis, 0.0001, 0.01);
            StopTrainingStrategy stop = new StopTrainingStrategy(0.0001, 200);
            trainMain.AddStrategy(new Greedy());
            trainMain.AddStrategy(new HybridStrategy(trainAlt));
            trainMain.AddStrategy(stop);


            var sw = new Stopwatch();
            sw.Start();
            while (!stop.ShouldStop())
            {
                trainMain.Iteration();
                Console.WriteLine(@"Iteration #:" + trainMain.IterationNumber + @" Error:" + trainMain.Error + @" Genetic Iteration:" + trainAlt.IterationNumber);
            }
            sw.Stop();

            return trainMain.Error;
        }
Example #19
        /// <summary>
        /// Trains the network.
        /// </summary>
        /// <param name="learningRate">Learning rate (usually around 0.1).</param>
        /// <param name="epochNumber">How many randomly chosen examples to train the network on.</param>
        /// <param name="momentum">Momentum coefficient.</param>
        /// <param name="inputData">The input into the neural network for training.</param>
        /// <param name="idealData">The ideal output of the neural network for training.</param>
        public void Train(double learningRate, int epochNumber,
            double momentum, double[][] inputData, double[][] idealData)
        {
            if (learningRate <= 0.0 || epochNumber <= 0)
            {
                throw new PerceptronWrapperException("Invalid arguments");
            }

            if (inputData.Any(x => x.Length != basicNetwork.InputCount)
                || idealData.Any(x => x.Length != basicNetwork.OutputCount)
                || inputData.GetLength(0) <= 0)
            {
                throw new PerceptronWrapperException("Invalid data set size");
            }

            IMLDataSet data = new BasicMLDataSet(inputData, idealData);
            var backprop = new Backpropagation(basicNetwork, data, learningRate, momentum);

            // Train the network using backpropagation
            backprop.Iteration(epochNumber);
            // :) - it doesn't get simpler than this
        }
Example #20
        public override void Run()
        {
            testNetwork = new BasicNetwork();

            testNetwork.AddLayer(new BasicLayer(null, true, 2));
            testNetwork.AddLayer(new BasicLayer(new ActivationSigmoid(), true, 4));
            testNetwork.AddLayer(new BasicLayer(new ActivationSigmoid(), false, 1));
            testNetwork.Structure.FinalizeStructure();
            testNetwork.Reset();

            // create training data
            IMLDataSet trainingSet = new BasicMLDataSet(XORInput, XORIdeal);

            // train the neural network
            IMLTrain train = new Backpropagation(testNetwork, trainingSet);
            //IMLTrain train = new ResilientPropagation(testNetwork, trainingSet); //Encog manual says it is the best general one

            int epoch = 1;

            do
            {
                train.Iteration();
                Console.WriteLine(@"Epoch #" + epoch + @" Error:" + train.Error);
                epoch++;
            } while (train.Error > 0.0001);

            // test the neural network
            Console.WriteLine(@"Neural Network Results:");
            foreach (IMLDataPair pair in trainingSet)
            {
                IMLData output = testNetwork.Compute(pair.Input);
                Console.WriteLine(pair.Input[0] + @"," + pair.Input[1]
                                  + @", actual=" + output[0] + @",ideal=" + pair.Ideal[0]);
            }
        }
Example #21
        public double[][] Train(int iterationsCount, double learningRate, double momentum)
        {
            var train = new Backpropagation(network, trainingData, learningRate, momentum);
            train.BatchSize = 1;
            double[][] errors = new double[2][];
            errors[0] = new double[iterationsCount];
            errors[1] = new double[iterationsCount];
            for (int i = 0; i < iterationsCount; i++)
            {
                train.Iteration(1);
                errors[0][i] = train.Error;
                errors[1][i] = network.CalculateError(validationData);
            }
            train.FinishTraining();
            return errors;
        }