Example #1
0
        /// <summary>
        /// Presents the training sub-menu and runs back-propagation training on the
        /// module-level network (<c>_network</c>) against <c>_dataSets</c>.
        /// Option 1 trains until a user-supplied minimum error is reached,
        /// option 2 trains for a fixed number of epochs, option 3 returns to the
        /// network menu.
        /// </summary>
        private static void Train()
        {
            Console.WriteLine("Network Training");
            PrintUnderline(50);
            Console.WriteLine("\t1. Train to minimum error");
            Console.WriteLine("\t2. Train to max epoch");
            Console.WriteLine("\t3. Network Menu");
            PrintNewLine();

            BackPropLearning learning = null;

            switch (GetInput("\tYour Choice: ", 1, 3))
            {
            case 1:
                // Train until the network error falls below a user-chosen threshold.
                var minError = GetDouble("\tMinimum Error: ", 0.000000001, 1.0);
                PrintNewLine();
                Console.WriteLine("\tTraining...");
                learning = new BackPropLearning(_network)
                {
                    MinimumError = minError
                };
                learning.TrainByError(_dataSets);
                Console.WriteLine("\t**Training Complete**");
                PrintNewLine();
                NetworkMenu();
                break;

            case 2:
                // Train for a fixed epoch count; a null input means "cancel, go back".
                var maxEpoch = GetInput("\tMax Epoch: ", 1, int.MaxValue);
                if (!maxEpoch.HasValue)
                {
                    PrintNewLine();
                    NetworkMenu();
                    return;
                }
                PrintNewLine();
                Console.WriteLine("\tTraining...");
                learning = new BackPropLearning(_network)
                {
                    NumEpochs = maxEpoch.Value
                };
                learning.TrainByEpochs(_dataSets);
                Console.WriteLine("\t**Training Complete**");
                PrintNewLine();
                // NOTE(review): unlike case 1, this branch does not return to
                // NetworkMenu() after training — confirm that is intentional.
                break;

            case 3:
                NetworkMenu();
                break;
            }
            PrintNewLine();
        }
Example #2
0
        /// <summary>
        /// Trains a 2-2-1 network with rational-sigmoid activations on the XOR
        /// data sets via back-propagation, then verifies the trained outputs
        /// within a 0.15 tolerance.
        /// </summary>
        public void BackPropRationalSigmoidXorTest()
        {
            // 2 inputs, one hidden layer of 2 neurons, 1 output; rational
            // sigmoid on both hidden and output layers.
            var nnet = new Network(2, new int[] { 2 }, 1, typeof(RationalSigmoid), typeof(RationalSigmoid));

            var learning = new BackPropLearning(nnet);
            learning.MinimumError = 0.1;
            learning.Momentum     = 0.5;
            learning.LearnRate    = 0.1;

            // Train on XOR until the error target is met (cap: 20000 epochs).
            var xorData = BuildXorDataSets();
            learning.TrainByError(xorData, 20000);

            System.Diagnostics.Debug.WriteLine("trained in {0} epochs", learning.ResultEpochs);

            CheckResults(nnet, 0.15);
        }
Example #3
0
        /// <summary>
        /// Entry point: builds a 2-3-1 network, trains it on the XOR samples
        /// with back-propagation, then waits for Enter before exiting.
        /// </summary>
        static void Main(string[] args)
        {
            _network = new NeuronNet(2, 3, 1);
            var learning = new BackPropLearning(_network);
            var param    = new BackPropParams()
            {
                CallBack       = cb,    // progress callback invoked during training
                Eta            = 0.9,   // presumably the learning rate — confirm against BackPropParams
                Alpha          = 0.05,  // presumably the momentum term — confirm against BackPropParams
                ErrorStopValue = 0.05   // training stops once the error drops below this
            };

            // NOTE(review): XorSamples is passed as both inputs and targets —
            // looks intentional for this API, but confirm the parameter meaning.
            learning.TrainNetworkBySample(XorSamples, XorSamples, param);

            Console.WriteLine("press enter...");
            Console.ReadLine();
        }
Example #4
0
        /// <summary>
        /// Verifies that one full back-propagation pass through a two-layer
        /// network produces the expected updated weights and biases.
        /// Flow: build a log-sigmoid hidden layer and a pure-linear output
        /// layer, feed one input forward, back-propagate a fixed error through
        /// the layer sensitivities, compute weight/bias updates with learning
        /// rate 0.1, apply them, and assert the resulting values.
        /// NOTE(review): the expected numbers look hand-derived from a worked
        /// example — confirm against the reference derivation if they change.
        /// </summary>
        public void testWeightsAndBiasesUpdatedCorrectly()
        {
            // Hidden-layer weights; presumably 2 neurons fed by 1 input, given
            // the vector sizes below — TODO confirm Matrix(rows, cols) convention.
            Matrix weightMatrix1 = new Matrix(2, 1);

            weightMatrix1.set(0, 0, -0.27);
            weightMatrix1.set(1, 0, -0.41);

            // One bias per hidden neuron.
            Vector biasVector1 = new Vector(2);

            biasVector1.setValue(0, -0.48);
            biasVector1.setValue(1, -0.13);

            // Hidden layer with log-sigmoid activation, plus its sensitivity tracker.
            Layer layer1 = new Layer(weightMatrix1, biasVector1,
                                     new LogSigActivationFunction());
            LayerSensitivity layer1Sensitivity = new LayerSensitivity(layer1);

            // Single scalar input sample.
            Vector inputVector1 = new Vector(1);

            inputVector1.setValue(0, 1);

            layer1.feedForward(inputVector1);

            // Output layer: 1 neuron fed by the 2 hidden activations.
            Matrix weightMatrix2 = new Matrix(1, 2);

            weightMatrix2.set(0, 0, 0.09);
            weightMatrix2.set(0, 1, -0.17);

            Vector biasVector2 = new Vector(1);

            biasVector2.setValue(0, 0.48);

            // Output layer with pure-linear activation.
            Layer layer2 = new Layer(weightMatrix2, biasVector2,
                                     new PureLinearActivationFunction());
            // The hidden layer's activations become the output layer's input.
            Vector inputVector2 = layer1.getLastActivationValues();

            layer2.feedForward(inputVector2);

            // Fixed error injected at the output for the backward pass.
            Vector errorVector = new Vector(1);

            errorVector.setValue(0, 1.261);
            LayerSensitivity layer2Sensitivity = new LayerSensitivity(layer2);

            // Backward pass: output sensitivity from the error, then the hidden
            // layer's sensitivity derived from the succeeding layer.
            layer2Sensitivity.sensitivityMatrixFromErrorMatrix(errorVector);

            layer1Sensitivity
            .sensitivityMatrixFromSucceedingLayer(layer2Sensitivity);

            // Compute weight and bias updates for both layers (learning rate 0.1).
            BackPropLearning.calculateWeightUpdates(layer2Sensitivity, layer1
                                                    .getLastActivationValues(), 0.1);

            BackPropLearning.calculateBiasUpdates(layer2Sensitivity, 0.1);

            BackPropLearning.calculateWeightUpdates(layer1Sensitivity,
                                                    inputVector1, 0.1);

            BackPropLearning.calculateBiasUpdates(layer1Sensitivity, 0.1);

            // Apply the output layer's updates and check its new weights.
            layer2.updateWeights();
            Matrix newWeightMatrix2 = layer2.getWeightMatrix();

            Assert.AreEqual(0.171, newWeightMatrix2.get(0, 0), 0.001);
            Assert.AreEqual(-0.0772, newWeightMatrix2.get(0, 1), 0.001);

            layer2.updateBiases();
            Vector newBiasVector2 = layer2.getBiasVector();

            // NOTE(review): tighter tolerance (1e-5) than every other assertion
            // in this test — confirm that is intentional.
            Assert.AreEqual(0.7322, newBiasVector2.getValue(0), 0.00001);

            // Apply the hidden layer's updates and check its new weights/biases.
            layer1.updateWeights();
            Matrix newWeightMatrix1 = layer1.getWeightMatrix();

            Assert.AreEqual(-0.265, newWeightMatrix1.get(0, 0), 0.001);
            Assert.AreEqual(-0.419, newWeightMatrix1.get(1, 0), 0.001);

            layer1.updateBiases();
            Vector newBiasVector1 = layer1.getBiasVector();

            Assert.AreEqual(-0.475, newBiasVector1.getValue(0), 0.001);
            Assert.AreEqual(-0.139, newBiasVector1.getValue(1), 0.001);
        }
Example #5
0
        /// <summary>
        /// Verifies that <c>BackPropLearning.calculateBiasUpdates</c> forms the
        /// bias-update vectors correctly for both layers of a small two-layer
        /// network, and that each layer records the update as its "last" bias
        /// update while the "penultimate" update stays at zero (no prior update
        /// has been applied in this test).
        /// NOTE(review): the expected numbers look hand-derived from a worked
        /// example — confirm against the reference derivation if they change.
        /// </summary>
        public void testBiasUpdateMatrixesFormedCorrectly()
        {
            // Hidden-layer weights; presumably 2 neurons fed by 1 input, given
            // the vector sizes below — TODO confirm Matrix(rows, cols) convention.
            Matrix weightMatrix1 = new Matrix(2, 1);

            weightMatrix1.set(0, 0, -0.27);
            weightMatrix1.set(1, 0, -0.41);

            // One bias per hidden neuron.
            Vector biasVector1 = new Vector(2);

            biasVector1.setValue(0, -0.48);
            biasVector1.setValue(1, -0.13);

            // Hidden layer with log-sigmoid activation, plus its sensitivity tracker.
            Layer layer1 = new Layer(weightMatrix1, biasVector1,
                                     new LogSigActivationFunction());
            LayerSensitivity layer1Sensitivity = new LayerSensitivity(layer1);

            // Single scalar input sample.
            Vector inputVector1 = new Vector(1);

            inputVector1.setValue(0, 1);

            layer1.feedForward(inputVector1);

            // Output layer: 1 neuron fed by the 2 hidden activations.
            Matrix weightMatrix2 = new Matrix(1, 2);

            weightMatrix2.set(0, 0, 0.09);
            weightMatrix2.set(0, 1, -0.17);

            Vector biasVector2 = new Vector(1);

            biasVector2.setValue(0, 0.48);

            // Output layer with pure-linear activation, plus its sensitivity tracker.
            Layer layer2 = new Layer(weightMatrix2, biasVector2,
                                     new PureLinearActivationFunction());
            LayerSensitivity layer2Sensitivity = new LayerSensitivity(layer2);
            // The hidden layer's activations become the output layer's input.
            Vector           inputVector2      = layer1.getLastActivationValues();

            layer2.feedForward(inputVector2);

            // Fixed error injected at the output for the backward pass.
            Vector errorVector = new Vector(1);

            errorVector.setValue(0, 1.261);
            layer2Sensitivity.sensitivityMatrixFromErrorMatrix(errorVector);

            // Hidden-layer sensitivity derived from the succeeding (output) layer.
            layer1Sensitivity
            .sensitivityMatrixFromSucceedingLayer(layer2Sensitivity);

            // Output-layer bias update (learning rate 0.1).
            Vector biasUpdateVector2 = BackPropLearning.calculateBiasUpdates(
                layer2Sensitivity, 0.1);

            Assert.AreEqual(0.2522, biasUpdateVector2.getValue(0), 0.001);

            // The layer should record that same update as its most recent one.
            Vector lastBiasUpdateVector2 = layer2.getLastBiasUpdateVector();

            Assert.AreEqual(0.2522, lastBiasUpdateVector2.getValue(0), 0.001);

            // No earlier update exists, so the penultimate update is still zero.
            Vector penultimateBiasUpdateVector2 = layer2
                                                  .getPenultimateBiasUpdateVector();

            Assert.AreEqual(0.0, penultimateBiasUpdateVector2.getValue(0),
                            0.001);

            // Hidden-layer bias updates (learning rate 0.1).
            Vector biasUpdateVector1 = BackPropLearning.calculateBiasUpdates(
                layer1Sensitivity, 0.1);

            Assert.AreEqual(0.00495, biasUpdateVector1.getValue(0), 0.001);
            Assert.AreEqual(-0.00997, biasUpdateVector1.getValue(1), 0.001);

            // Same recording behavior as the output layer: last update matches…
            Vector lastBiasUpdateVector1 = layer1.getLastBiasUpdateVector();

            Assert.AreEqual(0.00495, lastBiasUpdateVector1.getValue(0), 0.001);
            Assert.AreEqual(-0.00997, lastBiasUpdateVector1.getValue(1), 0.001);

            // …and the penultimate update remains zero.
            Vector penultimateBiasUpdateVector1 = layer1
                                                  .getPenultimateBiasUpdateVector();

            Assert.AreEqual(0.0, penultimateBiasUpdateVector1.getValue(0),
                            0.001);
            Assert.AreEqual(0.0, penultimateBiasUpdateVector1.getValue(1),
                            0.001);
        }