Example #1
0
 void Start()
 {
     // Cache the sibling movement component.
     mover = GetComponent<Mover>();
     // Single-element input buffer shared with the network component.
     inputs = new float[1];
     // Attach the network component and hand it the input buffer and output count.
     neuralNetwork = gameObject.AddComponent<NeuralNetwork>();
     neuralNetwork.inputs = inputs;
     neuralNetwork.nbOutputs = 1;
     // Attach the resilient-propagation trainer component.
     trainer = gameObject.AddComponent<RPropTrainer>();
     // One error slot per network output.
     errors = new float[neuralNetwork.nbOutputs];
 }
Example #2
0
 /// <summary>
 /// Creates new network and associated trainer.
 /// </summary>
 /// <param name="settings">Non-recurrent-network settings</param>
 /// <param name="trainingInputVectors">Collection of training input samples</param>
 /// <param name="trainingOutputVectors">Collection of training output (desired) samples</param>
 /// <param name="rand">Random object to be used</param>
 /// <param name="net">Created network</param>
 /// <param name="trainer">Created associated trainer</param>
 /// <exception cref="ArgumentException">When the configured feed-forward trainer type is not recognized.</exception>
 /// <exception cref="InvalidOperationException">When the settings type is unknown or a parallel perceptron is requested with more than one output value.</exception>
 public static void CreateNetworkAndTrainer(INonRecurrentNetworkSettings settings,
                                            List <double[]> trainingInputVectors,
                                            List <double[]> trainingOutputVectors,
                                            Random rand,
                                            out INonRecurrentNetwork net,
                                            out INonRecurrentNetworkTrainer trainer
                                            )
 {
     if (IsFF(settings))
     {
         //Feed forward network
         FeedForwardNetworkSettings netCfg = (FeedForwardNetworkSettings)settings;
         FeedForwardNetwork         ffn    = new FeedForwardNetwork(trainingInputVectors[0].Length, trainingOutputVectors[0].Length, netCfg);
         net = ffn;
         //Resolve the trainer configuration type once instead of once per branch
         Type trainerCfgType = netCfg.TrainerCfg.GetType();
         if (trainerCfgType == typeof(QRDRegrTrainerSettings))
         {
             trainer = new QRDRegrTrainer(ffn, trainingInputVectors, trainingOutputVectors, (QRDRegrTrainerSettings)netCfg.TrainerCfg, rand);
         }
         else if (trainerCfgType == typeof(RidgeRegrTrainerSettings))
         {
             trainer = new RidgeRegrTrainer(ffn, trainingInputVectors, trainingOutputVectors, (RidgeRegrTrainerSettings)netCfg.TrainerCfg);
         }
         else if (trainerCfgType == typeof(ElasticRegrTrainerSettings))
         {
             trainer = new ElasticRegrTrainer(ffn, trainingInputVectors, trainingOutputVectors, (ElasticRegrTrainerSettings)netCfg.TrainerCfg);
         }
         else if (trainerCfgType == typeof(RPropTrainerSettings))
         {
             trainer = new RPropTrainer(ffn, trainingInputVectors, trainingOutputVectors, (RPropTrainerSettings)netCfg.TrainerCfg, rand);
         }
         else
         {
             throw new ArgumentException($"Unknown trainer {netCfg.TrainerCfg}");
         }
     }
     else if (IsPP(settings))
     {
         //Parallel perceptron network
         //Check output dimensionality - the parallel perceptron supports a single output only
         if (trainingOutputVectors[0].Length != 1)
         {
             //No interpolation needed here - plain literal (was a redundant $-string)
             throw new InvalidOperationException("Can't create ParallelPerceptron. Only single output value is allowed.");
         }
         ParallelPerceptronSettings netCfg = (ParallelPerceptronSettings)settings;
         ParallelPerceptron         ppn    = new ParallelPerceptron(trainingInputVectors[0].Length, netCfg);
         net     = ppn;
         trainer = new PDeltaRuleTrainer(ppn, trainingInputVectors, trainingOutputVectors, netCfg.PDeltaRuleTrainerCfg, rand);
     }
     else
     {
         throw new InvalidOperationException("Unknown network settings");
     }
     //Apply the initial weight randomization before handing the network back
     net.RandomizeWeights(rand);
 }
Example #3
0
        /// <summary>
        /// Runs the example code.
        /// </summary>
        public void Run()
        {
            //Feed forward network configuration: Identity output layer and two LeakyReLU hidden
            //layers of three neurons each, trained by resilient back propagation (2 attempts, 200 epochs)
            const int HiddenLayerSize = 3;
            HiddenLayerSettings hiddenLayer = new HiddenLayerSettings(HiddenLayerSize, new LeakyReLUSettings());
            FeedForwardNetworkSettings netConfig = new FeedForwardNetworkSettings(new IdentitySettings(),
                                                                                  new HiddenLayersSettings(hiddenLayer, hiddenLayer),
                                                                                  new RPropTrainerSettings(2, 200)
                                                                                  );
            //Training samples
            VectorBundle samples = CreateTrainingData();
            //Network instance: 2 input values, 3 output values, structure as configured above
            FeedForwardNetwork network = new FeedForwardNetwork(2, 3, netConfig);

            //Training
            _log.Write("Training");
            _log.Write("--------");
            //Trainer instance (fixed seed keeps the run reproducible)
            RPropTrainer rprop = new RPropTrainer(network,
                                                  samples.InputVectorCollection,
                                                  samples.OutputVectorCollection,
                                                  (RPropTrainerSettings)netConfig.TrainerCfg,
                                                  new Random(0)
                                                  );

            //Iterate until the trainer gives up or the error is small enough
            while (rprop.Iteration())
            {
                //Negated form of the original continue-condition (MSE > 1e-6), exact for NaN too
                if (!(rprop.MSE > 1e-6))
                {
                    break;
                }
                _log.Write($"  Attempt {rprop.Attempt} / Epoch {rprop.AttemptEpoch,3} Mean Squared Error = {Math.Round(rprop.MSE, 8).ToString(CultureInfo.InvariantCulture)}", false);
            }
            _log.Write(string.Empty);

            //Show what the trained network computes for every training sample
            _log.Write("Trained network computations:");
            _log.Write("-----------------------------");
            foreach (double[] sample in samples.InputVectorCollection)
            {
                double[] outputs = network.Compute(sample);
                _log.Write($"  Input {sample[0]} {sample[1]} Results: AND={Math.Round(outputs[0])} OR={Math.Round(outputs[1])} XOR={Math.Round(outputs[2])}");
            }
            _log.Write(string.Empty);
        } //Run
Example #4
0
        /// <summary>
        /// Trains FF network to solve boolean algebra. It shows how to do it on the lowest level,
        /// without use of TNRNetBuilder.
        /// </summary>
        private void FullyManualLearning()
        {
            _log.Write("Example of a FF network low level training:");
            //Network configuration (structure + trainer settings)
            FeedForwardNetworkSettings netConfig = CreateFFNetConfig();

            _log.Write($"Network configuration xml:");
            _log.Write(netConfig.GetXml(true).ToString());
            //Training samples
            VectorBundle samples = CreateTrainingData();
            //Network instance: 2 input values, 3 output values, structure as configured above
            FeedForwardNetwork network = new FeedForwardNetwork(2, 3, netConfig);

            //Training
            _log.Write(string.Empty);
            _log.Write("  Training");
            _log.Write(string.Empty);
            //Trainer instance (fixed seed keeps the run reproducible)
            RPropTrainer rprop = new RPropTrainer(network,
                                                  samples.InputVectorCollection,
                                                  samples.OutputVectorCollection,
                                                  (RPropTrainerSettings)netConfig.TrainerCfg,
                                                  new Random(0)
                                                  );

            //Iterate until the trainer stops on its own or the error falls below the threshold
            bool continueTraining = rprop.Iteration();
            while (continueTraining)
            {
                _log.Write($"    Attempt {rprop.Attempt} / Epoch {rprop.AttemptEpoch,3} Mean Squared Error = {Math.Round(rprop.MSE, 8).ToString(CultureInfo.InvariantCulture)}", true);
                //Negated form of the original break-condition (MSE < 1e-7), exact for NaN too
                continueTraining = !(rprop.MSE < 1e-7) && rprop.Iteration();
            }
            _log.Write(string.Empty);

            //Show what the trained network computes
            DisplayNetworkComputations(network);
        }
Example #5
0
 //Builds the readout unit's network together with its matching trainer.
 //NOTE(review): method name contains a typo ("Treainer") - kept because callers outside this view use it.
 private static void CreateNetAndTreainer(ReadoutLayerSettings.ReadoutUnitSettings settings,
                                          List <double[]> trainingPredictorsCollection,
                                          List <double[]> trainingIdealOutputsCollection,
                                          Random rand,
                                          out INonRecurrentNetwork net,
                                          out INonRecurrentNetworkTrainer trainer
                                          )
 {
     if (settings.NetType == ReadoutLayerSettings.ReadoutUnitSettings.ReadoutUnitNetworkType.FF)
     {
         //Feed forward readout network with a single output value
         FeedForwardNetworkSettings ffCfg = (FeedForwardNetworkSettings)settings.NetSettings;
         FeedForwardNetwork ffNetwork = new FeedForwardNetwork(trainingPredictorsCollection[0].Length, 1, ffCfg);
         net = ffNetwork;
         //Resolve the configured trainer type once
         Type cfgType = ffCfg.TrainerCfg.GetType();
         if (cfgType == typeof(QRDRegrTrainerSettings))
         {
             trainer = new QRDRegrTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, (QRDRegrTrainerSettings)ffCfg.TrainerCfg, rand);
         }
         else if (cfgType == typeof(RidgeRegrTrainerSettings))
         {
             trainer = new RidgeRegrTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, (RidgeRegrTrainerSettings)ffCfg.TrainerCfg, rand);
         }
         else if (cfgType == typeof(ElasticRegrTrainerSettings))
         {
             trainer = new ElasticRegrTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, (ElasticRegrTrainerSettings)ffCfg.TrainerCfg);
         }
         else if (cfgType == typeof(RPropTrainerSettings))
         {
             trainer = new RPropTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, (RPropTrainerSettings)ffCfg.TrainerCfg, rand);
         }
         else
         {
             throw new ArgumentException($"Unknown trainer {ffCfg.TrainerCfg}");
         }
     }
     else
     {
         //Parallel perceptron readout network
         ParallelPerceptronSettings ppCfg = (ParallelPerceptronSettings)settings.NetSettings;
         ParallelPerceptron perceptron = new ParallelPerceptron(trainingPredictorsCollection[0].Length, ppCfg);
         net = perceptron;
         trainer = new PDeltaRuleTrainer(perceptron, trainingPredictorsCollection, trainingIdealOutputsCollection, ppCfg.PDeltaRuleTrainerCfg, rand);
     }
     //Initial weight randomization
     net.RandomizeWeights(rand);
 }
Example #6
0
        //Builds the readout unit's network together with its matching trainer.
        //NOTE(review): method name contains a typo ("Treainer") - kept because callers outside this view use it.
        private static void CreateNetAndTreainer(ReadoutLayerSettings.ReadoutUnitSettings settings,
                                                 List <double[]> trainingPredictorsCollection,
                                                 List <double[]> trainingIdealOutputsCollection,
                                                 Random rand,
                                                 out INonRecurrentNetwork net,
                                                 out INonRecurrentNetworkTrainer trainer
                                                 )
        {
            if (settings.NetType == ReadoutLayerSettings.ReadoutUnitSettings.ReadoutUnitNetworkType.FF)
            {
                //Feed forward readout network with a single output value
                FeedForwardNetworkSettings ffCfg = (FeedForwardNetworkSettings)settings.NetSettings;
                FeedForwardNetwork ffNetwork = new FeedForwardNetwork(trainingPredictorsCollection[0].Length, 1, ffCfg);
                net = ffNetwork;
                //Pick the trainer implementation by the configured regression method
                FeedForwardNetworkSettings.TrainingMethodType method = ffCfg.RegressionMethod;
                if (method == FeedForwardNetworkSettings.TrainingMethodType.Linear)
                {
                    trainer = new LinRegrTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, settings.RegressionAttemptEpochs, rand, ffCfg.LinRegrTrainerCfg);
                }
                else if (method == FeedForwardNetworkSettings.TrainingMethodType.Resilient)
                {
                    trainer = new RPropTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, ffCfg.RPropTrainerCfg);
                }
                else
                {
                    throw new ArgumentException($"Not supported regression method {method}");
                }
            }
            else
            {
                //Parallel perceptron readout network
                ParallelPerceptronSettings ppCfg = (ParallelPerceptronSettings)settings.NetSettings;
                ParallelPerceptron perceptron = new ParallelPerceptron(trainingPredictorsCollection[0].Length, ppCfg);
                net = perceptron;
                trainer = new PDeltaRuleTrainer(perceptron, trainingPredictorsCollection, trainingIdealOutputsCollection, ppCfg.PDeltaRuleTrainerCfg);
            }
            //Initial weight randomization
            net.RandomizeWeights(rand);
        }