Example #1
        /// <summary>
        /// Creates the readout layer configuration.
        /// </summary>
        /// <param name="foldDataRatio">Specifies what part of the available data is to be used as the fold data.</param>
        /// <param name="numOfAttempts">Number of regression attempts. Each readout network will try to learn numOfAttempts times.</param>
        /// <param name="numOfEpochs">Number of training epochs within an attempt.</param>
        ReadoutLayerSettings CreateReadoutLayerCfg(double foldDataRatio, int numOfAttempts, int numOfEpochs)
        {
            //For each output field we will use the predictions of two networks.
            //The first network has only an Identity output neuron and an associated resilient backpropagation (RProp) trainer.
            FeedForwardNetworkSettings ffNet1Cfg = new FeedForwardNetworkSettings(new AFAnalogIdentitySettings(),
                                                                                  null,
                                                                                  new RPropTrainerSettings(numOfAttempts, numOfEpochs)
                                                                                  );
            //The second network has an Identity output neuron, a hidden layer of 5 LeakyReLU neurons,
            //and an associated resilient backpropagation (RProp) trainer.
            HiddenLayerSettings        hiddenLayerCfg = new HiddenLayerSettings(5, new AFAnalogLeakyReLUSettings());
            FeedForwardNetworkSettings ffNet2Cfg      = new FeedForwardNetworkSettings(new AFAnalogIdentitySettings(),
                                                                                       new HiddenLayersSettings(hiddenLayerCfg),
                                                                                       new RPropTrainerSettings(numOfAttempts, numOfEpochs)
                                                                                       );
            //Create the cluster chain configuration and the task defaults configuration for the forecast task.
            CrossvalidationSettings           crossvalidationCfg  = new CrossvalidationSettings(foldDataRatio);
            TNRNetClusterRealNetworksSettings networksCfg         = new TNRNetClusterRealNetworksSettings(ffNet1Cfg, ffNet2Cfg);
            TNRNetClusterRealSettings         realClusterCfg      = new TNRNetClusterRealSettings(networksCfg, new TNRNetClusterRealWeightsSettings());
            TNRNetClusterChainRealSettings    realClusterChainCfg = new TNRNetClusterChainRealSettings(crossvalidationCfg, new TNRNetClustersRealSettings(realClusterCfg));
            TaskDefaultsSettings taskDefaultsCfg = new TaskDefaultsSettings(null, realClusterChainCfg);
            //Create readout unit configurations. We will forecast next High and Low prices.
            ReadoutUnitSettings highReadoutUnitCfg = new ReadoutUnitSettings("High", new ForecastTaskSettings());
            ReadoutUnitSettings lowReadoutUnitCfg  = new ReadoutUnitSettings("Low", new ForecastTaskSettings());
            //Create readout layer configuration
            ReadoutLayerSettings readoutLayerCfg = new ReadoutLayerSettings(taskDefaultsCfg,
                                                                            new ReadoutUnitsSettings(highReadoutUnitCfg,
                                                                                                     lowReadoutUnitCfg
                                                                                                     ),
                                                                            null
                                                                            );

            return(readoutLayerCfg);
        }
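A minimal usage sketch of the helper above; the argument values are illustrative assumptions, not taken from the example:

            //Hypothetical call: 10% of the data per fold, 2 regression attempts, 400 epochs per attempt
            ReadoutLayerSettings readoutLayerCfg = CreateReadoutLayerCfg(0.1d, 2, 400);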
Example #2
        /// <summary>
        /// Creates readout layer configuration
        /// </summary>
        /// <param name="testDataRatio">Specifies what part of the available data is to be used as test data</param>
        /// <param name="numOfAttempts">Number of regression attempts. Each readout network will try to learn numOfAttempts times</param>
        /// <param name="numOfEpochs">Number of training epochs within an attempt</param>
        ReadoutLayerSettings CreateReadoutLayerCfg(double testDataRatio, int numOfAttempts, int numOfEpochs)
        {
            //For each output field we will use the predictions of two networks.
            //The first network has only an Identity output neuron and an associated resilient backpropagation (RProp) trainer.
            FeedForwardNetworkSettings ffNet1Cfg = new FeedForwardNetworkSettings(new IdentitySettings(),
                                                                                  null,
                                                                                  new RPropTrainerSettings(numOfAttempts, numOfEpochs)
                                                                                  );
            //The second network has an Identity output neuron, a hidden layer of 5 LeakyReLU neurons,
            //and an associated resilient backpropagation (RProp) trainer.
            HiddenLayerSettings        hiddenLayerCfg = new HiddenLayerSettings(5, new LeakyReLUSettings());
            FeedForwardNetworkSettings ffNet2Cfg      = new FeedForwardNetworkSettings(new IdentitySettings(),
                                                                                       new HiddenLayersSettings(hiddenLayerCfg),
                                                                                       new RPropTrainerSettings(numOfAttempts, numOfEpochs)
                                                                                       );
            //Create default networks configuration for forecasting
            DefaultNetworksSettings defaultNetworksCfg = new DefaultNetworksSettings(null, new ForecastNetworksSettings(ffNet1Cfg, ffNet2Cfg));
            //Create readout units. We will forecast next High and Low prices. Both fields are real numbers.
            ReadoutUnitSettings highReadoutUnitCfg = new ReadoutUnitSettings("High", new ForecastTaskSettings(new RealFeatureFilterSettings()));
            ReadoutUnitSettings lowReadoutUnitCfg  = new ReadoutUnitSettings("Low", new ForecastTaskSettings(new RealFeatureFilterSettings()));
            //Create readout layer configuration
            ReadoutLayerSettings readoutLayerCfg = new ReadoutLayerSettings(new ReadoutUnitsSettings(highReadoutUnitCfg,
                                                                                                     lowReadoutUnitCfg
                                                                                                     ),
                                                                            testDataRatio,
                                                                            ReadoutLayerSettings.AutoFolds,
                                                                            ReadoutLayerSettings.DefaultRepetitions,
                                                                            defaultNetworksCfg
                                                                            );

            return(readoutLayerCfg);
        }
Example #3
            /// <summary>
            /// Creates the instance and initializes it from given xml element.
            /// </summary>
            /// <param name="readoutUnitElem">
            /// Xml data containing the settings.
            /// </param>
            public ReadoutUnitSettings(XElement readoutUnitElem)
            {
                Name     = readoutUnitElem.Attribute("name").Value;
                TaskType = CommonEnums.ParseTaskType(readoutUnitElem.Attribute("task").Value);
                //Net settings
                List <XElement> netSettingsElems = new List <XElement>();

                netSettingsElems.AddRange(readoutUnitElem.Descendants("ff"));
                netSettingsElems.AddRange(readoutUnitElem.Descendants("pp"));
                if (netSettingsElems.Count == 0)
                {
                    throw new Exception("Network configuration is not specified in readout unit settings.");
                }
                if (netSettingsElems.Count > 1)
                {
                    throw new Exception("Only one network configuration can be specified in readout unit settings.");
                }
                XElement netSettingsElem = netSettingsElems[0];

                //FF?
                if (netSettingsElem.Name.LocalName == "ff")
                {
                    NetType     = ReadoutUnitNetworkType.FF;
                    NetSettings = new FeedForwardNetworkSettings(netSettingsElem);
                    OutputRange = ((FeedForwardNetworkSettings)NetSettings).OutputRange.DeepClone();
                }
                else
                {
                    //PP
                    NetType     = ReadoutUnitNetworkType.PP;
                    NetSettings = new ParallelPerceptronSettings(netSettingsElem);
                    OutputRange = ((ParallelPerceptronSettings)NetSettings).OutputRange.DeepClone();
                }
                return;
            }
Example #4
            /// <summary>
            /// Creates the instance and initializes it from given xml element.
            /// </summary>
            /// <param name="readoutUnitElem">
            /// Xml data containing the settings.
            /// </param>
            public ReadoutUnitSettings(XElement readoutUnitElem)
            {
                RegressionAttempts      = int.Parse(readoutUnitElem.Attribute("attempts").Value);
                RegressionAttemptEpochs = int.Parse(readoutUnitElem.Attribute("attemptEpochs").Value);
                //Net settings
                List <XElement> netSettingsElems = new List <XElement>();

                netSettingsElems.AddRange(readoutUnitElem.Descendants("ff"));
                netSettingsElems.AddRange(readoutUnitElem.Descendants("pp"));
                if (netSettingsElems.Count == 0)
                {
                    throw new Exception("Network configuration is not specified in readout unit settings.");
                }
                if (netSettingsElems.Count > 1)
                {
                    throw new Exception("Only one network configuration can be specified in readout unit settings.");
                }
                XElement netSettingsElem = netSettingsElems[0];

                //FF?
                if (netSettingsElem.Name == "ff")
                {
                    NetType     = ReadoutUnitNetworkType.FF;
                    NetSettings = new FeedForwardNetworkSettings(netSettingsElem);
                }
                else
                {
                    //PP
                    NetType     = ReadoutUnitNetworkType.PP;
                    NetSettings = new ParallelPerceptronSettings(netSettingsElem);
                }
                return;
            }
Example #5
        /// <summary>
        /// Creates the simplified configuration of the readout layer to solve the forecast task.
        /// </summary>
        /// <remarks>
        /// Supports the real numbers output only.
        /// </remarks>
        /// <param name="crossvalidationCfg">The crossvalidation configuration.</param>
        /// <param name="netCfg">The configuration of the FF network to be used in the cluster(s).</param>
        /// <param name="clusterChainLength">The number of chained clusters.</param>
        /// <param name="unitName">The readout unit names (the output field names).</param>
        public static ReadoutLayerSettings CreateForecastReadoutCfg(CrossvalidationSettings crossvalidationCfg,
                                                                    FeedForwardNetworkSettings netCfg,
                                                                    int clusterChainLength,
                                                                    params string[] unitName
                                                                    )
        {
            if (netCfg == null)
            {
                throw new ArgumentNullException("netCfg");
            }
            List <ReadoutUnitSettings> unitCfgCollection = new List <ReadoutUnitSettings>();

            foreach (string name in unitName)
            {
                unitCfgCollection.Add(new ReadoutUnitSettings(name, new ForecastTaskSettings()));
            }
            TNRNetClusterRealNetworksSettings netsCfg              = new TNRNetClusterRealNetworksSettings(netCfg);
            TNRNetClusterRealSettings         clusterCfg           = new TNRNetClusterRealSettings(netsCfg, new TNRNetClusterRealWeightsSettings());
            List <TNRNetClusterRealSettings>  clusterCfgCollection = new List <TNRNetClusterRealSettings>();

            for (int i = 0; i < clusterChainLength; i++)
            {
                clusterCfgCollection.Add(clusterCfg);
            }
            TNRNetClustersRealSettings     clustersCfg     = new TNRNetClustersRealSettings(clusterCfgCollection);
            TNRNetClusterChainRealSettings clusterChainCfg = new TNRNetClusterChainRealSettings(crossvalidationCfg, clustersCfg);
            TaskDefaultsSettings           taskDefaultsCfg = new TaskDefaultsSettings(null, clusterChainCfg);

            return(new ReadoutLayerSettings(taskDefaultsCfg,
                                            new ReadoutUnitsSettings(unitCfgCollection),
                                            null
                                            ));
        }
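A hedged usage sketch for the factory above, assembling its inputs from constructors already shown in Examples #1 and #5; all numeric values and unit names are illustrative assumptions:

            //Hypothetical call: 10% fold data ratio, single-network real cluster, chain of 2 clusters, two forecast units
            CrossvalidationSettings crossvalidationCfg = new CrossvalidationSettings(0.1d);
            FeedForwardNetworkSettings netCfg = new FeedForwardNetworkSettings(new AFAnalogIdentitySettings(),
                                                                               null,
                                                                               new RPropTrainerSettings(2, 400)
                                                                               );
            ReadoutLayerSettings readoutLayerCfg = CreateForecastReadoutCfg(crossvalidationCfg, netCfg, 2, "High", "Low");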
Example #6
 /// <summary>
 /// Creates new network and associated trainer.
 /// </summary>
 /// <param name="settings">Non-recurrent-network settings</param>
 /// <param name="trainingInputVectors">Collection of training input samples</param>
 /// <param name="trainingOutputVectors">Collection of training output (desired) samples</param>
 /// <param name="rand">Random object to be used</param>
 /// <param name="net">Created network</param>
 /// <param name="trainer">Created associated trainer</param>
 public static void CreateNetworkAndTrainer(INonRecurrentNetworkSettings settings,
                                            List <double[]> trainingInputVectors,
                                            List <double[]> trainingOutputVectors,
                                            Random rand,
                                            out INonRecurrentNetwork net,
                                            out INonRecurrentNetworkTrainer trainer
                                            )
 {
     if (IsFF(settings))
     {
         //Feed forward network
         FeedForwardNetworkSettings netCfg = (FeedForwardNetworkSettings)settings;
         FeedForwardNetwork         ffn    = new FeedForwardNetwork(trainingInputVectors[0].Length, trainingOutputVectors[0].Length, netCfg);
         net = ffn;
         if (netCfg.TrainerCfg.GetType() == typeof(QRDRegrTrainerSettings))
         {
             trainer = new QRDRegrTrainer(ffn, trainingInputVectors, trainingOutputVectors, (QRDRegrTrainerSettings)netCfg.TrainerCfg, rand);
         }
         else if (netCfg.TrainerCfg.GetType() == typeof(RidgeRegrTrainerSettings))
         {
             trainer = new RidgeRegrTrainer(ffn, trainingInputVectors, trainingOutputVectors, (RidgeRegrTrainerSettings)netCfg.TrainerCfg);
         }
         else if (netCfg.TrainerCfg.GetType() == typeof(ElasticRegrTrainerSettings))
         {
             trainer = new ElasticRegrTrainer(ffn, trainingInputVectors, trainingOutputVectors, (ElasticRegrTrainerSettings)netCfg.TrainerCfg);
         }
         else if (netCfg.TrainerCfg.GetType() == typeof(RPropTrainerSettings))
         {
             trainer = new RPropTrainer(ffn, trainingInputVectors, trainingOutputVectors, (RPropTrainerSettings)netCfg.TrainerCfg, rand);
         }
         else
         {
             throw new ArgumentException($"Unknown trainer {netCfg.TrainerCfg}");
         }
     }
     else if (IsPP(settings))
     {
         //Parallel perceptron network
         //Check output
         if (trainingOutputVectors[0].Length != 1)
         {
             throw new InvalidOperationException($"Can't create ParallelPerceptron. Only single output value is allowed.");
         }
         ParallelPerceptronSettings netCfg = (ParallelPerceptronSettings)settings;
         ParallelPerceptron         ppn    = new ParallelPerceptron(trainingInputVectors[0].Length, netCfg);
         net     = ppn;
         trainer = new PDeltaRuleTrainer(ppn, trainingInputVectors, trainingOutputVectors, netCfg.PDeltaRuleTrainerCfg, rand);
     }
     else
     {
         throw new InvalidOperationException($"Unknown network settings");
     }
     net.RandomizeWeights(rand);
     return;
 }
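A short usage sketch of the factory method above; settings, trainingInputs and trainingOutputs are assumed to be prepared elsewhere, and it assumes INonRecurrentNetworkTrainer exposes the same Iteration() and MSE members used by RPropTrainer in Example #7:

     //Hypothetical usage: settings, trainingInputs and trainingOutputs are assumed to exist
     CreateNetworkAndTrainer(settings, trainingInputs, trainingOutputs, new Random(0),
                             out INonRecurrentNetwork net,
                             out INonRecurrentNetworkTrainer trainer
                             );
     //Iterate until the trainer stops or the error is small enough (the same loop as in Example #7)
     while (trainer.Iteration() && trainer.MSE > 1e-6)
     {
         //Next training epoch
     }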
Example #7
        /// <summary>
        /// Runs the example code.
        /// </summary>
        public void Run()
        {
            //Create the configuration of a feed forward network with an Identity output layer, two LeakyReLU hidden layers,
            //and an associated resilient backpropagation trainer configuration
            const int                  HiddenLayerSize = 3;
            HiddenLayerSettings        hiddenLayerCfg  = new HiddenLayerSettings(HiddenLayerSize, new LeakyReLUSettings());
            FeedForwardNetworkSettings ffNetCfg        = new FeedForwardNetworkSettings(new IdentitySettings(),
                                                                                        new HiddenLayersSettings(hiddenLayerCfg, hiddenLayerCfg),
                                                                                        new RPropTrainerSettings(2, 200)
                                                                                        );
            //Collect training data
            VectorBundle trainingData = CreateTrainingData();
            //Create network instance
            //We specify 2 input values, 3 output values and previously prepared network structure configuration
            FeedForwardNetwork ffNet = new FeedForwardNetwork(2, 3, ffNetCfg);

            //Training
            _log.Write("Training");
            _log.Write("--------");
            //Create trainer instance
            RPropTrainer trainer = new RPropTrainer(ffNet,
                                                    trainingData.InputVectorCollection,
                                                    trainingData.OutputVectorCollection,
                                                    (RPropTrainerSettings)ffNetCfg.TrainerCfg,
                                                    new Random(0)
                                                    );

            //Training loop
            while (trainer.Iteration() && trainer.MSE > 1e-6)
            {
                _log.Write($"  Attempt {trainer.Attempt} / Epoch {trainer.AttemptEpoch,3} Mean Squared Error = {Math.Round(trainer.MSE, 8).ToString(CultureInfo.InvariantCulture)}", false);
            }
            _log.Write(string.Empty);

            //Training is done
            //Display network computation results
            _log.Write("Trained network computations:");
            _log.Write("-----------------------------");
            foreach (double[] input in trainingData.InputVectorCollection)
            {
                double[] results = ffNet.Compute(input);
                _log.Write($"  Input {input[0]} {input[1]} Results: AND={Math.Round(results[0])} OR={Math.Round(results[1])} XOR={Math.Round(results[2])}");
            }
            _log.Write(string.Empty);

            //Finished
            return;
        } //Run
Example #8
            /// <summary>
            /// Creates the instance and initializes it from given xml element.
            /// </summary>
            /// <param name="readoutUnitElem">
            /// Xml data containing the settings.
            /// </param>
            public ReadoutUnitSettings(XElement readoutUnitElem)
            {
                //Name
                Name = readoutUnitElem.Attribute("name").Value;
                //Task and filter
                XElement taskElem = readoutUnitElem.Descendants().First();

                if (taskElem.Name.LocalName == "forecast")
                {
                    TaskType = ReadoutUnit.TaskType.Forecast;
                }
                else
                {
                    TaskType = ReadoutUnit.TaskType.Classification;
                }
                FeatureFilterCfg = FeatureFilterFactory.LoadSettings(taskElem.Descendants().First());
                //Net settings
                List <XElement> netSettingsElems = new List <XElement>();

                netSettingsElems.AddRange(readoutUnitElem.Descendants("ff"));
                netSettingsElems.AddRange(readoutUnitElem.Descendants("pp"));
                if (netSettingsElems.Count == 0)
                {
                    throw new Exception("Network configuration is not specified in readout unit settings.");
                }
                if (netSettingsElems.Count > 1)
                {
                    throw new Exception("Only one network configuration can be specified in readout unit settings.");
                }
                XElement netSettingsElem = netSettingsElems[0];

                //FF?
                if (netSettingsElem.Name.LocalName == "ff")
                {
                    NetType     = ReadoutUnitNetworkType.FF;
                    NetSettings = new FeedForwardNetworkSettings(netSettingsElem);
                    OutputRange = ((FeedForwardNetworkSettings)NetSettings).OutputRange.DeepClone();
                }
                else
                {
                    //PP
                    NetType     = ReadoutUnitNetworkType.PP;
                    NetSettings = new ParallelPerceptronSettings(netSettingsElem);
                    OutputRange = ((ParallelPerceptronSettings)NetSettings).OutputRange.DeepClone();
                }
                return;
            }
Example #9
        /// <summary>
        /// Trains an FF network to solve boolean algebra. It shows how to do it at the lowest level,
        /// without the use of TNRNetBuilder.
        /// </summary>
        private void FullyManualLearning()
        {
            _log.Write("Example of a FF network low level training:");
            //Create FF network configuration.
            FeedForwardNetworkSettings ffNetCfg = CreateFFNetConfig();

            _log.Write($"Network configuration xml:");
            _log.Write(ffNetCfg.GetXml(true).ToString());
            //Collect training data
            VectorBundle trainingData = CreateTrainingData();
            //Create network instance
            //We specify 2 input values, 3 output values and previously prepared network structure configuration
            FeedForwardNetwork ffNet = new FeedForwardNetwork(2,       //The number of input values
                                                              3,       //The number of output values
                                                              ffNetCfg //Network structure and a trainer
                                                              );

            //Training
            _log.Write(string.Empty);
            _log.Write("  Training");
            _log.Write(string.Empty);
            //Create the trainer instance
            RPropTrainer trainer = new RPropTrainer(ffNet,
                                                    trainingData.InputVectorCollection,
                                                    trainingData.OutputVectorCollection,
                                                    (RPropTrainerSettings)ffNetCfg.TrainerCfg,
                                                    new Random(0)
                                                    );

            //Training loop
            while (trainer.Iteration())
            {
                _log.Write($"    Attempt {trainer.Attempt} / Epoch {trainer.AttemptEpoch,3} Mean Squared Error = {Math.Round(trainer.MSE, 8).ToString(CultureInfo.InvariantCulture)}", true);
                //Check training exit condition
                if (trainer.MSE < 1e-7)
                {
                    break;
                }
            }
            _log.Write(string.Empty);

            //Training is done
            //Display the network computation results
            DisplayNetworkComputations(ffNet);
            //Finished
            return;
        }
Example #10
 private static void CreateNetAndTrainer(ReadoutLayerSettings.ReadoutUnitSettings settings,
                                         List <double[]> trainingPredictorsCollection,
                                         List <double[]> trainingIdealOutputsCollection,
                                         Random rand,
                                         out INonRecurrentNetwork net,
                                         out INonRecurrentNetworkTrainer trainer
                                         )
 {
     if (settings.NetType == ReadoutLayerSettings.ReadoutUnitSettings.ReadoutUnitNetworkType.FF)
     {
         FeedForwardNetworkSettings netCfg = (FeedForwardNetworkSettings)settings.NetSettings;
         FeedForwardNetwork         ffn    = new FeedForwardNetwork(trainingPredictorsCollection[0].Length, 1, netCfg);
         net = ffn;
         if (netCfg.TrainerCfg.GetType() == typeof(QRDRegrTrainerSettings))
         {
             trainer = new QRDRegrTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, (QRDRegrTrainerSettings)netCfg.TrainerCfg, rand);
         }
         else if (netCfg.TrainerCfg.GetType() == typeof(RidgeRegrTrainerSettings))
         {
             trainer = new RidgeRegrTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, (RidgeRegrTrainerSettings)netCfg.TrainerCfg, rand);
         }
         else if (netCfg.TrainerCfg.GetType() == typeof(ElasticRegrTrainerSettings))
         {
             trainer = new ElasticRegrTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, (ElasticRegrTrainerSettings)netCfg.TrainerCfg);
         }
         else if (netCfg.TrainerCfg.GetType() == typeof(RPropTrainerSettings))
         {
             trainer = new RPropTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, (RPropTrainerSettings)netCfg.TrainerCfg, rand);
         }
         else
         {
             throw new ArgumentException($"Unknown trainer {netCfg.TrainerCfg}");
         }
     }
     else
     {
         ParallelPerceptronSettings netCfg = (ParallelPerceptronSettings)settings.NetSettings;
         ParallelPerceptron         ppn    = new ParallelPerceptron(trainingPredictorsCollection[0].Length, netCfg);
         net     = ppn;
         trainer = new PDeltaRuleTrainer(ppn, trainingPredictorsCollection, trainingIdealOutputsCollection, netCfg.PDeltaRuleTrainerCfg, rand);
     }
     net.RandomizeWeights(rand);
     return;
 }
Example #11
        /// <summary>
        /// Creates the simplified configuration of the readout layer to solve the classification task.
        /// </summary>
        /// <remarks>
        /// Supports the probabilistic output only.
        /// </remarks>
        /// <param name="crossvalidationCfg">The crossvalidation configuration.</param>
        /// <param name="netCfg">The configuration of the FF network to be used in the cluster(s).</param>
        /// <param name="clusterChainLength">The number of chained clusters.</param>
        /// <param name="oneTakesAllGroupName">The name of the "One Takes All" group in case of multiple classes, or the "NA" code when there is only a single class.</param>
        /// <param name="unitName">The readout unit names (the names of the classes).</param>
        public static ReadoutLayerSettings CreateClassificationReadoutCfg(CrossvalidationSettings crossvalidationCfg,
                                                                          FeedForwardNetworkSettings netCfg,
                                                                          int clusterChainLength,
                                                                          string oneTakesAllGroupName,
                                                                          params string[] unitName
                                                                          )
        {
            if (netCfg == null)
            {
                throw new ArgumentNullException("netCfg");
            }
            List <string> readoutUnitNames = new List <string>(unitName.AsEnumerable());

            oneTakesAllGroupName = readoutUnitNames.Count > 1 ? oneTakesAllGroupName : "NA";
            List <ReadoutUnitSettings> unitCfgCollection = new List <ReadoutUnitSettings>();

            foreach (string name in readoutUnitNames)
            {
                unitCfgCollection.Add(new ReadoutUnitSettings(name, new ClassificationTaskSettings(oneTakesAllGroupName)));
            }

            TNRNetClusterSingleBoolNetworksSettings netsCfg              = new TNRNetClusterSingleBoolNetworksSettings(netCfg);
            TNRNetClusterSingleBoolSettings         clusterCfg           = new TNRNetClusterSingleBoolSettings(netsCfg, new TNRNetClusterSingleBoolWeightsSettings());
            List <TNRNetClusterSingleBoolSettings>  clusterCfgCollection = new List <TNRNetClusterSingleBoolSettings>();

            for (int i = 0; i < clusterChainLength; i++)
            {
                clusterCfgCollection.Add(clusterCfg);
            }
            TNRNetClustersSingleBoolSettings     clustersCfg     = new TNRNetClustersSingleBoolSettings(clusterCfgCollection);
            TNRNetClusterChainSingleBoolSettings clusterChainCfg = new TNRNetClusterChainSingleBoolSettings(crossvalidationCfg, clustersCfg);
            TaskDefaultsSettings taskDefaultsCfg = new TaskDefaultsSettings(clusterChainCfg, null);

            return(new ReadoutLayerSettings(taskDefaultsCfg,
                                            new ReadoutUnitsSettings(unitCfgCollection),
                                            readoutUnitNames.Count > 1 ? new OneTakesAllGroupsSettings(new OneTakesAllGroupSettings(oneTakesAllGroupName, new OneTakesAllBasicDecisionSettings())) : null
                                            ));
        }
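A hedged usage sketch for the classification factory above; netCfg is assumed to be a previously prepared FeedForwardNetworkSettings, and the group and class names are illustrative:

            //Hypothetical call: three classes under one "One Takes All" group, 10% fold data ratio, chain of 2 clusters
            ReadoutLayerSettings readoutLayerCfg = CreateClassificationReadoutCfg(new CrossvalidationSettings(0.1d),
                                                                                  netCfg,
                                                                                  2,
                                                                                  "Main",
                                                                                  "ClassA", "ClassB", "ClassC"
                                                                                  );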
Example #12
        /// <summary>
        /// Creates readout layer configuration to solve forecast task
        /// </summary>
        /// <param name="netCfg">FF network configuration to be associated with readout units</param>
        /// <param name="testDataRatio">Specifies what part of the available data is to be used as test data</param>
        /// <param name="repetitions">Number of repetitions of the folds regression</param>
        /// <param name="unitName">Readout unit name</param>
        public static ReadoutLayerSettings CreateForecastReadoutCfg(FeedForwardNetworkSettings netCfg,
                                                                    double testDataRatio,
                                                                    int repetitions,
                                                                    params string[] unitName
                                                                    )
        {
            if (netCfg == null)
            {
                throw new ArgumentNullException("netCfg");
            }
            List <ReadoutUnitSettings> unitCfgCollection = new List <ReadoutUnitSettings>();

            foreach (string name in unitName)
            {
                unitCfgCollection.Add(new ReadoutUnitSettings(name, new ForecastTaskSettings(new RealFeatureFilterSettings())));
            }
            return(new ReadoutLayerSettings(new ReadoutUnitsSettings(unitCfgCollection),
                                            testDataRatio,
                                            ReadoutLayerSettings.AutoFolds,
                                            repetitions,
                                            new DefaultNetworksSettings(null, new ForecastNetworksSettings(netCfg))
                                            ));
        }
Example #13
        private static void CreateNetAndTrainer(ReadoutLayerSettings.ReadoutUnitSettings settings,
                                                List <double[]> trainingPredictorsCollection,
                                                List <double[]> trainingIdealOutputsCollection,
                                                Random rand,
                                                out INonRecurrentNetwork net,
                                                out INonRecurrentNetworkTrainer trainer
                                                )
        {
            if (settings.NetType == ReadoutLayerSettings.ReadoutUnitSettings.ReadoutUnitNetworkType.FF)
            {
                FeedForwardNetworkSettings netCfg = (FeedForwardNetworkSettings)settings.NetSettings;
                FeedForwardNetwork         ffn    = new FeedForwardNetwork(trainingPredictorsCollection[0].Length, 1, netCfg);
                net = ffn;
                switch (netCfg.RegressionMethod)
                {
                case FeedForwardNetworkSettings.TrainingMethodType.Linear:
                    trainer = new LinRegrTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, settings.RegressionAttemptEpochs, rand, netCfg.LinRegrTrainerCfg);
                    break;

                case FeedForwardNetworkSettings.TrainingMethodType.Resilient:
                    trainer = new RPropTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, netCfg.RPropTrainerCfg);
                    break;

                default:
                    throw new ArgumentException($"Not supported regression method {netCfg.RegressionMethod}");
                }
            }
            else
            {
                ParallelPerceptronSettings netCfg = (ParallelPerceptronSettings)settings.NetSettings;
                ParallelPerceptron         ppn    = new ParallelPerceptron(trainingPredictorsCollection[0].Length, netCfg);
                net     = ppn;
                trainer = new PDeltaRuleTrainer(ppn, trainingPredictorsCollection, trainingIdealOutputsCollection, netCfg.PDeltaRuleTrainerCfg);
            }
            net.RandomizeWeights(rand);
            return;
        }
Example #14
        /// <summary>
        /// Trains an FF network to solve boolean algebra.
        /// It shows how to use the TNRNetBuilder component (here with its default build controller).
        /// </summary>
        private void TNRNetBuilderLearning_DefaultController()
        {
            _log.Write("Example of a FF network build using the TNRNetBuilder component with default build controller:");
            //Create FF network configuration.
            FeedForwardNetworkSettings ffNetCfg = CreateFFNetConfig();

            _log.Write($"Network configuration xml:");
            _log.Write(ffNetCfg.GetXml(true).ToString());
            //Collect training data
            VectorBundle trainingData = CreateTrainingData();
            //In our case, testing data is the same as training data
            VectorBundle testingData = trainingData;
            //Training
            //Create builder instance
            TNRNetBuilder builder = new TNRNetBuilder("Boolean Algebra",      //Network name
                                                      ffNetCfg,               //Network configuration
                                                      TNRNet.OutputType.Real, //Network output is one or more real numbers
                                                      trainingData,           //Training data
                                                      testingData,            //Testing data
                                                      null,                   //No specific random generator object to be used
                                                      null                    //No specific build controller -> use default
                                                      );

            //Register notification event handler
            builder.NetworkBuildProgressChanged += OnNetworkBuildProgressChanged;
            //Build the network
            _log.Write(string.Empty);
            _log.Write("  Training");
            TNRNet ffNet = builder.Build();

            //Training is done
            _log.Write(string.Empty);
            //Display the network computation results
            DisplayNetworkComputations(ffNet.Network);
            //Finished
            return;
        }
Example #15
        /// <summary>
        /// Creates readout layer configuration to solve classification task
        /// </summary>
        /// <param name="netCfg">FF network configuration to be associated with readout units</param>
        /// <param name="testDataRatio">Specifies what part of the available data is to be used as test data</param>
        /// <param name="repetitions">Number of repetitions of the folds regression</param>
        /// <param name="oneWinnerGroupName">Name of the "one winner" group encapsulating classification readout units</param>
        /// <param name="unitName">Readout unit name</param>
        public static ReadoutLayerSettings CreateClassificationReadoutCfg(FeedForwardNetworkSettings netCfg,
                                                                          double testDataRatio,
                                                                          int repetitions,
                                                                          string oneWinnerGroupName,
                                                                          params string[] unitName
                                                                          )
        {
            if (netCfg == null)
            {
                throw new ArgumentNullException("netCfg");
            }
            List <ReadoutUnitSettings> unitCfgCollection = new List <ReadoutUnitSettings>();

            foreach (string name in unitName)
            {
                unitCfgCollection.Add(new ReadoutUnitSettings(name, new ClassificationTaskSettings(oneWinnerGroupName)));
            }
            return(new ReadoutLayerSettings(new ReadoutUnitsSettings(unitCfgCollection),
                                            testDataRatio,
                                            ReadoutLayerSettings.AutoFolds,
                                            repetitions,
                                            new DefaultNetworksSettings(new ClassificationNetworksSettings(netCfg), null)
                                            ));
        }