/// <summary>
/// Creates the readout unit's network and the matching trainer instance.
/// NOTE(review): "Treainer" in the method name is a typo for "Trainer"; kept as-is so existing callers keep compiling.
/// </summary>
/// <param name="settings">Configuration of the readout unit (network type and nested network/trainer settings).</param>
/// <param name="trainingPredictorsCollection">Training input vectors; the first vector's length defines the network's input size (collection must be non-empty).</param>
/// <param name="trainingIdealOutputsCollection">Corresponding ideal (desired) output vectors.</param>
/// <param name="rand">Random generator used for trainer internals and the final weight randomization.</param>
/// <param name="net">Receives the created network instance.</param>
/// <param name="trainer">Receives the created trainer instance.</param>
/// <exception cref="ArgumentException">Thrown when a feed-forward network's trainer configuration type is not recognized.</exception>
private static void CreateNetAndTreainer(ReadoutLayerSettings.ReadoutUnitSettings settings,
                                         List<double[]> trainingPredictorsCollection,
                                         List<double[]> trainingIdealOutputsCollection,
                                         Random rand,
                                         out INonRecurrentNetwork net,
                                         out INonRecurrentNetworkTrainer trainer
                                         )
{
    if (settings.NetType == ReadoutLayerSettings.ReadoutUnitSettings.ReadoutUnitNetworkType.FF)
    {
        // Feed-forward network: single output neuron, input size taken from the first predictor vector.
        FeedForwardNetworkSettings netCfg = (FeedForwardNetworkSettings)settings.NetSettings;
        FeedForwardNetwork ffn = new FeedForwardNetwork(trainingPredictorsCollection[0].Length, 1, netCfg);
        net = ffn;
        // Type-pattern switch replaces the original GetType() == typeof(...) if/else chain:
        // one type test per arm and no duplicated cast.
        // Assumes the trainer-settings classes are unrelated siblings (no inheritance between them),
        // so pattern matching selects the same arm as the original exact-type comparison — TODO confirm.
        switch (netCfg.TrainerCfg)
        {
            case LinRegrTrainerSettings linRegrCfg:
                trainer = new LinRegrTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, linRegrCfg, rand);
                break;
            case QRDRegrTrainerSettings qrdRegrCfg:
                trainer = new QRDRegrTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, qrdRegrCfg, rand);
                break;
            case RidgeRegrTrainerSettings ridgeRegrCfg:
                trainer = new RidgeRegrTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, ridgeRegrCfg, rand);
                break;
            case RPropTrainerSettings rPropCfg:
                trainer = new RPropTrainer(ffn, trainingPredictorsCollection, trainingIdealOutputsCollection, rPropCfg, rand);
                break;
            default:
                throw new ArgumentException($"Unknown trainer {netCfg.TrainerCfg}");
        }
    }
    else
    {
        // Parallel perceptron: trainer configuration is fixed (p-delta rule).
        ParallelPerceptronSettings netCfg = (ParallelPerceptronSettings)settings.NetSettings;
        ParallelPerceptron ppn = new ParallelPerceptron(trainingPredictorsCollection[0].Length, netCfg);
        net = ppn;
        trainer = new PDeltaRuleTrainer(ppn, trainingPredictorsCollection, trainingIdealOutputsCollection, netCfg.PDeltaRuleTrainerCfg, rand);
    }
    // Start every regression attempt from freshly randomized weights.
    net.RandomizeWeights(rand);
}
/// <summary>
/// Builds the network and its trainer for one readout unit according to the unit's settings.
/// (Method name contains a historical typo — "Treainer" — preserved for caller compatibility.)
/// </summary>
/// <param name="settings">Readout unit configuration (determines network kind and nested settings).</param>
/// <param name="trainingPredictorsCollection">Input sample vectors; element 0 defines the input layer size.</param>
/// <param name="trainingIdealOutputsCollection">Desired output vectors paired with the inputs.</param>
/// <param name="rand">Source of randomness for training and weight initialization.</param>
/// <param name="net">Out: the instantiated network.</param>
/// <param name="trainer">Out: the instantiated trainer.</param>
/// <exception cref="ArgumentException">When the configured regression method is not supported.</exception>
private static void CreateNetAndTreainer(ReadoutLayerSettings.ReadoutUnitSettings settings,
                                         List<double[]> trainingPredictorsCollection,
                                         List<double[]> trainingIdealOutputsCollection,
                                         Random rand,
                                         out INonRecurrentNetwork net,
                                         out INonRecurrentNetworkTrainer trainer
                                         )
{
    int inputVectorLength = trainingPredictorsCollection[0].Length;
    if (settings.NetType != ReadoutLayerSettings.ReadoutUnitSettings.ReadoutUnitNetworkType.FF)
    {
        // Non-FF case: parallel perceptron trained by the p-delta rule.
        ParallelPerceptronSettings perceptronCfg = (ParallelPerceptronSettings)settings.NetSettings;
        ParallelPerceptron perceptron = new ParallelPerceptron(inputVectorLength, perceptronCfg);
        net = perceptron;
        trainer = new PDeltaRuleTrainer(perceptron, trainingPredictorsCollection, trainingIdealOutputsCollection, perceptronCfg.PDeltaRuleTrainerCfg);
        net.RandomizeWeights(rand);
        return;
    }
    // Feed-forward case: one output neuron; trainer chosen by the configured regression method.
    FeedForwardNetworkSettings ffnCfg = (FeedForwardNetworkSettings)settings.NetSettings;
    FeedForwardNetwork ffNetwork = new FeedForwardNetwork(inputVectorLength, 1, ffnCfg);
    net = ffNetwork;
    if (ffnCfg.RegressionMethod == FeedForwardNetworkSettings.TrainingMethodType.Linear)
    {
        trainer = new LinRegrTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, settings.RegressionAttemptEpochs, rand, ffnCfg.LinRegrTrainerCfg);
    }
    else if (ffnCfg.RegressionMethod == FeedForwardNetworkSettings.TrainingMethodType.Resilient)
    {
        trainer = new RPropTrainer(ffNetwork, trainingPredictorsCollection, trainingIdealOutputsCollection, ffnCfg.RPropTrainerCfg);
    }
    else
    {
        throw new ArgumentException($"Not supported regression method {ffnCfg.RegressionMethod}");
    }
    // Fresh random weights before any training attempt.
    net.RandomizeWeights(rand);
    return;
}