/// <summary>
/// Event-argument payload reporting an updated learning result.
/// </summary>
/// <param name="epoch">Epoch the result belongs to; must not be null.</param>
/// <param name="mse">Mean squared error of the result; must be non-negative.</param>
internal LearningResultUpdatedEventArgs(LearningEpoch epoch, double mse)
{
    Contract.Requires(epoch != null);
    Contract.Requires(mse >= 0.0);

    // The two assignments are independent of each other.
    MSE = mse;
    Epoch = epoch;
}
/// <summary>
/// Creates a learning result bound to the given epoch, sharing the epoch's
/// synchronization root with the base class.
/// </summary>
/// <param name="epoch">Owning epoch; must not be null.</param>
/// <param name="bestHolder">True when this instance tracks the best result seen so far.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="epoch"/> is null.</exception>
internal LearningResult(LearningEpoch epoch, bool bestHolder = false)
    : base(RequireEpoch(epoch).SyncRoot)
{
    Contract.Requires(epoch != null);
    Epoch = epoch;
    this.bestHolder = bestHolder;
}

// Null-guards the epoch before the base-constructor call. Without this guard a null
// epoch surfaced as a NullReferenceException from epoch.SyncRoot, because the
// Contract.Requires in the constructor body only executes AFTER base initialization.
private static LearningEpoch RequireEpoch(LearningEpoch epoch)
{
    if (epoch == null)
    {
        throw new ArgumentNullException("epoch");
    }
    return epoch;
}
// Entry point of the training experiment: builds the training/validation batchers,
// picks a learning rule, constructs the network and the learning epoch, then runs an
// interactive training loop driven from the console.
// Keys: ESC = stop, S = save a clone of the current network, V = save the best-validation network.
private static void Begin()
{
    bool recurrent = false;

    // Training data: 10000 samples, batched with a Gaussian strategy.
    // NOTE(review): the meaning of the trailing ScriptCollectionBatcher arguments
    // (200, 500 / 25, 10000) is assumed to be batch sizing — confirm against its definition.
    var trainingProv = CreateProvider(10000, recurrent);
    var trainingStrat = new GaussianBatchingStrategy(.5);
    //var trainingStrat = new MonteCarloBatchingStrategy();
    var trainingBatcher = new ScriptCollectionBatcher(trainingStrat, trainingProv, 200, 500);

    // Validation data: 1000 samples, batched by Monte Carlo sampling.
    var validProv = CreateProvider(1000, recurrent);
    var validStrat = new MonteCarloBatchingStrategy();
    var validBatcher = new ScriptCollectionBatcher(validStrat, validProv, 25, 10000);

    trainingBatcher.Initialize();
    validBatcher.Initialize();

    Console.WriteLine("Training samples: " + trainingProv.Count);
    Console.WriteLine("Validation samples: " + validProv.Count);

    // Rules: weight initialization plus the active learning rule.
    // The commented-out alternatives below are other rules tried during experimentation;
    // kept for reference so their tuned hyperparameters are not lost.
    Console.WriteLine("Creating learning rules ...");
    var weightInitRule = new NoisedWeightInitializationRule { Noise = 0.5, IsEnabled = true };
    //var learningRule = new QuickpropRule { StepSize = 0.001 };
    var learningRule = new SCGRule();
    //var learningRule = new LMRule();
    //var learningRule = new MetaQSARule { Mode = LearningMode.Stochastic, Momentum = 0.1, StepSizeRange = new DoubleRange(0.0, 0.005), StepSize = 0.001, StochasticAdaptiveStateUpdate = true };
    //var learningRule = new SuperSABRule { Mode = LearningMode.Batch, Momentum = 0.8, StepSizeRange = new DoubleRange(0.0, 0.01), StepSize = 0.005, StochasticAdaptiveStateUpdate = false };
    //var learningRule = new SignChangesRule { Mode = LearningMode.Stochastic, Momentum = 0.2, StepSizeRange = new DoubleRange(0.0, 0.001), StepSize = 0.001, StochasticAdaptiveStateUpdate = false };
    //var learningRule = new GradientDescentRule { Mode = LearningMode.Stochastic, Momentum = 0.2, StepSize = 0.001 };
    //var learningRule = new QSARule();
    //var learningRule = new MAQRule();
    //var learningRule = new AdaptiveAnnealingRule { WeightGenMul = 0.1, AcceptProbMul = 0.05 };
    //var learningRule = new RpropRule { Momentum = 0.01, StepSize = 0.01 };
    //var learningRule = new CrossEntropyRule { PopulationSize = 50, NumberOfElites = 10, MutationChance = 0.01, MutationStrength = 0.01, DistributionType = DistributionType.Gaussian };
    //var learningRule = new GARule { PopulationSize = 40, MutationStrength = 0.01, MutationChance = 0.01 };

    // Optional weight decay for rules that support it (currently disabled via IsEnabled = false).
    var wdRule = (ILearningRule)learningRule as IWeightDecayedLearningRule;
    if (wdRule != null)
    {
        wdRule.WeightDecay = new WeightDecay { Factor = -0.0001, IsEnabled = false };
    }

    IterationRepeatPars iterationRepeatPars = new IterationRepeatPars(5, 10);

    // Net: build the network and wrap it in a learning execution.
    Console.WriteLine("Creating Neural Network ...");
    var network = CreateNetwork(recurrent, weightInitRule, learningRule);
    var exec = new LearningExecution(network, iterationRepeatPars);

    // Epoch: wire result updates to console output and snapshot the network
    // whenever a new best validation result is reached.
    Console.WriteLine("Initializing epoch ...");
    var epoch = new LearningEpoch(exec, trainingBatcher, validBatcher, 1);
    epoch.Initialize();
    epoch.CurrentResult.Updated += (sender, e) => WriteResult(epoch);
    epoch.BestValidationResult.Updated += (sender, e) => vbestNet = network.Clone();

    // Training loop: step until ESC is pressed; S / V save network snapshots.
    Console.WriteLine("Starting ...");
    bool done = false;
    do
    {
        //CodeBench.By("Epoch").Do = () =>
        //{
        //    epoch.Step();
        //};
        //CodeBench.By("Epoch").WriteToConsole();
        epoch.Step();
        //WriteResult(epoch);
        if (Console.KeyAvailable)
        {
            var key = Console.ReadKey();
            switch (key.Key)
            {
                case ConsoleKey.Escape: done = true; break;
                case ConsoleKey.S: Save(network.Clone()); break;
                case ConsoleKey.V: if (vbestNet != null) Save(vbestNet); break;
            }
        }
    } while (!done);
}
/// <summary>
/// Writes one progress line for the epoch: iteration number, then best/current MSE
/// for the training result and best/current MSE for the validation result.
/// </summary>
/// <param name="epoch">Epoch whose current state is printed.</param>
private static void WriteResult(LearningEpoch epoch)
{
    string iteration = epoch.CurrentIteration.ToString("0000");
    string bestMse = epoch.BestResult.MSE.ToString("0.000000");
    string currentMse = epoch.CurrentResult.MSE.ToString("0.000000");
    string bestValidationMse = epoch.BestValidationResult.MSE.ToString("0.000000");
    string currentValidationMse = epoch.CurrentValidationResult.MSE.ToString("0.000000");

    Console.WriteLine(
        "{0}: Current: {1}/{2} Validation: {3}/{4}",
        iteration,
        bestMse,
        currentMse,
        bestValidationMse,
        currentValidationMse);
}