/// <summary>
/// Trains a freeform network with backpropagation, hybridized with simulated
/// annealing to help escape local minima, until the error drops to 1%.
/// </summary>
/// <param name="what">Label for the run (currently unused in this overload — TODO confirm).</param>
/// <param name="network">The freeform network to train (modified in place).</param>
/// <param name="trainingSet">The training data.</param>
/// <returns>The final training error reported by the primary trainer.</returns>
public static double TrainNetwork(String what, FreeformNetwork network, IMLDataSet trainingSet)
{
    // Score candidates by their error against the training set.
    ICalculateScore score = new TrainingSetScore(trainingSet);

    // Secondary trainer used by the hybrid strategy when the primary stalls.
    IMLTrain annealing = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);

    // Primary trainer: freeform backpropagation with a very small learning rate.
    IMLTrain backprop = new FreeformBackPropagation(network, trainingSet, 0.00001, 0.0);

    var stop = new StopTrainingStrategy();
    backprop.AddStrategy(new Greedy());
    backprop.AddStrategy(new HybridStrategy(annealing));
    backprop.AddStrategy(stop);

    // Delegate the actual iteration loop to Encog's utility; target 1% error.
    EncogUtility.TrainToError(backprop, 0.01);
    return backprop.Error;
}
/// <summary>
/// Trains the network with Levenberg-Marquardt, falling back to simulated
/// annealing via a hybrid strategy, for at most 1500 iterations or until the
/// stop strategy fires. Progress is written to the console each epoch.
/// </summary>
/// <param name="network">The network to train (modified in place).</param>
/// <param name="training">The training data.</param>
public void Train(BasicNetwork network, IMLDataSet training)
{
    IMLTrain primary = new LevenbergMarquardtTraining(network, training);

    var stop = new StopTrainingStrategy();
    ICalculateScore score = new TrainingSetScore(primary.Training);
    IMLTrain annealing = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
    primary.AddStrategy(new HybridStrategy(annealing));
    primary.AddStrategy(stop);

    // Epoch counter mirrors IterationNumber but is kept for the log message.
    for (int epoch = 0; !stop.ShouldStop() && primary.IterationNumber < 1500; epoch++)
    {
        primary.Iteration();
        Console.WriteLine("Training " + ", Epoch #" + epoch + " Error:" + primary.Error);
    }
}
/// <summary>
/// Trains the instance network two ways in parallel: (1) backpropagation with
/// greedy + simulated-annealing hybrid strategies, and (2) a NEAT population
/// evolved against the same score, iterating both until the stop strategy
/// fires. Progress for all three trainers is logged every epoch.
/// Uses the instance fields: input, ideal, network, INPUT_SIZE, OUTPUT_SIZE.
/// </summary>
private void trainNetworkBackprop()
{
    IMLDataSet dataset = new BasicMLDataSet(input, ideal);

    // Shared fitness measure for annealing and NEAT.
    ICalculateScore score = new TrainingSetScore(dataset);
    IMLTrain annealing = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
    IMLTrain backprop = new Backpropagation(network, dataset, 0.001, 0.0);
    StopTrainingStrategy stop = new StopTrainingStrategy();

    // NEAT population with a step output activation centered at 0.5.
    var population = new NEATPopulation(INPUT_SIZE, OUTPUT_SIZE, 1000);
    var stepActivation = new ActivationStep();
    stepActivation.Center = 0.5;
    population.OutputActivationFunction = stepActivation;
    var neat = new NEATTraining(score, population);

    backprop.AddStrategy(new Greedy());
    backprop.AddStrategy(new HybridStrategy(annealing));
    backprop.AddStrategy(stop);
    backprop.AddStrategy(new HybridStrategy(neat));

    network.ClearContext();

    int epoch = 1;
    while (!stop.ShouldStop())
    {
        // Advance both the gradient-based trainer and the NEAT search.
        backprop.Iteration();
        neat.Iteration();
        Console.WriteLine(@"Training " + @"Epoch #" + epoch
            + @" Error:" + backprop.Error
            + @" Genetic iteration:" + annealing.IterationNumber
            + @"neat iteration:" + neat.IterationNumber);
        epoch++;
    }
}
/// <summary>
/// Trains a network with backpropagation plus greedy and simulated-annealing
/// hybrid strategies, logging each epoch until the stop strategy fires.
/// </summary>
/// <param name="what">Label included in the per-epoch log line.</param>
/// <param name="network">The network to train (modified in place).</param>
/// <param name="trainingSet">The training data.</param>
/// <returns>The final training error.</returns>
private double TrainNetwork(String what, BasicNetwork network, IMLDataSet trainingSet)
{
    ICalculateScore score = new TrainingSetScore(trainingSet);
    IMLTrain annealing = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
    IMLTrain backprop = new Backpropagation(network, trainingSet, 0.00001, 0.0);

    var stop = new StopTrainingStrategy();
    backprop.AddStrategy(new Greedy());
    backprop.AddStrategy(new HybridStrategy(annealing));
    backprop.AddStrategy(stop);

    int epoch = 0;
    while (!stop.ShouldStop())
    {
        backprop.Iteration();
        app.WriteLine("Training " + what + ", Epoch #" + epoch + " Error:" + backprop.Error);
        epoch++;
    }
    return backprop.Error;
}
/// <summary>
/// Trains an SVM on market data until the stop strategy (min improvement
/// 0.0001 over 200 iterations) fires, logging each iteration and the total
/// elapsed time.
/// </summary>
/// <param name="network">The SVM to train (modified in place).</param>
/// <param name="training">The market training data.</param>
/// <returns>The final training error.</returns>
public static double TrainNetworks(SupportVectorMachine network, MarketMLDataSet training)
{
    SVMTrain trainer = new SVMTrain(network, training);
    StopTrainingStrategy stop = new StopTrainingStrategy(0.0001, 200);
    trainer.AddStrategy(stop);

    var timer = new Stopwatch();
    timer.Start();
    while (!stop.ShouldStop())
    {
        // Pre/Post iteration are invoked explicitly here so the stop
        // strategy's bookkeeping runs around each iteration.
        trainer.PreIteration();
        trainer.Iteration();
        trainer.PostIteration();
        Console.WriteLine(@"Iteration #:" + trainer.IterationNumber + @" Error:" + trainer.Error);
    }
    timer.Stop();

    Console.WriteLine("SVM Trained in :" + timer.ElapsedMilliseconds + "For error:" + trainer.Error + " Iterated:" + trainer.IterationNumber);
    return trainer.Error;
}
/// <summary>
/// Trains a network with either Levenberg-Marquardt (when Method is "Leven")
/// or plain backpropagation, in both cases hybridized with simulated
/// annealing and a greedy strategy, until the stop strategy fires.
/// </summary>
/// <param name="what">Label included in the per-epoch log line.</param>
/// <param name="network">The network to train (modified in place).</param>
/// <param name="trainingSet">The training data.</param>
/// <param name="Method">"Leven" selects Levenberg-Marquardt; anything else
/// (including null) selects backpropagation.</param>
/// <returns>The final training error.</returns>
private double TrainNetwork(String what, BasicNetwork network, IMLDataSet trainingSet, string Method)
{
    ICalculateScore score = new TrainingSetScore(trainingSet);
    IMLTrain trainAlt = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);

    IMLTrain trainMain;
    // Fix: Method.Equals("Leven") threw NullReferenceException when Method
    // was null; string.Equals is null-safe with the same ordinal semantics.
    if (string.Equals(Method, "Leven", StringComparison.Ordinal))
    {
        Console.WriteLine("Using LevenbergMarquardtTraining");
        trainMain = new LevenbergMarquardtTraining(network, trainingSet);
    }
    else
    {
        trainMain = new Backpropagation(network, trainingSet);
    }

    var stop = new StopTrainingStrategy();
    trainMain.AddStrategy(new Greedy());
    trainMain.AddStrategy(new HybridStrategy(trainAlt));
    trainMain.AddStrategy(stop);

    int epoch = 0;
    while (!stop.ShouldStop())
    {
        trainMain.Iteration();
        app.WriteLine("Training " + what + ", Epoch #" + epoch + " Error:" + trainMain.Error);
        epoch++;
    }
    return trainMain.Error;
}
/// <summary>
/// Perform an individual job unit, which is a single network to train and
/// evaluate. The network is trained _weightTries times from fresh random
/// weights; the best (lowest) error across tries is recorded in the results
/// grid indexed by the network's hidden-layer sizes.
/// </summary>
///
/// <param name="context">Contains information about the job unit.</param>
public override sealed void PerformJobUnit(JobUnitContext context)
{
    var network = (BasicNetwork) context.JobUnit;

    // If the shared training set is buffered, open an additional view so this
    // job unit can read it independently of other concurrent units.
    BufferedMLDataSet buffer = null;
    IMLDataSet useTraining = _training;
    if (_training is BufferedMLDataSet)
    {
        buffer = (BufferedMLDataSet) _training;
        useTraining = (buffer.OpenAdditional());
    }

    // train the neural network: several restarts, keep the best error.
    double error = Double.PositiveInfinity;
    for (int z = 0; z < _weightTries; z++)
    {
        network.Reset(); // new random weights for this try
        Propagation train = new ResilientPropagation(network, useTraining);
        var strat = new StopTrainingStrategy(0.001d, 5);
        train.AddStrategy(strat);
        train.ThreadCount = 1; // force single thread mode

        for (int i = 0; (i < _iterations) && !ShouldStop && !strat.ShouldStop(); i++)
        {
            train.Iteration();
        }

        error = Math.Min(error, train.Error);
    }

    if (buffer != null)
    {
        buffer.Close();
    }

    if (!ShouldStop)
    {
        // update min and max error observed across all job units
        _high = Math.Max(_high, error);
        _low = Math.Min(_low, error);

        if (_hidden1Size > 0)
        {
            int networkHidden1Count;
            int networkHidden2Count;
            // Layer indexing: 1 = first hidden, 2 = second hidden (when
            // LayerCount > 3, i.e. input + two hidden + output).
            if (network.LayerCount > 3)
            {
                networkHidden2Count = network.GetLayerNeuronCount(2);
                networkHidden1Count = network.GetLayerNeuronCount(1);
            }
            else
            {
                networkHidden2Count = 0;
                networkHidden1Count = network.GetLayerNeuronCount(1);
            }

            // Grid coordinates relative to the minimum hidden-layer sizes.
            int row, col;
            if (_hidden2Size == 0)
            {
                row = networkHidden1Count - _hidden[0].Min;
                col = 0;
            }
            else
            {
                row = networkHidden1Count - _hidden[0].Min;
                col = networkHidden2Count - _hidden[1].Min;
            }

            // NOTE(review): negative indices only print "STOP" and then fall
            // through to the assignment below, which would throw
            // IndexOutOfRangeException — looks like leftover debug code;
            // confirm intended handling before changing.
            if ((row < 0) || (col < 0))
            {
                Console.Out.WriteLine("STOP");
            }
            _results[row][col] = error;
        }

        // report status
        _currentTry++;
        UpdateBest(network, error);
        ReportStatus(
            context,
            "Current: " + NetworkToString(network)
            + "; Best: " + NetworkToString(_bestNetwork));
    }
}
/// <summary>
/// Runs an SVM parameter search for at most 30 iterations (or until the very
/// tight stop strategy fires), logging each iteration's error.
/// </summary>
/// <param name="anetwork">The SVM to tune (modified in place).</param>
/// <param name="training">The training data.</param>
/// <returns>The same SVM instance, after FinishTraining.</returns>
public static SupportVectorMachine SVMSearch(SupportVectorMachine anetwork, IMLDataSet training)
{
    var search = new SVMSearchTrain(anetwork, training);
    var stop = new StopTrainingStrategy(0.00000000001, 1);
    search.AddStrategy(stop);

    while (search.IterationNumber < 30 && !stop.ShouldStop())
    {
        search.Iteration();
        Console.WriteLine("Iteration #" + search.IterationNumber + " Error :" + search.Error);
    }

    search.FinishTraining();
    return anetwork;
}
/// <summary>
/// Drives a pre-built SVM trainer until the stop strategy (min improvement
/// 0.0001 over 200 iterations) fires, logging error and gamma per iteration
/// plus the total elapsed time.
/// </summary>
/// <param name="train">The SVM trainer to run.</param>
/// <param name="machine">The SVM being trained (not referenced directly here).</param>
/// <returns>The final training error.</returns>
public static double TrainSVM(SVMTrain train, SupportVectorMachine machine)
{
    var stop = new StopTrainingStrategy(0.0001, 200);
    train.AddStrategy(stop);

    var timer = new Stopwatch();
    timer.Start();
    while (!stop.ShouldStop())
    {
        // Explicit Pre/Post iteration so the stop strategy's bookkeeping
        // runs around each training step.
        train.PreIteration();
        train.Iteration();
        train.PostIteration();
        Console.WriteLine(@"Iteration #:" + train.IterationNumber + @" Error:" + train.Error + " Gamma:" + train.Gamma);
    }
    timer.Stop();

    Console.WriteLine(@"SVM Trained in :" + timer.ElapsedMilliseconds);
    return train.Error;
}
/// <summary>
/// Trains a network with backpropagation plus greedy and simulated-annealing
/// hybrid strategies until the stop strategy (min improvement 0.0001 over
/// 200 iterations) fires, logging each iteration.
/// </summary>
/// <param name="network">The network to train (modified in place).</param>
/// <param name="minis">The training data.</param>
/// <returns>The final training error.</returns>
public static double TrainNetworks(BasicNetwork network, IMLDataSet minis)
{
    // Score candidates by their error against the training set.
    ICalculateScore score = new TrainingSetScore(minis);
    IMLTrain trainAlt = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
    IMLTrain trainMain = new Backpropagation(network, minis, 0.0001, 0.01);

    StopTrainingStrategy stop = new StopTrainingStrategy(0.0001, 200);
    trainMain.AddStrategy(new Greedy());
    trainMain.AddStrategy(new HybridStrategy(trainAlt));
    trainMain.AddStrategy(stop);

    // Fix: removed a Stopwatch that was started and stopped but whose elapsed
    // time was never read or reported (dead code).
    while (!stop.ShouldStop())
    {
        trainMain.Iteration();
        Console.WriteLine(@"Iteration #:" + trainMain.IterationNumber + @" Error:" + trainMain.Error + @" Genetic Iteration:" + trainAlt.IterationNumber);
    }

    return trainMain.Error;
}
/// <summary>
/// Trains a network with multi-threaded backpropagation plus greedy and
/// simulated-annealing hybrid strategies. Runs EncogUtility.TrainConsole for
/// 15.2 minutes, then continues iterating until the stop strategy fires.
/// </summary>
/// <param name="network">The network to train (modified in place).</param>
/// <param name="minis">The training data.</param>
/// <returns>The final training error.</returns>
public static double TrainNetworks(BasicNetwork network, IMLDataSet minis)
{
    Backpropagation trainMain = new Backpropagation(network, minis,0.0001,0.6);
    //set the number of threads below. 0 lets the framework choose.
    trainMain.ThreadCount = 0;
    // train the neural network
    ICalculateScore score = new TrainingSetScore(minis);
    IMLTrain trainAlt = new NeuralSimulatedAnnealing(network, score, 10, 2, 100);
    // IMLTrain trainMain = new Backpropagation(network, minis, 0.0001, 0.01);
    StopTrainingStrategy stop = new StopTrainingStrategy(0.0001, 200);
    trainMain.AddStrategy(new Greedy());
    trainMain.AddStrategy(new HybridStrategy(trainAlt));
    trainMain.AddStrategy(stop);
    //prune strategy not in GIT!...Removing it.
    //PruneStrategy strategypruning = new PruneStrategy(0.91d, 0.001d, 10, network,minis, 0, 20);
    //trainMain.AddStrategy(strategypruning);
    // NOTE(review): TrainConsole runs its own training loop for 15.2 minutes,
    // and the while-loop below then trains AGAIN until the stop strategy
    // fires — the network is trained twice. Confirm this is intentional.
    EncogUtility.TrainConsole(trainMain,network,minis, 15.2);
    var sw = new Stopwatch();
    sw.Start();
    while (!stop.ShouldStop())
    {
        trainMain.Iteration();
        Console.WriteLine(@"Iteration #:" + trainMain.IterationNumber + @" Error:" + trainMain.Error + @" Genetic Iteration:" + trainAlt.IterationNumber);
    }
    sw.Stop();
    // NOTE(review): TimeSpan.Seconds is only the 0-59 seconds component, not
    // the total duration — TotalSeconds was probably intended; confirm.
    Console.WriteLine(@"Total elapsed time in seconds:" + TimeSpan.FromMilliseconds(sw.ElapsedMilliseconds).Seconds);
    return trainMain.Error;
}
// NOTE(review): This method is decompiler output from an obfuscated binary
// (hex-named members, goto labels, opaque always-true/false guards like
// `(uint)num > uint.MaxValue` and `4 != 0`). Several locals (num, num2, num3,
// num4) are read inside guard expressions before any assignment, which the
// C# compiler rejects as use of unassigned locals — this text will not
// compile as-is. It appears to be the same algorithm as the readable
// PerformJobUnit elsewhere in this file (train each candidate network
// _weightTries times, record the best error in a results grid); prefer
// replacing this with that version rather than editing the goto graph.
public override sealed void PerformJobUnit(JobUnitContext context)
{
    double num;            // best error across restarts (presumably)
    int num2;              // restart counter (presumably)
    int num3;              // inner iteration counter
    int num4;              // hidden layer 1 neuron count
    int layerNeuronCount;  // hidden layer 2 neuron count
    int num6;              // results grid row
    int num7;              // results grid column
    BasicNetwork jobUnit = (BasicNetwork) context.JobUnit;
    BufferedMLDataSet set = null;
    IMLDataSet training = this._x823a2b9c8bf459c5;
    // Open an additional view when the shared training set is buffered.
    if (this._x823a2b9c8bf459c5 is BufferedMLDataSet)
    {
        set = (BufferedMLDataSet) this._x823a2b9c8bf459c5;
        if ((((uint) num) + ((uint) num2)) > uint.MaxValue) { goto Label_02BB; } // opaque obfuscation guard
    }
    else
    {
        if (((uint) num4) > uint.MaxValue) { goto Label_0302; } // opaque obfuscation guard
        goto Label_02BB;
    }
    training = set.OpenAdditional();
    goto Label_0302;
Label_0011: // report status and finish
    this._x7ca40c9a68f86359++;
    this.x6aa420caefd31103(jobUnit, num);
    base.ReportStatus(context, "Current: " + NetworkToString(jobUnit) + "; Best: " + NetworkToString(this._x61bb83c40eed7f47));
    if (((uint) num) >= 0) { return; }
    if (((uint) layerNeuronCount) <= uint.MaxValue) { goto Label_0104; }
    if ((((uint) layerNeuronCount) | 3) == 0) { }
Label_0090: // bounds-check grid coordinates; "STOP" mirrors the debug print in the readable version
    if (num6 >= 0)
    {
        if (num7 >= 0) { goto Label_00A7; }
    }
    else if (0 != 0) { goto Label_00B9; }
    Console.Out.WriteLine("STOP");
Label_00A7: // store the best error in the results grid
    this._xd559aa34776631a5[num6][num7] = num;
    goto Label_0011;
Label_00B9: // branch on whether a hidden layer 1 size was configured
    if (this._xe4e6a25eae13e4b3 > 0) { goto Label_0188; }
    if (0 == 0)
    {
        if ((((uint) num3) & 0) != 0) { return; }
        goto Label_0011;
    }
Label_0104:
    num7 = 0;
    goto Label_0090;
Label_0125:
    num4 = jobUnit.GetLayerNeuronCount(1);
Label_012E: // compute grid row/col relative to minimum hidden sizes
    if (this._x5426aa354995e9e0 != 0)
    {
        num6 = num4 - this._xab3ddaff42dd298a[0].Min;
        num7 = layerNeuronCount - this._xab3ddaff42dd298a[1].Min;
        goto Label_0090;
    }
    num6 = num4 - this._xab3ddaff42dd298a[0].Min;
    if ((((uint) num3) | 0xfffffffe) != 0) { goto Label_0104; }
    return;
Label_0188: // read hidden-layer sizes from the network
    if (jobUnit.LayerCount <= 3)
    {
        layerNeuronCount = 0;
        goto Label_0125;
    }
    layerNeuronCount = jobUnit.GetLayerNeuronCount(2);
    num4 = jobUnit.GetLayerNeuronCount(1);
    goto Label_012E;
Label_0195: // update global high/low error watermarks
    if (base.ShouldStop) { return; }
    this._x628ea9b89457a2a9 = Math.Max(this._x628ea9b89457a2a9, num);
    this._xd12d1dba8a023d95 = Math.Min(this._xd12d1dba8a023d95, num);
    if ((((uint) num2) + ((uint) num6)) <= uint.MaxValue) { goto Label_00B9; }
    goto Label_0188;
Label_02BB: // restart loop entry: best error starts at +infinity
    num = double.PositiveInfinity;
    num2 = 0;
Label_0217: // one training restart with fresh random weights
    if (num2 < this._xe009ad1bd0a8245a)
    {
        StopTrainingStrategy strategy;
        jobUnit.Reset();
        Encog.Neural.Networks.Training.Propagation.Propagation propagation = new ResilientPropagation(jobUnit, training);
        if ((((uint) num) - ((uint) num3)) >= 0)
        {
            strategy = new StopTrainingStrategy(0.001, 5);
        }
        propagation.AddStrategy(strategy);
        propagation.ThreadCount = 1; // single-threaded, as in the readable version
        num3 = 0;
        while (true)
        {
            if ((num3 < this._xdbf51c857aeb8093) && (!base.ShouldStop && !strategy.ShouldStop()))
            {
                propagation.Iteration();
            }
            else
            {
                num = Math.Min(num, propagation.Error);
                if (4 != 0) // always true — obfuscation artifact
                {
                    num2++;
                    goto Label_0217;
                }
                goto Label_0195;
            }
            num3++;
        }
    }
    // close the additional buffered view, if one was opened
    while (set != null)
    {
        set.Close();
        if (((uint) num4) <= uint.MaxValue) { break; }
    }
    goto Label_0195;
Label_0302:
    if ((((uint) num4) - ((uint) layerNeuronCount)) < 0) { goto Label_0125; }
    goto Label_02BB;
}