public IIndividual<Rule> Solve()
{
    const int firstSetSize = (int)(2048 * 0.8);
    const int secondSetSize = (int)(2048 * 0.2);
    var matcher = GetMatcher(@"Data\data2.txt", firstSetSize, 0);
    var test = GetMatcher(@"Data\data2.txt", secondSetSize, firstSetSize);
    IIndividual<Rule> best = null;
    const int maximumIteration = 3;
    var averageLogger = new AverageLogger();

    for (var run = 0.0; run <= 2; run += 0.1)
    {
        Console.WriteLine("Running two with mutation=" + run);
        var iteration = 0;
        var fitness = 0;

        while (iteration++ < maximumIteration)
        {
            // Make a new population with some sane defaults.
            var population = new Population<IIndividual<Rule>, Rule>(100, 15,
                (size, rng) => new RuleIndividual(size, rng),
                id => id,
                matcher.CountMatches)
            {
                MutationMultiplier = run
            };

            var logger = new FitnessLogger();

            for (int i = 0;; i++)
            {
                // Then just run with it.
                var generation = population.Generation();
                best = generation.AsParallel().MaxBy(matcher.CountMatches);
                var average = generation.AsParallel().Sum(rule => matcher.CountMatches(rule, true)) / generation.Count;
                var averageTest = generation.AsParallel().Sum(rule => test.CountMatches(rule, true)) / generation.Count;

                // Log as we go.
                logger.LogFitness(matcher.CountMatches(best, true), average, test.CountMatches(best, true), averageTest);

                // Stop once we match both sets. We predicate on both sets here because ruleset 2 has a habit
                // of matching ruleset 1 in a specific way and only then generalising. We could either give it a
                // few hundred generations to generalise, or just wait until it matches the test set too. The test
                // set is only used as a stopping condition here, so it never influences evolution.
                if ((int)(matcher.CountMatches(best, true) / firstSetSize) == 1 &&
                    (int)(test.CountMatches(best, true) / secondSetSize) == 1)
                {
                    fitness += i;
                    break;
                }

                // If we've been going for too long, just cut off.
                if (i > 3000)
                {
                    fitness += i;
                    break;
                }
            }

            logger.Save("two-mutation-" + run + ".csv");
        }

        averageLogger.LogFitness(fitness / maximumIteration);
    }

    averageLogger.Save("two-mutation-runs.csv");
    return best;
}
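// Neither FitnessLogger nor AverageLogger is shown in this listing. A minimal sketch of the kind of
// CSV accumulator they appear to be (one row per LogFitness call, written out on Save); this is an
// illustrative guess at their shape, not the original implementation:
using System.Collections.Generic;
using System.IO;
using System.Linq;

internal sealed class CsvFitnessLogger
{
    private readonly List<string> _rows = new List<string>();

    // Accepts any number of columns, so it covers both the two-column (train-only)
    // and four-column (train + test) calls made in the Solve methods in this listing.
    public void LogFitness(params double[] values)
    {
        _rows.Add(string.Join(",", values.Select(v => v.ToString("F4"))));
    }

    public void Save(string path)
    {
        File.WriteAllLines(path, _rows);
    }
}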
public IIndividual<bool> Solve()
{
    var matcher = GetMatcher(@"Data\data1.txt");
    Func<IIndividual<bool>, double> fitnessFunction = matcher.CountMatches;
    const int maximumIterations = 20; // This is fast, so we may as well do loads.
    IIndividual<bool> best = null;
    var logger = new AverageLogger();

    for (var run = 0.0; run <= 2; run += 0.1)
    {
        Console.WriteLine("Running one with mutation=" + run);
        var iteration = 0;
        var fitness = 0; // The fitness sum so far, for later averaging.

        while (iteration++ < maximumIterations)
        {
            // Make a new population with some reasonable default values...
            var population = new Population<IIndividual<bool>, bool>(100, 64,
                (size, rng) => new BoolIndividual(size, rng),
                ind => ind,
                fitnessFunction);
            population.MutationMultiplier = run;

            // ...and a logger.
            var runLogger = new FitnessLogger();

            var i = 0;
            while (true)
            {
                // Run a generation.
                var generation = population.Generation();

                // Find the best individual.
                best = generation.OrderBy(fitnessFunction).Last();

                // Find the average fitness.
                var average = generation.Select(fitnessFunction).Sum() / generation.Count;

                // Log what we have.
                runLogger.LogFitness(fitnessFunction(best), average);

                if ((int)fitnessFunction(best) == 64)
                {
                    // If we've found the correct answer, add how long it took us to the total and stop.
                    fitness += i;
                    break;
                }

                if (i++ > 500)
                {
                    // If we've taken far too long, just add how far we got and stop trying.
                    fitness += i;
                    break;
                }
            }

            // Every run we save how it went, for later crunching.
            runLogger.Save("one-mutation-" + run + ".csv");
        }

        // And we log how long everything took us, on average.
        logger.LogFitness(fitness / maximumIterations);
    }

    logger.Save("one-mutation-runs.csv");
    return best;
}
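// The Population<TIndividual, TGene> class used throughout this listing isn't shown. A minimal,
// self-contained sketch of the generational step it appears to provide (fixed-size population,
// tournament selection plus mutation, with MutationMultiplier scaling the per-gene flip rate),
// written here over plain bool[] genomes. Everything below is an illustrative guess at the shape
// of that class, not the original implementation:
using System;
using System.Collections.Generic;
using System.Linq;

internal sealed class SketchPopulation
{
    private readonly Random _rng = new Random();
    private readonly Func<bool[], double> _fitness;
    private List<bool[]> _members;

    public double MutationMultiplier { get; set; } = 1.0;

    public SketchPopulation(int size, int genomeLength, Func<bool[], double> fitness)
    {
        _fitness = fitness;
        _members = Enumerable.Range(0, size)
            .Select(_ => Enumerable.Range(0, genomeLength).Select(__ => _rng.Next(2) == 1).ToArray())
            .ToList();
    }

    public IReadOnlyList<bool[]> Generation()
    {
        var next = new List<bool[]>(_members.Count);
        while (next.Count < _members.Count)
        {
            // Tournament selection: pick two members at random, keep the fitter one as the parent.
            var a = _members[_rng.Next(_members.Count)];
            var b = _members[_rng.Next(_members.Count)];
            var parent = _fitness(a) >= _fitness(b) ? a : b;

            // Copy and mutate; MutationMultiplier scales the per-gene flip probability.
            var child = (bool[])parent.Clone();
            var rate = MutationMultiplier / child.Length;
            for (var g = 0; g < child.Length; g++)
            {
                if (_rng.NextDouble() < rate)
                {
                    child[g] = !child[g];
                }
            }

            next.Add(child);
        }

        _members = next;
        return _members;
    }
}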
public RuleSet Solve()
{
    const int totalData = 2000;
    const double trainSet = totalData * 0.8;
    const double testSet = totalData * 0.2;
    var matcher = GetMatcher(@"Data\data3.txt", (int)trainSet, 0);
    var test = GetMatcher(@"Data\data3.txt", (int)testSet, (int)trainSet);
    var averageLogger = new AverageLogger();

    // Hidden nodes have #input + 1 weights each (the +1 is the bias).
    // Second hidden nodes have #hidden + 1 weights each.
    // Output nodes have #sndHidden + 1 weights each.
    // Thus the genome length is (#hidden * (#input + 1)) + (#sndHidden * (#hidden + 1)) + (#output * (#sndHidden + 1)).
    const int numInput = 6;
    const int numHidden = 6;
    const int numSecondHidden = 3;
    const int numOutput = 2;
    const int maximumIterations = 3;
    RuleSet ruleBest = null;

    for (var run = 0.0; run <= 2.0; run += 0.1)
    {
        Console.WriteLine("Running three with mutation=" + run);
        var iterations = 0;
        var fitness = 0;

        while (iterations++ < maximumIterations)
        {
            // We make two populations for problem three, so we can run the neural network and the rule set side by side.
            var rulePopulation = new Population<RuleSet, double>(100, 10 * ((6 * 2) + 1),
                (num, random) => new RuleIndividual(num, random),
                individual => new RuleSet(individual.Genotype),
                matcher.CountMatches);

            var netPopulation = new Population<NeuralNetwork2, double>(100,
                (numHidden * (numInput + 1)) + (numSecondHidden * (numHidden + 1)) + (numOutput * (numSecondHidden + 1)),
                (length, rng) => new DoubleIndividual(length, rng),
                individual => new NeuralNetwork2(individual.Genotype, numInput, numHidden, numSecondHidden, numOutput),
                matcher.CountMatches);

            rulePopulation.MutationMultiplier = run;
            netPopulation.MutationMultiplier = run;

            var loggerRule = new FitnessLogger();
            var loggerNet = new FitnessLogger();

            var i = 0;
            while (true)
            {
                const double threshold = 0.9;

                var netGeneration = netPopulation.Generation();
                var ruleGeneration = rulePopulation.Generation();

                var netBest = netGeneration.AsParallel().OrderBy(matcher.CountMatches).Last();
                ruleBest = ruleGeneration.AsParallel().OrderBy(matcher.CountMatches).Last();

                var netAverage = netGeneration.AsParallel().Select(net => matcher.CountMatches(net, true)).Sum() / netGeneration.Count;
                var ruleAverage = ruleGeneration.AsParallel().Select(rul => matcher.CountMatches(rul, true)).Sum() / ruleGeneration.Count;
                var netTestAverage = netGeneration.AsParallel().Select(net => test.CountMatches(net, true)).Sum() / netGeneration.Count;
                var ruleTestAverage = ruleGeneration.AsParallel().Select(rul => test.CountMatches(rul, true)).Sum() / ruleGeneration.Count;

                loggerNet.LogFitness(matcher.CountMatches(netBest, true), netAverage, test.CountMatches(netBest, true), netTestAverage);
                loggerRule.LogFitness(matcher.CountMatches(ruleBest, true), ruleAverage, test.CountMatches(ruleBest, true), ruleTestAverage);

                // We finish when the rule set hits the threshold percentage, though; the neural network isn't very good.
                if ((int)matcher.CountMatches(ruleBest, true) >= (int)trainSet * threshold)
                {
                    fitness += i;
                    break;
                }

                if (i > 3000)
                {
                    fitness += i;
                    break;
                }

                i++;
            }

            loggerRule.Save("rul-mutation" + run + ".csv");
        }

        averageLogger.LogFitness(fitness / maximumIterations);
    }

    averageLogger.Save("three-mutation-runs.csv");
    return ruleBest;
}
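// A quick check of the weight-count comment above: with 6 inputs, 6 first-layer hidden nodes,
// 3 second-layer hidden nodes and 2 outputs, the genome handed to NeuralNetwork2 needs
// (6 * (6 + 1)) + (3 * (6 + 1)) + (2 * (3 + 1)) = 42 + 21 + 8 = 71 weights. The helper below
// only illustrates that arithmetic and is not part of the original code:
internal static class WeightCount
{
    public static int For(int inputs, int hidden, int secondHidden, int outputs)
    {
        // Every node takes one weight per incoming connection plus one bias weight.
        return (hidden * (inputs + 1))
             + (secondHidden * (hidden + 1))
             + (outputs * (secondHidden + 1));
    }
}
// WeightCount.For(6, 6, 3, 2) == 71, matching the genome length passed to the net population above.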