public String DescribeShortGeneralized(Individual ind, IEvolutionState state, int subpopulation, int threadnum)
{
    if (Start == null)
    {
        Start = new bool[MIN_ARRAY_SIZE];
        Accept = new bool[MIN_ARRAY_SIZE];
        Reading = new int[MIN_ARRAY_SIZE];
        From = new int[MIN_ARRAY_SIZE];
        To = new int[MIN_ARRAY_SIZE];
        State1 = new bool[MIN_ARRAY_SIZE];
        State2 = new bool[MIN_ARRAY_SIZE];
        Reading1 = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE); // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Reading0 = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE); // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Epsilon = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE);  // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Reading1L = new int[MIN_ARRAY_SIZE];
        Reading0L = new int[MIN_ARRAY_SIZE];
        EpsilonL = new int[MIN_ARRAY_SIZE];
    }

    FullTest(state, ind, threadnum, PosA, NegA);

    return ": " + (double)_totpos / PosA.Length
         + " " + (double)_totneg / NegA.Length
         + " " + (double)(_totpos + _totneg) / (PosA.Length + NegA.Length)
         + " " + ((double)_totpos / PosA.Length + (double)_totneg / NegA.Length) / 2
         + " " + Math.Min((double)_totpos / PosA.Length, (double)_totneg / NegA.Length)
         + " : ";
}
public void setExpectedMaxSize(int numRows, int numCols)
{
    this.numCols = numCols;
    this.numRows = numRows;
    minLength = Math.Min(numCols, numRows);
    int maxLength = Math.Max(numCols, numRows);

    if (dataQR == null || dataQR.Length < numCols || dataQR[0].Length < numRows * 2)
    {
        // dataQR = new double[numCols][numRows * 2];
        dataQR = TensorFactory.Create<double>(numCols, numRows * 2);
        v = new double[maxLength * 2];
        gammas = new double[minLength];
    }

    if (v.Length < maxLength * 2)
    {
        v = new double[maxLength * 2];
    }
    if (gammas.Length < minLength)
    {
        gammas = new double[minLength];
    }
}
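// A minimal usage sketch of the grow-once pattern above (the decomposer type,
// the Decompose call, and the matrix source are hypothetical, shown only to
// illustrate intent): size the workspace for the largest matrix you expect,
// then reuse the same object in a loop so dataQR/v/gammas are never reallocated.
//
//     var qr = new QRDecompositionHouseholder();      // assumed owner of setExpectedMaxSize
//     qr.setExpectedMaxSize(numRows: 1000, numCols: 500);
//     foreach (double[][] a in matrices)              // each a is at most 1000 x 500
//         qr.Decompose(a);                            // no per-call allocation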
public override void Setup(IEvolutionState state, IParameter paramBase)
{
    base.Setup(state, paramBase);
    state.Output.ExitIfErrors();

    IParameter kval = new Parameter(EvolutionState.P_EVALUATOR).Push(P_PROBLEM).Push(P_PROBLEMNAME).Push(P_KVALUE);
    k = state.Parameters.GetInt(kval, null, 0);
    // System.out.println("K = " + k);

    for (int i = 0; i < _indices.Length; i++)
    {
        _indices[i] = -1;
    }
    _indices['A' - 'A'] = 0;
    _indices['B' - 'A'] = 1;
    _indices['X' - 'A'] = 2;
    _indices['Y' - 'A'] = 3;
    _indices['Z' - 'A'] = 4;
    _indices['W' - 'A'] = 5;

    // now do some initialization
    IMersenneTwister r = state.Random[0];
    _nodeScore = new double[6];
    _edgeScore = TensorFactory.Create<double>(2, 6);

    for (int i = 0; i < 6; i++)
    {
        _nodeScore[i] = 2 * r.NextDouble() - 1;
    }

    // We need to assure that the best fitness is positive (to normalize it to 1).
    // A method to do this is to have at least one terminal symbol with a positive score.
    bool ok = false;
    for (int i = 2; i < 6; i++)
    {
        if (_nodeScore[i] > 0)
        {
            ok = true;
        }
    }
    if (!ok)
    {
        _nodeScore[2] = r.NextDouble();
    }

    for (int i = 0; i < 2; i++)
    {
        for (int j = 0; j < 6; j++)
        {
            _edgeScore[i][j] = r.NextDouble();
        }
    }

    _bestFitness = ComputeBestFitness();
}
public override void Describe(IEvolutionState state, Individual ind, int subpopulation, int threadnum, int log)
{
    if (Start == null)
    {
        Start = new bool[MIN_ARRAY_SIZE];
        Accept = new bool[MIN_ARRAY_SIZE];
        Reading = new int[MIN_ARRAY_SIZE];
        From = new int[MIN_ARRAY_SIZE];
        To = new int[MIN_ARRAY_SIZE];
        State1 = new bool[MIN_ARRAY_SIZE];
        State2 = new bool[MIN_ARRAY_SIZE];
        Reading1 = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE); // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Reading0 = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE); // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Epsilon = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE);  // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Reading1L = new int[MIN_ARRAY_SIZE];
        Reading0L = new int[MIN_ARRAY_SIZE];
        EpsilonL = new int[MIN_ARRAY_SIZE];
    }

    if (Generalize)
    {
        FullTest(state, ind, threadnum, PosA, NegA);
    }
    else
    {
        FullTest(state, ind, threadnum, PosT, NegT);
    }

    if (Generalize)
    {
        state.Output.PrintLn("\n\nBest Individual's Generalization Score...\n"
            + "Pos: " + _totpos + "/" + PosA.Length
            + " Neg: " + _totneg + "/" + NegA.Length
            + "\n(pos+neg)/(allpos+allneg): " + (float)((double)(_totpos + _totneg) / (PosA.Length + NegA.Length))
            + "\n((pos/allpos)+(neg/allneg))/2: " + (float)(((double)_totpos / PosA.Length + (double)_totneg / NegA.Length) / 2)
            + "\nMin(pos/allpos,neg/allneg): " + (float)Math.Min((double)_totpos / PosA.Length, (double)_totneg / NegA.Length),
            log);
    }

    state.Output.PrintLn("\nBest Individual's NFA\n=====================\n", log);
    state.Output.PrintLn(PrintCurrentNFA(), log);
}
public void Evaluate(IEvolutionState state, Individual ind, int subpop, int threadnum)
{
    if (Start == null)
    {
        Start = new bool[MIN_ARRAY_SIZE];
        Accept = new bool[MIN_ARRAY_SIZE];
        Reading = new int[MIN_ARRAY_SIZE];
        From = new int[MIN_ARRAY_SIZE];
        To = new int[MIN_ARRAY_SIZE];
        State1 = new bool[MIN_ARRAY_SIZE];
        State2 = new bool[MIN_ARRAY_SIZE];
        Reading1 = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE); // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Reading0 = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE); // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Epsilon = TensorFactory.Create<int>(MIN_ARRAY_SIZE, MIN_ARRAY_SIZE);  // new int[MIN_ARRAY_SIZE][MIN_ARRAY_SIZE];
        Reading1L = new int[MIN_ARRAY_SIZE];
        Reading0L = new int[MIN_ARRAY_SIZE];
        EpsilonL = new int[MIN_ARRAY_SIZE];
    }

    if (!ind.Evaluated) // don't bother reevaluating
    {
        EdgeData input = (EdgeData)Input;

        FullTest(state, ind, threadnum, PosT, NegT);

        // the fitness better be KozaFitness!
        var f = (KozaFitness)ind.Fitness;

        // this is an awful fitness metric, but it's the standard
        // one used for these problems. :-(
        f.SetStandardizedFitness(state, (float)(1.0 - (double)(_totpos + _totneg) / (PosT.Length + NegT.Length)));

        // here are two other more reasonable fitness metrics
        //f.SetStandardizedFitness(state, (float)
        //    (1.0 - Math.Min(((double)_totpos) / PosT.Length,
        //                    ((double)_totneg) / NegT.Length)));
        //f.SetStandardizedFitness(state, (float)
        //    (1.0 - (((double)_totpos) / PosT.Length +
        //            ((double)_totneg) / NegT.Length) / 2.0));

        f.Hits = _totpos + _totneg;
        ind.Evaluated = true;
    }
}
public int RunCartPole(NEATNetwork net, IEvolutionState state)
{
    // double in[] = new double[5]; // input loading array
    double twelve_degrees = 0.2094384;

    _x = _xDot = _theta = _thetaDot = 0.0;
    _steps = 0;

    double[][] input = TensorFactory.Create<double>(1, 5);

    while (_steps++ < MAX_STEPS)
    {
        /*-- setup the input layer based on the four inputs and bias --*/
        // setup_input(net, x, x_dot, theta, theta_dot);
        input[0][0] = 1.0; // bias
        input[0][1] = (_x + 2.4) / 4.8;
        input[0][2] = (_xDot + .75) / 1.5;
        input[0][3] = (_theta + twelve_degrees) / .41;
        input[0][4] = (_thetaDot + 1.0) / 2.0;

        double[] output = GetNetOutput(net, input, state);

        /*-- decide which way to push via which output unit is greater --*/
        if (output[0] > output[1])
        {
            _y = 0;
        }
        else
        {
            _y = 1;
        }

        /*--- Apply action to the simulated cart-pole ---*/
        cart_pole(_y);

        /*--- Check for failure. If so, return steps ---*/
        if (_x < -2.4 || _x > 2.4 || _theta < -twelve_degrees || _theta > twelve_degrees)
        {
            return _steps;
        }
    }
    return _steps;
}
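// For reference (a restatement of the assignments above, assuming the usual
// pole-balancing state bounds): each state variable v with nominal range
// [lo, hi] is affine-mapped to roughly [0, 1] via (v - lo) / (hi - lo):
//   x        in [-2.4, 2.4]           -> (x + 2.4) / 4.8
//   xDot     in [-0.75, 0.75]         -> (xDot + 0.75) / 1.5
//   theta    in [-0.2094, 0.2094] rad -> (theta + twelve_degrees) / 0.41   (12 degrees = 0.2094 rad)
//   thetaDot in [-1.0, 1.0]           -> (thetaDot + 1.0) / 2.0
// plus a constant bias input of 1.0 in slot 0.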
/// <summary>
/// Build an NxN rotation matrix[row][column] with a given seed.
/// </summary>
public static double[] /* row */ [] /* column */ BuildRotationMatrix(IEvolutionState state, long rotationSeed, int N)
{
    if (rotationSeed == ROTATION_SEED)
    {
        state.Output.WarnOnce("Default rotation seed being used (" + rotationSeed + ")");
    }

    IMersenneTwister rand = new MersenneTwisterFast(rotationSeed);
    for (int i = 0; i < 624 * 4; i++) // prime the MT for 4 full sample iterations to get it warmed up
    {
        rand.NextInt();
    }

    double[] /* row */ [] /* column */ o = TensorFactory.Create<double>(N, N);

    // make random values
    for (var i = 0; i < N; i++)
    {
        for (var k = 0; k < N; k++)
        {
            o[i][k] = rand.NextGaussian();
        }
    }

    // orthonormalize the rows against one another (Gram-Schmidt)
    for (var i = 0; i < N; i++)
    {
        // extract o[i] -> no
        var no = new double[N];
        for (var k = 0; k < N; k++)
        {
            no[k] = o[i][k];
        }

        // go through o[i] and o[j], modifying no
        for (var j = 0; j < i; j++)
        {
            var d = Dot(o[i], o[j]);
            var val = ScalarMul(d, o[j]);
            no = Sub(no, val);
        }
        o[i] = Normalize(no);
    }
    return o;
}
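// A quick sanity check one might run on the result (a sketch, not part of the
// library): the rows of the returned matrix are orthonormalized by the
// Gram-Schmidt pass above, so O * O^T should be numerically the identity.
public static void CheckRotationMatrix(double[][] o)
{
    int n = o.Length;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            double dot = 0;
            for (int k = 0; k < n; k++)
            {
                dot += o[i][k] * o[j][k];
            }
            // expect ~1.0 when i == j and ~0.0 otherwise, up to rounding error
            Console.WriteLine("o[" + i + "] . o[" + j + "] = " + dot);
        }
    }
}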
public override Object Clone()
{
    KLandscapes tmp = (KLandscapes)base.Clone();

    tmp._nodeScore = new double[6];
    tmp._edgeScore = TensorFactory.Create<double>(2, 6);
    tmp._bestFitness = _bestFitness;
    tmp.k = k;

    for (int i = 0; i < 6; i++)
    {
        tmp._nodeScore[i] = _nodeScore[i];
        for (int j = 0; j < 2; j++)
        {
            tmp._edgeScore[j][i] = _edgeScore[j][i];
        }
    }
    return tmp;
}
public override void Setup(IEvolutionState state, IParameter paramBase)
{
    // very important, remember this
    base.Setup(state, paramBase);

    // I'm not using the default base for any of this stuff;
    // it's not safe I think.

    // set up our input
    Input = (LawnmowerData)state.Parameters.GetInstanceForParameterEq(
        paramBase.Push(P_DATA), null, typeof(LawnmowerData));
    Input.Setup(state, paramBase.Push(P_DATA));

    // load our map coordinates
    MaxX = state.Parameters.GetInt(paramBase.Push(P_X), null, 1);
    if (MaxX == 0)
    {
        state.Output.Error("The width (x dimension) of the lawn must be >0", paramBase.Push(P_X));
    }
    MaxY = state.Parameters.GetInt(paramBase.Push(P_Y), null, 1);
    if (MaxY == 0)
    {
        state.Output.Error("The length (y dimension) of the lawn must be >0", paramBase.Push(P_Y));
    }
    state.Output.ExitIfErrors();

    // set up the map
    Map = TensorFactory.Create<int>(MaxX, MaxY); // new int[maxx][maxy];
    for (var x = 0; x < MaxX; x++)
    {
        for (var y = 0; y < MaxY; y++)
        {
            Map[x][y] = UNMOWED;
        }
    }
}
public virtual void BeforeCoevolutionaryEvaluation(IEvolutionState state, Population pop, IGroupedProblem prob)
{
    if (state.Generation == 0)
    {
        // Create arrays for the elite individuals in the population at the previous generation.
        // Deep-clone the elite individuals as random individuals (in the initial generation,
        // nobody has been evaluated yet).

        // deal with the elites
        _eliteIndividuals = TensorFactory.Create<Individual>(state.Population.Subpops.Count, NumElite);

        // copy the first individuals in each subpop (they are already randomly generated)
        for (var i = 0; i < _eliteIndividuals.Length; i++)
        {
            if (NumElite > state.Population.Subpops[i].Individuals.Count)
            {
                state.Output.Fatal("Number of elite partners is greater than the size of the subpop.");
            }
            for (var j = 0; j < NumElite; j++)
            {
                // just take the first N individuals of each subpopulation
                _eliteIndividuals[i][j] = (Individual)state.Population.Subpops[i].Individuals[j].Clone();
            }
        }

        // test for shuffled
        if (NumShuffled > 0)
        {
            var size = state.Population.Subpops[0].Individuals.Count;
            for (var i = 0; i < state.Population.Subpops.Count; i++)
            {
                if (state.Population.Subpops[i].Individuals.Count != size)
                {
                    state.Output.Fatal("Shuffling was requested in MultiPopCoevolutionaryEvaluator, but the subpopulation sizes are not the same. "
                        + "Specifically, subpopulation 0 has size " + size
                        + " but subpopulation " + i + " has size " + state.Population.Subpops[i].Individuals.Count);
                }
            }
        }
    }
}
/// <summary>
/// Build an NxN rotation matrix[row][column] with a given seed.
/// </summary>
public static double[] /* row */ [] /* column */ BuildRotationMatrix(double rotationSeed, int N)
{
    // use the seed we were handed (it arrives as a double, so cast it)
    IMersenneTwister rand = new MersenneTwisterFast((long)rotationSeed);

    double[] /* row */ [] /* column */ o = TensorFactory.Create<double>(N, N);

    // make random values
    for (var i = 0; i < N; i++)
    {
        for (var k = 0; k < N; k++)
        {
            o[i][k] = rand.NextGaussian();
        }
    }

    // orthonormalize the rows against one another (Gram-Schmidt)
    for (var i = 0; i < N; i++)
    {
        // extract o[i] -> no
        var no = new double[N];
        for (var k = 0; k < N; k++)
        {
            no[k] = o[i][k];
        }

        // go through o[i] and o[j], modifying no
        for (var j = 0; j < i; j++)
        {
            var d = Dot(o[i], o[j]);
            var val = ScalarMul(d, o[j]);
            no = Sub(no, val);
        }
        o[i] = Normalize(no);
    }
    return o;
}
double ComputeBestFitness()
{
    // This is a dynamic programming kludge.
    double[][] ttable = TensorFactory.Create<double>(k, 2);
    double[][] ftable = TensorFactory.Create<double>(k + 1, 2);

    for (int i = 0; i < 2; i++)
    {
        ftable[0][i] = _nodeScore[i];
    }

    // Case 1: the optimum has depth at most k
    for (int i = 0; i < k; i++)
    {
        for (int j = 0; j < 2; j++)
        {
            if (i == 0)
            {
                double max = (1 + _edgeScore[j][2]) * _nodeScore[2];
                for (int h = 3; h < 6; h++)
                {
                    double tmp = (1 + _edgeScore[j][h]) * _nodeScore[h];
                    if (tmp > max)
                    {
                        max = tmp;
                    }
                }
                ttable[i][j] = _nodeScore[j] + 2 * max;
            }
            else
            {
                double max = (1 + _edgeScore[j][0]) * ttable[i - 1][0];
                for (int h = 1; h < 2; h++)
                {
                    double tmp = (1 + _edgeScore[j][h]) * ttable[i - 1][h];
                    if (tmp > max)
                    {
                        max = tmp;
                    }
                }
                ttable[i][j] = _nodeScore[j] + 2 * max;
            }
        }
    }

    // Case 2: the optimum has depth k+1
    for (int i = 1; i < k + 1; i++)
    {
        for (int j = 0; j < 2; j++)
        {
            double max = (1 + _edgeScore[j][0]) * ftable[i - 1][0];
            for (int h = 1; h < 2; h++)
            {
                double tmp = (1 + _edgeScore[j][h]) * ftable[i - 1][h];
                if (tmp > max)
                {
                    max = tmp;
                }
            }
            ftable[i][j] = _nodeScore[j] + 2 * max;
        }
    }

    double best = _nodeScore[2];
    for (int i = 3; i < 6; i++)
    {
        if (_nodeScore[i] > best)
        {
            best = _nodeScore[i];
        }
    }
    for (int i = 0; i < k; i++)
    {
        for (int j = 0; j < 2; j++)
        {
            if (ttable[i][j] > best)
            {
                best = ttable[i][j];
            }
        }
    }
    for (int i = 0; i < 2; i++)
    {
        if (0.5 * ftable[k][i] > best)
        {
            best = 0.5 * ftable[k][i];
        }
    }
    return best;
}
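// Restating the two tables above (this derives directly from the loops, with
// internal symbols j, h in {0, 1} = {A, B} and terminals in {2..5} = {X, Y, Z, W}):
//
//   ttable[0][j] = nodeScore[j] + 2 * max over terminals t of (1 + edgeScore[j][t]) * nodeScore[t]
//   ttable[i][j] = nodeScore[j] + 2 * max over internals h of (1 + edgeScore[j][h]) * ttable[i-1][h]
//
//   ftable[0][j] = nodeScore[j]
//   ftable[i][j] = nodeScore[j] + 2 * max over internals h of (1 + edgeScore[j][h]) * ftable[i-1][h]
//
// So ttable[i][j] is the best score of a full binary tree rooted at j with i+1
// levels of edges ending in terminals (Case 1, depth at most k), while ftable
// covers the all-internal trees used for the depth-(k+1) case, which the final
// comparison discounts by the factor 0.5.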
///// <summary>
///// Sets all subpops in pop to the expected Lambda size. Does not fill new slots with individuals.
///// </summary>
//public virtual Population SetToLambda(Population pop, IEvolutionState state)
//{
//    for (var x = 0; x < pop.Subpops.Length; x++)
//    {
//        var s = Lambda[x];
//        // check to see if the array's not the right size
//        if (pop.Subpops[x].Individuals.Length != s)
//        // need to increase
//        {
//            var newinds = new Individual[s];
//            Array.Copy(pop.Subpops[x].Individuals, 0, newinds, 0,
//                s < pop.Subpops[x].Individuals.Length ? s : pop.Subpops[x].Individuals.Length);
//            pop.Subpops[x].Individuals = newinds;
//        }
//    }
//    return pop;
//}

public override Population BreedPopulation(IEvolutionState state)
{
    // Complete 1/5 statistics for last population
    if (ParentPopulation != null)
    {
        // Only go from 0 to Lambda-1, as the remaining individuals may be parents.
        // A child C's parent's index I is equal to C / (Lambda[subpop] / Mu[subpop]).
        for (var x = 0; x < state.Population.Subpops.Count; x++)
        {
            var numChildrenBetter = 0;
            for (var i = 0; i < Lambda[x]; i++)
            {
                var parent = i / (Lambda[x] / Mu[x]); // note integer division
                if (state.Population.Subpops[x].Individuals[i].Fitness.BetterThan(
                        ParentPopulation.Subpops[x].Individuals[parent].Fitness))
                {
                    numChildrenBetter++;
                }
            }
            if (numChildrenBetter > Lambda[x] / 5.0) // note double division
            {
                Comparison[x] = C_OVER_ONE_FIFTH_BETTER;
            }
            else if (numChildrenBetter < Lambda[x] / 5.0) // note double division
            {
                Comparison[x] = C_UNDER_ONE_FIFTH_BETTER;
            }
            else
            {
                Comparison[x] = C_EXACTLY_ONE_FIFTH_BETTER;
            }
        }
    }

    // load the parent population
    ParentPopulation = state.Population;

    // MU COMPUTATION

    // At this point we need to load our population info
    // and make sure it jibes with our mu info.

    // the first issue is: is the number of subpops
    // equal to the number of mu's?
    if (Mu.Length != state.Population.Subpops.Count) // uh oh
    {
        state.Output.Fatal("For some reason the number of subpops is different than was specified in the file (conflicting with Mu and Lambda storage).", null);
    }

    // next, load our population, and make sure no subpop is smaller than its mu
    for (var x = 0; x < state.Population.Subpops.Count; x++)
    {
        if (state.Population.Subpops[x].Individuals.Count < Mu[x])
        {
            state.Output.Error("Subpopulation " + x + " must be no smaller than the equivalent mu (that is, " + Mu[x] + ").");
        }
    }
    state.Output.ExitIfErrors();

    // sort evaluation to get the Mu best of each subpop
    foreach (Subpopulation s in state.Population.Subpops)
    {
        s.Individuals.SortByFitnessDescending();
    }

    // now the subpops are sorted so that the best individuals
    // appear in the lowest indexes.

    Population newpop = state.Population.EmptyClone();

    // create the count array
    Count = new int[state.BreedThreads];

    // divvy up the Lambda individuals to create

    // how many threads do we really need?  No more than the maximum number of individuals in any subpopulation
    int numThreads = 0;
    for (int x = 0; x < state.Population.Subpops.Count; x++)
    {
        numThreads = Math.Max(numThreads, Lambda[x]);
    }
    numThreads = Math.Min(numThreads, state.BreedThreads);
    if (numThreads < state.BreedThreads)
    {
        state.Output.WarnOnce("Largest lambda size (" + numThreads
            + ") is smaller than number of breedthreads (" + state.BreedThreads
            + "), so fewer breedthreads will be created.");
    }

    NewIndividuals = TensorFactory.Create<IList<Individual>>(state.Population.Subpops.Count, numThreads);
    int[][] numinds = TensorFactory.Create<int>(numThreads, state.Population.Subpops.Count);
    int[][] from = TensorFactory.Create<int>(numThreads, state.Population.Subpops.Count);

    for (int x = 0; x < state.Population.Subpops.Count; x++)
    {
        for (int thread = 0; thread < numThreads; thread++)
        {
            // the tensor starts out null, so allocate the lists rather than Clear() them
            NewIndividuals[x][thread] = new List<Individual>();
        }

        int length = Lambda[x];

        // we will have some extra individuals. We distribute these among the early threads.
        int individualsPerThread = length / numThreads; // integer division
        int slop = length - numThreads * individualsPerThread;
        int currentFrom = 0;

        for (int y = 0; y < numThreads; y++)
        {
            if (slop > 0)
            {
                numinds[y][x] = individualsPerThread + 1;
                slop--;
            }
            else
            {
                numinds[y][x] = individualsPerThread;
            }

            if (numinds[y][x] == 0)
            {
                state.Output.WarnOnce("More threads exist than can be used to breed some subpopulations (first example: subpopulation " + x + ")");
            }

            from[y][x] = currentFrom;
            currentFrom += numinds[y][x];
        }
    }

    //for (var y = 0; y < state.BreedThreads; y++)
    //    for (var x = 0; x < state.Population.Subpops.Length; x++)
    //    {
    //        // figure numinds
    //        if (y < state.BreedThreads - 1)
    //            // not last one
    //            numinds[y][x] = Lambda[x] / state.BreedThreads;
    //        // in case we're slightly off in division
    //        else
    //            numinds[y][x] = Lambda[x] / state.BreedThreads + (Lambda[x] - (Lambda[x] / state.BreedThreads) * state.BreedThreads);
    //        // figure from
    //        from[y][x] = (Lambda[x] / state.BreedThreads) * y;
    //    }

    if (numThreads == 1)
    {
        BreedPopChunk(newpop, state, numinds[0], from[0], 0);
    }
    else
    {
        ParallelBreeding(state, newpop, from, numinds, this);
    }

    // Coalesce
    for (int subpop = 0; subpop < state.Population.Subpops.Count; subpop++)
    {
        IList<Individual> newpopinds = newpop.Subpops[subpop].Individuals;
        for (int thread = 0; thread < numThreads; thread++)
        {
            ((List<Individual>)newpopinds).AddRange(NewIndividuals[subpop][thread]);
        }
    }

    return PostProcess(newpop, state.Population, state);
}
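// A standalone sketch of the divvying arithmetic above (a hypothetical helper,
// not part of the breeder): length items are split across numThreads, and the
// first (length % numThreads) threads each take one extra item, so every item
// is assigned exactly once and the chunks are contiguous.
static (int From, int Count)[] Divvy(int length, int numThreads)
{
    var chunks = new (int From, int Count)[numThreads];
    int per = length / numThreads;        // integer division
    int slop = length - numThreads * per; // leftover items
    int from = 0;
    for (int t = 0; t < numThreads; t++)
    {
        int count = per + (slop-- > 0 ? 1 : 0);
        chunks[t] = (from, count);
        from += count;
    }
    return chunks;
}
// e.g. Divvy(10, 4) yields (0,3), (3,3), (6,2), (8,2).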
public override int Produce(
    int min,
    int max,
    int subpop,
    IList<Individual> inds,
    IEvolutionState state,
    int thread,
    IDictionary<string, object> misc)
{
    int start = inds.Count;

    // how many individuals should we make?
    var n = TypicalIndsProduced;
    if (n < min)
    {
        n = min;
    }
    if (n > max)
    {
        n = max;
    }

    // should we bother?
    if (!state.Random[thread].NextBoolean(Likelihood))
    {
        // just load from source 0
        Sources[0].Produce(n, n, subpop, inds, state, thread, misc);
        return n;
    }

    Parents.Clear();

    // fill up parents: produce one parent from each source
    for (var i = 0; i < Sources.Length; i++) // Parents.Count == Sources.Length
    {
        Sources[i].Produce(1, 1, subpop, Parents, state, thread, misc);
    }

    // We assume all of the species are the same species ...
    var species = (VectorSpecies)((VectorIndividual)Parents[0]).Species;

    // an array of the split points (width = 1)
    var points = new int[((VectorIndividual)Parents[0]).GenomeLength - 1];
    for (var i = 0; i < points.Length; i++)
    {
        points[i] = i + 1; // first split point/index = 1
    }

    // split all the parents into object arrays
    var pieces = TensorFactory.Create<object>(Parents.Count, ((VectorIndividual)Parents[0]).GenomeLength);

    // splitting...
    for (int i = 0; i < Parents.Count; i++)
    {
        if (((VectorIndividual)Parents[i]).GenomeLength != ((VectorIndividual)Parents[0]).GenomeLength)
        {
            state.Output.Fatal("All vectors must be of the same length for crossover!");
        }
        else
        {
            ((VectorIndividual)Parents[i]).Split(points, pieces[i]);
        }
    }

    // crossing them over now
    for (var i = 0; i < pieces[0].Length; i++)
    {
        if (state.Random[thread].NextBoolean(species.CrossoverProbability))
        {
            // shuffle
            for (var j = pieces.Length - 1; j > 0; j--) // no need to shuffle the first index at the end
            {
                // find parent to swap piece with
                var parent2 = state.Random[thread].NextInt(j); // not inclusive; don't want to swap with self

                // swap
                var temp = pieces[j][i];
                pieces[j][i] = pieces[parent2][i];
                pieces[parent2][i] = temp;
            }
        }
    }

    // join them and add them to the population starting at the start location
    for (int i = 0, q = start; i < Parents.Count; i++, q++)
    {
        ((VectorIndividual)Parents[i]).Join(pieces[i]);
        Parents[i].Evaluated = false;

        //if (q < inds.Count) // just in case
        //{
        //    inds[q] = Parents[i];
        //}
        // by Ermo: the commented-out code above seems to be wrong -- inds is empty
        // here, so inds.Count returns 0. I think it should be the following line.
        // Sean -- right?
        inds.Add(Parents[i]);
    }
    return n;
}
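// A note on the mechanics above (a restatement, not new behavior): for a
// genome of length L, points = {1, 2, ..., L-1} splits each parent into L
// width-1 pieces, so the per-position shuffle amounts to uniform crossover
// among all parents -- each gene column is independently permuted across the
// parents with probability species.CrossoverProbability.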
public virtual void Preprocess(IEvolutionState state, int maxTreeSize)
{
    state.Output.Message("Determining Tree Sizes");

    MaxTreeSize = maxTreeSize;

    var functionSetRepository = ((GPInitializer)state.Initializer).FunctionSetRepository;

    // Put each function set into the arrays
    FunctionSets = new GPFunctionSet[functionSetRepository.Count];
    FunctionSetsHash = Hashtable.Synchronized(new Hashtable());
    var e = functionSetRepository.Values.GetEnumerator();
    var count = 0;
    while (e.MoveNext())
    {
        var funcs = (GPFunctionSet)e.Current;
        FunctionSetsHash[funcs] = count;
        FunctionSets[count++] = funcs;
    }

    // For each function set, assign each GPNode to a unique integer
    // so we can keep track of it (ick, this will be inefficient!)
    FuncNodesHash = Hashtable.Synchronized(new Hashtable());
    var t_nodes = Hashtable.Synchronized(new Hashtable());
    count = 0;
    MaxArity = 0;
    for (var x = 0; x < FunctionSets.Length; x++)
    {
        GPNode n;
        // hash all the nodes so we can remove duplicates
        for (var typ = 0; typ < FunctionSets[x].Nodes.Length; typ++)
        {
            for (var nod = 0; nod < FunctionSets[x].Nodes[typ].Length; nod++)
            {
                t_nodes[n = FunctionSets[x].Nodes[typ][nod]] = n;
            }
        }

        // rehash with Integers, yuck
        e = t_nodes.Values.GetEnumerator();
        GPNode tmpn;
        while (e.MoveNext())
        {
            tmpn = (GPNode)e.Current;
            if (MaxArity < tmpn.Children.Length)
            {
                MaxArity = tmpn.Children.Length;
            }
            if (!FuncNodesHash.ContainsKey(tmpn)) // don't remap the node; it'd make holes
            {
                FuncNodesHash[tmpn] = count++;
            }
        }
    }

    NumFuncNodes = FuncNodesHash.Count;

    var initializer = (GPInitializer)state.Initializer;
    var numAtomicTypes = initializer.NumAtomicTypes;
    var numSetTypes = initializer.NumSetTypes;

    var functionSetsLength = FunctionSets.Length;
    var atomicPlusSetTypes = numAtomicTypes + numSetTypes;
    var maxTreeSizePlusOne = MaxTreeSize + 1;

    // set up the arrays

    // NUMTREESOFTYPE
    NUMTREESOFTYPE = TensorFactory.Create<BigInteger>(functionSetsLength, atomicPlusSetTypes, maxTreeSizePlusOne);
    // NUMTREESROOTEDBYNODE
    NUMTREESROOTEDBYNODE = TensorFactory.Create<BigInteger>(functionSetsLength, NumFuncNodes, maxTreeSizePlusOne);
    // NUMCHILDPERMUTATIONS
    NUMCHILDPERMUTATIONS = TensorFactory.Create<BigInteger>(functionSetsLength, NumFuncNodes, maxTreeSizePlusOne, maxTreeSizePlusOne, MaxArity);
    // ROOT_D
    ROOT_D = TensorFactory.CreateOpenEnded<UniformGPNodeStorage>(functionSetsLength, atomicPlusSetTypes, maxTreeSizePlusOne); // 4D OpenEnded
    // ROOT_D_ZERO
    ROOT_D_ZERO = TensorFactory.Create<bool>(functionSetsLength, atomicPlusSetTypes, maxTreeSizePlusOne);
    // CHILD_D
    CHILD_D = TensorFactory.CreateOpenEnded<double>(functionSetsLength, NumFuncNodes, maxTreeSizePlusOne, maxTreeSizePlusOne); // 5D OpenEnded

    var types = ((GPInitializer)state.Initializer).Types;

    // TrueSizesBigInt
    TrueSizesBigInt = TensorFactory.Create<BigInteger>(functionSetsLength, atomicPlusSetTypes, maxTreeSizePlusOne);

    // Go through each function set and determine numbers
    // (this will take quite a while!  Thankfully it's offline)
    for (var x = 0; x < FunctionSets.Length; x++)
    {
        for (var y = 0; y < numAtomicTypes + numSetTypes; y++)
        {
            for (var z = 1; z <= MaxTreeSize; z++)
            {
                state.Output.Message("FunctionSet: " + FunctionSets[x].Name
                    + ", Type: " + types[y].Name + ", Size: " + z
                    + " num: " + (TrueSizesBigInt[x][y][z] = NumTreesOfType(initializer, x, y, z)));
            }
        }
    }

    state.Output.Message("Compiling Distributions");

    TrueSizes = TensorFactory.Create<double>(functionSetsLength, atomicPlusSetTypes, maxTreeSizePlusOne);

    // convert to doubles and organize distribution
    for (var x = 0; x < FunctionSets.Length; x++)
    {
        for (var y = 0; y < numAtomicTypes + numSetTypes; y++)
        {
            for (var z = 1; z <= MaxTreeSize; z++)
            {
                // BRS : DOES THIS TRUNCATE ANYTHING ??? (it can: the cast to double is lossy once counts exceed 2^53)
                TrueSizes[x][y][z] = (double)TrueSizesBigInt[x][y][z];
            }

            // and if this is all zero (a possibility) we should be forgiving (hence the 'true') -- I *think*
            RandomChoice.OrganizeDistribution(TrueSizes[x][y], true);
        }
    }

    // compute our percentages
    ComputePercentages();
}
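// A note on the call above (hedged -- based on how ECJ's RandomChoice is
// typically used, not restated from this file): OrganizeDistribution rewrites
// each TrueSizes[x][y] row in place as a normalized cumulative distribution,
// so a tree size can later be drawn by searching the row with a uniform
// random number; the 'true' argument is the forgiveness flag mentioned in the
// comment above, tolerating an all-zero row instead of treating it as an error.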
public virtual void PerformCoevolutionaryEvaluation(IEvolutionState state, Population pop, IGroupedProblem prob)
{
    var evaluations = 0;

    _inds = new Individual[pop.Subpops.Count];
    _updates = new bool[pop.Subpops.Count];

    // we start by warming up the selection methods
    if (NumCurrent > 0)
    {
        for (var i = 0; i < _selectionMethodCurrent.Length; i++)
        {
            _selectionMethodCurrent[i].PrepareToProduce(state, i, 0);
        }
    }
    if (NumPrev > 0)
    {
        for (var i = 0; i < _selectionMethodPrev.Length; i++)
        {
            // do a hack here
            var currentPopulation = state.Population;
            state.Population = _previousPopulation;
            _selectionMethodPrev[i].PrepareToProduce(state, i, 0);
            state.Population = currentPopulation;
        }
    }

    // build subpopulation array to pass in each time
    var subpops = new int[state.Population.Subpops.Count];
    for (var j = 0; j < subpops.Length; j++)
    {
        subpops[j] = j;
    }

    // handle shuffled always
    if (NumShuffled > 0)
    {
        // build shuffled orderings
        int[] /*numShuffled*/ [] /*subpop*/ [] /*shuffledIndividualIndexes*/ ordering =
            TensorFactory.Create<Int32>(NumShuffled, state.Population.Subpops.Count, state.Population.Subpops[0].Individuals.Count);
        for (var c = 0; c < NumShuffled; c++)
        {
            for (var m = 0; m < state.Population.Subpops.Count; m++)
            {
                for (var i = 0; i < state.Population.Subpops[0].Individuals.Count; i++)
                {
                    ordering[c][m][i] = i;
                }
                if (m != 0)
                {
                    Shuffle(state, ordering[c][m]);
                }
            }
        }

        // for each individual
        for (var i = 0; i < state.Population.Subpops[0].Individuals.Count; i++)
        {
            for (var k = 0; k < NumShuffled; k++)
            {
                for (var ind = 0; ind < _inds.Length; ind++)
                {
                    _inds[ind] = state.Population.Subpops[ind].Individuals[ordering[k][ind][i]];
                    _updates[ind] = true;
                }
                prob.Evaluate(state, _inds, _updates, false, subpops, 0);
                evaluations++;
            }
        }
    }

    // for each subpopulation
    for (var j = 0; j < state.Population.Subpops.Count; j++)
    {
        // now do elites and randoms
        if (!ShouldEvaluateSubpop(state, j, 0))
        {
            continue; // don't evaluate this subpopulation
        }

        // for each individual
        for (var i = 0; i < state.Population.Subpops[j].Individuals.Count; i++)
        {
            var individual = state.Population.Subpops[j].Individuals[i];

            // test against all the elites
            for (var k = 0; k < _eliteIndividuals[j].Length; k++)
            {
                for (var ind = 0; ind < _inds.Length; ind++)
                {
                    if (ind == j)
                    {
                        _inds[ind] = individual;
                        _updates[ind] = true;
                    }
                    else
                    {
                        _inds[ind] = _eliteIndividuals[ind][k];
                        _updates[ind] = false;
                    }
                }
                prob.Evaluate(state, _inds, _updates, false, subpops, 0);
                evaluations++;
            }

            // test against randomly selected individuals of the current population
            for (var k = 0; k < NumCurrent; k++)
            {
                for (var ind = 0; ind < _inds.Length; ind++)
                {
                    if (ind == j)
                    {
                        _inds[ind] = individual;
                        _updates[ind] = true;
                    }
                    else
                    {
                        _inds[ind] = ProduceCurrent(ind, state, 0);
                        _updates[ind] = true;
                    }
                }
                prob.Evaluate(state, _inds, _updates, false, subpops, 0);
                evaluations++;
            }

            // test against randomly selected individuals of the previous population
            for (int k = 0; k < NumPrev; k++)
            {
                for (int ind = 0; ind < _inds.Length; ind++)
                {
                    if (ind == j)
                    {
                        _inds[ind] = individual;
                        _updates[ind] = true;
                    }
                    else
                    {
                        _inds[ind] = ProducePrevious(ind, state, 0);
                        _updates[ind] = false;
                    }
                }
                prob.Evaluate(state, _inds, _updates, false, subpops, 0);
                evaluations++;
            }
        }
    }

    // now shut down the selection methods
    if (NumCurrent > 0)
    {
        for (var i = 0; i < _selectionMethodCurrent.Length; i++)
        {
            _selectionMethodCurrent[i].FinishProducing(state, i, 0);
        }
    }
    if (NumPrev > 0)
    {
        for (var i = 0; i < _selectionMethodPrev.Length; i++)
        {
            // do a hack here
            var currentPopulation = state.Population;
            state.Population = _previousPopulation;
            _selectionMethodPrev[i].FinishProducing(state, i, 0);
            state.Population = currentPopulation;
        }
    }

    state.Output.Message("Evaluations: " + evaluations);
}
/// <summary>
/// A simple breeder that doesn't attempt to do any cross-population breeding.
/// Basically it applies pipelines, one per thread, to various subchunks of a new population.
/// </summary>
public override Population BreedPopulation(IEvolutionState state)
{
    Population newpop;
    if (ClonePipelineAndPopulation)
    {
        newpop = (Population)state.Population.EmptyClone();
    }
    else
    {
        if (BackupPopulation == null)
        {
            BackupPopulation = (Population)state.Population.EmptyClone();
        }
        newpop = BackupPopulation;
        newpop.Clear();
        BackupPopulation = state.Population; // swap in
    }

    // maybe resize?
    for (int i = 0; i < state.Population.Subpops.Count; i++)
    {
        if (ReduceBy[i] > 0)
        {
            int prospectiveSize = Math.Max(
                Math.Max(state.Population.Subpops[i].Individuals.Count - ReduceBy[i], MinimumSize[i]),
                NumElites(state, i));
            if (prospectiveSize < state.Population.Subpops[i].Individuals.Count) // let's resize!
            {
                state.Output.Message("Subpop " + i + " reduced "
                    + state.Population.Subpops[i].Individuals.Count + " -> " + prospectiveSize);
                newpop.Subpops[i].Truncate(prospectiveSize);
            }
        }
    }

    // load Elites into top of newpop
    LoadElites(state, newpop);

    // how many threads do we really need?  No more than the maximum number of individuals in any subpopulation
    int numThreads = 0;
    for (int x = 0; x < state.Population.Subpops.Count; x++)
    {
        numThreads = Math.Max(numThreads, state.Population.Subpops[x].Individuals.Count);
    }
    numThreads = Math.Min(numThreads, state.BreedThreads);
    if (numThreads < state.BreedThreads)
    {
        state.Output.WarnOnce("Largest subpopulation size (" + numThreads
            + ") is smaller than number of breedthreads (" + state.BreedThreads
            + "), so fewer breedthreads will be created.");
    }

    NewIndividuals = TensorFactory.Create<IList<Individual>>(state.Population.Subpops.Count, numThreads);
    for (int subpop = 0; subpop < state.Population.Subpops.Count; subpop++)
    {
        for (int thread = 0; thread < numThreads; thread++)
        {
            NewIndividuals[subpop][thread] = new List<Individual>();
        }
    }

    int[][] numinds = TensorFactory.Create<int>(state.BreedThreads, state.Population.Subpops.Count);
    int[][] from = TensorFactory.Create<int>(state.BreedThreads, state.Population.Subpops.Count);

    for (int x = 0; x < state.Population.Subpops.Count; x++)
    {
        for (int thread = 0; thread < numThreads; thread++)
        {
            NewIndividuals[x][thread].Clear();
        }

        int length = ComputeSubpopulationLength(state, x, 0);

        // we will have some extra individuals. We distribute these among the early threads.
        int individualsPerThread = length / numThreads; // integer division
        int slop = length - numThreads * individualsPerThread;
        int currentFrom = 0;

        for (int y = 0; y < numThreads; y++)
        {
            if (slop > 0)
            {
                numinds[y][x] = individualsPerThread + 1;
                slop--;
            }
            else
            {
                numinds[y][x] = individualsPerThread;
            }

            if (numinds[y][x] == 0)
            {
                state.Output.WarnOnce("More threads exist than can be used to breed some subpopulations (first example: subpopulation " + x + ")");
            }

            from[y][x] = currentFrom;
            currentFrom += numinds[y][x];
        }
    }

    if (numThreads == 1)
    {
        BreedPopChunk(newpop, state, numinds[0], from[0], 0);
    }
    else
    {
        ParallelBreeding(state, newpop, from, numinds, this);
    }

    // Coalesce
    for (int subpop = 0; subpop < state.Population.Subpops.Count; subpop++)
    {
        IList<Individual> newpopindividuals = newpop.Subpops[subpop].Individuals;
        for (int thread = 0; thread < numThreads; thread++)
        {
            ((List<Individual>)newpopindividuals).AddRange(NewIndividuals[subpop][thread]);
        }
    }

    return newpop;
}
/// <summary>
/// Tests an individual, returning its successful positives in totpos and its successful negatives in totneg.
/// </summary>
/// <param name="state">The evolution state.</param>
/// <param name="ind">The individual whose tree encodes the NFA.</param>
/// <param name="threadnum">The evaluation thread number.</param>
/// <param name="pos">The positive (should-accept) test strings.</param>
/// <param name="neg">The negative (should-reject) test strings.</param>
public void FullTest(IEvolutionState state, Individual ind, int threadnum, bool[][] pos, bool[][] neg)
{
    // reset the graph
    NumNodes = 2;
    NumEdges = 1;
    From[0] = 0;
    To[0] = 1;
    Start[0] = Start[1] = Accept[0] = Accept[1] = false;
    Input.edge = 0;

    // generate the graph
    ((GPIndividual)ind).Trees[0].Child.Eval(state, threadnum, Input, Stack, (GPIndividual)ind, this);

    // produce the adjacency matrix
    if (Reading1.Length < NumNodes || Reading1[0].Length < NumEdges)
    {
        Reading1 = TensorFactory.Create<int>(NumNodes * 2, NumEdges * 2); // new int[NumNodes * 2][NumEdges * 2];
        Reading0 = TensorFactory.Create<int>(NumNodes * 2, NumEdges * 2); // new int[NumNodes * 2][NumEdges * 2];
        Epsilon = TensorFactory.Create<int>(NumNodes * 2, NumEdges * 2);  // new int[NumNodes * 2][NumEdges * 2];
        Reading1L = new int[NumNodes * 2];
        Reading0L = new int[NumNodes * 2];
        EpsilonL = new int[NumNodes * 2];
    }

    for (int y = 0; y < NumNodes; y++)
    {
        Reading1L[y] = 0;
        Reading0L[y] = 0;
        EpsilonL[y] = 0;
    }

    for (var y = 0; y < NumEdges; y++)
    {
        switch (Reading[y])
        {
            case READING0:
                Reading0[From[y]][Reading0L[From[y]]++] = To[y];
                break;
            case READING1:
                Reading1[From[y]][Reading1L[From[y]]++] = To[y];
                break;
            case EPSILON:
                Epsilon[From[y]][EpsilonL[From[y]]++] = To[y];
                break;
        }
    }

    // create the states
    if (State1.Length < NumNodes)
    {
        State1 = new bool[NumNodes * 2];
        State2 = new bool[NumNodes * 2];
    }

    // test the graph on our data
    _totpos = 0;
    _totneg = 0;
    for (var y = 0; y < pos.Length; y++)
    {
        if (Test(pos[y]))
        {
            _totpos++;
        }
    }
    for (var y = 0; y < neg.Length; y++)
    {
        if (!Test(neg[y]))
        {
            _totneg++;
        }
    }
}
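// A worked example of the adjacency-list encoding above: suppose the evolved
// graph has NumEdges = 3 with
//   edge 0: From = 0, To = 1, Reading = READING1
//   edge 1: From = 0, To = 0, Reading = READING0
//   edge 2: From = 1, To = 0, Reading = EPSILON
// Then after the switch loop:
//   Reading1[0] = {1}, Reading1L[0] = 1  // on input '1', node 0 can move to node 1
//   Reading0[0] = {0}, Reading0L[0] = 1  // on input '0', node 0 loops back to itself
//   Epsilon[1]  = {0}, EpsilonL[1]  = 1  // node 1 has an epsilon-move to node 0
// i.e. Reading1[n] lists, in its first Reading1L[n] slots, every node reachable
// from node n on reading a '1' (and similarly for Reading0 and Epsilon).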