Example #1
        public void init(int id, double nx, double ny, double dir,
                         AgentBrain agentBrain, Environment environment,
                         float sensorNoise, float effectorNoise, float headingNoise, float timeStep)
        {
            steps = 0;

            this.id      = id;
            radius       = defaultRobotSize();
            location     = new Point2D(nx, ny);
            circle       = new Circle2D(location, radius);
            old_location = new Point2D(location);

            heading            = dir;
            velocity           = 0.0;
            collide_last       = false;
            this.timeStep      = timeStep;
            this.environment   = environment;
            this.agentBrain    = agentBrain;
            this.sensorNoise   = sensorNoise;
            this.effectorNoise = effectorNoise;
            this.headingNoise  = headingNoise;
            populateSensors();
            // The original seed check assigned environment.rng on both branches
            // (seeded construction was commented out), so a single assignment suffices.
            rng = environment.rng;
        }
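
A minimal usage sketch for init above; the Robot type, the brain stand-in, and all numeric values are assumptions for illustration, not part of the original example:

        // Hypothetical wiring; Robot, AgentBrain, and Environment come from the host project.
        Environment environment = new Environment();
        environment.seed = 42;
        environment.rng  = new SharpNeatLib.Maths.FastRandom(environment.seed);

        AgentBrain brain = null; // stand-in; a real brain comes from the experiment setup
        Robot robot = new Robot();
        robot.init(0,                // id
                   10.0, 20.0,       // starting x, y
                   Math.PI / 2.0,    // initial heading (radians assumed)
                   brain,
                   environment,
                   0.0f, 0.0f, 0.0f, // sensor, effector, and heading noise
                   0.1f);            // time step (units assumed)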
Example #2
        /// <summary>
        /// Initialize the robot.
        /// </summary>
        public void init(int id, double locationX, double locationY, double heading, AgentBrain agentBrain, Environment environment, float sensorNoise, float effectorNoise, float headingNoise, float timeStep)
        {
            this.ID      = id;
            Radius       = defaultRobotSize();
            Location     = new Point2D(locationX, locationY);
            AreaOfImpact = new Circle2D(Location, Radius);
            OldLocation  = new Point2D(Location);

            Heading                 = heading;
            Velocity                = 0.0;
            HasCollided             = false;
            this.Timestep           = timeStep;
            this.CurrentEnvironment = environment;
            this.Brain              = agentBrain;
            this.SensorNoise        = sensorNoise;
            this.EffectorNoise      = effectorNoise;
            this.HeadingNoise       = headingNoise;
            populateSensors();
            // The seed check assigned environment.rng on both branches, so a single
            // assignment suffices.
            rng = environment.rng;
            this.Trajectory = new List <int>();
        }
Example #3
        public void reset()
        {
            seed         = 0;
            view_x       = 0.0f;
            view_y       = 0.0f;
            view_scale   = 5.0f;
            AOIRectangle = new Rectangle(30, 60, 640, 500);

            group_orientation = 0;
            robot_spacing     = 30;
            robot_heading     = 0;
            rng   = new SharpNeatLib.Maths.FastRandom();
            walls = new List <Wall>();

            POIPosition = new List <Point>();

            start_point = new Point2D(0, 0);
            goal_point  = new Point2D(100, 100);
        }
        // Schrum: Had to remove the internal from this too so that I could override it in AdversarialRoomClearingExperiment
        public override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            // Schrum: Used when punishing links in substrate networks.
            int linksInSubstrate = 0;

            double fitness = multiplicativeFitness ? 1 : 0;

            behavior = new SharpNeatLib.BehaviorType();

            // Schrum: Why is there a magic number 6 here?
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++)
            {
                accumObjectives[i] = 0.0;
            }

            // Schrum: Special handling for FourTasks domain again
            if (fitnessFunction is FourTasksFitness)
            {
                // Pass the experiment into the fitness function once so its parameters can be changed for each environment
                ((FourTasksFitness)fitnessFunction).setExperiment(this);
            }

            IFitnessFunction          fit_copy;
            IBehaviorCharacterization bc_copy;
            //CollisionManager cm;
            int envNum = 0;

            // Schrum: Separate fitness for each environment.
            // Should this be put in accumObjectives instead? What is that for?
            double[] environmentScores = new double[environmentList.Count];
            foreach (Environment env2 in environmentList)
            {
                //Console.WriteLine("====================Environment loop " + envNum);
                // Schrum: moved this here to deal with consistency issues
                instance_pack inst = new instance_pack();

                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                // Schrum: Just add special handling for FourTasks to get settings right
                if (inst.ff is FourTasksFitness)
                {
                    // FourTasks needs to know the current environment. Experiment reference provided earlier.
                    ((FourTasksFitness)inst.ff).setupFitness(envNum);
                }

                // Schrum: moved here from outside/before the loop. Most domains use the same timestep in each environment,
                // but the FourTasks domain is an exception to this, so the setting needed to be moved after setupFitness
                inst.timestep = timestep;
                double   tempFit   = 0;
                double[] fitnesses = new double[timesToRunEnvironments];
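                // Fixed seed (100): every call to evaluateNetwork draws the same
                // noise sequence, keeping evaluations comparable across genomes.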
                SharpNeatLib.Maths.FastRandom evalRand = new SharpNeatLib.Maths.FastRandom(100);
                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    int agent_trials = timesToRunEnvironments;

                    inst.num_rbts = this.numberRobots;

                    Environment env = env2.copy();

                    double evalTime = this.evaluationTime;

                    inst.eval = evals;
                    env.seed  = evals;
                    env.rng   = new SharpNeatLib.Maths.FastRandom(env.seed + 1);


                    int noise_lev = (int)this.sensorNoise + 1;

                    // NextUInt() % noise_lev draws an integer noise level in
                    // [0, (int)sensorNoise]; the cast truncates toward zero.
                    float new_sn = evalRand.NextUInt() % noise_lev;
                    float new_ef = evalRand.NextUInt() % noise_lev;


                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, numBrains, evolveSubstrate, preferenceNeurons, forcedSituationalPolicyGeometry);
                    // Add up the links in each substrate brain
                    // Recalculates each evaluation, but resets to 0 each time.
                    linksInSubstrate = 0;
                    foreach (INetwork b in inst.agentBrain.multiBrains)
                    {
                        linksInSubstrate += b.NumLinks;
                    }
                    if (eval)
                    {
                        Console.WriteLine("Links In Substrate: " + linksInSubstrate);
                    }
                    initializeRobots(inst.agentBrain, env, headingNoise, new_sn, new_ef, inst);

                    inst.elapsed  = 0;
                    inst.timestep = this.timestep;
                    //Console.WriteLine(this.timestep);

                    inst.timeSteps        = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.Initialize(env, this, inst.robots);

                    try
                    {
                        //Console.WriteLine("Environment " + environmentName + " " + envNum);
                        while (inst.elapsed < evalTime)
                        {
                            // Schrum2: Only called for non-visual evaluations
                            //Console.WriteLine("MAE:" + inst.elapsed + "/" + evalTime);
                            runEnvironment(env, inst, sem);
                        }
                    }
                    catch (Exception e)
                    {
                        for (int x = 0; x < inst.robots.Count; x++)
                        {
                            for (int y = 0; y < inst.robots[x].history.Count; y++)
                            {
                                Console.WriteLine(x + " " + y + " " + inst.robots[x].history[y].x + " " + inst.robots[x].history[y].y);
                            }
                        }


                        behavior = new SharpNeatLib.BehaviorType();
                        Console.WriteLine(e.Message);
                        Console.WriteLine(e.StackTrace);
                        throw; // rethrow, preserving the original stack trace
                    }


                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);

                    //Console.WriteLine(env.name + ": Fitness for one eval: " + thisFit);
                    fitnesses[evals] = thisFit;
                    tempFit         += thisFit;


                    if (behavior != null && behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                        {
                            accumObjectives[i] += behavior.objectives[i];
                        }

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List <double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }

                    inst.ff.reset();
                }
                // Schrum: Save each fitness separately
                environmentScores[envNum] = tempFit;
                envNum++; // go to the next environment

                // Schrum: Product fitness might encourage better performance in all environments
                if (multiplicativeFitness)
                {
                    fitness *= tempFit / timesToRunEnvironments;
                }
                else // default is sum/average
                {
                    fitness += tempFit / timesToRunEnvironments;
                }
            }

            // Punish CPPN modules: Must use with multiobjective option
            if (cppnModuleCost)
            {
                behavior.objectives = new double[] { fitness / environmentList.Count, -network.NumOutputModules };
            }
            // Punish CPPN links: Must use with multiobjective option
            else if (cppnLinkCost)
            {
                behavior.objectives = new double[] { fitness / environmentList.Count, -network.NumLinks };
            }
            // Punish substrate links: Must use with multiobjective option
            else if (substrateLinkCost)
            {
                behavior.objectives = new double[] { fitness / environmentList.Count, -linksInSubstrate };
            }
            else if (fitnessFunction is FourTasksFitness)
            { // Schrum: Special handling for FourTasksFitness ... could I just use accumObjectives?
                behavior.objectives = environmentScores;
            }
            else // Original default
            {
                behavior.objectives = accumObjectives;
            }
            behavior.modules        = network.NumOutputModules;
            behavior.cppnLinks      = network.NumLinks;
            behavior.substrateLinks = linksInSubstrate;

            //Console.WriteLine("Total: " + (fitness / environmentList.Count));
            // Schrum: Averaging helps normalize range when adding fitness values, but not when multiplying fitness values
            return(multiplicativeFitness ? fitness : fitness / environmentList.Count);
        }
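
The multiplicative/additive switch above is the whole difference between the two fitness modes. A minimal sketch of just the aggregation, with a hypothetical helper name and example scores:

        // Combines per-environment average scores by product or by mean.
        static double Aggregate(double[] perEnvironmentScores, bool multiplicative)
        {
            double fitness = multiplicative ? 1.0 : 0.0;
            foreach (double score in perEnvironmentScores)
            {
                fitness = multiplicative ? fitness * score : fitness + score;
            }
            // Averaging normalizes the sum; the product is returned as-is,
            // mirroring the final return in evaluateNetwork above.
            return multiplicative ? fitness : fitness / perEnvironmentScores.Length;
        }

With scores of 0.9 and 0.1 across two environments, the mean is 0.5 but the product is 0.09: the product punishes failure in any single environment far more sharply, matching the comment that product fitness "might encourage better performance in all environments".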
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double fitness = 0;//SharpNeatLib.Utilities.random.NextDouble();
            NeatGenome tempGenome;
            INetwork controller;

            behavior = new SharpNeatLib.BehaviorType();

            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++) accumObjectives[i] = 0.0;

            IFitnessFunction fit_copy;
            IBehaviorCharacterization bc_copy;
            CollisionManager cm;
            instance_pack inst = new instance_pack();
            inst.timestep = timestep;
            foreach (Environment env2 in environmentList)
            {
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double tempFit = 0;
                double[] fitnesses = new double[timesToRunEnvironments];
                SharpNeatLib.Maths.FastRandom evalRand = new SharpNeatLib.Maths.FastRandom(100);
                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    int agent_trials = timesToRunEnvironments;

                    inst.num_rbts = this.numberRobots;

                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed = evals;
                    env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);

                    int noise_lev = (int)this.sensorNoise + 1;

                    float new_sn = evalRand.NextUInt() % noise_lev;
                    float new_ef = evalRand.NextUInt() % noise_lev;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, dirComm);
                    initializeRobots(agentBrain, env, headingNoise, new_sn, new_ef, inst);

                    inst.elapsed = 0;
                    inst.timestep = this.timestep;
                    //Console.WriteLine(this.timestep);

                    inst.timeSteps = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.Initialize(env, this, inst.robots);

                    try
                    {
                        while (inst.elapsed < evalTime)
                        {
                            runEnvironment(env, inst, sem);
                        }
                    }
                    catch (Exception e)
                    {
                        for (int x = 0; x < inst.robots.Count; x++)
                        {
                            for (int y = 0; y < inst.robots[x].history.Count; y++)
                            {
                                Console.WriteLine(x + " " + y + " " + inst.robots[x].history[y].x + " " + inst.robots[x].history[y].y);
                            }
                        }

                        behavior = new SharpNeatLib.BehaviorType();
                        Console.WriteLine(e.Message);
                        Console.WriteLine(e.StackTrace);
                        throw; // rethrow, preserving the stack trace; the old "return 0.0001" was unreachable
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit += thisFit;

                    if (behavior != null && behavior.objectives != null && inst.bc!=null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                            accumObjectives[i] += behavior.objectives[i];

                        if(behavior.behaviorList==null)
                            behavior.behaviorList=new List<double>();
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }

                    inst.ff.reset();

                }
                fitness += tempFit / timesToRunEnvironments;
            }
            behavior.objectives = accumObjectives;
            return fitness / environmentList.Count;
        }
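
Each trial above reseeds the environment RNG from the evaluation index (env.seed = evals, then new FastRandom(env.seed + 1)), so trial k draws the same random sequence on every run. A minimal sketch of the pattern in isolation:

        // Deterministic per-trial seeding: trial k always sees the same sequence.
        for (int evals = 0; evals < timesToRunEnvironments; evals++)
        {
            var trialRng = new SharpNeatLib.Maths.FastRandom(evals + 1); // +1 mirrors env.seed + 1 above
            // ... use trialRng for this trial's noise and placement ...
        }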
Example #6
        public void reset()
        {
            seed = 0;
            view_x = 0.0f;
            view_y = 0.0f;
            view_scale = 5.0f;
            AOIRectangle = new Rectangle(30, 60, 640, 500);

            group_orientation = 0;
            robot_spacing = 30;
            robot_heading = 0;
            rng = new SharpNeatLib.Maths.FastRandom();
            walls = new List<Wall>();
            preys = new List<Prey>();

            POIPosition = new List<Point>();

            start_point = new Point2D(0, 0);
            goal_point = new Point2D(100, 100);
        }
        /// <summary>
        /// Runs a single individual (or single multiagent team) through the CurrentEnvironment(s) specified in the experiment file.
        /// </summary>
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double        fitness = 0;
            List <double> fitList = new List <double>();

            behavior = new SharpNeatLib.BehaviorType();
            List <Double> origBehavior = new List <Double>();

            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++)
            {
                accumObjectives[i] = 0.0;
            }

            IFitnessFunction          fit_copy;
            IBehaviorCharacterization bc_copy;
            instance_pack             inst = new instance_pack();

            inst.timestep = timestep;

            foreach (Environment env2 in environmentList)
            {
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double   tempFit   = 0;
                double[] fitnesses = new double[timesToRunEnvironments];

                SharpNeatLib.Maths.FastRandom evalRand;
                if (noiseDeterministic)
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom(100);
                }
                else
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom();
                }

                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed  = evals;
                    if (!benchmark && noiseDeterministic)
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);
                    }
                    else
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom();
                    }

                    float new_sn           = this.sensorNoise;
                    float new_ef           = this.effectorNoise;
                    float new_headingnoise = this.headingNoise;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, neatBrain, useCTRNNS);
                    inst.timestep   = timestep;
                    initializeRobots(agentBrain, env, new_headingnoise, new_sn, new_ef, inst);
                    inst.elapsed          = 0;
                    inst.timeSteps        = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.initialize(env, this, inst.robots);

                    while (inst.elapsed < evalTime)
                    {
                        runEnvironment(env, inst, sem);
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit         += thisFit;

                    if (behavior != null && behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                        {
                            accumObjectives[i] += behavior.objectives[i];
                        }

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List <double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    else if (behavior != null && inst.bc != null)
                    {
                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List <double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    inst.ff.reset();
                }

                fitness += tempFit / timesToRunEnvironments;
                fitList.Add(tempFit / timesToRunEnvironments);
            }
            behavior.objectives = accumObjectives;
            if (behaviorCharacterization != null)
            {
                behavior.wraparoundRange = behaviorCharacterization.wraparoundDistCalc;
            }

            double t = 0;

            if (recordEndpoints)
            {
                behavior.finalLocation = new List <double>(2);
                behavior.finalLocation.Add(inst.robots[0].Location.X);
                behavior.finalLocation.Add(inst.robots[0].Location.Y);
            }

            if (recordTrajectories)
            {
                behavior.trajectory = new List <int>();
                behavior.trajectory.AddRange(inst.robots[0].Trajectory);
            }

            return((fitness - t) / environmentList.Count);
        }
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double     fitness = 0;//SharpNeatLib.Utilities.random.NextDouble();
            NeatGenome tempGenome;
            INetwork   controller;

            behavior = new SharpNeatLib.BehaviorType();

            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++)
            {
                accumObjectives[i] = 0.0;
            }

            IFitnessFunction          fit_copy;
            IBehaviorCharacterization bc_copy;
            CollisionManager          cm;
            instance_pack             inst = new instance_pack();

            inst.timestep = timestep;
            foreach (Environment env2 in environmentList)
            {
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double   tempFit   = 0;
                double[] fitnesses = new double[timesToRunEnvironments];
                SharpNeatLib.Maths.FastRandom evalRand = new SharpNeatLib.Maths.FastRandom(100);
                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    int agent_trials = timesToRunEnvironments;

                    inst.num_rbts = this.numberRobots;

                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed  = evals;
                    env.rng   = new SharpNeatLib.Maths.FastRandom(env.seed + 1);


                    int noise_lev = (int)this.sensorNoise + 1;

                    float new_sn = evalRand.NextUInt() % noise_lev;
                    float new_ef = evalRand.NextUInt() % noise_lev;


                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate);
                    initializeRobots(agentBrain, env, headingNoise, new_sn, new_ef, inst);

                    inst.elapsed  = 0;
                    inst.timestep = this.timestep;
                    //Console.WriteLine(this.timestep);

                    inst.timeSteps        = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.Initialize(env, this, inst.robots);

                    try
                    {
                        while (inst.elapsed < evalTime)
                        {
                            runEnvironment(env, inst, sem);
                        }
                    }
                    catch (Exception e)
                    {
                        for (int x = 0; x < inst.robots.Count; x++)
                        {
                            for (int y = 0; y < inst.robots[x].history.Count; y++)
                            {
                                Console.WriteLine(x + " " + y + " " + inst.robots[x].history[y].x + " " + inst.robots[x].history[y].y);
                            }
                        }


                        behavior = new SharpNeatLib.BehaviorType();
                        Console.WriteLine(e.Message);
                        Console.WriteLine(e.StackTrace);
                        throw; // rethrow, preserving the stack trace; the unreachable return was removed
                    }


                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit         += thisFit;


                    if (behavior != null && behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                        {
                            accumObjectives[i] += behavior.objectives[i];
                        }

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List <double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }

                    inst.ff.reset();
                }
                fitness += tempFit / timesToRunEnvironments;
            }
            behavior.objectives = accumObjectives;
            return(fitness / environmentList.Count);
        }
Example #9
        /// <summary>
        /// Initialize the robot.
        /// </summary>
        public void init(int id, double locationX, double locationY, double heading, AgentBrain agentBrain, Environment environment, float sensorNoise, float effectorNoise, float headingNoise, float timeStep)
        {
            this.ID = id;
            Radius = defaultRobotSize();
            Location = new Point2D(locationX, locationY);
            AreaOfImpact = new Circle2D(Location, Radius);
            OldLocation = new Point2D(Location);

            Heading = heading;
            Velocity = 0.0;
            HasCollided = false;
            this.Timestep = timeStep;
            this.CurrentEnvironment = environment;
            this.Brain = agentBrain;
            this.SensorNoise = sensorNoise;
            this.EffectorNoise = effectorNoise;
            this.HeadingNoise = headingNoise;
            populateSensors();
            // The seed check assigned environment.rng on both branches, so a single
            // assignment suffices.
            rng = environment.rng;
            this.Trajectory = new List<int>();
        }
Example #10
        public void init(int id, double nx, double ny, double dir,
                         AgentBrain agentBrain, Environment environment,
                         float sensorNoise, float effectorNoise, float headingNoise, float timeStep)
        {
            steps = 0;

            this.id = id;
            radius = defaultRobotSize();
            location = new Point2D(nx, ny);
            circle = new Circle2D(location, radius);
            old_location = new Point2D(location);

            heading = dir;
            velocity = 0.0;
            collide_last = false;
            this.timeStep = timeStep;
            this.environment = environment;
            this.agentBrain = agentBrain;
            this.sensorNoise = sensorNoise;
            this.effectorNoise = effectorNoise;
            this.headingNoise = headingNoise;
            populateSensors();
            // The seed check assigned environment.rng on both branches (seeded
            // construction was commented out), so a single assignment suffices.
            rng = environment.rng;
        }
Example #11
 public FastRandom(int seed) : base(seed)
 {
     _random = new SharpNeatLib.Maths.FastRandom(seed);
 }
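A usage sketch for the seeded wrapper above; two instances built from the same seed should produce identical draws (assuming the base class exposes the usual Random-style Next()):

 var a = new FastRandom(100);
 var b = new FastRandom(100);
 Console.WriteLine(a.Next() == b.Next()); // True: same seed, same sequence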
        /// <summary>
        /// Runs a single individual (or single multiagent team) through the CurrentEnvironment(s) specified in the experiment file.
        /// </summary>
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double fitness = 0;
            List<double> fitList = new List<double>();
            behavior = new SharpNeatLib.BehaviorType();
            List<Double> origBehavior = new List<Double>();
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++) accumObjectives[i] = 0.0;

            IFitnessFunction fit_copy;
            IBehaviorCharacterization bc_copy;
            instance_pack inst = new instance_pack();
            inst.timestep = timestep;

            foreach (Environment env2 in environmentList)
            {
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double tempFit = 0;
                double[] fitnesses = new double[timesToRunEnvironments];

                SharpNeatLib.Maths.FastRandom evalRand;
                if (noiseDeterministic)
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom(100);
                }
                else
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom();
                }

                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed = evals;
                    if (!benchmark && noiseDeterministic)
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);
                    }
                    else
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom();
                    }

                    float new_sn = this.sensorNoise;
                    float new_ef = this.effectorNoise;
                    float new_headingnoise = this.headingNoise;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, neatBrain, useCTRNNS);
                    inst.timestep = timestep;
                    initializeRobots(agentBrain, env, new_headingnoise, new_sn, new_ef, inst);
                    inst.elapsed = 0;
                    inst.timeSteps = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.initialize(env, this, inst.robots);

                    while (inst.elapsed < evalTime)
                    {
                        runEnvironment(env, inst, sem);
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit += thisFit;

                    if (behavior != null && behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                            accumObjectives[i] += behavior.objectives[i];

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    else if (behavior != null && inst.bc != null)
                    {
                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    inst.ff.reset();
                }

                fitness += tempFit / timesToRunEnvironments;
                fitList.Add(tempFit / timesToRunEnvironments);
            }
            behavior.objectives = accumObjectives;
            if (behaviorCharacterization != null)
            {
                behavior.wraparoundRange = behaviorCharacterization.wraparoundDistCalc;
            }

            double t = 0;

            if (recordEndpoints)
            {
                behavior.finalLocation = new List<double>(2);
                behavior.finalLocation.Add(inst.robots[0].Location.X);
                behavior.finalLocation.Add(inst.robots[0].Location.Y);
            }                                                                   

            if (recordTrajectories)
            {
                behavior.trajectory = new List<int>();
                behavior.trajectory.AddRange(inst.robots[0].Trajectory);
            }

            return (fitness - t) / environmentList.Count;

        }
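
The recorded endpoint is a two-element behavior descriptor (final x, then final y). A hedged sketch of how such descriptors might be compared, e.g. for novelty-style distances; the helper name is hypothetical:

        // Euclidean distance between two behavior.finalLocation descriptors.
        static double EndpointDistance(List<double> a, List<double> b)
        {
            double dx = a[0] - b[0];
            double dy = a[1] - b[1];
            return Math.Sqrt(dx * dx + dy * dy);
        }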