internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            // Evaluates the given network as a (multi-)robot controller on every
            // environment in environmentList, running each environment
            // timesToRunEnvironments times. Returns the fitness averaged over
            // environments; per-objective scores accumulated over all runs are
            // returned via behavior.objectives.
            double fitness = 0;

            behavior = new SharpNeatLib.BehaviorType();

            // Objective accumulator across all evaluations.
            // NOTE(review): magic size 6 — presumably the maximum objective count; confirm.
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++) accumObjectives[i] = 0.0;

            IFitnessFunction fit_copy;
            IBehaviorCharacterization bc_copy;
            instance_pack inst = new instance_pack();
            inst.timestep = timestep;
            foreach (Environment env2 in environmentList)
            {
                // Fresh fitness-function / behavior-characterization state per environment.
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double tempFit = 0;
                // Fixed seed (100) so noise draws are reproducible across evaluations.
                SharpNeatLib.Maths.FastRandom evalRand = new SharpNeatLib.Maths.FastRandom(100);
                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    // Evaluate on a copy so each run starts from a pristine environment.
                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed = evals;
                    env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);

                    // Sensor/effector noise levels drawn uniformly in [0, (int)sensorNoise].
                    int noise_lev = (int)this.sensorNoise + 1;
                    float new_sn = evalRand.NextUInt() % noise_lev;
                    float new_ef = evalRand.NextUInt() % noise_lev;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, dirComm);
                    // NOTE(review): passes the field 'agentBrain' rather than the freshly
                    // created 'inst.agentBrain'; a sibling overload passes inst.agentBrain —
                    // confirm which is intended before changing.
                    initializeRobots(agentBrain, env, headingNoise, new_sn, new_ef, inst);

                    inst.elapsed = 0;
                    inst.timestep = this.timestep;

                    inst.timeSteps = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.Initialize(env, this, inst.robots);

                    try
                    {
                        while (inst.elapsed < evalTime)
                        {
                            runEnvironment(env, inst, sem);
                        }
                    }
                    catch (Exception e)
                    {
                        // Dump robot trajectories to aid post-mortem debugging.
                        for (int x = 0; x < inst.robots.Count; x++)
                        {
                            for (int y = 0; y < inst.robots[x].history.Count; y++)
                            {
                                Console.WriteLine(x + " " + y + " " + inst.robots[x].history[y].x + " " + inst.robots[x].history[y].y);
                            }
                        }

                        behavior = new SharpNeatLib.BehaviorType();
                        Console.WriteLine(e.Message);
                        Console.WriteLine(e.StackTrace);
                        // Rethrow preserving the original stack trace ('throw e' would reset it).
                        throw;
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    tempFit += thisFit;

                    // behavior is always non-null here (assigned above and in the catch).
                    if (behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                            accumObjectives[i] += behavior.objectives[i];

                        if (behavior.behaviorList == null)
                            behavior.behaviorList = new List<double>();
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }

                    inst.ff.reset();
                }
                // Average over the repeated runs of this environment.
                fitness += tempFit / timesToRunEnvironments;
            }
            behavior.objectives = accumObjectives;
            return fitness / environmentList.Count;
        }
        // Schrum: Had to remove the internal from this too so that I could override it in AdversarialRoomClearingExperiment
        // Schrum: Had to remove the internal from this too so that I could override it in AdversarialRoomClearingExperiment
        /// <summary>
        /// Evaluates the given network across every environment in environmentList,
        /// running each one timesToRunEnvironments times. Fitness is either the sum
        /// (averaged over environments on return) or, when multiplicativeFitness is
        /// set, the product of per-environment average scores. behavior.objectives
        /// is filled according to the active cost scheme (CPPN modules/links,
        /// substrate links, per-environment scores for FourTasks, or the default
        /// accumulated objectives).
        /// </summary>
        public override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            // Schrum: Used when punishing links in substrate networks.
            int linksInSubstrate = 0;

            // Multiplicative identity is 1; additive identity is 0.
            double fitness = multiplicativeFitness ? 1 : 0;

            behavior = new SharpNeatLib.BehaviorType();

            // Schrum: Why is there a magic number 6 here?
            // NOTE(review): presumably the maximum objective count — confirm.
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++)
            {
                accumObjectives[i] = 0.0;
            }

            // Schrum: Special handling for FourTasks domain again
            if (fitnessFunction is FourTasksFitness)
            {
                // Pass the experiment into the fitness function once so its parameters can be changed for each environment
                ((FourTasksFitness)fitnessFunction).setExperiment(this);
            }

            IFitnessFunction          fit_copy;
            IBehaviorCharacterization bc_copy;
            int envNum = 0;

            // Schrum: Separate fitness for each environment (used as the objective
            // vector in the FourTasks case below).
            double[] environmentScores = new double[environmentList.Count];
            foreach (Environment env2 in environmentList)
            {
                // Schrum: moved this here to deal with consistency issues
                instance_pack inst = new instance_pack();

                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                // Schrum: Just add special handling for FourTasks to get settings right
                if (inst.ff is FourTasksFitness)
                {
                    // FourTasks needs to know the current environment. Experiment reference provided earlier.
                    ((FourTasksFitness)inst.ff).setupFitness(envNum);
                }

                // Schrum: moved here from outside/before the loop. Most domains use the same timestep in each environment,
                // but the FourTasks domain is an exception to this, so the setting needed to be moved after setupFitness
                inst.timestep = timestep;
                double tempFit = 0;
                // Fixed seed (100) so noise draws are reproducible across evaluations.
                SharpNeatLib.Maths.FastRandom evalRand = new SharpNeatLib.Maths.FastRandom(100);
                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    // Evaluate on a copy so each run starts from a pristine environment.
                    Environment env = env2.copy();

                    double evalTime = this.evaluationTime;

                    inst.eval = evals;
                    env.seed  = evals;
                    env.rng   = new SharpNeatLib.Maths.FastRandom(env.seed + 1);

                    // Sensor/effector noise levels drawn uniformly in [0, (int)sensorNoise].
                    int noise_lev = (int)this.sensorNoise + 1;

                    float new_sn = evalRand.NextUInt() % noise_lev;
                    float new_ef = evalRand.NextUInt() % noise_lev;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, numBrains, evolveSubstrate, preferenceNeurons, forcedSituationalPolicyGeometry);
                    // Add up the links in each substrate brain.
                    // Recalculates each evaluation, but resets to 0 each time.
                    linksInSubstrate = 0;
                    foreach (INetwork b in inst.agentBrain.multiBrains)
                    {
                        linksInSubstrate += b.NumLinks;
                    }
                    if (eval)
                    {
                        Console.WriteLine("Links In Substrate: " + linksInSubstrate);
                    }
                    initializeRobots(inst.agentBrain, env, headingNoise, new_sn, new_ef, inst);

                    inst.elapsed  = 0;
                    inst.timestep = this.timestep;

                    inst.timeSteps        = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.Initialize(env, this, inst.robots);

                    try
                    {
                        // Schrum2: Only called for non-visual evaluations
                        while (inst.elapsed < evalTime)
                        {
                            runEnvironment(env, inst, sem);
                        }
                    }
                    catch (Exception e)
                    {
                        // Dump robot trajectories to aid post-mortem debugging.
                        for (int x = 0; x < inst.robots.Count; x++)
                        {
                            for (int y = 0; y < inst.robots[x].history.Count; y++)
                            {
                                Console.WriteLine(x + " " + y + " " + inst.robots[x].history[y].x + " " + inst.robots[x].history[y].y);
                            }
                        }

                        behavior = new SharpNeatLib.BehaviorType();
                        Console.WriteLine(e.Message);
                        Console.WriteLine(e.StackTrace);
                        // Rethrow preserving the original stack trace ('throw e' would reset it).
                        throw;
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    tempFit += thisFit;

                    // behavior is always non-null here (assigned above and in the catch).
                    if (behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                        {
                            accumObjectives[i] += behavior.objectives[i];
                        }

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }

                    inst.ff.reset();
                }
                // Schrum: Save each fitness separately
                // NOTE(review): stores the raw sum, not the per-run average — confirm intent.
                environmentScores[envNum] = tempFit;
                envNum++; // go to the next environment

                // Schrum: Product fitness might encourage better performance in all environments
                if (multiplicativeFitness)
                {
                    fitness *= tempFit / timesToRunEnvironments;
                }
                else // default is sum/average
                {
                    fitness += tempFit / timesToRunEnvironments;
                }
            }

            // Punish CPPN modules: Must use with multiobjective option
            if (cppnModuleCost)
            {
                behavior.objectives = new double[] { fitness / environmentList.Count, -network.NumOutputModules };
            }
            // Punish CPPN links: Must use with multiobjective option
            else if (cppnLinkCost)
            {
                behavior.objectives = new double[] { fitness / environmentList.Count, -network.NumLinks };
            }
            // Punish substrate links: Must use with multiobjective option
            else if (substrateLinkCost)
            {
                behavior.objectives = new double[] { fitness / environmentList.Count, -linksInSubstrate };
            }
            else if (fitnessFunction is FourTasksFitness)
            { // Schrum: Special handling for FourTasksFitness ... could I just use accumObjectives?
                behavior.objectives = environmentScores;
            }
            else // Original default
            {
                behavior.objectives = accumObjectives;
            }
            behavior.modules        = network.NumOutputModules;
            behavior.cppnLinks      = network.NumLinks;
            behavior.substrateLinks = linksInSubstrate;

            // Schrum: Averaging helps normalize range when adding fitness values, but not when multiplying fitness values
            return multiplicativeFitness ? fitness : fitness / environmentList.Count;
        }
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            // Evaluates the given network as a (multi-)robot controller on every
            // environment in environmentList, running each environment
            // timesToRunEnvironments times. Returns the fitness averaged over
            // environments; per-objective scores accumulated over all runs are
            // returned via behavior.objectives.
            double fitness = 0;

            behavior = new SharpNeatLib.BehaviorType();

            // Objective accumulator across all evaluations.
            // NOTE(review): magic size 6 — presumably the maximum objective count; confirm.
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++)
            {
                accumObjectives[i] = 0.0;
            }

            IFitnessFunction          fit_copy;
            IBehaviorCharacterization bc_copy;
            instance_pack             inst = new instance_pack();

            inst.timestep = timestep;
            foreach (Environment env2 in environmentList)
            {
                // Fresh fitness-function / behavior-characterization state per environment.
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double tempFit = 0;
                // Fixed seed (100) so noise draws are reproducible across evaluations.
                SharpNeatLib.Maths.FastRandom evalRand = new SharpNeatLib.Maths.FastRandom(100);
                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    // Evaluate on a copy so each run starts from a pristine environment.
                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed  = evals;
                    env.rng   = new SharpNeatLib.Maths.FastRandom(env.seed + 1);

                    // Sensor/effector noise levels drawn uniformly in [0, (int)sensorNoise].
                    int noise_lev = (int)this.sensorNoise + 1;

                    float new_sn = evalRand.NextUInt() % noise_lev;
                    float new_ef = evalRand.NextUInt() % noise_lev;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate);
                    // NOTE(review): passes the field 'agentBrain' rather than the freshly
                    // created 'inst.agentBrain'; a sibling overload passes inst.agentBrain —
                    // confirm which is intended before changing.
                    initializeRobots(agentBrain, env, headingNoise, new_sn, new_ef, inst);

                    inst.elapsed  = 0;
                    inst.timestep = this.timestep;

                    inst.timeSteps        = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.Initialize(env, this, inst.robots);

                    try
                    {
                        while (inst.elapsed < evalTime)
                        {
                            runEnvironment(env, inst, sem);
                        }
                    }
                    catch (Exception e)
                    {
                        // Dump robot trajectories to aid post-mortem debugging.
                        for (int x = 0; x < inst.robots.Count; x++)
                        {
                            for (int y = 0; y < inst.robots[x].history.Count; y++)
                            {
                                Console.WriteLine(x + " " + y + " " + inst.robots[x].history[y].x + " " + inst.robots[x].history[y].y);
                            }
                        }

                        behavior = new SharpNeatLib.BehaviorType();
                        Console.WriteLine(e.Message);
                        Console.WriteLine(e.StackTrace);
                        // Rethrow preserving the original stack trace ('throw e' would reset it).
                        throw;
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    tempFit += thisFit;

                    // behavior is always non-null here (assigned above and in the catch).
                    if (behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                        {
                            accumObjectives[i] += behavior.objectives[i];
                        }

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }

                    inst.ff.reset();
                }
                // Average over the repeated runs of this environment.
                fitness += tempFit / timesToRunEnvironments;
            }
            behavior.objectives = accumObjectives;
            return fitness / environmentList.Count;
        }