コード例 #1
0
        /// <summary>
        /// Builds the behavior characterization vector once an individual's
        /// evaluation has finished, by sampling evenly spaced (x, y) points
        /// from the stored Trajectory and normalizing them to the AOI rectangle.
        /// </summary>
        List<double> IBehaviorCharacterization.calculate(SimulatorExperiment exp, instance_pack ip)
        {
            // Fresh BC vector for this evaluation
            BehaviorVector = new List<double>();

            // A zero endTick means the robot never Stopped; treat the full
            // evaluation time as the endpoint instead.
            if (endTick == 0)
            {
                endTick = exp.evaluationTime;
            }

            // Each chunk contributes one (x, y) pair, so the sampling stride is
            // twice the per-chunk tick count.
            int numBehaviorChunks = VectorLength / 2;
            chunkSize = Convert.ToInt32(Math.Floor((double)endTick / (double)numBehaviorChunks)) * 2;

            for (int chunkNum = 1; chunkNum <= numBehaviorChunks; chunkNum++)
            {
                // Pull the raw samples for this chunk out of the Trajectory store
                float sampleX = Trajectory[chunkNum * chunkSize - 2];
                float sampleY = Trajectory[chunkNum * chunkSize - 1];

                // Normalize both coordinates into [0, 1] relative to the AOI
                sampleX = (sampleX - ip.env.AOIRectangle.Left) / ip.env.AOIRectangle.Width;
                sampleY = (sampleY - ip.env.AOIRectangle.Top) / ip.env.AOIRectangle.Height;

                BehaviorVector.Add(sampleX);
                BehaviorVector.Add(sampleY);
            }

            return BehaviorVector;
        }
コード例 #2
0
        /// <summary>
        /// Constructs the behavior vector at the end of an individual evaluation
        /// by sampling evenly spaced (x, y) points from the recorded Trajectory
        /// and scaling them into the AOI rectangle.
        /// </summary>
        List <double> IBehaviorCharacterization.calculate(SimulatorExperiment exp, instance_pack ip)
        {
            // Start a new BC vector for this evaluation
            BehaviorVector = new List <double>();

            // endTick == 0 means the robot never Stopped; use the full evaluation time
            if (endTick == 0)
            {
                endTick = exp.evaluationTime;
            }

            // One (x, y) pair per chunk; chunkSize is the stride (in Trajectory
            // entries, two per tick) between successive samples.
            int numBehaviorChunks = VectorLength / 2;
            chunkSize = Convert.ToInt32(Math.Floor((double)endTick / (double)numBehaviorChunks)) * 2;

            int chunk = 1;
            while (chunk <= numBehaviorChunks)
            {
                int baseIndex = chunk * chunkSize;

                // Normalize the sampled x coordinate into [0, 1] across the AOI width
                float nx = Trajectory[baseIndex - 2];
                nx = (nx - ip.env.AOIRectangle.Left) / ip.env.AOIRectangle.Width;
                BehaviorVector.Add(nx);

                // Normalize the sampled y coordinate into [0, 1] across the AOI height
                float ny = Trajectory[baseIndex - 1];
                ny = (ny - ip.env.AOIRectangle.Top) / ip.env.AOIRectangle.Height;
                BehaviorVector.Add(ny);

                chunk++;
            }

            return BehaviorVector;
        }
コード例 #3
0
        /// <summary>
        /// Records the individual's Heading at each tick of the simulation.
        /// </summary>
        void IBehaviorCharacterization.update(SimulatorExperiment exp, instance_pack ip)
        {
            // Lazily create the Heading sample store on the first tick.
            if (!Initialized)
            {
                HeadingValues = new List <double>(exp.evaluationTime + 1);
                Initialized = true;
            }

            if (!ip.robots[0].Stopped)
            {
                // Robot is still moving: sample its Heading this tick.
                HeadingValues.Add(ip.robots[0].Heading);
                return;
            }

            // Robot has Stopped. On the very first Stopped tick, take one final
            // sample (avoids an array overflow later) and latch the end tick.
            if (endTick == 0)
            {
                HeadingValues.Add(ip.robots[0].Heading);
                endTick = Convert.ToInt32(ip.timeSteps * exp.timestep);
            }
        }
コード例 #4
0
        /// <summary>
        /// Draws the maze, robots, etc. to the screen.
        /// </summary>
        /// <param name="g">Target GDI+ drawing surface.</param>
        /// <param name="scale">Coordinate frame mapping world space to screen space.</param>
        public override void draw(Graphics g, CoordinateFrame scale)
        {
            foreach (Robot robot in robots)
            {
                robot.draw(g, scale);
            }

            if (environment != null)
            {
                environment.draw(g, scale);
            }

            // Package the current simulation state so the fitness function can
            // be evaluated for the on-screen readout.
            instance_pack ip = new instance_pack();

            ip.robots           = robots;
            ip.env              = environment;
            ip.timeSteps        = this.timeSteps;
            ip.agentBrain       = agentBrain;
            ip.collisionManager = collisionManager;
            ip.elapsed          = this.elapsedTime;
            ip.ff       = this.fitnessFunction;
            ip.bc       = this.behaviorCharacterization;
            ip.timestep = timestep;

            double[] obj = null;

            // Fix: the original allocated two Font objects per frame and never
            // disposed them, leaking GDI handles; create one and dispose it.
            using (Font font = new Font("Tahoma", 12))
            {
                g.DrawString("Fitness: " + this.fitnessFunction.calculate(this, this.environment, ip, out obj), font, Brushes.Black, 10, 90);
                g.DrawString("Elapsed time: " + this.elapsedTime, font, Brushes.Black, 10, 60);
            }
        }
コード例 #5
0
        /// <summary>
        /// Constructs the behavior vector at the end of an individual evaluation.
        /// Each robot contributes its final (x, y) position, normalized to the
        /// AOI rectangle.
        /// </summary>
        List <double> IBehaviorCharacterization.calculate(SimulatorExperiment exp, instance_pack ip)
        {
            List <double> behavior = new List <double>();

            for (int i = 0; i < ip.robots.Count; i++)
            {
                // Scale the final coordinates into [0, 1] on each AOI axis
                double normX = ((double)ip.robots[i].Location.X - ip.env.AOIRectangle.Left) / ip.env.AOIRectangle.Width;
                double normY = ((double)ip.robots[i].Location.Y - ip.env.AOIRectangle.Top) / ip.env.AOIRectangle.Height;

                // Sequential appends yield the same layout as the indexed inserts
                behavior.Add(normX);
                behavior.Add(normY);
            }
            return(behavior);
        }
コード例 #6
0
        /// <summary>
        /// Constructs the behavior vector at the end of an individual evaluation:
        /// every robot's final position, normalized to the AOI rectangle.
        /// </summary>
        List<double> IBehaviorCharacterization.calculate(SimulatorExperiment exp, instance_pack ip)
        {
            List<double> behavior = new List<double>();

            foreach (Robot agent in ip.robots)
            {
                // Scale the final coordinates into [0, 1] relative to the AOI
                double relX = ((double)agent.Location.X - ip.env.AOIRectangle.Left) / ip.env.AOIRectangle.Width;
                double relY = ((double)agent.Location.Y - ip.env.AOIRectangle.Top) / ip.env.AOIRectangle.Height;

                // Sequential appends produce the same layout as indexed inserts
                behavior.Add(relX);
                behavior.Add(relY);
            }
            return behavior;
        }
コード例 #7
0
        /// <summary>
        /// Records the individual's location at each tick of the simulation.
        /// </summary>
        void IBehaviorCharacterization.update(SimulatorExperiment exp, instance_pack ip)
        {
            // Latch the end tick the first time we see the robot Stopped.
            if (ip.robots[0].Stopped && endTick == 0)
            {
                endTick = Convert.ToInt32(ip.timeSteps * exp.timestep);
            }

            // One-time setup of the Trajectory store (two entries per tick).
            if (!Initialized)
            {
                Trajectory  = new List <int>(exp.evaluationTime * 2);
                Initialized = true;
            }

            // Sample the lead robot's current (x, y) position every tick,
            // rounded to 16-bit integers by Convert.ToInt16.
            Robot leader = ip.robots[0];
            Trajectory.Add(Convert.ToInt16(leader.Location.X));
            Trajectory.Add(Convert.ToInt16(leader.Location.Y));
        }
コード例 #8
0
 /// <summary>
 /// Initializes robot brains and positions them in the environment.
 /// Intentionally a no-op default: subclasses override this to create and
 /// place their robots (see the overrides elsewhere in this project).
 /// </summary>
 public virtual void initializeRobots(AgentBrain agentBrain, Environment e, float headingNoise, float sensorNoise, float effectorNoise, instance_pack x)
 {
 }
コード例 #9
0
        /// <summary>
        /// Runs a single individual (or single multiagent team) through the CurrentEnvironment(s) specified in the experiment file.
        /// </summary>
        /// <param name="network">Evolved network from which each run's AgentBrain is constructed.</param>
        /// <param name="behavior">Out: behavior characterization samples, accumulated objectives, and optional endpoint/trajectory data.</param>
        /// <param name="sem">Semaphore forwarded to runEnvironment's brain-execution step.</param>
        /// <returns>Fitness averaged over environments (each environment averaged over timesToRunEnvironments runs).</returns>
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double        fitness = 0;
            List <double> fitList = new List <double>();

            behavior = new SharpNeatLib.BehaviorType();
            // NOTE(review): origBehavior is never used below — candidate for removal.
            List <Double> origBehavior = new List <Double>();

            // Objective totals accumulated across every environment/run (fixed at 6 slots)
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++)
            {
                accumObjectives[i] = 0.0;
            }

            IFitnessFunction          fit_copy;
            IBehaviorCharacterization bc_copy;
            instance_pack             inst = new instance_pack();

            inst.timestep = timestep;

            foreach (Environment env2 in environmentList)
            {
                // Fresh fitness-function (and BC) copies so no state leaks between environments
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double   tempFit   = 0;
                double[] fitnesses = new double[timesToRunEnvironments];

                // NOTE(review): evalRand is constructed but never used in this method.
                SharpNeatLib.Maths.FastRandom evalRand;
                if (noiseDeterministic)
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom(100);
                }
                else
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom();
                }

                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    // Work on a copy so repeated runs start from a pristine environment
                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed  = evals;
                    // Seed the environment RNG deterministically per run unless benchmarking
                    if (!benchmark && noiseDeterministic)
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);
                    }
                    else
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom();
                    }

                    float new_sn           = this.sensorNoise;
                    float new_ef           = this.effectorNoise;
                    float new_headingnoise = this.headingNoise;

                    // Build a fresh brain for this run from the network under evaluation
                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, neatBrain, useCTRNNS);
                    inst.timestep   = timestep;
                    // NOTE(review): passes the experiment-level agentBrain field, but the
                    // initializeRobots overrides read inst.agentBrain when an instance pack
                    // is supplied — confirm which brain is actually intended here.
                    initializeRobots(agentBrain, env, new_headingnoise, new_sn, new_ef, inst);
                    inst.elapsed          = 0;
                    inst.timeSteps        = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.initialize(env, this, inst.robots);

                    // Step the simulation until the evaluation time is used up
                    while (inst.elapsed < evalTime)
                    {
                        runEnvironment(env, inst, sem);
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit         += thisFit;

                    // Fold this run's objectives and BC samples into the accumulated behavior
                    if (behavior != null && behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                        {
                            accumObjectives[i] += behavior.objectives[i];
                        }

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List <double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    // NOTE(review): duplicates the BC handling above minus the objective
                    // accumulation; only reachable when behavior.objectives is null.
                    else if (behavior != null && inst.bc != null)
                    {
                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List <double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    inst.ff.reset();
                }

                // Average this environment's runs into the overall fitness
                fitness += tempFit / timesToRunEnvironments;
                fitList.Add(tempFit / timesToRunEnvironments);
            }
            behavior.objectives = accumObjectives;
            if (behaviorCharacterization != null)
            {
                behavior.wraparoundRange = behaviorCharacterization.wraparoundDistCalc;
            }

            // NOTE(review): t is always 0 here, so the subtraction below is a no-op.
            double t = 0;

            // Optionally record robot 0's final position (used by endpoint-based archives)
            if (recordEndpoints)
            {
                behavior.finalLocation = new List <double>(2);
                behavior.finalLocation.Add(inst.robots[0].Location.X);
                behavior.finalLocation.Add(inst.robots[0].Location.Y);
            }

            // Optionally carry robot 0's full recorded trajectory out with the behavior
            if (recordTrajectories)
            {
                behavior.trajectory = new List <int>();
                behavior.trajectory.AddRange(inst.robots[0].Trajectory);
            }

            return((fitness - t) / environmentList.Count);
        }
コード例 #10
0
        /// <summary>
        /// Performs one tick of the experiment.
        /// </summary>
        /// <param name="e">Environment the robots are simulated in this tick.</param>
        /// <param name="ip">Per-evaluation state; when null the experiment-level fields are used and a temporary pack is assembled.</param>
        /// <param name="sem">Semaphore handed to the agent brain's execute step.</param>
        /// <returns>Always false.</returns>
        protected internal virtual bool runEnvironment(Environment e, instance_pack ip, System.Threading.Semaphore sem)
        {
            bool collide;

            // Advance the clock: experiment-level counters when no pack is given,
            // otherwise the pack's own counters (and bind the environment to it).
            if (ip == null)
            {
                elapsedTime += timestep;
                timeSteps++;
            }
            else
            {
                ip.timeSteps++;
                ip.elapsed += ip.timestep;
                ip.env      = e;
            }

            AgentBrain                ab;
            CollisionManager          cm;
            List <Robot>              rbts;
            IFitnessFunction          fit_fun;
            IBehaviorCharacterization beh_char;

            // Resolve the working state: either assemble a pack from the
            // experiment-level fields, or unpack the one supplied by the caller.
            if (ip == null)
            {
                ip                  = new instance_pack();
                ab                  = agentBrain;
                cm                  = collisionManager;
                rbts                = robots;
                fit_fun             = fitnessFunction;
                beh_char            = behaviorCharacterization;
                ip.agentBrain       = agentBrain;
                ip.collisionManager = cm;
                ip.robots           = rbts;
                ip.ff               = fit_fun;
                ip.env              = environment;
                ip.bc               = beh_char;
                ip.timeSteps        = timeSteps;
                ip.timestep         = timestep;
            }
            else
            {
                ab       = ip.agentBrain;
                cm       = ip.collisionManager;
                rbts     = ip.robots;
                fit_fun  = ip.ff;
                beh_char = ip.bc;
            }

            cm.simulationStepCallback();

            // Sensor phase: update each robot's sensors, then either let it act
            // or (on autopilot) clear its ANN signals for its Z layer.
            for (int x = 0; x < rbts.Count; x++)
            {
                rbts[x].updateSensors(e, rbts, cm);
                if (!rbts[x].Autopilot)
                {
                    rbts[x].doAction();
                }
                else
                {
                    ab.clearANNSignals(rbts[x].ZStack);
                }
            }

            // Run the (shared) brain once for this tick
            ab.execute(sem);

            // Collision phase: flag and notify any robot that collided this tick
            for (int x = 0; x < rbts.Count; x++)
            {
                collide             = cm.robotCollide(rbts[x]);
                rbts[x].HasCollided = collide;
                if (collide)
                {
                    rbts[x].onCollision();
                }
            }

            // NOTE(review): ip can no longer be null here (the null branch above
            // assigned a fresh pack), so this check is always true.
            if (ip != null)
            {
                if (beh_char != null)
                {
                    beh_char.update(this, ip);
                }
                fit_fun.update(this, e, ip);

                // Track the Trajectory independent of the BC in case the BC has nothing to do with position
                if (recordTrajectories)
                {
                    // Only sample robot 0's position every trajectoryUpdateInterval ticks
                    if (ip.robots[0].TrajectoryUpdateCounter == trajectoryUpdateInterval)
                    {
                        ip.robots[0].Trajectory.Add(Convert.ToInt16(rbts[0].Location.X));
                        ip.robots[0].Trajectory.Add(Convert.ToInt16(rbts[0].Location.Y));
                        ip.robots[0].TrajectoryUpdateCounter = 0;
                    }
                    else
                    {
                        ip.robots[0].TrajectoryUpdateCounter++;
                    }
                }
            }
            return(false);
        }
コード例 #11
0
        /// <summary>
        /// Initializes robot brains and positions them in the CurrentEnvironment.
        /// </summary>
        /// <param name="agentBrain">Brain used when no instance pack is supplied.</param>
        /// <param name="e">Environment providing start point, spacing, stagger, and headings.</param>
        /// <param name="headingNoise">Noise applied to each robot's heading.</param>
        /// <param name="sensorNoise">Noise applied to each robot's sensors.</param>
        /// <param name="effectorNoise">Noise applied to each robot's effectors.</param>
        /// <param name="ip">Optional per-evaluation state; when non-null its brain, robot list, robot count, and timestep are used instead of the experiment-level ones.</param>
        public override void initializeRobots(AgentBrain agentBrain, Environment e, float headingNoise, float sensorNoise, float effectorNoise, instance_pack ip)
        {
            double spacing;
            int    num_bots;
            double dx, dy; // Deltas for spacing the robots

            // Formation parameters come from the experiment override or the environment
            if (overrideTeamFormation)
            {
                dx      = Math.Cos(group_orientation / 180.0 * 3.14) * group_spacing;
                dy      = Math.Sin(group_orientation / 180.0 * 3.14) * group_spacing;
                spacing = group_spacing;
            }
            else
            {
                dx      = Math.Cos(e.group_orientation / 180.0 * 3.14) * e.robot_spacing;
                dy      = Math.Sin(e.group_orientation / 180.0 * 3.14) * e.robot_spacing;
                spacing = e.robot_spacing;
            }

            AgentBrain   ab;
            List <Robot> rbts;
            double       _timestep;

            if (ip == null)
            {
                ab        = agentBrain;
                rbts      = robots;
                num_bots  = numberRobots;
                _timestep = this.timestep;
            }
            else
            {
                ab        = ip.agentBrain;
                rbts      = new List <Robot>();
                ip.robots = rbts;
                num_bots  = ip.num_rbts;
                _timestep = ip.timestep;
            }

            rbts.Clear();

            bool circle    = false;
            bool staggered = true;

            double radius     = (spacing) / (2.0 * Math.PI * (1.0 / num_bots));
            double angleDelta = (2 * Math.PI) / num_bots;

            // Add robots in their formation according to CurrentEnvironment
            double nx, ny;

            for (int num = 0; num < num_bots; num++)
            {
                double heading = overrideTeamFormation ? robot_heading : e.robot_heading;
                Robot  r;

                // Fix: use '>' rather than '>='. With 'Count >= num', num == Count passed
                // the guard and robotValues[num] threw an out-of-range exception.
                // ('Count > num' also subsumes the old 'Count > 0' check.)
                if (robotValues != null && robotValues.Count > num)
                {
                    r = RobotModelFactory.getRobotModel(robotValues[num]);
                }
                else
                {
                    r = RobotModelFactory.getRobotModel(robotModelName);
                }

                if (circle)
                {
                    // Circular formation: spread robots evenly around start_point
                    nx      = e.start_point.X + radius * Math.Cos(num * angleDelta);
                    ny      = e.start_point.Y + radius * Math.Sin(num * angleDelta);
                    heading = num * angleDelta - Math.PI;
                }
                else
                {
                    // Line formation, staggering alternate robots when configured
                    if (staggered && num % 2 == 1 && e.stagger != 0)
                    {
                        nx = e.start_point.X + num * dx + (staggered ? e.stagger * num : 0);
                    }
                    else
                    {
                        nx = e.start_point.X + num * dx - (staggered ? e.stagger * num : 0);
                    }
                    ny      = e.start_point.Y + num * dy;
                    heading = (heading / 180.0) * 3.14; // degrees -> radians (approximate pi)
                }

                r.init(num, nx, ny,
                       heading, ab, e, sensorNoise, effectorNoise, headingNoise, (float)_timestep);
                r.CollisionPenalty = collisionPenalty;
                ab.registerRobot(r);

                if (overrideDefaultSensorDensity)
                {
                    r.populateSensors(sensorDensity);
                }
                else
                {
                    r.populateSensors();
                }

                // Fix: consult the brain actually in use (ab) rather than the method
                // parameter; when an instance pack is supplied these can be different
                // objects, and it is ab.ZCoordinates that is indexed below.
                if (ab.ZCoordinates == null)
                {
                    r.ZStack = 0;
                }
                else
                {
                    r.ZStack = ab.ZCoordinates[num];
                }
                rbts.Add(r);
            }

            ab.updateInputDensity();
        }
コード例 #12
0
        /// <summary>
        /// Initializes robot brains and positions them in the CurrentEnvironment.
        /// </summary>
        /// <param name="agentBrain">Brain used when no instance pack is supplied.</param>
        /// <param name="e">Environment providing start point, spacing, stagger, and headings.</param>
        /// <param name="headingNoise">Noise applied to each robot's heading.</param>
        /// <param name="sensorNoise">Noise applied to each robot's sensors.</param>
        /// <param name="effectorNoise">Noise applied to each robot's effectors.</param>
        /// <param name="ip">Optional per-evaluation state; when non-null its brain, robot list, robot count, and timestep are used instead of the experiment-level ones.</param>
        public override void initializeRobots(AgentBrain agentBrain, Environment e, float headingNoise, float sensorNoise, float effectorNoise, instance_pack ip)
        {
            double spacing;
            int num_bots;
            double dx, dy; // Deltas for spacing the robots

            // Formation parameters come from the experiment override or the environment
            if (overrideTeamFormation)
            {
                dx = Math.Cos(group_orientation / 180.0 * 3.14) * group_spacing;
                dy = Math.Sin(group_orientation / 180.0 * 3.14) * group_spacing;
                spacing = group_spacing;
            }
            else
            {
                dx = Math.Cos(e.group_orientation / 180.0 * 3.14) * e.robot_spacing;
                dy = Math.Sin(e.group_orientation / 180.0 * 3.14) * e.robot_spacing;
                spacing = e.robot_spacing;
            }

            AgentBrain ab;
            List<Robot> rbts;
            double _timestep;
            if (ip == null)
            {
                ab = agentBrain;
                rbts = robots;
                num_bots = numberRobots;
                _timestep = this.timestep;
            }
            else
            {
                ab = ip.agentBrain;
                rbts = new List<Robot>();
                ip.robots = rbts;
                num_bots = ip.num_rbts;
                _timestep = ip.timestep;
            }

            rbts.Clear();

            bool circle = false;
            bool staggered = true;

            double radius = (spacing) / (2.0 * Math.PI * (1.0 / num_bots));
            double angleDelta = (2 * Math.PI) / num_bots;

            // Add robots in their formation according to CurrentEnvironment
            double nx, ny;
            for (int num = 0; num < num_bots; num++)
            {
                double heading = overrideTeamFormation ? robot_heading : e.robot_heading;
                Robot r;

                // Fix: use '>' rather than '>='. With 'Count >= num', num == Count passed
                // the guard and robotValues[num] threw an out-of-range exception.
                // ('Count > num' also subsumes the old 'Count > 0' check.)
                if (robotValues != null && robotValues.Count > num)
                {
                    r = RobotModelFactory.getRobotModel(robotValues[num]);
                }
                else
                {
                    r = RobotModelFactory.getRobotModel(robotModelName);
                }

                if (circle)
                {
                    // Circular formation: spread robots evenly around start_point
                    nx = e.start_point.X + radius * Math.Cos(num * angleDelta);
                    ny = e.start_point.Y + radius * Math.Sin(num * angleDelta);
                    heading = num * angleDelta - Math.PI;
                }
                else
                {
                    // Line formation, staggering alternate robots when configured
                    if (staggered && num % 2 == 1 && e.stagger != 0)
                    {
                        nx = e.start_point.X + num * dx + (staggered ? e.stagger * num : 0);
                    }
                    else
                    {
                        nx = e.start_point.X + num * dx - (staggered ? e.stagger * num : 0);
                    }
                    ny = e.start_point.Y + num * dy;
                    heading = (heading / 180.0) * 3.14; // degrees -> radians (approximate pi)
                }

                r.init(num, nx, ny,
                    heading, ab, e, sensorNoise, effectorNoise, headingNoise, (float)_timestep);
                r.CollisionPenalty = collisionPenalty;
                ab.registerRobot(r);

                if (overrideDefaultSensorDensity)
                {
                    r.populateSensors(sensorDensity);
                }
                else
                {
                    r.populateSensors();
                }

                // Fix: consult the brain actually in use (ab) rather than the method
                // parameter; when an instance pack is supplied these can be different
                // objects, and it is ab.ZCoordinates that is indexed below.
                if (ab.ZCoordinates == null)
                {
                    r.ZStack = 0;
                }
                else
                {
                    r.ZStack = ab.ZCoordinates[num];
                }
                rbts.Add(r);
            }

            ab.updateInputDensity();
        }
コード例 #13
0
        /// <summary>
        /// Constructs the behavior vector at the end of an individual evaluation.
        /// The evaluation is split into NumIntervals time slices; each slice is
        /// encoded as one of four discrete values according to the compass
        /// quadrant the robot's Heading occupied most often during that slice.
        /// </summary>
        List<double> IBehaviorCharacterization.calculate(SimulatorExperiment exp, instance_pack ip)
        {
            // Fresh BC vector for this evaluation
            BehaviorVector = new List<double>();

            // If the robot never Stopped, treat the end of the evaluation as the end tick
            if (endTick == 0)
            {
                endTick = exp.evaluationTime - 1;
            }

            // Number of Heading samples that fall into each time slice
            chunkSize = Convert.ToInt32(Math.Floor((double)endTick / (double)NumIntervals));

            for (int slice = 0; slice < NumIntervals; slice++)
            {
                // Quadrant tallies for this time slice: [E-ish, N-ish, W-ish, S-ish]
                int[] quadrantCounts = new int[] { 0, 0, 0, 0 };

                for (int offset = 0; offset < chunkSize; offset++)
                {
                    int sampleIndex = slice * chunkSize + offset;
                    if (sampleIndex >= HeadingValues.Count)
                    {
                        continue;
                    }

                    // Convert the sampled Heading from radians to degrees
                    double degrees = HeadingValues[sampleIndex] * 57.297f;

                    // Wrap into the 0-360 range
                    while (degrees > 360)
                    {
                        degrees -= 360;
                    }
                    while (degrees < 0)
                    {
                        degrees += 360;
                    }

                    // Tally the 90-degree quadrant this Heading falls into
                    if (degrees < 45 || degrees >= 315)
                    {
                        quadrantCounts[0] += 1;
                    }
                    else if (degrees >= 45 && degrees < 135)
                    {
                        quadrantCounts[1] += 1;
                    }
                    else if (degrees >= 135 && degrees < 225)
                    {
                        quadrantCounts[2] += 1;
                    }
                    else if (degrees >= 225 && degrees < 315)
                    {
                        quadrantCounts[3] += 1;
                    }
                    else
                    {
                        Console.WriteLine("ERROR: Unrecognized Heading! Something wrong (DirectionBC). What happen.");
                    }
                }

                // The majority quadrant (ties favor the lower index) becomes this
                // slice's discrete BC entry: 0.125, 0.375, 0.625, or 0.875.
                int majority = 0;
                for (int b = 1; b < quadrantCounts.Length; b++)
                {
                    if (quadrantCounts[b] > quadrantCounts[majority])
                    {
                        majority = b;
                    }
                }

                BehaviorVector.Add(0.125 + (majority * 0.250));
            }

            return BehaviorVector;
        }
コード例 #14
0
        /// <summary>
        /// Records the individual's Heading at each tick of the simulation.
        /// </summary>
        void IBehaviorCharacterization.update(SimulatorExperiment exp, instance_pack ip)
        {
            // Allocate the Heading sample buffer once, on the first call.
            if (!Initialized)
            {
                HeadingValues = new List<double>(exp.evaluationTime + 1);
                Initialized = true;
            }

            Robot leader = ip.robots[0];

            if (leader.Stopped)
            {
                // First tick after stopping: record one final Heading sample
                // (prevents a later array overflow) and remember the end tick.
                if (endTick == 0)
                {
                    HeadingValues.Add(leader.Heading);
                    endTick = Convert.ToInt32(ip.timeSteps * exp.timestep);
                }
            }
            else
            {
                // Still moving: sample the Heading every tick.
                HeadingValues.Add(leader.Heading);
            }
        }
コード例 #15
0
        /// <summary>
        /// Called on every simulation tick. If this is the first tick, initialize some instance variables
        /// (including MaxDistanceToGoal and optional debug POIs). On every tick, check to see if the robot
        /// is within stopping Distance of the goal and mark it Stopped if so.
        /// </summary>
        /// <param name="Experiment">The running experiment (unused here, required by the interface).</param>
        /// <param name="environment">Environment supplying the AOI rectangle and goal point.</param>
        /// <param name="ip">Per-evaluation state; robot 0 is the individual under test.</param>
        void IFitnessFunction.update(SimulatorExperiment Experiment, Environment environment, instance_pack ip)
        {
            // One-time initialization routines
            if (first)
            {
                first = false;

                if (DEBUG)
                {
                    // If debugging: Add POI at thirds along the border of the AOI rectangle, so we can arrange the maze properly
                    // This functionality helps align the maze with the MAP-Elites grid.
                    environment.POIPosition.Clear();
                    if (!DEBUG_CLEARONLY)
                    {
                        double dwidth  = environment.AOIRectangle.Width / NumBinsPerDim;
                        double dheight = environment.AOIRectangle.Height / NumBinsPerDim;
                        double cornerx = environment.AOIRectangle.Left;
                        double cornery = environment.AOIRectangle.Top;
                        for (int x = 0; x <= NumBinsPerDim; x++)
                        {
                            for (int y = 0; y <= NumBinsPerDim; y++)
                            {
                                environment.POIPosition.Add(new Point((int)(cornerx + dwidth * x), (int)(cornery + dheight * y)));
                            }
                        }
                    }
                }

                // Compute the max possible Distance a robot can achieve from the goal
                // point while staying in the AOI bounds: the farthest AOI corner from
                // the goal. (Replaces four copy-pasted corner checks with one loop.)
                Point2D[] corners = new Point2D[]
                {
                    new Point2D(environment.AOIRectangle.Left, environment.AOIRectangle.Top),     // Top left
                    new Point2D(environment.AOIRectangle.Right, environment.AOIRectangle.Top),    // Top right
                    new Point2D(environment.AOIRectangle.Right, environment.AOIRectangle.Bottom), // Bottom right
                    new Point2D(environment.AOIRectangle.Left, environment.AOIRectangle.Bottom)   // Bottom left
                };

                double maxSoFar = 0;
                foreach (Point2D corner in corners)
                {
                    double tempDist = environment.goal_point.distance(corner);
                    if (tempDist > maxSoFar)
                    {
                        maxSoFar = tempDist;
                    }
                }

                // Define the Distance (that will be used to calculate fitness values) (add a small value (10) to give a little breathing room)
                MaxDistanceToGoal = maxSoFar + 10;
            }

            // NOTE(review): this compares a *squared* distance against StoppingRange;
            // confirm StoppingRange is expressed in squared units.
            if (ip.robots[0].Location.squaredDistance(environment.goal_point) < StoppingRange)
            {
                ip.robots[0].Stopped = true;
            }
            return;
        }
コード例 #16
0
        /// <summary>
        /// Records the individual's (x, y) location at each tick of the simulation,
        /// and captures the tick at which the robot first Stopped.
        /// </summary>
        void IBehaviorCharacterization.update(SimulatorExperiment exp, instance_pack ip)
        {
            // If this is the first update after the robot has Stopped,
            // record the endpoint as the current simulation tick.
            if (ip.robots[0].Stopped && endTick == 0)
            {
                endTick = Convert.ToInt32(ip.timeSteps * exp.timestep);
            }

            // Lazily allocate the Trajectory store on the first call
            // (two entries per tick: one x and one y).
            if (!Initialized)
            {
                Trajectory = new List<int>(exp.evaluationTime * 2);
                Initialized = true;
            }

            // Append the current position as an (x, y) pair on every tick.
            Trajectory.Add(Convert.ToInt16(ip.robots[0].Location.X));
            Trajectory.Add(Convert.ToInt16(ip.robots[0].Location.Y));
        }
コード例 #17
0
        /// <summary>
        /// Draws the maze, robots, etc. to the screen, plus a HUD showing the
        /// current fitness value and elapsed time.
        /// </summary>
        public override void draw(Graphics g, CoordinateFrame scale)
        {
            // Robots are rendered first, then the environment overlay.
            foreach (Robot robot in robots)
            {
                robot.draw(g, scale);
            }

            double[] obj = null;

            if (environment != null)
            {
                environment.draw(g, scale);
            }

            // Bundle the experiment state so the fitness function can be queried
            // for display purposes.
            instance_pack ip = new instance_pack();
            ip.robots = robots;
            ip.env = environment;
            ip.timeSteps = this.timeSteps;
            ip.agentBrain = agentBrain;
            ip.collisionManager = collisionManager;
            ip.elapsed = this.elapsedTime;
            ip.ff = this.fitnessFunction;
            ip.bc = this.behaviorCharacterization;
            ip.timestep = timestep;

            // Overlay the live fitness value and the elapsed simulation time.
            double hudFitness = this.fitnessFunction.calculate(this, this.environment, ip, out obj);
            g.DrawString("Fitness: " + hudFitness, new Font("Tahoma", 12), Brushes.Black, 10, 90);
            g.DrawString("Elapsed time: " + this.elapsedTime, new Font("Tahoma", 12), Brushes.Black, 10, 60);
        }
コード例 #18
0
        /// <summary>
        /// Called on every simulation tick. If this is the first tick, initialize some
        /// instance variables (debug POI grid, MaxDistanceToGoal). On every tick, check
        /// whether the robot is within stopping Distance of the goal and stop it if so.
        /// </summary>
        void IFitnessFunction.update(SimulatorExperiment Experiment, Environment environment, instance_pack ip)
        {
            // One-time initialization on the first tick of an evaluation
            if (first)
            {
                first = false;

                if (DEBUG)
                {
                    // If debugging: lay POI markers on a grid over the AOI rectangle so
                    // the maze can be aligned with the MAP-Elites grid by eye.
                    environment.POIPosition.Clear();
                    if (!DEBUG_CLEARONLY)
                    {
                        double dwidth = environment.AOIRectangle.Width / NumBinsPerDim;
                        double dheight = environment.AOIRectangle.Height / NumBinsPerDim;
                        double cornerx = environment.AOIRectangle.Left;
                        double cornery = environment.AOIRectangle.Top;
                        for (int col = 0; col <= NumBinsPerDim; col++)
                        {
                            for (int row = 0; row <= NumBinsPerDim; row++)
                            {
                                environment.POIPosition.Add(new Point((int)(cornerx + dwidth * col), (int)(cornery + dheight * row)));
                            }
                        }
                    }
                }

                // The farthest a robot can get from the goal point while staying inside
                // the AOI bounds is the distance to one of the four AOI corners.
                Point2D[] aoiCorners =
                {
                    new Point2D(environment.AOIRectangle.Left, environment.AOIRectangle.Top),      // top left
                    new Point2D(environment.AOIRectangle.Right, environment.AOIRectangle.Top),     // top right
                    new Point2D(environment.AOIRectangle.Right, environment.AOIRectangle.Bottom),  // bottom right
                    new Point2D(environment.AOIRectangle.Left, environment.AOIRectangle.Bottom)    // bottom left
                };

                double maxSoFar = 0;
                foreach (Point2D corner in aoiCorners)
                {
                    double cornerDist = environment.goal_point.distance(corner);
                    if (cornerDist > maxSoFar)
                    {
                        maxSoFar = cornerDist;
                    }
                }

                // Define the Distance (used to calculate fitness values); add a small
                // value (10) to give a little breathing room.
                MaxDistanceToGoal = maxSoFar + 10;
            }

            // NOTE(review): a squared distance is compared against StoppingRange directly —
            // presumably StoppingRange is already expressed in squared units; verify.
            if (ip.robots[0].Location.squaredDistance(environment.goal_point) < StoppingRange)
            {
                ip.robots[0].Stopped = true;
            }
        }
コード例 #19
0
 /// <summary>
 /// Calculates the fitness of an individual based on Distance to the goal. Not compatible with multiagent teams.
 /// </summary>
 double IFitnessFunction.calculate(SimulatorExperiment engine, Environment environment, instance_pack ip, out double[] objectives)
 {
     // This fitness variant reports no auxiliary objectives.
     objectives = null;

     // Closer to the goal => higher score; standing on the goal scores ~1000.
     double distanceToGoal = ip.robots[0].Location.distance(environment.goal_point);
     return (MaxDistanceToGoal - distanceToGoal) / MaxDistanceToGoal * 1000;
 }
コード例 #20
0
        /// <summary>
        /// Constructs the behavior vector at the end of an individual evaluation.
        /// The evaluation is split into NumIntervals time slices; for each slice the
        /// recorded Headings are binned into four 90-degree quadrants (centered on
        /// 0, 90, 180 and 270 degrees) and the midpoint of the majority quadrant
        /// (0.125, 0.375, 0.625 or 0.875) is appended to the behavior vector.
        /// </summary>
        /// <param name="exp">Experiment, used for the evaluation time when the robot never Stopped.</param>
        /// <param name="ip">Per-evaluation state (not used by this characterization).</param>
        /// <returns>The discrete-Heading behavior vector, one entry per interval.</returns>
        List<double> IBehaviorCharacterization.calculate(SimulatorExperiment exp, instance_pack ip)
        {
            // Initialize the BC vector
            BehaviorVector = new List<double>();

            // If the robot never Stopped, set the max evaluation time as the end tick
            if (endTick == 0)
            {
                endTick = exp.evaluationTime - 1;
            }

            // Number of recorded Heading samples that make up one time slice
            chunkSize = Convert.ToInt32(Math.Floor((double)endTick / (double)NumIntervals));

            for (int chunkNum = 0; chunkNum < NumIntervals; chunkNum++)
            {
                // Reset the accumulators / Heading bins for this time slice.
                int[] headingBins = { 0, 0, 0, 0 };

                // Fill the Heading bins for this time slice.
                for (int j = 0; j < chunkSize; j++)
                {
                    int sampleIndex = chunkNum * chunkSize + j;

                    // Guard against running off the end of the recorded samples
                    if (sampleIndex >= HeadingValues.Count)
                    {
                        continue;
                    }

                    // Convert radians to degrees. FIX: use the exact factor 180/pi
                    // instead of the truncated literal 57.297f, whose error could
                    // misbin Headings that fall near a quadrant boundary.
                    double temp = HeadingValues[sampleIndex] * (180.0 / Math.PI);

                    // Normalize the Heading into the range [0, 360]
                    while (temp > 360)
                    {
                        temp -= 360;
                    }
                    while (temp < 0)
                    {
                        temp += 360;
                    }

                    if ((temp < 45 || temp >= 315))
                    {
                        headingBins[0] += 1;
                    }
                    else if (temp >= 45 && temp < 135)
                    {
                        headingBins[1] += 1;
                    }
                    else if (temp >= 135 && temp < 225)
                    {
                        headingBins[2] += 1;
                    }
                    else if (temp >= 225 && temp < 315)
                    {
                        headingBins[3] += 1;
                    }
                    else
                    {
                        // Reachable only for non-finite Headings (e.g. NaN).
                        Console.WriteLine("ERROR: Unrecognized Heading! Something wrong (DirectionBC). What happen.");
                    }
                }

                // Now figure out which bin had the majority and assign the
                // corresponding discrete value to the BC.
                int majorityIndex = 0;
                int majorityCount = -1;
                for (int i = 0; i < headingBins.Length; i++)
                {
                    if (headingBins[i] > majorityCount)
                    {
                        majorityIndex = i;
                        majorityCount = headingBins[i];
                    }
                }

                BehaviorVector.Add(0.125 + (majorityIndex * 0.250));
            }

            return BehaviorVector;
        }
コード例 #21
0
        /// <summary>
        /// Performs one tick of the experiment: advances the clock, updates robot
        /// sensors, runs the agent brain, resolves collisions, and feeds the fitness
        /// function and behavior characterization. Always returns false.
        /// </summary>
        /// <param name="e">Environment the robots act in on this tick.</param>
        /// <param name="ip">Per-evaluation state; if null, the experiment's own fields are used and wrapped in a fresh instance_pack.</param>
        /// <param name="sem">Semaphore handed to the agent brain's execute call.</param>
        protected internal virtual bool runEnvironment(Environment e, instance_pack ip, System.Threading.Semaphore sem)
        {
            bool collide;

            // Advance time on whichever clock applies: the experiment's own fields
            // when no instance_pack was supplied, otherwise the pack's.
            if (ip == null)
            {
                elapsedTime += timestep;
                timeSteps++;
            }
            else
            {
                ip.timeSteps++;
                ip.elapsed += ip.timestep;
                ip.env = e;
            }

            AgentBrain ab;
            CollisionManager cm;
            List<Robot> rbts;
            IFitnessFunction fit_fun;
            IBehaviorCharacterization beh_char;

            // Resolve the working set of components. With no pack, snapshot the
            // experiment-level members into a new instance_pack; otherwise pull
            // everything from the caller-supplied pack.
            if (ip == null)
            {
                ip = new instance_pack();
                ab = agentBrain;
                cm = collisionManager;
                rbts = robots;
                fit_fun = fitnessFunction;
                beh_char = behaviorCharacterization;
                ip.agentBrain = agentBrain;
                ip.collisionManager = cm;
                ip.robots = rbts;
                ip.ff = fit_fun;
                ip.env = environment;
                ip.bc = beh_char;
                ip.timeSteps = timeSteps;
                ip.timestep = timestep;
            }
            else
            {
                ab = ip.agentBrain;
                cm = ip.collisionManager;
                rbts = ip.robots;
                fit_fun = ip.ff;
                beh_char = ip.bc;
            }

            cm.simulationStepCallback();

            // Sense-then-act for every robot; autopiloted robots skip acting and
            // instead have their ANN signals cleared.
            for (int x = 0; x < rbts.Count; x++)
            {
                rbts[x].updateSensors(e, rbts, cm);
                if (!rbts[x].Autopilot)
                    rbts[x].doAction();
                else
                    ab.clearANNSignals(rbts[x].ZStack);
            }

            ab.execute(sem);

            // Detect and dispatch collisions after all robots have acted.
            for (int x = 0; x < rbts.Count; x++)
            {
                collide = cm.robotCollide(rbts[x]);
                rbts[x].HasCollided = collide;
                if (collide)
                    rbts[x].onCollision();
            }

            // NOTE: ip is always non-null here (created above when the caller
            // passed null), so this guard is effectively always taken.
            if (ip != null)
            {
                if (beh_char != null)
                    beh_char.update(this, ip);
                fit_fun.update(this, e, ip);

                // Track the Trajectory independent of the BC in case the BC has nothing to do with position
                if (recordTrajectories)
                {
                    // Sample the first robot's position every trajectoryUpdateInterval ticks
                    if (ip.robots[0].TrajectoryUpdateCounter == trajectoryUpdateInterval)
                    {
                        ip.robots[0].Trajectory.Add(Convert.ToInt16(rbts[0].Location.X));
                        ip.robots[0].Trajectory.Add(Convert.ToInt16(rbts[0].Location.Y));
                        ip.robots[0].TrajectoryUpdateCounter = 0;
                    }
                    else
                        ip.robots[0].TrajectoryUpdateCounter++;
                }
            }
            return false;
        }
コード例 #22
0
        /// <summary>
        /// Runs a single individual (or single multiagent team) through the CurrentEnvironment(s) specified in the experiment file.
        /// Each environment is run timesToRunEnvironments times; fitness is averaged per environment and then across
        /// environments. Behavior characterization samples, objectives, and optional endpoint/trajectory data are
        /// accumulated into <paramref name="behavior"/>.
        /// </summary>
        /// <param name="network">The neural network (genome phenotype) under evaluation.</param>
        /// <param name="behavior">Out: accumulated behavior data for this individual.</param>
        /// <param name="sem">Semaphore passed through to runEnvironment for brain execution.</param>
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double fitness = 0;
            List<double> fitList = new List<double>();
            behavior = new SharpNeatLib.BehaviorType();
            List<Double> origBehavior = new List<Double>();
            // Fixed-size accumulator for per-run objective values.
            // NOTE(review): assumes at most 6 objectives — a fitness function
            // reporting more would overrun this array; verify against the FFs in use.
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++) accumObjectives[i] = 0.0;

            IFitnessFunction fit_copy;
            IBehaviorCharacterization bc_copy;
            instance_pack inst = new instance_pack();
            inst.timestep = timestep;

            foreach (Environment env2 in environmentList)
            {
                // Fresh fitness/BC copies per environment so state does not leak between runs
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double tempFit = 0;
                double[] fitnesses = new double[timesToRunEnvironments];

                // Fixed-seed RNG when evaluation noise must be reproducible
                SharpNeatLib.Maths.FastRandom evalRand;
                if (noiseDeterministic) evalRand = new SharpNeatLib.Maths.FastRandom(100);
                else
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom();
                }

                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    // Work on a copy so each repeat starts from a pristine environment
                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed = evals;
                    if (!benchmark && noiseDeterministic)
                        env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);
                    else
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom();
                    }

                    float new_sn = this.sensorNoise;
                    float new_ef = this.effectorNoise;
                    float new_headingnoise = this.headingNoise;

                    // Build a fresh brain, robot set, and collision manager for this run
                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, neatBrain, useCTRNNS);
                    inst.timestep = timestep;
                    initializeRobots(agentBrain, env, new_headingnoise, new_sn, new_ef, inst);
                    inst.elapsed = 0;
                    inst.timeSteps = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.initialize(env, this, inst.robots);

                    // Main simulation loop: tick until the evaluation time is used up
                    while (inst.elapsed < evalTime)
                    {
                        runEnvironment(env, inst, sem);
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit += thisFit;

                    // Accumulate objectives (when present) and append this run's BC
                    // samples. NOTE(review): the two branches below differ only in the
                    // objective accumulation; both append the BC and reset it.
                    if (behavior != null && behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                            accumObjectives[i] += behavior.objectives[i];

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    else if (behavior != null && inst.bc != null)
                    {
                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    inst.ff.reset();
                }

                // Average this environment's repeated runs into the running totals
                fitness += tempFit / timesToRunEnvironments;
                fitList.Add(tempFit / timesToRunEnvironments);
            }
            behavior.objectives = accumObjectives;
            if (behaviorCharacterization != null)
            {

                behavior.wraparoundRange = behaviorCharacterization.wraparoundDistCalc;
            }

            // t is never modified below, so it is always 0; kept only so the
            // (fitness - t) return expression stays unchanged.
            double t = 0;

            if (recordEndpoints)
            {
                // Final (x, y) of the first robot from the last run
                behavior.finalLocation = new List<double>(2);
                behavior.finalLocation.Add(inst.robots[0].Location.X);
                behavior.finalLocation.Add(inst.robots[0].Location.Y);
            }

            if (recordTrajectories)
            {
                // Trajectory of the first robot from the last run
                behavior.trajectory = new List<int>();
                behavior.trajectory.AddRange(inst.robots[0].Trajectory);
            }

            // Average the per-environment averages
            return (fitness - t) / environmentList.Count;

        }
コード例 #23
0
 /// <summary>
 /// Hook for experiment subclasses to create and place robots before a run; this base implementation does nothing.
 /// </summary>
 public virtual void initializeRobots(AgentBrain agentBrain, Environment e, float headingNoise, float sensorNoise, float effectorNoise, instance_pack x) { }
コード例 #24
0
 /// <summary>
 /// Per-tick behavior-characterization update; intentionally a no-op for this characterization.
 /// </summary>
 void IBehaviorCharacterization.update(SimulatorExperiment exp, instance_pack ip) { }
コード例 #25
0
 /// <summary>
 /// Per-tick behavior-characterization update; intentionally a no-op for this characterization.
 /// </summary>
 void IBehaviorCharacterization.update(SimulatorExperiment exp, instance_pack ip)
 {
 }
コード例 #26
0
 /// <summary>
 /// Calculates the fitness of an individual based on Distance to the goal. Not compatible with multiagent teams.
 /// </summary>
 double IFitnessFunction.calculate(SimulatorExperiment engine, Environment environment, instance_pack ip, out double[] objectives)
 {
     // This fitness variant reports no auxiliary objectives.
     objectives = null;

     // Remaining headroom relative to the worst case, scaled to roughly [0, 1000].
     double remaining = MaxDistanceToGoal - ip.robots[0].Location.distance(environment.goal_point);
     return remaining / MaxDistanceToGoal * 1000;
 }