Code example #1
        public CommunicationVisualizer(SharpNeatLib.CPPNs.SubstrateDescription _sd, ModularNetwork _net)
        {
            InitializeComponent();

            drawFont = new System.Drawing.Font("Arial", 8);
            drawBrush = new System.Drawing.SolidBrush(System.Drawing.Color.Black);
            drawFormat = new System.Drawing.StringFormat();

            sd = _sd;
            net = _net;

            // Redraw whenever the network signals that its activations have changed.
            _net.UpdateNetworkEvent += networkUpdated;

            activation = new activationLevels[200];
            for (int i = 0; i < activation.Length; i++)
            {
                activation[i] = new activationLevels();
            }

            penConnection = new Pen(Color.Black);
            penRed = new Pen(Color.Red);

            // Route all painting through OnPaint and draw double-buffered to avoid flicker.
            this.SetStyle(
                ControlStyles.AllPaintingInWmPaint |
                ControlStyles.UserPaint |
                ControlStyles.DoubleBuffer, true);
        }
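The ControlStyles flags set above make the control render only inside OnPaint and into an off-screen buffer, which keeps a frequently refreshed network view from flickering. A minimal illustrative OnPaint override (not the project's actual paint code; it only reuses the fields created in the constructor) might look like this:

        // Illustrative sketch only: the real rendering logic in CommunicationVisualizer is more involved.
        protected override void OnPaint(PaintEventArgs e)
        {
            base.OnPaint(e);

            // Text drawn with the font/brush/format allocated in the constructor.
            e.Graphics.DrawString("activation levels", drawFont, drawBrush, 10.0f, 10.0f, drawFormat);

            // Connections could be drawn with the black and red pens, e.g. one sample line:
            e.Graphics.DrawLine(penConnection, 10, 30, 110, 30);
        }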
Code example #2
 // Schrum: This runs multiple time steps to evaluate the agent
 public override double evaluateNetwork(SharpNeatLib.NeuralNetwork.INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
 {
     double result = base.evaluateNetwork(network, out behavior, sem);
     // Schrum: remove reference to enemy list after each eval.
     enemies = null; 
     return result;
 }
Code example #3
        //TODO: deal with threading
        public double threadSafeEvaluateNetwork(SharpNeatLib.NeuralNetwork.INetwork network, System.Threading.Semaphore sem, out SharpNeatLib.BehaviorType behavior, int thread)
        {
            return exp.evaluateNetwork(network, out behavior, sem);
        }
Code example #4
 // Schrum: Changed "internal" to "public". Probably a bad idea, but it works!
  public virtual double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem) { behavior = null;  return 0.0; }
Code example #5
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double fitness = 0;//SharpNeatLib.Utilities.random.NextDouble();
            NeatGenome tempGenome;
            INetwork controller;

            behavior = new SharpNeatLib.BehaviorType();

            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++) accumObjectives[i] = 0.0;

            IFitnessFunction fit_copy;
            IBehaviorCharacterization bc_copy;
            CollisionManager cm;
            instance_pack inst = new instance_pack();
            inst.timestep=timestep;
            foreach (Environment env2 in environmentList)
            {
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double tempFit = 0;
                double[] fitnesses = new double[timesToRunEnvironments];
                SharpNeatLib.Maths.FastRandom evalRand = new SharpNeatLib.Maths.FastRandom(100);
                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    int agent_trials = timesToRunEnvironments;

                    inst.num_rbts=this.numberRobots;

                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed = evals;
                    env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);

                    int noise_lev = (int)this.sensorNoise+1;

                    float new_sn = evalRand.NextUInt() % noise_lev;
                    float new_ef = evalRand.NextUInt() % noise_lev;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, dirComm);
                    initializeRobots(agentBrain, env, headingNoise, new_sn, new_ef, inst);

                    inst.elapsed = 0;
                    inst.timestep = this.timestep;
                    //Console.WriteLine(this.timestep);

                    inst.timeSteps = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.Initialize(env, this, inst.robots);

                    try
                    {
                        while (inst.elapsed < evalTime)
                        {
                            runEnvironment(env, inst, sem);
                        }
                    }
                    catch (Exception e)
                    {
                        // Dump each robot's recorded trajectory to aid debugging before rethrowing.
                        for (int x = 0; x < inst.robots.Count; x++)
                        {
                            for (int y = 0; y < inst.robots[x].history.Count; y++)
                            {
                                Console.WriteLine(x + " " + y + " " + inst.robots[x].history[y].x + " " + inst.robots[x].history[y].y);
                            }
                        }

                        behavior = new SharpNeatLib.BehaviorType();
                        Console.WriteLine(e.Message);
                        Console.WriteLine(e.StackTrace);
                        throw; // rethrow without resetting the stack trace (the old "return 0.0001;" was unreachable)
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit += thisFit;

                    if (behavior != null && behavior.objectives != null && inst.bc!=null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                            accumObjectives[i] += behavior.objectives[i];

                        if(behavior.behaviorList==null)
                            behavior.behaviorList=new List<double>();
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }

                    inst.ff.reset();

                }
                fitness += tempFit / timesToRunEnvironments;
            }
            behavior.objectives = accumObjectives;
            return fitness / environmentList.Count;
        }
Code example #6
        public static float addNoise(float val, float percentage, SharpNeatLib.Maths.FastRandom rng)
        {
            // Blend the original value with a uniform random draw in [0, 1),
            // weighted by the requested noise percentage.
            percentage /= 100.0f;
            float p1 = 1.0f - percentage;
            float p2 = percentage;
            float rval = (float)rng.NextDouble();
            return p1 * val + p2 * rval;
        }
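addNoise returns a convex blend of the original value and a uniform random draw in [0, 1): with percentage = 10 the result is 90% signal and 10% noise. A hypothetical call site (the variable names are assumptions):

        var rng = new SharpNeatLib.Maths.FastRandom();
        float reading = 0.75f;                        // raw sensor value
        float noisy = addNoise(reading, 10.0f, rng);  // = 0.9f * reading + 0.1f * (float)rng.NextDouble()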
Code example #7
        public void ResetEvaluator(SharpNeatLib.NeuralNetwork.IActivationFunction activationFn)
        {
            // Use the multi-threaded evaluator; the single-threaded variant is kept above for reference.
            //populationEval = new SingleFilePopulationEvaluator(new NetworkEvaluator(simExp), null);
            populationEval = new MultiThreadedPopulationEvaluator(new NetworkEvaluator(simExp), null);
        }
Code example #8
        // Saves a CPPN in DOT (Graphviz) file format.
        // Assumes that the CPPN inputs are X1, Y1, X2, Y2, Z.
        public static void saveCPPNasDOT(SharpNeatLib.NeatGenome.NeatGenome genome, string filename)
        {
            StreamWriter SW = File.CreateText(filename);
            SW.WriteLine("digraph g { ");

            String activationType = "";

            foreach (NeuronGene neuron in genome.NeuronGeneList)
            {


                switch (neuron.NeuronType)
                {
                    case NeuronType.Bias: SW.WriteLine("N0 [shape=box, label=Bias]"); break;
                    case NeuronType.Input:

                        string str = "?";
                        switch (neuron.InnovationId)
                        {
                            case 1: str = "X1"; break;
                            case 2: str = "Y1"; break;
                            case 3: str = "X2"; break;
                            case 4: str = "Y2"; break;
                            case 5: str = "Z"; break;

                        }
                        SW.WriteLine("N" + neuron.InnovationId + "[shape=box label=" + str + "]");
                        break;
                    case NeuronType.Output: SW.WriteLine("N" + neuron.InnovationId + "[shape=triangle]"); break;
                    case NeuronType.Hidden:
                        // Short label suffix per activation function; the strings below must match
                        // each function's FunctionDescription verbatim (including the library's spelling).
                        if (neuron.ActivationFunction.FunctionDescription.Equals("bipolar steepend sigmoid")) activationType = "S";
                        if (neuron.ActivationFunction.FunctionDescription.Equals("bimodal gaussian")) activationType = "G";
                        if (neuron.ActivationFunction.FunctionDescription.Equals("Linear")) activationType = "L";
                        if (neuron.ActivationFunction.FunctionDescription.Equals("Sin function with doubled period")) activationType = "Si";
                        if (neuron.ActivationFunction.FunctionDescription.Equals("Returns the sign of the input")) activationType = "Sign";

                        SW.WriteLine("N" + neuron.InnovationId + "[shape=circle, label=N" + neuron.InnovationId + "_" + activationType + ", fillcolor=gray]");
                        break;
                }

            }

            foreach (ConnectionGene gene in genome.ConnectionGeneList)
            {
                SW.Write("N" + gene.SourceNeuronId + " -> N" + gene.TargetNeuronId + " ");

                if (gene.Weight > 0)
                    SW.WriteLine("[color=black] ");
                else if (gene.Weight < 0)
                    SW.WriteLine("[color=red] [arrowhead=inv]");
                else
                    SW.WriteLine(); // zero-weight connection: just terminate the edge statement
            }

            foreach (ModuleGene mg in genome.ModuleGeneList)
            {
                foreach (uint sourceID in mg.InputIds)
                {
                    foreach (uint targetID in mg.OutputIds)
                    {
                        SW.Write("N" + sourceID + " -> N" + targetID + " ");

                        SW.WriteLine("[color=gray]");
                    }
                }
            }

            SW.WriteLine(" { rank=same; ");
            foreach (NeuronGene neuron in genome.NeuronGeneList)
            {
                if (neuron.NeuronType == NeuronType.Output)
                {
                    SW.WriteLine("N" + neuron.InnovationId);
                }
            }
            SW.WriteLine(" } ");


            SW.WriteLine(" { rank=same; ");
            foreach (NeuronGene neuron in genome.NeuronGeneList)
            {
                if (neuron.NeuronType == NeuronType.Input)
                {
                    SW.Write("N" + neuron.InnovationId + " ->");
                }
            }
            //Also the bias neuron on the same level
            SW.WriteLine("N0 [style=invis]");
            SW.WriteLine(" } ");

            SW.WriteLine("}");

            SW.Close();
        }
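Because the output is plain DOT, the written file can be rendered with Graphviz afterwards. A hypothetical usage (the champion genome variable and file names are assumptions; requires the dot executable on the PATH):

        saveCPPNasDOT(champion, "champion_cppn.dot");
        // Render the graph to a PNG with Graphviz.
        System.Diagnostics.Process.Start("dot", "-Tpng champion_cppn.dot -o champion_cppn.png");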
Code example #9
 /// <summary>
 /// Thread-safe wrapper function for the experiment class's evaluateNetwork function.
 /// </summary>
 /// <param name="network">If using NEAT (with direct encoding), the network is the controller itself. Otherwise, if using HyperNEAT, the network is a CPPN that indirectly encodes the controller.</param>
 /// <param name="sem">Semaphore for managing parallel processes.</param>
 /// <param name="behavior">** Output parameter ** Returns a vector representation of the agent's behavior inside the experimental domain.</param>
 public double threadSafeEvaluateNetwork(SharpNeatLib.NeuralNetwork.INetwork network, System.Threading.Semaphore sem, out SharpNeatLib.BehaviorType behavior, int thread)
 {
     //TODO: deal with threading
     return Experiment.evaluateNetwork(network, out behavior,sem);
 }
Code example #10
 /// <summary>
 /// Wrapper function for the experiment class's evaluateNetwork function.
 /// </summary>
 /// <param name="network">If using NEAT (with direct encoding), the network is the controller itself. Otherwise, if using HyperNEAT, the network is a CPPN that indirectly encodes the controller.</param>
 /// <param name="behavior">** Output parameter ** Returns a vector representation of the agent's behavior inside the experimental domain.</param>
 public double evaluateNetwork(SharpNeatLib.NeuralNetwork.INetwork network, out SharpNeatLib.BehaviorType behavior)
 {
     return Experiment.evaluateNetwork(network, out behavior,null);
 }
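A hypothetical call to this non-threaded wrapper (the evaluator instance and network variable are assumptions):

     SharpNeatLib.BehaviorType behavior;
     double fitness = evaluator.evaluateNetwork(network, out behavior);
     Console.WriteLine("fitness = " + fitness);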
Code example #11
        /// <summary>
        /// Runs a single individual (or single multiagent team) through the CurrentEnvironment(s) specified in the experiment file.
        /// </summary>
        internal override double evaluateNetwork(INetwork network, out SharpNeatLib.BehaviorType behavior, System.Threading.Semaphore sem)
        {
            double fitness = 0;
            List<double> fitList = new List<double>();
            behavior = new SharpNeatLib.BehaviorType();
            List<Double> origBehavior = new List<Double>();
            double[] accumObjectives = new double[6];
            for (int i = 0; i < 6; i++) accumObjectives[i] = 0.0;

            IFitnessFunction fit_copy;
            IBehaviorCharacterization bc_copy;
            instance_pack inst = new instance_pack();
            inst.timestep = timestep;

            foreach (Environment env2 in environmentList)
            {
                fit_copy = fitnessFunction.copy();
                if (behaviorCharacterization != null)
                {
                    bc_copy = behaviorCharacterization.copy();
                    inst.bc = bc_copy;
                }
                inst.ff = fit_copy;

                double tempFit = 0;
                double[] fitnesses = new double[timesToRunEnvironments];

                SharpNeatLib.Maths.FastRandom evalRand;
                if (noiseDeterministic) evalRand = new SharpNeatLib.Maths.FastRandom(100);
                else
                {
                    evalRand = new SharpNeatLib.Maths.FastRandom();
                }

                for (int evals = 0; evals < timesToRunEnvironments; evals++)
                {
                    inst.num_rbts = this.numberRobots;

                    Environment env = env2.copy();

                    double evalTime = evaluationTime;

                    inst.eval = evals;
                    env.seed = evals;
                    if (!benchmark && noiseDeterministic)
                        env.rng = new SharpNeatLib.Maths.FastRandom(env.seed + 1);
                    else
                    {
                        env.rng = new SharpNeatLib.Maths.FastRandom();
                    }

                    float new_sn = this.sensorNoise;
                    float new_ef = this.effectorNoise;
                    float new_headingnoise = this.headingNoise;

                    inst.agentBrain = new AgentBrain(homogeneousTeam, inst.num_rbts, substrateDescription, network, normalizeWeights, adaptableANN, modulatoryANN, multibrain, evolveSubstrate, neatBrain, useCTRNNS);
                    inst.timestep = timestep;
                    initializeRobots(agentBrain, env, new_headingnoise, new_sn, new_ef, inst);
                    inst.elapsed = 0;
                    inst.timeSteps = 0;
                    inst.collisionManager = collisionManager.copy();
                    inst.collisionManager.initialize(env, this, inst.robots);

                    while (inst.elapsed < evalTime)
                    {
                        runEnvironment(env, inst, sem);
                    }

                    double thisFit = inst.ff.calculate(this, env, inst, out behavior.objectives);
                    fitnesses[evals] = thisFit;
                    tempFit += thisFit;

                    if (behavior != null && behavior.objectives != null && inst.bc != null)
                    {
                        for (int i = 0; i < behavior.objectives.Length; i++)
                            accumObjectives[i] += behavior.objectives[i];

                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    else if (behavior != null && inst.bc != null)
                    {
                        if (behavior.behaviorList == null)
                        {
                            behavior.behaviorList = new List<double>();
                        }
                        behavior.behaviorList.AddRange(inst.bc.calculate(this, inst));

                        inst.bc.reset();
                    }
                    inst.ff.reset();
                }

                fitness += tempFit / timesToRunEnvironments;
                fitList.Add(tempFit / timesToRunEnvironments);
            }
            behavior.objectives = accumObjectives;
            if (behaviorCharacterization != null)
            {
                behavior.wraparoundRange = behaviorCharacterization.wraparoundDistCalc;
            }

            double t = 0;

            if (recordEndpoints)
            {
                behavior.finalLocation = new List<double>(2);
                behavior.finalLocation.Add(inst.robots[0].Location.X);
                behavior.finalLocation.Add(inst.robots[0].Location.Y);
            }

            if (recordTrajectories)
            {
                behavior.trajectory = new List<int>();
                behavior.trajectory.AddRange(inst.robots[0].Trajectory);
            }

            return (fitness - t) / environmentList.Count;

        }