Example #1
0
        public void Game()
        {
            #region Initialization

            timer.AddReferee(kref);
            config = KPExperimentConfig.Load("KPExperimentConfig.xml");

            #endregion

            // Run the simulation as fast as the host allows rather than in real time.
            std.Realtime = false;

            // Build the keeper team (left side) followed by the taker team (right side);
            // player numbers are 1-based.
            for (int idx = 0; idx < config.num_Keepers; idx++)
            {
                players.Add(new NEATKeepawayPlayer(std, "keepers", idx + 1, "l", config.num_Keepers, config.num_Takers));
            }

            for (int idx = 0; idx < config.num_Takers; idx++)
            {
                players.Add(new NEATKeepawayPlayer(std, "takers", idx + 1, "r", config.num_Keepers, config.num_Takers));
            }

            // Register every player with the stadium once both teams exist.
            foreach (var player in players)
            {
                std.addPlayer(player);
            }
        }
Example #2
0
        static void Main(string[] args)
        {
            // Entry point for the HyperNEAT keepaway experiment: loads the
            // configuration, wires up the evolutionary algorithm and the
            // substrate network, builds both teams, registers them with the
            // stadium, and then runs the full evolutionary search.
            #region Initialization

            timer.AddReferee(kref);

            // Load experiment settings and share them with every player.
            config = KeepawayConfig.Load("KeepawayConfig.xml");
            HyperNEATKeepawayPlayer.config = config;

            // Configure the evolutionary algorithm and its genome evaluator.
            evo.SetParameters("Parameters.xml");
            evo.Evaluator = new EvaluateGenome(HyperNEATKeepaway);
            evo.Initialise();

            // Build the substrate topology used by HyperNEAT.
            sub.GenerateNodes(NodeGeneration);
            sub.GenerateLinks(LinkCreation);

            // Run the simulation as fast as possible rather than in real time.
            std.Realtime = false;

            // Keeper team (left side) then taker team (right side); player
            // numbers are 1-based.
            for (int i = 0; i < config.NumKeepers; i++)
            {
                players.Add(new HyperNEATKeepawayPlayer(std, "keepers", i + 1, "l", config.NumKeepers, config.NumTakers));
            }

            for (int i = 0; i < config.NumTakers; i++)
            {
                players.Add(new HyperNEATKeepawayPlayer(std, "takers", i + 1, "r", config.NumKeepers, config.NumTakers));
            }

            for (int i = 0; i < players.Count; i++)
            {
                std.addPlayer(players[i]);
            }

            #endregion

            // Run every generation of the evolutionary search to completion.
            // (Previously this call sat inside the "Initialization" region,
            // which mislabeled the main workload as setup.)
            evo.AllGenerations();
        }
Example #3
0
        public void Game()
        {
            // Runs the RL keepaway experiment in one of three modes selected
            // by the loaded RLConfig flags:
            //   visualize == 1 && learning == 0 -> fixed heuristic policy with a field visualizer
            //   visualize == 1 && learning == 1 -> reinforcement learning with a field visualizer
            //   otherwise                       -> reinforcement learning, headless
            #region Initialization

            timer.AddReferee(kref);
            config = RLConfig.Load("RLConfig.xml");

            #endregion


            if (config.visualize == 1 && config.learning == 0) //Heuristic Policy with Visualisation
            {
                Application.EnableVisualStyles();
                Application.SetCompatibleTextRenderingDefault(false);

                RoboCup.FieldVisualizer f = new RoboCup.FieldVisualizer();
                std.Realtime = false;
                timer.OnCycle += f.OnStadiumUpdate;

                // Fixed-policy players are registered directly with the
                // stadium; unlike the RL branches they are not tracked in the
                // players list.
                for (int i = 0; i < config.num_Keepers; i++)
                {
                    std.addPlayer(new FixedKeepawayPlayer(std, "keepers", i + 1, "l", config.num_Keepers, config.num_Takers));
                }

                for (int i = 0; i < config.num_Takers; i++)
                {
                    std.addPlayer(new FixedKeepawayPlayer(std, "takers", i + 1, "r", config.num_Keepers, config.num_Takers));
                }

                f.std = std;
                f.backgroundWorker1.DoWork += new System.ComponentModel.DoWorkEventHandler(backgroundWorker1_DoWork);

                Application.Run(f);
            }
            else if (config.visualize == 1 && config.learning == 1) //Reinforcement Learning with Visualisation
            {
                Application.EnableVisualStyles();
                Application.SetCompatibleTextRenderingDefault(false);

                RoboCup.FieldVisualizer f = new RoboCup.FieldVisualizer();
                std.Realtime = false;
                timer.OnCycle += f.OnStadiumUpdate;

                CreateAndRegisterRLPlayers();

                f.std = std;
                //invokes an event handler that calls RLVisualisation methods
                f.backgroundWorker1.DoWork += new System.ComponentModel.DoWorkEventHandler(backgroundWorker2_DoWork);

                Application.Run(f);
            }
            else //Reinforcement learning without Visualisation
            {
                Console.WriteLine("Initialises the players well ");
                std.Realtime = false;

                CreateAndRegisterRLPlayers();
            }
        }

        // Builds the RL keeper team (left side) and taker team (right side),
        // appending them to the players list, then registers every player with
        // the stadium. Shared by both reinforcement-learning branches of
        // Game(); previously this block was duplicated verbatim in each.
        private void CreateAndRegisterRLPlayers()
        {
            for (int i = 0; i < config.num_Keepers; i++)
            {
                players.Add(new RLKeepawayPlayer(std, "keepers", i + 1, "l", config.num_Keepers, config.num_Takers));
            }

            for (int i = 0; i < config.num_Takers; i++)
            {
                players.Add(new RLKeepawayPlayer(std, "takers", i + 1, "r", config.num_Keepers, config.num_Takers));
            }

            for (int i = 0; i < players.Count; i++)
            {
                std.addPlayer(players[i]);
            }
        }