Example #1
0
        static void Main(string[] args)
        {
            // ---- experiment configuration --------------------------------
            const int roverCount = 20;
            const int poiCount   = 40;

            bool generateRovers = false; // if FALSE, rover layout is read in from file
            bool generatePois   = false; // if FALSE, POI layout is read in from file

            int timesteps = 100;

            int minObsDist = 1; // nearest distance at which a POI observation counts
            int maxObsDist = 3; // farthest distance at which a POI observation counts

            // Reward-shaping toggles: local (L), global (G), difference (D).
            bool doLocal      = false;
            bool doGlobal     = false;
            bool doDifference = true;

            int evoPopulation = 100; // policies per rover in the evolving pool
            int generations   = 100;

            var exporter = new DataIO.DataExport();

            // Build the multi-rover domain the evolved policies are scored in.
            var domain = new Domains.MultiRoverDomain(
                roverCount, poiCount, timesteps,
                doLocal, doGlobal, doDifference,
                generateRovers, generatePois,
                minObsDist, maxObsDist);

            // Neuro-evolution policy pool, one population per rover.
            var evo = new NeuralNetworks.NeuroEvo(roverCount, evoPopulation);

            for (int gen = 0; gen < generations; gen++)
            {
                Console.WriteLine($"GEN = {gen}");

                // Hand the current policies to the rovers and score them in-domain.
                domain.get_and_use_neuroevo_policies(evo.Population, evoPopulation);

                // Select/mutate the populations to close out this generation.
                evo.EndGeneration();
            }

            if (doGlobal)
            {
                exporter.Export1DDoubleArray(domain.Global_rewards[0].ToArray(), "global");
            }
            if (doDifference)
            {
                // NOTE(review): this exports Global_rewards under the "difference"
                // filename — looks like a copy/paste; confirm whether the domain
                // tracks a separate difference-reward series that belongs here.
                exporter.Export1DDoubleArray(domain.Global_rewards[0].ToArray(), "difference");
            }
        }
Example #2
0
        /* Demonstrates training a backprop neural network (from Drew Wilson's
         * NeuralNetwork library) on samples of sin(x), where x is drawn
         * uniformly from [0, maxX) and the target is normalized into [0, 1].
         *
         * After training, the network is swept across the same x range and its
         * approximation of the sine curve is written out as "data.csv".
         */
        public static void Main(string[] args)
        {
            // Library helpers: CSV export, shared RNG, vector utilities, network.
            var exporter = new DataIO.DataExport();
            var prob     = MathClasses.Probability.Instance;
            var la       = new MathClasses.LinearAlgebra();
            var network  = new NeuralNetworks.BackpropNetwork();

            const int trainSamples = 10000;
            const int xMax         = 10;
            const int testSamples  = 10000;

            var trainingInputs  = new List<double[]>();
            var trainingTargets = new List<double[]>();
            var plotRows        = new List<double[]>();

            // Sample the training set: x ~ U[0, xMax), y = (sin(x) + 1) / 2.
            for (int i = 0; i < trainSamples; i++)
            {
                double x = prob.NextDouble() * xMax;
                double y = (Math.Sin(x) + 1) / 2; // map [-1, 1] onto [0, 1]

                trainingInputs.Add(new[] { x });
                trainingTargets.Add(new[] { y });
            }

            // ---- network hyperparameters (adjustable) --------------------
            network.NumInputs     = 1;                               // simulator input width
            network.NumHidden     = 5;                               // hidden nodes per ensemble network
            network.NumOutputs    = 1;                               // simulator output width
            network.WeightInitSTD = .75;                             // std-dev for random weight init
            network.Eta           = 0.1;                             // backprop learning rate
            network.Episodes      = 2000;                            // backprop training episodes
            network.Momentum      = 0.5;                             // backprop momentum term
            network.Shuffle       = NeuralNetworks.ToggleShuffle.no; // no shuffling of training data

            // Attach the training set; validation sets are intentionally empty.
            network.Inputs            = trainingInputs;
            network.Outputs           = trainingTargets;
            network.ValidationInputs  = new List<double[]>();
            network.ValidationOutputs = new List<double[]>();

            network.TrainNetwork();

            // Sweep the trained network across (0, xMax] and record (x, yHat)
            // pairs for plotting.
            var probe = new double[1];
            for (int i = 0; i < testSamples; i++)
            {
                probe[0] = (i + 1) * (double)xMax / (double)testSamples;
                double[] prediction = network.NeuralNet.ForwardPass(probe);

                plotRows.Add(la.Join(probe, prediction));
            }

            exporter.ExportData(plotRows, "data");
        }