Example #1
0
        // STEP 4: build the neural network and persist it to the configured network file.
        static void Step4()
        {
            Console.WriteLine("STEP 4: Create neural network...");

            var creator = new NetworkCreator();
            creator.Create(DataFilesInfoGetter.NetworkFile);
        }
        // Builds a small hand-wired category tree (document -> 1040/{2012, 2010, other}, 990),
        // trains networks on a fixed list of labelled images, and classifies a few test images.
        public void TestCategories()
        {
            var validationSet = new List <TrainingSet>();

            var trainingSet = new List <TrainingSet>();

            var root       = new Category("document");
            var form1040   = root.AddSubcategory("1040");
            var form990    = root.AddSubcategory("990");

            var year2012   = form1040.AddSubcategory("2012");
            var year2010   = form1040.AddSubcategory("2010");
            var otherYears = form1040.AddSubcategory("other");

            // (image path, expected category) pairs; order matters only in that it
            // mirrors the original hand-written sequence.
            var labelledImages = new (string Path, Category Label)[]
            {
                ("trainingdata/documents/1040/other/tax1040.gif",   otherYears),
                ("trainingdata/documents/1040/other/tax1040_2.png", otherYears),
                ("trainingdata/documents/1040/2012/tax1040_3.gif",  year2012),
                ("trainingdata/documents/1040/2010/tax1040_4.jpg",  year2010),
                ("trainingdata/documents/1040/other/tax1040_5.gif", otherYears),
                ("trainingdata/documents/1040/other/tax1040_6.gif", otherYears),
                ("trainingdata/documents/1040/other/tax1040_7.gif", otherYears),
                ("trainingdata/documents/1040/other/tax1040_8.gif", otherYears),
                ("trainingdata/documents/1040/2010/tax1040_9.jpg",  year2010),
                ("trainingdata/documents/1040/2012/tax1040_10.gif", year2012),
                ("trainingdata/documents/990/tax990.jpg",   form990),
                ("trainingdata/documents/990/tax990_2.jpg", form990),
                ("trainingdata/documents/990/tax990_3.jpg", form990),
                ("trainingdata/documents/990/tax990_4.jpg", form990),
                ("trainingdata/documents/990/tax990_5.jpg", form990),
                ("trainingdata/documents/990/tax990_6.jpg", form990),
                ("trainingdata/documents/990/tax990_7.jpg", form990),
            };

            foreach (var (imagePath, label) in labelledImages)
            {
                validationSet.Add(new TrainingSet(imagePath, label));
            }

            var trainer  = new Trainer();
            var networks = new NetworkCreator().CreateNetworks(root);

            trainer.Train(networks, validationSet);

            // Classify a few known images; the last result is kept in a local so it
            // can be inspected in the debugger.
            var outcome = trainer.Run(networks, root, "trainingdata/documents/990/tax990_poor.gif");

            outcome = trainer.Run(networks, root, "trainingdata/documents/1040/2012/tax1040_10.gif");
            outcome = trainer.Run(networks, root, "trainingdata/documents/1040/2010/tax1040_4.jpg");
            var x = outcome;
        }
        // Same flow as TestCategories, but the sample sets and the category tree are
        // discovered from directory structure instead of being hand-wired:
        // "generateddata" supplies the training samples, "trainingdata" the validation ones.
        public void TestNetwork_FromDirectory()
        {
            // Item1 = sample list, Item2 = root category (presumably — confirm against
            // TrainingSet.FromDirectory).
            var validation = TrainingSet.FromDirectory("trainingdata");
            var training   = TrainingSet.FromDirectory("generateddata");

            var trainer  = new Trainer();
            var networks = new NetworkCreator().CreateNetworks(training.Item2);

            trainer.Train(networks, training.Item1, validation.Item1);

            // Classify a few known images; the last result is kept for debugger inspection.
            var outcome = trainer.Run(networks, training.Item2, "trainingdata/documents/990/tax990_poor.gif");

            outcome = trainer.Run(networks, training.Item2, "trainingdata/documents/1040/2012/tax1040_10.gif");
            outcome = trainer.Run(networks, training.Item2, "trainingdata/documents/1040/2010/tax1040_4.jpg");
            var x = outcome;
        }
        // Generates the classification tree from directory structure, trains it, then — on a
        // background thread — classifies three known images, refreshing the UI after each and
        // popping a (hidden) image dialog for it.
        public void RunNetwork()
        {
            var validationData = TrainingSet.FromDirectory(path + "/trainingdata");
            var trainingData   = TrainingSet.FromDirectory(path + "/generateddata");

            root = trainingData.Item2;
            Invalidate();
            //Invoke(new MethodInvoker(Invalidate));

            var trainer  = new Trainer();
            var networks = new NetworkCreator().CreateNetworks(root);

            trainer.Train(networks, trainingData.Item1, validationData.Item1);

            new Thread(() =>
            {
                var testImages = new[]
                {
                    path + "/trainingdata/documents/990/tax990.jpg",
                    path + "/trainingdata/documents/1040/2012/tax1040_10.gif",
                    path + "/trainingdata/documents/1040/2010/tax1040_4.jpg",
                };

                foreach (var imagePath in testImages)
                {
                    // Store the classification in the shared field so the repaint can show it.
                    results = trainer.Run(networks, trainingData.Item2, imagePath);
                    Invalidate();
                    var viewer     = new ImageView(Image.FromFile(imagePath));
                    viewer.Visible = false;
                    viewer.ShowDialog();
                }
            }).Start();
        }
Example #5
0
        // Trains a 2-input perceptron to approximate the (normalised) sum of two random
        // integers via backpropagation, then prints its predictions next to the true sums.
        public static void TestBackpropagation()
        {
            var network = NetworkCreator.Perceptron(2, 1, new [] { 20, 20, 20 });

            var sampleCount = 100000;
            var samples     = new Dictionary <List <double>, List <double> >();
            var rand        = new Random();
            var range       = 1000;  // width of the sampled interval
            var offset      = 500;   // shifts samples into [-500, 500)

            for (var i = 0; i < sampleCount; i++)
            {
                var a = rand.Next(range) - offset;
                var b = rand.Next(range) - offset;
                // Target: the sum rescaled into (0, 1) so a bounded output can represent it.
                samples.Add(
                    new List <double>() { a, b },
                    new List <double>() { (a + b + 2 * offset) / 2d / range });
            }

            BackpropagationAlgorithm.Teach(network, samples);
            network.Print();

            // Interactive evaluation: one prediction per Enter key press.
            for (var i = 0; i < 100; i++)
            {
                var a = rand.Next(range) - offset;
                var b = rand.Next(range) - offset;
                network.Work(new List <double>() { a, b });
                var prediction = network.GetResults()[0];
                // Undo the normalisation to compare against the true sum.
                Console.WriteLine(a + "  " + b + " . " + (a + b) + " " + (Math.Round((prediction * range - offset) * 2)));
                Console.ReadLine();
            }
        }
Example #6
0
        /// <summary>
        /// Evolves a population of neural agents with a genetic algorithm on a randomly
        /// generated two-player bimatrix game with <c>options</c> actions per side.
        /// Fitness is the payoff accumulated over random pairings; prints the payoff
        /// matrix up front and, at the end, the actions chosen by the top ten agents.
        /// </summary>
        public static void TestGeneticAlgorithm()
        {
            var agents     = new List <NeuralEnvironment>();
            var population = 100;

            var rand    = new Random();
            var dileme  = new List <double>();
            var options = 7;

            // Payoff matrix, flattened row-major. Cell (i, j) occupies two consecutive
            // slots: [2*(i*options+j)] is the row player's payoff, [2*(i*options+j)+1]
            // the column player's. Payoffs are multiples of 0.1 in [0, 1].
            for (var i = 0; i < options * options * 2; i++)
            {
                dileme.Add(rand.Next(11) / 10d);
            }

            // The same cells with the two payoffs swapped, fed to the second player so it
            // sees its own payoff first. NOTE(review): this swaps within each cell but does
            // not transpose the matrix — confirm that is the intended second-player view.
            var bacldileme = new List <double>();

            for (var i = 0; i < options * options; i++)
            {
                bacldileme.Add(dileme[i * 2 + 1]);
                bacldileme.Add(dileme[i * 2]);
            }

            // Print the payoff matrix as "row col" pairs (scaled back to integers 0..10).
            for (var i = 0; i < options; i++)
            {
                for (var j = 0; j < options; j++)
                {
                    Console.Write(Math.Round(dileme[2 * (i * options + j)] * 10)
                                  + " " + Math.Round(dileme[2 * (i * options + j) + 1] * 10));
                    if (j + 1 != options)
                    {
                        Console.Write(" | ");
                    }
                }
                Console.WriteLine();
            }
            Console.WriteLine();
            Console.WriteLine();

            // NOTE(review): the perceptron is created with 8 inputs but Work() is later
            // called with options*options*2 = 98 values — confirm NeuralEnvironment.Work
            // tolerates (truncates/pads) oversized input.
            for (var i = 0; i < population; i++)
            {
                agents.Add(NetworkCreator.Perceptron(8, options, new List <int>()
                {
                    20
                }));
            }

            // Fitness function factory: plays `tours` rounds of random pairings and returns
            // each player's accumulated payoff; `print` additionally reports the best score
            // and the population average.
            var tournament = new Func <bool, Func <List <NeuralEnvironment>, List <double> > >(print => ((players) =>
            {
                var startElo = 0d;
                var tours = 100;
                var elo =
                    players.ToDictionary(player => player, player => startElo);

                for (var game = 0; game < tours; game++)
                {
                    // Random pairing. (The OrderBy is effectively a no-op, since partners
                    // are then drawn uniformly at random from the remaining list.)
                    var pairing = new List <(NeuralEnvironment, NeuralEnvironment)>();
                    var list = players.OrderBy(p => - elo[p]).ToList();
                    for (var i = 0; i < players.Count / 2; i++)
                    {
                        var f = list[rand.Next(list.Count)];
                        list.Remove(f);
                        var s = list[rand.Next(list.Count)];
                        list.Remove(s);
                        pairing.Add((f, s));
                    }

                    Parallel.ForEach(pairing, pair =>
                    {
                        var(player1, player2) = pair;
                        player1.Work(dileme);
                        player2.Work(bacldileme);
                        var firstPlay = player1.GetMaxOutId();
                        var secondPlay = player2.GetMaxOutId();

                        // BUG FIX: cell (firstPlay, secondPlay) lives at
                        // 2 * (firstPlay * options + secondPlay). The previous hard-coded
                        // stride of 6 (firstPlay * 6 + secondPlay * 2) only matches a
                        // 3-option matrix and read the wrong cells for options == 7.
                        var cell = 2 * (firstPlay * options + secondPlay);

                        // BUG FIX: Dictionary<,> is not safe for concurrent writes, even
                        // to distinct existing keys — serialize the score updates.
                        lock (elo)
                        {
                            elo[player1] += dileme[cell];
                            elo[player2] += dileme[cell + 1];
                        }

                        /*var E1 = 1 / (1 + Math.Pow(10, (elo[player2] - elo[player1]) / 400));
                         * var E2 = 1 / (1 + Math.Pow(10, (elo[player1] - elo[player2]) / 400));
                         * elo[player1] += 20 * (res[1] / points + (res[0] / draw) / 2d - E1);
                         * elo[player2] += 20 * (res[2] / points + (res[0] / draw) / 2d- E2);*/
                    });
                }

                var bestPlayer = elo.OrderBy(p => p.Value).Last().Key;
                if (print)
                {
                    // Best accumulated score, then the population average.
                    Console.WriteLine(Math.Round(elo[bestPlayer])
                                      + "  "
                                      + Math.Round(elo.Select(p => p.Value).Sum() * 1d / elo.Count));
                }
                return(players.Select(p => elo[p]).ToList());
            }));

            var iterations = 1000;

            for (var i = 0; i < iterations; i++)
            {
                agents = GeneticAlgorithm.Improve(
                    agents,
                    GeneticAlgorithm.RandomMerge,
                    tournament(false));
            }

            // Show the action each of the best ten agents picks from either side of the game.
            foreach (var agent in agents.Take(10))
            {
                agent.Work(dileme);
                Console.Write(agent.GetMaxOutId() + " ");
                agent.Work(bacldileme);
                Console.WriteLine(agent.GetMaxOutId());
            }
        }
Example #7
0
File: Program.cs  Project: ZerGL1nG/ML
        static void Main(string[] args)
        {
            // All in-game parameters live in GameRules.cs (changing them may cause exceptions).
            //
            // Library quick reference:
            //   NetworkCreator.Perceptron(inputs, outputs, hiddenLayerSizes) - create an agent
            //   NetworkCreator.ReadFromFile(filename)                       - load a saved agent
            //   PlayGame(agents)                       - watch agents in action
            //   Teach(agents, exampleGames, cycles)    - teach agents with backpropagation
            //   Gen(agents, generations)               - improve agents with a genetic algorithm

            // Reset per-action counters, then print the theoretically perfect average
            // lifetime for agents under the current rule values.
            _actions = new List <int>(GameRules.NumberOfActions);
            for (var action = 0; action < GameRules.NumberOfActions; action++)
            {
                _actions.Add(0);
            }
            var averageTarget = GameRules.StartOil
                                + GameRules.OilProb
                                * GameRules.OilBonus
                                * GameRules.MapWidth
                                * GameRules.MapHeight
                                / GameRules.numberOfAgents;
            Console.WriteLine("Average target = " + averageTarget);
            Console.WriteLine();
            Console.WriteLine();
            ///////////////////////////////////////////////////////////////////////////////////////////////////////////

            // Code examples

            /*
             * // Create brand-new agents
             * var inputs = ((GameRules.SightWidth * 2 + 1) * (GameRules.SightHeight) - 1) * GameRules.Tags.Count + 2;
             * var outputs = GameRules.NumberOfActions;
             * var neuronsInHiddenLayers = new List<int>() {20};
             * var agents = new List<NeuralEnvironment>();
             * for(var i = 0; i < GameRules.numberOfAgents; i++)
             *  agents.Add(NetworkCreator.Perceptron(inputs, outputs, neuronsInHiddenLayers ));
             */

            /*
             * // Load saved agents from files
             * var filename = "agent";
             * var iter = GameRules.numberOfAgents;
             * var agents = new List<NeuralEnvironment>();
             * for(var i = 0; i < iter; i++)
             *  agents.Add(NetworkCreator.ReadFromFile(filename + i));
             */

            /*
             * // Watch a game (press Enter to advance one tick)
             * PlayGame(agents, true);
             */

            /*
             * // Teach agents by playing against them yourself
             * var agents1 = Teach(agents, 1, 10);
             */

            /*
             * // Let agents learn by playing each other
             * var agents1 = Gen(agents, 100);
             */

            /*
             * // Save agents
             * var filename = "agent";
             * iter = 0;
             * foreach (var agent in agents)
             *  agent.Save(filename + iter++);
             */

            ///////////////////////////////////////////////////////////////////////////////////////////////////////////
            // Actual run starts here: load previously saved agents from "agent0", "agent1", ...

            var filename   = "agent";
            var agentCount = GameRules.numberOfAgents;
            var agents     = new List <NeuralEnvironment>();

            for (var i = 0; i < agentCount; i++)
            {
                agents.Add(NetworkCreator.ReadFromFile(filename + i));
            }

            // Ten generations of self-play, then a short human-teaching session, then ten
            // more generations to observe the effect of us being a poor teacher.
            agents = Gen(agents, 10);
            agents = Teach(agents, 3, 5);
            agents = Gen(agents, 10);

            // Deliberately not saving that result.
            /*
             * iter = 0;
             * foreach (var agent in agents)
             *  agent.Save(filename + iter++);
             */

            Console.WriteLine("Finished");
            Console.ReadLine();
        }