Example 1
        /// <param name="correctnessDemonstrationSuccess">true if the (returned) solver correctly solved the task and no already existing solver can solve it</param>
        void solverModificationAndCorrectnessDemonstration(ITaskAndSolverModification p, ITask task, out Solver solver, out bool correctnessDemonstrationSuccess)
        {
            var allExistingTasks = solverTuples.Select(v => v.task);

            context.solverModificationAndCorrectnessDemonstration(p, task, /* passing in this parameter is a hack */ previousS, allExistingTasks, out solver, out correctnessDemonstrationSuccess);
        }
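
For context: the wrapper in Example 1 relies on host-class state that these examples do not show. The sketch below lists the assumed fields; solverTuples, previousS and context appear in the code above and in the later examples, while the IPowerplayContext interface name is hypothetical.

        // Sketch only: assumed fields of the class hosting the wrapper above.
        // solverTuples archives every (task, solver) pair found so far (see SolverTuple in Example 4),
        // previousS is the most recently accepted solver, and context exposes the implementation
        // shown in Example 3. IPowerplayContext is a hypothetical interface name.
        IList<SolverTuple> solverTuples = new List<SolverTuple>();
        Solver previousS;
        IPowerplayContext context;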
Example 2
        public Solver returnInitialProgram()
        {
            Solver solver = new Solver();

            solver.slimRnn = new SlimRnn();

            SlimRnn rnn = solver.slimRnn;

            // neurons for the retina input
            for (int i = 0; i < 5 * 5; i++)
            {
                rnn.neurons.Add(SlimRnnNeuron.makeInputNeuron());
            }

            // neurons for peripheral vision of the retina
            for (int i = 0; i < 3 * 3 - 1; i++)
            {
                rnn.neurons.Add(SlimRnnNeuron.makeInputNeuron());
            }

            // input neuron which always provides the constant 1.0
            neuronIndexConstantOne = (uint)rnn.neurons.Count;
            rnn.neurons.Add(SlimRnnNeuron.makeInputNeuron());

            uint neuronTerminationIndex = (uint)rnn.neurons.Count;

            // output neuron for termination
            rnn.neurons.Add(new SlimRnnNeuron(SlimRnnNeuron.EnumType.ADDITIVE));

            uint neuronOutputStartIndex = (uint)rnn.neurons.Count;
            uint numberOfOutputNeurons  = 8;

            // output neurons for controlling the retina
            for (int i = 0; i < numberOfOutputNeurons; i++)
            {
                rnn.neurons.Add(new SlimRnnNeuron(SlimRnnNeuron.EnumType.ADDITIVE));
            }

            rnn.outputNeuronsStartIndex = neuronOutputStartIndex;
            rnn.numberOfOutputNeurons   = numberOfOutputNeurons;

            rnn.terminatingNeuronIndex     = neuronTerminationIndex;
            rnn.terminatingNeuronThreshold = 0.5f;

            rnn.numberOfInputNeurons = (5 * 5) + (9 - 1) + 1 /* constant neuron */;

            rnn.t_lim = double.MaxValue; // is set by the learning algorithm


            // add and initialize "hidden" neurons

            neuronIndexOfHiddenUnits = (uint)rnn.neurons.Count;

            uint numberOfHiddenNeuronsWtaGroups = 50;
            uint numberOfNeuronsInWtaGroup      = 4; // a group size of 4 follows Schmidhuber's choice

            for (uint groupI = 0; groupI < numberOfHiddenNeuronsWtaGroups; groupI++)
            {
                for (int neuronI = 0; neuronI < numberOfNeuronsInWtaGroup; neuronI++)
                {
                    // alternate additive and multiplicative neurons within each winner-take-all group
                    bool isNeuronIEven = (neuronI % 2) == 0;
                    SlimRnnNeuron.EnumType neuronType = isNeuronIEven ? SlimRnnNeuron.EnumType.ADDITIVE : SlimRnnNeuron.EnumType.MULTIPLICATIVE;

                    SlimRnnNeuron neuron = new SlimRnnNeuron(neuronType);
                    neuron.winnerTakesAllGroup = groupI;

                    rnn.neurons.Add(neuron);
                }
            }

            rnn.initializeNeurons();

            // set up the initial network connectivity

            { // wire the central input sensor to the termination
                int retinaX = 0;
                int retinaY = 0;

                int absoluteRetinaIndexX = 2 + retinaX;
                int absoluteRetinaIndexY = 2 + retinaY;

                int retinaInputNeuronIndex = 0 + 5 * absoluteRetinaIndexY + absoluteRetinaIndexX;

                rnn.neurons[retinaInputNeuronIndex].outNeuronsWithWeights.Add(new SlimRnnNeuronWithWeight(rnn.neurons[retinaInputNeuronIndex], rnn.neurons[(int)neuronTerminationIndex], 0.55f, 0));
            }
            return solver;
        }
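
The construction order in Example 2 fixes the neuron index layout of the initial network. The sketch below only restates that layout; checkInitialLayout is a hypothetical helper, not part of the original code.

        // Sketch only: sanity check of the neuron index layout implied by returnInitialProgram() above.
        // 0..24 : 5x5 retina inputs, 25..32 : peripheral-vision inputs,
        // 33 : constant-1.0 input (neuronIndexConstantOne), 34 : termination output (terminatingNeuronIndex),
        // 35..42 : retina-control outputs, 43.. : hidden winner-take-all neurons (neuronIndexOfHiddenUnits)
        void checkInitialLayout(SlimRnn rnn)
        {
            Debug.Assert(rnn.numberOfInputNeurons == (5 * 5) + (3 * 3 - 1) + 1);
            Debug.Assert(rnn.terminatingNeuronIndex == 34);
            Debug.Assert(rnn.outputNeuronsStartIndex == 35 && rnn.numberOfOutputNeurons == 8);
            Debug.Assert(neuronIndexOfHiddenUnits == 43);
        }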
Example 3
        public void solverModificationAndCorrectnessDemonstration(ITaskAndSolverModification pParameter, ITask taskParameter, Solver previousS, IEnumerable<ITask> allExistingTasks, out Solver solver, out bool correctnessDemonstrationSuccess)
        {
            var p    = (Environment2dTaskAndSolverModification)pParameter;
            var task = (Environment2dTask)taskParameter;

            correctnessDemonstrationSuccess = false;
            solver = null;

            UniversalSlimRnnSearch slimRnnSearch = new UniversalSlimRnnSearch(previousS.slimRnn, new Environment2dPowerplayNetworkTester(task, allExistingTasks));

            slimRnnSearch.weightWithPropabilityTable = returnNormalizedPropabilitiesOfTable();

            { // solver modification
                // hard-coded modification:
                // add a connection from the constant-1.0 input neuron to the output neuron which causes a move to the right;
                // the weight starts at 0.0 and the trailing 'true' marks the connection as eligible (a trial connection)
                previousS.slimRnn.neurons[(int)neuronIndexConstantOne].outNeuronsWithWeights.Add(
                    new SlimRnnNeuronWithWeight(previousS.slimRnn.neurons[(int)neuronIndexConstantOne], previousS.slimRnn.neurons[(int)previousS.slimRnn.outputNeuronsStartIndex], 0.0f, 0, true));


                // TODO< non-hardcoded modification >
            }


            // let the SLIM-RNN search algorithm look for a network which solves the task as fast as possible
            uint    maximumIteration = 3;
            bool    mustHalt         = true;
            bool    wasSolved;
            SlimRnn solutionRnn;

            // note: UniversalSlimRnnSearch.search() uses the world, which would have to be set here
            //slimRnnSearch.world = new TaskSlimRnnWorld(task);

            // the search performs both "solver modification" _and_ "correctness demonstration"
            slimRnnSearch.search(maximumIteration, mustHalt, out wasSolved, out solutionRnn);

            // roll back changes
            // if the task was solved, keep only the eligible weights which were actually used
            if (wasSolved)
            {
                // remove eligible (trial) weights which did not get used (their weight stayed at 0.0)
                foreach (SlimRnnNeuron iNeuron in previousS.slimRnn.neurons)
                {
                    iNeuron.outNeuronsWithWeights = new List<SlimRnnNeuronWithWeight>(
                        iNeuron.outNeuronsWithWeights.Where(v => !v.isEligable || v.weight != 0.0f)
                        );
                }

                // convert all used eligible connections into normal connections
                foreach (SlimRnnNeuron iNeuron in previousS.slimRnn.neurons)
                {
                    foreach (var iConnection in iNeuron.outNeuronsWithWeights)
                    {
                        Debug.Assert(!iConnection.isEligable || iConnection.weight != 0.0f);
                        iConnection.isEligable = false;
                    }
                }
            }
            else
            {
                // we have to roll back/remove all eligible weights
                foreach (SlimRnnNeuron iNeuron in previousS.slimRnn.neurons)
                {
                    iNeuron.outNeuronsWithWeights = new List<SlimRnnNeuronWithWeight>(iNeuron.outNeuronsWithWeights.Where(v => !v.isEligable));
                }
            }

            // if the task was solved with the SLIM-RNN, create a solver which represents the new problem solver;
            // it carries the SLIM-RNN which solved the task
            if (wasSolved)
            {
                solver         = new Solver();
                solver.slimRnn = solutionRnn;

                correctnessDemonstrationSuccess = true;
            }
            else
            {
                // if the task was not solved we cannot demonstrate the correctness
                correctnessDemonstrationSuccess = false;
            }
        }
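
For clarity, the keep/drop decision made by the two rollback loops in Example 3 reduces to a single predicate over connections. The restatement below uses only fields already referenced above; the helper name is hypothetical.

        // Sketch only: the rule behind the rollback loops above.
        // A connection survives a successful search if it is a normal connection, or if it is an
        // eligible (trial) connection which the search assigned a non-zero weight to; surviving
        // trial connections are then made permanent (isEligable = false).
        static bool survivesRollback(SlimRnnNeuronWithWeight connection)
        {
            return !connection.isEligable || connection.weight != 0.0f;
        }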
Example 4
 public SolverTuple(/*ITaskAndSolverModification p, */ ITask task, Solver solver)
 {
     //this.p = p;
     this.task   = task;
     this.solver = solver;
 }
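
SolverTuple is the archive entry whose task field the wrapper in Example 1 reads. The sketch below shows how one PowerPlay-style step could tie the pieces together; tryOneCandidate, candidateModification and candidateTask are hypothetical, and how candidates are enumerated is outside the scope of these examples.

        // Sketch only: one PowerPlay-style step driving the wrapper from Example 1.
        // tryOneCandidate, candidateModification and candidateTask are hypothetical placeholders.
        void tryOneCandidate(ITaskAndSolverModification candidateModification, ITask candidateTask)
        {
            Solver newSolver;
            bool demonstrated;
            solverModificationAndCorrectnessDemonstration(candidateModification, candidateTask, out newSolver, out demonstrated);

            if (demonstrated)
            {
                // archive the newly solved task together with the solver which solves it
                solverTuples.Add(new SolverTuple(candidateTask, newSolver));
                previousS = newSolver;
            }
        }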