Example #1
        // adds a parent to the Parents list. No change to the STANN
        public void addParent(MetaNode parent)
        {
            int i;

            MetaNode[] temp;
            if (ParentNum == 0)
            {
                ParentNum++;
                Parents = new MetaNode[ParentNum];
            }
            else
            {
                temp = new MetaNode[ParentNum];
                for (i = 0; i < ParentNum; i++)
                {
                    temp[i] = Parents[i];
                }
                ParentNum++;
                Parents = new MetaNode[ParentNum];
                for (i = 0; i < ParentNum - 1; i++)
                {
                    Parents[i] = temp[i];
                }
            }
            Parents[ParentNum - 1] = parent;
        }
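The addParent method above (and addChild further down) grows its array by one slot per call using a manual temp-copy loop. The same grow-by-one append can be written with Array.Resize; a minimal sketch, with an illustrative helper name that is not part of the original classes:

    using System;

    static class ArrayGrowSketch
    {
        // grow-by-one append, equivalent to the temp-copy loop in addParent/addChild
        public static T[] AppendOne<T>(T[] array, T item)
        {
            int oldLength = (array == null) ? 0 : array.Length;
            Array.Resize(ref array, oldLength + 1); // keeps existing elements, adds one empty slot
            array[oldLength] = item;
            return array;
        }
    }
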
Example #2
        // constructor
        public LMMobileRobot(EZRoboNetDevice netdevice, byte avatarnodeid, Random randgen)
            : base(netdevice, avatarnodeid, randgen)
        {
            flagAbstractionDataReady = false;
            flagAbstractionReady     = false;

            flagSensorDataAcquired = false;

            AbstractionBytes = 0;

            // creating the sonar array
            SonarArray     = new ushort[8];
            PrevSonarArray = new ushort[8];

            // creating the cognitive array
            CogTop = createLMCartCognitiveArray(ref CogSonarNode, ref CogCamLinesNodes);


            state = stIdle;

            flagSonarArrayFiring = flagSonarArrayFiringDone = false;
            flagAbilityDone      = flagAbilityExecuting = false;

            RawAbstraction = new byte[ABSTRACTION_FRAME_HEIGHT * ABSTRACTION_FRAME_WIDTH * 3];
        }
        public void trainingStep(int ability, ref int pass)
        {
            pass++;
            // 1. initiating ability
            state = stAbilityExecuting;

            // storing chosen ability
            LastUsedAbility = ability;
            // storing current input vectors
            LastInputVecs = Cart.makeInputVector();

            // finding the input vector for the top node
            TopNodeInput = new double[Cart.CogTop.InputNum];
            int i;

            for (i = 0; i < Cart.CogTop.InputNum; i++)
            {
                TopNodeInput[i] = MetaNode.getOutput(Cart.CogTop.Children[i], LastInputVecs, pass);
            }
            // now training
            double[] DesiredOutputVec = STANN.mapInt2VectorDouble(ability, 2, Cart.CogTop.stann.OutputNum);

            // training 6 times
            for (i = 0; i < 6; i++)
            {
                Cart.CogTop.stann.backPropagate(TopNodeInput, DesiredOutputVec);
            }

            // executing ability now..
            Cart.useAbility((t_CartAbility)ability);
        }
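trainingStep builds its training target with STANN.mapInt2VectorDouble(ability, 2, Cart.CogTop.stann.OutputNum). From its usage here, and from the bit ordering in the commented-out getOutput further down (output += Math.Pow(2, i) * bit), that call presumably expands an integer into a fixed-length vector of base-2 digits, least significant digit first. A sketch under that assumption, not the project's actual implementation:

    // Presumed behaviour of STANN.mapInt2VectorDouble(value, radix, length):
    // one digit per output neuron, least significant digit first.
    static double[] IntToDigitVector(int value, int radix, int length)
    {
        double[] vec = new double[length];
        for (int i = 0; i < length; i++)
        {
            vec[i] = value % radix; // digit i in the given radix
            value  /= radix;
        }
        return vec;
    }
    // e.g. IntToDigitVector(5, 2, 4) presumably yields { 1, 0, 1, 0 }
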
Example #4
        public static double getOutputNoUpdate(MetaNode mnet, double[][] inputvec, int pass)
        {
            int i;

            double[] sigmoids;
            double   theoutput;

            if (mnet.ChildrenNum == 0)
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    // no self-training here: the no-update variant leaves the node untouched


                    // retrieving the sigmoids of the node
                    sigmoids = mnet.stann.sigmoidLayerOutputs(inputvec[mnet.LeafIndex], mnet.stann.LayerNum - 1);
                    // calculating the decimal equivalent to the ordered thresholded sigmoid outputs
                    theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);
                }
                else
                {
                    theoutput = mnet.NoUpdatePassOutput;
                }
            }
            else
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    double[] levelinput = new double[mnet.InputNum];

                    for (i = 0; i < mnet.InputNum; i++)
                    {
                        if (mnet.Children[i].NodeLevel >= mnet.NodeLevel)    // requesting output from a higher (or equal ) level node
                        {
                            levelinput[i] = mnet.Children[i].PreviousOutput; // avoiding circular reference in recursion
                        }
                        else
                        {
                            levelinput[i] = getOutputNoUpdate(mnet.Children[i], inputvec, pass);
                        }
                    }

                    // retrieving sigmoids
                    sigmoids = mnet.stann.sigmoidLayerOutputs(levelinput, mnet.stann.LayerNum - 1);

                    // calculating the decimal equivalent to the thresholded outputs

                    theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);
                }
                else
                {
                    theoutput = mnet.CurrentOutput;
                }
            }

            return(theoutput);
        }
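Both branches above decode the thresholded sigmoid vector into a single integer with STANN.mapVector2Int(sigmoids, 2, OutputNum). Judging from the explicit decoding loop in the commented-out getOutput later in this listing (threshold at 0.5, weight Math.Pow(2, i)), the call presumably reads the vector as base-2 digits with element 0 least significant; getOutput's Q-learning branch uses the same call with InputRange as the radix. A sketch of the binary case under that assumption:

    using System;

    static class SigmoidDecodeSketch
    {
        // Presumed behaviour of STANN.mapVector2Int(values, 2, length):
        // threshold each sigmoid at 0.5 and read the bits as an integer, LSB first.
        public static int SigmoidsToInt(double[] sigmoids, int length)
        {
            int result = 0;
            for (int i = 0; i < length; i++)
            {
                int bit = (sigmoids[i] < 0.5) ? 0 : 1; // same threshold as the original loop
                result += (int)Math.Pow(2, i) * bit;   // element 0 is the least significant bit
            }
            return result;
        }
    }
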
Example #5
        // the following method applies the sensor readings to the metanetwork and returns the result
        public int runCognitiveArray(int pass)
        {
            double[][] inputvector = makeInputVector();

            //double[][] inputvector = makeRandomInputVector();

            return((int)MetaNode.getOutput(CogTop, inputvector, pass));
        }
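The pass argument is what keeps shared children from being evaluated more than once per sweep: each MetaNode stores OutputPass and returns its cached output when asked again with the same pass number (see getOutput below), which is why callers such as runCognitiveArray and transitionAction increment pass before each evaluation. A minimal, self-contained sketch of that memoization pattern, with illustrative names rather than the project's classes:

    // Pass-based memoization: a node recomputes only when the pass number changes.
    class PassNodeSketch
    {
        public int    OutputPass    = 0;
        public double CurrentOutput = 0;

        public double GetOutput(double input, int pass)
        {
            if (OutputPass != pass)            // first request in this pass: recompute
            {
                OutputPass    = pass;
                CurrentOutput = input * 2.0;   // stand-in for the real STANN evaluation
            }
            return CurrentOutput;              // later requests in the same pass reuse the cache
        }
    }
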
Example #6
        public void transitionAction(ref System.Windows.Forms.Panel panel,
                                     System.Windows.Forms.TextBox[] texts, ref int pass)
        {
            switch (state)
            {
            case stIdle:     // nothing
                break;

            case stSonarFiring:
                if (Cart.flagSonarArrayFiringDone)
                {
                    Cart.flagSonarArrayFiringDone = false;
                    // changing state
                    state = stSonarDataTransmission;
                    Cart.requestSensorData();
                }
                break;

            case stSonarDataTransmission:
                if (Cart.flagSensorDataAcquired)
                {
                    Cart.fillSonarTextBoxes(texts);

                    state = stFrameTransmission;
                    Cart.retrieveAbstraction();
                }
                break;

            case stFrameTransmission:
                if (Cart.flagAbstractionReady)
                {
                    Cart.drawAbstraction(panel);
                    state = stAbilityExecuting;
                    // running the cognitive array now
                    double[][] inputVecs = Cart.makeInputVector();
                    pass++;
                    int output = (int)MetaNode.getOutput(Cart.CogTop, inputVecs, pass);
                    if (output < 11)
                    {
                        Cart.useAbility((t_CartAbility)output);
                    }
                    else
                    {
                        state = stIdle;
                    }
                }
                break;

            case stAbilityExecuting:
                if (Cart.flagAbilityDone)
                {
                    Cart.flagAbilityDone = false;
                    state = stIdle;
                }
                break;
            }
        }
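transitionAction is a polled state machine: every call checks the robot's completion flags and advances the state from idle through sonar firing, sensor transmission, frame transmission and ability execution. A compact sketch of the same polling pattern with an explicit enum; the names are hypothetical, not the robot classes from this listing:

    enum SketchState { Idle, Sensing, Acting }

    class PolledFsmSketch
    {
        public SketchState State = SketchState.Idle;
        public bool SensingDone, ActionDone;

        // called periodically (e.g. from a UI timer), like transitionAction above
        public void Step()
        {
            switch (State)
            {
            case SketchState.Idle:     // nothing to do
                break;

            case SketchState.Sensing:
                if (SensingDone) { SensingDone = false; State = SketchState.Acting; }
                break;

            case SketchState.Acting:
                if (ActionDone)  { ActionDone = false;  State = SketchState.Idle; }
                break;
            }
        }
    }
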
Example #7
        public RobosapienV2(EZRoboNetDevice netdevice, byte avatarnodeid, Random randgen)
            : base(netdevice, avatarnodeid, randgen)
        {
            flagAbstractionDataReady = false;
            flagAbstractionReady     = false;
            AbstractionBytes         = 0;

            // creating the cognitive array
            CogTop = createRSV2CognitiveArray(ref CogLimbs, ref CogCamFrameLines);


            state = stIdle;

            RawAbstraction = new byte[ABSTRACTION_FRAME_HEIGHT * ABSTRACTION_FRAME_WIDTH * 3];
        }
Example #8
        // Single Leaf level creation (MUST be connected to the raw/processed sensory input)
        public static MetaNode createTreeLeaf(int leafinputnum, int inputrange,
                                              int stannlayernum, int stannneuronnum, int leafoutputnum,
                                              double stannthreshold, double stannlr,
                                              MetaNode[] parents, Random rand, int leafindex)
        {
            MetaNode leaf;

            // creating node


            leaf = new MetaNode(leafinputnum, null, 0, parents, 0,
                                leafoutputnum, inputrange, stannlayernum,
                                stannneuronnum, stannthreshold, stannlr,
                                rand, true, false, 0, 0, 0, leafindex);


            return(leaf);
        }
Example #9
        // Higher or intermediate level single node creation
        public static MetaNode createHigherLevelNode(MetaNode[] children, int childrennum,
                                                     int stannlayernum, int stannneuronnum, int nodeoutputnum,
                                                     double stannthreshold, double stannlr,
                                                     MetaNode[] parents, int parentnum,
                                                     Random rand, Boolean selftrain,
                                                     Boolean supervised, double alpha, double gamma, int level)
        {
            MetaNode node;

            // creating node

            node = new MetaNode(0, children, childrennum, parents,
                                parentnum, nodeoutputnum, 0,
                                stannlayernum, stannneuronnum, stannthreshold,
                                stannlr, rand, selftrain, supervised, level, alpha, gamma, 0);

            return(node);
        }
Example #10
        // static methods that create a tree or a branch

        // Leaf level creation (MUST be connected to the raw/processed sensory input)
        public static MetaNode[] createTreeLeaves(int leavesnum, int leafinputnum, int inputrange,
                                                  int stannlayernum, int stannneuronnum, int leafoutputnum,
                                                  double stannthreshold, double stannlr,
                                                  MetaNode[] parents, Random rand, int startindex)
        {
            MetaNode[] leaves = new MetaNode[leavesnum];

            // creating each node individually
            int i;

            for (i = 0; i < leavesnum; i++)
            {
                leaves[i] = new MetaNode(leafinputnum, null, 0, parents, 0,
                                         leafoutputnum, inputrange, stannlayernum,
                                         stannneuronnum, stannthreshold, stannlr,
                                         rand, true, false, 0, 0, 0, startindex + i);
            }

            return(leaves);
        }
        public void transitionAction(ref System.Windows.Forms.Panel panel,
                                     System.Windows.Forms.TextBox[] texts, ref int pass)
        {
            switch (state)
            {
            case stIdle:     // nothing
                break;

            case stSensorDataTransmission:
                if (Robosapien.flagSensorDataAcquired)
                {
                    Robosapien.fillSensorTexts(texts);

                    state = stFrameTransmission;
                    Robosapien.retrieveAbstraction();
                }
                break;

            case stFrameTransmission:
                if (Robosapien.flagAbstractionReady)
                {
                    Robosapien.drawAbstraction(panel);
                    state = stAbilityExecuting;
                    // running the cognitive array now
                    double[][] inputVecs = Robosapien.makeInputVector();
                    pass++;
                    int output = (int)MetaNode.getOutput(Robosapien.CogTop, inputVecs, pass);

                    Robosapien.useAbility((t_RSV2Ability)output);
                }
                break;

            case stAbilityExecuting:
                if (Robosapien.flagAbilityDone)
                {
                    Robosapien.flagAbilityDone = false;
                    state = stIdle;
                }
                break;
            }
        }
Example #12
        // Higher or intermediate level nodes creation
        public static MetaNode[] createHigherLevelNodes(int nodenum, MetaNode[] children, int childrennum,
                                                        int stannlayernum, int stannneuronnum, int nodeoutputnum,
                                                        double stannthreshold, double stannlr,
                                                        MetaNode[] parents, int parentnum,
                                                        Random rand, Boolean selftrain,
                                                        Boolean supervised, double alpha, double gamma, int level)
        {
            MetaNode[] nodes = new MetaNode[nodenum];
            int        i;

            // creating each node individually
            for (i = 0; i < nodenum; i++)
            {
                nodes[i] = new MetaNode(0, children, childrennum, parents,
                                        parentnum, nodeoutputnum, 0,
                                        stannlayernum, stannneuronnum, stannthreshold,
                                        stannlr, rand, selftrain, supervised, level, alpha, gamma, 0);
            }

            return(nodes);
        }
Example #13
        public static double[] getNodeInputNoUpdate(MetaNode mnet, double[][] inputvec, int pass)
        {
            int i;

            double[] sigmoids;
            double[] theinput;

            if (mnet.ChildrenNum == 0)
            {
                // the node's input should be the input vector corresponding to the leaf
                theinput = inputvec[mnet.LeafIndex];
            }
            else
            {
                theinput = new double[mnet.InputNum];

                for (i = 0; i < mnet.InputNum; i++)
                {
                    theinput[i] = getOutputNoUpdate(mnet.Children[i], inputvec, pass);
                }
            }

            return(theinput);
        }
Example #14
        // This method creates a Metanetwork cognitive array
        public static MetaNode createRSV2CognitiveArray(ref MetaNode limbs, ref MetaNode[] camlines)
        {
            MetaNode[] children;

            // creating Leaves
            // 1. Creating 1st level (0) STANN (1 MetaNode) for Right/Left Arm/Foot Sensors, including the pickups (Limbs)
            MetaNode Limbs = MetaNode.createTreeLeaf(12,      // number of inputs for each leaf
                                                     4,       // range of input (0-3)
                                                     3,       // 3 STANN Layers
                                                     20,      // Number of Neurons in each layer (except the output layer)
                                                     4,       // Number of binary outputs of the node
                                                     0.5,     // STANN Threshold
                                                     0.6,     // STANN Learning Rate (Quick)
                                                     null,    // parents should be linked here
                                                     RandGen, // random number generator
                                                     0        // leaf index is 0
                                                     );


            // 4. Creating 1st level STANN MetaNode for the Camera Input (8x8 frame abstraction)
            MetaNode[] CamFrameLines = MetaNode.createTreeLeaves(8,       // 8 metanodes, 1 for each line
                                                                 8,       //  8 line inputs
                                                                 2,       // thresholded input range (0 = black, 1 = white)
                                                                 3,       // 3 STANN Layers
                                                                 20,      // 20 neurons per layer (except output)
                                                                 3,       // 3 STANN binary outputs
                                                                 0.5,     // STANN threshold
                                                                 0.5,     // STANN Learning Rate (Quick)
                                                                 null,    // Parents are null for now...
                                                                 RandGen, // Random Number Generator
                                                                 2        // the starting index for these leaves is 2
                                                                 );

            // Creating TOP node (although if things go well, we may create a level before the top node)
            children    = new MetaNode[9];
            children[0] = Limbs;
            int i;

            for (i = 0; i < 8; i++)
            {
                children[1 + i] = CamFrameLines[i];
            }

            // ATTENTION! this node will use its own output as an input, so it must include
            // itself in the children array after creation

            MetaNode Top = MetaNode.createHigherLevelNode(children, //  children array
                                                          2,        // 2 children
                                                          3,        // 3 STANN Layers
                                                          30,       // 30 neurons per layer
                                                          6,        // 6 binary outputs (may have to reduce/increase it)
                                                          0.5,      // STANN threshold
                                                          0.7,      // fast learning rate
                                                          null,     // NO Parents. we're at the top
                                                          0,        // 0 number of parents
                                                          RandGen,  // Random Number Generator
                                                          false,    // node is NOT self trained
                                                          false,    // Q-Learning disabled (for now...)
                                                          0.3,      // Q-learning α param is 0.3
                                                          0.6,      // Q-learning γ param is 0.6
                                                          1         // node level 1
                                                          );

            // *** ADDING self into the children list
            Top.addChild(Top);


            // Parent entries of the MetaNodes are wired bottom-up automatically:
            // the MetaNode constructor calls addParent(this) on each of its children

            limbs    = Limbs;
            camlines = CamFrameLines;

            return(Top);
        }
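Top.addChild(Top) makes the top node one of its own children. Because getOutput feeds a child at the same or higher NodeLevel its PreviousOutput instead of re-evaluating it, the top node effectively receives its own result from the previous pass as an extra input, a one-step feedback loop. A minimal sketch of that idea, assuming nothing beyond what the listed code shows:

    // One-step output feedback, as produced by Top.addChild(Top):
    // the node's previous result becomes one of its current inputs.
    class RecurrentNodeSketch
    {
        public double PreviousOutput = 0;

        public double Evaluate(double[] externalInputs)
        {
            double sum = PreviousOutput;   // the self-child contributes last pass's output
            foreach (double x in externalInputs)
            {
                sum += x;                  // stand-in for the real STANN forward pass
            }
            PreviousOutput = sum;          // remembered for the next pass
            return sum;
        }
    }
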
        // constructor
        public LMMobileRobot(EZRoboNetDevice netdevice, byte avatarnodeid, Random randgen)
            : base(netdevice, avatarnodeid, randgen)
        {
            flagAbstractionDataReady = false;
            flagAbstractionReady = false;

            flagSensorDataAcquired = false;

            AbstractionBytes = 0;

            // creating the sonar array
            SonarArray = new ushort[8];
            PrevSonarArray = new ushort[8];

            // creating the cognitive array
            CogTop = createLMCartCognitiveArray(ref CogSonarNode, ref CogCamLinesNodes);

            state = stIdle;

            flagSonarArrayFiring = flagSonarArrayFiringDone = false;
            flagAbilityDone = flagAbilityExecuting = false;

            RawAbstraction = new byte[ABSTRACTION_FRAME_HEIGHT * ABSTRACTION_FRAME_WIDTH * 3];
        }
Example #16
        // This method creates a Metanetwork cognitive array
        public static MetaNode createLMCartCognitiveArray(ref MetaNode sonarnode, ref MetaNode[] camlinesnodes)
        {
            MetaNode[] children;

            // creating Leaves
            // 1. Creating 1st level (0) STANN (1 MetaNode) for Sonar Sensors
            MetaNode TransducersNode = MetaNode.createTreeLeaf(8,       // number of inputs
                                                               4,       // range of input (0-3)
                                                               3,       // 3 STANN Layers
                                                               20,      // Number of Neurons in each layer (except the output layer)
                                                               5,       // Number of binary outputs of the node
                                                               0.5,     // STANN Threshold
                                                               0.6,     // STANN Learning Rate (Quick)
                                                               null,    // parents should be linked here
                                                               RandGen, // random number generator
                                                               0        // leaf index is 0
                                                               );


            // 4. Creating 1st level STANN MetaNode for the Camera Input (8x8 frame abstraction)
            MetaNode[] CamFrameLinesNodes = MetaNode.createTreeLeaves(8,       // 8 metanodes, 1 for each line
                                                                      8,       //  8 line inputs
                                                                      2,       // thresholded input range (0 = black, 1 = white)
                                                                      3,       // 3 STANN Layers
                                                                      20,      // 20 neurons per layer (except output)
                                                                      3,       // 3 STANN binary outputs
                                                                      0.5,     // STANN threshold
                                                                      0.5,     // STANN Learning Rate (Quick)
                                                                      null,    // Parents are null for now...
                                                                      RandGen, // Random Number Generator
                                                                      2        // the starting index for these leaves is 2
                                                                      );

            // Creating TOP node (although if things go well, we may create a level before the top node)
            children    = new MetaNode[10];
            children[0] = TransducersNode;
            int i;

            for (i = 0; i < 8; i++)
            {
                children[1 + i] = CamFrameLinesNodes[i];
            }

            // ATTENTION! this node will use its own output as an input, so it must include
            // itself in the children array after creation

            MetaNode Top = MetaNode.createHigherLevelNode(children, //  children array
                                                          3,        // 3 children
                                                          3,        // 3 STANN Layers
                                                          25,       // 25 neurons per layer
                                                          4,        // 4 binary outputs
                                                          0.5,      // STANN threshold
                                                          0.5,      // fast learning rate
                                                          null,     // NO Parents. we're at the top
                                                          0,        // 0 number of parents
                                                          RandGen,  // Random Number Generator
                                                          false,    // node is NOT self trained
                                                          true,     // Q-Learning enabled
                                                          0.3,      // Q-learning α param is 0.3
                                                          0.6,      // Q-learning γ param is 0.6
                                                          1         // node level 1
                                                          );

            // *** ADDING self into the children list
            Top.addChild(Top);



            sonarnode     = TransducersNode;
            camlinesnodes = CamFrameLinesNodes;

            return(Top);
        }
Example #17
        // return the output of a MetaNode branch given the input vector to the leaves

        /*
         * public static double getOutput(MetaNode mnet, double[][] inputvec, int pass)
         * {
         *  int i;
         *  double[] sigmoids;
         *  double theoutput;
         *
         *  if (mnet.ChildrenNum == 0)
         *  {
         *      if (mnet.OutputPass != pass)
         *      {
         *          mnet.OutputPass = pass;
         *          // self training if the node has its ForceSelfTrain attribute set to true
         *          if (mnet.ForcedSelfTrain)
         *              // self training
         *              mnet.stann.selfTrain(inputvec[mnet.LeafIndex]);
         *
         *          // retrieving the sigmoids of the node
         *          sigmoids = mnet.stann.sigmoidLayerOutputs(inputvec[mnet.LeafIndex], mnet.stann.LayerNum - 1);
         *          // calculating the decimal equivalent to the ordered thresholded sigmoid outputs
         *          theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);
         *
         *          mnet.PreviousOutput = mnet.CurrentOutput;
         *          mnet.CurrentOutput = theoutput;
         *      }
         *      else
         *          theoutput = mnet.CurrentOutput;
         *
         *  }
         *  else
         *  {
         *      if (mnet.OutputPass != pass)
         *      {
         *          mnet.OutputPass = pass;
         *          double[] levelinput = new double[mnet.InputNum];
         *
         *          for (i = 0; i < mnet.InputNum; i++)
         *              if (mnet.Children[i].NodeLevel >= mnet.NodeLevel) // requesting output from a higher (or equal ) level node
         *                  levelinput[i] = mnet.Children[i].PreviousOutput; // avoiding circular reference in recursion
         *              else
         *                  levelinput[i] = getOutput(mnet.Children[i], inputvec, pass);
         *
         *          // self training if the ForcedSelfTrain attribute is on
         *          if (mnet.ForcedSelfTrain) mnet.stann.selfTrain(levelinput);
         *          // retrieving sigmoids
         *          sigmoids = mnet.stann.sigmoidLayerOutputs(levelinput, mnet.stann.LayerNum - 1);
         *          // calculating the decimal equivalent to the thresholded outputs
         *          theoutput = 0;
         *          for (i = 0; i < mnet.stann.OutputNum; i++)
         *          {
         *              int bit = (sigmoids[i] < 0.5) ? 0 : 1;
         *              theoutput += (int)Math.Pow(2, i) * bit;
         *          }
         *          // updating previous Input Vector and Previous Output properties of the metanode
         *          int t;
         *          mnet.PreviousInputVec = new double[mnet.InputNum];
         *          for (t = 0; t < mnet.InputNum; t++)
         *              mnet.PreviousInputVec[t] = levelinput[t];
         *          mnet.PreviousOutput = mnet.CurrentOutput;
         *          mnet.CurrentOutput = theoutput;
         *          // previous input vector and output updated with the new values
         *
         *          // Must now train the network!!!! (in case the qlearning property is on)
         *          if (mnet.ForcedQLearning)
         *          {
         *              // mapping the input to the proper index in the reward table
         *              int inputindex = 0;
         *              for (t = 0; t < mnet.InputNum; t++)
         *                  inputindex += (int)(Math.Pow(mnet.InputRange, t) * levelinput[t]);
         *              // finding the output that corresponds to the maximum Q-value for the given input
         *              double maxQvalue = mnet.getMaximumQValue(inputindex);
         *              int maxQvalueOutputindex = 0;
         *              while (mnet.QTable[inputindex][maxQvalueOutputindex] != maxQvalue)
         *                  maxQvalueOutputindex++;
         *
         *              // converting the maximum Q value output to a vector of binary digits
         *              double[] desiredOutput = mnet.stann.int2BinaryVector(maxQvalueOutputindex);
         *              // now training...
         *              mnet.stann.backPropagate(levelinput, desiredOutput);
         *              // updating the IO log
         *              if (mnet.IOLogLength == MAX_IO_LOG_LENGTH)
         *              { // IO Log is full
         *                  // clearing the log and starting all over again
         *                  mnet.IOLogLength = 1;
         *              }
         *              else
         *                  mnet.IOLogLength++;
         *              // updating the IO log entries
         *              mnet.IOLog[mnet.IOLogLength - 1].input = inputindex;
         *              mnet.IOLog[mnet.IOLogLength - 1].output = (int)theoutput;
         *
         *          }
         *      }
         *      else
         *          theoutput = mnet.CurrentOutput;
         *
         *  }
         *
         *  return theoutput;
         * }
         */



        public static double getOutput(MetaNode mnet, double[][] inputvec, int pass)
        {
            int i;

            double[] sigmoids;
            double   theoutput;

            if (mnet.ChildrenNum == 0)
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    // self training if the node has its ForceSelfTrain attribute set to true
                    if (mnet.ForcedSelfTrain)
                    {
                        // self training
                        mnet.stann.selfTrain(inputvec[mnet.LeafIndex]);
                    }

                    // retrieving the sigmoids of the node
                    sigmoids = mnet.stann.sigmoidLayerOutputs(inputvec[mnet.LeafIndex], mnet.stann.LayerNum - 1);
                    // calculating the decimal equivalent to the ordered thresholded sigmoid outputs
                    theoutput = STANN.sigmoids2Int(sigmoids, mnet.stann.OutputNum);

                    mnet.PreviousOutput = mnet.CurrentOutput;
                    mnet.CurrentOutput  = theoutput;
                }
                else
                {
                    theoutput = mnet.CurrentOutput;
                }
            }
            else
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    double[] levelinput = new double[mnet.InputNum];

                    for (i = 0; i < mnet.InputNum; i++)
                    {
                        if (mnet.Children[i].NodeLevel >= mnet.NodeLevel)    // requesting output from a higher (or equal ) level node
                        {
                            levelinput[i] = mnet.Children[i].PreviousOutput; // avoiding circular reference in recursion
                        }
                        else
                        {
                            levelinput[i] = getOutput(mnet.Children[i], inputvec, pass);
                        }
                    }

                    // self training if the ForcedSelfTrain attribute is on
                    if (mnet.ForcedSelfTrain)
                    {
                        mnet.stann.selfTrain(levelinput);
                    }
                    // retrieving sigmoids
                    sigmoids = mnet.stann.sigmoidLayerOutputs(levelinput, mnet.stann.LayerNum - 1);

                    // calculating the decimal equivalent to the thresholded outputs

                    theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);
                    // updating previous Input Vector and Previous Output properties of the metanode
                    int t;
                    mnet.PreviousInputVec = new double[mnet.InputNum];
                    for (t = 0; t < mnet.InputNum; t++)
                    {
                        mnet.PreviousInputVec[t] = levelinput[t];
                    }


                    mnet.PreviousOutput = mnet.CurrentOutput;
                    mnet.CurrentOutput  = theoutput;
                    // previous input vector and output updated with the new values

                    // Must now train the network!!!! (in case the qlearning property is on)
                    if (mnet.ForcedQLearning)
                    {
                        // mapping the input to the proper index in the reward table
                        int inputindex = STANN.mapVector2Int(levelinput, mnet.InputRange, mnet.InputNum);

                        // finding the output that corresponds to the maximum Q-value for the given input

                        QTableEntry maxQvalueEntry = QTableEntry.getMaxQValue(mnet.QTable, inputindex);


                        if (maxQvalueEntry != null)
                        {
                            // converting the maximum Q value output to a vector of binary digits
                            double[] desiredOutput = STANN.mapInt2VectorDouble(maxQvalueEntry.Output, 2, mnet.stann.OutputNum);
                            // now training...
                            mnet.stann.backPropagate(levelinput, desiredOutput);
                        }
                        // updating the IO log
                        if (mnet.IOLogLength == MAX_IO_LOG_LENGTH)
                        { // IO Log is full
                            // clearing the log and starting all over again
                            mnet.IOLogLength = 1;
                        }
                        else
                        {
                            mnet.IOLogLength++;
                        }

                        // updating the IO log entries
                        mnet.IOLog[mnet.IOLogLength - 1].input  = inputindex;
                        mnet.IOLog[mnet.IOLogLength - 1].output = (int)theoutput;
                    }
                }
                else
                {
                    theoutput = mnet.CurrentOutput;
                }
            }

            return(theoutput);
        }
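In the Q-learning branch above, the node encodes its input vector as a table index (mapVector2Int with InputRange as the radix), asks QTableEntry.getMaxQValue for the best-valued entry at that index, and backpropagates toward the binary encoding of that entry's output. A hedged sketch of the greedy-target step, assuming a plain jagged array in place of the project's QTableEntry type:

    // Greedy target from a Q-table row: pick the best-valued output for this input
    // index and turn it into a base-2 target vector for backpropagation (LSB first).
    static double[] GreedyTargetSketch(double[][] qTable, int inputIndex, int outputBits)
    {
        double[] row  = qTable[inputIndex];
        int      best = 0;
        for (int a = 1; a < row.Length; a++)
        {
            if (row[a] > row[best])
            {
                best = a;                    // argmax over the possible outputs
            }
        }

        double[] target = new double[outputBits];
        for (int i = 0; i < outputBits; i++)
        {
            target[i] = (best >> i) & 1;     // binary digits of the chosen output
        }
        return target;
    }
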
        // Higher or intermediate level single node creation
        public static MetaNode createHigherLevelNode(MetaNode[] children, int childrennum,
                                               int stannlayernum, int stannneuronnum, int nodeoutputnum,
                                               double stannthreshold, double stannlr,
                                               MetaNode[] parents, int parentnum,
                                               Random rand, Boolean selftrain,
                                               Boolean supervised, double alpha, double gamma, int level)
        {
            MetaNode node;

            // creating node

            node = new MetaNode(0, children, childrennum, parents,
                                           parentnum, nodeoutputnum, 0,
                                           stannlayernum, stannneuronnum, stannthreshold,
                                           stannlr, rand, selftrain, supervised, level, alpha, gamma, 0);

            return node;
        }
        public static double getOutputNoUpdate(MetaNode mnet, double[][] inputvec, int pass)
        {
            int i;
            double[] sigmoids;
            double theoutput;

            if (mnet.ChildrenNum == 0)
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    // no self-training here: the no-update variant leaves the node untouched

                    // retrieving the sigmoids of the node
                    sigmoids = mnet.stann.sigmoidLayerOutputs(inputvec[mnet.LeafIndex], mnet.stann.LayerNum - 1);
                    // calculating the decimal equivalent to the ordered thresholded sigmoid outputs
                    theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);

                }
                else
                    theoutput = mnet.NoUpdatePassOutput;

            }
            else
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    double[] levelinput = new double[mnet.InputNum];

                    for (i = 0; i < mnet.InputNum; i++)
                        if (mnet.Children[i].NodeLevel >= mnet.NodeLevel) // requesting output from a higher (or equal ) level node
                            levelinput[i] = mnet.Children[i].PreviousOutput; // avoiding circular reference in recursion
                        else
                            levelinput[i] = getOutputNoUpdate (mnet.Children[i], inputvec, pass);

                    // retrieving sigmoids
                    sigmoids = mnet.stann.sigmoidLayerOutputs(levelinput, mnet.stann.LayerNum - 1);

                    // calculating the decimal equivalent to the thresholded outputs

                    theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);

                }
                else
                    theoutput = mnet.CurrentOutput;

            }

            return theoutput;
        }
        // Higher or intermediate level nodes creation
        public static MetaNode[] createHigherLevelNodes(int nodenum, MetaNode[] children, int childrennum,
                                               int stannlayernum, int stannneuronnum, int nodeoutputnum,
                                               double stannthreshold, double stannlr,
                                               MetaNode[] parents, int parentnum,
                                               Random rand, Boolean selftrain,
                                               Boolean supervised, double alpha, double gamma, int level)
        {
            MetaNode[] nodes = new MetaNode[nodenum];
            int i;
            // creating each node individually
            for (i = 0; i < nodenum; i++)
                nodes[i] = new MetaNode(0, children, childrennum, parents,
                                           parentnum, nodeoutputnum, 0,
                                           stannlayernum, stannneuronnum, stannthreshold,
                                           stannlr, rand, selftrain, supervised, level, alpha, gamma, 0);

            return nodes;
        }
        // Single Leaf level creation (MUST be connected to the raw/processed sensory input)
        public static MetaNode createTreeLeaf(int leafinputnum, int inputrange,
                                                   int stannlayernum, int stannneuronnum, int leafoutputnum,
                                                   double stannthreshold, double stannlr,
                                                   MetaNode[] parents, Random rand, int leafindex)
        {
            MetaNode leaf;

            // creating node

            leaf = new MetaNode(leafinputnum, null, 0, parents, 0,
                                leafoutputnum, inputrange, stannlayernum,
                                stannneuronnum, stannthreshold, stannlr,
                                rand, true, false, 0, 0, 0, leafindex);

            return leaf;
        }
Example #22
        // adds a child to the node. The STANN MUST be recreated (due to the change in the number of inputs)
        public void addChild(MetaNode child)
        {
            int i;

            MetaNode[] temp;

            if (ChildrenNum == 0)
            {
                // increase the number of children
                ChildrenNum++;
                // increase the number of inputs as well!
                InputNum++;
                Children = new MetaNode[ChildrenNum];
            }
            else
            {
                temp = new MetaNode[ChildrenNum];
                for (i = 0; i < ChildrenNum; i++)
                {
                    temp[i] = Children[i];
                }
                // increase number of children
                ChildrenNum++;
                // increase number of inputs as well!
                InputNum++;
                Children = new MetaNode[ChildrenNum];
                for (i = 0; i < ChildrenNum - 1; i++)
                {
                    Children[i] = temp[i];
                }
            }
            Children[ChildrenNum - 1] = child;

            int newinputrange, curoutputnum = stann.OutputNum;

            if (stann.InputRange < (int)Math.Pow(2, child.stann.OutputNum))
            {
                newinputrange = (int)Math.Pow(2, child.stann.OutputNum);
                InputRange    = newinputrange;
            }
            else
            {
                newinputrange = stann.InputRange;
                InputRange    = stann.InputRange;
            }

            // recreating the STANN
            stann = new STANN(getInputNum(), newinputrange, LayerNum,
                              NeuronNum, curoutputnum, Threshold, LR, rnd, ForcedSelfTrain);

            if (ForcedQLearning)
            {
                // Re-initializing the Reward Table and table of Q-values for possible Q-learning use (or abuse :))

                /*
                 * int possibleinputs = (int)Math.Pow(InputRange, InputNum);
                 * int possibleoutputs = (int)Math.Pow(2, stann.OutputNum);
                 *
                 * QTable = new double[possibleinputs][];
                 * RewardTable = new double[possibleinputs];
                 * for (i = 0; i < possibleinputs; i++)
                 * {
                 *   QTable[i] = new double[possibleoutputs];
                 *   RewardTable[i] = 0;
                 *   for (j = 0; j < possibleoutputs; j++)
                 *       QTable[i][j] = 0;
                 * }
                 */
                RewardTable = null;
                QTable      = null;
            }
            // Re-creating the previous input vector
            PreviousInputVec = new double[InputNum];
            for (i = 0; i < InputNum; i++)
            {
                PreviousInputVec[i] = 0;
            }
        }
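Before rebuilding the STANN, addChild widens InputRange so it covers every value the new child can emit (2 raised to the child's OutputNum). The core of that update as a small illustrative helper, not part of the original class:

    // InputRange after adding a child: wide enough for the child's largest output value.
    static int WidenInputRange(int currentRange, int childOutputBits)
    {
        int childRange = 1 << childOutputBits;  // same as (int)Math.Pow(2, childOutputBits)
        return (currentRange < childRange) ? childRange : currentRange;
    }
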
        // static methods that create a tree or a branch
        // Leaf level creation (MUST be connected to the raw/processed sensory input)
        public static MetaNode[] createTreeLeaves(int leavesnum, int leafinputnum, int inputrange,
                                                   int stannlayernum, int stannneuronnum, int leafoutputnum,
                                                   double stannthreshold, double stannlr,
                                                   MetaNode[] parents, Random rand, int startindex)
        {
            MetaNode[] leaves = new MetaNode[leavesnum];

            // creating each node individually
            int i;
            for (i = 0; i < leavesnum; i++)
            {
                leaves[i] = new MetaNode(leafinputnum, null, 0, parents, 0,
                                            leafoutputnum, inputrange, stannlayernum,
                                            stannneuronnum, stannthreshold, stannlr,
                                            rand, true, false, 0, 0, 0, startindex+i);
            }

            return leaves;
        }
        public MetaNode(int rawinputnum, MetaNode[] children, int childrennum,
                            MetaNode[] parents, int parentnum, int nodeoutputnum,
                            int rawinputrange, int layernum, int neuronnum,
                            double threshold, double lr, Random rand,
                            Boolean forceSelfTrain, Boolean forcedQLearning,
                            int nodelevel, double alpha, double gamma, int leafindex)
        {
            int i;
            RawInputNum = rawinputnum;
            ChildrenNum = childrennum;
            LayerNum = layernum;
            NeuronNum = neuronnum;
            Threshold = threshold;
            LR = lr;
            rnd = rand;
            ParentNum = parentnum;
            ForcedSelfTrain = forceSelfTrain;
            ForcedQLearning = forcedQLearning;
            NodeLevel = nodelevel;
            Qalpha = alpha;
            Qgamma = gamma;
            LeafIndex = leafindex;
            // copying children array. also figuring out the input range for the stann
            int maxrange = rawinputrange;
            Children = new MetaNode[ChildrenNum];
            for (i = 0; i < ChildrenNum; i++)
            {
                Children[i] = children[i];
                Children[i].addParent(this);

                maxrange = (maxrange < Math.Pow(2, Children[i].stann.OutputNum)) ? (int)Math.Pow(2, Children[i].stann.OutputNum) : maxrange;
            }
            InputRange = maxrange;
            // copying the parent array
            Parents = new MetaNode[ParentNum];
            for (i = 0; i < ParentNum; i++)
                Parents[i] = parents[i];

            InputNum = getInputNum();
            // now creating the STANN or the ANN of the node
                stann = new STANN(InputNum, InputRange, LayerNum,
                                  NeuronNum, nodeoutputnum, Threshold, LR, rnd, ForcedSelfTrain);
            // initializing previous input vector and previous output properties to zero
            CurrentOutput = PreviousOutput = 0;
            OutputPass = 0;
            PreviousInputVec = new double[InputNum];
            for (i = 0; i < InputNum; i++)
                PreviousInputVec[i] = 0;

            // initializing the Reward Table and table of Q-values for possible Q-learning use (or abuse :))
            // if the ForcedQLearning Property is set
            if (ForcedQLearning)
            {/*
                int possibleinputs = (int)Math.Pow(InputRange, InputNum);
                int possibleoutputs = (int)Math.Pow(2, stann.OutputNum);

                QTable = new double[possibleinputs][];
                RewardTable = new double[possibleinputs];
                for (i = 0; i < possibleinputs; i++)
                {
                    QTable[i] = new double[possibleoutputs];
                    RewardTable[i] = 0;
                    for (j = 0; j < possibleoutputs; j++)
                        QTable[i][j] = 0;
                } */

                RewardTable = null;
                QTable = null;
                // initializing the IO log
                IOLog = new NodeIOLogEntry[MAX_IO_LOG_LENGTH];
                IOLogLength = 0;
            }
        }
        public static double[] getNodeInputNoUpdate(MetaNode mnet, double[][] inputvec, int pass)
        {
            int i;
            double[] sigmoids;
            double[] theinput;

            if (mnet.ChildrenNum == 0)
                // the node's input should be the input vector corresponding to the leaf
                theinput = inputvec[mnet.LeafIndex];
            else
            {
                theinput = new double[mnet.InputNum];

                for (i=0; i<mnet.InputNum; i++)
                    theinput[i] = getOutputNoUpdate(mnet.Children[i], inputvec, pass);
            }

            return theinput;
        }
        // adds a child to the node. The STANN MUST be recreated (due to the change in the number of inputs)
        public void addChild(MetaNode child)
        {
            int i;
            MetaNode[] temp;

            if (ChildrenNum == 0)
            {
                // increase the number of children
                ChildrenNum++;
                // increase the number of inputs as well!
                InputNum++;
                Children = new MetaNode[ChildrenNum];
            }
            else
            {
                temp = new MetaNode[ChildrenNum];
                for (i = 0; i < ChildrenNum; i++)
                    temp[i] = Children[i];
                // increase number of children
                ChildrenNum++;
                // increase number of inputs as well!
                InputNum++;
                Children = new MetaNode[ChildrenNum];
                for (i = 0; i < ChildrenNum - 1; i++)
                    Children[i] = temp[i];
            }
            Children[ChildrenNum - 1] = child;

            int newinputrange, curoutputnum = stann.OutputNum;
            if (stann.InputRange < (int)Math.Pow(2, child.stann.OutputNum))
            {
                newinputrange = (int)Math.Pow(2, child.stann.OutputNum);
                InputRange = newinputrange;
            }
            else
            {
                newinputrange = stann.InputRange;
                InputRange = stann.InputRange;
            }

            // recreating the STANN
            stann = new STANN(getInputNum(), newinputrange, LayerNum,
                              NeuronNum, curoutputnum, Threshold, LR, rnd, ForcedSelfTrain);

            if (ForcedQLearning)
            {
                // Re-initializing the Reward Table and table of Q-values for possible Q-learning use (or abuse :))
               /*
                int possibleinputs = (int)Math.Pow(InputRange, InputNum);
                int possibleoutputs = (int)Math.Pow(2, stann.OutputNum);

                QTable = new double[possibleinputs][];
                RewardTable = new double[possibleinputs];
                for (i = 0; i < possibleinputs; i++)
                {
                    QTable[i] = new double[possibleoutputs];
                    RewardTable[i] = 0;
                    for (j = 0; j < possibleoutputs; j++)
                        QTable[i][j] = 0;
                }
                */
                RewardTable = null;
                QTable = null;
            }
            // Re-creating the previous input vector
            PreviousInputVec = new double[InputNum];
            for (i = 0; i < InputNum; i++)
                PreviousInputVec[i] = 0;
        }
        // This method creates a Metanetwork cognitive array
        public static MetaNode createLMCartCognitiveArray(ref MetaNode sonarnode, ref MetaNode[] camlinesnodes)
        {
            MetaNode[] children;

            // creating Leaves
            // 1. Creating 1st level (0) STANN (1 MetaNode) for Sonar Sensors
            MetaNode TransducersNode = MetaNode.createTreeLeaf(8,       // number of inputs
                                                               4,       // range of input (0-3)
                                                               3,       // 3 STANN Layers
                                                               20,      // Number of Neurons in each layer (except the output layer)
                                                               5,       // Number of binary outputs of the node
                                                               0.5,     // STANN Threshold
                                                               0.6,     // STANN Learning Rate (Quick)
                                                               null,    // parents should be linked here
                                                               RandGen, // random number generator
                                                               0        // leaf index is 0
                                                               );

            // 4. Creating 1st level STANN MetaNode for the Camera Input (8x8 frame abstraction)
            MetaNode[] CamFrameLinesNodes = MetaNode.createTreeLeaves(8,       // 8 metanodes, 1 for each line
                                                                      8,       // 8 line inputs
                                                                      2,       // thresholded input range (0 = black, 1 = white)
                                                                      3,       // 3 STANN Layers
                                                                      20,      // 20 neurons per layer (except output)
                                                                      3,       // 3 STANN binary outputs
                                                                      0.5,     // STANN threshold
                                                                      0.5,     // STANN Learning Rate (Quick)
                                                                      null,    // Parents are null for now...
                                                                      RandGen, // Random Number Generator
                                                                      2        // the starting index for these leaves is 2
                                                                      );

            // Creating TOP node (although if things go well, we may create a level before the top node)
            children = new MetaNode[10];
            children[0] = TransducersNode;
            int i;
            for (i = 0; i < 8; i++)
                children[1 + i] = CamFrameLinesNodes[i];

            // ATTENTION! this node will use its own output as an input, so it must include
            // itself in the children array after creation

            MetaNode Top = MetaNode.createHigherLevelNode(children, //  children array
                                                          3,        // 3 children
                                                          3,        // 3 STANN Layers
                                                          25,       // 25 neurons per layer
                                                          4,        // 4 binary outputs
                                                          0.5,      // STANN threshold
                                                          0.5,      // fast learning rate
                                                          null,     // NO Parents. we're at the top
                                                          0,        // 0 number of parents
                                                          RandGen,  // Random Number Generator
                                                          false,    // node is NOT self trained
                                                          true,     // Q-Learning enabled
                                                          0.3,      // Q-learning α param is 0.3
                                                          0.6,      // Q-learning γ param is 0.6
                                                          1         // node level 1
                                                          );
            // *** ADDING self into the children list
            Top.addChild(Top);

            sonarnode = TransducersNode;
            camlinesnodes = CamFrameLinesNodes;

            return Top;
        }
        // adds a parent to the Parents list. No change to the STANN
        public void addParent(MetaNode parent)
        {
            int i;
            MetaNode[] temp;
            if (ParentNum == 0)
            {
                ParentNum++;
                Parents = new MetaNode[ParentNum];
            }
            else
            {
                temp = new MetaNode[ParentNum];
                for (i = 0; i < ParentNum; i++)
                    temp[i] = Parents[i];
                ParentNum++;
                Parents = new MetaNode[ParentNum];
                for (i = 0; i < ParentNum - 1; i++)
                    Parents[i] = temp[i];
            }
            Parents[ParentNum - 1] = parent;
        }
        // return the output of a MetaNode branch given the input vector to the leaves
        /*
        public static double getOutput(MetaNode mnet, double[][] inputvec, int pass)
        {
            int i;
            double[] sigmoids;
            double theoutput;

            if (mnet.ChildrenNum == 0)
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    // self training if the node has its ForceSelfTrain attribute set to true
                    if (mnet.ForcedSelfTrain)
                        // self training
                        mnet.stann.selfTrain(inputvec[mnet.LeafIndex]);

                    // retrieving the sigmoids of the node
                    sigmoids = mnet.stann.sigmoidLayerOutputs(inputvec[mnet.LeafIndex], mnet.stann.LayerNum - 1);
                    // calculating the decimal equivalent to the ordered thresholded sigmoid outputs
                    theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);

                    mnet.PreviousOutput = mnet.CurrentOutput;
                    mnet.CurrentOutput = theoutput;
                }
                else
                    theoutput = mnet.CurrentOutput;

            }
            else
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    double[] levelinput = new double[mnet.InputNum];

                    for (i = 0; i < mnet.InputNum; i++)
                        if (mnet.Children[i].NodeLevel >= mnet.NodeLevel) // requesting output from a higher (or equal ) level node
                            levelinput[i] = mnet.Children[i].PreviousOutput; // avoiding circular reference in recursion
                        else
                            levelinput[i] = getOutput(mnet.Children[i], inputvec, pass);

                    // self training if the ForcedSelfTrain attribute is on
                    if (mnet.ForcedSelfTrain) mnet.stann.selfTrain(levelinput);
                    // retrieving sigmoids
                    sigmoids = mnet.stann.sigmoidLayerOutputs(levelinput, mnet.stann.LayerNum - 1);
                    // calculating the decimal equivalent to the thresholded outputs
                    theoutput = 0;
                    for (i = 0; i < mnet.stann.OutputNum; i++)
                    {
                        int bit = (sigmoids[i] < 0.5) ? 0 : 1;
                        theoutput += (int)Math.Pow(2, i) * bit;
                    }
                    // updating previous Input Vector and Previous Output properties of the metanode
                    int t;
                    mnet.PreviousInputVec = new double[mnet.InputNum];
                    for (t = 0; t < mnet.InputNum; t++)
                        mnet.PreviousInputVec[t] = levelinput[t];
                    mnet.PreviousOutput = mnet.CurrentOutput;
                    mnet.CurrentOutput = theoutput;
                    // previous input vector and output updated with the new values

                    // Must now train the network!!!! (in case the qlearning property is on)
                    if (mnet.ForcedQLearning)
                    {
                        // mapping the input to the proper index in the reward table
                        int inputindex = 0;
                        for (t = 0; t < mnet.InputNum; t++)
                            inputindex += (int)(Math.Pow(mnet.InputRange, t) * levelinput[t]);
                        // finding the output that corresponds to the maximum Q value for the given input
                        double maxQvalue = mnet.getMaximumQValue(inputindex);
                        int maxQvalueOutputindex = 0;
                        while (mnet.QTable[inputindex][maxQvalueOutputindex] != maxQvalue)
                            maxQvalueOutputindex++;

                        // converting the maximum Q value output to a vector of binary digits
                        double[] desiredOutput = mnet.stann.int2BinaryVector(maxQvalueOutputindex);
                        // now training...
                        mnet.stann.backPropagate(levelinput, desiredOutput);
                        // updating the IO log
                        if (mnet.IOLogLength == MAX_IO_LOG_LENGTH)
                        { // IO Log is full
                            // clearing the log and starting all over again
                            mnet.IOLogLength = 1;
                        }
                        else
                            mnet.IOLogLength++;
                        // updating the IO log entries
                        mnet.IOLog[mnet.IOLogLength - 1].input = inputindex;
                        mnet.IOLog[mnet.IOLogLength - 1].output = (int)theoutput;

                    }
                }
                else
                    theoutput = mnet.CurrentOutput;

            }

            return theoutput;
        }
        */
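        // The active version below implements the same algorithm as the commented-out one above,
        // with the manual bit/weight loops replaced by the STANN.mapVector2Int /
        // mapInt2VectorDouble helpers and the manual Q-table scan replaced by
        // QTableEntry.getMaxQValue (guarded against a null result).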
        public static double getOutput(MetaNode mnet, double[][] inputvec, int pass)
        {
            int i;
            double[] sigmoids;
            double theoutput;

            if (mnet.ChildrenNum == 0)
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    // self training if the node has its ForcedSelfTrain attribute set to true
                    if (mnet.ForcedSelfTrain)
                        // self training
                        mnet.stann.selfTrain(inputvec[mnet.LeafIndex]);

                    // retrieving the sigmoids of the node
                    sigmoids = mnet.stann.sigmoidLayerOutputs(inputvec[mnet.LeafIndex], mnet.stann.LayerNum - 1);
                    // calculating the decimal equivalent to the ordered thresholded sigmoid outputs
                    theoutput = STANN.sigmoids2Int(sigmoids, mnet.stann.OutputNum);

                    mnet.PreviousOutput = mnet.CurrentOutput;
                    mnet.CurrentOutput = theoutput;
                }
                else
                    theoutput = mnet.CurrentOutput;

            }
            else
            {
                if (mnet.OutputPass != pass)
                {
                    mnet.OutputPass = pass;
                    double[] levelinput = new double[mnet.InputNum];

                    for (i = 0; i < mnet.InputNum; i++)
                        if (mnet.Children[i].NodeLevel >= mnet.NodeLevel) // requesting output from a higher (or equal ) level node
                            levelinput[i] = mnet.Children[i].PreviousOutput; // avoiding circular reference in recursion
                        else
                            levelinput[i] = getOutput(mnet.Children[i], inputvec, pass);

                    // self training if the ForcedSelfTrain attribute is on
                    if (mnet.ForcedSelfTrain) mnet.stann.selfTrain(levelinput);
                    // retrieving sigmoids
                    sigmoids = mnet.stann.sigmoidLayerOutputs(levelinput, mnet.stann.LayerNum - 1);
                    // calculating the decimal equivalent to the thresholded outputs
                    theoutput = STANN.mapVector2Int(sigmoids, 2, mnet.stann.OutputNum);
                    // updating previous Input Vector and Previous Output properties of the metanode
                    int t;
                    mnet.PreviousInputVec = new double[mnet.InputNum];
                    for (t = 0; t < mnet.InputNum; t++)
                        mnet.PreviousInputVec[t] = levelinput[t];

                    mnet.PreviousOutput = mnet.CurrentOutput;
                    mnet.CurrentOutput = theoutput;
                    // previous input vector and output updated with the new values

                    // Must now train the network (in case the Q-learning property is on)
                    if (mnet.ForcedQLearning)
                    {
                        // mapping the input to the proper index in the reward table
                        int inputindex = STANN.mapVector2Int(levelinput, mnet.InputRange, mnet.InputNum);
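                        // (each levelinput[t] is treated as a base-InputRange digit, so the vector
                        // maps to a unique row index of the Q table; cf. the explicit
                        // Math.Pow(InputRange, t) loop in the commented-out version above)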

                        // finding the output that corresponds to the maximum Q value for the given input

                        QTableEntry maxQvalueEntry = QTableEntry.getMaxQValue(mnet.QTable, inputindex);

                        if (maxQvalueEntry != null)
                        {
                            // converting the maximum Q value output to a vector of binary digits
                            double[] desiredOutput = STANN.mapInt2VectorDouble(maxQvalueEntry.Output, 2, mnet.stann.OutputNum);
                            // now training...
                            mnet.stann.backPropagate(levelinput, desiredOutput);
                        }
                        // updating the IO log
                        if (mnet.IOLogLength == MAX_IO_LOG_LENGTH)
                        { // IO Log is full
                            // clearing the log and starting all over again
                            mnet.IOLogLength = 1;
                        }
                        else
                            mnet.IOLogLength++;

                        // updating the IO log entries
                        mnet.IOLog[mnet.IOLogLength - 1].input = inputindex;
                        mnet.IOLog[mnet.IOLogLength - 1].output = (int)theoutput;
                    }

                }
                else
                    theoutput = mnet.CurrentOutput;

            }

            return theoutput;
        }
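
        // --- Illustration (assumption): the binary decoding that STANN.mapVector2Int appears to
        // perform for range 2, reconstructed from the explicit loop kept in the commented-out
        // version above (threshold each sigmoid at 0.5, weight bit i by 2^i). The real STANN
        // helper also takes an arbitrary range and may differ in detail.
        private static int ThresholdedSigmoidsToInt(double[] sigmoids, int outputNum)
        {
            int value = 0;
            for (int i = 0; i < outputNum; i++)
            {
                int bit = (sigmoids[i] < 0.5) ? 0 : 1;  // threshold the i-th sigmoid output
                value += (int)Math.Pow(2, i) * bit;     // least-significant bit first
            }
            return value;
        }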