// NOTE(review): a stray, byte-identical copy of the constructor used to live here,
// inside the Fields region. Two constructors with the same signature do not compile
// (CS0111: member with the same signature already declared); the canonical definition
// is kept in the Constructors region below.
        private int[] m_prev_st; // decision-space state captured on the previous step; consulted when adding variables and sharing knowledge

        #endregion Fields

        #region Constructors

        /// <summary>
        /// Builds a predictor around the given root decision space: registers the promoted
        /// variable with the base class, then wires up the local decision space, the Q(s,a)
        /// memory and learning algorithm, the action-selection method, and the local
        /// variable-history tracker, and finally seeds the previous-step bookkeeping.
        /// </summary>
        /// <param name="rds">Root decision space shared across predictors.</param>
        /// <param name="myPromotedVariable">Index of the variable this predictor promotes.</param>
        /// <param name="setup">Module-wide configuration parameters.</param>
        /// <param name="label">Human-readable name passed to the base class.</param>
        /// <param name="level">Hierarchy level passed to the base class.</param>
        public MyStochasticReturnPredictor(MyRootDecisionSpace rds, int myPromotedVariable,
            MyModuleParams setup, String label, int level)
            : base(label, level, setup)
        {
            base.AddPromotedVariable(myPromotedVariable, rds);

            // Decision-space wiring.
            Rds = rds;
            this.Ds = new MyDecisionSpace(this, rds, setup);

            // Learning machinery: the Q(s,a) memory feeds the discrete Q-learning algorithm.
            this.Mem = new MyQSAMemory(rds.VarManager.MAX_VARIABLES, 0);
            this.LearningAlgorithm = new MyDiscreteQLearning(setup, this.Mem);
            this.m_asm = new MyMotivationBasedDeleteUnselectedASM(setup);
            this.m_mlvh = new MyLocalVariableHistory(rds, m_setup, this.Ds);

            // Previous-step bookkeeping and the list of newly discovered variables.
            this.m_prevSelectedAction = 0;
            this.m_prev_st = this.Ds.GetCurrentState();
            m_newVariables = new List<int>();
        }