Example #1
        /// <summary>
        /// Generates a <see cref="QLearnerModel"/> based on states/actions with transitions and rewards.
        /// </summary>
        /// <param name="X1">Initial State matrix.</param>
        /// <param name="y">Action label vector.</param>
        /// <param name="X2">Transition State matrix.</param>
        /// <param name="r">Reward values.</param>
        /// <returns>QLearnerModel.</returns>
        public override IReinforcementModel Generate(Matrix X1, Vector y, Matrix X2, Vector r)
        {
            this.Preprocess(X1, y, X2, r);

            var examples = MDPConverter.GetStates(X1, y, X2, this.FeatureProperties, this.FeatureDiscretizer);

            var states  = examples.Item1;
            var actions = examples.Item2;
            var statesP = examples.Item3;

            QTable Q = new QTable();

            // build the Q table from the observed (state, action, reward) triples,
            // making sure every successor state also gets an entry
            for (int i = 0; i < states.Count(); i++)
            {
                var state  = states.ElementAt(i);
                var action = actions.ElementAt(i);
                var stateP = statesP.ElementAt(i);

                Q.AddOrUpdate(state, action, r[i]);

                if (!Q.ContainsKey(stateP))
                {
                    Q.AddKey(stateP);
                }
            }

            // number of distinct states, used to average the per-pass Q change
            double count = states.Select(s => s.Id).Distinct().Count();

            double change = 0;

            // value-iteration passes: replay the recorded transitions until the
            // mean absolute Q change per distinct state drops below Epsilon
            for (int pass = 0; pass < this.MaxIterations; pass++)
            {
                change = 0;

                for (int i = 0; i < states.Count(); i++)
                {
                    IState  state  = states.ElementAt(i);
                    IAction action = actions.ElementAt(i);
                    IState  stateP = statesP.ElementAt(i);
                    double  reward = r[i];

                    // Q-learning update:
                    // Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * (r + gamma * max_a' Q(s',a'))
                    // here LearningRate is alpha and Lambda plays the role of gamma
                    double q = (1.0 - this.LearningRate) * Q[state, action]
                               + this.LearningRate * (reward + this.Lambda * Q[stateP, Q.GetMaxAction(stateP)]);

                    // accumulate the mean absolute change for the convergence test
                    change += (1.0 / count) * System.Math.Abs(Q[state, action] - q);

                    Q[state, action] = q;
                }

                if (change <= this.Epsilon)
                {
                    break;
                }
            }

            return new QLearnerModel
            {
                Descriptor = this.Descriptor,
                TransitionDescriptor = this.TransitionDescriptor,
                NormalizeFeatures = this.NormalizeFeatures,
                FeatureNormalizer = this.FeatureNormalizer,
                FeatureProperties = this.FeatureProperties,
                FeatureDiscretizer = this.FeatureDiscretizer,
                LearningRate = this.LearningRate,
                Lambda = this.Lambda,
                Q = Q
            };
        }
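
Taken on its own, the method above can be exercised with a few lines of caller code. The sketch below is hypothetical: the QLearnerGenerator class name, the toy data, and numl's implicit double[][]-to-Matrix and double[]-to-Vector conversions are assumptions for illustration; only the Generate(X1, y, X2, r) signature and the LearningRate/Lambda/Epsilon/MaxIterations properties appear in the example itself.

        // hypothetical caller -- see the caveats above
        var generator = new QLearnerGenerator
        {
            LearningRate  = 0.1,   // step size in the Q update
            Lambda        = 0.9,   // used as the discount factor
            Epsilon       = 1e-4,  // convergence threshold on the mean Q change
            MaxIterations = 100    // cap on value-iteration passes
        };

        // two recorded transitions: initial state, action taken, successor state, reward
        Matrix X1 = new[] { new[] { 0.0 }, new[] { 1.0 } };
        Vector y  = new[] { 0.0, 1.0 };
        Matrix X2 = new[] { new[] { 1.0 }, new[] { 2.0 } };
        Vector r  = new[] { 0.0, 1.0 };

        IReinforcementModel model = generator.Generate(X1, y, X2, r);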
Example #2
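Example #2 is the same algorithm as Example #1 in a slightly more modern C# style: var for local declarations, no this. qualifiers, and the change accumulator scoped to the pass loop.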
        /// <summary>
        ///   Generates a <see cref="QLearnerModel" /> based on states/actions with transitions and rewards.
        /// </summary>
        /// <param name="X1">Initial State matrix.</param>
        /// <param name="y">Action label vector.</param>
        /// <param name="X2">Transition State matrix.</param>
        /// <param name="r">Reward values.</param>
        /// <returns>QLearnerModel.</returns>
        public override IReinforcementModel Generate(Matrix X1, Vector y, Matrix X2, Vector r)
        {
            Preprocess(X1, y, X2, r);

            var examples = MDPConverter.GetStates(X1, y, X2, FeatureProperties, FeatureDiscretizer);

            var states  = examples.Item1;
            var actions = examples.Item2;
            var statesP = examples.Item3;

            var Q = new QTable();

            // construct Q table
            for (var i = 0; i < states.Count(); i++)
            {
                var state  = states.ElementAt(i);
                var action = actions.ElementAt(i);
                var stateP = statesP.ElementAt(i);

                Q.AddOrUpdate(state, action, r[i]);

                if (!Q.ContainsKey(stateP))
                {
                    Q.AddKey(stateP);
                }
            }

            double count = states.Select(s => s.Id).Distinct().Count();

            for (var pass = 0; pass < MaxIterations; pass++)
            {
                double change = 0;

                for (var i = 0; i < states.Count(); i++)
                {
                    var state  = states.ElementAt(i);
                    var action = actions.ElementAt(i);
                    var stateP = statesP.ElementAt(i);
                    var reward = r[i];

                    var q = (1.0 - LearningRate) * Q[state, action]
                            + LearningRate * (reward + Lambda * Q[stateP, Q.GetMaxAction(stateP)]);

                    change += 1.0 / count * System.Math.Abs(Q[state, action] - q);

                    Q[state, action] = q;
                }

                if (change <= Epsilon)
                {
                    break;
                }
            }

            return new QLearnerModel
            {
                Descriptor = Descriptor,
                TransitionDescriptor = TransitionDescriptor,
                NormalizeFeatures = NormalizeFeatures,
                FeatureNormalizer = FeatureNormalizer,
                FeatureProperties = FeatureProperties,
                FeatureDiscretizer = FeatureDiscretizer,
                LearningRate = LearningRate,
                Lambda = Lambda,
                Q = Q
            };
        }
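
Stripped of the numl plumbing, both examples implement the same tabular Q-learning rule. Below is a minimal, self-contained sketch of that single update on a plain dictionary; the tuple-keyed table and the hard-coded transition are illustrative stand-ins, not numl's QTable API.

using System;
using System.Collections.Generic;
using System.Linq;

class QUpdateDemo
{
    static void Main()
    {
        // Q[(state, action)] -> value; stands in for numl's QTable
        var Q = new Dictionary<(int s, int a), double>();
        double alpha = 0.1;  // LearningRate in the examples
        double gamma = 0.9;  // the role Lambda plays in the examples

        // one observed transition: in state 0, action 1 yielded reward 1.0
        // and led to state 2
        int s = 0, a = 1, sp = 2;
        double reward = 1.0;

        // current estimate (0 if the pair has not been seen yet)
        Q.TryGetValue((s, a), out double old);

        // max over the successor state's known action values (0 if none),
        // i.e. Q[stateP, Q.GetMaxAction(stateP)] in the examples
        double maxNext = Q.Where(kv => kv.Key.s == sp)
                          .Select(kv => kv.Value)
                          .DefaultIfEmpty(0.0)
                          .Max();

        Q[(s, a)] = (1.0 - alpha) * old + alpha * (reward + gamma * maxNext);

        Console.WriteLine(Q[(s, a)]); // 0.1 = 0.9*0 + 0.1*(1.0 + 0.9*0)
    }
}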