Example #1
        public Advisor(IModel model, List<Property> properties, List<LoggedValue> loggedValues)
        {
            _model = model;

            MODEL_ACTION_VAR_COUNT = _model.GenerateControlVariables().Count;
            MODEL_STATE_VAR_COUNT = _model.GetInitialState().Count;

            HORIZON_SIZE = (int)Convert.ToDouble(properties.Find(p => p.Name.Equals("Horizon")).Value);
            Vsize = (int)Convert.ToDouble(properties.Find(p => p.Name.Equals("Neurons Number")).Value);
            BetaV = Convert.ToDouble(properties.Find(p => p.Name.Equals("BetaV")).Value);
            Gamma = Convert.ToDouble(properties.Find(p => p.Name.Equals("Discount")).Value);
            Sigma = Convert.ToDouble(properties.Find(p => p.Name.Equals("Sigma")).Value);
            SigmaMin = Convert.ToDouble(properties.Find(p => p.Name.Equals("SigmaMin")).Value);
            TimesToAdjust = (int)Convert.ToDouble(properties.Find(p => p.Name.Equals("TimesToAdjust")).Value);
            var externalDiscretization =
                Convert.ToDouble(properties.Find(p => p.Name.Equals("ExternalDiscretization")).Value);
            var internalDiscretization =
                Convert.ToDouble(properties.Find(p => p.Name.Equals("InternalDiscretization")).Value);
            var timeLimit = Convert.ToDouble(properties.Find(p => p.Name.Equals("TimeLimit")).Value);
            TimesToTeach = (int)Convert.ToDouble(properties.Find(p => p.Name.Equals("TimesToTeach")).Value);
            TimesToAdjustPastActions = (int)Convert.ToDouble(properties.Find(p => p.Name.Equals("TimesToAdjustPastActions")).Value);

            _logger = new LogIt("", loggedValues);
            _model.SetDiscretizations(externalDiscretization, internalDiscretization);

            TimeIndex = 0;
            _episodeNr = 0;
            AllVisits = new ArrayList();
            Sampler = new ASampler();
            IterationsLimit = (int)(timeLimit / externalDiscretization);
            VestSum = 0;

            StartInState(_model.GetInitialState().ToArray());
            MinAction = new Vector(_model.MinActionValues());
            MaxAction = new Vector(_model.MaxActionValues());

            double[] stateAverage = _model.GetStateValuesAverageNN().ToArray();
            double[] stateStandardDeviation = _model.GetStateValuesStandardDeviationNN().ToArray();

            Init(stateAverage, stateStandardDeviation, MinAction.Table, Vsize);
        }
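Every parameter in the constructor above is read with the same find-by-name-and-convert pattern. As a minimal sketch, that repetition could be factored into a small helper; GetDoubleProperty below is hypothetical and not part of the original Advisor class, and it assumes Property exposes the Name and Value members seen in the example.

        private static double GetDoubleProperty(List<Property> properties, string name)
        {
            // Hypothetical helper: look up a property by name and convert its Value to double.
            var property = properties.Find(p => p.Name.Equals(name));
            if (property == null)
                throw new ArgumentException("Missing property: " + name);
            return Convert.ToDouble(property.Value);
        }

        // Usage sketch: HORIZON_SIZE = (int)GetDoubleProperty(properties, "Horizon");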
Example #2
 public List<Double> MeddleWithGoalAndStartingState()
 {
     // Randomize the second state component to a value in [0, 2π).
     var state = GetInitialState();
     state[1] = new ASampler().NextDouble() * 2 * Math.PI;
     return state;
 }
Example #3
 public List<Double> MeddleWithGoalAndStartingState()
 {
     var rand = new ASampler();
     // Draw a random setpoint between 20000.5 and 27999.5.
     _setpoint = rand.Next(28000 - 20000) + 20000.5;
     var state = GetInitialState();
     // Resample the state until its value falls inside [25000.5, 38000.0].
     while (GetValue(state)[0] < 25000.5 || GetValue(state)[0] > 38000.0)
     {
         state[0] = rand.NextDouble() * 5.506774;
         state[1] = rand.NextDouble() * 0.132906;
         state[2] = rand.NextDouble() * 0.0019752;
         state[3] = rand.NextDouble() * 49.38182;
     }
     return state;
 }
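Example #3 keeps redrawing the four state components until GetValue(state)[0] lands in the accepted range, which is a simple rejection-sampling loop. A generic, self-contained sketch of that pattern follows; the SampleUntil helper and its iteration cap are assumptions, not part of the original code.

using System;

public static class Resampling
{
    // Repeatedly draw candidates until one satisfies the acceptance test,
    // with an iteration cap so a never-satisfied predicate cannot loop forever.
    public static T SampleUntil<T>(Func<T> sample, Predicate<T> accept, int maxTries = 10000)
    {
        for (var i = 0; i < maxTries; i++)
        {
            var candidate = sample();
            if (accept(candidate))
                return candidate;
        }
        throw new InvalidOperationException("No acceptable sample found within the try limit.");
    }
}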
Example #4
 public CellSet()
 {
     TheSampler = new ASampler();
 }
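All four examples construct an ASampler and rely only on Next(int) and NextDouble(). The class itself is not shown on this page; the stand-in below is an assumption, a thin wrapper over System.Random that is compatible with the calls above.

using System;

// Assumed stand-in for ASampler, covering only the members used in these examples.
public class ASampler
{
    private readonly Random _random = new Random();

    // Returns a non-negative integer less than maxValue.
    public int Next(int maxValue) => _random.Next(maxValue);

    // Returns a double in [0.0, 1.0).
    public double NextDouble() => _random.NextDouble();
}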