Example #1
        public void setUp()
        {
            WumpusCave completeCave = new WumpusCave(4, 4);

            actionFn = WumpusFunctionFunctions.createActionsFunction(completeCave);
            resultFn = WumpusFunctionFunctions.createResultFunction();
        }
Example #2
 /**
  * Constructs a problem with the specified components, and a default step
  * cost function (i.e. 1 per step).
  *
  * @param initialState
  *            the initial state that the agent starts in.
  * @param actionsFn
  *            a description of the possible actions available to the agent.
  * @param resultFn
  *            a description of what each action does; the formal name for
  *            this is the transition model, specified by a function
  *            RESULT(s, a) that returns the state that results from doing
  *            action a in state s.
  * @param goalTest
  *            a test that determines whether a given state is a goal state.
  */
 public GeneralProblem(S initialState,
                       IActionsFunction <S, A> actionsFn,
                       IResultFunction <S, A> resultFn,
                       GoalTest <S> goalTest)
     : this(initialState, actionsFn, resultFn, goalTest, new DefaultStepCostFunction <S, A>())
 {
 }
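For illustration, the n-queens components from Example #7 below could be wired into this constructor; the generic arguments and the QueenAction type name here are assumptions, not taken from the excerpts.

 // Hedged usage sketch: constructing a GeneralProblem from the n-queens
 // components shown in Example #7; the default step cost of 1 per step
 // is supplied by the chained constructor above.
 NQueensBoard board = new NQueensBoard(8);
 var problem = new GeneralProblem<NQueensBoard, QueenAction>(
     board,                                    // initial state
     NQueensFunctions.getIFActionsFunction(),  // ACTIONS(s)
     NQueensFunctions.getResultFunction(),     // RESULT(s, a)
     NQueensFunctions.testGoal);               // goal test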
Example #3
 public OnlineSearchProblem(IActionsFunction actionsFunction,
                            IGoalTest goalTest, IStepCostFunction stepCostFunction)
 {
     this.ActionsFunction  = actionsFunction;
     this.GoalTest         = goalTest;
     this.StepCostFunction = stepCostFunction;
 }
Example #4
 // TODO: Should do something to make these two methods below thread safe
 public static IActionsFunction GetActionsFunction()
 {
     if (null == actionsFunction)
     {
         actionsFunction = new EpActionsFunction();
     }
     return actionsFunction;
 }
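One way to resolve the TODO above is .NET's System.Lazy<T>, which initializes thread-safely by default; this is a sketch, not the library's actual fix, and the field name is an assumption.

 // Hedged sketch: Lazy<T> guarantees the factory runs at most once even
 // with concurrent callers, making the lazy singleton thread safe.
 private static readonly Lazy<IActionsFunction> lazyActionsFunction =
     new Lazy<IActionsFunction>(() => new EpActionsFunction());

 public static IActionsFunction GetActionsFunction()
 {
     return lazyActionsFunction.Value;
 }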
Example #5
 /// <summary>
 /// Returns an ACTIONS function for the incremental formulation of the
 /// n-queens problem.
 /// </summary>
 /// <returns>the shared ACTIONS function instance, created on first call.</returns>
 public static IActionsFunction GetIActionsFunction()
 {
     if (null == _iActionsFunction)
     {
         _iActionsFunction = new NQIActionsFunction();
     }
     return(_iActionsFunction);
 }
Example #6
 /// <summary>
 /// Returns an ACTIONS function for the complete-state formulation of the
 /// n-queens problem.
 /// </summary>
 /// <returns>the shared ACTIONS function instance, created on first call.</returns>
 public static IActionsFunction GetCActionsFunction()
 {
     if (null == _cActionsFunction)
     {
         _cActionsFunction = new NQCActionsFunction();
     }
     return(_cActionsFunction);
 }
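The two getters above differ only in the problem formulation they serve. The standalone sketch below (hypothetical types, not the aima-csharp API) shows the shape of each ACTIONS set: the incremental formulation places one new queen per action in the next empty column, while the complete-state formulation moves an already-placed queen within her column.

 using System.Collections.Generic;

 // Hypothetical illustration only; not the library's NQIActionsFunction /
 // NQCActionsFunction implementations.
 static class NQueensFormulationSketch
 {
     // Incremental formulation: with k queens placed (one per column so
     // far), the legal actions place a queen in any row of column k.
     public static IEnumerable<(int Col, int Row)> IncrementalActions(
         int n, IList<int> placedRows)
     {
         int col = placedRows.Count;          // next empty column
         for (int row = 0; row < n; row++)
             yield return (col, row);         // "place queen at (col, row)"
     }

     // Complete-state formulation: all n queens are on the board; an
     // action moves one queen to a different row within her column.
     public static IEnumerable<(int Col, int Row)> CompleteStateActions(
         int n, IList<int> queenRows)
     {
         for (int col = 0; col < n; col++)
             for (int row = 0; row < n; row++)
                 if (queenRows[col] != row)
                     yield return (col, row); // "move queen in col to row"
     }
 }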
Example #7
        public void setUp()
        {
            oneBoard   = new NQueensBoard(1);
            eightBoard = new NQueensBoard(8);
            board      = new NQueensBoard(8);

            actionsFn = NQueensFunctions.getIFActionsFunction();
            resultFn  = NQueensFunctions.getResultFunction();
            goalTest  = NQueensFunctions.testGoal;
        }
Example #8
 /**
  * Constructs a nondeterministic problem with the specified components.
  */
 public NondeterministicProblem(S initialState,
                                IActionsFunction <S, A> actionsFn, IResultsFunction <S, A> resultsFn,
                                GoalTest <S> goalTest, IStepCostFunction <S, A> stepCostFn)
 {
     this.initialState = initialState;
     this.actionsFn    = actionsFn;
     this.resultsFn    = resultsFn;
     this.goalTest     = goalTest;
     this.stepCostFn   = stepCostFn;
 }
Example #9
 public Problem(object initialState, IActionsFunction actionsFunction,
                IResultFunction resultFunction, IGoalTest goalTest,
                IStepCostFunction stepCostFunction)
 {
     this.InitialState     = initialState;
     this.ActionsFunction  = actionsFunction;
     this.ResultFunction   = resultFunction;
     this.GoalTest         = goalTest;
     this.StepCostFunction = stepCostFunction;
 }
Example #10
        public PassiveADPAgent(IMap <S, A> fixedPolicy, ISet <S> states,
                               S initialState, IActionsFunction <S, A> actionsFunction,
                               IPolicyEvaluation <S, A> policyEvaluation)
        {
            this.pi.AddAll(fixedPolicy);
            this.mdp = new MDP <S, A>(states, initialState, actionsFunction,
                                      new TransitionProbabilityFunctionImpl(P),
                                      new RewardFunctionImpl(R));

            this.policyEvaluation = policyEvaluation;
        }
Example #11
 public MDP(ISet <S> states, S initialState,
            IActionsFunction <S, A> actionsFunction,
            ITransitionProbabilityFunction <S, A> transitionProbabilityFunction,
            IRewardFunction <S> rewardFunction)
 {
     this._states         = states;
     this.initialState    = initialState;
     this.actionsFunction = actionsFunction;
     this.transitionProbabilityFunction = transitionProbabilityFunction;
     this.rewardFunction = rewardFunction;
 }
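An MDP built from these components is typically solved by iterating the Bellman update; the self-contained sketch below uses delegates in place of the IActionsFunction, ITransitionProbabilityFunction and IRewardFunction interfaces, and every name in it is an assumption for illustration.

 using System;
 using System.Collections.Generic;
 using System.Linq;

 // Hedged sketch only; none of these names come from the excerpt above.
 static class ValueIterationSketch
 {
     // Repeatedly applies the Bellman update
     //   U(s) = R(s) + gamma * max_a SUM_s' P(s' | s, a) * U(s')
     // over the (S, A, P, R) components stored by the MDP constructor.
     public static IDictionary<S, double> Solve<S, A>(
         ISet<S> states,
         Func<S, IEnumerable<A>> actions,   // ACTIONS(s)
         Func<S, A, S, double> p,           // P(s' | s, a), called as p(s, a, sPrime)
         Func<S, double> reward,            // R(s)
         double gamma,
         int iterations)
     {
         var u = states.ToDictionary(s => s, s => 0.0);
         for (int i = 0; i < iterations; i++)
         {
             var next = new Dictionary<S, double>(u);
             foreach (S s in states)
             {
                 double best = actions(s)
                     .Select(a => states.Sum(s2 => p(s, a, s2) * u[s2]))
                     .DefaultIfEmpty(0.0)   // terminal states keep R(s)
                     .Max();
                 next[s] = reward(s) + gamma * best;
             }
             u = next;
         }
         return u;
     }
 }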
Example #12
 /**
  * Constructor.
  *
  * @param actionsFunction
  *            a function that lists the legal actions from a state.
  * @param noneAction
  *            an action representing None, i.e. a NoOp.
  * @param alpha
  *            a fixed learning rate.
  * @param gamma
  *            the discount factor to be used.
  * @param Ne
  *            a fixed parameter for use in the exploration function f(u, n).
  * @param Rplus
  *            R+ is an optimistic estimate of the best possible reward
  *            obtainable in any state, which is used in the method f(u, n).
  */
 public QLearningAgent(IActionsFunction <S, A> actionsFunction,
                       A noneAction, double alpha,
                       double gamma, int Ne, double Rplus)
 {
     this.actionsFunction = actionsFunction;
     this.noneAction      = noneAction;
     this._alpha          = alpha;
     this.gamma           = gamma;
     this.Ne    = Ne;
     this.Rplus = Rplus;
 }
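A hedged configuration sketch follows; the state/action types and the numeric values are illustrative assumptions, not taken from the excerpt.

 // Hedged usage sketch: wiring the exploration parameters documented above.
 var agent = new QLearningAgent<Cell, CellAction>(
     actionsFunction,  // lists the legal actions from a state
     CellAction.None,  // the None/NoOp action
     0.2,              // alpha: fixed learning rate
     1.0,              // gamma: discount factor (1.0 = undiscounted)
     5,                // Ne: try each state-action pair at least Ne times
     2.0);             // Rplus: optimistic estimate of the best reward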
Example #13
        public void setUp()
        {
            ExtendableMap aMap = new ExtendableMap();

            aMap.addBidirectionalLink("A", "B", 5.0);
            aMap.addBidirectionalLink("A", "C", 6.0);
            aMap.addBidirectionalLink("B", "C", 4.0);
            aMap.addBidirectionalLink("C", "D", 7.0);
            aMap.addUnidirectionalLink("B", "E", 14.0);

            actionsFn  = MapFunctions.createActionsFunction(aMap);
            resultFn   = MapFunctions.createResultFunction();
            stepCostFn = MapFunctions.createDistanceStepCostFunction(aMap);
        }
Example #14
        public IList <Node> ExpandNode(Node node, Problem problem)
        {
            IList <Node> childNodes = new List <Node>();

            IActionsFunction  actionsFunction  = problem.ActionsFunction;
            IResultFunction   resultFunction   = problem.ResultFunction;
            IStepCostFunction stepCostFunction = problem.StepCostFunction;

            foreach (IAction action in actionsFunction.Actions(node.State))
            {
                object successorState = resultFunction.Result(node.State, action);

                double stepCost = stepCostFunction.C(node.State, action, successorState);
                childNodes.Add(new Node(successorState, node, action, stepCost));
            }
            this.Metrics.Set(MetricNodesExpanded, this.Metrics.GetInt(MetricNodesExpanded) + 1);

            return(childNodes);
        }
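ExpandNode is the core of tree search; the loop below sketches a breadth-first frontier driving it. The single-argument Node constructor, the GoalTest call, and the 'search' object hosting ExpandNode are assumptions for illustration.

 // Hedged sketch: breadth-first search built on ExpandNode.
 var frontier = new Queue<Node>();
 frontier.Enqueue(new Node(problem.InitialState));  // root node
 while (frontier.Count > 0)
 {
     Node node = frontier.Dequeue();
     if (problem.GoalTest.IsGoalState(node.State))  // goal check
         break;                                     // solution path ends at node
     foreach (Node child in search.ExpandNode(node, problem))
         frontier.Enqueue(child);                   // one frontier entry per child
 }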
Example #15
 public Problem(object initialState, IActionsFunction actionsFunction,
                IResultFunction resultFunction, IGoalTest goalTest)
     : this(initialState, actionsFunction, resultFunction, goalTest,
            new DefaultStepCostFunction())
 {
 }
Example #16
 /**
  * Constructs a nondeterministic problem with the specified components and
  * a default step cost function (i.e. 1 per step).
  */
 public NondeterministicProblem(S initialState,
                                IActionsFunction <S, A> actionsFn, IResultsFunction <S, A> resultsFn,
                                GoalTest <S> goalTest)
     : this(initialState, actionsFn, resultsFn, goalTest, new DefaultStepCostFunction <S, A>())
 {
 }
 public OnlineSearchProblem(IActionsFunction actionsFunction, IGoalTest goalTest)
     : this(actionsFunction, goalTest, new DefaultStepCostFunction())
 {
 }