public void setUp()
{
    // Fixture: a fully open 4x4 cave, plus the action/result functions
    // derived from it for the search tests.
    WumpusCave cave = new WumpusCave(4, 4);
    actionFn = WumpusFunctionFunctions.createActionsFunction(cave);
    resultFn = WumpusFunctionFunctions.createResultFunction();
}
/// <summary>
/// Constructs a problem from the specified components, using a default
/// step cost function that charges 1 per step.
/// </summary>
/// <param name="initialState">the initial state that the agent starts in.</param>
/// <param name="actionsFn">a description of the possible actions available to the agent.</param>
/// <param name="resultFn">the transition model: a function RESULT(s, a) that returns
/// the state that results from doing action a in state s.</param>
/// <param name="goalTest">test that determines whether a given state is a goal state.</param>
public GeneralProblem(S initialState, IActionsFunction<S, A> actionsFn, IResultFunction<S, A> resultFn, GoalTest<S> goalTest)
    : this(initialState, actionsFn, resultFn, goalTest, new DefaultStepCostFunction<S, A>())
{
}
/// <summary>
/// Creates an online search problem from its three components.
/// </summary>
/// <param name="actionsFunction">lists the actions available from a state.</param>
/// <param name="goalTest">determines whether a given state is a goal state.</param>
/// <param name="stepCostFunction">assigns a cost to each step.</param>
public OnlineSearchProblem(IActionsFunction actionsFunction, IGoalTest goalTest, IStepCostFunction stepCostFunction)
{
    ActionsFunction = actionsFunction;
    GoalTest = goalTest;
    StepCostFunction = stepCostFunction;
}
/// <summary>
/// Returns the shared ACTIONS function, creating it lazily on first use.
/// </summary>
/// <returns>the singleton ACTIONS function instance.</returns>
public static IActionsFunction GetActionsFunction()
{
    if (actionsFunction == null)
    {
        // Lock-free lazy init (resolves the former thread-safety TODO):
        // publish exactly one instance even under concurrent first calls.
        // A losing thread's freshly built instance is simply discarded.
        System.Threading.Interlocked.CompareExchange(
            ref actionsFunction, new EpActionsFunction(), null);
    }
    return actionsFunction;
}
/// <summary>
/// Returns an ACTIONS function for the incremental formulation of the
/// n-queens problem. The instance is created lazily and then cached.
/// </summary>
/// <returns>the cached incremental-formulation ACTIONS function.</returns>
public static IActionsFunction GetIActionsFunction()
{
    if (_iActionsFunction == null)
    {
        _iActionsFunction = new NQIActionsFunction();
    }
    return _iActionsFunction;
}
/// <summary>
/// Returns an ACTIONS function for the complete-state formulation of the
/// n-queens problem. The instance is created lazily and then cached.
/// </summary>
/// <returns>the cached complete-state-formulation ACTIONS function.</returns>
public static IActionsFunction GetCActionsFunction()
{
    if (_cActionsFunction == null)
    {
        _cActionsFunction = new NQCActionsFunction();
    }
    return _cActionsFunction;
}
public void setUp()
{
    // Boards under test: a trivial 1x1 board and two standard 8x8 boards.
    oneBoard = new NQueensBoard(1);
    eightBoard = new NQueensBoard(8);
    board = new NQueensBoard(8);

    // Search components for the incremental formulation.
    goalTest = NQueensFunctions.testGoal;
    actionsFn = NQueensFunctions.getIFActionsFunction();
    resultFn = NQueensFunctions.getResultFunction();
}
/// <summary>
/// Fully-specified constructor: wires every component of the
/// nondeterministic problem.
/// </summary>
/// <param name="initialState">the initial state that the agent starts in.</param>
/// <param name="actionsFn">a description of the possible actions available to the agent.</param>
/// <param name="resultsFn">the transition model RESULTS(s, a): the set of states that
/// may result from doing action a in state s.</param>
/// <param name="goalTest">test that determines whether a given state is a goal state.</param>
/// <param name="stepCostFn">assigns a cost to each step.</param>
public NondeterministicProblem(S initialState, IActionsFunction<S, A> actionsFn, IResultsFunction<S, A> resultsFn, GoalTest<S> goalTest, IStepCostFunction<S, A> stepCostFn)
{
    // Parameters shadow the fields of the same name, so "this." is required.
    this.initialState = initialState;
    this.actionsFn = actionsFn;
    this.resultsFn = resultsFn;
    this.goalTest = goalTest;
    this.stepCostFn = stepCostFn;
}
/// <summary>
/// Constructs a problem from all five of its components.
/// </summary>
/// <param name="initialState">the initial state that the agent starts in.</param>
/// <param name="actionsFunction">lists the actions available from a state.</param>
/// <param name="resultFunction">the transition model RESULT(s, a).</param>
/// <param name="goalTest">determines whether a given state is a goal state.</param>
/// <param name="stepCostFunction">assigns a cost to each step.</param>
public Problem(object initialState, IActionsFunction actionsFunction, IResultFunction resultFunction, IGoalTest goalTest, IStepCostFunction stepCostFunction)
{
    InitialState = initialState;
    ActionsFunction = actionsFunction;
    ResultFunction = resultFunction;
    GoalTest = goalTest;
    StepCostFunction = stepCostFunction;
}
/// <summary>
/// Creates a passive ADP agent that follows the given fixed policy.
/// </summary>
/// <param name="fixedPolicy">the policy the agent executes; copied into the agent's own policy map.</param>
/// <param name="states">the set of states of the environment.</param>
/// <param name="initialState">the state the agent starts in.</param>
/// <param name="actionsFunction">lists the actions available from a state.</param>
/// <param name="policyEvaluation">the policy-evaluation strategy to use.</param>
public PassiveADPAgent(IMap<S, A> fixedPolicy, ISet<S> states, S initialState, IActionsFunction<S, A> actionsFunction, IPolicyEvaluation<S, A> policyEvaluation)
{
    // Adopt the supplied policy as this agent's own.
    pi.AddAll(fixedPolicy);
    // Model the environment as an MDP whose transition-probability and
    // reward functions wrap P and R respectively.
    this.mdp = new MDP<S, A>(states, initialState, actionsFunction,
        new TransitionProbabilityFunctionImpl(P),
        new RewardFunctionImpl(R));
    this.policyEvaluation = policyEvaluation;
}
/// <summary>
/// Constructs a Markov decision process from all of its components.
/// </summary>
/// <param name="states">the set of states.</param>
/// <param name="initialState">the state the process starts in.</param>
/// <param name="actionsFunction">lists the actions available from a state.</param>
/// <param name="transitionProbabilityFunction">the transition-probability model.</param>
/// <param name="rewardFunction">the per-state reward function.</param>
public MDP(ISet<S> states, S initialState, IActionsFunction<S, A> actionsFunction, ITransitionProbabilityFunction<S, A> transitionProbabilityFunction, IRewardFunction<S> rewardFunction)
{
    _states = states;
    // The remaining parameters shadow same-named fields, so "this." is required.
    this.initialState = initialState;
    this.actionsFunction = actionsFunction;
    this.transitionProbabilityFunction = transitionProbabilityFunction;
    this.rewardFunction = rewardFunction;
}
/// <summary>
/// Constructs a Q-learning agent.
/// </summary>
/// <param name="actionsFunction">a function that lists the legal actions from a state.</param>
/// <param name="noneAction">an action representing None, i.e. a NoOp.</param>
/// <param name="alpha">a fixed learning rate.</param>
/// <param name="gamma">discount to be used.</param>
/// <param name="Ne">fixed parameter for use in the method f(u, n).</param>
/// <param name="Rplus">R+, an optimistic estimate of the best possible reward
/// obtainable in any state, which is used in the method f(u, n).</param>
public QLearningAgent(IActionsFunction<S, A> actionsFunction, A noneAction, double alpha, double gamma, int Ne, double Rplus)
{
    this.actionsFunction = actionsFunction;
    this.noneAction = noneAction;
    _alpha = alpha;
    this.gamma = gamma;
    this.Ne = Ne;
    this.Rplus = Rplus;
}
public void setUp()
{
    // Fixture map:
    //   A -5- B, A -6- C, B -4- C, C -7- D  (bidirectional)
    //   B -14-> E                            (one way only)
    ExtendableMap map = new ExtendableMap();
    map.addBidirectionalLink("A", "B", 5.0);
    map.addBidirectionalLink("A", "C", 6.0);
    map.addBidirectionalLink("B", "C", 4.0);
    map.addBidirectionalLink("C", "D", 7.0);
    map.addUnidirectionalLink("B", "E", 14.0);

    actionsFn = MapFunctions.createActionsFunction(map);
    resultFn = MapFunctions.createResultFunction();
    stepCostFn = MapFunctions.createDistanceStepCostFunction(map);
}
/// <summary>
/// Expands the given node, producing one child node per action applicable
/// in its state, and increments the nodes-expanded metric.
/// </summary>
/// <param name="node">the node to expand.</param>
/// <param name="problem">supplies the actions, result and step-cost functions.</param>
/// <returns>the list of successor nodes.</returns>
public IList<Node> ExpandNode(Node node, Problem problem)
{
    IActionsFunction actions = problem.ActionsFunction;
    IResultFunction result = problem.ResultFunction;
    IStepCostFunction stepCost = problem.StepCostFunction;

    IList<Node> successors = new List<Node>();
    foreach (IAction action in actions.Actions(node.State))
    {
        object nextState = result.Result(node.State, action);
        double cost = stepCost.C(node.State, action, nextState);
        successors.Add(new Node(nextState, node, action, cost));
    }

    // One expansion per call, regardless of how many children it yields.
    this.Metrics.Set(MetricNodesExpanded, this.Metrics.GetInt(MetricNodesExpanded) + 1);
    return successors;
}
/// <summary>
/// Constructs a problem with the specified components and a default step
/// cost function that charges 1 per step.
/// </summary>
/// <param name="initialState">the initial state that the agent starts in.</param>
/// <param name="actionsFunction">lists the actions available from a state.</param>
/// <param name="resultFunction">the transition model RESULT(s, a).</param>
/// <param name="goalTest">determines whether a given state is a goal state.</param>
public Problem(object initialState, IActionsFunction actionsFunction, IResultFunction resultFunction, IGoalTest goalTest)
    : this(initialState, actionsFunction, resultFunction, goalTest, new DefaultStepCostFunction())
{
}
/// <summary>
/// Convenience constructor: uses a default step cost function that charges
/// 1 per step.
/// </summary>
/// <param name="initialState">the initial state that the agent starts in.</param>
/// <param name="actionsFn">a description of the possible actions available to the agent.</param>
/// <param name="resultsFn">the transition model RESULTS(s, a).</param>
/// <param name="goalTest">determines whether a given state is a goal state.</param>
public NondeterministicProblem(S initialState, IActionsFunction<S, A> actionsFn, IResultsFunction<S, A> resultsFn, GoalTest<S> goalTest)
    : this(initialState, actionsFn, resultsFn, goalTest, new DefaultStepCostFunction<S, A>())
{
}
/// <summary>
/// Convenience constructor: uses a default step cost function that charges
/// 1 per step.
/// </summary>
/// <param name="actionsFunction">lists the actions available from a state.</param>
/// <param name="goalTest">determines whether a given state is a goal state.</param>
public OnlineSearchProblem(IActionsFunction actionsFunction, IGoalTest goalTest)
    : this(actionsFunction, goalTest, new DefaultStepCostFunction())
{
}