// function ONLINE-DFS-AGENT(s') returns an action
// inputs: s', a percept that identifies the current state
//
// Implements AIMA's ONLINE-DFS-AGENT. Persistent state (fields of the
// enclosing agent): s/a (previous state and action), result ([s, a] -> s'),
// untried (state -> actions not yet tried), unbacktracked (state -> states
// to backtrack to, LIFO).
public override Action execute(Percept psDelta)
{
    object sDelta = ptsFunction.getState(psDelta);
    // if GOAL-TEST(s') then return stop
    if (goalTest(sDelta))
    {
        a = NoOpAction.NO_OP;
    }
    else
    {
        // if s' is a new state (not in untried) then untried[s'] <- ACTIONS(s')
        if (!untried.ContainsKey(sDelta))
        {
            untried.Add(sDelta, actions(sDelta));
        }

        // if s is not null then do
        if (null != s)
        {
            // Note: If I've already seen the result of this
            // [s, a] then don't put it back on the unbacktracked
            // list otherwise you can keep oscillating
            // between the same states endlessly.
            if (!(sDelta.Equals(result.get(s, a))))
            {
                // result[s, a] <- s'
                result.put(s, a, sDelta);

                // Ensure the unbacktracked always has a list for s'
                if (!unbacktracked.ContainsKey(sDelta))
                {
                    unbacktracked.Add(sDelta, new List<object>());
                }

                // add s to the front of the unbacktracked[s']
                // (fix: Insert(0, ...) — Add() appends to the BACK, which
                // would make backtracking FIFO instead of the required LIFO)
                unbacktracked[sDelta].Insert(0, s);
            }
        }

        // if untried[s'] is empty then
        // (fix: Count, not Capacity — Capacity is the list's allocated
        // storage, not the number of elements it holds)
        if (untried[sDelta].Count == 0)
        {
            // if unbacktracked[s'] is empty then return stop
            if (unbacktracked[sDelta].Count == 0)
            {
                a = NoOpAction.NO_OP;
            }
            else
            {
                // else a <- an action b such that result[s', b] =
                // POP(unbacktracked[s'])
                // (fix: List<T>.Remove(0) removes an element EQUAL to 0 and
                // returns bool, which boxed silently into `popped`; pop the
                // front element explicitly instead)
                object popped = unbacktracked[sDelta][0];
                unbacktracked[sDelta].RemoveAt(0);
                foreach (Pair<object, Action> sa in result.Keys)
                {
                    if (sa.getFirst().Equals(sDelta) && result[sa].Equals(popped))
                    {
                        a = sa.getSecond();
                        break;
                    }
                }
            }
        }
        else
        {
            // else a <- POP(untried[s'])
            // (fix: this branch was commented out, so the agent never chose
            // an untried action on the normal exploration path; pop the
            // front action explicitly)
            a = untried[sDelta][0];
            untried[sDelta].RemoveAt(0);
        }
    }

    if (a.isNoOp())
    {
        // I'm either at the Goal or can't get to it,
        // which in either case I'm finished so just die.
        setAlive(false);
    }

    // s <- s'
    s = sDelta;
    // return a
    return (a);
}
// function LRTA*-AGENT(s') returns an action
// inputs: s', a percept that identifies the current state
//
// Implements AIMA's LRTA*-AGENT. Persistent state (fields of the enclosing
// agent): s/a (previous state and action), result ([s, a] -> s'), and H, a
// table of cost estimates indexed by state, initialized lazily from the
// heuristic and updated as the agent moves.
public override Action execute(Percept psDelta)
{
    object sDelta = ptsFunction.getState(psDelta);
    // if GOAL-TEST(s') then return stop
    if (goalTest(sDelta))
    {
        a = NoOpAction.NO_OP;
    }
    else
    {
        // if s' is a new state (not in H) then H[s'] <- h(s')
        if (!H.ContainsKey(sDelta))
        {
            H.Add(sDelta, getHeuristicFunction().h(sDelta));
        }

        // if s is not null
        if (null != s)
        {
            // result[s, a] <- s'
            result.put(s, a, sDelta);

            // H[s] <- min LRTA*-COST(s, b, result[s, b], H)
            //         b (element of) ACTIONS(s)
            double minimum = double.MaxValue;
            foreach (Action b in actions(s))
            {
                double cost = lrtaCost(s, b, result.get(s, b));
                if (cost < minimum)
                {
                    minimum = cost;
                }
            }
            // (fix: use the indexer — Add() throws ArgumentException on a
            // revisited state, and s is always already a key here because it
            // was inserted as sDelta on a previous step)
            H[s] = minimum;
        }

        // a <- an action b in ACTIONS(s') that minimizes LRTA*-COST(s', b,
        // result[s', b], H)
        double min = double.MaxValue;
        // Just in case no actions
        a = NoOpAction.NO_OP;
        foreach (agent.Action b in actions(sDelta))
        {
            double cost = lrtaCost(sDelta, b, result.get(sDelta, b));
            if (cost < min)
            {
                min = cost;
                a = b;
            }
        }
    }

    // s <- s'
    s = sDelta;

    if (a.isNoOp())
    {
        // I'm either at the Goal or can't get to it,
        // which in either case I'm finished so just die.
        setAlive(false);
    }

    // return a
    return (a);
}