// Updates with a real player
public void UpdateWithPlayer(World.Player realPlayer) {
    I = World.XToI(realPlayer.X);
    J = World.YToJ(realPlayer.Y);
    Ammo = realPlayer.Ammo;
    Weapon = realPlayer.Weapon;
}
// Returns the path index - the index of the next path node to target given the current player
int getNewPathIndex(World.Player player, int currentIndex) {

    if (strategy.SearchPath == null) {
        return currentIndex;
    }

    int pathLength = strategy.SearchPath.States.Count;

    // Get target
    for (int i = 0; i + currentIndex < pathLength; i++) {

        BlockWorld targetWorld = strategy.SearchPath.States[currentIndex + i];
        int targetI = targetWorld.Player.I;
        int targetJ = targetWorld.Player.J;
        int playerI = World.XToI(player.X);
        int playerJ = World.YToJ(player.Y);

        // If the player is touching the next path coord, then return the index of the new path coord
        if (playerI == targetI && playerJ == targetJ) {
            return currentIndex + i + 1;
        }
    }

    return currentIndex;
}
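// --- Illustrative sketch, not part of the original project ---
// A self-contained version of the "advance to the next path node" idea used by
// getNewPathIndex above, with plain (i, j) tuples standing in for the project's
// BlockWorld path states. All names here are hypothetical.
using System;
using System.Collections.Generic;

static class PathFollowSketch {

    // Scan forward from the current target; if the follower is standing on a
    // later node, start targeting the node after it, otherwise keep the index.
    public static int AdvanceIndex(IList<(int I, int J)> path, (int I, int J) follower, int currentIndex) {
        for (int i = currentIndex; i < path.Count; i++) {
            if (path[i].I == follower.I && path[i].J == follower.J) {
                return i + 1;
            }
        }
        return currentIndex;
    }

    public static void Main() {
        var path = new List<(int, int)> { (0, 0), (1, 0), (2, 0), (2, 1) };
        Console.WriteLine(AdvanceIndex(path, (1, 0), 0)); // follower reached node 1 -> prints 2
        Console.WriteLine(AdvanceIndex(path, (5, 5), 0)); // off the path -> stays at 0
    }
}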
// Checks whether the enemy player has moved far enough from the danger zone source to require a recalculation
bool dangerZoneShifted(World world) {

    World.Player opponent = playerNum == 1 ? world.Player2 : world.Player1;
    int opponentI = World.XToI(opponent.X);
    int opponentJ = World.YToJ(opponent.Y);

    return Util.ManhattanDistance(opponentI, opponentJ, dangerZone.SourceI, dangerZone.SourceJ) > DangerZoneRecalcDistance;
}
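// --- Illustrative sketch, not part of the original project ---
// The shape of the dangerZoneShifted test above: recompute a cached zone only
// when the tracked entity has drifted more than a threshold (Manhattan
// distance in grid cells) from the cell the zone was built around. The
// threshold value and names here are hypothetical.
using System;

static class RecalcCheckSketch {

    const int RecalcDistance = 3; // stand-in for DangerZoneRecalcDistance

    static int ManhattanDistance(int i1, int j1, int i2, int j2) {
        return Math.Abs(i1 - i2) + Math.Abs(j1 - j2);
    }

    public static bool NeedsRecalc(int sourceI, int sourceJ, int currentI, int currentJ) {
        return ManhattanDistance(currentI, currentJ, sourceI, sourceJ) > RecalcDistance;
    }

    public static void Main() {
        Console.WriteLine(NeedsRecalc(4, 4, 5, 6)); // distance 3 -> False
        Console.WriteLine(NeedsRecalc(4, 4, 8, 6)); // distance 6 -> True
    }
}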
public Engine(MapWindow mapWindow) {
    MapWindow = mapWindow;
    Generator = mapWindow.Generator;
    Player = new World.Player(this);
    KeyboardInput = new KeyboardInput(this);
    Camera = new Camera(this);
    MiniMap = new MiniMap(new Vector(200, 200), this);
}
public static World.Player SpawnPlayer(Vector3 position, Transform parent, bool active = false) {
    RectTransform transform = Spawn(position, Game.Graphics.Player, parent, active);
    World.Player worldCharacter = transform.GetComponent<World.Player>();

    worldCharacter.Initialize(new Character<Characters> {
        Type = Characters.Link
    });

    return worldCharacter;
}
public void Initialize(Inventory inventory, World.Player player) {
    Inventory = inventory;
    Inventory.OnChanged += Inventory_OnChanged;
    Inventory.ChangeSelection += Inventory_OnChangeSelection;

    Player = player;
    Player.OnInitialize += Player_OnInitialize;
    Player.OnTakeDamage += Player_OnTakeDamage;

    Menu.Initialize(inventory);
}
// Searches the world for an action
public ActionWithFiller ComputeBestAction(World world, WorldAction currentFillerAction, int currentPathIndex) {

    WorldAction bestAction = WorldAction.NoAction;
    WorldAction bestFillerAction = WorldAction.NoAction;
    float bestActionUtility = float.MinValue;

    // Determine which actions are possible
    World.Player currentPlayer = playerNum == 1 ? world.Player1 : world.Player2;
    List<WorldAction> possibleActions = currentPlayer.GetPossibleActions();

    // Choose the maximum-utility action out of all possibilities
    foreach (WorldAction action in possibleActions) {

        // Make a new clone of the world to run a simulated step
        World newState = world.Clone();
        World.Player newCurrentPlayer = playerNum == 1 ? newState.Player1 : newState.Player2;

        newCurrentPlayer.Advance(new List<WorldAction>() { action });
        newState.Advance(emptyList, false, false);
        //currentPathIndex = PathIndexFunction(newCurrentPlayer, currentPathIndex);

        // Decide the filler action and do it repeatedly
        WorldAction potentialFillerAction = FillerActionFunction(action, currentFillerAction);
        List<WorldAction> fillerActionList = new List<WorldAction>() { potentialFillerAction };

        for (int i = 0; i < StepSize - 1; i++) {
            newCurrentPlayer.Advance(fillerActionList);
            newState.Advance(emptyList, false, false);
            //currentPathIndex = PathIndexFunction(newCurrentPlayer, currentPathIndex);
        }

        // Calculate utility and update the maximum
        //float utility = 0.0f;
        float utility = calculateUtility(newState, 0, true, float.MinValue, float.MaxValue, potentialFillerAction, currentPathIndex);

        if (utility > bestActionUtility) {
            bestAction = action;
            bestFillerAction = potentialFillerAction;
            bestActionUtility = utility;
        }
    }

    return new ActionWithFiller(bestAction, bestFillerAction);
}
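// --- Illustrative sketch, not part of the original project ---
// ComputeBestAction advances each cloned world in coarse steps: one deliberate
// action, then StepSize - 1 frames of a cheap "filler" action, so the search
// only branches every StepSize frames. Below is that stepping pattern in
// isolation; ISim, RecordingSim, and the action strings are hypothetical
// stand-ins for the project's World/Player simulation.
using System;
using System.Collections.Generic;

interface ISim {
    void Apply(string action); // advance the simulation by one frame
}

static class CoarseStepSketch {

    const int StepSize = 4; // stand-in for the searcher's step size

    public static void RunStep(ISim sim, string deliberateAction, string fillerAction) {
        sim.Apply(deliberateAction);
        for (int i = 0; i < StepSize - 1; i++) {
            sim.Apply(fillerAction);
        }
    }
}

// Toy simulation that just records what it was asked to do.
class RecordingSim : ISim {
    public List<string> Log = new List<string>();
    public void Apply(string action) { Log.Add(action); }
}

static class CoarseStepDemo {
    public static void Main() {
        var sim = new RecordingSim();
        CoarseStepSketch.RunStep(sim, "Jump", "MoveRight");
        Console.WriteLine(string.Join(", ", sim.Log)); // Jump, MoveRight, MoveRight, MoveRight
    }
}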
// Checks if player left path
bool playerLeftPath(World world, Path path) {

    if (path != null) {

        World.Player player = playerNum == 1 ? world.Player1 : world.Player2;
        float cutOffSquared = PathDeviationRecalcDistance * PathDeviationRecalcDistance;

        foreach (BlockWorld blockWorld in path.States) {

            int pathI = blockWorld.Player.I;
            int pathJ = blockWorld.Player.J;
            float pathX = World.IToXMin(pathI) + World.BlockSize / 2.0f;
            float pathY = World.JToYMin(pathJ) + World.BlockSize / 2.0f;

            if (Util.SquareDistance(player.X, player.Y, pathX, pathY) < cutOffSquared) {
                return false;
            }
        }

        return true;
    }

    return false;
}
// Method which takes in a world and playerNum and returns the YCloseness of the player and the enemy
public YCloseness HowYClose(World world, int playerNum) {

    World.Player player = playerNum == 1 ? world.Player1 : world.Player2;
    World.Player other = playerNum == 1 ? world.Player2 : world.Player1;

    float dY = player.Y - other.Y;

    if (isGroundBetween(player.X, player.Y, other.X, other.Y, world)) {
        return YCloseness.WallBetween;
    }

    float blockUnit = World.BlockSize;
    if (dY < -10.0f * blockUnit) return YCloseness.NegFar;
    if (dY < -1.0f * blockUnit) return YCloseness.NegMedium;
    if (dY < 0.0f * blockUnit) return YCloseness.NegNear;
    if (dY < 1.0f * blockUnit) return YCloseness.PosNear;
    if (dY < 10.0f * blockUnit) return YCloseness.PosMedium;
    return YCloseness.PosFar;
}
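// --- Illustrative sketch, not part of the original project ---
// HowYClose reduces a signed vertical offset to a small set of qualitative
// buckets. The same idea in isolation; the enum, block size, and thresholds
// below mirror the method above but the names are hypothetical, and the
// wall-between check is omitted.
using System;

enum Closeness { NegFar, NegMedium, NegNear, PosNear, PosMedium, PosFar }

static class ClosenessSketch {

    const float BlockUnit = 1.0f; // stand-in for World.BlockSize

    public static Closeness Bucket(float delta) {
        if (delta < -10.0f * BlockUnit) return Closeness.NegFar;
        if (delta < -1.0f * BlockUnit) return Closeness.NegMedium;
        if (delta < 0.0f) return Closeness.NegNear;
        if (delta < 1.0f * BlockUnit) return Closeness.PosNear;
        if (delta < 10.0f * BlockUnit) return Closeness.PosMedium;
        return Closeness.PosFar;
    }

    public static void Main() {
        Console.WriteLine(Bucket(-12.5f)); // NegFar
        Console.WriteLine(Bucket(0.5f));   // PosNear
    }
}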
// Constructor takes a real player to simplify initialization
public BlockPlayer(World.Player realPlayer) {
    UpdateWithPlayer(realPlayer);
}
// Computes a belief distribution for the projectile sources
void computeSourceBeliefs(int playerNum, World world) {

    // Find player
    World.Player player = playerNum == 1 ? world.Player1 : world.Player2;
    int playerI = World.XToI(player.X);
    int playerJ = World.YToJ(player.Y);
    sourceI = playerI;
    sourceJ = playerJ;

    // The initial source is at the player
    ProjectileSourceBelief initial = new ProjectileSourceBelief(1.0f, player.Ammo, player.Weapon);
    sourceBeliefs.Add(new IJCoords(playerI, playerJ), initial);

    // Compute distribution of projectile sources
    for (int iter = 0; iter < DistributionSteps; iter++) {

        Dictionary<IJCoords, ProjectileSourceBelief> newBeliefs = new Dictionary<IJCoords, ProjectileSourceBelief>();
        float newBeliefsTotal = 0.0f;

        // Advance our belief state, except keep the old one around for efficiency
        foreach (KeyValuePair<IJCoords, ProjectileSourceBelief> entry in sourceBeliefs) {

            ProjectileSourceBelief belief = entry.Value;
            int i = entry.Key.I;
            int j = entry.Key.J;
            float prior = belief.Probability;

            // Determine possible directions
            bool upPossible = false;
            bool leftPossible = false;
            bool rightPossible = false;
            bool downPossible = false;
            bool stayPossible = false;
            int numPossibleDirections = 0;

            bool supported = blockWorld.CheckPositionSupported(i, j);
            if (!supported) {

                // Can only move down from unsupported positions
                numPossibleDirections++;
                downPossible = true;

            } else {

                stayPossible = true;
                numPossibleDirections++;

                if (!blockWorld.CheckGroundByIndex(i, j + 1)) {
                    downPossible = true;
                    numPossibleDirections++;
                }
                if (!blockWorld.CheckGroundByIndex(i + 1, j)) {
                    leftPossible = true;
                    numPossibleDirections++;
                }
                if (!blockWorld.CheckGroundByIndex(i - 1, j)) {
                    rightPossible = true;
                    numPossibleDirections++;
                }
                if (!blockWorld.CheckGroundByIndex(i, j - 1)) {
                    upPossible = true;
                    numPossibleDirections++;
                }
            }

            // Compute chance of each direction - uniform
            float chance = 1.0f / numPossibleDirections;
            float derivedChance = chance * prior;

            // Update beliefs based on possible directions
            // Staying in place is always an option
            if (stayPossible) {
                addBeliefUsingWorld(newBeliefs, i, j, belief, derivedChance, ref newBeliefsTotal);
            }
            if (upPossible) {
                addBeliefUsingWorld(newBeliefs, i, j - 1, belief, derivedChance, ref newBeliefsTotal);
            }
            if (downPossible) {
                addBeliefUsingWorld(newBeliefs, i, j + 1, belief, derivedChance, ref newBeliefsTotal);
            }
            if (leftPossible) {
                addBeliefUsingWorld(newBeliefs, i + 1, j, belief, derivedChance, ref newBeliefsTotal);
            }
            if (rightPossible) {
                addBeliefUsingWorld(newBeliefs, i - 1, j, belief, derivedChance, ref newBeliefsTotal);
            }
        }

        // Normalize our new belief state
        foreach (KeyValuePair<IJCoords, ProjectileSourceBelief> entry in newBeliefs) {
            entry.Value.Probability /= newBeliefsTotal;
        }

        // Update
        sourceBeliefs = newBeliefs;
    }
}
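// --- Illustrative sketch, not part of the original project ---
// The core of computeSourceBeliefs is a discrete belief diffusion: start with
// all probability mass on the opponent's last known cell, spread each cell's
// mass uniformly over its possible moves each step, then renormalize. The
// version below uses free 4-neighbour movement plus staying put, and plain
// tuples instead of IJCoords/ProjectileSourceBelief; the real movement rules
// (gravity, ammo tracking) are simplified away.
using System;
using System.Collections.Generic;

static class BeliefDiffusionSketch {

    public static Dictionary<(int I, int J), float> Step(
            Dictionary<(int I, int J), float> beliefs,
            Func<int, int, bool> isBlocked) {

        var next = new Dictionary<(int I, int J), float>();
        float total = 0.0f;

        foreach (var entry in beliefs) {
            var (i, j) = entry.Key;
            float prior = entry.Value;

            // Staying put plus every open 4-neighbour is a possible move.
            var moves = new List<(int, int)> { (i, j) };
            if (!isBlocked(i + 1, j)) moves.Add((i + 1, j));
            if (!isBlocked(i - 1, j)) moves.Add((i - 1, j));
            if (!isBlocked(i, j + 1)) moves.Add((i, j + 1));
            if (!isBlocked(i, j - 1)) moves.Add((i, j - 1));

            // Uniform chance over the possible moves, weighted by the prior.
            float share = prior / moves.Count;
            foreach (var cell in moves) {
                next.TryGetValue(cell, out float existing);
                next[cell] = existing + share;
                total += share;
            }
        }

        // Renormalize so the distribution sums to 1 again (the real code needs
        // this because some mass can be dropped at unreachable cells).
        foreach (var key in new List<(int I, int J)>(next.Keys)) {
            next[key] /= total;
        }
        return next;
    }

    public static void Main() {
        var beliefs = new Dictionary<(int I, int J), float> { [(5, 5)] = 1.0f };
        Func<int, int, bool> nothingBlocked = (i, j) => false;
        for (int step = 0; step < 3; step++) {
            beliefs = Step(beliefs, nothingBlocked);
        }
        Console.WriteLine(beliefs.Count); // cells that now carry some belief
    }
}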
// Calculates the utility of a state
protected float calculateUtility(World state, int depth, bool isOpponentsTurn, float alpha, float beta,
                                 WorldAction prevFillerAction, int currentPathIndex) {

    // Check if terminal and return terminal utility
    if (state.IsTerminal()) {
        float p1sTermUtil = state.TerminalUtility();
        return playerNum == 1 ? p1sTermUtil : -p1sTermUtil;
    }

    // Use the heuristic once over max depth
    if (depth > SearchDepth) {
        // Uncomment below to check heuristic bounds between -1 and 1
        //float h = Heuristic(state, currentPathIndex);
        //if (h > 1.0f || h < -1.0f) Debug.LogWarning("Heuristic has magnitude greater than 1!");
        //return h;
        return Heuristic(state, currentPathIndex);
    }

    if (isOpponentsTurn) {

        // Determine which actions are possible
        World.Player currentPlayer = playerNum == 1 ? state.Player2 : state.Player1;
        List<WorldAction> possibleActions = currentPlayer.GetPossibleActions();

        // Minimize utility
        float minUtil = float.MaxValue;

        // Find utility of possible actions
        foreach (WorldAction action in possibleActions) {

            // Make a new clone of the world to run a simulated step of *only the opponent*
            World newState = state.Clone();
            World.Player newCurrentPlayer = playerNum == 1 ? newState.Player2 : newState.Player1;
            newCurrentPlayer.Advance(new List<WorldAction>() { action });

            // Do filler action
            WorldAction potentialFillerAction = FillerActionFunction(action, prevFillerAction);
            List<WorldAction> fillerActionList = new List<WorldAction>() { potentialFillerAction };
            for (int i = 0; i < StepSize - 1; i++) {
                newCurrentPlayer.Advance(fillerActionList);
            }

            // Calculate utility and update minimum
            float utility = calculateUtility(newState, depth + 1, false, alpha, beta, potentialFillerAction, currentPathIndex);
            if (utility < minUtil) {
                minUtil = utility;

                // Alpha check
                if (minUtil <= alpha) {
                    return minUtil;
                }

                // Beta update
                if (minUtil < beta) {
                    beta = minUtil;
                }
            }
        }

        return minUtil;

    } else {

        // Determine which actions are possible
        World.Player currentPlayer = playerNum == 1 ? state.Player1 : state.Player2;
        List<WorldAction> possibleActions = currentPlayer.GetPossibleActions();

        // Maximize utility
        float maxUtil = float.MinValue;

        // Find utility of possible actions
        foreach (WorldAction action in possibleActions) {

            // Make a new clone of the world to run a simulated step with *player and projectiles*
            World newState = state.Clone();
            World.Player newCurrentPlayer = playerNum == 1 ? newState.Player1 : newState.Player2;
            newCurrentPlayer.Advance(new List<WorldAction>() { action });
            newState.Advance(emptyList, false, false);
            //currentPathIndex = PathIndexFunction(newCurrentPlayer, currentPathIndex);

            // Do filler action
            WorldAction potentialFillerAction = FillerActionFunction(action, prevFillerAction);
            List<WorldAction> fillerActionList = new List<WorldAction>() { potentialFillerAction };
            for (int i = 0; i < StepSize - 1; i++) {
                newCurrentPlayer.Advance(fillerActionList);
                newState.Advance(emptyList, false, false);
                //currentPathIndex = PathIndexFunction(newCurrentPlayer, currentPathIndex);
            }

            // Calculate utility and update maximum
            float utility = calculateUtility(newState, depth + 1, true, alpha, beta, potentialFillerAction, currentPathIndex);
            if (utility > maxUtil) {
                maxUtil = utility;

                // Beta check
                if (maxUtil >= beta) {
                    return maxUtil;
                }

                // Alpha update
                if (maxUtil > alpha) {
                    alpha = maxUtil;
                }
            }
        }

        return maxUtil;
    }
}
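// --- Illustrative sketch, not part of the original project ---
// calculateUtility is minimax with alpha-beta pruning over cloned World
// states. The same scheme over an explicit toy game tree: leaves carry a
// utility from the maximizing player's point of view, and a branch is cut off
// as soon as it cannot affect the result. TreeNode and the demo values are
// hypothetical.
using System;
using System.Collections.Generic;

class TreeNode {
    public float Value;                                   // used only at leaves
    public List<TreeNode> Children = new List<TreeNode>();
}

static class AlphaBetaSketch {

    public static float Evaluate(TreeNode node, bool maximizing, float alpha, float beta) {
        if (node.Children.Count == 0) return node.Value;  // terminal / heuristic value

        if (maximizing) {
            float best = float.MinValue;
            foreach (TreeNode child in node.Children) {
                best = Math.Max(best, Evaluate(child, false, alpha, beta));
                if (best >= beta) return best;            // beta cutoff
                alpha = Math.Max(alpha, best);
            }
            return best;
        } else {
            float best = float.MaxValue;
            foreach (TreeNode child in node.Children) {
                best = Math.Min(best, Evaluate(child, true, alpha, beta));
                if (best <= alpha) return best;           // alpha cutoff
                beta = Math.Min(beta, best);
            }
            return best;
        }
    }

    public static void Main() {
        TreeNode Leaf(float v) => new TreeNode { Value = v };
        var root = new TreeNode {
            Children = {
                new TreeNode { Children = { Leaf(3), Leaf(5) } },
                new TreeNode { Children = { Leaf(2), Leaf(9) } } // the 9 is never evaluated
            }
        };
        Console.WriteLine(Evaluate(root, true, float.MinValue, float.MaxValue)); // 3
    }
}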
// The level 1 adversarial search heuristic
public float Level1Heuristic(World world, int pathIndex) {

    World.Player currentPlayer = playerNum == 1 ? world.Player1 : world.Player2;
    World.Player opponentPlayer = playerNum == 1 ? world.Player2 : world.Player1;

    // Compute normalized factors
    float dHealth = (currentPlayer.Health - opponentPlayer.Health) / 200.0f;
    float dAmmo = (currentPlayer.Ammo /* - opponentPlayer.Ammo*/) / 3.0f;
    if (currentPlayer.IsMaster) {
        dAmmo = 0.0f; // Ammo is not a concern when master
    }
    float normalizedInverseDist = Util.BoundedInverseDistance(currentPlayer.X, currentPlayer.Y,
                                                              opponentPlayer.X, opponentPlayer.Y);

    // Normalized level 2 conformance
    float normalizedConformance = 0.0f;
    if (SearchPath != null) {

        int pathLength = SearchPath.States.Count;

        // The real target depth may not exceed the end of the path
        int targetDepth = pathLength - pathIndex - 1 < PathTargetDepth ? pathLength - pathIndex - 1 : PathTargetDepth;

        if (pathIndex < pathLength) {

            int i = pathIndex + targetDepth;
            int targetI = SearchPath.States[i].Player.I;
            int targetJ = SearchPath.States[i].Player.J;
            float targetX = World.IToXMin(targetI) + World.BlockSize / 2.0f;
            float targetY = World.JToYMin(targetJ) + World.BlockSize / 2.0f;

            float weight = ((float)i) / pathLength;
            normalizedConformance += Util.BoundedInverseDistance(currentPlayer.X, currentPlayer.Y, targetX, targetY) * weight;
        }

        normalizedConformance *= oneOverXSquaredNormalizationFactor;

    } else {

        // If the path is null, the danger zone will still provide us with a small heuristic weight
        int playerI = World.XToI(currentPlayer.X);
        int playerJ = World.YToJ(currentPlayer.Y);

        // The danger zone exists primarily to influence paths, so weight this very lightly
        if (Level2DangerZone != null) {
            normalizedConformance = Level2DangerZone.CheckDanger(playerI, playerJ) * 0.001f;
        }
    }

    // Return weighted sum of influences
    return level1HealthWeight * dHealth
         + level1AmmoWeight * dAmmo
         + level1ConfrontationWeight * normalizedInverseDist
         + level1SuperlevelWeight * normalizedConformance;
}
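// --- Illustrative sketch, not part of the original project ---
// Level1Heuristic is a weighted sum of factors that are each normalized into a
// small, comparable range so no single term dominates by accident. The same
// shape in isolation; the weights are hypothetical, and the 1 / (1 + d) form
// below is only an assumption standing in for Util.BoundedInverseDistance,
// whose implementation is not shown here.
using System;

static class WeightedHeuristicSketch {

    // Hypothetical weights, analogous to level1HealthWeight etc.
    const float HealthWeight = 0.6f;
    const float AmmoWeight = 0.1f;
    const float ConfrontationWeight = 0.3f;

    static float BoundedInverseDistance(float x1, float y1, float x2, float y2) {
        float dx = x1 - x2, dy = y1 - y2;
        return 1.0f / (1.0f + (float)Math.Sqrt(dx * dx + dy * dy)); // always in (0, 1]
    }

    public static float Heuristic(
            float myHealth, float enemyHealth, float myAmmo,
            float myX, float myY, float enemyX, float enemyY) {

        float dHealth = (myHealth - enemyHealth) / 200.0f; // divisor keeps the term small
        float dAmmo = myAmmo / 3.0f;                       // ammo assumed small and non-negative
        float closeness = BoundedInverseDistance(myX, myY, enemyX, enemyY);

        return HealthWeight * dHealth + AmmoWeight * dAmmo + ConfrontationWeight * closeness;
    }

    public static void Main() {
        // Ahead on health, some ammo, fairly close to the opponent.
        Console.WriteLine(Heuristic(100, 40, 2, 0, 0, 3, 4));
    }
}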
// The center of the AI - get an action
override public List<WorldAction> GetAction(World world) {

    // The immediate action comes from level 1
    WorldAction bestAction = WorldAction.NoAction;

    // Update level 1 heuristic parameters
    World.Player player = playerNum == 1 ? world.Player1 : world.Player2;

    // Calculate new level 1 action if timer is up
    if (decisionTimer <= 0) {

        ActionWithFiller decision = level1Searcher.ComputeBestAction(world, fillerAction, strategy.NextPathIndex);
        bestAction = decision.Action;
        fillerAction = decision.FillerAction;

        decisionTimer = Level1StepSize;

    // Otherwise do the filler action
    } else {

        bestAction = fillerAction;

        // Check distance to path
        bool doneWithPath = false;
        if (strategy.SearchPath != null) {
            doneWithPath = strategy.NextPathIndex >= strategy.SearchPath.States.Count - 1;
        }

        // Calculate the path if this frame has been designated to it
        if (calculatePathNextFrame) {

            // Run A*
            Path path = level2Searcher.ComputeBestPath(blockWorld);

            // Must be set before using the level 1 heuristic with a path
            strategy.SearchPath = path;
            strategy.NextPathIndex = 0;
            calculatePathNextFrame = false;

            // If no path is able to be calculated, then check again sooner than normal
            if (path == null) {
                strategyTimer = NoPathFoundRefreshTimer;
            }

        } else {

            // Compute a new strategy if the old one is no longer valid
            SimplifiedWorld currentState = new SimplifiedWorld(world, playerNum);

            if (isFirstTime || !previousState.IsEquivalent(currentState) || doneWithPath ||
                dangerZoneShifted(world) || playerLeftPath(world, strategy.SearchPath) ||
                strategyTimer <= 0 || world.IsTerminal()) {

                if (isFirstTime) {
                    previousState = currentState;
                }
                isFirstTime = false;

                // Get reward and update QValues if learning
                if (IsLearning) {

                    float reward = SimplifiedWorld.Reward(previousState, strategy.Type, currentState);
                    QLearner.UpdateQValue(previousState, strategy.Type, currentState, reward);

                    // Don't learn once world is terminal
                    if (world.IsTerminal()) {
                        IsLearning = false;
                    }
                }

                // Get a new strategy
                StrategyType newStrategy = QLearner.GetStrategy(currentState);

#if STRATEGY_PRINT
                Debug.Log("Player " + playerNum.ToString() + " selects strategy: " + newStrategy.ToString());
#endif

                strategy = Strategy.StrategyWithType(playerNum, newStrategy);
                level1Searcher = new DiscreteAdversarialSearch(playerNum, strategy.Level1Heuristic,
                                                               getFillerAction, getNewPathIndex, Level1StepSize, 4);
                level2Searcher = new AStar(Level2MaxNodesInPrioQueue, Level2MaxExpansions,
                                           strategy.Level2CostFunction, strategy.Level2GoalFunction,
                                           strategy.Level2HeuristicFunction);

                // Create block world and danger zone
                blockWorld = new BlockWorld(playerNum, world);

                // Recalc danger zone
                dangerZone = new DangerZone(opponentNum, world, blockWorld);

                // Must be set before using the level 2 reward, cost, and goal functions
                strategy.Level2DangerZone = dangerZone;

                // Calculate the path in the next frame
                calculatePathNextFrame = true;

                // Speeds up framerate after player has died
                if (world.IsTerminal()) {
                    calculatePathNextFrame = false;
                }

                // Reset previous state
                previousState = currentState;
                strategyTimer = MaxStrategyTime;
            }
        }

        // Debug rendering of danger zone
#if DANGER_RENDER
        dangerZone.Render(ResourceScript);
        dangerZone.RenderPlayerBeliefs(ResourceScript);
#endif
    }

    // Advance path position
    strategy.NextPathIndex = getNewPathIndex(player, strategy.NextPathIndex);

    decisionTimer--;
    strategyTimer--;

#if PATH_RENDER
    if (strategy.SearchPath != null) {
        strategy.SearchPath.Render(ResourceScript, strategy.NextPathIndex);
    }
#endif

    // Return a single-valued list with the best action
    return new List<WorldAction>() { bestAction };
}
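// --- Illustrative sketch, not part of the original project ---
// GetAction drives a Q-learner through QLearner.UpdateQValue and
// QLearner.GetStrategy, whose implementations are not shown above. A textbook
// tabular Q-learner with that interface shape is sketched below; the state and
// action types, learning rate, discount factor, and purely greedy strategy
// selection are all assumptions.
using System;
using System.Collections.Generic;
using System.Linq;

class TabularQLearnerSketch<TState, TAction> {

    readonly Dictionary<(TState, TAction), float> q = new Dictionary<(TState, TAction), float>();
    readonly IList<TAction> actions;
    readonly float learningRate;
    readonly float discount;

    public TabularQLearnerSketch(IList<TAction> actions, float learningRate = 0.1f, float discount = 0.9f) {
        this.actions = actions;
        this.learningRate = learningRate;
        this.discount = discount;
    }

    float Q(TState s, TAction a) {
        q.TryGetValue((s, a), out float value);
        return value; // unseen pairs default to 0
    }

    // Standard backup: Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
    public void UpdateQValue(TState state, TAction action, TState nextState, float reward) {
        float bestNext = actions.Max(a => Q(nextState, a));
        float current = Q(state, action);
        q[(state, action)] = current + learningRate * (reward + discount * bestNext - current);
    }

    // Greedy selection; a real agent would mix in some exploration.
    public TAction GetStrategy(TState state) {
        return actions.OrderByDescending(a => Q(state, a)).First();
    }
}

static class QLearnerDemo {
    public static void Main() {
        var learner = new TabularQLearnerSketch<string, string>(new[] { "Attack", "RunAway" });
        learner.UpdateQValue("LowHealth", "RunAway", "Safe", reward: 1.0f);
        Console.WriteLine(learner.GetStrategy("LowHealth")); // RunAway
    }
}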