public bool DidTeamWin(int teamId)
{
    int alliesAlive = 0;
    int opponentsAlive = 0;

    foreach (var modelActiveCharacter in ModelActiveCharacters)
    {
        if (!modelActiveCharacter.UnitState.IsAlive)
        {
            continue;
        }

        if (UnitHelpers.GetRelativeOwner(modelActiveCharacter.UnitIdentifier.TeamId, teamId) == UnitRelativeOwner.Self)
        {
            alliesAlive++;
        }
        else
        {
            opponentsAlive++;
        }
    }

    // The team has won once it still has units standing and the opposition has none.
    return alliesAlive > 0 && opponentsAlive == 0;
}
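// DidTeamLose is queried by the planner below but isn't shown in this section.
// A minimal sketch, assuming it simply mirrors DidTeamWin: a team has lost once
// it has no living units left, regardless of how many opponents remain.
public bool DidTeamLose(int teamId)
{
    foreach (var modelActiveCharacter in ModelActiveCharacters)
    {
        if (!modelActiveCharacter.UnitState.IsAlive)
        {
            continue;
        }

        // At least one allied unit is still alive, so the team hasn't lost yet.
        if (UnitHelpers.GetRelativeOwner(modelActiveCharacter.UnitIdentifier.TeamId, teamId) == UnitRelativeOwner.Self)
        {
            return false;
        }
    }

    return true;
}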
public TurnAction WIP_CalculateGOAPAction(Unit unit, int AIDepth = -1)
{
    var rootWorldModel = GetCurrentWorldModelLayer();
    var currentWorldModel = new WorldModel(rootWorldModel.ModelActiveCharacters, rootWorldModel.Queue, rootWorldModel.CurrentlyActiveUnit);
    WorldModelStack.Push(currentWorldModel);

    if (AIDepth == -1)
    {
        AIDepth = DEFAULT_AI_DEPTH;
    }

    var possibleActions = GetAllViableActions(unit);
    var currentActionSequence = new Stack<TurnAction>();
    TurnAction currentInitialMove = null;

    var bestAction = possibleActions[0];
    var bestDiscontentmentValue = float.MaxValue;
    bool victoryActionPlanFound = false;
    int leastStepsToWin = Int32.MaxValue;

    var DEBUG_loops = 0;
    var DEBUG_timestampStart = Time.realtimeSinceStartup;

    do
    {
        DEBUG_loops++;
        var isVictoryPlan = currentWorldModel.DidTeamWin(unit.UnitIdentifier.TeamId);
        var isLostPlan = currentWorldModel.DidTeamLose(unit.UnitIdentifier.TeamId);

        if (WorldModelStack.Count >= AIDepth || isVictoryPlan || isLostPlan)
        {
            // Leaf of the search: maximum depth reached or the simulated battle has ended.
            var currentDiscontentment = currentWorldModel.GetDiscontentmentForUnit(unit);

            if (isVictoryPlan && !victoryActionPlanFound)
            {
                // First plan that ends in victory; from now on, plans that don't end in
                // likely victory are no longer considered as candidates.
                victoryActionPlanFound = true;
                bestAction = currentInitialMove;
                bestDiscontentmentValue = currentDiscontentment;
                leastStepsToWin = currentActionSequence.Count;
            }
            else if (isVictoryPlan)
            {
                // Between winning plans, prefer the shorter one with lower discontentment.
                if (currentDiscontentment < bestDiscontentmentValue && currentActionSequence.Count < leastStepsToWin)
                {
                    leastStepsToWin = currentActionSequence.Count;
                    bestAction = currentInitialMove;
                    bestDiscontentmentValue = currentDiscontentment;
                }
            }
            else if (!victoryActionPlanFound)
            {
                // No winning plan known yet, so fall back to minimizing discontentment.
                if (currentDiscontentment < bestDiscontentmentValue)
                {
                    bestAction = currentInitialMove;
                    bestDiscontentmentValue = currentDiscontentment;
                }
            }

            // Unwind one step regardless of whether this plan was a candidate,
            // otherwise the search never backtracks.
            if (currentActionSequence.Count > 0)
            {
                currentActionSequence.Pop();
            }

            WorldModelStack.Pop();
            currentWorldModel = WorldModelStack.Peek();
        }
        else if (UnitHelpers.GetRelativeOwner(currentWorldModel.CurrentlyActiveUnit.TeamId, unit.UnitIdentifier.TeamId) == UnitRelativeOwner.Opponent)
        {
            // Assume the AI's opponent (the player) makes optimal moves as well. To avoid
            // bottlenecks this is done at minimum depth: only the single GOB-optimal action
            // is simulated, checking the state right after that action.
            if (!currentWorldModel.IsProcessed)
            {
                if (currentWorldModel.TryGetUnit(currentWorldModel.CurrentlyActiveUnit, out var independentUnit))
                {
                    var nextAction = CalculateGOBAction(independentUnit, currentWorldModel);
                    currentActionSequence.Push(nextAction);

                    var nextQueueState = currentWorldModel.GetNextQueueState();
                    var nextWorldState = new WorldModel(currentWorldModel.CopyUnits(), nextQueueState.Queue, nextQueueState.CurrentlyActiveUnit);
                    WorldModelStack.Push(nextWorldState);
                    nextWorldState.ApplyTurnAction(nextAction);

                    currentWorldModel.IsProcessed = true;
                    currentWorldModel = nextWorldState; // Descend into the simulated opponent turn.
                }
            }
            else
            {
                // Opponent turn already simulated; backtrack.
                if (currentActionSequence.Count > 0)
                {
                    currentActionSequence.Pop();
                }

                WorldModelStack.Pop();
                currentWorldModel = WorldModelStack.Count > 0 ? WorldModelStack.Peek() : null;
            }
        }
        else
        {
            // Our own (or an allied) turn: branch over every viable action, one per visit.
            var nextActionEnumerator = currentWorldModel.GetNextActionEnumerator();
            if (nextActionEnumerator.MoveNext())
            {
                if (currentActionSequence.Count == 0)
                {
                    // Remember the first move of the plan; it is what this method returns.
                    currentInitialMove = nextActionEnumerator.Current;
                }

                currentActionSequence.Push(nextActionEnumerator.Current);

                var nextQueueState = currentWorldModel.GetNextQueueState();
                WorldModelStack.Push(new WorldModel(currentWorldModel.CopyUnits(), nextQueueState.Queue, nextQueueState.CurrentlyActiveUnit));
                currentWorldModel = WorldModelStack.Peek();
                currentWorldModel.ApplyTurnAction(nextActionEnumerator.Current);
            }
            else
            {
                // All actions of this state have been explored; backtrack.
                nextActionEnumerator.Dispose();
                if (currentActionSequence.Count > 0)
                {
                    currentActionSequence.Pop();
                }

                WorldModelStack.Pop();
                currentWorldModel = WorldModelStack.Count > 0 ? WorldModelStack.Peek() : null;
            }
        }
    } while (WorldModelStack.Count > 1);

    Debug.Log($"AI Calculation Report: loops: {DEBUG_loops}, time: {Time.realtimeSinceStartup - DEBUG_timestampStart}, action taken: {bestAction.CommonCommandData.unitAbility.AbilityName}");
    return bestAction;
}
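// Hypothetical usage sketch: how a turn controller might call the planner when an
// AI-owned unit becomes active. OnAIUnitActivated and ExecuteAction are illustrative
// names, not part of the original source; AIDepth falls back to DEFAULT_AI_DEPTH
// when omitted, as handled at the top of WIP_CalculateGOAPAction.
public void OnAIUnitActivated(Unit activeUnit)
{
    // Plan with the default search depth; pass an explicit depth to trade
    // per-turn calculation time for longer look-ahead.
    var plannedAction = WIP_CalculateGOAPAction(activeUnit);
    ExecuteAction(plannedAction);
}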