/// <summary>
/// Defines the decision-making logic of the agent. Given the information
/// about the agent, returns a vector of actions.
/// </summary>
/// <returns>Vector action vector; index 0 holds the encoded best move.</returns>
/// <param name="vectorObs">The vector observations of the agent.</param>
/// <param name="visualObs">The cameras the agent uses for visual observations.</param>
/// <param name="reward">The reward the agent received at the previous step.</param>
/// <param name="done">Whether or not the agent is done.</param>
/// <param name="memory">
/// The memories stored from the previous step with
/// <see cref="MakeMemory(List{float}, List{Texture2D}, float, bool, List{float})"/>
/// </param>
public override float[] Decide(List<float> vectorObs, List<Texture2D> visualObs, float reward, bool done, List<float> memory)
{
    // Lazily resolve scene references the first time Decide runs.
    if (chessGame == null)
    {
        chessGame = FindObjectOfType<ChessGame>();
    }

    if (academy == null)
    {
        academy = FindObjectOfType<ChessAcademy>();
    }

    // FIX: previously the "ai-depth" reset parameter was only read in the
    // `else` branch of the null check above, so it was skipped on the very
    // first call (the call that located the academy) and a stale depth was
    // used for that decision. Read it on every call once the academy exists.
    if (academy != null && academy.resetParameters.ContainsKey("ai-depth"))
    {
        depth = Mathf.FloorToInt(academy.resetParameters["ai-depth"]);
    }

    // Save the current turn and restore it after the search. The save/restore
    // here (and in AIMove) suggests the search or ObservationToChess mutates
    // shared turn state — NOTE(review): confirm against ChessAI.GetBestMove.
    currentTeam = chessGame.GetChess().currentTeam;
    Move bestmove = ChessAI.GetBestMove(ObservationToChess(vectorObs), depth);

    // Action vector sized from the brain's configuration; slot 0 carries
    // the chosen move encoded as a flat index.
    float[] act = new float[brainParameters.vectorActionSize.Length];
    act[0] = MoveToIndex(bestmove);

    chessGame.GetChess().currentTeam = currentTeam;
    return act;
}
/// <summary>
/// Has the built-in search engine play one move on behalf of
/// <paramref name="team"/>, but only when it is actually that team's turn.
/// </summary>
/// <param name="team">The team the AI should move for.</param>
private void AIMove(Team team)
{
    // Guard clause: nothing to do unless it is this team's turn.
    if (chessGame.GetChess().currentTeam != team)
    {
        return;
    }

    Move chosen = ChessAI.GetBestMove(chessGame.GetChess(), depth);

    // Reset the turn to this team before applying the move — the matching
    // save/restore in Decide suggests the search mutates turn state.
    // NOTE(review): confirm against ChessAI.GetBestMove.
    chessGame.GetChess().currentTeam = team;
    chessGame.MakeMove(chosen);
}