Example #1
 public AI_Order(int id, EActionCode code, Vector3 position, Vector3 direction)
 {
     this.CharacterID = id;
     this.ActionCode  = code;
     this.Direction   = direction;
     this.Position    = position;
 }
Example #2
 public AI_Action(EActionCode action_code, AI_Character caster, AI_Character target)
 {
     this.ActionCode = action_code;
     this.Caster     = caster;
     this.Target     = target;
     this.Success    = false;
 }
Example #3
        public AI_ParamEvent(EEventType type, int caster_id, int target_id, EActionCode action_code, float damage)
        {
            this.ActionCode = action_code;
            this.EventType  = type;
            this.CasterID   = caster_id;
            this.TargetID   = target_id;
            this.Damage     = damage;               // ignored for events other than EventTypeDamageDealt
        }
Example #4
 /// <summary>
 /// Gives a new order to a character. Previous orders that were not consumed are overridden.
 /// </summary>
 /// <param name="id"></param>
 /// <param name="code">Action code</param>
 /// <param name="direction">Target direction of the action (optional)</param>
 /// <param name="position">Target position of the action (optional)</param>
 /// <returns>Order to perform</returns>
 public static AI_Order SetOrderForCharacter(int id, EActionCode code, Vector3 position, Vector3 direction)
 {
     lock (Orders)
     {
         AI_Order order = new AI_Order(id, code, position, direction);
         Orders[id] = order;
         return(order);
     }
 }
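Below is a minimal call-site sketch for SetOrderForCharacter; the character ID and vector values are illustrative, not from the source.

    // Hypothetical usage: order character 3 to guard, facing forward from its current spot.
    Vector3 position  = new Vector3(10.0f, 0.0f, 5.0f);   // illustrative position
    Vector3 direction = Vector3.forward;                  // illustrative facing
    AI_Order order = AI_Orders.SetOrderForCharacter(3, EActionCode.ActionCodeGuard, position, direction);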
Example #5
 public AI_DeepLearningSystem()
 {
     this.IsRunning                = false;
     this.LastUpdateTime           = 0.0f;
     this.Characters               = new Dictionary <int, AI_Character>();
     this.Anticipation_ActionCode  = EActionCode.ActionCodeNoAction;
     this.Anticipation_ActionTime  = 0.0f;
     this.Anticipation_Probability = 0.0f;
     this.LoadNetworkState();
 }
Example #6
        /// <summary>
        /// Constructs a new protocol and initializes the protocol header.
        /// </summary>
        /// <param name="request">Level 1: request class</param>
        /// <param name="action">Level 2: action method</param>
        /// <param name="returncode">Level 3: return code</param>
        public ProtocolBytes(ERequestCode request, EActionCode action, EReturnCode returncode = EReturnCode.None)
        {
            RequestCode = request;
            ActionCode  = action;
            ReturnCode  = returncode;

            AddInt((int)RequestCode);
            AddInt((int)ActionCode);
            AddInt((int)ReturnCode);
        }
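A hedged usage sketch follows; ERequestCode.User is an assumed enum member (only the enum types, not their members, appear in this excerpt).

    // Hypothetical usage: ERequestCode.User is an assumption; the action code is from the source enum.
    ProtocolBytes protocol = new ProtocolBytes(ERequestCode.User, EActionCode.ActionCodeNoAction);
    // The three header ints (request, action, return code) are already appended by the constructor;
    // any payload would be added after this point.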
Example #7
    /// <summary>
    /// Trains the anticipation network.
    /// </summary>
    /// <param name="player">Player</param>
    /// <param name="action_code">Code of the action performed</param>
    public void TrainAnticipation(AI_Character player, EActionCode action_code)
    {
        AI_DeepLearningNetwork network;

        float[] outputs = new float[(int)EActionCode.Count];
        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
        {
            outputs[(int)code] = (code == action_code) ? 1.0f : 0.0f;
        }

        foreach (AI_Character c in this.Characters.Values)
        {
            if (c.CharacterType == ECharacterType.CharacterTypeEnemy && c.IsAlive)
            {
                if (player.PreviousAction != null)
                {
                    network = this.Networks_Anticipation_1[player.PreviousAction.ActionCode];
                    network.Learn(new float[] {
                        Vector3.Distance(c.Position, player.Position),
                        Vector3.Angle(player.Direction, c.Position - player.Position),
                        Vector3.Angle(c.Direction, player.Position - c.Position),
                        this.LastUpdateTime - player.PreviousAction.CastTime
                    }, outputs);
                }
                if (c.CurrentAction != null)
                {
                    network = this.Networks_Anticipation_2[c.CurrentAction.ActionCode];
                    network.Learn(new float[] {
                        Vector3.Distance(c.Position, player.Position),
                        Vector3.Angle(player.Direction, c.Position - player.Position),
                        Vector3.Angle(c.Direction, player.Position - c.Position),
                        this.LastUpdateTime - c.CurrentAction.CastTime
                    }, outputs);
                }
                else
                {
                    network = this.Networks_Anticipation_2[EActionCode.ActionCodeNoAction];
                    network.Learn(new float[] {
                        Vector3.Distance(c.Position, player.Position),
                        Vector3.Angle(player.Direction, c.Position - player.Position),
                        Vector3.Angle(c.Direction, player.Position - c.Position),
                        0.0f                    // no current action, so no cast-time offset
                    }, outputs);
                }
            }
        }
    }
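The outputs array above is a one-hot training target: 1.0 for the action the player actually performed, 0.0 for every other code. A hedged call-site sketch, assuming a system instance and player reference already exist:

    // Hypothetical call site: report that the player just used a light attack.
    system.TrainAnticipation(player, EActionCode.ActionCodeAttackLight);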
Example #8
    /// <summary>
    /// Called when a character starts a new action.
    /// </summary>
    /// <param name="caster">Caster</param>
    /// <param name="action_code">Code of the action (see EActionCode)</param>
    public void OnCharacterActionStarted(AI_Character caster, EActionCode action_code)
    {
        Debug.Log("[AI] Action started (Caster ID: " + caster.CharacterID + ", Action Code: " + action_code + ").");
        AI_Character target = null;

        // Determining the target
        switch (caster.CharacterType)
        {
        case ECharacterType.CharacterTypeEnemy:
            Debug.Assert(this.Characters.ContainsKey(AI_Params.PlayerID), "[AI] Invalid player ID (" + AI_Params.PlayerID + ").");
            target = this.Characters[AI_Params.PlayerID];
            break;

        case ECharacterType.CharacterTypePlayer:
            float best_score = 10000.0f;
            foreach (AI_Character c in this.Characters.Values)
            {
                if (c.CharacterID == caster.CharacterID)
                {
                    continue;                   // a character cannot target itself
                }
                // Score combines distance and facing angle; the lowest total wins.
                float score = Vector3.Distance(c.Position, caster.Position) + Mathf.Abs(Vector3.Angle(caster.Direction, c.Position - caster.Position));
                if (score < best_score)
                {
                    best_score = score;
                    target     = c;
                }
            }
            break;
        }

        // Registering the action
        AI_Action action = caster.OnActionStarted(this.LastUpdateTime, action_code, target);

        foreach (AI_Character c in this.Characters.Values)
        {
            if ((c.CurrentAction != null) && (c.CurrentAction.Target != null) && (c.CurrentAction.Target.CharacterID == caster.CharacterID))
            {
                c.CurrentAction.OnTargetActionStarted(action);
            }
        }

        // Training anticipation network
        if (caster.CharacterType == ECharacterType.CharacterTypePlayer)
        {
            this.TrainAnticipation(caster, action_code);
        }
    }
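A hedged call-site sketch; the ai and enemy references are assumptions standing in for whatever the game layer holds:

    // Hypothetical hook from the combat/animation layer when an enemy starts a heavy attack.
    ai.OnCharacterActionStarted(enemy, EActionCode.ActionCodeAttackHeavy);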
Example #9
        /// <summary>
        /// Dispatches an incoming message to the matching controller method.
        /// </summary>
        /// <param name="conn">Client connection</param>
        /// <param name="protocolBase">Decoded protocol</param>
        private void HandleMsg(Conn conn, ProtocolBase protocolBase)
        {
            ERequestCode requestCode = protocolBase.RequestCode;
            EActionCode  actionCode  = protocolBase.ActionCode;

            Console.WriteLine($"收到来自[{conn.GetAddress()}]协议:[{ requestCode.ToString()}---{actionCode.ToString()}]");
            BaseController controller;

            if (!requestDict.TryGetValue(requestCode, out controller))
            {
                Console.WriteLine($"【警告】未找到Request:{requestCode.ToString()}对应的方法");
                return;
            }
            MethodInfo method = controller.GetType().GetMethod(actionCode.ToString());

            if (method == null)
            {
                Console.WriteLine($"【警告】未找到Action:{actionCode.ToString()}对应的方法");
                return;
            }
            object[] objs = new object[] { conn, protocolBase };
            method.Invoke(controller, objs);
        }
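Dispatch only works if requestDict maps each ERequestCode to a controller whose public method names match EActionCode members. A minimal registration sketch; UserController and ERequestCode.User are assumptions, not types from the source:

    // Hypothetical registry setup: UserController and ERequestCode.User are assumptions.
    private readonly Dictionary<ERequestCode, BaseController> requestDict =
        new Dictionary<ERequestCode, BaseController>();

    private void RegisterControllers()
    {
        requestDict.Add(ERequestCode.User, new UserController());
    }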
Example #10
 public static void PushEvent(EEventType type, int caster_id, int target_id, EActionCode action_code)
 {
     PushEvent(type, caster_id, target_id, action_code, 0.0f);
 }
Example #11
 /// <summary>
 /// [Game thread] Adds information about an event that occurred since the last update.
 /// </summary>
 /// <param name="type">Type of event (refer to the EEventType enum for valid event types)</param>
 /// <param name="caster_id">ID of the caster (optional, AI_Params.UnknownID if undetermined)</param>
 /// <param name="target_id">ID of the target (optional, AI_Params.UnknownID if undetermined)</param>
 /// <param name="action_code">Code of the action (optional, refer to the EActionCode enum for valid action codes)</param>
 /// <param name="damage">Damage dealt (optional)</param>
 public static void PushEvent(EEventType type, int caster_id, int target_id, EActionCode action_code, float damage)
 {
     Instance.Events.Add(new AI_ParamEvent(type, caster_id, target_id, action_code, damage));
 }
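A usage sketch with illustrative IDs and damage; the class that exposes PushEvent is not shown in this excerpt, so the AI_Params qualifier here is an assumption:

    // Hypothetical call: caster 3 dealt 25 damage to the player (ID 0 is illustrative).
    AI_Params.PushEvent(EEventType.EventTypeDamageDealt, 3, 0, EActionCode.ActionCodeAttackLight, 25.0f);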
Example #12
 /// Character started an action
 public AI_Action OnActionStarted(float game_time, EActionCode action_code, AI_Character target)
 {
     this.CurrentAction = new AI_Action(action_code, this, target);
     this.CurrentAction.OnActionStarted(game_time);
     return(this.CurrentAction);
 }
Example #13
    /// <summary>
    /// Decides the next order for each AI character.
    /// </summary>
    public void ThinkAction()
    {
        Debug.Assert(this.Characters.ContainsKey(AI_Params.PlayerID), "[AI] Invalid player ID (" + AI_Params.PlayerID + ").");
        AI_Character player = this.Characters[AI_Params.PlayerID];

        foreach (AI_Character c in this.Characters.Values)
        {
            if (!c.IsPlayer && c.IsAlive && c.IsIdle)
            {
                AI_DeepLearningNetwork network;
                float[] outputs;

                float[] actions = new float[(int)EActionCode.Count];
                for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
                {
                    actions[(int)code] = 0.0f;
                }

                for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
                {
                    // Action
                    network = this.Networks_Action[code];
                    outputs = network.Think(new float[] {
                        Vector3.Distance(c.Position, player.Position),
                        Vector3.Angle(c.Direction, player.Position - c.Position),
                        Vector3.Angle(player.Direction, c.Position - player.Position)
                    }, network.Variance);
                    actions[(int)code] = outputs[0];

                    // Reaction
                    if (player.CurrentAction != null)
                    {
                        network = this.Networks_Reaction[code][player.CurrentAction.ActionCode];
                        outputs = network.Think(new float[] {
                            Vector3.Distance(c.Position, player.Position),
                            Vector3.Angle(c.Direction, player.Position - c.Position),
                            Vector3.Angle(player.Direction, c.Position - player.Position),
                            this.LastUpdateTime - player.CurrentAction.CastTime
                        }, network.Variance);
                        actions[(int)code] = actions[(int)code] * outputs[0] - outputs[1];
                    }
                    else
                    {
                        network = this.Networks_Reaction[code][this.Anticipation_ActionCode];
                        outputs = network.Think(new float[] {
                            Vector3.Distance(c.Position, player.Position),
                            Vector3.Angle(c.Direction, player.Position - c.Position),
                            Vector3.Angle(player.Direction, c.Position - player.Position),
                            this.LastUpdateTime - this.Anticipation_ActionTime
                        }, network.Variance);
                        actions[(int)code] = actions[(int)code] * outputs[0] - outputs[1];
                    }
                }

                // Choosing the best action
                float       best_action_value = 0.0f;
                EActionCode best_action_code  = EActionCode.ActionCodeNoAction;
                for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
                {
                    if (best_action_value < actions[(int)code])
                    {
                        best_action_value = actions[(int)code];
                        best_action_code  = code;
                    }
                }

                // Setting the new order
                switch (best_action_code)
                {
                case EActionCode.ActionCodeAttackLight:
                case EActionCode.ActionCodeAttackHeavy:
                case EActionCode.ActionCodeGuard:
                    Debug.Log("[AI] Set order for character " + c.CharacterID + " : " + best_action_code + ".");
                    AI_Orders.SetOrderForCharacter(c.CharacterID, best_action_code, c.Position, Vector3.Normalize(player.Position - c.Position));
                    break;

                case EActionCode.ActionCodeRoll:
                    Debug.Log("[AI] Set order for character " + c.CharacterID + " : " + best_action_code + ".");
                    float   left_or_right    = (UnityEngine.Random.value >= 0.5) ? 1.0f : -1.0f;
                    Vector3 direction_player = Vector3.Normalize(player.Position - c.Position);
                    Vector3 direction_roll   = new Vector3(-direction_player.x, direction_player.y, direction_player.z) * left_or_right;
                    AI_Orders.SetOrderForCharacter(c.CharacterID, best_action_code, c.Position, direction_roll);
                    break;

                case EActionCode.ActionCodeNoAction:
                    if (Vector3.Distance(player.Position, c.Position) < 100.0f)
                    {
                        Debug.Log("[AI] Set order for character " + c.CharacterID + " : " + best_action_code + " (stay in position).");
                        AI_Orders.SetOrderForCharacter(c.CharacterID, EActionCode.ActionCodeNoAction, c.Position, Vector3.Normalize(player.Position - c.Position));
                    }
                    else
                    {
                        Debug.Log("[AI] Set order for character " + c.CharacterID + " : " + best_action_code + " (move towards player).");
                        AI_Orders.SetOrderForCharacter(c.CharacterID, EActionCode.ActionCodeNoAction, player.Position, Vector3.Normalize(player.Position - c.Position));
                    }
                    break;
                }
            }
        }
    }
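Each candidate action's score is the action network's hit probability scaled by the reaction network's expected damage dealt, minus the expected damage taken. A worked example with illustrative numbers:

    // Illustrative numbers only, mirroring actions[code] = actions[code] * outputs[0] - outputs[1].
    float hitProbability = 0.8f;    // action network output
    float damageDealt    = 2.0f;    // reaction network output 0 (average damage dealt)
    float damageTaken    = 0.5f;    // reaction network output 1 (average damage taken)
    float score = hitProbability * damageDealt - damageTaken;   // 0.8 * 2.0 - 0.5 = 1.1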
Example #14
    /// <summary>
    /// Anticipates the next player action.
    /// </summary>
    public void ThinkAnticipation()
    {
        Debug.Assert(this.Characters.ContainsKey(AI_Params.PlayerID), "[AI] Invalid player ID (" + AI_Params.PlayerID + ").");
        AI_Character player  = this.Characters[AI_Params.PlayerID];
        bool         changed = false;

        // Training guard
        if ((player.CurrentAction != null) && (player.CurrentAction.ActionCode == EActionCode.ActionCodeGuard))
        {
            this.TrainAnticipation(player, EActionCode.ActionCodeGuard);
        }

        // Training idle
        if ((player.CurrentAction == null) || (player.CurrentAction.ActionCode == EActionCode.ActionCodeNoAction))
        {
            this.TrainAnticipation(player, EActionCode.ActionCodeNoAction);
        }

        // Decaying anticipation probability
        if (this.LastUpdateTime > this.Anticipation_ActionTime)
        {
            this.Anticipation_Probability -= this.LastUpdateTime - this.Anticipation_ActionTime;
            this.Anticipation_ActionTime   = this.LastUpdateTime;
            // Reset anticipation after a while
            if (this.Anticipation_Probability < 0.0f)
            {
                this.Anticipation_Probability = 0.0f;
                this.Anticipation_ActionCode  = EActionCode.ActionCodeNoAction;
            }
        }

        float[] outputs;
        AI_DeepLearningNetwork network;

        // Trying different anticipation timings
        for (float t = 0.1f; t <= 1.0f; t += 0.1f)
        {
            // We will add the anticipation results from each AI for each action in this array
            float[] actions = new float[(int)EActionCode.Count];
            for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
            {
                actions[(int)code] = 0.0f;
            }

            // Each AI character participates in the anticipation
            foreach (AI_Character c in this.Characters.Values)
            {
                if (c.CharacterType == ECharacterType.CharacterTypeEnemy && c.IsAlive)
                {
                    // Anticipation from current/previous player action
                    if (player.CurrentAction != null)
                    {
                        network = this.Networks_Anticipation_1[player.CurrentAction.ActionCode];
                        outputs = network.Think(new float[] {
                            Vector3.Distance(c.PositionIn(t), player.PositionIn(t)),
                            Vector3.Angle(player.Direction, c.PositionIn(t) - player.PositionIn(t)),
                            Vector3.Angle(c.Direction, player.PositionIn(t) - c.PositionIn(t)),
                            t + this.LastUpdateTime - player.CurrentAction.CastTime
                        }, network.Variance);
                        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
                        {
                            actions[(int)code] += outputs[(int)code];
                        }
                    }
                    else if (player.PreviousAction != null)
                    {
                        network = this.Networks_Anticipation_1[player.PreviousAction.ActionCode];
                        outputs = network.Think(new float[] {
                            Vector3.Distance(c.PositionIn(t), player.PositionIn(t)),
                            Vector3.Angle(player.Direction, c.PositionIn(t) - player.PositionIn(t)),
                            Vector3.Angle(c.Direction, player.PositionIn(t) - c.PositionIn(t)),
                            t + this.LastUpdateTime - player.PreviousAction.CastTime
                        }, network.Variance);
                        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
                        {
                            actions[(int)code] += outputs[(int)code];
                        }
                    }

                    // Anticipation from current AI action
                    if (c.CurrentAction != null)
                    {
                        network = this.Networks_Anticipation_2[c.CurrentAction.ActionCode];
                        outputs = network.Think(new float[] {
                            Vector3.Distance(c.PositionIn(t), player.PositionIn(t)),
                            Vector3.Angle(player.Direction, c.PositionIn(t) - player.PositionIn(t)),
                            Vector3.Angle(c.Direction, player.PositionIn(t) - c.PositionIn(t)),
                            t + this.LastUpdateTime - c.CurrentAction.CastTime
                        }, network.Variance);
                        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
                        {
                            actions[(int)code] += outputs[(int)code];
                        }
                    }
                    else
                    {
                        network = this.Networks_Anticipation_2[EActionCode.ActionCodeNoAction];
                        outputs = network.Think(new float[] {
                            Vector3.Distance(c.PositionIn(t), player.PositionIn(t)),
                            Vector3.Angle(player.Direction, c.PositionIn(t) - player.PositionIn(t)),
                            Vector3.Angle(c.Direction, player.PositionIn(t) - c.PositionIn(t)),
                            t
                        }, network.Variance);
                        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
                        {
                            actions[(int)code] += outputs[(int)code];
                        }
                    }
                }
            }

            // Choosing the most likely action
            for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
            {
                if (this.Anticipation_Probability < actions[(int)code])
                {
                    this.Anticipation_Probability = actions[(int)code];
                    this.Anticipation_ActionCode  = code;
                    this.Anticipation_ActionTime  = this.LastUpdateTime + t;
                    changed = true;
                }
            }
        }

        if (changed)
        {
            Debug.Log("[AI] Anticipating player action " + this.Anticipation_ActionCode + " in " + (this.Anticipation_ActionTime - this.LastUpdateTime) + "s.");
        }
    }
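The decay step above subtracts the elapsed seconds directly from the stored probability once the predicted action time has passed, so a stale prediction fades within a second or two. A worked example with illustrative values:

    // Illustrative decay: once the predicted time has passed, the probability drops by the overshoot.
    float anticipationProbability = 0.6f;
    float overshoot = 0.25f;                      // LastUpdateTime - Anticipation_ActionTime
    anticipationProbability -= overshoot;         // 0.35f; reset to no-action once it goes negative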
Example #15
    /// <summary>
    /// Loads all neural networks.
    /// </summary>
    public void LoadNetworkState()
    {
        Debug.Log("[AI] Loading network state...");

        // Loading network - action
        this.Networks_Action = new Dictionary <EActionCode, AI_DeepLearningNetwork>();
        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
        {
            this.Networks_Action[code] = new AI_DeepLearningNetwork("AI_Action_" + code, new float[3, 2] {
                { 0.0f, 10000.0f },             // Input 1: Distance to player
                { -180.0f, +180.0f },           // Input 2: Angle to player from caster
                { -180.0f, +180.0f }            // Input 3: Angle to caster from player
            }, new float[1, 2] {
                { 0.0f, 1.0f }                  // Output 1: Probability of hitting
            });
        }

        // Loading network - reaction
        this.Networks_Reaction = new Dictionary <EActionCode, Dictionary <EActionCode, AI_DeepLearningNetwork> >();
        for (EActionCode code1 = EActionCode.ActionCodeNoAction; code1 < EActionCode.Count; code1++)
        {
            this.Networks_Reaction[code1] = new Dictionary <EActionCode, AI_DeepLearningNetwork>();
            for (EActionCode code2 = EActionCode.ActionCodeNoAction; code2 < EActionCode.Count; code2++)
            {
                this.Networks_Reaction[code1][code2] = new AI_DeepLearningNetwork("AI_Reaction_" + code1 + "_" + code2, new float[4, 2] {
                    { 0.0f, 10000.0f },         // Input 1: Distance to player
                    { -180.0f, +180.0f },       // Input 2: Angle to player from caster
                    { -180.0f, +180.0f },       // Input 3: Angle to caster from player
                    { -3.0f, +3.0f }            // Input 4: Time advantage for player
                }, new float[2, 2] {
                    { -3.0f, +3.0f },           // Output 1: Average damage dealt
                    { -3.0f, +3.0f }            // Output 2: Average damage taken
                });
            }
        }

        // Loading network - reaction - roll direction

        // Loading network - anticipation
        float[,] outputs = new float[(int)EActionCode.Count, 2];
        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
        {
            outputs[(int)code, 0] = 0.0f;
            outputs[(int)code, 1] = 1.0f;
        }
        this.Networks_Anticipation_1 = new Dictionary <EActionCode, AI_DeepLearningNetwork>();
        this.Networks_Anticipation_2 = new Dictionary <EActionCode, AI_DeepLearningNetwork>();
        for (EActionCode code = EActionCode.ActionCodeNoAction; code < EActionCode.Count; code++)
        {
            this.Networks_Anticipation_1[code] = new AI_DeepLearningNetwork("AI_Anticipation_1_" + code, new float[4, 2] {
                { 0.0f, 10000.0f },             // Input 1: Distance to player
                { -180.0f, +180.0f },           // Input 2: Angle to caster from player
                { -180.0f, +180.0f },           // Input 3: Angle to player from caster
                { -3.0f, +3.0f }                // Input 4: Time since last action started
            }, outputs);                        // Outputs: probability of each action code
            this.Networks_Anticipation_2[code] = new AI_DeepLearningNetwork("AI_Anticipation_2_" + code, new float[4, 2] {
                { 0.0f, 10000.0f },             // Input 1: Distance to player
                { -180.0f, +180.0f },           // Input 2: Angle to caster from player
                { -180.0f, +180.0f },           // Input 3: Angle to player from caster
                { -3.0f, +3.0f }                // Input 4: Caster action advantage
            }, outputs);                        // Outputs: probability of each action code
        }

        Debug.Log("[AI] Loading network state - Done.");
    }
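Every network above follows the same constructor shape: a name (used when persisting state), an [n, 2] array of input ranges, and an [m, 2] array of output ranges. A minimal sketch of that pattern, using only the calls seen in this listing; the two-input network itself is hypothetical:

    // Hypothetical network: same constructor/Think pattern as above, not a network from the source.
    AI_DeepLearningNetwork demo = new AI_DeepLearningNetwork("AI_Demo", new float[2, 2] {
        { 0.0f, 10000.0f },     // Input 1: distance
        { -180.0f, +180.0f }    // Input 2: angle
    }, new float[1, 2] {
        { 0.0f, 1.0f }          // Output 1: probability
    });
    float[] result = demo.Think(new float[] { 500.0f, 45.0f }, demo.Variance);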
Example #16
 public static AI_Order SetOrderForCharacter(int id, EActionCode code)
 {
     return(SetOrderForCharacter(id, code, Vector3.zero));
 }