private void UpdateEnemyInfo(EnemyInfo enemyInfoToUpdate, ScannedRobotEvent e)
        {
            // The scan event reports a bearing relative to our own heading, so
            // adding our heading gives the absolute bearing to the enemy.
            var currentTargetPosition = Utilities.CalculateRobotPosition(
                _position,
                Utils.NormalAbsoluteAngle(this.HeadingRadians + e.BearingRadians),
                e.Distance);

            enemyInfoToUpdate.LastPosition = currentTargetPosition;
            enemyInfoToUpdate.RecordedTime = Time;
            enemyInfoToUpdate.LastDistance = e.Distance;
            enemyInfoToUpdate.LastEnergy   = e.Energy;
        }
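
        // A minimal sketch of what Utilities.CalculateRobotPosition presumably
        // does (the real helper lives elsewhere in the project): project a point
        // from our own coordinates along the absolute bearing. Robocode measures
        // headings clockwise from north, so X uses Sin and Y uses Cos. The name,
        // signature, and PointF type below are illustrative assumptions only.
        private static System.Drawing.PointF CalculateRobotPositionSketch(
            System.Drawing.PointF origin, double absoluteBearing, double distance)
        {
            return new System.Drawing.PointF(
                (float)(origin.X + Math.Sin(absoluteBearing) * distance),
                (float)(origin.Y + Math.Cos(absoluteBearing) * distance));
        }
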
        public SimpleAimBot()
            : base()
        {
            var nrStatesQLearning = 9;

            ResetFirePowerLevels();

            _enemy           = new EnemyInfo();
            _robotController = new AbstractRobot(this);

            if (!_hasCreatedScores)
            {
                _aimLearner = new QLearning(
                    Segmentation.DistanceClose | Segmentation.DistanceFar |
                    Segmentation.VelocityFast | Segmentation.VelocitySlow,
                    nrStatesQLearning,
                    0.0d);

                // Keep the discount factor at 1; exploration is controlled by the
                // softmax temperature, which decays step by step toward MinTemperature.
                _aimLearner.DiscountFactor           = 1.0f;
                _aimLearner.Temperature              = .2f;
                _aimLearner.TemperatureDecraseAmount = _aimLearner.Temperature / 20f;
                _aimLearner.MinTemperature           = .005f;
                _aimLearner.UseSoftmaxSelection      = true;

                // Initialise all scores optimistically (positive) so that repeated
                // misses gradually drive the score of a bad aim action down.
                _aimLearner.UpdateAllLearningScores(1.0d);
                _aimLearner.ResetFavorableActions(1);

                _hasCreatedScores = true;
            }
            else
            {
                // Re-heat the temperature slightly (0.05, i.e. five decay steps'
                // worth) because the opponent may have switched dodging strategy
                // between rounds.
                _aimLearner.Temperature += (.2f / 20f) * 5;
            }
        }
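
        // A minimal sketch of the Boltzmann (softmax) selection that the
        // constructor enables via UseSoftmaxSelection, assuming the QLearning
        // class implements it in the usual way; the method name and signature
        // here are illustrative, not the class's actual API. A higher
        // temperature flattens the distribution (more exploration); a lower one
        // concentrates probability on the best-scoring action.
        private static int SelectActionSoftmaxSketch(double[] qValues, double temperature, Random rng)
        {
            var weights = new double[qValues.Length];
            double total = 0;
            for (int i = 0; i < qValues.Length; i++)
            {
                weights[i] = Math.Exp(qValues[i] / temperature);
                total += weights[i];
            }

            // Roulette-wheel sampling over the softmax weights.
            double sample = rng.NextDouble() * total;
            double cumulative = 0;
            for (int i = 0; i < weights.Length; i++)
            {
                cumulative += weights[i];
                if (sample <= cumulative)
                    return i;
            }
            return weights.Length - 1; // guard against floating-point rounding
        }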