Code example #1
0
File: AStarAlgo.cs — Project: TimLenchanteur/IA_UQAC
            // Builds a successor node by applying the given action to the previous node's state.
            public AStarNode(AStarNode previousNode, VacuumAgent.VacuumAction action)
            {
                _action = action;
                _state  = new CustomEnvState(previousNode._state, action);

                // Path cost g(n): cost of the previous node plus the step cost.
                // A GrabClean adds 2 because it stands for two elementary actions.
                int stepCost = previousNode._action == VacuumAgent.VacuumAction.GrabClean ? 2 : 1;
                _costToRechNode = previousNode._costToRechNode + stepCost;

                // Heuristic h(n): 0 once cleaning, 1 when grab-cleaning,
                // otherwise the Euclidean-distance-based action estimate.
                int heuristic;
                if (action == VacuumAgent.VacuumAction.Clean)
                {
                    heuristic = 0;
                }
                else if (action == VacuumAgent.VacuumAction.GrabClean)
                {
                    heuristic = 1;
                }
                else
                {
                    heuristic = _state.EuclidianActionHeuristic();
                }

                // f(n) = g(n) + h(n)
                _costToReachSolution = _costToRechNode + heuristic;
            }
Code example #2
0
File: Problem.cs — Project: TimLenchanteur/IA_UQAC
 // True when the result state matches the goal state on every attribute
 // that was marked for the equality test.
 public bool HasBeenSolved(CustomEnvState resultState)
 {
     bool solved = _wishedState.IsEqualRelativeToTestedAttribute(resultState);
     return solved;
 }
Code example #3
0
File: Problem.cs — Project: TimLenchanteur/IA_UQAC
 // Captures a search problem as an (initial state, goal state) pair.
 public Problem(CustomEnvState initialState, CustomEnvState wishedState)
 {
     _wishedState  = wishedState;
     _initialState = initialState;
 }
Code example #4
0
File: AStarAlgo.cs — Project: TimLenchanteur/IA_UQAC
 // Root node of the A* search: zero path cost, so f(n) is the heuristic alone.
 public AStarNode(CustomEnvState initialState)
 {
     _state               = initialState;
     _costToRechNode      = 0;
     _costToReachSolution = initialState.EuclidianActionHeuristic();
 }
Code example #5
0
        /// <summary>
        /// Main agent loop. Waits for the environment to initialize, then alternates
        /// between planning (goal formulation + search) and executing the planned
        /// actions, re-planning whenever the plan is exhausted or the action cycle
        /// budget is reached. Never returns.
        /// </summary>
        public static void VacuumProc()
        {
            // Wait for the environment to finish initializing.
            // Sleep briefly instead of busy-spinning so we do not burn a CPU core.
            while (!Environment._init)
            {
                Thread.Sleep(10);
            }
            Init();

            // NOTE(review): removed leftover debug output `Console.WriteLine(3 & Environment.DIRT);`
            // — it printed a constant expression once at startup and served no purpose.

            Stack <VacuumAction> intent = new Stack <VacuumAction>();

            _actionCycle = 0;
            while (true)
            {
                if (intent.Count == 0 || _actionsCount >= _actionCycle)
                {
                    // Snapshot the agent's belief of the current environment state.
                    int[,] belief = Environment._grid;
                    CustomEnvState currentState = new CustomEnvState(belief, _pos);
                    // The agent only plans/moves when at least one room is dirty.
                    if (currentState.NbOfDirtyRoom > 0)
                    {
                        // Switch to the newly requested search algorithm, if any.
                        if (_nextAlgo != _currentAlgorithm)
                        {
                            _currentAlgorithm = _nextAlgo;
                            MainWindow.Instance.Dispatcher.Invoke(() => MainWindow.Instance.UpdateAlgo(_currentAlgorithm.ToString()));
                        }

                        // Performance measurement: record the last cycle and, every
                        // _learningCycle cycles, recompute the optimal action cycle.
                        if (_actionsCount != 0)
                        {
                            if (_learningCount >= _learningCycle - 1)
                            {
                                _optimalActionCycle = ComputeOptimalActionCycle();
                                MainWindow.Instance.Dispatcher.Invoke(() => MainWindow.Instance.UpdateOptimalActions());
                                _learningCount = 0;
                            }
                            else
                            {
                                _learningCount++;
                            }
                            // Read the performance once so the tracked value and the
                            // UI value cannot diverge between the two calls.
                            float perf = Environment.GivePerf();
                            _lastActionsCycleTrack.Add(new KeyValuePair <int, float>(_actionsCount, perf));
                            MainWindow.Instance.Dispatcher.Invoke(() => MainWindow.Instance.AddLearnedAction(_actionsCount, perf));
                            Environment.ResetPerf();
                            _actionsCount = 0;
                        }

                        // Goal formulation: the goal of this agent is to clean
                        // exactly one room (one fewer dirty room than now).
                        CustomEnvState wishedState = new CustomEnvState(belief, _pos);
                        wishedState.DefineWishedRoomDirtyAs(currentState.NbOfDirtyRoom - 1);
                        wishedState.MarkAttributeForEquality(CustomEnvState.NUMBER_DIRTY_ROOM_ATTRIBUTE);
                        // Problem formulation.
                        Problem problem = new Problem(currentState, wishedState);
                        // Search for a plan with the currently selected algorithm.
                        intent = Explore(problem, _currentAlgorithm);
                        // Update the action cycle: either the full plan length, or the
                        // learned optimum plus a weighted random slack up to the plan length.
                        _actionCycle = _optimalActionCycle == 0 ? intent.Count : _optimalActionCycle + WeightedRandom(0, Math.Max(intent.Count - _optimalActionCycle, 0));
                        MainWindow.Instance.Dispatcher.Invoke(() => MainWindow.Instance.UpdateActionCycle());
                        MainWindow.Instance.Dispatcher.Invoke(() => MainWindow.Instance.UpdateComputingState(""));
                    }
                }
                else if (_actionsCount < _actionCycle)
                {
                    _actionsCount++;
                    // Pop and execute the next action of the plan.
                    VacuumAction action = intent.Pop();
                    Execute(action);
                    Thread.Sleep(700);
                }
            }
        }
Code example #6
0
 // Successor node: its state is the previous state with the action applied.
 public BFSNode(CustomEnvState previousState, VacuumAgent.VacuumAction action)
 {
     _state  = new CustomEnvState(previousState, action);
     _action = action;
 }
Code example #7
0
 // Root node of the BFS search, wrapping the initial state; no producing action.
 public BFSNode(CustomEnvState initialState)
 {
     this._state = initialState;
 }