/// <summary>
/// Builds an LSI (Linear Side Information) search configured for Tic-Tac-Toe.
/// </summary>
/// <param name="sideInformationStrategy">Strategy that creates the side information used to guide sampling.</param>
/// <param name="samplingStrategy">Strategy that samples actions from the side information (an OddmentTable over moves).</param>
/// <returns>A fully wired LSI search instance.</returns>
public override LSI <object, TicTacToeState, TicTacToeMove, object, TreeSearchNode <TicTacToeState, TicTacToeMove>, OddmentTable <int> > SetupLSI(ISideInformationStrategy <object, TicTacToeState, TicTacToeMove, object, TicTacToeMove, OddmentTable <int> > sideInformationStrategy, ILSISamplingStrategy <TicTacToeState, TicTacToeMove, OddmentTable <int> > samplingStrategy)
        {
            // Playouts are delegated to the shared Agent; evaluation reuses the strategy created in Setup().
            var playoutStrategy    = new AgentPlayout <object, TicTacToeState, TicTacToeMove, object, TicTacToeMove>(Agent);
            var evaluationStrategy = EvaluationStrategy;

            // Assemble the search from the supplied side-information and sampling strategies
            // (this was previously left commented out with an unconditional NotImplementedException).
            return new LSI <object, TicTacToeState, TicTacToeMove, object, TreeSearchNode <TicTacToeState, TicTacToeMove>, OddmentTable <int> >(sideInformationStrategy, samplingStrategy, playoutStrategy, evaluationStrategy, GameLogic);
        }
        /// <summary>
        /// Initializes the state, agent, game logic and default search strategies.
        /// </summary>
        public void Setup()
        {
            // Fresh game state; the same logic type serves both as rules engine and as playout agent.
            State     = new TicTacToeState();
            GameLogic = new TicTacToeGameLogic();
            Agent     = new TicTacToeGameLogic();

            // Evaluation scores terminal states as win=1, loss=-10, draw=0; playouts are driven by the agent.
            EvaluationStrategy = new WinLossDrawStateEvaluation <object, TicTacToeState, TicTacToeMove, object, TicTacToeMove, TreeSearchNode <TicTacToeState, TicTacToeMove> >(1, -10, 0);
            PlayoutStrategy    = new AgentPlayout <object, TicTacToeState, TicTacToeMove, object, TicTacToeMove>(Agent);
        }
 /// <summary>
 /// Creates a builder pre-configured with the default NMCTS strategies.
 /// </summary>
 public NMCTSBuilder()
 {
     // Defaults: random-agent playouts and chance-based exploration.
     PlayoutStrategy     = new AgentPlayout <D, P, A, S, Sol>(new RandomAgent <D, P, A, S, Sol>());
     ExplorationStrategy = new ChanceExploration <D, P, A, S, Sol>(Constants.DEFAULT_EXPLORE_CHANCE);
     // A global policy of 0 means pure-greedy selection, i.e. e-greedy with e = 0.
     PolicyGlobal        = 0;
 }
// Ejemplo n.º 4 (score: 0) — separator left over from a code-search scrape; kept as a comment so the file remains syntactically valid.
 /// <summary>
 /// Creates a builder whose playouts are simulated by a random agent.
 /// </summary>
 public FlatMCSBuilder()
 {
     // Default playout strategy: an agent that picks moves at random.
     PlayoutStrategy = new AgentPlayout <D, P, A, S, Sol>(new RandomAgent <D, P, A, S, Sol>());
 }