Example #1
0
        /// <summary>
        /// Runs one opening-move training pass for the given board.
        /// Training is currently always performed from White's perspective.
        /// </summary>
        /// <param name="boardState">Board position to train on.</param>
        public void ApplyBoard(BoardState boardState)
        {
            // NOTE(review): the side to play is hard-coded to White for now.
            LearnHowToStart(boardState, NeuralNetwork, Player.White);
        }
Example #2
0
        /// <summary>
        /// Trains <paramref name="neuralNetwork"/> toward a valid opening interaction:
        /// the first half of the output layer encodes which square to click first
        /// (any square holding one of the player's own pieces), the second half
        /// encodes which square to click second (any allowed destination of any
        /// of those pieces). Runs a fixed number of forward/backprop iterations.
        /// </summary>
        /// <param name="boardState">Board position used to derive inputs and targets.</param>
        /// <param name="neuralNetwork">Network to train in place.</param>
        /// <param name="playedAs">Side whose pieces/moves form the training targets.</param>
        void LearnHowToStart(
            BoardState boardState,
            NeuralNetwork neuralNetwork,
            Player playedAs)
        {
            const double learningRate = .02d;

            // Fixed iteration count; replace with a convergence criterion
            // once one is defined.
            for (int a = 0; a < 100; a++)
            {
                var inputs  = GetInputsFromBoard(boardState, playedAs);
                var outputs = neuralNetwork.ForwardPass(inputs);

                var training        = new double[outputs.Length];
                var error           = 0d;
                var allAllowedMoves = new List<(int y, int x)>();

                // First half of the outputs: target 1 on every square that
                // holds one of the player's own pieces ("click first" targets).
                // Output indices map to an 8x8 board, row-major.
                for (int i = 0; i < outputs.Length / 2; i++)
                {
                    int y = i / 8;
                    int x = i % 8;

                    var piece = boardState.Squares[y, x];

                    if (piece != null && piece.ControlledBy == playedAs)
                    {
                        training[i] = 1;

                        // For now treat every move of every own piece as a
                        // valid second click.
                        allAllowedMoves.AddRange(piece.GetAllowedMoves(boardState));
                    }

                    error += Math.Pow(outputs[i] - training[i], 2);
                }

                // Second half of the outputs: target 1 on every square that is
                // an allowed destination ("click second" targets).
                for (int i = outputs.Length / 2; i < outputs.Length; i++)
                {
                    int y = (i - outputs.Length / 2) / 8;
                    int x = i % 8;

                    if (allAllowedMoves.Any(l => l.y == y && l.x == x))
                    {
                        training[i] = 1;
                    }

                    // FIX: previously accumulated the raw signed difference
                    // here while the first loop accumulated squared error, so
                    // the diagnostic below was meaningless (terms could
                    // cancel). Use squared error consistently. The value is
                    // only logged, so training behavior is unchanged.
                    error += Math.Pow(outputs[i] - training[i], 2);
                }

                error /= 2;

                Debug.WriteLine("error: " + error);
                neuralNetwork.ApplyBackpropagation(training, learningRate);
            }
        }
Example #3
0
        /// <summary>
        /// Enumerates every allowed move for <paramref name="player"/> and, for
        /// each, the board state that results from playing that move on a copy
        /// of <paramref name="state"/> (the live state is never mutated).
        /// </summary>
        /// <param name="state">Current board position.</param>
        /// <param name="player">Player whose pieces are expanded.</param>
        /// <returns>
        /// One entry per (origin, destination) pair, mapped to the resulting
        /// board state.
        /// </returns>
        static List <KeyValuePair <((int, int), (int, int)), BoardState> > GetPossibleMoves(BoardState state, Player player)
        {
            // FIX: the previous implementation built an intermediate Dictionary
            // per piece via ToDictionary only to flatten it with SelectMany;
            // that throws ArgumentException if a piece ever reports the same
            // destination twice, and does redundant hashing. A flat projection
            // yields the same pairs in the same order without either problem.
            return state
                .GetPlayerPieces(player)
                .SelectMany(piece =>
                {
                    var origin = state.GetPieceLocation(piece);

                    return piece.GetAllowedMoves(state).Select(move =>
                    {
                        // Replay the move on a copy; call order preserved from
                        // the original: origin square first, then destination.
                        var shadowState = state.Copy();

                        shadowState.UpdatePieces(origin.y, origin.x);
                        shadowState.UpdatePieces(move.y, move.x);

                        return new KeyValuePair <((int, int), (int, int)), BoardState>((origin, move), shadowState);
                    });
                })
                .ToList();
        }