/// <summary>
/// Creates the play window and starts the background game loop driven by the
/// given feed-forward network.
/// </summary>
/// <param name="network">Network that steers the snake. May be null; the game
/// loop checks for null and then keeps whatever <c>currentMovement</c> holds.</param>
public FrmPlay(NNFeedForwardNetwork network)
{
    InitializeComponent();
    initWindow();
    this.network = network;
    gameloopThread = new System.Threading.Thread(gameloop);
    // Background thread: a foreground thread here would keep the process
    // alive after the form is closed, because the loop only exits when
    // 'stop' is set.
    gameloopThread.IsBackground = true;
    gameloopThread.Name = "SnakeGameLoop";
    gameloopThread.Start();
}
/// <summary>
/// Background game loop. Each tick (~100 ms) it asks the network for the next
/// direction (when a network is present), moves the snake, and asks the UI
/// thread to repaint. When a round ends with <c>trainByPlay</c> set, the
/// samples recorded during manual play are used to train a fresh network
/// before the game restarts.
/// </summary>
private void gameloop()
{
    // Created once; marshals refr() onto the UI thread via Invoke below.
    RefreshDelegate refreshDelegate = new RefreshDelegate(refr);
    while (!stop)
    {
        while (!snake.isGameOver() && !quit)
        {
            System.Threading.Thread.Sleep(100); // frame pacing: ~10 moves/s
            if (network != null)
            {
                // The output neuron with the highest activation picks the
                // direction (ties resolve to the first index, as before).
                double[] res = network.propagateToEnd(snake.getGameCharacteristics());
                currentMovement = (int)directionForOutput(argMax(res));
            }
            applyMovement();
            // NOTE(review): the original called Application.DoEvents() here.
            // On a worker thread that pumps THIS thread's empty message
            // queue, not the UI's, so it was a misleading no-op and has been
            // removed; the Invoke below already yields to the UI thread.
            if (!stop)
            {
                this.Invoke(refreshDelegate); // repaint on the UI thread
            }
        }
        if (trainByPlay)
        {
            trainFromRecordedPlay();
        }
        snake.restart();
    }
}

// Index of the largest value in res; the first index wins on ties,
// matching the original >=-chain behavior.
private static int argMax(double[] res)
{
    int best = 0;
    for (int i = 1; i < res.Length; i++)
    {
        if (res[i] > res[best])
        {
            best = i;
        }
    }
    return best;
}

// Maps a network output index (0..3) to its movement key:
// 0 = Up, 1 = Left, 2 = Down, 3 = Right.
private static Keys directionForOutput(int index)
{
    switch (index)
    {
        case 0: return Keys.Up;
        case 1: return Keys.Left;
        case 2: return Keys.Down;
        default: return Keys.Right;
    }
}

// Executes the move selected by currentMovement, recording a training
// sample first when trainByPlay is active. Unknown key codes do nothing,
// exactly like the original switch.
private void applyMovement()
{
    switch (currentMovement)
    {
        case (int)Keys.Up:
            recordTrainingSample(0);
            snake.moveUp();
            break;
        case (int)Keys.Left:
            recordTrainingSample(1);
            snake.moveLeft();
            break;
        case (int)Keys.Down:
            recordTrainingSample(2);
            snake.moveDown();
            break;
        case (int)Keys.Right:
            recordTrainingSample(3);
            snake.moveRight();
            break;
    }
}

// When trainByPlay is set, stores (current game state, one-hot direction
// label) as a supervised training example. labelIndex: 0=Up, 1=Left,
// 2=Down, 3=Right — must match directionForOutput's mapping.
private void recordTrainingSample(int labelIndex)
{
    if (!trainByPlay)
    {
        return;
    }
    double[] label = new double[4];
    label[labelIndex] = 1.0;
    gamechars.AddLast(snake.getGameCharacteristics());
    labels.AddLast(label);
}

// Trains a fresh network on the samples recorded during manual play, then
// switches back to network-controlled play.
// NOTE(review): runs on the game-loop thread; MessageBox.Show works off the
// UI thread but blocks the loop — acceptable here since play is paused.
// Previously commented-out alternative trainers (NNDeepBeliefNetwork and a
// plain NNFeedForwardNetwork, plus a FrmNetworkVisualizer call) were removed.
private void trainFromRecordedPlay()
{
    double[][] ar_trainingset = gamechars.ToArray();
    double[][] ar_labels = labels.ToArray();
    // Guard: the original indexed ar_trainingset[0] unconditionally and
    // crashed when no samples had been recorded.
    if (ar_trainingset.Length == 0)
    {
        MessageBox.Show("No training samples recorded.");
        trainByPlay = false;
        quit = false;
        return;
    }
    MessageBox.Show("Learning ...");
    network = new NNAccordInterface(new int[] { ar_trainingset[0].Length, 10, 5, 4 });
    ((NNAccordInterface)network).train(ar_trainingset, ar_labels, 10000, 0.1);
    MessageBox.Show("Learning finished. Playing ...");
    trainByPlay = false;
    quit = false;
}