Example #1
 public static void softupdate(MLP target, MLP qnet, double epsilon)
 {
     // Soft (Polyak) update: target <- (1 - epsilon) * target + epsilon * qnet, applied per weight matrix.
     for (var i = 0; i < target.W.Length; i++)
     {
         target.W[i] = target.W[i].Multiply(1.0 - epsilon) + qnet.W[i].Multiply(epsilon);
     }
 }
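A minimal sketch of how this soft update might be applied in a DQN-style training loop; Qnetwork, QtargetNetwork, maxSteps and targetUpdateInterval are assumed names (only the softupdate signature above comes from the example), and the environment step and gradient update are elided.

 // Sketch only: periodically blend the online network into the target network.
 for (int step = 0; step < maxSteps; step++)
 {
     // ... act in the environment and update Qnetwork here ...
     if (step % targetUpdateInterval == 0)
     {
         // target <- 0.99 * target + 0.01 * qnet
         softupdate(QtargetNetwork, Qnetwork, 0.01);
     }
 }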
Example #2
        static void Main(string[] args)
        {
            double[][] inputs  = new double[4][];
            double[]   outputs = { 1, 1, -1, -1 };
            inputs[0] = new double[] { -1, 1 };
            inputs[1] = new double[] { 1, -1 };
            inputs[2] = new double[] { 1, 1 };
            inputs[3] = new double[] { -1, -1 };

            MLP network = SourceMulti.mlp_create_model(3, new int[] { 2, 3, 1 });

            //SourceMulti.mlp_fit_classification_backdrop(network, inputs, outputs, 1000, 0.1);
            SourceMulti.mlp_fit_regression_backdrop(network, inputs, outputs, 1000, 0.1);

            /*Console.WriteLine(SourceMulti.mlp_classify(network, new double[] { -1, 1 })[0]);
             * Console.WriteLine(SourceMulti.mlp_classify(network, new double[] { 1, -1 })[0]);
             * Console.WriteLine(SourceMulti.mlp_classify(network, new double[] { 1, 1 })[0]);
             * Console.WriteLine(SourceMulti.mlp_classify(network, new double[] { -1, -1 })[0]);*/

            Console.WriteLine(SourceMulti.mlp_predict(network, new double[] { -1, 1 })[0]);
            Console.WriteLine(SourceMulti.mlp_predict(network, new double[] { 1, -1 })[0]);
            Console.WriteLine(SourceMulti.mlp_predict(network, new double[] { 1, 1 })[0]);
            Console.WriteLine(SourceMulti.mlp_predict(network, new double[] { -1, -1 })[0]);

            Console.ReadLine();
        }
Example #3
        private static void MLPExample()
        {
            // Hidden layers
            int[] h_n = { 2 };

            // XOR dataset
            double[,] dataset = { { 1, 1, 0 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 1, 1 } };

            // Architecture
            var mlp = new MLP(2, h_n, 1, Activation.BentIdentity, Activation.DBentIdentity);

            // Optional -> fit into [0, 1] range
            //dataset = Utility.NormalizeData(dataset);
            dataset = Mathf.Standardize(dataset);

            // Training
            var r = mlp.Backpropagation_Momentum(dataset, threshold: 1e-15);

            // Save the mean squared error per epoch to a file
            using (var sw = new System.IO.StreamWriter("MLPExample.txt"))
                foreach (double d in r)
                {
                    sw.WriteLine(d);
                }

            //Utility.Save<MLP>("C:\\Users\\ivan.soares\\Desktop\\mlp", mlp);

            //var o = Utility.Load<MLP>("C:\\Users\\ivan.soares\\Desktop\\mlp.bin");
            //Console.WriteLine(o.ToString());

            Console.WriteLine("Done");
            Console.ReadLine();
        }
Example #4
        private async void BtnOpen_Click(object sender, RoutedEventArgs e)
        {
            FileOpenPicker openPicker = new FileOpenPicker();

            openPicker.ViewMode = PickerViewMode.Thumbnail;
            openPicker.SuggestedStartLocation = PickerLocationId.Desktop;
            openPicker.CommitButtonText       = "Открыть";
            openPicker.FileTypeFilter.Add(".mlp");
            var file = await openPicker.PickSingleFileAsync();

            if (file != null)
            {
                var xmlSerializer = new XmlSerializer(typeof(MLP));
                await Task.Run(async() =>
                {
                    string serializedMLP = await FileIO.ReadTextAsync(file);
                    var stringReader     = new StringReader(serializedMLP);
                    network = (MLP)xmlSerializer.Deserialize(stringReader);
                });

                TbNumIN.Text     = network.sizeIN.ToString();
                TbNumOUT.Text    = network.sizeOUT.ToString();
                TbNumHidden.Text = network.GetHiddenLayersInfo();

                BtnNext.IsEnabled = true;
            }
        }
Example #5
        public static bool Test4()
        {
            var xorinput  = new[] { new[] { 0.0, 0.0 }, new[] { 1.0, 0.0 }, new[] { 0.0, 1.0 }, new[] { 1.0, 1.0 } };
            var xoroutput = new[] { new[] { 0.0 }, new[] { 1.0 }, new[] { 1.0 }, new[] { 0.0 } };
            var mlp       = new MLP(2, 4, 1);
            var stsp      = new StandardTrainingSetProvider(xorinput, xoroutput);

            stsp.Split();
            var gdbp = new BackPropagation(mlp, stsp);

            ((GD)gdbp.Solver).Criterion = LearningCriterion.CrossEntropy;
            //((GD)gdbp.Solver).LearningRate = .01;
            ((GD)gdbp.Solver).AdaGrad = true;

            var minerr = double.MaxValue;

            gdbp.ReportReady += optimizer =>
            {
                Console.WriteLine("Epoch = {0}, Error = {1}", optimizer.CurrentEpoch,
                                  optimizer.TrainingSetProvider.TrainError);
                minerr = Math.Min(minerr, optimizer.Solver.Error);

                if (optimizer.Done)
                {
                    Console.ReadLine();
                }
            };

            gdbp.RunAsync().Wait();
            return(true);
        }
Example #6
 void Reset()
 {
     Root       = transform;
     Controller = new Controller();
     Character  = new Character();
     Character.BuildHierarchy(transform);
     MLP = new MLP();
 }
Example #7
 protected override void OnNavigatedTo(NavigationEventArgs e)
 {
     if (e.Parameter != null)
     {
         network       = (MLP)e.Parameter;
         TbNumIN.Text  = network.sizeIN.ToString();
         TbNumOUT.Text = network.sizeOUT.ToString();
     }
 }
Example #8
 public void copyTo(MLP network)
 {
     for (var i = 0; i < W.Length; i++)
     {
         network.W [i] = M.DenseOfMatrix(W [i]);
         network.m [i] = M.DenseOfMatrix(m [i]);
         network.v [i] = M.DenseOfMatrix(v [i]);
     }
 }
Example #9
 public PredictorMLP(int aDimension, int aInputLength)
 {
     dimension   = aDimension;
     inputLength = aInputLength;
     mlp         = new MLP(new int[] { 30, dimension }, dimension * inputLength);
     r           = new Random();
     mu          = 0.0001;
     trainCount  = 0;
 }
Example #10
 void Awake()
 {
     rnd            = new System.Random(GetInstanceID());
     Qnetwork       = new MLP(state_size, hiddenSizes);
     QtargetNetwork = new MLP(state_size, hiddenSizes);
     Qnetwork.copyTo(QtargetNetwork);
     history = new History(feedbackModulus, state_size);
     memory  = new Memory(MEM_LIMIT, rnd, state_size);
 }
Example #11
 void Start()
 {
     m_startPosition = transform.position;
     alive           = true;
     m_net           = new MLP(1, 3, 1);
     fitness         = 0;
     score           = 0;
     rndgen          = new System.Random();
     rb = gameObject.GetComponent <Rigidbody2D>();
 }
Example #12
    public NNTest()
    {
        var M = Matrix <double> .Build;

        double[,] x = { { 5.0 }, { 3.0 }, { -1.0 }, { 2.5 }, { -0.5 }, { 0.3 }, { 0.7 }, { 1.9 }, { 3.5 }, { -0.3 } };

        double qtarget = 0.5;

        int inputsize = 10;

        int[] hiddenSizes = { 25, 9 };

        MLP network = new MLP(inputsize, hiddenSizes);

        network.write("test.txt");

        Matrix <double> xmat = M.DenseOfArray(x);

        Debug.Log(network.Forward(xmat));

        Debug.Log(network.Loss(xmat, 0, qtarget));

        Matrix <double>[] g = network.Gradients(xmat, 0, qtarget);

        Debug.Log(g[0]);
        Debug.Log(g[1]);
        Debug.Log(g[2]);
        Debug.Log(g[3]);

        network.update(g);

        Debug.Log(network.W[0]);
        Debug.Log(network.W[1]);
        Debug.Log(network.W[2]);
        Debug.Log(network.W[3]);

        MLP targetnetwork = new MLP(inputsize, hiddenSizes);

        network.copyTo(targetnetwork);

        Debug.Log(targetnetwork.W[0]);
        Debug.Log(targetnetwork.W[1]);
        Debug.Log(targetnetwork.W[2]);
        Debug.Log(targetnetwork.W[3]);

        network.W[0][1, 2] = 2.0;
        Debug.Log(network.W[0]);
        Debug.Log(targetnetwork.W[0]);

        network.write("testdata.txt");
        network.read("testdata.txt");
        Debug.Log(network.W[0]);
    }
Example #13
 void Awake()
 {
     qvaluesimg     = new Texture2D(3, 3);
     rnd            = new System.Random(GetInstanceID());
     Qnetwork       = new MLP(state_size, hiddenSizes);
     QtargetNetwork = new MLP(state_size, hiddenSizes);
     Qnetwork.copyTo(QtargetNetwork);
     memory       = new Memory(MEM_LIMIT, rnd, state_size);
     returnValues = new List <double> ();
     returnCounts = new List <int> ();
     returnValues.Add(0.0);
     returnCounts.Add(0);
 }
Example #14
    void Awake()
    {
        rnd = new System.Random(GetInstanceID());

        Qnetwork = new MLP(state_size, hiddenSizes);

        QtargetNetwork = new MLP(state_size, hiddenSizes);

        Qnetwork.copyTo(QtargetNetwork);

        memory = new Memory2(MEM_LIMIT, rnd, state_size);

        test_mode = false;
    }
Example #15
 private void init(int[] neuronCount, int inputLength, bool isWeakening)
 {
     // Fix the pointer to the array. Assume array is the variable and T is
     // the type of the array.
     unsafe
     {
         fixed(int *nc = neuronCount)
         {
             // pArray now has the pointer to the array. You can get an IntPtr
             //by casting to void, and passing that in.
             mlp = createMLP(inputLength, nc, neuronCount.Length, isWeakening);
         }
     }
 }
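For reference, a hedged sketch of the kind of P/Invoke declaration init relies on; the native library name ("mlp_native") and the calling convention are assumptions, and only the call shape of createMLP comes from the code above.

 // Hypothetical declaration; only the parameter shape is taken from init() above.
 [DllImport("mlp_native", CallingConvention = CallingConvention.Cdecl)]
 private static extern unsafe MLP createMLP(int inputLength, int *neuronCount,
                                            int layerCount, bool isWeakening);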
Example #16
 private void btnCreate_Click(object sender, EventArgs e)
 {
     try
     {
         int hiddenLayerSize = int.Parse(this.txtHiddenNeurons.Text, System.Globalization.CultureInfo.InvariantCulture);
         this._MLP = new MLP(30, hiddenLayerSize, 10);
         this.errorChart.Series["Erro"].Points.Clear();
         this.btnTrain.Enabled  = true;
         this.lblTotEpochs.Text = "0";
     }
     catch (Exception exc)
     {
         MessageBox.Show(String.Format("Invalid number format. [{0}]", exc.Message));
     }
 }
Example #17
        public static bool Test3()
        {
            var mlp = new MLP(2, 2, 1);

            mlp[0, 0, 0][0] = 1;
            mlp[0, 0, 1][0] = 2;
            mlp[0, 1, 0][0] = 3;
            mlp[0, 1, 1][0] = 4;
            mlp[0, 2, 0][0] = 5; //weight of bias to neuron #1 connection in next layer
            mlp[0, 2, 1][0] = 6; //bias to neuron #2 in next layer
            mlp[1, 0, 0][0] = 7;
            mlp[1, 1, 0][0] = 8;
            mlp[1, 2, 0][0] = 9; //bias to output
            return(true);
        }
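A short follow-up sketch that reads weights back through the same [layer, from, to] indexer, assuming the indexer exposes the underlying weight storage; the expected values mirror the assignments in Test3 above.

 // Sketch: layer 0, bias row (index 2) into hidden neuron #1 was set to 5 above.
 Console.WriteLine(mlp[0, 2, 0][0]);   // 5
 // Layer 1, bias row into the single output neuron was set to 9 above.
 Console.WriteLine(mlp[1, 2, 0][0]);   // 9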
Example #18
    void Start()
    {
        Application.runInBackground = true;
        //init variables
        cars           = new GameObject[numcars];
        obstacles      = new GameObject[numobstacles];
        Qnetwork       = new MLP(CarManager.state_size, CarManager.hiddenSizes);
        QtargetNetwork = new MLP(CarManager.state_size, CarManager.hiddenSizes);
        Qnetwork.copyTo(QtargetNetwork);

        /* Transfer Learning */
        if (load_at_init)
        {
            this.QtargetNetwork.read("QTargetNetwork");
            this.Qnetwork.read("QNetwork");
        }

        for (int i = 0; i < (numinst == 0 ? ++numinst:numinst); i++)
        {
            int        index        = getIndex(i);
            Quaternion cur_rotation = getRotation(index);
            Vector3    cur_position = getPosition(index);

            cars[i] = (GameObject)Instantiate(CarFab, cur_position, cur_rotation);
            CarManager m = cars[i].GetComponent(typeof(CarManager)) as CarManager;
            m.SetMLP(this.Qnetwork, this.QtargetNetwork);

            if (!multipletrainers && i != 0)
            {
                m.training        = false;
                m.display_qvalues = false;
                m.report          = false;
                m.epsilon         = m.epsilon_min;
            }
            else
            {
                m.training        = true;
                m.display_qvalues = (i == 0);
                m.report          = true;
                if (i == 0)
                {
                    cars [0].GetComponent <Renderer> ().material.color = Color.green;
                    EpisodeController = cars [0].GetComponent(typeof(CarManager)) as CarManager;
                }
            }
            cars[i].name = "CloneCar" + i;
        }
    }
Example #19
        private void EditMLP_Enter(object sender, EventArgs e)
        {
            DCostFunctionMLP.DataSource = Enum.GetValues(typeof(CostFunction));
            // Set default options up.
            MLP net = MenuModel.CurrentNet as MLP;

            if (net == null)
            {
                throw new MenuException("Cannot edit network that is not an MLP with this screen");
            }

            TMLPLearningRate.Text         = net.LearningRate.ToString();
            DCostFunctionMLP.SelectedItem = net.CostFunction.ToString();

            BuildLayerSummary();
        }
Example #20
    /// <summary>
    ///
    /// </summary>
    /// <param name="net"></param>
    /// <returns></returns>
    void Mutate(MLP net)
    {
        Debug.Log("Mutating population");
        List <double> w = new List <double>();

        System.Random rndgen = new System.Random();
        w = net.GetNetworkWeights();
        for (byte i = 0; i < net.GetNetworkWeights().Count; i++)
        {
            if (rndgen.NextDouble() <= mutationRate)
            {
                w[i] = GetRandom(0, 2);
            }
        }
        net.SetNetworkWeights(w);
    }
Example #21
File: NNs.cs  Project: ydyvip/ml-games
    void Start()
    {
        Func <float, float> relu = x => Mathf.Max(0f, x);
        Func <float, float> sigm = x => 1 / (1 + Mathf.Exp(-x));

        MLP           clf = new MLP();
        List <Neuron> nl  = new List <Neuron>();

        nl.Add(new Neuron(0.9916f, new float[] { -2.9073f, -0.2868f }, relu));
        nl.Add(new Neuron(0.3341f, new float[] { 0.6037f, -0.9359f }, relu));
        clf.addLayer(nl);
        nl = new List <Neuron>();
        nl.Add(new Neuron(1.2597f, new float[] { -1.9851f, 1.3447f }, sigm));
        clf.addLayer(nl);

        Debug.Log(clf.propagate(new float[] { -0.6f, 0.3f }));
    }
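For orientation, a hand-worked pass under the assumption that the first Neuron constructor argument is the bias and the float array holds the input weights: the first hidden neuron yields relu(0.9916 + (−2.9073)(−0.6) + (−0.2868)(0.3)) ≈ 2.65, the second yields relu(0.3341 + 0.6037(−0.6) + (−0.9359)(0.3)) = relu(−0.309) = 0, and the output neuron then gives sigm(1.2597 + (−1.9851)(2.65)) ≈ sigm(−4.0) ≈ 0.018.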
Example #22
 private void BtnNext_Click(object sender, RoutedEventArgs e)
 {
     BoxHidden.BorderBrush = new SolidColorBrush(Windows.UI.Colors.Gray);
     BoxAlpha.BorderBrush  = new SolidColorBrush(Windows.UI.Colors.Gray);
     BoxEps.BorderBrush    = new SolidColorBrush(Windows.UI.Colors.Gray);
     BoxError.BorderBrush  = new SolidColorBrush(Windows.UI.Colors.Gray);
     try
     {
         network = new MLP(dataToTrain, TextToIntArray(BoxHidden.Text));
     }
     catch
     {
         BoxHidden.BorderBrush = new SolidColorBrush(Windows.UI.Colors.Red);
         return;
     }
     try
     {
         network.Alpha = Convert.ToDouble(BoxAlpha.Text);
     }
     catch
     {
         BoxAlpha.BorderBrush = new SolidColorBrush(Windows.UI.Colors.Red);
         return;
     }
     try
     {
         network.Epsilon = Convert.ToDouble(BoxEps.Text);
     }
     catch
     {
         BoxEps.BorderBrush = new SolidColorBrush(Windows.UI.Colors.Red);
         return;
     }
     try
     {
         network.Error = Convert.ToDouble(BoxError.Text);
     }
     catch
     {
         BoxError.BorderBrush = new SolidColorBrush(Windows.UI.Colors.Red);
         return;
     }
     Frame.Navigate(typeof(Page4), network);
 }
Example #23
    /// <summary>
    ///
    /// </summary>
    /// <param name="a"></param>
    /// <param name="b"></param>
    /// <returns></returns>
    void CrossOver(MLP a, MLP b)
    {
        System.Random rndgen = new System.Random();

        List <double> w1 = new List <double>();
        List <double> w2 = new List <double>();

        w1 = a.GetNetworkWeights();
        w2 = b.GetNetworkWeights();

        for (byte i = 0; i < a.GetNetworkWeights().Count; i++)
        {
            if (rndgen.NextDouble() <= crossoverRate)
            {
                w1[i] = w2[i];
            }
        }
        a.SetNetworkWeights(w1);
    }
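A hedged sketch of how CrossOver and Mutate (Example #20) might be combined into one generation step; population is an assumed MLP[] field, and crossoverRate/mutationRate are the fields the two methods already read.

 // Sketch only: cross adjacent pairs in place, then mutate each network.
 for (int i = 0; i + 1 < population.Length; i += 2)
 {
     CrossOver(population[i], population[i + 1]);   // copies some of b's weights into a
     Mutate(population[i]);
     Mutate(population[i + 1]);
 }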
Example #24
    //	public static Vector3[] MPositions = {new Vector3 (-2.8f, 0.1f, 1.3f), new Vector3 (-2.8f, 0.1f, -0.29f)};
    //	public static Quaternion[] MRotations = { Quaternion.AngleAxis(270, new Vector3(0, 1, 0)), Quaternion.AngleAxis(270, new Vector3(0, 1, 0))};

    // Use this for initialization
    void Start()
    {
        //new NNTest ();
        Application.runInBackground = true;
        cars     = new GameObject[numcars];
        Qnetwork = new MLP(state_size, hiddenSizes);
//		obstacles = new GameObject[numobstacles];
        QtargetNetwork = new MLP(state_size, hiddenSizes);
        int i = 0;
//		for (int i = 0; i < numcars; i++){
        //			int ind = rng.Next (0, Positions.Length);
        //			cars[i] = (GameObject) Instantiate(CarFab,Positions[ind],Rotations[ind]);
        double d = rng.NextDouble();

        cars[i] = (GameObject)Instantiate(CarFab, Positions[i], d > 0.5 ? Rotations[i] : Alt_Rotations[i]);
        CarManager2 m = cars[i].GetComponent(typeof(CarManager2)) as CarManager2;

        m.SetMLP(this.Qnetwork, this.QtargetNetwork);
        cars[i].name = "CloneCar" + i;
//		}
        EpisodeController = cars[0].GetComponent(typeof(CarManager2)) as CarManager2;
    }
Example #25
        protected override void OnNavigatedTo(NavigationEventArgs e)
        {
            if (e.Parameter != null)
            {
                network = (MLP)e.Parameter;
            }
            threadLearning = new Task(() => network.Learning());
            threadLearning.Start();
            Task.Run(() => LoopLog());

            Task.Run(async() =>
            {
                while (!threadLearning.IsCompleted)
                {
                    await Task.Delay(TimeSpan.FromSeconds(1));
                }
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal,
                                          () =>
                {
                    BtnNext.IsEnabled = true;
                });
            });
        }
Example #26
        internal static void mlp_process(MLP m, float[] input, float[] output)
        {
            int j;

            float[] hidden = new float[MAX_NEURONS];
            float[] W      = m.weights;
            int     W_ptr  = 0;

            /* Hidden layer: each neuron's weights are stored flat in W as
             * [bias, w_0 .. w_{topo[0]-1}]. */
            for (j = 0; j < m.topo[1]; j++)
            {
                int   k;
                float sum = W[W_ptr];
                W_ptr++;
                for (k = 0; k < m.topo[0]; k++)
                {
                    sum = sum + input[k] * W[W_ptr];
                    W_ptr++;
                }
                hidden[j] = tansig_approx(sum);
            }

            /* Output layer: same layout, using the hidden activations as inputs. */
            for (j = 0; j < m.topo[2]; j++)
            {
                int   k;
                float sum = W[W_ptr];
                W_ptr++;
                for (k = 0; k < m.topo[1]; k++)
                {
                    sum = sum + hidden[k] * W[W_ptr];
                    W_ptr++;
                }
                output[j] = tansig_approx(sum);
            }
        }
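A minimal usage sketch; the buffer sizes follow directly from m.topo as read by mlp_process above, and the MLP instance m is assumed to be already populated with weights.

 float[] input  = new float[m.topo[0]];   // one value per input neuron
 float[] output = new float[m.topo[2]];   // one value per output neuron
 // ... fill input ...
 mlp_process(m, input, output);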
Example #27
 private static extern unsafe MLP copyMLP(MLP copy);
Example #28
 private static extern unsafe MLP copyMLPandChangeWeakening(MLP copy, bool isWeakening);
Example #29
 private static extern unsafe float MaxDeltaWeight(MLP mlp);
Example #30
 private void init(int[] neuronCount, int inputLength, bool isWeakening) 
 {
     // Fix the pointer to the array. Assume array is the variable and T is
     // the type of the array.
     unsafe
     {
         fixed (int* nc = neuronCount)
         {
             // pArray now has the pointer to the array. You can get an IntPtr
             //by casting to void, and passing that in.
             mlp = createMLP(inputLength, nc, neuronCount.Length, isWeakening);
         }
     }
 }
Example #31
 public MLPDll(MLPDll copy)
 {
     mlp = copyMLP(copy.mlp);
 }
Example #32
        static void Main()
        {
            do
            {
                Console.Clear();
                Console.WriteLine("MENU: \n 1 - Treinar \n 2 - Carregar Rede Neural \n 3 - Salvar Rede Neural \n 4 - Executar \n 0 - Sair");
                var op = Console.ReadKey().KeyChar;
                if (op == '1')//treinar
                {
                    using (var ofd = new OpenFileDialog()
                    {
                        Filter = "Arquivo Treino File|*.txt", Title = "Selecione o arquivo de treino: "
                    })
                    {
                        if (ofd.ShowDialog() == DialogResult.OK)
                        {
                            using (var ofd2 = new OpenFileDialog()
                            {
                                Filter = "Arquivo Treino File|*.txt", Title = "Selecione o arquivo de treino: "
                            })
                            {
                                Console.Clear();
                                Console.WriteLine("Aguarde...");
                                var neuralFile_A = File.ReadAllLines(ofd.FileName);
                                var pc_A         = PointsConvertor.Converter(neuralFile_A);

                                PointsConverted pc_B = null;
                                if (ofd2.ShowDialog() == DialogResult.OK)
                                {
                                    var neuralFile_B = File.ReadAllLines(ofd2.FileName);
                                    pc_B = PointsConvertor.Converter(neuralFile_B);
                                }
                                var inicio = DateTime.Now;
                                MLP.Train(pc_A, pc_B);
                                Console.WriteLine("Treino realizado com sucesso \n Inicio: " + inicio.ToLongTimeString() + " \n Fim: " + DateTime.Now.ToLongTimeString());
                                Console.ReadKey();
                            }
                        }
                    }
                }
                else if (op == '2')//carregar rede neural
                {
                    using (var ofd = new OpenFileDialog()
                    {
                        Filter = "MLP Files|*.mlp", Title = "Selecione o arquivo de rede neural: "
                    })
                    {
                        if (ofd.ShowDialog() == DialogResult.OK)
                        {
                            try
                            {
                                Console.Clear();
                                Console.WriteLine("Aguarde...");
                                MLP.LoadNetwork(ofd.FileName);
                                Console.WriteLine("Rede neural recarregada com sucesso.");
                                Console.ReadKey();
                            }
                            catch (Exception)
                            {
                                Console.WriteLine("Erro na leitura dos dados.");
                                Console.ReadKey();
                            }
                        }
                    }
                }
                else if (op == '3')//salvar rede neural
                {
                    using (var ofd = new SaveFileDialog()
                    {
                        Filter = "MLP Files|*.mlp", Title = "Salve o arquivo de rede neural: "
                    })
                    {
                        if (ofd.ShowDialog() == DialogResult.OK)
                        {
                            Console.Clear();
                            Console.WriteLine("Aguarde...");
                            var retorno = MLP.SaveNetwork(ofd.FileName);
                            Console.WriteLine("Arquivo salvo em: " + retorno);
                            Console.ReadKey();
                        }
                    }
                }
                else if (op == '4')//executar simulação
                {
                    Console.Clear();
                    Console.WriteLine("Aguarde...");
                    Executa();
                }
            }while (true);
        }
Example #33
 public void SetMLP(MLP qnet, MLP qtargetnet)
 {
     this.Qnetwork       = qnet;
     this.QtargetNetwork = qtargetnet;
 }
Example #34
 private static extern unsafe void Backpropagate(MLP mlp);
Example #35
 private static extern unsafe void CalculateDeltaWeights(MLP mlp);
Example #36
 private static extern unsafe void ChangeWeights(MLP mlp, float mu);
Example #37
 private static extern unsafe void Train(MLP mlp, Matrix errors, float mu);
Example #38
 private static extern unsafe int AddDeltaWeights(MLP src1, MLP src2, MLP dest);
Example #39
 private static extern unsafe void ClearDeltaWeights(MLP mlp);
Example #40
 private static extern unsafe void SetOutputError(MLP mlp, Matrix errors);
Example #41
 private static extern unsafe void RandomClearWeights(MLP mlp);
Example #42
 private static extern unsafe Matrix Output(MLP mlp, Matrix input);
Example #43
        private static void Executa()
        {
            var datahora_atual = DateTime.MinValue;

            do
            {
                var datahora = Simulation.Memory.Get().dmDateTime.DataHora;
                var Dados_A  = Simulation.Input.Termostato_A();
                var Dados_D  = Simulation.Input.Termostato_D();
                var Dados_E  = Simulation.Input.Termostato_E();
                var Dados_G  = Simulation.Input.Termostato_G();

                if (datahora >= datahora_atual.AddSeconds(.5))
                {
                    datahora_atual = datahora;
                    var hora = datahora;

                    var TempA = Dados_A.TemperaturaNormalizado;
                    var SetA  = Dados_A.SetPointNormalizado;

                    var TempD = Dados_D.TemperaturaNormalizado;
                    var SetD  = Dados_D.SetPointNormalizado;

                    var TempE = Dados_E.TemperaturaNormalizado;
                    var SetE  = Dados_E.SetPointNormalizado;

                    var TempG = Dados_G.TemperaturaNormalizado;
                    var SetG  = Dados_G.SetPointNormalizado;

                    var dataEntrada = new BasicMLData(new double[] { TempA, SetA, TempD, SetD, TempE, SetE, TempG, SetG });

                    var dataSaida = MLP.Compute(dataEntrada);

                    Console.WriteLine("A: " + dataSaida[0] + " | D: " + dataSaida[1] + " | E: " + dataSaida[2] + " | G: " + dataSaida[3]);

                    var saida = "";
                    if (dataSaida[0] >= 0.5)
                    {
                        Simulation.Output.LigarAquecedor_A();
                        saida += "A: ON";
                    }
                    else
                    {
                        Simulation.Output.DesligarAquecedor_A();
                        saida += "A: OFF";
                    }
                    saida += "  T: " + (Dados_A.TemperaturaReal - Dados_A.SetPointReal).ToString("F1") + "  |";
                    /////////////////
                    if (dataSaida[1] >= 0.5)
                    {
                        Simulation.Output.LigarAquecedor_D();
                        saida += " D: ON";
                    }
                    else
                    {
                        Simulation.Output.DesligarAquecedor_D();
                        saida += " D: OFF";
                    }
                    saida += "  T: " + (Dados_D.TemperaturaReal - Dados_D.SetPointReal).ToString("F1") + "  |";
                    /////////////////
                    if (dataSaida[2] >= 0.5)
                    {
                        Simulation.Output.LigarAquecedor_E();
                        saida += " E: ON";
                    }
                    else
                    {
                        Simulation.Output.DesligarAquecedor_E();
                        saida += " E: OFF";
                    }
                    saida += "  T: " + (Dados_E.TemperaturaReal - Dados_E.SetPointReal).ToString("F1") + "  |";
                    /////////////////
                    if (dataSaida[3] >= 0.5)
                    {
                        Simulation.Output.LigarAquecedor_G();
                        saida += " G: ON";
                    }
                    else
                    {
                        Simulation.Output.DesligarAquecedor_G();
                        saida += " G: OFF";
                    }
                    saida += "  T: " + (Dados_G.TemperaturaReal - Dados_G.SetPointReal).ToString("F1") + "  |";
                    Console.WriteLine(saida);
                }
            }while (true);
        }
Example #44
 private static extern unsafe void ForwardPorpagate(MLP mlp);
Example #45
 public MLPDll(MLPDll copy, bool isWeakening)
 {
     mlp = copyMLPandChangeWeakening(copy.mlp, isWeakening);            
 }
Example #46
 private static extern unsafe void SetInput(MLP mlp, Matrix input);
Example #47
    void Start()
    {
        Manager = GameObject.FindGameObjectWithTag( "Level" ).GetComponent<LevelManager>();

        if ( Health > SOLIDER_MAX_HEALTH )
        {
            Health = SOLIDER_MAX_HEALTH;
            UnityEngine.Debug.LogWarning( "Solider health is greater than the levels max health" );
        }

        if ( Ammo > SOLIDER_MAX_AMMO )
        {
            Ammo = SOLIDER_MAX_AMMO;
            UnityEngine.Debug.LogWarning( "Solider ammo is greater than the levels max ammo" );
        }

        switch ( AssignedTeam )
        {
            case ( Team.Blue ):

                gameObject.layer = LayerMask.NameToLayer( "BlueTeam" );

                break;
            case ( Team.Red ):

                gameObject.layer = LayerMask.NameToLayer( "RedTeam" );

                break;

            default:
                UnityEngine.Debug.LogException( new ArgumentException( "Solider cannot have a null team" ) );

                break;
        }

        GetComponent<SpriteRenderer>().sprite = AliveSprite;

        EffectedDamage = 0;
        EffectedMovementSpeed = 0;

        Manager.RegisterSolider( this );

        moveTarget = new Vector2( 0, 0 );
        isMoving = false;

        if ( AssignedTeam == Team.Red )
            gameObject.renderer.material.color = Color.red;

        else if ( AssignedTeam == Team.Blue )
            gameObject.renderer.material.color = Color.blue;

        nearbyEnemies = new Solider[ SOLIDER_TRACK_ENEMY_COUNT ];
        nearbyFriendlies = new Solider[ SOLIDER_TRACK_FRIENDLY_COUNT ];

        canShoot = true;

        ///// Start up and train the NN /////

        // Set up the MLP and configure its inputs to receive inputs within the expected ranges
        // Note: This is to allow the MLP to normalize inputs to the range [0, 1]
        neuralNetwork = new MLP( 20, 20, 6, ActivationFunction.Threshold );
        for ( int i = 0; i < 5; i++ )
        {
            neuralNetwork.Inputs[ i ].MinValue =    0f; // Current health, friendly health
            neuralNetwork.Inputs[ i ].MaxValue =  100f;
        }
        for ( int i = 5; i < 10; i++ )
        {
            neuralNetwork.Inputs[ i ].MinValue =    0f; // Current ammo, friendly ammo
            neuralNetwork.Inputs[ i ].MaxValue = 1000f;
        }
        for ( int i = 10; i < 15; i++ )
        {
            neuralNetwork.Inputs[ i ].MinValue =    1f; // Nearest powerup distance, friendly distances
            neuralNetwork.Inputs[ i ].MaxValue =    5f;
        }
        for ( int i = 15; i < 20; i++ )
        {
            neuralNetwork.Inputs[ i ].MinValue =    1f; // Nearest powerup distance, friendly distances
            neuralNetwork.Inputs[ i ].MaxValue =    5f;
        }

        // Train the system with hardcoded training data
        TrainNN();
    }
Example #48
 private static extern unsafe void deleteMLP(MLP mlp);
Example #49
 public void SetNetwork(MLP n)
 {
     m_net = n;
 }
Example #50
 private static extern unsafe void RandomClearWeakness(MLP mlp, float min, float max);