Example No. 1
        public void TrainXOR()
        {
            try {
                //Load train data
                float[,] testX = new float[, ] {
                    { 0, 1 },
                };
                float[,] x = new float[, ] {
                    { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 }
                };
                float[] y = new float[] { 0, 1, 1, 0 };

                //Build sequential model
                var model = new Sequential();
                model.Add(new Dense(32, activation: "relu", input_shape: new Shape(2)));
                model.Add(new Dense(32, activation: "relu"));
                model.Add(new Dropout(0.1d));
                model.Add(new Dense(1, activation: "sigmoid"));

                //Compile and train
                var optimizer = new Adam();
                model.Compile(optimizer: optimizer, loss: "mse", metrics: new string[] { "accuracy" });
                model.Fit(x, y, batch_size: 2, epochs: 1000, verbose: 1);

                //Warm up both prediction paths before timing them
                float[] predicts;
                predicts = model.Predict(x).GetData <float>();
                predicts = model.PredictOnBatch(x).GetData <float>();
                predicts = model.Predict(x).GetData <float>();
                predicts = model.PredictOnBatch(x).GetData <float>();
                predicts = model.Predict(x).GetData <float>();
                predicts = model.PredictOnBatch(x).GetData <float>();

                Stopwatch watch = new Stopwatch();
                watch.Restart();
                for (int i = 0; i < 5; ++i)
                {
                    predicts = model.PredictOnBatch(testX).GetData <float>();
                }
                watch.Stop();
                string batchMs = watch.ElapsedMilliseconds.ToString();
                watch.Restart();
                for (int i = 0; i < 5; ++i)
                {
                    predicts = model.Predict(testX).GetData <float>();
                }
                watch.Stop();
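                // One simple alternative to the WPF display commented out below: write both timings to the console.
                Console.WriteLine("PredictOnBatch: " + batchMs + " ms, Predict: " + watch.ElapsedMilliseconds + " ms");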

                //MainWindow.Instance.Dispatcher.BeginInvoke(new Action(() => {
                //	MainWindow.Instance.DebugTextBox.Text = batchMs + " / " + watch.ElapsedMilliseconds.ToString();
                //}));
            } catch (Exception ex) {
                //MainWindow.Instance.Dispatcher.BeginInvoke(new Action(() => {
                //	MainWindow.Instance.DebugTextBox.Text = ex.ToString();
                //}));
            }
        }
Example No. 2
        static void Main(string[] args)
        {
            SaveRateToFileTrainData("USD");
            SaveRateToFileTestData("USD");
            Global.UseEngine(SiaNet.Backend.ArrayFire.SiaNetBackend.Instance, DeviceType.CUDA, true);


            var train = PreparingExchangeRateData.LoadTrain();
            var test  = PreparingExchangeRateData.LoadTest();

            var model = new Sequential();

            model.EpochEnd += Model_EpochEnd;
            model.Add(new Dense(60, ActType.Sigmoid));
            model.Add(new Dense(60, ActType.Sigmoid));
            model.Add(new Dense(1, ActType.Linear));

            //Compile with Optimizer, Loss and Metric
            model.Compile(OptimizerType.SGD, LossType.MeanSquaredError, MetricType.MSE);
            // Train for 1000 epochs with a batch size of 32
            model.Train(train, epochs: 1000, batchSize: 32);

            //Predict on the test data
            var rawPred = model.Predict(test);

            Console.ReadLine();
        }
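The examples above wire up a `Model_EpochEnd` handler that is not shown. A minimal sketch of such a handler, assuming SiaNet raises the event with an `EpochEndEventArgs` exposing `Epoch`, `Loss`, and `Metric` (the exact type and property names are assumptions):

        // Hypothetical handler for the EpochEnd event used in these examples:
        // logs the epoch number, training loss and metric to the console.
        private static void Model_EpochEnd(object sender, EpochEndEventArgs e)
        {
            Console.WriteLine($"Epoch: {e.Epoch}, Loss: {e.Loss}, Metric: {e.Metric}");
        }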
Example No. 3
        static void Main(string[] args)
        {
            //Setup Engine

            Global.UseEngine(SiaNet.Backend.MxNetLib.SiaNetBackend.Instance, DeviceType.CPU);

            //Prep Data
            var(x, y) = PrepDataset();
            x.Head();
            DataFrameIter trainSet = new DataFrameIter(x, y);

            //Build model with simple fully connected layers
            var model = new Sequential();

            model.EpochEnd += Model_EpochEnd;
            model.Add(new Dense(64, ActType.ReLU));
            model.Add(new Dense(1, ActType.Sigmoid));

            //Compile with Optimizer, Loss and Metric
            model.Compile(OptimizerType.SGD, LossType.MeanSquaredError, MetricType.BinaryAccurary);

            // Train for 1000 epochs with a batch size of 2
            model.Train(trainSet, 1000, 2);

            //Create prediction data to evaluate
            DataFrame2D predX = new DataFrame2D(2);

            predX.Load(0, 0, 0, 1); //Result should be 0 and 1

            var rawPred = model.Predict(predX);

            Console.ReadLine();
        }
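Example No. 3 builds its training data with a `PrepDataset()` helper that is not shown. One plausible implementation, reusing the `DataFrame2D.Load` pattern from the example itself (the XOR inputs and targets are an assumption):

        // Hypothetical PrepDataset: XOR inputs in a 2-column frame, targets in a 1-column frame.
        static (DataFrame2D, DataFrame2D) PrepDataset()
        {
            var x = new DataFrame2D(2);
            x.Load(0, 0, 0, 1, 1, 0, 1, 1); // four XOR input rows

            var y = new DataFrame2D(1);
            y.Load(0, 1, 1, 0);             // XOR targets

            return (x, y);
        }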
Example No. 4
        public NDarray Predict(string imgPath)
        {
            NDarray x = Utils.Normalize(imgPath);

            x = x.reshape(1, x.shape[0], x.shape[1], x.shape[2]);
            return(_model.Predict(x));
        }
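A hypothetical call site for this method (the `classifier` instance, image path, and class handling are assumptions), using the same `GetData<float>()` accessor as Example No. 1:

        // Hypothetical usage: run the classifier on one image and pick the most probable class.
        var probabilities = classifier.Predict("samples/example.png").GetData<float>();
        int predictedClass = 0;
        for (int i = 1; i < probabilities.Length; i++)
        {
            if (probabilities[i] > probabilities[predictedClass])
            {
                predictedClass = i;
            }
        }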
Example No. 5
        static (NDarray, NDarray) GenerateFakeGeneratorSamples(Sequential generatorModel, int latentDim, int sampleCount)
        {
            var xInput = GenerateLatentPoints(latentDim, sampleCount);
            var x      = generatorModel.Predict(xInput);
            var y      = np.zeros(new int[] { sampleCount, 1 });

            return(x, y);
        }
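This GAN helper calls `GenerateLatentPoints`, which is not shown. A minimal sketch, assuming Numpy.NET's `np.random.randn` and the `reshape` call already used in Example No. 4:

        // Hypothetical implementation: draw latentDim values per sample from a standard normal
        // and reshape them into a (sampleCount, latentDim) batch for the generator.
        static NDarray GenerateLatentPoints(int latentDim, int sampleCount)
        {
            var xInput = np.random.randn(latentDim * sampleCount);
            return xInput.reshape(sampleCount, latentDim);
        }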
Example No. 6
        /// <summary>
        /// Trains an agent whose model is a feed-forward (non-recurrent) network. Use it when the model acts on the current environment state only, without taking previous states into account.
        /// </summary>
        /// <param name="agent">Agent to train, a network of the given architecture</param>
        /// <param name="iterationCount">Number of training iterations (epochs)</param>
        /// <param name="rolloutCount">Number of rollouts (for a game: playing the level until the game ends, <seealso cref="Environment.IsTerminated"/>) to complete before the weights are updated.
        /// It can be interpreted as the amount of training data collected per epoch.</param>
        /// <param name="minibatchSize">Minibatch size for training</param>
        /// <param name="actionPerIteration">Optional callback invoked after each epoch. Allows interrupting the training process. Input parameters: epoch, loss error, evaluation error.
        /// Return value: true - interrupt training, false - continue training.
        /// Useful for logging, displaying training progress, saving intermediate model checkpoints, etc.</param>
        /// <param name="gamma">Discount factor applied to rewards when computing the discounted reward</param>
        /// <returns>The trained agent</returns>
        public Sequential <T> Teach(Sequential <T> agent, int iterationCount, int rolloutCount, int minibatchSize, Func <int, double, double, bool> actionPerIteration = null, double gamma = 0.99)
        {
            for (int iteration = 0; iteration < iterationCount; iteration++)
            {
                var data = new LinkedList <(int rollout, int actionNumber, T[] state, T[] action, T reward)>();
                for (int rolloutNumber = 0; rolloutNumber < rolloutCount; rolloutNumber++)
                {
                    int actionNumber = 0;
                    while (!Environment.IsTerminated)
                    {
                        var currentState = Environment.GetCurrentState <T>();
                        var action       = agent.Predict(currentState, Device);
                        var reward       = Environment.PerformAction(action);
                        data.AddLast((rolloutNumber, ++actionNumber, currentState, action, reward));
                    }
                    Environment.Reset();
                }
                var discountedRewards = new T[data.Count];
                int rolloutOffset = 0; //index of the current rollout's first step within the flat data list
                foreach (var rollout in data.GroupBy(p => p.rollout))
                {
                    var steps = rollout.ToList();
                    steps.Sort((a, b) => a.actionNumber > b.actionNumber ? 1 : a.actionNumber < b.actionNumber ? -1 : 0); //ascending actionNumber
                    for (int i = 0; i < steps.Count; i++)
                    {
                        var remainingRewards = steps.GetRange(i, steps.Count - i)
                                               .Select(p => Environment.HasRewardOnlyForRollout ? steps[steps.Count - 1].reward : p.reward)
                                               .ToArray();
                        discountedRewards[rolloutOffset + i] = CalculateDiscountedReward(remainingRewards, gamma);
                    }
                    rolloutOffset += steps.Count;
                }

                var features = data.Select(p => p.state);
                var labels   = data.Zip(discountedRewards, (d, reward) => Multiply(d.action, reward));
                var dataset  = features.Zip(labels, (f, l) => f.Concat(l).ToArray()).ToArray();
                var inputDim = features.FirstOrDefault().Length;

                var fitResult = agent.Fit(dataset,
                                          inputDim,
                                          minibatchSize,
                                          GetLoss()[0],
                                          GetEvalLoss()[0],
                                          GetOptimizer()[0],
                                          1,
                                          false,
                                          Device);
                data.Clear();
                var needStop = actionPerIteration?.Invoke(iteration, fitResult.LossError, fitResult.EvaluationError);
                if (needStop.HasValue && needStop.Value)
                {
                    break;
                }
            }
            return(agent);
        }
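This method relies on a `CalculateDiscountedReward` helper that is not shown (the recurrent variant in Example No. 10 uses it as well). A minimal sketch of the usual discounted-return computation, written for `double` for clarity; the generic version would convert through `T`:

        // Sketch of a discounted return: G = r0 + gamma*r1 + gamma^2*r2 + ...
        static double CalculateDiscountedReward(double[] rewards, double gamma)
        {
            double discounted = 0.0;
            double factor = 1.0;
            foreach (var reward in rewards)
            {
                discounted += factor * reward;
                factor *= gamma;
            }
            return discounted;
        }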
Example No. 7
 public int Classify(Bitmap bitmap)
 {
     if (_framework == "keras")
     {
         var pred = ClassFromNumpy(_model.Predict(np.array(ImagePixels(bitmap))));
         return pred;
     }
     else
     {
         _trainer.Net.Forward(BuilderInstance.Volume.From(ImagePixels(bitmap), new Shape(80, 60, 1)));
         var predictedClass = _trainer.Net.GetPrediction();
         return predictedClass[0];
     }
 }
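The keras branch above relies on `ClassFromNumpy` and `ImagePixels`, which are not shown. A plausible sketch of `ClassFromNumpy` as an argmax over the predicted class probabilities (the exact helper contract is an assumption):

        // Hypothetical helper: return the index of the highest predicted class probability.
        static int ClassFromNumpy(NDarray prediction)
        {
            var probabilities = prediction.GetData<float>();
            int best = 0;
            for (int i = 1; i < probabilities.Length; i++)
            {
                if (probabilities[i] > probabilities[best])
                {
                    best = i;
                }
            }
            return best;
        }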
Example No. 8
        public static void Run()
        {
            var(x, y) = LoadTrain();
            Sequential model = new Sequential();

            model.Add(new Dense(32, activation: SuperNeuro.Engine.ActType.ReLU));
            model.Add(new Dense(32, activation: SuperNeuro.Engine.ActType.ReLU));
            model.Add(new Dense(1, activation: SuperNeuro.Engine.ActType.Sigmoid));

            model.EpochEnd += Model_EpochEnd;
            model.Compile(SuperNeuro.Engine.OptimizerType.Adam, SuperNeuro.Engine.LossType.BinaryCrossEntropy, SuperNeuro.Engine.MetricType.BinaryAccurary);
            model.Train(new SuperNeuro.Data.DataFrameIter(x, y), 100, 32);

            var test = model.Predict(LoadTest());

            test.Head();
        }
Example No. 9
        static void Main(string[] args)
        {
            //Setup Engine
            Global.UseEngine(SiaNet.Backend.ArrayFire.SiaNetBackend.Instance, DeviceType.Default);

            var train = LoadTrain(); //Load train data
            var test  = LoadTest();  //Load test data

            var model = new Sequential();

            model.EpochEnd += Model_EpochEnd;
            model.Add(new Dense(128, ActType.ReLU));
            model.Add(new Dense(64, ActType.ReLU));
            model.Add(new Dense(1, ActType.Sigmoid));

            //Compile with Optimizer, Loss and Metric
            model.Compile(OptimizerType.Adam, LossType.BinaryCrossEntropy, MetricType.BinaryAccurary);

            // Perform training with the train dataset
            model.Train(train, epochs: 100, batchSize: 200);

            var prediction = model.Predict(test);
        }
Example No. 10
        /// <summary>
        /// Trains an agent whose model is a recurrent network. Use it when the model acts on a chain of environment states.
        /// </summary>
        /// <param name="agent">Agent to train, a network of the given architecture</param>
        /// <param name="iterationCount">Number of training iterations (epochs)</param>
        /// <param name="rolloutCount">Number of rollouts (for a game: playing the level until the game ends, <seealso cref="Environment.IsTerminated"/>) to complete before the weights are updated.
        /// It can be interpreted as the amount of training data collected per epoch.</param>
        /// <param name="minibatchSize">Minibatch size for training</param>
        /// <param name="sequenceLength">Sequence length: the number of previous environment states supplied together with each action.</param>
        /// <param name="actionPerIteration">Optional callback invoked after each epoch. Allows interrupting the training process. Input parameters: epoch, loss error, evaluation error.
        /// Return value: true - interrupt training, false - continue training.
        /// Useful for logging, displaying training progress, saving intermediate model checkpoints, etc.</param>
        /// <param name="gamma">Discount factor applied to rewards when computing the discounted reward</param>
        /// <returns>The trained agent</returns>
        public Sequential <T> Teach(Sequential <T> agent, int iterationCount, int rolloutCount, int minibatchSize, int sequenceLength, Func <int, double, double, bool> actionPerIteration = null, double gamma = 0.99)
        {
            for (int iteration = 0; iteration < iterationCount; iteration++)
            {
                var data = new List <(int rollout, int actionNumber, T[] state, T[] action, T reward)>();
                for (int rolloutNumber = 0; rolloutNumber < rolloutCount; rolloutNumber++)
                {
                    int actionNumber = 0;
                    while (!Environment.IsTerminated)
                    {
                        var currentState = Environment.GetCurrentState <T>();
                        var sequence     = actionNumber < sequenceLength
                            ? data.GetRange(data.Count - actionNumber, actionNumber)
                            : data.GetRange(data.Count - sequenceLength - 1, sequenceLength - 1);

                        var sequenceStates = sequence
                                             .Select(p => p.state)
                                             .ToList();
                        sequenceStates.Add(currentState);
                        var action = agent.Predict(sequenceStates, Device);
                        var reward = Environment.PerformAction(action);
                        data.Add((rolloutNumber, ++actionNumber, currentState, action, reward));
                    }
                    Environment.Reset();
                }
                var discountedRewards = new T[data.Count];
                int rolloutOffset = 0; //index of the current rollout's first step within the flat data list
                foreach (var rollout in data.GroupBy(p => p.rollout))
                {
                    var steps = rollout.ToList();
                    for (int i = 0; i < steps.Count; i++)
                    {
                        var remainingRewards = steps.GetRange(i, steps.Count - i)
                                               .Select(p => Environment.HasRewardOnlyForRollout ? steps[steps.Count - 1].reward : p.reward)
                                               .ToArray();
                        discountedRewards[rolloutOffset + i] = CalculateDiscountedReward(remainingRewards, gamma);
                    }
                    rolloutOffset += steps.Count;
                }

                var features = new List <IList <T[]> >();
                var labels   = new List <T[]>();
                var dataWithDiscountedReward = data.Zip(discountedRewards, (dat, reward) => (dat, reward)).GroupBy(p => p.dat.rollout);
                foreach (var rollout in dataWithDiscountedReward)
                {
                    var steps = rollout.ToList();
                    steps.Sort((a, b) => a.dat.actionNumber > b.dat.actionNumber ? 1 : a.dat.actionNumber < b.dat.actionNumber ? -1 : 0); //ascending actionNumber
                    for (int i = 0; i < steps.Count; i++)
                    {
                        if (i < sequenceLength)
                        {
                            features.Add(steps.GetRange(0, i + 1).Select(p => p.dat.state).ToArray());
                        }
                        else
                        {
                            features.Add(steps.GetRange(i - sequenceLength, sequenceLength).Select(p => p.dat.state).ToArray());
                        }
                        labels.Add(Multiply(steps[i].dat.action, steps[i].reward));
                    }
                }

                var fitResult = agent.Fit(features,
                                          labels,
                                          minibatchSize,
                                          GetLoss()[0],
                                          GetEvalLoss()[0],
                                          GetOptimizer()[0],
                                          1,
                                          Device);
                data.Clear();
                var needStop = actionPerIteration?.Invoke(iteration, fitResult.LossError, fitResult.EvaluationError);
                if (needStop.HasValue && needStop.Value)
                {
                    break;
                }
            }
            return(agent);
        }
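A hypothetical call to this recurrent overload, showing how `actionPerIteration` can log progress and stop training early (the `teacher` instance, hyperparameters, and loss threshold are assumptions):

            // Hypothetical usage: log each epoch and stop once the loss is small enough.
            var trainedAgent = teacher.Teach(agent,
                iterationCount: 100,
                rolloutCount: 10,
                minibatchSize: 32,
                sequenceLength: 4,
                actionPerIteration: (epoch, lossError, evalError) =>
                {
                    Console.WriteLine($"Epoch {epoch}: loss {lossError:F4}, eval {evalError:F4}");
                    return lossError < 0.001; // returning true interrupts training
                });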