Example #1
        public static void Run()
        {
            //var ((x_train, y_train), (x_test, y_test)) = IMDB.LoadData(num_words: top_words);
            var ((x_train, y_train), (x_test, y_test)) = LoadDataRussianLanguageToxicComments(
                trainCount: train_count, testCount: test_count, numWords: top_words, maxWords: max_words);

            //No need to pad the arrays to 500 elements: they are already 500 elements long
            //x_train = SequenceUtil.PadSequences(x_train, maxlen: max_words);
            //x_test = SequenceUtil.PadSequences(x_test, maxlen: max_words);

            //Create model
            Sequential model = new Sequential();

            model.Add(new Embedding(top_words, 32, input_length: max_words));
            model.Add(new Conv1D(filters: 32, kernel_size: 3, padding: "same", activation: "relu"));
            model.Add(new MaxPooling1D(pool_size: 2));
            model.Add(new Flatten());
            model.Add(new Dense(250, activation: "relu"));
            model.Add(new Dense(1, activation: "sigmoid"));

            model.Compile(loss: "binary_crossentropy", optimizer: "adam", metrics: new string[] { "accuracy" });
            model.Summary();

            // Fit the model
            model.Fit(x_train, y_train, validation_data: new NDarray[] { x_test, y_test },
                      epochs: 2 /*10*/, batch_size: 128, verbose: 2);
            // Final evaluation of the model
            var scores = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Accuracy: " + (scores[1] * 100));

            model.Save("model.h5");
            File.WriteAllText("model.json", model.ToJson());    //save model
            //model.SaveTensorflowJSFormat("./");   //error - Cannot perform runtime binding on a null reference
        }
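The custom loader above is not shown on this listing. A hypothetical signature, inferred from the call site only (the body is a placeholder, not the original implementation):

        // Hypothetical signature for the loader used in Example #1; the real
        // implementation would tokenize the comments, keep the numWords most
        // frequent tokens, and return sequences already sized to maxWords.
        static ((NDarray, NDarray), (NDarray, NDarray)) LoadDataRussianLanguageToxicComments(
            int trainCount, int testCount, int numWords, int maxWords)
        {
            // ...dataset-specific loading and tokenization would go here...
            throw new NotImplementedException();
        }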
Example #2
        // Trains a selectable SNN or CNN network from the given config.
        // The resulting model and scores are stored for further use.
        public void Process(Config _config, int num_classes)
        {
            if (x_train == null || y_train == null || x_test == null || y_test == null)
            {
                throw new Exception("Dataset was null!");
            }

            dtStart = DateTime.Now;
            config  = _config;

            Shape input_shape = null;

            if (config.isCNN)
            {
                input_shape = GetShape();
            }

            if (config.isNormalize)
            {
                x_train  = x_train.astype(np.float32);
                x_test   = x_test.astype(np.float32);
                x_train /= 255;
                x_test  /= 255;
            }

            Console.WriteLine("Test Started {0}", dtStart);
            Console.WriteLine("Network Type: {0} Neural Network", (!config.isCNN) ? "Simple" : "Convolution");
            Console.WriteLine("Width: {0} Height: {1} Size:{2}", width, height, width * height);
            Console.WriteLine("x_train shape: {0} x_train samples: {1} x_test shape: {2} x_test samples: {3}",
                              x_train.shape, x_train.shape[0], x_test.shape, x_test.shape[0]);

            y_train = Util.ToCategorical(y_train, num_classes);
            y_test  = Util.ToCategorical(y_test, num_classes);

            starttime = Utils.GetDateTime_Formatted();
            log_file  = Utils.GetFileWithExtension(log_dir, starttime, log_ext);

            if (!config.isCNN)
            {
                model = ProcessSnnModel(x_train, y_train, x_test, y_test, num_classes, log_file, config);
            }
            else
            {
                model = ProcessCnnModel(input_shape, x_train, y_train, x_test, y_test, num_classes, log_file, config);
            }

            dtEnd = DateTime.Now;

            // Score the model for performance
            score = model.Evaluate(x_test, y_test, verbose: 0);
            TimeSpan ts = dtEnd - dtStart;

            model.Summary();
            Console.WriteLine("Test End: {0}  Duration: {1}:{2}.{3}", dtEnd, ts.Hours, ts.Minutes, ts.Seconds);
            Console.WriteLine("Loss: {0} Accuracy: {1}", score[0], score[1]);
        }
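The GetShape helper called above is not shown. A minimal sketch, assuming single-channel images and following the channels_first/channels_last check used in the MNIST examples below:

        // Minimal sketch of GetShape, assuming single-channel images; width and
        // height are the fields printed in the log output above.
        Shape GetShape()
        {
            return K.ImageDataFormat() == "channels_first"
                   ? new Shape(1, height, width)     // (channels, rows, cols)
                   : new Shape(height, width, 1);    // (rows, cols, channels)
        }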
Example #3
        static void SummarizePerformance(int epoch, Sequential generator, Sequential discriminator, NDarray dataset, int latentDim, int sampleCount = 50)
        {
            var real    = GenerateRealSamples(dataset, sampleCount);
            var realAcc = discriminator.Evaluate(real.Item1, real.Item2, verbose: 0);

            var fake    = GenerateFakeGeneratorSamples(generator, latentDim, sampleCount);
            var fakeAcc = discriminator.Evaluate(fake.Item1, fake.Item2, verbose: 0);

            Console.WriteLine("Accuracy real: \t " + realAcc.Last() * 100 + "% \t fake: " + fakeAcc.Last() * 100 + "%");
            var fakes = fake.Item1;

            fakes = (fakes + 1) / 2;    // rescale generator output from [-1, 1] to [0, 1] before saving as images

            for (int i = 0; i < sampleCount; i++)
            {
                SaveArrayAsImage(fakes[i], "output/gantest_" + epoch + "_" + i + ".png");
            }

            generator.Save("output/generator" + epoch + ".h5");
        }
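The sampling helpers called in SummarizePerformance are not shown. Minimal sketches following the usual GAN recipe (names come from the call sites; the bodies are illustrative assumptions, not the original implementation):

        static Tuple<NDarray, NDarray> GenerateRealSamples(NDarray dataset, int sampleCount)
        {
            // pick sampleCount random rows and label them as real (1)
            var indices = np.random.randint(0, dataset.shape[0], new int[] { sampleCount });
            return Tuple.Create(dataset[indices], np.ones(sampleCount, 1));
        }

        static Tuple<NDarray, NDarray> GenerateFakeGeneratorSamples(Sequential generator, int latentDim, int sampleCount)
        {
            // run the generator on N(0, 1) latent points and label the outputs as fake (0)
            var latent = np.random.randn(sampleCount, latentDim);
            return Tuple.Create(generator.Predict(latent), np.zeros(sampleCount, 1));
        }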
Example #4

        public static void Run()
        {
            //Load IMDb dataset
            var ((x_train, y_train), (x_test, y_test)) = IMDB.LoadData();

            var X = np.concatenate(new NDarray[] { x_train, x_test }, axis: 0);
            var Y = np.concatenate(new NDarray[] { y_train, y_test }, axis: 0);

            Console.WriteLine("Shape of X: " + X.shape);
            Console.WriteLine("Shape of Y: " + Y.shape);

            //We can get an idea of the total number of unique words in the dataset
            //(the np.unique computation is left commented out below, so only the label prints).
            Console.WriteLine("Number of words: ");
            var hstack = np.hstack(new NDarray[] { X });
            //var unique = hstack.unique();
            //Console.WriteLine(np.unique(np.hstack(new NDarray[] { X })).Item1);

            // Load the dataset but only keep the top n words, zero the rest
            int top_words = 1000; // 5000

            ((x_train, y_train), (x_test, y_test)) = IMDB.LoadData(num_words: top_words);

            int max_words = 500;

            x_train = SequenceUtil.PadSequences(x_train, maxlen: max_words);
            x_test  = SequenceUtil.PadSequences(x_test, maxlen: max_words);

            //Create model
            Sequential model = new Sequential();

            model.Add(new Embedding(top_words, 32, input_length: max_words));
            model.Add(new Conv1D(filters: 32, kernel_size: 3, padding: "same", activation: "relu"));
            model.Add(new MaxPooling1D(pool_size: 2));
            model.Add(new Flatten());
            model.Add(new Dense(250, activation: "relu"));
            model.Add(new Dense(1, activation: "sigmoid"));

            model.Compile(loss: "binary_crossentropy", optimizer: "adam", metrics: new string[] { "accuracy" });
            model.Summary();

            // Fit the model
            model.Fit(x_train, y_train, validation_data: new NDarray[] { x_test, y_test },
                      epochs: 1 /*10*/, batch_size: 128, verbose: 2);
            // Final evaluation of the model
            var scores = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Accuracy: " + (scores[1] * 100));

            model.Save("model.h5");
            File.WriteAllText("model.json", model.ToJson());    //save model
            //model.SaveTensorflowJSFormat("./");   //error - Cannot perform runtime binding on a null reference
        }
Example #5

        public static void Run()
        {
            //Load train data
            NDarray dataset = np.loadtxt(fname: "C:/Project/LSTMCoreApp/pima-indians-diabetes.data.csv", delimiter: ",");
            var     X       = dataset[":,0: 8"];
            var     Y       = dataset[":, 8"];

            //Build sequential model
            var model = new Sequential();

            model.Add(new Dense(12, input_dim: 8, kernel_initializer: "uniform", activation: "relu"));
            model.Add(new Dense(8, kernel_initializer: "uniform", activation: "relu"));
            model.Add(new Dense(1, activation: "sigmoid"));

            //Compile and train
            model.Compile(optimizer: "adam", loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
            model.Fit(X, Y, batch_size: 10, epochs: 150, verbose: 1);

            //Evaluate model
            var scores = model.Evaluate(X, Y, verbose: 1);

            Console.WriteLine("Accuracy: {0}", scores[1] * 100);

            //Save model and weights
            string json = model.ToJson();

            File.WriteAllText("model.json", json);
            model.SaveWeight("model.h5");
            Console.WriteLine("Saved model to disk");
            //Load model and weight
            var loaded_model = Sequential.ModelFromJson(File.ReadAllText("model.json"));

            loaded_model.LoadWeight("model.h5");
            Console.WriteLine("Loaded model from disk");

            loaded_model.Compile(optimizer: "rmsprop", loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
            scores = loaded_model.Evaluate(X, Y, verbose: 1);
            Console.WriteLine("Accuracy: {0}", scores[1] * 100);
        }
Example #6

        public void Train()
        {
            // Build CNN model
            _model.Add(new Conv2D(32, kernel_size: (3, 3).ToTuple(),
                                  padding: Settings.PaddingMode,
                                  input_shape: new Shape(Settings.ImgWidth, Settings.ImgHeight, Settings.Channels)));
            _model.Add(new Activation(Settings.ActivationFunction));
            _model.Add(new Conv2D(32, (3, 3).ToTuple()));
            _model.Add(new Activation(Settings.ActivationFunction));
            _model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            _model.Add(new Dropout(0.25));

            _model.Add(new Conv2D(64, kernel_size: (3, 3).ToTuple(),
                                  padding: Settings.PaddingMode));
            _model.Add(new Activation(Settings.ActivationFunction));
            _model.Add(new Conv2D(64, (3, 3).ToTuple()));
            _model.Add(new Activation(Settings.ActivationFunction));
            _model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            _model.Add(new Dropout(0.25));

            _model.Add(new Flatten());
            _model.Add(new Dense(Settings.FullyConnectedNodes));
            _model.Add(new Activation(Settings.ActivationFunction));
            _model.Add(new Dropout(0.5));
            _model.Add(new Dense(_dataset.NumberClasses));
            _model.Add(new Softmax());

            _model.Compile(loss: Settings.LossFunction,
                           optimizer: Settings.Optimizer,
                           metrics: new string[] { Settings.Accuracy });

            _model.Fit(_dataset.TrainX, _dataset.TrainY,
                       batch_size: Settings.BatchSize,
                       epochs: Settings.Epochs,
                       validation_data: new NDarray[] { _dataset.ValidationX, _dataset.ValidationY });

            var score = _model.Evaluate(_dataset.ValidationX, _dataset.ValidationY, verbose: 0);

            Console.WriteLine("Test loss:" + score[0]);
            Console.WriteLine("Test accuracy:" + score[1]);
        }
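The Settings holder referenced throughout Example #6 is not shown. A hypothetical shape, with member names mirrored from the call sites and values borrowed from the other CNN examples on this page:

        // Hypothetical Settings holder for Example #6 (all values are assumptions).
        static class Settings
        {
            public const string PaddingMode        = "same";
            public const string ActivationFunction = "relu";
            public const string LossFunction       = "categorical_crossentropy";
            public const string Optimizer          = "adam";
            public const string Accuracy           = "accuracy";
            public const int    ImgWidth            = 32;
            public const int    ImgHeight           = 32;
            public const int    Channels            = 3;
            public const int    FullyConnectedNodes = 512;
            public const int    BatchSize           = 128;
            public const int    Epochs              = 12;
        }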
Example #7
        static void Main(string[] args)
        {
            // the data, split between train and test sets
            var ((x_train, y_train), (x_test, y_test)) = BostonHousing.LoadData();

            //Explore the data structure using basic C# Commands
            WriteLine($"Type of the Dataset: {y_train.GetType()}");
            WriteLine($"Shape of Training Data : {x_train.shape}");
            WriteLine($"Shape of training label: {y_train.shape}");
            WriteLine($"Shape of Testing Data : {x_test.shape}");
            WriteLine($"Shape of testing Labels : {y_test.shape}");

            //Check the Contents of the training dataset Using the slicing notation
            WriteLine(x_train[":3,:"]);

            // Extract the rows from index 300 onward from the training data to use as a validation dataset
            var x_val = x_train["300:,"];
            var y_val = y_train["300:,"];

            //Define the model architecture
            var model = new Sequential();

            model.Add(new Dense(13, kernel_initializer: "normal", activation: "relu", input_dim: 13));
            model.Add(new Dense(6, activation: "relu", kernel_initializer: "normal")); // input_dim is only needed on the first layer
            model.Add(new Dense(1, kernel_initializer: "normal"));                     // linear output for regression

            //Compile model
            model.Compile(loss: "mean_squared_error", optimizer: "adam", metrics: new string[] { "mean_absolute_percentage_error" });

            //Train the Model
            model.Fit(x_train, y_train, batch_size: 32, epochs: 10, validation_data: new NDarray[] { x_val, y_val });

            var results = model.Evaluate(x_test, y_test, verbose: 0);

            WriteLine("\n\n\n\n===========================================================");
            WriteLine($"Loss : {results[0]}");
            WriteLine($"Mean Absolute Percentage Error : {results[1]}");
        }
Example #8
        static void Main(string[] args)
        {
            //Mock data; fix the file path for your machine
            NDarray dataset = Numpy.np.loadtxt(fname: "C:\\Natan\\csharp\\trial11\\pima-indians-diabetes.csv", delimiter: ",");
            var     X       = dataset[":,0: 8"];
            var     Y       = dataset[":, 8"];
            var     model   = new Sequential();

            model.Add(new Dense(12, input_dim: 8, kernel_initializer: "uniform", activation: "relu"));
            model.Add(new Dense(8, kernel_initializer: "uniform", activation: "relu"));
            model.Add(new Dense(1, activation: "sigmoid"));
            model.Compile(optimizer: "adam", loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
            model.Fit(X, Y, batch_size: 10, epochs: 150, verbose: 1);
            model.Save("modelAA.h5");
            double[] scores = model.Evaluate(X, Y);
            // Evaluate returns the metrics in Compile order: [loss, accuracy]
            foreach (double sc in scores)
            {
                Console.WriteLine(sc);
            }

            Console.WriteLine("Hello World! we learned");
            Console.ReadKey();
        }
Example #9
        public static void Run()
        {
            int batch_size  = 128;
            int num_classes = 10;
            int epochs      = 12;

            // input image dimensions
            int img_rows = 28, img_cols = 28;

            Shape input_shape = null;

            // the data, split between train and test sets
            var ((x_train, y_train), (x_test, y_test)) = MNIST.LoadData();

            if (K.ImageDataFormat() == "channels_first")
            {
                x_train     = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols);
                x_test      = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols);
                input_shape = (1, img_rows, img_cols);
            }
            else
            {
                x_train     = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1);
                x_test      = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1);
                input_shape = (img_rows, img_cols, 1);
            }

            x_train  = x_train.astype(np.float32);
            x_test   = x_test.astype(np.float32);
            x_train /= 255;
            x_test  /= 255;
            Console.WriteLine("x_train shape: " + x_train.shape);
            Console.WriteLine(x_train.shape[0] + " train samples");
            Console.WriteLine(x_test.shape[0] + " test samples");

            // convert class vectors to binary class matrices
            y_train = Util.ToCategorical(y_train, num_classes);
            y_test  = Util.ToCategorical(y_test, num_classes);

            // Build CNN model
            var model = new Sequential();

            model.Add(new Conv2D(32, kernel_size: (3, 3).ToTuple(),
                                 activation: "relu",
                                 input_shape: input_shape));
            model.Add(new Conv2D(64, (3, 3).ToTuple(), activation: "relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));
            model.Add(new Flatten());
            model.Add(new Dense(128, activation: "relu"));
            model.Add(new Dropout(0.5));
            model.Add(new Dense(num_classes, activation: "softmax"));

            model.Compile(loss: "categorical_crossentropy",
                          optimizer: new Adadelta(), metrics: new string[] { "accuracy" });

            model.Fit(x_train, y_train,
                      batch_size: batch_size,
                      epochs: epochs,
                      verbose: 1,
                      validation_data: new NDarray[] { x_test, y_test });


            var score = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Test loss:" + score[0]);
            Console.WriteLine("Test accuracy:" + score[1]);
        }
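After training, a single test digit can be classified. A brief inference sketch (the helper name is ours; Predict and np.argmax are the standard Keras.NET / Numpy.NET calls):

        static void PredictFirstTestDigit(Sequential model, NDarray x_test)
        {
            var probs = model.Predict(x_test[":1"]);   // softmax probabilities over the 10 classes
            Console.WriteLine("Predicted digit: " + np.argmax(probs));
        }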
Example #10
        static void Main(string[] args)
        {
            int batch_size  = 128; //Training batch size
            int num_classes = 10;  //No. of classes
            int epochs      = 12;  //No. of epochs we will train

            // input image dimensions
            int img_rows = 28, img_cols = 28;

            // Declare the input shape for the network
            Shape input_shape = null;

            // Load the MNIST dataset into Numpy array
            var ((x_train, y_train), (x_test, y_test)) = MNIST.LoadData();

            //Check if it's channels first or last and rearrange the dataset accordingly
            if (K.ImageDataFormat() == "channels_first")
            {
                x_train     = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols);
                x_test      = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols);
                input_shape = (1, img_rows, img_cols);
            }
            else
            {
                x_train     = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1);
                x_test      = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1);
                input_shape = (img_rows, img_cols, 1);
            }

            //Normalize the input data
            x_train  = x_train.astype(np.float32);
            x_test   = x_test.astype(np.float32);
            x_train /= 255;
            x_test  /= 255;
            Console.WriteLine("x_train shape: " + x_train.shape);
            Console.WriteLine(x_train.shape[0] + " train samples");
            Console.WriteLine(x_test.shape[0] + " test samples");

            // Convert class vectors to binary class matrices
            y_train = Util.ToCategorical(y_train, num_classes);
            y_test  = Util.ToCategorical(y_test, num_classes);

            // Build CNN model
            var model = new Sequential();

            model.Add(new Conv2D(32, kernel_size: (3, 3).ToTuple(),
                                 activation: "relu",
                                 input_shape: input_shape));
            model.Add(new Conv2D(64, (3, 3).ToTuple(), activation: "relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));
            model.Add(new Flatten());
            model.Add(new Dense(128, activation: "relu"));
            model.Add(new Dropout(0.5));
            model.Add(new Dense(num_classes, activation: "softmax"));

            //Compile with loss, metrics and optimizer
            model.Compile(loss: "categorical_crossentropy",
                          optimizer: new Adadelta(), metrics: new string[] { "accuracy" });

            //Train the model
            model.Fit(x_train, y_train,
                      batch_size: batch_size,
                      epochs: epochs,
                      verbose: 1,
                      validation_data: new NDarray[] { x_test, y_test });


            //Score the model for performance
            var score = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Test loss:" + score[0]);
            Console.WriteLine("Test accuracy:" + score[1]);

            // Save the model to HDF5 format which can be loaded later or ported to other applications
            model.Save("model.h5");
            // Export to TensorFlow JS / ONNX formats for testing in other runtimes (calls left commented out below)
            var v = K.Instance;

            //model.SaveTensorflowJSFormat(@"C:\_temp\");
            //model.SaveOnnx(@"C:\_temp\");
            Console.ReadLine();
        }
Example #11
        public static void Run()
        {
            int batch_size  = 128;
            int num_classes = 10;
            int epochs      = 100;

            // the data, split between train and test sets
            var ((x_train, y_train), (x_test, y_test)) = Cifar10.LoadData();

            Console.WriteLine("x_train shape: " + x_train.shape);
            Console.WriteLine(x_train.shape[0] + " train samples");
            Console.WriteLine(x_test.shape[0] + " test samples");

            // convert class vectors to binary class matrices
            y_train = Util.ToCategorical(y_train, num_classes);
            y_test  = Util.ToCategorical(y_test, num_classes);

            // Build CNN model
            var model = new Sequential();

            model.Add(new Conv2D(32, kernel_size: (3, 3).ToTuple(),
                                 padding: "same",
                                 input_shape: new Shape(32, 32, 3)));
            model.Add(new Activation("relu"));
            model.Add(new Conv2D(32, (3, 3).ToTuple()));
            model.Add(new Activation("relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));

            model.Add(new Conv2D(64, kernel_size: (3, 3).ToTuple(),
                                 padding: "same"));
            model.Add(new Activation("relu"));
            model.Add(new Conv2D(64, (3, 3).ToTuple()));
            model.Add(new Activation("relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));

            model.Add(new Flatten());
            model.Add(new Dense(512));
            model.Add(new Activation("relu"));
            model.Add(new Dropout(0.5));
            model.Add(new Dense(num_classes));
            model.Add(new Activation("softmax"));

            model.Compile(loss: "categorical_crossentropy",
                          optimizer: new RMSprop(lr: 0.0001f, decay: 1e-6f), metrics: new string[] { "accuracy" });

            x_train  = x_train.astype(np.float32);
            x_test   = x_test.astype(np.float32);
            x_train /= 255;
            x_test  /= 255;

            model.Fit(x_train, y_train,
                      batch_size: batch_size,
                      epochs: epochs,
                      verbose: 1,
                      validation_data: new NDarray[] { x_test, y_test },
                      shuffle: true);

            //Save model and weights
            //string model_path = "./model.json";
            //string weight_path = "./weights.h5";
            //string json = model.ToJson();
            //File.WriteAllText(model_path, json);
            //model.SaveWeight(weight_path);
            model.Save("model.h5");
            model.SaveTensorflowJSFormat("./");

            //Score trained model.
            var score = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Test loss:" + score[0]);
            Console.WriteLine("Test accuracy:" + score[1]);
        }
Example #12
        static void Main(string[] args)
        {
            var datasetTrain = new List<double[]>();
            var datasetTest  = new List<double[]>();

            using (var zipToOpen = File.OpenRead(@"Dataset\mnist-in-csv.zip"))
                using (ZipArchive archive = new ZipArchive(zipToOpen, ZipArchiveMode.Read))
                {
                    using (var train = new StreamReader(archive.GetEntry("mnist_train.csv").Open()))
                    {
                        datasetTrain = train.ReadToEnd()
                                       .Split('\n')
                                       .Skip(1)
                                       .Select(p => p.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries))
                                       .Where(p => p.Length > 0)
                                       .Select(p => p.Select(q => double.Parse(q)).ToArray())
                                       .Select(p => p.Skip(1)                          //the first column is the label, so skip it
                                               .Concat(MnistOneHotEncoding((int)p[0])) //move the label to the end of the feature array, one-hot encoded on the fly
                                               .ToArray())
                                       .ToList();
                    }
                    using (var test = new StreamReader(archive.GetEntry("mnist_test.csv").Open()))
                    {
                        datasetTest = test.ReadToEnd()
                                      .Split('\n')
                                      .Skip(1)
                                      .Select(p => p.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries))
                                      .Where(p => p.Length > 0)
                                      .Select(p => p.Select(q => double.Parse(q)).ToArray())
                                      .Select(p => p.Skip(1)
                                              .Concat(MnistOneHotEncoding((int)p[0]))
                                              .ToArray())
                                      .ToList();
                    }
                }

            var device = DeviceDescriptor.GPUDevice(0);

            int minibatchSize  = 512;
            int inputDimension = 784;
            int epochs         = 50;

            var model = new Sequential<double>(device, new[] { inputDimension }, inputName: "Input");

            model.Add(new Residual2(784, new Tanh()));
            model.Add(new Residual2(300, new Tanh()));
            model.Add(new Dense(10, new Sigmoid()));

            var fitResult = model.Fit(datasetTrain,
                                      inputDimension,
                                      minibatchSize,
                                      new SquaredError(),
                                      new ClassificationError(),
                                      new Adam(0.1, 0.9, minibatchSize),
                                      epochs,
                                      shuffleSampleInMinibatchesPerEpoch: false,
                                      device: device,
                                      ruleUpdateLearningRate: (epoch, learningRate) => epoch % 10 == 0 ? 0.95 * learningRate : learningRate,
                                      actionPerEpoch: (epoch, loss, eval) =>
            {
                Console.WriteLine($"Loss: {loss:F10} Eval: {eval:F3} Epoch: {epoch}");
                if (eval < 0.05)     //classification error is below 5%: save the model to a file and stop training
                {
                    model.SaveModel($"{model}.model", saveArchitectureDescription: false);
                    return true;
                }
                return false;
            },
                                      inputName: "Input");

            Console.WriteLine($"Duration train: {fitResult.Duration}");
            Console.WriteLine($"Epochs: {fitResult.EpochCount}");
            Console.WriteLine($"Loss error: {fitResult.LossError}");
            Console.WriteLine($"Eval error: {fitResult.EvaluationError}");

            var metricsTrain = model
                               .Evaluate(datasetTrain, inputDimension, device)
                               .GetOneLabelClassificationMetrics();

            Console.WriteLine($"---Train---");
            Console.WriteLine($"Accuracy: {metricsTrain.Accuracy}");
            metricsTrain.ClassesDistribution.ForEach(p => Console.WriteLine($"Class: {p.Index} | Precision: {p.Precision:F5} | Recall: {p.Recall:F5} | Fraction: {p.Fraction * 100:F3}"));

            var metricsTest = model
                              .Evaluate(datasetTest, inputDimension, device)
                              .GetOneLabelClassificationMetrics();

            Console.WriteLine($"---Test---");
            Console.WriteLine($"Accuracy: {metricsTest.Accuracy}");
            metricsTest.ClassesDistribution.ForEach(p => Console.WriteLine($"Class: {p.Index} | Precision: {p.Precision:F5} | Recall: {p.Recall:F5} | Fraction: {p.Fraction * 100:F3}"));

            Console.Read();
        }
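The MnistOneHotEncoding helper used when parsing the CSV is not shown. A minimal sketch (the name comes from the call site):

        // One-hot encodes a digit label into a 10-element vector.
        static double[] MnistOneHotEncoding(int label)
        {
            var encoded = new double[10];   // one slot per digit class 0..9
            encoded[label] = 1.0;           // mark the true class
            return encoded;
        }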
Example #13
        static void Main(string[] args)
        {
            CNTKLib.SetFixedRandomSeed(0); //for reproducibility: weight initialization in the network layers
                                           //depends on the CNTK random number generator

            //create a simulated dataset from sequences describing a sinusoid
            var dataset = Enumerable.Range(1, 2000)
                          .Select(p => Math.Sin(p / 100.0))                                            //decrease the step so that the sine wave is smoother
                          .Segment(10)                                                                 //break the sinusoid into segments of 10 elements
                          .Select(p => (featureSequence: p.Take(9).Select(q => new[] { q }).ToArray(), //a sequence of 9 elements, each of dimension 1 (may be 1, 2, 3...n)
                                        label: new[] { p[9] }))                                        //a label for the sequence, of dimension 1 (may be 1, 2, 3...n)
                          .ToArray();

            dataset.Split(0.7, out var train, out var test);

            int minibatchSize  = 16;
            int epochCount     = 300;
            int inputDimension = 1;
            var device         = DeviceDescriptor.GPUDevice(0);

            var model = new Sequential<double>(device, new[] { inputDimension }, inputName: "Input");

            model.Add(new LSTM(1, selfStabilizerLayer: new SelfStabilization()));
            model.Add(new Residual2(1, new Tanh()));

            //it is possible to join LSTM layers one after another as in the comment below:
            //var model = new Sequential<double>(device, new[] { inputDimension });
            //model.Add(new Dense(3, new Tanh()));
            //model.Add (new LSTM (10, isLastLstm: false)); // LSTM can also be the first layer in the model
            //model.Add(new LSTM(5, isLastLstm: false));
            //model.Add(new LSTM(2, selfStabilizerLayer: new SelfStabilization()));
            //model.Add(new Residual2(1, new Tanh()));

            //uses one of several overloads that can train recurrent networks
            var fitResult = model.Fit(features:                           train.Select(p => p.featureSequence).ToArray(),
                                      labels:                             train.Select(p => p.label).ToArray(),
                                      minibatchSize:                      minibatchSize,
                                      lossFunction:                       new AbsoluteError(),
                                      evaluationFunction:                 new AbsoluteError(),
                                      optimizer:                          new Adam(0.005, 0.9, minibatchSize),
                                      epochCount:                         epochCount,
                                      device:                             device,
                                      shuffleSampleInMinibatchesPerEpoch: true,
                                      ruleUpdateLearningRate: (epoch, learningRate) => epoch % 50 == 0 ? 0.95 * learningRate : learningRate,
                                      actionPerEpoch: (epoch, loss, eval) =>
            {
                Console.WriteLine($"Loss: {loss:F10} Eval: {eval:F3} Epoch: {epoch}");
                if (loss < 0.05)     //stopping criterion reached: save the model to a file and finish training (approximately 112 epochs)
                {
                    model.SaveModel($"{model}.model", saveArchitectureDescription: false);
                    return true;
                }
                return false;
            },
                                      inputName: "Input");

            Console.WriteLine($"Duration train: {fitResult.Duration}");
            Console.WriteLine($"Epochs: {fitResult.EpochCount}");
            Console.WriteLine($"Loss error: {fitResult.LossError}");
            Console.WriteLine($"Eval error: {fitResult.EvaluationError}");

            var metricsTrain = model
                               .Evaluate(train.Select(p => p.featureSequence), train.Select(p => p.label), device)
                               .GetRegressionMetrics();
            var metricsTest = model
                              .Evaluate(test.Select(p => p.featureSequence), test.Select(p => p.label), device)
                              .GetRegressionMetrics();

            Console.WriteLine($"Train => MAE: {metricsTrain[0].MAE} RMSE: {metricsTrain[0].RMSE} R2: {metricsTrain[0].Determination}"); //R2 ~ 0.983
            Console.WriteLine($"Test  => MAE: {metricsTest[0].MAE} RMSE: {metricsTest[0].RMSE} R2: {metricsTest[0].Determination}");    //R2 ~ 0.982

            Console.ReadKey();
        }
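The Segment extension used when building the dataset is not shown. A minimal sketch (the name comes from the call site) that chops a sequence into consecutive fixed-length chunks:

        // Minimal sketch of the Segment extension assumed above; it yields
        // consecutive chunks of the given size and drops any incomplete tail.
        static class EnumerableExtensions
        {
            public static IEnumerable<T[]> Segment<T>(this IEnumerable<T> source, int size)
            {
                var buffer = new List<T>(size);
                foreach (var item in source)
                {
                    buffer.Add(item);
                    if (buffer.Count == size)
                    {
                        yield return buffer.ToArray();
                        buffer.Clear();
                    }
                }
            }
        }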