Example #1
        public static void FitMnist()
        {
            var model = new Sequential();

            model.Add(new Conv2D(32, kernelSize: new int[] { 3, 3 }, inputShape: new int[] { 28, 28, 1 }, activation: "relu"));
            model.Add(new Conv2D(64, kernelSize: new int[] { 3, 3 }, activation: "relu"));
            model.Add(new MaxPooling2D(poolSize: new int[] { 2, 2 }));
            model.Add(new Dropout(0.25));
            model.Add(new Flatten());
            model.Add(new Dense(128, activation: "relu"));
            model.Add(new Dropout(0.5));
            model.Add(new Dense(10, activation: "softmax"));

            var optimizer = new SGD(lr: 0.01);

            model.Compile("categorical_crossentropy", optimizer, new string[] { "accuracy" });

            var xtrain = TensorUtils.Deserialize(new FileStream(GetDataPath("datasets/nda_mnist/mnist_xtrain.nda"), FileMode.Open));
            var ytrain = TensorUtils.Deserialize(new FileStream(GetDataPath("datasets/nda_mnist/mnist_ytrain.nda"), FileMode.Open));

            xtrain = xtrain.Cast(DType.Float32);
            xtrain = Ops.Div(null, xtrain, 255f);

            ytrain = ytrain.Cast(DType.Float32);

            model.Fit(xtrain, ytrain, batchSize: 128, epochs: 12);

            var stream = new FileStream("c:/ttt/mnist.model", FileMode.OpenOrCreate, FileAccess.Write);

            stream.SetLength(0);

            model.Save(stream);
        }
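A natural follow-up is to score the trained network on the held-out split. The sketch below reuses the deserialization and preprocessing pattern from the method above; the mnist_xtest.nda/mnist_ytest.nda file names and the Evaluate(x, y) call are assumptions, not part of the original example.

        public static void EvaluateMnist(Sequential model)
        {
            var xtest = TensorUtils.Deserialize(new FileStream(GetDataPath("datasets/nda_mnist/mnist_xtest.nda"), FileMode.Open));
            var ytest = TensorUtils.Deserialize(new FileStream(GetDataPath("datasets/nda_mnist/mnist_ytest.nda"), FileMode.Open));

            // Same preprocessing as in training: cast to float32 and scale into [0, 1]
            xtest = xtest.Cast(DType.Float32);
            xtest = Ops.Div(null, xtest, 255f);
            ytest = ytest.Cast(DType.Float32);

            var scores = model.Evaluate(xtest, ytest); // hypothetical Evaluate counterpart to Fit
            Console.WriteLine("Test metrics: " + string.Join(", ", scores));
        }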
Example #2
        public static void Train()
        {
            var model = new Sequential();

            // Embedding layer; `output` is the vocabulary size (a class field, see the sketch after this example)
            model.Add(new Embedding(output, 100, input_length: 32));

            model.Add(new Conv1D(64, 3, padding: "causal", activation: "tanh"));
            model.Add(new Dropout(0.2));
            model.Add(new MaxPooling1D(2));

            model.Add(new Conv1D(128, 3, activation: "relu", dilation_rate: 2, padding: "causal"));
            model.Add(new Dropout(0.2));
            model.Add(new MaxPooling1D(2));

            model.Add(new Conv1D(256, 3, activation: "relu", dilation_rate: 4, padding: "causal"));
            model.Add(new Dropout(0.2));
            model.Add(new MaxPooling1D(2));

            //model.Add(new Conv1D(256, 5, activation: "relu"));
            model.Add(new GlobalMaxPooling1D());

            model.Add(new Dense(256, activation: "relu"));
            model.Add(new Dense(output, activation: "softmax"));

            model.Compile(loss: "sparse_categorical_crossentropy", optimizer: new Adam());
            model.Summary();

            var mc      = new ModelCheckpoint("best_model.h5", monitor: "val_loss", mode: "min", save_best_only: true, verbose: 1);
            var history = model.Fit(train_x, train_y, batch_size: 32, epochs: 100, validation_split: 0.25f, verbose: 1, callbacks: new Callback[] { mc });

            model.Save("last_epoch.h5");
        }
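The method references three class members that the snippet does not define: output, train_x and train_y. A hedged sketch of what they could look like, inferred from the call sites (names and the value are hypothetical):

        // `output`  - vocabulary size; doubles as the softmax width above
        // `train_x` - NDarray of shape (samples, 32) with token-id windows
        // `train_y` - NDarray of shape (samples,) with the next token id,
        //             matching the sparse_categorical_crossentropy loss
        static int output = 10000;
        static NDarray train_x, train_y;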
Example #3
        public static void FitMnistSimple()
        {
            var model = new Sequential();

            model.Add(new Dense(512, activation: "relu", inputShape: new int[] { 784 }));
            model.Add(new Dropout(0.2));
            model.Add(new Dense(512, activation: "relu"));
            model.Add(new Dropout(0.2));
            model.Add(new Dense(10, activation: "softmax"));

            var optimizer = new SGD(lr: 0.01);

            model.Compile("categorical_crossentropy", optimizer, new string[] { "accuracy" });

            var xtrain = TensorUtils.Deserialize(new FileStream(GetDataPath("datasets/nda_mnist/mnist_xtrain.nda"), FileMode.Open));
            var ytrain = TensorUtils.Deserialize(new FileStream(GetDataPath("datasets/nda_mnist/mnist_ytrain.nda"), FileMode.Open));

            xtrain = xtrain.Cast(DType.Float32);
            xtrain = Ops.Div(null, xtrain, 255f);

            ytrain = ytrain.Cast(DType.Float32);

            model.Fit(xtrain, ytrain, batchSize: 128, epochs: 20);

            var stream = new FileStream("c:/ttt/mnist-simple.model", FileMode.OpenOrCreate, FileAccess.Write);

            stream.SetLength(0);

            model.Save(stream);
        }
Example #4
        private void PatchInstructorsNotes(EdxCourse edxCourse, Course ulearnCourse, string olxPath)
        {
            var ulearnUnits = ulearnCourse.Units;

            foreach (var chapter in edxCourse.CourseWithChapters.Chapters)
            {
                var chapterUnit = ulearnCourse.Units.FirstOrDefault(u => u.Title == chapter.DisplayName);
                var chapterNote = chapterUnit?.InstructorNote;
                if (chapterUnit == null || chapterNote == null)
                {
                    continue;
                }

                var unitIndex      = ulearnUnits.IndexOf(chapterUnit);
                var displayName    = "Заметки преподавателю";
                var sequentialId   = $"{ulearnCourse.Id}-{unitIndex}-note-seq";
                var verticalId     = $"{ulearnCourse.Id}-{unitIndex}-note-vert";
                var mdBlockId      = $"{ulearnCourse.Id}-{unitIndex}-note-md";
                var sequentialNote = new Sequential(sequentialId, displayName,
                                                    new[]
                {
                    new Vertical(verticalId, displayName, new[] { new MdBlock(chapterNote.Markdown).ToEdxComponent(mdBlockId, displayName, chapterUnit.Directory.FullName) })
                })
                {
                    VisibleToStaffOnly = true
                };
                if (!File.Exists($"{olxPath}/sequential/{sequentialNote.UrlName}.xml"))
                {
                    var sequentials = chapter.Sequentials.ToList();
                    sequentials.Add(sequentialNote);
                    new Chapter(chapter.UrlName, chapter.DisplayName, chapter.Start, sequentials.ToArray()).Save(olxPath);
                }
                sequentialNote.Save(olxPath);
            }
        }
Example #5
        public static void Run()
        {
            //var ((x_train, y_train), (x_test, y_test)) = IMDB.LoadData(num_words: top_words);
            var((x_train, y_train), (x_test, y_test)) = LoadDataRussianLanguageToxicComments(
                trainCount: train_count, testCount: test_count, numWords: top_words, maxWords: max_words);

            // No need to pad the arrays to 500 elements: they are already 500 elements long
            //x_train = SequenceUtil.PadSequences(x_train, maxlen: max_words);
            //x_test = SequenceUtil.PadSequences(x_test, maxlen: max_words);

            //Create model
            Sequential model = new Sequential();

            model.Add(new Embedding(top_words, 32, input_length: max_words));
            model.Add(new Conv1D(filters: 32, kernel_size: 3, padding: "same", activation: "relu"));
            model.Add(new MaxPooling1D(pool_size: 2));
            model.Add(new Flatten());
            model.Add(new Dense(250, activation: "relu"));
            model.Add(new Dense(1, activation: "sigmoid"));

            model.Compile(loss: "binary_crossentropy", optimizer: "adam", metrics: new string[] { "accuracy" });
            model.Summary();

            // Fit the model
            model.Fit(x_train, y_train, validation_data: new NDarray[] { x_test, y_test },
                      epochs: 2 /*10*/, batch_size: 128, verbose: 2);
            // Final evaluation of the model
            var scores = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Accuracy: " + (scores[1] * 100));

            model.Save("model.h5");
            File.WriteAllText("model.json", model.ToJson());    //save model
            //model.SaveTensorflowJSFormat("./");   //error - Cannot perform runtime binding on a null reference
        }
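LoadDataRussianLanguageToxicComments is a project-specific helper that is not shown. Its signature, reconstructed from the call site alone (a sketch, not the real implementation):

        static ((NDarray, NDarray), (NDarray, NDarray)) LoadDataRussianLanguageToxicComments(
            int trainCount, int testCount, int numWords, int maxWords)
        {
            // Expected to return integer sequences already padded to maxWords,
            // with binary labels (toxic / non-toxic) matching the sigmoid output
            throw new NotImplementedException();
        }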
Example #6
        private void PatchInstructorsNotes(EdxCourse edxCourse, Course ulearnCourse, string olxPath)
        {
            var ulearnUnits = ulearnCourse.GetUnits().ToList();

            foreach (var chapter in edxCourse.CourseWithChapters.Chapters)
            {
                var chapterNote = ulearnCourse.InstructorNotes.FirstOrDefault(x => x.UnitName == chapter.DisplayName);
                if (chapterNote == null)
                {
                    continue;
                }
                var unitIndex      = ulearnUnits.IndexOf(chapterNote.UnitName);
                var displayName    = "Заметки преподавателю";
                var sequentialId   = string.Format("{0}-{1}-{2}", ulearnCourse.Id, unitIndex, "note-seq");
                var verticalId     = string.Format("{0}-{1}-{2}", ulearnCourse.Id, unitIndex, "note-vert");
                var mdBlockId      = string.Format("{0}-{1}-{2}", ulearnCourse.Id, unitIndex, "note-md");
                var sequentialNote = new Sequential(sequentialId, displayName,
                                                    new[]
                {
                    new Vertical(verticalId, displayName, new[] { new MdBlock(chapterNote.Markdown).ToEdxComponent(mdBlockId, displayName, ulearnCourse.GetDirectoryByUnitName(chapterNote.UnitName)) })
                })
                {
                    VisibleToStaffOnly = true
                };
                if (!File.Exists(string.Format("{0}/sequential/{1}.xml", olxPath, sequentialNote.UrlName)))
                {
                    var sequentials = chapter.Sequentials.ToList();
                    sequentials.Add(sequentialNote);
                    new Chapter(chapter.UrlName, chapter.DisplayName, chapter.Start, sequentials.ToArray()).Save(olxPath);
                }
                sequentialNote.Save(olxPath);
            }
        }
Example #7
        public static void Run()
        {
            //Load IMDb dataset
            var((x_train, y_train), (x_test, y_test)) = IMDB.LoadData();

            var X = np.concatenate(new NDarray[] { x_train, x_test }, axis: 0);
            var Y = np.concatenate(new NDarray[] { y_train, y_test }, axis: 0);

            Console.WriteLine("Shape of X: " + X.shape);
            Console.WriteLine("Shape of Y: " + Y.shape);

            // We can get an idea of the total number of unique words in the dataset
            var unique = np.unique(np.hstack(new NDarray[] { X }));
            Console.WriteLine("Number of words: " + unique.size);

            // Load the dataset but only keep the top n words, zero the rest
            int top_words = 1000; // the full example uses 5000

            ((x_train, y_train), (x_test, y_test)) = IMDB.LoadData(num_words: top_words);

            int max_words = 500;

            x_train = SequenceUtil.PadSequences(x_train, maxlen: max_words);
            x_test  = SequenceUtil.PadSequences(x_test, maxlen: max_words);

            //Create model
            Sequential model = new Sequential();

            model.Add(new Embedding(top_words, 32, input_length: max_words));
            model.Add(new Conv1D(filters: 32, kernel_size: 3, padding: "same", activation: "relu"));
            model.Add(new MaxPooling1D(pool_size: 2));
            model.Add(new Flatten());
            model.Add(new Dense(250, activation: "relu"));
            model.Add(new Dense(1, activation: "sigmoid"));

            model.Compile(loss: "binary_crossentropy", optimizer: "adam", metrics: new string[] { "accuracy" });
            model.Summary();

            // Fit the model
            model.Fit(x_train, y_train, validation_data: new NDarray[] { x_test, y_test },
                      epochs: 1 /*10*/, batch_size: 128, verbose: 2);
            // Final evaluation of the model
            var scores = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Accuracy: " + (scores[1] * 100));

            model.Save("model.h5");
            File.WriteAllText("model.json", model.ToJson());    //save model
            //model.SaveTensorflowJSFormat("./");   //error - Cannot perform runtime binding on a null reference
        }
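The example writes both model.h5 (the full model) and model.json (architecture only). Restoring from the JSON file follows the usual Keras.NET pattern of loading architecture and weights separately; note this assumes the weights were also saved with model.SaveWeight, which the example above does not do:

        var restored = Sequential.ModelFromJson(File.ReadAllText("model.json"));
        restored.LoadWeight("weights.h5"); // requires a prior model.SaveWeight("weights.h5")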
Example #8
        static void SummarizePerformance(int epoch, Sequential generator, Sequential discriminator, NDarray dataset, int latentDim, int sampleCount = 50)
        {
            var real    = GenerateRealSamples(dataset, sampleCount);
            var realAcc = discriminator.Evaluate(real.Item1, real.Item2, verbose: 0);

            var fake    = GenerateFakeGeneratorSamples(generator, latentDim, sampleCount);
            var fakeAcc = discriminator.Evaluate(fake.Item1, fake.Item2, verbose: 0);

            Console.WriteLine("Accuracy real: \t " + realAcc.Last() * 100 + "% \t fake:" + fakeAcc.Last() * 100 + "%");
            var fakes = fake.Item1;

            // Rescale from the generator's [-1, 1] output range to [0, 1] for image saving
            fakes = (fakes + 1) / 2;

            for (int i = 0; i < sampleCount; i++)
            {
                SaveArrayAsImage(fakes[i], "output/gantest_" + epoch + "_" + i + ".png");
            }

            generator.Save("output/generator" + epoch + ".h5");
        }
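The two sampling helpers are referenced but not shown. Minimal sketches under the usual GAN conventions (real rows drawn at random and labeled 1, generator output labeled 0); the exact signatures are assumptions inferred from the call sites:

        static (NDarray, NDarray) GenerateRealSamples(NDarray dataset, int n)
        {
            var ix = np.random.randint(0, dataset.shape[0], new int[] { n }); // random row indices
            return (dataset[ix], np.ones(new Shape(n, 1)));                   // label real as 1
        }

        static (NDarray, NDarray) GenerateFakeGeneratorSamples(Sequential generator, int latentDim, int n)
        {
            var noise = np.random.randn(n, latentDim);                    // latent points
            return (generator.Predict(noise), np.zeros(new Shape(n, 1))); // label fake as 0
        }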
Example #9
        public static void BuildAndTrain()
        {
            // Model to hold the neural network architecture, which in this case is WaveNet
            var model = new Sequential();

            // Start with an embedding layer; `output` is the vocabulary size (a class field, sketched after Example #2)
            model.Add(new Embedding(output, 100, input_length: 32));

            model.Add(new Conv1D(64, 3, padding: "causal", activation: "tanh"));
            model.Add(new Dropout(0.2));
            model.Add(new MaxPooling1D(2));

            model.Add(new Conv1D(128, 3, activation: "relu", dilation_rate: 2, padding: "causal"));
            model.Add(new Dropout(0.2));
            model.Add(new MaxPooling1D(2));

            model.Add(new Conv1D(256, 3, activation: "relu", dilation_rate: 4, padding: "causal"));
            model.Add(new Dropout(0.2));
            model.Add(new MaxPooling1D(2));

            //model.Add(new Conv1D(256, 5, activation: "relu"));
            model.Add(new GlobalMaxPooling1D());

            model.Add(new Dense(256, activation: "relu"));
            model.Add(new Dense(output, activation: "softmax"));

            // Compile with Adam optimizer
            model.Compile(loss: "sparse_categorical_crossentropy", optimizer: new Adam());
            model.Summary();

            // Callback to store the best trained model
            var mc = new ModelCheckpoint("best_model.h5", monitor: "val_loss", mode: "min", save_best_only: true, verbose: 1);

            // Train the model for 100 epochs
            var history = model.Fit(train_x, train_y, batch_size: 32, epochs: 100, validation_split: 0.25f, verbose: 1, callbacks: new Callback[] { mc });

            // Save the final trained model which we are going to use for prediction
            model.Save("last_epoch.h5");
        }
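Once trained, the model can generate a sequence token by token: feed the last 32 ids, take the argmax, append, repeat. A greedy-decoding sketch, placed after Fit inside BuildAndTrain (seedIds is hypothetical, Skip needs System.Linq, and the GetData scalar unwrapping is an assumption):

        var generated = new List<int>(seedIds); // hypothetical seed of at least 32 token ids
        for (int i = 0; i < 100; i++)
        {
            var window = np.array(generated.Skip(generated.Count - 32).ToArray()).reshape(1, 32);
            var probs  = model.Predict(window);                         // shape (1, output)
            generated.Add((int)np.argmax(probs, 1).GetData<long>()[0]); // greedy pick
        }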
Example #10
        static void Main(string[] args)
        {
            // Load the dataset (adjust the file path to your machine)
            NDarray dataset = Numpy.np.loadtxt(fname: "C:\\Natan\\csharp\\trial11\\pima-indians-diabetes.csv", delimiter: ",");
            var     X       = dataset[":, 0:8"];
            var     Y       = dataset[":, 8"];
            var     model   = new Sequential();

            model.Add(new Dense(12, input_dim: 8, kernel_initializer: "uniform", activation: "relu"));
            model.Add(new Dense(8, kernel_initializer: "uniform", activation: "relu"));
            model.Add(new Dense(1, activation: "sigmoid"));
            model.Compile(optimizer: "adam", loss: "binary_crossentropy", metrics: new string[] { "accuracy" });
            model.Fit(X, Y, batch_size: 10, epochs: 150, verbose: 1);
            model.Save("modelAA.h5");
            double[] scores = model.Evaluate(X, Y);
            foreach (double sc in scores)
            {
                Console.WriteLine(sc);
            }

            Console.WriteLine("Hello World! we learned");
            Console.ReadKey();
        }
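For completeness, inference with the trained model would look like this, placed after Fit inside Main (the GetData unwrapping of the sigmoid output is an assumption):

            var probs = model.Predict(X).GetData<float>();
            Console.WriteLine("First patient: p(diabetes) = " + probs[0] + " -> " + (probs[0] > 0.5 ? "positive" : "negative"));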
Example #11
        static void Main(string[] args)
        {
            int batch_size  = 128; //Training batch size
            int num_classes = 10;  //No. of classes
            int epochs      = 12;  //No. of epochs we will train

            // input image dimensions
            int img_rows = 28, img_cols = 28;

            // Declare the input shape for the network
            Shape input_shape = null;

            // Load the MNIST dataset into Numpy array
            var((x_train, y_train), (x_test, y_test)) = MNIST.LoadData();

            //Check if it's channels-first or channels-last and rearrange the dataset accordingly
            if (K.ImageDataFormat() == "channels_first")
            {
                x_train     = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols);
                x_test      = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols);
                input_shape = (1, img_rows, img_cols);
            }
            else
            {
                x_train     = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1);
                x_test      = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1);
                input_shape = (img_rows, img_cols, 1);
            }

            //Normalize the input data
            x_train  = x_train.astype(np.float32);
            x_test   = x_test.astype(np.float32);
            x_train /= 255;
            x_test  /= 255;
            Console.WriteLine("x_train shape: " + x_train.shape);
            Console.WriteLine(x_train.shape[0] + " train samples");
            Console.WriteLine(x_test.shape[0] + " test samples");

            // Convert class vectors to binary class matrices
            y_train = Util.ToCategorical(y_train, num_classes);
            y_test  = Util.ToCategorical(y_test, num_classes);

            // Build CNN model
            var model = new Sequential();

            model.Add(new Conv2D(32, kernel_size: (3, 3).ToTuple(),
                                 activation: "relu",
                                 input_shape: input_shape));
            model.Add(new Conv2D(64, (3, 3).ToTuple(), activation: "relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));
            model.Add(new Flatten());
            model.Add(new Dense(128, activation: "relu"));
            model.Add(new Dropout(0.5));
            model.Add(new Dense(num_classes, activation: "softmax"));

            //Compile with loss, metrics and optimizer
            model.Compile(loss: "categorical_crossentropy",
                          optimizer: new Adadelta(), metrics: new string[] { "accuracy" });

            //Train the model
            model.Fit(x_train, y_train,
                      batch_size: batch_size,
                      epochs: epochs,
                      verbose: 1,
                      validation_data: new NDarray[] { x_test, y_test });

            //Score the model for performance
            var score = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Test loss:" + score[0]);
            Console.WriteLine("Test accuracy:" + score[1]);

            // Save the model to HDF5 format, which can be loaded later or ported to other applications
            model.Save("model.h5");
            // The TensorFlow.js / ONNX exports below are left disabled in this example
            var v = K.Instance;

            //model.SaveTensorflowJSFormat(@"C:\_temp\");
            //model.SaveOnnx(@"C:\_temp\");
            Console.ReadLine();
        }
Example #12
        public static void Run()
        {
            //Console.WriteLine(modelPath);
            //Console.WriteLine(h5Path);

            int batch_size  = 200;
            int num_classes = 10;
            int epochs      = 10;

            // input image dimensions
            int img_rows = 28, img_cols = 28;

            Shape input_shape = null;

            // the data, split between train and test sets
            var((x_train, y_train), (x_test, y_test)) = MNIST.LoadData();

            if (K.ImageDataFormat() == "channels_first")
            {
                x_train     = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols);
                x_test      = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols);
                input_shape = (1, img_rows, img_cols);
            }
            else
            {
                x_train     = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1);
                x_test      = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1);
                input_shape = (img_rows, img_cols, 1);
            }

            x_train  = x_train.astype(np.float32);
            x_test   = x_test.astype(np.float32);
            x_train /= 255;
            x_test  /= 255;
            Console.WriteLine("x_train shape: " + x_train.shape);
            Console.WriteLine(x_train.shape[0] + " train samples");
            Console.WriteLine(x_test.shape[0] + " test samples");

            // convert class vectors to binary class matrices
            y_train = Util.ToCategorical(y_train, num_classes);
            y_test  = Util.ToCategorical(y_test, num_classes);

            // Build CNN model
            var model = new Sequential();

            model.Add(new Conv2D(32, kernel_size: (3, 3).ToTuple(),
                                 activation: "relu",
                                 input_shape: input_shape));
            model.Add(new Conv2D(64, (3, 3).ToTuple(), activation: "relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));
            model.Add(new Flatten());
            model.Add(new Dense(128, activation: "relu"));
            model.Add(new Dropout(0.5));
            model.Add(new Dense(num_classes, activation: "softmax"));

            model.Compile(loss: "categorical_crossentropy",
                          optimizer: new Adadelta(), metrics: new string[] { "accuracy" });

            model.Fit(x_train, y_train,
                      batch_size: batch_size,
                      epochs: epochs,
                      verbose: 1,
                      validation_data: new NDarray[] { x_test, y_test });

            model.Save(h5Path);
            model.SaveTensorflowJSFormat(modelPath);
            //model.SaveOnnx("model.onnx");

            var score = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Test loss:" + score[0]);
            Console.WriteLine("Test accuracy:" + score[1]);
        }
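h5Path and modelPath are used but never defined in this snippet; they would be class-level fields along these lines (values are hypothetical):

        static string h5Path    = "model.h5"; // full-model HDF5 output
        static string modelPath = "./tfjs";   // target directory for the TensorFlow.js export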
Example #13
        public static void Run()
        {
            int batch_size  = 128;
            int num_classes = 10;
            int epochs      = 100;

            // the data, split between train and test sets
            var((x_train, y_train), (x_test, y_test)) = Cifar10.LoadData();

            Console.WriteLine("x_train shape: " + x_train.shape);
            Console.WriteLine(x_train.shape[0] + " train samples");
            Console.WriteLine(x_test.shape[0] + " test samples");

            // convert class vectors to binary class matrices
            y_train = Util.ToCategorical(y_train, num_classes);
            y_test  = Util.ToCategorical(y_test, num_classes);

            // Build CNN model
            var model = new Sequential();

            model.Add(new Conv2D(32, kernel_size: (3, 3).ToTuple(),
                                 padding: "same",
                                 input_shape: new Shape(32, 32, 3)));
            model.Add(new Activation("relu"));
            model.Add(new Conv2D(32, (3, 3).ToTuple()));
            model.Add(new Activation("relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));

            model.Add(new Conv2D(64, kernel_size: (3, 3).ToTuple(),
                                 padding: "same"));
            model.Add(new Activation("relu"));
            model.Add(new Conv2D(64, (3, 3).ToTuple()));
            model.Add(new Activation("relu"));
            model.Add(new MaxPooling2D(pool_size: (2, 2).ToTuple()));
            model.Add(new Dropout(0.25));

            model.Add(new Flatten());
            model.Add(new Dense(512));
            model.Add(new Activation("relu"));
            model.Add(new Dropout(0.5));
            model.Add(new Dense(num_classes));
            model.Add(new Activation("softmax"));

            model.Compile(loss: "categorical_crossentropy",
                          optimizer: new RMSprop(lr: 0.0001f, decay: 1e-6f), metrics: new string[] { "accuracy" });

            x_train  = x_train.astype(np.float32);
            x_test   = x_test.astype(np.float32);
            x_train /= 255;
            x_test  /= 255;

            model.Fit(x_train, y_train,
                      batch_size: batch_size,
                      epochs: epochs,
                      verbose: 1,
                      validation_data: new NDarray[] { x_test, y_test },
                      shuffle: true);

            //Save model and weights
            //string model_path = "./model.json";
            //string weight_path = "./weights.h5";
            //string json = model.ToJson();
            //File.WriteAllText(model_path, json);
            //model.SaveWeight(weight_path);
            model.Save("model.h5");
            model.SaveTensorflowJSFormat("./");

            //Score trained model.
            var score = model.Evaluate(x_test, y_test, verbose: 0);

            Console.WriteLine("Test loss:" + score[0]);
            Console.WriteLine("Test accuracy:" + score[1]);
        }
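A quick single-image inference check against the trained CIFAR-10 model, placed after Evaluate inside Run; the string-slice indexing mirrors the dataset slicing used in Example #10, while the argmax/GetData unwrapping is an assumption:

            var sample = x_test["0:1"]; // first test image, shape (1, 32, 32, 3)
            var probs  = model.Predict(sample);
            Console.WriteLine("Predicted class: " + np.argmax(probs, 1).GetData<long>()[0]);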