public void FeatureNormalization_Normalize()
        {
            // Use StreamReader(filepath) when running from filesystem
            var parser     = new CsvParser(() => new StringReader(Resources.winequality_white));
            var targetName = "quality";

            // read feature matrix (all columns different from the targetName)
            var observations = parser.EnumerateRows(c => c != targetName)
                               .ToF64Matrix();

            // create minmax normalizer (normalizes each feature from 0.0 to 1.0)
            var minMaxTransformer = new MinMaxTransformer(0.0, 1.0);

            // transforms features using the feature normalization transform
            minMaxTransformer.Transform(observations, observations);

            // read targets
            var targets = parser.EnumerateRows(targetName)
                          .ToF64Vector();

            // create learner
            // neural nets require features to be normalized.
            // This makes convergence much faster.
            var net = new NeuralNet();

            net.Add(new InputLayer(observations.ColumnCount));
            net.Add(new SoftMaxLayer(targets.Distinct().Count())); // no hidden layer and a softmax output corresponds to logistic regression
            var learner = new ClassificationNeuralNetLearner(net, new LogLoss());

            // learns a logistic regression classifier
            var model = learner.Learn(observations, targets);
        }
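
For reference, the min-max scaling applied by MinMaxTransformer above can be written out by hand. A minimal sketch for a single feature column, assuming plain double[] input (the transformer applies this per column):

        // Map each value linearly from [oldMin, oldMax] to [newMin, newMax].
        static double[] MinMaxScale(double[] column, double newMin = 0.0, double newMax = 1.0)
        {
            var oldMin = column.Min();
            var oldMax = column.Max();
            var range  = oldMax - oldMin;

            // a constant column is mapped to newMin to avoid division by zero.
            return column
                   .Select(v => range == 0.0 ? newMin : newMin + (v - oldMin) / range * (newMax - newMin))
                   .ToArray();
        }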
        public void ClassificationNeuralNetLearner_Learn_Early_Stopping()
        {
            var numberOfObservations = 500;
            var numberOfFeatures     = 5;
            var numberOfClasses      = 5;

            var random = new Random(32);

            var (observations, targets) = CreateData(numberOfObservations,
                                                     numberOfFeatures, numberOfClasses, random);

            var (validationObservations, validationTargets) = CreateData(numberOfObservations,
                                                                          numberOfFeatures, numberOfClasses, random);

            var net = new NeuralNet();

            net.Add(new InputLayer(numberOfFeatures));
            net.Add(new DenseLayer(10));
            net.Add(new SvmLayer(numberOfClasses));

            var sut   = new ClassificationNeuralNetLearner(net, new AccuracyLoss());
            var model = sut.Learn(observations, targets,
                                  validationObservations, validationTargets);

            var validationPredictions = model.Predict(validationObservations);

            var evaluator = new TotalErrorClassificationMetric<double>();
            var actual    = evaluator.Error(validationTargets, validationPredictions);

            Assert.AreEqual(0.798, actual);
        }
        public void ClassificationNeuralNetLearner_Learn()
        {
            var numberOfObservations = 500;
            var numberOfFeatures     = 5;
            var numberOfClasses      = 5;

            var       random = new Random(32);
            F64Matrix observations;

            double[] targets;
            CreateData(numberOfObservations, numberOfFeatures, numberOfClasses, random, out observations, out targets);

            var net = new NeuralNet();

            net.Add(new InputLayer(numberOfFeatures));
            net.Add(new DenseLayer(10));
            net.Add(new SvmLayer(numberOfClasses));

            var sut   = new ClassificationNeuralNetLearner(net, new AccuracyLoss());
            var model = sut.Learn(observations, targets);

            var predictions = model.Predict(observations);

            var evaluator = new TotalErrorClassificationMetric<double>();
            var actual    = evaluator.Error(targets, predictions);

            Assert.AreEqual(0.762, actual);
        }
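
The CreateData helper called above is not part of this listing. Judging by the data construction in ClassificationNeuralNetModel_Save below, a minimal sketch might look like this (uniform random features and random integer class labels):

        static (F64Matrix observations, double[] targets) CreateData(
            int numberOfObservations, int numberOfFeatures, int numberOfClasses, Random random)
        {
            var observations = new F64Matrix(numberOfObservations, numberOfFeatures);
            observations.Map(() => random.NextDouble()); // features uniform in [0, 1)

            var targets = Enumerable.Range(0, numberOfObservations)
                          .Select(i => (double)random.Next(0, numberOfClasses))
                          .ToArray();

            return (observations, targets);
        }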
        public void ClassificationNeuralNetModel_Save()
        {
            var numberOfObservations = 500;
            var numberOfFeatures     = 5;
            var numberOfClasses      = 5;

            var random       = new Random(32);
            var observations = new F64Matrix(numberOfObservations, numberOfFeatures);

            observations.Map(() => random.NextDouble());
            var targets = Enumerable.Range(0, numberOfObservations).Select(i => (double)random.Next(0, numberOfClasses)).ToArray();

            var net = new NeuralNet();

            net.Add(new InputLayer(numberOfFeatures));
            net.Add(new DenseLayer(10));
            net.Add(new SvmLayer(numberOfClasses));

            var learner = new ClassificationNeuralNetLearner(net, new AccuracyLoss());
            var sut     = learner.Learn(observations, targets);

            // save model.
            var writer = new StringWriter();

            sut.Save(() => writer);

            // load model and assert prediction results.
            sut = ClassificationNeuralNetModel.Load(() => new StringReader(writer.ToString()));
            var predictions = sut.Predict(observations);

            var evaluator = new TotalErrorClassificationMetric<double>();
            var actual    = evaluator.Error(targets, predictions);

            Assert.AreEqual(0.762, actual, 0.0000001);
        }
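
Continuing the example above, the same Save/Load pair works against the filesystem, since both take a factory delegate. A sketch, assuming a writable model.xml path (hypothetical):

        // save to and load from disk instead of an in-memory string.
        var path = "model.xml";
        sut.Save(() => new StreamWriter(path));
        var loaded = ClassificationNeuralNetModel.Load(() => new StreamReader(path));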
        public void Classification_Neural_Net_Using_ValidationSet_For_Selecting_The_Best_Model()
        {
            #region Read Data
            // Use StreamReader(filepath) when running from filesystem
            var trainingParser = new CsvParser(() => new StringReader(Resources.mnist_small_train));
            var testParser     = new CsvParser(() => new StringReader(Resources.mnist_small_test));

            var targetName = "Class";

            var featureNames = trainingParser.EnumerateRows(c => c != targetName).First().ColumnNameToIndex.Keys.ToArray();

            // read feature matrix (training)
            var trainingObservations = trainingParser
                                       .EnumerateRows(featureNames)
                                       .ToF64Matrix();
            // read classification targets (training)
            var trainingTargets = trainingParser.EnumerateRows(targetName)
                                  .ToF64Vector();

            // read feature matrix (test)
            var testObservations = testParser
                                   .EnumerateRows(featureNames)
                                   .ToF64Matrix();
            // read classification targets (test)
            var testTargets = testParser.EnumerateRows(targetName)
                              .ToF64Vector();
            #endregion

            // transform pixel values to be between 0 and 1.
            trainingObservations.Map(p => p / 255);
            testObservations.Map(p => p / 255);

            // create training validation split
            var splitter = new StratifiedTrainingTestIndexSplitter<double>(trainingPercentage: 0.7, seed: 24);
            var split    = splitter.SplitSet(trainingObservations, trainingTargets);

            // the output layer must know the number of classes.
            var numberOfClasses = trainingTargets.Distinct().Count();

            var net = new NeuralNet();
            net.Add(new InputLayer(width: 28, height: 28, depth: 1)); // MNIST data is 28x28x1.
            net.Add(new DenseLayer(800, Activation.Relu));
            net.Add(new SoftMaxLayer(numberOfClasses));

            // using classification accuracy as error metric.
            // When using a validation set, the error metric is used for selecting
            // the best iteration based on the model's error on the validation set.
            var learner = new ClassificationNeuralNetLearner(net, iterations: 10, loss: new AccuracyLoss());

            var model = learner.Learn(split.TrainingSet.Observations, split.TrainingSet.Targets,
                                      split.TestSet.Observations, split.TestSet.Targets); // the validation set is used for estimating how well the network generalises to new data.

            var metric      = new TotalErrorClassificationMetric<double>();
            var predictions = model.Predict(testObservations);

            Trace.WriteLine("Test Error: " + metric.Error(testTargets, predictions));
        }
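
Passing a validation set to Learn, as above, makes the learner keep the weights from the iteration with the lowest validation error. A generic sketch of that selection loop (illustration only; trainOneEpoch and validationError are hypothetical delegates, and SharpLearning does this internally):

        static T SelectBestModel<T>(int epochs, Func<T> trainOneEpoch, Func<T, double> validationError)
        {
            var bestError = double.MaxValue;
            var best      = default(T);

            for (var i = 0; i < epochs; i++)
            {
                var candidate = trainOneEpoch();           // advance training by one epoch
                var error     = validationError(candidate);

                if (error < bestError)                     // keep the best model seen so far
                {
                    bestError = error;
                    best      = candidate;
                }
            }
            return best;
        }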
        public void Classification_Convolutional_Neural_Net()
        {
            #region Read Data

            // Use StreamReader(filepath) when running from filesystem
            var trainingParser = new CsvParser(() => new StringReader(Resources.mnist_small_train));
            var testParser     = new CsvParser(() => new StringReader(Resources.mnist_small_test));

            var targetName = "Class";

            var featureNames = trainingParser.EnumerateRows(c => c != targetName).First().ColumnNameToIndex.Keys.ToArray();

            // read feature matrix (training)
            var trainingObservations = trainingParser
                                       .EnumerateRows(featureNames)
                                       .ToF64Matrix();
            // read classification targets (training)
            var trainingTargets = trainingParser.EnumerateRows(targetName)
                                  .ToF64Vector();

            // read feature matrix (test)
            var testObservations = testParser
                                   .EnumerateRows(featureNames)
                                   .ToF64Matrix();
            // read classification targets (test)
            var testTargets = testParser.EnumerateRows(targetName)
                              .ToF64Vector();
            #endregion

            // transform pixel values to be between 0 and 1.
            trainingObservations.Map(p => p / 255);
            testObservations.Map(p => p / 255);

            // the output layer must know the number of classes.
            var numberOfClasses = trainingTargets.Distinct().Count();

            // define the neural net.
            var net = new NeuralNet();
            net.Add(new InputLayer(width: 28, height: 28, depth: 1)); // MNIST data is 28x28x1.
            net.Add(new Conv2DLayer(filterWidth: 5, filterHeight: 5, filterCount: 32));
            net.Add(new MaxPool2DLayer(poolWidth: 2, poolHeight: 2));
            net.Add(new DropoutLayer(0.5));
            net.Add(new DenseLayer(256, Activation.Relu));
            net.Add(new DropoutLayer(0.5));
            net.Add(new SoftMaxLayer(numberOfClasses));

            // using only 10 iterations to make the example run faster.
            // using classification accuracy as error metric. This is only used for reporting progress.
            var learner = new ClassificationNeuralNetLearner(net, iterations: 10, loss: new AccuracyLoss());
            var model   = learner.Learn(trainingObservations, trainingTargets);

            var metric      = new TotalErrorClassificationMetric<double>();
            var predictions = model.Predict(testObservations);

            Trace.WriteLine("Test Error: " + metric.Error(testTargets, predictions));
        }
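
As a sanity check when stacking layers like the ones above, the spatial output size of a convolution or pooling layer follows the usual formula (textbook arithmetic, not taken from the library source):

        // outputSize = (inputSize - filterSize + 2 * padding) / stride + 1
        static int ConvOutputSize(int inputSize, int filterSize, int padding = 0, int stride = 1)
            => (inputSize - filterSize + 2 * padding) / stride + 1;
        // e.g. a 5x5 filter on a 28x28 input with no padding and stride 1
        // gives (28 - 5) / 1 + 1 = 24; a 2x2 max-pool with stride 2 then halves it to 12.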
        public void ClassificationNeuralNetLearner_Constructor_Throw_On_Wrong_OutputLayerType()
        {
            var net = new NeuralNet();

            net.Add(new InputLayer(10));
            net.Add(new DenseLayer(10));
            net.Add(new SquaredErrorRegressionLayer());

            var sut = new ClassificationNeuralNetLearner(net, new AccuracyLoss());
        }
        public void Training()
        {
            LoadImageGray();

            // the output layer must know the number of classes.
            var numberOfClasses = imageBindingModel.ConvertAll(x => x.Label).Distinct().Count();
            var numberByteOfRow = resolutionImage.Width * resolutionImage.Height;

            var net = new NeuralNet();

            net.Add(new InputLayer(width: resolutionImage.Width, height: resolutionImage.Height, depth: 1)); // input dimensions must match the flattened image length used below.
            net.Add(new SoftMaxLayer(numberOfClasses));               // No hidden layers and a SoftMax output layer corresponds to a logistic regression classifier.


            var learner = new ClassificationNeuralNetLearner(net, iterations: 10, loss: new AccuracyLoss());


            var inputmatrix = new List<double>();

            // NOTE: this restricts training to a single image, presumably as a quick smoke test.
            imageBindingModel = new List<ImageBindingModel>()
            {
                imageBindingModel[0]
            };
            foreach (var item in imageBindingModel)
            {
                inputmatrix.AddRange(item.dImage.Select(x => x / 255));
            }
            var f64 = new F64Matrix(inputmatrix.ToArray(), imageBindingModel.Count, numberByteOfRow);

            var labels = imageBindingModel.ConvertAll(x => x.Id).ToArray();


            var model = learner.Learn(f64, labels);

            var metric      = new TotalErrorClassificationMetric<double>();
            var predictions = model.Predict(f64);

            var trainingError = metric.Error(labels, predictions); // labels holds the same Id targets used for training.
        }
        public ClassificationNeuralNetModel LearnNetwork()
        {
            var learner = new ClassificationNeuralNetLearner(GetNeuralNetwork(), iterations: 100, loss: new AccuracyLoss());

            return(learner.Learn(_trainingObservations, _trainingTargets));
        }
        public void Classification_Neural_Net_Using_BatchNormalization()
        {
            #region Read Data
            // Use StreamReader(filepath) when running from filesystem
            var trainingParser = new CsvParser(() => new StringReader(Resources.cifar10_train_small));
            var testParser     = new CsvParser(() => new StringReader(Resources.cifar10_test_small));

            var targetName = "label";
            var id         = "id";

            var featureNames = trainingParser.EnumerateRows(v => v != targetName && v != id).First().ColumnNameToIndex.Keys.ToArray();

            var index = 0.0;
            var targetNameToTargetValue = trainingParser.EnumerateRows(targetName)
                                          .ToStringVector().Distinct().ToDictionary(v => v, v => index++);

            // read feature matrix (training)
            var trainingObservations = trainingParser
                                       .EnumerateRows(featureNames)
                                       .ToF64Matrix();
            // read classification targets (training)
            var trainingTargets = trainingParser.EnumerateRows(targetName)
                                  .ToStringVector().Select(v => targetNameToTargetValue[v]).ToArray();

            // read feature matrix (test)
            var testObservations = testParser
                                   .EnumerateRows(featureNames)
                                   .ToF64Matrix();
            // read classification targets (test)
            var testTargets = testParser.EnumerateRows(targetName)
                              .ToStringVector().Select(v => targetNameToTargetValue[v]).ToArray();
            #endregion

            // transform pixel values to be between 0 and 1.
            trainingObservations.Map(p => p / 255);
            testObservations.Map(p => p / 255);

            // the output layer must know the number of classes.
            var numberOfClasses = trainingTargets.Distinct().Count();

            // batch normalization can be added to all layers with weights + biases.
            // Batch normalization will increase the error reduction per iteration,
            // but will also make each iteration slower due to the extra work.
            // Batch normalization usually has the best effect on deeper networks.
            var useBatchNorm = true;
            var net          = new NeuralNet();
            net.Add(new InputLayer(width: 32, height: 32, depth: 3)); // CIFAR data is 32x32x3.
            net.Add(new Conv2DLayer(3, 3, 32) { BatchNormalization = useBatchNorm });
            net.Add(new MaxPool2DLayer(2, 2));
            net.Add(new DropoutLayer(0.25));
            net.Add(new Conv2DLayer(3, 3, 64) { BatchNormalization = useBatchNorm });
            net.Add(new Conv2DLayer(3, 3, 64) { BatchNormalization = useBatchNorm });
            net.Add(new MaxPool2DLayer(2, 2));
            net.Add(new DropoutLayer(0.25));
            net.Add(new DenseLayer(512) { BatchNormalization = useBatchNorm });
            net.Add(new DropoutLayer(0.5));
            net.Add(new SoftMaxLayer(numberOfClasses));

            // using classification accuracy as error metric.
            // When using a validation set, the error metric is used for selecting
            // the best iteration based on the model's error on the validation set.
            var learner = new ClassificationNeuralNetLearner(net, iterations: 5, loss: new AccuracyLoss());

            var model = learner.Learn(trainingObservations, trainingTargets);

            var metric      = new TotalErrorClassificationMetric<double>();
            var predictions = model.Predict(testObservations);

            Trace.WriteLine("Test Error: " + metric.Error(testTargets, predictions));
        }
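
What batch normalization computes per activation, in textbook form (the library's internals may differ): normalize over the mini-batch, then scale and shift with the learned gamma and beta parameters. A minimal sketch:

        static double[] BatchNorm(double[] batch, double gamma, double beta, double epsilon = 1e-5)
        {
            var mean     = batch.Average();
            var variance = batch.Select(v => (v - mean) * (v - mean)).Average();

            // normalize to zero mean / unit variance, then scale and shift.
            return batch
                   .Select(v => gamma * (v - mean) / Math.Sqrt(variance + epsilon) + beta)
                   .ToArray();
        }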
        /// <summary>
        /// Trains the previously created network. The learner requires at least
        /// 5 language samples (= batchSize); more samples give better results.
        /// </summary>
        private void LearnAnn()
        {
            var learner = new ClassificationNeuralNetLearner(neuralNetwork, iterations: 300, loss: new AccuracyLoss(), batchSize: 5);

            annModel = learner.Learn(observations, dependentVariableAsNumber);
        }
        public FoodType ClassifyWithMl(IList<Ingredient> ingredients)
        {
            #region Read Data

            // Use StreamReader(filepath) when running from filesystem
            // NOTE: both parsers below read training.csv, so the "test" data here is the training data; a separate test file was presumably intended.
            var trainingParser = new CsvParser(() => new StringReader(System.IO.File.ReadAllText(@"D:\Programming\VisualStudio\clean\MyRecipesRepo\RecipesProj\Content\ML\training.csv")));
            var testParser     = new CsvParser(() => new StringReader(System.IO.File.ReadAllText(@"D:\Programming\VisualStudio\clean\MyRecipesRepo\RecipesProj\Content\ML\training.csv")));

            var targetName = "Class";

            var featureNames = trainingParser.EnumerateRows(c => c != targetName).First().ColumnNameToIndex.Keys.ToArray();

            // read feature matrix (training); string cells are crudely encoded as their hash codes.
            var matrix = trainingParser.EnumerateRows(featureNames).ToStringMatrix();
            matrix.Map(cellValue => (cellValue.GetHashCode() / 1.0).ToString());

            var trainingObservations = matrix.ToF64Matrix();
            // read classification targets (training)
            var trainingTargets = trainingParser.EnumerateRows(targetName)
                                  .ToF64Vector();

            // read feature matrix (test)

            var matrix2 = testParser.EnumerateRows(featureNames).ToStringMatrix();
            matrix2.Map(cellValue => (cellValue.GetHashCode() / 1.0).ToString());

            var testObservations = matrix2.ToF64Matrix();
            // read classification targets (test)
            //var targetMatrix2 = testParser.EnumerateRows(targetName).ToStringMatrix();
            //targetMatrix2.Map(cellValue => (cellValue.GetHashCode() / 1.0).ToString());
            //targetMatrix2 = targetMatrix2.ToF64Matrix()

            //var testTargets = targetMatrix2.ToF64Vector();
            #endregion

            // squash the hashed feature values into the [0, 1] range.
            trainingObservations.Map(p => Math.Abs(Math.Cos(p)));
            testObservations.Map(p => Math.Abs(Math.Cos(p)));

            // the output layer must know the number of classes.
            var numberOfClasses = trainingTargets.Distinct().Count();

            var net = new NeuralNet();
            net.Add(new InputLayer(width: 2, height: 1, depth: 1)); // input is 2x1x1.
            net.Add(new DropoutLayer(0.2));
            net.Add(new DenseLayer(800, Activation.Relu));
            net.Add(new DropoutLayer(0.5));
            net.Add(new DenseLayer(800, Activation.Relu));
            net.Add(new DropoutLayer(0.5));
            net.Add(new SoftMaxLayer(numberOfClasses));

            // using only 10 iterations to make the example run faster.
            // using classification accuracy as error metric. This is only used for reporting progress.
            var learner = new ClassificationNeuralNetLearner(net, iterations: 10, loss: new AccuracyLoss());
            var model   = learner.Learn(trainingObservations, trainingTargets);

            var metric      = new TotalErrorClassificationMetric<double>();
            var predictions = model.Predict(testObservations);

            //Trace.WriteLine("Test Error: " + metric.Error(testTargets, predictions));

            var retVal = new FoodType();
            retVal.ID   = 1;
            retVal.Type = "type";
            return(retVal);
        }