Example #1
0
 /// <summary>
 /// Trains the SVM while searching for the optimal C, gamma, p, nu, coef0 and degree values,
 /// where "optimal" means the parameter set whose cross-validation estimate of the test-set
 /// error is minimal.
 /// </summary>
 /// <param name="trainData">The training data.</param>
 /// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times</param>
 /// <param name="cGrid">Search grid for C</param>
 /// <param name="gammaGrid">Search grid for gamma</param>
 /// <param name="pGrid">Search grid for p</param>
 /// <param name="nuGrid">Search grid for nu</param>
 /// <param name="coefGrid">Search grid for coef0</param>
 /// <param name="degreeGrid">Search grid for degree</param>
 /// <param name="balanced">If true and the problem is 2-class classification then the method creates more balanced cross-validation subsets that is proportions between classes in subsets are close to such proportion in the whole train dataset.</param>
 /// <returns>True if the training completed successfully</returns>
 public bool TrainAuto(
     TrainData trainData,
     int kFold,
     MCvParamGrid cGrid,
     MCvParamGrid gammaGrid,
     MCvParamGrid pGrid,
     MCvParamGrid nuGrid,
     MCvParamGrid coefGrid,
     MCvParamGrid degreeGrid,
     bool balanced = false)
 {
     //Delegate the grid search to the native OpenCV implementation.
     bool success = MlInvoke.CvSVMTrainAuto(
         Ptr,
         trainData.Ptr,
         kFold,
         ref cGrid,
         ref gammaGrid,
         ref pGrid,
         ref nuGrid,
         ref coefGrid,
         ref degreeGrid,
         balanced);
     return success;
 }
Example #2
0
      /// <summary>
      /// Trains a 2-5-1 ANN_MLP on two Gaussian point clusters (centered near 200 and 300),
      /// then paints every image pixel according to the network's prediction and overlays
      /// the original training samples.
      /// </summary>
      public void TestANN_MLP()
      {
         int trainSampleCount = 100;

         #region Generate the training data and classes
         Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
         Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

         Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

         Matrix<float> sample = new Matrix<float>(1, 2);
         Matrix<float> prediction = new Matrix<float>(1, 1);

         //First half of the rows: class 1 cluster; second half: class 2 cluster.
         Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
         trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
         Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
         trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

         Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
         trainClasses1.SetValue(1);
         Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
         trainClasses2.SetValue(2);
         #endregion

         using(Matrix<int> layerSize = new Matrix<int>(new int[] { 2, 5, 1 }))
         using(Mat layerSizeMat = layerSize.Mat)

         using (TrainData td = new TrainData(trainData, MlEnum.DataLayoutType.RowSample, trainClasses))
         using (ANN_MLP network = new ANN_MLP())
         { 
            network.SetLayerSizes(layerSizeMat);
            network.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym, 0, 0);
            network.TermCriteria = new MCvTermCriteria(10, 1.0e-8);
            network.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0.1, 0.1);
            network.Train(td, (int) Emgu.CV.ML.MlEnum.AnnMlpTrainingFlag.Default);

#if !NETFX_CORE
            //Exercise the save path, then clean up the temporary model file.
            String fileName = Path.Combine(Path.GetTempPath(), "ann_mlp_model.xml");
            network.Save(fileName);
            if (File.Exists(fileName))
               File.Delete(fileName);
#endif

            for (int i = 0; i < img.Height; i++)
            {
               for (int j = 0; j < img.Width; j++)
               {
                  //Feature vector is (x, y) = (column, row).
                  sample.Data[0, 0] = j;
                  sample.Data[0, 1] = i;
                  network.Predict(sample, prediction);

                  // estimates the response and get the neighbors' labels
                  float response = prediction.Data[0, 0];

                  // highlight the pixel depending on the accuracy (or confidence)
                  img[i, j] = response < 1.5 ? new Bgr(90, 0, 0) : new Bgr(0, 90, 0);
               }
            }
         }

         // display the original training samples
         for (int i = 0; i < (trainSampleCount >> 1); i++)
         {
            PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
            img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
            //Use the float coordinates directly (no int truncation) to match p1 above.
            PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
            img.Draw(new CircleF(p2, 2), new Bgr(100, 255, 100), -1);
         }

         //Emgu.CV.UI.ImageViewer.Show(img);
      }
Example #3
0
      /// <summary>
      /// Trains a random forest on the letter recognition dataset using the first 80% of the
      /// samples, then reports prediction accuracy for both the training and test splits.
      /// All disposable matrices are released via using blocks (the original leaked them).
      /// </summary>
      public void TestRTreesLetterRecognition()
      {
         Matrix<float> data, response;
         ReadLetterRecognitionData(out data, out response);

         //First 80% of the rows are the training split, the remainder is the test split.
         int trainingSampleCount = (int) (data.Rows * 0.8);

         using (data)
         using (response)
         using (Matrix<Byte> varType = new Matrix<byte>(data.Cols + 1, 1))
         using (Matrix<byte> sampleIdx = new Matrix<byte>(data.Rows, 1))
         using (RTrees forest = new RTrees())
         {
            varType.SetValue((byte) MlEnum.VarType.Numerical); //the data is numerical
            varType[data.Cols, 0] = (byte) MlEnum.VarType.Categorical; //the response is categorical

            //Mark the training rows with a non-zero mask value.
            using (Matrix<byte> sampleRows = sampleIdx.GetRows(0, trainingSampleCount, 1))
               sampleRows.SetValue(255);

            using (TrainData td = new TrainData(data, MlEnum.DataLayoutType.RowSample, response, null, sampleIdx, null, varType))
            {
               forest.MaxDepth = 10;
               forest.MinSampleCount = 10;
               forest.RegressionAccuracy = 0.0f;
               forest.UseSurrogates = false;
               forest.MaxCategories = 15;
               forest.CalculateVarImportance = true;
               forest.ActiveVarCount = 4;
               forest.TermCriteria = new MCvTermCriteria(100, 0.01f);
               bool success = forest.Train(td);

               if (!success)
                  return;

               double trainDataCorrectRatio = 0;
               double testDataCorrectRatio = 0;
               for (int i = 0; i < data.Rows; i++)
               {
                  using (Matrix<float> sample = data.GetRow(i))
                  {
                     //Count a prediction as correct when it matches the expected label.
                     double r = forest.Predict(sample, null);
                     r = Math.Abs(r - response[i, 0]);
                     if (r < 1.0e-5)
                     {
                        if (i < trainingSampleCount)
                           trainDataCorrectRatio++;
                        else
                           testDataCorrectRatio++;
                     }
                  }
               }

               trainDataCorrectRatio /= trainingSampleCount;
               testDataCorrectRatio /= (data.Rows - trainingSampleCount);

               StringBuilder builder = new StringBuilder("Variable Importance: ");
               /*
               using (Matrix<float> varImportance = forest.VarImportance)
               {
                  for (int i = 0; i < varImportance.Cols; i++)
                  {
                     builder.AppendFormat("{0} ", varImportance[0, i]);
                  }
               }*/

               EmguAssert.WriteLine(String.Format("Prediction accuracy for training data :{0}%", trainDataCorrectRatio * 100));
               EmguAssert.WriteLine(String.Format("Prediction accuracy for test data :{0}%", testDataCorrectRatio * 100));
               EmguAssert.WriteLine(builder.ToString());
            }
         }
      }
Example #4
0
      /// <summary>
      /// Trains a normal Bayes classifier on three Gaussian point clusters, classifies every
      /// image pixel, and overlays the original training samples.
      /// Bug fixed: the pixel loop indexed img[j, i] with the width counter as the ROW index,
      /// which is out of range for non-square images; it now uses the conventional
      /// sample = (x, y) = (j, i) and img[i, j], matching the sibling tests.
      /// </summary>
      public void TestNormalBayesClassifier()
      {
         Bgr[] colors = new Bgr[] { 
            new Bgr(0, 0, 255), 
            new Bgr(0, 255, 0),
            new Bgr(255, 0, 0)};
         int trainSampleCount = 150;

         #region Generate the training data and classes
         Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
         Matrix<int> trainClasses = new Matrix<int>(trainSampleCount, 1);
         
         Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

         Matrix<float> sample = new Matrix<float>(1, 2);

         //Three clusters of 50 samples each; column 0 is x, column 1 is y.
         Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);
         trainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(50));
         trainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(50));

         Matrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
         trainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(50));

         Matrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
         trainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(50));
         trainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(50));

         Matrix<int> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);
         trainClasses1.SetValue(1);
         Matrix<int> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
         trainClasses2.SetValue(2);
         Matrix<int> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
         trainClasses3.SetValue(3);
         #endregion

         using (TrainData td = new TrainData(trainData, MlEnum.DataLayoutType.RowSample, trainClasses))
         using (NormalBayesClassifier classifier = new NormalBayesClassifier())
         {
            //Train twice on purpose: once through the raw-matrix overload, once through TrainData.
            classifier.Train(trainData, MlEnum.DataLayoutType.RowSample, trainClasses);
            classifier.Clear();
            classifier.Train(td);
#if !NETFX_CORE
            String fileName = Path.Combine(Path.GetTempPath(), "normalBayes.xml");
            classifier.Save(fileName);
            if (File.Exists(fileName))
               File.Delete(fileName);
#endif

            #region Classify every image pixel
            for (int i = 0; i < img.Height; i++)
               for (int j = 0; j < img.Width; j++)
               {
                  //Feature vector is (x, y) = (column, row).
                  sample.Data[0, 0] = j;
                  sample.Data[0, 1] = i;
                  int response = (int) classifier.Predict(sample, null);

                  Bgr color = colors[response - 1];

                  //Halve the class color so the training samples drawn later stand out.
                  img[i, j] = new Bgr(color.Blue * 0.5, color.Green * 0.5, color.Red * 0.5);
               }
            #endregion 
         }

         // display the original training samples
         for (int i = 0; i < (trainSampleCount / 3); i++)
         {
            PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
            img.Draw(new CircleF(p1, 2.0f), colors[0], -1);
            PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
            img.Draw(new CircleF(p2, 2.0f), colors[1], -1);
            PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);
            img.Draw(new CircleF(p3, 2.0f), colors[2], -1);
         }

         //Emgu.CV.UI.ImageViewer.Show(img);
      }
Example #5
0
      /// <summary>
      /// Trains a C-SVC SVM with a histogram-intersection kernel on three Gaussian point
      /// clusters, exercises the save/read round-trip and support-vector extraction, and
      /// paints every image pixel according to the predicted class.
      /// Fixes: model2/fs/supportVectors are now disposed, the GCHandle is freed in a
      /// finally block, and the unused (and leaked) support-vector image was removed.
      /// </summary>
      public void TestSVM()
      {
         int trainSampleCount = 150;
         int sigma = 60;

         #region Generate the training data and classes

         Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
         Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

         Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

         Matrix<float> sample = new Matrix<float>(1, 2);

         //Three clusters of 50 samples each; column 0 is x, column 1 is y.
         Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);
         trainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
         trainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));

         Matrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
         trainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(sigma));

         Matrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
         trainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
         trainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));

         Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);
         trainClasses1.SetValue(1);
         Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
         trainClasses2.SetValue(2);
         Matrix<float> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
         trainClasses3.SetValue(3);

         #endregion

         using (SVM model = new SVM())
         using (Matrix<int> trainClassesInt = trainClasses.Convert<int>())
         using (TrainData td = new TrainData(trainData, MlEnum.DataLayoutType.RowSample, trainClassesInt))
         {
            model.Type = SVM.SvmType.CSvc;
            model.SetKernel(SVM.SvmKernelType.Inter);
            model.Degree = 0;
            model.Gamma = 1;
            model.Coef0 = 0;
            model.C = 1;
            model.Nu = 0;
            model.P = 0;
            model.TermCriteria = new MCvTermCriteria(100, 1.0e-6);
            model.Train(td);
#if !NETFX_CORE
            String fileName = "svmModel.xml";
            model.Save(fileName);

            //Round-trip the model through the file to exercise Read; both objects are disposed.
            using (SVM model2 = new SVM())
            using (FileStorage fs = new FileStorage(fileName, FileStorage.Mode.Read))
               model2.Read(fs.GetFirstTopLevelNode());

            if (File.Exists(fileName))
               File.Delete(fileName);
#endif

            for (int i = 0; i < img.Height; i++)
            {
               for (int j = 0; j < img.Width; j++)
               {
                  //Feature vector is (x, y) = (column, row).
                  sample.Data[0, 0] = j;
                  sample.Data[0, 1] = i;

                  float response = model.Predict(sample);

                  img[i, j] =
                     response == 1 ? new Bgr(90, 0, 0) :
                     response == 2 ? new Bgr(0, 90, 0) :
                     new Bgr(0, 0, 90);
               }
            }

            //TODO: find out how to draw the support vectors
            using (Mat supportVectors = model.GetSupportVectors())
            {
               PointF[] vectors = new PointF[supportVectors.Rows];
               GCHandle handler = GCHandle.Alloc(vectors, GCHandleType.Pinned);
               try
               {
                  using (
                     Mat vMat = new Mat(supportVectors.Rows, supportVectors.Cols, DepthType.Cv32F, 1,
                        handler.AddrOfPinnedObject(), supportVectors.Cols * 4))
                  {
                     supportVectors.CopyTo(vMat);
                  }
               }
               finally
               {
                  //Always unpin the managed array, even if the copy throws.
                  handler.Free();
               }
            }
         }

         // display the original training samples
         for (int i = 0; i < (trainSampleCount / 3); i++)
         {
            PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
            img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
            PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
            img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
            PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);
            img.Draw(new CircleF(p3, 2.0f), new Bgr(100, 100, 255), -1);
         }

         //Emgu.CV.UI.ImageViewer.Show(img);
      }
        /// <summary>
        /// Configures the SVM classifier (RBF kernel, C-SVC) and trains it on the
        /// previously collected descriptors and labels.
        /// The TrainData wrapper is disposed after training (the original leaked it).
        /// </summary>
        /// <returns>True if the SVM was trained successfully</returns>
        public bool train()
        {
            //set SVM parameters
            svmClassifier.SetKernel(Emgu.CV.ML.SVM.SvmKernelType.Rbf);
            svmClassifier.Gamma = 2;
            svmClassifier.Type = Emgu.CV.ML.SVM.SvmType.CSvc;
            svmClassifier.C = 5;

            using (TrainData td = new TrainData(tDescriptors, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, labels))
            {
                return svmClassifier.Train(td);
            }
        }
Example #7
0
 /// <summary>
 /// Trains the statistical model with the given training data.
 /// </summary>
 /// <param name="model">The stat model to train</param>
 /// <param name="trainData">The training data</param>
 /// <param name="flags">Optional training flags</param>
 /// <returns>True if the model was trained successfully</returns>
 public static bool Train(this IStatModel model, TrainData trainData, int flags = 0)
     => MlInvoke.StatModelTrainWithData(model.StatModelPtr, trainData, flags);
Example #8
0
 /// <summary>
 /// Trains the statistical model with the given training data.
 /// </summary>
 /// <param name="model">The stat model to train</param>
 /// <param name="trainData">The training data</param>
 /// <param name="flags">Optional training flags</param>
 /// <returns>True if the model was trained successfully</returns>
 public static bool Train(this IStatModel model, TrainData trainData, int flags = 0)
 {
    //Forward to the native stat-model training entry point.
    bool success = MlInvoke.StatModelTrainWithData(model.StatModelPtr, trainData, flags);
    return success;
 }
Example #9
0
 /// <summary>
 /// Trains the SVM while searching the supplied grids for the optimal C, gamma, p, nu,
 /// coef0 and degree values; optimality means the cross-validation estimate of the
 /// test-set error is minimal.
 /// </summary>
 /// <param name="trainData">The training data.</param>
 /// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times</param>
 /// <param name="cGrid">Search grid for C</param>
 /// <param name="gammaGrid">Search grid for gamma</param>
 /// <param name="pGrid">Search grid for p</param>
 /// <param name="nuGrid">Search grid for nu</param>
 /// <param name="coefGrid">Search grid for coef0</param>
 /// <param name="degreeGrid">Search grid for degree</param>
 /// <param name="balanced">If true and the problem is 2-class classification then the method creates more balanced cross-validation subsets that is proportions between classes in subsets are close to such proportion in the whole train dataset.</param>
 /// <returns>True if the training completed successfully</returns>
 public bool TrainAuto(
    TrainData trainData,
    int kFold,
    MCvParamGrid cGrid,
    MCvParamGrid gammaGrid,
    MCvParamGrid pGrid,
    MCvParamGrid nuGrid,
    MCvParamGrid coefGrid,
    MCvParamGrid degreeGrid,
    bool balanced = false)
 {
    //Delegate the grid search to the native OpenCV implementation.
    bool success = MlInvoke.CvSVMTrainAuto(
       Ptr, trainData.Ptr, kFold,
       ref cGrid, ref gammaGrid, ref pGrid,
       ref nuGrid, ref coefGrid, ref degreeGrid,
       balanced);
    return success;
 }
Example #10
0
 /// <summary>
 /// Trains the SVM automatically, searching the default parameter grids for the optimal
 /// C, gamma, p, nu, coef0 and degree values; optimality means the cross-validation
 /// estimate of the test-set error is minimal.
 /// </summary>
 /// <param name="trainData">The training data.</param>
 /// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times</param>
 /// <returns>True if the training completed successfully</returns>
 public bool TrainAuto(
    TrainData trainData,
    int kFold = 10)
 {
    //Use the built-in default search grid for every tunable parameter.
    MCvParamGrid cGrid = GetDefaultGrid(ParamType.C);
    MCvParamGrid gammaGrid = GetDefaultGrid(ParamType.Gamma);
    MCvParamGrid pGrid = GetDefaultGrid(ParamType.P);
    MCvParamGrid nuGrid = GetDefaultGrid(ParamType.Nu);
    MCvParamGrid coefGrid = GetDefaultGrid(ParamType.Coef);
    MCvParamGrid degreeGrid = GetDefaultGrid(ParamType.Degree);
    return TrainAuto(trainData, kFold, cGrid, gammaGrid, pGrid, nuGrid, coefGrid, degreeGrid);
 }