/// <summary>
/// Trains the top-layer SVM (linear kernel, C-SVC, C = 1) on the accumulated
/// training descriptors and labels, then persists the dictionary and the
/// trained model under "&lt;directory&gt;obj\".
/// </summary>
public void Save()
{
    topLayerSVM = new SVM();
    SVMParams p = new SVMParams();
    p.KernelType = SVM_KERNEL_TYPE.LINEAR;
    p.SVMType = SVM_TYPE.C_SVC;
    p.C = 1;
    p.TermCrit = new MCvTermCriteria(100, 0.00001);
    topLayerSVM.Train(trainingDescriptors, labels, null, null, p);

    // NOTE(review): BinaryFormatter is insecure and removed in .NET 9;
    // consider migrating the dictionary to a safer serializer.
    IFormatter formatter = new IFormatterFactoryPlaceholderDoNotUse();
    // NOTE(review): path is concatenated without a separator ("...obj\dic.xml");
    // the sibling TrainEach uses "\\obj\\" — confirm which is intended.
    // File.Create truncates any existing file (File.OpenWrite left stale
    // trailing bytes when the new payload was shorter), and 'using' disposes
    // the stream even if Serialize throws.
    using (Stream fs = File.Create(directory + "obj\\dic.xml"))
    {
        formatter.Serialize(fs, topLayerDic);
    }
    topLayerSVM.Save(directory + "obj\\svm.xml");
}
//SVMParams _parameters
/// <summary>
/// Creates the classifier with a fresh SVM model and default training
/// parameters: linear kernel, C-SVC, C = 1, termination after 100
/// iterations or epsilon 1e-5.
/// </summary>
public SVMClassifier()
{
    model = new SVM();

    SVMParams defaults = new SVMParams();
    defaults.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
    defaults.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
    defaults.C = 1;
    defaults.TermCrit = new MCvTermCriteria(100, 0.00001);

    parameters = defaults;
}
/// <summary>
/// Train the SVM model with the specific parameters
/// </summary>
/// <param name="trainData">The training data.</param>
/// <param name="responses">The response for the training data.</param>
/// <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;Byte&gt; of nx1</param>
/// <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;Byte&gt; of nx1</param>
/// <param name="parameters">The parameters for SVM</param>
/// <returns>True if the training succeeded.</returns>
public bool Train(
    Matrix<float> trainData,
    Matrix<float> responses,
    Matrix<Byte> varIdx,
    Matrix<Byte> sampleIdx,
    SVMParams parameters)
{
    return MlInvoke.CvSVMTrain(
        _ptr,
        trainData.Ptr,
        responses.Ptr,
        varIdx == null ? IntPtr.Zero : varIdx.Ptr,
        // BUG FIX: the original passed varIdx.Ptr here, so a non-null
        // sampleIdx was silently ignored (and a null varIdx with a
        // non-null sampleIdx would throw NullReferenceException).
        sampleIdx == null ? IntPtr.Zero : sampleIdx.Ptr,
        parameters.MCvSVMParams);
}
/// <summary>
/// Train the SVM model with the specific parameters
/// </summary>
/// <param name="trainData">The training data.</param>
/// <param name="responses">The response for the training data.</param>
/// <param name="varIdx">Can be null if not needed. When specified, identifies variables (features) of interest. It is a Matrix&lt;int&gt; of nx1</param>
/// <param name="sampleIdx">Can be null if not needed. When specified, identifies samples of interest. It is a Matrix&lt;int&gt; of nx1</param>
/// <param name="parameters">The parameters for SVM</param>
/// <returns>True if the training succeeded.</returns>
public bool Train(
    Matrix<float> trainData,
    Matrix<float> responses,
    Matrix<int> varIdx,
    Matrix<int> sampleIdx,
    SVMParams parameters)
{
    return MlInvoke.CvSVMTrain(
        _ptr,
        trainData.Ptr,
        responses.Ptr,
        varIdx == null ? IntPtr.Zero : varIdx.Ptr,
        // BUG FIX: the original passed varIdx.Ptr here, so a non-null
        // sampleIdx was silently ignored (and a null varIdx with a
        // non-null sampleIdx would throw NullReferenceException).
        sampleIdx == null ? IntPtr.Zero : sampleIdx.Ptr,
        parameters.MCvSVMParams);
}
/// <summary>
/// Trains the wrapped SVM on the given samples using a linear C-SVC
/// configuration (C = 1, terminate after 100 iterations or eps 1e-5).
/// </summary>
/// <param name="trainData">Training samples, one row per sample.</param>
/// <param name="trainClasses">Class label for each training sample.</param>
public SupportVectorMachine(Matrix<float> trainData, Matrix<float> trainClasses)
{
    SVMParams svmParams = new SVMParams();
    svmParams.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
    svmParams.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
    svmParams.C = 1;
    svmParams.TermCrit = new MCvTermCriteria(100, 0.00001);

    // NOTE(review): model1 is assumed to be initialized elsewhere
    // (field initializer?) — confirm before use.
    bool trained1 = model1.Train(trainData, trainClasses, null, null, svmParams);
    //bool trained2 = model2.TrainAuto(trainData, trainClasses, null, null, svmParams.MCvSVMParams, 5);
}
/// <summary>
/// Sets up the shape classifier: default polynomial-kernel C-SVC parameters,
/// one SVM per sign colour, and the list of candidate shapes each colour
/// can map to.
/// </summary>
public ShapeClassifier()
{
    isTrained = false;
    // SVM settings
    // TODO: Play with SVM parameters
    SvmParameters = new SVMParams();
    SvmParameters.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
    SvmParameters.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.POLY;
    SvmParameters.Gamma = 3;
    SvmParameters.Degree = 3;
    SvmParameters.C = 1;
    SvmParameters.TermCrit = new MCvTermCriteria(100, 0.00001);
    // Obtain the number of colour classes
    int numSignColours = Enum.GetValues(typeof(SignColour)).Length;
    // Create an empty SVM model for each colour
    // NOTE(review): writing through svms[i].model assumes ShapeSvm is a
    // struct (array elements are mutable in place); if it is a class the
    // elements are null here and this would throw — confirm.
    svms = new ShapeSvm[numSignColours];
    for (int i = 0; i < numSignColours; ++i)
    {
        svms[i].model = new SVM();
        svms[i].isTrained = false;
        svms[i].shapes = new List<SignShape>();
    }
    // Define which shapes are used for each sign colour
    svms[(int)SignColour.RED].shapes.AddRange(
        new SignShape[] { SignShape.Garbage, SignShape.Circle, SignShape.Octagon, SignShape.TriangleUp, SignShape.TriangleDown }
    );
    svms[(int)SignColour.BLUE].shapes.AddRange(
        new SignShape[] { SignShape.Garbage, SignShape.Circle, SignShape.Rectangle }
    );
}
/// <summary>
/// Slides a mH x mW mask over the training image, computing per-window
/// average, standard deviation and entropy features plus a plant/non-plant
/// label (from Class_I), trains an RBF C-SVC on those features via
/// TrainAuto, saves the model, then re-predicts every window and paints
/// positive windows magenta into a prediction image shown in pictureBox9.
/// Fields used: Train_I, Train_I2, Class_I, gR_R_Picked_Map, mW, mH,
/// entropy, classV, trainData_all.
/// </summary>
private void button7_Click(object sender, EventArgs e)
{
    ///////////////////////////////////////////// Test ///////////////////////////////////////
    //Create a matrix
    //Matrix<Byte> matrix1 = new Matrix<Byte>(10, 10);
    //Byte element = 0;
    ////Set the elements
    //for (int i = 0; i < 10; i++)
    //{
    //    for (int j = 0; j < 10; j++)
    //    {
    //        matrix1.Data[i, j] = 1;
    //        //matrix1.Data[i, j] = element;
    //        element++;
    //    }
    //}
    //int WW = matrix1.Cols;
    //int HH = matrix1.Rows;
    ///////////////////////////////////////////// Test ///////////////////////////////////////
    int W = Train_I.Cols;
    int H = Train_I.Rows;
    // Mask half-sizes (mW, mH assumed odd — TODO confirm).
    int rW = (mW - 1) / 2;
    int rH = (mH - 1) / 2;
    double pp = 0;
    double ee = 0;
    int ind = 0; // linear index of the current mask window
    int count;
    // Range of mask-centre positions that keep the mask inside the image.
    int maskCenter_H_start = rH + 1;
    int maskCenter_W_start = rW + 1;
    int maskCenter_H_end = H - rH;
    int maskCenter_W_end = W - rW;
    // One feature value per mask window.
    double[] fVave = new double[(maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start)];
    double[] fVstd = new double[(maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start)];
    double[] fVentropy = new double[(maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start)];
    classV = new Matrix<float>((maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start), 1);
    // Mask scan over the whole image "gR_R_Picked_Map"
    for (int i = maskCenter_H_start; i < maskCenter_H_end; i++)
    {
        for (int j = maskCenter_W_start; j < maskCenter_W_end; j++)
        {
            double sum = 0;
            double ave = 0;
            double sumDiff = 0;
            double std = 0;
            entropy = 0; // NOTE: 'entropy' is a field, reset per window
            count = 0; // initial value all 0, since every mask should count independently
            for (int k = 0; k < mH; k++)
            {
                for (int l = 0; l < mW; l++)
                {
                    // NOTE(review): the extra "- 1" shifts the window one
                    // pixel up/left of being centred on (i, j) — combined
                    // with the "+ 1" in maskCenter_*_start this appears to
                    // cancel out, but confirm the intended alignment.
                    int mask_x = j - rW + l - 1;
                    int mask_y = i - rH + k - 1;
                    sum = sum + gR_R_Picked_Map[mask_y, mask_x]; // Bug????
                    //sum = sum + matrix1[i - rH + k - 1 , j - rW + l - 1 ]; // test
                    // judge mask is PART of TREE or NOT
                    if (Class_I.Data[mask_y, mask_x, 0] == 1)
                    {
                        count++;
                    }
                }
            } // end of mask
            // To detect the binary values in the mask and record it in classV
            float maskSampleCount = mH * mW;
            // float divisor forces real (non-integer) division here
            float mask_plant_percent = (count / maskSampleCount);
            // Window is labelled positive when >70% of its pixels are class 1.
            if (mask_plant_percent > 0.7)
            {
                classV[ind, 0] = 1;
            }
            else
            {
                classV[ind, 0] = 0;
            }
            // Ave
            ave = sum / (maskSampleCount);
            // StandardDeviation (sample std: divides by n - 1)
            for (int k = 0; k < mH; k++)
            {
                for (int l = 0; l < mW; l++)
                {
                    int mask_x = j - rW + l - 1;
                    int mask_y = i - rH + k - 1;
                    sumDiff = sumDiff + Math.Pow((gR_R_Picked_Map[mask_y, mask_x] - ave), 2);
                    // sumDiff = sumDiff + Math.Pow((matrix1[i - rH + k - 1, j - rW + l - 1] - ave), 2); //test
                }
            }
            std = Math.Sqrt(sumDiff / (maskSampleCount - 1));
            // Entropy: -sum(p * ln p) with p = pixel / window sum.
            // NOTE(review): these loops start at k = 1, l = 1 (not 0), so the
            // first mask row and column are excluded from the entropy, unlike
            // the sum/std loops above — confirm whether that is intentional.
            for (int k = 1; k < mH; k++)
            {
                for (int l = 1; l < mW; l++)
                {
                    int mask_x = j - rW + l - 1;
                    int mask_y = i - rH + k - 1;
                    pp = gR_R_Picked_Map[mask_y, mask_x] / sum;
                    //p = matrix1[i - rH + k - 1, j - rW + l - 1] / sum;
                    if (pp > 0) // Log p, p > 0
                    {
                        ee = Math.Log(pp);
                    }
                    else
                    {
                        ee = 0;
                    }
                    //System.Console.WriteLine("-");
                    //System.Console.WriteLine(p);
                    //System.Console.WriteLine(ee);
                    //System.Console.WriteLine("-");
                    entropy = entropy + (-(pp * ee));
                }
            }
            fVave[ind] = ave;
            fVstd[ind] = std;
            fVentropy[ind] = entropy;
            ind++;
        } // end inner loop (j, columns)
    } // end outer loop (i, rows)
    // Pack the three features into the training matrix, one row per window.
    trainData_all = new Matrix<float>(ind, 3);
    for (int i = 0; i < ind; i++)
    {
        trainData_all.Data[i, 0] = (float)fVave[i];
        trainData_all.Data[i, 1] = (float)fVstd[i];
        trainData_all.Data[i, 2] = (float)fVentropy[i];
    }
    using (Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM())
    {
        SVMParams p = new SVMParams();
        /* p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);*/
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.RBF;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.Gamma = 0.1;
        p.Coef0 = 0;
        p.Degree = 2;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(500, 0.00001); // termination criteria: at most 500 iterations, epsilon 1e-5 for result accuracy
        //bool trained = model.Train(trainData, trainClasses, null, null, p);
        bool trained = model.TrainAuto(trainData_all, classV, null, null, p.MCvSVMParams, 5);
        model.Save(@"D:\Code\Data\train_func.xml");
    }
    System.Console.WriteLine(" END OF button7_Click Masking ");
    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Prediction pass: reload the model just saved and repaint windows the
    // SVM classifies as positive.
    Image<Bgr, byte> predict_image = Train_I2.Clone();
    using (Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM())
    {
        model.Load(@"D:\Code\Data\train_func.xml");
        int ind_predict = 0;
        // Same window traversal order as the feature-extraction pass, so
        // ind_predict indexes the matching row of trainData_all.
        for (int i = maskCenter_H_start; i < maskCenter_H_end; i++)
            for (int j = maskCenter_W_start; j < maskCenter_W_end; j++)
            {
                Matrix<float> feature_test = TakeRow(trainData_all.Data, ind_predict);
                //Matrix<float> feature_test = trainData_all.GetRow( ind_predict ) ;
                // System.Console.WriteLine(feature_test);
                float predict_class = model.Predict(feature_test);
                if (predict_class == 1)
                {
                    // Paint the whole window magenta (max B and R channels).
                    for (int k = 0; k < mH; k++)
                        for (int l = 0; l < mW; l++)
                        {
                            int mask_x = j - rW + l - 1;
                            int mask_y = i - rH + k - 1;
                            predict_image.Data[mask_y, mask_x, 0] = 255;
                            predict_image.Data[mask_y, mask_x, 2] = 255;
                        }
                }
                ind_predict++;
                // System.Console.WriteLine(predict_class);
            } // end of all
        //String name2 = name.Insert(name.Length - 4, "_predict");
        String name3 = "predict";
        predict_image.Save(@"D:\Code\Data\predict.bmp");
        pictureBox9.Image = Image.FromFile(@"D:\Code\Data\predict.bmp");
        // Image<Bgr, byte> predict_image = Train_I2.Clone();
        System.Console.WriteLine(" END OF Predict ");
    }
}
/// <summary>
/// Loads two gesture CSV files, computes 2 features per gesture
/// (scaled xyRatio and totalGestureTime), trains a linear C-SVC via
/// TrainAuto, colours a 500x500 image by the decision regions, overlays
/// support vectors and the training samples, and shows the result.
/// </summary>
static void Main(string[] args)
{
    int trainSampleCount = 0;
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);
    //conversion of CSV to gesture variables
    List<Gesture> gestureListClass1 = csvToGestureList(@"C:\Users\Dan\Desktop\thesis data\testEB-3-20.csv");
    List<Gesture> gestureListClass2 = csvToGestureList(@"C:\Users\Dan\Desktop\thesis data\testSNC-3-20.csv");
    trainSampleCount = (gestureListClass1.Count) + (gestureListClass2.Count); //set the sample count to the number of gestures we have available
    //create relevant matrices based on size of the gestureList
    Matrix<float> sample = new Matrix<float>(1, 2); //a sample has 2 columns because of 2 features
    Matrix<float> trainTestData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainTestClasses = new Matrix<float>(trainSampleCount, 1);
    //GESTURE MATH INCOMING
    foreach (Gesture g in gestureListClass1)
    {
        g.runMetrics();
    }
    foreach (Gesture g in gestureListClass2)
    {
        g.runMetrics();
    }
    #region Generate the training data and classes
    //fill first set of data
    for (int i = 0; i < gestureListClass1.Count; i++)
    {
        double[] gMetrics = (gestureListClass1[i].returnMetrics()).ToArray();
        /*order of values
         * list[0] - xyRatio
         * list[1] - totalGestureTime
         */
        // Scale factors (*150, /4) map the features into the 500x500
        // visualization range — presumably tuned by hand; TODO confirm.
        trainTestData[i, 0] = ((float)gMetrics[0]) * 150;
        trainTestData[i, 1] = ((float)gMetrics[1]) / 4;
    }
    Matrix<float> trainTestData1 = trainTestData.GetRows(0, gestureListClass1.Count, 1);
    for (int j = 0; j < gestureListClass2.Count; j++)
    {
        double[] gMetrics = (gestureListClass2[j].returnMetrics()).ToArray();
        trainTestData[(j + gestureListClass1.Count), 0] = (float)gMetrics[0] * 150;
        trainTestData[(j + gestureListClass1.Count), 1] = ((float)gMetrics[1]) / 4;
    }
    Matrix<float> trainTestData2 = trainTestData.GetRows(gestureListClass1.Count, trainSampleCount, 1);
    // GetRows is assumed to return a view over trainTestClasses, so these
    // SetValue calls label the underlying rows 1 and 2 — as in the Emgu
    // SVM sample; TODO confirm.
    Matrix<float> trainTestClasses1 = trainTestClasses.GetRows(0, gestureListClass1.Count, 1);
    trainTestClasses1.SetValue(1);
    Matrix<float> trainTestClasses2 = trainTestClasses.GetRows(gestureListClass1.Count, trainSampleCount, 1);
    trainTestClasses2.SetValue(2);
    #endregion
    using (SVM model = new SVM())
    {
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        //p.Gamma = 0.1;
        p.C = 10;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);
        //bool trained = model.Train(trainTestData, trainTestClasses, null, null, p);
        bool trained = model.TrainAuto(trainTestData, trainTestClasses, null, null, p.MCvSVMParams, 5);
        // Colour every pixel by the predicted class of its (x, y) coordinates.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                float response = model.Predict(sample);
                img[i, j] = response == 1 ? new Bgr(90, 0, 0) : new Bgr(0, 0, 90);
                //response == 2 ? new Bgr(0, 90, 0) :
            }
        }
        // Circle each support vector.
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(255, 255, 128), 2);
        }
    }
    // display the original training samples
    // NOTE(review): the loop bound trainSampleCount / 2 only covers all
    // samples when the two classes are the same size — confirm.
    for (int i = 0; i < (trainSampleCount / 2); i++)
    {
        if (i < trainTestData1.Rows)
        {
            PointF p1 = new PointF((trainTestData1[i, 0]), (trainTestData1[i, 1]));
            img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        }
        if (i < trainTestData2.Rows)
        {
            PointF p2 = new PointF((trainTestData2[i, 0]), (trainTestData2[i, 1]));
            img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 100, 255), -1);
        }
    }
    Emgu.CV.UI.ImageViewer.Show(img);
}
/// <summary>
/// Generates three Gaussian point clusters as synthetic training data,
/// trains a linear C-SVC via TrainAuto, colours a 500x500 image by the
/// decision regions, circles the support vectors, plots the training
/// samples, and displays the saved image in pictureBox1.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    int trainSampleCount = 150;
    int sigma = 60; // std-dev of each synthetic Gaussian cluster
    #region Generate the training data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);
    Matrix<float> sample = new Matrix<float>(1, 2);
    // Each trainDataN/trainClassesN is a row-range view over the parent
    // matrix, so the SetRandNormal / SetValue calls below fill trainData
    // and trainClasses in place (standard Emgu SVM-sample pattern).
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);
    trainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    trainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(sigma));
    Matrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    trainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainClasses2.SetValue(2);
    Matrix<float> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainClasses3.SetValue(3);
    #endregion
    using (SVM model = new SVM())
    {
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);
        //bool trained = model.Train(trainData, trainClasses, null, null, p);
        bool trained = model.TrainAuto(trainData, trainClasses, null, null, p.MCvSVMParams, 5);
        // Colour every pixel by the predicted class of its (x, y) coordinates.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                float response = model.Predict(sample);
                img[i, j] =
                    response == 1 ? new Bgr(90, 0, 0) :
                    response == 2 ? new Bgr(0, 90, 0) :
                    new Bgr(0, 0, 90);
            }
        }
        // Circle each support vector.
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(128, 128, 128), 2);
        }
    }
    // display the original training samples
    for (int i = 0; i < (trainSampleCount / 3); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
        PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);
        img.Draw(new CircleF(p3, 2.0f), new Bgr(100, 100, 255), -1);
    }
    // Round-trip through disk so the PictureBox owns its own copy.
    img.Save(@"D:\Code\svm.bmp");
    // Emgu.CV.UI.ImageViewer.Show(img);
    pictureBox1.Image = Image.FromFile(@"D:\Code\svm.bmp");
}
/// <summary>
/// Lets the user pick a training folder, pairs every "*_class*.bmp" label
/// image with its original "*.tif" image, extracts features for each pair
/// via Cal_features(), trains an RBF C-SVC on the accumulated
/// trainData_all / trainClasses_all via TrainAuto, and saves the model.
/// Shows "讀取錯誤!" on any failure.
/// </summary>
private void button2_Click(object sender, EventArgs e)
{
    folderBrowserDialog1.SelectedPath = @"D:\Code\Data";
    try
    {
        if (folderBrowserDialog1.ShowDialog() == DialogResult.OK)
        {
            String[] files = System.IO.Directory.GetFiles(folderBrowserDialog1.SelectedPath);
            // Reset accumulated training data from any previous run.
            if (trainData_all != null)
            {
                trainData_all.Dispose();
                trainData_all = null;
            }
            foreach (String name in files)
            {
                if (name.Contains("_class"))
                {
                    if (Class_I != null)
                    {
                        Class_I.Dispose();
                        Class_I = null;
                    }
                    if (Train_I != null)
                    {
                        Train_I.Dispose();
                        Train_I = null;
                    }
                    //////////
                    // Derive the original image path from the label image:
                    // drop "_class" and switch the bmp extension to tif.
                    String name2 = name.Replace("_class", "");
                    name2 = name2.Replace("bmp", "tif");
                    if (files.Contains(name2))
                    {
                        Class_I = new Image<Bgr, Byte>(name).Resize(scale, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);   // class/label image
                        Train_I = new Image<Bgr, Byte>(name2).Resize(scale, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);  // original image
                        Cal_features();
                    }
                }
            }
            ///////SVM
            using (Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM())
            {
                SVMParams p = new SVMParams();
                /* p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
                p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
                p.C = 1;
                p.TermCrit = new MCvTermCriteria(100, 0.00001);*/
                p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.RBF;
                p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
                p.Gamma = 0.1;
                p.Coef0 = 0;
                p.Degree = 2;
                p.C = 1;
                p.TermCrit = new MCvTermCriteria(500, 0.00001);
                //bool trained = model.Train(trainData, trainClasses, null, null, p);
                bool trained = model.TrainAuto(trainData_all, trainClasses_all, null, null, p.MCvSVMParams, 5);
                model.Save(@"D:\Code\Data\train_func.xml");
            }
        }
    }
    catch
    {
        // NOTE(review): bare catch swallows the exception type/message;
        // consider catching Exception and logging it for diagnosis.
        MessageBox.Show("讀取錯誤!");
    }
} // end of button2
/// <summary>
/// Trains the second-layer SVM from every image file in <paramref name="dir"/>:
/// counts the files, extracts features, builds the dictionary, computes the
/// per-image descriptors, trains a linear C-SVC, and persists both the
/// dictionary and the trained model under "&lt;dir&gt;\obj\".
/// </summary>
/// <param name="dir">Directory containing the training images.</param>
/// <exception cref="InvalidOperationException">If MakeDic() leaves SecondLayerDic null.</exception>
public void TrainEach(string dir)
{
    directory = dir;

    // Enumerate the directory once and reuse the snapshot, so the matrix
    // sizes always match the files actually processed below.
    FileInfo[] files = new DirectoryInfo(dir).GetFiles();
    // '+=' preserves any count accumulated before this call, matching the
    // original per-file increment loop.
    SecondLayerNum += files.Length;

    labels = new Matrix<float>(SecondLayerNum, 1);
    trainingDescriptors = new Matrix<float>(SecondLayerNum, classNum);

    foreach (FileInfo file in files)
    {
        Extract(new Image<Bgr, byte>(file.FullName));
    }
    MakeDic();
    if (SecondLayerDic == null)
        throw new InvalidOperationException("MakeDic() did not produce SecondLayerDic.");

    j = 0;
    foreach (FileInfo file in files)
    {
        MakeDescriptors(new Image<Bgr, byte>(file.FullName));
    }
    Console.WriteLine();

    SecondLayerSVM = new SVM();
    SVMParams p = new SVMParams();
    p.KernelType = SVM_KERNEL_TYPE.LINEAR;
    p.SVMType = SVM_TYPE.C_SVC;
    p.C = 1;
    p.TermCrit = new MCvTermCriteria(100, 0.00001);
    SecondLayerSVM.Train(trainingDescriptors, labels, null, null, p);

    // NOTE(review): BinaryFormatter is insecure (removed in .NET 9);
    // consider a safer serializer for the dictionary.
    IFormatter formatter = new BinaryFormatter();
    // File.Create truncates any existing file (File.OpenWrite would leave
    // stale trailing bytes when the new payload is shorter), and 'using'
    // disposes the stream even if Serialize throws.
    using (Stream fs = File.Create(directory + "\\obj\\dic.xml"))
    {
        formatter.Serialize(fs, SecondLayerDic);
    }
    SecondLayerSVM.Save(directory + "\\obj\\svm.xml");
}
/// <summary>
/// Timed variant of the Emgu SVM demo: generates three Gaussian clusters,
/// trains a linear C-SVC via TrainAuto, circles the support vectors and
/// plots the training samples, reporting elapsed time for each phase via
/// MessageBox. The per-pixel region-colouring step is commented out, so
/// the returned 500x500 image has a black background.
/// </summary>
/// <returns>The rendered visualization image.</returns>
private Image<Bgr, Byte> svm()
{
    Stopwatch timer = new Stopwatch();
    timer.Start();
    int trainSampleCount = 150;
    int sigma = 60; // std-dev of each synthetic Gaussian cluster
    #region Generate the training data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);
    Matrix<float> sample = new Matrix<float>(1, 2);
    // Row/column views over trainData/trainClasses: the SetRandNormal and
    // SetValue calls below fill the parent matrices in place.
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);
    trainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    trainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(sigma));
    Matrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    trainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainClasses2.SetValue(2);
    Matrix<float> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainClasses3.SetValue(3);
    #endregion
    timer.Stop();
    MessageBox.Show("生成" + timer.ElapsedMilliseconds + "ms");
    timer.Reset();
    timer.Start();
    using (SVM model = new SVM())
    {
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);
        //model.Load(@"D:\Play Data\训练数据");
        //bool trained = model.Train(trainData, trainClasses, null, null, p);
        bool trained = model.TrainAuto(trainData, trainClasses, null, null, p.MCvSVMParams, 5);
        timer.Stop();
        MessageBox.Show("训练" + timer.ElapsedMilliseconds + "ms");
        timer.Reset();
        timer.Start();
        // Region colouring disabled — this loop currently only sweeps the
        // sample coordinates without predicting or painting.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                //float response = model.Predict(sample);
                //img[i, j] =
                //    response == 1 ? new Bgr(90, 0, 0) :
                //    response == 2 ? new Bgr(0, 90, 0) :
                //    new Bgr(0, 0, 90);
            }
        }
        //model.Save(@"D:\Play Data\训练数据");
        timer.Stop();
        MessageBox.Show("染色" + timer.ElapsedMilliseconds + "ms");
        timer.Reset();
        timer.Start();
        // Circle each support vector.
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(128, 128, 128), 2);
        }
        timer.Stop();
        MessageBox.Show("画圈" + timer.ElapsedMilliseconds + "ms");
        timer.Reset();
        timer.Start();
    }
    // display the original training samples
    for (int i = 0; i < (trainSampleCount / 3); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
        PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);
        img.Draw(new CircleF(p3, 2.0f), new Bgr(100, 100, 255), -1);
    }
    timer.Stop();
    MessageBox.Show("标点" + timer.ElapsedMilliseconds + "ms");
    timer.Reset();
    timer.Start();
    return img;
}
/// <summary>
/// Initializes the recognizer: default polynomial-kernel C-SVC parameters
/// (SVMParameters), an alternate raw linear parameter struct (paramsm),
/// one SVM per sign shape, and a bank of 8 Gabor-wavelet-derived 10x10
/// convolution kernels (cKernelList) whose wavelets are also concatenated
/// into GWOutputImage.
/// </summary>
public FeatureRecognizer()
{
    isTrained = false;
    SVMParameters = new SVMParams();
    SVMParameters.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
    //SVMParameters.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.NU_SVC;
    //p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.POLY;
    //SVMParameters.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
    SVMParameters.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.POLY;
    //SVMParameters.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.RBF;
    SVMParameters.Gamma = 3;
    SVMParameters.Degree = 3;
    SVMParameters.C = 1;
    SVMParameters.TermCrit = new MCvTermCriteria(100, 0.00001);
    //SVMParameters.Nu
    //SVMParameters.P
    //SVMParameters.Coef0 =
    // Second parameter set using the raw OpenCV struct; note it differs
    // from SVMParameters (linear kernel, gamma 2, C 3) — presumably used
    // by a different training path; TODO confirm which one is active.
    paramsm = new MCvSVMParams();
    paramsm.svm_type = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
    paramsm.kernel_type = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
    paramsm.gamma = 2;
    paramsm.degree = 3;
    paramsm.C = 3;
    paramsm.term_crit = new MCvTermCriteria(100, 0.00001);
    //debugImages = new List<DebugImage>();
    debugImages = null;
    //SVMModel = new SVM();
    // foreach(SignShape shape in Enum.GetValues(typeof(SignShape)))
    // One untrained SVM per sign shape.
    for (int i = 0; i < signShapeCount; i++)
    {
        SVM model = new SVM();
        SVMModels.Add(model);
    }
    // Build 8 orientation-indexed Gabor wavelets (10x10, scale v = 2) and
    // turn each into a convolution kernel; intensities are scaled down by
    // 10000 before use as float kernel weights.
    int R = 10;
    int C = 10;
    double v = 2;
    for (int i = 0; i < 8; i++)
    {
        GWOutputImage = GWOutputImage.ConcateHorizontal(GaborWavelet(R, C, i, v));
        Image<Gray, double> GW = GaborWavelet(R, C, i, v);
        ConvolutionKernelF ckernel = new ConvolutionKernelF(10, 10);
        for (int l = 0; l < 10; l++)
            for (int k = 0; k < 10; k++)
                ckernel[l, k] = (float)GW[l, k].Intensity / 10000;
        ckernel.Center = new Point(5, 5);
        cKernelList.Add(ckernel);
    }
}
/// <summary>
/// Trains the second-layer SVM from every image file in <paramref name="dir"/>:
/// counts the files, extracts features, builds the dictionary, computes the
/// per-image descriptors, and trains a linear C-SVC on them. Unlike the
/// persisting variant, this overload does not save anything to disk.
/// </summary>
/// <param name="dir">Directory containing the training images.</param>
public void TrainEach(string dir)
{
    directory = dir;

    // Enumerate the directory once and reuse the snapshot, so the matrix
    // sizes always match the files actually processed below.
    FileInfo[] files = new DirectoryInfo(dir).GetFiles();
    // '+=' preserves any count accumulated before this call, matching the
    // original per-file increment loop.
    SecondLayerNum += files.Length;

    labels = new Matrix<float>(SecondLayerNum, 1);
    trainingDescriptors = new Matrix<float>(SecondLayerNum, classNum);

    foreach (FileInfo file in files)
    {
        Extract(new Image<Bgr, byte>(file.FullName));
    }
    MakeDic();

    j = 0;
    foreach (FileInfo file in files)
    {
        MakeDescriptors(new Image<Bgr, byte>(file.FullName));
    }
    Console.WriteLine();

    SecondLayerSVM = new SVM();
    SVMParams p = new SVMParams();
    p.KernelType = SVM_KERNEL_TYPE.LINEAR;
    p.SVMType = SVM_TYPE.C_SVC;
    p.C = 1;
    p.TermCrit = new MCvTermCriteria(100, 0.00001);
    SecondLayerSVM.Train(trainingDescriptors, labels, null, null, p);
}
/// <summary>
/// Loads two gesture CSV files, computes 16 features per gesture (ratio,
/// timing, ellipse-axis and sub-stroke velocity metrics), trains a linear
/// C-SVC via TrainAuto, renders the decision regions over the first two
/// features, saves the model to disk, and shows the visualization.
/// </summary>
static void Main(string[] args)
{
    int trainSampleCount = 0;
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);
    //conversion of CSV to gesture variables
    List<Gesture> gestureListClass1 = csvToGestureList(@"C:\Users\faculty\Desktop\testEB-3-20.csv");
    List<Gesture> gestureListClass2 = csvToGestureList(@"C:\Users\faculty\Desktop\testSNC-3-20.csv");
    trainSampleCount = (gestureListClass1.Count) + (gestureListClass2.Count); //set the sample count to the number of gestures we have available
    //create relevant matrices based on size of the gestureList
    Matrix<float> sample = new Matrix<float>(1, 16);
    Matrix<float> trainTestData = new Matrix<float>(trainSampleCount, 16);
    Matrix<float> trainTestClasses = new Matrix<float>(trainSampleCount, 1);
    //GESTURE MATH INCOMING
    foreach (Gesture g in gestureListClass1)
    {
        g.runMetrics();
    }
    foreach (Gesture g in gestureListClass2)
    {
        g.runMetrics();
    }
    #region Generate the training data and classes
    //fill first set of data
    for (int i = 0; i < gestureListClass1.Count; i++)
    {
        double[] gMetrics = (gestureListClass1[i].returnMetrics()).ToArray();
        /*
         * //add gestures to list
         * temp.Add(xyRatio);        //[0]
         * temp.Add(totalGestureTime); //[1]
         * temp.Add(majorAvg);       //[2]
         * temp.Add(minorAvg);       //[3]
         * temp.Add(avgXVelo);       //[4]
         * temp.Add(avgYVelo);       //[5]
         * temp.Add(majorVariance);  //[6]
         * temp.Add(minorVariance);  //[7]
         * //add substrokes
         * temp.Add(firstVeloMag);   //[8]
         * temp.Add(firstVeloDir);   //[9]
         * temp.Add(secondVeloMag);  //[10]
         * temp.Add(secondVeloDir);  //[11]
         * temp.Add(thirdVeloMag);   //[12]
         * temp.Add(thirdVeloDir);   //[13]
         * temp.Add(fourthVeloMag);  //[14]
         * temp.Add(fourthVeloDir);  //[15]
         */
        // Only the first two features are rescaled (*150, /4) — presumably
        // to fit the 500x500 visualization; TODO confirm.
        trainTestData[i, 0] = ((float)gMetrics[0]) * 150; //xy ratio
        trainTestData[i, 1] = ((float)gMetrics[1]) / 4; //totalGestureTime
        trainTestData[i, 2] = ((float)gMetrics[2]);
        trainTestData[i, 3] = ((float)gMetrics[3]);
        trainTestData[i, 4] = ((float)gMetrics[4]);
        trainTestData[i, 5] = ((float)gMetrics[5]);
        trainTestData[i, 6] = ((float)gMetrics[6]);
        trainTestData[i, 7] = ((float)gMetrics[7]);
        trainTestData[i, 8] = ((float)gMetrics[8]);
        trainTestData[i, 9] = ((float)gMetrics[9]);
        trainTestData[i, 10] = ((float)gMetrics[10]);
        trainTestData[i, 11] = ((float)gMetrics[11]);
        trainTestData[i, 12] = ((float)gMetrics[12]);
        trainTestData[i, 13] = ((float)gMetrics[13]);
        trainTestData[i, 14] = ((float)gMetrics[14]);
        trainTestData[i, 15] = ((float)gMetrics[15]);
    }
    Matrix<float> trainTestData1 = trainTestData.GetRows(0, gestureListClass1.Count, 1);
    for (int j = 0; j < gestureListClass2.Count; j++)
    {
        double[] gMetrics = (gestureListClass2[j].returnMetrics()).ToArray();
        trainTestData[(j + gestureListClass1.Count), 0] = (float)gMetrics[0] * 150;
        trainTestData[(j + gestureListClass1.Count), 1] = ((float)gMetrics[1]) / 4;
        trainTestData[(j + gestureListClass1.Count), 2] = ((float)gMetrics[2]);
        trainTestData[(j + gestureListClass1.Count), 3] = ((float)gMetrics[3]);
        trainTestData[(j + gestureListClass1.Count), 4] = ((float)gMetrics[4]);
        trainTestData[(j + gestureListClass1.Count), 5] = ((float)gMetrics[5]);
        trainTestData[(j + gestureListClass1.Count), 6] = ((float)gMetrics[6]);
        trainTestData[(j + gestureListClass1.Count), 7] = ((float)gMetrics[7]);
        trainTestData[(j + gestureListClass1.Count), 8] = ((float)gMetrics[8]);
        trainTestData[(j + gestureListClass1.Count), 9] = ((float)gMetrics[9]);
        trainTestData[(j + gestureListClass1.Count), 10] = ((float)gMetrics[10]);
        trainTestData[(j + gestureListClass1.Count), 11] = ((float)gMetrics[11]);
        trainTestData[(j + gestureListClass1.Count), 12] = ((float)gMetrics[12]);
        trainTestData[(j + gestureListClass1.Count), 13] = ((float)gMetrics[13]);
        trainTestData[(j + gestureListClass1.Count), 14] = ((float)gMetrics[14]);
        trainTestData[(j + gestureListClass1.Count), 15] = ((float)gMetrics[15]);
    }
    Matrix<float> trainTestData2 = trainTestData.GetRows(gestureListClass1.Count, trainSampleCount, 1);
    // GetRows is assumed to return a view over trainTestClasses, so these
    // SetValue calls label the underlying rows 1 and 2 — as in the Emgu
    // SVM sample; TODO confirm.
    Matrix<float> trainTestClasses1 = trainTestClasses.GetRows(0, gestureListClass1.Count, 1);
    trainTestClasses1.SetValue(1);
    Matrix<float> trainTestClasses2 = trainTestClasses.GetRows(gestureListClass1.Count, trainSampleCount, 1);
    trainTestClasses2.SetValue(2);
    #endregion
    using (SVM model = new SVM())
    {
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        //p.Gamma = 0.1;
        p.C = 10;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);
        //bool trained = model.Train(trainTestData, trainTestClasses, null, null, p);
        bool trained = model.TrainAuto(trainTestData, trainTestClasses, null, null, p.MCvSVMParams, 5);
        // NOTE(review): sample has 16 columns but only [0,0] and [0,1] are
        // ever written — the remaining 14 features stay 0 for every pixel,
        // so this map is a 2-D slice of the 16-D decision surface; confirm
        // that is intended.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                float response = model.Predict(sample);
                img[i, j] = response == 1 ? new Bgr(90, 0, 0) : new Bgr(0, 0, 90);
                //response == 2 ? new Bgr(0, 90, 0) :
            }
        }
        // Circle each support vector (only its first two components are drawn).
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(255, 255, 128), 2);
        }
        model.Save(@"C:\Users\faculty\Desktop\svm-function3coord16.xml");
    }
    // display the original training samples
    // NOTE(review): the loop bound trainSampleCount / 2 only covers all
    // samples when the two classes are the same size — confirm.
    for (int i = 0; i < (trainSampleCount / 2); i++)
    {
        if (i < trainTestData1.Rows)
        {
            PointF p1 = new PointF((trainTestData1[i, 0]), (trainTestData1[i, 1]));
            img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        }
        if (i < trainTestData2.Rows)
        {
            PointF p2 = new PointF((trainTestData2[i, 0]), (trainTestData2[i, 1]));
            img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 100, 255), -1);
        }
    }
    Emgu.CV.UI.ImageViewer.Show(img);
}