public void Save()
{
    // Train a linear C-SVC on the accumulated top-layer BOW descriptors, then
    // persist both the vocabulary (dic.xml) and the trained model (svm.xml)
    // under <directory>obj\.
    topLayerSVM = new SVM();
    SVMParams p = new SVMParams();
    p.KernelType = SVM_KERNEL_TYPE.LINEAR;
    p.SVMType = SVM_TYPE.C_SVC;
    p.C = 1;
    p.TermCrit = new MCvTermCriteria(100, 0.00001);
    topLayerSVM.Train(trainingDescriptors, labels, null, null, p);

    // NOTE(review): BinaryFormatter is insecure and obsolete (removed in
    // .NET 9); kept only for format compatibility with the matching loader.
    IFormatter formatter = new BinaryFormatter();
    // using-statement guarantees the stream is closed even if Serialize
    // throws (the original leaked the file handle on failure).
    using (Stream fs = File.OpenWrite(directory + "obj\\dic.xml"))
    {
        formatter.Serialize(fs, topLayerDic);
    }
    topLayerSVM.Save(directory + "obj\\svm.xml");
}
public TrainForm()
{
    // Build the form, load the pretrained SVM, and queue every file found one
    // level below TestBase; each file starts with an empty detected-line list.
    InitializeComponent();
    model = CreateSvm();

    data = new Dictionary<string, List<TextLineInfo>>();
    foreach (string subdir in Directory.GetDirectories(TestBase))
    {
        foreach (string path in Directory.GetFiles(subdir))
        {
            data[path] = new List<TextLineInfo>();
        }
    }

    files = new Queue<string>(data.Keys);
    OpenNext();
    // LoadDetectedLine();
}
//SVMParams _parameters
public SVMClassifier()
{
    // Create an untrained model and store a default parameter set:
    // linear C-SVC, C = 1, stop after 100 iterations or epsilon 1e-5.
    model = new SVM();
    SVMParams config = new SVMParams
    {
        KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR,
        SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC,
        C = 1,
        TermCrit = new MCvTermCriteria(100, 0.00001)
    };
    parameters = config;
}
public Load_L2BOF(int classnum, string src)
{
    // Restore a previously-trained second-layer BOW vocabulary and SVM from
    // <src>\obj\{dic.xml, svm.xml}.
    SecondLayerDic = null;
    directory = src;
    classNum = classnum;
    bowTrainer = new BOWKMeansTrainer(classNum, new MCvTermCriteria(10, 0.01), 3, KMeansInitType.PPCenters);

    // NOTE(review): BinaryFormatter deserialization is unsafe for untrusted
    // files and is removed in .NET 9; kept for format compatibility.
    IFormatter formatter = new BinaryFormatter();
    // using-statement: the original leaked the handle if Deserialize threw.
    using (FileStream fs = File.OpenRead(directory + "\\obj\\dic.xml"))
    {
        SecondLayerDic = (Matrix<float>)formatter.Deserialize(fs);
    }
    bowDe.SetVocabulary(SecondLayerDic);

    SecondLayerSVM = new SVM();
    SecondLayerSVM.Load(directory + "\\obj\\svm.xml");
    Console.WriteLine("Finished Loading L2 SVM.");
    loaded = true;
}
public Load_L1BOF(int classnum, string loc)
{
    // Restore a previously-trained top-layer BOW vocabulary and SVM from
    // <loc>obj\{dic.xml, svm.xml}.
    prefix = loc;
    topLayerDic = null;
    classNum = classnum;
    bowTrainer = new BOWKMeansTrainer(classNum, new MCvTermCriteria(10, 0.01), 3, KMeansInitType.PPCenters);

    // NOTE(review): BinaryFormatter deserialization is unsafe for untrusted
    // files and is removed in .NET 9; kept for format compatibility.
    IFormatter formatter = new BinaryFormatter();
    // using-statement: the original leaked the handle if Deserialize threw.
    using (FileStream fs = File.OpenRead(loc + "obj\\dic.xml"))
    {
        topLayerDic = (Matrix<float>)formatter.Deserialize(fs);
    }
    bowDe.SetVocabulary(topLayerDic);

    topLayerSVM = new SVM();
    topLayerSVM.Load(loc + "obj\\svm.xml");
    Console.WriteLine("Finished Loading L1 SVM.");
    loaded = true;
}
private void btnTrain_Click(object sender, EventArgs e)
{
    // Train a linear C-SVC on HOG features: positives (label 1) from
    // txtPosPath, negatives (label 0) from txtNegPath, then save the model
    // to the file named in txtFileName.
    HOGDescriptor hog = new HOGDescriptor(new Size(36, 36), new Size(36, 36), new Size(6, 6), new Size(6, 6));

    fsPeg = Directory.GetFiles(txtPosPath.Text);
    for (int i = 0; i < fsPeg.Length; i++)
    {
        // Directory.GetFiles already returns full paths; the original
        // prepended the folder a second time, producing invalid file names.
        String cFileName = fsPeg[i];
        Image<Bgr, byte> vImage = new Image<Bgr, byte>(cFileName);
        Image<Gray, byte> vGray = vImage.Convert<Gray, byte>();
        float[] fAttr = hog.Compute(vGray);
        for (int j = 0; j < fAttr.Length; j++)
        {
            DataMatrix[i, j] = fAttr[j];
        }
        AttrMatrix[i, 0] = 1;
    }

    fsNeg = Directory.GetFiles(txtNegPath.Text);
    for (int i = 0; i < fsNeg.Length; i++)
    {
        String cFileName = fsNeg[i];
        Image<Bgr, byte> vImage = new Image<Bgr, byte>(cFileName);
        Image<Gray, byte> vGray = vImage.Convert<Gray, byte>();
        float[] fAttr = hog.Compute(vGray);
        for (int j = 0; j < fAttr.Length; j++)
        {
            // Offset by the positive count: the original wrote negatives into
            // rows [0..fsNeg.Length), overwriting the positive samples.
            DataMatrix[fsPeg.Length + i, j] = fAttr[j];
        }
        AttrMatrix[fsPeg.Length + i, 0] = 0;
    }

    Emgu.CV.ML.SVM vSVM = new Emgu.CV.ML.SVM();
    vSVM.Type = Emgu.CV.ML.SVM.SvmType.CSvc;
    vSVM.SetKernel(Emgu.CV.ML.SVM.SvmKernelType.Linear);
    vSVM.TermCriteria = new MCvTermCriteria(1000, 0.1);
    TrainData td = new TrainData(DataMatrix, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, AttrMatrix);
    // The original built the TrainData but never called Train, so Save
    // exported an untrained (empty) model.
    vSVM.Train(td);
    String cExportFileName = txtFileName.Text;
    vSVM.Save(cExportFileName);
}
public Classifier(List<string> folders)
{
    // Collect the file listing of every readable folder; each folder is one
    // class. Missing directories are reported and skipped.
    _folders = new List<FileInfo[]>();
    svmClassifier = new SVM();
    foreach (string path in folders)
    {
        FileInfo[] entries;
        try
        {
            entries = new DirectoryInfo(path).GetFiles();
        }
        catch (DirectoryNotFoundException ex)
        {
            System.Console.WriteLine(path + " not found");
            System.Console.WriteLine(ex.Data);
            continue;
        }
        _folders.Add(entries);
        class_num++;
    }
}
private void btnTest_Click(object sender, EventArgs e)
{
    // Loads a saved SVM and runs HOG multi-scale detection using a detector
    // vector derived from the model.
    // NOTE(review): this handler looks unfinished — resultMat is allocated
    // but never populated (so mydetector is all zeros), the rho/bias term is
    // commented out, svmMat/alphaMat are never used, vImage is an empty Mat,
    // and fileStorage is never disposed. Verify intent before relying on it.
    MyRect[] regions = null;
    Emgu.CV.ML.SVM vSVM = new Emgu.CV.ML.SVM();
    String cExportFileName = txtFileName.Text;
    FileStorage fileStorage = new FileStorage(cExportFileName, FileStorage.Mode.Read);
    vSVM.Read(fileStorage.GetFirstTopLevelNode());
    int iHeight = vSVM.GetSupportVectors().Height;
    int iWidth = vSVM.GetSupportVectors().Width;
    var svmMat = new Matrix <float>(iWidth, iHeight);        // unused
    Matrix <float> resultMat = new Matrix <float>(1, iWidth); // never populated — stays zero
    Matrix <float> alphaMat = new Matrix <float>(1, iHeight); // unused
    // HOG detector layout: iWidth weights followed by one bias (rho) slot.
    float[] mydetector = new float[iWidth + 1];
    for (int i = 0; i < iWidth; i++)
    {
        mydetector[i] = resultMat[0, i];
    }
    //mydetector[iWidth] = rhoValue;
    Mat vImage = new Mat();
    HOGDescriptor hog = new HOGDescriptor(new Size(36, 36), new Size(36, 36), new Size(6, 6), new Size(6, 6));
    hog.SetSVMDetector(mydetector);
    MCvObjectDetection[] results = hog.DetectMultiScale(vImage);
    // Repackage detections into the project's MyRect type.
    regions = new MyRect[results.Length];
    for (int i = 0; i < results.Length; i++)
    {
        regions[i] = new MyRect();
        regions[i].Rect = results[i].Rect;
        regions[i].Score = results[i].Score;
    }
}
public void Evaluate(IEvolutionState state, Individual ind, int subpop, int threadnum)
{
    // GP fitness evaluation: apply the individual's tree as an image
    // transform to every image across 5 category folds, extract superpixel
    // features, run 5-fold cross-validated SVM classification, and set the
    // standardised fitness to 100 minus the mean accuracy over the folds.
    if (!ind.Evaluated)
    {
        int counter = 0;
        // features[fold, imageIndex, featureIndex]; labels[fold, imageIndex] = 1-based class.
        var features = new int[5, NumOfImagesPerCategory * NumOfClasses, NumOfFeatures];
        var labels = new int[5, NumOfImagesPerCategory * NumOfClasses];
        for (int i = 0; i < 5; i++)
        {
            counter = 0;
            var categoryDir = CategoryDirs[i];
            var subcategoryDirs = Directory.EnumerateDirectories(categoryDir).ToArray();
            for (int j = 0; j < NumOfClasses; j++)
            {
                var subcategoryDir = subcategoryDirs[j];
                var images = Directory.GetFiles(subcategoryDir);
                for (int k = 0; k < NumOfImagesPerCategory; k++)
                {
                    // Load the image into the per-thread working buffers.
                    var tempImage = new Image<Gray, Byte>(images[k]);
                    tempImage.CopyTo(currentImage[threadnum]);
                    tempImage.CopyTo(originalImage[threadnum]);
                    tempImage.Dispose();
                    // Execute the evolved program on the per-thread image buffer.
                    ((GPIndividual)ind).Trees[0].Child.Eval(state, threadnum, Input, Stack, ((GPIndividual)ind), this);
                    int[] imageFeatures = ImageTransformer.GetSquareSuperpixelFeatures(currentImage[threadnum], SuperpixelSize);
                    for (int x = 0; x < imageFeatures.Length; x++)
                    {
                        features[i, counter, x] = imageFeatures[x];
                        labels[i, counter] = j + 1;
                    }
                    counter++;
                }
            }
        }
        /* (removed: commented-out dump of train/test features to
           libsvm-format text files) */
        var confMatI = new double[10, 10];
        double accuracy = 0;
        // 5-fold cross-validation: fold x is the test set, the other four train.
        for (int x = 0; x < 5; x++)
        {
            var trainData = new Matrix <float>(NumOfImagesPerCategory * NumOfClasses * 4, NumOfFeatures);
            var trainClasses = new Matrix <int>(NumOfImagesPerCategory * NumOfClasses * 4, 1);
            var testData = new Matrix <float>(NumOfImagesPerCategory * NumOfClasses, NumOfFeatures);
            var testClasses = new Matrix <int>(NumOfImagesPerCategory * NumOfClasses, 1);
            //trainData
            int imageCount = 0;
            for (int i = 0; i < 5; i++)
            {
                if (i != x)
                {
                    for (int j = 0; j < NumOfImagesPerCategory * NumOfClasses; j++)
                    {
                        for (int k = 0; k < NumOfFeatures; k++)
                        {
                            trainData[imageCount, k] = features[i, j, k];
                            trainClasses[imageCount, 0] = labels[i, j];
                        }
                        imageCount++;
                    }
                }
                else
                {
                    for (int j = 0; j < NumOfImagesPerCategory * NumOfClasses; j++)
                    {
                        for (int k = 0; k < NumOfFeatures; k++)
                        {
                            testData[j, k] = features[i, j, k];
                            testClasses[j, 0] = labels[i, j];
                        }
                    }
                }
            }
            // Polynomial-kernel C-SVC (degree 3, C = 1000).
            Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM();
            var predictions = new Matrix <float>(NumOfImagesPerCategory * NumOfClasses, 1);
            var trainData2 = new TrainData(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses);
            model.Type = Emgu.CV.ML.SVM.SvmType.CSvc;
            model.SetKernel(Emgu.CV.ML.SVM.SvmKernelType.Poly);
            model.TermCriteria = new MCvTermCriteria(10000, 0.001);
            model.Degree = 3;
            model.Gamma = 0.001;
            model.Coef0 = 0;
            model.C = 1000;
            model.Nu = 0.5;
            model.P = 0.1;
            model.Train(trainData2);
            model.Predict(testData, predictions);
            // var predictionsArray = (float[,])predictions.GetData();
            int correctPredictions = 0;
            for (int i = 0; i < predictions.Rows; i++)
            {
                if ((int)predictions[i, 0] == testClasses[i, 0])
                {
                    correctPredictions++;
                }
                // Accumulate the confusion matrix (labels are 1-based).
                var predictedLabel = (int)predictions[i, 0];
                var trueLabel = testClasses[i, 0];
                confMatI[predictedLabel - 1, trueLabel - 1]++;
            }
            // Convert counts to percentages.
            // NOTE(review): the hard-coded 500 presumably equals the per-fold
            // test count (NumOfImagesPerCategory * NumOfClasses) — confirm.
            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 10; j++)
                {
                    confMat[i, j] = (confMatI[i, j] / 500) * 100;
                }
            }
            if (correctPredictions > 0)
            {
                accuracy += 100 * ((double)correctPredictions / (double)predictions.Rows);
            }
        }
        /* (removed: large commented-out experiments — feature normalisation,
           feature-file dumps, an Emgu ANN_MLP variant, a SharpLearning neural
           net variant, and a libsvm-based variant) */
        // Lower standardised fitness is better: 100 - mean accuracy over 5 folds.
        var f = ((KozaFitness)ind.Fitness);
        f.SetStandardizedFitness(state, (float)(100 - (accuracy / 5)));
        ind.Evaluated = true;
        // Dump the transformed features as CSV.
        // NOTE(review): hard-coded path and hard-coded 1000/64 bounds — this
        // assumes NumOfImagesPerCategory*NumOfClasses == 1000 and
        // NumOfFeatures == 64; verify before reuse.
        var transFeatures = new StreamWriter(@"F:\Gesty\testy\transFeatures.csv");
        for (int i1 = 0; i1 < 1000; i1++)
        {
            for (int i2 = 0; i2 < 5; i2++)
            {
                var line = new StringBuilder();
                line.Append(labels[i2, i1].ToString() + ',');
                for (int i3 = 0; i3 < 64; i3++)
                {
                    line.Append(features[i2, i1, i3].ToString() + ',');
                }
                transFeatures.WriteLine(line.ToString().Trim(','));
            }
        }
        transFeatures.Close();
    }
}
/// <summary>
/// Native binding: fills <paramref name="grid"/> with the default
/// parameter-search grid for the given SVM parameter type — presumably
/// wraps the legacy OpenCV ML function of the same name (the DllImport
/// attribute is outside this view; confirm there).
/// </summary>
internal static extern void CvSVMGetDefaultGrid(SVM.ParamType type, ref MCvParamGrid grid);
public void TrainEach(string dir)
{
    // Train the second-layer SVM for one class directory: extract features
    // from every image, build the BOW vocabulary, compute per-image
    // descriptors, then fit a linear C-SVC.
    directory = dir;
    // Enumerate the directory once instead of three separate GetFiles()
    // passes (the original also counted files with a manual foreach loop).
    FileInfo[] files = new DirectoryInfo(dir).GetFiles();
    SecondLayerNum += files.Length;
    labels = new Matrix<float>(SecondLayerNum, 1);
    trainingDescriptors = new Matrix<float>(SecondLayerNum, classNum);
    // NOTE(review): the Image instances are never disposed; if Extract /
    // MakeDescriptors do not retain them, wrapping in using would be safer.
    foreach (FileInfo file in files)
    {
        Extract(new Image<Bgr, byte>(file.FullName));
    }
    MakeDic();
    j = 0;
    foreach (FileInfo file in files)
    {
        MakeDescriptors(new Image<Bgr, byte>(file.FullName));
    }
    Console.WriteLine();
    SecondLayerSVM = new SVM();
    SVMParams p = new SVMParams();
    p.KernelType = SVM_KERNEL_TYPE.LINEAR;
    p.SVMType = SVM_TYPE.C_SVC;
    p.C = 1;
    p.TermCrit = new MCvTermCriteria(100, 0.00001);
    SecondLayerSVM.Train(trainingDescriptors, labels, null, null, p);
}
public static SVM CreateSvm()
{
    // Build a fresh SVM instance populated from the persisted model file.
    SVM svm = new SVM();
    svm.Load(ModelFileName);
    return svm;
}
private void button1_Click(object sender, EventArgs e)
{
    // SVM demo: generate three Gaussian point clusters, train a linear C-SVC
    // via 5-fold TrainAuto, then paint decision regions, support vectors and
    // training samples into a 500x500 image shown in pictureBox1.
    int trainSampleCount = 150;
    int sigma = 60;

    #region Generate the training data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);
    Matrix<float> sample = new Matrix<float>(1, 2);

    // Class 1: x ~ N(100, sigma), y ~ N(300, sigma).
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);
    trainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    trainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    // Class 2: both coordinates ~ N(400, sigma).
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(sigma));
    // Class 3: x ~ N(300, sigma), y ~ N(100, sigma).
    Matrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    trainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));

    // Labels 1/2/3 for the three row bands.
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainClasses2.SetValue(2);
    Matrix<float> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainClasses3.SetValue(3);
    #endregion

    using (SVM model = new SVM())
    {
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);

        //bool trained = model.Train(trainData, trainClasses, null, null, p);
        // TrainAuto runs 5-fold cross-validated parameter search.
        bool trained = model.TrainAuto(trainData, trainClasses, null, null, p.MCvSVMParams, 5);

        // Classify every pixel coordinate and colour it by predicted class.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                float response = model.Predict(sample);
                img[i, j] = response == 1 ? new Bgr(90, 0, 0) : response == 2 ? new Bgr(0, 90, 0) : new Bgr(0, 0, 90);
            }
        }

        // Circle each support vector in grey.
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(128, 128, 128), 2);
        }
    }

    // display the original training samples
    for (int i = 0; i < (trainSampleCount / 3); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
        PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);
        img.Draw(new CircleF(p3, 2.0f), new Bgr(100, 100, 255), -1);
    }
    // NOTE(review): hard-coded path; Image.FromFile also keeps the bitmap
    // file locked until the image is disposed.
    img.Save(@"D:\Code\svm.bmp");
    // Emgu.CV.UI.ImageViewer.Show(img);
    pictureBox1.Image = Image.FromFile(@"D:\Code\svm.bmp");
}
static void Main(string[] args)
{
    // Gesture classification demo: load two gesture classes from CSV, compute
    // two metrics per gesture (scaled xy-ratio, scaled duration), train a
    // linear C-SVC via 5-fold TrainAuto, and visualise decision regions,
    // support vectors and samples in a 500x500 image.
    int trainSampleCount = 0;
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

    //conversion of CSV to gesture variables
    List<Gesture> gestureListClass1 = csvToGestureList(@"C:\Users\Dan\Desktop\thesis data\testEB-3-20.csv");
    List<Gesture> gestureListClass2 = csvToGestureList(@"C:\Users\Dan\Desktop\thesis data\testSNC-3-20.csv");
    trainSampleCount = (gestureListClass1.Count) + (gestureListClass2.Count); //set the sample count to the number of gestures we have available

    //create relevant matrices based on size of the gestureList
    Matrix<float> sample = new Matrix<float>(1, 2); //a sample has 2 columns because of 2 features
    Matrix<float> trainTestData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainTestClasses = new Matrix<float>(trainSampleCount, 1);

    // Compute the metric set for every gesture before reading it back.
    foreach (Gesture g in gestureListClass1) { g.runMetrics(); }
    foreach (Gesture g in gestureListClass2) { g.runMetrics(); }

    #region Generate the training data and classes
    //fill first set of data
    for (int i = 0; i < gestureListClass1.Count; i++)
    {
        double[] gMetrics = (gestureListClass1[i].returnMetrics()).ToArray();
        /*order of values
         * list[0] - xyRatio
         * list[1] - totalGestureTime
         */
        // Ad-hoc scaling so both features roughly fit the 500x500 plot.
        trainTestData[i, 0] = ((float)gMetrics[0]) * 150;
        trainTestData[i, 1] = ((float)gMetrics[1]) / 4;
    }
    Matrix<float> trainTestData1 = trainTestData.GetRows(0, gestureListClass1.Count, 1);

    // The second class occupies the remaining rows.
    for (int j = 0; j < gestureListClass2.Count; j++)
    {
        double[] gMetrics = (gestureListClass2[j].returnMetrics()).ToArray();
        trainTestData[(j + gestureListClass1.Count), 0] = (float)gMetrics[0] * 150;
        trainTestData[(j + gestureListClass1.Count), 1] = ((float)gMetrics[1]) / 4;
    }
    Matrix<float> trainTestData2 = trainTestData.GetRows(gestureListClass1.Count, trainSampleCount, 1);

    // Labels: class-1 rows -> 1, class-2 rows -> 2.
    Matrix<float> trainTestClasses1 = trainTestClasses.GetRows(0, gestureListClass1.Count, 1);
    trainTestClasses1.SetValue(1);
    Matrix<float> trainTestClasses2 = trainTestClasses.GetRows(gestureListClass1.Count, trainSampleCount, 1);
    trainTestClasses2.SetValue(2);
    #endregion

    using (SVM model = new SVM())
    {
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        //p.Gamma = 0.1;
        p.C = 10;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);

        //bool trained = model.Train(trainTestData, trainTestClasses, null, null, p);
        bool trained = model.TrainAuto(trainTestData, trainTestClasses, null, null, p.MCvSVMParams, 5);

        // Colour every pixel by the predicted class at that (x, y).
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                float response = model.Predict(sample);
                img[i, j] = response == 1 ? new Bgr(90, 0, 0) : new Bgr(0, 0, 90); //response == 2 ? new Bgr(0, 90, 0) :
            }
        }

        // Circle the support vectors.
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(255, 255, 128), 2);
        }
    }

    // display the original training samples
    // NOTE(review): the loop bound trainSampleCount/2 assumes roughly equal
    // class sizes; extra samples of the larger class are not drawn.
    for (int i = 0; i < (trainSampleCount / 2); i++)
    {
        if (i < trainTestData1.Rows)
        {
            PointF p1 = new PointF((trainTestData1[i, 0]), (trainTestData1[i, 1]));
            img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        }
        if (i < trainTestData2.Rows)
        {
            PointF p2 = new PointF((trainTestData2[i, 0]), (trainTestData2[i, 1]));
            img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 100, 255), -1);
        }
    }
    Emgu.CV.UI.ImageViewer.Show(img);
}
private void button2_Click(object sender, EventArgs e)
{
    // Training entry point: for every "<name>_class.bmp" label image that has
    // a matching "<name>.tif" source image, accumulate pixel features via
    // Cal_features(), then train an RBF C-SVC (5-fold TrainAuto) and save it
    // to train_func.xml.
    folderBrowserDialog1.SelectedPath = @"D:\Code\Data";
    try
    {
        if (folderBrowserDialog1.ShowDialog() == DialogResult.OK)
        {
            String[] files = System.IO.Directory.GetFiles(folderBrowserDialog1.SelectedPath);
            // Start with a fresh feature accumulator.
            if (trainData_all != null) { trainData_all.Dispose(); trainData_all = null; }
            foreach (String name in files)
            {
                if (name.Contains("_class"))
                {
                    if (Class_I != null) { Class_I.Dispose(); Class_I = null; }
                    if (Train_I != null) { Train_I.Dispose(); Train_I = null; }
                    //////////
                    // Derive the source image name: drop "_class", swap bmp -> tif.
                    String name2 = name.Replace("_class", "");
                    name2 = name2.Replace("bmp", "tif");
                    if (files.Contains(name2))
                    {
                        Class_I = new Image<Bgr, Byte>(name).Resize(scale, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR); ////// class-label image
                        Train_I = new Image<Bgr, Byte>(name2).Resize(scale, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR); ////// original image
                        Cal_features();
                    }
                }
            }
            ///////SVM
            using (Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM())
            {
                SVMParams p = new SVMParams();
                /* p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR; p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC; p.C = 1; p.TermCrit = new MCvTermCriteria(100, 0.00001);*/
                p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.RBF;
                p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
                p.Gamma = 0.1;
                p.Coef0 = 0;
                p.Degree = 2;
                p.C = 1;
                p.TermCrit = new MCvTermCriteria(500, 0.00001);
                //bool trained = model.Train(trainData, trainClasses, null, null, p);
                bool trained = model.TrainAuto(trainData_all, trainClasses_all, null, null, p.MCvSVMParams, 5);
                model.Save(@"D:\Code\Data\train_func.xml");
            }
        }
    }
    catch
    {
        // NOTE(review): the bare catch swallows the real failure; the message
        // box text ("read error") is user-facing and kept byte-identical.
        MessageBox.Show("讀取錯誤!");
    }
} // end of button2
//SVMParams _parameters
public SVMClassifier(string filename)
{
    // Restore a previously-saved SVM model from the given file path.
    SVM restored = new SVM();
    restored.Load(filename);
    model = restored;
}
public void TrainEach(string dir)
{
    // Train the second-layer SVM for one class directory and persist the
    // vocabulary + model under <dir>\obj\.
    directory = dir;
    // Enumerate the directory once instead of three separate GetFiles()
    // passes (the original also counted files with a manual foreach loop).
    FileInfo[] files = new DirectoryInfo(dir).GetFiles();
    SecondLayerNum += files.Length;
    labels = new Matrix<float>(SecondLayerNum, 1);
    trainingDescriptors = new Matrix<float>(SecondLayerNum, classNum);
    foreach (FileInfo file in files)
    {
        Extract(new Image<Bgr, byte>(file.FullName));
    }
    MakeDic();
    // Descriptive exception instead of the original throw new Exception("!!!")
    // (InvalidOperationException is still caught by any existing catch(Exception)).
    if (SecondLayerDic == null)
        throw new InvalidOperationException("MakeDic() produced no second-layer vocabulary for directory: " + dir);
    j = 0;
    foreach (FileInfo file in files)
    {
        MakeDescriptors(new Image<Bgr, byte>(file.FullName));
    }
    Console.WriteLine();
    SecondLayerSVM = new SVM();
    SVMParams p = new SVMParams();
    p.KernelType = SVM_KERNEL_TYPE.LINEAR;
    p.SVMType = SVM_TYPE.C_SVC;
    p.C = 1;
    p.TermCrit = new MCvTermCriteria(100, 0.00001);
    SecondLayerSVM.Train(trainingDescriptors, labels, null, null, p);
    // NOTE(review): BinaryFormatter is insecure/obsolete (removed in .NET 9);
    // kept for format compatibility with the matching loader.
    IFormatter formatter = new BinaryFormatter();
    // using-statement closes the stream even if Serialize throws
    // (the original leaked the handle on failure).
    using (Stream fs = File.OpenWrite(directory + "\\obj\\dic.xml"))
    {
        formatter.Serialize(fs, SecondLayerDic);
    }
    SecondLayerSVM.Save(directory + "\\obj\\svm.xml");
}
private void button1_Click(object sender, EventArgs e)
{
    // Prediction entry point: for every image in the chosen folder that is
    // NOT a "_class" label image, extract per-pixel features, run the saved
    // SVM on each pixel, and save a copy with predicted class-1 pixels tinted
    // magenta as "<name>_predict.<ext>".
    folderBrowserDialog1.SelectedPath = @"D:\Code\Data";
    try
    {
        if (folderBrowserDialog1.ShowDialog() == DialogResult.OK)
        {
            String[] files = System.IO.Directory.GetFiles(folderBrowserDialog1.SelectedPath);
            foreach (String name in files)
            {
                if (name.Contains("_class")) ///////// label images are skipped here
                {
                    /* (removed: commented-out experiment that ran the same
                       per-pixel prediction on the labelled "_class" images,
                       using a model from an older hard-coded path) */
                }
                else
                {
                    if (Train_I != null) { Train_I.Dispose(); Train_I = null; }
                    if (Class_I != null) { Class_I.Dispose(); Class_I = null; }
                    Train_I = new Image<Bgr, Byte>(name).Resize(scale, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR); ////// original image
                    if (trainData_all != null) { trainData_all.Dispose(); trainData_all = null; }
                    Cal_features();
                    Image<Bgr, byte> predict_image = Train_I.Clone();
                    using (Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM())
                    {
                        model.Load(@"D:\Code\Data\train_func.xml");
                        // trainData_all holds one feature row per pixel in
                        // row-major order (row i*Width + j <-> pixel (i, j)).
                        for (int i = 0; i < Train_I.Height; i++)
                            for (int j = 0; j < Train_I.Width; j++)
                            {
                                Matrix<float> feature_test = trainData_all.GetRow(i * Train_I.Width + j);
                                int aaaaa = 1; // NOTE(review): leftover debug/breakpoint anchor
                                float predict_class = model.Predict(feature_test);
                                if (predict_class == 1)
                                {
                                    // Tint predicted foreground: max out blue
                                    // and red channels -> magenta.
                                    predict_image.Data[i, j, 0] = 255;
                                    predict_image.Data[i, j, 2] = 255;
                                }
                            }
                    }
                    // Insert "_predict" before the 3-letter extension.
                    String name2 = name.Insert(name.Length - 4, "_predict");
                    //String name2 = "predict";
                    predict_image.Save(name2);
                }
            }
        }
    }
    catch
    {
        // NOTE(review): the bare catch swallows the real failure; the
        // user-facing message ("read error") is kept byte-identical.
        MessageBox.Show("讀取錯誤!");
    }
}
static void Main(string[] args)
{
    // Gesture classification with the full 16-feature metric vector: load two
    // gesture classes from CSV, train a linear C-SVC via 5-fold TrainAuto,
    // save the model, and visualise the first two feature dimensions.
    int trainSampleCount = 0;
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

    //conversion of CSV to gesture variables
    List<Gesture> gestureListClass1 = csvToGestureList(@"C:\Users\faculty\Desktop\testEB-3-20.csv");
    List<Gesture> gestureListClass2 = csvToGestureList(@"C:\Users\faculty\Desktop\testSNC-3-20.csv");
    trainSampleCount = (gestureListClass1.Count) + (gestureListClass2.Count); //set the sample count to the number of gestures we have available

    //create relevant matrices based on size of the gestureList
    Matrix<float> sample = new Matrix<float>(1, 16);
    Matrix<float> trainTestData = new Matrix<float>(trainSampleCount, 16);
    Matrix<float> trainTestClasses = new Matrix<float>(trainSampleCount, 1);

    // Compute the metric set for every gesture before reading it back.
    foreach (Gesture g in gestureListClass1) { g.runMetrics(); }
    foreach (Gesture g in gestureListClass2) { g.runMetrics(); }

    #region Generate the training data and classes
    //fill first set of data
    for (int i = 0; i < gestureListClass1.Count; i++)
    {
        double[] gMetrics = (gestureListClass1[i].returnMetrics()).ToArray();
        /* metric order: [0] xyRatio, [1] totalGestureTime, [2] majorAvg,
         * [3] minorAvg, [4] avgXVelo, [5] avgYVelo, [6] majorVariance,
         * [7] minorVariance, then substroke velocity magnitude/direction
         * pairs: [8/9] first, [10/11] second, [12/13] third, [14/15] fourth. */
        trainTestData[i, 0] = ((float)gMetrics[0]) * 150; //xy ratio (scaled for the 500x500 plot)
        trainTestData[i, 1] = ((float)gMetrics[1]) / 4; //totalGestureTime (scaled)
        trainTestData[i, 2] = ((float)gMetrics[2]);
        trainTestData[i, 3] = ((float)gMetrics[3]);
        trainTestData[i, 4] = ((float)gMetrics[4]);
        trainTestData[i, 5] = ((float)gMetrics[5]);
        trainTestData[i, 6] = ((float)gMetrics[6]);
        trainTestData[i, 7] = ((float)gMetrics[7]);
        trainTestData[i, 8] = ((float)gMetrics[8]);
        trainTestData[i, 9] = ((float)gMetrics[9]);
        trainTestData[i, 10] = ((float)gMetrics[10]);
        trainTestData[i, 11] = ((float)gMetrics[11]);
        trainTestData[i, 12] = ((float)gMetrics[12]);
        trainTestData[i, 13] = ((float)gMetrics[13]);
        trainTestData[i, 14] = ((float)gMetrics[14]);
        trainTestData[i, 15] = ((float)gMetrics[15]);
    }
    Matrix<float> trainTestData1 = trainTestData.GetRows(0, gestureListClass1.Count, 1);

    // The second class occupies the remaining rows.
    for (int j = 0; j < gestureListClass2.Count; j++)
    {
        double[] gMetrics = (gestureListClass2[j].returnMetrics()).ToArray();
        trainTestData[(j + gestureListClass1.Count), 0] = (float)gMetrics[0] * 150;
        trainTestData[(j + gestureListClass1.Count), 1] = ((float)gMetrics[1]) / 4;
        trainTestData[(j + gestureListClass1.Count), 2] = ((float)gMetrics[2]);
        trainTestData[(j + gestureListClass1.Count), 3] = ((float)gMetrics[3]);
        trainTestData[(j + gestureListClass1.Count), 4] = ((float)gMetrics[4]);
        trainTestData[(j + gestureListClass1.Count), 5] = ((float)gMetrics[5]);
        trainTestData[(j + gestureListClass1.Count), 6] = ((float)gMetrics[6]);
        trainTestData[(j + gestureListClass1.Count), 7] = ((float)gMetrics[7]);
        trainTestData[(j + gestureListClass1.Count), 8] = ((float)gMetrics[8]);
        trainTestData[(j + gestureListClass1.Count), 9] = ((float)gMetrics[9]);
        trainTestData[(j + gestureListClass1.Count), 10] = ((float)gMetrics[10]);
        trainTestData[(j + gestureListClass1.Count), 11] = ((float)gMetrics[11]);
        trainTestData[(j + gestureListClass1.Count), 12] = ((float)gMetrics[12]);
        trainTestData[(j + gestureListClass1.Count), 13] = ((float)gMetrics[13]);
        trainTestData[(j + gestureListClass1.Count), 14] = ((float)gMetrics[14]);
        trainTestData[(j + gestureListClass1.Count), 15] = ((float)gMetrics[15]);
    }
    Matrix<float> trainTestData2 = trainTestData.GetRows(gestureListClass1.Count, trainSampleCount, 1);

    // Labels: class-1 rows -> 1, class-2 rows -> 2.
    Matrix<float> trainTestClasses1 = trainTestClasses.GetRows(0, gestureListClass1.Count, 1);
    trainTestClasses1.SetValue(1);
    Matrix<float> trainTestClasses2 = trainTestClasses.GetRows(gestureListClass1.Count, trainSampleCount, 1);
    trainTestClasses2.SetValue(2);
    #endregion

    using (SVM model = new SVM())
    {
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        //p.Gamma = 0.1;
        p.C = 10;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);

        //bool trained = model.Train(trainTestData, trainTestClasses, null, null, p);
        bool trained = model.TrainAuto(trainTestData, trainTestClasses, null, null, p.MCvSVMParams, 5);

        // Colour every pixel by the predicted class at that (x, y).
        // NOTE(review): only sample features 0 and 1 are assigned here; the
        // remaining 14 stay 0, so this plot is a 2-D slice of the 16-D space.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                float response = model.Predict(sample);
                img[i, j] = response == 1 ? new Bgr(90, 0, 0) : new Bgr(0, 0, 90); //response == 2 ? new Bgr(0, 90, 0) :
            }
        }

        // Circle the support vectors (only their first two components drawn).
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(255, 255, 128), 2);
        }
        model.Save(@"C:\Users\faculty\Desktop\svm-function3coord16.xml");
    }

    // display the original training samples
    // NOTE(review): the loop bound trainSampleCount/2 assumes roughly equal
    // class sizes; extra samples of the larger class are not drawn.
    for (int i = 0; i < (trainSampleCount / 2); i++)
    {
        if (i < trainTestData1.Rows)
        {
            PointF p1 = new PointF((trainTestData1[i, 0]), (trainTestData1[i, 1]));
            img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        }
        if (i < trainTestData2.Rows)
        {
            PointF p2 = new PointF((trainTestData2[i, 0]), (trainTestData2[i, 1]));
            img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 100, 255), -1);
        }
    }
    Emgu.CV.UI.ImageViewer.Show(img);
}
/// <summary>
/// Trains a 3-class SVM (C-SVC, intersection kernel) on synthetic 2-D Gaussian
/// clusters, round-trips the model through an XML file, colors every pixel of a
/// 500x500 image by its predicted class, and copies the support vectors into a
/// pinned PointF[] buffer. The final ImageViewer.Show call is commented out.
/// </summary>
public void TestSVM()
{
    int trainSampleCount = 150;
    int sigma = 60;

    #region Generate the training data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);
    Matrix<float> sample = new Matrix<float>(1, 2);

    // Three clusters of trainSampleCount/3 points each, centered at
    // (100, 300), (400, 400) and (300, 100), all with stddev sigma.
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);
    trainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    trainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(sigma));
    Matrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    trainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));

    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainClasses2.SetValue(2);
    Matrix<float> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainClasses3.SetValue(3);
    #endregion

    //using (SVM.Params p = new SVM.Params(MlEnum.SvmType.CSvc, MlEnum.SvmKernelType.Linear, 0, 1, 0, 1, 0, 0, null, new MCvTermCriteria(100, 1.0e-6)))
    using (SVM model = new SVM())
    using (Matrix<int> trainClassesInt = trainClasses.Convert<int>())
    using (TrainData td = new TrainData(trainData, MlEnum.DataLayoutType.RowSample, trainClassesInt))
    {
        model.Type = SVM.SvmType.CSvc;
        model.SetKernel(SVM.SvmKernelType.Inter);
        model.Degree = 0;
        model.Gamma = 1;
        model.Coef0 = 0;
        model.C = 1;
        model.Nu = 0;
        model.P = 0;
        model.TermCriteria = new MCvTermCriteria(100, 1.0e-6);
        //bool trained = model.TrainAuto(td, 5);
        model.Train(td);

#if !NETFX_CORE
        // Round-trip the trained model through an XML file to exercise Save/Read.
        String fileName = "svmModel.xml";
        //String fileName = Path.Combine(Path.GetTempPath(), "svmModel.xml");
        model.Save(fileName);
        // FIX: the reloaded model and the FileStorage were previously leaked;
        // both are IDisposable and are now scoped with using blocks.
        using (SVM model2 = new SVM())
        using (FileStorage fs = new FileStorage(fileName, FileStorage.Mode.Read))
        {
            model2.Read(fs.GetFirstTopLevelNode());
        }
        if (File.Exists(fileName))
            File.Delete(fileName);
#endif

        // Color each pixel by the class the SVM predicts for its (x, y) position.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                float response = model.Predict(sample);
                img[i, j] =
                    response == 1 ? new Bgr(90, 0, 0) :
                    response == 2 ? new Bgr(0, 90, 0) :
                    new Bgr(0, 0, 90);
            }
        }

        Mat supportVectors = model.GetSupportVectors();
        //TODO: find out how to draw the support vectors
        // Copy the support-vector matrix into a pinned PointF[] buffer
        // (one PointF per row: the two float columns of each vector).
        PointF[] vectors = new PointF[supportVectors.Rows];
        GCHandle handler = GCHandle.Alloc(vectors, GCHandleType.Pinned);
        try
        {
            using (Mat vMat = new Mat(supportVectors.Rows, supportVectors.Cols, DepthType.Cv32F, 1,
                handler.AddrOfPinnedObject(), supportVectors.Cols * 4))
            {
                supportVectors.CopyTo(vMat);
            }
        }
        finally
        {
            // FIX: free the pinned handle even if the copy throws.
            handler.Free();
        }

        /*
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(128, 128, 128), 2);
        }*/
    }

    // display the original training samples
    for (int i = 0; i < (trainSampleCount / 3); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
        PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);
        img.Draw(new CircleF(p3, 2.0f), new Bgr(100, 100, 255), -1);
    }
    //Emgu.CV.UI.ImageViewer.Show(img);
}
/// <summary>
/// Builds a synthetic 3-class 2-D training set, trains an SVM via TrainAuto
/// (5-fold), circles the support vectors and draws the training samples onto a
/// 500x500 image, and returns that image. Each stage's elapsed time is reported
/// through a MessageBox (the labels are Chinese runtime strings:
/// generate / train / paint / circle / mark — left untouched).
/// </summary>
/// <returns>The 500x500 visualization image.</returns>
private Image<Bgr, Byte> svm()
{
    Stopwatch timer = new Stopwatch();
    timer.Start();
    int trainSampleCount = 150;
    int sigma = 60;

    #region Generate the training data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);
    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);
    Matrix<float> sample = new Matrix<float>(1, 2);
    // Class 1 around (100, 300), class 2 around (400, 400), class 3 around (300, 100).
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);
    trainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    trainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(sigma));
    Matrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));
    trainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);
    trainClasses2.SetValue(2);
    Matrix<float> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);
    trainClasses3.SetValue(3);
    #endregion

    timer.Stop();
    MessageBox.Show("生成" + timer.ElapsedMilliseconds + "ms"); // stage timing: data generation
    timer.Reset();
    timer.Start();
    using (SVM model = new SVM())
    {
        // Linear C-SVC; TrainAuto ignores fixed C/gamma and cross-validates them.
        SVMParams p = new SVMParams();
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);
        //model.Load(@"D:\Play Data\训练数据");
        //bool trained = model.Train(trainData, trainClasses, null, null, p);
        bool trained = model.TrainAuto(trainData, trainClasses, null, null, p.MCvSVMParams, 5);
        timer.Stop();
        MessageBox.Show("训练" + timer.ElapsedMilliseconds + "ms"); // stage timing: training
        timer.Reset();
        timer.Start();
        // NOTE(review): the Predict/coloring statements below are commented out,
        // so this nested loop only writes into `sample` and has no visible
        // effect; it appears to be kept for timing comparison — confirm before
        // deleting it.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                //float response = model.Predict(sample);
                //img[i, j] =
                //    response == 1 ? new Bgr(90, 0, 0) :
                //    response == 2 ? new Bgr(0, 90, 0) :
                //    new Bgr(0, 0, 90);
            }
        }
        //model.Save(@"D:\Play Data\训练数据");
        timer.Stop();
        MessageBox.Show("染色" + timer.ElapsedMilliseconds + "ms"); // stage timing: pixel pass
        timer.Reset();
        timer.Start();
        // Circle each support vector of the trained model.
        int c = model.GetSupportVectorCount();
        for (int i = 0; i < c; i++)
        {
            float[] v = model.GetSupportVector(i);
            PointF p1 = new PointF(v[0], v[1]);
            img.Draw(new CircleF(p1, 4), new Bgr(128, 128, 128), 2);
        }
        timer.Stop();
        MessageBox.Show("画圈" + timer.ElapsedMilliseconds + "ms"); // stage timing: support-vector circles
        timer.Reset();
        timer.Start();
    }

    // display the original training samples
    for (int i = 0; i < (trainSampleCount / 3); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
        PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);
        img.Draw(new CircleF(p3, 2.0f), new Bgr(100, 100, 255), -1);
    }
    timer.Stop();
    MessageBox.Show("标点" + timer.ElapsedMilliseconds + "ms"); // stage timing: sample dots
    timer.Reset();
    timer.Start();
    return img;
}
/// <summary>
/// Get the default parameter grid for the specific SVM type
/// </summary>
/// <param name="type">The SVM type</param>
/// <returns>The default parameter grid for the specific SVM type</returns>
public static MCvParamGrid GetDefaultGrid(SVM.ParamType type)
{
    MCvParamGrid defaultGrid = new MCvParamGrid();
    // Native call fills the grid in place for the requested parameter type.
    MlInvoke.CvSVMGetDefaultGrid(type, ref defaultGrid);
    return defaultGrid;
}
public FeatureRecognizer() { isTrained = false; SVMParameters = new SVMParams(); SVMParameters.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC; //SVMParameters.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.NU_SVC; //p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.POLY; //SVMParameters.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR; SVMParameters.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.POLY; //SVMParameters.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.RBF; SVMParameters.Gamma = 3; SVMParameters.Degree = 3; SVMParameters.C = 1; SVMParameters.TermCrit = new MCvTermCriteria(100, 0.00001); //SVMParameters.Nu //SVMParameters.P //SVMParameters.Coef0 = paramsm = new MCvSVMParams(); paramsm.svm_type = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC; paramsm.kernel_type = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR; paramsm.gamma = 2; paramsm.degree = 3; paramsm.C = 3; paramsm.term_crit = new MCvTermCriteria(100, 0.00001); //debugImages = new List<DebugImage>(); debugImages = null; //SVMModel = new SVM(); // foreach(SignShape shape in Enum.GetValues(typeof(SignShape))) for (int i = 0;i < signShapeCount;i++) { SVM model = new SVM(); SVMModels.Add(model); } int R = 10; int C = 10; double v = 2; for (int i = 0; i < 8; i++) { GWOutputImage = GWOutputImage.ConcateHorizontal(GaborWavelet(R, C, i, v)); Image<Gray, double> GW = GaborWavelet(R, C, i, v); ConvolutionKernelF ckernel = new ConvolutionKernelF(10, 10); for (int l = 0; l < 10; l++) for (int k = 0; k < 10; k++) ckernel[l, k] = (float)GW[l, k].Intensity / 10000; ckernel.Center = new Point(5, 5); cKernelList.Add(ckernel); } }
/// <summary>
/// Slides an mH x mW mask over gR_R_Picked_Map, computing per-window average,
/// standard deviation and entropy features plus a binary label from Class_I
/// (label 1 when more than 70% of the window pixels are marked), trains an
/// RBF-kernel SVM on those features, saves it to disk, then reloads the model
/// and paints the windows predicted as class 1 onto a copy of Train_I2.
/// </summary>
private void button7_Click(object sender, EventArgs e)
{
    ///////////////////////////////////////////// Test ///////////////////////////////////////
    //Create a matrix
    //Matrix<Byte> matrix1 = new Matrix<Byte>(10, 10);
    //Byte element = 0;
    ////Set the elements
    //for (int i = 0; i < 10; i++)
    //{
    //    for (int j = 0; j < 10; j++)
    //    {
    //        matrix1.Data[i, j] = 1;
    //        //matrix1.Data[i, j] = element;
    //        element++;
    //    }
    //}
    //int WW = matrix1.Cols;
    //int HH = matrix1.Rows;
    ///////////////////////////////////////////// Test ///////////////////////////////////////
    int W = Train_I.Cols;
    int H = Train_I.Rows;
    int rW = (mW - 1) / 2; // mask half-width
    int rH = (mH - 1) / 2; // mask half-height
    double pp = 0;
    double ee = 0;
    int ind = 0; // running index of the current window / feature row
    int count;
    // Window centers range over [r+1, dim-r) so the mask stays inside the image.
    int maskCenter_H_start = rH + 1;
    int maskCenter_W_start = rW + 1;
    int maskCenter_H_end = H - rH;
    int maskCenter_W_end = W - rW;
    // One feature value per window position: average, stddev and entropy.
    double[] fVave = new double[ ( maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start) ];
    double[] fVstd = new double[( maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start) ];
    double[] fVentropy = new double[( maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start) ];
    classV = new Matrix<float>( (maskCenter_H_end - maskCenter_H_start) * (maskCenter_W_end - maskCenter_W_start), 1);
    // Mask scan over the whole image "gR_R_Picked_Map"
    for (int i = maskCenter_H_start; i < maskCenter_H_end; i++)
    {
        for (int j = maskCenter_W_start; j < maskCenter_W_end; j++)
        {
            double sum = 0;
            double ave = 0;
            double sumDiff = 0;
            double std = 0;
            entropy = 0;
            count = 0; // initial value all 0, since every mask should count independently
            // First pass over the mask: sum of intensities + marked-pixel count.
            for (int k = 0; k < mH; k++)
            {
                for (int l = 0; l < mW; l++)
                {
                    // NOTE(review): the extra "-1" shifts the window so it is
                    // centered on (i-1, j-1) rather than (i, j) — the original
                    // author flagged this too ("Bug????"). Indices stay in
                    // bounds, but confirm the shift is intended.
                    int mask_x = j - rW + l - 1;
                    int mask_y = i - rH + k - 1;
                    sum = sum + gR_R_Picked_Map[ mask_y, mask_x ]; // Bug????
                    //sum = sum + matrix1[i - rH + k - 1 , j - rW + l - 1 ]; // test
                    // judge mask is PART of TREE or NOT
                    if (Class_I.Data[ mask_y, mask_x , 0] == 1)
                    {
                        count ++;
                    }
                }
            } // end of mask
            // To detect the binary values in the mask and record it in classV
            float maskSampleCount = mH * mW;
            float mask_plant_percent = (count /maskSampleCount);
            // Label the window as "plant" (1) when more than 70% of it is marked.
            if ( mask_plant_percent > 0.7 )
            {
                classV[ind, 0] = 1;
            }
            else
            {
                classV[ind, 0] = 0;
            }
            // Ave
            ave = sum / (maskSampleCount);
            // StandardDeviation (sample stddev: divides by n-1)
            for (int k = 0; k < mH; k++)
            {
                for (int l = 0; l < mW; l++)
                {
                    int mask_x = j - rW + l - 1;
                    int mask_y = i - rH + k - 1;
                    sumDiff = sumDiff + Math.Pow((gR_R_Picked_Map[ mask_y, mask_x] - ave), 2);
                    // sumDiff = sumDiff + Math.Pow((matrix1[i - rH + k - 1, j - rW + l - 1] - ave), 2); //test
                }
            }
            std = Math.Sqrt(sumDiff / (maskSampleCount - 1));
            // Entropy: -sum(p * ln p) with p = pixel / window sum.
            // NOTE(review): this loop starts at k=1, l=1 (unlike the ave/std
            // loops, which start at 0), so the first row and column of the
            // mask are skipped — confirm whether that is intended.
            for (int k = 1; k < mH; k++)
            {
                for (int l = 1; l < mW; l++)
                {
                    int mask_x = j - rW + l - 1;
                    int mask_y = i - rH + k - 1;
                    pp = gR_R_Picked_Map[mask_y, mask_x] / sum;
                    //p = matrix1[i - rH + k - 1, j - rW + l - 1] / sum;
                    if (pp > 0) // Log p, p > 0
                    {
                        ee = Math.Log(pp);
                    }
                    else
                    {
                        ee = 0;
                    }
                    //System.Console.WriteLine("-");
                    //System.Console.WriteLine(p);
                    //System.Console.WriteLine(ee);
                    //System.Console.WriteLine("-");
                    entropy = entropy + (-(pp * ee));
                }
            }
            fVave[ind] = ave;
            fVstd[ind] = std;
            fVentropy[ind] = entropy;
            ind++;
        } // for j
    } // for i
    // Pack the three feature vectors into one (ind x 3) training matrix.
    trainData_all = new Matrix<float>(ind, 3);
    for (int i = 0; i < ind; i++)
    {
        trainData_all.Data[i, 0] = (float)fVave[i];
        trainData_all.Data[i, 1] = (float)fVstd[i];
        trainData_all.Data[i, 2] = (float)fVentropy[i];
    }
    // Train an RBF C-SVC on the window features and persist it.
    using (Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM())
    {
        SVMParams p = new SVMParams();
        /* p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(100, 0.00001);*/
        p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.RBF;
        p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;
        p.Gamma = 0.1;
        p.Coef0 = 0;
        p.Degree = 2;
        p.C = 1;
        p.TermCrit = new MCvTermCriteria(500, 0.00001); // termination criterion: at most 500 iterations, epsilon controls result accuracy
        //bool trained = model.Train(trainData, trainClasses, null, null, p);
        bool trained = model.TrainAuto(trainData_all, classV, null, null, p.MCvSVMParams, 5);
        model.Save(@"D:\Code\Data\train_func.xml");
    }
    System.Console.WriteLine(" END OF button7_Click Masking ");
    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Reload the saved model and paint every window predicted as class 1
    // (channels 0 and 2 set to 255) onto a copy of Train_I2.
    Image<Bgr, byte> predict_image = Train_I2.Clone();
    using (Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM())
    {
        model.Load(@"D:\Code\Data\train_func.xml");
        int ind_predict = 0; // walks trainData_all in the same window order as above
        for (int i = maskCenter_H_start; i < maskCenter_H_end; i++)
            for (int j = maskCenter_W_start; j < maskCenter_W_end; j++)
            {
                Matrix<float> feature_test = TakeRow(trainData_all.Data, ind_predict);
                //Matrix<float> feature_test = trainData_all.GetRow( ind_predict ) ;
                // System.Console.WriteLine(feature_test);
                float predict_class = model.Predict( feature_test );
                if (predict_class == 1)
                {
                    for (int k = 0; k < mH; k++)
                        for (int l = 0; l < mW; l++)
                        {
                            int mask_x = j - rW + l -1;
                            int mask_y = i - rH + k -1;
                            predict_image.Data[ mask_y, mask_x , 0] = 255;
                            predict_image.Data[mask_y , mask_x , 2] = 255;
                        }
                }
                ind_predict++;
                // System.Console.WriteLine(predict_class);
            } // end of all
        //String name2 = name.Insert(name.Length - 4, "_predict");
        String name3 = "predict"; // NOTE(review): unused local
        predict_image.Save(@"D:\Code\Data\predict.bmp");
        pictureBox9.Image = Image.FromFile(@"D:\Code\Data\predict.bmp");
        // Image<Bgr, byte> predict_image = Train_I2.Clone();
        System.Console.WriteLine(" END OF Predict ");
    }
}
/* public void OnContactRecordGesture(object sender, FrameReceivedEventArgs e)
{
    if (isTouching)
    {
        if (normalizedImage == null)
        {
            e.TryGetRawImage(
                ImageType.Normalized,
                0, 0,
                InteractiveSurface.DefaultInteractiveSurface.Width,
                InteractiveSurface.DefaultInteractiveSurface.Height,
                out normalizedImage,
                out normalizedMetrics);
        }
        else //updates raw image data
        {
            e.UpdateRawImage(
                ImageType.Normalized,
                normalizedImage,
                0, 0,
                InteractiveSurface.DefaultInteractiveSurface.Width,
                InteractiveSurface.DefaultInteractiveSurface.Height);
        }
        capture.OnContactRecordHelper(normalizedImage, normalizedMetrics);
    }
} */
/// <summary>
/// Controls what occurs when the user stops a gesture.
/// NOTE: This event fires more than it ought, so any capture shorter than
/// 600 milliseconds is ignored. For accepted gestures, the data is written
/// to CSV, converted to a feature vector, and classified by an SVM loaded
/// from disk to decide which user performed the gesture.
/// </summary>
public void OffContactStopRecord(object sender, ContactEventArgs e)
{
    capture.OffContactHelper();
    // Guard clause: discard spuriously short captures (see NOTE above).
    if (capture.totalMillisec <= 600)
    {
        return;
    }

    lock (obj)
    {
        writeToCSV();
    }
    capture.calculateRatio(touchManager);
    advanceGesture();

    Gesture recorded = stListToGesture(touchManager);
    float prediction;
    using (SVM svm = new SVM())
    {
        svm.Load(@"C:\Users\faculty\Desktop\svm-function3coord16.xml");
        prediction = svm.Predict(getProcessedGesture(recorded));
    }
    // Class 1 => Evan, anything else => Stevie.
    gestureOwner = prediction == 1 ? "Evan" : "Stevie";
}