/// <summary>
/// 初期化
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="param"></param>
#else
/// <summary>
/// Training constructor: trains the SVM immediately from the supplied CvMat data.
/// </summary>
/// <param name="trainData">Training samples, one sample per row.</param>
/// <param name="responses">Response values (class labels or regression targets).</param>
/// <param name="varIdx">Optional mask of variables to use; null means all variables.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all samples.</param>
/// <param name="param">Training parameters; defaults are used when null.</param>
#endif
public CvSVM(
    CvMat trainData, CvMat responses,
    CvMat varIdx = null, CvMat sampleIdx = null, CvSVMParams param = null)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));

    // Fall back to default parameters when the caller supplied none.
    param = param ?? new CvSVMParams();

    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    ptr = NativeMethods.ml_CvSVM_new2_CvMat(
        trainData.CvPtr,
        responses.CvPtr,
        Cv2.ToPtr(varIdx),
        Cv2.ToPtr(sampleIdx),
        param.NativeStruct);
}
/// <summary>
/// SVMを学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="param"></param>
#else
/// <summary>
/// Trains the SVM on the given data.
/// </summary>
/// <param name="trainData">Training samples, one sample per row.</param>
/// <param name="responses">Response values (class labels or regression targets).</param>
/// <param name="varIdx">Optional mask of variables to use; null means all variables.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all samples.</param>
/// <param name="param">Training parameters; defaults are used when null.</param>
#endif
public virtual bool Train(
    Mat trainData, Mat responses,
    Mat varIdx = null, Mat sampleIdx = null, CvSVMParams param = null)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));

    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    // Fall back to default parameters when the caller supplied none.
    param = param ?? new CvSVMParams();

    int result = NativeMethods.ml_CvSVM_train_Mat(
        ptr,
        trainData.CvPtr,
        responses.CvPtr,
        Cv2.ToPtr(varIdx),
        Cv2.ToPtr(sampleIdx),
        param.NativeStruct);
    return result != 0;
}
/// <summary>
/// 初期化
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="param"></param>
#else
/// <summary>
/// Training constructor: trains the SVM immediately from the supplied CvMat data.
/// </summary>
/// <param name="trainData">Training samples, one sample per row.</param>
/// <param name="responses">Response values (class labels or regression targets).</param>
/// <param name="varIdx">Optional mask of variables to use; null means all variables.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all samples.</param>
/// <param name="param">Training parameters; defaults are used when null.</param>
#endif
public CvSVM(
    CvMat trainData, CvMat responses,
    CvMat varIdx = null, CvMat sampleIdx = null, CvSVMParams param = null)
{
    // Refactor-safe parameter names (nameof) and braced guards,
    // consistent with the other overloads in this file.
    if (trainData == null)
    {
        throw new ArgumentNullException(nameof(trainData));
    }
    if (responses == null)
    {
        throw new ArgumentNullException(nameof(responses));
    }
    if (param == null)
    {
        // Use default training parameters when the caller supplied none.
        param = new CvSVMParams();
    }
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    ptr = NativeMethods.ml_CvSVM_new2_CvMat(
        trainData.CvPtr,
        responses.CvPtr,
        Cv2.ToPtr(varIdx),
        Cv2.ToPtr(sampleIdx),
        param.NativeStruct);
}
/// <summary>
/// SVMを最適なパラメータで学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="param"></param>
/// <param name="kFold">交差検定(Cross-validation)パラメータ.学習集合は,k_foldの部分集合に分割され,一つの部分集合がモデルの学習に用いられ,その他の部分集合はテスト集合となる.つまり,SVM アルゴリズムは,k_fold回実行される.</param>
/// <param name="cGrid"></param>
/// <param name="gammaGrid"></param>
/// <param name="pGrid"></param>
/// <param name="nuGrid"></param>
/// <param name="coefGrid"></param>
/// <param name="degreeGrid"></param>
/// <param name="balanced"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains SVM with optimal parameters
/// </summary>
/// <param name="trainData">Training samples, one sample per row.</param>
/// <param name="responses">Response values (class labels or regression targets).</param>
/// <param name="varIdx">Mask of variables to use.</param>
/// <param name="sampleIdx">Mask of samples to use.</param>
/// <param name="param">Training parameters; defaults are used when null.</param>
/// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times.</param>
/// <param name="cGrid">Grid for C; defaults to GetDefaultGrid(SVMParamType.C) when null.</param>
/// <param name="gammaGrid">Grid for gamma; defaults to GetDefaultGrid(SVMParamType.Gamma) when null.</param>
/// <param name="pGrid">Grid for p; defaults to GetDefaultGrid(SVMParamType.P) when null.</param>
/// <param name="nuGrid">Grid for nu; defaults to GetDefaultGrid(SVMParamType.Nu) when null.</param>
/// <param name="coefGrid">Grid for coef0; defaults to GetDefaultGrid(SVMParamType.Coef) when null.</param>
/// <param name="degreeGrid">Grid for degree; defaults to GetDefaultGrid(SVMParamType.Degree) when null.</param>
/// <param name="balanced">If true, balances the cross-validation subsets.</param>
/// <returns>true if training succeeded.</returns>
#endif
public virtual bool TrainAuto(
    Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams param,
    int kFold = 10,
    CvParamGrid? cGrid = null, CvParamGrid? gammaGrid = null,
    CvParamGrid? pGrid = null, CvParamGrid? nuGrid = null,
    CvParamGrid? coefGrid = null, CvParamGrid? degreeGrid = null,
    bool balanced = false)
{
    if (trainData == null)
    {
        throw new ArgumentNullException(nameof(trainData));
    }
    if (responses == null)
    {
        throw new ArgumentNullException(nameof(responses));
    }
    if (varIdx == null)
    {
        throw new ArgumentNullException(nameof(varIdx));
    }
    if (sampleIdx == null)
    {
        throw new ArgumentNullException(nameof(sampleIdx));
    }
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();
    varIdx.ThrowIfDisposed();
    sampleIdx.ThrowIfDisposed();
    if (param == null)
    {
        param = new CvSVMParams();
    }

    // BUG FIX: previously every omitted grid fell back to the C grid.
    // OpenCV's train_auto defaults each grid to its own parameter type
    // (get_default_grid(GAMMA), get_default_grid(P), ...), so do the same here.
    var cGrid0      = cGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.C));
    var gammaGrid0  = gammaGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Gamma));
    var pGrid0      = pGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.P));
    var nuGrid0     = nuGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Nu));
    var coefGrid0   = coefGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Coef));
    var degreeGrid0 = degreeGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Degree));

    return NativeMethods.ml_CvSVM_train_auto_CvMat(
        ptr,
        trainData.CvPtr,
        responses.CvPtr,
        varIdx.CvPtr,
        sampleIdx.CvPtr,
        param.NativeStruct,
        kFold,
        cGrid0, gammaGrid0, pGrid0, nuGrid0, coefGrid0, degreeGrid0,
        balanced ? 1 : 0) != 0;
}
/// <summary>
/// SVM demo: generates random 2D training points labeled by their position
/// relative to f(x), trains an RBF-kernel SVM on them, then predicts a class
/// for every pixel of a 300x300 grid and plots the decision regions.
/// </summary>
public void Run()
{
    // Training data: 500 random points in [0,300)^2, labeled 1 if above f(x), else 2.
    var points = new CvPoint2D32f[500];
    var responses = new int[points.Length];
    var rand = new Random();
    for (int i = 0; i < responses.Length; i++)
    {
        double x = rand.Next(0, 300);
        double y = rand.Next(0, 300);
        points[i] = new CvPoint2D32f(x, y);
        responses[i] = (y > f(x)) ? 1 : 2;
    }

    // Show training data and the separating curve f(x).
    using (Mat pointsPlot = Mat.Zeros(300, 300, MatType.CV_8UC3))
    {
        for (int i = 0; i < points.Length; i++)
        {
            int x = (int)points[i].X;
            int y = (int)(300 - points[i].Y); // flip Y: image origin is top-left
            int res = responses[i];
            Scalar color = (res == 1) ? Scalar.Red : Scalar.GreenYellow;
            pointsPlot.Circle(x, y, 2, color, -1);
        }
        // f(x)
        for (int x = 1; x < 300; x++)
        {
            int y1 = (int)(300 - f(x - 1));
            int y2 = (int)(300 - f(x));
            pointsPlot.Line(x - 1, y1, x, y2, Scalar.LightBlue, 1);
        }
        Window.ShowImages(pointsPlot);
    }

    // Train
    var dataMat = new Mat(points.Length, 2, MatType.CV_32FC1, points);
    var resMat = new Mat(responses.Length, 1, MatType.CV_32SC1, responses);
    using (var svm = new CvSVM())
    {
        // normalize data to [0,1]
        dataMat /= 300.0;

        var criteria = TermCriteria.Both(1000, 0.000001);
        var param = new CvSVMParams(
            SVMType.CSvc,
            SVMKernelType.Rbf,
            100.0, // degree
            100.0, // gamma
            1.0,   // coeff0
            1.0,   // c
            0.5,   // nu
            0.1,   // p
            null,
            criteria);
        svm.Train(dataMat, resMat, null, null, param);

        // Predict for each 300x300 pixel
        using (Mat retPlot = Mat.Zeros(300, 300, MatType.CV_8UC3))
        {
            for (int x = 0; x < 300; x++)
            {
                for (int y = 0; y < 300; y++)
                {
                    float[] sample = { x / 300f, y / 300f };
                    // BUG FIX: the probe matrix was allocated per pixel without
                    // disposal, leaking 90,000 native CvMat instances per run.
                    using (var sampleMat = new CvMat(1, 2, MatrixType.F32C1, sample))
                    {
                        int ret = (int)svm.Predict(sampleMat);
                        var plotRect = new CvRect(x, 300 - y, 1, 1);
                        if (ret == 1)
                            retPlot.Rectangle(plotRect, Scalar.Red);
                        else if (ret == 2)
                            retPlot.Rectangle(plotRect, Scalar.GreenYellow);
                    }
                }
            }
            Window.ShowImages(retPlot);
        }
    }
}
/// <summary>
/// SVMを最適なパラメータで学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="param"></param>
/// <param name="kFold">交差検定(Cross-validation)パラメータ.学習集合は,k_foldの部分集合に分割され,一つの部分集合がモデルの学習に用いられ,その他の部分集合はテスト集合となる.つまり,SVM アルゴリズムは,k_fold回実行される.</param>
/// <param name="cGrid"></param>
/// <param name="gammaGrid"></param>
/// <param name="pGrid"></param>
/// <param name="nuGrid"></param>
/// <param name="coefGrid"></param>
/// <param name="degreeGrid"></param>
/// <param name="balanced"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains SVM with optimal parameters
/// </summary>
/// <param name="trainData">Training samples, one sample per row.</param>
/// <param name="responses">Response values (class labels or regression targets).</param>
/// <param name="varIdx">Mask of variables to use.</param>
/// <param name="sampleIdx">Mask of samples to use.</param>
/// <param name="param">Training parameters; defaults are used when null.</param>
/// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times.</param>
/// <param name="cGrid">Grid for C; defaults to GetDefaultGrid(SVMParamType.C) when null.</param>
/// <param name="gammaGrid">Grid for gamma; defaults to GetDefaultGrid(SVMParamType.Gamma) when null.</param>
/// <param name="pGrid">Grid for p; defaults to GetDefaultGrid(SVMParamType.P) when null.</param>
/// <param name="nuGrid">Grid for nu; defaults to GetDefaultGrid(SVMParamType.Nu) when null.</param>
/// <param name="coefGrid">Grid for coef0; defaults to GetDefaultGrid(SVMParamType.Coef) when null.</param>
/// <param name="degreeGrid">Grid for degree; defaults to GetDefaultGrid(SVMParamType.Degree) when null.</param>
/// <param name="balanced">If true, balances the cross-validation subsets.</param>
/// <returns>true if training succeeded.</returns>
#endif
public virtual bool TrainAuto(
    Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams param,
    int kFold = 10,
    CvParamGrid? cGrid = null, CvParamGrid? gammaGrid = null,
    CvParamGrid? pGrid = null, CvParamGrid? nuGrid = null,
    CvParamGrid? coefGrid = null, CvParamGrid? degreeGrid = null,
    bool balanced = false)
{
    // Refactor-safe parameter names (nameof), consistent with the sibling overloads.
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    if (varIdx == null)
        throw new ArgumentNullException(nameof(varIdx));
    if (sampleIdx == null)
        throw new ArgumentNullException(nameof(sampleIdx));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();
    varIdx.ThrowIfDisposed();
    sampleIdx.ThrowIfDisposed();
    if (param == null)
        param = new CvSVMParams();

    // BUG FIX: previously every omitted grid fell back to the C grid.
    // OpenCV's train_auto defaults each grid to its own parameter type
    // (get_default_grid(GAMMA), get_default_grid(P), ...), so do the same here.
    var cGrid0      = cGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.C));
    var gammaGrid0  = gammaGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Gamma));
    var pGrid0      = pGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.P));
    var nuGrid0     = nuGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Nu));
    var coefGrid0   = coefGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Coef));
    var degreeGrid0 = degreeGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Degree));

    return NativeMethods.ml_CvSVM_train_auto_CvMat(
        ptr,
        trainData.CvPtr,
        responses.CvPtr,
        varIdx.CvPtr,
        sampleIdx.CvPtr,
        param.NativeStruct,
        kFold,
        cGrid0, gammaGrid0, pGrid0, nuGrid0, coefGrid0, degreeGrid0,
        balanced ? 1 : 0) != 0;
}
/// <summary>
/// SVMを学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="param"></param>
#else
/// <summary>
/// Trains the SVM on the given data.
/// </summary>
/// <param name="trainData">Training samples, one sample per row.</param>
/// <param name="responses">Response values (class labels or regression targets).</param>
/// <param name="varIdx">Optional mask of variables to use; null means all variables.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all samples.</param>
/// <param name="param">Training parameters; defaults are used when null.</param>
#endif
public virtual bool Train(
    Mat trainData, Mat responses,
    Mat varIdx = null, Mat sampleIdx = null, CvSVMParams param = null)
{
    // Refactor-safe parameter names (nameof), consistent with the sibling overloads.
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();
    if (param == null)
        param = new CvSVMParams();

    return NativeMethods.ml_CvSVM_train_Mat(
        ptr,
        trainData.CvPtr,
        responses.CvPtr,
        Cv2.ToPtr(varIdx),
        Cv2.ToPtr(sampleIdx),
        param.NativeStruct) != 0;
}
/// <summary>
/// Builds the training data from the extracted face features and trains the
/// SVM classifier (creates the trained model / "learning file").
/// </summary>
/// <param name="FeatureList">Feature values (two components each) with their class IDs.</param>
public void TrainingExec(List<FaceFeature.FeatureValue> FeatureList)
{
    // Two floats per feature -> flat array of 2 * count values.
    double[] feature_array = new double[2 * FeatureList.Count];

    // Copy the feature values into an array layout the SVM can consume.
    SetFeatureListToArray(FeatureList, ref feature_array);

    // Re-pack the flat array as 2D points (x, y) for OpenCV.
    // (Previously this was done twice into two identical arrays with redundant
    // secondary counters; build it once and reuse it below.)
    CvPoint2D32f[] feature_points = new CvPoint2D32f[feature_array.Length / 2];
    for (int i = 0; i < feature_points.Length; i++)
    {
        feature_points[i].X = (float)feature_array[i * 2];
        feature_points[i].Y = (float)feature_array[i * 2 + 1];
    }
    CvMat dataMat = new CvMat(feature_points.Length, 2, MatrixType.F32C1, feature_points, true);

    // Class label (ID) for each sample.
    int[] id_array = new int[FeatureList.Count];
    for (int i = 0; i < id_array.Length; i++)
    {
        id_array[i] = FeatureList[i].ID;
    }
    CvMat resMat = new CvMat(id_array.Length, 1, MatrixType.S32C1, id_array, true);

    // Debug aids: plot the training data and dump the features with their IDs.
    Debug_DrawInputFeature(feature_points, id_array);
    OutPut_FeatureAndID(feature_points, id_array);

    // NOTE(review): self-contained LibSVM sanity check against wine.txt.
    // Its "accuracy" result is never used anywhere — debug/verification only;
    // consider removing once LibSVM integration is confirmed.
    SVMProblem problem = SVMProblemHelper.Load(@"wine.txt");
    SVMProblem testProblem = SVMProblemHelper.Load(@"wine.txt");
    SVMParameter parameter = new SVMParameter();
    parameter.Type = LibSVMsharp.SVMType.C_SVC;
    parameter.Kernel = LibSVMsharp.SVMKernelType.RBF;
    parameter.C = 1;
    parameter.Gamma = 1;
    SVMModel model = SVM.Train(problem, parameter);
    double[] target = new double[testProblem.Length];
    for (int i = 0; i < testProblem.Length; i++)
    {
        target[i] = SVM.Predict(model, testProblem.X[i]);
    }
    double accuracy = SVMHelper.EvaluateClassificationProblem(testProblem, target);

    // SVM training setup.
    CvTermCriteria criteria = new CvTermCriteria(1000, 0.000001);
    CvSVMParams param = new CvSVMParams(
        OpenCvSharp.CPlusPlus.SVMType.CSvc,
        OpenCvSharp.CPlusPlus.SVMKernelType.Rbf,
        10.0,   // degree
        100.0,  // gamma (tuned)
        1.0,    // coeff0
        10.0,   // c (tuned)
        0.5,    // nu
        0.1,    // p
        null,
        criteria);

    // Run the training.
    svm.Train(dataMat, resMat, null, null, param);

    Debug_DispPredict();
}