// Use this for initialization
void Start()
{
    // Load the frontal-face Haar cascade and open camera 0 at the configured size.
    cascade = CvHaarClassifierCascade.FromFile(@"./Assets/haarcascade_frontalface_alt.xml");
    capture = Cv.CreateCameraCapture(0);
    Cv.SetCaptureProperty(capture, CaptureProperty.FrameWidth, CAPTURE_WIDTH);
    Cv.SetCaptureProperty(capture, CaptureProperty.FrameHeight, CAPTURE_HEIGHT);

    // Grab one frame so the capture device is warmed up before the first
    // per-frame update. The returned IplImage is owned by the capture and
    // was never used here, so the result is intentionally discarded.
    Cv.QueryFrame(capture);

    Cv.NamedWindow("FaceDetect");

    // NOTE(review): the original code also created a CvSVM, CvTermCriteria and
    // CvSVMParams here that were never used afterwards; the CvSVM wrapped native
    // memory and was never disposed, so that dead (leaking) code was removed.
}
/// <summary>
/// CvSVM sample: solves a three-class classification problem on 2D vectors
/// using an SVM with an RBF kernel, then visualizes the decision regions,
/// the training points, and the support vectors in an OpenCV window.
/// </summary>
public SVM()
{
    // CvSVM
    // Solve a three-class classification problem on 2D vectors using an SVM.
    const int S = 1000;     // number of training samples
    const int SIZE = 400;   // width/height of the visualization image in pixels
    CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);
    // (1) Allocate and initialize the image area
    using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
    {
        img.Zero();
        // (2) Generate the training data: random points labeled 1/2/3 by
        // hand-crafted regions (below a cosine curve, then split at X = 200),
        // each drawn as a small colored X marker.
        CvPoint[] pts = new CvPoint[S];
        int[] res = new int[S];
        for (int i = 0; i < S; i++)
        {
            pts[i].X = (int)(rng.RandInt() % SIZE);
            pts[i].Y = (int)(rng.RandInt() % SIZE);
            if (pts[i].Y > 50 * Math.Cos(pts[i].X * Cv.PI / 100) + 200)
            {
                // Class 1 (red): below the cosine boundary
                img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                res[i] = 1;
            }
            else
            {
                if (pts[i].X > 200)
                {
                    // Class 2 (green): above the curve, right half
                    img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                    img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                    res[i] = 2;
                }
                else
                {
                    // Class 3 (blue): above the curve, left half
                    img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                    img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                    res[i] = 3;
                }
            }
        }
        // (3) Display the training data (blocks until a key is pressed)
        Cv.NamedWindow("SVM", WindowMode.AutoSize);
        Cv.ShowImage("SVM", img);
        Cv.WaitKey(0);
        // (4) Build the training feature array: coordinates scaled to [0, 1)
        float[] data = new float[S * 2];
        for (int i = 0; i < S; i++)
        {
            data[i * 2] = ((float)pts[i].X) / SIZE;
            data[i * 2 + 1] = ((float)pts[i].Y) / SIZE;
        }
        // (5) Train the SVM (C-SVC with an RBF kernel)
        using (CvSVM svm = new CvSVM())
        {
            CvMat data_mat = new CvMat(S, 2, MatrixType.F32C1, data);
            CvMat res_mat = new CvMat(S, 1, MatrixType.S32C1, res);
            CvTermCriteria criteria = new CvTermCriteria(1000, float.Epsilon);
            CvSVMParams param = new CvSVMParams(SVMType.CSvc, SVMKernelType.Rbf, 10.0, 8.0, 1.0, 10.0, 0.5, 0.1, null, criteria);
            svm.Train(data_mat, res_mat, null, null, param);
            // (6) Draw the learned decision regions: predict the class of every
            // pixel (in the same scaled coordinates) and tint it dark red/green/blue
            for (int i = 0; i < SIZE; i++)
            {
                for (int j = 0; j < SIZE; j++)
                {
                    float[] a = { (float)j / SIZE, (float)i / SIZE };
                    CvMat m = new CvMat(1, 2, MatrixType.F32C1, a);
                    float ret = svm.Predict(m);
                    CvColor color = new CvColor();
                    switch ((int)ret)
                    {
                        case 1: color = new CvColor(100, 0, 0); break;
                        case 2: color = new CvColor(0, 100, 0); break;
                        case 3: color = new CvColor(0, 0, 100); break;
                    }
                    img[i, j] = color;
                }
            }
            // (7) Redraw the training data on top of the decision regions
            for (int i = 0; i < S; i++)
            {
                CvColor color = new CvColor();
                switch (res[i])
                {
                    case 1: color = new CvColor(255, 0, 0); break;
                    case 2: color = new CvColor(0, 255, 0); break;
                    case 3: color = new CvColor(0, 0, 255); break;
                }
                img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), color);
                img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), color);
            }
            // (8) Draw the support vectors as gray circles (scaled back to pixels)
            int sv_num = svm.GetSupportVectorCount();
            for (int i = 0; i < sv_num; i++)
            {
                var support = svm.GetSupportVector(i);
                img.Circle(new CvPoint((int)(support[0] * SIZE), (int)(support[1] * SIZE)), 5, new CvColor(200, 200, 200));
            }
            // (9) Display the final image, wait for a key, then close the window
            Cv.NamedWindow("SVM", WindowMode.AutoSize);
            Cv.ShowImage("SVM", img);
            Cv.WaitKey(0);
            Cv.DestroyWindow("SVM");
        }
    }
}
/// <summary>
/// 初期化
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="params"></param>
#else
/// <summary>
/// Training constructor: creates the native SVM model from the given data.
/// </summary>
/// <param name="trainData">Matrix of training samples.</param>
/// <param name="responses">Responses (class labels or regression targets), one per sample.</param>
/// <param name="varIdx">Optional mask of variables to use; null means use all variables.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means use all samples.</param>
/// <param name="params">Training parameters; null selects the defaults.</param>
#endif
public CvSVM(CvMat trainData, CvMat responses, CvMat varIdx, CvMat sampleIdx, CvSVMParams @params)
{
    if (trainData == null)
        throw new ArgumentNullException("trainData");
    if (responses == null)
        throw new ArgumentNullException("responses");

    // Fall back to default parameters when none were supplied.
    if (@params == null)
        @params = new CvSVMParams();

    // Optional index matrices map to NULL pointers on the native side.
    IntPtr varIdxPtr = IntPtr.Zero;
    if (varIdx != null)
        varIdxPtr = varIdx.CvPtr;
    IntPtr sampleIdxPtr = IntPtr.Zero;
    if (sampleIdx != null)
        sampleIdxPtr = sampleIdx.CvPtr;

    ptr = MLInvoke.CvSVM_construct_training(
        trainData.CvPtr,
        responses.CvPtr,
        varIdxPtr,
        sampleIdxPtr,
        @params.NativeStruct);
    NotifyMemoryPressure(SizeOf);
}
/// <summary>
/// SVMを最適なパラメータで学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="params"></param>
/// <param name="kFold">交差検定(Cross-validation)パラメータ.学習集合は,k_foldの部分集合に分割され,一つの部分集合がモデルの学習に用いられ,その他の部分集合はテスト集合となる.つまり,SVM アルゴリズムは,k_fold回実行される.</param>
/// <param name="cGrid"></param>
/// <param name="gammaGrid"></param>
/// <param name="pGrid"></param>
/// <param name="nuGrid"></param>
/// <param name="coefGrid"></param>
/// <param name="degreeGrid"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains SVM with optimal parameters, searching each parameter over the supplied grid.
/// </summary>
/// <param name="trainData">Matrix of training samples.</param>
/// <param name="responses">Responses, one per sample.</param>
/// <param name="varIdx">Optional mask of variables to use; null means all.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all.</param>
/// <param name="params">Initial training parameters; null selects the defaults.</param>
/// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times. </param>
/// <param name="cGrid">Search grid for the C parameter.</param>
/// <param name="gammaGrid">Search grid for the gamma parameter.</param>
/// <param name="pGrid">Search grid for the p parameter.</param>
/// <param name="nuGrid">Search grid for the nu parameter.</param>
/// <param name="coefGrid">Search grid for the coef parameter.</param>
/// <param name="degreeGrid">Search grid for the degree parameter.</param>
/// <returns>Result of the native training call.</returns>
#endif
public virtual bool TrainAuto(CvMat trainData, CvMat responses, CvMat varIdx, CvMat sampleIdx,
    CvSVMParams @params, int kFold, CvParamGrid cGrid, CvParamGrid gammaGrid, CvParamGrid pGrid,
    CvParamGrid nuGrid, CvParamGrid coefGrid, CvParamGrid degreeGrid)
{
    if (trainData == null)
        throw new ArgumentNullException("trainData");
    if (responses == null)
        throw new ArgumentNullException("responses");

    // Use default parameters when the caller passed none.
    if (@params == null)
        @params = new CvSVMParams();

    // Optional index matrices map to NULL pointers on the native side.
    IntPtr varIdxPtr = (varIdx != null) ? varIdx.CvPtr : IntPtr.Zero;
    IntPtr sampleIdxPtr = (sampleIdx != null) ? sampleIdx.CvPtr : IntPtr.Zero;

    return MLInvoke.CvSVM_train_auto(
        ptr,
        trainData.CvPtr,
        responses.CvPtr,
        varIdxPtr,
        sampleIdxPtr,
        @params.NativeStruct,
        kFold,
        cGrid,
        gammaGrid,
        pGrid,
        nuGrid,
        coefGrid,
        degreeGrid);
}
/// <summary>
/// SVMを最適なパラメータで学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="params"></param>
/// <param name="kFold">交差検定(Cross-validation)パラメータ.学習集合は,k_foldの部分集合に分割され,一つの部分集合がモデルの学習に用いられ,その他の部分集合はテスト集合となる.つまり,SVM アルゴリズムは,k_fold回実行される.</param>
/// <returns></returns>
#else
/// <summary>
/// Trains SVM with optimal parameters, using the default search grid for every tunable parameter.
/// </summary>
/// <param name="trainData">Matrix of training samples.</param>
/// <param name="responses">Responses, one per sample.</param>
/// <param name="varIdx">Optional mask of variables to use; null means all.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all.</param>
/// <param name="params">Initial training parameters; null selects the defaults.</param>
/// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times. </param>
/// <returns>Result of the native training call.</returns>
#endif
public virtual bool TrainAuto(CvMat trainData, CvMat responses, CvMat varIdx, CvMat sampleIdx, CvSVMParams @params, int kFold)
{
    // Delegate to the full overload with the library default grid for each parameter.
    CvParamGrid cGrid = GetDefaultGrid(SVMParamType.C);
    CvParamGrid gammaGrid = GetDefaultGrid(SVMParamType.Gamma);
    CvParamGrid pGrid = GetDefaultGrid(SVMParamType.P);
    CvParamGrid nuGrid = GetDefaultGrid(SVMParamType.Nu);
    CvParamGrid coefGrid = GetDefaultGrid(SVMParamType.Coef);
    CvParamGrid degreeGrid = GetDefaultGrid(SVMParamType.Degree);

    return TrainAuto(trainData, responses, varIdx, sampleIdx, @params, kFold,
        cGrid, gammaGrid, pGrid, nuGrid, coefGrid, degreeGrid);
}
/// <summary>
/// SVMを最適なパラメータで学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="params"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains SVM with optimal parameters, using 10-fold cross-validation and default search grids.
/// </summary>
/// <param name="trainData">Matrix of training samples.</param>
/// <param name="responses">Responses, one per sample.</param>
/// <param name="varIdx">Optional mask of variables to use; null means all.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all.</param>
/// <param name="params">Initial training parameters; null selects the defaults.</param>
/// <returns>Result of the native training call.</returns>
#endif
public virtual bool TrainAuto(CvMat trainData, CvMat responses, CvMat varIdx, CvMat sampleIdx, CvSVMParams @params)
{
    // 10-fold cross-validation is the conventional default.
    const int defaultKFold = 10;
    return TrainAuto(trainData, responses, varIdx, sampleIdx, @params, defaultKFold);
}
/// <summary>
/// SVMを学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="params"></param>
#else
/// <summary>
/// Trains SVM on the given data with fixed parameters.
/// </summary>
/// <param name="trainData">Matrix of training samples.</param>
/// <param name="responses">Responses, one per sample.</param>
/// <param name="varIdx">Optional mask of variables to use; null means all.</param>
/// <param name="sampleIdx">Optional mask of samples to use; null means all.</param>
/// <param name="params">Training parameters; null selects the defaults.</param>
#endif
public virtual bool Train(CvMat trainData, CvMat responses, CvMat varIdx, CvMat sampleIdx, CvSVMParams @params)
{
    if (trainData == null)
        throw new ArgumentNullException("trainData");
    if (responses == null)
        throw new ArgumentNullException("responses");

    // Use default parameters when the caller passed none.
    if (@params == null)
        @params = new CvSVMParams();

    // Optional index matrices map to NULL pointers on the native side.
    IntPtr varIdxPtr = (varIdx != null) ? varIdx.CvPtr : IntPtr.Zero;
    IntPtr sampleIdxPtr = (sampleIdx != null) ? sampleIdx.CvPtr : IntPtr.Zero;

    return MLInvoke.CvSVM_train(
        ptr,
        trainData.CvPtr,
        responses.CvPtr,
        varIdxPtr,
        sampleIdxPtr,
        @params.NativeStruct);
}