/// <summary>
/// Loads the object (template) image, keeps a BGR copy for drawing, and extracts
/// its SURF keypoints/descriptors into the <c>_ObjectKeypoints</c>/<c>_ObjectDescriptors</c> fields.
/// </summary>
/// <param name="fileName1">Path of the object image; loaded as 8-bit grayscale.</param>
/// <returns>The loaded grayscale object image (also stored in <c>_Obj</c>).</returns>
public IplImage Init(string fileName1)
{
    _Obj = Cv.LoadImage(fileName1, LoadMode.GrayScale);
    _ObjColor = Cv.CreateImage(_Obj.Size, BitDepth.U8, 3);
    Cv.CvtColor(_Obj, _ObjColor, ColorConversion.GrayToBgr);

    // BUG FIX: this storage was previously wrapped in a 'using' block, which disposed
    // the native memory at the end of this method — but _ObjectKeypoints and
    // _ObjectDescriptors are CvSeq instances allocated INSIDE this storage and are read
    // later by Test(), i.e. a use-after-free. The storage must outlive the sequences,
    // so it is deliberately not disposed here.
    // TODO(review): promote this storage to a field and dispose it when this object is torn down.
    CvMemStorage storage = Cv.CreateMemStorage(0);

    Stopwatch watch = Stopwatch.StartNew();
    {
        // Hessian threshold 500, extended (128-dim) descriptors.
        CvSURFParams param = new CvSURFParams(500, true);
        Cv.ExtractSURF(_Obj, null, out _ObjectKeypoints, out _ObjectDescriptors, storage, param);
        Console.WriteLine("Object Descriptors: {0}", _ObjectDescriptors.Total);
    }
    watch.Stop();
    // Previously the stopwatch was measured but its result was never reported.
    Console.WriteLine("Extraction time = {0}ms", watch.ElapsedMilliseconds);

    return _Obj;
}
/// <summary>
/// 画像中からSURF(Speeded Up Robust Features)を検出する
/// </summary>
/// <param name="image">8ビット,グレースケールの入力画像. </param>
/// <param name="mask">オプション:8ビットのマスク画像.非0 のマスクピクセルが50%以上を占める領域からのみ,特徴点検出を行う.</param>
/// <param name="keypoints">出力パラメータ.キーポイントのシーケンスへのポインタのポインタ. これは,CvSURFPoint 構造体のシーケンスになる</param>
/// <param name="descriptors">オプション:出力パラメータ.ディスクリプタのシーケンスへのポインタのポインタ. シーケンスの各要素は,params.extended の値に依存して, 64-要素,あるいは 128-要素の浮動小数点数(CV_32F)ベクトルとなる. パラメータが NULL の場合,ディスクリプタは計算されない.</param>
/// <param name="storage">キーポイントとディスクリプタが格納されるメモリストレージ</param>
/// <param name="param">CvSURFParams 構造体に入れられた,様々なアルゴリズムパラメータ</param>
/// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
#else
/// <summary>
/// Extracts Speeded Up Robust Features from image
/// </summary>
/// <param name="image">The input 8-bit grayscale image. </param>
/// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
/// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
/// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
/// <param name="storage">Memory storage where keypoints and descriptors will be stored. </param>
/// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
/// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
#endif
public static void ExtractSURF(CvArr image, CvArr mask, ref CvSeq<CvSURFPoint> keypoints, out CvSeq<float> descriptors, CvMemStorage storage, CvSURFParams param, bool useProvidedKeyPts)
{
    // BUG FIX: both guards previously threw ArgumentNullException("img") — a wrong
    // parameter name for 'image' and a copy/paste error for 'storage'.
    if (image == null)
        throw new ArgumentNullException("image");
    if (storage == null)
        throw new ArgumentNullException("storage");
    if (param == null)
        throw new ArgumentNullException("param"); // param.Struct is dereferenced below

    IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
    IntPtr descriptorsPtr = IntPtr.Zero;
    IntPtr keypointsPtr = IntPtr.Zero;

    if (useProvidedKeyPts)
    {
        // NOTE(review): a fresh empty sequence replaces whatever keypoints the caller
        // passed in via 'ref keypoints', so descriptors end up computed at zero
        // locations — verify whether the provided sequence's pointer should be
        // forwarded here instead.
        keypoints = new CvSeq<CvSURFPoint>(SeqType.Zero, storage);
        keypointsPtr = keypoints.CvPtr;
        CvInvoke.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, useProvidedKeyPts);
        descriptors = new CvSeq<float>(descriptorsPtr);
    }
    else
    {
        // Detect keypoints: the native call fills both sequence pointers.
        CvInvoke.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, useProvidedKeyPts);
        keypoints = new CvSeq<CvSURFPoint>(keypointsPtr);
        descriptors = new CvSeq<float>(descriptorsPtr);
    }
}
/// <summary>
/// 画像中からSURF(Speeded Up Robust Features)を検出する
/// </summary>
/// <param name="image">8ビット,グレースケールの入力画像. </param>
/// <param name="mask">オプション:8ビットのマスク画像.非0 のマスクピクセルが50%以上を占める領域からのみ,特徴点検出を行う.</param>
/// <param name="keypoints">出力パラメータ.キーポイントのシーケンスへのポインタのポインタ. これは,CvSURFPoint 構造体のシーケンスになる</param>
/// <param name="descriptors">オプション:出力パラメータ.ディスクリプタのシーケンスへのポインタのポインタ. シーケンスの各要素は,params.extended の値に依存して, 64-要素,あるいは 128-要素の浮動小数点数(CV_32F)ベクトルとなる. パラメータが NULL の場合,ディスクリプタは計算されない.</param>
/// <param name="storage">キーポイントとディスクリプタが格納されるメモリストレージ</param>
/// <param name="param">CvSURFParams 構造体に入れられた,様々なアルゴリズムパラメータ</param>
#else
/// <summary>
/// Extracts Speeded Up Robust Features from an image, detecting the keypoints itself.
/// </summary>
/// <param name="image">The input 8-bit grayscale image. </param>
/// <param name="mask">The optional input 8-bit mask. Features are only found in areas containing more than 50% non-zero mask pixels. </param>
/// <param name="keypoints">The output sequence of detected CvSURFPoint keypoints.</param>
/// <param name="descriptors">The output sequence of descriptors; each element is a 64- or 128-element floating-point (CV_32F) vector depending on params.extended. </param>
/// <param name="storage">Memory storage where keypoints and descriptors will be stored. </param>
/// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
#endif
public static void ExtractSURF(CvArr image, CvArr mask, out CvSeq<CvSURFPoint> keypoints, out CvSeq<float> descriptors, CvMemStorage storage, CvSURFParams param)
{
    // Delegate to the ref-based overload; a null seed sequence combined with
    // useProvidedKeyPts == false means "detect keypoints from scratch".
    CvSeq<CvSURFPoint> detected = null;
    ExtractSURF(image, mask, ref detected, out descriptors, storage, param, false);
    keypoints = detected;
}
/// <summary>
/// cvExtractSURF sample: finds SURF correspondences between a template image and a
/// scene image, outlines the located object, draws the matching point pairs, and
/// shows the results in two windows.
/// call cv::initModule_nonfree() before using SURF/SIFT.
/// </summary>
public SURFSample()
{
    CvCpp.InitModule_NonFree();

    using (IplImage obj = Cv.LoadImage(Const.ImageSurfBox, LoadMode.GrayScale))
    using (IplImage image = Cv.LoadImage(Const.ImageSurfBoxinscene, LoadMode.GrayScale))
    using (IplImage objColor = Cv.CreateImage(obj.Size, BitDepth.U8, 3))
    using (IplImage correspond = Cv.CreateImage(new CvSize(image.Width, obj.Height + image.Height), BitDepth.U8, 1))
    {
        Cv.CvtColor(obj, objColor, ColorConversion.GrayToBgr);

        // Build the montage: object image on top, scene image below.
        // (The oversized second ROI rect is clipped by SetImageROI, as in the
        // original OpenCV find_obj sample.)
        Cv.SetImageROI(correspond, new CvRect(0, 0, obj.Width, obj.Height));
        Cv.Copy(obj, correspond);
        Cv.SetImageROI(correspond, new CvRect(0, obj.Height, correspond.Width, correspond.Height));
        Cv.Copy(image, correspond);
        Cv.ResetImageROI(correspond);

        // SURF extraction on both images.
        CvSURFPoint[] objectKeypoints, imageKeypoints;
        float[][] objectDescriptors, imageDescriptors;
        Stopwatch watch = Stopwatch.StartNew();
        {
            // Hessian threshold 500, extended (128-dim) descriptors.
            CvSURFParams param = new CvSURFParams(500, true);
            Cv.ExtractSURF(obj, null, out objectKeypoints, out objectDescriptors, param);
            Console.WriteLine("Object Descriptors: {0}", objectDescriptors.Length);
            Cv.ExtractSURF(image, null, out imageKeypoints, out imageDescriptors, param);
            Console.WriteLine("Image Descriptors: {0}", imageDescriptors.Length);
        }
        watch.Stop();
        Console.WriteLine("Extraction time = {0}ms", watch.ElapsedMilliseconds);
        watch.Reset();
        watch.Start();

        // Outline the region of the scene where the object was located.
        CvPoint[] srcCorners = new CvPoint[4]
        {
            new CvPoint(0, 0),
            new CvPoint(obj.Width, 0),
            new CvPoint(obj.Width, obj.Height),
            new CvPoint(0, obj.Height)
        };
        CvPoint[] dstCorners = LocatePlanarObject(objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, srcCorners);
        if (dstCorners != null)
        {
            for (int i = 0; i < 4; i++)
            {
                CvPoint r1 = dstCorners[i % 4];
                CvPoint r2 = dstCorners[(i + 1) % 4];
                // Offset by obj.Height because the scene sits below the object in the montage.
                Cv.Line(correspond, new CvPoint(r1.X, r1.Y + obj.Height), new CvPoint(r2.X, r2.Y + obj.Height), CvColor.White);
            }
        }

        // Draw a line between each corresponding point pair.
        int[] ptPairs = FindPairs(objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors);
        for (int i = 0; i < ptPairs.Length; i += 2)
        {
            CvSURFPoint r1 = objectKeypoints[ptPairs[i]];
            CvSURFPoint r2 = imageKeypoints[ptPairs[i + 1]];
            Cv.Line(correspond, r1.Pt, new CvPoint(Cv.Round(r2.Pt.X), Cv.Round(r2.Pt.Y + obj.Height)), CvColor.White);
        }

        // Circle each detected keypoint on the colorized object image.
        for (int i = 0; i < objectKeypoints.Length; i++)
        {
            CvSURFPoint r = objectKeypoints[i];
            CvPoint center = new CvPoint(Cv.Round(r.Pt.X), Cv.Round(r.Pt.Y));
            int radius = Cv.Round(r.Size*(1.2/9.0)*2);
            Cv.Circle(objColor, center, radius, CvColor.Red, 1, LineType.AntiAlias, 0);
        }
        watch.Stop();
        Console.WriteLine("Drawing time = {0}ms", watch.ElapsedMilliseconds);

        // Show the results.
        // BUG FIX: the two ShowImage arguments were swapped — the "Object" window
        // displayed the correspondence montage and "Object Correspond" displayed
        // the object image.
        using (CvWindow windowObject = new CvWindow("Object", WindowMode.AutoSize))
        using (CvWindow windowCorrespond = new CvWindow("Object Correspond", WindowMode.AutoSize))
        {
            windowObject.ShowImage(objColor);
            windowCorrespond.ShowImage(correspond);
            Cv.WaitKey(0);
        }
    }
}
/// <summary>
/// Copies a native cvExtractSURF descriptor sequence into a managed jagged array.
/// </summary>
/// <param name="descriptorsPtr">Pointer to the native CvSeq of descriptor vectors; may be IntPtr.Zero.</param>
/// <param name="param">SURF parameters; Extended selects 128-dim vectors, otherwise 64-dim.</param>
/// <returns>One float vector per keypoint; empty when no descriptors were computed.</returns>
private static float[][] ExtractSurfDescriptors(IntPtr descriptorsPtr, CvSURFParams param)
{
    // ROBUSTNESS FIX: descriptors are optional in cvExtractSURF — a null sequence
    // pointer means "not computed", so return an empty array instead of wrapping
    // (and dereferencing) a null native pointer.
    if (descriptorsPtr == IntPtr.Zero)
        return new float[0][];

    CvSeq<IntPtr> descriptorsSeq = new CvSeq<IntPtr>(descriptorsPtr);
    float[][] descriptors = new float[descriptorsSeq.Total][];
    int dim = (param.Extended) ? 128 : 64;
    for (int i = 0; i < descriptorsSeq.Total; i++)
    {
        descriptors[i] = new float[dim];
        IntPtr? ptr = descriptorsSeq[i];
        if (ptr.HasValue)
            Marshal.Copy(ptr.Value, descriptors[i], 0, dim);
    }
    return descriptors;
}
/// <summary>
/// 画像中からSURF(Speeded Up Robust Features)を検出する
/// </summary>
/// <param name="image">8ビット,グレースケールの入力画像. </param>
/// <param name="mask">オプション:8ビットのマスク画像.非0 のマスクピクセルが50%以上を占める領域からのみ,特徴点検出を行う.</param>
/// <param name="keypoints">出力パラメータ.キーポイントのシーケンスへのポインタのポインタ. これは,CvSURFPoint 構造体のシーケンスになる</param>
/// <param name="descriptors">オプション:出力パラメータ.ディスクリプタのシーケンスへのポインタのポインタ. シーケンスの各要素は,params.extended の値に依存して, 64-要素,あるいは 128-要素の浮動小数点数(CV_32F)ベクトルとなる. パラメータが NULL の場合,ディスクリプタは計算されない.</param>
/// <param name="param">CvSURFParams 構造体に入れられた,様々なアルゴリズムパラメータ</param>
/// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
#else
/// <summary>
/// Extracts Speeded Up Robust Features from image
/// </summary>
/// <param name="image">The input 8-bit grayscale image. </param>
/// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
/// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
/// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
/// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
/// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
#endif
public static void ExtractSURF(CvArr image, CvArr mask, ref CvSURFPoint[] keypoints, out float[][] descriptors, CvSURFParams param, bool useProvidedKeyPts)
{
    if (!useProvidedKeyPts)
    {
        // Caller wants detection: forward to the detecting overload.
        ExtractSURF(image, mask, out keypoints, out descriptors, param);
        return;
    }
    if (image == null)
        throw new ArgumentNullException("image");
    if (param == null)
        throw new ArgumentNullException("param");
    if (keypoints == null)
        throw new ArgumentNullException("keypoints");

    using (CvMemStorage storage = new CvMemStorage(0))
    using (CvSeq<CvSURFPoint> keypointsSeqIn = CvSeq<CvSURFPoint>.FromArray(keypoints, SeqType.Zero, storage))
    {
        IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
        IntPtr descriptorsPtr = IntPtr.Zero;
        IntPtr keypointsPtr = keypointsSeqIn.CvPtr;
        // BUG FIX: the final argument must be 'true' on this path. Passing 'false'
        // told the native routine to re-detect keypoints, silently discarding the
        // locations supplied by the caller — the very thing this overload promises
        // to honor.
        NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, true);
        // Marshal the (native-storage) results into managed arrays before the
        // storage is disposed by the enclosing 'using'.
        CvSeq<CvSURFPoint> keypointsSeqOut = new CvSeq<CvSURFPoint>(keypointsPtr);
        keypoints = keypointsSeqOut.ToArray();
        descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
    }
}
/// <summary>
/// 画像中からSURF(Speeded Up Robust Features)を検出する
/// </summary>
/// <param name="image">8ビット,グレースケールの入力画像. </param>
/// <param name="mask">オプション:8ビットのマスク画像.非0 のマスクピクセルが50%以上を占める領域からのみ,特徴点検出を行う.</param>
/// <param name="keypoints">出力パラメータ.キーポイントのシーケンスへのポインタのポインタ. これは,CvSURFPoint 構造体のシーケンスになる</param>
/// <param name="descriptors">オプション:出力パラメータ.ディスクリプタのシーケンスへのポインタのポインタ. シーケンスの各要素は,params.extended の値に依存して, 64-要素,あるいは 128-要素の浮動小数点数(CV_32F)ベクトルとなる. パラメータが NULL の場合,ディスクリプタは計算されない.</param>
/// <param name="param">CvSURFParams 構造体に入れられた,様々なアルゴリズムパラメータ</param>
#else
/// <summary>
/// Extracts Speeded Up Robust Features from an image, returning managed arrays.
/// </summary>
/// <param name="image">The input 8-bit grayscale image. </param>
/// <param name="mask">The optional input 8-bit mask. Features are only found in areas containing more than 50% non-zero mask pixels. </param>
/// <param name="keypoints">The detected keypoints as an array of CvSURFPoint structures.</param>
/// <param name="descriptors">One descriptor vector per keypoint; 64 or 128 floats depending on params.extended. </param>
/// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
#endif
public static void ExtractSURF(CvArr image, CvArr mask, out CvSURFPoint[] keypoints, out float[][] descriptors, CvSURFParams param)
{
    if (image == null)
        throw new ArgumentNullException("image");
    if (param == null)
        throw new ArgumentNullException("param");

    // The storage only has to live long enough to marshal the native sequences
    // into managed arrays, so a local 'using' scope is sufficient here.
    using (CvMemStorage storage = new CvMemStorage(0))
    {
        IntPtr keypointsPtr = IntPtr.Zero;
        IntPtr descriptorsPtr = IntPtr.Zero;
        IntPtr maskPtr = (mask != null) ? mask.CvPtr : IntPtr.Zero;
        NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, false);
        // Copy the results out of native memory before the storage is disposed.
        keypoints = new CvSeq<CvSURFPoint>(keypointsPtr).ToArray();
        descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
    }
}
/// <summary>
/// Matches the previously initialized object (see Init) against a scene image:
/// builds a montage with the object on top and the scene below, draws the located
/// object outline, the correspondence lines, and circles the object keypoints.
/// NOTE(review): assumes Init() has been called first so that _Obj, _ObjColor,
/// _ObjectKeypoints and _ObjectDescriptors are populated — TODO confirm callers
/// guarantee this ordering.
/// </summary>
/// <param name="fileName2">Path of the scene image; loaded as 8-bit grayscale.</param>
/// <returns>The correspondence montage image. The caller owns (and must dispose) it.</returns>
public IplImage Test(string fileName2)
{
    IplImage correspond = null;
    using (CvMemStorage storage = Cv.CreateMemStorage(0))
    using (IplImage image = Cv.LoadImage(fileName2, LoadMode.GrayScale))
    {
        // Montage: object image on top, scene image below.
        correspond = Cv.CreateImage(new CvSize(image.Width, _Obj.Height + image.Height), BitDepth.U8, 1);
        Cv.SetImageROI(correspond, new CvRect(0, 0, _Obj.Width, _Obj.Height));
        Cv.Copy(_Obj, correspond);
        // The oversized rect is clipped by SetImageROI (matches the original OpenCV sample).
        Cv.SetImageROI(correspond, new CvRect(0, _Obj.Height, correspond.Width, correspond.Height));
        Cv.Copy(image, correspond);
        Cv.ResetImageROI(correspond);
        // SURF extraction on the scene image. The resulting sequences live inside
        // 'storage' and are only used within this 'using' scope.
        CvSeq<CvSURFPoint> imageKeypoints;
        CvSeq<float> imageDescriptors;
        Stopwatch watch = Stopwatch.StartNew();
        {
            // Hessian threshold 500, extended (128-dim) descriptors.
            CvSURFParams param = new CvSURFParams(500, true);
            Cv.ExtractSURF(image, null, out imageKeypoints, out imageDescriptors, storage, param);
            Console.WriteLine("Image Descriptors: {0}", imageDescriptors.Total);
        }
        watch.Stop();
        Console.WriteLine("Extraction time = {0}ms", watch.ElapsedMilliseconds);
        watch.Reset();
        watch.Start();
        // Outline the region of the scene where the object was located.
        CvPoint[] srcCorners = new CvPoint[4]{ new CvPoint(0,0), new CvPoint(_Obj.Width,0), new CvPoint(_Obj.Width, _Obj.Height), new CvPoint(0, _Obj.Height) };
        CvPoint[] dstCorners = LocatePlanarObject(_ObjectKeypoints, _ObjectDescriptors, imageKeypoints, imageDescriptors, srcCorners);
        if (dstCorners != null)
        {
            for (int i = 0; i < 4; i++)
            {
                CvPoint r1 = dstCorners[i % 4];
                CvPoint r2 = dstCorners[(i + 1) % 4];
                // Offset by _Obj.Height because the scene sits below the object in the montage.
                Cv.Line(correspond, new CvPoint(r1.X, r1.Y + _Obj.Height), new CvPoint(r2.X, r2.Y + _Obj.Height), CvColor.White);
            }
        }
        // Draw a line between each corresponding point pair (pairs are stored flat:
        // even index = object keypoint, odd index = scene keypoint).
        int[] ptpairs = FindPairs(_ObjectKeypoints, _ObjectDescriptors, imageKeypoints, imageDescriptors);
        for (int i = 0; i < ptpairs.Length; i += 2)
        {
            CvSURFPoint r1 = Cv.GetSeqElem<CvSURFPoint>(_ObjectKeypoints, ptpairs[i]).Value;
            CvSURFPoint r2 = Cv.GetSeqElem<CvSURFPoint>(imageKeypoints, ptpairs[i + 1]).Value;
            Cv.Line(correspond, r1.Pt, new CvPoint(Cv.Round(r2.Pt.X), Cv.Round(r2.Pt.Y + _Obj.Height)), CvColor.White);
        }
        // Circle each object keypoint on the colorized object image.
        for (int i = 0; i < _ObjectKeypoints.Total; i++)
        {
            CvSURFPoint r = Cv.GetSeqElem<CvSURFPoint>(_ObjectKeypoints, i).Value;
            CvPoint center = new CvPoint(Cv.Round(r.Pt.X), Cv.Round(r.Pt.Y));
            int radius = Cv.Round(r.Size * (1.2 / 9.0) * 2);
            Cv.Circle(_ObjColor, center, radius, CvColor.Red, 1, LineType.AntiAlias, 0);
        }
        watch.Stop();
        Console.WriteLine("Drawing time = {0}ms", watch.ElapsedMilliseconds);
    }
    return correspond;
}