/// <summary>
/// Finds, for each object keypoint, the nearest scene keypoint and returns the index pairs.
/// </summary>
/// <param name="objectKeypoints">Keypoints detected in the object (template) image</param>
/// <param name="objectDescriptors">Descriptors of the object keypoints</param>
/// <param name="imageKeypoints">Keypoints detected in the scene image</param>
/// <param name="imageDescriptors">Descriptors of the scene keypoints</param>
/// <returns>Flat array of index pairs: even entries index objectKeypoints, odd entries index imageKeypoints</returns>
private static int[] FindPairs(CvSeq<CvSURFPoint> objectKeypoints, CvSeq<float> objectDescriptors,
                               CvSeq<CvSURFPoint> imageKeypoints, CvSeq<float> imageDescriptors)
{
    CvSeqReader<float> reader = new CvSeqReader<float>();
    CvSeqReader<CvSURFPoint> kreader = new CvSeqReader<CvSURFPoint>();
    Cv.StartReadSeq(objectDescriptors, reader);
    Cv.StartReadSeq(objectKeypoints, kreader);

    List<int> ptpairs = new List<int>();
    for (int i = 0; i < objectDescriptors.Total; i++)
    {
        CvSURFPoint kp = CvSURFPoint.FromPtr(kreader.Ptr);
        IntPtr descriptor = reader.Ptr;
        Cv.NEXT_SEQ_ELEM(kreader.Seq.ElemSize, kreader);
        Cv.NEXT_SEQ_ELEM(reader.Seq.ElemSize, reader);
        int nearestNeighbor = NaiveNearestNeighbor(descriptor, kp.Laplacian, imageKeypoints, imageDescriptors);
        if (nearestNeighbor >= 0)
        {
            ptpairs.Add(i);
            ptpairs.Add(nearestNeighbor);
        }
    }
    return ptpairs.ToArray();
}
/// <summary>
/// Finds the index of the nearest scene descriptor for a given object descriptor
/// (returns -1 unless the best match is clearly better than the second best).
/// </summary>
/// <param name="vec">Descriptor to match (const float* in the C version)</param>
/// <param name="laplacian">Sign of the Laplacian of the query keypoint; only keypoints with the same sign are compared</param>
/// <param name="model_keypoints">Scene keypoints</param>
/// <param name="model_descriptors">Scene descriptors</param>
/// <returns>Index of the nearest neighbor, or -1 if no reliable match was found</returns>
private static int NaiveNearestNeighbor(IntPtr vec, int laplacian,
                                        CvSeq<CvSURFPoint> model_keypoints, CvSeq<float> model_descriptors)
{
    int length = (int)(model_descriptors.ElemSize / sizeof(float));
    int neighbor = -1;
    double dist1 = 1e6, dist2 = 1e6;
    CvSeqReader<float> reader = new CvSeqReader<float>();
    CvSeqReader<CvSURFPoint> kreader = new CvSeqReader<CvSURFPoint>();
    Cv.StartReadSeq(model_keypoints, kreader, false);
    Cv.StartReadSeq(model_descriptors, reader, false);

    IntPtr mvec;
    CvSURFPoint kp;
    double d;
    for (int i = 0; i < model_descriptors.Total; i++)
    {
        // "const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;" is the tricky part.
        // OpenCvSharp structs can be created from a raw pointer via FromPtr, so it becomes:
        kp = CvSURFPoint.FromPtr(kreader.Ptr);
        // Doing the cast by hand would look like this:
        // CvSURFPoint kp = (CvSURFPoint)Marshal.PtrToStructure(kreader.Ptr, typeof(CvSURFPoint));
        mvec = reader.Ptr;
        Cv.NEXT_SEQ_ELEM(kreader.Seq.ElemSize, kreader);
        Cv.NEXT_SEQ_ELEM(reader.Seq.ElemSize, reader);
        if (laplacian != kp.Laplacian)
            continue;
        d = CompareSurfDescriptors(vec, mvec, dist2, length);
        if (d < dist1)
        {
            dist2 = dist1;
            dist1 = d;
            neighbor = i;
        }
        else if (d < dist2)
        {
            dist2 = d;
        }
    }
    // Ratio test: accept the match only if it is significantly better than the runner-up.
    if (dist1 < 0.6 * dist2)
        return neighbor;
    return -1;
}
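CompareSurfDescriptors is called above but not shown in this section. In OpenCV's find_obj sample it is the squared Euclidean distance between two descriptors, with an early exit once the running total exceeds the best distance found so far. A minimal unsafe sketch along those lines, with the signature inferred from the call site (the real helper may differ, e.g. the original processes four elements per iteration):

// Sketch only: squared Euclidean distance with early termination,
// as in OpenCV's find_obj sample. Requires an unsafe context.
private static unsafe double CompareSurfDescriptors(IntPtr d1, IntPtr d2, double best, int length)
{
    float* v1 = (float*)d1;
    float* v2 = (float*)d2;
    double totalCost = 0;
    for (int i = 0; i < length; i++)
    {
        double t = v1[i] - v2[i];
        totalCost += t * t;
        if (totalCost > best)
            break; // already worse than the best match so far
    }
    return totalCost;
}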
/// <summary>
/// Initializes from a native pointer
/// </summary>
/// <param name="ptr">CvSURFPoint*</param>
public CvSURFPoint(IntPtr ptr)
{
    if (ptr == IntPtr.Zero)
        throw new ArgumentNullException("ptr");
    try
    {
        CvSURFPoint p = (CvSURFPoint)Marshal.PtrToStructure(ptr, typeof(CvSURFPoint));
        Pt = p.Pt;
        Laplacian = p.Laplacian;
        Size = p.Size;
        Dir = p.Dir;
        Hessian = p.Hessian;
    }
    catch
    {
        throw new InvalidCastException();
    }
}
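The FromPtr factory used by the sequence readers above is not shown here; as the comments in NaiveNearestNeighbor note, it is essentially the same marshaling step wrapped in a static method. A minimal sketch:

// Sketch of the FromPtr factory referenced above: marshals a native
// CvSURFPoint* into the managed struct, mirroring the constructor logic.
public static CvSURFPoint FromPtr(IntPtr ptr)
{
    return (CvSURFPoint)Marshal.PtrToStructure(ptr, typeof(CvSURFPoint));
}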
/// <summary>
/// A rough implementation for object location: matches keypoints, estimates a
/// homography with RANSAC, and projects the object corners into the scene.
/// </summary>
/// <param name="objectKeypoints">Keypoints of the object (template) image</param>
/// <param name="objectDescriptors">Descriptors of the object keypoints</param>
/// <param name="imageKeypoints">Keypoints of the scene image</param>
/// <param name="imageDescriptors">Descriptors of the scene keypoints</param>
/// <param name="srcCorners">Corners of the object image</param>
/// <returns>Projected corners in the scene image, or null if no homography could be estimated</returns>
private static CvPoint[] LocatePlanarObject(CvSURFPoint[] objectKeypoints, float[][] objectDescriptors,
                                            CvSURFPoint[] imageKeypoints, float[][] imageDescriptors,
                                            CvPoint[] srcCorners)
{
    CvMat h = new CvMat(3, 3, MatrixType.F64C1);
    int[] ptpairs = FindPairs(objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors);
    int n = ptpairs.Length / 2;
    if (n < 4) // at least 4 correspondences are needed to estimate a homography
        return null;

    CvPoint2D32f[] pt1 = new CvPoint2D32f[n];
    CvPoint2D32f[] pt2 = new CvPoint2D32f[n];
    for (int i = 0; i < n; i++)
    {
        pt1[i] = objectKeypoints[ptpairs[i * 2]].Pt;
        pt2[i] = imageKeypoints[ptpairs[i * 2 + 1]].Pt;
    }

    CvMat pt1Mat = new CvMat(1, n, MatrixType.F32C2, pt1);
    CvMat pt2Mat = new CvMat(1, n, MatrixType.F32C2, pt2);
    if (Cv.FindHomography(pt1Mat, pt2Mat, h, HomographyMethod.Ransac, 5) == 0)
        return null;

    // Project the object corners through the homography (perspective division by Z).
    CvPoint[] dstCorners = new CvPoint[4];
    for (int i = 0; i < 4; i++)
    {
        double x = srcCorners[i].X;
        double y = srcCorners[i].Y;
        double Z = 1.0 / (h[6] * x + h[7] * y + h[8]);
        double X = (h[0] * x + h[1] * y + h[2]) * Z;
        double Y = (h[3] * x + h[4] * y + h[5]) * Z;
        dstCorners[i] = new CvPoint(Cv.Round(X), Cv.Round(Y));
    }
    return dstCorners;
}
/// <summary>
/// Finds, for each object keypoint, the nearest scene keypoint and returns the index pairs
/// (managed-array version of the method above).
/// </summary>
/// <param name="objectKeypoints">Keypoints detected in the object (template) image</param>
/// <param name="objectDescriptors">Descriptors of the object keypoints</param>
/// <param name="imageKeypoints">Keypoints detected in the scene image</param>
/// <param name="imageDescriptors">Descriptors of the scene keypoints</param>
/// <returns>Flat array of index pairs: even entries index objectKeypoints, odd entries index imageKeypoints</returns>
private static int[] FindPairs(CvSURFPoint[] objectKeypoints, float[][] objectDescriptors,
                               CvSURFPoint[] imageKeypoints, float[][] imageDescriptors)
{
    List<int> ptPairs = new List<int>();
    for (int i = 0; i < objectDescriptors.Length; i++)
    {
        CvSURFPoint kp = objectKeypoints[i];
        int nearestNeighbor = NaiveNearestNeighbor(objectDescriptors[i], kp.Laplacian, imageKeypoints, imageDescriptors);
        if (nearestNeighbor >= 0)
        {
            ptPairs.Add(i);
            ptPairs.Add(nearestNeighbor);
        }
    }
    return ptPairs.ToArray();
}
/// <summary>
/// Finds the index of the nearest scene descriptor for a given object descriptor
/// (managed-array version; returns -1 unless the best match is clearly better than the second best).
/// </summary>
/// <param name="vec">Descriptor to match (const float* in the C version)</param>
/// <param name="laplacian">Sign of the Laplacian of the query keypoint; only keypoints with the same sign are compared</param>
/// <param name="modelKeypoints">Scene keypoints</param>
/// <param name="modelDescriptors">Scene descriptors</param>
/// <returns>Index of the nearest neighbor, or -1 if no reliable match was found</returns>
private static int NaiveNearestNeighbor(float[] vec, int laplacian,
                                        CvSURFPoint[] modelKeypoints, float[][] modelDescriptors)
{
    int neighbor = -1;
    double dist1 = 1e6, dist2 = 1e6;
    for (int i = 0; i < modelDescriptors.Length; i++)
    {
        // In C: const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
        CvSURFPoint kp = modelKeypoints[i];
        if (laplacian != kp.Laplacian)
            continue;
        double d = CompareSurfDescriptors(vec, modelDescriptors[i], dist2);
        if (d < dist1)
        {
            dist2 = dist1;
            dist1 = d;
            neighbor = i;
        }
        else if (d < dist2)
        {
            dist2 = d;
        }
    }
    // Ratio test: accept only if the best match is significantly better than the runner-up.
    if (dist1 < 0.6 * dist2)
        return neighbor;
    return -1;
}
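The three-argument CompareSurfDescriptors overload used here is likewise not shown in this section. A minimal managed sketch, with the signature inferred from the call site:

// Sketch of the managed overload used above: squared Euclidean distance
// with early termination once the running total exceeds the best so far.
private static double CompareSurfDescriptors(float[] d1, float[] d2, double best)
{
    double totalCost = 0;
    for (int i = 0; i < d1.Length; i++)
    {
        double t = d1[i] - d2[i];
        totalCost += t * t;
        if (totalCost > best)
            break;
    }
    return totalCost;
}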
/// <summary>
/// Extracts Speeded Up Robust Features from an image
/// </summary>
/// <param name="image">The input 8-bit grayscale image.</param>
/// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels.</param>
/// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
/// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors. Depending on the params.extended value, each element of the sequence will be either a 64-element or a 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed.</param>
/// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
/// <param name="useProvidedKeyPts">If true, keypoints are not detected; descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
public static void ExtractSURF(CvArr image, CvArr mask, ref CvSURFPoint[] keypoints, out float[][] descriptors,
                               CvSURFParams param, bool useProvidedKeyPts)
{
    if (!useProvidedKeyPts)
    {
        ExtractSURF(image, mask, out keypoints, out descriptors, param);
        return;
    }
    if (image == null)
        throw new ArgumentNullException("image");
    if (param == null)
        throw new ArgumentNullException("param");
    if (keypoints == null)
        throw new ArgumentNullException("keypoints");

    using (CvMemStorage storage = new CvMemStorage(0))
    using (CvSeq<CvSURFPoint> keypointsSeqIn = CvSeq<CvSURFPoint>.FromArray(keypoints, SeqType.Zero, storage))
    {
        IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
        IntPtr descriptorsPtr = IntPtr.Zero;
        IntPtr keypointsPtr = keypointsSeqIn.CvPtr;
        // Pass true so the provided keypoints are used instead of running detection
        // (the original code passed false here, which ignored the supplied keypoints).
        NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr,
                                    storage.CvPtr, param.Struct, true);
        CvSeq<CvSURFPoint> keypointsSeqOut = new CvSeq<CvSURFPoint>(keypointsPtr);
        keypoints = keypointsSeqOut.ToArray();
        descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
    }
}
/// <summary>
/// Extracts Speeded Up Robust Features from an image
/// </summary>
/// <param name="image">The input 8-bit grayscale image.</param>
/// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels.</param>
/// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
/// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors. Depending on the params.extended value, each element of the sequence will be either a 64-element or a 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed.</param>
/// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
public static void ExtractSURF(CvArr image, CvArr mask, out CvSURFPoint[] keypoints, out float[][] descriptors, CvSURFParams param)
{
    if (image == null)
        throw new ArgumentNullException("image");
    if (param == null)
        throw new ArgumentNullException("param");

    using (CvMemStorage storage = new CvMemStorage(0))
    {
        IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
        IntPtr descriptorsPtr = IntPtr.Zero;
        IntPtr keypointsPtr = IntPtr.Zero;
        NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr,
                                    storage.CvPtr, param.Struct, false);
        CvSeq<CvSURFPoint> keypointsSeq = new CvSeq<CvSURFPoint>(keypointsPtr);
        keypoints = keypointsSeq.ToArray();
        descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
    }
}
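Both overloads call an ExtractSurfDescriptors helper that is not shown in this section. It copies each 64- or 128-float descriptor out of the native sequence into a managed jagged array. A minimal sketch, assuming a CvSeq<float> wrapper over the raw pointer and a boolean CvSURFParams.Extended property (as suggested by the CvSURFParams(500, true) constructor used later); the real helper may differ:

// Sketch only. Uses the same CvSeqReader pattern as FindPairs above;
// requires System.Runtime.InteropServices for Marshal.Copy.
private static float[][] ExtractSurfDescriptors(IntPtr descriptorsPtr, CvSURFParams param)
{
    if (descriptorsPtr == IntPtr.Zero)
        return null; // descriptors were not computed

    CvSeq<float> seq = new CvSeq<float>(descriptorsPtr);
    int length = param.Extended ? 128 : 64; // vector length depends on params.extended
    float[][] result = new float[seq.Total][];

    CvSeqReader<float> reader = new CvSeqReader<float>();
    Cv.StartReadSeq(seq, reader);
    for (int i = 0; i < seq.Total; i++)
    {
        result[i] = new float[length];
        Marshal.Copy(reader.Ptr, result[i], 0, length); // copy one descriptor vector
        Cv.NEXT_SEQ_ELEM(reader.Seq.ElemSize, reader);
    }
    return result;
}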
public IplImage Test(string fileName2)
{
    IplImage correspond = null;
    using (CvMemStorage storage = Cv.CreateMemStorage(0))
    using (IplImage image = Cv.LoadImage(fileName2, LoadMode.GrayScale))
    {
        // Stack the object image on top of the scene image.
        correspond = Cv.CreateImage(new CvSize(image.Width, _Obj.Height + image.Height), BitDepth.U8, 1);
        Cv.SetImageROI(correspond, new CvRect(0, 0, _Obj.Width, _Obj.Height));
        Cv.Copy(_Obj, correspond);
        // This rect extends past the bottom edge; cvSetImageROI clips it to the image,
        // so the ROI effectively becomes the lower (scene) half. Same as the original C sample.
        Cv.SetImageROI(correspond, new CvRect(0, _Obj.Height, correspond.Width, correspond.Height));
        Cv.Copy(image, correspond);
        Cv.ResetImageROI(correspond);

        // SURF extraction
        CvSeq<CvSURFPoint> imageKeypoints;
        CvSeq<float> imageDescriptors;
        Stopwatch watch = Stopwatch.StartNew();
        {
            CvSURFParams param = new CvSURFParams(500, true);
            Cv.ExtractSURF(image, null, out imageKeypoints, out imageDescriptors, storage, param);
            Console.WriteLine("Image Descriptors: {0}", imageDescriptors.Total);
        }
        watch.Stop();
        Console.WriteLine("Extraction time = {0}ms", watch.ElapsedMilliseconds);
        watch.Reset();
        watch.Start();

        // Outline the region of the scene image where the object was located.
        CvPoint[] srcCorners = new CvPoint[4]
        {
            new CvPoint(0, 0),
            new CvPoint(_Obj.Width, 0),
            new CvPoint(_Obj.Width, _Obj.Height),
            new CvPoint(0, _Obj.Height)
        };
        CvPoint[] dstCorners = LocatePlanarObject(_ObjectKeypoints, _ObjectDescriptors, imageKeypoints, imageDescriptors, srcCorners);
        if (dstCorners != null)
        {
            for (int i = 0; i < 4; i++)
            {
                CvPoint r1 = dstCorners[i % 4];
                CvPoint r2 = dstCorners[(i + 1) % 4];
                Cv.Line(correspond, new CvPoint(r1.X, r1.Y + _Obj.Height), new CvPoint(r2.X, r2.Y + _Obj.Height), CvColor.White);
            }
        }

        // Draw lines between corresponding points.
        int[] ptpairs = FindPairs(_ObjectKeypoints, _ObjectDescriptors, imageKeypoints, imageDescriptors);
        for (int i = 0; i < ptpairs.Length; i += 2)
        {
            CvSURFPoint r1 = Cv.GetSeqElem<CvSURFPoint>(_ObjectKeypoints, ptpairs[i]).Value;
            CvSURFPoint r2 = Cv.GetSeqElem<CvSURFPoint>(imageKeypoints, ptpairs[i + 1]).Value;
            Cv.Line(correspond, r1.Pt, new CvPoint(Cv.Round(r2.Pt.X), Cv.Round(r2.Pt.Y + _Obj.Height)), CvColor.White);
        }

        // Draw a circle at each feature point of the object image.
        for (int i = 0; i < _ObjectKeypoints.Total; i++)
        {
            CvSURFPoint r = Cv.GetSeqElem<CvSURFPoint>(_ObjectKeypoints, i).Value;
            CvPoint center = new CvPoint(Cv.Round(r.Pt.X), Cv.Round(r.Pt.Y));
            int radius = Cv.Round(r.Size * (1.2 / 9.0) * 2);
            Cv.Circle(_ObjColor, center, radius, CvColor.Red, 1, LineType.AntiAlias, 0);
        }
        watch.Stop();
        Console.WriteLine("Drawing time = {0}ms", watch.ElapsedMilliseconds);
    }
    return correspond;
}
public List<CvSURFPoint> FastHessianDetector(Bitmap ibmp)
{
    const int nOctaves = 4;
    const int nOctaveLayers = 2;
    const int hessianThreshold = 500;
    const int NX = 3, NY = 3, NXY = 4, NM = 1;
    const int HAAR_SIZE0 = 9;
    const int HAAR_SIZE_INC = 6;
    int margin, size;
    float dx, dy, dxy;

    // Box-filter layouts (x1, y1, x2, y2, weight) of the 9x9 base Haar patterns.
    int[,] dx_s = new int[NX, 5] { { 0, 2, 3, 7, 1 }, { 3, 2, 6, 7, -2 }, { 6, 2, 9, 7, 1 } };
    int[,] dy_s = new int[NY, 5] { { 2, 0, 7, 3, 1 }, { 2, 3, 7, 6, -2 }, { 2, 6, 7, 9, 1 } };
    int[,] dxy_s = new int[NXY, 5] { { 1, 1, 4, 4, 1 }, { 5, 1, 8, 4, -1 }, { 1, 5, 4, 8, -1 }, { 5, 5, 8, 8, 1 } };
    int[,] dm = new int[1, 5] { { 0, 0, 9, 9, 1 } };
    CvSurfHF[] Dx = new CvSurfHF[NX] { new CvSurfHF(), new CvSurfHF(), new CvSurfHF() };
    CvSurfHF[] Dy = new CvSurfHF[NY] { new CvSurfHF(), new CvSurfHF(), new CvSurfHF() };
    CvSurfHF[] Dxy = new CvSurfHF[NXY] { new CvSurfHF(), new CvSurfHF(), new CvSurfHF(), new CvSurfHF() };
    CvSurfHF[] Dm = new CvSurfHF[1] { new CvSurfHF() };

    Rectangle rect = new Rectangle(0, 0, ibmp.Width, ibmp.Height);
    System.Drawing.Imaging.BitmapData data = ibmp.LockBits(rect, System.Drawing.Imaging.ImageLockMode.ReadWrite, ibmp.PixelFormat);
    integralMap = Integral(data);
    this._Width = data.Width;
    this._Height = data.Height;

    // Per-layer response buffers, reused across octaves. There are nOctaveLayers + 2
    // layers per octave (the extra bottom and top layers are needed for 3x3x3 NMS).
    int nLayers = nOctaveLayers + 2;
    float[][] dets = new float[nLayers][];
    float[][] traces = new float[nLayers][];
    int[] HaarPatternSizes = new int[nLayers];
    for (int layer = 0; layer < nLayers; layer++)
    {
        // Allocate full Width*Height: the pointer arithmetic below uses a stride of
        // data.Width, so the original (Width-1)*(Height-1) allocation could overrun.
        dets[layer] = new float[data.Width * data.Height];
        traces[layer] = new float[data.Width * data.Height];
    }

    List<CvSURFPoint> keypoints = new List<CvSURFPoint>();
    unsafe
    {
        fixed (int* imgPtr = integralMap)
        {
            int* s_ptr;
            float* det_ptr;
            float* trace_ptr;
            for (int octave = 0, sampleStep = 1; octave < nOctaves; octave++, sampleStep *= 2)
            {
                int rows = (data.Height - 1) / sampleStep;
                int cols = (data.Width - 1) / sampleStep;

                // Compute the determinant and trace of the Hessian for every layer.
                // Note: layer must run up to nOctaveLayers + 1 inclusive so that
                // HaarPatternSizes[layer + 1] is valid in the maxima search below
                // (the original loop condition "layer < nOctaveLayers + 1" stopped one short).
                for (int layer = 0; layer <= nOctaveLayers + 1; layer++)
                {
                    HaarPatternSizes[layer] = size = (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
                    icvResizeHaarPattern(dx_s, Dx, NX, 9, size, data.Width);
                    icvResizeHaarPattern(dy_s, Dy, NY, 9, size, data.Width);
                    icvResizeHaarPattern(dxy_s, Dxy, NXY, 9, size, data.Width);
                    margin = (size / 2) / sampleStep;
                    for (int sum_i = 0, i = margin; sum_i <= (data.Height - 1) - size; sum_i += sampleStep, i++)
                    {
                        s_ptr = imgPtr + sum_i * data.Width;
                        fixed (float* pdet_ptr = dets[layer])
                        fixed (float* ptrace_ptr = traces[layer])
                        {
                            det_ptr = pdet_ptr + i * data.Width + margin;
                            trace_ptr = ptrace_ptr + i * data.Width + margin;
                            for (int sum_j = 0, j = margin; sum_j <= (data.Width - 1) - size; sum_j += sampleStep, j++)
                            {
                                dx = icvCalcHaarPattern(s_ptr, Dx, 3);
                                dy = icvCalcHaarPattern(s_ptr, Dy, 3);
                                dxy = icvCalcHaarPattern(s_ptr, Dxy, 4);
                                s_ptr += sampleStep;
                                *det_ptr++ = (float)(dx * dy - 0.81 * dxy * dxy);
                                *trace_ptr++ = (float)(dx + dy);
                            }
                        }
                    }
                }

                /* Find maxima in the determinant of the Hessian */
                for (int layer = 1; layer <= nOctaveLayers; layer++)
                {
                    size = HaarPatternSizes[layer];
                    icvResizeHaarPattern(dm, Dm, NM, 9, size, data.Width);
                    /* Ignore pixels without a 3x3 neighbourhood in the layer above */
                    margin = (HaarPatternSizes[layer + 1] / 2) / sampleStep + 1;
                    for (int i = margin; i < rows - margin; i++)
                    {
                        fixed (float* pdet_ptr1 = dets[layer - 1])
                        fixed (float* pdet_ptr2 = dets[layer])
                        fixed (float* ptrace_ptr2 = traces[layer])
                        fixed (float* pdet_ptr3 = dets[layer + 1])
                        {
                            det_ptr = pdet_ptr2 + i * data.Width;
                            trace_ptr = ptrace_ptr2 + i * data.Width;
                            for (int j = margin; j < cols - margin; j++)
                            {
                                float val0 = det_ptr[j];
                                if (val0 > hessianThreshold)
                                {
                                    /* Coordinates for the start of the wavelet in the sum image.
                                       There is some integer division involved, so don't try to
                                       simplify this (cancel out sampleStep) without checking
                                       that the result is the same. */
                                    int sum_i = sampleStep * (i - (size / 2) / sampleStep);
                                    int sum_j = sampleStep * (j - (size / 2) / sampleStep);

                                    /* The 3x3x3 neighbouring samples around the maxima.
                                       The maxima is included at N9[1][4]. */
                                    int c = data.Width;
                                    float* det1 = pdet_ptr1 + i * c + j;
                                    float* det2 = pdet_ptr2 + i * c + j;
                                    float* det3 = pdet_ptr3 + i * c + j;
                                    float[,] N9 = new float[3, 9]
                                    {
                                        { det1[-c-1], det1[-c], det1[-c+1], det1[-1], det1[0], det1[1], det1[c-1], det1[c], det1[c+1] },
                                        { det2[-c-1], det2[-c], det2[-c+1], det2[-1], det2[0], det2[1], det2[c-1], det2[c], det2[c+1] },
                                        { det3[-c-1], det3[-c], det3[-c+1], det3[-1], det3[0], det3[1], det3[c-1], det3[c], det3[c+1] }
                                    };

                                    /* Check the mask - why not just check the mask at the center of the wavelet?
                                       (mask handling from the original C code is omitted in this port)
                                    if( mask_sum )
                                    {
                                        const int* mask_ptr = mask_sum->data.i + mask_sum->cols*sum_i + sum_j;
                                        float mval = icvCalcHaarPattern( mask_ptr, &Dm, 1 );
                                        if( mval < 0.5 )
                                            continue;
                                    } */

                                    /* Non-maxima suppression. val0 is at N9[1][4]. */
                                    if (val0 > N9[0, 0] && val0 > N9[0, 1] && val0 > N9[0, 2] &&
                                        val0 > N9[0, 3] && val0 > N9[0, 4] && val0 > N9[0, 5] &&
                                        val0 > N9[0, 6] && val0 > N9[0, 7] && val0 > N9[0, 8] &&
                                        val0 > N9[1, 0] && val0 > N9[1, 1] && val0 > N9[1, 2] &&
                                        val0 > N9[1, 3] && val0 > N9[1, 5] && val0 > N9[1, 6] &&
                                        val0 > N9[1, 7] && val0 > N9[1, 8] &&
                                        val0 > N9[2, 0] && val0 > N9[2, 1] && val0 > N9[2, 2] &&
                                        val0 > N9[2, 3] && val0 > N9[2, 4] && val0 > N9[2, 5] &&
                                        val0 > N9[2, 6] && val0 > N9[2, 7] && val0 > N9[2, 8])
                                    {
                                        /* Calculate the wavelet center coordinates for the maxima */
                                        float center_i = sum_i + (float)(size - 1) / 2;
                                        float center_j = sum_j + (float)(size - 1) / 2;

                                        CvSURFPoint point = new CvSURFPoint();
                                        point.Pt.X = center_j;
                                        point.Pt.Y = center_i;
                                        point.Laplacian = (trace_ptr[j] > 0) ? 1 : (trace_ptr[j] < 0 ? -1 : 0);
                                        point.Size = HaarPatternSizes[layer];
                                        point.Dir = 0;
                                        point.Hessian = val0;

                                        /* Interpolate maxima location within the 3x3x3 neighbourhood.
                                           CvSURFPoint is a struct, so it must be passed by ref for the
                                           refined coordinates to be visible here. */
                                        int ds = HaarPatternSizes[layer] - HaarPatternSizes[layer - 1];
                                        int interp_ok = icvInterpolateKeypoint(N9, sampleStep, sampleStep, ds, ref point);

                                        /* Sometimes the interpolation step gives a negative size etc. */
                                        if (interp_ok != 0 &&
                                            point.Size >= 1 &&
                                            point.Pt.X >= 0 && point.Pt.X <= (data.Width - 1) &&
                                            point.Pt.Y >= 0 && point.Pt.Y <= (data.Height - 1))
                                        {
                                            keypoints.Add(point);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    // Unlock the bits.
    ibmp.UnlockBits(data);
    return keypoints;
}
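The detector above relies on icvResizeHaarPattern and icvCalcHaarPattern, which are not shown in this section. In OpenCV's surf.cpp they scale the box-filter rectangles to the current Haar size and evaluate the weighted box sums on the integral image. A C# sketch along those lines, assuming CvSurfHF exposes int fields P0..P3 (precomputed corner offsets into the integral image) and a float field W (weight); the actual member names may differ:

// Sketch based on OpenCV's surf.cpp helpers; field names P0..P3/W are assumed.
static void icvResizeHaarPattern(int[,] src, CvSurfHF[] dst, int n, int oldSize, int newSize, int widthStep)
{
    float ratio = (float)newSize / oldSize;
    for (int k = 0; k < n; k++)
    {
        // Scale the rectangle corners and precompute flat offsets into the integral image.
        int dx1 = (int)Math.Round(ratio * src[k, 0]);
        int dy1 = (int)Math.Round(ratio * src[k, 1]);
        int dx2 = (int)Math.Round(ratio * src[k, 2]);
        int dy2 = (int)Math.Round(ratio * src[k, 3]);
        dst[k].P0 = dy1 * widthStep + dx1;
        dst[k].P1 = dy2 * widthStep + dx1;
        dst[k].P2 = dy1 * widthStep + dx2;
        dst[k].P3 = dy2 * widthStep + dx2;
        // Normalize the weight by the rectangle area.
        dst[k].W = src[k, 4] / ((float)(dx2 - dx1) * (dy2 - dy1));
    }
}

// Evaluates the weighted sum of box filters at 'origin' using the integral image:
// each rectangle sum is origin[P0] + origin[P3] - origin[P1] - origin[P2].
static unsafe float icvCalcHaarPattern(int* origin, CvSurfHF[] f, int n)
{
    double d = 0;
    for (int k = 0; k < n; k++)
        d += (origin[f[k].P0] + origin[f[k].P3] - origin[f[k].P1] - origin[f[k].P2]) * f[k].W;
    return (float)d;
}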
/// <summary>
/// Refines a detected maximum within its 3x3x3 neighbourhood by fitting a quadratic:
/// solves the 3x3 system A*x = b, where A is the Hessian of the response and b the
/// negative gradient, then shifts the keypoint by the sub-pixel/sub-scale offset x.
/// </summary>
int icvInterpolateKeypoint(float[,] N9, int dx, int dy, int ds, ref CvSURFPoint point)
{
    int solve_ok;
    float[] A = new float[9];
    float[] x = new float[3];
    float[] b = new float[3];
    CvMat _A = new CvMat(3, 3, MatrixType.F32C1, A);
    CvMat _x = new CvMat(3, 1, MatrixType.F32C1, x);
    CvMat _b = new CvMat(3, 1, MatrixType.F32C1, b);

    b[0] = -(N9[1, 5] - N9[1, 3]) / 2;  /* Negative 1st deriv with respect to x */
    b[1] = -(N9[1, 7] - N9[1, 1]) / 2;  /* Negative 1st deriv with respect to y */
    b[2] = -(N9[2, 4] - N9[0, 4]) / 2;  /* Negative 1st deriv with respect to s */

    A[0] = N9[1, 3] - 2 * N9[1, 4] + N9[1, 5];              /* 2nd deriv x, x */
    A[1] = (N9[1, 8] - N9[1, 6] - N9[1, 2] + N9[1, 0]) / 4; /* 2nd deriv x, y */
    A[2] = (N9[2, 5] - N9[2, 3] - N9[0, 5] + N9[0, 3]) / 4; /* 2nd deriv x, s */
    A[3] = A[1];                                            /* 2nd deriv y, x */
    A[4] = N9[1, 1] - 2 * N9[1, 4] + N9[1, 7];              /* 2nd deriv y, y */
    A[5] = (N9[2, 7] - N9[2, 1] - N9[0, 7] + N9[0, 1]) / 4; /* 2nd deriv y, s */
    A[6] = A[2];                                            /* 2nd deriv s, x */
    A[7] = A[5];                                            /* 2nd deriv s, y */
    A[8] = N9[0, 4] - 2 * N9[1, 4] + N9[2, 4];              /* 2nd deriv s, s */

    // cvSolve returns nonzero on success.
    solve_ok = CvInvoke.cvSolve(_A.CvPtr, _b.CvPtr, _x.CvPtr, InvertMethod.LU);
    if (solve_ok != 0)
    {
        point.Pt.X += x[0] * dx;
        point.Pt.Y += x[1] * dy;
        point.Size = (int)Math.Round(point.Size + x[2] * ds);
    }
    return solve_ok;
}
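A minimal usage sketch tying the detector together, assuming 'detector' is an instance of the surrounding class; the file name is illustrative:

// Hypothetical usage of FastHessianDetector ("scene.png" is illustrative).
using (Bitmap bmp = new Bitmap("scene.png"))
{
    List<CvSURFPoint> keypoints = detector.FastHessianDetector(bmp);
    Console.WriteLine("Detected {0} keypoints", keypoints.Count);
    foreach (CvSURFPoint kp in keypoints)
        Console.WriteLine("  ({0:F1}, {1:F1})  size={2}  laplacian={3}",
                          kp.Pt.X, kp.Pt.Y, kp.Size, kp.Laplacian);
}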