/// <summary>
/// Detects corners using the FAST algorithm by E. Rosten.
/// </summary>
/// <param name="image">Grayscale image where keypoints (corners) are detected.</param>
/// <param name="keypoints">Keypoints detected on the image.</param>
/// <param name="threshold">Threshold on the difference in intensity between the central pixel and pixels on a circle around it.</param>
/// <param name="nonmaxSupression">If true, non-maximum suppression is applied to the detected corners.</param>
/// <param name="type">One of the three neighborhoods as defined in the paper (TYPE_5_8, TYPE_7_12, TYPE_9_16).</param>
public static void FASTX(InputArray image, out KeyPoint[] keypoints, int threshold, bool nonmaxSupression, int type)
{
    if (image == null)
        throw new ArgumentNullException("image");
    image.ThrowIfDisposed();

    using (var kp = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_FASTX(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, type);
        keypoints = kp.ToArray();
    }
}
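// A minimal usage sketch for FASTX above (hedged: assumes this overload is exposed
// as Cv2.FASTX and that type value 2 selects the 9/16 neighborhood; the enum for
// grayscale loading is spelled LoadMode or ImreadModes depending on version):
public static void FastXExample()
{
    using (var gray = new Mat("scene.png", ImreadModes.GrayScale))
    {
        KeyPoint[] corners;
        Cv2.FASTX(gray, out corners, threshold: 20, nonmaxSupression: true, type: 2);
        Console.WriteLine("FASTX found {0} corners", corners.Length);
    }
}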
#if LANG_JP
/// <summary>
/// 高速なマルチスケール Hessian 検出器を用いて keypoint を検出します.
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <returns></returns>
#else
/// <summary>
/// Detects keypoints using the fast multi-scale Hessian detector.
/// </summary>
/// <param name="img">Input image.</param>
/// <param name="mask">Optional mask that marks the regions where keypoints are detected.</param>
/// <returns>The detected keypoints.</returns>
#endif
public KeyPoint[] Extract(Mat img, Mat mask)
{
    if (img == null)
        throw new ArgumentNullException("img");

    CvMat imgMat = img.ToCvMat();
    CvMat maskMat = (mask == null) ? null : mask.ToCvMat();

    CvSURFPoint[] keypoints;
    float[][] descriptors;
    Cv.ExtractSURF(imgMat, maskMat, out keypoints, out descriptors, this);

    KeyPoint[] result = new KeyPoint[keypoints.Length];
    for (int i = 0; i < result.Length; i++)
    {
        CvSURFPoint kpt = keypoints[i];
        result[i] = new KeyPoint(kpt.Pt, (float)kpt.Size, kpt.Dir, kpt.Hessian, GetPointOctave(kpt, this));
    }
    return result;
}
// Set the frame image from the webcam and start matching & detection.
// (Registers one Mat as img and kicks off the actual matching.)
public static void setImg(TestSURF thisobj, Form1 mainform, IplImage imgFromCam)
{
    if (thisobj.isComputing)
    {
        return;
    }
    thisobj.isComputing = true;

    // Keypoints and descriptor of the frame image
    KeyPoint[] f_keypoints;
    Mat f_descriptor;

    // Convert the camera frame to grayscale
    Mat imgOrig = Cv2.CvArrToMat(imgFromCam); // camera frame (original)
    Mat img = new Mat();                      // camera frame (grayscale)
    Cv2.CvtColor(imgOrig, img, ColorConversion.BgrToGray);

    //---------------------1. Keypoint & descriptor extraction
    f_keypoints = thisobj.surfobj.Detect(img);                   // SURF keypoints
    f_descriptor = new Mat();
    thisobj.surfobj.Compute(img, ref f_keypoints, f_descriptor); // SURF descriptors

    //---------------------2. Descriptor matching
    DMatch[] matches;
    try
    {
        matches = thisobj.fm.Match(thisobj.t_descriptor, f_descriptor); // MATCHING
    }
    catch
    {
        // Matching errors are caught here; reset the flag so later frames can retry.
        thisobj.isComputing = false;
        return;
    }

    // Record match distances between descriptors in order to pick out good matches
    double max_dist = 0;
    double min_dist = 100;
    for (int i = 0; i < matches.Length; i++)
    {
        double dist = matches[i].Distance;
        if (dist < min_dist) { min_dist = dist; }
        if (dist > max_dist) { max_dist = dist; }
    }

    //---------------------3. Select the good matches
    List<DMatch> good_matches = new List<DMatch>();
    for (int i = 0; i < matches.Length; i++)
    {
        if (matches[i].Distance < 3 * min_dist)
        {
            good_matches.Add(matches[i]);
        }
    }

    /*
    KeyPoint[] goodkey = new KeyPoint[good_matches.Count];
    for (int goodidx = 0; goodidx < good_matches.Count; goodidx++)
    {
        goodkey[goodidx] = new KeyPoint(
            f_keypoints[good_matches[goodidx].TrainIdx].Pt.X,
            f_keypoints[good_matches[goodidx].TrainIdx].Pt.Y,
            f_keypoints[good_matches[goodidx].TrainIdx].Size);
    }
    */

    // From the good matches, collect the corresponding keypoint locations
    // in the target and frame images
    Point2d[] target_lo = new Point2d[good_matches.Count];
    Point2d[] frame_lo = new Point2d[good_matches.Count];
    for (int i = 0; i < good_matches.Count; i++)
    {
        target_lo[i] = new Point2d(
            thisobj.t_keypoints[good_matches[i].QueryIdx].Pt.X,
            thisobj.t_keypoints[good_matches[i].QueryIdx].Pt.Y);
        frame_lo[i] = new Point2d(
            f_keypoints[good_matches[i].TrainIdx].Pt.X,
            f_keypoints[good_matches[i].TrainIdx].Pt.Y);
    }

    //---------------------4. RANSAC homography from target to frame
    Mat hom = Cv2.FindHomography(target_lo, frame_lo, HomographyMethod.Ransac);

    // Project the target's corners into the frame
    Point2d[] frame_corners = Cv2.PerspectiveTransform(thisobj.obj_corners, hom);

    // Mat -> IplImage
    //IplImage returnimg = (IplImage)imgOrig;

    mainform.setDetectionRec(
        (int)frame_corners[0].X, (int)frame_corners[0].Y,
        (int)frame_corners[1].X, (int)frame_corners[1].Y,
        (int)frame_corners[2].X, (int)frame_corners[2].Y,
        (int)frame_corners[3].X, (int)frame_corners[3].Y);

    mainform.isComputing = false;
    thisobj.isComputing = false;

    //Cv2.DrawKeypoints(imgOrig, goodkey, imgOrig);
    //Cv2.DrawKeypoints(img, f_keypoints, img);
    //Cv2.ImWrite("temtem.png", img);
}
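// For context, a hedged sketch of how the target-side fields used above
// (surfobj, t_keypoints, t_descriptor, obj_corners) are typically prepared once
// at startup; 'targetImg' and this helper itself are hypothetical:
public static void setTarget(TestSURF thisobj, Mat targetImg)
{
    // Keypoints/descriptors of the target image; these act as the query side of Match().
    thisobj.t_keypoints = thisobj.surfobj.Detect(targetImg);
    thisobj.t_descriptor = new Mat();
    thisobj.surfobj.Compute(targetImg, ref thisobj.t_keypoints, thisobj.t_descriptor);

    // Four corners of the target image, mapped into each frame by the homography.
    thisobj.obj_corners = new Point2d[]
    {
        new Point2d(0, 0),
        new Point2d(targetImg.Cols, 0),
        new Point2d(targetImg.Cols, targetImg.Rows),
        new Point2d(0, targetImg.Rows),
    };
}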
/// <summary>
/// Compute the BRISK features and descriptors on an image.
/// </summary>
/// <param name="image">The input image.</param>
/// <param name="mask">Optional operation mask.</param>
/// <param name="keyPoints">The detected keypoints.</param>
/// <param name="descriptors">The computed descriptors.</param>
/// <param name="useProvidedKeypoints">If true, detection is skipped and descriptors are computed for the provided keypoints.</param>
public void Run(InputArray image, InputArray mask, out KeyPoint[] keyPoints, OutputArray descriptors, bool useProvidedKeypoints = false)
{
    ThrowIfDisposed();
    if (image == null)
        throw new ArgumentNullException("image");
    if (descriptors == null)
        throw new ArgumentNullException("descriptors");
    image.ThrowIfDisposed();
    descriptors.ThrowIfNotReady();

    using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_BRISK_run2(ptr, image.CvPtr, Cv2.ToPtr(mask), keyPointsVec.CvPtr,
            descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
        keyPoints = keyPointsVec.ToArray();
    }
    descriptors.Fix();
}
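// Hedged usage sketch for the BRISK overload above (assumes a default
// 'new BRISK()' constructor and this wrapper's implicit Mat -> InputArray/OutputArray
// conversions; a null mask means "search the whole image"):
public static void BriskExample(Mat gray)
{
    using (var brisk = new BRISK())
    using (var descriptors = new Mat())
    {
        KeyPoint[] keyPoints;
        brisk.Run(gray, null, out keyPoints, descriptors);
        Console.WriteLine("BRISK: {0} keypoints, descriptor matrix {1}x{2}",
            keyPoints.Length, descriptors.Rows, descriptors.Cols);
    }
}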
/// <summary>
/// Select the 512 "best description pairs".
/// </summary>
/// <param name="images">Grayscale image set.</param>
/// <param name="keypoints">Set of detected keypoints.</param>
/// <param name="corrThresh">Correlation threshold.</param>
/// <param name="verbose">Print construction information.</param>
/// <returns>List of best pair indexes.</returns>
public int[] SelectPairs(IEnumerable<Mat> images, out KeyPoint[][] keypoints,
    double corrThresh = 0.7, bool verbose = true)
{
    if (images == null)
        throw new ArgumentNullException("images");
    IntPtr[] imagesPtrs = EnumerableEx.SelectPtrs(images);

    using (var outVec = new VectorOfInt32())
    using (var keypointsVec = new VectorOfVectorKeyPoint())
    {
        NativeMethods.features2d_FREAK_selectPairs(ptr, imagesPtrs, imagesPtrs.Length,
            keypointsVec.CvPtr, corrThresh, verbose ? 1 : 0, outVec.CvPtr);
        keypoints = keypointsVec.ToArray();
        return outVec.ToArray();
    }
}
/// <summary> /// /// </summary> /// <param name="img1"></param> /// <param name="img2"></param> /// <param name="H1to2"></param> /// <param name="keypoints1"></param> /// <param name="keypoints2"></param> /// <param name="repeatability"></param> /// <param name="correspCount"></param> public static void EvaluateFeatureDetector( Mat img1, Mat img2, Mat H1to2, ref KeyPoint[] keypoints1, ref KeyPoint[] keypoints2, out float repeatability, out int correspCount) { if (img1 == null) throw new ArgumentNullException(nameof(img1)); if (img2 == null) throw new ArgumentNullException(nameof(img2)); if (H1to2 == null) throw new ArgumentNullException(nameof(H1to2)); if (keypoints1 == null) throw new ArgumentNullException(nameof(keypoints1)); if (keypoints2 == null) throw new ArgumentNullException(nameof(keypoints2)); using (var keypoints1Vec = new VectorOfKeyPoint(keypoints1)) using (var keypoints2Vec = new VectorOfKeyPoint(keypoints2)) { NativeMethods.features2d_evaluateFeatureDetector( img1.CvPtr, img2.CvPtr, H1to2.CvPtr, keypoints1Vec.CvPtr, keypoints2Vec.CvPtr, out repeatability, out correspCount); keypoints1 = keypoints1Vec.ToArray(); keypoints2 = keypoints2Vec.ToArray(); } }
/// <summary>
/// Finds the keypoints using FAST detector.
/// </summary>
/// <param name="image">Image where keypoints (corners) are detected.
/// Only 8-bit grayscale images are supported.</param>
/// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
/// <param name="keypoints">The output vector of keypoints.</param>
public void Run(GpuMat image, GpuMat mask, out KeyPoint[] keypoints)
{
    if (disposed)
        throw new ObjectDisposedException(GetType().Name);
    if (image == null)
        throw new ArgumentNullException("image");
    if (mask == null)
        throw new ArgumentNullException("mask");

    using (var keypointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.gpu_FAST_GPU_operator2(ptr, image.CvPtr, mask.CvPtr, keypointsVec.CvPtr);
        keypoints = keypointsVec.ToArray();
    }
    GC.KeepAlive(image);
    GC.KeepAlive(mask);
}
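// Hedged usage sketch for the GPU FAST overload above (assumes OpenCvSharp's
// Gpu module: a FAST_GPU(threshold) constructor and a GpuMat(Mat) upload
// constructor; verify both against your version):
public static KeyPoint[] GpuFastExample(Mat grayCpu)
{
    // Full-white CPU mask (detect everywhere), uploaded to the GPU.
    using (var maskCpu = new Mat(grayCpu.Size(), MatType.CV_8UC1, Scalar.All(255)))
    using (var image = new GpuMat(grayCpu))
    using (var mask = new GpuMat(maskCpu))
    using (var detector = new FAST_GPU(20))
    {
        KeyPoint[] keypoints;
        detector.Run(image, mask, out keypoints);
        return keypoints;
    }
}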
/// <summary>
/// Compute the descriptors for a keypoints collection detected in an image collection.
/// </summary>
/// <param name="images">Image collection.</param>
/// <param name="keypoints">Input keypoints collection. keypoints[i] is the set of keypoints detected in images[i].
/// Keypoints for which a descriptor cannot be computed are removed.</param>
/// <param name="descriptors">Descriptor collection. descriptors[i] are the descriptors computed for the set keypoints[i].</param>
public virtual void Compute(IEnumerable<Mat> images, ref KeyPoint[][] keypoints, IEnumerable<Mat> descriptors)
{
    if (disposed)
        throw new ObjectDisposedException(GetType().Name);
    if (images == null)
        throw new ArgumentNullException("images");
    if (descriptors == null)
        throw new ArgumentNullException("descriptors");

    IntPtr[] imagesPtrs = EnumerableEx.SelectPtrs(images);
    IntPtr[] descriptorsPtrs = EnumerableEx.SelectPtrs(descriptors);

    using (var keypointsVec = new VectorOfVectorKeyPoint(keypoints))
    {
        NativeMethods.features2d_Feature2D_compute2(
            ptr, imagesPtrs, imagesPtrs.Length, keypointsVec.CvPtr,
            descriptorsPtrs, descriptorsPtrs.Length);
        keypoints = keypointsVec.ToArray();
    }
}
/// <summary>
/// Compute the descriptors for a set of keypoints in an image.
/// </summary>
/// <param name="image">The image.</param>
/// <param name="keypoints">The input keypoints. Keypoints for which a descriptor cannot be computed are removed.</param>
/// <param name="descriptors">Computed descriptors. Row i is the descriptor for keypoint i.</param>
public virtual void Compute(Mat image, ref KeyPoint[] keypoints, Mat descriptors)
{
    if (image == null)
        throw new ArgumentNullException("image");
    if (descriptors == null)
        throw new ArgumentNullException("descriptors");

    using (var keypointsVec = new VectorOfKeyPoint(keypoints))
    {
        NativeMethods.features2d_DescriptorExtractor_compute1(
            ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
        keypoints = keypointsVec.ToArray();
    }
}
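// Hedged usage sketch for the overload above: detect keypoints first, then
// compute descriptors for the survivors (assumes the 2.x-era API where ORB
// exposes both Detect and this Compute overload; adjust to your detector):
public static Mat DetectThenComputeExample(Mat gray)
{
    var orb = new ORB();
    KeyPoint[] keypoints = orb.Detect(gray);
    Mat descriptors = new Mat();
    orb.Compute(gray, ref keypoints, descriptors);
    // keypoints may now be shorter: points without a descriptor were removed.
    return descriptors;
}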
/// <summary>
/// Computes an image descriptor using the set visual vocabulary.
/// </summary>
/// <param name="image">Image, for which the descriptor is computed.</param>
/// <param name="keypoints">Keypoints detected in the input image.</param>
/// <param name="imgDescriptor">Computed output image descriptor.</param>
/// <param name="pointIdxsOfClusters">Indices of keypoints that belong to each cluster:
/// pointIdxsOfClusters[i] holds the indices of the keypoints belonging to the i-th cluster (word of the vocabulary). Returned if non-zero.</param>
/// <param name="descriptors">Descriptors of the image keypoints, returned if non-zero.</param>
public void Compute(InputArray image, out KeyPoint[] keypoints, OutputArray imgDescriptor,
    out int[][] pointIdxsOfClusters, Mat descriptors = null)
{
    if (IsDisposed)
        throw new ObjectDisposedException(GetType().Name);
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (imgDescriptor == null)
        throw new ArgumentNullException(nameof(imgDescriptor));

    using (var keypointsVec = new VectorOfKeyPoint())
    using (var pointIdxsOfClustersVec = new VectorOfVectorInt())
    {
        NativeMethods.features2d_BOWImgDescriptorExtractor_compute11(ptr, image.CvPtr,
            keypointsVec.CvPtr, imgDescriptor.CvPtr, pointIdxsOfClustersVec.CvPtr, Cv2.ToPtr(descriptors));
        keypoints = keypointsVec.ToArray();
        pointIdxsOfClusters = pointIdxsOfClustersVec.ToArray();
    }
    GC.KeepAlive(image);
    GC.KeepAlive(imgDescriptor);
    GC.KeepAlive(descriptors);
}
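// A hedged end-to-end sketch of the bag-of-visual-words flow this method belongs
// to: cluster a vocabulary with BOWKMeansTrainer, hand it to the extractor, then
// compute per-image descriptors. KAZE/BFMatcher are example choices, and the
// training descriptors are assumed to be precomputed. Note: this overload returns
// keypoints via 'out'; in other wrapper versions the parameter is 'ref' and the
// keypoints must be detected beforehand — verify against your build.
public static Mat BowDescriptorExample(Mat gray, IEnumerable<Mat> trainingDescriptors)
{
    var feature2d = KAZE.Create();
    var trainer = new BOWKMeansTrainer(100);
    foreach (Mat d in trainingDescriptors)
        trainer.Add(d);
    Mat vocabulary = trainer.Cluster();

    var bow = new BOWImgDescriptorExtractor(feature2d, new BFMatcher(NormTypes.L2));
    bow.SetVocabulary(vocabulary);

    KeyPoint[] keypoints;
    int[][] pointIdxsOfClusters;
    var imgDescriptor = new Mat();
    bow.Compute(gray, out keypoints, imgDescriptor, out pointIdxsOfClusters);
    return imgDescriptor;
}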
static int VoteForSizeAndOrientation(KeyPoint[] modelKeyPoints, KeyPoint[] observedKeyPoints,
    DMatch[][] matches, Mat mask, float scaleIncrement, int rotationBins)
{
    int idx = 0;
    int nonZeroCount = 0;
    byte[] maskMat = new byte[mask.Rows];
    GCHandle maskHandle = GCHandle.Alloc(maskMat, GCHandleType.Pinned);

    using (Mat m = new Mat(mask.Rows, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
    {
        mask.CopyTo(m);
        List<float> logScale = new List<float>();
        List<float> rotations = new List<float>();
        double s, maxS, minS, r;
        maxS = -1.0e-10f;
        minS = 1.0e10f;

        // If you get an exception here, you're passing the model and observed
        // keypoints in backwards. Just switch the order.
        for (int i = 0; i < maskMat.Length; i++)
        {
            if (maskMat[i] > 0)
            {
                KeyPoint observedKeyPoint = observedKeyPoints[i];
                KeyPoint modelKeyPoint = modelKeyPoints[matches[i][0].TrainIdx];
                s = Math.Log10(observedKeyPoint.Size / modelKeyPoint.Size);
                logScale.Add((float)s);
                maxS = s > maxS ? s : maxS;
                minS = s < minS ? s : minS;

                r = observedKeyPoint.Angle - modelKeyPoint.Angle;
                r = r < 0.0f ? r + 360.0f : r;
                rotations.Add((float)r);
            }
        }

        int scaleBinSize = (int)Math.Ceiling((maxS - minS) / Math.Log10(scaleIncrement));
        if (scaleBinSize < 2)
            scaleBinSize = 2;
        // Upper bound is minS + scaleBinSize * log10(scaleIncrement) (multiplication,
        // not addition) so the range covers every observed log-scale.
        float[] scaleRanges = { (float)minS, (float)(minS + scaleBinSize * Math.Log10(scaleIncrement)) };

        using (MatOfFloat scalesMat = new MatOfFloat(rows: logScale.Count, cols: 1, data: logScale.ToArray()))
        using (MatOfFloat rotationsMat = new MatOfFloat(rows: rotations.Count, cols: 1, data: rotations.ToArray()))
        using (MatOfFloat flagsMat = new MatOfFloat(logScale.Count, 1))
        using (Mat hist = new Mat())
        {
            flagsMat.SetTo(new Scalar(0.0f));
            int[] histSize = { scaleBinSize, rotationBins };
            int[] channels = { 0, 1 };
            Rangef[] ranges =
            {
                new Rangef(scaleRanges[0], scaleRanges[1]),
                new Rangef(rotations.Min(), rotations.Max()),
            };

            double minVal, maxVal;
            Mat[] arrs = { scalesMat, rotationsMat };
            // Build a 2D histogram over (log-scale, rotation), keep only the dominant
            // bins, and back-project to flag the matches that voted for them.
            Cv2.CalcHist(arrs, channels, null, hist, 2, histSize, ranges);
            Cv2.MinMaxLoc(hist, out minVal, out maxVal);
            Cv2.Threshold(hist, hist, maxVal * 0.5, 0, ThresholdTypes.Tozero);
            Cv2.CalcBackProject(arrs, channels, hist, flagsMat, ranges);

            MatIndexer<float> flagsMatIndexer = flagsMat.GetIndexer();
            for (int i = 0; i < maskMat.Length; i++)
            {
                if (maskMat[i] > 0)
                {
                    if (flagsMatIndexer[idx++] != 0.0f)
                    {
                        nonZeroCount++;
                    }
                    else
                        maskMat[i] = 0;
                }
            }
            m.CopyTo(mask);
        }
    }
    maskHandle.Free();
    return nonZeroCount;
}
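// Hedged usage sketch: the mask passed to VoteForSizeAndOrientation is typically
// produced by a ratio test over knn matches (Lowe's criterion). Names and the
// 0.75 ratio here are illustrative, not a fixed API:
public static int MatchAndVoteExample(Mat modelDescriptors, Mat observedDescriptors,
    KeyPoint[] modelKeyPoints, KeyPoint[] observedKeyPoints)
{
    var matcher = new BFMatcher(NormTypes.L2);
    DMatch[][] matches = matcher.KnnMatch(observedDescriptors, modelDescriptors, k: 2);

    using (var mask = new Mat(matches.Length, 1, MatType.CV_8U, Scalar.All(0)))
    {
        var indexer = mask.GetGenericIndexer<byte>();
        for (int i = 0; i < matches.Length; i++)
        {
            // Keep a match only when clearly better than the runner-up.
            if (matches[i].Length >= 2 && matches[i][0].Distance < 0.75f * matches[i][1].Distance)
                indexer[i, 0] = 255;
        }
        return VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask,
            scaleIncrement: 1.5f, rotationBins: 20);
    }
}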
/// <summary> /// /// </summary> /// <param name="image"></param> /// <param name="keypoints"></param> /// <param name="descriptors"></param> public void Compute(Mat image, out KeyPoint[] keypoints, Mat descriptors) { if (image == null) throw new ArgumentNullException("image"); using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint()) { NativeMethods.features2d_Feature2D_compute(ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr); keypoints = keypointsVec.ToArray(); } }
/// <summary>
/// Converts std::vector to managed array
/// </summary>
/// <returns>Managed KeyPoint array copied from the native vector.</returns>
public KeyPoint[] ToArray()
{
    int size = Size;
    if (size == 0)
    {
        return new KeyPoint[0];
    }
    KeyPoint[] dst = new KeyPoint[size];
    using (ArrayAddress1<KeyPoint> dstPtr = new ArrayAddress1<KeyPoint>(dst))
    {
        Util.CopyMemory(dstPtr, ElemPtr, Marshal.SizeOf(typeof(KeyPoint)) * dst.Length);
    }
    return dst;
}
/// <summary> /// /// </summary> /// <param name="data"></param> public StdVectorKeyPoint(KeyPoint[] data) { if (data == null) throw new ArgumentNullException("data"); ptr = CppInvoke.vector_cvKeyPoint_new3(data, new IntPtr(data.Length)); }
public static extern void features2d_drawKeypoints(
    IntPtr image, KeyPoint[] keypoints, int keypointsLength,
    IntPtr outImage, CvScalar color, int flags);
public static extern void features2d_drawMatches2(
    IntPtr img1, KeyPoint[] keypoints1, int keypoints1Length,
    IntPtr img2, KeyPoint[] keypoints2, int keypoints2Length,
    IntPtr[] matches1to2, int matches1to2Size1, int[] matches1to2Size2,
    IntPtr outImg, CvScalar matchColor, CvScalar singlePointColor,
    IntPtr[] matchesMask, int matchesMaskSize1, int[] matchesMaskSize2,
    int flags);
/// <summary>
/// Computes an image descriptor using the set visual vocabulary.
/// </summary>
/// <param name="image">Image, for which the descriptor is computed.</param>
/// <param name="keypoints">Keypoints detected in the input image.</param>
/// <param name="imgDescriptor">Computed output image descriptor.</param>
public void Compute2(Mat image, out KeyPoint[] keypoints, Mat imgDescriptor)
{
    if (IsDisposed)
        throw new ObjectDisposedException(GetType().Name);
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (imgDescriptor == null)
        throw new ArgumentNullException(nameof(imgDescriptor));

    using (var keypointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_BOWImgDescriptorExtractor_compute2(
            ptr, image.CvPtr, keypointsVec.CvPtr, imgDescriptor.CvPtr);
        keypoints = keypointsVec.ToArray();
    }
    GC.KeepAlive(image);
    GC.KeepAlive(imgDescriptor);
}
/// <summary>
/// Compute the descriptors for a set of keypoints in an image.
/// </summary>
/// <param name="image">The image.</param>
/// <param name="inKeypoints">The input keypoints. Keypoints for which a descriptor cannot be computed are removed.</param>
/// <param name="outKeypoints">The keypoints that remain after descriptor computation.</param>
/// <param name="descriptors">Computed descriptors. Row i is the descriptor for keypoint i.</param>
public virtual void Compute(InputArray image, KeyPoint[] inKeypoints, out KeyPoint[] outKeypoints,
    OutputArray descriptors)
{
    if (image == null)
        throw new ArgumentNullException("image");
    if (descriptors == null)
        throw new ArgumentNullException("descriptors");
    if (disposed)
        throw new ObjectDisposedException(GetType().Name);

    using (var keypointsVec = new VectorOfKeyPoint(inKeypoints))
    {
        NativeMethods.features2d_Feature2D_compute1(ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
        outKeypoints = keypointsVec.ToArray();
    }
}
public static extern void features2d_drawMatches1(
    IntPtr img1, KeyPoint[] keypoints1, int keypoints1Length,
    IntPtr img2, KeyPoint[] keypoints2, int keypoints2Length,
    DMatch[] matches1to2, int matches1to2Length,
    IntPtr outImg, Scalar matchColor, Scalar singlePointColor,
    byte[] matchesMask, int matchesMaskLength, int flags);
/// <summary>
/// Detects keypoints and computes the descriptors.
/// </summary>
/// <param name="image">The input image.</param>
/// <param name="mask">Optional mask specifying where to look for keypoints.</param>
/// <param name="keypoints">The detected keypoints.</param>
/// <param name="descriptors">The computed descriptors.</param>
/// <param name="useProvidedKeypoints">If true, detection is skipped and descriptors are computed for the provided keypoints.</param>
public virtual void DetectAndCompute(
    InputArray image, InputArray mask,
    out KeyPoint[] keypoints, OutputArray descriptors,
    bool useProvidedKeypoints = false)
{
    if (disposed)
        throw new ObjectDisposedException(GetType().Name);
    if (image == null)
        throw new ArgumentNullException("image");
    if (descriptors == null)
        throw new ArgumentNullException("descriptors");
    image.ThrowIfDisposed();
    if (mask != null)
        mask.ThrowIfDisposed();

    using (var keypointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_Feature2D_detectAndCompute(
            ptr, image.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr, descriptors.CvPtr,
            useProvidedKeypoints ? 1 : 0);
        keypoints = keypointsVec.ToArray();
    }
    GC.KeepAlive(image);
    GC.KeepAlive(mask);
    descriptors.Fix();
}
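// Usage sketch for DetectAndCompute above with ORB (ORB.Create() per the
// OpenCvSharp 3.x-style factory; a null mask means "search the whole image"):
public static void DetectAndComputeExample(Mat gray)
{
    using (var orb = ORB.Create())
    using (var descriptors = new Mat())
    {
        KeyPoint[] keypoints;
        orb.DetectAndCompute(gray, null, out keypoints, descriptors);
        Console.WriteLine("{0} keypoints, {1} descriptor rows", keypoints.Length, descriptors.Rows);
    }
}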
#if LANG_JP
/// <summary>
/// keypoint を検出し,その SURF ディスクリプタを計算します.[useProvidedKeypoints = true]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
/// <param name="useProvidedKeypoints"></param>
#else
/// <summary>
/// Detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
/// <param name="useProvidedKeypoints"></param>
#endif
public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints, OutputArray descriptors,
    bool useProvidedKeypoints = false)
{
    ThrowIfDisposed();
    if (img == null)
        throw new ArgumentNullException("img");
    if (descriptors == null)
        throw new ArgumentNullException("descriptors");
    img.ThrowIfDisposed();
    descriptors.ThrowIfNotReady();

    using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.nonfree_SURF_run2_OutputArray(ptr, img.CvPtr, Cv2.ToPtr(mask),
            keypointsVec.CvPtr, descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
        keypoints = keypointsVec.ToArray();
    }
}
#if LANG_JP
/// <summary>
/// keypoint を検出し,その SIFT ディスクリプタを計算します.
/// </summary>
/// <param name="img">Input 8-bit grayscale image</param>
/// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
/// <param name="keypoints">The input/output vector of keypoints</param>
/// <param name="descriptors">The output matrix of descriptors. </param>
/// <param name="useProvidedKeypoints">Boolean flag. If it is true, the keypoint detector is not run.
/// Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.</param>
#else
/// <summary>
/// detects keypoints and computes the SIFT descriptors for them.
/// </summary>
/// <param name="img">Input 8-bit grayscale image</param>
/// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
/// <param name="keypoints">The input/output vector of keypoints</param>
/// <param name="descriptors">The output matrix of descriptors. </param>
/// <param name="useProvidedKeypoints">Boolean flag. If it is true, the keypoint detector is not run.
/// Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.</param>
#endif
public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints, out float[] descriptors,
    bool useProvidedKeypoints = false)
{
    // SIFT apparently cannot return descriptors through std::vector<float>,
    // so convert via an intermediate Mat instead.
    using (var descriptorsMat = new MatOfFloat())
    {
        Run(img, mask, out keypoints, descriptorsMat, useProvidedKeypoints);
        descriptors = descriptorsMat.ToArray();
    }
}
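// The flat float[] produced above packs the descriptors row-major: SIFT
// descriptors are 128-dimensional, so element [i * 128 + j] is component j of
// descriptor i. A hedged helper to regroup them (assumes the row-major
// flattening of MatOfFloat.ToArray):
public static float[][] SplitSiftDescriptors(float[] flat, int keypointCount)
{
    const int Dim = 128; // SIFT descriptor dimensionality
    var rows = new float[keypointCount][];
    for (int i = 0; i < keypointCount; i++)
    {
        rows[i] = new float[Dim];
        Array.Copy(flat, i * Dim, rows[i], 0, Dim);
    }
    return rows;
}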
// Get a key point's coordinates from the given enum.
public int[] get_key_point(KeyPoint key_point)
{
    switch (key_point)
    {
        case KeyPoint.DUNGEON_ENTRANCE:
            return dungeon_entrance;
        case KeyPoint.PORTAL_ENTRANCE:
            return portal_entrance;
        case KeyPoint.TOWN_1:
            return town_1;
        case KeyPoint.TOWN_2:
            return town_2;
        case KeyPoint.TOWN_3:
            return town_3;
        default:
            return null;
    }
}
/// <summary> /// /// </summary> /// <param name="image"></param> /// <param name="keypoints"></param> /// <param name="threshold"></param> /// <param name="nonmax_supression"></param> public static void FAST(Mat image, out KeyPoint[] keypoints, int threshold, bool nonmax_supression = true) { if (image == null) throw new ArgumentNullException("image"); using (StdVectorKeyPoint kp = new StdVectorKeyPoint()) { CppInvoke.cv_FAST(image.CvPtr, kp.CvPtr, threshold, nonmax_supression); keypoints = kp.ToArray(); } }
#if LANG_JP
/// <summary>
/// keypoint を検出し,その SURF ディスクリプタを計算します.[useProvidedKeypoints = false]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
#else
/// <summary>
/// Detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = false]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
#endif
public void Extract(Mat img, Mat mask, out KeyPoint[] keypoints, out float[][] descriptors)
{
    if (img == null)
        throw new ArgumentNullException("img");

    CvMat imgMat = img.ToCvMat();
    CvMat maskMat = (mask == null) ? null : mask.ToCvMat();

    CvSURFPoint[] kpt;
    Cv.ExtractSURF(imgMat, maskMat, out kpt, out descriptors, this);

    keypoints = new KeyPoint[kpt.Length];
    for (int i = 0; i < keypoints.Length; i++)
    {
        CvSURFPoint p = kpt[i];
        keypoints[i] = new KeyPoint(p.Pt, (float)p.Size, p.Dir, p.Hessian, GetPointOctave(p, this));
    }
}
#if LANG_JP
/// <summary>
/// StarDetectorアルゴリズムによりキーポイントを取得する
/// </summary>
/// <param name="image">8ビット グレースケールの入力画像</param>
/// <returns></returns>
#else
/// <summary>
/// Retrieves keypoints using the StarDetector algorithm.
/// </summary>
/// <param name="image">The input 8-bit grayscale image</param>
/// <returns>The detected keypoints.</returns>
#endif
public KeyPoint[] GetKeyPoints(Mat image)
{
    if (image == null)
        throw new ArgumentNullException("image");

    using (CvMemStorage storage = new CvMemStorage(0))
    {
        IntPtr ptr = CvInvoke.cvGetStarKeypoints(image.ToCvMat().CvPtr, storage.CvPtr, _p);
        if (ptr == IntPtr.Zero)
        {
            return new KeyPoint[0];
        }
        CvSeq<CvStarKeypoint> keypoints = new CvSeq<CvStarKeypoint>(ptr);

        KeyPoint[] result = new KeyPoint[keypoints.Total];
        for (int i = 0; i < keypoints.Total; i++)
        {
            CvStarKeypoint kpt = keypoints[i].Value;
            result[i] = new KeyPoint(kpt.Pt, (float)kpt.Size, -1.0f, kpt.Response, 0);
        }
        return result;
    }
}
#if LANG_JP
/// <summary>
/// keypoint を検出し,その SURF ディスクリプタを計算します.[useProvidedKeypoints = true]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
#else
/// <summary>
/// Detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
#endif
public void Extract(Mat img, Mat mask, KeyPoint[] keypoints, out float[][] descriptors)
{
    if (img == null)
        throw new ArgumentNullException("img");

    CvMat imgMat = img.ToCvMat();
    CvMat maskMat = (mask == null) ? null : mask.ToCvMat();

    // Convert the provided keypoints to CvSURFPoint for the native call
    CvSURFPoint[] kpt = new CvSURFPoint[keypoints.Length];
    for (int i = 0; i < keypoints.Length; i++)
    {
        KeyPoint k = keypoints[i];
        kpt[i] = new CvSURFPoint(k.Pt, 1, (int)Math.Round(k.Size), k.Angle, k.Response);
    }

    Cv.ExtractSURF(imgMat, maskMat, ref kpt, out descriptors, this, true);

    // Write back the (possibly updated) keypoints
    for (int i = 0; i < kpt.Length; i++)
    {
        CvSURFPoint p = kpt[i];
        keypoints[i] = new KeyPoint(p.Pt, p.Size, p.Dir, p.Hessian, GetPointOctave(p, this));
    }
}
/// <summary>
/// Compute the BRISK features and descriptors on an image.
/// </summary>
/// <param name="image">The input image.</param>
/// <param name="mask">Optional operation mask.</param>
/// <param name="keyPoints">The detected keypoints.</param>
/// <param name="descriptors">The computed descriptors as a flat float array.</param>
/// <param name="useProvidedKeypoints">If true, detection is skipped and descriptors are computed for the provided keypoints.</param>
public void Run(InputArray image, InputArray mask, out KeyPoint[] keyPoints, out float[] descriptors,
    bool useProvidedKeypoints = false)
{
    using (MatOfFloat descriptorsMat = new MatOfFloat())
    {
        Run(image, mask, out keyPoints, descriptorsMat, useProvidedKeypoints);
        descriptors = descriptorsMat.ToArray();
    }
}
public void process(Ecs ecs, float delta, params dynamic[] args)
{
    if (args[0] == "render")
    {
        if (args[1] == "local")
        {
            LocalMap map = ecs.loadedMaps[(int)NeighbourMapDirection.center];
            if (map.dirty)
            {
                TileMap gameMap = (TileMap)GetNode("/root/scene/gamespace/GameMap");
                var tileSet = gameMap.TileSet;
                map.dirty = false;

                List<string> tags = new List<string>();
                tags.Add("Camera2D");
                tags.Add("LocalCamera");
                tags.Add("ViewPort");
                List<Entity> entities = ecs.getEntities(tags);
                var camera = entities[0];
                ViewPort viewPort = camera.getComponent<ViewPort>();
                int w = viewPort.x + viewPort.w;
                int h = viewPort.y + viewPort.h;

                List<string> playerTags = new List<string>();
                playerTags.Add("Player");
                List<Entity> playerList = ecs.getEntities(playerTags);
                Player player = (Player)playerList[0];
                LightRadius lightRadius = player.getComponent<LightRadius>();
                WorldPosition position = player.getComponent<WorldPosition>();

                // Recompute the field of view around the player and mark visible cells explored
                map.ComputeFov(position.localMapX, position.localMapY, lightRadius.radius, true);
                foreach (Cell cell in map.GetAllCells())
                {
                    if (cell.IsInFov)
                    {
                        map.SetCellProperties(cell.X, cell.Y, cell.IsTransparent, cell.IsWalkable, true);
                    }
                }

                // NOTE: This is a hack to get the camera to display in the top left
                // of the screen. Consider setting this in the camera class instead?
                int x = 0;
                int y = 0;
                for (int i = viewPort.x; i < (viewPort.x + viewPort.w); i++)
                {
                    for (int j = viewPort.y; j < (viewPort.y + viewPort.h); j++)
                    {
                        if (i < 0 || j < 0 || i > map.Width - 1 || j > map.Height - 1)
                        {
                            gameMap.SetCell(x, y, -1);
                        }
                        else
                        {
                            // Hash the cell and its vertical neighbours to look up their vicinities
                            KeyPoint p = new KeyPoint(i, j);
                            KeyPoint p2 = new KeyPoint(i, j - 1);
                            KeyPoint p3 = new KeyPoint(i, j + 1);
                            int hc1 = p.GetHashCode();
                            int hc2 = p2.GetHashCode();
                            int hc3 = p3.GetHashCode();

                            Vicinity topNeighbour;
                            Vicinity bottomNeighbour;
                            bool hasTopNeighbour = false;
                            bool hasBottomNeighbour = false;
                            if (j >= 1)
                            {
                                hasTopNeighbour = true;
                                if (!map.vicinities.ContainsKey(hc2))
                                {
                                    GD.Print(p2.X);
                                    GD.Print(p2.Y);
                                }
                                topNeighbour = map.vicinities[hc2];
                            }
                            else
                            {
                                topNeighbour = map.vicinities[hc1];
                            }
                            if (j <= 126) // last row that still has a cell below it (assumes a map height of 128)
                            {
                                hasBottomNeighbour = true;
                                bottomNeighbour = map.vicinities[hc3];
                            }
                            else
                            {
                                bottomNeighbour = map.vicinities[hc1];
                            }

                            Vicinity vicinity = map.vicinities[hc1];
                            string tileID = vicinity.getValue();
                            string tileColor = vicinity.getColor();

                            // Use the terminal tile variant where a vertical run of equal tiles ends
                            if (hasTopNeighbour && topNeighbour.getValue() == vicinity.getValue() && !hasBottomNeighbour)
                            {
                                tileID = vicinity.getTerminalValue();
                            }
                            else if (hasTopNeighbour && topNeighbour.getValue() == vicinity.getValue() &&
                                     hasBottomNeighbour && bottomNeighbour.getValue() != vicinity.getValue())
                            {
                                tileID = vicinity.getTerminalValue();
                            }
                            else if (hasTopNeighbour && hasBottomNeighbour &&
                                     topNeighbour.getValue() != vicinity.getValue() &&
                                     bottomNeighbour.getValue() != vicinity.getValue())
                            {
                                tileID = vicinity.getTerminalValue();
                            }
                            else
                            {
                                tileID = vicinity.getValue();
                            }

                            Cell cell = (Cell)map.GetCell(i, j);
                            if (cell.IsExplored && cell.IsInFov)
                            {
                                string tileName = tileID + "_" + tileColor;
                                int id = tileSet.FindTileByName(tileName);
                                gameMap.SetCell(x, y, id);
                            }
                            else if (cell.IsExplored)
                            {
                                // Explored but out of FOV: draw with the dimmed palette
                                string tileName = tileID + "_" + "101024";
                                int id = tileSet.FindTileByName(tileName);
                                gameMap.SetCell(x, y, id);
                            }
                            else
                            {
                                gameMap.SetCell(x, y, -1);
                            }
                        }
                        y += 1;
                    }
                    x += 1;
                    y = 0;
                }
            }
        }
    }
}