/// <summary>
/// Performs Chamfer matching of a template edge map against an image edge map
/// (forwards to the native chamerMatching implementation).
/// </summary>
/// <param name="img">Image in which the template is searched for.</param>
/// <param name="templ">Template to search for.</param>
/// <param name="results">Detected matches; each match is reported as an array of points.</param>
/// <param name="cost">Matching cost of each detected match.</param>
/// <param name="templScale">Scale applied to the template (native default semantics).</param>
/// <param name="maxMatches">Maximum number of matches to return.</param>
/// <param name="minMatchDistance">Minimum distance between two separate matches.</param>
/// <param name="padX">Horizontal padding.</param>
/// <param name="padY">Vertical padding.</param>
/// <param name="scales">Number of scales tried between minScale and maxScale.</param>
/// <param name="minScale">Smallest template scale to try.</param>
/// <param name="maxScale">Largest template scale to try.</param>
/// <param name="orientationWeight">Weight of the orientation term in the matching cost.</param>
/// <param name="truncate">Truncation value used by the native matcher.</param>
/// <returns>The integer result of the native chamerMatching call.</returns>
public static int ChamferMatching(
    Mat img, Mat templ,
    out Point[][] results, out float[] cost,
    double templScale = 1, int maxMatches = 20,
    double minMatchDistance = 1.0, int padX = 3,
    int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
    double orientationWeight = 0.5, double truncate = 20)
{
    if (img == null)
        throw new ArgumentNullException("img");
    if (templ == null)
        throw new ArgumentNullException("templ");
    img.ThrowIfDisposed();
    templ.ThrowIfDisposed();

    using (var vecResults = new VectorOfVectorPoint())
    using (var vecCost = new VectorOfFloat())
    {
        int nativeResult = NativeMethods.contrib_chamerMatching(
            img.CvPtr, templ.CvPtr, vecResults.CvPtr, vecCost.CvPtr,
            templScale, maxMatches, minMatchDistance, padX, padY,
            scales, minScale, maxScale, orientationWeight, truncate);
        // Keep the managed Mats alive while native code reads their raw pointers.
        GC.KeepAlive(img);
        GC.KeepAlive(templ);

        results = vecResults.ToArray();
        cost = vecCost.ToArray();
        return nativeResult;
    }
}
/// <summary>
/// evaluate specified ROI and return confidence value for each location in multiple scales
/// </summary>
/// <param name="img">Source image to evaluate.</param>
/// <param name="foundLocations">Rectangles reported by the native detector.</param>
/// <param name="locations">Per-scale detection data: scale, candidate locations, and their confidences.</param>
/// <param name="hitThreshold">Threshold passed through to the native detector (0 by default).</param>
/// <param name="groupThreshold">Grouping threshold passed through to the native detector (0 by default).</param>
public void DetectMultiScaleROI(
    Mat img,
    out Rect[] foundLocations,
    out DetectionROI[] locations,
    double hitThreshold = 0,
    int groupThreshold = 0)
{
    if (disposed)
        throw new ObjectDisposedException("HOGDescriptor");
    if (img == null)
        throw new ArgumentNullException("img");
    img.ThrowIfDisposed();

    using (var flVec = new VectorOfRect())
    using (var scalesVec = new VectorOfDouble())
    using (var locationsVec = new VectorOfVectorPoint())
    using (var confidencesVec = new VectorOfVectorDouble())
    {
        NativeMethods.objdetect_HOGDescriptor_detectMultiScaleROI(
            ptr, img.CvPtr, flVec.CvPtr, scalesVec.CvPtr,
            locationsVec.CvPtr, confidencesVec.CvPtr, hitThreshold, groupThreshold);
        // FIX: keep the managed Mat alive while its raw CvPtr is in use by native code.
        // The sibling wrappers (ChamferMatching, MSER.DetectRegions) already do this;
        // without it the Mat could be finalized during the native call.
        GC.KeepAlive(img);

        foundLocations = flVec.ToArray();
        double[] s = scalesVec.ToArray();
        Point[][] l = locationsVec.ToArray();
        double[][] c = confidencesVec.ToArray();

        // The three parallel result arrays must agree in length before zipping
        // them into DetectionROI records.
        if (s.Length != l.Length || l.Length != c.Length)
            throw new OpenCvSharpException("Invalid result data 'locations'");

        locations = new DetectionROI[s.Length];
        for (int i = 0; i < s.Length; i++)
        {
            locations[i] = new DetectionROI
            {
                Scale = s[i],
                Locations = l[i],
                Confidences = c[i]
            };
        }
    }
}
/// <summary>
/// 2値画像中の輪郭を検出します.
/// </summary>
/// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
/// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
/// <param name="mode">輪郭抽出モード</param>
/// <param name="method">輪郭の近似手法</param>
/// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
/// <returns>検出された輪郭.各輪郭は,点のベクトルとして格納されます.</returns>
#else
/// <summary>
/// Finds contours in a binary image.
/// </summary>
/// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s.
/// Zero pixels remain 0’s, so the image is treated as binary.
/// The function modifies the image while extracting the contours.</param>
/// <param name="mode">Contour retrieval mode</param>
/// <param name="method">Contour approximation method</param>
/// <param name="offset">Optional offset by which every contour point is shifted.
/// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
/// <returns>Detected contours. Each contour is stored as a vector of points.</returns>
#endif
public static Point[][] FindContoursAsArray(InputOutputArray image, ContourRetrieval mode, ContourChain method, Point? offset = null)
{
    if (image == null)
        throw new ArgumentNullException("image");
    image.ThrowIfNotReady();

    // A missing offset means "no shift".
    CvPoint offsetValue = offset.GetValueOrDefault(new Point());

    IntPtr contoursPtr;
    NativeMethods.imgproc_findContours2_vector(
        image.CvPtr, out contoursPtr, (int)mode, (int)method, offsetValue);
    image.Fix();

    // The wrapper takes ownership of the native vector and frees it on Dispose.
    using (var contours = new VectorOfVectorPoint(contoursPtr))
    {
        return contours.ToArray();
    }
}
/// <summary>
/// 2値画像中の輪郭を検出します.
/// </summary>
/// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
/// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
/// <param name="contours">検出された輪郭.各輪郭は,点のベクトルとして格納されます.</param>
/// <param name="hierarchy">画像のトポロジーに関する情報を含む出力ベクトル.これは,輪郭数と同じ数の要素を持ちます.各輪郭 contours[i] に対して,
/// 要素 hierarchy[i]のメンバにはそれぞれ,同じ階層レベルに存在する前後の輪郭,最初の子輪郭,および親輪郭の
/// contours インデックス(0 基準)がセットされます.また,輪郭 i において,前後,親,子の輪郭が存在しない場合,
/// それに対応する hierarchy[i] の要素は,負の値になります.</param>
/// <param name="mode">輪郭抽出モード</param>
/// <param name="method">輪郭の近似手法</param>
/// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
#else
/// <summary>
/// Finds contours in a binary image.
/// </summary>
/// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s.
/// Zero pixels remain 0’s, so the image is treated as binary.
/// The function modifies the image while extracting the contours.</param>
/// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
/// <param name="hierarchy">Optional output vector, containing information about the image topology.
/// It has as many elements as the number of contours. For each i-th contour contours[i],
/// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next
/// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively.
/// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
/// <param name="mode">Contour retrieval mode</param>
/// <param name="method">Contour approximation method</param>
/// <param name="offset">Optional offset by which every contour point is shifted.
/// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
#endif
public static void FindContours(InputOutputArray image, out Point[][] contours, out HierarchyIndex[] hierarchy, ContourRetrieval mode, ContourChain method, Point? offset = null)
{
    if (image == null)
        throw new ArgumentNullException("image");
    image.ThrowIfNotReady();

    // A missing offset means "no shift".
    CvPoint offsetValue = offset.GetValueOrDefault(new Point());

    IntPtr contoursPtr, hierarchyPtr;
    NativeMethods.imgproc_findContours1_vector(
        image.CvPtr, out contoursPtr, out hierarchyPtr, (int)mode, (int)method, offsetValue);

    // The wrappers take ownership of the native vectors and free them on Dispose.
    using (var contoursVec = new VectorOfVectorPoint(contoursPtr))
    using (var hierarchyVec = new VectorOfVec4i(hierarchyPtr))
    {
        contours = contoursVec.ToArray();
        // Convert each raw Vec4i into the friendlier HierarchyIndex struct.
        Vec4i[] rawHierarchy = hierarchyVec.ToArray();
        hierarchy = EnumerableEx.SelectToArray(rawHierarchy, HierarchyIndex.FromVec4i);
    }
    image.Fix();
}
/// <summary>
/// Detects MSER regions (forwards to the native MSER detectRegions call).
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="msers">Detected regions, each reported as an array of points.</param>
/// <param name="bboxes">Bounding boxes reported by the native detector.</param>
public virtual void DetectRegions(
    InputArray image, out Point[][] msers, out Rect[] bboxes)
{
    if (disposed)
        throw new ObjectDisposedException(GetType().Name);
    if (image == null)
        throw new ArgumentNullException("image");
    image.ThrowIfDisposed();

    using (var pointsVec = new VectorOfVectorPoint())
    using (var boxesVec = new VectorOfRect())
    {
        NativeMethods.features2d_MSER_detectRegions(
            ptr, image.CvPtr, pointsVec.CvPtr, boxesVec.CvPtr);
        msers = pointsVec.ToArray();
        bboxes = boxesVec.ToArray();
    }
    // Keep the managed wrapper alive while native code reads its raw pointer.
    GC.KeepAlive(image);
}
/// <summary>
/// MSERのすべての輪郭情報を抽出する
/// </summary>
/// <param name="image"></param>
/// <param name="mask"></param>
/// <returns></returns>
#else
/// <summary>
/// Extracts the contours of Maximally Stable Extremal Regions
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="mask">Optional mask; may be null.</param>
/// <returns>Detected MSER contours; each region is reported as an array of points.</returns>
#endif
public Point[][] Run(Mat image, Mat mask)
{
    ThrowIfDisposed();
    if (image == null)
        throw new ArgumentNullException("image");
    image.ThrowIfDisposed();

    IntPtr msers;
    NativeMethods.features2d_MSER_detect(ptr, image.CvPtr, out msers, Cv2.ToPtr(mask));
    // FIX: keep the managed Mats alive while native code reads their raw pointers,
    // as DetectRegions already does; without this the Mats could be finalized
    // during the native call. GC.KeepAlive(null) is harmless when mask is omitted.
    GC.KeepAlive(image);
    GC.KeepAlive(mask);

    // The wrapper takes ownership of the native vector and frees it on Dispose.
    using (var msersVec = new VectorOfVectorPoint(msers))
    {
        return msersVec.ToArray();
    }
}