Example #1
        /// <summary>
        /// Draw a set of detected ChArUco Diamond markers.
        /// </summary>
        /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
        /// <param name="diamondCorners">positions of diamond corners in the same format returned by detectCharucoDiamond(). (e.g std::vector&lt;std::vector&lt;cv::Point2f&gt;&gt;). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
        /// <param name="diamondIds">vector of identifiers for diamonds in diamondCorners, in the same format returned by detectCharucoDiamond() (e.g. std::vector&lt;Vec4i&gt;). Optional, if not provided, ids are not painted.</param>
        /// <param name="borderColor">color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one.</param>
        public static void DrawDetectedDiamonds(InputArray image,
                                                Point2f[][] diamondCorners, IEnumerable<Vec4i>? diamondIds, Scalar borderColor)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (diamondCorners == null)
            {
                throw new ArgumentNullException(nameof(diamondCorners));
            }

            using var cornersAddress = new ArrayAddress2<Point2f>(diamondCorners);

            if (diamondIds == null)
            {
                NativeMethods.HandleException(
                    NativeMethods.aruco_drawDetectedDiamonds(image.CvPtr,
                                                             cornersAddress.GetPointer(), cornersAddress.GetDim1Length(), cornersAddress.GetDim2Lengths(),
                                                             IntPtr.Zero, borderColor));
            }
            else
            {
                using var ids = new VectorOfVec4i(diamondIds);

                NativeMethods.HandleException(
                    NativeMethods.aruco_drawDetectedDiamonds(image.CvPtr,
                                                             cornersAddress.GetPointer(), cornersAddress.GetDim1Length(), cornersAddress.GetDim2Lengths(),
                                                             ids.CvPtr, borderColor));
            }

            GC.KeepAlive(image);
        }
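
For reference, a minimal usage sketch of this method, assuming OpenCvSharp's CvAruco static class together with the detection helpers shown in these examples; the file name, dictionary choice and square/marker length rate are illustrative:

        // Hypothetical usage: detect ArUco markers and diamonds, then overlay the diamonds.
        using var image = Cv2.ImRead("charuco_board.png");
        var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict4X4_50);
        CvAruco.DetectMarkers(image, dict, out var corners, out var ids,
                              DetectorParameters.Create(), out _);
        CvAruco.DetectCharucoDiamond(image, corners, ids, 2.0f,
                                     out var diamondCorners, out var diamondIds);
        CvAruco.DrawDetectedDiamonds(image, diamondCorners, diamondIds, Scalar.Red);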
Example #2
        /// <summary>
        /// Detect ChArUco Diamond markers.
        /// </summary>
        /// <param name="image">input image necessary for corner subpixel.</param>
        /// <param name="markerCorners">list of detected marker corners from detectMarkers function.</param>
        /// <param name="markerIds">list of marker ids in markerCorners.</param>
        /// <param name="squareMarkerLengthRate">rate between square and marker length: squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.</param>
        /// <param name="diamondCorners">output list of detected diamond corners (4 corners per diamond). The order is the same than in marker corners: top left, top right, bottom right and bottom left. Similar format than the corners returned by detectMarkers (e.g std::vector&lt;std::vector&lt;cv::Point2f&gt;&gt;).</param>
        /// <param name="diamondIds">ids of the diamonds in diamondCorners. The id of each diamond is in fact of type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the diamond.</param>
        /// <param name="cameraMatrix">Optional camera calibration matrix.</param>
        /// <param name="distCoeffs">Optional camera distortion coefficients.</param>
        public static void DetectCharucoDiamond(InputArray image, Point2f[][] markerCorners, IEnumerable<int> markerIds,
                                                float squareMarkerLengthRate, out Point2f[][] diamondCorners, out Vec4i[] diamondIds,
                                                InputArray? cameraMatrix = null, InputArray? distCoeffs = null)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (markerCorners == null)
            {
                throw new ArgumentNullException(nameof(markerCorners));
            }
            if (markerIds == null)
            {
                throw new ArgumentNullException(nameof(markerIds));
            }

            if (cameraMatrix == null && distCoeffs != null)
            {
                throw new ArgumentNullException(nameof(cameraMatrix));
            }
            if (cameraMatrix != null && distCoeffs == null)
            {
                throw new ArgumentNullException(nameof(distCoeffs));
            }

            image.ThrowIfDisposed();

            cameraMatrix?.ThrowIfDisposed();
            distCoeffs?.ThrowIfDisposed();

            using var markerCornersAddress = new ArrayAddress2<Point2f>(markerCorners);
            using var markerIdsVec         = new VectorOfInt32(markerIds);

            using var diamondCornersVec = new VectorOfVectorPoint2f();
            using var diamondIdsVec     = new VectorOfVec4i();

            NativeMethods.HandleException(
                NativeMethods.aruco_detectCharucoDiamond(
                    image.CvPtr, markerCornersAddress.GetPointer(), markerCornersAddress.GetDim1Length(), markerCornersAddress.GetDim2Lengths(),
                    markerIdsVec.CvPtr, squareMarkerLengthRate,
                    diamondCornersVec.CvPtr, diamondIdsVec.CvPtr,
                    cameraMatrix?.CvPtr ?? IntPtr.Zero, distCoeffs?.CvPtr ?? IntPtr.Zero));

            diamondCorners = diamondCornersVec.ToArray();
            diamondIds     = diamondIdsVec.ToArray();

            GC.KeepAlive(image);
            GC.KeepAlive(cameraMatrix);
            GC.KeepAlive(distCoeffs);
        }
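
The guard clauses above require cameraMatrix and distCoeffs to be supplied together or not at all. A calibrated call might look like the following sketch, reusing the inputs from the example above; the intrinsics and distortion values are placeholders:

        // Hypothetical calibrated call: pass both cameraMatrix and distCoeffs, or neither.
        using Mat cameraMatrix = Mat.Eye(3, 3, MatType.CV_64FC1);  // placeholder intrinsics
        using Mat distCoeffs = Mat.Zeros(1, 5, MatType.CV_64FC1);  // placeholder distortion
        CvAruco.DetectCharucoDiamond(image, markerCorners, markerIds, 2.0f,
                                     out var diamondCorners, out var diamondIds,
                                     cameraMatrix, distCoeffs);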
Example #3
        /// <summary>
        /// Downloads results from cuda::HoughLinesDetector::detect to host memory.
        /// </summary>
        /// <param name="d_lines">Result of cuda::HoughLinesDetector::detect .</param>
        /// <param name="h_lines">Output host array.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        public virtual void downloadResults(InputArray d_lines, out LineSegmentPoint[] h_lines, Stream stream = null)
        {
            if (d_lines == null)
            {
                throw new ArgumentNullException(nameof(d_lines));
            }
            d_lines.ThrowIfDisposed();

            using (var vec = new VectorOfVec4i())
            {
                NativeMethods.cuda_imgproc_HoughSegmentDetector_downloadResults(
                    ptr, d_lines.CvPtr, vec.CvPtr, stream?.CvPtr ?? Stream.Null.CvPtr);
                h_lines = vec.ToArray<LineSegmentPoint>();
            }

            GC.KeepAlive(this);
            GC.KeepAlive(d_lines);
            GC.KeepAlive(h_lines);
        }
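
For orientation, a sketch of where this download step sits in a GPU Hough pipeline. The detector construction and its GPU-side detect call are assumed from the corresponding cuda wrapper and may differ between versions; detector, edgesGpu and canvas are illustrative names:

        // Hypothetical sketch: detect segments on the GPU, then copy them to the host.
        using var dLines = new GpuMat();
        detector.detect(edgesGpu, dLines);  // assumed GPU-side detection call
        detector.downloadResults(dLines, out LineSegmentPoint[] segments);
        foreach (var s in segments)
            Cv2.Line(canvas, s.P1, s.P2, Scalar.LimeGreen, 2);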
Example #4
        /// <summary>
        /// Finds line segments in a binary image using the probabilistic Hough transform.
        /// </summary>
        /// <param name="image">8-bit, single-channel binary input image. The image may be modified by the function.</param>
        /// <param name="rho">Distance resolution of the accumulator in pixels</param>
        /// <param name="theta">Angle resolution of the accumulator in radians</param>
        /// <param name="threshold">The accumulator threshold parameter. Only those lines are returned that get enough votes ( &gt; threshold )</param>
        /// <param name="minLineLength">The minimum line length. Line segments shorter than that will be rejected. [By default this is 0]</param>
        /// <param name="maxLineGap">The maximum allowed gap between points on the same line to link them. [By default this is 0]</param>
        /// <returns>The output lines. Each line is represented by a 4-element vector (x1, y1, x2, y2)</returns>
        public static CvLineSegmentPoint[] HoughLinesP(InputArray image, double rho, double theta, int threshold, 
            double minLineLength = 0, double maxLineGap = 0)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();
            using (var vec = new VectorOfVec4i())
            {
                NativeMethods.imgproc_HoughLinesP(image.CvPtr, vec.CvPtr, rho, theta, threshold, minLineLength, maxLineGap);
                return vec.ToArray<CvLineSegmentPoint>();
            }
        }
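
Typical use pairs this with an edge detector. A minimal sketch using the 2.x-era API names this snippet appears to come from (LoadMode, CvLineSegmentPoint); the file name and thresholds are illustrative:

        // Sketch: edge-detect, then extract probabilistic Hough segments.
        using (var gray = new Mat("building.jpg", LoadMode.GrayScale))
        using (var edges = new Mat())
        {
            Cv2.Canny(gray, edges, 50, 200);
            CvLineSegmentPoint[] segments =
                Cv2.HoughLinesP(edges, 1, Math.PI / 180, 80, minLineLength: 30, maxLineGap: 10);
            foreach (var s in segments)
                Cv2.Line(gray, s.P1, s.P2, new Scalar(255), 2);  // draw segments onto the image
        }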
Example #5
        /// <summary>
        /// Computes the contour convexity defects
        /// </summary>
        /// <param name="contour">Input contour.</param>
        /// <param name="convexHull">Convex hull obtained using convexHull() that 
        /// should contain indices of the contour points that make the hull.</param>
        /// <returns>The output vector of convexity defects. 
        /// Each convexity defect is represented as a 4-element integer vector 
        /// (a.k.a. cv::Vec4i): (start_index, end_index, farthest_pt_index, fixpt_depth), 
        /// where the indices are 0-based indices in the original contour of the convexity defect's 
        /// beginning, its end and the farthest point, and fixpt_depth is a fixed-point approximation 
        /// (with 8 fractional bits) of the distance between the farthest contour point and the hull. 
        /// That is, to get the floating-point depth, divide fixpt_depth by 256.0.</returns>
        public static Vec4i[] ConvexityDefects(IEnumerable<Point2f> contour, IEnumerable<int> convexHull)
        {
            if (contour == null)
                throw new ArgumentNullException("contour");
            if (convexHull == null)
                throw new ArgumentNullException("convexHull");
            Point2f[] contourArray = EnumerableEx.ToArray(contour);
            int[] convexHullArray = EnumerableEx.ToArray(convexHull);
            IntPtr convexityDefectsPtr;
            NativeMethods.imgproc_convexityDefects_Point2f(contourArray, contourArray.Length,
                convexHullArray, convexHullArray.Length, out convexityDefectsPtr);

            using (var convexityDefects = new VectorOfVec4i(convexityDefectsPtr))
            {
                return convexityDefects.ToArray();
            }
        }
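
A small self-contained sketch: build a concave contour, take its hull as indices (here via the wrapper's Cv2.ConvexHullIndices helper, assumed available in this version), and read the fixed-point depth back:

        // Sketch: the notch at (5, 4) produces one convexity defect.
        var contour = new[]
        {
            new Point2f(0, 0), new Point2f(10, 0), new Point2f(10, 10),
            new Point2f(5, 4), new Point2f(0, 10),
        };
        int[] hull = Cv2.ConvexHullIndices(contour);   // hull as indices into contour
        Vec4i[] defects = Cv2.ConvexityDefects(contour, hull);
        foreach (var d in defects)
            Console.WriteLine("start={0} end={1} far={2} depth={3}",
                              d.Item0, d.Item1, d.Item2, d.Item3 / 256.0);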
Example #6
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
        /// <param name="hierarchy">Optional output vector, containing information about the image topology. 
        /// It has as many elements as the number of contours. For each i-th contour contours[i], 
        /// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next 
        /// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. 
        /// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
        public static void FindContours(InputOutputArray image, out Point[][] contours,
            out HierarchyIndex[] hierarchy, ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr, hierarchyPtr;
            NativeMethods.imgproc_findContours1_vector(image.CvPtr, out contoursPtr, out hierarchyPtr, (int)mode, (int)method, offset0);

            using (var contoursVec = new VectorOfVectorPoint(contoursPtr))
            using (var hierarchyVec = new VectorOfVec4i(hierarchyPtr))
            {
                contours = contoursVec.ToArray();
                Vec4i[] hierarchyOrg = hierarchyVec.ToArray();
                hierarchy = EnumerableEx.SelectToArray(hierarchyOrg, HierarchyIndex.FromVec4i);
            }
            image.Fix();
        }
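
A sketch of the call plus a walk over the resulting hierarchy; binary is assumed to be an 8-bit single-channel Mat (e.g. a threshold or Canny output), and note that FindContours modifies it:

        // Sketch: extract contours with full topology, then draw only the outermost ones.
        Point[][] contours;
        HierarchyIndex[] hierarchy;
        Cv2.FindContours(binary, out contours, out hierarchy,
            ContourRetrieval.Tree, ContourChain.ApproxSimple);
        using (Mat canvas = Mat.Zeros(binary.Rows, binary.Cols, MatType.CV_8UC3))
        {
            for (int i = 0; i < contours.Length; i++)
            {
                if (hierarchy[i].Parent < 0)  // no parent: a top-level contour
                    Cv2.DrawContours(canvas, contours, i, new Scalar(0, 255, 0), 2);
            }
        }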
Example #7
        /// <summary>
        /// Worker thread for image processing.
        /// </summary>
        public void CvMainThread()
        {
            var faceCascade = new CascadeClassifier();
            var eyesCascade = new CascadeClassifier();

            faceCascade.load("haarcascade_frontalface_alt.xml");
            eyesCascade.load("haarcascade_eye_tree_eyeglasses.xml");
            
            var srcFrame = new Mat();
            var dstFrame = new Mat();

            var imgProc = new ImgProc();

            _videoIo.StartCapture();

            while (true)
            {
                _videoIo.GetFrame(srcFrame);

                switch (_processingMethodIndex)
                {
                    // passthrough
                    case 0:
                        break;
                    
                    // gray
                    case 1:
                        imgProc.cvtColor(srcFrame, dstFrame, ColorConversionCodes.COLOR_RGBA2GRAY);
                        imgProc.cvtColor(dstFrame, srcFrame, ColorConversionCodes.COLOR_GRAY2RGB);
                        break;
                    
                    // canny
                    case 3:
                        imgProc.cvtColor(srcFrame, dstFrame, cvRT.ColorConversionCodes.COLOR_RGBA2GRAY);
                        imgProc.GaussianBlur(dstFrame, dstFrame, new cvRT.Size(7, 7), 1.5, 1.5);
                        imgProc.Canny(dstFrame, dstFrame, 0, 30, 3);
                        imgProc.cvtColor(dstFrame, srcFrame, ColorConversionCodes.COLOR_GRAY2RGB);
                        break;

                    // contour
                    case 4:
                    {
                        var contours = new VectorOfVectorOfPoint();
                        var hierarchy = new VectorOfVec4i();
                        var color = new Scalar(255, 255, 255, 255);

                        imgProc.Canny(srcFrame, dstFrame, 100, 100 * 2, 3);
                        imgProc.FindContours(dstFrame, contours, hierarchy, ContourRetrievalAlgorithm.RETR_TREE, ContourApproximationModes.CHAIN_APPROX_SIMPLE, new Point(0, 0));

                        srcFrame.Set(new Scalar(0, 0, 0, 0));

                        for (var i = 0; i < contours.Count(); i++)
                        {
                            imgProc.DrawContours(srcFrame, contours, i, color, 2, 8, hierarchy, 0, new Point(0, 0));
                        }
                            
                        break;
                    }

                    // face detect
                    case 5:
                    {
                        imgProc.cvtColor(srcFrame, dstFrame, ColorConversionCodes.COLOR_RGBA2GRAY);
                        imgProc.EqualizeHist(dstFrame, dstFrame);

                        // Faces in the frame.
                        var faces = new List<Rect>();

                        try
                        {
                            faceCascade.detectMultiScale(dstFrame, faces, 1.1, 2, (int)(0 | CV_HAAR.SCALE_IMAGE), new cvRT.Size(30, 30));
                        }
                        catch (Exception ex)
                        {
                            Debug.WriteLine("Exception {0}", ex.Message);
                        }

                        // For each face, detect the eyes
                        foreach (var face in faces)
                        {
                            // Draw ellipse for the face.
                            var faceCenter = new Point(face.X + face.Width / 2, face.Y + face.Height / 2);
                            imgProc.Ellipse(srcFrame, faceCenter, new cvRT.Size(face.Width / 2, face.Height / 2), 0, 0, 360, new Scalar(255, 0, 255, 0), 4, 8, 0);

                            // Detect the eyes for the face
                            var faceRoi = dstFrame.RectOfInterest(face);
                            var eyes = new List<Rect>();
                            eyesCascade.detectMultiScale(faceRoi, eyes, 1.1, 2, (int)(0 | CASCADE_FLAG.CASCADE_SCALE_IMAGE), new cvRT.Size(30, 30));
                            
                            // Draw the eyes
                            foreach (var eye in eyes)
                            {
                                var eyeCenter = new Point(face.X + eye.X + eye.Width / 2, face.Y + eye.Y + eye.Height / 2);
                                var radius = (int)Math.Round((eye.Width + eye.Height) * 0.25);
                                imgProc.Circle(srcFrame, eyeCenter, radius, new Scalar(255, 0, 0, 0), 4, 8, 0);
                            }
                        }

                        break;
                    }

                    default:
                        break;
                }

                _videoIo.ShowFrame(srcFrame);
            }
        }