/// <summary>
        /// Groups the object candidate rectangles.
        /// </summary>
        /// <param name="rectList"> Input/output vector of rectangles. Output vector includes retained and grouped rectangles.</param>
        /// <param name="groupThreshold">Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.</param>
        /// <param name="eps"></param>
        public static void GroupRectangles(IList<Rect> rectList, int groupThreshold, double eps = 0.2)
        {
            if (rectList == null)
            {
                throw new ArgumentNullException(nameof(rectList));
            }

            using (var rectListVec = new VectorOfRect(rectList))
            {
                NativeMethods.objdetect_groupRectangles1(rectListVec.CvPtr, groupThreshold, eps);
                ClearAndAddRange(rectList, rectListVec.ToArray());
            }
        }
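
A minimal usage sketch for this overload, assuming it is exposed as Cv2.GroupRectangles (as in OpenCvSharp); the rectangle values are illustrative candidates such as a sliding-window detector might produce.

        // using System; using System.Collections.Generic; using OpenCvSharp;
        var candidates = new List<Rect>
        {
            new Rect(10, 10, 50, 50),     // two overlapping boxes around the same object
            new Rect(12, 11, 50, 50),
            new Rect(300, 40, 60, 60),    // isolated box; dropped because its group has fewer than groupThreshold + 1 members
        };

        // Merge overlapping rectangles in place; eps controls how similar two boxes must be to be grouped.
        Cv2.GroupRectangles(candidates, groupThreshold: 1, eps: 0.2);

        foreach (var r in candidates)
            Console.WriteLine(r);         // expected: roughly one merged rectangle for the overlapping pair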
Example #2
        /// <summary>
        /// Recognize text using Beam Search
        /// Optionally also provides the Rects for the individual text elements found (e.g. words), the text of those elements, and their confidence values.
        /// </summary>
        /// <param name="image">Input image CV_8UC1 with a single text line (or word)</param>
        /// <param name="mask">Text mask CV_8UC1 image</param>
        /// <param name="rects">Method will output a list of Rects for the individual text elements found (e.g. words)</param>
        /// <param name="texts">Method will output a list of text strings for the recognition of individual text elements found (e.g. words)</param>
        /// <param name="confidences">Method will output a list of confidence values for the recognition of individual text elements found (e.g. words)</param>
        /// <param name="component_level"></param>
        /// <returns></returns>
        public override string Run(Mat image, Mat mask, out Rect[] rects, out string[] texts, out float[] confidences, CvText.OCRLevel component_level)
        {
            using (VectorOfRect vecRects = new VectorOfRect())
                using (VectorOfString vecTexts = new VectorOfString())
                    using (VectorOfFloat vecConfidences = new VectorOfFloat())
                    {
                        NativeMethods.text_OCRBeamSearchDecoder_run2(ptr, image.CvPtr, mask.CvPtr, vecRects.CvPtr, vecTexts.CvPtr, vecConfidences.CvPtr, (int)component_level);

                        rects       = vecRects.ToArray();
                        texts       = vecTexts.ToArray();
                        confidences = vecConfidences.ToArray();
                    }

            return texts.Length > 0 ? texts[0] : string.Empty;
        }
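
A hedged sketch of calling Run. Constructing the decoder requires a trained character classifier, a vocabulary and transition/emission matrices, which are omitted here; the decoder parameter, the all-255 mask and the class name OCRBeamSearchDecoder are assumptions for illustration only.

        // using System; using OpenCvSharp;  (decoder construction omitted)
        static string RecognizeLine(OCRBeamSearchDecoder ocr, Mat grayLine, CvText.OCRLevel level)
        {
            // Trivial mask that marks the whole image as text; a real mask would be a binary text mask.
            using var mask = new Mat(grayLine.Size(), MatType.CV_8UC1, Scalar.All(255));

            string text = ocr.Run(grayLine, mask,
                out Rect[] rects, out string[] texts, out float[] confidences, level);

            for (int i = 0; i < texts.Length; i++)
                Console.WriteLine($"{texts[i]} ({confidences[i]:F2}) at {rects[i]}");

            return text;
        }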
Example #3
        /// <summary>
        /// Groups the object candidate rectangles.
        /// </summary>
        /// <param name="rectList"> Input/output vector of rectangles. Output vector includes retained and grouped rectangles.</param>
        /// <param name="weights"></param>
        /// <param name="groupThreshold">Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.</param>
        /// <param name="eps">Relative difference between sides of the rectangles to merge them into a group.</param>
        public static void GroupRectangles(IList<Rect> rectList, out int[] weights, int groupThreshold, double eps = 0.2)
        {
            if (rectList == null)
            {
                throw new ArgumentNullException("rectList");
            }

            using (var rectListVec = new VectorOfRect(rectList))
                using (var weightsVec = new VectorOfInt32())
                {
                    NativeMethods.objdetect_groupRectangles2(rectListVec.CvPtr, weightsVec.CvPtr, groupThreshold, eps);
                    ClearAndAddRange(rectList, rectListVec.ToArray());
                    weights = weightsVec.ToArray();
                }
        }
Example #4
        /// <summary>
        /// Find groups of Extremal Regions that are organized as text blocks.
        /// </summary>
        /// <param name="image">Original RGB or Grayscale image from which the regions were extracted</param>
        /// <param name="channels">Vector of single channel images CV_8UC1 from which the regions were extracted</param>
        /// <param name="regions">Regions extracted by the DetectRegions function</param>
        /// <param name="groups_rects">The output of the algorithm, stored in this parameter as a list of rectangles</param>
        /// <param name="method">Grouping method (see GroupingModes). Can be one of { OrientationHorizontal, OrientationAny }</param>
        /// <param name="filename">The XML or YAML file with the classifier model. Only used when the grouping method is OrientationAny</param>
        /// <param name="minProbablity">The minimum probability for accepting a group. Only used when the grouping method is OrientationAny</param>
        public static void ErGrouping(Mat image, Mat[] channels, Point[][] regions, out Rect[] groups_rects, GroupingModes method, string filename = null, float minProbablity = 0.5f)
        {
            using (var vecChannels = new InputArray(channels))
                using (var vecRegions = new VectorOfVectorPoint(regions))
                    using (var vecRects = new VectorOfRect())
                        using (var input = new InputArray(image))
                        {
                            if (null == filename)
                            {
                                filename = string.Empty;
                            }
                            NativeMethods.text_erGrouping2(input.CvPtr, vecChannels.CvPtr, vecRegions.CvPtr, vecRects.CvPtr, (int)method, filename, minProbablity);

                            groups_rects = vecRects.ToArray();
                        }
        }
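
A hedged sketch of calling ErGrouping, assuming it is exposed on the CvText class and that the regions were produced beforehand by an ER filter's DetectRegions call (not shown); OrientationHorizontal grouping needs no classifier file, so filename and minProbablity are left at their defaults.

        // using OpenCvSharp; using OpenCvSharp.Text;
        // Groups previously extracted ER regions into horizontally aligned text blocks.
        static Rect[] GroupTextBlocks(Mat image, Mat[] channels, Point[][] regions)
        {
            CvText.ErGrouping(image, channels, regions, out Rect[] groupRects,
                GroupingModes.OrientationHorizontal);
            return groupRects;
        }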
        /// <summary>
        /// Groups the object candidate rectangles.
        /// </summary>
        /// <param name="rectList"></param>
        /// <param name="rejectLevels"></param>
        /// <param name="levelWeights"></param>
        /// <param name="groupThreshold"></param>
        /// <param name="eps"></param>
        public static void GroupRectangles(IList<Rect> rectList, out int[] rejectLevels, out double[] levelWeights, int groupThreshold, double eps = 0.2)
        {
            if (rectList == null)
            {
                throw new ArgumentNullException(nameof(rectList));
            }

            using (var rectListVec = new VectorOfRect(rectList))
                using (var rejectLevelsVec = new VectorOfInt32())
                    using (var levelWeightsVec = new VectorOfDouble())
                    {
                        NativeMethods.objdetect_groupRectangles4(rectListVec.CvPtr, rejectLevelsVec.CvPtr, levelWeightsVec.CvPtr, groupThreshold, eps);
                        ClearAndAddRange(rectList, rectListVec.ToArray());
                        rejectLevels = rejectLevelsVec.ToArray();
                        levelWeights = levelWeightsVec.ToArray();
                    }
        }
Example #6
        /// <summary>
        /// Method that provides a quick and simple interface to detect text inside an image
        /// </summary>
        /// <param name="inputImage">an image to process</param>
        /// <param name="bbox"> a vector of Rect that will store the detected word bounding box</param>
        /// <param name="confidence">a vector of float that will be updated with the confidence the classifier has for the selected bounding box</param>
        public override void Detect(InputArray inputImage, out Rect[] bbox, out float[] confidence)
        {
            if (inputImage == null)
            {
                throw new ArgumentNullException(nameof(inputImage));
            }
            inputImage.ThrowIfDisposed();

            using (var bboxVec = new VectorOfRect())
                using (var confidenceVec = new VectorOfFloat())
                {
                    NativeMethods.text_TextDetectorCNN_detect(ptr, inputImage.CvPtr, bboxVec.CvPtr, confidenceVec.CvPtr);
                    bbox       = bboxVec.ToArray();
                    confidence = confidenceVec.ToArray();
                }

            GC.KeepAlive(this);
            GC.KeepAlive(inputImage);
        }
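
A usage sketch, assuming the detector is created from a pretrained TextBoxes model via TextDetectorCNN.Create(architecture, weights) as in the cv::text module; the file names and the 0.5 confidence cut-off are placeholders.

        // using OpenCvSharp; using OpenCvSharp.Text;
        using var detector = TextDetectorCNN.Create("textbox.prototxt", "TextBoxes_icdar13.caffemodel");
        using var img = Cv2.ImRead("scene.jpg");

        detector.Detect(img, out Rect[] boxes, out float[] confidences);

        for (int i = 0; i < boxes.Length; i++)
        {
            if (confidences[i] > 0.5f)                 // keep confident detections only
                Cv2.Rectangle(img, boxes[i], Scalar.Red, 2);
        }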
Example #7
        /// <summary>
        /// Selects ROIs on the given image.
        /// The function creates a window and allows the user to select ROIs with the mouse.
        /// Controls: use `space` or `enter` to finish the current selection and start a new one,
        /// use `esc` to terminate the multiple ROI selection process.
        /// </summary>
        /// <param name="windowName">name of the window where selection process will be shown.</param>
        /// <param name="img">image to select a ROI.</param>
        /// <param name="showCrosshair">if true crosshair of selection rectangle will be shown.</param>
        /// <param name="fromCenter">if true center of selection will match initial mouse position. In opposite case a corner of
        /// selection rectangle will correspond to the initial mouse position.</param>
        /// <returns>selected ROIs.</returns>
        // ReSharper disable once InconsistentNaming
        public static Rect[] SelectROIs(string windowName, InputArray img, bool showCrosshair = true, bool fromCenter = false)
        {
            if (string.IsNullOrEmpty(windowName))
            {
                throw new ArgumentNullException(nameof(windowName));
            }
            if (img == null)
            {
                throw new ArgumentNullException(nameof(img));
            }
            img.ThrowIfDisposed();

            using var boundingBoxesVec = new VectorOfRect();
            NativeMethods.HandleException(
                NativeMethods.highgui_selectROIs(windowName, img.CvPtr, boundingBoxesVec.CvPtr, showCrosshair ? 1 : 0, fromCenter ? 1 : 0));

            GC.KeepAlive(img);
            return boundingBoxesVec.ToArray();
        }
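
A short usage sketch; the image path is a placeholder and the window is destroyed once the selection is finished.

        // using System; using OpenCvSharp;
        using var img = Cv2.ImRead("input.jpg");
        Rect[] rois = Cv2.SelectROIs("select ROIs", img, showCrosshair: true, fromCenter: false);
        Cv2.DestroyWindow("select ROIs");

        foreach (var roi in rois)
            Console.WriteLine(roi);                    // each drag produces one Rect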
Example #8
        /// <summary>
        /// Groups the object candidate rectangles.
        /// </summary>
        /// <param name="rectList"></param>
        /// <param name="groupThreshold"></param>
        /// <param name="eps"></param>
        /// <param name="weights"></param>
        /// <param name="levelWeights"></param>
        public static void GroupRectangles(IList<Rect> rectList, int groupThreshold, double eps, out int[] weights, out double[] levelWeights)
        {
            if (rectList == null)
            {
                throw new ArgumentNullException(nameof(rectList));
            }

            using var rectListVec     = new VectorOfRect(rectList);
            using var weightsVec      = new VectorOfInt32();
            using var levelWeightsVec = new VectorOfDouble();

            NativeMethods.HandleException(
                NativeMethods.objdetect_groupRectangles3(
                    rectListVec.CvPtr, groupThreshold, eps, weightsVec.CvPtr, levelWeightsVec.CvPtr));

            ClearAndAddRange(rectList, rectListVec.ToArray());
            weights      = weightsVec.ToArray();
            levelWeights = levelWeightsVec.ToArray();
        }
Example #9
File: MSER.cs  Project: 180D-FW-2020/Team13
        /// <summary>
        /// Detect MSER regions
        /// </summary>
        /// <param name="image">input image (8UC1, 8UC3 or 8UC4, must be greater or equal than 3x3)</param>
        /// <param name="msers">resulting list of point sets</param>
        /// <param name="bboxes">resulting bounding boxes</param>
        public virtual void DetectRegions(
            InputArray image, out Point[][] msers, out Rect[] bboxes)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException("nameof(image)");
            image.ThrowIfDisposed();

            using (var msersVec = new VectorOfVectorPoint())
            using (var bboxesVec = new VectorOfRect())
            {
                NativeMethods.features2d_MSER_detectRegions(
                    ptr, image.CvPtr, msersVec.CvPtr, bboxesVec.CvPtr);
                msers = msersVec.ToArray();
                bboxes = bboxesVec.ToArray();
            }

            GC.KeepAlive(image);
        }
Example #10
        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="rejectLevels"></param>
        /// <param name="levelWeights"></param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects.
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <param name="outputRejectLevels"></param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            out int[] rejectLevels,
            out double[] levelWeights,
            double scaleFactor      = 1.1,
            int minNeighbors        = 3,
            HaarDetectionType flags = 0,
            Size? minSize           = null,
            Size? maxSize           = null,
            bool outputRejectLevels = false)
        {
            if (disposed)
            {
                throw new ObjectDisposedException("CascadeClassifier");
            }
            if (image == null)
            {
                throw new ArgumentNullException("nameof(image)");
            }
            _stopWatch.Start();
            image.ThrowIfDisposed();
            _stopWatch.Stop();
            Debug.Log("FF_throw: " + _stopWatch.ElapsedMilliseconds + "ms");
            _stopWatch.Reset();

            Size minSize0 = minSize.GetValueOrDefault(new Size());
            Size maxSize0 = maxSize.GetValueOrDefault(new Size());

            using (var objectsVec = new VectorOfRect())
                using (var rejectLevelsVec = new VectorOfInt32())
                    using (var levelWeightsVec = new VectorOfDouble())
                    {
                        NativeMethods.objdetect_CascadeClassifier_detectMultiScale2(
                            ptr, image.CvPtr, objectsVec.CvPtr, rejectLevelsVec.CvPtr, levelWeightsVec.CvPtr,
                            scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0, outputRejectLevels ? 1 : 0);

                        rejectLevels = rejectLevelsVec.ToArray();
                        levelWeights = levelWeightsVec.ToArray();
                        return objectsVec.ToArray();
                    }
        }
        /// <summary>
        /// Groups the object candidate rectangles using meanshift-based clustering.
        /// </summary>
        /// <param name="rectList">Input/output vector of rectangles. Output vector includes retained and grouped rectangles.</param>
        /// <param name="foundWeights">Output vector of weights for the retained rectangles.</param>
        /// <param name="foundScales">Output vector of detection scales for the retained rectangles.</param>
        /// <param name="detectThreshold">Detection threshold used by the meanshift grouping (0.0 by default).</param>
        /// <param name="winDetSize">Detection window size (64x128 by default).</param>
        public static void GroupRectanglesMeanshift(IList<Rect> rectList, out double[] foundWeights,
                                                    out double[] foundScales, double detectThreshold = 0.0, Size? winDetSize = null)
        {
            if (rectList == null)
            {
                throw new ArgumentNullException(nameof(rectList));
            }

            Size winDetSize0 = winDetSize.GetValueOrDefault(new Size(64, 128));

            using (var rectListVec = new VectorOfRect(rectList))
                using (var foundWeightsVec = new VectorOfDouble())
                    using (var foundScalesVec = new VectorOfDouble())
                    {
                        NativeMethods.objdetect_groupRectangles_meanshift(
                            rectListVec.CvPtr, foundWeightsVec.CvPtr, foundScalesVec.CvPtr, detectThreshold, winDetSize0);
                        ClearAndAddRange(rectList, rectListVec.ToArray());
                        foundWeights = foundWeightsVec.ToArray();
                        foundScales  = foundScalesVec.ToArray();
                    }
        }
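
A hedged sketch of the meanshift variant, assuming it is exposed as Cv2.GroupRectanglesMeanshift (as in OpenCvSharp); the rectangles are illustrative detections at the default 64x128 HOG window size.

        // using System.Collections.Generic; using OpenCvSharp;
        var detections = new List<Rect>
        {
            new Rect(100, 80, 64, 128),
            new Rect(103, 82, 64, 128),
            new Rect(98, 79, 64, 128),
        };

        Cv2.GroupRectanglesMeanshift(detections, out double[] weights, out double[] scales,
            detectThreshold: 0.0, winDetSize: new Size(64, 128));
        // detections now holds the refined rectangles; weights/scales contain one entry per retained rectangle.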
Example #12
        //public virtual bool read( const FileNode& node );

        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects.
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            double scaleFactor      = 1.1,
            int minNeighbors        = 3,
            HaarDetectionType flags = 0,
            Size? minSize           = null,
            Size? maxSize           = null)
        {
            if (disposed)
            {
                throw new ObjectDisposedException("CascadeClassifier");
            }
            if (image == null)
            {
                throw new ArgumentNullException("nameof(image)");
            }
            _stopWatch.Start();

            image.ThrowIfDisposed();

            Size minSize0 = minSize.GetValueOrDefault(new Size());
            Size maxSize0 = maxSize.GetValueOrDefault(new Size());

            _stopWatch.Stop();
            //Debug.Log("FF_throw: " + _stopWatch.ElapsedMilliseconds + "ms");
            _stopWatch.Reset();

            using (var objectsVec = new VectorOfRect())
            {
                _stopWatch.Start();
                NativeMethods.objdetect_CascadeClassifier_detectMultiScale1(
                    ptr, image.CvPtr, objectsVec.CvPtr,
                    scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0);
                _stopWatch.Stop();
                //Debug.Log("FF_detect1: " + _stopWatch.ElapsedMilliseconds + "ms");
                _stopWatch.Reset();
                return objectsVec.ToArray();
            }
        }
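
A typical call to this overload; the cascade file and image path are placeholders (e.g. one of the Haar cascades shipped with OpenCV).

        // using OpenCvSharp;
        using var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml");
        using var gray = Cv2.ImRead("people.jpg", ImreadModes.Grayscale);

        Rect[] faces = cascade.DetectMultiScale(
            gray, scaleFactor: 1.1, minNeighbors: 3, minSize: new Size(30, 30));

        foreach (var face in faces)
            Cv2.Rectangle(gray, face, Scalar.White, 2);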
Example #13
        /// <summary>
        /// Detect MSER regions
        /// </summary>
        /// <param name="image">input image (8UC1, 8UC3 or 8UC4, must be greater or equal than 3x3)</param>
        /// <param name="msers">resulting list of point sets</param>
        /// <param name="bboxes">resulting bounding boxes</param>
        public virtual void DetectRegions(
            InputArray image, out Point[][] msers, out Rect[] bboxes)
        {
            ThrowIfDisposed();
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            image.ThrowIfDisposed();

            using (var msersVec = new VectorOfVectorPoint())
                using (var bboxesVec = new VectorOfRect())
                {
                    NativeMethods.HandleException(
                        NativeMethods.features2d_MSER_detectRegions(
                            ptr, image.CvPtr, msersVec.CvPtr, bboxesVec.CvPtr));
                    GC.KeepAlive(this);
                    msers  = msersVec.ToArray();
                    bboxes = bboxesVec.ToArray();
                }

            GC.KeepAlive(image);
        }
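
A usage sketch with default MSER parameters; the image path is a placeholder.

        // using OpenCvSharp;
        using var mser = MSER.Create();
        using var gray = Cv2.ImRead("text.png", ImreadModes.Grayscale);

        mser.DetectRegions(gray, out Point[][] msers, out Rect[] bboxes);

        foreach (var box in bboxes)
            Cv2.Rectangle(gray, box, Scalar.White);    // draw one box per detected region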
Example #14
        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="rejectLevels"></param>
        /// <param name="levelWeights"></param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects.
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <param name="outputRejectLevels"></param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            out int[] rejectLevels,
            out double[] levelWeights,
            double scaleFactor      = 1.1,
            int minNeighbors        = 3,
            HaarDetectionType flags = 0,
            Size? minSize           = null,
            Size? maxSize           = null,
            bool outputRejectLevels = false)
        {
            ThrowIfDisposed();
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            image.ThrowIfDisposed();

            var minSize0 = minSize.GetValueOrDefault(new Size());
            var maxSize0 = maxSize.GetValueOrDefault(new Size());

            using var objectsVec      = new VectorOfRect();
            using var rejectLevelsVec = new VectorOfInt32();
            using var levelWeightsVec = new VectorOfDouble();

            NativeMethods.HandleException(
                NativeMethods.objdetect_CascadeClassifier_detectMultiScale2(
                    ptr, image.CvPtr, objectsVec.CvPtr, rejectLevelsVec.CvPtr, levelWeightsVec.CvPtr,
                    scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0, outputRejectLevels ? 1 : 0));

            GC.KeepAlive(this);
            GC.KeepAlive(image);

            rejectLevels = rejectLevelsVec.ToArray();
            levelWeights = levelWeightsVec.ToArray();
            return objectsVec.ToArray();
        }
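
A sketch of the confidence-reporting overload; with outputRejectLevels set to true, each returned rectangle has a matching entry in rejectLevels and levelWeights (the cascade path and image are placeholders).

        // using System; using OpenCvSharp;
        using var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml");
        using var gray = Cv2.ImRead("people.jpg", ImreadModes.Grayscale);

        Rect[] faces = cascade.DetectMultiScale(
            gray, out int[] rejectLevels, out double[] levelWeights,
            scaleFactor: 1.1, minNeighbors: 3, outputRejectLevels: true);

        for (int i = 0; i < faces.Length; i++)
            Console.WriteLine($"{faces[i]}  level={rejectLevels[i]}  weight={levelWeights[i]:F3}");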
Example #15
        /// <summary>
        /// Splits a motion history image into a few parts corresponding to separate independent motions
        /// (for example, left hand, right hand).
        /// </summary>
        /// <param name="mhi">Motion history image.</param>
        /// <param name="segmask">Image where the found mask should be stored, single-channel, 32-bit floating-point.</param>
        /// <param name="boundingRects">Vector containing ROIs of motion connected components.</param>
        /// <param name="timestamp">Current time in milliseconds or other units.</param>
        /// <param name="segThresh">Segmentation threshold that is recommended to be equal to the interval between motion history “steps” or greater.</param>
        public static void SegmentMotion(
            InputArray mhi, OutputArray segmask,
            out Rect[] boundingRects,
            double timestamp, double segThresh)
        {
            if (mhi == null)
            {
                throw new ArgumentNullException("nameof(mhi)");
            }
            if (segmask == null)
            {
                throw new ArgumentNullException("nameof(segmask)");
            }
            mhi.ThrowIfDisposed();
            segmask.ThrowIfNotReady();

            using (var br = new VectorOfRect())
            {
                NativeMethods.optflow_motempl_segmentMotion(
                    mhi.CvPtr, segmask.CvPtr, br.CvPtr, timestamp, segThresh);
                boundingRects = br.ToArray();
            }
            segmask.Fix();
        }
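
A hedged sketch of calling SegmentMotion, assuming `mhi` is a 32-bit floating-point motion history image maintained elsewhere (for example with OpenCV's updateMotionHistory) and that the static method above is in scope; the 0.5 segmentation threshold is illustrative.

        // using OpenCvSharp;
        static Rect[] FindMotionComponents(Mat mhi, double timestamp)
        {
            using var segMask = new Mat();             // receives the per-component mask
            SegmentMotion(mhi, segMask, out Rect[] rois, timestamp, segThresh: 0.5);
            return rois;                               // one ROI per independent motion
        }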