Пример #1
0
 /// <summary>
 /// Copies the sequence contents into an array, releasing the sequence
 /// afterwards (even if the copy throws).
 /// </summary>
 public static CvAvgComp[] ToArrayAndDispose(this CvSeq <CvAvgComp> seq)
 {
     using (seq)
         return seq.ToArray();
 }
Пример #2
0
        /// <summary>
        /// Demo: pushes ten random ints into a CvSeq, prints them, sorts the
        /// sequence with a comparison delegate, copies it to an array, and
        /// finally pops every element back out.
        /// </summary>
        public unsafe SeqTest()
        {
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                Random random = new Random();
                CvSeq<int> sequence = new CvSeq<int>(SeqType.EltypeS32C1, storage);

                // Fill the sequence with ten random values in [0, 100).
                for (int n = 0; n < 10; n++)
                {
                    int pushed = sequence.Push(random.Next(100));
                    Console.WriteLine("{0} is pushed", pushed);
                }
                Console.WriteLine("----------");

                // Show the unsorted contents.
                Console.WriteLine("contents of seq");
                foreach (int value in sequence)
                    Console.Write("{0} ", value);
                Console.WriteLine();

                // Sort ascending via an anonymous comparison delegate.
                CvCmpFunc<int> comparison = delegate(int x, int y) { return x.CompareTo(y); };
                sequence.Sort(comparison);

                // Copy the sorted elements out and show them.
                int[] sorted = sequence.ToArray();
                Console.WriteLine("contents of sorted seq");
                foreach (int value in sorted)
                    Console.Write("{0} ", value);
                Console.WriteLine();
                Console.WriteLine("----------");

                // Drain the sequence element by element.
                for (int n = 0; n < 10; n++)
                {
                    Console.WriteLine("{0} is popped", sequence.Pop());
                }
                Console.ReadKey();
            }
        }
Пример #3
0
        /// <summary>
        /// Demo: pushes ten random ints into a CvSeq, prints them, sorts the
        /// sequence with a comparison delegate, copies it to an array, then
        /// pops every element back out.
        /// </summary>
        public unsafe SeqTest()
        {
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                Random rand = new Random();
                CvSeq<int> seq = new CvSeq<int>(SeqType.EltypeS32C1, storage);
                // push: append ten random values in [0, 100)
                for (int i = 0; i < 10; i++)
                {
                    int push = seq.Push(rand.Next(100));//seq.Push(i);
                    Console.WriteLine("{0} is pushed", push);
                }
                Console.WriteLine("----------");

                // enumerate: print the contents in insertion order
                Console.WriteLine("contents of seq");
                foreach (int item in seq)
                {
                    Console.Write("{0} ", item);
                }
                Console.WriteLine();

                // sort ascending using an anonymous comparison delegate
                CvCmpFunc<int> func = delegate(int a, int b)
                {
                    return a.CompareTo(b);
                };
                seq.Sort(func);

                // convert to array (copies the elements out of the sequence)
                int[] array = seq.ToArray();
                Console.WriteLine("contents of sorted seq");
                foreach (int item in array)
                {
                    Console.Write("{0} ", item);
                }
                Console.WriteLine();
                Console.WriteLine("----------");

                // pop: remove all ten elements again
                for (int i = 0; i < 10; i++)
                {
                    int pop = seq.Pop();
                    Console.WriteLine("{0} is popped", pop);
                }
                Console.ReadKey();
            }
        }
        /// <summary>
        /// Finds contours in the image and selects the longest contour that is
        /// a hole boundary.
        /// </summary>
        /// <param name="image">Image on which to search for contours</param>
        /// <returns>Vertices of the longest hole contour, or an empty array if none was found</returns>
        public CvPoint[] FindMostLengthHole(IplImage image)
        {
            // The storage is disposed when we are done; ToArray() below copies
            // the points into a managed array first, so this is safe.
            using (CvMemStorage contours = new CvMemStorage())
            {
                CvSeq<CvPoint> firstContour, mostLengthContour = null;
                double maxContourLength = 0;

                // Separate the object from the background into tmpImg
                separateBackground(image, tmpImg);

                // Find all contours in the image
                Cv.FindContours(tmpImg, contours, out firstContour, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxNone);

                // No contours found at all
                if (firstContour == null)
                {
                    return new CvPoint[0];
                }

                // Walk the whole contour chain looking for the longest hole.
                // The loop tests the CURRENT contour for null (the original
                // tested HNext, which skipped the last contour in the chain
                // and never examined a single-contour chain at all).
                for (CvSeq<CvPoint> currentContour = firstContour; currentContour != null; currentContour = currentContour.HNext)
                {
                    if (isHole(currentContour))
                    {
                        double perim = Cv.ContourPerimeter(currentContour);

                        if (perim >= maxContourLength)
                        {
                            maxContourLength  = perim;
                            mostLengthContour = currentContour;
                        }
                    }
                }

                // No hole contour was found
                if (mostLengthContour == null)
                {
                    return new CvPoint[0];
                }

                return mostLengthContour.ToArray();
            }
        }
Пример #5
0
        /// <summary>
        /// MSERのすべての輪郭情報を抽出する
        /// </summary>
        /// <param name="img"></param>
        /// <param name="mask"></param>
        /// <param name="contours"></param>
        /// <param name="storage"></param>
        /// <param name="params"></param>
#else
        /// <summary>
        /// Extracts the contours of Maximally Stable Extremal Regions
        /// </summary>
        /// <param name="img"></param>
        /// <param name="mask"></param>
        /// <param name="contours"></param>
        /// <param name="storage"></param>
        /// <param name="params"></param>
#endif
        public static void ExtractMSER(CvArr img, CvArr mask, out CvContour[] contours, CvMemStorage storage, CvMSERParams @params)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            if (storage == null)
                throw new ArgumentNullException("storage");
            // @params.Struct is dereferenced below; guard it like the other
            // reference arguments instead of letting it NullReferenceException.
            if (@params == null)
                throw new ArgumentNullException("params");

            IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
            IntPtr contoursPtr = IntPtr.Zero;

            CvInvoke.cvExtractMSER(img.CvPtr, maskPtr, ref contoursPtr, storage.CvPtr, @params.Struct);

            // Wrap the native sequence of contour pointers and convert each
            // element into a managed CvContour.
            CvSeq<IntPtr> seq = new CvSeq<IntPtr>(contoursPtr);
            contours = Array.ConvertAll<IntPtr, CvContour>(seq.ToArray(), delegate(IntPtr p) { return new CvContour(p); });
        }
Пример #6
0
        /// <summary>
        /// Returns the square candidates detected on the image; four vertices
        /// are pushed per accepted quadrilateral, stored in <paramref name="storage"/>.
        /// NOTE(review): relies on Thresh and Angle(...) declared elsewhere in this type.
        /// </summary>
        static CvPoint[] FindSquares4(IplImage img, CvMemStorage storage)
        {
            const int N = 11; // number of threshold levels tried per color plane

            CvSize   sz   = new CvSize(img.Width & -2, img.Height & -2);
            IplImage timg = img.Clone(); // make a copy of input image
            IplImage gray = new IplImage(sz, BitDepth.U8, 1);
            IplImage pyr  = new IplImage(sz.Width / 2, sz.Height / 2, BitDepth.U8, 3);
            // create empty sequence that will contain points -
            // 4 points per square (the square's vertices)
            CvSeq <CvPoint> squares = new CvSeq <CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

            // select the maximum ROI in the image
            // with the width and height divisible by 2
            timg.ROI = new CvRect(0, 0, sz.Width, sz.Height);

            // down-scale and upscale the image to filter out the noise
            Cv.PyrDown(timg, pyr, CvFilter.Gaussian5x5);
            Cv.PyrUp(pyr, timg, CvFilter.Gaussian5x5);
            IplImage tgray = new IplImage(sz, BitDepth.U8, 1);

            // find squares in every color plane of the image
            for (int c = 0; c < 3; c++)
            {
                // extract the c-th color plane
                timg.COI = c + 1;
                Cv.Copy(timg, tgray, null);

                // try several threshold levels
                for (int l = 0; l < N; l++)
                {
                    // hack: use Canny instead of zero threshold level.
                    // Canny helps to catch squares with gradient shading
                    if (l == 0)
                    {
                        // apply Canny. Take the upper threshold from slider
                        // and set the lower to 0 (which forces edges merging)
                        Cv.Canny(tgray, gray, 0, Thresh, ApertureSize.Size5);
                        // dilate canny output to remove potential
                        // holes between edge segments
                        Cv.Dilate(gray, gray, null, 1);
                    }
                    else
                    {
                        // apply threshold if l!=0:
                        //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                        Cv.Threshold(tgray, gray, (l + 1) * 255.0 / N, 255, ThresholdType.Binary);
                    }

                    // find contours and store them all as a list
                    CvSeq <CvPoint> contours;
                    Cv.FindContours(gray, storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxSimple, new CvPoint(0, 0));

                    // test each contour in the chain (HNext links siblings)
                    while (contours != null)
                    {
                        // approximate contour with accuracy proportional
                        // to the contour perimeter
                        CvSeq <CvPoint> result = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, contours.ContourPerimeter() * 0.02, false);
                        // square contours should have 4 vertices after approximation
                        // relatively large area (to filter out noisy contours)
                        // and be convex.
                        // Note: absolute value of an area is used because
                        // area may be positive or negative - in accordance with the
                        // contour orientation
                        if (result.Total == 4 && Math.Abs(result.ContourArea(CvSlice.WholeSeq)) > 1000 && result.CheckContourConvexity())
                        {
                            double s = 0;

                            // NOTE(review): i runs to 4 on a 4-vertex polygon;
                            // this relies on the CvSeq indexer accepting index 4
                            // (wrapping behavior) - verify against CvSeq<T>.
                            for (int i = 0; i < 5; i++)
                            {
                                // find minimum Angle between joint
                                // edges (maximum of cosine)
                                if (i >= 2)
                                {
                                    double t = Math.Abs(Angle(result[i].Value, result[i - 2].Value, result[i - 1].Value));
                                    s = s > t ? s : t;
                                }
                            }

                            // if cosines of all angles are small
                            // (all angles are ~90 degree) then write quandrange
                            // vertices to resultant sequence
                            if (s < 0.3)
                            {
                                for (int i = 0; i < 4; i++)
                                {
                                    //Console.WriteLine(result[i]);
                                    squares.Push(result[i].Value);
                                }
                            }
                        }

                        // take the next contour
                        contours = contours.HNext;
                    }
                }
            }

            // release all the temporary images
            // NOTE(review): these are skipped if an OpenCV call above throws
            gray.Dispose();
            pyr.Dispose();
            tgray.Dispose();
            timg.Dispose();

            return(squares.ToArray());
        }
Пример #7
0
        /// <summary>
        /// 画像中からSURF(Speeded Up Robust Features)を検出する
        /// </summary>
        /// <param name="image">8ビット,グレースケールの入力画像. </param>
        /// <param name="mask">オプション:8ビットのマスク画像.非0 のマスクピクセルが50%以上を占める領域からのみ,特徴点検出を行う.</param>
        /// <param name="keypoints">出力パラメータ.キーポイントのシーケンスへのポインタのポインタ. これは,CvSURFPoint 構造体のシーケンスになる</param>
        /// <param name="descriptors">オプション:出力パラメータ.ディスクリプタのシーケンスへのポインタのポインタ. シーケンスの各要素は,params.extended の値に依存して, 64-要素,あるいは 128-要素の浮動小数点数(CV_32F)ベクトルとなる. パラメータが NULL の場合,ディスクリプタは計算されない.</param>
        /// <param name="param">CvSURFParams 構造体に入れられた,様々なアルゴリズムパラメータ</param>
        /// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
#else
        /// <summary>
        /// Extracts Speeded Up Robust Features from image
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image. </param>
        /// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
        /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
        /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
        /// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
        /// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
#endif
        public static void ExtractSURF(CvArr image, CvArr mask, ref CvSURFPoint[] keypoints, out float[][] descriptors, CvSURFParams param, bool useProvidedKeyPts)
        {
            // Without provided keypoints this is exactly the detection overload.
            if (!useProvidedKeyPts)
            {
                ExtractSURF(image, mask, out keypoints, out descriptors, param);
                return;
            }

            if (image == null)
                throw new ArgumentNullException("image");
            if (param == null)
                throw new ArgumentNullException("param");
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");

            using (CvMemStorage storage = new CvMemStorage(0))
            using (CvSeq <CvSURFPoint> keypointsSeqIn = CvSeq<CvSURFPoint>.FromArray(keypoints, SeqType.Zero, storage))
            {
                IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
                IntPtr descriptorsPtr = IntPtr.Zero;
                IntPtr keypointsPtr = keypointsSeqIn.CvPtr;
                // Pass true for the native useProvidedKeyPts flag: on this code
                // path the caller supplied keypoints, so descriptors must be
                // computed at those locations instead of re-detecting.
                // (The original passed false, silently discarding the input.)
                NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, true);

                // Marshal the (possibly reallocated) keypoint sequence back out.
                CvSeq<CvSURFPoint> keypointsSeqOut = new CvSeq<CvSURFPoint>(keypointsPtr);
                keypoints = keypointsSeqOut.ToArray();

                descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
            }
        }
Пример #8
0
        /// <summary>
        /// 画像中からSURF(Speeded Up Robust Features)を検出する
        /// </summary>
        /// <param name="image">8ビット,グレースケールの入力画像. </param>
        /// <param name="mask">オプション:8ビットのマスク画像.非0 のマスクピクセルが50%以上を占める領域からのみ,特徴点検出を行う.</param>
        /// <param name="keypoints">出力パラメータ.キーポイントのシーケンスへのポインタのポインタ. これは,CvSURFPoint 構造体のシーケンスになる</param>
        /// <param name="descriptors">オプション:出力パラメータ.ディスクリプタのシーケンスへのポインタのポインタ. シーケンスの各要素は,params.extended の値に依存して, 64-要素,あるいは 128-要素の浮動小数点数(CV_32F)ベクトルとなる. パラメータが NULL の場合,ディスクリプタは計算されない.</param>
        /// <param name="param">CvSURFParams 構造体に入れられた,様々なアルゴリズムパラメータ</param>
#else
        /// <summary>
        /// Extracts Speeded Up Robust Features from image
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image. </param>
        /// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
        /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
        /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
        /// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
#endif
        public static void ExtractSURF(CvArr image, CvArr mask, out CvSURFPoint[] keypoints, out float[][] descriptors, CvSURFParams param)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (param == null)
                throw new ArgumentNullException("param");

            using (CvMemStorage storage = new CvMemStorage(0))
            {
                IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
                IntPtr keypointsPtr = IntPtr.Zero;
                IntPtr descriptorsPtr = IntPtr.Zero;

                // Detect keypoints (and descriptors, if param requests them) natively.
                NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, false);

                // Marshal the native keypoint sequence into a managed array.
                keypoints = new CvSeq<CvSURFPoint>(keypointsPtr).ToArray();
                descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
            }
        }
Пример #9
0
        /// <summary>
        /// ハフ(Hough)変換で、method=CV_HOUGH_PROBABILISTICを用いて2値画像から直線を検出する
        /// </summary>
        /// <param name="rho">距離解像度(1ピクセル当たりの単位)</param>
        /// <param name="theta">角度解像度(ラジアン単位で計測)</param>
        /// <param name="threshold">閾値パラメータ.対応する投票数がthresholdより大きい場合のみ,抽出された線が返される.</param>
        /// <param name="param1">各手法に応じた1番目のパラメータ.標準的ハフ変換では,使用しない(0).確率的ハフ変換では,最小の線の長さ.マルチスケールハフ変換では, 距離解像度rhoの除数.(荒い距離解像度では rho であり,詳細な解像度では (rho / param1) となる).</param>
        /// <param name="param2">各手法に応じた2番目のパラメータ.標準的ハフ変換では,使用しない(0).確率的ハフ変換では,同一線上に存在する線分として扱う(つまり,それらを統合しても問題ない),二つの線分の最大の間隔. マルチスケールハフ変換では,角度解像度 thetaの除数. (荒い角度解像度では theta であり,詳細な解像度では (theta / param2) となる). </param>
        /// <returns>検出した直線を両端の点で表した形式、の配列</returns>
#else
        /// <summary>
        /// Finds lines in binary image using Hough transform.
        /// </summary>
        /// <param name="rho">Distance resolution in pixel-related units. </param>
        /// <param name="theta">Angle resolution measured in radians. </param>
        /// <param name="threshold">Threshold parameter. A line is returned by the function if the corresponding accumulator value is greater than threshold. </param>
        /// <param name="param1">The first method-dependent parameter.</param>
        /// <param name="param2">The second method-dependent parameter.</param>
        /// <returns></returns>
#endif
        public CvLineSegmentPoint[] HoughLinesProbabilistic(double rho, double theta, int threshold, double param1, double param2)
        {
            using (CvMemStorage lineStorage = new CvMemStorage())
            {
                // Run the probabilistic Hough transform on this binary image.
                IntPtr seqPtr = NativeMethods.cvHoughLines2(CvPtr, lineStorage.CvPtr, HoughLinesMethod.Probabilistic, rho, theta, threshold, param1, param2);
                if (seqPtr == IntPtr.Zero)
                {
                    throw new OpenCvSharpException();
                }

                // Copy the native sequence of segment endpoints into a managed array.
                return new CvSeq<CvLineSegmentPoint>(seqPtr).ToArray();
            }
        }
Пример #10
0
 /// <summary>
 /// Converts the sequence to an array and disposes the sequence.
 /// The using block guarantees disposal even if ToArray throws
 /// (the original skipped Dispose on exception, leaking the sequence).
 /// </summary>
 public static CvAvgComp[] ToArrayAndDispose(this CvSeq <CvAvgComp> seq)
 {
     using (seq)
     {
         return seq.ToArray();
     }
 }
Пример #11
0
        /// <summary>
        /// ハフ(Hough)変換で、method=CV_HOUGH_STANDARDを用いて2値画像から直線を検出する
        /// </summary>
        /// <param name="rho">距離解像度(1ピクセル当たりの単位)</param>
        /// <param name="theta">角度解像度(ラジアン単位で計測)</param>
        /// <param name="threshold">閾値パラメータ.対応する投票数がthresholdより大きい場合のみ,抽出された線が返される.</param>
        /// <param name="param1">各手法に応じた1番目のパラメータ.標準的ハフ変換では,使用しない(0).確率的ハフ変換では,最小の線の長さ.マルチスケールハフ変換では, 距離解像度rhoの除数.(荒い距離解像度では rho であり,詳細な解像度では (rho / param1) となる).</param>
        /// <param name="param2">各手法に応じた2番目のパラメータ.標準的ハフ変換では,使用しない(0).確率的ハフ変換では,同一線上に存在する線分として扱う(つまり,それらを統合しても問題ない),二つの線分の最大の間隔. マルチスケールハフ変換では,角度解像度 thetaの除数. (荒い角度解像度では theta であり,詳細な解像度では (theta / param2) となる). </param>
        /// <returns>検出した直線の極座標形式、の配列</returns>
#else
        /// <summary>
        /// Finds lines in binary image using Hough transform.
        /// </summary>
        /// <param name="rho">Distance resolution in pixel-related units. </param>
        /// <param name="theta">Angle resolution measured in radians. </param>
        /// <param name="threshold">Threshold parameter. A line is returned by the function if the corresponding accumulator value is greater than threshold. </param>
        /// <param name="param1">The first method-dependent parameter.</param>
        /// <param name="param2">The second method-dependent parameter.</param>
        /// <returns></returns>
#endif
        public CvLineSegmentPolar[] HoughLinesStandard(double rho, double theta, int threshold, double param1, double param2)
        {
            using (CvMemStorage lineStorage = new CvMemStorage())
            {
                // Run the standard Hough transform on this binary image.
                IntPtr seqPtr = CvInvoke.cvHoughLines2(this.CvPtr, lineStorage.CvPtr, HoughLinesMethod.Standard, rho, theta, threshold, param1, param2);
                if (seqPtr == IntPtr.Zero)
                    throw new OpenCvSharpException();

                // Marshal the native sequence of (rho, theta) lines into a managed array.
                CvSeq<CvLineSegmentPolar> lines = new CvSeq<CvLineSegmentPolar>(seqPtr);
                return lines.ToArray();
            }
        }
Пример #12
0
        /// <summary>
        /// returns sequence of squares detected on the image.
        /// the sequence is stored in the specified memory storage
        /// </summary>
        /// <param name="img">Input image; only the maximal even-sized ROI is scanned</param>
        /// <param name="storage">Storage that receives the detected vertices</param>
        /// <returns>Detected squares flattened as 4 consecutive vertices per square</returns>
        static CvPoint[] FindSquares4(IplImage img, CvMemStorage storage)
        {
            // number of threshold levels tried per color plane
            const int N = 11;

            CvSize sz = new CvSize(img.Width & -2, img.Height & -2);

            // Temporary images are held in using blocks so they are released
            // even if an OpenCV call throws (the original disposed them only
            // on the success path, leaking on exception).
            using (IplImage timg = img.Clone()) // make a copy of input image
            using (IplImage gray = new IplImage(sz, BitDepth.U8, 1))
            using (IplImage pyr = new IplImage(sz.Width / 2, sz.Height / 2, BitDepth.U8, 3))
            using (IplImage tgray = new IplImage(sz, BitDepth.U8, 1))
            {
                // create empty sequence that will contain points -
                // 4 points per square (the square's vertices)
                CvSeq<CvPoint> squares = new CvSeq<CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

                // select the maximum ROI in the image
                // with the width and height divisible by 2
                timg.ROI = new CvRect(0, 0, sz.Width, sz.Height);

                // down-scale and upscale the image to filter out the noise
                Cv.PyrDown(timg, pyr, CvFilter.Gaussian5x5);
                Cv.PyrUp(pyr, timg, CvFilter.Gaussian5x5);

                // find squares in every color plane of the image
                for (int c = 0; c < 3; c++)
                {
                    // extract the c-th color plane
                    timg.COI = c + 1;
                    Cv.Copy(timg, tgray, null);

                    // try several threshold levels
                    for (int l = 0; l < N; l++)
                    {
                        if (l == 0)
                        {
                            // hack: use Canny instead of zero threshold level;
                            // Canny helps to catch squares with gradient shading.
                            // Take the upper threshold from slider and set the
                            // lower to 0 (which forces edges merging).
                            Cv.Canny(tgray, gray, 0, Thresh, ApertureSize.Size5);
                            // dilate canny output to remove potential
                            // holes between edge segments
                            Cv.Dilate(gray, gray, null, 1);
                        }
                        else
                        {
                            // apply threshold if l!=0:
                            //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                            Cv.Threshold(tgray, gray, (l + 1) * 255.0 / N, 255, ThresholdType.Binary);
                        }

                        // find contours and store them all as a list
                        CvSeq<CvPoint> contours;
                        Cv.FindContours(gray, storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxSimple, new CvPoint(0, 0));

                        // test each contour in the sibling chain
                        while (contours != null)
                        {
                            // approximate contour with accuracy proportional
                            // to the contour perimeter
                            CvSeq<CvPoint> result = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, contours.ContourPerimeter() * 0.02, false);
                            // square contours should have 4 vertices after approximation,
                            // relatively large area (to filter out noisy contours) and
                            // be convex. Note: absolute value of an area is used because
                            // area may be positive or negative - in accordance with the
                            // contour orientation
                            if (result.Total == 4 && Math.Abs(result.ContourArea(CvSlice.WholeSeq)) > 1000 && result.CheckContourConvexity())
                            {
                                double s = 0;

                                // find the maximum cosine of the angle between joint edges
                                // NOTE(review): i runs to 4 on a 4-vertex polygon; this
                                // relies on the CvSeq indexer accepting index 4 - verify
                                for (int i = 0; i < 5; i++)
                                {
                                    if (i >= 2)
                                    {
                                        double t = Math.Abs(Angle(result[i].Value, result[i - 2].Value, result[i - 1].Value));
                                        s = s > t ? s : t;
                                    }
                                }

                                // if cosines of all angles are small
                                // (all angles are ~90 degree) then write quandrange
                                // vertices to resultant sequence
                                if (s < 0.3)
                                {
                                    for (int i = 0; i < 4; i++)
                                    {
                                        squares.Push(result[i].Value);
                                    }
                                }
                            }

                            // take the next contour
                            contours = contours.HNext;
                        }
                    }
                }

                return squares.ToArray();
            }
        }
Пример #13
0
        /// <summary>
        /// MSERのすべての輪郭情報を抽出する
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <returns></returns>
#else
        /// <summary>
        /// Extracts the contours of Maximally Stable Extremal Regions
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <returns></returns>
#endif
        public CvPoint[][] Extract(Mat image, Mat mask)
        {
            if (image == null)
                throw new ArgumentNullException("image");

            CvMat imageMat = image.ToCvMat();
            // A null mask is passed to the native API as a null pointer.
            IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.ToCvMat().CvPtr;
            IntPtr contoursPtr = IntPtr.Zero;

            using (CvMemStorage storage = new CvMemStorage(0))
            {
                // Run the native MSER extractor; contoursPtr receives a CvSeq of contour pointers.
                CvInvoke.cvExtractMSER(imageMat.CvPtr, maskPtr, ref contoursPtr, storage.CvPtr, Struct);

                if (contoursPtr == IntPtr.Zero)
                {
                    // Nothing detected: return an empty jagged array rather than null.
                    return new CvPoint[0][];
                }

                // Wrap the raw sequence, then turn each element pointer into a
                // contour and flatten it into its point array.
                CvSeq<IntPtr> seq = new CvSeq<IntPtr>(contoursPtr);
                IntPtr[] rawContours = seq.ToArray();
                CvPoint[][] result = new CvPoint[rawContours.Length][];
                for (int i = 0; i < rawContours.Length; i++)
                {
                    result[i] = new CvContour(rawContours[i]).ToArray();
                }
                return result;
            }
        }
Пример #14
0
        /// <summary>
        /// Detect squares in the image using contours
        /// </summary>
        /// <param name="img">Input image to search for squares</param>
        /// <returns>Detected square vertices, 4 consecutive points per square</returns>
        public static CvPoint[] DetectSquares(IplImage img)
        {
            using (CvMemStorage storage = new CvMemStorage())
            {
                // Accumulates the results: 4 vertices per detected square.
                CvSeq<CvPoint> squares = new CvSeq<CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

                using (IplImage timg = img.Clone())
                using (IplImage gray = new IplImage(timg.Size, BitDepth.U8, 1))
                using (IplImage edges = new IplImage(timg.Size, BitDepth.U8, 1))
                {
                    // Gray scale -> Canny edges, then dilate to close potential
                    // holes between edge segments.
                    timg.CvtColor(gray, ColorConversion.BgrToGray);
                    Cv.Canny(gray, edges, 70, 300);
                    Cv.Dilate(edges, edges, null, 2);

                    // Find all contours and walk the list.
                    CvSeq<CvPoint> contour;
                    edges.FindContours(storage, out contour);

                    for (; contour != null; contour = contour.HNext)
                    {
                        // Approximate the contour with accuracy proportional
                        // to its perimeter.
                        CvSeq<CvPoint> poly = Cv.ApproxPoly(contour, CvContour.SizeOf, storage, ApproxPolyMethod.DP, contour.ContourPerimeter() * 0.02, false);

                        // Square candidates: exactly 4 vertices, a reasonably
                        // large area, and convex. The absolute value of the area
                        // is used because its sign follows contour orientation.
                        if (poly.Total != 4 ||
                            Math.Abs(poly.ContourArea(CvSlice.WholeSeq)) <= 250 ||
                            !poly.CheckContourConvexity())
                        {
                            continue;
                        }

                        // Maximum cosine over the joint angles; sequence indexing
                        // wraps past the end (index 4 reads vertex 0).
                        double maxCosine = 0;
                        for (int i = 2; i < 5; i++)
                        {
                            double cosine = Math.Abs(Angle(poly[i].Value, poly[i - 2].Value, poly[i - 1].Value));
                            maxCosine = Math.Max(maxCosine, cosine);
                        }

                        // All cosines small => all angles ~90 degrees: keep it.
                        if (maxCosine < 0.3)
                        {
                            for (int i = 0; i < 4; i++)
                            {
                                squares.Push(poly[i].Value);
                            }
                        }
                    }
                }

                return squares.ToArray();
            }
        }
Пример #15
0
    /// <summary>
    /// Attempts to detect a ShapeClip from a single candidate contour:
    /// approximates the contour to a polygon, crops an enlarged bounding box,
    /// rotates it upright, and uses projection clustering to resolve the
    /// clip's final orientation.
    /// </summary>
    /// <param name="contour">Candidate contour in image coordinates.</param>
    /// <param name="image">Source image the contour was found in.</param>
    /// <returns>The detected clip, or null if the contour is not a valid clip.</returns>
    private ShapeClip DetectClip(CvSeq<CvPoint> contour, IplImage image)
    {
        // FIX: the storage was previously never disposed (leak). It must stay
        // alive for as long as 'verts' is used, so the using spans the method.
        using (CvMemStorage cstorage = new CvMemStorage())
        {
            // Approximate the contour to a polygon.
            CvSeq<CvPoint> verts = contour.ApproxPoly(CvContour.SizeOf, cstorage, ApproxPolyMethod.DP, contour.ContourPerimeter() * 0.05);
            CvRect rect = Cv.BoundingRect(verts);

            // Enlarge the bounding box by 50% around its center (clamped to >= 0).
            CvSize originalSize = rect.Size;
            CvSize size = new CvSize((int)(rect.Width * 1.5), (int)(rect.Height * 1.5));
            CvSize sizeDist = new CvSize(rect.Width - size.Width, rect.Height - size.Height);

            rect = new CvRect(
                Math.Max(rect.Location.X + sizeDist.Width / 2, 0),
                Math.Max(rect.Location.Y + sizeDist.Height / 2, 0), size.Width, size.Height);

            // Reject: need at least 4 vertices and the enlarged box fully inside the image.
            if (verts.Total < 4 || !new CvRect(0, 0, image.Width, image.Height).Contains(rect))
            {
                return null;
            }

            // 4 vertices pin the shape down to a 90-degree ambiguity.
            DetectionState detectionState = verts.Total == 4 ? DetectionState.SemiOriented : DetectionState.Candidate;
            double angle = (180.0 / Math.PI) * ComputeOrientationFromVerts(verts.ToArray());

            using (IplImage region = image.Clone(rect))
            // finalRegion/colorRegion/debug are only needed by the commented-out
            // debug visualisation below.
            using (IplImage finalRegion = image.Clone(rect))
            using (IplImage colorRegion = new IplImage(region.Size.Width, region.Size.Height, BitDepth.U8, 3))
            using (IplImage debug = new IplImage(region.Size.Width + 20, region.Size.Height + 20, BitDepth.U8, 3))
            {
                // Rotate into position based on the line angle estimate.
                Cv.WarpAffine(region, region, Cv.GetRotationMatrix2D(new CvPoint2D32f(rect.Width / 2, rect.Height / 2), angle, 1));
                Cv.FloodFill(region, new CvPoint(0, 0), 255, 0, 150);

                // Project the (inverted) image onto each axis and find clusters.
                region.Not(region);
                double[] horizontalProjection, verticalProjection;
                int[] horizontalPrjClusters = ComputeClusters(region, true, out horizontalProjection);
                int horizontalClusters = horizontalPrjClusters[0], lastHorizontalCluster = horizontalPrjClusters[1];
                int[] verticalPrjClusters = ComputeClusters(region, false, out verticalProjection);
                int verticalClusters = verticalPrjClusters[0], lastVerticalCluster = verticalPrjClusters[1];

                // Correct the orientation based on the clusters found.
                bool foundLDRs = false;
                if (verticalClusters > horizontalClusters)
                {
                    // Rotated by 90 or 270 degrees, decided by which half the
                    // last horizontal cluster falls into.
                    angle += (lastHorizontalCluster < region.Width / 2) ? 90 : 270;
                    foundLDRs = true;
                }
                else if (verticalClusters < horizontalClusters)
                {
                    // Rotated by 0 or 180 degrees, decided by which half the
                    // last vertical cluster falls into.
                    if (lastVerticalCluster >= region.Height / 2)
                    {
                        angle += 180;
                    }
                    foundLDRs = true;
                }
                // else: cluster counts tie - the initial alignment went wrong
                // and the LDRs could not be identified.

                #region DEBUG
                //debug.Zero();
                //Cv.CvtColor(finalRegion, colorRegion, ColorConversion.GrayToRgb);
                //debug.DrawImage(20, 0, region.Width, region.Height, colorRegion);

                //for (int i = 0; i < region.Width / 2; i++)
                //    debug.DrawRect(20 + i, debug.Height - (int)(horizontalProjection[i] * 100), 20 + i, debug.Height, CvColor.Red, 1);
                //for (int i = 0; i < region.Height / 2; i++)
                //    debug.DrawRect(0, i, (int)(verticalProjection[i] * 100), i, CvColor.Red, 1);
                //debugWindow.ShowImage(debug);
                #endregion

                if (foundLDRs)
                {
                    detectionState = DetectionState.FullyOriented;
                }
            }

            // Map from image pixel space to screen pixel space.
            Vec2F scale = new Vec2F(screenResolution.X / image.Width, screenResolution.Y / image.Height);

            return new ShapeClip(
                detectionState,
                new Vec2F(rect.Location.X + 0.5f * rect.Width, rect.Location.Y + 0.5f * rect.Height).Scale(scale),
                new Vec2F(originalSize).Scale(scale),
                angle);
        }
    }