Example #1
        public HoughCircles()
        {
            using (IplImage imgSrc = new IplImage(Const.ImageWalkman, LoadMode.Color))
            using (IplImage imgGray = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (IplImage imgHough = imgSrc.Clone())
            {
                Cv.CvtColor(imgSrc, imgGray, ColorConversion.BgrToGray);
                Cv.Smooth(imgGray, imgGray, SmoothType.Gaussian, 9);
                //Cv.Canny(imgGray, imgGray, 75, 150, ApertureSize.Size3);

                using (CvMemStorage storage = new CvMemStorage())
                {
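                    // Parameters below (mirroring the cvHoughCircles argument order): dp = 1, minDist = 100,
                    // param1 (Canny high threshold) = 150, param2 (accumulator threshold) = 55, minRadius = 0, maxRadius = 0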
                    CvSeq<CvCircleSegment> seq = imgGray.HoughCircles(storage, HoughCirclesMethod.Gradient, 1, 100, 150, 55, 0, 0);
                    foreach (CvCircleSegment item in seq)
                    {
                        imgHough.Circle(item.Center, (int)item.Radius, CvColor.Red, 3);
                    }
                }

                // (5) Create windows and show the detection results
                using (new CvWindow("gray", WindowMode.AutoSize, imgGray))
                using (new CvWindow("Hough circles", WindowMode.AutoSize, imgHough))
                {
                    CvWindow.WaitKey(0);
                }
            }
        }
Example #2
        /// <summary>
        /// Generates random points and partitions them into classes by a distance threshold (cvSeqPartition sample)
        /// </summary>
        public SeqPartition()
        {
            CvMemStorage storage = new CvMemStorage(0);
            pointSeq = new CvSeq<CvPoint>(SeqType.EltypeS32C2, CvSeq.SizeOf, storage);
            Random rand = new Random();
            canvas = new IplImage(Width, Height, BitDepth.U8, 3);

            colors = new CvScalar[Count];
            for (int i = 0; i < Count; i++)
            {
                CvPoint pt = new CvPoint
                {
                    X = rand.Next(Width),
                    Y = rand.Next(Height)
                };
                pointSeq.Push(pt);
                int icolor = rand.Next() | 0x00404040;
                colors[i] = Cv.RGB(icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255);
            }

            using (window = new CvWindowEx() { Text = "points" })
            {
                window.CreateTrackbar("threshold", 10, 50, OnTrack);
                OnTrack(10);
                CvWindowEx.WaitKey();
            }
        }
Example #3
        public FaceDetect()
        {
            CheckMemoryLeak();

            // CvHaarClassifierCascade, cvHaarDetectObjects

            CvColor[] colors = new CvColor[]{
                new CvColor(0,0,255),
                new CvColor(0,128,255),
                new CvColor(0,255,255),
                new CvColor(0,255,0),
                new CvColor(255,128,0),
                new CvColor(255,255,0),
                new CvColor(255,0,0),
                new CvColor(255,0,255),
            };

            const double Scale = 1.14;
            const double ScaleFactor = 1.0850;
            const int MinNeighbors = 2;

            using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
            {
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }

                using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))  
                using (var storage = new CvMemStorage())
                {
                    storage.Clear();

                    // Detect faces
                    Stopwatch watch = Stopwatch.StartNew();
                    CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                    watch.Stop();
                    Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);

                    // Draw a circle around each detected face
                    for (int i = 0; i < faces.Total; i++)
                    {
                        CvRect r = faces[i].Value.Rect;
                        CvPoint center = new CvPoint
                        {
                            X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                            Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                        };
                        int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                        img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                    }
                }

                // Show the result in a window
                CvWindow.ShowImages(img);
            }
        }
Example #4
        public Squares()
        {
            // create memory storage that will contain all the dynamic data
            CvMemStorage storage = new CvMemStorage(0);

            for (int i = 0; i < _names.Length; i++)
            {
                // load i-th image
                using (IplImage img = new IplImage(_names[i], LoadMode.Color))
                {
                    // create a window to display the result
                    Cv.NamedWindow(WindowName, WindowMode.AutoSize);

                    // find and draw the squares
                    DrawSquares(img, FindSquares4(img, storage));                    
                }

                // clear memory storage - reset free space position
                storage.Clear(); 

                // wait for key.
                // Also the function cvWaitKey takes care of event processing
                int c = Cv.WaitKey(0);
                if ((char)c == 27)
                    break;
            }

            Cv.DestroyWindow(WindowName);
        }
Example #5
        public LatentSVM()
        {
            using (var detector = new CvLatentSvmDetector(FilePath.Text.LatentSvmCat))
            using (var imageSrc = new IplImage(FilePath.Image.Cat, LoadMode.Color))
            using (var imageDst = imageSrc.Clone())
            using (var storage = new CvMemStorage())
            {
                Console.WriteLine("Running LatentSVM...");
                Stopwatch watch = Stopwatch.StartNew();

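                // Parameters: overlapThreshold = 0.5, numThreads = 2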
                CvSeq<CvObjectDetection> result = detector.DetectObjects(imageSrc, storage, 0.5f, 2);

                watch.Stop();
                Console.WriteLine("Elapsed time: {0}ms", watch.ElapsedMilliseconds);

                foreach (CvObjectDetection detection in result)
                {
                    CvRect boundingBox = detection.Rect;
                    imageDst.Rectangle(
                        new CvPoint(boundingBox.X, boundingBox.Y), 
                        new CvPoint(boundingBox.X + boundingBox.Width, boundingBox.Y + boundingBox.Height),
                        CvColor.Red, 3);
                }

                using (new CvWindow("LatentSVM result", imageDst))
                {
                    Cv.WaitKey();
                }
            }
        }
Example #6
        /// <summary>
        /// Finds the contours in an image and selects the longest one that is a hole boundary
        /// </summary>
        /// <param name="image">The image in which to look for contours</param>
        /// <returns>The search result</returns>
        public CvPoint[] FindMostLengthHole(IplImage image)
        {
            CvMemStorage contours = new CvMemStorage();
            CvSeq<CvPoint> firstContour, mostLengthContour = null;
            double maxContourLength = 0, perim = 0;

            // Separate the image from the background
            separateBackground(image, tmpImg);

            // Find all contours in the image
            Cv.FindContours(tmpImg, contours, out firstContour, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxNone);

            // If no contours were found
            if (firstContour == null) return new CvPoint[0];

            // Find the longest contour (iterate until the sibling pointer runs out)
            for (CvSeq<CvPoint> currentContour = firstContour; currentContour != null; currentContour = currentContour.HNext)
            {
                if (isHole(currentContour))
                {
                    perim = Cv.ContourPerimeter(currentContour);

                    if (perim >= maxContourLength)
                    {
                        maxContourLength = perim;
                        mostLengthContour = currentContour;
                    }
                }
            }

            // If no hole was found
            if (mostLengthContour == null) return new CvPoint[0];

            return mostLengthContour.ToArray();
        }
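A minimal usage sketch (an assumption, not part of the original listing; image is a loaded IplImage):

        CvPoint[] hole = FindMostLengthHole(image);
        // Mark each point of the longest hole boundary, if one was found
        foreach (CvPoint p in hole)
            image.Circle(p, 1, CvColor.Red);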
Example #7
            /// <summary>
            /// Finds faces in an image
            /// </summary>
            /// <param name="file_name"></param>
            /// <param name="read_count"></param>
            private void DetectFace(String file_name,int read_count)
            {
                // Load the feature data of the cascade classifier
                CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(@"C:\opencv2.4.8\sources\data\haarcascades\haarcascade_frontalface_alt.xml");
                CvMemStorage strage = new CvMemStorage(0);   // Allocate memory storage

                using (IplImage img = new IplImage(file_name))
                {
                    // Convert to grayscale
                    using (IplImage gray_image = Cv.CreateImage(new CvSize(img.Width, img.Height), BitDepth.U8, 1))
                    {
                        Cv.CvtColor(img, gray_image, ColorConversion.BgrToGray);

                        // Detected rectangles
                        var result = Cv.HaarDetectObjects(gray_image, cascade, strage);
                        for (int i = 0; i < result.Total; i++)
                        {
                            // Draw the rectangle onto the image
                            CvRect rect = result[i].Value.Rect;
                            Cv.Rectangle(img, rect, new CvColor(255, 0, 0));

                            // Write the rectangular region out to a file
                            img.ROI = rect;
                            string out_name = this.OutputFoldaName + @"\out" + read_count + @"_" + i + @".bmp";
                            Cv.SaveImage(out_name, img);
                        }
                    }
                }
            }
Example #8
        public System.Drawing.Bitmap FaceDetect(IplImage src)
        {
            
            // CvHaarClassifierCascade, cvHaarDetectObjects
            // Use a cascade of Haar classifiers to detect faces

            CvColor[] colors = new CvColor[]{
                new CvColor(0,0,255),
                new CvColor(0,128,255),
                new CvColor(0,255,255),
                new CvColor(0,255,0),
                new CvColor(255,128,0),
                new CvColor(255,255,0),
                new CvColor(255,0,0),
                new CvColor(255,0,255),
            };

            const double scale = 1.04;
            const double scaleFactor = 1.139;
            const int minNeighbors = 1;

            using (IplImage img = src.Clone())
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
            {
                // Create the image used for face detection.
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }

                using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Environment.CurrentDirectory + "\\" + "haarcascade_frontalface_alt.xml"))
                using (CvMemStorage storage = new CvMemStorage())
                {
                    storage.Clear();

                    // Detect faces.
                    CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(20, 20));

                    // Cover each detected face with a filled black circle.
                    for (int i = 0; i < faces.Total; i++)
                    {
                        CvRect r = faces[i].Value.Rect;
                        CvPoint center = new CvPoint
                        {
                            X = Cv.Round((r.X + r.Width * 0.5) * scale),
                            Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                        };
                        int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                        img.Circle(center, radius, new CvColor(0, 0, 0), -1, LineType.Link8, 0);
                    }
                }
                FindFace = img.Clone();

                // Convert the resulting IplImage to a Bitmap and return it.
                return FindFace.ToBitmap(System.Drawing.Imaging.PixelFormat.Format24bppRgb);
            }
        }
Example #9
        public Contour()
        {
            // cvContourArea, cvArcLength
            // Compute the area of the region bounded by a contour, and the contour's length
            
            const int SIZE = 500;

            // (1) Allocate and initialize the image
            using (CvMemStorage storage = new CvMemStorage())
            using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
            {
                img.Zero();
                // (2) Generate a point sequence
                CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
                CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);
                double scale = rng.RandReal() + 0.5;
                CvPoint pt0 = new CvPoint
                {
                    X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
                    Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
                };
                img.Circle(pt0, 2, CvColor.Green);
                points.Push(pt0);
                for (int i = 1; i < 20; i++)
                {
                    scale = rng.RandReal() + 0.5;
                    CvPoint pt1 = new CvPoint
                    {
                        X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
                        Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
                    };
                    img.Line(pt0, pt1, CvColor.Green, 2);
                    pt0.X = pt1.X;
                    pt0.Y = pt1.Y;
                    img.Circle(pt0, 3, CvColor.Green, Cv.FILLED);
                    points.Push(pt0);
                }
                img.Line(pt0, points.GetSeqElem(0).Value, CvColor.Green, 2);
                // (3) Compute the bounding rectangle, area, and length
                CvRect rect = points.BoundingRect(false);
                double area = points.ContourArea();
                double length = points.ArcLength(CvSlice.WholeSeq, 1);
                // (4) Draw the results on the image
                img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), CvColor.Red, 2);
                string text_area = string.Format("Area:   wrect={0}, contour={1}", rect.Width * rect.Height, area);
                string text_length = string.Format("Length: rect={0}, contour={1}", 2 * (rect.Width + rect.Height), length);
                using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
                {
                    img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
                    img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
                }
                // (5) Show the image; exit when a key is pressed
                using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
                {
                    window.Image = img;
                    CvWindow.WaitKey(0);
                }
            }
        }
Example #10
        /// <summary>
        /// Creates an empty graph
        /// </summary>
        /// <param name="graphFlags">Type of the created graph. Usually, it is either CV_SEQ_KIND_GRAPH for generic unoriented graphs or CV_SEQ_KIND_GRAPH | CV_GRAPH_FLAG_ORIENTED for generic oriented graphs.</param>
        /// <param name="headerSize">Graph header size; may not be less than sizeof(CvGraph).</param>
        /// <param name="vtxSize">Graph vertex size; the custom vertex structure must start with CvGraphVtx (use CV_GRAPH_VERTEX_FIELDS()).</param>
        /// <param name="edgeSize">Graph edge size; the custom edge structure must start with CvGraphEdge (use CV_GRAPH_EDGE_FIELDS()).</param>
        /// <param name="storage">The graph container.</param>
        /// <remarks>The function cvCreateGraph creates an empty graph and returns it.</remarks>
        public CvGraph(SeqType graphFlags, int vtxSize, int edgeSize, CvMemStorage storage, int headerSize)
        {
            if (storage == null)
                throw new ArgumentNullException("storage");
            
            IntPtr p = NativeMethods.cvCreateGraph(graphFlags, headerSize, vtxSize, edgeSize, storage.CvPtr);
            Initialize(p);
            holdingStorage = storage;
        }
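A minimal usage sketch of this constructor (an assumption, not from the library docs; SeqType.KindGraph and the SizeOf fields are assumed to exist, by analogy with SeqType.KindSubdiv2D and CvSeq.SizeOf used elsewhere on this page):

        using (CvMemStorage storage = new CvMemStorage())
        {
            // An empty unoriented graph with default-sized headers
            CvGraph graph = new CvGraph(SeqType.KindGraph,
                CvGraphVtx.SizeOf, CvGraphEdge.SizeOf, storage, CvGraph.SizeOf);
        }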
Example #11
        /// <summary>
        /// Creates an empty set
        /// </summary>
        /// <param name="setFlags">Type of the created set.</param>
        /// <param name="headerSize">Set header size; may not be less than sizeof(CvSet).</param>
        /// <param name="elemSize">Set element size; may not be less than sizeof(CvSetElem).</param>
        /// <param name="storage">Container for the set.</param>
        public CvSet(SeqType setFlags, int headerSize, int elemSize, CvMemStorage storage)
        {
            if (storage == null)
                throw new ArgumentNullException("storage");
            
            IntPtr p = NativeMethods.cvCreateSet(setFlags, headerSize, elemSize, storage.CvPtr);
            Initialize(p);
            holdingStorage = storage;
        }
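A minimal usage sketch of this constructor (an assumption, not from the library docs; CvSet.SizeOf and CvSetElem.SizeOf are assumed by analogy with CvSeq.SizeOf used elsewhere on this page):

        using (CvMemStorage storage = new CvMemStorage())
        {
            // An empty set with default-sized header and elements
            CvSet set = new CvSet(SeqType.Zero, CvSet.SizeOf, CvSetElem.SizeOf, storage);
        }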
Example #12
    // Update is called once per frame
    void Update()
    {
        IplImage frame = Cv.QueryFrame(capture);

        using (IplImage img = Cv.CloneImage(frame))
        using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
        {
            using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
            {
                Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                Cv.Resize(gray, smallImg, Interpolation.Linear);
                Cv.EqualizeHist(smallImg, smallImg);
            }

            using (CvMemStorage storage = new CvMemStorage())
            {
                storage.Clear();

                CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(64, 64));

                for (int i = 0; i < faces.Total; i++)
                {
                    CvRect r = faces[i].Value.Rect;
                    CvPoint center = new CvPoint
                    {
                        X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                        Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                    };
                    int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                    img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                }

                if (faces.Total > 0)
                {
                    CvRect r = faces[0].Value.Rect;
                    facepos = new Vector2((r.X + r.Width / 2.0f) / CAPTURE_WIDTH, (r.Y + r.Height / 2.0f) / CAPTURE_HEIGHT);

                }
                else
                {
                    facepos = Vector2.zero;
                }

                if (facepos.x >= 0.2 && facepos.x <= 0.7 && facepos.y >= 0.2 && facepos.y <= 0.7)
                {
                    isFaceInCapture = true;
                }
                else
                {
                    isFaceInCapture = false;
                }
            }

            Cv.ShowImage("FaceDetect", img);
        }
    }
Example #13
        /// <summary>
        /// Creates an empty set
        /// </summary>
        /// <param name="set_flags">Type of the created set.</param>
        /// <param name="header_size">Set header size; may not be less than sizeof(CvSet).</param>
        /// <param name="elem_size">Set element size; may not be less than sizeof(CvSetElem).</param>
        /// <param name="storage">Container for the set.</param>
        public CvSet(SeqType set_flags, int header_size, int elem_size, CvMemStorage storage)
        {
            if (storage == null)
            {
                throw new ArgumentNullException("storage");
            }
            IntPtr ptr = CvInvoke.cvCreateSet(set_flags, header_size, elem_size, storage.CvPtr);
            Initialize(ptr);
            this._storage = storage;
        }
Example #14
        /// <summary>
        /// Creates an empty graph
        /// </summary>
        /// <param name="graph_flags">Type of the created graph. Usually, it is either CV_SEQ_KIND_GRAPH for generic unoriented graphs or CV_SEQ_KIND_GRAPH | CV_GRAPH_FLAG_ORIENTED for generic oriented graphs.</param>
        /// <param name="header_size">Graph header size; may not be less than sizeof(CvGraph).</param>
        /// <param name="vtx_size">Graph vertex size; the custom vertex structure must start with CvGraphVtx (use CV_GRAPH_VERTEX_FIELDS()).</param>
        /// <param name="edge_size">Graph edge size; the custom edge structure must start with CvGraphEdge (use CV_GRAPH_EDGE_FIELDS()).</param>
        /// <param name="storage">The graph container.</param>
        /// <remarks>The function cvCreateGraph creates an empty graph and returns it.</remarks>
        public CvGraph(SeqType graph_flags, int vtx_size, int edge_size, CvMemStorage storage, int header_size)
        {
            if (storage == null)
            {
                throw new ArgumentNullException("storage");
            }
            IntPtr ptr = CvInvoke.cvCreateGraph(graph_flags, header_size, vtx_size, edge_size, storage.CvPtr);
            Initialize(ptr);
            this._storage = storage;
        }
Example #15
        public ConvexityDefect()
        {
            using (var imgSrc = new IplImage(FilePath.Image.Hand, LoadMode.Color))
            using (var imgHSV = new IplImage(imgSrc.Size, BitDepth.U8, 3))
            using (var imgH = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (var imgS = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (var imgV = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (var imgBackProjection = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (var imgFlesh = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (var imgHull = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (var imgDefect = new IplImage(imgSrc.Size, BitDepth.U8, 3))
            using (var imgContour = new IplImage(imgSrc.Size, BitDepth.U8, 3))
            using (var storage = new CvMemStorage())
            {
                // BGR -> HSV
                Cv.CvtColor(imgSrc, imgHSV, ColorConversion.BgrToHsv);
                Cv.CvtPixToPlane(imgHSV, imgH, imgS, imgV, null);
                IplImage[] hsvPlanes = { imgH, imgS, imgV };

                // extract the flesh-color region
                RetrieveFleshRegion(imgSrc, hsvPlanes, imgBackProjection);
                // keep only the largest blob
                FilterByMaximumBlob(imgBackProjection, imgFlesh);
                Interpolate(imgFlesh);

                // find contours of the max blob
                CvSeq<CvPoint> contours = FindContours(imgFlesh, storage);
                if (contours != null)
                {
                    Cv.DrawContours(imgContour, contours, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);

                    // finds convex hull
                    int[] hull;
                    Cv.ConvexHull2(contours, out hull, ConvexHullOrientation.Clockwise);
                    Cv.Copy(imgFlesh, imgHull);
                    DrawConvexHull(contours, hull, imgHull);

                    // finds the convexity defects
                    Cv.Copy(imgContour, imgDefect);
                    CvSeq<CvConvexityDefect> defect = Cv.ConvexityDefects(contours, hull);
                    DrawDefects(imgDefect, defect);
                }

                using (new CvWindow("src", imgSrc))
                using (new CvWindow("back projection", imgBackProjection))
                using (new CvWindow("hull", imgHull))
                using (new CvWindow("defect", imgDefect))
                {
                    Cv.WaitKey();
                }
            }
        }
Example #16
        public ConvexityDefect()
        {
            using (IplImage imgSrc = new IplImage(Const.ImageHand, LoadMode.Color))
            using (IplImage imgHSV = new IplImage(imgSrc.Size, BitDepth.U8, 3))
            using (IplImage imgH = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (IplImage imgS = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (IplImage imgV = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (IplImage imgBackProjection = new IplImage(imgSrc.Size, BitDepth.U8, 1))     
            using (IplImage imgFlesh = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (IplImage imgHull = new IplImage(imgSrc.Size, BitDepth.U8, 1))
            using (IplImage imgDefect = new IplImage(imgSrc.Size, BitDepth.U8, 3))
            using (IplImage imgContour = new IplImage(imgSrc.Size, BitDepth.U8, 3))
            using (CvMemStorage storage = new CvMemStorage())
            {
                // BGR -> HSV
                Cv.CvtColor(imgSrc, imgHSV, ColorConversion.BgrToHsv);
                Cv.CvtPixToPlane(imgHSV, imgH, imgS, imgV, null);
                IplImage[] hsvPlanes = { imgH, imgS, imgV };

                // extract the flesh-color region
                RetrieveFleshRegion(imgSrc, hsvPlanes, imgBackProjection);
                // keep only the region with the largest area
                FilterByMaximalBlob(imgBackProjection, imgFlesh);
                Interpolate(imgFlesh);

                // find the contours
                CvSeq<CvPoint> contours = FindContours(imgFlesh, storage);
                if (contours != null)
                {
                    Cv.DrawContours(imgContour, contours, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);

                    // find the convex hull
                    int[] hull;
                    Cv.ConvexHull2(contours, out hull, ConvexHullOrientation.Clockwise);
                    Cv.Copy(imgFlesh, imgHull);
                    DrawConvexHull(contours, hull, imgHull);

                    // find the convexity defects
                    Cv.Copy(imgContour, imgDefect);
                    CvSeq<CvConvexityDefect> defect = Cv.ConvexityDefects(contours, hull);
                    DrawDefects(imgDefect, defect);
                }

                using (new CvWindow("src", imgSrc))
                using (new CvWindow("back projection", imgBackProjection))
                using (new CvWindow("hull", imgHull))
                using (new CvWindow("defect", imgDefect))
                {
                    Cv.WaitKey();
                }
            }
        }
Example #17
        public ContourScanner()
        {
            // create IplImages
            using (var src = new IplImage(FilePath.Image.Lenna, LoadMode.Color))
            using (var gray = new IplImage(src.Size, BitDepth.U8, 1))
            using (var canny = new IplImage(src.Size, BitDepth.U8, 1))
            using (var result = src.Clone())
            {
                // detect edges
                Cv.CvtColor(src, gray, ColorConversion.BgrToGray);
                Cv.Canny(gray, canny, 50, 200);

                // find all contours
                using (CvMemStorage storage = new CvMemStorage())
                {
                    // find contours by CvContourScanner

                    // native style
                    /*
                    CvContourScanner scanner = Cv.StartFindContours(canny, storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple);
                    while (true)
                    {
                        CvSeq<CvPoint> c = Cv.FindNextContour(scanner);
                        if (c == null)
                            break;
                        else
                            Cv.DrawContours(result, c, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);
                    }
                    Cv.EndFindContours(scanner);
                    //*/

                    // wrapper style
                    using (CvContourScanner scanner = new CvContourScanner(canny, storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
                    {
                        foreach (CvSeq<CvPoint> c in scanner)
                        {
                            result.DrawContours(c, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);
                        }
                    }                    
                }

                // show canny and result
                using (new CvWindow("ContourScanner canny", canny))
                using (new CvWindow("ContourScanner result", result))
                {
                    Cv.WaitKey();
                }             
            }
        }
Example #18
        /// <summary>
        /// Initializes the capture using the first camera
        /// </summary>
        /// <param name="output"></param>
        public Camera(logOutput output = null)
        {
            LogOutput = output;

            // Set up the camera
            cap = Cv.CreateCameraCapture(0);
            Log(cap.CaptureType + ", " + cap.FrameWidth + "x" + cap.FrameHeight + ", " + cap.Mode);

            Cv.SetCaptureProperty(cap, CaptureProperty.FrameWidth, WIDTH);
            Cv.SetCaptureProperty(cap, CaptureProperty.FrameHeight, HEIGHT);

            // Set up the detector
            cvHCC = Cv.Load<CvHaarClassifierCascade>("haarcascade_profileface.xml");
            stor = Cv.CreateMemStorage(0);
        }
Example #19
        public unsafe SeqTest()
        {
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                Random rand = new Random();
                CvSeq<int> seq = new CvSeq<int>(SeqType.EltypeS32C1, storage);
                // push
                for (int i = 0; i < 10; i++)
                {
                    int push = seq.Push(rand.Next(100));//seq.Push(i);
                    Console.WriteLine("{0} is pushed", push);
                }
                Console.WriteLine("----------");

                // enumerate
                Console.WriteLine("contents of seq");
                foreach (int item in seq)
                {
                    Console.Write("{0} ", item);
                }
                Console.WriteLine();

                // sort
                CvCmpFunc<int> func = delegate(int a, int b)
                {
                    return a.CompareTo(b);
                };
                seq.Sort(func);

                // convert to array
                int[] array = seq.ToArray();
                Console.WriteLine("contents of sorted seq");
                foreach (int item in array)
                {
                    Console.Write("{0} ", item);
                }
                Console.WriteLine();
                Console.WriteLine("----------");

                // pop
                for (int i = 0; i < 10; i++)
                {
                    int pop = seq.Pop();
                    Console.WriteLine("{0} is popped", pop);
                }
                Console.ReadKey();
            }
        }
Example #20
        public TreeNodeIterator()
        {
            using (CvMemStorage storage = new CvMemStorage(0))
            using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.Color))
            using (IplImage srcImgGray = new IplImage(srcImg.Size, BitDepth.U8, 1))
            using (IplImage tmpImg = new IplImage(srcImg.Size, BitDepth.U8, 1))
            {
                Cv.CvtColor(srcImg, srcImgGray, ColorConversion.BgrToGray);

                // (1) Binarize the image and find contours
                Cv.Threshold(srcImgGray, tmpImg, 120, 255, ThresholdType.Binary);
                CvSeq<CvPoint> contours;
                Cv.FindContours(tmpImg, storage, out contours, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple);
                /* Get the coordinates from the contour sequence */
                using (CvFileStorage fs = new CvFileStorage("contours.yaml", null, FileStorageMode.Write))
                {
                    // (2) Initialize the tree node iterator
                    CvTreeNodeIterator<CvSeq<CvPoint>> it = new CvTreeNodeIterator<CvSeq<CvPoint>>(contours, 1);
                    // (3) Visit each node (contour)
                    //CvSeq<CvPoint> contour;
                    //while ((contour = it.NextTreeNode()) != null)
                    foreach(CvSeq<CvPoint> contour in it)
                    {
                        fs.StartWriteStruct("contour", NodeType.Seq);
                        // (4) Get the vertex coordinates that make up the contour
                        CvPoint tmp = contour[-1].Value;
                        for (int i = 0; i < contour.Total; i++)
                        {
                            CvPoint point = contour[i].Value;
                            srcImg.Line(tmp, point, CvColor.Blue, 2);
                            fs.StartWriteStruct(null, NodeType.Map | NodeType.Flow);
                            fs.WriteInt("x", point.X);
                            fs.WriteInt("y", point.Y);
                            fs.EndWriteStruct();
                            tmp = point;
                        }
                        fs.EndWriteStruct();
                    }
                }

                Console.WriteLine(File.ReadAllText("contours.yaml"));

                using (new CvWindow("Contours", srcImg))
                {
                    Cv.WaitKey(0);
                }
            }
        }
Example #21
        public BoundingRect()
        {
            // cvBoundingRect
            // Computes the rectangle bounding a set of points

            // (1) Allocate and initialize the image and memory storage
            // (the memory storage is unnecessary if CvSeq is not used)
            using (IplImage img = new IplImage(640, 480, BitDepth.U8, 3))
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                img.Zero();
                CvRNG rng = new CvRNG(DateTime.Now);
                // (2) Generate a point sequence
                ///*
                // The easy way (use a plain array)
                CvPoint[] points = new CvPoint[50];
                for (int i = 0; i < 50; i++)
                {
                    points[i] = new CvPoint()
                    {
                        X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4),
                        Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4)
                    };
                    img.Circle(points[i], 3, new CvColor(0, 255, 0), Cv.FILLED);
                }
                //*/
                /*
                // The way the original sample does it (use CvSeq)
                CvSeq points = new CvSeq(SeqType.EltypePoint, CvSeq.SizeOf, CvPoint.SizeOf, storage);
                for (int i = 0; i < 50; i++) {
                    CvPoint pt = new CvPoint();
                    pt.X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4);
                    pt.Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4);
                    points.Push(pt);
                    img.Circle(pt, 3, new CvColor(0, 255, 0), Cv.FILLED);
                }
                //*/
                // (3) Compute and draw the rectangle bounding the points
                CvRect rect = Cv.BoundingRect(points);
                img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), new CvColor(255, 0, 0), 2);
                // (4) Show the image; exit when a key is pressed
                using (CvWindow w = new CvWindow("BoundingRect", WindowMode.AutoSize, img))
                {
                    CvWindow.WaitKey(0);
                }
            }
        }
Example #22
        public Delaunay()
        {
            CvRect rect = new CvRect(0, 0, 600, 600);
            CvColor activeFacetColor = new CvColor(255, 0, 0);
            CvColor delaunayColor = new CvColor(0, 0, 0);
            CvColor voronoiColor = new CvColor(0, 180, 0);
            CvColor bkgndColor = new CvColor(255, 255, 255);
            Random rand = new Random();
            
            using (CvMemStorage storage = new CvMemStorage(0))
            using (IplImage img = new IplImage(rect.Size, BitDepth.U8, 3))
            using (CvWindow window = new CvWindow("delaunay"))
            {
                img.Set(bkgndColor);
                CvSubdiv2D subdiv = new CvSubdiv2D(rect, storage);
                for (int i = 0; i < 200; i++)
                {
                    CvPoint2D32f fp = new CvPoint2D32f
                    {
                        X = (float)rand.Next(5, rect.Width - 10),
                        Y = (float)rand.Next(5, rect.Height - 10)
                    };
                    LocatePoint(subdiv, fp, img, activeFacetColor);
                    window.Image = img;

                    if (CvWindow.WaitKey(100) >= 0)
                    {
                        break;
                    }
                    subdiv.Insert(fp);
                    subdiv.CalcVoronoi2D();
                    img.Set(bkgndColor);
                    DrawSubdiv(img, subdiv, delaunayColor, voronoiColor);
                    window.Image = img;
                    if (CvWindow.WaitKey(100) >= 0)
                    {
                        break;
                    }
                }
                img.Set(bkgndColor);
                PaintVoronoi(subdiv, img);
                window.Image = img;

                CvWindow.WaitKey(0);
            }
        }
Example #23
        /// <summary>
        /// Extracts MSER by C-style code (cvExtractMSER)
        /// </summary>
        /// <param name="imgGray"></param>
        /// <param name="imgRender"></param>
        private void CStyleMSER(IplImage imgGray, IplImage imgDst)
        {
            using (CvMemStorage storage = new CvMemStorage())
            {
                CvContour[] contours;
                CvMSERParams param = new CvMSERParams();
                Cv.ExtractMSER(imgGray, null, out contours, storage, param);

                foreach (CvContour c in contours)
                {
                    CvColor color = CvColor.Random();
                    for (int i = 0; i < c.Total; i++)
                    {
                        imgDst.Circle(c[i].Value, 1, color);
                    }
                }
            }
        }
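A minimal usage sketch (an assumption, not part of the original listing; the file name and LoadMode.GrayScale are placeholders):

        using (IplImage gray = new IplImage("lenna.png", LoadMode.GrayScale))
        using (IplImage dst = new IplImage("lenna.png", LoadMode.Color))
        {
            CStyleMSER(gray, dst);   // draws every MSER region onto dst in a random color
        }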
Example #24
        // Loads an image file
        private bool LoadImageFile(String file_name)
        {
            // Load the feature data of the cascade classifier
            CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(@"C:\opencv2.4.10\sources\data\haarcascades\haarcascade_frontalface_alt.xml");
            CvMemStorage strage = new CvMemStorage(0);   // Allocate memory storage
            this.ImageFileName = file_name;

            using (IplImage img = new IplImage(this.ImageFileName))
            {
                // Convert to grayscale
                using( IplImage gray_image = Cv.CreateImage(new CvSize(img.Width,img.Height),BitDepth.U8,1) )
                {
                    Cv.CvtColor(img, gray_image, ColorConversion.BgrToGray);

                    // Detected rectangles
                    var result = Cv.HaarDetectObjects(gray_image, cascade, strage);
                    for (int i = 0; i < result.Total; i++)
                    {
                        // Draw the rectangle onto the image
                        CvRect rect = result[i].Value.Rect;
                        Cv.Rectangle(img, rect, new CvColor(255, 0, 0));

                        // Copy the IplImage
                        img.ROI = rect;
                        CvRect roi_rect = img.ROI;
                        IplImage ipl_image = Cv.CreateImage(new CvSize(img.Width, img.Height), BitDepth.U8, 1);
                        ipl_image = img.Clone(img.ROI);
/*
                        // Check the result
                        new CvWindow(ipl_image);
                        Cv.WaitKey();
*/
                        // Record every face candidate found so they can all be checked later
                        this.FaceIplList.Add(ipl_image);
                    }
                }

                // Release memory
                cascade.Dispose();
                strage.Dispose();

                return true;
            }
        }
Example #25
        public Bitmap Detect2()
        {
            IplImage image = videoCapture.QueryFrame();
            IplImage ret = getHandPicturesGMG(image);

            CvMemStorage storage = new CvMemStorage();
            CvSeq<CvAvgComp> hands = Cv.HaarDetectObjects(ret, cascade, storage, 1.139);
            foreach (CvAvgComp obj in hands)
            {
                ret.Rectangle(obj.Rect, CvColor.Red);
            }

            Bitmap bt = ret.ToBitmap();
            ret.Dispose();
            image.Dispose();
            storage.Dispose();
            hands.Dispose();
            return bt;
        }
Example #26
        /// <summary>
        /// Extracts keypoints by C-style code (cvGetStarKeypoints)
        /// </summary>
        /// <param name="img"></param>
        /// <param name="cimg"></param>
        private void CStyleStarDetector(IplImage img, IplImage cimg)
        {
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                CvStarDetectorParams param = new CvStarDetectorParams(45);
                CvSeq<CvStarKeypoint> keypoints = Cv.GetStarKeypoints(img, storage, param);

                if (keypoints != null)
                {
                    for (int i = 0; i < keypoints.Total; i++)
                    {
                        CvStarKeypoint kpt = keypoints[i].Value;
                        int r = kpt.Size / 2;
                        //Cv.Circle(cimg, kpt.Pt, r, new CvColor(0, 255, 0));
                        //Cv.Line(cimg, new CvPoint(kpt.Pt.X + r, kpt.Pt.Y + r), new CvPoint(kpt.Pt.X - r, kpt.Pt.Y - r), new CvColor(0, 255, 0));
                        //Cv.Line(cimg, new CvPoint(kpt.Pt.X - r, kpt.Pt.Y + r), new CvPoint(kpt.Pt.X + r, kpt.Pt.Y - r), new CvColor(0, 255, 0));
                        cimg.DrawMarker(kpt.Pt.X, kpt.Pt.Y, CvColor.Green, MarkerStyle.CircleAndTiltedCross, kpt.Size);
                    }
                }
            }
        }
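A minimal usage sketch (an assumption, not part of the original listing; the file name is a placeholder):

        using (IplImage img = new IplImage("lenna.png", LoadMode.GrayScale))
        using (IplImage cimg = new IplImage("lenna.png", LoadMode.Color))
        {
            CStyleStarDetector(img, cimg);   // draws a marker at every detected keypoint
        }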
Example #27
        /// <summary>
        /// Initializes using cvCreateSubdivDelaunay2D
        /// </summary>
        /// <param name="rect"></param>
        /// <param name="storage"></param>
        public CvSubdiv2D(CvRect rect, CvMemStorage storage)
        {
            if (storage == null)
            {
                throw new ArgumentNullException("storage");
            }

            IntPtr subdiv = CvInvoke.cvCreateSubdiv2D(
                SeqType.KindSubdiv2D, CvSubdiv2D.SizeOf, CvSubdiv2DPoint.SizeOf, CvQuadEdge2D.SizeOf, storage.CvPtr
            );

            if (subdiv == IntPtr.Zero)
            {
                throw new OpenCvSharpException("Failed to create CvSubdiv2D");
            }

            CvInvoke.cvInitSubdivDelaunay2D(subdiv, rect);

            Initialize(subdiv);
            holdingStorage = storage;
        }
Example #28
        private static CvSeq<CvPoint> FindContours(IplImage img, CvMemStorage storage)
        {
            CvSeq<CvPoint> contours;
            using (IplImage imgClone = img.Clone())
            {
                Cv.FindContours(imgClone, storage, out contours);
                if (contours == null)
                {
                    return null;
                }
                contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);
            }

            CvSeq<CvPoint> max = contours;
            for (CvSeq<CvPoint> c = contours; c != null; c = c.HNext)
            {
                if (max.Total < c.Total)
                {
                    max = c;
                }
            }
            return max;
        }
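A minimal usage sketch (an assumption, not part of the original listing; bin is a binary image such as the back-projections used in the examples above):

        using (CvMemStorage storage = new CvMemStorage())
        {
            CvSeq<CvPoint> largest = FindContours(bin, storage);
            if (largest != null)
                Console.WriteLine("largest contour: {0} points", largest.Total);
        }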
Example #29
        /// <summary>
        /// Creates a sequence
        /// </summary>
        /// <param name="seqFlags">Flags of the created sequence. If the sequence is not passed to any function working with a specific type of sequences, the value may be set to 0; otherwise the appropriate type must be selected from the list of predefined sequence types.</param>
        /// <param name="headerSize">Size of the sequence header; must be greater than or equal to sizeof(CvSeq). If a specific type or its extension is indicated, this type must fit the base type header.</param>
        /// <param name="storage">Sequence location.</param>
        public CvSeq(SeqType seqFlags, int headerSize, CvMemStorage storage)
            : base(seqFlags, headerSize, Util.SizeOf(typeof(T)), storage)
        {
        }
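A minimal usage sketch of this constructor, mirroring Examples #2 and #19 above:

        using (CvMemStorage storage = new CvMemStorage())
        {
            CvSeq<int> seq = new CvSeq<int>(SeqType.EltypeS32C1, CvSeq.SizeOf, storage);
            seq.Push(42);   // the element size is inferred from T
        }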
Example #30
        /// <summary>
        /// Extracts Speeded Up Robust Features from an image
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image.</param>
        /// <param name="mask">The optional input 8-bit mask. Features are only found in areas that contain more than 50% non-zero mask pixels.</param>
        /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be a sequence of CvSURFPoint structures.</param>
        /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors. Depending on the params.extended value, each element of the sequence will be either a 64-element or a 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed.</param>
        /// <param name="storage">Memory storage where the keypoints and descriptors will be stored.</param>
        /// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
        /// <param name="useProvidedKeyPts">If useProvidedKeyPts != 0, keypoints are not detected; instead, descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
        public static void ExtractSURF(CvArr image, CvArr mask, ref CvSeq<CvSURFPoint> keypoints, out CvSeq<float> descriptors, CvMemStorage storage, CvSURFParams param, bool useProvidedKeyPts)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (storage == null)
                throw new ArgumentNullException("storage");

            IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
            IntPtr descriptorsPtr = IntPtr.Zero;
            IntPtr keypointsPtr = IntPtr.Zero;
            if (useProvidedKeyPts)
            {
                keypoints = new CvSeq<CvSURFPoint>(SeqType.Zero, storage);
                keypointsPtr = keypoints.CvPtr;
                CvInvoke.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, useProvidedKeyPts);
                descriptors = new CvSeq<float>(descriptorsPtr);
            }
            else
            {
                CvInvoke.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, useProvidedKeyPts);
                keypoints = new CvSeq<CvSURFPoint>(keypointsPtr);
                descriptors = new CvSeq<float>(descriptorsPtr);
            }
        }
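A minimal usage sketch (an assumption, not from the library docs; the CvSURFParams(500, true) constructor is assumed to take a hessian threshold and the extended-descriptor flag):

        using (IplImage gray = new IplImage("lenna.png", LoadMode.GrayScale))
        using (CvMemStorage storage = new CvMemStorage())
        {
            CvSeq<CvSURFPoint> keypoints = null;
            CvSeq<float> descriptors;
            Cv.ExtractSURF(gray, null, ref keypoints, out descriptors, storage, new CvSURFParams(500, true), false);
            Console.WriteLine("{0} keypoints found", keypoints.Total);
        }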
Example #31
        /// <summary>
        /// Approximates Freeman chain(s) with a polygonal curve
        /// </summary>
        /// <param name="srcSeq">Freeman chain(s)</param>
        /// <param name="storage">Storage location for the resulting polylines.</param>
        /// <param name="method">Approximation method.</param>
        /// <returns></returns>
        public static CvSeq <CvPoint> ApproxChains(CvChain srcSeq, CvMemStorage storage, ContourChain method)
        {
            return(ApproxChains(srcSeq, storage, method, 0, 0, false));
        }
Example #32
        /// <summary>
        /// Approximates Freeman chain(s) with a polygonal curve
        /// </summary>
        /// <param name="srcSeq">Freeman chain(s)</param>
        /// <param name="storage">Storage location for the resulting polylines.</param>
        /// <param name="method">Approximation method.</param>
        /// <param name="parameter">Method parameter (not used now).</param>
        /// <param name="minimalPerimeter">Approximates only those contours whose perimeters are not less than minimalPerimeter. Other chains are removed from the resulting structure.</param>
        /// <returns></returns>
        public static CvSeq <CvPoint> ApproxChains(CvChain srcSeq, CvMemStorage storage, ContourChain method, double parameter, int minimalPerimeter)
        {
            return(ApproxChains(srcSeq, storage, method, parameter, minimalPerimeter, false));
        }
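A minimal usage sketch (an assumption, not part of the original listing; chain is a chain-coded contour (CvChain) produced by cvFindContours with a chain-code retrieval mode, and storage is a CvMemStorage):

        CvSeq<CvPoint> poly = ApproxChains(chain, storage, ContourChain.ApproxSimple, 0, 0);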
Example #33
        /// <summary>
        /// Approximates polygonal curve(s) with the desired precision
        /// </summary>
        /// <param name="srcSeq">Sequence or array of points.</param>
        /// <param name="headerSize">Header size of the approximated curve(s).</param>
        /// <param name="storage">Container for the approximated contours. If null, the input sequence's storage is used.</param>
        /// <param name="method">Approximation method; only ApproxPolyMethod.DP is supported, which corresponds to the Douglas-Peucker algorithm.</param>
        /// <param name="parameter">Method-specific parameter; in the case of CV_POLY_APPROX_DP it is the desired approximation accuracy.</param>
        /// <returns></returns>
        public static CvSeq <CvPoint> ApproxPoly(CvSeq <CvPoint> srcSeq, int headerSize, CvMemStorage storage, ApproxPolyMethod method, double parameter)
        {
            return(ApproxPoly(srcSeq, headerSize, storage, method, parameter, false));
        }
Example #34
        /// <summary>
        /// Approximates polygonal curve(s) with the desired precision
        /// </summary>
        /// <param name="srcSeq">Sequence or array of points.</param>
        /// <param name="headerSize">Header size of the approximated curve(s).</param>
        /// <param name="storage">Container for the approximated contours. If null, the input sequence's storage is used.</param>
        /// <param name="method">Approximation method; only ApproxPolyMethod.DP is supported, which corresponds to the Douglas-Peucker algorithm.</param>
        /// <param name="parameter">Method-specific parameter; in the case of CV_POLY_APPROX_DP it is the desired approximation accuracy.</param>
        /// <param name="parameter2">If src_seq is a sequence, it specifies whether the single sequence should be approximated or all sequences on the same level or below src_seq (see cvFindContours for a description of hierarchical contour structures). If src_seq is an array (CvMat*) of points, the parameter specifies whether the curve is closed (parameter2 == true) or not (parameter2 == false).</param>
        /// <returns></returns>
        public static CvSeq <CvPoint> ApproxPoly(CvSeq <CvPoint> srcSeq, int headerSize, CvMemStorage storage, ApproxPolyMethod method, double parameter, bool parameter2)
        {
            if (srcSeq == null)
            {
                throw new ArgumentNullException("srcSeq");
            }

            IntPtr result = NativeMethods.cvApproxPoly(srcSeq.CvPtr, headerSize, ToPtr(storage), method, parameter, parameter2);

            if (result == IntPtr.Zero)
            {
                return(null);
            }

            GC.KeepAlive(srcSeq);
            GC.KeepAlive(storage);

            return(new CvSeq <CvPoint>(result));
        }
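A minimal usage sketch, mirroring the call in Example #28 above (contours and storage come from a prior cvFindContours call):

        CvSeq<CvPoint> approx = ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);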
Example #35
        /// <summary>
        /// Creates a copy of the sequence (cvCloneSeq).
        /// </summary>
        /// <param name="storage">The destination storage to keep the new sequence header and the copied data, if any. If null, the storage containing the input sequence is used.</param>
        /// <returns>The copied CvSeq</returns>
        public new CvSeq<T> Clone(CvMemStorage storage)
        {
            return Cv.CloneSeq(this, storage);
        }
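For illustration, a hedged sketch of a deep copy; "points" stands for any existing CvSeq<CvPoint>.

        using (CvMemStorage ownStorage = new CvMemStorage())
        {
            // The copy gets its own header and data inside ownStorage, so it is
            // independent of the storage behind "points" while ownStorage lives.
            CvSeq<CvPoint> copy = points.Clone(ownStorage);
        }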
        /// <summary>
        /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for, and returns those regions as a sequence of rectangles.
        /// </summary>
        /// <param name="image">Image to detect objects in. </param>
        /// <param name="storage">Memory storage to store the resultant sequence of object candidate rectangles. </param>
        /// <param name="scaleFactor">The factor by which the search window is scaled between subsequent scans; for example, 1.1 means increasing the window by 10%. </param>
        /// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All groups with fewer than minNeighbors-1 rectangles are rejected. If minNeighbors is 0, the function does no grouping at all and returns all detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure. </param>
        /// <param name="flags">Mode of operation. Currently the only flag that may be specified is CV_HAAR_DO_CANNY_PRUNING. If it is set, the function uses a Canny edge detector to reject image regions that contain too few or too many edges and thus cannot contain the searched object. The particular threshold values are tuned for face detection, in which case the pruning speeds up the processing. </param>
        /// <returns>A CvSeq whose elements are CvAvgComp.</returns>
        public CvSeq HaarDetectObjects(CvArr image, CvMemStorage storage, double scaleFactor, int minNeighbors, HaarDetectionType flags)
        {
            return Cv.HaarDetectObjects(image, this, storage, scaleFactor, minNeighbors, flags);
        }
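A minimal usage sketch; the cascade and image paths are placeholders, and the flag value mirrors the C constant CV_HAAR_DO_CANNY_PRUNING.

        using (var cascade = CvHaarClassifierCascade.FromFile("haarcascade_frontalface_alt.xml"))
        using (var img = new IplImage("photo.jpg", LoadMode.Color))
        using (var storage = new CvMemStorage())
        {
            CvSeq found = cascade.HaarDetectObjects(img, storage, 1.1, 3,
                HaarDetectionType.DoCannyPruning);
            for (int i = 0; i < found.Total; i++)
            {
                // Each element is a CvAvgComp; its Rect is the detected region.
                CvRect r = found.GetSeqElem<CvAvgComp>(i).Value.Rect;
                img.Rectangle(r, CvColor.Red, 2);
            }
        }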
Пример #37
0
        /// <summary>
        /// Splits a sequence into equivalence classes.
        /// </summary>
        /// <param name="storage">The storage to keep the sequence of equivalence classes. If it is null, the function uses seq->storage for the output labels. </param>
        /// <param name="labels">Output parameter. The sequence of 0-based labels assigned to the input sequence elements. </param>
        /// <param name="isEqual">The relation function, which should return non-zero if two particular sequence elements are from the same class, and zero otherwise. The partitioning algorithm uses the transitive closure of the relation function as the equivalence criterion. </param>
        /// <returns>The number of equivalence classes.</returns>
        public int Partition(CvMemStorage storage, out CvSeq labels, CvCmpFunc<T> isEqual)
        {
            return Cv.SeqPartition(this, storage, out labels, isEqual);
        }
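A hedged sketch of grouping nearby points, assuming "pointSeq" is a CvSeq<CvPoint> and "threshold" a pixel distance; the relation function returns non-zero for elements of the same class.

        CvSeq labels;
        int classCount = pointSeq.Partition(null, out labels,
            delegate(CvPoint a, CvPoint b)
            {
                int dx = a.X - b.X;
                int dy = a.Y - b.Y;
                // Non-zero = "same class"; the transitive closure then merges
                // chains of mutually close points into one equivalence class.
                return (dx * dx + dy * dy <= threshold * threshold) ? 1 : 0;
            });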
        /// <summary>
        /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for, and returns those regions as a sequence of rectangles.
        /// </summary>
        /// <param name="image">Image to detect objects in. </param>
        /// <param name="storage">Memory storage to store the resultant sequence of object candidate rectangles. </param>
        /// <param name="scaleFactor">The factor by which the search window is scaled between subsequent scans; for example, 1.1 means increasing the window by 10%. </param>
        /// <returns>A CvSeq whose elements are CvAvgComp.</returns>
        public CvSeq HaarDetectObjects(CvArr image, CvMemStorage storage, double scaleFactor)
        {
            return Cv.HaarDetectObjects(image, this, storage, scaleFactor);
        }
        /// <summary>
        /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for, and returns those regions as a sequence of rectangles.
        /// </summary>
        /// <param name="image">Image to detect objects in. </param>
        /// <param name="storage">Memory storage to store the resultant sequence of object candidate rectangles. </param>
        /// <returns>A CvSeq whose elements are CvAvgComp.</returns>
        public CvSeq HaarDetectObjects(CvArr image, CvMemStorage storage)
        {
            return Cv.HaarDetectObjects(image, this, storage);
        }
Пример #40
0
        /// <summary>
        /// Makes a separate header for the sequence slice (cvSeqSlice).
        /// </summary>
        /// <param name="slice">The part of the sequence to extract. </param>
        /// <param name="storage">The destination storage to keep the new sequence header and the copied data, if any. If it is null, the function uses the storage containing the input sequence. </param>
        /// <returns>A sequence representing the specified slice.</returns>
        public new CvSeq<T> Slice(CvSlice slice, CvMemStorage storage)
        {
            return Cv.SeqSlice(this, slice, storage);
        }
Пример #41
0
        /// <summary>
        /// Makes a separate header for the sequence slice (cvSeqSlice).
        /// </summary>
        /// <param name="slice">The part of the sequence to extract. </param>
        /// <param name="storage">The destination storage to keep the new sequence header and the copied data, if any. If it is null, the function uses the storage containing the input sequence. </param>
        /// <param name="copyData">The flag that indicates whether to copy the elements of the extracted slice (copyData==true) or not (copyData==false). </param>
        /// <returns>A sequence representing the specified slice.</returns>
        public new CvSeq<T> Slice(CvSlice slice, CvMemStorage storage, bool copyData)
        {
            return Cv.SeqSlice(this, slice, storage, copyData);
        }
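A minimal sketch; "seq" stands for any existing CvSeq<CvPoint> with at least five elements.

        using (CvMemStorage sliceStorage = new CvMemStorage())
        {
            // copyData == true copies the element data, so the slice stays
            // valid even if "seq" is modified afterwards.
            CvSeq<CvPoint> firstFive = seq.Slice(new CvSlice(0, 5), sliceStorage, true);
        }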
Пример #42
0
        /// <summary>
        /// Opens file storage for reading or writing data. In write mode a new file is created,
        /// or an existing file is overwritten. The file type is determined by the extension:
        /// .xml for XML, and .yml or .yaml for YAML.
        /// </summary>
        /// <param name="filename">Name of the file associated with the storage. </param>
        /// <param name="memstorage">Memory storage used for temporary data and for storing dynamic structures, such as CvSeq or CvGraph. If it is null, a temporary memory storage is created and used. </param>
        /// <param name="flags">The mode of opening or creating the file. </param>
        public CvFileStorage(string filename, CvMemStorage memstorage, FileStorageMode flags)
            : this(filename, memstorage, flags, null)
        {
        }
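A minimal usage sketch; the file name and node name are placeholder assumptions, and Cv.WriteInt is assumed to wrap cvWriteInt as elsewhere in the Cv class.

        using (var fs = new CvFileStorage("config.xml", null, FileStorageMode.Write))
        {
            // Writes <threshold>10</threshold> into the new XML file.
            Cv.WriteInt(fs, "threshold", 10);
        }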
Пример #43
0
        /// <summary>
        /// Sample of the C-style wrapper.
        /// </summary>
        private void SampleC()
        {
            // cvHoughLines2

            using (IplImage srcImgGray = new IplImage(FilePath.Image.Goryokaku, LoadMode.GrayScale))
            using (IplImage srcImgStd = new IplImage(FilePath.Image.Goryokaku, LoadMode.Color))
            using (IplImage srcImgProb = srcImgStd.Clone())
            {
                Cv.Canny(srcImgGray, srcImgGray, 50, 200, ApertureSize.Size3);
                using (CvMemStorage storage = new CvMemStorage())
                {
                    // Standard algorithm
                    CvSeq lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Standard, 1, Math.PI / 180, 50, 0, 0);
                    // wrapper style
                    //CvLineSegmentPolar[] lines = srcImgGray.HoughLinesStandard(1, Math.PI / 180, 50, 0, 0);

                    int limit = Math.Min(lines.Total, 10);
                    for (int i = 0; i < limit; i++)
                    {
                        // native code style
                        /*
                        unsafe
                        {
                            float* line = (float*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                            float rho = line[0];
                            float theta = line[1];
                        }
                        //*/

                        // wrapper style
                        CvLineSegmentPolar elem = lines.GetSeqElem<CvLineSegmentPolar>(i).Value;
                        float rho = elem.Rho;
                        float theta = elem.Theta;

                        // Convert the polar parameters (rho, theta) into two far-apart
                        // points on the line: (x0, y0) is the point closest to the
                        // origin, and (-b, a) is the line's unit direction vector.
                        double a = Math.Cos(theta);
                        double b = Math.Sin(theta);
                        double x0 = a * rho;
                        double y0 = b * rho;
                        CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
                        CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
                        srcImgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
                    }

                    // Probabilistic algorithm
                    lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 50, 10);
                    // wrapper style
                    //CvLineSegmentPoint[] lines = srcImgGray.HoughLinesProbabilistic(1, Math.PI / 180, 50, 50, 10);

                    for (int i = 0; i < lines.Total; i++)
                    {
                        // native code style
                        /*
                        unsafe
                        {
                            CvPoint* point = (CvPoint*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                            srcImgProb.Line(point[0], point[1], CvColor.Red, 3, LineType.AntiAlias, 0);
                        }
                        //*/

                        // wrapper style
                        CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;
                        srcImgProb.Line(elem.P1, elem.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
                    }
                }

                using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, srcImgStd))
                using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, srcImgProb))
                {
                    CvWindow.WaitKey(0);
                }
            }
        }
Пример #44
0
        /// <summary>
        /// フリーマンチェーン(Freeman chain)をポリラインで近似する
        /// </summary>
        /// <param name="srcSeq">他のチェーンから参照可能なチェーンへの参照.</param>
        /// <param name="storage">計算結果保存用のストレージ.</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Approximates Freeman chain(s) with polygonal curve
        /// </summary>
        /// <param name="srcSeq">Freeman chain(s) </param>
        /// <param name="storage">Storage location for the resulting polylines. </param>
        /// <returns></returns>
#endif
        public static CvSeq<CvPoint> ApproxChains(CvChain srcSeq, CvMemStorage storage)
        {
            return ApproxChains(srcSeq, storage, ContourChain.ApproxSimple, 0, 0, false);
        }
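A hedged usage sketch; "chain" stands for a CvChain produced by cvFindContours with ContourChain.Code, and "storage" for any CvMemStorage. The arguments mirror the forwarding call above.

        CvSeq<CvPoint> polyline = Cv.ApproxChains(chain, storage,
            ContourChain.ApproxSimple, 0, 0, false);
        foreach (CvPoint p in polyline)
        {
            Console.WriteLine("({0}, {1})", p.X, p.Y);
        }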