Example #1
        public IplImage ApproxPoly(IplImage src)
        {
            apcon = new IplImage(src.Size, BitDepth.U8, 3);

            Cv.Copy(src, apcon);
            bin = this.Binary(src, 200);

            CvMemStorage    Storage = new CvMemStorage();
            CvSeq <CvPoint> contours;

            Cv.FindContours(bin, Storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxNone);

            CvSeq <CvPoint> apcon_seq = Cv.ApproxPoly(contours, CvContour.SizeOf, Storage, ApproxPolyMethod.DP, 3, true);

            for (CvSeq <CvPoint> c = apcon_seq; c != null; c = c.HNext)
            {
                if (c.Total > 4)
                {
                    for (int i = 0; i < c.Total; i++)
                    {
                        //CvPoint conpt = new CvPoint(c[i].Value.X, c[i].Value.Y);
                        CvPoint? p = Cv.GetSeqElem(c, i);
                        CvPoint conpt;
                        conpt.X = p.Value.X;
                        conpt.Y = p.Value.Y;

                        Cv.DrawCircle(apcon, conpt, 3, CvColor.Black);
                    }
                }
            }
            return(apcon);
        }
Example #2
        /// <summary>
        /// Generates a random point set and partitions it interactively by distance threshold.
        /// </summary>
        public SeqPartition()
        {
            CvMemStorage storage = new CvMemStorage(0);

            pointSeq = new CvSeq <CvPoint>(SeqType.EltypeS32C2, CvSeq.SizeOf, storage);
            Random rand = new Random();

            canvas = new IplImage(Width, Height, BitDepth.U8, 3);

            colors = new CvScalar[Count];
            for (int i = 0; i < Count; i++)
            {
                CvPoint pt = new CvPoint
                {
                    X = rand.Next(Width),
                    Y = rand.Next(Height)
                };
                pointSeq.Push(pt);
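                // OR with 0x00404040 forces each of the three low bytes to at least 0x40, so the random colors stay bright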
                int icolor = rand.Next() | 0x00404040;
                colors[i] = Cv.RGB(icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255);
            }

            using (window = new CvWindowEx()
            {
                Text = "points"
            })
            {
                window.CreateTrackbar("threshold", 10, 50, OnTrack);
                OnTrack(10);
                CvWindowEx.WaitKey();
            }
        }
Example #3
        /// <summary>
        /// Reads a sequence of points back from YAML file storage (cvGetHashedKey, cvGetFileNode).
        /// </summary>
        /// <param name="fileName">Path to the YAML file to read</param>
        private static void SampleFileStorageReadSeq(string fileName)
        {
            // cvGetHashedKey, cvGetFileNode

            using (CvFileStorage fs = new CvFileStorage(fileName, null, FileStorageMode.Read))
            {
                CvStringHashNode xKey   = fs.GetHashedKey("x", true);
                CvStringHashNode yKey   = fs.GetHashedKey("y", true);
                CvFileNode       points = fs.GetFileNodeByName(null, "points");

                if ((points.Tag & NodeType.Seq) != 0)
                {
                    CvSeq       seq    = points.DataSeq;
                    int         total  = seq.Total;
                    CvSeqReader reader = new CvSeqReader();
                    seq.StartRead(reader, false);
                    for (int i = 0; i < total; i++)
                    {
                        CvFileNode pt = CvFileNode.FromPtr(reader.Ptr);
                        int        x  = fs.ReadIntByName(pt, "x", 0);
                        int        y  = fs.ReadIntByName(pt, "y", 0);

                        Cv.NEXT_SEQ_ELEM(seq.ElemSize, reader);
                        Console.WriteLine("{0}: ({1}, {2})", i, x, y);
                    }
                }
            }
            Console.ReadKey();
        }
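For reference, the reader above expects a top-level "points" node containing a sequence of {x, y} maps. A YAML file in that shape (hypothetical contents, since only the reading side is shown) might look like:

        %YAML:1.0
        points:
            - { x: 100, y: 200 }
            - { x: 150, y: 250 }
            - { x: 210, y: 300 }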
Example #4
        /// <summary>
        /// Find contours
        /// </summary>
        /// <param name="img">Binary input image</param>
        /// <param name="storage">Memory storage for the sequences</param>
        /// <returns>The longest contour found, or null if there is none</returns>
        private CvSeq <CvPoint> FindContours(IplImage img, CvMemStorage storage)
        {
            // Extract contours
            CvSeq <CvPoint> contours;

            using (IplImage imgClone = img.Clone())
            {
                Cv.FindContours(imgClone, storage, out contours);
                if (contours == null)
                {
                    return(null);
                }
                contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);
            }
            // Keep only the longest contour
            CvSeq <CvPoint> max = contours;

            for (CvSeq <CvPoint> c = contours; c != null; c = c.HNext)
            {
                if (max.Total < c.Total)
                {
                    max = c;
                }
            }
            return(max);
        }
Example #5
        public IplImage VehicleDetect(IplImage src)
        {
            haarvehicle = new IplImage(src.Size, BitDepth.U8, 3);
            Cv.Copy(src, haarvehicle);

            gray = new IplImage(src.Size, BitDepth.U8, 1);
            Cv.CvtColor(src, gray, ColorConversion.BgrToGray);

            Cv.EqualizeHist(gray, gray);

            double scaleFactor  = 1.139;
            int    minNeighbors = 1;

            CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("../../../cars.xml");
            CvMemStorage            Storage = new CvMemStorage();

            CvSeq <CvAvgComp> vehicles = Cv.HaarDetectObjects(gray, cascade, Storage, scaleFactor, minNeighbors, HaarDetectionType.ScaleImage, new CvSize(90, 90), new CvSize(0, 0));

            for (int i = 0; i < vehicles.Total; i++)
            {
                CvRect r = vehicles[i].Value.Rect;

                int cX     = Cv.Round(r.X + r.Width * 0.5);
                int cY     = Cv.Round(r.Y + r.Height * 0.5);
                int radius = Cv.Round((r.Width + r.Height) * 0.25);

                //Cv.DrawCircle(haarvehicle, new CvPoint(cX, cY), radius, CvColor.Red, 3);
                Cv.DrawRect(haarvehicle, r, CvColor.Red, 5);
            }

            return(haarvehicle);
        }
Example #6
        //=============================================================
        //
        // Probabilistic Hough transform
        //
        //=============================================================
        private void HoughPbl(PictureBox pbox, IplImage image)
        {
            IplImage gray;
            IplImage canny;
            IplImage hPbl;

            gray  = Cv.CreateImage(image.Size, BitDepth.U8, 1);
            canny = Cv.CreateImage(image.Size, BitDepth.U8, 1);
            hPbl  = Cv.CreateImage(image.Size, BitDepth.U8, 3);

            Cv.CvtColor(image, gray, ColorConversion.RgbToGray);
            Cv.Canny(gray, canny, 50, 200);
            Cv.CvtColor(canny, hPbl, ColorConversion.GrayToRgb);

            CvMemStorage storage = new CvMemStorage();
            CvSeq        lines   = Cv.HoughLines2(canny, storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 10, 10);

            for (int i = 0; i < lines.Total; i++)
            {
                CvLineSegmentPoint elem = lines.GetSeqElem <CvLineSegmentPoint>(i).Value;
                Cv.Line(hPbl, elem.P1, elem.P2, CvColor.Red, 1, LineType.AntiAlias, 0);
            }
            lines.Dispose();
            storage.Dispose();

            ViewBitmap(pbox, hPbl);
            Cv.ReleaseImage(gray);
            Cv.ReleaseImage(canny);
            Cv.ReleaseImage(hPbl);
            pictureBox2.Invalidate();
        }
Example #7
        public IplImage CenterPoint(IplImage src)
        {
            mom = new IplImage(src.Size, BitDepth.U8, 3);

            Cv.Copy(src, mom);
            bin = this.Binary(src, 200);

            CvMemStorage    Storage = new CvMemStorage();
            CvSeq <CvPoint> contours;

            Cv.FindContours(bin, Storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxNone);

            CvSeq <CvPoint> apcon_seq = Cv.ApproxPoly(contours, CvContour.SizeOf, Storage, ApproxPolyMethod.DP, 3, true);

            CvMoments moments;
            int       cX = 0, cY = 0;

            for (CvSeq <CvPoint> c = apcon_seq; c != null; c = c.HNext)
            {
                if (c.Total > 4)
                {
                    Cv.Moments(c, out moments, true);

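                    // Centroid from spatial moments: cX = M10/M00, cY = M01/M00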
                    cX = Convert.ToInt32(moments.M10 / moments.M00);
                    cY = Convert.ToInt32(moments.M01 / moments.M00);

                    Cv.DrawCircle(mom, new CvPoint(cX, cY), 5, CvColor.Red, -1);
                }
            }
            return(mom);
        }
Example #8
        public EyeDetect()
        {
            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };

            const double Scale        = 1.25;
            const double ScaleFactor  = 2.5;
            const int    MinNeighbors = 2;

            using (CvCapture cap = CvCapture.FromCamera(1))
                using (CvWindow w = new CvWindow("Eye Tracker"))
                {
                    while (CvWindow.WaitKey(10) < 0)
                    {
                        using (IplImage img = cap.QueryFrame())
                            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
                            {
                                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                                {
                                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                                    Cv.EqualizeHist(smallImg, smallImg);
                                }

                                using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("C:\\Program Files\\OpenCV\\data\\haarcascades\\haarcascade_eye.xml"))
                                    using (CvMemStorage storage = new CvMemStorage())
                                    {
                                        storage.Clear();

                                        Stopwatch         watch = Stopwatch.StartNew();
                                        CvSeq <CvAvgComp> eyes  = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                                        watch.Stop();
                                        //Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);

                                        for (int i = 0; i < eyes.Total; i++)
                                        {
                                            CvRect  r      = eyes[i].Value.Rect;
                                            CvPoint center = new CvPoint
                                            {
                                                X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                                                Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                                            };
                                            int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                                            img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                                        }
                                    }

                                w.Image = img;
                            }
                    }
                }
        }
Example #9
        public IplImage FaceDetection(IplImage src)
        {
            haarface = new IplImage(src.Size, BitDepth.U8, 3);
            Cv.Copy(src, haarface);

            gray = this.GrayScale(src);
            Cv.EqualizeHist(gray, gray);

            double scaleFactor  = 1.139;
            int    minNeighbors = 1;

            CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("../../haarcascade_frontalface_alt.xml");
            CvMemStorage            Storage = new CvMemStorage();

            CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(gray, cascade, Storage, scaleFactor, minNeighbors, HaarDetectionType.ScaleImage, new CvSize(90, 90), new CvSize(0, 0));

            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;

                int cX     = Cv.Round(r.X + r.Width * 0.5);
                int cY     = Cv.Round(r.Y + r.Height * 0.5);
                int radius = Cv.Round((r.Width + r.Height) * 0.25);

                Cv.DrawCircle(haarface, new CvPoint(cX, cY), radius, CvColor.Black, 3);
            }
            return(haarface);
        }
Example #10
        public HoughCircles()
        {
            using (IplImage imgSrc = new IplImage(FilePath.Image.Walkman, LoadMode.Color))
                using (IplImage imgGray = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                    using (IplImage imgHough = imgSrc.Clone())
                    {
                        Cv.CvtColor(imgSrc, imgGray, ColorConversion.BgrToGray);
                        Cv.Smooth(imgGray, imgGray, SmoothType.Gaussian, 9);
                        //Cv.Canny(imgGray, imgGray, 75, 150, ApertureSize.Size3);

                        using (var storage = new CvMemStorage())
                        {
                            CvSeq <CvCircleSegment> seq = imgGray.HoughCircles(storage, HoughCirclesMethod.Gradient, 1, 100, 150, 55, 0, 0);
                            foreach (CvCircleSegment item in seq)
                            {
                                imgHough.Circle(item.Center, (int)item.Radius, CvColor.Red, 3);
                            }
                        }

                        using (new CvWindow("gray", WindowMode.AutoSize, imgGray))
                            using (new CvWindow("Hough circles", WindowMode.AutoSize, imgHough))
                            {
                                CvWindow.WaitKey(0);
                            }
                    }
        }
Example #12
        public int Detect()
        {
            const double Scale        = 1.04;
            const double ScaleFactor  = 1.139;
            const int    MinNeighbors = 2;

            IplImage smallImg = new IplImage(new CvSize(Cv.Round(_src.Width / Scale), Cv.Round(_src.Height / Scale)), BitDepth.U8, 1);
            IplImage gray     = new IplImage(_src.Size, BitDepth.U8, 1);

            Cv.CvtColor(_src, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);

            CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));

            Faces.Clear();
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;

                r.X      = Cv.Round(r.X * Scale);
                r.Y      = Cv.Round(r.Y * Scale);
                r.Width  = Cv.Round(r.Width * Scale);
                r.Height = Cv.Round(r.Height * Scale);
                _src.SetROI(r);
                IplImage p = new IplImage(64, 64, _src.Depth, 3);
                Cv.Resize(_src, p);
                Faces.Add(p);
                _src.ResetROI();
            }
            storage.Clear();
            return(faces.Total);
        }
Example #13
        public LatentSVM()
        {
            using (CvLatentSvmDetector detector = new CvLatentSvmDetector(Const.XmlLatentSVMCat))
                using (IplImage imageSrc = new IplImage(Const.ImageCat, LoadMode.Color))
                    using (IplImage imageDst = imageSrc.Clone())
                        using (CvMemStorage storage = new CvMemStorage())
                        {
                            Console.WriteLine("Running LatentSVM...");
                            Stopwatch watch = Stopwatch.StartNew();

                            CvSeq <CvObjectDetection> result = detector.DetectObjects(imageSrc, storage, 0.5f, 2);

                            watch.Stop();
                            Console.WriteLine("Elapsed time: {0}ms", watch.ElapsedMilliseconds);

                            foreach (CvObjectDetection detection in result)
                            {
                                CvRect boundingBox = detection.Rect;
                                imageDst.Rectangle(
                                    new CvPoint(boundingBox.X, boundingBox.Y),
                                    new CvPoint(boundingBox.X + boundingBox.Width, boundingBox.Y + boundingBox.Height),
                                    CvColor.Red, 3);
                            }

                            using (new CvWindow("LatentSVM result", imageDst))
                            {
                                Cv.WaitKey();
                            }
                        }
        }
Example #14
 public static CvAvgComp[] ToArrayAndDispose(this CvSeq <CvAvgComp> seq)
 {
     using (seq)
     {
         return(seq.ToArray());
     }
 }
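A typical call site (hypothetical usage, mirroring the detector arguments used elsewhere in this listing) lets a detection result be consumed as an array while the underlying CvSeq is disposed:

     // Sketch: detect, copy the results into an array, and free the CvSeq in one go.
     CvAvgComp[] faces = Cv.HaarDetectObjects(smallImg, cascade, storage,
                                              1.139, 2, 0, new CvSize(30, 30))
                           .ToArrayAndDispose();
     foreach (CvAvgComp face in faces)
     {
         Console.WriteLine(face.Rect);
     }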
Example #15
        public FaceDetect()
        {
            CheckMemoryLeak();

            // CvHaarClassifierCascade, cvHaarDetectObjects

            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };

            const double Scale        = 1.14;
            const double ScaleFactor  = 1.0850;
            const int    MinNeighbors = 2;

            using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
                using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
                {
                    using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                    {
                        Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                        Cv.Resize(gray, smallImg, Interpolation.Linear);
                        Cv.EqualizeHist(smallImg, smallImg);
                    }

                    using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))
                        using (var storage = new CvMemStorage())
                        {
                            storage.Clear();

                            // Detect faces
                            Stopwatch         watch = Stopwatch.StartNew();
                            CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                            watch.Stop();
                            Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);

                            // Mark each detected region with a circle
                            for (int i = 0; i < faces.Total; i++)
                            {
                                CvRect  r      = faces[i].Value.Rect;
                                CvPoint center = new CvPoint
                                {
                                    X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                                    Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                                };
                                int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                            }
                        }

                    // Show in a window
                    CvWindow.ShowImages(img);
                }
        }
Example #16
        public Contour()
        {
            // cvContourArea, cvArcLength
            // Compute the area of the region enclosed by a contour and the contour's arc length
            
            const int SIZE = 500;

            // (1) Allocate and initialize the image
            using (CvMemStorage storage = new CvMemStorage())
            using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
            {
                img.Zero();
                // (2) Generate a point sequence
                CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
                CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);
                double scale = rng.RandReal() + 0.5;
                CvPoint pt0 = new CvPoint
                {
                    X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
                    Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
                };
                img.Circle(pt0, 2, CvColor.Green);
                points.Push(pt0);
                for (int i = 1; i < 20; i++)
                {
                    scale = rng.RandReal() + 0.5;
                    CvPoint pt1 = new CvPoint
                    {
                        X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
                        Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
                    };
                    img.Line(pt0, pt1, CvColor.Green, 2);
                    pt0.X = pt1.X;
                    pt0.Y = pt1.Y;
                    img.Circle(pt0, 3, CvColor.Green, Cv.FILLED);
                    points.Push(pt0);
                }
                img.Line(pt0, points.GetSeqElem(0).Value, CvColor.Green, 2);
                // (3) Compute the bounding rectangle, area, and arc length
                CvRect rect = points.BoundingRect(false);
                double area = points.ContourArea();
                double length = points.ArcLength(CvSlice.WholeSeq, 1);
                // (4) Draw the results onto the image
                img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), CvColor.Red, 2);
                string text_area = string.Format("Area:   wrect={0}, contour={1}", rect.Width * rect.Height, area);
                string text_length = string.Format("Length: rect={0}, contour={1}", 2 * (rect.Width + rect.Height), length);
                using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
                {
                    img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
                    img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
                }
                // (5) Show the image and exit when a key is pressed
                using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
                {
                    window.Image = img;
                    CvWindow.WaitKey(0);
                }
            }
        }
Example #18
        public System.Drawing.Bitmap FaceDetect(IplImage src)
        {
            // CvHaarClassifierCascade, cvHaarDetectObjects
            // Use a cascade of Haar classifiers to detect faces

            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };

            const double scale        = 1.04;
            const double scaleFactor  = 1.139;
            const int    minNeighbors = 1;

            using (IplImage img = src.Clone())
                using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
                {
                    // Create the image used for face detection.
                    using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                    {
                        Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                        Cv.Resize(gray, smallImg, Interpolation.Linear);
                        Cv.EqualizeHist(smallImg, smallImg);
                    }

                    using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Environment.CurrentDirectory + "\\" + "haarcascade_frontalface_alt.xml"))
                        using (CvMemStorage storage = new CvMemStorage())
                        {
                            storage.Clear();

                            // Detect faces.
                            CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(20, 20));

                            // Cover each detected face with a filled black circle.
                            for (int i = 0; i < faces.Total; i++)
                            {
                                CvRect  r      = faces[i].Value.Rect;
                                CvPoint center = new CvPoint
                                {
                                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                                };
                                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                                img.Circle(center, radius, new CvColor(0, 0, 0), -1, LineType.Link8, 0);
                            }
                        }
                    FindFace = img.Clone();

                    // Convert the resulting IplImage to a Bitmap and return it.
                    return(FindFace.ToBitmap(System.Drawing.Imaging.PixelFormat.Format24bppRgb));
                }
        }
Example #19
 public ComplexObject(CvSeq <CvPoint> cont, CvPoint2D32f center, bool etalon, int id, double area, double distance)
 {
     Cont     = cont;
     Center   = center;
     Etalon   = etalon;
     Id       = id;
     Area     = area;
     Distance = distance;
 }
Example #20
        private void 얼굴검출ToolStripMenuItem_Click(object sender, EventArgs e)
        {
            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };

            const double scale        = 1.04;
            const double scaleFactor  = 1.139;
            const int    minNeighbors = 2;

            using (IplImage img = src.Clone())
                using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
                {
                    // Create the image used for face detection
                    using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                    {
                        Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                        Cv.Resize(gray, smallImg, Interpolation.Linear);
                        Cv.EqualizeHist(smallImg, smallImg);
                    }

                    //using (CvHaarClassifierCascade cascade = Cv.Load<CvHaarClassifierCascade>(Const.XmlHaarcascade))  // any cascade works here

                    using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Application.StartupPath + "\\" + "haarcascade_frontalface_alt.xml"))
                        using (CvMemStorage storage = new CvMemStorage())
                        {
                            storage.Clear();

                            // Detect faces

                            CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(30, 30), new CvSize(180, 180));

                            // Draw circles on the detected faces
                            for (int i = 0; i < faces.Total; i++)
                            {
                                CvRect  r      = faces[i].Value.Rect;
                                CvPoint center = new CvPoint
                                {
                                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                                };
                                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                            }
                        }
                    dst = img.Clone();
                    pictureBoxIpl2.ImageIpl = dst;
                }
        }
Example #21
 private static void DrawConvexHull(CvSeq<CvPoint> contours, int[] hull, IplImage img)
 {
     CvPoint pt0 = contours[hull.Last()].Value;
     foreach (int idx in hull)
     {
         CvPoint pt = contours[idx].Value;
         Cv.Line(img, pt0, pt, new CvColor(255, 255, 255));
         pt0 = pt;
     }
 }
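The hull indices this helper expects come from Cv.ConvexHull2, exactly as Example #26 obtains them; a minimal pairing sketch (contours, img and the surrounding storage are assumed to exist):

     // Sketch: compute hull indices for a contour sequence, then draw the hull.
     int[] hull;
     Cv.ConvexHull2(contours, out hull, ConvexHullOrientation.Clockwise);
     DrawConvexHull(contours, hull, img);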
Example #22
    // Update is called once per frame
    void Update()
    {
        IplImage frame = Cv.QueryFrame(capture);

        using (IplImage img = Cv.CloneImage(frame))
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
            {
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }

                using (CvMemStorage storage = new CvMemStorage())
                {
                    storage.Clear();

                    CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(64, 64));

                    for (int i = 0; i < faces.Total; i++)
                    {
                        CvRect  r      = faces[i].Value.Rect;
                        CvPoint center = new CvPoint
                        {
                            X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                            Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                        };
                        int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                        img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                    }

                    if (faces.Total > 0)
                    {
                        CvRect r = faces[0].Value.Rect;
                        facepos = new Vector2((r.X + r.Width / 2.0f) / CAPTURE_WIDTH, (r.Y + r.Height / 2.0f) / CAPTURE_HEIGHT);
                    }
                    else
                    {
                        facepos = Vector2.zero;
                    }

                    if (facepos.x >= 0.2 && facepos.x <= 0.7 && facepos.y >= 0.2 && facepos.y <= 0.7)
                    {
                        isFaceInCapture = true;
                    }
                    else
                    {
                        isFaceInCapture = false;
                    }
                }

                Cv.ShowImage("FaceDetect", img);
            }
    }
Example #23
        /*
         * public IplImage HoughLines_Point(IplImage src, int canny1, int canny2, int thresh, int sideData)
         * {
         *  // cvHoughLines2
         *  // Detect line segments using the probabilistic Hough transform
         *
         *  // (1) Load the image
         *  IplImage srcImgStd = src.Clone();
         *  IplImage srcImgGray = new IplImage(src.Size, BitDepth.U8, 1);
         *
         *  CvMemStorage storage = new CvMemStorage();
         *  CvSeq houghLines;
         *  Cv.CvtColor(srcImgStd, srcImgGray, ColorConversion.BgrToGray);
         *  Cv.Canny(srcImgGray, srcImgGray, canny1, canny2, ApertureSize.Size3);
         *  houghLines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI/180, thresh, 5, 0);
         *
         *
         *  LinePoints.Clear();
         *  int limit = Math.Min(houghLines.Total, 6);
         *  for (int i = 0; i < limit; i++)
         *  {
         *      CvLineSegmentPolar elem = houghLines.GetSeqElem<CvLineSegmentPolar>(i).Value;
         *      CvPoint pt1 = houghLines.GetSeqElem<CvLineSegmentPoint>(i).Value.P1;
         *      CvPoint pt2 = houghLines.GetSeqElem<CvLineSegmentPoint>(i).Value.P2;
         *
         *      //Trace.WriteLine(pt1.X.ToString("000.00000  ") + pt1.Y.ToString("000.00000  ") + pt2.X.ToString("000.00000  ")+ pt2.Y.ToString("000.00000"));
         *
         *      srcImgStd.Line(pt1, pt2, CvColor.Red, 1, LineType.AntiAlias, 0);
         *
         *      LinePoints.Add(pt1);
         *      LinePoints.Add(pt2);
         *  }
         *  srcImgStd.Dispose();
         *  srcImgGray.Dispose();
         *  houghLines.Dispose();
         *  storage.Dispose();
         *  return srcImgStd;
         * }
         */

        public IplImage HoughLines_Point08(IplImage src, int canny1, int canny2, int thresh, int sideData)
        {
            List <CvPoint> LinePoints    = new List <CvPoint>();
            int            lineMinLength = 0;

            if (sideData == 0 || sideData == 2)
            {
                lineMinLength = src.Width / 2;
            }
            else
            {
                lineMinLength = src.Height / 2;
            }

            // cvHoughLines2
            // Detect line segments using the probabilistic Hough transform

            // (1) Load the image
            using (IplImage srcImgStd = src.Clone())
                using (IplImage srcImgGray = new IplImage(src.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(srcImgStd, srcImgGray, ColorConversion.BgrToGray);

                    // (2) Canny edge detection for the Hough transform
                    //Cv.Canny(srcImgGray, srcImgGray, 50, 200, ApertureSize.Size3);
                    Cv.Canny(srcImgGray, srcImgGray, canny1, canny2, ApertureSize.Size3);

                    houghLine = srcImgGray.Clone();

                    using (CvMemStorage storage = new CvMemStorage())
                    {
                        LinePoints.Clear();
                        // (3) Detect line segments with the probabilistic Hough transform and draw them
                        CvSeq lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, thresh, 5, 0);
                        int   limit = Math.Min(lines.Total, 6);
                        for (int i = 0; i < limit; i++)
                        {
                            CvLineSegmentPolar elem = lines.GetSeqElem <CvLineSegmentPolar>(i).Value;
                            CvPoint            pt1  = lines.GetSeqElem <CvLineSegmentPoint>(i).Value.P1;
                            CvPoint            pt2  = lines.GetSeqElem <CvLineSegmentPoint>(i).Value.P2;

                            //Trace.WriteLine(pt1.X.ToString("000.00000  ") + pt1.Y.ToString("000.00000  ") + pt2.X.ToString("000.00000  ")+ pt2.Y.ToString("000.00000"));

                            srcImgStd.Line(pt1, pt2, CvColor.Red, 1, LineType.AntiAlias, 0);

                            LinePoints.Add(pt1);
                            LinePoints.Add(pt2);

                            houghLine = srcImgStd.Clone();
                        }
                    }
                }
            return(houghLine);
        }
Example #24
        /// <summary>
        /// Converts image point coordinates into relative real-world coordinates
        /// </summary>
        /// <param name="imagePoints">Image points</param>
        /// <returns>Points in the real world</returns>
        public CvPoint2D32f[] GetRealPoints(CvSeq <CvPoint> imagePoints)
        {
            CvPoint2D32f[] imagePoints2D32f = new CvPoint2D32f[imagePoints.Total];
            for (int i = 0; i < imagePoints.Total; i++)
            {
                imagePoints2D32f[i].X = (float)imagePoints[i].Value.X;
                imagePoints2D32f[i].Y = (float)imagePoints[i].Value.Y;
            }

            return(GetRealPoints(imagePoints2D32f));
        }
Example #25
        /// <summary>
        /// Draws the convex hull
        /// </summary>
        /// <param name="contours">Contour sequence</param>
        /// <param name="hull">Indices of the hull points within the contour</param>
        /// <param name="img">Image to draw on</param>
        private void DrawConvexHull(CvSeq <CvPoint> contours, int[] hull, IplImage img)
        {
            CvPoint pt0 = contours[hull.Last()].Value;

            foreach (int idx in hull)
            {
                CvPoint pt = contours[idx].Value;
                Cv.Line(img, pt0, pt, new CvColor(255, 255, 255));
                pt0 = pt;
            }
        }
Example #26
        public ConvexityDefect()
        {
            using (IplImage imgSrc = new IplImage(Const.ImageHand, LoadMode.Color))
                using (IplImage imgHSV = new IplImage(imgSrc.Size, BitDepth.U8, 3))
                    using (IplImage imgH = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                        using (IplImage imgS = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                            using (IplImage imgV = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                using (IplImage imgBackProjection = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                    using (IplImage imgFlesh = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                        using (IplImage imgHull = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                            using (IplImage imgDefect = new IplImage(imgSrc.Size, BitDepth.U8, 3))
                                                using (IplImage imgContour = new IplImage(imgSrc.Size, BitDepth.U8, 3))
                                                    using (CvMemStorage storage = new CvMemStorage())
                                                    {
                                                        // RGB -> HSV
                                                        Cv.CvtColor(imgSrc, imgHSV, ColorConversion.BgrToHsv);
                                                        Cv.CvtPixToPlane(imgHSV, imgH, imgS, imgV, null);
                                                        IplImage[] hsvPlanes = { imgH, imgS, imgV };

                                                        // Find the flesh-colored region
                                                        RetrieveFleshRegion(imgSrc, hsvPlanes, imgBackProjection);
                                                        // Keep the region with the largest area
                                                        FilterByMaximalBlob(imgBackProjection, imgFlesh);
                                                        Interpolate(imgFlesh);

                                                        // Find the contours
                                                        CvSeq <CvPoint> contours = FindContours(imgFlesh, storage);
                                                        if (contours != null)
                                                        {
                                                            Cv.DrawContours(imgContour, contours, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);

                                                            // Find the convex hull
                                                            int[] hull;
                                                            Cv.ConvexHull2(contours, out hull, ConvexHullOrientation.Clockwise);
                                                            Cv.Copy(imgFlesh, imgHull);
                                                            DrawConvexHull(contours, hull, imgHull);

                                                            // Find the convexity defects
                                                            Cv.Copy(imgContour, imgDefect);
                                                            CvSeq <CvConvexityDefect> defect = Cv.ConvexityDefects(contours, hull);
                                                            DrawDefects(imgDefect, defect);
                                                        }

                                                        using (new CvWindow("src", imgSrc))
                                                            using (new CvWindow("back projection", imgBackProjection))
                                                                using (new CvWindow("hull", imgHull))
                                                                    using (new CvWindow("defect", imgDefect))
                                                                    {
                                                                        Cv.WaitKey();
                                                                    }
                                                    }
        }
Example #27
        public static void GetSingleContour(Bitmap src, Point point, int id, Bitmap scene, out Bitmap result, out double area)
        {
            string adr = "temp.jpg";

            File.Delete(adr);
            src.Save(adr);
            IplImage image = Cv.LoadImage(adr, LoadMode.AnyColor);
            IplImage gray  = Cv.CreateImage(Cv.GetSize(image), BitDepth.U8, 1);
            IplImage bin   = Cv.CreateImage(Cv.GetSize(image), BitDepth.U8, 1);

            scene.Save(adr);
            IplImage dst = Cv.LoadImage(adr, LoadMode.AnyColor);

            Cv.CvtColor(image, gray, ColorConversion.RgbToGray);
            Cv.InRangeS(gray, 150, 255, bin);
            CvMemStorage    storage  = Cv.CreateMemStorage(0);
            CvSeq <CvPoint> contours = null;
            int             cont     = Cv.FindContours(bin, storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxTC89KCOS, Cv.Point(0, 0));

            contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 0, true);
            double temp = 0;

            for (CvSeq <CvPoint> seq0 = contours; seq0 != null; seq0 = seq0.HNext)
            {
                if (Cv.PointPolygonTest(seq0, new CvPoint2D32f(point.X, point.Y), false) > 0 &&
                    Cv.ContourArea(seq0) > 1000 &&
                    Cv.ContourArea(seq0) < (image.Height * image.Width * 0.5))
                {
                    CvMoments moments = new CvMoments();
                    Cv.Moments(seq0, out moments, true);
                    int             xc = (int)(moments.M10 / moments.M00);
                    int             yc = (int)(moments.M01 / moments.M00);
                    CvConnectedComp comp;
                    if (id == 0)
                    {
                        Cv.FloodFill(dst, Cv.Point(point.X, point.Y), Cv.RGB(200, 0, 0), Cv.ScalarAll(10), Cv.ScalarAll(10), out comp, FloodFillFlag.FixedRange, null);
                    }
                    else
                    {
                        Cv.FloodFill(dst, Cv.Point(point.X, point.Y), Cv.RGB(0, 150, 50), Cv.ScalarAll(10), Cv.ScalarAll(10), out comp, FloodFillFlag.FixedRange, null);
                    }
                    dst.PutText(
                        id.ToString(),
                        Cv.Point(xc, yc),
                        new CvFont(FontFace.HersheySimplex, 2, 2, 1, 5, LineType.Link8),
                        CvColor.Black);
                    temp = Cv.ContourArea(seq0);
                }
            }
            result = dst.ToBitmap();
            area   = temp;
        }
Example #28
        public ConvexityDefect()
        {
            using (var imgSrc = new IplImage(FilePath.Image.Hand, LoadMode.Color))
                using (var imgHSV = new IplImage(imgSrc.Size, BitDepth.U8, 3))
                    using (var imgH = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                        using (var imgS = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                            using (var imgV = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                using (var imgBackProjection = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                    using (var imgFlesh = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                        using (var imgHull = new IplImage(imgSrc.Size, BitDepth.U8, 1))
                                            using (var imgDefect = new IplImage(imgSrc.Size, BitDepth.U8, 3))
                                                using (var imgContour = new IplImage(imgSrc.Size, BitDepth.U8, 3))
                                                    using (var storage = new CvMemStorage())
                                                    {
                                                        // RGB -> HSV
                                                        Cv.CvtColor(imgSrc, imgHSV, ColorConversion.BgrToHsv);
                                                        Cv.CvtPixToPlane(imgHSV, imgH, imgS, imgV, null);
                                                        IplImage[] hsvPlanes = { imgH, imgS, imgV };

                                                        // skin region
                                                        RetrieveFleshRegion(imgSrc, hsvPlanes, imgBackProjection);
                                                        // gets max blob
                                                        FilterByMaximumBlob(imgBackProjection, imgFlesh);
                                                        Interpolate(imgFlesh);

                                                        // find contours of the max blob
                                                        CvSeq <CvPoint> contours = FindContours(imgFlesh, storage);
                                                        if (contours != null)
                                                        {
                                                            Cv.DrawContours(imgContour, contours, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);

                                                            // finds convex hull
                                                            int[] hull;
                                                            Cv.ConvexHull2(contours, out hull, ConvexHullOrientation.Clockwise);
                                                            Cv.Copy(imgFlesh, imgHull);
                                                            DrawConvexHull(contours, hull, imgHull);

                                                            // gets convexity defects
                                                            Cv.Copy(imgContour, imgDefect);
                                                            CvSeq <CvConvexityDefect> defect = Cv.ConvexityDefects(contours, hull);
                                                            DrawDefects(imgDefect, defect);
                                                        }

                                                        using (new CvWindow("src", imgSrc))
                                                            using (new CvWindow("back projection", imgBackProjection))
                                                                using (new CvWindow("hull", imgHull))
                                                                    using (new CvWindow("defect", imgDefect))
                                                                    {
                                                                        Cv.WaitKey();
                                                                    }
                                                    }
        }
Example #29
        public IplImage PostProcess(IplImage preProcessedImage, IplImage postProcessedImage)
        {
            using (CvMemStorage storage = new CvMemStorage())
            {
                CvSeq seq   = preProcessedImage.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 30, 40, 15);
                var   lines = new List <CvLineSegmentPoint>();
                for (int i = 0; i < seq.Total; i++)
                {
                    var cvLineSegmentPoint = seq.GetSeqElem <CvLineSegmentPoint>(i);
                    if (cvLineSegmentPoint != null)
                    {
                        lines.Add(cvLineSegmentPoint.Value);
                    }
                }

                var groupedLines = RectangleFinder.GroupSegments(lines);

                var rects = RectangleFinder.Convert(groupedLines);
                RectangleFinder.Filter(rects);

                foreach (var cvRect in rects)
                {
                    postProcessedImage.Rectangle(cvRect, CvColor.Red, 3, LineType.AntiAlias);
                }

                //for (int i = 0; i < groupedLines.Count; i++)
                //{
                //    var color = new CvColor(i*255/max,i*255/max,i*255/max);
                //    var group = groupedLines[i];
                //    for (int j = 0; j < group.Lines.Count; j++)
                //    {
                //        CvLineSegmentPoint elem = group.Lines[j];
                //        imgHough.Line(elem.P1, elem.P2, color, 3, LineType.AntiAlias, 0);
                //    }

                //}


                //Console.WriteLine(groupedLines.Count);

                CvSeq <CvCircleSegment> seq1 = preProcessedImage.HoughCircles(storage,
                                                                              HoughCirclesMethod.Gradient, 1,
                                                                              //imgGray.Size.Height / 8, 150, 55, 0, 50);
                                                                              15, 100, 30, 9, 51);

                foreach (CvCircleSegment item in seq1)
                {
                    postProcessedImage.Circle(item.Center, (int)item.Radius, CvColor.Red, 3);
                }
            }
            return(postProcessedImage);
        }
Example #30
        ///////////////////////
        public static IplImage FaceDetect(IplImage src)
        {
            IplImage FindFace;

            // CvHaarClassifierCascade, cvHaarDetectObjects
            // Use a cascade of Haar classifiers to detect faces
            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };
            const double scale        = 1;
            const double scaleFactor  = 1.139;
            const int    minNeighbors = 2;
            IplImage     img          = src.Clone();
            IplImage     smallImg     = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1); // single channel: Resize/EqualizeHist operate on the grayscale image
            {
                // Create the image used for face detection
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }
                using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("C:\\haarcascade_frontalface_default.xml"))
                    using (CvMemStorage storage = new CvMemStorage())
                    {
                        storage.Clear();
                        // Detect faces
                        CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(24, 24));
                        // Draw circles on the detected faces
                        for (int i = 0; i < faces.Total; i++)
                        {
                            CvRect  r      = faces[i].Value.Rect;
                            CvPoint center = new CvPoint
                            {
                                X = Cv.Round((r.X + r.Width * 0.5) * scale),
                                Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                            };
                            int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                            img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                        }
                    }
                FindFace = img.Clone();
                return(FindFace);
            }
        }
Example #31
 private static void DrawDefects(IplImage img, CvSeq<CvConvexityDefect> defect)
 {
     int count = 0;
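     // For each convexity defect: draw the hull chord (start to end), mark the
     // deepest point, and connect the chord's midpoint to that point.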
     foreach (CvConvexityDefect item in defect)
     {
         CvPoint p1 = item.Start, p2 = item.End;
         double dist = GetDistance(p1, p2);
         CvPoint2D64f mid = GetMidpoint(p1, p2);
         img.DrawLine(p1, p2, CvColor.White, 3);
         img.DrawCircle(item.DepthPoint, 10, CvColor.Green, -1);
         img.DrawLine(mid, item.DepthPoint, CvColor.White, 1);
         Console.WriteLine("No:{0} Depth:{1} Dist:{2}", count, item.Depth, dist);
         count++;
     }
 }
Пример #32
        public unsafe SeqTest()
        {
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                Random      rand = new Random();
                CvSeq <int> seq  = new CvSeq <int>(SeqType.EltypeS32C1, storage);
                // push
                for (int i = 0; i < 10; i++)
                {
                    int push = seq.Push(rand.Next(100));//seq.Push(i);
                    Console.WriteLine("{0} is pushed", push);
                }
                Console.WriteLine("----------");

                // enumerate
                Console.WriteLine("contents of seq");
                foreach (int item in seq)
                {
                    Console.Write("{0} ", item);
                }
                Console.WriteLine();

                // sort
                CvCmpFunc <int> func = delegate(int a, int b)
                {
                    return(a.CompareTo(b));
                };
                seq.Sort(func);

                // convert to array
                int[] array = seq.ToArray();
                Console.WriteLine("contents of sorted seq");
                foreach (int item in array)
                {
                    Console.Write("{0} ", item);
                }
                Console.WriteLine();
                Console.WriteLine("----------");

                // pop
                for (int i = 0; i < 10; i++)
                {
                    int pop = seq.Pop();
                    Console.WriteLine("{0} is popped", pop);
                }
                Console.ReadKey();
            }
        }
Пример #34
        static void Main(string[] args)
        {
            // Load the source image
            var src = IplImage.FromFile("source.jpg");

            // Convert to grayscale
            var gray = Cv.CreateImage(src.Size, BitDepth.U8, 1);

            Cv.CvtColor(src, gray, ColorConversion.BgrToGray);

            // Dilate the text a little, in both x and y but with different factors.
            // Erode works on white regions, so eroding here effectively dilates the dark text.
            var kernel = Cv.CreateStructuringElementEx(5, 2, 1, 1, ElementShape.Rect);

            Cv.Erode(gray, gray, kernel, 2);

            // Binarize (inverted, with Otsu's threshold)
            Cv.Threshold(gray, gray, 0, 255, ThresholdType.BinaryInv | ThresholdType.Otsu);

            // Find connected components; each one is represented by a sequence of
            // points. FindContours itself only returns the first contour directly.
            var             storage = Cv.CreateMemStorage();
            CvSeq <CvPoint> contour = null;

            Cv.FindContours(gray, storage, out contour, CvContour.SizeOf, ContourRetrieval.CComp, ContourChain.ApproxSimple);
            var color = new CvScalar(0, 0, 255);

            // Walk the contour list
            while (contour != null)
            {
                // Bounding rectangle of this connected component
                var rect = Cv.BoundingRect(contour);

                // Regions that are too short, or whose width-to-height ratio is too
                // small, are treated as noise; otherwise draw the rectangle on the source image
                if (rect.Height > 10 && (rect.Width * 1.0 / rect.Height) > 0.2)
                {
                    Cv.DrawRect(src, rect, color);
                }

                // Next connected component
                contour = contour.HNext;
            }
            Cv.ReleaseMemStorage(storage);

            // Show the result
            Cv.ShowImage("Result", src);
            Cv.WaitKey();
            Cv.DestroyAllWindows();
        }
Пример #35
        private IplImage RotateImage(IplImage img, Double angle)
        {
            Mat             imgMat     = null;
            IplImage        tempImage  = null;
            IplImage        rotatedImg = null;
            CvSeq <CvPoint> contours   = null;
            CvMemStorage    storage    = null;

            if (angle == 0.0 || Double.IsNaN(angle))
            {
                angle = -3.0;
            }

            if (angle != 0.0 && !Double.IsNaN(angle))
            {
                try
                {
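                    // Find the contours of the grayscale image, fit a minimum-area
                    // box around them, and rotate the image about the box center.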
                    rotatedImg = new IplImage(img.Size, img.Depth, img.NChannels);
                    tempImage  = new IplImage(img.Size, img.Depth, 1);
                    Cv.CvtColor(img, tempImage, ColorConversion.RgbaToGray);
                    imgMat  = new Mat(tempImage);
                    storage = new CvMemStorage();
                    Cv.FindContours(tempImage, storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxNone);
                    contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);
                    CvBox2D box    = Cv.MinAreaRect2(contours);
                    CvMat   rotMat = Cv.GetRotationMatrix2D(box.Center, angle, 1.0);
                    Cv.WarpAffine(img, rotatedImg, rotMat, Interpolation.Cubic);
                }
                catch (Exception)
                {
                    if (null != rotatedImg)
                    {
                        Cv.ReleaseImage(rotatedImg);
                        rotatedImg = null;
                    }
                    throw;
                }
                finally
                {
                    if (null != tempImage)
                    {
                        Cv.ReleaseImage(tempImage);
                    }
                }
            }

            return(rotatedImg);
        }
Пример #36
        /// <summary>
        /// Extracts Speeded Up Robust Features from image
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image. </param>
        /// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
        /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
        /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
        /// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
        /// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
        public static void ExtractSURF(CvArr image, CvArr mask, ref CvSURFPoint[] keypoints, out float[][] descriptors, CvSURFParams param, bool useProvidedKeyPts)
        {
            if (!useProvidedKeyPts)
            {
                ExtractSURF(image, mask, out keypoints, out descriptors, param);
                return;
            }

            if (image == null)
                throw new ArgumentNullException("image");
            if (param == null)
                throw new ArgumentNullException("param");
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");

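            // Wrap the caller-supplied keypoints in a native CvSeq so that
            // cvExtractSURF can compute descriptors at those locations.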
            using (CvMemStorage storage = new CvMemStorage(0))
            using (CvSeq <CvSURFPoint> keypointsSeqIn = CvSeq<CvSURFPoint>.FromArray(keypoints, SeqType.Zero, storage))
            {
                IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
                IntPtr descriptorsPtr = IntPtr.Zero;
                IntPtr keypointsPtr = keypointsSeqIn.CvPtr;
                NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, false);
                
                CvSeq<CvSURFPoint> keypointsSeqOut = new CvSeq<CvSURFPoint>(keypointsPtr);
                keypoints = keypointsSeqOut.ToArray();

                descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
            }
        }
Пример #37
        /// <summary>
        /// Extracts Speeded Up Robust Features from image
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image. </param>
        /// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
        /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
        /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
        /// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
        public static void ExtractSURF(CvArr image, CvArr mask, out CvSURFPoint[] keypoints, out float[][] descriptors, CvSURFParams param)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (param == null)
                throw new ArgumentNullException("param");

            using (CvMemStorage storage = new CvMemStorage(0))
            {
                IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
                IntPtr descriptorsPtr = IntPtr.Zero;
                IntPtr keypointsPtr = IntPtr.Zero;
                NativeMethods.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, false);
                CvSeq<CvSURFPoint> keypointsSeq = new CvSeq<CvSURFPoint>(keypointsPtr);
                keypoints = keypointsSeq.ToArray();

                descriptors = ExtractSurfDescriptors(descriptorsPtr, param);
            }
        }
Пример #38
        /// <summary>
        /// Approximates polygonal curve(s) with desired precision.
        /// </summary>
        /// <param name="srcSeq">Sequence of array of points. </param>
        /// <param name="headerSize">Header size of approximated curve[s]. </param>
        /// <param name="storage">Container for approximated contours. If it is null, the input sequences' storage is used. </param>
        /// <param name="method">Approximation method; only ApproxPolyMethod.DP is supported, that corresponds to Douglas-Peucker algorithm. </param>
        /// <param name="parameter">Method-specific parameter; in case of CV_POLY_APPROX_DP it is a desired approximation accuracy. </param>
        /// <returns>The approximated curve(s).</returns>
        public static CvSeq<CvPoint> ApproxPoly(CvSeq<CvPoint> srcSeq, int headerSize, CvMemStorage storage, ApproxPolyMethod method, double parameter)
        {
            return ApproxPoly(srcSeq, headerSize, storage, method, parameter, false);
        }
Пример #39
        /// <summary>
        /// a rough implementation for object location
        /// </summary>
        /// <param name="objectKeypoints"></param>
        /// <param name="objectDescriptors"></param>
        /// <param name="imageKeypoints"></param>
        /// <param name="imageDescriptors"></param>
        /// <param name="srcCorners"></param>
        /// <returns>corners of the located object in the image, or null if the object was not found</returns>
        private static CvPoint[] LocatePlanarObject(CvSeq<CvSURFPoint> objectKeypoints, CvSeq<float> objectDescriptors,
                            CvSeq<CvSURFPoint> imageKeypoints, CvSeq<float> imageDescriptors,
                            CvPoint[] srcCorners)
        {
            double[] hData = new double[9];
            CvMat h = new CvMat(3, 3, MatrixType.F64C1, hData);
            int[] ptpairs = FindPairs(objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors);
            int n = ptpairs.Length / 2;
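            // At least 4 point correspondences are needed to estimate a homography.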
            if (n < 4)
                return null;

            CvPoint2D32f[] pt1 = new CvPoint2D32f[n];
            CvPoint2D32f[] pt2 = new CvPoint2D32f[n];
            for (int i = 0; i < n; i++)
            {
                pt1[i] = (Cv.GetSeqElem<CvSURFPoint>(objectKeypoints, ptpairs[i * 2])).Value.Pt;
                pt2[i] = (Cv.GetSeqElem<CvSURFPoint>(imageKeypoints, ptpairs[i * 2 + 1])).Value.Pt;
            }

            CvMat pt1Mat = new CvMat(1, n, MatrixType.F32C2, pt1);
            CvMat pt2Mat = new CvMat(1, n, MatrixType.F32C2, pt2);
            if (Cv.FindHomography(pt1Mat, pt2Mat, h, HomographyMethod.Ransac, 5) == 0)
                return null;

            CvPoint[] dstCorners = new CvPoint[4];
            for (int i = 0; i < 4; i++)
            {
                double x = srcCorners[i].X;
                double y = srcCorners[i].Y;
                double Z = 1.0 / (hData[6] * x + hData[7] * y + hData[8]);
                double X = (hData[0] * x + hData[1] * y + hData[2]) * Z;
                double Y = (hData[3] * x + hData[4] * y + hData[5]) * Z;
                dstCorners[i] = new CvPoint(Cv.Round(X), Cv.Round(Y));
            }

            return dstCorners;
        }
Пример #40
        /// <summary>
        /// Approximates polygonal curve(s) with desired precision.
        /// </summary>
        /// <param name="srcSeq">Sequence of array of points. </param>
        /// <param name="headerSize">Header size of approximated curve[s]. </param>
        /// <param name="storage">Container for approximated contours. If it is null, the input sequences' storage is used. </param>
        /// <param name="method">Approximation method; only ApproxPolyMethod.DP is supported, that corresponds to Douglas-Peucker algorithm. </param>
        /// <param name="parameter">Method-specific parameter; in case of CV_POLY_APPROX_DP it is a desired approximation accuracy. </param>
        /// <param name="parameter2">If case if src_seq is sequence it means whether the single sequence should be approximated 
        /// or all sequences on the same level or below src_seq (see cvFindContours for description of hierarchical contour structures). 
        /// And if src_seq is array (CvMat*) of points, the parameter specifies whether the curve is closed (parameter2==true) or not (parameter2==false). </param>
        /// <returns>The approximated curve(s).</returns>
        public static CvSeq<CvPoint> ApproxPoly(CvSeq<CvPoint> srcSeq, int headerSize, CvMemStorage storage, ApproxPolyMethod method, double parameter, bool parameter2)
        {
            if (srcSeq == null)
            {
                throw new ArgumentNullException("srcSeq");
            }
            IntPtr storagePtr = (storage == null) ? IntPtr.Zero : storage.CvPtr;
            IntPtr result = NativeMethods.cvApproxPoly(srcSeq.CvPtr, headerSize, storagePtr, method, parameter, parameter2);
            if (result == IntPtr.Zero)
                return null;
            return new CvSeq<CvPoint>(result);
        }
Пример #41
        /// <summary>
        /// Extracts Speeded Up Robust Features from image
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image. </param>
        /// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
        /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
        /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
        /// <param name="storage">Memory storage where keypoints and descriptors will be stored. </param>
        /// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
        /// <param name="useProvidedKeyPts">If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed at the locations provided in keypoints (a CvSeq of CvSURFPoint).</param>
        public static void ExtractSURF(CvArr image, CvArr mask, ref CvSeq<CvSURFPoint> keypoints, out CvSeq<float> descriptors, CvMemStorage storage, CvSURFParams param, bool useProvidedKeyPts)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (storage == null)
                throw new ArgumentNullException("storage");

            IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
            IntPtr descriptorsPtr = IntPtr.Zero;
            IntPtr keypointsPtr = IntPtr.Zero;
            if (useProvidedKeyPts)
            {
                if (keypoints == null)
                    keypoints = new CvSeq<CvSURFPoint>(SeqType.Zero, storage);
                keypointsPtr = keypoints.CvPtr;
                CvInvoke.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, useProvidedKeyPts);
                descriptors = new CvSeq<float>(descriptorsPtr);
            }
            else
            {
                CvInvoke.cvExtractSURF(image.CvPtr, maskPtr, ref keypointsPtr, ref descriptorsPtr, storage.CvPtr, param.Struct, useProvidedKeyPts);
                keypoints = new CvSeq<CvSURFPoint>(keypointsPtr);
                descriptors = new CvSeq<float>(descriptorsPtr);
            }
        }
Пример #42
        /// <summary>
        /// Creates hierarchical representation of contour
        /// </summary>
        /// <param name="contour">Input contour. </param>
        /// <param name="storage">Container for output tree. </param>
        /// <param name="threshold">Approximation accuracy. </param>
        /// <returns></returns>
        public CvContourTree(CvSeq contour, CvMemStorage storage, double threshold)
            : base( NativeMethods.cvCreateContourTree(contour.CvPtr, storage.CvPtr, threshold) )
        {
        }
Пример #43
        /// <summary>
        /// returns sequence of squares detected on the image.
        /// the sequence is stored in the specified memory storage
        /// </summary>
        /// <param name="img"></param>
        /// <param name="storage"></param>
        /// <returns></returns>
        static CvPoint[] FindSquares4(IplImage img, CvMemStorage storage)
        {
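            // number of threshold levels tried per color plane (l == 0 uses Canny instead)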
            const int N = 11;

            CvSize sz = new CvSize(img.Width & -2, img.Height & -2);
            IplImage timg = img.Clone(); // make a copy of input image
            IplImage gray = new IplImage(sz, BitDepth.U8, 1);
            IplImage pyr = new IplImage(sz.Width / 2, sz.Height / 2, BitDepth.U8, 3);
            // create empty sequence that will contain points -
            // 4 points per square (the square's vertices)
            CvSeq<CvPoint> squares = new CvSeq<CvPoint>(SeqType.Zero, CvSeq.SizeOf, storage);

            // select the maximum ROI in the image
            // with the width and height divisible by 2
            timg.ROI = new CvRect(0, 0, sz.Width, sz.Height);

            // down-scale and up-scale the image to filter out noise
            Cv.PyrDown(timg, pyr, CvFilter.Gaussian5x5);
            Cv.PyrUp(pyr, timg, CvFilter.Gaussian5x5);
            IplImage tgray = new IplImage(sz, BitDepth.U8, 1);

            // find squares in every color plane of the image
            for (int c = 0; c < 3; c++)
            {
                // extract the c-th color plane
                timg.COI = c + 1;
                Cv.Copy(timg, tgray, null);

                // try several threshold levels
                for (int l = 0; l < N; l++)
                {
                    // hack: use Canny instead of zero threshold level.
                    // Canny helps to catch squares with gradient shading   
                    if (l == 0)
                    {
                        // apply Canny. Take the upper threshold from slider
                        // and set the lower to 0 (which forces edges merging) 
                        Cv.Canny(tgray, gray, 0, Thresh, ApertureSize.Size5);
                        // dilate canny output to remove potential
                        // holes between edge segments 
                        Cv.Dilate(gray, gray, null, 1);
                    }
                    else
                    {
                        // apply threshold if l!=0:
                        //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                        Cv.Threshold(tgray, gray, (l + 1) * 255.0 / N, 255, ThresholdType.Binary);
                    }

                    // find contours and store them all as a list
                    CvSeq<CvPoint> contours;
                    Cv.FindContours(gray, storage, out contours, CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxSimple, new CvPoint(0, 0));

                    // test each contour
                    while (contours != null)
                    {
                        // approximate contour with accuracy proportional
                        // to the contour perimeter
                        CvSeq<CvPoint> result = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, contours.ContourPerimeter() * 0.02, false);
                        // square contours should have 4 vertices after approximation
                        // relatively large area (to filter out noisy contours)
                        // and be convex.
                        // Note: absolute value of an area is used because
                        // area may be positive or negative - in accordance with the
                        // contour orientation
                        if (result.Total == 4 && Math.Abs(result.ContourArea(CvSlice.WholeSeq)) > 1000 && result.CheckContourConvexity())
                        {
                            double s = 0;

                            for (int i = 0; i < 5; i++)
                            {
                                // find minimum Angle between joint
                                // edges (maximum of cosine)
                                if (i >= 2)
                                {
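                                    // result[i] wraps around for i == result.Total
                                    // (cvGetSeqElem shifts an out-of-range index by one
                                    // period), so result[4] is result[0]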
                                    double t = Math.Abs(Angle(result[i].Value, result[i - 2].Value, result[i - 1].Value));
                                    s = s > t ? s : t;
                                }
                            }

                            // if cosines of all angles are small
                            // (all angles are ~90 degrees) then write the quadrangle
                            // vertices to the resultant sequence
                            if (s < 0.3)
                            {
                                for (int i = 0; i < 4; i++)
                                {
                                    //Console.WriteLine(result[i]);
                                    squares.Push(result[i].Value);
                                }
                            }
                        }

                        // take the next contour
                        contours = contours.HNext;
                    }
                }
            }

            // release all the temporary images
            gray.Dispose();
            pyr.Dispose();
            tgray.Dispose();
            timg.Dispose();

            return squares.ToArray();
        }
Пример #44
        /// <summary>
        /// Retrieves contours from the binary image and returns the number of retrieved contours.
        /// </summary>
        /// <param name="image">The source 8-bit single channel image. Non-zero pixels are treated as 1’s, zero pixels remain 0’s - that is image treated as binary. 
        /// To get such a binary image from grayscale, one may use cvThreshold, cvAdaptiveThreshold or cvCanny. The function modifies the source image content. </param>
        /// <param name="storage">Container of the retrieved contours. </param>
        /// <param name="firstContour">Output parameter, will contain the pointer to the first outer contour. </param>
        /// <param name="headerSize">Size of the sequence header, >=sizeof(CvChain) if method=CV_CHAIN_CODE, and >=sizeof(CvContour) otherwise. </param>
        /// <returns>The number of retrieved contours.</returns>
        public static int FindContours(CvArr image, CvMemStorage storage, out CvSeq<CvPoint> firstContour, int headerSize)
        {
            return FindContours(image, storage, out firstContour, headerSize, ContourRetrieval.List, ContourChain.ApproxSimple, new CvPoint(0, 0));
        }
Пример #45
 /// <summary>
 /// Determines whether the contour is the boundary of a hole
 /// </summary>
 /// <param name="contour"></param>
 /// <returns></returns>
 private static bool isHole(CvSeq<CvPoint> contour)
 {
     return (contour.Flags & (int)SeqType.FlagHole) != 0;
 }
Пример #46
        /// <summary>
        /// Finds lines in binary image using Hough transform.
        /// </summary>
        /// <param name="rho">Distance resolution in pixel-related units. </param>
        /// <param name="theta">Angle resolution measured in radians. </param>
        /// <param name="threshold">Threshold parameter. A line is returned by the function if the corresponding accumulator value is greater than threshold. </param>
        /// <param name="param1">The first method-dependent parameter; for the probabilistic transform, the minimum line length. </param>
        /// <param name="param2">The second method-dependent parameter; for the probabilistic transform, the maximum gap between segments on the same line that still allows them to be merged. </param>
        /// <returns>Array of detected line segments, each represented by its two endpoints.</returns>
        public CvLineSegmentPoint[] HoughLinesProbabilistic(double rho, double theta, int threshold, double param1, double param2)
        {
            using (CvMemStorage lineStorage = new CvMemStorage())
            {
                IntPtr result = NativeMethods.cvHoughLines2(CvPtr, lineStorage.CvPtr, HoughLinesMethod.Probabilistic, rho, theta, threshold, param1, param2);
                if (result == IntPtr.Zero)
                    throw new OpenCvSharpException();
                
                CvSeq<CvLineSegmentPoint> seq = new CvSeq<CvLineSegmentPoint>(result);
                return seq.ToArray();
            }
        }
Пример #47
 private static float[][] ExtractSurfDescriptors(IntPtr descriptorsPtr, CvSURFParams param)
 {
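     // Copy each native SURF descriptor (64 or 128 floats, depending on
     // CvSURFParams.Extended) into a managed float[].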
     CvSeq<IntPtr> descriptorsSeq = new CvSeq<IntPtr>(descriptorsPtr);
     float[][] descriptors = new float[descriptorsSeq.Total][];
     int dim = (param.Extended) ? 128 : 64;
     for (int i = 0; i < descriptorsSeq.Total; i++)
     {
         descriptors[i] = new float[dim];
         IntPtr? ptr = descriptorsSeq[i];
         if (ptr.HasValue)
             Marshal.Copy(ptr.Value, descriptors[i], 0, dim);
     }
     return descriptors;
 }
Пример #48
        /// <summary>
        /// Extracts the contours of Maximally Stable Extremal Regions
        /// </summary>
        /// <param name="img"></param>
        /// <param name="mask"></param>
        /// <param name="contours"></param>
        /// <param name="storage"></param>
        /// <param name="params"></param>
        public static void ExtractMSER(CvArr img, CvArr mask, out CvContour[] contours, CvMemStorage storage, CvMSERParams @params)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            if (storage == null)
                throw new ArgumentNullException("storage");

            IntPtr maskPtr = (mask == null) ? IntPtr.Zero : mask.CvPtr;
            IntPtr contoursPtr = IntPtr.Zero;

            CvInvoke.cvExtractMSER(img.CvPtr, maskPtr, ref contoursPtr, storage.CvPtr, @params.Struct);

            CvSeq<IntPtr> seq = new CvSeq<IntPtr>(contoursPtr);
            contours = Array.ConvertAll<IntPtr, CvContour>(seq.ToArray(), delegate(IntPtr p) { return new CvContour(p); });
        }
Пример #49
        /// <summary>
        /// Retrieves contours from the binary image and returns the number of retrieved contours.
        /// </summary>
        /// <param name="image">The source 8-bit single channel image. Non-zero pixels are treated as 1’s, zero pixels remain 0’s - that is image treated as binary. 
        /// To get such a binary image from grayscale, one may use cvThreshold, cvAdaptiveThreshold or cvCanny. The function modifies the source image content. </param>
        /// <param name="storage">Container of the retrieved contours. </param>
        /// <param name="firstContour">Output parameter, will contain the pointer to the first outer contour. </param>
        /// <param name="headerSize">Size of the sequence header, >=sizeof(CvChain) if method=CV_CHAIN_CODE, and >=sizeof(CvContour) otherwise. </param>
        /// <param name="mode">Retrieval mode. </param>
        /// <param name="method">Approximation method. </param>
        /// <returns>The number of retrieved contours.</returns>
        public static int FindContours(CvArr image, CvMemStorage storage, out CvSeq<CvPoint> firstContour, int headerSize, ContourRetrieval mode, ContourChain method)
        {
            return FindContours(image, storage, out firstContour, headerSize, mode, method, new CvPoint(0, 0));
        }
Пример #50
        /// <summary>
        /// Approximates Freeman chain(s) with polygonal curve
        /// </summary>
        /// <param name="srcSeq">Freeman chain(s) </param>
        /// <param name="storage">Storage location for the resulting polylines. </param>
        /// <param name="method">Approximation method.</param>
        /// <param name="parameter">Method parameter (not used now). </param>
        /// <param name="minimalPerimeter">Approximates only those contours whose perimeters are not less than minimal_perimeter. Other chains are removed from the resulting structure. </param>
        /// <param name="recursive">If true, the function approximates all chains that access can be obtained to from src_seq by h_next or v_next links. If false, the single chain is approximated. </param>
        /// <returns></returns>
        public static CvSeq<CvPoint> ApproxChains(CvChain srcSeq, CvMemStorage storage, ContourChain method, double parameter, int minimalPerimeter, bool recursive)
        {
            if (srcSeq == null)
                throw new ArgumentNullException("srcSeq");
            if (storage == null)
                throw new ArgumentNullException("storage");
            IntPtr resultPtr = NativeMethods.cvApproxChains(srcSeq.CvPtr, storage.CvPtr, method, parameter, minimalPerimeter, recursive);
            if (resultPtr == IntPtr.Zero)
                return null;
            
            CvSeq<CvPoint> result = new CvSeq<CvPoint>(resultPtr);
            return result;
        }
Пример #51
        /// <summary>
        /// Does image segmentation by pyramids.
        /// </summary>
        /// <param name="dst">The destination image. </param>
        /// <param name="storage">Storage; stores the resulting sequence of connected components. </param>
        /// <param name="comp">Pointer to the output sequence of the segmented components. </param>
        /// <param name="level">Maximum level of the pyramid for the segmentation. </param>
        /// <param name="threshold1">Error threshold for establishing the links. </param>
        /// <param name="threshold2">Error threshold for the segments clustering. </param>
        public void PyrSegmentation(IplImage dst, CvMemStorage storage, out CvSeq comp, int level, double threshold1, double threshold2)
        {
            Cv.PyrSegmentation(this, dst, storage, out comp, level, threshold1, threshold2);
        }
Пример #52
        /// <summary>
        /// Retrieves contours from the binary image and returns the number of retrieved contours.
        /// </summary>
        /// <param name="image">The source 8-bit single channel image. Non-zero pixels are treated as 1’s, zero pixels remain 0’s - that is image treated as binary. 
        /// To get such a binary image from grayscale, one may use cvThreshold, cvAdaptiveThreshold or cvCanny. The function modifies the source image content. </param>
        /// <param name="storage">Container of the retrieved contours. </param>
        /// <param name="firstContour">Output parameter, will contain the pointer to the first outer contour. </param>
        /// <param name="headerSize">Size of the sequence header, >=sizeof(CvChain) if method=CV_CHAIN_CODE, and >=sizeof(CvContour) otherwise. </param>
        /// <param name="mode">Retrieval mode. </param>
        /// <param name="method">Approximation method. </param>
        /// <param name="offset">Offset, by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context. </param>
        /// <returns>The number of retrieved contours.</returns>
        public static int FindContours(CvArr image, CvMemStorage storage, out CvSeq<CvPoint> firstContour, int headerSize, ContourRetrieval mode, ContourChain method, CvPoint offset)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (storage == null)
                throw new ArgumentNullException("storage");

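            // cvFindContours modifies the source image; the managed wrapper type of
            // the first contour depends on the approximation method (chain vs. contour).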
            IntPtr firstContourPtr = IntPtr.Zero;
            int result = NativeMethods.cvFindContours(image.CvPtr, storage.CvPtr, ref firstContourPtr, headerSize, mode, method, offset);

            if (firstContourPtr == IntPtr.Zero)
                firstContour = null;
            else if (method == ContourChain.Code)
                firstContour = new CvChain(firstContourPtr);
            else
                firstContour = new CvContour(firstContourPtr);
            
            return result;
        }
Пример #53
        /// <summary>
        /// Calculates up-right bounding rectangle of point set.
        /// </summary>
        /// <param name="points">An IEnumerable&lt;CvPoint&gt; object (ex. CvPoint[], List&lt;CvPoint&gt;, ....)</param>
        /// <returns></returns>
        public static CvRect BoundingRect(IEnumerable<CvPoint> points)
        {
            if (points == null)
            {
                throw new ArgumentNullException("points");
            }
            CvRect result = new CvRect();
            try  // not entirely confident in this, so keep it in a try
            {
                using (CvMemStorage storage = new CvMemStorage(0))
                {
                    CvSeq<CvPoint> seq = new CvSeq<CvPoint>(SeqType.EltypePoint, CvSeq<CvPoint>.SizeOf, storage);
                    foreach (CvPoint p in points)
                    {
                        SeqPush<CvPoint>(seq, p);
                    }
                    result = CvInvoke.cvBoundingRect(seq.CvPtr, false);
                }
            }
            catch { }
            return result;
        }
Пример #54
        /// <summary>
        /// Finds high-curvature points of the contour
        /// </summary>
        /// <param name="contour">pointer to input contour object.</param>
        /// <param name="storage">memory storage</param>
        /// <param name="method">dominant point detection method (only DominantsFlag.IPAN is implemented)</param>
        /// <param name="parameter1">for IPAN algorithm - minimal distance</param>
        /// <param name="parameter2">for IPAN algorithm - maximal distance</param>
        /// <param name="parameter3">for IPAN algorithm - neighborhood distance (must not be greater than the maximal distance)</param>
        /// <param name="parameter4">for IPAN algorithm - maximal possible angle of curvature</param>
        /// <returns>array of dominant points indices</returns>
        public static CvSeq<int> FindDominantPoints(CvSeq contour, CvMemStorage storage, DominantsFlag method,
             double parameter1, double parameter2, double parameter3, double parameter4)
        {
            if (contour == null)
                throw new ArgumentNullException("contour");

            IntPtr storagePtr = (storage == null) ? IntPtr.Zero : storage.CvPtr;

            IntPtr result = NativeMethods.cvFindDominantPoints(contour.CvPtr, storagePtr, method, parameter1, parameter2, parameter3, parameter4);
            if (result == IntPtr.Zero)
                return null;
            else
                return new CvSeq<int>(result);
        }
Пример #55
        /// <summary>
        /// Retrieves keypoints using the StarDetector algorithm.
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image</param>
        /// <returns></returns>
        public KeyPoint[] GetKeyPoints(Mat image)
        {
            if (image == null)
                throw new ArgumentNullException("image");

            using (CvMemStorage storage = new CvMemStorage(0))
            {
                IntPtr ptr = CvInvoke.cvGetStarKeypoints(image.ToCvMat().CvPtr, storage.CvPtr, _p);
                if (ptr == IntPtr.Zero)
                {
                    return new KeyPoint[0];
                }
                CvSeq<CvStarKeypoint> keypoints = new CvSeq<CvStarKeypoint>(ptr);
                KeyPoint[] result = new KeyPoint[keypoints.Total];
                for (int i = 0; i < keypoints.Total; i++)
                {
                    CvStarKeypoint kpt = keypoints[i].Value;
                    result[i] = new KeyPoint(kpt.Pt, (float)kpt.Size, -1.0f, kpt.Response, 0);
                }
                return result;
            }
        }
Пример #56
        /// <summary>
        /// Finds lines in binary image using Hough transform.
        /// </summary>
        /// <param name="rho">Distance resolution in pixel-related units. </param>
        /// <param name="theta">Angle resolution measured in radians. </param>
        /// <param name="threshold">Threshold parameter. A line is returned by the function if the corresponding accumulator value is greater than threshold. </param>
        /// <param name="param1">The first method-dependent parameter; not used by the standard transform (pass 0). </param>
        /// <param name="param2">The second method-dependent parameter; not used by the standard transform (pass 0). </param>
        /// <returns>Array of detected lines in polar form (rho, theta).</returns>
        public CvLineSegmentPolar[] HoughLinesStandard(double rho, double theta, int threshold, double param1, double param2)
        {
            using (CvMemStorage lineStorage = new CvMemStorage())
            {
                IntPtr result = CvInvoke.cvHoughLines2(this.CvPtr, lineStorage.CvPtr, HoughLinesMethod.Standard, rho, theta, threshold, param1, param2);
                if (result == IntPtr.Zero)
                {
                    throw new OpenCvSharpException();
                }
                CvSeq<CvLineSegmentPolar> seq = new CvSeq<CvLineSegmentPolar>(result);
                return seq.ToArray();
            }
        }
Пример #57
        /// <summary>
        /// Extracts Speeded Up Robust Features from image
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image. </param>
        /// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels. </param>
        /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of CvSURFPoint structures.</param>
        /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is null, the descriptors are not computed. </param>
        /// <param name="storage">Memory storage where keypoints and descriptors will be stored. </param>
        /// <param name="param">Various algorithm parameters put to the structure CvSURFParams</param>
        public static void ExtractSURF(CvArr image, CvArr mask, out CvSeq<CvSURFPoint> keypoints, out CvSeq<float> descriptors, CvMemStorage storage, CvSURFParams param)
        {
            keypoints = null;
            ExtractSURF(image, mask, ref keypoints, out descriptors, storage, param, false);
        }
Пример #58
        /// <summary>
        /// Finds the nearest neighbor of a SURF descriptor among the model descriptors.
        /// </summary>
        /// <param name="vec">descriptor to match (const float* in the C version)</param>
        /// <param name="laplacian"></param>
        /// <param name="model_keypoints"></param>
        /// <param name="model_descriptors"></param>
        /// <returns>index of the nearest model keypoint, or -1 if no sufficiently distinct match exists</returns>
        private static int NaiveNearestNeighbor(IntPtr vec, int laplacian, CvSeq<CvSURFPoint> model_keypoints, CvSeq<float> model_descriptors)
        {
            int length = (int)(model_descriptors.ElemSize / sizeof(float));
            int neighbor = -1;
            double dist1 = 1e6, dist2 = 1e6;
            CvSeqReader<float> reader = new CvSeqReader<float>();
            CvSeqReader<CvSURFPoint> kreader = new CvSeqReader<CvSURFPoint>();
            Cv.StartReadSeq(model_keypoints, kreader, false);
            Cv.StartReadSeq(model_descriptors, reader, false);

            IntPtr mvec;
            CvSURFPoint kp;
            double d;
            for (int i = 0; i < model_descriptors.Total; i++)
            {
                // In C this would be: const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr; - a bit tricky.
                // OpenCvSharp structs can be created from a pointer via FromPtr, so it can be written like this:
                kp = CvSURFPoint.FromPtr(kreader.Ptr);
                // A plain cast would look like this instead:
                // CvSURFPoint kp = (CvSURFPoint)Marshal.PtrToStructure(kreader.Ptr, typeof(CvSURFPoint));

                mvec = reader.Ptr;
                Cv.NEXT_SEQ_ELEM(kreader.Seq.ElemSize, kreader);
                Cv.NEXT_SEQ_ELEM(reader.Seq.ElemSize, reader);
                if (laplacian != kp.Laplacian)
                {
                    continue;
                }
                d = CompareSurfDescriptors(vec, mvec, dist2, length);
                if (d < dist1)
                {
                    dist2 = dist1;
                    dist1 = d;
                    neighbor = i;
                }
                else if (d < dist2)
                    dist2 = d;
            }
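            // Ratio test: accept the nearest neighbor only if it is clearly better
            // than the second nearest.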
            if (dist1 < 0.6 * dist2)
                return neighbor;
            else
                return -1;
        }
Пример #59
        /// <summary>
        /// Finds pairs of matching SURF keypoints between an object and an image.
        /// </summary>
        /// <param name="objectKeypoints"></param>
        /// <param name="objectDescriptors"></param>
        /// <param name="imageKeypoints"></param>
        /// <param name="imageDescriptors"></param>
        /// <returns>flattened array of index pairs: (object index, image index), ...</returns>
        private static int[] FindPairs(CvSeq<CvSURFPoint> objectKeypoints, CvSeq<float> objectDescriptors, CvSeq<CvSURFPoint> imageKeypoints, CvSeq<float> imageDescriptors)
        {
            CvSeqReader<float> reader = new CvSeqReader<float>();
            CvSeqReader<CvSURFPoint> kreader = new CvSeqReader<CvSURFPoint>();
            Cv.StartReadSeq(objectDescriptors, reader);
            Cv.StartReadSeq(objectKeypoints, kreader);

            List<int> ptpairs = new List<int>();

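            // For every object descriptor, find its nearest neighbor among the
            // image descriptors and record the index pair.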
            for (int i = 0; i < objectDescriptors.Total; i++)
            {
                CvSURFPoint kp = CvSURFPoint.FromPtr(kreader.Ptr);
                IntPtr descriptor = reader.Ptr;
                Cv.NEXT_SEQ_ELEM(kreader.Seq.ElemSize, kreader);
                Cv.NEXT_SEQ_ELEM(reader.Seq.ElemSize, reader);
                int nearestNeighbor = NaiveNearestNeighbor(descriptor, kp.Laplacian, imageKeypoints, imageDescriptors);
                if (nearestNeighbor >= 0)
                {
                    ptpairs.Add(i);
                    ptpairs.Add(nearestNeighbor);
                }
            }
            return ptpairs.ToArray();
        }
Пример #60
        /// <summary>
        /// Calculates up-right bounding rectangle of point set.
        /// </summary>
        /// <param name="points">An IEnumerable&lt;CvPoint&gt; object (ex. CvPoint[], List&lt;CvPoint&gt;, ....)</param>
        /// <returns></returns>
        public static CvRect BoundingRect(IEnumerable<CvPoint> points)
        {
            if (points == null)
                throw new ArgumentNullException("points");
            
            CvRect result;
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                CvSeq<CvPoint> seq = new CvSeq<CvPoint>(SeqType.EltypePoint, CvSeq.SizeOf, storage);
                foreach (CvPoint p in points)
                {
                    SeqPush(seq, p);
                }
                result = NativeMethods.cvBoundingRect(seq.CvPtr, false);
            }
            
            return result;
        }