Example #1
        //Capture an image from the webcam on each timer tick
        private void timer1_Tick(object sender, EventArgs e)
        {
            /*
             * if (capture_type == DO_WITH_IP_CAMERA)
             * {
             *  //capture = CvCapture.FromFile("[IP CAMERA ADDR]");
             *  capture.SetCaptureProperty(CaptureProperty.FrameWidth, 680);
             *  capture.SetCaptureProperty(CaptureProperty.FrameHeight, 480);
             * }
             */


            //Grab the frame as an IplImage, then resize it below before use.
            //QueryFrame returns the capture's internal buffer, so no
            //pre-allocation is needed here.
            IplImage temimg = capture.QueryFrame();

            if (temimg == null)
            {
                return;
            }

            imgSrc = Cv.CreateImage(new CvSize(680, 480), BitDepth.U8, 3);
            Cv.Resize(temimg, imgSrc, Interpolation.Linear);
            //Do not release temimg: the frame returned by QueryFrame is owned
            //by the capture and must not be freed by the caller.

            //Object detection runs on a separate thread. If a computation is
            //already in progress, skip this tick so the user still sees a
            //smooth webcam stream; otherwise start a new detection thread.
            if (!isComputing)
            {
                isComputing = true;

                ComputingSURF = new Thread(() => TestSURF.setImg(tsurf, this, imgSrc));
                ComputingSURF.Start();
            }


            //Draw the detection rectangle with cvLine on every frame, using
            //the most recently computed corner points.
            drawRec(rectanPts);
            pictureBoxIpl1.ImageIpl = imgSrc;
        }
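
The `drawRec` helper called above is not shown in the original post. A minimal sketch of what it presumably does, assuming it just connects the four corner points in `rectanPts` with `Cv.Line` on `imgSrc` (an assumption, not the author's code):

        //Hypothetical sketch: draw the detection quadrilateral by connecting
        //the four stored corners on imgSrc. Assumes the imgSrc/rectanPts
        //fields used elsewhere in Form1.
        private void drawRec(OpenCvSharp.CPlusPlus.Point[] pts)
        {
            if (imgSrc == null || pts == null || pts.Length < 4)
            {
                return;
            }

            for (int i = 0; i < 4; i++)
            {
                CvPoint p1 = new CvPoint(pts[i].X, pts[i].Y);
                CvPoint p2 = new CvPoint(pts[(i + 1) % 4].X, pts[(i + 1) % 4].Y);
                Cv.Line(imgSrc, p1, p2, new CvScalar(0, 0, 255), 2);    //red, 2 px
            }
        }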
Example #2
        static int DO_WITH_DIRECT_WEBCAM = 0;       //local webcam (companion constant, assumed; not shown in the excerpt)
        static int DO_WITH_IP_CAMERA     = 1;       //IP webcam (an Android phone works too!)



        public Form1()
        {
            InitializeComponent();
            isComputing = false;
            //capture_type = DO_WITH_IP_CAMERA;           //webcam or IP webcam
            capture_type = DO_WITH_DIRECT_WEBCAM;

            rectanPts = new OpenCvSharp.CPlusPlus.Point[4];
            //initialize the rectangle corners to (0, 0)
            for (int i = 0; i < 4; i++)
            {
                rectanPts[i].X = rectanPts[i].Y = 0;
            }


            //TestSURF extracts the target region using SURF + kd-tree matching + RANSAC.
            tsurf = new TestSURF();
            tsurf.setTarget("origim.png");      //file name of the image to detect
        }
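
Example #3 below reads `surfobj`, `fm`, `t_keypoints`, `t_descriptor`, `obj_corners`, and `isComputing` from the `TestSURF` instance, but that class is not shown. A minimal sketch of the target-side setup, with every field name and constructor argument inferred from how Example #3 uses it (an assumption, not the original implementation):

        //Hypothetical TestSURF fields, inferred from Example #3:
        public bool       isComputing  = false;
        public SURF       surfobj      = new SURF(400);             //SURF detector/extractor
        public FlannBasedMatcher fm    = new FlannBasedMatcher();   //FLANN (kd-tree) matcher
        public KeyPoint[] t_keypoints;                              //target keypoints
        public Mat        t_descriptor;                             //target descriptors
        public Point2d[]  obj_corners;                              //target image corners

        //Precompute the target image's keypoints, descriptors, and corners.
        public void setTarget(string filename)
        {
            Mat target = Cv2.ImRead(filename, LoadMode.GrayScale);

            t_keypoints  = surfobj.Detect(target);
            t_descriptor = new Mat();
            surfobj.Compute(target, ref t_keypoints, t_descriptor);

            //Corners clockwise from the top-left; PerspectiveTransform maps
            //these into the camera frame once RANSAC finds the homography.
            obj_corners = new Point2d[]
            {
                new Point2d(0, 0),
                new Point2d(target.Cols, 0),
                new Point2d(target.Cols, target.Rows),
                new Point2d(0, target.Rows)
            };
        }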
Example #3
        //Register one frame from the webcam and run the actual matching & detection
        public static void setImg(TestSURF thisobj, Form1 mainform, IplImage imgFromCam)
        {
            if (thisobj.isComputing)
            {
                return;
            }


            thisobj.isComputing = true;

            //---frame image's keypoints and descriptors
            KeyPoint[] f_keypoints;
            Mat        f_descriptor;

            Mat imgOrig;            //camera frame (original)
            Mat img;                //camera frame (grayscale)

            //Convert to GrayScale Mat
            imgOrig = Cv2.CvArrToMat(imgFromCam);
            img     = new Mat();
            Cv2.CvtColor(imgOrig, img, ColorConversion.BgrToGray);



            //---------------------1. keypoint & descriptor extraction
            f_descriptor = new Mat();

            f_keypoints = thisobj.surfobj.Detect(img);                      //SURF keypoints
            thisobj.surfobj.Compute(img, ref f_keypoints, f_descriptor);    //SURF descriptors



            //---------------------2. descriptor matching
            DMatch[] matches;

            try
            {
                matches = thisobj.fm.Match(thisobj.t_descriptor, f_descriptor);
            }
            catch
            {
                //matching can throw (e.g. when the frame has no descriptors);
                //reset the busy flags so detection can retry on a later frame
                thisobj.isComputing  = false;
                mainform.isComputing = false;
                return;
            }


            //record the min/max distances between matched descriptors,
            //used below to select the good matches
            double max_dist = 0;
            double min_dist = 100;

            for (int i = 0; i < matches.Length; i++)
            {
                double dist = matches[i].Distance;

                if (dist < min_dist)
                {
                    min_dist = dist;
                }
                if (dist > max_dist)
                {
                    max_dist = dist;
                }
            }



            //---------------------3. selecting good matches
            List<DMatch> good_matches = new List<DMatch>();

            for (int i = 0; i < matches.Length; i++)
            {
                //keep a match only if its distance is within 3x the best one
                if (matches[i].Distance < 3 * min_dist)
                {
                    good_matches.Add(matches[i]);
                }
            }

            /*
             * KeyPoint[] goodkey = new KeyPoint[good_matches.Count];
             * for(int goodidx = 0; goodidx < good_matches.Count; goodidx++)
             * {
             *  goodkey[goodidx] = new KeyPoint((f_keypoints[good_matches.ElementAt(goodidx).TrainIdx].Pt.X), (f_keypoints[good_matches.ElementAt(goodidx).TrainIdx].Pt.Y), f_keypoints[good_matches.ElementAt(goodidx).TrainIdx].Size);
             * }
             */


            //collect the target-side and frame-side keypoint coordinates of the good matches
            Point2d[] target_lo = new Point2d[good_matches.Count];
            Point2d[] frame_lo  = new Point2d[good_matches.Count];


            for (int i = 0; i < good_matches.Count; i++)
            {
                target_lo[i] = new Point2d(thisobj.t_keypoints[good_matches[i].QueryIdx].Pt.X,
                                           thisobj.t_keypoints[good_matches[i].QueryIdx].Pt.Y);
                frame_lo[i]  = new Point2d(f_keypoints[good_matches[i].TrainIdx].Pt.X,
                                           f_keypoints[good_matches[i].TrainIdx].Pt.Y);
            }


            //-------------------------------4. RANSAC
            //FindHomography needs at least 4 point pairs; bail out (and allow
            //a retry on the next frame) if there are too few good matches
            if (good_matches.Count < 4)
            {
                thisobj.isComputing  = false;
                mainform.isComputing = false;
                return;
            }

            Mat hom = Cv2.FindHomography(target_lo, frame_lo, HomographyMethod.Ransac);

            //project the target image's corners into the camera frame
            Point2d[] frame_corners = Cv2.PerspectiveTransform(thisobj.obj_corners, hom);



            //Mat -> iplimage
            //IplImage returnimg = (IplImage)imgOrig;

            mainform.setDetectionRec((int)frame_corners[0].X, (int)frame_corners[0].Y,
                                     (int)frame_corners[1].X, (int)frame_corners[1].Y,
                                     (int)frame_corners[2].X, (int)frame_corners[2].Y,
                                     (int)frame_corners[3].X, (int)frame_corners[3].Y);

            mainform.isComputing = false;
            thisobj.isComputing  = false;

            //Cv2.DrawKeypoints(imgOrig, goodkey, imgOrig);
            //Cv2.DrawKeypoints(img, f_keypoints, img);
            //Cv2.ImWrite("temtem.png", img);

            return;
        }
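
`Form1.setDetectionRec` is not shown either. Since it is called from the worker thread, a minimal sketch that simply stores the projected corners into `rectanPts` for the next timer tick to draw (again an assumption, not the source's implementation):

        //Hypothetical sketch: store the detected corners for drawRec.
        //The worker thread writes these fields and the UI timer only reads
        //them, which this sketch tolerates without extra locking.
        public void setDetectionRec(int x0, int y0, int x1, int y1,
                                    int x2, int y2, int x3, int y3)
        {
            rectanPts[0] = new OpenCvSharp.CPlusPlus.Point(x0, y0);
            rectanPts[1] = new OpenCvSharp.CPlusPlus.Point(x1, y1);
            rectanPts[2] = new OpenCvSharp.CPlusPlus.Point(x2, y2);
            rectanPts[3] = new OpenCvSharp.CPlusPlus.Point(x3, y3);
        }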