Example #1
0
    //---------------------------------------------------------
    // Function : Update
    // Purpose  : Called once per frame. Grabs a webcam frame,
    //            converts it to HSV, median-filters it, samples
    //            point data, and uploads the result to the texture.
    // Args     : none
    // Returns  : none
    //---------------------------------------------------------
    void Update()
    {
        using (Mat frame = new Mat())
        {
            // Grab the current frame from the webcam.
            video.Read(frame);

            // Convert the Mat to the legacy IplImage type.
            using (var ipl = frame.ToIplImage())
            {
                // OpenCV delivers BGR data, so convert to HSV first.
                Cv.CvtColor(ipl, ipl, ColorConversion.BgrToHsv);

                // Median filter (5x5 kernel) to smooth the image.
                Cv.Smooth(ipl, ipl, SmoothType.Median, 5, 0, 0, 0);

                // Sample data at the configured points of the image.
                getPointData(ipl, pointResult);

                // Render the sampled points back into the image (debug aid).
                getPointImage(ipl, pointResult);

                // Show the processed image on screen.
                using (var display = Cv2.CvArrToMat(ipl))
                {
                    // Encode to the configured format and upload into the texture.
                    texture.LoadImage(display.ImEncode(ext));
                    texture.Apply();
                }
            }
        }
    }
Example #2
0
        /// <summary>
        /// Create a new generation of candidate images: each child is a clone
        /// of <paramref name="image"/> with one random filled polygon drawn on it.
        /// Fills <c>children[0..GenerationNo)</c>.
        /// </summary>
        /// <param name="image">Parent image the children are cloned from.</param>
        private void Generation(IplImage image)
        {
            for (int i = 0; i < GenerationNo; i++)
            {
                // Random number of vertices. The original range (1..4) allowed
                // degenerate 1- or 2-point "polygons" that FillPoly cannot
                // render as an area; clamp to at least 3 so every child gets
                // a visible shape.
                int vertexCount = (int)((4.0 * random.NextDouble()) + 1);
                if (vertexCount < 3)
                {
                    vertexCount = 3;
                }

                // Generate random vertices inside the image bounds.
                var points = new OpenCvSharp.CPlusPlus.Point[vertexCount];
                for (int j = 0; j < vertexCount; j++)
                {
                    int y = (int)(image.Height * random.NextDouble());
                    int x = (int)(image.Width * random.NextDouble());

                    points[j] = new OpenCvSharp.CPlusPlus.Point(x, y);
                }

                // Random single-channel (grayscale) intensity.
                byte   colorInt = (byte)(255.0 * random.NextDouble());
                Scalar color    = new Scalar(colorInt);

                List<List<OpenCvSharp.CPlusPlus.Point>> polygon = new List<List<OpenCvSharp.CPlusPlus.Point>>();
                polygon.Add(points.ToList());

                // Draw the polygon onto a fresh clone of the parent. The Mat
                // returned by CvArrToMat is a header over the child's pixel
                // buffer (the drawing persists in children[i]); dispose it so
                // the header is not leaked on every iteration.
                // NOTE(review): assumes CvArrToMat shares rather than copies
                // the pixel data — confirm against the OpenCvSharp version used.
                children[i] = image.Clone();
                using (Mat childMat = Cv2.CvArrToMat(children[i]))
                {
                    Cv2.FillPoly(childMat, polygon, color);
                }
            }
        }
Example #3
0
        /// <summary>
        /// Find the best fit in the generation: the child image with the
        /// smallest norm distance to the parent.
        /// </summary>
        /// <param name="parent">Target image the children are compared against.</param>
        /// <param name="children">Candidate images of the current generation.</param>
        /// <returns>The child closest to <paramref name="parent"/>.</returns>
        private IplImage BestChild(IplImage parent, IplImage[] children)
        {
            double bestDistance = double.MaxValue;
            int    bestIndex    = 0;

            // Single pass tracking the first minimum — equivalent to the old
            // distances.Min() + Array.IndexOf, without the extra array.
            // The Mat headers from CvArrToMat are disposed so they do not
            // accumulate across iterations (the original leaked two per loop),
            // and the parent conversion is hoisted out of the loop.
            using (Mat parentMat = Cv2.CvArrToMat(parent))
            {
                for (int i = 0; i < GenerationNo; i++)
                {
                    using (Mat childMat = Cv2.CvArrToMat(children[i]))
                    {
                        double distance = Cv2.Norm(parentMat, childMat);
                        if (distance < bestDistance)
                        {
                            bestDistance = distance;
                            bestIndex    = i;
                        }
                    }
                }
            }

            return children[bestIndex];
        }
Example #4
0
    // Update is called once per frame.
    void Update()
    {
        // Grab the current camera frame.
        i_img = cam.getCameraImage();

        // Convert the camera image from BGR to HSV.
        g.convertBgrToHsv(i_img, h_img);

        // Smooth the HSV image.
        g.convertSmooothing(h_img);

        // Sample data at the configured points of the camera image.
        g.getPointData(h_img, ps_arr);


        /*     Debug (show the HSV image)     */
        /*---------------------------------------*/
        //using (var r_img = Cv2.CvArrToMat(h_img))
        //{
        //    texture.LoadImage(r_img.ImEncode(".jpeg"));
        //    texture.Apply();
        //}
        /*---------------------------------------*/
        /*     Debug (show the point data)     */
        /*----------------------------------*/
        d_img = g.getPointImage1(d_img, ps_arr);
        using (var displayMat = Cv2.CvArrToMat(d_img))
        {
            // Encode to JPEG and upload into the texture.
            texture.LoadImage(displayMat.ImEncode(".jpeg"));
            texture.Apply();
        }
        /*----------------------------------*/

        /*       Test       */
        //display3Ddot(ps_arr);
        /* ------------------ */
    }
Example #5
0
    // Update is called once per frame.
    void Update()
    {
        /*     Debug (FPS counter)     */
        frameCount++;
        float elapsed = Time.realtimeSinceStartup - prevTime;

        /* ------------------------- */

        // Reset the per-frame vectors.
        f_vec = new ArrayList();
        d_vec = new ArrayList();

        // Grab the current camera frame.
        i_img = cam.getCameraImage();

        // Convert the camera image from BGR to HSV.
        g.convertBgrToHsv(i_img, h_img);

        // Smooth the HSV image.
        g.convertSmooothing(h_img);

        // Sample data at the configured points of the camera image.
        g.getPointData(h_img, ps_arr);

        // Store the figure coordinates into the vector.
        insertDataToVector2(xdata, ydata, f_vec);

        // Clear the output image (fill it with white).
        initImg(d_img);

        // When a point lies inside the figure...
        if (isInsideOrOutside(io_flag, ps_arr))
        {
            // ...move the figure,
            overrideXYData(xdata, ydata, io_flag);

            // ...reset the monitoring-point flags,
            initMFlag(io_flag);

            // ...and re-run the inside/outside test for the monitoring points.
            getIODMonitoringPoint(f_vec, m_vec, io_flag);
        }

        // Draw the figure.
        printFigureData(d_img, f_vec);

        // Draw the inside/outside result of each monitoring point.
        printIODMonitoringPoint(d_img, io_flag);

        // Draw the hand/point information.
        printPointData(d_img, ps_arr);

        // Encode to JPEG and upload into the texture.
        using (var displayMat = Cv2.CvArrToMat(d_img))
        {
            texture.LoadImage(displayMat.ImEncode(".jpeg"));
            texture.Apply();
        }
        /*---------------------------------------*/
        /*          Debug (FPS counter)          */
        /*---------------------------------------*/
        //if (elapsed >= 0.5f)
        //{
        //    Debug.LogFormat("{0}fps", frameCount/elapsed);
        //    frameCount = 0;
        //    prevTime = Time.realtimeSinceStartup;
        //}
        /* ------------------------- */
    }
Example #6
0
        //Set Frame image from Webcam to start matching&detection
        //Registers one frame as a Mat and runs the full detect/match/RANSAC
        //pipeline, reporting the detected rectangle back to the main form.
        public static void setImg(TestSURF thisobj, Form1 mainform, IplImage imgFromCam)
        {
            // Drop this frame if a previous computation is still running.
            if (thisobj.isComputing)
            {
                return;
            }

            thisobj.isComputing = true;

            try
            {
                //---frame image's keypoints and descriptor
                KeyPoint[] f_keypoints;

                //Convert the camera image to a grayscale Mat.
                //The Mats are disposed when the frame is done so headers and
                //buffers do not leak once per frame.
                using (Mat imgOrig = Cv2.CvArrToMat(imgFromCam))
                using (Mat img = new Mat())
                using (Mat f_descriptor = new Mat())
                {
                    Cv2.CvtColor(imgOrig, img, ColorConversion.BgrToGray);

                    //---------------------1. keypoint & descriptor retrieval
                    f_keypoints = thisobj.surfobj.Detect(img);                      //SIFT keypoint
                    thisobj.surfobj.Compute(img, ref f_keypoints, f_descriptor);    //SIFT descriptor

                    //---------------------2. descriptor matching
                    DMatch[] matches;
                    try
                    {
                        matches = thisobj.fm.Match(thisobj.t_descriptor, f_descriptor);
                    }
                    catch
                    {
                        // Matching errors (e.g. empty descriptors) abort this
                        // frame; the finally block below still clears the busy
                        // flag. The original returned here with isComputing
                        // left true, which permanently blocked later frames.
                        return;
                    }

                    //Record min/max descriptor distances for choosing good matches.
                    //Iterate over matches.Length (normally equal to
                    //t_descriptor.Rows) so a short result cannot index out of range.
                    double max_dist = 0;
                    double min_dist = 100;

                    for (int i = 0; i < matches.Length; i++)
                    {
                        double dist = matches[i].Distance;

                        if (dist < min_dist)
                        {
                            min_dist = dist;
                        }
                        if (dist > max_dist)
                        {
                            max_dist = dist;
                        }
                    }

                    //---------------------3. calculating goodMatches
                    List<DMatch> good_matches = new List<DMatch>();

                    for (int i = 0; i < matches.Length; i++)
                    {
                        if (matches[i].Distance < 3 * min_dist)
                        {
                            good_matches.Add(matches[i]);
                        }
                    }

                    //Collect the matched keypoint coordinates for the target
                    //and frame images.
                    Point2d[] target_lo = new Point2d[good_matches.Count];
                    Point2d[] frame_lo  = new Point2d[good_matches.Count];

                    for (int i = 0; i < good_matches.Count; i++)
                    {
                        target_lo[i] = new Point2d(thisobj.t_keypoints[good_matches[i].QueryIdx].Pt.X,
                                                   thisobj.t_keypoints[good_matches[i].QueryIdx].Pt.Y);
                        frame_lo[i] = new Point2d(f_keypoints[good_matches[i].TrainIdx].Pt.X,
                                                  f_keypoints[good_matches[i].TrainIdx].Pt.Y);
                    }

                    //-------------------------------4. RANSAC homography
                    using (Mat hom = Cv2.FindHomography(target_lo, frame_lo, HomographyMethod.Ransac))
                    {
                        Point2d[] frame_corners = Cv2.PerspectiveTransform(thisobj.obj_corners, hom);

                        //Report the projected target corners to the UI.
                        mainform.setDetectionRec((int)frame_corners[0].X, (int)frame_corners[0].Y,
                                                 (int)frame_corners[1].X, (int)frame_corners[1].Y,
                                                 (int)frame_corners[2].X, (int)frame_corners[2].Y,
                                                 (int)frame_corners[3].X, (int)frame_corners[3].Y);
                    }

                    mainform.isComputing = false;
                }
            }
            finally
            {
                //Always release the busy flag, even on the early-return
                //error path above or an unexpected exception.
                thisobj.isComputing = false;
            }
        }