Code Example #1
 void draw(SpellBook book, Mat frame)
 {
     if (book.isTrackedm)
     {
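         // Draw a marker circle plus coordinate and color labels at the tracked position.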
         Imgproc.circle(frame, new Point(book.xm, book.ym), 20, new Scalar(255, 255, 0));
         Imgproc.putText(frame, book.xm + ", " + book.ym, new Point(book.xm - 500, book.ym + 20), 5, 5, new Scalar(255, 255, 0), 2);
         Imgproc.putText(frame, book.color, new Point(book.xm, book.ym - 20), 5, 5, new Scalar(255, 255, 0), 2);
         Debug.Log("drawing " + book.color + ": " + book.xm.ToString() + ", " + book.ym.ToString());
     }
     else
     {
         Debug.Log(book.color + ": not found");
     }
 }
Code Example #2
 protected virtual void drawPredPoints(Point[] points, Mat frame)
 {
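     // Draw each predicted point; if there are more points than colors, reuse the last color.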
     for (int i = 0; i < points.Length; i++)
     {
         if (i < pointsColors.Length)
         {
             Imgproc.circle(frame, points[i], 2, pointsColors[i], 2);
         }
         else
         {
             Imgproc.circle(frame, points[i], 2, pointsColors[pointsColors.Length - 1], 2);
         }
     }
 }
Code Example #3
        // Update is called once per frame
        void Update()
        {
            //Loop play
            if (capture.get(Videoio.CAP_PROP_POS_FRAMES) >= capture.get(Videoio.CAP_PROP_FRAME_COUNT))
            {
                capture.set(Videoio.CAP_PROP_POS_FRAMES, 0);
            }

            //Avoids the "PlayerLoop called recursively!" error on iOS; WebCamTexture is recommended there.
            if (capture.grab())
            {
                capture.retrieve(rgbMat, 0);
                Imgproc.cvtColor(rgbMat, rgbMat, Imgproc.COLOR_BGR2RGB);


                selectPoint(rgbMat);
                if (selectedPointList.Count < 2)
                {
                    foreach (var point in selectedPointList)
                    {
                        Imgproc.circle(rgbMat, point, 6, new Scalar(0, 0, 255), 2);
                    }
                }
                else
                {
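                    // Two points selected: register a KCF tracker on the rectangle they span and pick a random draw color.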
                    trackers.add(TrackerKCF.create(), rgbMat, new Rect2d(selectedPointList [0].x, selectedPointList [0].y, Math.Abs(selectedPointList [1].x - selectedPointList [0].x), Math.Abs(selectedPointList [1].y - selectedPointList [0].y)));
                    selectedPointList.Clear();
                    trackingColorList.Add(new Scalar(UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255)));
                }

                bool updated = trackers.update(rgbMat, objects);
                Debug.Log("updated " + updated);
//                if (!updated && objects.rows () > 0) {
//                    OnResetTrackerButtonClick ();
//                }


                Rect2d[] objectsArray = objects.toArray();
                for (int i = 0; i < objectsArray.Length; i++)
                {
                    Imgproc.rectangle(rgbMat, objectsArray [i].tl(), objectsArray [i].br(), trackingColorList [i], 2, 1, 0);
                }

                Imgproc.putText(rgbMat, "Please touch the screen, and select tracking regions.", new Point(5, rgbMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 0.8, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false);

                //Debug.Log ("Mat toString " + rgbMat.ToString ());

                Utils.matToTexture2D(rgbMat, texture, colors);
            }
        }
Code Example #4
File: TextureManager.cs Project: uejun/Decomeal
    public Mat create_form(Mat latteMat)
    {
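        // Speckle the image with randomly placed small dots of a fixed color (presumably simulating latte foam).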
        int    numOfForm = 800;
        int    maxRadius = 3;
        Scalar color     = new Scalar(0, 30, 84);

        for (int i = 0; i < numOfForm; i++)
        {
            int xRandom      = (int)(UnityEngine.Random.value * latteMat.cols());
            int yRandom      = (int)(UnityEngine.Random.value * latteMat.rows());
            int radiusRandom = (int)(UnityEngine.Random.value * maxRadius);
            Imgproc.circle(latteMat, new Point(xRandom, yRandom), radiusRandom, color, -1);
        }
        return(latteMat);
    }
Code Example #5
        // Update is called once per frame
        void Update()
        {
            if (webCamTextureToMatHelper.IsPlaying() && webCamTextureToMatHelper.DidUpdateThisFrame())
            {
                Mat rgbaMat = webCamTextureToMatHelper.GetMat();

                // NOTE: despite its name, grayMat holds the HSV image here.
                // COLOR_RGB2HSV expects a 3-channel input, so rgbaMat may need its alpha channel dropped first.
                Imgproc.cvtColor(rgbaMat, grayMat, Imgproc.COLOR_RGB2HSV);

                Scalar lower_red  = new Scalar(145, 42, 154);
                Scalar lower_blue = new Scalar(90, 50, 50);
                Scalar upper_red  = new Scalar(255, 255, 255);
                Scalar upper_blue = new Scalar(130, 255, 255);
                // Threshold the red and blue HSV ranges separately, then OR the two masks together.
                Core.inRange(grayMat, lower_red, upper_red, redframe_threshold);
                Core.inRange(grayMat, lower_blue, upper_blue, blueframe_threshold);
                Core.bitwise_or(redframe_threshold, blueframe_threshold, frame_threshold);


                Size size = new Size(5, 5);
                // Three rounds of morphological opening (erode then dilate) to clean up the mask.
                Imgproc.erode(frame_threshold, frame_threshold, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, size));
                Imgproc.dilate(frame_threshold, frame_threshold, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, size));

                Imgproc.erode(frame_threshold, frame_threshold, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, size));
                Imgproc.dilate(frame_threshold, frame_threshold, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, size));

                Imgproc.erode(frame_threshold, frame_threshold, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, size));
                Imgproc.dilate(frame_threshold, frame_threshold, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, size));


                using (Mat circles = new Mat()) {
                    // HoughCircles(src, circles, method, dp, minDist, cannyThreshold, accumulatorThreshold, minRadius, maxRadius)
                    Imgproc.HoughCircles(frame_threshold, circles, Imgproc.CV_HOUGH_GRADIENT, 1, frame_threshold.rows() / 2, 20, 15, 15, 100);
                    Point pt = new Point();

                    for (int i = 0; i < circles.cols(); i++)
                    {
                        double[] data = circles.get(0, i);
                        pt.x = data [0];
                        pt.y = data [1];
                        double rho = data [2];
                        // Imgproc.circle (rgbaMat, pt, 3, new Scalar (255, 0, 255), 5);
                        Imgproc.circle(rgbaMat, pt, (int)rho, new Scalar(255, 0, 0, 255), 5);
                    }
                }

                Imgproc.putText(rgbaMat, "W:" + rgbaMat.width() + " H:" + rgbaMat.height() + " SO:" + Screen.orientation, new Point(5, rgbaMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false);

                Utils.matToTexture2D(rgbaMat, texture, webCamTextureToMatHelper.GetBufferColors());
            }
        }
Code Example #6
    private void DrawVoronoiDiagram(List <GraphEdge> edges)
    {
        Imgproc.rectangle(VoronoiDiagram, new Point(0, 0), new Point(width - 1, height - 1), new Scalar(255, 255, 255), -1);
        // Draw the points
        for (int i = 0; i < BestPs.Count; i++)
        {
            Point p = Vector3ToPoint(BestPs[i]);
            Imgproc.circle(VoronoiDiagram, p, 5, new Scalar(0, 0, 0), -1);
        }

        // Draw the edges
        for (int i = 0; i < edges.Count; i++)
        {
            Point p1 = new Point(edges[i].x1, edges[i].y1);
            Point p2 = new Point(edges[i].x2, edges[i].y2);
            Imgproc.line(VoronoiDiagram, p1, p2, new Scalar(0, 0, 0), 3);
        }
    }
Code Example #7
        // Update is called once per frame
        void Update()
        {
            //Loop play
            if (capture.get(Videoio.CAP_PROP_POS_FRAMES) >= capture.get(Videoio.CAP_PROP_FRAME_COUNT))
            {
                capture.set(Videoio.CAP_PROP_POS_FRAMES, 0);
            }

            //Avoids the "PlayerLoop called recursively!" error on iOS; WebCamTexture is recommended there.
            if (capture.grab())
            {
                capture.retrieve(rgbMat, 0);
                Imgproc.cvtColor(rgbMat, rgbMat, Imgproc.COLOR_BGR2RGB);


                selectPoint(rgbMat);
                if (selectedPointList.Count < 2)
                {
                    foreach (var point in selectedPointList)
                    {
                        Imgproc.circle(rgbMat, point, 6, new Scalar(0, 0, 255), 2);
                    }
                }
                else
                {
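                    // Two points selected: add a new tracking rectangle spanning them and pick a random draw color.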
                    trackers.add(rgbMat, new Rect2d(selectedPointList [0].x, selectedPointList [0].y, Math.Abs(selectedPointList [1].x - selectedPointList [0].x), Math.Abs(selectedPointList [1].y - selectedPointList [0].y)));
                    selectedPointList.Clear();
                    trackingColorList.Add(new Scalar(UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255)));
                }

                trackers.update(rgbMat, objects);

                Rect2d[] objectsArray = objects.toArray();
                for (int i = 0; i < objectsArray.Length; i++)
                {
                    Imgproc.rectangle(rgbMat, objectsArray [i].tl(), objectsArray [i].br(), trackingColorList [i], 2, 1, 0);
                }


                //Debug.Log ("Mat toString " + rgbMat.ToString ());

                Utils.matToTexture2D(rgbMat, texture, colors);
            }
        }
Code Example #8
    public void RenderTracerToScreen(Point tracerPoint1, Point tracerPoint2, Scalar tracerHue, Scalar tracerNew)
    {
        Point ScaledPoint  = new Point(tracerPoint1.x * 4, tracerPoint1.y * 4);
        Point ScaledPoint2 = new Point(tracerPoint2.x * 4, tracerPoint2.y * 4);
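        // Tracer points are scaled up 4x, presumably to match the resolution of MatDisplayx2.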

        if (bSwitchAngleSpeed)
        {
            //show dots
            Imgproc.circle(MatDisplayx2, ScaledPoint, 3, tracerHue, 1);             //SHOW EITHER FOR ANGLE OR SPEED

            //show lines
            Imgproc.line(MatDisplayx2, ScaledPoint, ScaledPoint2, tracerHue);              //SHOW EITHER FOR ANGLE OR SPEED
        }
        else
        {
            Imgproc.circle(MatDisplayx2, ScaledPoint, 3, tracerNew, 1);
            Imgproc.line(MatDisplayx2, ScaledPoint, ScaledPoint2, tracerNew);
        }
    }
Code Example #9
    private void drawObject(List <TangramObject> theColorObjects, Mat frame, Mat temp, List <MatOfPoint> contours, Mat hierarchy)
    {
        for (int i = 0; i < theColorObjects.Count; i++)
        {
            var colorHSV = theColorObjects[i].getColor();
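            // Convert the object's HSV color to RGB by passing a 1x1 Mat through cvtColor.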
            Mat rgb      = new Mat();
            Mat hsv      = new Mat(1, 1, CvType.CV_8UC3, colorHSV);
            Imgproc.cvtColor(hsv, rgb, Imgproc.COLOR_HSV2RGB);
            var    colorRGB = rgb.get(0, 0);
            Scalar c        = new Scalar(colorRGB[0], colorRGB[1], colorRGB[2]);

            Imgproc.drawContours(frame, contours, i, c, 3, 8, hierarchy, int.MaxValue, new Point());
            Imgproc.circle(frame, new Point(theColorObjects[i].getXPos(), theColorObjects[i].getYPos()), 5, c);
            Imgproc.putText(frame, theColorObjects[i].getXPos() + " , " + theColorObjects[i].getYPos()
                            , new Point(theColorObjects[i].getXPos(), theColorObjects[i].getYPos() + 20), 1, 1, c, 2);
            Imgproc.putText(frame, theColorObjects[i].getType(),
                            new Point(theColorObjects[i].getXPos(), theColorObjects[i].getYPos() - 20), 1, 2, c, 2);
        }
    }
Code Example #10
    float GetDistanceToWall(Point pt_start, Point pt_end)
    {
        float distance = 0;
        int   check_x  = 0;
        int   check_y  = 0;

        // Walk from pt_start toward pt_end, interpolating x along the line, until a nonzero (wall) pixel is hit.
        for (int i = (int)pt_start.y; i >= (int)pt_end.y; i--)
        {
            check_x = (int)Math.Round(pt_start.x + ((pt_end.x - pt_start.x) / (pt_start.y - pt_end.y) * (pt_start.y - i)), 1);
            check_y = i;
            double[] buff = cameraMat.get(check_y, check_x);
            if (buff[0] != 0)
            {
                break;
            }
            distance++;
        }
        Imgproc.circle(cameraMat, new Point(check_x, check_y), 2, new Scalar(100), 1);
        //Debug.Log((check_x, check_y, distance));
        return(distance);
    }
Code Example #11
    void Start()
    {
        srcMat  = Imgcodecs.imread(Application.dataPath + "/Textures/feature.jpg", 1);
        grayMat = new Mat();
        // imread (flag 1) returns a 3-channel BGR Mat, so COLOR_RGBA2GRAY (4-channel) would fail here.
        Imgproc.cvtColor(srcMat, grayMat, Imgproc.COLOR_BGR2GRAY);

        //A pentagon can be mistaken for a circle; blur first to improve accuracy.
        Imgproc.GaussianBlur(grayMat, grayMat, new Size(7, 7), 2, 2);

        Mat circles = new Mat();

        //Hough circle detection: HoughCircles(src, circles, method, dp, minDist, cannyThreshold, accumulatorThreshold, minRadius, maxRadius)
        Imgproc.HoughCircles(grayMat, circles, Imgproc.CV_HOUGH_GRADIENT, 2, 10, 160, 50, 10, 40);
        //Debug.Log(circles);

        //Circle center coordinates
        Point pt = new Point();

        for (int i = 0; i < circles.cols(); i++)
        {
            double[] data = circles.get(0, i);
            pt.x = data[0];
            pt.y = data[1];
            double rho = data[2];
            //Draw the circle center
            Imgproc.circle(srcMat, pt, 3, new Scalar(255, 255, 0), -1, 8, 0);
            //Draw the circle outline
            Imgproc.circle(srcMat, pt, (int)rho, new Scalar(255, 0, 0, 255), 5);
        }

        //Write text on the Mat
        Imgproc.putText(srcMat, "W:" + srcMat.width() + " H:" + srcMat.height(), new Point(5, srcMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false);

        Texture2D t2d = new Texture2D(srcMat.width(), srcMat.height());
        Sprite    sp  = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);

        m_showImage.sprite         = sp;
        m_showImage.preserveAspect = true;
        Utils.matToTexture2D(srcMat, t2d);
    }
Code Example #12
    // Start is called before the first frame update
    void Start()
    {
        Texture2D sourceTexture = Resources.Load("maker") as Texture2D;
        Mat       inputMat      = new Mat(sourceTexture.height, sourceTexture.width, CvType.CV_8UC4);

        Utils.texture2DToMat(sourceTexture, inputMat);
        UnityEngine.Debug.Log("inputMat.ToString() " + inputMat.ToString());

        Mat src_mat = new Mat(4, 1, CvType.CV_32FC2);
        Mat dst_mat = new Mat(4, 1, CvType.CV_32FC2);

        Mat outputMat = inputMat.clone();

        // Source quad: the four corners of the image (TL, TR, BL, BR).
        src_mat.put(0, 0, 0.0, 0.0, sourceTexture.width, 0.0, 0.0, sourceTexture.height, sourceTexture.width, sourceTexture.height);
        // Destination quad: identical, except the top-right corner is pulled down to y = 100.
        dst_mat.put(0, 0, 0.0, 0.0, sourceTexture.width, 100.0, 0.0, sourceTexture.height, sourceTexture.width, sourceTexture.height);

        Mat perspectiveTransform = Imgproc.getPerspectiveTransform(src_mat, dst_mat);

        Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(sourceTexture.width, sourceTexture.height));

        Texture2D outputTexture = new Texture2D(outputMat.cols(), outputMat.rows(), TextureFormat.RGBA32, false);
        Texture2D inputTexture  = new Texture2D(inputMat.cols(), inputMat.rows(), TextureFormat.RGBA32, false);

        #region CIRCLE POINT
        Imgproc.circle(inputMat, new Point(0, 0), 4, new Scalar(255, 0, 0), 8);
        Imgproc.circle(inputMat, new Point(sourceTexture.width, 0), 4, new Scalar(255, 0, 0), 8);
        Imgproc.circle(inputMat, new Point(0, sourceTexture.height), 4, new Scalar(255, 0, 0), 8);
        Imgproc.circle(inputMat, new Point(sourceTexture.width, sourceTexture.height), 4, new Scalar(255, 0, 0), 8);

        Imgproc.circle(outputMat, new Point(0, 0), 4, new Scalar(0, 0, 255), 8);
        Imgproc.circle(outputMat, new Point(sourceTexture.width, 100), 4, new Scalar(0, 0, 255), 8);
        Imgproc.circle(outputMat, new Point(0, sourceTexture.height), 4, new Scalar(0, 0, 255), 8);
        Imgproc.circle(outputMat, new Point(sourceTexture.width, sourceTexture.height), 4, new Scalar(0, 0, 255), 8);
        #endregion

        Utils.matToTexture2D(outputMat, outputTexture);
        Utils.matToTexture2D(inputMat, inputTexture);
        perQuad.GetComponent <Renderer>().material.mainTexture = outputTexture;
        oriQuad.GetComponent <Renderer>().material.mainTexture = inputTexture;
    }
Code Example #13
        private void drawAxis(Mat img, Point start_pt, Point vec, Scalar color, double length)
        {
            int CV_AA = 16;

            Point end_pt = new Point(start_pt.x + length * vec.x, start_pt.y + length * vec.y);

            Imgproc.circle(img, start_pt, 5, color, 1);

            Imgproc.line(img, start_pt, end_pt, color, 1, CV_AA, 0);


            // Angle of the axis; the two arrowhead strokes below branch off at +/- 45 degrees from it.
            double angle = System.Math.Atan2(vec.y, vec.x);

            double qx0 = end_pt.x - 9 * System.Math.Cos(angle + System.Math.PI / 4);
            double qy0 = end_pt.y - 9 * System.Math.Sin(angle + System.Math.PI / 4);

            Imgproc.line(img, end_pt, new Point(qx0, qy0), color, 1, CV_AA, 0);

            double qx1 = end_pt.x - 9 * System.Math.Cos(angle - System.Math.PI / 4);
            double qy1 = end_pt.y - 9 * System.Math.Sin(angle - System.Math.PI / 4);

            Imgproc.line(img, end_pt, new Point(qx1, qy1), color, 1, CV_AA, 0);
        }
Code Example #14
        private void drawDebugFacePoints()
        {
            int index = faceChangeData.Count;

            foreach (FaceChangeData data in faceChangeData)
            {
                getFacePoints(data.target_landmark_points, target_points, target_affine_transform_keypoints);
                for (int i = 0; i < target_points.Length; i++)
                {
                    Imgproc.circle(target_frame_full, target_points[i], 1, new Scalar(255, 0, 0, 255), 2, Core.LINE_AA, 0);
                }
                for (int i = 0; i < target_affine_transform_keypoints.Length; i++)
                {
                    Imgproc.circle(target_frame_full, target_affine_transform_keypoints[i], 1, new Scalar(0, 255, 0, 255), 2, Core.LINE_AA, 0);
                }

                getFacePoints(data.source_landmark_points, source_points, source_affine_transform_keypoints);
                for (int i = 0; i < source_points.Length; i++)
                {
                    Imgproc.circle(data.source_frame, source_points[i], 1, new Scalar(255, 0, 0, 255), 2, Core.LINE_AA, 0);
                }
                for (int i = 0; i < source_affine_transform_keypoints.Length; i++)
                {
                    Imgproc.circle(data.source_frame, source_affine_transform_keypoints[i], 1, new Scalar(0, 255, 0, 255), 2, Core.LINE_AA, 0);
                }

                //
                target_rect = Imgproc.boundingRect(new MatOfPoint(target_points));
                source_rect = Imgproc.boundingRect(new MatOfPoint(source_points));
                Scalar color = new Scalar(127, 127, (int)(255 / faceChangeData.Count) * index, 255);
                Imgproc.rectangle(target_frame_full, this.target_rect.tl(), this.target_rect.br(), color, 1, Imgproc.LINE_8, 0);
                Imgproc.rectangle(data.source_frame, this.source_rect.tl(), this.source_rect.br(), color, 1, Imgproc.LINE_8, 0);
                //

                index--;
            }
        }
Code Example #15
        // Use this for initialization

        // Update is called once per frame
        public void UpdateScreen(Mat mat)
        {
            using (MatOfKeyPoint keypoints = new MatOfKeyPoint())
                using (Mat descriptors = new Mat())
                {
                    detector.detect(mat, keypoints);
                    //extractor.compute(mat, keypoints, descriptors);

                    var Points = keypoints.toArray();
                    foreach (KeyPoint kp in Points)
                    {
                        // Parenthesize before casting; "(int)kp.pt.x / mat.width()" would integer-divide to 0.
                        int    a     = (int)(kp.pt.x / mat.width() * 250);
                        int    b     = (int)(kp.pt.y / mat.height() * 250);
                        Scalar color = new Scalar(255, b, a, 100);
                        switch (Random.Range(0, 3))
                        {
                        case 0:
                            color = new Scalar(255, a, b, 100);
                            break;

                        case 1:
                            color = new Scalar(a, 255, b, 100);
                            break;

                        case 2:
                            color = new Scalar(a, b, 255, 100);
                            break;
                        }
                        Imgproc.circle(mat, kp.pt, 4, color, -1);
                    }
                    text = string.Format("PointFeature Count : {0}.", Points.Length);
                    DestroyImmediate(tex);
                    tex = new Texture2D(ARCameraManager.Instance.Width, ARCameraManager.Instance.Height);
                    OpenCVForUnity.Utils.matToTexture2D(mat, tex);
                    ARCameraManager.Instance.UpdateScreenTexture(tex);
                }
        }
Code Example #16
 private void drawDebugFacePoints()
 {
     for (int i = 0; i < points_ann.Length; i++)
     {
         Imgproc.circle(small_frame, points_ann[i], 1, new Scalar(255, 0, 0, 255), 2, Imgproc.LINE_AA, 0);
     }
     for (int i = 0; i < affine_transform_keypoints_ann.Length; i++)
     {
         Imgproc.circle(small_frame, affine_transform_keypoints_ann[i], 1, new Scalar(0, 255, 0, 255), 2, Imgproc.LINE_AA, 0);
     }
     for (int i = 0; i < points_bob.Length; i++)
     {
         Imgproc.circle(small_frame, points_bob[i], 1, new Scalar(255, 0, 0, 255), 2, Imgproc.LINE_AA, 0);
     }
     for (int i = 0; i < affine_transform_keypoints_bob.Length; i++)
     {
         Imgproc.circle(small_frame, affine_transform_keypoints_bob[i], 1, new Scalar(0, 255, 0, 255), 2, Imgproc.LINE_AA, 0);
     }
     //
     Imgproc.rectangle(small_frame, new Point(1, 1), new Point(small_frame_size.width - 2, small_frame_size.height - 2), new Scalar(255, 0, 0, 255), 2, Imgproc.LINE_8, 0);
     Imgproc.rectangle(small_frame, this.rect_ann.tl(), this.rect_ann.br(), new Scalar(255, 0, 0, 255), 1, Imgproc.LINE_8, 0);
     Imgproc.rectangle(small_frame, this.rect_bob.tl(), this.rect_bob.br(), new Scalar(255, 0, 0, 255), 1, Imgproc.LINE_8, 0);
     //
 }
Code Example #17
        private void DrawFaceLandmark(Mat imgMat, List <Point> points, Scalar color, int thickness, bool drawIndexNumbers = false)
        {
            if (points.Count == 5)
            {
                Imgproc.line(imgMat, points [0], points [1], color, thickness);
                Imgproc.line(imgMat, points [1], points [4], color, thickness);
                Imgproc.line(imgMat, points [4], points [3], color, thickness);
                Imgproc.line(imgMat, points [3], points [2], color, thickness);
            }
            else if (points.Count == 68)
            {
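                // Standard 68-point landmark layout: 0-16 jaw, 17-26 eyebrows, 27-35 nose, 36-47 eyes, 48-67 mouth.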
                for (int i = 1; i <= 16; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }

                for (int i = 28; i <= 30; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }

                for (int i = 18; i <= 21; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }
                for (int i = 23; i <= 26; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }
                for (int i = 31; i <= 35; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }
                Imgproc.line(imgMat, points [30], points [35], color, thickness);

                for (int i = 37; i <= 41; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }
                Imgproc.line(imgMat, points [36], points [41], color, thickness);

                for (int i = 43; i <= 47; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }
                Imgproc.line(imgMat, points [42], points [47], color, thickness);

                for (int i = 49; i <= 59; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }
                Imgproc.line(imgMat, points [48], points [59], color, thickness);

                for (int i = 61; i <= 67; ++i)
                {
                    Imgproc.line(imgMat, points [i], points [i - 1], color, thickness);
                }
                Imgproc.line(imgMat, points [60], points [67], color, thickness);
            }
            else
            {
                for (int i = 0; i < points.Count; i++)
                {
                    Imgproc.circle(imgMat, points [i], 2, color, -1);
                }
            }

            // Draw the index number of facelandmark points.
            if (drawIndexNumbers)
            {
                for (int i = 0; i < points.Count; ++i)
                {
                    Imgproc.putText(imgMat, i.ToString(), points [i], Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(255, 255, 255, 255), 1, Imgproc.LINE_AA, false);
                }
            }
        }
Code Example #18
        /// <summary>
        /// Processes points by filter.
        /// </summary>
        /// <param name="img">Image mat.</param>
        /// <param name="srcPoints">Input points.</param>
        /// <param name="dstPoints">Output points.</param>
        /// <param name="drawDebugPoints">if true, draws debug points.</param>
        /// <returns>Output points.</returns>
        public override List <Vector2> Process(Mat img, List <Vector2> srcPoints, List <Vector2> dstPoints = null, bool drawDebugPoints = false)
        {
            if (srcPoints != null && srcPoints.Count != numberOfElements)
            {
                throw new ArgumentException("The number of elements is different.");
            }

            if (srcPoints == null)
            {
                return(dstPoints == null ? srcPoints : dstPoints);
            }

            if (!flag)
            {
                if (img.channels() == 4)
                {
                    Imgproc.cvtColor(img, prevgray, Imgproc.COLOR_RGBA2GRAY);
                }
                else if (img.channels() == 3)
                {
                    Imgproc.cvtColor(img, prevgray, Imgproc.COLOR_RGB2GRAY);
                }
                else
                {
                    if (prevgray.total() == 0)
                    {
                        prevgray = img.clone();
                    }
                    else
                    {
                        img.copyTo(prevgray);
                    }
                }

                for (int i = 0; i < numberOfElements; i++)
                {
                    prevTrackPts[i] = new Point(srcPoints[i].x, srcPoints[i].y);
                }

                flag = true;
            }

            if (srcPoints != null)
            {
                if (dstPoints == null)
                {
                    dstPoints = new List <Vector2>();
                }
                if (dstPoints != null && dstPoints.Count != numberOfElements)
                {
                    dstPoints.Clear();
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        dstPoints.Add(new Vector2());
                    }
                }

                if (img.channels() == 4)
                {
                    Imgproc.cvtColor(img, gray, Imgproc.COLOR_RGBA2GRAY);
                }
                else if (img.channels() == 3)
                {
                    Imgproc.cvtColor(img, gray, Imgproc.COLOR_RGB2GRAY);
                }
                else
                {
                    if (gray.total() == 0)
                    {
                        gray = img.clone();
                    }
                    else
                    {
                        img.copyTo(gray);
                    }
                }

                if (prevgray.total() > 0)
                {
                    mOP2fPrevTrackPts.fromList(prevTrackPts);
                    mOP2fNextTrackPts.fromList(nextTrackPts);
                    Video.calcOpticalFlowPyrLK(prevgray, gray, mOP2fPrevTrackPts, mOP2fNextTrackPts, status, err);
                    prevTrackPts = mOP2fPrevTrackPts.toList();
                    nextTrackPts = mOP2fNextTrackPts.toList();

                    // calc diffDlib
                    prevTrackPtsMat.fromList(prevTrackPts);
                    OpenCVForUnity.CoreModule.Rect rect = Imgproc.boundingRect(prevTrackPtsMat);
                    double diffDlib = this.diffDlib * rect.area() / 40000.0 * diffCheckSensitivity;

                    // if the face is moving too fast, fall back to the dlib detection result
                    double diff = calDistanceDiff(prevTrackPts, nextTrackPts);
                    if (drawDebugPoints)
                    {
                        Debug.Log("variance:" + diff);
                    }
                    if (diff > diffDlib)
                    {
                        for (int i = 0; i < numberOfElements; i++)
                        {
                            nextTrackPts[i].x = srcPoints[i].x;
                            nextTrackPts[i].y = srcPoints[i].y;

                            dstPoints[i] = srcPoints[i];
                        }

                        if (drawDebugPoints)
                        {
                            Debug.Log("DLIB");
                            for (int i = 0; i < numberOfElements; i++)
                            {
                                Imgproc.circle(img, new Point(srcPoints[i].x, srcPoints[i].y), 2, new Scalar(255, 0, 0, 255), -1);
                            }
                        }
                    }
                    else
                    {
                        // In this case, use Optical Flow
                        for (int i = 0; i < numberOfElements; i++)
                        {
                            dstPoints[i] = new Vector2((float)nextTrackPts[i].x, (float)nextTrackPts[i].y);
                        }

                        if (drawDebugPoints)
                        {
                            Debug.Log("Optical Flow");
                            for (int i = 0; i < numberOfElements; i++)
                            {
                                Imgproc.circle(img, nextTrackPts[i], 2, new Scalar(0, 0, 255, 255), -1);
                            }
                        }
                    }
                }
                Swap(ref prevTrackPts, ref nextTrackPts);
                Swap(ref prevgray, ref gray);
            }
            return(dstPoints);
        }
Code Example #19
        // Update is called once per frame
        void Update()
        {
            if (webCamTextureToMatHelper.isPlaying() && webCamTextureToMatHelper.didUpdateThisFrame())
            {
                Mat rgbaMat = webCamTextureToMatHelper.GetMat();

                if (mMOP2fptsPrev.rows() == 0)
                {
                    // first time through the loop so we need prev and this mats
                    // plus prev points
                    // get this mat
                    Imgproc.cvtColor(rgbaMat, matOpFlowThis, Imgproc.COLOR_RGBA2GRAY);

                    // copy that to prev mat
                    matOpFlowThis.copyTo(matOpFlowPrev);

                    // get prev corners
                    Imgproc.goodFeaturesToTrack(matOpFlowPrev, MOPcorners, iGFFTMax, 0.05, 20);
                    mMOP2fptsPrev.fromArray(MOPcorners.toArray());

                    // get safe copy of this corners
                    mMOP2fptsPrev.copyTo(mMOP2fptsSafe);
                }
                else
                {
                    // we've been through before so
                    // this mat is valid. Copy it to prev mat
                    matOpFlowThis.copyTo(matOpFlowPrev);

                    // get this mat
                    Imgproc.cvtColor(rgbaMat, matOpFlowThis, Imgproc.COLOR_RGBA2GRAY);

                    // get the corners for this mat
                    Imgproc.goodFeaturesToTrack(matOpFlowThis, MOPcorners, iGFFTMax, 0.05, 20);
                    mMOP2fptsThis.fromArray(MOPcorners.toArray());

                    // retrieve the corners from the prev mat
                    // (saves calculating them again)
                    mMOP2fptsSafe.copyTo(mMOP2fptsPrev);

                    // and save this corners for next time through

                    mMOP2fptsThis.copyTo(mMOP2fptsSafe);
                }


                /*
                 *  Parameters:
                 *      prevImg first 8-bit input image
                 *      nextImg second input image
                 *      prevPts vector of 2D points for which the flow needs to be found; point coordinates must be single-precision floating-point numbers.
                 *      nextPts output vector of 2D points (with single-precision floating-point coordinates) containing the calculated new positions of input features in the second image; when OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
                 *      status output status vector (of unsigned chars); each element of the vector is set to 1 if the flow for the corresponding features has been found, otherwise, it is set to 0.
                 *      err output vector of errors; each element of the vector is set to an error for the corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't found then the error is not defined (use the status parameter to find such cases).
                 */
                Video.calcOpticalFlowPyrLK(matOpFlowPrev, matOpFlowThis, mMOP2fptsPrev, mMOP2fptsThis, mMOBStatus, mMOFerr);

                if (!mMOBStatus.empty())
                {
                    List <Point> cornersPrev = mMOP2fptsPrev.toList();
                    List <Point> cornersThis = mMOP2fptsThis.toList();
                    List <byte>  byteStatus  = mMOBStatus.toList();

                    int x = 0;
                    int y = byteStatus.Count - 1;

                    for (x = 0; x < y; x++)
                    {
                        if (byteStatus [x] == 1)
                        {
                            Point pt  = cornersThis [x];
                            Point pt2 = cornersPrev [x];

                            Imgproc.circle(rgbaMat, pt, 5, colorRed, iLineThickness - 1);

                            Imgproc.line(rgbaMat, pt, pt2, colorRed, iLineThickness);
                        }
                    }
                }

//              Imgproc.putText (rgbaMat, "W:" + rgbaMat.width () + " H:" + rgbaMat.height () + " SO:" + Screen.orientation, new Point (5, rgbaMat.rows () - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar (255, 255, 255, 255), 2, Imgproc.LINE_AA, false);

                Utils.matToTexture2D(rgbaMat, texture, webCamTextureToMatHelper.GetBufferColors());
            }
        }
Code Example #20
        //Detect the hand and draw it onto the image
        private static void _handPoseEstimationProcess(Mat rgbaMat, Color handColor)
        {
            Imgproc.GaussianBlur(rgbaMat, rgbaMat, new OpenCVForUnity.Size(3, 3), 1, 1);

            //Set the color on the detector
            detector.setHsvColor(HGColorSpuiter.ColorToScalar(handColor));

            //getContours() appears to return the detector's live internal list, which process() below refills
            List <MatOfPoint> contours = detector.getContours();

            detector.process(rgbaMat);
            if (contours.Count <= 0)
            {
                return;
            }

            //Create a rotated bounding rectangle tilted to the hand's angle
            RotatedRect rect = Imgproc.minAreaRect(new MatOfPoint2f(contours[0].toArray()));

            double boundWidth  = rect.size.width;
            double boundHeight = rect.size.height;
            int    boundPos    = 0;

            for (int i = 1; i < contours.Count; i++)
            {
                rect = Imgproc.minAreaRect(new MatOfPoint2f(contours[i].toArray()));
                if (rect.size.width * rect.size.height > boundWidth * boundHeight)
                {
                    boundWidth  = rect.size.width;
                    boundHeight = rect.size.height;
                    boundPos    = i;
                }
            }

            OpenCVForUnity.Rect boundRect = Imgproc.boundingRect(new MatOfPoint(contours[boundPos].toArray()));
            //Draw the region extending down to the wrist
            Imgproc.rectangle(rgbaMat, boundRect.tl(), boundRect.br(), HGColorSpuiter.ColorToScalar(WristRangeColor), 2, 8, 0);

            double a = boundRect.br().y - boundRect.tl().y;

            a = a * 0.7;
            a = boundRect.tl().y + a;

            //Draw the palm region
            Imgproc.rectangle(rgbaMat, boundRect.tl(), new Point(boundRect.br().x, a), HGColorSpuiter.ColorToScalar(PalmsRangeColor), 2, 8, 0);

            //Approximate the contour with a polygon that has fewer vertices, keeping the deviation within the specified accuracy
            MatOfPoint2f pointMat = new MatOfPoint2f();

            Imgproc.approxPolyDP(new MatOfPoint2f(contours[boundPos].toArray()), pointMat, 3, true);
            contours[boundPos] = new MatOfPoint(pointMat.toArray());

            //Compute the convex hull and its convexity defects
            MatOfInt  hull         = new MatOfInt();
            MatOfInt4 convexDefect = new MatOfInt4();

            Imgproc.convexHull(new MatOfPoint(contours[boundPos].toArray()), hull);
            if (hull.toArray().Length < 3)
            {
                return;
            }
            Imgproc.convexityDefects(new MatOfPoint(contours[boundPos].toArray()), hull, convexDefect);

            //Get the hand region (hull points)
            List <MatOfPoint> hullPoints = new List <MatOfPoint>();
            List <Point>      listPo     = new List <Point>();

            for (int j = 0; j < hull.toList().Count; j++)
            {
                listPo.Add(contours[boundPos].toList()[hull.toList()[j]]);
            }

            MatOfPoint e = new MatOfPoint();

            e.fromList(listPo);
            hullPoints.Add(e);

            //Draw the hand region
            Imgproc.drawContours(rgbaMat, hullPoints, -1, HGColorSpuiter.ColorToScalar(HandRangeColor), 3);

            //Get the points recognized as fingers (convexity defects)
            List <MatOfPoint> defectPoints = new List <MatOfPoint>();
            List <Point>      listPoDefect = new List <Point>();

            for (int j = 0; j < convexDefect.toList().Count; j = j + 4)
            {
                Point farPoint = contours[boundPos].toList()[convexDefect.toList()[j + 2]];
                int   depth    = convexDefect.toList()[j + 3];
                if (depth > depthThreashold && farPoint.y < a)
                {
                    listPoDefect.Add(contours[boundPos].toList()[convexDefect.toList()[j + 2]]);
                }
            }

            MatOfPoint e2 = new MatOfPoint();

            e2.fromList(listPoDefect);
            defectPoints.Add(e2);

            //Update the number of detected fingers
            numberOfFingers = listPoDefect.Count;
            if (numberOfFingers > 5)
            {
                numberOfFingers = 5;
            }

            //Draw a dot between the fingers
            foreach (Point p in listPoDefect)
            {
                Imgproc.circle(rgbaMat, p, 6, HGColorSpuiter.ColorToScalar(BetweenFingersColor), -1);
            }
        }
Code Example #21
        /// <summary>
        /// Processes points by filter.
        /// </summary>
        /// <param name="img">Image mat.</param>
        /// <param name="srcPoints">Input points.</param>
        /// <param name="dstPoints">Output points.</param>
        /// <param name="drawDebugPoints">if true, draws debug points.</param>
        /// <returns>Output points.</returns>
        public override List <Vector2> Process(Mat img, List <Vector2> srcPoints, List <Vector2> dstPoints = null, bool drawDebugPoints = false)
        {
            if (srcPoints != null && srcPoints.Count != numberOfElements)
            {
                throw new ArgumentException("The number of elements is different.");
            }

            if (srcPoints != null)
            {
                if (dstPoints == null)
                {
                    dstPoints = new List <Vector2> ();
                }
                if (dstPoints != null && dstPoints.Count != numberOfElements)
                {
                    dstPoints.Clear();
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        dstPoints.Add(new Vector2());
                    }
                }

                if (flag)
                {
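                    // After the first frame: accept a new point only if it moved farther than the low-pass threshold; otherwise hold the previous point.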
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        double diff = Math.Sqrt(Math.Pow(srcPoints [i].x - lastPoints [i].x, 2.0) + Math.Pow(srcPoints [i].y - lastPoints [i].y, 2.0));
                        if (diff > diffLawPass)
                        {
                            lastPoints [i] = srcPoints [i];
                            if (drawDebugPoints)
                            {
                                Imgproc.circle(img, new Point(srcPoints [i].x, srcPoints [i].y), 1, new Scalar(0, 255, 0, 255), -1);
                            }
                        }
                        else
                        {
                            if (drawDebugPoints)
                            {
                                Imgproc.circle(img, new Point(lastPoints [i].x, lastPoints [i].y), 1, new Scalar(255, 0, 0, 255), -1);
                            }
                        }
                        dstPoints [i] = lastPoints [i];
                    }
                }
                else
                {
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        lastPoints [i] = srcPoints [i];
                        dstPoints [i]  = srcPoints [i];
                    }
                    if (drawDebugPoints)
                    {
                        for (int i = 0; i < numberOfElements; i++)
                        {
                            Imgproc.circle(img, new Point(srcPoints [i].x, srcPoints [i].y), 1, new Scalar(0, 0, 255, 255), -1);
                        }
                    }
                    flag = true;
                }
                return(dstPoints);
            }
            else
            {
                return(dstPoints == null ? srcPoints : dstPoints);
            }
        }
Code Example #22
    void handleCalibration()
    {
        for (int i = 0; i < AK_receiver.GetComponent <akplay>().camInfoList.Count; i++)
        {
            //create color mat:
            byte[]   colorBytes = ((Texture2D)(AK_receiver.GetComponent <akplay>().camInfoList[i].colorCube.GetComponent <Renderer>().material.mainTexture)).GetRawTextureData();
            GCHandle ch         = GCHandle.Alloc(colorBytes, GCHandleType.Pinned);
            Mat      colorMat   = new Mat(AK_receiver.GetComponent <akplay>().camInfoList[i].color_height, AK_receiver.GetComponent <akplay>().camInfoList[i].color_width, CvType.CV_8UC4);
            Utils.copyToMat(ch.AddrOfPinnedObject(), colorMat);
            ch.Free();

            //OpenCVForUnity.CoreModule.Core.flip(colorMat, colorMat, 0);

            //detect a chessboard in the image, and refine the points, and save the pixel positions:
            MatOfPoint2f positions = new MatOfPoint2f();
            int          resizer   = 4;
            resizer = 1;                   //noresize!
            Mat colorMatSmall = new Mat(); //~27 ms each
            Imgproc.resize(colorMat, colorMatSmall, new Size(colorMat.cols() / resizer, colorMat.rows() / resizer));
            bool success = Calib3d.findChessboardCorners(colorMatSmall, new Size(7, 7), positions);
            for (int ss = 0; ss < positions.rows(); ss++)
            {
                double[] data = positions.get(ss, 0);
                data[0] = data[0] * resizer;
                data[1] = data[1] * resizer;

                positions.put(ss, 0, data);
            }

            //subpixel, drawing chessboard, and getting orange blobs takes 14ms
            TermCriteria tc = new TermCriteria();
            Imgproc.cornerSubPix(colorMat, positions, new Size(5, 5), new Size(-1, -1), tc);

            Mat chessboardResult = new Mat();
            colorMat.copyTo(chessboardResult);
            Calib3d.drawChessboardCorners(chessboardResult, new Size(7, 7), positions, success);



            //Find the orange blobs:
            Mat       orangeMask = new Mat();
            Vector2[] blobs      = getOrangeBlobs(ref colorMat, ref orangeMask);

            //find blob closest to chessboard
            if (success && (blobs.Length > 0))
            {
                Debug.Log("found a chessboard and blobs for camera: " + i);

                // time to get pin1 and chessboard positions: 27ms
                //find pin1:
                Point closestBlob = new Point();
                int   pin1idx     = getPin1(positions, blobs, ref closestBlob);
                Imgproc.circle(chessboardResult, new Point(positions.get(pin1idx, 0)[0], positions.get(pin1idx, 0)[1]), 10, new Scalar(255, 0, 0), -1);
                Imgproc.circle(chessboardResult, closestBlob, 10, new Scalar(255, 255, 0), -1);


                //get world positions of chessboard
                Point[]  realWorldPointArray  = new Point[positions.rows()];
                Point3[] realWorldPointArray3 = new Point3[positions.rows()];
                Point[]  imagePointArray      = new Point[positions.rows()];
                //getChessBoardWorldPositions(positions, pin1idx, 0.0498f, ref realWorldPointArray, ref realWorldPointArray3, ref imagePointArray); //green and white checkerboard.
                getChessBoardWorldPositions(positions, pin1idx, 0.07522f, ref realWorldPointArray, ref realWorldPointArray3, ref imagePointArray); //black and white checkerboard.


                string text       = "";
                float  decimals   = 1000.0f;
                int    text_red   = 255;
                int    text_green = 0;
                int    text_blue  = 0;
                text = ((int)(realWorldPointArray3[0].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[0].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[0].z * decimals)) / decimals;
                //text = sprintf("%f,%f,%f", realWorldPointArray3[0].x, realWorldPointArray3[0].y, realWorldPointArray3[0].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(0, 0)[0], positions.get(0, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));
                text = ((int)(realWorldPointArray3[6].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[6].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[6].z * decimals)) / decimals;
                //text = sprintf("%f,%f,%f", realWorldPointArray3[0].x, realWorldPointArray3[0].y, realWorldPointArray3[0].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(6, 0)[0], positions.get(6, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));
                text = ((int)(realWorldPointArray3[42].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[42].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[42].z * decimals)) / decimals;
                //text = sprintf("%f,%f,%f", realWorldPointArray3[0].x, realWorldPointArray3[0].y, realWorldPointArray3[0].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(42, 0)[0], positions.get(42, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));
                text = ((int)(realWorldPointArray3[48].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[48].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[48].z * decimals)) / decimals;
                //text = sprintf("%2.2f,%2.2f,%2.2f", realWorldPointArray3[48].x, realWorldPointArray3[48].y, realWorldPointArray3[48].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(48, 0)[0], positions.get(48, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));



                Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
                cameraMatrix.put(0, 0, AK_receiver.GetComponent <akplay>().camInfoList[i].color_fx);
                cameraMatrix.put(1, 1, AK_receiver.GetComponent <akplay>().camInfoList[i].color_fy);
                cameraMatrix.put(0, 2, AK_receiver.GetComponent <akplay>().camInfoList[i].color_cx);
                cameraMatrix.put(1, 2, AK_receiver.GetComponent <akplay>().camInfoList[i].color_cy);

                double[] distortion = new double[8];

                distortion[0] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k1;
                distortion[1] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k2;
                distortion[2] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_p1;
                distortion[3] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_p2;
                distortion[4] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k3;
                distortion[5] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k4;
                distortion[6] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k5;
                distortion[7] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k6;


                /*
                 * distortion[0] = 0.0;
                 * distortion[1] = 0.0;
                 * distortion[2] = 0.0;
                 * distortion[3] = 0.0;
                 * distortion[4] = 0.0;
                 * distortion[5] = 0.0;
                 * distortion[6] = 0.0;
                 * distortion[7] = 0.0;
                 */

                //~1 ms to solve for pnp
                Mat  rvec           = new Mat();
                Mat  tvec           = new Mat();
                bool solvepnpSucces = Calib3d.solvePnP(new MatOfPoint3f(realWorldPointArray3), new MatOfPoint2f(imagePointArray), cameraMatrix, new MatOfDouble(distortion), rvec, tvec);

                Mat R = new Mat();
                Calib3d.Rodrigues(rvec, R);


                //calculate unity vectors, and camera transforms
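                // Camera center in world coordinates: C = -R^T * t.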
                Mat camCenter     = -R.t() * tvec;
                Mat forwardOffset = new Mat(3, 1, tvec.type());
                forwardOffset.put(0, 0, 0);
                forwardOffset.put(1, 0, 0);
                forwardOffset.put(2, 0, 1);
                Mat upOffset = new Mat(3, 1, tvec.type());
                upOffset.put(0, 0, 0);
                upOffset.put(1, 0, -1);
                upOffset.put(2, 0, 0);

                Mat forwardVectorCV = R.t() * (forwardOffset - tvec);
                forwardVectorCV = forwardVectorCV - camCenter;
                Mat upVectorCV = R.t() * (upOffset - tvec);
                upVectorCV = upVectorCV - camCenter;

                Vector3    forwardVectorUnity = new Vector3((float)forwardVectorCV.get(0, 0)[0], (float)forwardVectorCV.get(2, 0)[0], (float)forwardVectorCV.get(1, 0)[0]); //need to flip y and z due to unity coordinate system
                Vector3    upVectorUnity      = new Vector3((float)upVectorCV.get(0, 0)[0], (float)upVectorCV.get(2, 0)[0], (float)upVectorCV.get(1, 0)[0]);                //need to flip y and z due to unity coordinate system
                Vector3    camCenterUnity     = new Vector3((float)camCenter.get(0, 0)[0], (float)camCenter.get(2, 0)[0], (float)camCenter.get(1, 0)[0]);
                Quaternion rotationUnity      = Quaternion.LookRotation(forwardVectorUnity, upVectorUnity);



                GameObject colorMarker = GameObject.CreatePrimitive(PrimitiveType.Cube);
                //colorMarker.transform.localScale = new Vector3(0.1f, 0.1f, 0.2f);
                //colorMarker.transform.parent = AK_receiver.transform;
                colorMarker.layer = LayerMask.NameToLayer("Debug");
                colorMarker.transform.position = camCenterUnity;
                colorMarker.transform.rotation = Quaternion.LookRotation(forwardVectorUnity, upVectorUnity);
                colorMarker.GetComponent <Renderer>().material.color = Color.blue;

                Vector3    forwardDepth   = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.MultiplyPoint(forwardVectorUnity);
                Vector3    upDepth        = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.MultiplyPoint(upVectorUnity);
                Vector3    camCenterDepth = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.MultiplyPoint(camCenterUnity);
                Quaternion rotationDepth  = Quaternion.LookRotation(forwardDepth, upDepth);

                GameObject depthMarker = GameObject.CreatePrimitive(PrimitiveType.Cube);
                depthMarker.layer            = LayerMask.NameToLayer("Debug");
                depthMarker.transform.parent = colorMarker.transform;
                //depthMarker.transform.localScale = AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.lossyScale;

                depthMarker.transform.localRotation = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.rotation;

                Vector3 matrixPosition = new Vector3(AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.GetColumn(3).x,
                                                     AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.GetColumn(3).y,
                                                     AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.GetColumn(3).z);


                /*
                 * depthMarker.transform.localRotation = AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.rotation;
                 *
                 * Vector3 matrixPosition = new Vector3(AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.GetColumn(3).x,
                 *                                      AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.GetColumn(3).y,
                 *                                      AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.GetColumn(3).z);
                 */

                depthMarker.transform.localPosition = -matrixPosition;
                depthMarker.transform.parent        = null;

                colorMarker.transform.localScale = new Vector3(0.1f, 0.1f, 0.2f);
                depthMarker.transform.localScale = new Vector3(0.1f, 0.1f, 0.2f);

                //depthMarker.transform.parent = AK_receiver.transform;
                //depthMarker.transform.position = camCenterDepth;
                //depthMarker.transform.rotation = Quaternion.LookRotation(forwardDepth-camCenterDepth, upDepth-camCenterDepth);
                depthMarker.GetComponent <Renderer>().material.color = Color.red;


                AK_receiver.GetComponent <akplay>().camInfoList[i].visualization.transform.position = depthMarker.transform.position; //need to flip y and z due to unity coordinate system
                AK_receiver.GetComponent <akplay>().camInfoList[i].visualization.transform.rotation = depthMarker.transform.rotation;
            }


            //draw chessboard result to calibration ui:
            Texture2D colorTexture = new Texture2D(chessboardResult.cols(), chessboardResult.rows(), TextureFormat.BGRA32, false);
            colorTexture.LoadRawTextureData((IntPtr)chessboardResult.dataAddr(), (int)chessboardResult.total() * (int)chessboardResult.elemSize());
            colorTexture.Apply();
            checkerboard_display_list[i].GetComponent <Renderer>().material.mainTexture = colorTexture;

            //draw threshold to calibration ui:
            Texture2D orangeTexture = new Texture2D(orangeMask.cols(), orangeMask.rows(), TextureFormat.R8, false);
            orangeTexture.LoadRawTextureData((IntPtr)orangeMask.dataAddr(), (int)orangeMask.total() * (int)orangeMask.elemSize());
            orangeTexture.Apply();
            threshold_display_list[i].GetComponent <Renderer>().material.mainTexture = orangeTexture;
        }
    }
Code Example #23
        /// <summary>
        /// Draws a face landmark.
        /// This method supports 68,17,6,5 landmark points.
        /// </summary>
        /// <param name="imgMat">Image mat.</param>
        /// <param name="points">Points.</param>
        /// <param name="color">Color.</param>
        /// <param name="thickness">Thickness.</param>
        /// <param name="drawIndexNumbers">Determines if draw index numbers.</param>
        public static void DrawFaceLandmark(Mat imgMat, List <Vector2> points, Scalar color, int thickness, bool drawIndexNumbers = false)
        {
            if (points.Count == 5)
            {
                Imgproc.line(imgMat, new Point(points [0].x, points [0].y), new Point(points [1].x, points [1].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [1].x, points [1].y), new Point(points [4].x, points [4].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [4].x, points [4].y), new Point(points [3].x, points [3].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [3].x, points [3].y), new Point(points [2].x, points [2].y), color, thickness);
            }
            else if (points.Count == 6)
            {
                Imgproc.line(imgMat, new Point(points [2].x, points [2].y), new Point(points [3].x, points [3].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [4].x, points [4].y), new Point(points [5].x, points [5].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [3].x, points [3].y), new Point(points [0].x, points [0].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [4].x, points [4].y), new Point(points [0].x, points [0].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [0].x, points [0].y), new Point(points [1].x, points [1].y), color, thickness);
            }
            else if (points.Count == 17)
            {
                Imgproc.line(imgMat, new Point(points [2].x, points [2].y), new Point(points [9].x, points [9].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [9].x, points [9].y), new Point(points [3].x, points [3].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [3].x, points [3].y), new Point(points [10].x, points [10].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [10].x, points [10].y), new Point(points [2].x, points [2].y), color, thickness);

                Imgproc.line(imgMat, new Point(points [4].x, points [4].y), new Point(points [11].x, points [11].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [11].x, points [11].y), new Point(points [5].x, points [5].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [5].x, points [5].y), new Point(points [12].x, points [12].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [12].x, points [12].y), new Point(points [4].x, points [4].y), color, thickness);

                Imgproc.line(imgMat, new Point(points [3].x, points [3].y), new Point(points [0].x, points [0].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [4].x, points [4].y), new Point(points [0].x, points [0].y), color, thickness);
                Imgproc.line(imgMat, new Point(points [0].x, points [0].y), new Point(points [1].x, points [1].y), color, thickness);

                for (int i = 14; i <= 16; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                Imgproc.line(imgMat, new Point(points [16].x, points [16].y), new Point(points [13].x, points [13].y), color, thickness);

                for (int i = 6; i <= 8; i++)
                {
                    Imgproc.circle(imgMat, new Point(points [i].x, points [i].y), 2, color, -1);
                }
            }
            else if (points.Count == 68)
            {
                for (int i = 1; i <= 16; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }

                for (int i = 28; i <= 30; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }

                for (int i = 18; i <= 21; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                for (int i = 23; i <= 26; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                for (int i = 31; i <= 35; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                Imgproc.line(imgMat, new Point(points [30].x, points [30].y), new Point(points [35].x, points [35].y), color, thickness);

                for (int i = 37; i <= 41; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                Imgproc.line(imgMat, new Point(points [36].x, points [36].y), new Point(points [41].x, points [41].y), color, thickness);

                for (int i = 43; i <= 47; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                Imgproc.line(imgMat, new Point(points [42].x, points [42].y), new Point(points [47].x, points [47].y), color, thickness);

                for (int i = 49; i <= 59; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                Imgproc.line(imgMat, new Point(points [48].x, points [48].y), new Point(points [59].x, points [59].y), color, thickness);

                for (int i = 61; i <= 67; ++i)
                {
                    Imgproc.line(imgMat, new Point(points [i].x, points [i].y), new Point(points [i - 1].x, points [i - 1].y), color, thickness);
                }
                Imgproc.line(imgMat, new Point(points [60].x, points [60].y), new Point(points [67].x, points [67].y), color, thickness);
            }
            else
            {
                for (int i = 0; i < points.Count; i++)
                {
                    Imgproc.circle(imgMat, new Point(points [i].x, points [i].y), 2, color, -1);
                }
            }

            // Draw the index number of each face landmark point.
            if (drawIndexNumbers)
            {
                for (int i = 0; i < points.Count; ++i)
                {
                    Imgproc.putText(imgMat, i.ToString(), new Point(points [i].x, points [i].y), Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(255, 255, 255, 255), 1, Imgproc.LINE_AA, false);
                }
            }
        }
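A minimal usage sketch for DrawFaceLandmark follows; `imgMat` and `landmarks` are hypothetical placeholders (an RGBA Mat and the 68-point output of any landmark detector), not names defined in the snippet above:

        // Hedged usage sketch; GetLandmarks stands in for whatever detector you use.
        List<Vector2> landmarks = GetLandmarks(imgMat); // hypothetical detector call
        DrawFaceLandmark(imgMat, landmarks, new Scalar(0, 255, 0, 255), 2, true);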
Code Example #24
        /// <summary>
        /// Processes points by filter.
        /// </summary>
        /// <param name="img">Image mat.</param>
        /// <param name="srcPoints">Input points.</param>
        /// <param name="dstPoints">Output points.</param>
        /// <param name="drawDebugPoints">if true, draws debug points.</param>
        /// <returns>Output points.</returns>
        public override List <Vector2> Process(Mat img, List <Vector2> srcPoints, List <Vector2> dstPoints = null, bool drawDebugPoints = false)
        {
            if (srcPoints != null && srcPoints.Count != numberOfElements)
            {
                throw new ArgumentException("The number of elements is different.");
            }

            if (srcPoints != null)
            {
                if (dstPoints == null)
                {
                    dstPoints = new List <Vector2> ();
                }
                if (dstPoints.Count != numberOfElements)
                {
                    dstPoints.Clear();
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        dstPoints.Add(new Vector2());
                    }
                }

                for (int i = 0; i < numberOfElements; i++)
                {
                    src_points [i].x = srcPoints [i].x;
                    src_points [i].y = srcPoints [i].y;
                }

                // calc diffDlib
                prevTrackPtsMat.fromList(src_points);
                OpenCVForUnity.Rect rect = Imgproc.boundingRect(prevTrackPtsMat);
                double diffDlib          = this.diffDlib * rect.area() / 40000.0 * diffCheckSensitivity;

                // if the face is moving too fast, fall back to the raw dlib detection
                double diff = calDistanceDiff(src_points, last_points);
                if (drawDebugPoints)
                {
                    Debug.Log("variance:" + diff);
                }
                if (diff > diffDlib)
                {
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        dstPoints [i] = srcPoints [i];
                    }

                    if (drawDebugPoints)
                    {
                        Debug.Log("DLIB");
                        for (int i = 0; i < numberOfElements; i++)
                        {
                            Imgproc.circle(img, new Point(srcPoints [i].x, srcPoints [i].y), 2, new Scalar(255, 0, 0, 255), -1);
                        }
                    }

                    flag = false;
                }
                else
                {
                    if (!flag)
                    {
                        // Set initial state estimate.
                        Mat     statePreMat = KF.get_statePre();
                        float[] tmpStatePre = new float[statePreMat.total()];
                        for (int i = 0; i < numberOfElements; i++)
                        {
                            tmpStatePre [i * 2]     = (float)srcPoints [i].x;
                            tmpStatePre [i * 2 + 1] = (float)srcPoints [i].y;
                        }
                        statePreMat.put(0, 0, tmpStatePre);
                        Mat     statePostMat = KF.get_statePost();
                        float[] tmpStatePost = new float[statePostMat.total()];
                        for (int i = 0; i < numberOfElements; i++)
                        {
                            tmpStatePost [i * 2]     = (float)srcPoints [i].x;
                            tmpStatePost [i * 2 + 1] = (float)srcPoints [i].y;
                        }
                        statePostMat.put(0, 0, tmpStatePost);

                        flag = true;
                    }

                    // Kalman Prediction
                    KF.predict();

                    // Update Measurement
                    float[] tmpMeasurement = new float[measurement.total()];
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        tmpMeasurement [i * 2]     = (float)srcPoints [i].x;
                        tmpMeasurement [i * 2 + 1] = (float)srcPoints [i].y;
                    }
                    measurement.put(0, 0, tmpMeasurement);

                    // Correct Measurement
                    Mat     estimated    = KF.correct(measurement);
                    float[] tmpEstimated = new float[estimated.total()];
                    estimated.get(0, 0, tmpEstimated);
                    for (int i = 0; i < numberOfElements; i++)
                    {
                        predict_points [i].x = tmpEstimated [i * 2];
                        predict_points [i].y = tmpEstimated [i * 2 + 1];
                    }
                    estimated.Dispose();

                    for (int i = 0; i < numberOfElements; i++)
                    {
                        dstPoints [i] = new Vector2((float)predict_points [i].x, (float)predict_points [i].y);
                    }

                    if (drawDebugPoints)
                    {
                        Debug.Log("Kalman Filter");
                        for (int i = 0; i < numberOfElements; i++)
                        {
                            Imgproc.circle(img, predict_points [i], 2, new Scalar(0, 255, 0, 255), -1);
                        }
                    }
                }

                for (int i = 0; i < numberOfElements; i++)
                {
                    last_points [i].x = src_points [i].x;
                    last_points [i].y = src_points [i].y;
                }

                return(dstPoints);
            }
            else
            {
                return(dstPoints == null ? srcPoints : dstPoints);
            }
        }
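The filter above passes the raw dlib points through when inter-frame motion exceeds diffDlib, and otherwise smooths them with the Kalman filter. A per-frame usage sketch, assuming `filter` is an instance of this class and `detectedPoints` comes from a landmark detector (both names are placeholders):

        // Hedged sketch: feed freshly detected points in, get smoothed points back.
        List<Vector2> smoothed = filter.Process(rgbaMat, detectedPoints, null, true);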
Code Example #25
        // Update is called once per frame
        void Update()
        {
            if (webCamTextureToMatHelper.isPlaying() && webCamTextureToMatHelper.didUpdateThisFrame())
            {
                Mat rgbaMat = webCamTextureToMatHelper.GetMat();

                Imgproc.cvtColor(rgbaMat, hsvMat, Imgproc.COLOR_RGBA2RGB);
                Imgproc.cvtColor(hsvMat, hsvMat, Imgproc.COLOR_RGB2HSV);


                Point[] points = roiPointList.ToArray();

                if (roiPointList.Count == 4)
                {
                    using (Mat backProj = new Mat()) {
                        Imgproc.calcBackProject(new List <Mat> (new Mat[] { hsvMat }), new MatOfInt(0), roiHistMat, backProj, new MatOfFloat(0, 180), 1.0);

                        RotatedRect r = Video.CamShift(backProj, roiRect, termination);
                        r.points(points);
                    }

                    #if ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR)
                    //Touch
                    int touchCount = Input.touchCount;
                    if (touchCount == 1)
                    {
                        if (Input.GetTouch(0).phase == TouchPhase.Ended)
                        {
                            roiPointList.Clear();
                        }
                    }
                    #else
                    if (Input.GetMouseButtonUp(0))
                    {
                        roiPointList.Clear();
                    }
                    #endif
                }


                if (roiPointList.Count < 4)
                {
                    #if ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR)
                    //Touch
                    int touchCount = Input.touchCount;
                    if (touchCount == 1)
                    {
                        Touch t = Input.GetTouch(0);
                        if (t.phase == TouchPhase.Ended)
                        {
                            roiPointList.Add(convertScreenPoint(new Point(t.position.x, t.position.y), gameObject, Camera.main));
                            //                                  Debug.Log ("touch X " + t.position.x);
                            //                                  Debug.Log ("touch Y " + t.position.y);

                            if (!(new OpenCVForUnity.Rect(0, 0, hsvMat.width(), hsvMat.height()).contains(roiPointList [roiPointList.Count - 1])))
                            {
                                roiPointList.RemoveAt(roiPointList.Count - 1);
                            }
                        }
                    }
                    #else
                    //Mouse
                    if (Input.GetMouseButtonUp(0))
                    {
                        roiPointList.Add(convertScreenPoint(new Point(Input.mousePosition.x, Input.mousePosition.y), gameObject, Camera.main));
                        //                                              Debug.Log ("mouse X " + Input.mousePosition.x);
                        //                                              Debug.Log ("mouse Y " + Input.mousePosition.y);

                        if (!(new OpenCVForUnity.Rect(0, 0, hsvMat.width(), hsvMat.height()).contains(roiPointList [roiPointList.Count - 1])))
                        {
                            roiPointList.RemoveAt(roiPointList.Count - 1);
                        }
                    }
                    #endif


                    if (roiPointList.Count == 4)
                    {
                        using (MatOfPoint roiPointMat = new MatOfPoint(roiPointList.ToArray())) {
                            roiRect = Imgproc.boundingRect(roiPointMat);
                        }


                        if (roiHistMat != null)
                        {
                            roiHistMat.Dispose();
                            roiHistMat = null;
                        }
                        roiHistMat = new Mat();

                        using (Mat roiHSVMat = new Mat(hsvMat, roiRect))
                            using (Mat maskMat = new Mat()) {
                                Imgproc.calcHist(new List <Mat> (new Mat[] { roiHSVMat }), new MatOfInt(0), maskMat, roiHistMat, new MatOfInt(16), new MatOfFloat(0, 180));
                                Core.normalize(roiHistMat, roiHistMat, 0, 255, Core.NORM_MINMAX);

                                //                                                      Debug.Log ("roiHist " + roiHistMat.ToString ());
                            }
                    }
                }

                if (points.Length < 4)
                {
                    for (int i = 0; i < points.Length; i++)
                    {
                        Imgproc.circle(rgbaMat, points [i], 6, new Scalar(0, 0, 255, 255), 2);
                    }
                }
                else
                {
                    for (int i = 0; i < 4; i++)
                    {
                        Imgproc.line(rgbaMat, points [i], points [(i + 1) % 4], new Scalar(255, 0, 0, 255), 2);
                    }

                    Imgproc.rectangle(rgbaMat, roiRect.tl(), roiRect.br(), new Scalar(0, 255, 0, 255), 2);
                }

                Imgproc.putText(rgbaMat, "PLEASE TOUCH 4 POINTS", new Point(5, rgbaMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false);


//              Imgproc.putText (rgbaMat, "W:" + rgbaMat.width () + " H:" + rgbaMat.height () + " SO:" + Screen.orientation, new Point (5, rgbaMat.rows () - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar (255, 255, 255, 255), 2, Imgproc.LINE_AA, false);

                Utils.matToTexture2D(rgbaMat, texture, webCamTextureToMatHelper.GetBufferColors());
            }
        }
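For reference, the CamShift pipeline used above reduces to three calls: build a hue histogram for the ROI once, back-project it onto each new frame, and let CamShift re-center the window. A condensed sketch under the same assumptions (an HSV frame `hsvMat` and an initial `roiRect`, both taken from the example):

        // Hedged sketch of the core CamShift loop.
        Mat roiHist = new Mat();
        using (Mat roi = new Mat(hsvMat, roiRect))
        using (Mat mask = new Mat()) {
            Imgproc.calcHist(new List<Mat> (new Mat[] { roi }), new MatOfInt(0), mask, roiHist, new MatOfInt(16), new MatOfFloat(0, 180));
        }
        Core.normalize(roiHist, roiHist, 0, 255, Core.NORM_MINMAX);

        // Per frame: back-project the histogram and shift the window.
        using (Mat backProj = new Mat()) {
            Imgproc.calcBackProject(new List<Mat> (new Mat[] { hsvMat }), new MatOfInt(0), roiHist, backProj, new MatOfFloat(0, 180), 1.0);
            RotatedRect r = Video.CamShift(backProj, roiRect, new TermCriteria(TermCriteria.EPS | TermCriteria.COUNT, 10, 1));
        }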
Code Example #26
        // Use this for initialization
        void Start()
        {
            // Data for visual representation
            int width = 512, height = 512;
            Mat image = Mat.zeros(height, width, CvType.CV_8UC4);

            // Set up training data
            int[]   labels          = { 1, -1, -1, -1 };
            float[] trainingData    = { 501, 10, 255, 10, 501, 255, 10, 501 };
            Mat     trainingDataMat = new Mat(4, 2, CvType.CV_32FC1);

            trainingDataMat.put(0, 0, trainingData);
            Mat labelsMat = new Mat(4, 1, CvType.CV_32SC1);

            labelsMat.put(0, 0, labels);

            // Train the SVM
            SVM svm = SVM.create();

            svm.setType(SVM.C_SVC);
            svm.setKernel(SVM.LINEAR);
            svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, 100, 1e-6));
            svm.train(trainingDataMat, Ml.ROW_SAMPLE, labelsMat);

            // Show the decision regions given by the SVM
            byte[] green = { 0, 255, 0, 255 };
            byte[] blue  = { 0, 0, 255, 255 };
            for (int i = 0; i < image.rows(); ++i)
            {
                for (int j = 0; j < image.cols(); ++j)
                {
                    Mat sampleMat = new Mat(1, 2, CvType.CV_32FC1);
                    sampleMat.put(0, 0, j, i);

                    float response = svm.predict(sampleMat);
                    if (response == 1)
                    {
                        image.put(i, j, green);
                    }
                    else if (response == -1)
                    {
                        image.put(i, j, blue);
                    }
                }
            }

            // Show the training data
            int thickness = -1;
            int lineType  = 8;

            Imgproc.circle(image, new Point(501, 10), 5, new Scalar(0, 0, 0, 255), thickness, lineType, 0);
            Imgproc.circle(image, new Point(255, 10), 5, new Scalar(255, 255, 255, 255), thickness, lineType, 0);
            Imgproc.circle(image, new Point(501, 255), 5, new Scalar(255, 255, 255, 255), thickness, lineType, 0);
            Imgproc.circle(image, new Point(10, 501), 5, new Scalar(255, 255, 255, 255), thickness, lineType, 0);

            // Show support vectors
            thickness = 2;
            lineType  = 8;
            Mat sv = svm.getUncompressedSupportVectors();

//                      Debug.Log ("sv.ToString() " + sv.ToString ());
//                      Debug.Log ("sv.dump() " + sv.dump ());
            for (int i = 0; i < sv.rows(); ++i)
            {
                Imgproc.circle(image, new Point((int)sv.get(i, 0) [0], (int)sv.get(i, 1) [0]), 6, new Scalar(128, 128, 128, 255), thickness, lineType, 0);
            }


            Texture2D texture = new Texture2D(image.width(), image.height(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(image, texture);
            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;
        }
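Once trained, the same `svm` object can classify arbitrary points. A short hedged sketch (the query coordinates are made up for illustration):

            // Hedged sketch: classify a new 2D sample with the trained SVM.
            Mat query = new Mat(1, 2, CvType.CV_32FC1);
            query.put(0, 0, 300, 300);            // hypothetical point (x=300, y=300)
            float response = svm.predict(query);  // returns 1 or -1 for this training set
            Debug.Log("predicted label: " + response);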
Code Example #27
File: Main.cs Project: eugenejahn/ARNoteTaking
        //public RawImage document;

        void Update()
        {
            if (webCamTextureToMatHelper.IsPlaying() && webCamTextureToMatHelper.DidUpdateThisFrame())
            {
                Mat mainMat = webCamTextureToMatHelper.GetMat();


                if (!selectTarget) //find paper by contours
                {
                    grayMat = new Mat();

                    // keep a working copy of the current frame
                    mainMat.copyTo(grayMat);

                    mainMat = findPaper(mainMat);

                    // display matrix on the screen
                    Utils.fastMatToTexture2D(mainMat, texture);
                }
                else
                { // using optical flow
                    // allocate the current grayscale mat (the third argument must be a CvType, not a color-conversion code)
                    currentGrayMat = new Mat(mainMat.rows(), mainMat.cols(), CvType.CV_8UC1);
                    Imgproc.cvtColor(mainMat, currentGrayMat, Imgproc.COLOR_RGBA2GRAY);


                    if (initOpticalFlow == true) // doing the init setting for optical flow
                    {
                        // create 40 points
                        Point[] points = new Point[40];
                        // set those points near the corner
                        // paperCornerMatOfPoint  is the corner of the paper
                        for (int i = 0; i < 4; i++)
                        {
                            points[i * 10]     = new Point(paperCornerMatOfPoint.toList()[i].x, paperCornerMatOfPoint.toList()[i].y);
                            points[i * 10 + 1] = new Point(paperCornerMatOfPoint.toList()[i].x + 1, paperCornerMatOfPoint.toList()[i].y);
                            points[i * 10 + 2] = new Point(paperCornerMatOfPoint.toList()[i].x, paperCornerMatOfPoint.toList()[i].y + 1);
                            points[i * 10 + 3] = new Point(paperCornerMatOfPoint.toList()[i].x + 1, paperCornerMatOfPoint.toList()[i].y + 1);
                            points[i * 10 + 4] = new Point(paperCornerMatOfPoint.toList()[i].x, paperCornerMatOfPoint.toList()[i].y - 1);
                            points[i * 10 + 5] = new Point(paperCornerMatOfPoint.toList()[i].x - 1, paperCornerMatOfPoint.toList()[i].y);
                            points[i * 10 + 6] = new Point(paperCornerMatOfPoint.toList()[i].x - 2, paperCornerMatOfPoint.toList()[i].y - 1);
                            points[i * 10 + 7] = new Point(paperCornerMatOfPoint.toList()[i].x, paperCornerMatOfPoint.toList()[i].y - 2);
                            points[i * 10 + 8] = new Point(paperCornerMatOfPoint.toList()[i].x - 2, paperCornerMatOfPoint.toList()[i].y - 2);
                            points[i * 10 + 9] = new Point(paperCornerMatOfPoint.toList()[i].x + 2, paperCornerMatOfPoint.toList()[i].y + 2);
                        }

                        // optionally snap the points to detected corners (e.g. via goodFeaturesToTrack below)
                        //Imgproc.goodFeaturesToTrack(currentGrayMat, corners, 40, qualityLevel, minDistance, none, blockSize, false, 0.04);
                        //Imgproc.goodFeaturesToTrack(currentGrayMat, corners, 40,0.05,20);

                        corners.fromArray(points);

                        prevFeatures.fromList(corners.toList());
                        currentFeatures.fromList(corners.toList());
                        prevGrayMat = currentGrayMat.clone();

                        // don't run the init branch again
                        initOpticalFlow = false;


                        // create a random color per corner for drawing
                        // (currently unused)
                        for (int i = 0; i < maxCorners; i++)
                        {
                            color.Add(new Scalar((int)(Random.value * 255), (int)(Random.value * 255),
                                                 (int)(Random.value * 255), 255));
                        }
                    }
                    else
                    {
                        // To freeze the tracked points, copy prevFeatures into currentFeatures instead:
                        //currentFeatures.fromArray(prevFeatures.toArray());

                        // Let the points move: seed prevFeatures from the last tracked positions
                        prevFeatures.fromArray(currentFeatures.toArray());

                        // optical flow; this updates the values in currentFeatures
                        Video.calcOpticalFlowPyrLK(prevGrayMat, currentGrayMat, prevFeatures, currentFeatures, mMOBStatus, err);
                        //Debug.Log(mMOBStatus.rows());

                        // change to points list
                        List <Point> prevList  = prevFeatures.toList(),
                                     nextList  = currentFeatures.toList();
                        List <byte> byteStatus = mMOBStatus.toList();


                        // draw each successfully tracked point and its motion since the previous frame
                        for (int x = 0; x < byteStatus.Count - 1; x++)
                        {
                            if (byteStatus[x] == 1)
                            {
                                Point pt  = nextList[x];
                                Point pt2 = prevList[x];

                                Imgproc.circle(mainMat, pt, 10, new Scalar(0, 0, 255), -1);

                                Imgproc.line(mainMat, pt, pt2, new Scalar(0, 0, 255));
                            }
                        }

                        // draw the data
                        //for (int i = 0; i < prevList.Count; i++)
                        //{
                        //    //Imgproc.circle(frame, prevList[i], 5, color[10]);
                        //    Imgproc.circle(mainMat, nextList[i], 10, new Scalar(0, 0, 255), -1);

                        //    Imgproc.line(mainMat, prevList[i], nextList[i], color[20]);
                        //}


                        List <List <Point> > cornersFeatures = new List <List <Point> >(40);
                        cornersFeatures.Add(new List <Point>(10));

                        // put the corners features data into the list
                        int  tmp  = 0;
                        bool last = true;
                        for (int i = 0; i < nextList.Count - 1; i++)
                        {
                            if (Mathf.Abs((float)(nextList[i].x - nextList[i + 1].x)) < 10 && Mathf.Abs((float)(nextList[i].y - nextList[i + 1].y)) < 10)
                            {
                                if (last == true)
                                {
                                    cornersFeatures[tmp].Add(nextList[i]);
                                }
                                else
                                {
                                    cornersFeatures.Add(new List <Point>(10));
                                    tmp = tmp + 1;
                                    cornersFeatures[tmp].Add(nextList[i]);
                                }
                                last = true;
                            }
                            else
                            {
                                last = false;
                            }
                        }

                        // count corner clusters (iterate backwards so RemoveAt doesn't skip the next element)
                        int manyCornersFeatures = 0;
                        for (int i = cornersFeatures.Count - 1; i >= 0; i--)
                        {
                            Debug.Log(cornersFeatures[i].Count);
                            if (cornersFeatures[i].Count < 5)
                            {
                                cornersFeatures.RemoveAt(i);
                            }
                            else
                            {
                                manyCornersFeatures++;
                            }
                        }

                        //Debug.Log("Length" + manyCornersFeatures);

                        // if exactly 4 corner clusters remain, display the virtual document in the frame
                        // by applying a perspective transform
                        if (manyCornersFeatures == 4)
                        {
                            Mat documentMat = new Mat(document.height, document.width, CvType.CV_8UC3);
                            Utils.texture2DToMat(document, documentMat);

                            List <Point> srcPoints = new List <Point>();
                            srcPoints.Add(new Point(0, 0));
                            srcPoints.Add(new Point(documentMat.cols(), 0));
                            srcPoints.Add(new Point(documentMat.cols(), documentMat.rows()));
                            srcPoints.Add(new Point(0, documentMat.rows()));


                            Mat srcPointsMat = Converters.vector_Point_to_Mat(srcPoints, CvType.CV_32F);


                            List <Point> dstPoints = new List <Point>()
                            {
                                cornersFeatures[0][0], cornersFeatures[1][0], cornersFeatures[2][0], cornersFeatures[3][0]
                            };
                            Mat dstPointsMat = Converters.vector_Point_to_Mat(dstPoints, CvType.CV_32F);


                            //Make perspective transform
                            Mat m         = Imgproc.getPerspectiveTransform(srcPointsMat, dstPointsMat);
                            Mat warpedMat = new Mat(new Size(), documentMat.type());
                            Debug.Log((cornersFeatures[1][0].x - cornersFeatures[0][0].x) + " " + (cornersFeatures[2][0].y - cornersFeatures[1][0].y));
                            Imgproc.warpPerspective(documentMat, warpedMat, m, mainMat.size(), Imgproc.INTER_LINEAR);
                            warpedMat.convertTo(warpedMat, CvType.CV_8UC3);
                            // same size as frame
                            Mat dst = new Mat(mainMat.size(), CvType.CV_8UC3);
                            //Mat dst = new Mat(frame.size(), CvType.CV_8UC3);
                            //Mat dst2 = new Mat();

                            Imgproc.cvtColor(mainMat, dst, Imgproc.COLOR_RGBA2RGB);

                            //dst.setTo(new Scalar(0, 255, 0));
                            //currentGrayMat.copyTo(dst);
                            //dst.convertTo(dst, CvType.CV_8UC3);


                            //Imgproc.cvtColor(currentGrayMat, frame, Imgproc.COLOR_GRAY2RGBA);

                            Mat img1 = new Mat();
                            Mat mask = new Mat(mainMat.size(), CvType.CV_8UC1, new Scalar(0));
                            Imgproc.cvtColor(warpedMat, img1, Imgproc.COLOR_RGB2GRAY);
                            Imgproc.Canny(img1, img1, 100, 200);
                            List <MatOfPoint> doc_contours = new List <MatOfPoint>();
                            Imgproc.findContours(img1, doc_contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
                            Imgproc.drawContours(mask, doc_contours, -1, new Scalar(255), Core.FILLED);

                            warpedMat.copyTo(dst, mask);

                            dst.convertTo(dst, CvType.CV_8UC3);

                            Debug.Log("dst" + dst.size());


                            Imgproc.cvtColor(dst, mainMat, Imgproc.COLOR_RGB2RGBA);


                            // display on the right
                            Texture2D finalTexture = new Texture2D(dst.width(), dst.height(), TextureFormat.RGB24, false);
                            Utils.matToTexture2D(dst, finalTexture);

                            targetRawImage.texture = finalTexture;
                        }


                        // current frame to old frame
                        prevGrayMat = currentGrayMat.clone();



                        //Imgproc.cvtColor(currentGrayMat, frame, Imgproc.COLOR_GRAY2RGBA);

                        // display matrix on the screen
                        Utils.fastMatToTexture2D(mainMat, texture);
                    }
                }
            }
        }
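The core of the tracking branch above is a single calcOpticalFlowPyrLK call. Stripped of the drawing and bookkeeping, it looks like the sketch below (assuming two consecutive grayscale frames `prevGray`/`currGray` and seed points, e.g. from Imgproc.goodFeaturesToTrack; these names are placeholders):

                        // Hedged sketch: track prevPts from prevGray into currGray.
                        MatOfPoint2f prevPts = new MatOfPoint2f();   // seed with corner points
                        MatOfPoint2f nextPts = new MatOfPoint2f();   // filled by the call below
                        MatOfByte status = new MatOfByte();          // 1 where a point was found
                        MatOfFloat err = new MatOfFloat();
                        Video.calcOpticalFlowPyrLK(prevGray, currGray, prevPts, nextPts, status, err);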
Code Example #28
        // Update is called once per frame
        void Update()
        {
            #if ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR)
            //Touch
            int touchCount = Input.touchCount;
            if (touchCount == 1)
            {
                Touch t = Input.GetTouch(0);
                if (t.phase == TouchPhase.Ended && !EventSystem.current.IsPointerOverGameObject(t.fingerId))
                {
                    storedTouchPoint = new Point(t.position.x, t.position.y);
                    //Debug.Log ("touch X " + t.position.x);
                    //Debug.Log ("touch Y " + t.position.y);
                }
            }
            #else
            //Mouse
            if (Input.GetMouseButtonUp(0) && !EventSystem.current.IsPointerOverGameObject())
            {
                storedTouchPoint = new Point(Input.mousePosition.x, Input.mousePosition.y);
                //Debug.Log ("mouse X " + Input.mousePosition.x);
                //Debug.Log ("mouse Y " + Input.mousePosition.y);
            }
            #endif

            if (webCamTextureToMatHelper.IsPlaying() && webCamTextureToMatHelper.DidUpdateThisFrame())
            {
                Mat rgbaMat = webCamTextureToMatHelper.GetMat();

                Imgproc.cvtColor(rgbaMat, hsvMat, Imgproc.COLOR_RGBA2RGB);
                Imgproc.cvtColor(hsvMat, hsvMat, Imgproc.COLOR_RGB2HSV);

                if (storedTouchPoint != null)
                {
                    ConvertScreenPointToTexturePoint(storedTouchPoint, storedTouchPoint, gameObject, rgbaMat.cols(), rgbaMat.rows());
                    OnTouch(rgbaMat, storedTouchPoint);
                    storedTouchPoint = null;
                }

                Point[] points = roiPointList.ToArray();

                if (shouldStartCamShift)
                {
                    shouldStartCamShift = false;

                    using (MatOfPoint roiPointMat = new MatOfPoint(points)) {
                        roiRect = Imgproc.boundingRect(roiPointMat);
                    }

                    if (roiHistMat != null)
                    {
                        roiHistMat.Dispose();
                        roiHistMat = null;
                    }
                    roiHistMat = new Mat();

                    using (Mat roiHSVMat = new Mat(hsvMat, roiRect))
                        using (Mat maskMat = new Mat()) {
                            Imgproc.calcHist(new List <Mat> (new Mat[] { roiHSVMat }), new MatOfInt(0), maskMat, roiHistMat, new MatOfInt(16), new MatOfFloat(0, 180));
                            Core.normalize(roiHistMat, roiHistMat, 0, 255, Core.NORM_MINMAX);

                            //Debug.Log ("roiHist " + roiHistMat.ToString ());
                        }
                }
                else if (points.Length == 4)
                {
                    using (Mat backProj = new Mat()) {
                        Imgproc.calcBackProject(new List <Mat> (new Mat[] { hsvMat }), new MatOfInt(0), roiHistMat, backProj, new MatOfFloat(0, 180), 1.0);

                        RotatedRect r = Video.CamShift(backProj, roiRect, termination);
                        r.points(points);
                    }
                }

                if (points.Length < 4)
                {
                    for (int i = 0; i < points.Length; i++)
                    {
                        Imgproc.circle(rgbaMat, points [i], 6, new Scalar(0, 0, 255, 255), 2);
                    }
                }
                else
                {
                    for (int i = 0; i < 4; i++)
                    {
                        Imgproc.line(rgbaMat, points [i], points [(i + 1) % 4], new Scalar(255, 0, 0, 255), 2);
                    }

                    Imgproc.rectangle(rgbaMat, roiRect.tl(), roiRect.br(), new Scalar(0, 255, 0, 255), 2);
                }

//                Imgproc.putText (rgbaMat, "W:" + rgbaMat.width () + " H:" + rgbaMat.height () + " SO:" + Screen.orientation, new Point (5, rgbaMat.rows () - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar (255, 255, 255, 255), 2, Imgproc.LINE_AA, false);

                Utils.fastMatToTexture2D(rgbaMat, texture);
            }
        }
Code Example #29
    void Update()
    {
        //Access camera image provided by Vuforia
        Image camImg = CameraDevice.Instance.GetCameraImage(Image.PIXEL_FORMAT.RGBA8888);

        if (camImg != null)
        {
            if (camImageMat == null)
            {
                //First time -> instantiate camera image specific data
                camImageMat = new Mat(camImg.Height, camImg.Width, CvType.CV_8UC4);  //Note: rows=height, cols=width
            }

            camImageMat.put(0, 0, camImg.Pixels);

            //Replace with your own projection matrix. This approach only uses fy.
            cam.fieldOfView = 2 * Mathf.Atan(camImg.Height * 0.5f / fy) * Mathf.Rad2Deg;

            Vector3 worldPnt1 = corner1.transform.position;
            Vector3 worldPnt2 = corner2.transform.position;
            Vector3 worldPnt3 = corner3.transform.position;
            Vector3 worldPnt4 = corner4.transform.position;

            //See lecture slides
            Matrix4x4 Rt = cam.transform.worldToLocalMatrix;
            Matrix4x4 A  = Matrix4x4.identity;
            A.m00 = fx;
            A.m11 = fy;
            A.m02 = cx;
            A.m12 = cy;

            Matrix4x4 worldToImage = A * Rt;

            Vector3 hUV1 = worldToImage.MultiplyPoint3x4(worldPnt1);
            Vector3 hUV2 = worldToImage.MultiplyPoint3x4(worldPnt2);
            Vector3 hUV3 = worldToImage.MultiplyPoint3x4(worldPnt3);
            Vector3 hUV4 = worldToImage.MultiplyPoint3x4(worldPnt4);

            //hUV are the image coordinates in 2D homogeneous coordinates, we need to normalize, i.e., divide by Z
            Vector2 uv1 = new Vector2(hUV1.x, hUV1.y) / hUV1.z;
            Vector2 uv2 = new Vector2(hUV2.x, hUV2.y) / hUV2.z;
            Vector2 uv3 = new Vector2(hUV3.x, hUV3.y) / hUV3.z;
            Vector2 uv4 = new Vector2(hUV4.x, hUV4.y) / hUV4.z;

            //don't forget to alloc before putting values into a MatOfPoint2f
            imagePoints.put(0, 0, uv1.x, camImg.Height - uv1.y);
            imagePoints.put(1, 0, uv2.x, camImg.Height - uv2.y);
            imagePoints.put(2, 0, uv3.x, camImg.Height - uv3.y);
            imagePoints.put(3, 0, uv4.x, camImg.Height - uv4.y);

            //Debug draw points
            Point imgPnt1 = new Point(imagePoints.get(0, 0));
            Point imgPnt2 = new Point(imagePoints.get(1, 0));
            Point imgPnt3 = new Point(imagePoints.get(2, 0));
            Point imgPnt4 = new Point(imagePoints.get(3, 0));
            Imgproc.circle(camImageMat, imgPnt1, 5, new Scalar(255, 0, 0, 255));
            Imgproc.circle(camImageMat, imgPnt2, 5, new Scalar(0, 255, 0, 255));
            Imgproc.circle(camImageMat, imgPnt3, 5, new Scalar(0, 0, 255, 255));
            Imgproc.circle(camImageMat, imgPnt4, 5, new Scalar(255, 255, 0, 255));
            Scalar lineCl = new Scalar(200, 120, 0, 160);
            Imgproc.line(camImageMat, imgPnt1, imgPnt2, lineCl);
            Imgproc.line(camImageMat, imgPnt2, imgPnt3, lineCl);
            Imgproc.line(camImageMat, imgPnt3, imgPnt4, lineCl);
            Imgproc.line(camImageMat, imgPnt4, imgPnt1, lineCl);


            var destPoints = new MatOfPoint2f();             // Destination corners of the rectified image
            destPoints.alloc(4);
            destPoints.put(0, 0, width, 0);
            destPoints.put(1, 0, width, height);
            destPoints.put(2, 0, 0, height);
            destPoints.put(3, 0, 0, 0);

            var homography = Calib3d.findHomography(imagePoints, destPoints);             // Homography mapping the image corners to that rectangle

            // Warp into a separate image Mat; warping into destPoints (a point matrix) would clobber it.
            Mat unwarpedMat = new Mat();
            Imgproc.warpPerspective(camImageMat, unwarpedMat, homography, new Size(camImageMat.width(), camImageMat.height()));

            unwarpedTexture = unwarpedTextureClean;

            MatDisplay.MatToTexture(unwarpedMat, ref unwarpedTexture);             // Convert the unwarped image into a texture

            if (Input.GetKey("space"))
            {
                fish.GetComponent <Renderer>().material.mainTexture = unwarpedTexture;
            }
            else
            {
                fish.GetComponent <Renderer>().material.mainTexture = tex;
            }

            MatDisplay.DisplayMat(unwarpedMat, MatDisplaySettings.BOTTOM_LEFT);
            MatDisplay.DisplayMat(camImageMat, MatDisplaySettings.FULL_BACKGROUND);
        }
    }
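One way to sanity-check the homography computed above is to push the four image points through it; they should land (approximately) on the destination rectangle corners. A hedged sketch reusing `imagePoints` and `homography` from the example:

        // Hedged verification sketch: map the image corners through the homography.
        MatOfPoint2f mapped = new MatOfPoint2f();
        Core.perspectiveTransform(imagePoints, mapped, homography);
        Debug.Log("mapped corners: " + mapped.dump());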
Code Example #30
        // Main loop that runs forever, until the user hits Escape to quit.
        private void recognizeAndTrainUsingWebcam(Mat cameraFrame, CascadeClassifier faceCascade, CascadeClassifier eyeCascade1, CascadeClassifier eyeCascade2)
        {
            if (cameraFrame != null && cameraFrame.empty())
            {
                Debug.LogError("ERROR: Couldn't grab the next camera frame.");
                return;
            }

            // Get a copy of the camera frame that we can draw onto.
            Mat displayedFrame = cameraFrame;

            int   cx;
            float current_processingTime     = Time.realtimeSinceStartup;
            float processingTimeDiff_seconds = (current_processingTime - old_processingTime);

            if (processingTimeDiff_seconds > CHANGE_IN_SECONDS_FOR_PROCESSING)
            {
                // Run the face recognition system on the camera image. It will draw some things onto the given image, so make sure it is not read-only memory!
                int identity = -1;

                // Find a face and preprocess it to have a standard size and contrast & brightness.
                Rect  faceRect = new Rect();                                       // Position of detected face.
                Rect  searchedLeftEye = new Rect(), searchedRightEye = new Rect(); // top-left and top-right regions of the face, where eyes were searched.
                Point leftEye = new Point(), rightEye = new Point();               // Position of the detected eyes.

                Mat preprocessedFace = PreprocessFace.GetPreprocessedFace(displayedFrame, faceWidth, faceCascade, eyeCascade1, eyeCascade2, preprocessLeftAndRightSeparately, ref faceRect, ref leftEye, ref rightEye, ref searchedLeftEye, ref searchedRightEye);

                bool gotFaceAndEyes = false;

                if (preprocessedFace != null && !preprocessedFace.empty())
                {
                    gotFaceAndEyes = true;
                }

                // Draw an anti-aliased rectangle around the detected face.
                if (faceRect.width > 0)
                {
                    Imgproc.rectangle(displayedFrame, faceRect.tl(), faceRect.br(), YELLOW, 2, Imgproc.LINE_AA, 0);

                    // Draw light-blue anti-aliased circles for the 2 eyes.
                    Scalar eyeColor = LIGHT_BLUE;
                    if (leftEye.x >= 0)     // Check if the eye was detected
                    {
                        Imgproc.circle(displayedFrame, new Point(faceRect.x + leftEye.x, faceRect.y + leftEye.y), 6, eyeColor, 1, Imgproc.LINE_AA, 0);
                    }
                    if (rightEye.x >= 0)     // Check if the eye was detected
                    {
                        Imgproc.circle(displayedFrame, new Point(faceRect.x + rightEye.x, faceRect.y + rightEye.y), 6, eyeColor, 1, Imgproc.LINE_AA, 0);
                    }
                }

                prev_prepreprocessedFace = preprocessedFace;

                if (m_mode == MODES.MODE_DETECTION)
                {
                    // Don't do anything special.
                }
                else if (m_mode == MODES.MODE_COLLECT_FACES)
                {
                    // Check if we have detected a face.
                    if (gotFaceAndEyes)
                    {
                        // Check if this face looks somewhat different from the previously collected face.
                        double imageDiff = 10000000000.0d;
                        if (old_prepreprocessedFace != null && !old_prepreprocessedFace.empty())
                        {
                            imageDiff = Recognition.GetSimilarity(preprocessedFace, old_prepreprocessedFace);
                        }

                        // Also record when it happened.
                        double current_time     = Time.realtimeSinceStartup;
                        double timeDiff_seconds = (current_time - old_time);

                        // Only process the face if it is noticeably different from the previous frame and there has been noticeable time gap.
                        if ((imageDiff > CHANGE_IN_IMAGE_FOR_COLLECTION) && (timeDiff_seconds > CHANGE_IN_SECONDS_FOR_COLLECTION))
                        {
                            // Also add the mirror image to the training set, so we have more training data, as well as to deal with faces looking to the left or right.
                            Mat mirroredFace = new Mat();
                            Core.flip(preprocessedFace, mirroredFace, 1);

                            // Add the face images to the list of detected faces.
                            preprocessedFaces.Add(preprocessedFace);
                            preprocessedFaces.Add(mirroredFace);
                            faceLabels.Add(m_selectedPerson);
                            faceLabels.Add(m_selectedPerson);

                            // Keep a reference to the latest face of each person.
                            m_latestFaces [m_selectedPerson] = preprocessedFaces.Count - 2;  // Point to the non-mirrored face.
                            // Show the number of collected faces. But since we also store mirrored faces, just show how many the user thinks they stored.
                            Debug.Log("Saved face " + (preprocessedFaces.Count / 2) + " for person " + m_selectedPerson);

                            // Make a white flash on the face, so the user knows a photo has been taken.
                            using (Mat displayedFaceRegion = new Mat(displayedFrame, faceRect)) {
                                Core.add(displayedFaceRegion, DARK_GRAY, displayedFaceRegion);
                            }

                            // Keep a copy of the processed face, to compare on next iteration.
                            old_prepreprocessedFace = preprocessedFace;
                            old_time = current_time;
                        }
                    }
                }
                else if (m_mode == MODES.MODE_TRAINING)
                {
                    // Check if there is enough data to train from. For Eigenfaces, we can learn just one person if we want, but for Fisherfaces,
                    // we need at least 2 people, otherwise it will crash!
                    bool haveEnoughData = true;
                    if (facerecAlgorithm == "FaceRecognizer.Fisherfaces")
                    {
                        if ((m_numPersons < 2) || (m_numPersons == 2 && m_latestFaces [1] < 0))
                        {
                            Debug.Log("Warning: Fisherfaces needs atleast 2 people, otherwise there is nothing to differentiate! Collect more data ...");
                            haveEnoughData = false;
                        }
                    }
                    if (m_numPersons < 1 || preprocessedFaces.Count <= 0 || preprocessedFaces.Count != faceLabels.Count)
                    {
                        Debug.Log("Warning: Need some training data before it can be learnt! Collect more data ...");
                        haveEnoughData = false;
                    }

                    if (haveEnoughData)
                    {
                        // Start training from the collected faces using Eigenfaces or a similar algorithm.
                        model = Recognition.LearnCollectedFaces(preprocessedFaces, faceLabels, facerecAlgorithm);

                        // Show the internal face recognition data, to help debugging.
                        //if (m_debug)
                        //Recognition.ShowTrainingDebugData(model, faceWidth, faceHeight);

                        // Now that training is over, we can start recognizing!
                        m_mode = MODES.MODE_RECOGNITION;
                    }
                    else
                    {
                        // Since there isn't enough training data, go back to the face collection mode!
                        m_mode = MODES.MODE_COLLECT_FACES;
                    }
                }
                else if (m_mode == MODES.MODE_RECOGNITION)
                {
                    prev_identity   = -1;
                    prev_similarity = 100000000.0d;
                    if (reconstructedFace != null && !reconstructedFace.IsDisposed)
                    {
                        reconstructedFace.Dispose();
                    }
                    reconstructedFace = null;

                    if (gotFaceAndEyes && (preprocessedFaces.Count > 0) && (preprocessedFaces.Count == faceLabels.Count))
                    {
                        // Generate a face approximation by back-projecting the eigenvectors & eigenvalues.
                        reconstructedFace = Recognition.ReconstructFace(model, preprocessedFace);

                        // Verify whether the reconstructed face looks like the preprocessed face, otherwise it is probably an unknown person.
                        double similarity = Recognition.GetSimilarity(preprocessedFace, reconstructedFace);
                        double confidence = 0.0d;

                        string outputStr;
                        if (similarity < UNKNOWN_PERSON_THRESHOLD)
                        {
                            int[]    predictedLabel      = new int [1];
                            double[] predictedConfidence = new double [1];
                            // Identify who the person is in the preprocessed face image.
                            model.predict(preprocessedFace, predictedLabel, predictedConfidence);
                            identity   = predictedLabel [0];
                            confidence = predictedConfidence [0];

                            outputStr     = identity.ToString();
                            prev_identity = identity;
                        }
                        else
                        {
                            // Since the confidence is low, assume it is an unknown person.
                            outputStr = "Unknown";
                        }
                        prev_similarity = similarity;
                        Debug.Log("Identity: " + outputStr + ". Similarity: " + similarity + ". Confidence: " + confidence);
                    }
                }
                else if (m_mode == MODES.MODE_DELETE_ALL)
                {
                    // Restart everything!
                    dispose();

                    // Restart in Detection mode.
                    m_mode = MODES.MODE_DETECTION;
                }
                else
                {
                    Debug.LogError("ERROR: Invalid run mode " + m_mode);
                    //exit(1);
                }

                old_processingTime = current_processingTime;
            }

            // Show the help, while also showing the number of collected faces. Since we also collect mirrored faces, we should just
            // tell the user how many faces they think we saved (ignoring the mirrored faces), hence divide by 2.
            strBuilder.Length = 0;
            Rect rcHelp = new Rect();

            if (m_mode == MODES.MODE_DETECTION)
            {
                strBuilder.Append("Click [Add Person] when ready to collect faces.");
            }
            else if (m_mode == MODES.MODE_COLLECT_FACES)
            {
                strBuilder.Append("Click anywhere to train from your ");
                strBuilder.Append(preprocessedFaces.Count / 2);
                strBuilder.Append(" faces of ");
                strBuilder.Append(m_numPersons);
                strBuilder.Append(" people.");
            }
            else if (m_mode == MODES.MODE_TRAINING)
            {
                strBuilder.Append("Please wait while your ");
                strBuilder.Append(preprocessedFaces.Count / 2);
                strBuilder.Append(" faces of ");
                strBuilder.Append(m_numPersons);
                strBuilder.Append(" people builds.");
            }
            else if (m_mode == MODES.MODE_RECOGNITION)
            {
                strBuilder.Append("Click people on the right to add more faces to them, or [Add Person] for someone new.");
            }

            if (strBuilder.Length > 0)
            {
                // Draw it with a black background and then again with a white foreground.
                // Since BORDER may be 0 and we need a negative position, subtract 2 from the border so it is always negative.
                float txtSize = 0.4f;
                drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER, -BORDER - 2), BLACK, txtSize);              // Black shadow.
                rcHelp = drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER + 1, -BORDER - 1), WHITE, txtSize); // White text.
            }

            // Show the current mode.
            strBuilder.Length = 0;
            if (m_mode >= 0 && m_mode < MODES.MODE_END)
            {
                strBuilder.Append(" people builds.");
                strBuilder.Append(MODE_NAMES [(int)m_mode]);
                drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER, -BORDER - 2 - rcHelp.height), BLACK);     // Black shadow
                drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER + 1, -BORDER - 1 - rcHelp.height), GREEN); // Green text
            }

            // Show the current preprocessed face in the top-center of the display.
            cx = (displayedFrame.cols() - faceWidth) / 2;
            if (prev_prepreprocessedFace != null && !prev_prepreprocessedFace.empty())
            {
                // Get a RGBA version of the face, since the output is RGBA color.
                using (Mat srcRGBA = new Mat(prev_prepreprocessedFace.size(), CvType.CV_8UC4)) {
                    Imgproc.cvtColor(prev_prepreprocessedFace, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
                    // Get the destination ROI (and make sure it is within the image!).
                    Rect dstRC = new Rect(cx, BORDER, faceWidth, faceHeight);
                    using (Mat dstROI = new Mat(displayedFrame, dstRC)) {
                        // Copy the pixels from src to dst.
                        srcRGBA.copyTo(dstROI);
                    }
                }
            }

            // Draw an anti-aliased border around the face, even if it is not shown.
            Imgproc.rectangle(displayedFrame, new Point(cx - 1, BORDER - 1), new Point(cx - 1 + faceWidth + 2, BORDER - 1 + faceHeight + 2), LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);

            // Show the most recent face for each of the collected people, on the right side of the display.
            m_gui_faces_left = displayedFrame.cols() - BORDER - faceWidth;
            m_gui_faces_top  = BORDER;
            for (int i = 0; i < m_numPersons; i++)
            {
                int index = m_latestFaces [i];
                if (index >= 0 && index < preprocessedFaces.Count)
                {
                    Mat srcGray = preprocessedFaces [index];
                    if (srcGray != null && !srcGray.empty())
                    {
                        // Get an RGBA version of the face, since the output is RGBA color.
                        using (Mat srcRGBA = new Mat(srcGray.size(), CvType.CV_8UC4)) {
                            Imgproc.cvtColor(srcGray, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
                            // Get the destination ROI (and make sure it is within the image!).
                            int  y     = Mathf.Min(m_gui_faces_top + i * faceHeight, displayedFrame.rows() - faceHeight);
                            Rect dstRC = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
                            using (Mat dstROI = new Mat(displayedFrame, dstRC)) {
                                // Copy the pixels from src to dst.
                                srcRGBA.copyTo(dstROI);
                            }
                        }
                    }
                }
            }
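
            // Hedged sketch (assumption): m_latestFaces[i] is presumably updated elsewhere,
            // whenever a new face is collected for person i. Since the training message above
            // divides preprocessedFaces.Count by 2, each capture likely stores the face plus
            // a mirrored copy; faceLabels and mirroredFace are hypothetical names here:
            //
            //   preprocessedFaces.Add(preprocessedFace);
            //   preprocessedFaces.Add(mirroredFace);                            // mirrored duplicate
            //   faceLabels.Add(m_selectedPerson);
            //   faceLabels.Add(m_selectedPerson);
            //   m_latestFaces[m_selectedPerson] = preprocessedFaces.Count - 2;  // point at the non-mirrored one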

            // Highlight the person being collected, using a red rectangle around their face.
            if (m_mode == MODES.MODE_COLLECT_FACES)
            {
                if (m_selectedPerson >= 0 && m_selectedPerson < m_numPersons)
                {
                    int  y  = Mathf.Min(m_gui_faces_top + m_selectedPerson * faceHeight, displayedFrame.rows() - faceHeight);
                    Rect rc = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
                    Imgproc.rectangle(displayedFrame, rc.tl(), rc.br(), RED, 3, Imgproc.LINE_AA, 0);
                }
            }

            // Highlight the person that has been recognized, using a green rectangle around their face.
            if (m_mode == MODES.MODE_RECOGNITION && prev_identity >= 0 && prev_identity < 1000)
            {
                int  y  = Mathf.Min(m_gui_faces_top + prev_identity * faceHeight, displayedFrame.rows() - faceHeight);
                Rect rc = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
                Imgproc.rectangle(displayedFrame, rc.tl(), rc.br(), GREEN, 3, Imgproc.LINE_AA, 0);
            }

            if (m_mode == MODES.MODE_RECOGNITION)
            {
                if (m_debug)
                {
                    if (reconstructedFace != null && !reconstructedFace.empty())
                    {
                        cx = (displayedFrame.cols() - faceWidth) / 2;
                        Point rfDebugBottomRight = new Point(cx + faceWidth * 2 + 5, BORDER + faceHeight);
                        Point rfDebugTopLeft     = new Point(cx + faceWidth + 5, BORDER);
                        Rect  rfDebugRC          = new Rect(rfDebugTopLeft, rfDebugBottomRight);
                        using (Mat srcRGBA = new Mat(reconstructedFace.size(), CvType.CV_8UC4)) {
                            Imgproc.cvtColor(reconstructedFace, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
                            using (Mat dstROI = new Mat(displayedFrame, rfDebugRC)) {
                                srcRGBA.copyTo(dstROI);
                            }
                        }
                        Imgproc.rectangle(displayedFrame, rfDebugTopLeft, rfDebugBottomRight, LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
                    }
                }

                // Show the confidence rating for the recognition in the mid-top of the display.
                cx = (displayedFrame.cols() - faceWidth) / 2;
                Point ptBottomRight = new Point(cx - 5, BORDER + faceHeight);
                Point ptTopLeft     = new Point(cx - 15, BORDER);
                // Draw a gray line showing the threshold for an "unknown" person.
                Point ptThreshold = new Point(ptTopLeft.x, ptBottomRight.y - (1.0 - UNKNOWN_PERSON_THRESHOLD) * faceHeight);
                Imgproc.rectangle(displayedFrame, ptThreshold, new Point(ptBottomRight.x, ptThreshold.y), LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
                // Clamp the confidence rating to the range [0.0, 1.0], to show in the bar.
                double confidenceRatio = 1.0d - Math.Min(Math.Max(prev_similarity, 0.0d), 1.0d);
                Point  ptConfidence    = new Point(ptTopLeft.x, ptBottomRight.y - confidenceRatio * faceHeight);
                // Show the light-blue confidence bar.
                Imgproc.rectangle(displayedFrame, ptConfidence, ptBottomRight, LIGHT_BLUE, Core.FILLED, Imgproc.LINE_AA, 0);
                // Show the gray border of the bar.
                Imgproc.rectangle(displayedFrame, ptTopLeft, ptBottomRight, LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
            }
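
            // Hedged sketch (assumption): prev_similarity is presumably computed elsewhere by
            // comparing the preprocessed face against its eigenface reconstruction (the
            // reconstructedFace drawn above), e.g. as a per-pixel-normalized L2 distance, so
            // that values below UNKNOWN_PERSON_THRESHOLD count as a confident match.
            // getSimilarity is a hypothetical name for that helper:
            //
            //   double getSimilarity(Mat A, Mat B)
            //   {
            //       // L2 distance between the two images, normalized by the pixel count.
            //       double errorL2 = Core.norm(A, B, Core.NORM_L2);
            //       return errorL2 / (double)(A.rows() * A.cols());
            //   }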

            /*
             * // If the user wants all the debug data, show it to them!
             * if (m_debug)
             * {
             *  Mat face = new Mat();
             *  if (faceRect.width > 0)
             *  {
             *      face = new Mat(cameraFrame, faceRect);
             *      if (searchedLeftEye.width > 0 && searchedRightEye.width > 0)
             *      {
             *          Mat topLeftOfFace = new Mat(face, searchedLeftEye);
             *          Mat topRightOfFace = new Mat(face, searchedRightEye);
             *          //imshow("topLeftOfFace", topLeftOfFace);
             *          //imshow("topRightOfFace", topRightOfFace);
             *      }
             *  }
             *
             *  //if (model != null)
             *      //showTrainingDebugData(model, faceWidth, faceHeight);
             * }
             */
        }