Beispiel #1
0
    void Update()
    {
        // Intentionally re-reads the image from disk every frame: this sample
        // exists to measure performance. Do not copy this pattern elsewhere.
        mat = Cv2.ImRead(CvUtil.GetStreamingAssetsPath("lena.png"), ImreadModes.GrayScale);

        // Cycle the display mode 0..4, advancing every 1.5 seconds.
        timeElapsed += Time.deltaTime;
        if (timeElapsed > 1.5f)
        {
            timeElapsed = 0;
            if (++mode > 4)
            {
                mode = 0;
            }
        }

        // Recompute every threshold variant each frame (again: benchmarking).
        Cv2.Threshold(mat, binaryInvMat, 0, 255, ThresholdTypes.BinaryInv | ThresholdTypes.Otsu);
        Cv2.Threshold(mat, toZeroMat, 0, 255, ThresholdTypes.Tozero | ThresholdTypes.Otsu);
        Cv2.Threshold(mat, toZeroInvMat, 0, 255, ThresholdTypes.TozeroInv | ThresholdTypes.Otsu);
        Cv2.AdaptiveThreshold(mat, gaussianMat, 255, AdaptiveThresholdTypes.GaussianC, ThresholdTypes.Binary, 7, 8);
        Cv2.Subtract(gaussianMat, toZeroMat, subtractMat);

        // Pick which result to show for the current mode.
        if (mode == 0)
        {
            mat = subtractMat;
        }
        else if (mode == 1)
        {
            mat = binaryInvMat;
        }
        else if (mode == 2)
        {
            mat = toZeroMat;
        }
        else if (mode == 3)
        {
            mat = gaussianMat;
        }
        else if (mode == 4)
        {
            mat = toZeroInvMat;
        }

        // Grayscale result -> RGBA -> Texture2D -> UI.
        Cv2.CvtColor(mat, matRGBA, ColorConversionCodes.GRAY2RGBA);
        CvConvert.MatToTexture2D(matRGBA, ref tex);
        rawImage.texture = tex;
    }
Beispiel #2
0
    void Start()
    {
        // Open the first available webcam and start capturing.
        webCamTexture = new WebCamTexture(WebCamTexture.devices[0].name);
        webCamTexture.Play();

        // Allocate conversion buffers once, sized to the webcam frame.
        tex  = new Texture2D(webCamTexture.width, webCamTexture.height, TextureFormat.RGBA32, false);
        gray = new Mat(webCamTexture.height, webCamTexture.width, MatType.CV_8UC1);

        // FIX: previously `mat` was first allocated as a webcam-sized CV_8UC4
        // Mat and then immediately replaced by the ImRead result, leaking the
        // native buffer of the discarded Mat. Load the image directly instead.
        mat = Cv2.ImRead(CvUtil.GetStreamingAssetsPath("lena.jpg"), ImreadModes.GrayScale);
    }
Beispiel #3
0
    void Start()
    {
        // Bring up the WebCamTexture first so we can size buffers from it.
        CamInit();

        // Allocate the conversion buffers once here — re-creating Mats or
        // Texture2Ds every frame in Update() would generate heavy GC pressure.
        tex = new Texture2D(webCamTexture.width, webCamTexture.height, TextureFormat.RGBA32, false);
        mat = new Mat(webCamTexture.height, webCamTexture.width, MatType.CV_8UC4);

        // Haar cascade data for frontal-face detection, shipped in StreamingAssets.
        string cascadePath = CvUtil.GetStreamingAssetsPath("haarcascade_frontalface_alt2.xml");
        haarCascade = new CascadeClassifier(cascadePath);
    }
Beispiel #4
0
    void Start()
    {
        // Allocate everything once up front; new-ing Mats and Texture2Ds in
        // Update() churns the GC and leaks native memory.
        mat = Cv2.ImRead(CvUtil.GetStreamingAssetsPath("lena.png"), ImreadModes.GrayScale);

        // FIX: Mat's constructor takes (rows, cols) = (height, width) while
        // Texture2D takes (width, height). The original swapped both argument
        // pairs, which only went unnoticed because lena.png is square.
        matRGBA = new Mat(mat.Height, mat.Width, MatType.CV_8UC4);
        tex     = new Texture2D(mat.Width, mat.Height, TextureFormat.RGBA32, false);

        // Destination Mats for the threshold variants computed each frame.
        binaryInvMat = new Mat();
        toZeroMat    = new Mat();
        toZeroInvMat = new Mat();
        gaussianMat  = new Mat();
        subtractMat  = new Mat();
    }
    // Per-frame chessboard-based camera calibration demo: find the board,
    // refine its corners, run CalibrateCamera on this single view, and print
    // the resulting intrinsics and translation vector.
    // NOTE(review): running CalibrateCamera every frame (and allocating the
    // point lists each time) is expensive; acceptable only for a demo.
    void CamUpdate()
    {
        // Grab the current webcam frame into `mat` (RGBA).
        CvUtil.GetWebCamMat(webCamTexture, ref mat);

        // Corner detection works on a single-channel image.
        Cv2.CvtColor(mat, gray, ColorConversionCodes.RGBA2GRAY);

        Point2f[] corners;

        // Search for the inner corners of a chessboard pattern of `size`.
        bool ret = Cv2.FindChessboardCorners(gray, size, out corners);

        if (ret)
        {
            // Refine corner locations to sub-pixel accuracy: stop after 30
            // iterations or when movement falls below 0.001.
            // NOTE(review): `size` (the board dimensions) is passed as the
            // CornerSubPix search-window size; OpenCV examples typically use a
            // small window such as (11, 11) here — confirm this is intentional.
            TermCriteria criteria = TermCriteria.Both(30, 0.001f);
            Point2f[]    corners2 = Cv2.CornerSubPix(gray, corners, size, new Size(-1, -1), criteria);

            // Overlay the detected pattern on the display frame.
            Cv2.DrawChessboardCorners(mat, size, corners2, ret);

            // Matching 3D object points: a planar grid at z = 0, spaced
            // `cellSize` apart, enumerated in the same order as the corners.
            List <Point3f> lObjectPoints = new List <Point3f>();
            for (int i = 0; i < size.Width; i++)
            {
                for (int j = 0; j < size.Height; j++)
                {
                    lObjectPoints.Add(new Point3f(i, j, 0) * cellSize);
                }
            }
            var objectPoints = new List <IEnumerable <Point3f> > {
                lObjectPoints
            };

            var imagePoints = new List <IEnumerable <Point2f> > {
                corners2
            };

            // Output containers: 3x3 intrinsic matrix, 5 distortion
            // coefficients, and per-view rotation/translation vectors.
            double[,] cameraMatrix = new double[3, 3];
            double[] distCoefficients = new double[5];
            Vec3d[]  rvecs, tvecs;

            // Calibrate from this single view (one object/image point set).
            Cv2.CalibrateCamera(objectPoints, imagePoints, mat.Size(), cameraMatrix, distCoefficients, out rvecs, out tvecs);

            // Log the intrinsic matrix row by row.
            print(
                cameraMatrix[0, 0] + ", " + cameraMatrix[0, 1] + ", " + cameraMatrix[0, 2] + "\n" +
                cameraMatrix[1, 0] + ", " + cameraMatrix[1, 1] + ", " + cameraMatrix[1, 2] + "\n" +
                cameraMatrix[2, 0] + ", " + cameraMatrix[2, 1] + ", " + cameraMatrix[2, 2]
                );

            // Log the translation vector of the (single) view.
            print(tvecs[0].Item0 + ", " + tvecs[0].Item1 + ", " + tvecs[0].Item2);
        }

        // Push the (possibly annotated) frame to the UI.
        CvConvert.MatToTexture2D(mat, ref tex);
        rawImage.texture = tex;
    }
    // Per-frame blob detection demo: binarize the webcam frame, detect blobs,
    // and draw the keypoints over the thresholded image.
    void CamUpdate()
    {
        CvUtil.GetWebCamMat(webCamTexture, ref mat);
        Cv2.CvtColor(mat, greyMat, ColorConversionCodes.RGBA2GRAY);
        Cv2.Threshold(greyMat, greyMat, 100, 255, ThresholdTypes.Binary);

        // All filters disabled: report every blob found. The commented values
        // show typical starting points when tuning the detector.
        var detectorParams = new SimpleBlobDetector.Params
        {
            //MinDistBetweenBlobs = 10, // 10 pixels between blobs
            //MinRepeatability = 1,

            //MinThreshold = 100,
            //MaxThreshold = 255,
            //ThresholdStep = 5,

            FilterByArea = false,
            //FilterByArea = true,
            //MinArea = 0.001f, // 10 pixels squared
            //MaxArea = 500,

            FilterByCircularity = false,
            //FilterByCircularity = true,
            //MinCircularity = 0.001f,

            FilterByConvexity = false,
            //FilterByConvexity = true,
            //MinConvexity = 0.001f,
            //MaxConvexity = 10,

            FilterByInertia = false,
            //FilterByInertia = true,
            //MinInertiaRatio = 0.001f,

            FilterByColor = false
                            //FilterByColor = true,
                            //BlobColor = 255 // to extract light blobs
        };

        // FIX: the detector wraps native resources; the original created one
        // every frame without disposing it, leaking native memory. Dispose it
        // when done. (Better still: cache a single detector in a field, since
        // the parameters never change.)
        using (var simpleBlobDetector = SimpleBlobDetector.Create(detectorParams))
        {
            var keyPoints = simpleBlobDetector.Detect(greyMat);

            Cv2.DrawKeypoints(
                image: greyMat,
                keypoints: keyPoints,
                outImage: mat,
                color: Scalar.FromRgb(255, 0, 0),
                flags: DrawMatchesFlags.DrawRichKeypoints);
        }

        CvConvert.MatToTexture2D(mat, ref tex);
        rawImage.texture = tex;
    }
Beispiel #7
0
    void CamUpdate()
    {
        // Grab the current webcam frame, run the Haar-cascade face detector
        // over it, then push the annotated frame out to the UI element.
        CvUtil.GetWebCamMat(webCamTexture, ref mat);

        mat = DetectFace(haarCascade, mat);

        CvConvert.MatToTexture2D(mat, ref tex);
        rawImage.texture = tex;
    }
Beispiel #8
0
    void CamUpdate()
    {
        CvUtil.GetWebCamMat(webCamTexture, ref mat);

        // Foreground mask from the MOG2 background subtractor (learning rate
        // 0.05), cleaned up with blur + threshold + dilation.
        mog2.Apply(mat, fg, 0.05f);
        Cv2.GaussianBlur(fg, fg, new Size(21, 21), 0);
        Cv2.Threshold(fg, fg, 30, 255, ThresholdTypes.Binary);
        Cv2.Dilate(fg, fg, nm, null, 2);

        // Convert the mask back to BGRA and brighten the moving regions by
        // adding it onto the original frame.
        Cv2.CvtColor(fg, fg, ColorConversionCodes.GRAY2BGRA);
        Cv2.Add(mat, fg, fg);

        CvConvert.MatToTexture2D(fg, ref tex);
        rawImage.texture = tex;
    }
Beispiel #9
0
    void CamUpdate()
    {
        CvUtil.GetWebCamMat(webCamTexture, ref mat);

        Cv2.CvtColor(mat, gray, ColorConversionCodes.RGBA2GRAY);

        // FAST corner detection (threshold 50, non-max suppression enabled),
        // then mark each detected corner with a filled red dot.
        KeyPoint[] keypoints = Cv2.FAST(gray, 50, true);
        for (int i = 0; i < keypoints.Length; i++)
        {
            mat.Circle(keypoints[i].Pt, 3, new Scalar(255, 0, 0, 255), -1, LineTypes.AntiAlias, 0);
        }

        CvConvert.MatToTexture2D(mat, ref tex);
        rawImage.texture = tex;
    }
Beispiel #10
0
    void CamUpdate()
    {
        CvUtil.GetWebCamMat(webCamTexture, ref mat);

        // Build the edge map: RGBA -> RGB -> grayscale -> Canny.
        Cv2.CvtColor(mat, mat, ColorConversionCodes.RGBA2RGB);
        Cv2.CvtColor(mat, gray, ColorConversionCodes.RGB2GRAY);
        Cv2.Canny(gray, gray, 90, 100);

        // Probabilistic Hough transform: rho step 1 px, theta step 1 degree,
        // vote threshold 90, minimum segment length 30, maximum gap 50.
        LineSegmentPoint[] segments = Cv2.HoughLinesP(gray, 1, Mathf.PI / 180, 90, 30, 50);

        // Overlay each detected segment on the frame.
        foreach (LineSegmentPoint segment in segments)
        {
            Cv2.Line(mat, segment.P1, segment.P2, colorScalar, 1, LineTypes.Link4);
        }

        CvConvert.MatToTexture2D(mat, ref tex);
        rawImage.texture = tex;
    }