Example #1
        private IEnumerator CalibrateCameraUsingImages()
        {
            string dirPath = Path.Combine(Application.streamingAssetsPath, calibrationImagesDirectory);

            string[] imageFiles = GetImageFilesInDirectory(dirPath);
            if (imageFiles.Length < 1)
            {
                yield break;
            }

            isCalibrating = true;
            markerTypeDropdown.interactable = dictionaryIdDropdown.interactable = squaresXDropdown.interactable = squaresYDropdown.interactable = false;

            Uri rootPath = new Uri(Application.streamingAssetsPath + System.IO.Path.AltDirectorySeparatorChar);

            foreach (var path in imageFiles)
            {
                Uri    fullPath     = new Uri(path);
                string relativePath = rootPath.MakeRelativeUri(fullPath).ToString();

                using (Mat gray = Imgcodecs.imread(Utils.getFilePath(relativePath), Imgcodecs.IMREAD_GRAYSCALE)) {
                    if (gray.width() != bgrMat.width() || gray.height() != bgrMat.height())
                    {
                        continue;
                    }

                    Mat    frameMat = gray.clone();
                    double e        = CaptureFrame(frameMat);
                    if (e > 0)
                    {
                        repErr = e;
                    }

                    DrawFrame(gray, bgrMat);
                    Imgproc.cvtColor(bgrMat, rgbaMat, Imgproc.COLOR_BGR2RGBA);

                    Utils.matToTexture2D(rgbaMat, texture);
                }
                yield return new WaitForSeconds(0.5f);
            }

            isCalibrating = false;
            markerTypeDropdown.interactable = dictionaryIdDropdown.interactable = squaresXDropdown.interactable = squaresYDropdown.interactable = true;
        }
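GetImageFilesInDirectory is not shown above; a minimal sketch, assuming it just filters one directory for common image extensions (the extension list is illustrative; requires System.IO and System.Linq):

        private static string[] GetImageFilesInDirectory(string dirPath)
        {
            if (!Directory.Exists(dirPath))
                return new string[0];

            // Keep only files with a typical image extension.
            string[] extensions = { ".jpg", ".jpeg", ".png", ".bmp" };
            return Directory.GetFiles(dirPath)
                            .Where(p => extensions.Contains(Path.GetExtension(p).ToLowerInvariant()))
                            .ToArray();
        }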
Example #2
    // Decode a Base64-encoded image string into a Mat.
    Mat str2mat(String s)
    {
        // Base64 -> raw byte buffer.
        byte[]     byteArr  = System.Convert.FromBase64String(s);
        List<byte> byteList = new List<byte>(byteArr);

        // Wrap the bytes in a Mat so imdecode can read them.
        Mat data = Converters.vector_char_to_Mat(byteList);

        // Decode the compressed image data (jpg/png/...) into an image Mat.
        Mat img = Imgcodecs.imdecode(data, Imgcodecs.IMREAD_UNCHANGED);

        return img;
    }
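For the reverse direction, a sketch of encoding a Mat back into a Base64 string via Imgcodecs.imencode (the helper name and the PNG format are illustrative):

    string mat2str(Mat img)
    {
        // Compress the Mat into an in-memory PNG buffer.
        MatOfByte buf = new MatOfByte();
        Imgcodecs.imencode(".png", img, buf);

        // Base64-encode the compressed bytes.
        byte[] bytes = buf.toArray();
        buf.Dispose();
        return System.Convert.ToBase64String(bytes);
    }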
Example #3
    private void Update()
    {
        if (!ct.begin_flag)
        {
            if (!ct.rec)
            {
                ct.Receive();
            }
        }
        if (ct.receiveMessage != null)
        {
            Debug.Log("Server:" + ct.receiveMessage);
            ct.receiveMessage = null;
        }

        /*if (ct.receive_joint != null)
         * {
         *  Debug.Log("Server:" + ct.receive_joint);
         *  ct.receive_joint = null;
         * }*/
        if (ct.recieve_flag != 0)
        {
            //Debug.Log("Server:" + ct.frame_size.ToString());
            if (ct.frame_img.Length != ct.dataLength_frame)
            {
                Debug.Log("Receive : " + ct.frame_img.Length.ToString() + ", len : " + ct.dataLength.ToString());
            }

            Mat mat_img = new Mat(1, ct.frame_img.Length, CvType.CV_8U);
            mat_img.put(0, 0, ct.frame_img);
            Mat frame_img_mat = Imgcodecs.imdecode(mat_img, 1);
            Imgproc.cvtColor(frame_img_mat, frame_img_mat, Imgproc.COLOR_BGR2RGB);
            //Debug.Log(frame_img_mat.size());
            byte[] image = new byte[252 * 448 * 3]; // expected frame: 448x252 pixels, 3 bytes per pixel (RGB)
            frame_img_mat.get(0, 0, image);

            var texture = rawImage.texture as Texture2D;
            texture.LoadRawTextureData(image); //TODO: Should be able to do this: texture.LoadRawTextureData(pointerToImage, 1280 * 720 * 4);
            texture.Apply();

            // Release the native Mats so they don't leak every frame.
            frame_img_mat.Dispose();
            mat_img.Dispose();

            ct.recieve_flag = 0;
            ct.rec          = false;
        }
    }
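LoadRawTextureData succeeds only when the target texture's size and format exactly match the raw buffer. A minimal sketch of creating a matching texture once up front, assuming the 448x252 RGB frame implied by the byte[252 * 448 * 3] buffer above (only needed if the class doesn't already create it elsewhere):

    void Start()
    {
        // 448x252 pixels, 3 bytes per pixel -> TextureFormat.RGB24, no mipmaps.
        rawImage.texture = new Texture2D(448, 252, TextureFormat.RGB24, false);
    }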
Example #4
    void Start()
    {
        src1 = Imgcodecs.imread(Application.dataPath + "/Textures/p1.jpg", 1);
        src2 = Imgcodecs.imread(Application.dataPath + "/Textures/p2.jpg", 1);
        Imgproc.cvtColor(src1, src1, Imgproc.COLOR_BGR2RGB);
        Imgproc.cvtColor(src2, src2, Imgproc.COLOR_BGR2RGB);
        images.Add(src1);
        images.Add(src2);

        //panorama output
        Mat pano = new Mat();
        //stitcher.hpp is missing, so the stitching step that would fill pano (and dstMat below) is absent

        Texture2D t2d = new Texture2D(dstMat.width(), dstMat.height());
        Sprite    sp  = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);

        m_showImage.sprite         = sp;
        m_showImage.preserveAspect = true;
        Utils.matToTexture2D(dstMat, t2d);
    }
Example #5
    void Start()
    {
        srcMat = Imgcodecs.imread(Application.dataPath + "/Textures/sample.jpg");
        Imgproc.cvtColor(srcMat, srcMat, Imgproc.COLOR_BGR2RGB);

        //apply each of OpenCV's 13 built-in colormaps (COLORMAP_* constants 0-12)
        for (int i = 0; i < 13; i++)
        {
            Mat dstMat = new Mat();

            //e.g. Imgproc.applyColorMap(srcMat, dstMat, Imgproc.COLORMAP_JET);
            Imgproc.applyColorMap(srcMat, dstMat, i);

            Texture2D t2d = new Texture2D(dstMat.width(), dstMat.height());
            Sprite    sp  = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);
            m_imageList[i].sprite         = sp;
            m_imageList[i].preserveAspect = true;
            Utils.matToTexture2D(dstMat, t2d);
        }
    }
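The loop indexes the colormaps by raw integer 0-12. A sketch using the named Imgproc constants instead (the same 13 maps, in OpenCV's defined order):

    static readonly int[] COLORMAPS =
    {
        Imgproc.COLORMAP_AUTUMN, Imgproc.COLORMAP_BONE,    Imgproc.COLORMAP_JET,
        Imgproc.COLORMAP_WINTER, Imgproc.COLORMAP_RAINBOW, Imgproc.COLORMAP_OCEAN,
        Imgproc.COLORMAP_SUMMER, Imgproc.COLORMAP_SPRING,  Imgproc.COLORMAP_COOL,
        Imgproc.COLORMAP_HSV,    Imgproc.COLORMAP_PINK,    Imgproc.COLORMAP_HOT,
        Imgproc.COLORMAP_PARULA
    };

    // Usage inside the loop: Imgproc.applyColorMap(srcMat, dstMat, COLORMAPS[i]);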
Example #6
        public void takePhoto()
        {
            snapshotCount = 40;
            Debug.Log("TAKE PHOTO");
            Texture2D tex = new Texture2D(Screen.width, Screen.height, TextureFormat.RGBA32, false, true);

            drawFlag = true;
            InvokeRepeating("pauseForPhoto", 0.001f, 0.001f);

            Imgproc.resize(rgbMat, textureInstance, new Size(Screen.width, Screen.height));
            Debug.Log("texture is" + textureInstance.width() + ", " + textureInstance.height());
            Debug.Log("tex is" + tex.width + ", " + tex.height);
            Utils.fastMatToTexture2D(textureInstance, tex);

            //write to singleton
            ImageManager.instance.photo = tex;

            //write image (imwrite interprets the mat as BGR, so an RGB mat is saved with red/blue swapped)
            Imgcodecs.imwrite("Assets/snapshot.jpeg", textureInstance);
        }
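Example #7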
        /// <summary>
        /// Raises the save button event.
        /// </summary>
        public void OnSaveButton()
        {
            if (patternRawImage.texture != null)
            {
                Texture2D patternTexture = (Texture2D)patternRawImage.texture;
                Mat       patternMat     = new Mat(patternRect.size(), CvType.CV_8UC3);
                Utils.texture2DToMat(patternTexture, patternMat);
                Imgproc.cvtColor(patternMat, patternMat, Imgproc.COLOR_RGB2BGR);

                string savePath = Application.persistentDataPath;
                Debug.Log("savePath " + savePath);

                Imgcodecs.imwrite(savePath + "/patternImg.jpg", patternMat);

                #if UNITY_5_3 || UNITY_5_3_OR_NEWER
                SceneManager.LoadScene("WebCamTextureMarkerLessARExample");
                #else
                Application.LoadLevel("WebCamTextureMarkerLessARExample");
                #endif
            }
        }
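To load the saved pattern back later, a minimal sketch (hypothetical helper; the path mirrors the save code above, and imread's BGR output is converted back to RGB):

        public Texture2D LoadPatternImg()
        {
            string loadPath = Path.Combine(Application.persistentDataPath, "patternImg.jpg");
            Mat patternMat = Imgcodecs.imread(loadPath);
            if (patternMat.empty())
                return null;

            // imread returns BGR; convert back to RGB for display in Unity.
            Imgproc.cvtColor(patternMat, patternMat, Imgproc.COLOR_BGR2RGB);

            Texture2D tex = new Texture2D(patternMat.cols(), patternMat.rows(), TextureFormat.RGB24, false);
            Utils.matToTexture2D(patternMat, tex);
            patternMat.Dispose();
            return tex;
        }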
Example #8
    /// <summary>
    /// Raises the webcam texture initialized event.
    /// </summary>
    private void OnCVInited()
    {
        if (colors == null || colors.Length != webCamTexture.width * webCamTexture.height)
        {
            colors = new Color32[webCamTexture.width * webCamTexture.height];
        }
        if (texture == null || texture.width != webCamTexture.width || texture.height != webCamTexture.height)
        {
            texture = new Texture2D(webCamTexture.width, webCamTexture.height, TextureFormat.RGBA32, false);
        }

        rgbaMat = new Mat(webCamTexture.height, webCamTexture.width, CvType.CV_8UC4);

        // Set up the output destination here if needed. See: OpenCVForUnityExample.
        outputScreenQuad.setupScreenQuadAndCamera(webCamTexture.height, webCamTexture.width, CvType.CV_8UC3);

        Mat matOrg = Imgcodecs.imread(Utils.getFilePath("hand1.png"));

        mat = new Mat(matOrg.size(), CvType.CV_8UC3);
        // imread loads a 3-channel BGR image by default, so convert BGR (not BGRA) to RGB.
        Imgproc.cvtColor(matOrg, mat, Imgproc.COLOR_BGR2RGB);
    }
Example #9
    void Start()
    {
        string path = Application.dataPath + "/Textures/sample.jpg";

        srcImage = Imgcodecs.imread(path, 1);

        t2d = new Texture2D(srcImage.width(), srcImage.height());
        Utils.matToTexture2D(srcImage, t2d);
        undeal.texture = t2d;

        //check that the file actually loaded
        //Debug.Log(File.Exists(path));

        //split/merge operate on color channels
        //split separates each channel of a multi-channel image into its own Mat.
        Core.split(srcImage, channels);
        Debug.Log(channels.Count); //jpg/png images split into 3 channels (BGR order, since imread loads BGR)

        //channels[0] holds the blue channel data (imread loads BGR, not RGB)
        imageBlue = channels[0];
        //channels[1] holds the green channel data
        imageGreen = channels[1];
        //channels[2] holds the red channel data
        imageRed = channels[2];

        //swap the first and third channels so the merged image below is RGB instead of BGR
        channels[0] = imageRed;
        channels[2] = imageBlue;

        //对拆分的通道数据合并
        //merge 与split 相反。可以将多个单通道图像合成一幅多通道图像。
        mergeImage = new Mat();
        Core.merge(channels, mergeImage);

        t2d            = new Texture2D(mergeImage.width(), mergeImage.height());
        output.texture = t2d;
        Utils.matToTexture2D(mergeImage, t2d);
    }
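If only one channel is needed, Core.extractChannel avoids splitting the whole image; a minimal sketch (index 2 is the red channel of a BGR mat loaded by imread):

        Mat red = new Mat();
        Core.extractChannel(srcImage, red, 2);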
Example #10
    void Start()
    {
        srcMat  = Imgcodecs.imread(Application.dataPath + "/Textures/feature.jpg", 1);
        grayMat = new Mat();
        Imgproc.cvtColor(srcMat, grayMat, Imgproc.COLOR_BGR2GRAY); // srcMat is 3-channel BGR, so RGBA2GRAY would throw

        //Pentagons can be mistaken for circles; blur first to improve accuracy.
        Imgproc.GaussianBlur(grayMat, grayMat, new Size(7, 7), 2, 2);

        Mat circles = new Mat();

        //Hough circle transform
        Imgproc.HoughCircles(grayMat, circles, Imgproc.CV_HOUGH_GRADIENT, 2, 10, 160, 50, 10, 40);
        //Debug.Log(circles);

        //circle center coordinates
        Point pt = new Point();

        for (int i = 0; i < circles.cols(); i++)
        {
            double[] data = circles.get(0, i);
            pt.x = data[0];
            pt.y = data[1];
            double radius = data[2];
            //draw the center point
            Imgproc.circle(srcMat, pt, 3, new Scalar(255, 255, 0), -1, 8, 0);
            //draw the circle outline
            Imgproc.circle(srcMat, pt, (int)radius, new Scalar(255, 0, 0, 255), 5);
        }

        //draw text on the Mat
        Imgproc.putText(srcMat, "W:" + srcMat.width() + " H:" + srcMat.height(), new Point(5, srcMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false);

        Texture2D t2d = new Texture2D(srcMat.width(), srcMat.height());
        Sprite    sp  = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);

        m_showImage.sprite         = sp;
        m_showImage.preserveAspect = true;
        Utils.matToTexture2D(srcMat, t2d);
    }
Example #11
    void Start()
    {
        srcMat = Imgcodecs.imread(Application.dataPath + "/Textures/palace.jpg");
        Imgproc.cvtColor(srcMat, srcMat, Imgproc.COLOR_BGR2RGB);

        //dstMat = srcMat.clone();
        //dstMat = removeBackground(srcMat);
        //dstMat = MyThresholdHsv(srcMat);
        //dstMat = myGrabCut(srcMat, new Point(50d, 0d), new Point(300d, 250d));
        //dstMat = MyFindLargestRectangle(srcMat);
        //dstMat = MyWatershed(srcMat);
        //dstMat = MyCanny(srcMat, 100);
        dstMat = MyFloodFill(srcMat);

        Texture2D t2d = new Texture2D(dstMat.width(), dstMat.height());

        Utils.matToTexture2D(dstMat, t2d);
        Sprite sp = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);

        m_dstImage.sprite         = sp;
        m_dstImage.preserveAspect = true;
    }
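MyFloodFill and the other My* helpers are not shown. A minimal sketch of what a floodFill-based version might look like (the seed point and fill color are illustrative):

    Mat MyFloodFill(Mat src)
    {
        Mat dst = src.clone();

        // The mask must be 2 pixels larger than the image in each dimension.
        Mat mask = new Mat(dst.rows() + 2, dst.cols() + 2, CvType.CV_8UC1, new Scalar(0));

        // Fill outward from the top-left corner with a solid color.
        Imgproc.floodFill(dst, mask, new Point(0, 0), new Scalar(0, 255, 0));

        mask.Dispose();
        return dst;
    }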
Example #12
    // Update is called once per frame
    void Update()
    {
        if (frame3D.snapped)
        {
            Debug.Log("writing color to " + filePath + ".png");

            Mat colorFlipped = new Mat();
            Core.flip(frame3D.waterMarkedColor, colorFlipped, 0);
            Mat bgr = new Mat();
            // imwrite expects BGR channel order, so convert from the RGB frame first.
            Imgproc.cvtColor(colorFlipped, bgr, Imgproc.COLOR_RGB2BGR);
            Imgcodecs.imwrite(filePath + ".png", bgr);
            Mat depthFlipped = new Mat();
            Core.flip(frame3D.waterMarkedDepth, depthFlipped, 0);
            Debug.Log("writing depth to " + filePath + "_depth.png");
            Imgcodecs.imwrite(filePath + "_depth.png", depthFlipped);

            // Load the directory where the images are saved
            Debug.Log("explorer.exe" + " /n, /e, " + outpath.Replace('/', '\\'));
            System.Diagnostics.Process.Start("explorer.exe", "/n, /e, " + outpath.Replace('/', '\\'));
            ShowWindow(GetActiveWindow(), 2);
        }
    }
Example #13
        private void SaveMarkerImg()
        {
            // save the markerImg.
            string saveDirectoryPath = Path.Combine(Application.persistentDataPath, "ArUcoCreateMarkerExample");
            string savePath          = "";

            #if UNITY_WEBGL && !UNITY_EDITOR
            string   format            = "jpg";
            MatOfInt compressionParams = new MatOfInt(Imgcodecs.CV_IMWRITE_JPEG_QUALITY, 100);
            #else
            string   format            = "png";
            MatOfInt compressionParams = new MatOfInt(Imgcodecs.CV_IMWRITE_PNG_COMPRESSION, 0);
            #endif
            switch (markerType)
            {
            default:
            case MarkerType.CanonicalMarker:
                savePath = Path.Combine(saveDirectoryPath, "CanonicalMarker-d" + (int)dictionaryId + "-i" + (int)markerId + "-sp" + markerSize + "-bb" + borderBits + "." + format);
                break;

            case MarkerType.GridBoard:
                savePath = Path.Combine(saveDirectoryPath, "GridBoard-mx" + gridBoradMarkersX + "-my" + gridBoradMarkersY + "-d" + (int)dictionaryId + "-os" + markerSize + "-bb" + borderBits + "." + format);
                break;

            case MarkerType.ChArUcoBoard:
                savePath = Path.Combine(saveDirectoryPath, "ChArUcoBoard-mx" + chArUcoBoradMarkersX + "-my" + chArUcoBoradMarkersY + "-d" + (int)dictionaryId + "-os" + markerSize + "-bb" + borderBits + "." + format);
                break;
            }

            if (!Directory.Exists(saveDirectoryPath))
            {
                Directory.CreateDirectory(saveDirectoryPath);
            }

            Imgcodecs.imwrite(savePath, markerImg, compressionParams);

            savePathInputField.text = savePath;
            Debug.Log("savePath: " + savePath);
        }
Example #14
        public Mat Extract(string path)
        {
            //if true, the error log of the native OpenCV side will be displayed on the Unity Editor Console.
            Utils.setDebugMode(true);

            var embedder = Dnn.readNetFromTorch(model_filepath);

            Mat img = Imgcodecs.imread(Utils.getFilePath("faces/" + path));

            if (img.empty())
            {
                Debug.LogError("image is not loaded");
                return(img);
            }

            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);
            var roi          = GetBB(img);
            Mat cropped_face = img.submat((int)roi.y, (int)roi.y + (int)roi.height,
                                          (int)roi.x, (int)roi.width + (int)roi.x);
            var faceBlob = Dnn.blobFromImage(cropped_face, scalefactor, new Size(inpWidth, inpHeight), new Scalar(0, 0, 0), true, false);

            embedder.setInput(faceBlob);
            var netOut = embedder.forward();

            if (gameObject.GetComponent <Renderer>() != null && displayBB)
            {
                GenericUtils.AdjustImageScale(cropped_face, this.gameObject);
                Texture2D texture = new Texture2D(cropped_face.cols(), cropped_face.rows(), TextureFormat.RGBA32, false);
                Utils.matToTexture2D(cropped_face, texture);
                gameObject.GetComponent <Renderer>().material.mainTexture = texture;
            }

            //_embedder.Dispose();
            //cropped_face.Dispose();
            img.Dispose();

            return(netOut);
        }
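GetBB is not shown; if it can return a box extending past the image border, submat will throw. A small guard sketch (hypothetical helper; requires using System):

        // Clamp a bounding box to the image bounds so submat never goes out of range.
        static void ClampRoi(ref int x, ref int y, ref int w, ref int h, Mat img)
        {
            x = Math.Max(0, x);
            y = Math.Max(0, y);
            w = Math.Min(w, img.cols() - x);
            h = Math.Min(h, img.rows() - y);
        }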
Example #15
        //Saves a picture of each detected face as a jpg in the project's working directory, used when training the Eigen Face Recognizer.

        public bool outRectToFile(Mat frame, ref int fileCounter)
        {
            int counter = 0;

            foreach (OpenCVForUnity.Rect face in faceSquares.toArray())
            {
                String myFile = "face" + fileCounter + ".jpg";
                fileCounter++;
                counter++;
                // Crop the face region out of the frame and write it to disk.
                Mat localCpy = new Mat(frame, face);
                Imgcodecs.imwrite(myFile, localCpy);
                localCpy.Dispose();
            }
            return counter != 0;
        }
Example #16
 public void Save(string Filename)
 {
     // The output format is inferred from the file extension.
     Imgcodecs.imwrite(Filename, Mat);
 }
Example #17
    void Run(string jpg_path)
    {
        Utils.setDebugMode(true);

        Mat img = Imgcodecs.imread(jpg_path);

        gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);

        float imageWidth  = img.width();
        float imageHeight = img.height();

        float widthScale  = (float)Screen.width / imageWidth;
        float heightScale = (float)Screen.height / imageHeight;

        if (widthScale < heightScale)
        {
            Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
        }
        else
        {
            Camera.main.orthographicSize = imageHeight / 2;
        }


        Net net = null;

        if (!string.IsNullOrEmpty(graph_filepath))
        {
            net = Dnn.readNetFromTensorflow(graph_filepath);
        }

        if (net == null)
        {
            Imgproc.putText(img, "Model file is not loaded.", new Point(5, img.rows() - 30), Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
        }
        else
        {
            float frameWidth  = img.cols();
            float frameHeight = img.rows();
            Mat   input       = Dnn.blobFromImage(img, 1.0, new Size(inWidth, inHeight), new Scalar(0, 0, 0), false, false);
            net.setInput(input, "image");
            Mat output = net.forward("Openpose/concat_stage7");
            output = output.reshape(1, 57);

            List <Point> points = new List <Point>();
            for (int i = 0; i < BODY_PARTS.Count; i++)
            {
                Mat heatMap = output.row(i).reshape(1, 46);
                Core.MinMaxLocResult result = Core.minMaxLoc(heatMap);
                heatMap.Dispose();

                double x = (frameWidth * result.maxLoc.x) / 46;
                double y = (frameHeight * result.maxLoc.y) / 46;

                if (result.maxVal > 0.3)
                {
                    points.Add(new Point(x, y));
                }
                else
                {
                    points.Add(null);
                }
            }

            for (int i = 0; i < POSE_PAIRS.GetLength(0); i++)
            {
                string partFrom = POSE_PAIRS[i, 0];
                string partTo   = POSE_PAIRS[i, 1];

                int idFrom = BODY_PARTS[partFrom];
                int idTo   = BODY_PARTS[partTo];

                if (points[idFrom] != null && points[idTo] != null)
                {
                    Imgproc.line(img, points[idFrom], points[idTo], new Scalar(0, 255, 0), 3);
                    Imgproc.ellipse(img, points[idFrom], new Size(3, 3), 0, 0, 360, new Scalar(0, 0, 255), Core.FILLED);
                    Imgproc.ellipse(img, points[idTo], new Size(3, 3), 0, 0, 360, new Scalar(0, 0, 255), Core.FILLED);
                }
            }
        }

        Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);
        Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

        Utils.matToTexture2D(img, texture);
        gameObject.GetComponent <Renderer>().material.mainTexture = texture;
        Utils.setDebugMode(false);
    }
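The BODY_PARTS and POSE_PAIRS fields referenced above are not shown; for this COCO OpenPose graph they presumably mirror the tables in OpenCV's openpose sample. A sketch (requires System.Collections.Generic):

    readonly Dictionary<string, int> BODY_PARTS = new Dictionary<string, int>()
    {
        { "Nose", 0 }, { "Neck", 1 }, { "RShoulder", 2 }, { "RElbow", 3 }, { "RWrist", 4 },
        { "LShoulder", 5 }, { "LElbow", 6 }, { "LWrist", 7 }, { "RHip", 8 }, { "RKnee", 9 },
        { "RAnkle", 10 }, { "LHip", 11 }, { "LKnee", 12 }, { "LAnkle", 13 }, { "REye", 14 },
        { "LEye", 15 }, { "REar", 16 }, { "LEar", 17 }, { "Background", 18 }
    };

    readonly string[,] POSE_PAIRS = new string[,]
    {
        { "Neck", "RShoulder" }, { "Neck", "LShoulder" }, { "RShoulder", "RElbow" },
        { "RElbow", "RWrist" }, { "LShoulder", "LElbow" }, { "LElbow", "LWrist" },
        { "Neck", "RHip" }, { "RHip", "RKnee" }, { "RKnee", "RAnkle" },
        { "Neck", "LHip" }, { "LHip", "LKnee" }, { "LKnee", "LAnkle" },
        { "Neck", "Nose" }, { "Nose", "REye" }, { "REye", "REar" },
        { "Nose", "LEye" }, { "LEye", "LEar" }
    };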
Example #18
        private void Run()
        {
            Mat src = Imgcodecs.imread(pca_test1_jpg_filepath);

            #if !UNITY_WSA_10_0
            if (src.empty())
            {
                Debug.LogError("pca_test1.jpg is not loaded. Please copy from “OpenCVForUnity/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
            }
            #endif

            Debug.Log("src.ToString() " + src.ToString());

            // Convert image to grayscale
            Mat gray = new Mat();
            Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);
            // Convert image to binary
            Mat bw = new Mat();
            Imgproc.threshold(gray, bw, 50, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
            // Find all the contours in the thresholded image

            Mat hierarchy = new Mat();
            List <MatOfPoint> contours = new List <MatOfPoint> ();
            Imgproc.findContours(bw, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE);

            for (int i = 0; i < contours.Count; ++i)
            {
                // Calculate the area of each contour
                double area = Imgproc.contourArea(contours [i]);
                // Ignore contours that are too small or too large
                if (area < 1e2 || 1e5 < area)
                {
                    continue;
                }
                // Draw each contour only for visualisation purposes
                Imgproc.drawContours(src, contours, i, new Scalar(0, 0, 255), 2);

                //Construct a buffer used by the pca analysis
                List <Point> pts      = contours [i].toList();
                int          sz       = pts.Count;
                Mat          data_pts = new Mat(sz, 2, CvType.CV_64FC1);
                for (int p = 0; p < data_pts.rows(); ++p)
                {
                    data_pts.put(p, 0, pts [p].x);
                    data_pts.put(p, 1, pts [p].y);
                }

                Mat mean         = new Mat();
                Mat eigenvectors = new Mat();
                Core.PCACompute(data_pts, mean, eigenvectors, 1);
                Debug.Log("mean.dump() " + mean.dump());
                Debug.Log("eigenvectors.dump() " + eigenvectors.dump());

                Point cntr = new Point(mean.get(0, 0) [0], mean.get(0, 1) [0]);
                Point vec  = new Point(eigenvectors.get(0, 0) [0], eigenvectors.get(0, 1) [0]);

                drawAxis(src, cntr, vec, new Scalar(255, 255, 0), 150);

                data_pts.Dispose();
                mean.Dispose();
                eigenvectors.Dispose();
            }


            Imgproc.cvtColor(src, src, Imgproc.COLOR_BGR2RGB);

            Texture2D texture = new Texture2D(src.cols(), src.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(src, texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;
        }
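Example #19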
        private void Run()
        {
            //if true, the error log of the native OpenCV side will be displayed on the Unity Editor Console.
            Utils.setDebugMode(true);


            Mat img = Imgcodecs.imread(scenetext01_jpg_filepath);

            #if !UNITY_WSA_10_0
            if (img.empty())
            {
                Debug.LogError("text/scenetext01.jpg is not loaded.Please copy from “OpenCVForUnity/StreamingAssets/text/” to “Assets/StreamingAssets/” folder. ");
            }
            #endif

            //# for visualization
            Mat vis = new Mat();
            img.copyTo(vis);
            Imgproc.cvtColor(vis, vis, Imgproc.COLOR_BGR2RGB);


            //# Extract channels to be processed individually
            List <Mat> channels = new List <Mat> ();
            Text.computeNMChannels(img, channels);

            //# Append negative channels to detect ER- (bright regions over dark background)
            int cn = channels.Count;
            for (int i = 0; i < cn; i++)
            {
                // Invert the 8-bit channel (255 - value); relies on OpenCVForUnity's Mat operator overloads.
                channels.Add(new Scalar(255) - channels [i]);
            }

            //# Apply the default cascade classifier to each independent channel (could be done in parallel)
            Debug.Log("Extracting Class Specific Extremal Regions from " + channels.Count + " channels ...");
            Debug.Log("    (...) this may take a while (...)");
            foreach (var channel in channels)
            {
                ERFilter er1 = Text.createERFilterNM1(trained_classifierNM1_xml_filepath, 16, 0.00015f, 0.13f, 0.2f, true, 0.1f);

                ERFilter er2 = Text.createERFilterNM2(trained_classifierNM2_xml_filepath, 0.5f);

                List <MatOfPoint> regions = new List <MatOfPoint> ();
                Text.detectRegions(channel, er1, er2, regions);

                MatOfRect matOfRects = new MatOfRect();
                Text.erGrouping(img, channel, regions, matOfRects);
//                Text.erGrouping (img, channel, regions, matOfRects, Text.ERGROUPING_ORIENTATION_ANY, Utils.getFilePath ("text/trained_classifier_erGrouping.xml"), 0.5f);

                List <OpenCVForUnity.Rect> rects = matOfRects.toList();

                //#Visualization
                foreach (var rect in rects)
                {
                    Imgproc.rectangle(vis, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(255, 0, 0), 2);
                    Imgproc.rectangle(vis, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(255, 255, 255), 1);
                }
            }

            Texture2D texture = new Texture2D(vis.cols(), vis.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(vis, texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;


            Utils.setDebugMode(false);
        }
Example #20
        // Use this for initialization
        void Run()
        {
            //if true, the error log of the native OpenCV side will be displayed on the Unity Editor Console.
            Utils.setDebugMode(true);

            Mat img = Imgcodecs.imread(image_filepath);

            #if !UNITY_WSA_10_0
            if (img.empty())
            {
                Debug.LogError("dnn/COCO_val2014_000000000589.jpg is not loaded.The image file can be downloaded here: \"https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/examples/media/COCO_val2014_000000000589.jpg\" folder. ");
                img = new Mat(368, 368, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }
            #endif


            //Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = img.width();
            float imageHeight = img.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;
            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Net net = null;

            if (string.IsNullOrEmpty(caffemodel_filepath) || string.IsNullOrEmpty(prototxt_filepath))
            {
                Debug.LogError("model file is not loaded. The model and prototxt file can be downloaded here: \"http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel\",\"https://github.com/opencv/opencv_extra/blob/master/testdata/dnn/openpose_pose_mpi_faster_4_stages.prototxt\". Please copy to “Assets/StreamingAssets/dnn/” folder. ");
            }
            else
            {
                net = Dnn.readNetFromCaffe(prototxt_filepath, caffemodel_filepath);

                //Intel's Deep Learning Inference Engine backend is supported on Windows 64bit platform only. Please refer to ReadMe.pdf for the setup procedure.
                //net.setPreferableBackend (Dnn.DNN_BACKEND_INFERENCE_ENGINE);
            }

            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                float frameWidth  = img.cols();
                float frameHeight = img.rows();

                Mat input = Dnn.blobFromImage(img, 1.0 / 255, new Size(inWidth, inHeight), new Scalar(0, 0, 0), false, false);

                net.setInput(input);

//                TickMeter tm = new TickMeter ();
//                tm.start ();

                Mat output = net.forward();

//                tm.stop ();
//                Debug.Log ("Inference time, ms: " + tm.getTimeMilli ());


                output = output.reshape(1, 16);


                float[]      data   = new float[46 * 46];
                List <Point> points = new List <Point> ();
                for (int i = 0; i < BODY_PARTS.Count; i++)
                {
                    output.get(i, 0, data);

                    Mat heatMap = new Mat(1, data.Length, CvType.CV_32FC1);
                    heatMap.put(0, 0, data);


                    //Originally, we try to find all the local maximums. To simplify a sample
                    //we just find a global one. However only a single pose at the same time
                    //could be detected this way.
                    Core.MinMaxLocResult result = Core.minMaxLoc(heatMap);

                    heatMap.Dispose();


                    double x = (frameWidth * (result.maxLoc.x % 46)) / 46;
                    double y = (frameHeight * (result.maxLoc.x / 46)) / 46;

                    if (result.maxVal > 0.1)
                    {
                        points.Add(new Point(x, y));
                    }
                    else
                    {
                        points.Add(null);
                    }
                }

                for (int i = 0; i < POSE_PAIRS.GetLength(0); i++)
                {
                    string partFrom = POSE_PAIRS [i, 0];
                    string partTo   = POSE_PAIRS [i, 1];

                    int idFrom = BODY_PARTS [partFrom];
                    int idTo   = BODY_PARTS [partTo];

                    if (points [idFrom] != null && points [idTo] != null)
                    {
                        Imgproc.line(img, points [idFrom], points [idTo], new Scalar(0, 255, 0), 3);
                        Imgproc.ellipse(img, points [idFrom], new Size(3, 3), 0, 0, 360, new Scalar(0, 0, 255), Core.FILLED);
                        Imgproc.ellipse(img, points [idTo], new Size(3, 3), 0, 0, 360, new Scalar(0, 0, 255), Core.FILLED);
                    }
                }



                MatOfDouble timings = new MatOfDouble();
                long        t       = net.getPerfProfile(timings);
                Debug.Log("t: " + t);
                Debug.Log("timings.dump(): " + timings.dump());

                double freq = Core.getTickFrequency() / 1000;
                Debug.Log("freq: " + freq);

                Imgproc.putText(img, (t / freq) + "ms", new Point(10, img.height() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.6, new Scalar(0, 0, 255), 2);
            }

            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);


            Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(img, texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;


            Utils.setDebugMode(false);
        }
Example #21
        // Use this for initialization
        void Run()
        {
            //if true, the error log of the native OpenCV side will be displayed on the Unity Editor Console.
            Utils.setDebugMode(true);


            classNames = readClassNames(classes_filepath);
            if (classNames == null)
            {
                Debug.LogError(classes_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
            }

            classColors = new List <Scalar> ();
            for (int i = 0; i < classNames.Count; i++)
            {
                classColors.Add(new Scalar(UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255)));
            }


            Mat img = Imgcodecs.imread(image_filepath);

            if (img.empty())
            {
                Debug.LogError(image_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
                img = new Mat(height, width, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }



            //Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = img.width();
            float imageHeight = img.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;

            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Net net = null;

            if (string.IsNullOrEmpty(model_filepath) || string.IsNullOrEmpty(config_filepath))
            {
                Debug.LogError(model_filepath + " or " + config_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
            }
            else
            {
                net = Dnn.readNetFromTensorflow(model_filepath, config_filepath);
            }

            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                float frameWidth  = img.cols();
                float frameHeight = img.rows();

                Mat blob = Dnn.blobFromImage(img, 1.0, new Size(width, height), new Scalar(0, 0, 0), true, false);


                net.setInput(blob);



                List <Mat>    outputBlobs = new List <Mat> ();
                List <string> outputName  = new List <string> ();
                outputName.Add("detection_out_final");
                outputName.Add("detection_masks");

                net.forward(outputBlobs, outputName);

                Mat boxes = outputBlobs [0];
                Mat masks = outputBlobs [1];


                //int numClasses = masks.size (1);
                //int numDetections = boxes.size (2);


                Debug.Log("boxes.size(0) " + boxes.size(0));
                Debug.Log("boxes.size(1) " + boxes.size(1));
                Debug.Log("boxes.size(2) " + boxes.size(2));
                Debug.Log("boxes.size(3) " + boxes.size(3));
                Debug.Log("masks.size(0) " + masks.size(0));
                Debug.Log("masks.size(1) " + masks.size(1));
                Debug.Log("masks.size(2) " + masks.size(2));
                Debug.Log("masks.size(3) " + masks.size(3));


                //reshape from 4D to two 2D.
                float[] data = new float[boxes.size(3)];
                boxes = boxes.reshape(1, (int)boxes.total() / boxes.size(3));
//              Debug.Log ("boxes.ToString() " + boxes.ToString ());

                //reshape from 4D to two 2D.
                float[] mask_data = new float[masks.size(2) * masks.size(3)];
                masks = masks.reshape(1, (int)masks.total() / (masks.size(2) * masks.size(3)));
//              Debug.Log ("masks.ToString(): " + masks.ToString ());


                for (int i = 0; i < boxes.rows(); i++)
                {
                    boxes.get(i, 0, data);

                    float score = data [2];

                    if (score > thr)
                    {
                        int class_id = (int)(data [1]);


                        float left   = (float)(data [3] * frameWidth);
                        float top    = (float)(data [4] * frameHeight);
                        float right  = (float)(data [5] * frameWidth);
                        float bottom = (float)(data [6] * frameHeight);

                        left   = (int)Mathf.Max(0, Mathf.Min(left, frameWidth - 1));
                        top    = (int)Mathf.Max(0, Mathf.Min(top, frameHeight - 1));
                        right  = (int)Mathf.Max(0, Mathf.Min(right, frameWidth - 1));
                        bottom = (int)Mathf.Max(0, Mathf.Min(bottom, frameHeight - 1));

                        Debug.Log("class_id: " + class_id + " class_name " + classNames [class_id] + " left: " + left + " top: " + top + " right: " + right + " bottom: " + bottom);



                        //draw masks

                        masks.get((i * 90) + class_id, 0, mask_data); // 90 = number of classes in this Mask R-CNN model

                        Mat objectMask = new Mat(15, 15, CvType.CV_32F);
                        Utils.copyToMat <float> (mask_data, objectMask);

                        Imgproc.resize(objectMask, objectMask, new Size(right - left + 1, bottom - top + 1));

                        Core.compare(objectMask, new Scalar(mask_thr), objectMask, Core.CMP_GT);
//                        Debug.Log ("objectMask.ToString(): " + objectMask.ToString ());
//                        Debug.Log ("objectMask.dump(): " + objectMask.dump ());


                        Mat roi = new Mat(img, new OpenCVForUnity.CoreModule.Rect(new Point(left, top), new Point(right + 1, bottom + 1)));

                        Mat coloredRoi = new Mat(roi.size(), CvType.CV_8UC3);

                        Imgproc.rectangle(coloredRoi, new Point(0, 0), new Point(coloredRoi.width(), coloredRoi.height()), classColors [class_id], -1);

                        Core.addWeighted(coloredRoi, 0.7, roi, 0.3, 0, coloredRoi);
//                        Debug.Log ("coloredRoi.ToString(): " + coloredRoi.ToString ());
//                        Debug.Log ("roi.ToString(): " + roi.ToString ());

                        coloredRoi.copyTo(roi, objectMask);
                        coloredRoi.Dispose();

                        objectMask.Dispose();



                        //draw boxes

                        Imgproc.rectangle(img, new Point(left, top), new Point(right, bottom), new Scalar(0, 255, 0), 2);

                        string label = score.ToString();
                        if (classNames != null && classNames.Count != 0)
                        {
                            if (class_id < (int)classNames.Count)
                            {
                                label = classNames [class_id] + ": " + label;
                            }
                        }

                        int[] baseLine  = new int[1];
                        Size  labelSize = Imgproc.getTextSize(label, Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);

                        top = Mathf.Max(top, (int)labelSize.height);
                        Imgproc.rectangle(img, new Point(left, top - labelSize.height),
                                          new Point(left + labelSize.width, top + baseLine [0]), Scalar.all(255), Core.FILLED);
                        Imgproc.putText(img, label, new Point(left, top), Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0, 255));
                    }
                }

                boxes.Dispose();
                masks.Dispose();
                blob.Dispose();
            }

            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);


            Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(img, texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;



            net.Dispose();


            Utils.setDebugMode(false);
        }
Example #22
        // Use this for initialization
        void Run()
        {
            //if true, the error log of the native OpenCV side will be displayed on the Unity Editor Console.
            Utils.setDebugMode(true);

            Mat img = Imgcodecs.imread(image_filepath, Imgcodecs.IMREAD_COLOR);

            if (img.empty())
            {
                Debug.LogError(image_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
                img = new Mat(368, 368, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }

            // Allocate the output after the empty-check so its size matches the fallback image too.
            Mat colorized = new Mat(img.rows(), img.cols(), img.type());

            Net net = null;

            if (string.IsNullOrEmpty(caffemodel_filepath) || string.IsNullOrEmpty(prototxt_filepath))
            {
                Debug.LogError(caffemodel_filepath + " or " + prototxt_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
            }
            else
            {
                net = Dnn.readNetFromCaffe(prototxt_filepath, caffemodel_filepath);
            }

            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                // setup additional layers:
                int[] sz          = new int[] { 2, 313, 1, 1 };
                Mat   pts_in_hull = new Mat(sz, CvType.CV_32F);
                pts_in_hull.put(new int[] { 0, 0, 0, 0 }, hull_pts);

                Layer      class8_ab = net.getLayer(new DictValue("class8_ab"));
                List <Mat> blobs     = class8_ab.get_blobs();
                blobs.Add(pts_in_hull);
                class8_ab.set_blobs(blobs);

                Layer conv8_313_rh = net.getLayer(new DictValue("conv8_313_rh"));
                blobs = conv8_313_rh.get_blobs();
                blobs.Add(new Mat(1, 313, CvType.CV_32F, new Scalar(2.606)));
                conv8_313_rh.set_blobs(blobs);

                // extract L channel and subtract mean
                Mat img_32F = new Mat();
                Mat lab     = new Mat();
                Mat L       = new Mat();
                Mat input   = new Mat();
                img.convertTo(img_32F, CvType.CV_32F, 1.0 / 255);
                Imgproc.cvtColor(img_32F, lab, Imgproc.COLOR_BGR2Lab);
                Core.extractChannel(lab, L, 0);
                Imgproc.resize(L, input, new Size(inWidth, inHeight));
                Core.subtract(input, new Scalar(50.0), input);

                // run the L channel through the network
                Mat inputBlob = Dnn.blobFromImage(input);
                net.setInput(inputBlob);
                Mat result = net.forward();

                // retrieve the calculated a,b channels from the network output
                Mat result_a = new Mat(result, new Range[] { new Range(0, 1), new Range(0, 1), new Range(0, result.size(2)), new Range(0, result.size(3)) });
                Mat result_b = new Mat(result, new Range[] { new Range(0, 1), new Range(1, 2), new Range(0, result.size(2)), new Range(0, result.size(3)) });
                result_a = result_a.reshape(1, result.size(2));
                result_b = result_b.reshape(1, result.size(2));
                Mat a = new Mat(img.size(), CvType.CV_32F);
                Mat b = new Mat(img.size(), CvType.CV_32F);
                Imgproc.resize(result_a, a, img.size());
                Imgproc.resize(result_b, b, img.size());

                // merge, and convert back to BGR
                List <Mat> chn = new List <Mat>();
                chn.Add(L); chn.Add(a); chn.Add(b);
                Core.merge(chn, lab);
                Imgproc.cvtColor(lab, img_32F, Imgproc.COLOR_Lab2BGR);
                img_32F.convertTo(colorized, CvType.CV_8U, 255.0);



                MatOfDouble timings = new MatOfDouble();
                long        t       = net.getPerfProfile(timings);
                double      freq    = Core.getTickFrequency() / 1000;
                Debug.Log("Inference time " + (t / freq) + "ms");
                Imgproc.putText(colorized, (t / freq) + "ms", new Point(10, img.height() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2);

                Imgproc.putText(img, "gray", new Point(10, 20), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2);
                Imgproc.putText(colorized, "colorized", new Point(10, 20), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2);
            }

            Imgproc.cvtColor(colorized, colorized, Imgproc.COLOR_BGR2RGB);

            Mat display = new Mat(img.rows() * 2, img.cols(), img.type());

            using (Mat gray = new Mat(img.rows(), img.cols(), CvType.CV_8UC1))
                using (Mat displayUpperHalf = new Mat(display, new Range(0, img.rows())))
                    using (Mat displayLowerHalf = new Mat(display, new Range(img.rows(), display.rows())))
                    {
                        Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
                        Imgproc.cvtColor(gray, img, Imgproc.COLOR_GRAY2RGB);

                        img.copyTo(displayUpperHalf);
                        colorized.copyTo(displayLowerHalf);
                    }

            Texture2D texture = new Texture2D(display.cols(), display.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(display, texture);

            gameObject.GetComponent <Renderer>().material.mainTexture = texture;

            //Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(display.width(), display.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = display.width();
            float imageHeight = display.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;

            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Utils.setDebugMode(false);
        }
Example #23
        public async Task <List <DetectedObject> > DetectAsync(CameraFrame frame)
        {
            if (frame == null)
            {
                throw new ArgumentNullException(nameof(frame));
            }
            Imgcodecs.imwrite(Application.persistentDataPath + "/testB.jpg", frame.Mat);

            try
            {
                Debug.Log($"Enter PredictImageAsync with mat {frame.Mat}");
                var imageWidth  = frame.Width;
                var imageHeight = frame.Height;

                Mat rgb = new Mat(imageHeight, imageWidth, CvType.CV_8UC3); // Mat takes (rows, cols), i.e. (height, width)
                if (frame.Format == ColorFormat.Grayscale)
                {
                    Imgproc.cvtColor(frame.Mat, rgb, Imgproc.COLOR_GRAY2RGB);
                    Debug.Log($"Converted gray2rgb to {rgb}");
                }
                else
                {
                    frame.Mat.copyTo(rgb);
                }

                //Mat rgba = new Mat();
                //Imgproc.cvtColor(rgb, rgba, Imgproc.COLOR_RGB2RGBA);

                float newHeight = 416.0f / imageWidth * imageHeight;
                Mat   resized   = new Mat(416, 416, CvType.CV_8UC3);
                Imgproc.resize(rgb, resized, new Size(416, newHeight), 0.5, 0.5, Imgproc.INTER_LINEAR);
                //Imgproc.resize(rgb, resized, new Size(targetWidth, targetHeight), 0.5, 0.5, Imgproc.INTER_LINEAR);
                Debug.Log($"Resized {resized}");

                Mat resizedBorder = new Mat();
                Core.copyMakeBorder(resized, resizedBorder, 0, (int)(416 - newHeight), 0, 0, Core.BORDER_CONSTANT, new Scalar(0, 0, 0));

                /*Mat rgba = new Mat();
                 * Imgproc.cvtColor(resizedBorder, rgba, Imgproc.COLOR_RGB2RGBA);*/

                Texture2D texture = new Texture2D(416, 416, TextureFormat.RGB24, false);
                Utils.matToTexture2D(resizedBorder, texture, true);
                //texture.Apply();
                Color32[] pixels32 = texture.GetPixels32();

                byte[] encodeArrayToJPG = ImageConversion.EncodeArrayToJPG(pixels32, GraphicsFormat.R8G8B8A8_UInt, 416, 416);
                File.WriteAllBytes(Application.persistentDataPath + "/testA.jpg", encodeArrayToJPG);

                using (var tensor = TransformInput(pixels32, ImageNetSettings.imageWidth, ImageNetSettings.imageHeight))
                {
                    var inputs = new Dictionary <string, Tensor>();
                    inputs.Add(ModelSettings.ModelInput, tensor);
                    //yield return StartCoroutine(worker.StartManualSchedule(inputs));
                    //var output = engine.Execute(inputs).PeekOutput();
                    var output  = engine.Execute(inputs).PeekOutput(ModelSettings.ModelOutput);
                    var results = outputParser.ParseOutputs(output, MINIMUM_CONFIDENCE);
                    var boxes   = outputParser.FilterBoundingBoxes(results, 10, MINIMUM_CONFIDENCE);
                    foreach (var box in boxes)
                    {
                        Debug.Log($"{box.tagName}, {box.probability}, {box.boundingBox.left},{box.boundingBox.top},{box.boundingBox.width},{box.boundingBox.height},");
                    }

                    List <DetectedObject> detectedObjects = boxes.Select(prediction => CreateDetectedObject(frame, prediction, (int)newHeight)).ToList();
                    int count = 0;
                    foreach (var detectedObject in detectedObjects)
                    {
                        count++;
                        Mat clone = frame.Mat.clone();
                        Imgproc.rectangle(clone, detectedObject.Rect.tl(), detectedObject.Rect.br(), new Scalar(255, 255, 255), 10, 1, 0);
                        Imgcodecs.imwrite(Application.persistentDataPath + "/clone-" + count + ".jpg", clone);
                    }
                }
            }
            catch (Exception e)
            {
                Debug.LogException(e);
                throw; // rethrow without resetting the stack trace
            }

            // NOTE: the detections above are only logged and written to disk; an empty list is returned.
            return new List <DetectedObject>();
        }
Example #24
 void Start()
 {
     srcMat = Imgcodecs.imread(Application.dataPath + "/Textures/sample.jpg", 1); //background image
     Imgproc.cvtColor(srcMat, srcMat, Imgproc.COLOR_BGR2RGB);
 }
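Example #25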
        /// <summary>
        /// Raises the load button click event.
        /// </summary>
        public void LoadModel()
        {
            string loadDirectoryPath = Path.Combine(Application.persistentDataPath, saveDirectoryName);

            if (!Directory.Exists(loadDirectoryPath))
            {
                Debug.Log("load failure. saved train data file does not exist.");
                return;
            }

            // Restart everything!
            dispose();

            if (facerecAlgorithm == "FaceRecognizer.Fisherfaces")
            {
                model = FisherFaceRecognizer.create();
            }
            else if (facerecAlgorithm == "FaceRecognizer.Eigenfaces")
            {
                model = EigenFaceRecognizer.create();
            }

            if (model == null)
            {
                Debug.LogError("ERROR: The FaceRecognizer algorithm [" + facerecAlgorithm + "] is not available in your version of OpenCV. Please update to OpenCV v2.4.1 or newer.");
                m_mode = R_MODES.MODE_DETECTION;
                return;
            }

            // load the train data.
            model.read(Path.Combine(loadDirectoryPath, "traindata.yml"));

            int maxLabel = (int)Core.minMaxLoc(model.getLabels()).maxVal;

            if (maxLabel < 0)
            {
                Debug.Log("load failure.");
                model.Dispose();
                return;
            }

            // Restore the save data.
            #if UNITY_WEBGL && !UNITY_EDITOR
            string format = "jpg";
            #else
            string format = "png";
            #endif
            m_numPersons = maxLabel + 1;
            personsNames = new string[m_numPersons];

            for (int i = 0; i < m_numPersons; ++i)
            {
                personsNames[i] = GameManager.instance.personsNames[i];

                m_latestFaces.Add(i);
                preprocessedFaces.Add(Imgcodecs.imread(Path.Combine(loadDirectoryPath, "preprocessedface" + i + "." + format), 0));
                if (preprocessedFaces[i].total() == 0)
                {
                    preprocessedFaces[i] = new Mat(faceHeight, faceWidth, CvType.CV_8UC1, new Scalar(128));
                }
                faceLabels.Add(i);
            }


            // go to the recognition mode!
            m_mode = R_MODES.MODE_RECOGNITION;
        }
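Example #26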
        // Use this for initialization
        void Run()
        {
            //if true, the error log of the native OpenCV side will be displayed on the Unity Editor Console.
            Utils.setDebugMode(true);


            List <string> classNames = readClassNames(coco_names_filepath);

            #if !UNITY_WSA_10_0
            if (classNames == null)
            {
                Debug.LogError("class names list file is not loaded.The model and class names list can be downloaded here: \"https://github.com/pjreddie/darknet/tree/master/data/coco.names\".Please copy to “Assets/StreamingAssets/dnn/” folder. ");
            }
            #endif


            Mat img = Imgcodecs.imread(person_jpg_filepath);
            #if !UNITY_WSA_10_0
            if (img.empty())
            {
                Debug.LogError("dnn/person.jpg is not loaded.The image file can be downloaded here: \"https://github.com/pjreddie/darknet/blob/master/data/person.jpg\".Please copy to \"Assets/StreamingAssets/dnn/\" folder. ");
                img = new Mat(424, 640, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }
            #endif


            //Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = img.width();
            float imageHeight = img.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;
            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Net net = null;

            if (string.IsNullOrEmpty(tiny_yolo_cfg_filepath) || string.IsNullOrEmpty(tiny_yolo_weights_filepath))
            {
                Debug.LogError("model file is not loaded. the cfg-file and weights-file can be downloaded here: https://github.com/pjreddie/darknet/blob/master/cfg/tiny-yolo.cfg and https://pjreddie.com/media/files/tiny-yolo.weights. Please copy to “Assets/StreamingAssets/dnn/” folder. ");
            }
            else
            {
                //! [Initialize network]
                net = Dnn.readNetFromDarknet(tiny_yolo_cfg_filepath, tiny_yolo_weights_filepath);
                //! [Initialize network]
            }


            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                //! [Resizing without keeping aspect ratio]
                Mat resized = new Mat();
                Imgproc.resize(img, resized, new Size(network_width, network_height));
                //! [Resizing without keeping aspect ratio]

                //! [Prepare blob]
                Mat inputBlob = Dnn.blobFromImage(resized, 1 / 255.0, new Size(), new Scalar(0), true, true);    //Convert Mat to batch of images
                //! [Prepare blob]
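                // Added commentary on the blobFromImage call above: scalefactor
                // 1/255 maps 8-bit pixels to [0, 1]; an empty Size() keeps the
                // blob at the input's size (the image was already resized to
                // network_width x network_height); a zero Scalar subtracts no
                // mean; swapRB=true converts OpenCV's BGR order to the RGB order
                // Darknet models expect; crop=true center-crops after resize,
                // which is a no-op here because no resize happens inside the call.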

                //! [Set input blob]
                net.setInput(inputBlob, "data");                    //set the network input
                //! [Set input blob]


                TickMeter tm = new TickMeter();
                tm.start();

                //! [Make forward pass]
                Mat detectionMat = net.forward("detection_out");    //compute output
                //! [Make forward pass]

                tm.stop();
                Debug.Log("Inference time, ms: " + tm.getTimeMilli());

                Debug.Log("detectionMat.ToString(): " + detectionMat.ToString());

                float[] position    = new float[5];
                float[] confidences = new float[80];

                float confidenceThreshold = 0.24f;
                for (int i = 0; i < detectionMat.rows(); i++)
                {
                    detectionMat.get(i, 0, position);     // [center_x, center_y, width, height, objectness]

                    detectionMat.get(i, 5, confidences);  // the 80 class scores start at column 5

                    int   maxIdx     = confidences.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? max : working).I; // argmax over the class scores
                    float confidence = confidences [maxIdx];

                    if (confidence > confidenceThreshold)
                    {
                        float x           = position [0];
                        float y           = position [1];
                        float width       = position [2];
                        float height      = position [3];
                        int   xLeftBottom = (int)((x - width / 2) * img.cols());
                        int   yLeftBottom = (int)((y - height / 2) * img.rows());
                        int   xRightTop   = (int)((x + width / 2) * img.cols());
                        int   yRightTop   = (int)((y + height / 2) * img.rows());

                        Debug.Log("confidence: " + confidence);

                        Debug.Log(" " + xLeftBottom
                                  + " " + yLeftBottom
                                  + " " + xRightTop
                                  + " " + yRightTop);

                        Imgproc.rectangle(img, new Point(xLeftBottom, yLeftBottom), new Point(xRightTop, yRightTop),
                                          new Scalar(0, 255, 0), 2);

                        if (maxIdx < classNames.Count)
                        {
                            string label     = classNames [maxIdx] + ": " + confidence;
                            int[]  baseLine  = new int[1];
                            Size   labelSize = Imgproc.getTextSize(label, Core.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);

                            Imgproc.rectangle(img, new Point(xLeftBottom, yLeftBottom),
                                              new Point(xLeftBottom + labelSize.width, yLeftBottom + labelSize.height + baseLine [0]),
                                              new Scalar(255, 255, 255), Core.FILLED);
                            Imgproc.putText(img, label, new Point(xLeftBottom, yLeftBottom + labelSize.height),
                                            Core.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0));
                        }
                    }
                }
            }

            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);

            Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(img, texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;


            Utils.setDebugMode(false);
        }
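For this 80-class model each row of detectionMat holds 85 floats: the normalized box center x/y and width/height in columns 0-3, an objectness score in column 4, and the 80 class scores from column 5 onward, which is why the loop reads position at column 0 and confidences at column 5. The sample draws every box above the 0.24 threshold, so overlapping detections of the same object all survive. A minimal non-maximum-suppression pass, assuming this OpenCVForUnity build wraps Dnn.NMSBoxes (available since OpenCV 3.3.1), might look like the sketch below; boxesList and scoresList would be collected inside the detection loop instead of drawing immediately.

            // Hedged NMS sketch (not in the original sample); assumes Dnn.NMSBoxes
            // exists in this build and that boxesList/scoresList were collected
            // per detection inside the loop above.
            List<Rect2d> boxesList  = new List<Rect2d>();  // one Rect2d per candidate box
            List<float>  scoresList = new List<float>();   // matching confidences
            // ... in the loop: boxesList.Add(new Rect2d(xLeftBottom, yLeftBottom,
            //         xRightTop - xLeftBottom, yRightTop - yLeftBottom));
            //     scoresList.Add(confidence);
            MatOfRect2d boxes   = new MatOfRect2d(boxesList.ToArray());
            MatOfFloat  scores  = new MatOfFloat(scoresList.ToArray());
            MatOfInt    indices = new MatOfInt();
            Dnn.NMSBoxes(boxes, scores, confidenceThreshold, 0.4f, indices);
            foreach (int idx in indices.toArray())
            {
                // draw only boxesList[idx] and its label here
            }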
Example No. 27
        private void Run()
        {
            // If true, error logs from the native (C++) OpenCV side are displayed in the Unity Editor Console.
            Utils.setDebugMode(true);


            Mat frame = Imgcodecs.imread(scenetext01_jpg_filepath);

            #if !UNITY_WSA_10_0
            if (frame.empty())
            {
                Debug.LogError("text/scenetext01.jpg is not loaded.Please copy from “OpenCVForUnity/StreamingAssets/text/” to “Assets/StreamingAssets/” folder. ");
            }
            #endif

            Mat binaryMat = new Mat();
            Mat maskMat   = new Mat();


            List <MatOfPoint> regions = new List <MatOfPoint> ();

            ERFilter er_filter1 = Text.createERFilterNM1(trained_classifierNM1_xml_filepath, 8, 0.00015f, 0.13f, 0.2f, true, 0.1f);

            ERFilter er_filter2 = Text.createERFilterNM2(trained_classifierNM2_xml_filepath, 0.5f);


            Mat transition_p = new Mat(62, 62, CvType.CV_64FC1);
            // In the original C++ sample this table is loaded via FileStorage:
            //     string filename = "OCRHMM_transitions_table.xml";
            //     FileStorage fs(filename, FileStorage::READ);
            //     fs["transition_probabilities"] >> transition_p;
            //     fs.release();

            // Load the transition probabilities data from the bundled XML file.
            transition_p.put(0, 0, GetTransitionProbabilitiesData(OCRHMM_transitions_table_xml_filepath));

            Mat           emission_p = Mat.eye(62, 62, CvType.CV_64FC1);
            string        voc        = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; // 62 symbols, matching the 62x62 probability tables
            OCRHMMDecoder decoder    = OCRHMMDecoder.create(
                OCRHMM_knn_model_data_xml_gz_filepath,
                voc, transition_p, emission_p);

            //Text Detection
            Imgproc.cvtColor(frame, frame, Imgproc.COLOR_BGR2RGB);
            Imgproc.cvtColor(frame, binaryMat, Imgproc.COLOR_RGB2GRAY);
            Imgproc.threshold(binaryMat, binaryMat, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
            Core.absdiff(binaryMat, new Scalar(255), maskMat);


            Text.detectRegions(binaryMat, er_filter1, er_filter2, regions);
            Debug.Log("regions.Count " + regions.Count);


            MatOfRect groups_rects           = new MatOfRect();
            List <OpenCVForUnity.Rect> rects = new List <OpenCVForUnity.Rect> ();
            Text.erGrouping(frame, binaryMat, regions, groups_rects);


            for (int i = 0; i < regions.Count; i++)
            {
                regions [i].Dispose();
            }
            regions.Clear();


            rects.AddRange(groups_rects.toList());

            groups_rects.Dispose();


            //Text Recognition (OCR)

            List <Mat> detections = new List <Mat> ();

            for (int i = 0; i < rects.Count; i++)
            {
                Mat group_img = new Mat();
                maskMat.submat(rects [i]).copyTo(group_img);
                Core.copyMakeBorder(group_img, group_img, 15, 15, 15, 15, Core.BORDER_CONSTANT, new Scalar(0));
                detections.Add(group_img);
            }

            Debug.Log("detections.Count " + detections.Count);


            // Visualization
            for (int i = 0; i < rects.Count; i++)
            {
                Imgproc.rectangle(frame, new Point(rects [i].x, rects [i].y), new Point(rects [i].x + rects [i].width, rects [i].y + rects [i].height), new Scalar(255, 0, 0), 2);
                Imgproc.rectangle(frame, new Point(rects [i].x, rects [i].y), new Point(rects [i].x + rects [i].width, rects [i].y + rects [i].height), new Scalar(255, 255, 255), 1);

                string output = decoder.run(detections [i], 0);
                if (!string.IsNullOrEmpty(output))
                {
                    Debug.Log("output " + output);
                    Imgproc.putText(frame, output, new Point(rects [i].x, rects [i].y), Core.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 255), 1, Imgproc.LINE_AA, false);
                }
            }


            Texture2D texture = new Texture2D(frame.cols(), frame.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(frame, texture);

//            Texture2D texture = new Texture2D (detections [0].cols (), detections [0].rows (), TextureFormat.RGBA32, false);
//
//            Utils.matToTexture2D (detections [0], texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;


            for (int i = 0; i < detections.Count; i++)
            {
                detections [i].Dispose();
            }
            binaryMat.Dispose();
            maskMat.Dispose();

            Utils.setDebugMode(false);
        }
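In the example above, Core.absdiff against a scalar of 255 inverts the Otsu-binarized image so that text becomes white on black in maskMat before the masked crops are passed to the HMM decoder. For CV_8UC1 input the same inversion can be written more directly; the call below is equivalent and shown only for clarity:

            Core.bitwise_not(binaryMat, maskMat);  // same result as absdiff with a 255 scalar on CV_8UC1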
Example No. 28
        // Use this for initialization
        void Run()
        {
            // If true, error logs from the native (C++) OpenCV side are displayed in the Unity Editor Console.
            Utils.setDebugMode(true);


            Mat img = Imgcodecs.imread(dnn004545_jpg_filepath);

            #if !UNITY_WSA_10_0
            if (img.empty())
            {
                Debug.LogError("dnn/004545.jpg is not loaded.The image file can be downloaded here: \"https://github.com/chuanqi305/MobileNet-SSD/blob/master/images/004545.jpg\".Please copy to \"Assets/StreamingAssets/dnn/\" folder. ");
                img = new Mat(375, 500, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }
            #endif


            // Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = img.width();
            float imageHeight = img.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;
            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Net net = null;

            if (string.IsNullOrEmpty(MobileNetSSD_deploy_caffemodel_filepath) || string.IsNullOrEmpty(MobileNetSSD_deploy_prototxt_filepath))
            {
                Debug.LogError("model file is not loaded.The model and prototxt file can be downloaded here: \"https://github.com/chuanqi305/MobileNet-SSD\".Please copy to “Assets/StreamingAssets/dnn/” folder. ");
            }
            else
            {
                net = Dnn.readNetFromCaffe(MobileNetSSD_deploy_prototxt_filepath, MobileNetSSD_deploy_caffemodel_filepath);
            }

            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                Mat blob = Dnn.blobFromImage(img, inScaleFactor, new Size(inWidth, inHeight), new Scalar(meanVal, meanVal, meanVal), false, false);
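                // Added commentary: inScaleFactor and meanVal are fields defined
                // elsewhere in this class. In the stock MobileNet-SSD sample they
                // are 0.007843 (i.e. 1/127.5) and 127.5, which together normalize
                // 8-bit pixels to roughly [-1, 1] as this Caffe model expects.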

                net.setInput(blob);


                TickMeter tm = new TickMeter();
                tm.start();

                Mat prob = net.forward();
                prob = prob.reshape(1, (int)prob.total() / 7);  // one row per detection: [imageId, classId, confidence, left, top, right, bottom]

                tm.stop();
                Debug.Log("Inference time, ms: " + tm.getTimeMilli());



                float[] data = new float[7];

                float confidenceThreshold = 0.2f;
                for (int i = 0; i < prob.rows(); i++)
                {
                    prob.get(i, 0, data);

                    float confidence = data [2];

                    if (confidence > confidenceThreshold)
                    {
                        int class_id = (int)(data [1]);

                        float left   = data [3] * img.cols();
                        float top    = data [4] * img.rows();
                        float right  = data [5] * img.cols();
                        float bottom = data [6] * img.rows();

                        Debug.Log("class_id: " + class_id);
                        Debug.Log("Confidence: " + confidence);

                        Debug.Log(" " + left
                                  + " " + top
                                  + " " + right
                                  + " " + bottom);

                        Imgproc.rectangle(img, new Point(left, top), new Point(right, bottom),
                                          new Scalar(0, 255, 0), 2);
                        string label     = classNames [class_id] + ": " + confidence;
                        int[]  baseLine  = new int[1];
                        Size   labelSize = Imgproc.getTextSize(label, Core.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);

                        top = Mathf.Max(top, (float)labelSize.height);

                        Imgproc.rectangle(img, new Point(left, top),
                                          new Point(left + labelSize.width, top + labelSize.height + baseLine [0]),
                                          new Scalar(255, 255, 255), Core.FILLED);
                        Imgproc.putText(img, label, new Point(left, top + labelSize.height),
                                        Core.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0));
                    }
                }

                prob.Dispose();
            }

            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);

            Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(img, texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;


            Utils.setDebugMode(false);
        }
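After the reshape to seven columns, each row of prob is one detection: [imageId, classId, confidence, left, top, right, bottom], with the box coordinates normalized to [0, 1]; that is exactly what the loop above decodes. SSD coordinates can fall slightly outside [0, 1], so clamping the decoded box to the image bounds before drawing is a cheap safeguard. This is a sketch added here, not something the original sample does; it would sit inside the loop right after the box is decoded:

            // Clamp the decoded box to the image bounds before drawing.
            left   = Mathf.Clamp(left, 0, img.cols() - 1);
            top    = Mathf.Clamp(top, 0, img.rows() - 1);
            right  = Mathf.Clamp(right, 0, img.cols() - 1);
            bottom = Mathf.Clamp(bottom, 0, img.rows() - 1);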
Example No. 29
        // Use this for initialization
        void Run()
        {
            // If true, error logs from the native (C++) OpenCV side are displayed in the Unity Editor Console.
            Utils.setDebugMode(true);

            if (!string.IsNullOrEmpty(classes))
            {
                classNames = readClassNames(classes_filepath);
                if (classNames == null)
                {
                    Debug.LogError(classes_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
                }
            }
            else if (classesList.Count > 0)
            {
                classNames = classesList;
            }

            Mat img = Imgcodecs.imread(input_filepath);

            if (img.empty())
            {
                Debug.LogError(input_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
                img = new Mat(424, 640, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }


            // Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = img.width();
            float imageHeight = img.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;

            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Net net = null;

            if (string.IsNullOrEmpty(config_filepath) || string.IsNullOrEmpty(model_filepath))
            {
                Debug.LogError(config_filepath + " or " + model_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
            }
            else
            {
                //! [Initialize network]
                net = Dnn.readNet(model_filepath, config_filepath);
                //! [Initialize network]
            }


            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                outBlobNames = getOutputsNames(net);
                //for (int i = 0; i < outBlobNames.Count; i++)
                //{
                //    Debug.Log("names [" + i + "] " + outBlobNames[i]);
                //}

                outBlobTypes = getOutputsTypes(net);
                //for (int i = 0; i < outBlobTypes.Count; i++)
                //{
                //    Debug.Log("types [" + i + "] " + outBlobTypes[i]);
                //}


                // Create a 4D blob from a frame.
                Size inpSize = new Size(inpWidth > 0 ? inpWidth : img.cols(),
                                        inpHeight > 0 ? inpHeight : img.rows());
                Mat blob = Dnn.blobFromImage(img, scale, inpSize, mean, swapRB, false);


                // Run a model.
                net.setInput(blob);

                if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1)
                {  // Faster-RCNN or R-FCN
                    Imgproc.resize(img, img, inpSize);
                    Mat imInfo = new Mat(1, 3, CvType.CV_32FC1);
                    imInfo.put(0, 0, new float[] {
                        (float)inpSize.height,
                        (float)inpSize.width,
                        1.6f
                    });
                    net.setInput(imInfo, "im_info");
                }


                TickMeter tm = new TickMeter();
                tm.start();


                List <Mat> outs = new List <Mat>();
                net.forward(outs, outBlobNames);


                tm.stop();
                Debug.Log("Inference time, ms: " + tm.getTimeMilli());


                postprocess(img, outs, net);

                for (int i = 0; i < outs.Count; i++)
                {
                    outs[i].Dispose();
                }
                blob.Dispose();
                net.Dispose();
            }


            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);

            Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(img, texture);

            gameObject.GetComponent <Renderer>().material.mainTexture = texture;


            Utils.setDebugMode(false);
        }
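This example depends on helper methods getOutputsNames, getOutputsTypes, and postprocess whose bodies are not shown here. A common implementation of getOutputsNames, following the stock OpenCV dnn object-detection samples (a sketch under that assumption, not necessarily this author's exact code):

        private List<string> getOutputsNames(Net net)
        {
            List<string> names = new List<string>();
            // Layers with unconnected outputs are the network's output layers.
            MatOfInt outLayers = net.getUnconnectedOutLayers();
            List<string> layersNames = net.getLayerNames();
            foreach (int idx in outLayers.toArray())
            {
                names.Add(layersNames[idx - 1]);  // layer ids are 1-based
            }
            outLayers.Dispose();
            return names;
        }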
Example No. 30
    void Start()
    {
        srcMat = Imgcodecs.imread(Application.dataPath + "/Textures/lena.jpg", 1); // flag 1 = load as 3-channel color; resized to 512x512 below
        Imgproc.cvtColor(srcMat, srcMat, Imgproc.COLOR_BGR2RGB);
        Imgproc.resize(srcMat, srcMat, new Size(512, 512));

        Texture2D src_t2d = new Texture2D(srcMat.width(), srcMat.height());

        Utils.matToTexture2D(srcMat, src_t2d);
        Sprite src_sp = Sprite.Create(src_t2d, new UnityEngine.Rect(0, 0, src_t2d.width, src_t2d.height), Vector2.zero);

        m_srcImage.sprite = src_sp;
        m_srcImage.rectTransform.offsetMin        = new Vector2(0, 0);
        m_srcImage.rectTransform.offsetMax        = new Vector2(srcMat.width(), srcMat.height());
        m_srcImage.rectTransform.anchoredPosition = Vector2.zero;

        Mat        mask = Mat.zeros(srcMat.size(), CvType.CV_8UC1); // note: the mask's CvType must be CV_8UC1
        Point      p0   = new Point(0, 0);
        Point      p1   = new Point(0, 256);
        Point      p2   = new Point(256, 0);
        MatOfPoint pts1 = new MatOfPoint(new Point[3] {
            p0, p1, p2
        });
        MatOfPoint2f srcTri = new MatOfPoint2f(new Point[3] {
            p0, p1, p2
        });
        Point      p3   = new Point(256, 0);
        Point      p4   = new Point(512, 0);
        Point      p5   = new Point(512, 256);
        Point      p6   = new Point(256, 64);
        MatOfPoint pts2 = new MatOfPoint(new Point[3] {
            p3, p4, p5
        });
        MatOfPoint2f dstTri = new MatOfPoint2f(new Point[3] {
            p0, p1, p6
        });
        List <MatOfPoint> contour = new List <MatOfPoint>()
        {
            pts1
        };

        for (int i = 0; i < contour.Count; i++)
        {
            // Contour extraction
            Imgproc.drawContours(mask, contour, i, new Scalar(255), -1); // draw all contours filled onto the mask
        }
        srcMat.copyTo(mask, mask); // masked copy: srcMat pixels inside the triangle land in mask, which copyTo reallocates to CV_8UC3
        Mat warpMat   = Imgproc.getAffineTransform(srcTri, dstTri);
        Mat warpImage = Mat.zeros(mask.size(), mask.type());

        Imgproc.warpAffine(mask, warpImage, warpMat, warpImage.size());

        //------------------------------------------------//

        /*
         * // Offset points by left top corner of the respective rectangles
         * OpenCVForUnity.Rect r1 = Imgproc.boundingRect(pts1);
         * OpenCVForUnity.Rect r2 = Imgproc.boundingRect(pts2);
         * MatOfPoint2f t1Rect = new MatOfPoint2f();
         * MatOfPoint2f t2Rect = new MatOfPoint2f();
         * MatOfPoint t2RectInt = new MatOfPoint();
         * for (int i = 0; i < 3; i++)
         * {
         *  t1Rect.push_back(new Mat((int)pts1.toList()[i].x - r1.x, (int)pts1.toList()[i].y - r1.y, 0));
         *  t2Rect.push_back(new Mat((int)pts2.toList()[i].x - r2.x, (int)pts2.toList()[i].y - r2.y, 0));
         *  t2RectInt.push_back(new Mat((int)pts2.toList()[i].x - r2.x, (int)pts2.toList()[i].y - r2.y, 0)); // for fillConvexPoly
         * }
         *
         * MatOfPoint PointArray = new MatOfPoint();
         * dstMat = Mat.zeros(srcMat.size(), CvType.CV_8UC3);
         * PointArray.fromList(new List<Point>()
         * {
         *  new Point(50,10),
         *  new Point(300,12),
         *  new Point(350,250),
         *  new Point(9,250),
         * });
         * Debug.Log(PointArray);
         * Imgproc.fillConvexPoly(dstMat, PointArray, new Scalar(255, 0, 0), 4, 0);
         */
        //------------------------------------------------//

        Texture2D dst_t2d = new Texture2D(warpImage.width(), warpImage.height());

        Utils.matToTexture2D(warpImage, dst_t2d);
        Sprite sp = Sprite.Create(dst_t2d, new UnityEngine.Rect(0, 0, dst_t2d.width, dst_t2d.height), Vector2.zero);

        m_roiImage.sprite                         = sp;
        m_roiImage.preserveAspect                 = true;
        m_roiImage.rectTransform.offsetMin        = new Vector2(0, 0);
        m_roiImage.rectTransform.offsetMax        = new Vector2(srcMat.width(), srcMat.height());
        m_roiImage.rectTransform.anchoredPosition = Vector2.zero;
    }
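Imgproc.getAffineTransform returns a 2x3 CV_64FC1 matrix from the three source/destination point pairs. To verify where a particular vertex lands after warpAffine, the matrix can be applied to a point by hand; a small verification sketch (added here, not part of the original example):

    // Map one point through the 2x3 affine matrix
    // (row-major layout: m00 m01 m02 m10 m11 m12).
    double[] m = new double[6];
    warpMat.get(0, 0, m);
    Point mapped = new Point(
        m[0] * p0.x + m[1] * p0.y + m[2],
        m[3] * p0.x + m[4] * p0.y + m[5]);
    Debug.Log("p0 maps to " + mapped);  // expected: (0, 0) for this transform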