Example #1
    // Use this for initialization
    void Start()
    {
        String[] textureNames = new string[] { "stitch1", "stitch2", "stitch3", "stitch4" };
        Mat[]    imgs         = new Mat[textureNames.Length];
        Mat      tmp          = new Mat();

        for (int i = 0; i < textureNames.Length; i++)
        {
            Texture2D tex = Resources.Load <Texture2D>(textureNames[i]);
            imgs[i] = new Mat();
            TextureConvert.Texture2dToOutputArray(tex, tmp);
            CvInvoke.Flip(tmp, tmp, FlipType.Vertical);
            CvInvoke.CvtColor(tmp, imgs[i], ColorConversion.Bgra2Bgr);
            if (imgs[i].IsEmpty)
            {
                Debug.Log("Image " + i + " is empty");
            }
            else
            {
                Debug.Log("Image " + i + " is " + imgs[i].NumberOfChannels + " channels " + imgs[i].Width + "x" + imgs[i].Height);
            }
        }
        Emgu.CV.Stitching.Stitcher stitcher = new Emgu.CV.Stitching.Stitcher();
        Mat result = new Mat();

        using (VectorOfMat vms = new VectorOfMat(imgs))
            stitcher.Stitch(vms, result);
        //CvInvoke.Flip(result, result, FlipType.Vertical);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(result, FlipType.Vertical);

        RenderTexture(texture);
        ResizeTexture(texture);
    }
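Note: Stitch reports whether the panorama could be composed; depending on the Emgu CV release it returns a bool or a Stitcher.Status value. A minimal defensive sketch, assuming the bool-returning signature:

    // Sketch (assumption: this Emgu CV version's Stitch returns bool).
    bool stitchOk;
    using (VectorOfMat vms = new VectorOfMat(imgs))
        stitchOk = stitcher.Stitch(vms, result);
    if (!stitchOk || result.IsEmpty)
    {
        Debug.Log("Stitching failed - the input images may not overlap enough.");
        return;
    }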
Example #2
    // Use this for initialization
    void Start()
    {
        Texture2D boxTexture        = Resources.Load <Texture2D>("box");
        Texture2D boxInSceneTexture = Resources.Load <Texture2D>("box_in_scene");

        Mat box3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxTexture, box3Channels);
        Mat box = new Mat();

        CvInvoke.CvtColor(box3Channels, box, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(box, box, FlipType.Vertical);

        Mat boxInScene3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxInSceneTexture, boxInScene3Channels);
        Mat boxInScene = new Mat();

        CvInvoke.CvtColor(boxInScene3Channels, boxInScene, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(boxInScene, boxInScene, FlipType.Vertical);

        long time;
        Mat  img = FeatureMatchingExample.DrawMatches.Draw(box, boxInScene, out time);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", img);
        //Mat outImg = new Mat();
        //CvInvoke.CvtColor(img, outImg, ColorConversion.Bgr2Bgra);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", outImg);
        Texture2D texture = TextureConvert.InputArrayToTexture2D(img);

        this.GetComponent <GUITexture>().texture = texture;

        this.GetComponent <GUITexture>().pixelInset = new Rect(-texture.width / 2, -texture.height / 2, texture.width, texture.height);
    }
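Note: GUITexture, used here and in several of the examples below, was removed in Unity 2019.3. On current Unity versions the same display step can go through a UI RawImage instead; a sketch, assuming a RawImage component on the same GameObject:

    // Assumes a UnityEngine.UI.RawImage is attached instead of a GUITexture.
    UnityEngine.UI.RawImage rawImage = GetComponent<UnityEngine.UI.RawImage>();
    rawImage.texture = texture;
    rawImage.SetNativeSize(); // size the UI element to the texture's pixel dimensions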
Example #3
    // Use this for initialization
    void Start()
    {
        Mat img = new Mat(new Size(640, 240), DepthType.Cv8U, 3);

        String openclStr = "None";

        if (CvInvoke.HaveOpenCL)
        {
            //StringBuilder builder = new StringBuilder();
            using (VectorOfOclPlatformInfo oclPlatformInfos = OclInvoke.GetPlatformsInfo())
            {
                if (oclPlatformInfos.Size > 0)
                {
                    PlatformInfo platformInfo = oclPlatformInfos[0];
                    openclStr = platformInfo.ToString();
                }
            }
        }

        CvInvoke.PutText(img, String.Format("Emgu CV for Unity {0}", Emgu.Util.Platform.OperationSystem), new System.Drawing.Point(10, 60), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 255, 0));

        CvInvoke.PutText(img, String.Format("OpenCL: {0}", openclStr), new System.Drawing.Point(10, 120), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 0, 255));

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
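Note: besides querying platform info, OpenCL use can be toggled at runtime through CvInvoke.UseOpenCL; UMat-based operations then run on the OpenCL device when one is available:

    if (CvInvoke.HaveOpenCL)
    {
        // Route subsequent UMat operations through OpenCL (set to false to force the CPU path).
        CvInvoke.UseOpenCL = true;
    }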
Example #4
    private IEnumerator performOCR()
    {
        _ocr = new Tesseract(Path.Combine(Application.persistentDataPath, "tessdata"), "eng", OcrEngineMode.TesseractOnly);

        Debug.Log("OCR engine loaded.");

        Image <Bgr, Byte> img = new Image <Bgr, byte>(480, 200);

        String message = "Hello, World";

        CvInvoke.PutText(img, message, new Point(50, 100), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, new MCvScalar(255, 255, 255));

        _ocr.SetImage(img);
        _ocr.Recognize();

        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetUTF8Text().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text

        Debug.Log("Detected text: " + message);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        RenderTexture(texture);
        ResizeTexture(texture);
        yield return null;
    }
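Note: this coroutine assumes tessdata/eng.traineddata already exists under Application.persistentDataPath. A sketch of staging it from a bundled TextAsset, following the same approach Example #11 uses:

    // Assumes the language file is bundled as Resources/tessdata/eng.traineddata.
    String tessdataPath = Path.Combine(Application.persistentDataPath, "tessdata");
    if (!Directory.Exists(tessdataPath))
    {
        Directory.CreateDirectory(tessdataPath);
    }
    String trainedDataPath = Path.Combine(tessdataPath, "eng.traineddata");
    if (!File.Exists(trainedDataPath))
    {
        TextAsset trainedData = Resources.Load<TextAsset>(Path.Combine("tessdata", "eng.traineddata"));
        File.WriteAllBytes(trainedDataPath, trainedData.bytes);
    }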
Example #5
    // Update is called once per frame
    void Update()
    {
        webcamTexture.Read(img);


        CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Rgb);
        // The using statement ensures the temporary UMat's native memory is released promptly.
        using (UMat gray = new UMat())
        {
            // Convert image to gray scale.
            CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
            // Equalise the lighting.
            CvInvoke.EqualizeHist(gray, gray);
            // Rectangles that highlight where the face is in the image.
            Rectangle[] faces = null;

            // Detect faces in image.
            faces = faceCascade.DetectMultiScale(gray, 1.15, 5);
            if (faces.Length > 0)
            {
                Rectangle          face = faces[0];
                int                numberOfIterations = 15;
                Image <Bgr, byte>  src  = img.ToImage <Bgr, byte>();
                Image <Gray, byte> mask = src.GrabCut(face, numberOfIterations);
                mask = mask.ThresholdBinary(new Gray(2), new Gray(255));
                UMat newImg = src.Copy(mask).ToUMat();
                // Update the result texture.
                resultTexture = TextureConvert.InputArrayToTexture2D(newImg, FlipType.Vertical);
                GetComponent <GUITexture>().texture = resultTexture;
                Size s = img.Size;
                GetComponent <GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);
            }
        }
    }
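Note: GrabCut with 15 iterations is far too expensive to run on every frame. A hypothetical throttle, assuming an int _frameCounter field on the component, placed at the top of Update():

    // Hypothetical: run the detection/GrabCut pass only every 15th frame;
    // in between, the GUITexture keeps showing the last computed resultTexture.
    if (++_frameCounter % 15 != 0)
    {
        return;
    }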
Example #6
    // Use this for initialization
    void Start()
    {
        Mat img = new Mat(new Size(640, 240), DepthType.Cv8U, 3);

        img.SetTo(new MCvScalar());
        String openclStr = "None";

        if (CvInvoke.HaveOpenCL)
        {
            //StringBuilder builder = new StringBuilder();
            using (VectorOfOclPlatformInfo oclPlatformInfos = OclInvoke.GetPlatformsInfo())
            {
                if (oclPlatformInfos.Size > 0)
                {
                    PlatformInfo platformInfo = oclPlatformInfos[0];
                    openclStr = platformInfo.ToString();
                }
            }
        }

        CvInvoke.PutText(img, String.Format("Emgu CV for Unity {0}", Emgu.Util.Platform.OperationSystem), new System.Drawing.Point(10, 60), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 255, 0));

        CvInvoke.PutText(img, String.Format("OpenCL: {0}", openclStr), new System.Drawing.Point(10, 120), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 0, 255));

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        RenderTexture(texture);
        ResizeTexture(texture);
    }
Example #7
    // Use this for initialization
    void Start()
    {
        Texture2D boxTexture        = Resources.Load <Texture2D>("box");
        Texture2D boxInSceneTexture = Resources.Load <Texture2D>("box_in_scene");

        Mat box3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxTexture, box3Channels);
        Mat box = new Mat();

        CvInvoke.CvtColor(box3Channels, box, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(box, box, FlipType.Vertical);

        Mat boxInScene3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxInSceneTexture, boxInScene3Channels);
        Mat boxInScene = new Mat();

        CvInvoke.CvtColor(boxInScene3Channels, boxInScene, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(boxInScene, boxInScene, FlipType.Vertical);

        long time;
        Mat  img = FeatureMatchingExample.DrawMatches.Draw(box, boxInScene, out time);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", img);
        //Mat outImg = new Mat();
        //CvInvoke.CvtColor(img, outImg, ColorConversion.Bgr2Bgra);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", outImg);
        Texture2D texture = TextureConvert.InputArrayToTexture2D(img);

        RenderTexture(texture);
        ResizeTexture(texture);
    }
Example #8
        // Update is called once per frame
        void Update()
        {
            webcamTexture.Read(img);
            CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Rgb);
            // The using statement ensures the temporary UMat's native memory is released promptly.
            using (UMat gray = new UMat())
            {
                // Convert image to gray scale.
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
                // Equalise the lighting.
                CvInvoke.EqualizeHist(gray, gray);
                //CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Gray);

                // Rectangles that highlight where the face is in the image.
                Rectangle[] faces = null;

                // Detect faces in image.
                faces = faceCascade.DetectMultiScale(gray, 1.15, 5);
                foreach (Rectangle face in faces)
                {
                    using (Mat faceGray = new Mat(img, face))
                    {
                        // Draw a green rectangle around the found area.
                        CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                        // Convert ROI to gray-scale.
                        CvInvoke.CvtColor(faceGray, faceGray, ColorConversion.Bgr2Gray);
                        // Improves detection of edges.
                        CvInvoke.Blur(faceGray, faceGray, new Size(3, 3), new Point(0, 0));

                        // Convert image to canny to detect edges.
                        //CvInvoke.Canny(faceGray, faceGray, 30, 128, 3, false);
                        //CvInvoke.Sobel(faceGray, faceGray, DepthType.Default, 1, 0, 3);
                        CvInvoke.Threshold(faceGray, faceGray, 100, 255, ThresholdType.BinaryInv);
                        // Hierarchy order of contours.
                        //hierarchy = CvInvoke.FindContourTree(faceGray, contours, ChainApproxMethod.ChainApproxSimple);
                        // Find the contours in the ROI area.
                        CvInvoke.FindContours(faceGray, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple, face.Location);

                        for (int i = 0; i < contours.Size; ++i)
                        {
                            CvInvoke.DrawContours(img, contours, i, new MCvScalar(255, 255, 255));
                        }
                    }
                }
            }

            // Update the result texture.
            resultTexture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
            //RunShader();
            // MatToTexture();
            GetComponent <GUITexture>().texture = resultTexture;
            Size s = img.Size;

            GetComponent <GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);
        }
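Note: InputArrayToTexture2D allocates a fresh Texture2D, so reassigning resultTexture every frame (here and in the next example) leaks textures until Unity is told to release the old one. A minimal fix sketch:

    // Release the previous frame's texture before replacing it.
    if (resultTexture != null)
    {
        Destroy(resultTexture);
    }
    resultTexture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);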
Example #9
    // Update is called once per frame
    void Update()
    {
        // Update texture with webcam image.
        copyTexture = new Texture2D(webcamTexture.width, webcamTexture.height);
        copyTexture.SetPixels32(webcamTexture.GetPixels32());
        copyTexture.Apply();
        // Convert to Image to be used in image manipulation.
        TextureConvert.Texture2dToOutputArray(copyTexture, img);
        // This will appear upside down, so flip it.
        CvInvoke.Flip(img, img, FlipType.Vertical);

        // The using statement ensures the temporary UMat's native memory is released promptly.
        using (UMat gray = new UMat())
        {
            // Convert image to gray scale.
            CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
            // Equalise the lighting.
            CvInvoke.EqualizeHist(gray, gray);
            //CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Gray);

            // Rectangles that highlight where the face is in the image.
            Rectangle[] faces = null;

            // Detect faces in image.
            faces = faceCascade.DetectMultiScale(gray);
            foreach (Rectangle face in faces)
            {
                using (UMat faceGray = new UMat(img, face))
                {
                    // Draw a green rectangle around the found area.
                    CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                    // Convert ROI to gray-scale.
                    CvInvoke.CvtColor(faceGray, faceGray, ColorConversion.Bgr2Gray);
                    // Convert image to canny to detect edges.
                    CvInvoke.Canny(faceGray, faceGray, 30, 128, 3, false);
                    // Hierarchy order of contours.
                    //hierarchy = CvInvoke.FindContourTree(faceGray, contours, ChainApproxMethod.ChainApproxSimple);
                    // Find the contours in the ROI area.
                    CvInvoke.FindContours(faceGray, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple, face.Location);
                    for (int i = 0; i < contours.Size; ++i)
                    {
                        CvInvoke.DrawContours(img, contours, i, new MCvScalar(255, 255, 255));
                    }
                }
            }
        }

        // Update the result texture.
        //Texture2D texture
        resultTexture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
        GetComponent <GUITexture>().texture = resultTexture;
        Size s = img.Size;

        GetComponent <GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);
    }
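Note: this Update uses DetectMultiScale with its default parameters, while Examples #5 and #8 pass 1.15 and 5. The optional arguments trade speed against robustness; an illustrative call (the values are assumptions, not recommendations):

    Rectangle[] faces = faceCascade.DetectMultiScale(
        gray,
        1.1,                // scaleFactor: pyramid step; closer to 1.0 is slower but more thorough
        4,                  // minNeighbors: higher values suppress false positives
        new Size(80, 80));  // minSize: ignore detections smaller than 80x80 pixels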
Example #10
    // Use this for initialization
    void Start()
    {
        Texture2D lenaTexture = Resources.Load <Texture2D>("lena");

        UMat img = new UMat();

        TextureConvert.Texture2dToOutputArray(lenaTexture, img);
        CvInvoke.Flip(img, img, FlipType.Vertical);

        //String fileName = "haarcascade_frontalface_default";
        //String fileName = "lbpcascade_frontalface";
        String fileName = "haarcascade_frontalface_alt2";
        String filePath = Path.Combine(Application.persistentDataPath, fileName + ".xml");
        //if (!File.Exists(filePath))
        {
            TextAsset cascadeModel = Resources.Load <TextAsset>(fileName);

#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, cascadeModel.bytes);
#else
            File.WriteAllBytes(filePath, cascadeModel.bytes);
#endif
        }

        using (CascadeClassifier classifier = new CascadeClassifier(filePath))
            using (UMat gray = new UMat())
            {
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);

                Rectangle[] faces = null;
                try
                {
                    faces = classifier.DetectMultiScale(gray);

                    foreach (Rectangle face in faces)
                    {
                        CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                    }
                }
                catch (Exception e)
                {
                    Debug.Log(e.Message);

                    return;
                }
            }

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture = texture;
        Size s = img.Size;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);
    }
Example #11
    // Use this for initialization
    void Start()
    {
        String[] names      = new string[] { "eng.cube.bigrams", "eng.cube.fold", "eng.cube.lm", "eng.cube.nn", "eng.cube.params", "eng.cube.size", "eng.cube.word-freq", "eng.tesseract_cube.nn", "eng.traineddata" };
        String   outputPath = Path.Combine(Application.persistentDataPath, "tessdata");

        if (!Directory.Exists(outputPath))
        {
            Directory.CreateDirectory(outputPath);
        }

        foreach (String n in names)
        {
            TextAsset textAsset = Resources.Load <TextAsset>(Path.Combine("tessdata", n));
            String    filePath  = Path.Combine(outputPath, n);
#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, textAsset.bytes);
#else
            if (!File.Exists(filePath))
            {
                File.WriteAllBytes(filePath, textAsset.bytes);
            }
#endif
        }

        _ocr = new Tesseract(outputPath, "eng", OcrEngineMode.TesseractLstmCombined);

        Debug.Log("OCR engine loaded.");

        Image <Bgr, Byte> img = new Image <Bgr, byte>(480, 200);

        String message = "Hello, World";
        CvInvoke.PutText(img, message, new Point(50, 100), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, new MCvScalar(255, 255, 255));

        _ocr.Recognize(img);


        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetText().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text
        Debug.Log("Detected text: " + message);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
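Note: Recognize(img) is the older Emgu OCR entry point; newer releases split it into two calls, as Examples #4 and #12 do:

    _ocr.SetImage(img);
    _ocr.Recognize();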
Example #12
    // Use this for initialization
    void Start()
    {
#if !(NETFX_CORE && (!UNITY_EDITOR))
        //Warning: The following code is used to get around a https certification issue for downloading tesseract language files from Github
        //Do not use this code in a production environment. Please make sure you understand the security implication from the following code before using it
        ServicePointManager.ServerCertificateValidationCallback += delegate(object sender, X509Certificate cert, X509Chain chain, SslPolicyErrors sslPolicyErrors) {
            HttpWebRequest webRequest = sender as HttpWebRequest;
            if (webRequest != null)
            {
                String requestStr = webRequest.Address.AbsoluteUri;
                if (requestStr.StartsWith(@"https://github.com/") || requestStr.StartsWith(@"https://raw.githubusercontent.com/"))
                {
                    return true;
                }
            }
            return false;
        };
#endif
        TesseractDownloadLangFile(Application.persistentDataPath, "eng");
        TesseractDownloadLangFile(Application.persistentDataPath, "osd"); //script orientation detection


        _ocr = new Tesseract(Path.Combine(Application.persistentDataPath, "tessdata"), "eng", OcrEngineMode.TesseractLstmCombined);

        Debug.Log("OCR engine loaded.");

        Image <Bgr, Byte> img = new Image <Bgr, byte>(480, 200);

        String message = "Hello, World";
        CvInvoke.PutText(img, message, new Point(50, 100), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, new MCvScalar(255, 255, 255));

        _ocr.SetImage(img);
        _ocr.Recognize();

        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetUTF8Text().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text
        Debug.Log("Detected text: " + message);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
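Note: TesseractDownloadLangFile is not shown on this page. A hypothetical sketch of such a helper, fetching <lang>.traineddata from the tesseract-ocr/tessdata GitHub repository (the URL and branch are assumptions) into persistentDataPath/tessdata:

    private static void TesseractDownloadLangFile(String folder, String lang)
    {
        String subfolder = Path.Combine(folder, "tessdata");
        if (!Directory.Exists(subfolder))
        {
            Directory.CreateDirectory(subfolder);
        }
        String dest = Path.Combine(subfolder, String.Format("{0}.traineddata", lang));
        if (File.Exists(dest))
        {
            return; // already staged
        }
        using (System.Net.WebClient client = new System.Net.WebClient())
        {
            // Assumed source URL; pin to a specific tessdata release in production.
            client.DownloadFile(
                String.Format("https://github.com/tesseract-ocr/tessdata/blob/master/{0}.traineddata?raw=true", lang),
                dest);
        }
    }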
Example #13
    // Use this for initialization
    void Start()
    {
        String[] names = new string[] { "eng.cube.bigrams", "eng.cube.fold", "eng.cube.lm", "eng.cube.nn", "eng.cube.params", "eng.cube.size", "eng.cube.word-freq", "eng.tesseract_cube.nn", "eng.traineddata" };

        String outputPath = Path.Combine("C:\\Emgu/emgucv-windesktop 3.1.0.2504/Emgu.CV.World", "tessdata");

        if (!Directory.Exists(outputPath))
        {
            Directory.CreateDirectory(outputPath);
        }

        foreach (String n in names)
        {
            TextAsset textAsset = Resources.Load <TextAsset>(Path.Combine("tessdata", n));
            String    filePath  = Path.Combine(outputPath, n);
#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, textAsset.bytes);
#else
            if (!File.Exists(filePath))
            {
                File.WriteAllBytes(filePath, textAsset.bytes);
            }
#endif
        }

        _ocr = new Tesseract(outputPath, "eng", OcrEngineMode.TesseractCubeCombined);

        Debug.Log("OCR engine loaded.");
        print("OCR processing..");

        Image <Bgr, Byte> img = TextureConvert.Texture2dToImage <Bgr, Byte>(original_texture);
        _ocr.Recognize(img);

        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)   //draw rect for each character
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetText().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text
        Debug.Log("Detected text: " + messageOcr);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
        original_texture = texture;
        build_map(characters);
    }
Example #14
    void Track()
    {
        if (lastPositions != null)
        {
            lastPositions = landmarks;
        }

        // We fetch webcam texture data
        convertedTexture.SetPixels(webcamTexture.GetPixels());
        convertedTexture.Apply();

        // We convert the webcam texture2D into the OpenCV image format
        UMat img = new UMat();

        TextureConvert.Texture2dToOutputArray(convertedTexture, img);
        CvInvoke.Flip(img, img, FlipType.Vertical);

        using (CascadeClassifier classifier = new CascadeClassifier(filePath)) {
            using (UMat gray = new UMat()) {
                // We convert the image to the grayscale format the face detector expects and detect the faces
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
                facesVV   = new VectorOfRect(classifier.DetectMultiScale(gray));
                landmarks = new VectorOfVectorOfPointF();

                // we fit facial landmarks onto the face data
                if (facemark.Fit(gray, facesVV, landmarks))
                {
                    FaceInvoke.DrawFacemarks(img, landmarks[0], new MCvScalar(255, 255, 0, 255));

                    // We calculate the nose position to use as a capture center
                    noseOffset = new Vector3(landmarks[0][67].X, landmarks[0][67].Y * -1f, 0f);

                    // We draw markers and compute positions
                    for (int j = 0; j < 68; j++)
                    {
                        Vector3 markerPos = new Vector3(landmarks[0][j].X, landmarks[0][j].Y * -1f, 0f);

                        if (displayOffsetMarkers)
                        {
                            Debug.DrawLine(markerPos, markerPos + (Vector3.forward * 3f), UnityEngine.Color.green, trackingInterval);
                        }

                        AdjustCalibration(j, markerPos);
                    }
                    recording = true;
                }
                else
                {
                    recording = false;
                }

                if (displayCalibrationMarkers)
                {
                    DisplayCalibration();
                }
            }
        }

        // We render out the calculation result into the debug image
        if (debugImage)
        {
            Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
            debugImage.sprite = Sprite.Create(texture, new Rect(0, 0, texture.width, texture.height), new Vector2(0.5f, 0.5f));
        }
    }
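Note: the facemark field is initialized elsewhere. A hypothetical setup using Emgu's LBF landmark detector (the model file name and location are assumptions):

    // Hypothetical initialization of the facemark field used in Track().
    FacemarkLBFParams fParams = new FacemarkLBFParams();
    facemark = new FacemarkLBF(fParams);
    // Assumes the trained LBF model has been copied to persistentDataPath.
    facemark.LoadModel(Path.Combine(Application.persistentDataPath, "lbfmodel.yaml"));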
Example #15
    // Use this for initialization
    void Start()
    {
        Texture2D lenaTexture = Resources.Load <Texture2D>("lena");

        //updateTextureWithString("load lena ok");
        Mat img = new Mat();

        TextureConvert.Texture2dToOutputArray(lenaTexture, img);
        CvInvoke.Flip(img, img, FlipType.Vertical);

        //updateTextureWithString("convert to image ok");

        //String fileName = "haarcascade_frontalface_default";
        //String fileName = "lbpcascade_frontalface";
        String fileName = "haarcascade_frontalface_alt2";
        String filePath = Path.Combine(Application.persistentDataPath, fileName + ".xml");
        //if (!File.Exists(filePath))
        {
            //updateTextureWithString("start move cascade xml");
            TextAsset cascadeModel = Resources.Load <TextAsset>(fileName);

#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, cascadeModel.bytes);
#else
            File.WriteAllBytes(filePath, cascadeModel.bytes);
#endif
            //updateTextureWithString("File size: " + new FileInfo(filePath).Length);
        }


        using (CascadeClassifier classifier = new CascadeClassifier(filePath))
            using (Mat gray = new Mat())
            {
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
                //updateTextureWithString("classifier create ok");

                Rectangle[] faces = null;
                try
                {
                    faces = classifier.DetectMultiScale(gray);

                    //updateTextureWithString("face detected");
                    foreach (Rectangle face in faces)
                    {
                        CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                    }
                }
                catch (Exception e)
                {
                    //updateTextureWithString(e.Message);
                    return;
                }

                //updateTextureWithString(String.Format("{0} face found on image of {1} x {2}", faces.Length, img.Width, img.Height));
            }

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }