Example #1
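Feature matching: loads the "box" and "box_in_scene" textures, converts them to grayscale, draws the matches and shows the result on a GUITexture.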
    // Use this for initialization
    void Start()
    {
        Texture2D boxTexture        = Resources.Load <Texture2D>("box");
        Texture2D boxInSceneTexture = Resources.Load <Texture2D>("box_in_scene");

        Mat box3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxTexture, box3Channels);
        Mat box = new Mat();

        CvInvoke.CvtColor(box3Channels, box, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(box, box, FlipType.Vertical);

        Mat boxInScene3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxInSceneTexture, boxInScene3Channels);
        Mat boxInScene = new Mat();

        CvInvoke.CvtColor(boxInScene3Channels, boxInScene, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(boxInScene, boxInScene, FlipType.Vertical);

        long time;
        Mat  img = FeatureMatchingExample.DrawMatches.Draw(box, boxInScene, out time);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", img);
        //Mat outImg = new Mat();
        //CvInvoke.CvtColor(img, outImg, ColorConversion.Bgr2Bgra);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", outImg);
        Texture2D texture = TextureConvert.InputArrayToTexture2D(img);

        this.GetComponent <GUITexture>().texture = texture;

        this.GetComponent <GUITexture>().pixelInset = new Rect(-texture.width / 2, -texture.height / 2, texture.width, texture.height);
    }
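Example #2
Per-frame GrabCut segmentation: a face detected in the webcam frame seeds GrabCut, which masks the frame down to the probable foreground.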
    // Update is called once per frame
    void Update()
    {
        webcamTexture.Read(img);


        CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Rgb);
        // A using statement ensures the UMat is disposed as soon as the block exits.
        using (UMat gray = new UMat())
        {
            // Convert image to gray scale.
            CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
            // Equalise the lighting.
            CvInvoke.EqualizeHist(gray, gray);
            // Rectangles highlighting where the faces are in the image.
            Rectangle[] faces = null;

            // Detect faces in image.
            faces = faceCascade.DetectMultiScale(gray, 1.15, 5);
            if (faces.Length > 0)
            {
                Rectangle          face = faces[0];
                int                numberOfIterations = 15;
                Image <Bgr, byte>  src  = img.ToImage <Bgr, byte>();
                Image <Gray, byte> mask = src.GrabCut(face, numberOfIterations);
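                // GrabCut mask labels: 0/2 = (probable) background, 1/3 = (probable)
                // foreground; thresholding above 2 keeps only probable-foreground pixels.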
                mask = mask.ThresholdBinary(new Gray(2), new Gray(255));
                UMat newImg = src.Copy(mask).ToUMat();
                // Update the result texture.
                resultTexture = TextureConvert.InputArrayToTexture2D(newImg, FlipType.Vertical);
                GetComponent <GUITexture>().texture = resultTexture;
                Size s = img.Size;
                GetComponent <GUITexture>().pixelInset = new Rect(s.Width / 2, -s.Height / 2, s.Width, s.Height);
            }
        }
    }
Example #3
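Renders the Emgu CV platform string and the first OpenCL platform description (or "None") onto a blank image and displays it on a GUITexture.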
    // Use this for initialization
    void Start()
    {
        Image <Bgr, Byte> img = new Image <Bgr, byte>(640, 240);

        String openclStr = "None";

        if (CvInvoke.HaveOpenCL)
        {
            //StringBuilder builder = new StringBuilder();
            using (VectorOfOclPlatformInfo oclPlatformInfos = OclInvoke.GetPlatformInfo())
            {
                if (oclPlatformInfos.Size > 0)
                {
                    OclPlatformInfo platformInfo = oclPlatformInfos[0];
                    openclStr = platformInfo.ToString();
                }
            }
        }

        CvInvoke.PutText(img, String.Format("Emgu CV for Unity {0}", Emgu.Util.Platform.OperationSystem), new System.Drawing.Point(10, 60), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 255, 0));

        CvInvoke.PutText(img, String.Format("OpenCL: {0}", openclStr), new System.Drawing.Point(10, 120), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 0, 255));

        Texture2D texture = TextureConvert.ImageToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
Example #4
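Mat-based variant of the diagnostic overlay above, displayed through the RenderTexture/ResizeTexture helpers.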
    // Use this for initialization
    void Start()
    {
        Mat img = new Mat(new Size(640, 240), DepthType.Cv8U, 3);

        img.SetTo(new MCvScalar());
        String openclStr = "None";

        if (CvInvoke.HaveOpenCL)
        {
            //StringBuilder builder = new StringBuilder();
            using (VectorOfOclPlatformInfo oclPlatformInfos = OclInvoke.GetPlatformsInfo())
            {
                if (oclPlatformInfos.Size > 0)
                {
                    PlatformInfo platformInfo = oclPlatformInfos[0];
                    openclStr = platformInfo.ToString();
                }
            }
        }

        CvInvoke.PutText(img, String.Format("Emgu CV for Unity {0}", Emgu.Util.Platform.OperationSystem), new System.Drawing.Point(10, 60), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 255, 0));

        CvInvoke.PutText(img, String.Format("OpenCL: {0}", openclStr), new System.Drawing.Point(10, 120), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 0, 255));

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        RenderTexture(texture);
        ResizeTexture(texture);
    }
Example #5
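Coroutine that loads the Tesseract engine, runs OCR over a rendered "Hello, World" image and outlines each recognized character.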
    private IEnumerator performOCR()
    {
        _ocr = new Tesseract(Path.Combine(Application.persistentDataPath, "tessdata"), "eng", OcrEngineMode.TesseractOnly);

        Debug.Log("OCR engine loaded.");

        Image <Bgr, Byte> img = new Image <Bgr, byte>(480, 200);

        String message = "Hello, World";

        CvInvoke.PutText(img, message, new Point(50, 100), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, new MCvScalar(255, 255, 255));

        _ocr.SetImage(img);
        _ocr.Recognize();

        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetUTF8Text().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text

        Debug.Log("Detected text: " + messageOcr);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        RenderTexture(texture);
        ResizeTexture(texture);
        yield return(null);
    }
Example #6
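Enumerates webcam devices: displays a message when no camera is found, otherwise starts streaming from the first device.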
    // Use this for initialization
    void Start()
    {
        WebCamDevice[] devices     = WebCamTexture.devices;
        int            cameraCount = devices.Length;

        if (cameraCount == 0)
        {
            Image <Bgr, Byte> img = new Image <Bgr, byte>(640, 240);
            CvInvoke.PutText(img, String.Format("{0} cameras found", devices.Length), new System.Drawing.Point(10, 60),
                             Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                             1.0, new MCvScalar(0, 255, 0));
            Texture2D texture = TextureConvert.ImageToTexture2D(img, FlipType.Vertical);

            RenderTexture(texture);
            ResizeTexture(texture);
            //this.GetComponent<GUITexture>().texture = texture;
            //this.GetComponent<GUITexture>().pixelInset = new Rect(-img.Width/2, -img.Height/2, img.Width, img.Height);
        }
        else
        {
            webcamTexture = new WebCamTexture(devices[0].name);

            baseRotation = transform.rotation;
            webcamTexture.Play();
            //data = new Color32[webcamTexture.width * webcamTexture.height];
            CvInvoke.CheckLibraryLoaded();
        }
    }
Example #7
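Same feature-matching demo as Example #1, displayed through the RenderTexture/ResizeTexture helpers.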
    // Use this for initialization
    void Start()
    {
        Texture2D boxTexture        = Resources.Load <Texture2D>("box");
        Texture2D boxInSceneTexture = Resources.Load <Texture2D>("box_in_scene");

        Mat box3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxTexture, box3Channels);
        Mat box = new Mat();

        CvInvoke.CvtColor(box3Channels, box, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(box, box, FlipType.Vertical);

        Mat boxInScene3Channels = new Mat();

        TextureConvert.Texture2dToOutputArray(boxInSceneTexture, boxInScene3Channels);
        Mat boxInScene = new Mat();

        CvInvoke.CvtColor(boxInScene3Channels, boxInScene, ColorConversion.Bgra2Gray);
        CvInvoke.Flip(boxInScene, boxInScene, FlipType.Vertical);

        long time;
        Mat  img = FeatureMatchingExample.DrawMatches.Draw(box, boxInScene, out time);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", img);
        //Mat outImg = new Mat();
        //CvInvoke.CvtColor(img, outImg, ColorConversion.Bgr2Bgra);
        //CvInvoke.Imwrite("c:\\tmp\\tmp.png", outImg);
        Texture2D texture = TextureConvert.InputArrayToTexture2D(img);

        RenderTexture(texture);
        ResizeTexture(texture);
    }
Example #8
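Loads four "stitch" textures and combines them into a panorama with Emgu.CV.Stitching.Stitcher.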
    // Use this for initialization
    void Start()
    {
        String[] textureNames = new string[] { "stitch1", "stitch2", "stitch3", "stitch4" };
        Mat[]    imgs         = new Mat[textureNames.Length];
        Mat      tmp          = new Mat();

        for (int i = 0; i < textureNames.Length; i++)
        {
            Texture2D tex = Resources.Load <Texture2D>(textureNames[i]);
            imgs[i] = new Mat();
            TextureConvert.Texture2dToOutputArray(tex, tmp);
            CvInvoke.Flip(tmp, tmp, FlipType.Vertical);
            CvInvoke.CvtColor(tmp, imgs[i], ColorConversion.Bgra2Bgr);
            if (imgs[i].IsEmpty)
            {
                Debug.Log("Image " + i + " is empty");
            }
            else
            {
                Debug.Log("Image " + i + " is " + imgs[i].NumberOfChannels + " channels " + imgs[i].Width + "x" + imgs[i].Height);
            }
        }
        Emgu.CV.Stitching.Stitcher stitcher = new Emgu.CV.Stitching.Stitcher();
        Mat result = new Mat();

        using (VectorOfMat vms = new VectorOfMat(imgs))
            stitcher.Stitch(vms, result);
        //CvInvoke.Flip(result, result, FlipType.Vertical);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(result, FlipType.Vertical);

        RenderTexture(texture);
        ResizeTexture(texture);
    }
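Example #9
Per-frame face detection: each face is boxed in green and the contours found in its thresholded region are drawn on top.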
        // Update is called once per frame
        void Update()
        {
            webcamTexture.Read(img);
            CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Rgb);
            // A using statement ensures the UMat is disposed as soon as the block exits.
            using (UMat gray = new UMat())
            {
                // Convert image to gray scale.
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
                // Equalise the lighting.
                CvInvoke.EqualizeHist(gray, gray);
                //CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Gray);

                // Rectangles highlighting where the faces are in the image.
                Rectangle[] faces = null;

                // Detect faces in image.
                faces = faceCascade.DetectMultiScale(gray, 1.15, 5);
                foreach (Rectangle face in faces)
                {
                    using (Mat faceGray = new Mat(img, face))
                    {
                        // Draw a green rectangle around the found area.
                        CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                        // Convert ROI to gray-scale.
                        CvInvoke.CvtColor(faceGray, faceGray, ColorConversion.Bgr2Gray);
                        // Improves detection of edges.
                        CvInvoke.Blur(faceGray, faceGray, new Size(3, 3), new Point(0, 0));

                        // Convert image to canny to detect edges.
                        //CvInvoke.Canny(faceGray, faceGray, 30, 128, 3, false);
                        //CvInvoke.Sobel(faceGray, faceGray, DepthType.Default, 1, 0, 3);
                        CvInvoke.Threshold(faceGray, faceGray, 100, 255, ThresholdType.BinaryInv);
                        // Hierarchy order of contours.
                        //hierarchy = CvInvoke.FindContourTree(faceGray, contours, ChainApproxMethod.ChainApproxSimple);
                        // Find the contours in the ROI area.
                        CvInvoke.FindContours(faceGray, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple, face.Location);

                        for (int i = 0; i < contours.Size; ++i)
                        {
                            CvInvoke.DrawContours(img, contours, i, new MCvScalar(255, 255, 255));
                        }
                    }
                }
            }

            // Update the result texture.
            resultTexture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
            //RunShader();



            // MatToTexture();
            GetComponent <GUITexture>().texture = resultTexture;
            Size s = img.Size;

            GetComponent <GUITexture>().pixelInset = new Rect(s.Width / 2, -s.Height / 2, s.Width, s.Height);
        }
Example #10
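Face detection on the bundled "lena" texture with a Haar cascade that is first copied from Resources to persistentDataPath.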
    // Use this for initialization
    void Start()
    {
        Texture2D lenaTexture = Resources.Load <Texture2D>("lena");

        //updateTextureWithString("load lena ok");
        Image <Bgr, Byte> img = TextureConvert.Texture2dToImage <Bgr, byte>(lenaTexture);
        //updateTextureWithString("convert to image ok");

        //String fileName = "haarcascade_frontalface_default";
        //String fileName = "lbpcascade_frontalface";
        String fileName = "haarcascade_frontalface_alt2";
        String filePath = Path.Combine(Application.persistentDataPath, fileName + ".xml");
        //if (!File.Exists(filePath))
        {
            //updateTextureWithString("start move cascade xml");
            TextAsset cascadeModel = Resources.Load <TextAsset>(fileName);

#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, cascadeModel.bytes);
#else
            File.WriteAllBytes(filePath, cascadeModel.bytes);
#endif
            //updateTextureWithString("File size: " + new FileInfo(filePath).Length);
        }


        using (CascadeClassifier classifier = new CascadeClassifier(filePath))
            using (Image <Gray, Byte> gray = img.Convert <Gray, byte>())
            {
                //updateTextureWithString("classifier create ok");

                Rectangle[] faces = null;
                try
                {
                    faces = classifier.DetectMultiScale(gray);

                    //updateTextureWithString("face detected");
                    foreach (Rectangle face in faces)
                    {
                        CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                    }
                }
                catch (Exception e)
                {
                    //updateTextureWithString(e.Message);
                    return;
                }

                //updateTextureWithString(String.Format("{0} face found on image of {1} x {2}", faces.Length, img.Width, img.Height));
            }

        Texture2D texture = TextureConvert.ImageToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
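Example #11
Webcam face detection: Canny edges inside each detected face are converted to contours and drawn onto the frame.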
    // Update is called once per frame
    void Update()
    {
        // Update texture with webcam image.
        copyTexture = new Texture2D(webcamTexture.width, webcamTexture.height);
        copyTexture.SetPixels32(webcamTexture.GetPixels32());
        copyTexture.Apply();
        // Convert to an OpenCV array for image manipulation.
        TextureConvert.Texture2dToOutputArray(copyTexture, img);
        // This will appear upside down, so flip it.
        CvInvoke.Flip(img, img, FlipType.Vertical);

        // A using statement ensures the UMat is disposed as soon as the block exits.
        using (UMat gray = new UMat())
        {
            // Convert image to gray scale.
            CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
            // Equalise the lighting.
            CvInvoke.EqualizeHist(gray, gray);
            //CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Gray);

            // Rectangles highlighting where the faces are in the image.
            Rectangle[] faces = null;

            // Detect faces in image.
            faces = faceCascade.DetectMultiScale(gray);
            foreach (Rectangle face in faces)
            {
                using (UMat faceGray = new UMat(img, face))
                {
                    // Draw a green rectangle around the found area.
                    CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                    // Convert ROI to gray-scale.
                    CvInvoke.CvtColor(faceGray, faceGray, ColorConversion.Bgr2Gray);
                    // Convert image to canny to detect edges.
                    CvInvoke.Canny(faceGray, faceGray, 30, 128, 3, false);
                    // Hierarchy order of contours.
                    //hierarchy = CvInvoke.FindContourTree(faceGray, contours, ChainApproxMethod.ChainApproxSimple);
                    // Find the contours in the ROI area.
                    CvInvoke.FindContours(faceGray, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple, face.Location);
                    for (int i = 0; i < contours.Size; ++i)
                    {
                        CvInvoke.DrawContours(img, contours, i, new MCvScalar(255, 255, 255));
                    }
                }
            }
        }

        // Update the result texture.
        //Texture2D texture
        resultTexture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
        GetComponent <GUITexture>().texture = resultTexture;
        Size s = img.Size;

        GetComponent <GUITexture>().pixelInset = new Rect(s.Width / 2, -s.Height / 2, s.Width, s.Height);
    }
Example #12
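Helper that renders a status string onto a blank 640x240 image and shows it on the GUITexture.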
    private void updateTextureWithString(String text)
    {
        Image <Bgr, Byte> img = new Image <Bgr, byte>(640, 240);

        CvInvoke.PutText(img, text, new System.Drawing.Point(10, 60), Emgu.CV.CvEnum.FontFace.HersheyDuplex,
                         1.0, new MCvScalar(0, 255, 0));

        Texture2D texture = TextureConvert.ImageToTexture2D(img);

        this.GetComponent <GUITexture>().texture = texture;
    }
Example #13
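UMat-based variant of the cascade face-detection demo on the "lena" texture.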
    // Use this for initialization
    void Start()
    {
        Texture2D lenaTexture = Resources.Load <Texture2D>("lena");

        UMat img = new UMat();

        TextureConvert.Texture2dToOutputArray(lenaTexture, img);
        CvInvoke.Flip(img, img, FlipType.Vertical);

        //String fileName = "haarcascade_frontalface_default";
        //String fileName = "lbpcascade_frontalface";
        String fileName = "haarcascade_frontalface_alt2";
        String filePath = Path.Combine(Application.persistentDataPath, fileName + ".xml");
        //if (!File.Exists(filePath))
        {
            TextAsset cascadeModel = Resources.Load <TextAsset>(fileName);

#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, cascadeModel.bytes);
#else
            File.WriteAllBytes(filePath, cascadeModel.bytes);
#endif
        }

        using (CascadeClassifier classifier = new CascadeClassifier(filePath))
            using (UMat gray = new UMat())
            {
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);

                Rectangle[] faces = null;
                try
                {
                    faces = classifier.DetectMultiScale(gray);

                    foreach (Rectangle face in faces)
                    {
                        CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                    }
                }
                catch (Exception e)
                {
                    Debug.Log(e.Message);

                    return;
                }
            }

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture = texture;
        Size s = img.Size;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);
    }
Example #14
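Copies the bundled tessdata files to persistentDataPath, then runs Tesseract over a rendered "Hello, World" image.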
    // Use this for initialization
    void Start()
    {
        String[] names      = new string[] { "eng.cube.bigrams", "eng.cube.fold", "eng.cube.lm", "eng.cube.nn", "eng.cube.params", "eng.cube.size", "eng.cube.word-freq", "eng.tesseract_cube.nn", "eng.traineddata" };
        String   outputPath = Path.Combine(Application.persistentDataPath, "tessdata");

        if (!Directory.Exists(outputPath))
        {
            Directory.CreateDirectory(outputPath);
        }

        foreach (String n in names)
        {
            TextAsset textAsset = Resources.Load <TextAsset>(Path.Combine("tessdata", n));
            String    filePath  = Path.Combine(outputPath, n);
#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, textAsset.bytes);
#else
            if (!File.Exists(filePath))
            {
                File.WriteAllBytes(filePath, textAsset.bytes);
            }
#endif
        }

        _ocr = new Tesseract(outputPath, "eng", OcrEngineMode.TesseractLstmCombined);

        Debug.Log("OCR engine loaded.");

        Image <Bgr, Byte> img = new Image <Bgr, byte>(480, 200);

        String message = "Hello, World";
        CvInvoke.PutText(img, message, new Point(50, 100), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, new MCvScalar(255, 255, 255));

        _ocr.Recognize(img);


        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetText().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text
        Debug.Log("Detected text: " + messageOcr);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
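Example #15
Asynchronous variant of the OCR setup: the Ninja coroutine helper hops between the Unity thread (asset loading, UI) and a background thread (recognition).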
    IEnumerator ocr_async()
    {
        String[] names = new string[] { "eng.cube.bigrams", "eng.cube.fold", "eng.cube.lm", "eng.cube.nn", "eng.cube.params", "eng.cube.size", "eng.cube.word-freq", "eng.tesseract_cube.nn", "eng.traineddata" };


        if (!Directory.Exists(outputPath))
        {
            Directory.CreateDirectory(outputPath);
        }

        yield return(Ninja.JumpToUnity);

        foreach (String n in names)
        {
            TextAsset textAsset = Resources.Load <TextAsset>(Path.Combine("tessdata", n));
            String    filePath  = Path.Combine(outputPath, n);
#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, textAsset.bytes);
#else
            if (!File.Exists(filePath))
            {
                File.WriteAllBytes(filePath, textAsset.bytes);
            }
#endif
        }

        yield return(Ninja.JumpBack);

        _ocr = new Tesseract(outputPath, "eng", OcrEngineMode.TesseractCubeCombined);

        yield return(Ninja.JumpToUnity);

        print("OCR engine loaded.");
        print("OCR processing..");
        Image <Bgr, Byte> img = TextureConvert.Texture2dToImage <Bgr, Byte>(original_texture);
        yield return(Ninja.JumpBack);

        _ocr.Recognize(img);

        Tesseract.Character[] characters = _ocr.GetCharacters();
        String messageOcr = _ocr.GetText().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text

        yield return(Ninja.JumpToUnity);

        Debug.Log("Detected text: " + messageOcr);

        build_char_list(characters);

        progress_indicator.SetActive(false);
    }
Example #16
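Downloads the Tesseract language files at startup (with a GitHub-only certificate override) and runs the "Hello, World" OCR demo.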
    // Use this for initialization
    void Start()
    {
#if !(NETFX_CORE && (!UNITY_EDITOR))
        //Warning: The following code works around an HTTPS certificate validation issue when downloading the Tesseract language files from GitHub.
        //Do not use this code in a production environment. Make sure you understand the security implications of this code before using it.
        ServicePointManager.ServerCertificateValidationCallback += delegate(object sender, X509Certificate cert, X509Chain chain, SslPolicyErrors sslPolicyErrors) {
            HttpWebRequest webRequest = sender as HttpWebRequest;
            if (webRequest != null)
            {
                String requestStr = webRequest.Address.AbsoluteUri;
                if (requestStr.StartsWith(@"https://github.com/") || requestStr.StartsWith(@"https://raw.githubusercontent.com/"))
                {
                    return(true);
                }
            }
            return(false);
        };
#endif
        TesseractDownloadLangFile(Application.persistentDataPath, "eng");
        TesseractDownloadLangFile(Application.persistentDataPath, "osd"); //script orientation detection


        _ocr = new Tesseract(Path.Combine(Application.persistentDataPath, "tessdata"), "eng", OcrEngineMode.TesseractLstmCombined);

        Debug.Log("OCR engine loaded.");

        Image <Bgr, Byte> img = new Image <Bgr, byte>(480, 200);

        String message = "Hello, World";
        CvInvoke.PutText(img, message, new Point(50, 100), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, new MCvScalar(255, 255, 255));

        _ocr.SetImage(img);
        _ocr.Recognize();

        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetUTF8Text().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text
        Debug.Log("Detected text: " + messageOcr);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
Example #17
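Runs OCR over a user-supplied texture, outlines each recognized character and passes the characters to build_map.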
    // Use this for initialization
    void Start()
    {
        String[] names = new string[] { "eng.cube.bigrams", "eng.cube.fold", "eng.cube.lm", "eng.cube.nn", "eng.cube.params", "eng.cube.size", "eng.cube.word-freq", "eng.tesseract_cube.nn", "eng.traineddata" };

        String outputPath = Path.Combine("C:\\Emgu/emgucv-windesktop 3.1.0.2504/Emgu.CV.World", "tessdata");

        if (!Directory.Exists(outputPath))
        {
            Directory.CreateDirectory(outputPath);
        }

        foreach (String n in names)
        {
            TextAsset textAsset = Resources.Load <TextAsset>(Path.Combine("tessdata", n));
            String    filePath  = Path.Combine(outputPath, n);
#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, textAsset.bytes);
#else
            if (!File.Exists(filePath))
            {
                File.WriteAllBytes(filePath, textAsset.bytes);
            }
#endif
        }

        _ocr = new Tesseract(outputPath, "eng", OcrEngineMode.TesseractCubeCombined);

        Debug.Log("OCR engine loaded.");
        print("OCR processing..");

        Image <Bgr, Byte> img = TextureConvert.Texture2dToImage <Bgr, Byte>(original_texture);
        _ocr.Recognize(img);

        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)   //draw rect for each character
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetText().TrimEnd('\n', '\r'); // remove end of line from ocr-ed text
        Debug.Log("Detected text: " + messageOcr);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
        original_texture = texture;
        build_map(characters);
    }
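Example #18
Grabs a frame from the capture device, displays it in a UI sprite, and renders the board layout detected by ImageTools into a second lookup sprite.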
    private void AnalyseFrame()
    {
        if (frame != null)
        {
            frame.Dispose();
        }
        frame = capture.QueryFrame();
        if (frame != null)
        {
            GameObject.Destroy(cameraTex);
            cameraTex = TextureConvert.ImageToTexture2D <Bgr, byte>(frame, true);
            Sprite.DestroyImmediate(CameraImageUI.GetComponent <UnityEngine.UI.Image>().sprite);
            CameraImageUI.sprite = Sprite.Create(cameraTex, new Rect(0, 0, cameraTex.width, cameraTex.height), new Vector2(0.5f, 0.5f));
        }
        if (true)
        //if (!processingFrame)
        {
            processingFrame = true;

            board = ImageTools.ReadFromFrame(frame.Clone(), filteringParameters);

            if (lookupImage != null)
            {
                lookupImage.Dispose();
            }

            if (board != null)
            {
                lookupImage = ImageTools.DrawRooms(320, 240, board.Grid);
            }
            else
            {
                lookupImage = new Image <Bgr, byte>(320, 240, new Bgr(0, 0, 0));
            }

            if (lookupImage != null)
            {
                GameObject.Destroy(lookupTex);
                lookupTex = TextureConvert.ImageToTexture2D <Bgr, byte>(lookupImage, true);
                Sprite.DestroyImmediate(LookupUI.GetComponent <UnityEngine.UI.Image>().sprite);
                LookupUI.sprite = Sprite.Create(lookupTex, new Rect(0, 0, lookupTex.width, lookupTex.height), new Vector2(0.5f, 0.5f));
            }
            processingFrame = false;
        }
    }
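Example #19
Sets up hand tracking: grabs an initial frame and draws a default 200x200 tracking box at the image center.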
    // Start is called before the first frame update
    void Start()
    {
        capture = new VideoCapture(0);
        Image <Bgr, Byte> initialFrame = capture.QueryFrame().ToImage <Bgr, Byte>();

        tracker  = new TrackerCSRT();
        RawImage = GetComponent <RawImage>();

        // Draw the initial handBox.
        float midX = (initialFrame.Width / 2);
        float midY = (initialFrame.Height / 2);

        handBox = new Rectangle((int)midX - 100, (int)midY - 100, 200, 200);

        initialFrame.Draw(handBox, new Bgr(System.Drawing.Color.Green), 3);
        texture          = TextureConvert.ImageToTexture2D <Bgr, Byte>(initialFrame, FlipType.Vertical);
        RawImage.texture = texture;
    }
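Example #20
Tracking loop: the space key initializes the CSRT tracker on the hand box; afterwards each frame is tracked and the hand object follows the box.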
    // Update is called once per frame
    void Update()
    {
        Image <Bgr, Byte> frame = capture.QueryFrame().ToImage <Bgr, Byte>();

        // Initialize tracker
        if (Input.GetKeyDown("space") && !isReady)
        {
            print("space key was pressed");
            tracker.Init(frame.Mat, handBox);
            isReady = true;

            // Move Raw Image to bottom right hand corner
            RawImage.rectTransform.anchorMin = new Vector2(1, 0);
            RawImage.rectTransform.anchorMax = new Vector2(1, 0);
            RawImage.rectTransform.pivot     = new Vector2(1, 0);
            RawImage.rectTransform.sizeDelta = new Vector2(319, 179);
        }

        // User has already initialized the tracker, so track this frame.
        if (isReady)
        {
            Rectangle box;
            // Tracker.Update returns false when tracking fails; Rectangle is a
            // value type, so a null comparison cannot detect failure.
            if (tracker.Update(frame.Mat, out box))
            {
                frame.Draw(box, new Bgr(System.Drawing.Color.Green), 3);

                // Invert y axis
                Vector3 pos = VideoCoordToScreenCoord(box.X, box.Y, frame.Width, frame.Height);
                pos.y = pos.y * -1;
                hand.transform.position = pos;
            }
            else
            {
                Debug.Log("Tracker update failed");
            }
        }
        else
        {
            frame.Draw(handBox, new Bgr(System.Drawing.Color.Green), 3);
        }
        RawImage.texture = TextureConvert.ImageToTexture2D <Bgr, byte>(frame, FlipType.Vertical);
    }
Example #21
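Chessboard detection: finds the inner corners of a boardWidth x boardHeight pattern and returns them as normalized texture coordinates.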
    public static bool FindBoard(Texture2D image, out Vector2[,] corners, int boardWidth = 9, int boardHeight = 6)
    {
        Image <Gray, byte> snapshot = TextureConvert.Texture2dToImage <Gray, byte>(image);

        Size patternSize = new Size(boardWidth, boardHeight); //size of chess board to be detected


        //MCvPoint3D32f[][] corners_object_list = new MCvPoint3D32f[Frame_array_buffer.Length][];
        // PointF[][] corners_points_list = new PointF[Frame_array_buffer.Length][];

        PointF[] _corners = new PointF[patternSize.Width * patternSize.Height];

        bool           patternFound = false;
        Matrix <float> pointMatrix  = new Matrix <float>(_corners.Length, 1, 2);

        patternFound = CvInvoke.FindChessboardCorners(snapshot, patternSize, pointMatrix);

        if (patternFound)
        {
            corners = new Vector2[boardWidth, boardHeight];
            Matrix <float>[] pointChannels = pointMatrix.Split();
            int idx = 0;
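            // _corners is never populated; it is only used for its length, which
            // equals the number of expected inner corners in the pattern.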
            foreach (PointF p in _corners)
            {
                //Debug.Log("points[" + idx + "]=" + pointChannels[0][idx,0] + "," + pointChannels[1][idx, 0]);
                corners[idx % boardWidth, idx / boardWidth] = new Vector2(((float)pointChannels[0][idx, 0]) / image.width, 1 - ((float)pointChannels[1][idx, 0]) / image.height);

                //Debug.Log("points[" + idx + "]=" + corners[idx % boardWidth, idx / boardWidth]);

                idx++;
            }
        }
        else
        {
            corners = null;
        }
        return(patternFound);
    }
Example #22
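Decodes a texture to RGBA bytes by dispatching on its D3D format: BC1-BC7 data is decompressed, uncompressed formats are converted channel-wise.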
        public static byte[] GetRgbaImage(ITexture model, int mipMapLevel = 0)
        {
            var x = model.GetTextureData(mipMapLevel);

            switch (model.Format)
            {
            // compressed formats...
            case TextureFormat.D3DFMT_DXT1:
                return(TextureCompressionHelper.DecompressBC1(x, model.Width, model.Height));

            case TextureFormat.D3DFMT_DXT3:
            {
                return(TextureCompressionHelper.DecompressBC2(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_DXT5:
            {
                return(TextureCompressionHelper.DecompressBC3(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_ATI1:
            {
                return(TextureCompressionHelper.DecompressBC4(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_ATI2:
            {
                return(TextureCompressionHelper.DecompressBC5(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_BC7:
            {
                return(TextureCompressionHelper.DecompressBC7(x, model.Width, model.Height));
            }

            // uncompressed formats...
            case TextureFormat.D3DFMT_A8:
            {
                return(TextureConvert.MakeRGBAFromA8(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_L8:
            {
                return(TextureConvert.MakeARGBFromL8(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_A1R5G5B5:
            {
                return(TextureConvert.MakeARGBFromA1R5G5B5(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_A8B8G8R8:
            {
                return(TextureConvert.MakeRGBAFromA8B8G8R8(x, model.Width, model.Height));
            }

            case TextureFormat.D3DFMT_A8R8G8B8:
            {
                return(TextureConvert.MakeRGBAFromA8R8G8B8(x, model.Width, model.Height));
            }

            default:
                throw new System.Exception("unknown format");
            }
        }
Example #23
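EncodeToPNG extension for arbitrary Textures: non-readable Texture2Ds go through Graphics.CopyTexture, RenderTextures through a temporary Texture2D, with optional linear/gamma conversion via a blit.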
    public static byte[] TextureEncodeToPNG(this Texture @this, TextureConvert convert = TextureConvert.None,
                                            TextureFormat pngFormat = TextureFormat.RGBA32)
    {
        if (convert == TextureConvert.None)
        {
            if (@this is Texture2D)
            {
                var tex0           = @this as Texture2D;
                int channelsActual = tex0.format.Channels();
                int channelsExpect = pngFormat.Channels();
                if (!(channelsActual == channelsExpect || channelsActual == 3 && channelsExpect == 4))
                {
                    Debug.LogWarningFormat("Tex channels not match: \"{0}\",{1}, expect: {2}", tex0.name, tex0.format, pngFormat);
                }

                if (tex0.isReadable)
                {
                    return(tex0.EncodeToPNG());
                }
                else
                {
                    Texture2D tex2D = new Texture2D(tex0.width, tex0.height);
                    try
                    {
                        Graphics.CopyTexture(tex0, tex2D);
                        return(tex2D.EncodeToPNG());
                    }
                    finally
                    {
                        Util.DestroyRes(tex2D);
                    }
                }
            }
            else if (@this is RenderTexture)
            {
                Texture2D tex = ((RenderTexture)@this).ToNewTexture2D(pngFormat);
                try
                {
                    return(tex.EncodeToPNG());
                }
                finally
                {
                    Util.DestroyRes(tex);
                }
            }
            else
            {
                throw new NotSupportedException();
            }
        }
        else
        {
            RenderTexture rt         = null;
            Material      convertMat = null;
            try
            {
                rt         = RenderTexture.GetTemporary(@this.width, @this.height);
                convertMat = new Material(Shader.Find("Hidden/ColorSpaceConvert"));
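                // The blit below selects shader pass 0 for the linear-to-gamma
                // conversion and pass 1 for the reverse direction.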
                Graphics.Blit(@this, rt, convertMat, convert == TextureConvert.LinearToGamma ? 0 : 1);
                return(rt.TextureEncodeToPNG(TextureConvert.None, pngFormat));
            }
            finally
            {
                RenderTexture.ReleaseTemporary(rt);
                Util.DestroyRes(convertMat);
            }
        }
    }
Example #24
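Facemark tracking: detects faces on the webcam image, fits 68 facial landmarks, and uses them to drive calibration markers and an optional debug overlay.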
    void Track()
    {
        if (lastPositions != null)
        {
            lastPositions = landmarks;
        }

        // We fetch webcam texture data
        convertedTexture.SetPixels(webcamTexture.GetPixels());
        convertedTexture.Apply();

        // We convert the webcam texture2D into the OpenCV image format
        UMat img = new UMat();

        TextureConvert.Texture2dToOutputArray(convertedTexture, img);
        CvInvoke.Flip(img, img, FlipType.Vertical);

        using (CascadeClassifier classifier = new CascadeClassifier(filePath)) {
            using (UMat gray = new UMat()) {
                // We convert the image to the grayscale format the face detector expects, then detect the faces
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
                facesVV   = new VectorOfRect(classifier.DetectMultiScale(gray));
                landmarks = new VectorOfVectorOfPointF();

                // we fit facial landmarks onto the face data
                if (facemark.Fit(gray, facesVV, landmarks))
                {
                    FaceInvoke.DrawFacemarks(img, landmarks[0], new MCvScalar(255, 255, 0, 255));

                    // We calculate the nose position to use as a capture center
                    noseOffset = new Vector3(landmarks[0][67].X, landmarks[0][67].Y * -1f, 0f);

                    // We draw markers and compute positions
                    for (int j = 0; j < 68; j++)
                    {
                        Vector3 markerPos = new Vector3(landmarks[0][j].X, landmarks[0][j].Y * -1f, 0f);

                        if (displayOffsetMarkers)
                        {
                            Debug.DrawLine(markerPos, markerPos + (Vector3.forward * 3f), UnityEngine.Color.green, trackingInterval);
                        }

                        AdjustCalibration(j, markerPos);
                    }
                    recording = true;
                }
                else
                {
                    recording = false;
                }

                if (displayCalibrationMarkers)
                {
                    DisplayCalibration();
                }
            }
        }

        // We render out the calculation result into the debug image
        if (debugImage)
        {
            Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
            debugImage.sprite = Sprite.Create(texture, new Rect(0, 0, texture.width, texture.height), new Vector2(0.5f, 0.5f));
        }
    }
Example #25
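Unpacks an RDP SetOtherMode command into its pipeline, texture, dither, alpha and render-mode fields using each enum's bitmask.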
        public static void ParseRDPSetOtherMode(F3DEX2Command cmd,
                                                out PipelineMode pm, out CycleType cyc, out TexturePersp tp, out TextureDetail td, out TextureLOD tl,
                                                out TextureLUT tt, out TextureFilter tf, out TextureConvert tc, out CombineKey ck, out ColorDither cd,
                                                out AlphaDither ad, out AlphaCompare ac, out DepthSource zs, out RenderMode rm)
        {
            rm = new RenderMode(cmd.Words & (0xFFFFFFFF & ~((ulong)AlphaCompare.Mask | (ulong)DepthSource.Mask)));

            if (!rm.Known) // Handle TCL modes by checking again with alpha compare and dither included
            {
                rm = new RenderMode(cmd.Words & (0xFFFFFFFF & ~(ulong)DepthSource.Mask));
            }

            ulong wordH = cmd.Words >> 32;

            ad  = (AlphaDither)(wordH & (ulong)AlphaDither.Mask);
            cd  = (ColorDither)(wordH & (ulong)ColorDither.Mask);
            ck  = (CombineKey)(wordH & (ulong)CombineKey.Mask);
            pm  = (PipelineMode)(wordH & (ulong)PipelineMode.Mask);
            cyc = (CycleType)(wordH & (ulong)CycleType.Mask);
            tp  = (TexturePersp)(wordH & (ulong)TexturePersp.Mask);
            td  = (TextureDetail)(wordH & (ulong)TextureDetail.Mask);
            tl  = (TextureLOD)(wordH & (ulong)TextureLOD.Mask);
            tt  = (TextureLUT)(wordH & (ulong)TextureLUT.Mask);
            tf  = (TextureFilter)(wordH & (ulong)TextureFilter.Mask);
            tc  = (TextureConvert)(wordH & (ulong)TextureConvert.Mask);

            ac = (AlphaCompare)(cmd.Words & (ulong)AlphaCompare.Mask);
            zs = (DepthSource)(cmd.Words & (ulong)DepthSource.Mask);
        }