Example #1
    // Use this for initialization
    void Start()
    {
        Texture2D lenaTexture = Resources.Load <Texture2D>("lena");

        //updateTextureWithString("load lena ok");
        Image <Bgr, Byte> img = TextureConvert.Texture2dToImage <Bgr, byte>(lenaTexture);
        //updateTextureWithString("convert to image ok");

        //String fileName = "haarcascade_frontalface_default";
        //String fileName = "lbpcascade_frontalface";
        String fileName = "haarcascade_frontalface_alt2";
        String filePath = Path.Combine(Application.persistentDataPath, fileName + ".xml");
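        // The cascade definition is bundled as a TextAsset in Resources; the
        // CascadeClassifier constructor needs a path to a real file on disk, so
        // the bytes are written out under Application.persistentDataPath first.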
        //if (!File.Exists(filePath))
        {
            //updateTextureWithString("start move cascade xml");
            TextAsset cascadeModel = Resources.Load <TextAsset>(fileName);

#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, cascadeModel.bytes);
#else
            File.WriteAllBytes(filePath, cascadeModel.bytes);
#endif
            //updateTextureWithString("File size: " + new FileInfo(filePath).Length);
        }


        using (CascadeClassifier classifier = new CascadeClassifier(filePath))
            using (Image <Gray, Byte> gray = img.Convert <Gray, byte>())
            {
                //updateTextureWithString("classifier create ok");

                Rectangle[] faces = null;
                try
                {
                    faces = classifier.DetectMultiScale(gray);

                    //updateTextureWithString("face detected");
                    foreach (Rectangle face in faces)
                    {
                        CvInvoke.Rectangle(img, face, new MCvScalar(0, 255, 0));
                    }
                }
                catch (Exception e)
                {
                    //updateTextureWithString(e.Message);
                    return;
                }

                //updateTextureWithString(String.Format("{0} face found on image of {1} x {2}", faces.Length, img.Width, img.Height));
            }

        Texture2D texture = TextureConvert.ImageToTexture2D(img, FlipType.Vertical);

        this.GetComponent <GUITexture>().texture    = texture;
        this.GetComponent <GUITexture>().pixelInset = new Rect(-img.Width / 2, -img.Height / 2, img.Width, img.Height);
    }
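If detection is too slow or too noisy on a given texture, the `DetectMultiScale` call above also accepts optional tuning parameters. A minimal sketch of the same call with explicit values, meant as a drop-in replacement for the `classifier.DetectMultiScale(gray)` line inside the `try` block; the specific numbers are illustrative assumptions, not taken from the original example:

    // Sketch only: the same detection call with explicit tuning values
    // (1.2, 5 and 40x40 are illustrative assumptions, not from the original).
    faces = classifier.DetectMultiScale(
        gray,
        1.2,               // scaleFactor: larger steps scan fewer scales (faster, coarser)
        5,                 // minNeighbors: higher values reject more false positives
        new Size(40, 40)); // minSize: ignore candidate faces smaller than 40x40 pixels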
Example #2
    IEnumerator ocr_async()
    {
        String[] names = new string[] { "eng.cube.bigrams", "eng.cube.fold", "eng.cube.lm", "eng.cube.nn", "eng.cube.params", "eng.cube.size", "eng.cube.word-freq", "eng.tesseract_cube.nn", "eng.traineddata" };


        if (!Directory.Exists(outputPath))
        {
            Directory.CreateDirectory(outputPath);
        }

        yield return(Ninja.JumpToUnity);
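        // Ninja.JumpToUnity has moved this coroutine onto the Unity main thread;
        // Resources.Load below must not be called from a background thread.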

        foreach (String n in names)
        {
            TextAsset textAsset = Resources.Load <TextAsset>(Path.Combine("tessdata", n));
            String    filePath  = Path.Combine(outputPath, n);
#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, textAsset.bytes);
#else
            if (!File.Exists(filePath))
            {
                File.WriteAllBytes(filePath, textAsset.bytes);
            }
#endif
        }

        yield return(Ninja.JumpBack);
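        // Back on the background thread: constructing the Tesseract engine and
        // loading the language data can be slow, so it is kept off the main thread.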

        _ocr = new Tesseract(outputPath, "eng", OcrEngineMode.TesseractCubeCombined);

        yield return(Ninja.JumpToUnity);

        print("OCR engine loaded.");
        print("OCR processing..");
        Image <Bgr, Byte> img = TextureConvert.Texture2dToImage <Bgr, Byte>(original_texture);
        yield return(Ninja.JumpBack);

        _ocr.Recognize(img);

        Tesseract.Character[] characters = _ocr.GetCharacters();
        String messageOcr = _ocr.GetText().TrimEnd('\n', '\r'); // strip trailing line breaks from the OCR output

        yield return(Ninja.JumpToUnity);

        Debug.Log("Detected text: " + messageOcr);

        build_char_list(characters);

        progress_indicator.SetActive(false);
    }
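`build_char_list` and `progress_indicator` are presumably members of the surrounding MonoBehaviour and are not shown in this snippet. As a rough, hypothetical illustration of what such a helper might do with the recognizer output, relying only on the public `Tesseract.Character` fields (`Text`, `Cost`, `Region`); the body and its logging are assumptions, not the original implementation:

    // Hypothetical helper; the original build_char_list is not part of this example.
    void build_char_list(Tesseract.Character[] characters)
    {
        foreach (Tesseract.Character c in characters)
        {
            // Each recognized character exposes its text, a confidence cost and a bounding box.
            Debug.Log(string.Format("'{0}'  cost: {1}  region: {2}", c.Text, c.Cost, c.Region));
        }
    }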
Example #3
    // Use this for initialization
    void Start()
    {
        String[] names = new string[] { "eng.cube.bigrams", "eng.cube.fold", "eng.cube.lm", "eng.cube.nn", "eng.cube.params", "eng.cube.size", "eng.cube.word-freq", "eng.tesseract_cube.nn", "eng.traineddata" };

        // NOTE: machine-specific absolute path from the original sample; see the
        // persistentDataPath variant sketched after this example.
        String outputPath = Path.Combine("C:\\Emgu/emgucv-windesktop 3.1.0.2504/Emgu.CV.World", "tessdata");

        if (!Directory.Exists(outputPath))
        {
            Directory.CreateDirectory(outputPath);
        }

        foreach (String n in names)
        {
            TextAsset textAsset = Resources.Load <TextAsset>(Path.Combine("tessdata", n));
            String    filePath  = Path.Combine(outputPath, n);
#if UNITY_METRO
            UnityEngine.Windows.File.WriteAllBytes(filePath, textAsset.bytes);
#else
            if (!File.Exists(filePath))
            {
                File.WriteAllBytes(filePath, textAsset.bytes);
            }
#endif
        }

        _ocr = new Tesseract(outputPath, "eng", OcrEngineMode.TesseractCubeCombined);

        Debug.Log("OCR engine loaded.");
        print("OCR processing..");

        Image <Bgr, Byte> img = TextureConvert.Texture2dToImage <Bgr, Byte>(original_texture);
        _ocr.Recognize(img);

        Tesseract.Character[] characters = _ocr.GetCharacters();
        foreach (Tesseract.Character c in characters)   //draw rect for each character
        {
            CvInvoke.Rectangle(img, c.Region, new MCvScalar(255, 0, 0));
        }

        String messageOcr = _ocr.GetText().TrimEnd('\n', '\r'); // strip trailing line breaks from the OCR output
        Debug.Log("Detected text: " + messageOcr);

        Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
        original_texture = texture;
        build_map(characters);
    }
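The tessdata folder in this example is written to a hard-coded desktop path; on a device build the same files would normally go somewhere writable such as `Application.persistentDataPath`, the way Example #1 handles its cascade file. A small sketch of that variant (only the path changes, everything else stays as above):

    // Sketch: keep tessdata under the app's writable data folder instead of a
    // machine-specific absolute path.
    String outputPath = Path.Combine(Application.persistentDataPath, "tessdata");
    if (!Directory.Exists(outputPath))
    {
        Directory.CreateDirectory(outputPath);
    }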
Example #4
    public static bool FindBoard(Texture2D image, out Vector2[,] corners, int boardWidth = 9, int boardHeight = 6)
    {
        Image <Gray, byte> snapshot = TextureConvert.Texture2dToImage <Gray, byte>(image);

        Size patternSize = new Size(boardWidth, boardHeight); // number of inner corners per chessboard row and column


        //MCvPoint3D32f[][] corners_object_list = new MCvPoint3D32f[Frame_array_buffer.Length][];
        // PointF[][] corners_points_list = new PointF[Frame_array_buffer.Length][];

        PointF[] _corners = new PointF[patternSize.Width * patternSize.Height];

        bool           patternFound = false;
        Matrix <float> pointMatrix  = new Matrix <float>(_corners.Length, 1, 2);

        patternFound = CvInvoke.FindChessboardCorners(snapshot, patternSize, pointMatrix);

        if (patternFound)
        {
            corners = new Vector2[boardWidth, boardHeight];
            Matrix <float>[] pointChannels = pointMatrix.Split();
            int idx = 0;
            foreach (PointF p in _corners)
            {
                //Debug.Log("points[" + idx + "]=" + pointChannels[0][idx,0] + "," + pointChannels[1][idx, 0]);
                corners[idx % boardWidth, idx / boardWidth] = new Vector2(((float)pointChannels[0][idx, 0]) / image.width, 1 - ((float)pointChannels[1][idx, 0]) / image.height);

                //Debug.Log("points[" + idx + "]=" + corners[idx % boardWidth, idx / boardWidth]);

                idx++;
            }
        }
        else
        {
            corners = null;
        }
        return(patternFound);
    }
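A possible call site for `FindBoard`, only to show how the `out` parameter and the normalized corner coordinates are consumed; `boardPhoto` and the log messages are illustrative, and the enclosing class is not shown in the example:

    // Hedged usage sketch; boardPhoto is an assumed Texture2D with a 9x6 chessboard in view.
    Vector2[,] corners;
    if (FindBoard(boardPhoto, out corners))
    {
        // Corners come back in normalized [0,1] texture coordinates, row by row.
        Debug.Log("First inner corner at " + corners[0, 0]);
    }
    else
    {
        Debug.Log("No chessboard pattern found in the texture.");
    }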