        /// <summary>
        /// Loads the training images and generates the descriptors file
        /// </summary>
        private void LoadTrain()
        {
            Image <Bgr, Byte> image;

            for (int i = 0; i < banknotes.Count; i++)
            {
                var banknote = banknotes[i];
                // j starts at 1 because that is the index used in the training image file names
                for (int j = 1; j <= banknoteSizes[banknote]; j++)
                {
                    var nameFile = $@"{Path}\Train\{banknote} ({j}).jpg";
                    image = new Image <Bgr, Byte>(nameFile);
                    listaImagensTreino.Add(nameFile, image);
                    MKeyPoint[] keypoints = extractor.Detect(image);
                    listaKeyPointsImagensTreino.Add(nameFile, keypoints);
                    Mat features = new Mat();
                    extractor.Compute(image, new VectorOfKeyPoint(keypoints), features);
                    featureUnclustered.PushBack(features);
                }
            }
            // Save the descriptors computed above to a train_descriptors.yml file
            FileStorage fs = new FileStorage($@"{Path}\SVM Datasets\train_descriptors.yml", FileStorage.Mode.Write);

            // Write the descriptors under the label "train_descriptors" in train_descriptors.yml
            fs.Write(featureUnclustered, "train_descriptors");
            fs.ReleaseAndGetString();
            // Add the raw (unclustered) descriptors to the BOW trainer
            bowTrainer.Add(featureUnclustered);
        }
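
        // A minimal follow-up sketch (assumptions: bowTrainer is a BOWKMeansTrainer created
        // elsewhere, e.g. new BOWKMeansTrainer(1000, new MCvTermCriteria(100, 0.01), 1,
        // KMeansInitType.PPCenters); the vocabulary file name is hypothetical). After
        // LoadTrain() has pushed the raw descriptors, the BOW vocabulary can be clustered
        // and saved the same way as the descriptors above:
        private void BuildVocabulary()
        {
            Mat vocabulary = bowTrainer.Cluster();
            FileStorage fs = new FileStorage($@"{Path}\SVM Datasets\vocabulary.yml", FileStorage.Mode.Write);
            fs.Write(vocabulary, "vocabulary");
            fs.ReleaseAndGetString();
        }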
        public bool ConfigRecognitionImageTrain(Mat imageTrain, Mat roiTrain, bool useGlobalMatch)
        {
            _trainsImage.Push(imageTrain);

            _keypointsImageTrain.Add(new VectorOfKeyPoint());
            _descriptorsImageTrain.Push(new Mat());

            _LODIndex = _trainsImage.Size - 1;

            using (SIFT sift = new SIFT())
            {
                // Insert the target image's keypoints into the keypoint list
                _keypointsImageTrain.Insert(_LODIndex, new VectorOfKeyPoint(sift.Detect(_trainsImage[_LODIndex], roiTrain)));
                if (_keypointsImageTrain[_LODIndex] == null || _keypointsImageTrain[_LODIndex].Size < 4)
                {
                    return(false);
                }

                // Compute descriptors for the extracted keypoints; with too few descriptors,
                // return false (not recognized)
                sift.Compute(_trainsImage[_LODIndex], _keypointsImageTrain[_LODIndex], _descriptorsImageTrain[_LODIndex]);
                if (_descriptorsImageTrain[_LODIndex].Rows < 4)
                {
                    return(false);
                }
            }

            if (useGlobalMatch)
            {
                return(true);
            }
            else
            {
                return(ConfigureImageTrainROI(_keypointsImageTrain[_LODIndex], roiTrain));
            }
        }
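
        // Usage sketch (hypothetical inputs; ConfigureImageTrainROI and the _trainsImage
        // fields are defined elsewhere in this class):
        // Mat train = CvInvoke.Imread(@"Train\10 (1).jpg");
        // bool recognized = ConfigRecognitionImageTrain(train, null, useGlobalMatch: true);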
Example #3
 public OriantatioOnMap(Image <Rgb, byte> Map, SIFTParametrs parametrs, double Compression = 4, double Radius = 20)
 {
     this.Map = Map;
     using (SIFT siftCPU = new SIFT(parametrs.nFeatures, parametrs.nOctaveLayers,
                                    parametrs.contrastThreshold, parametrs.edgeThreshold, parametrs.sigma))
     {
         VectorMapKeyPoint = new VectorOfKeyPoint(siftCPU.Detect(Map));
         VectorMapKeyPoint = FilterKeyPoint(VectorMapKeyPoint, Map, Compression, Radius, parametrs);
         MapDiscriptors = MapDiscriptors ?? new Mat(); // guard: Compute needs an allocated output Mat
         siftCPU.Compute(Map, VectorMapKeyPoint, MapDiscriptors);
     }
 }
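
 // Construction sketch (assumption: SIFTParametrs is a plain holder for the five values
 // used above; the numbers mirror OpenCV's SIFT defaults):
 // var parametrs = new SIFTParametrs { nFeatures = 0, nOctaveLayers = 3,
 //                                     contrastThreshold = 0.04, edgeThreshold = 10, sigma = 1.6 };
 // var orientation = new OriantatioOnMap(map, parametrs);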
Example #4
        public Mat PutFeaturesOnImage()
        {
            using (SIFT siftCPU = new SIFT())
            {
                Details.modelKeyPoints = new VectorOfKeyPoint();

                mKeyPoints = siftCPU.Detect(Details.thinnedimage, null);
                Details.modelKeyPoints.Push(mKeyPoints);

                // Descriptors are computed here, but only the drawn keypoints are returned
                Mat descriptors = new Mat();
                siftCPU.Compute(Details.thinnedimage, Details.modelKeyPoints, descriptors);

                Mat resultimage = new Mat();
                Features2DToolbox.DrawKeypoints(Details.thinnedimage, Details.modelKeyPoints, resultimage, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.Default);

                return(resultimage);
            }
        }
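
        // Usage sketch (assumption: Details.thinnedimage was set beforehand; the file name
        // is hypothetical):
        // Mat drawn = PutFeaturesOnImage();
        // CvInvoke.Imwrite("keypoints.png", drawn);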
Example #5
        public float[] ComputeDescriptor(Image <Bgr, byte> image, int stepX = 9, int stepY = 9)
        {
            VectorOfKeyPoint keypoints = new VectorOfKeyPoint();
            Mat descriptors            = new Mat();

            // Dense sampling: lay synthetic keypoints on a regular grid instead of detecting them
            for (int y = stepY; y < image.Rows - stepY; y += stepY)
            {
                for (int x = stepX; x < image.Cols - stepX; x += stepX)
                {
                    MKeyPoint[] point = { new MKeyPoint() };
                    point[0].Size  = stepX;
                    point[0].Point = new Point(x, y);
                    keypoints.Push(point);
                }
            }

            using (SIFT sift = new SIFT())
            {
                sift.Compute(image, keypoints, descriptors);
            }

            float[] returnArray = new float[descriptors.Rows * descriptors.Cols];
            descriptors.CopyTo(returnArray);
            return(returnArray);
        }
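
        // Usage sketch (hypothetical file name). The vector length depends on the image
        // size: one 128-float SIFT descriptor per grid point.
        // float[] denseFeatures = ComputeDescriptor(new Image <Bgr, byte>(@"banknote.jpg"));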
Example #6
        public Bitmap DrawSift(Image <Rgb, byte> modelimage, Image <Rgb, byte> observedimage)
        {
            int    k = 2;
            double uniquenessThreshold = 0.80;


            VectorOfKeyPoint modelKeyPoints    = new VectorOfKeyPoint(),
                             observedKeyPoints = new VectorOfKeyPoint();

            Mat modeldiscriptors    = new Mat();
            Mat observeddiscriptors = new Mat();

            //observedKeyPoints = observedKeyPoints.Resize(1.0 / Compression, Inter.Area);
            using (SIFT siftCPU = new SIFT(0, 5, 0.04, 10.0, 1.6))
            {
                siftCPU.DetectAndCompute(modelimage, null, modelKeyPoints, modeldiscriptors, false);
                observedKeyPoints = new VectorOfKeyPoint(siftCPU.Detect(observedimage));
                siftCPU.Compute(observedimage, observedKeyPoints, observeddiscriptors);
            }
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                    using (Emgu.CV.Features2D.DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                    {
                        matcher.Add(modeldiscriptors);
                        matcher.KnnMatch(observeddiscriptors, matches, k, null);
                    }

            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            Mat homography = null;

            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                           matches, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                          observedKeyPoints, matches, mask, 2);
                }
            }

            observedimage = new Image <Rgb, byte>(DrawZone(observedimage.Mat, observedKeyPoints, matches, mask).Bitmap);
            //modelKeyPoints.FilterByPixelsMask(new Image<Gray, byte>(mask.Bitmap));
            //observedKeyPoints.FilterByPixelsMask(new Image<Gray, byte>(mask.Bitmap));

            Mat result = new Mat();

            //Draw the matched keypoints
            Features2DToolbox.DrawMatches(modelimage, modelKeyPoints, observedimage, observedKeyPoints,
                                          matches, result, new MCvScalar(0, 255, 0), new MCvScalar(255, 0, 0), mask);
            if (homography != null)
            {
                //draw a rectangle along the projected model
                SD.Rectangle rect = new SD.Rectangle(SD.Point.Empty, modelimage.Size);
                PointF[]     pts  = new PointF[]
                {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                pts = CvInvoke.PerspectiveTransform(pts, homography);

#if NETFX_CORE
                Point[] points = Extensions.ConvertAll <PointF, Point>(pts, Point.Round);
#else
                SD.Point[] points = Array.ConvertAll <PointF, SD.Point>(pts, SD.Point.Round);
#endif
                using (VectorOfPoint vp = new VectorOfPoint(points))
                {
                    CvInvoke.Polylines(result, vp, true, new MCvScalar(0, 0, 255), 2);
                }
            }

            return(result.Bitmap);
        }
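
        // Usage sketch (hypothetical file names):
        // var model    = new Image <Rgb, byte>(@"model.jpg");
        // var observed = new Image <Rgb, byte>(@"scene.jpg");
        // Bitmap matchesImage = DrawSift(model, observed);
        // matchesImage.Save(@"matches.png");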
    IEnumerator CalculateHomography()
    //void CalculateHomography()
    {
        //Debug.Log("CalculateHomography1");
        //myCam.Pause();
        yield return(new WaitForEndOfFrame());

        //yield return new WaitForSeconds((float)0.5);
        // After startup, wait at least 0.3 s for the camera image to appear;
        // otherwise computing SIFT right away blows up memory

        //input camera image

        /*Texture2D sourceTex = ScreenCapture.CaptureScreenshotAsTexture();
         * Color[] pix = sourceTex.GetPixels((int)rectBotLeft.x, (int)rectBotLeft.y, width, height);
         * Texture2D tex = new Texture2D(width, height);
         * tex.SetPixels(pix);
         * tex.Apply();*/

        //Debug.Log("CalculateHomography2");

        // RawImage is positioned at (0,0); coordinates start from the bottom left
        int xStart = (int)(Screen.width - rawImageRT.rect.width) / 2;
        int yStart = (int)(Screen.height - rawImageRT.rect.height) / 2;

        /*Debug.Log("xStart: "+xStart);
         * Debug.Log("yStart: "+yStart);
         * Debug.Log("Screen.width: "+Screen.width);
         * Debug.Log("Screen.height: "+Screen.height);
         * Debug.Log("rawImageRT.rect.width: "+rawImageRT.rect.width);
         * Debug.Log("rawImageRT.rect.height: "+rawImageRT.rect.height);*/

        //get sign image with text
        Texture2D sourceTex = ScreenCapture.CaptureScreenshotAsTexture();

        //rawImageRI.texture = sourceTex;
        //Color[] pix = sourceTex.GetPixels((int)rectBotLeft.x, (int)rectBotLeft.y, width, height);
        Color[] pix = sourceTex.GetPixels(xStart, yStart, (int)rawImageRT.rect.width, (int)rawImageRT.rect.height);
        tex = new Texture2D((int)rawImageRT.rect.width, (int)rawImageRT.rect.height);
        tex.SetPixels(pix);
        tex.Apply();

        //Debug.Log("tex.width: "+tex.width);
        //Debug.Log("tex.height: "+tex.height);

        //input fixed image

        /*Texture2D tex = new Texture2D(2,2);
         * string imgPath = "../signboard-rectangle/test-199-fast-628.jpg";
         * byte [] binaryImageData = File.ReadAllBytes(imgPath);
         * tex.LoadImage(binaryImageData);*/

        //scale texture to make it smaller
        TextureScale.Bilinear(tex, tex.width / 2, tex.height / 2);

        // Required: prevents memory from blowing up
        tex = TextureGray.ToGray(tex);

        //rawImageRI.texture = tex;

        mat = Unity.TextureToMat(tex);

        Destroy(sourceTex);
        Destroy(tex);

        //Cv2.ImShow("img", mat); ok
        //OpenCvSharp.Mat mat = Cv2.ImRead(imgPath, ImreadModes.Unchanged);

        //Debug.Log("mat: "+mat.ToString());
        //string imgPath = "../signboard-rectangle/test-199-fast-628.jpg";
        //OpenCvSharp.Mat mat = Cv2.ImRead(imgPath);
        InputArray imgCam = InputArray.Create(mat);

        desCam = OutputArray.Create(mat);

        //Cv2.ImShow("img", mat); ok
        //OpenCvSharp.Mat mat2 = mat;

        //sift = SIFT.Create();

        //System.Diagnostics.Stopwatch time = new System.Diagnostics.Stopwatch();
        //time.Start ();

        // Laggy (slow step)
        OpenCvSharp.KeyPoint[] kpCam = sift.Detect(mat);
        //OpenCvSharp.KeyPoint[] kpCam = surf.Detect(mat);
        //OpenCvSharp.KeyPoint[] kpCam = orb.Detect(mat);
        //OpenCvSharp.KeyPoint[] kpCam = brief.Detect(mat);

        //time.Stop();
        //Debug.Log("執行 " + time.Elapsed.TotalSeconds + " 秒");

        //myCam.Pause();
        //rawImageRI.texture = tex;

        //Cv2.ImShow("img", mat); ok
        //Cv2.ImShow("img", mat2); ok

        sift.Compute(imgCam, ref kpCam, desCam);
        //surf.Compute(img2, ref kpCam, desCam);
        //orb.Compute(img2, ref kpCam, desCam);
        //brief.Compute(img2, ref kpCam, desCam);

        //Cv2.ImShow("img", mat);
        //Cv2.ImShow("img", mat2); 爆炸

        OpenCvSharp.Mat desCammat = desCam.GetMat();
        //Debug.Log("desCammat: "+desCammat);

        //if (!M) if homography M has not been computed yet {
        //desFirstCatch = desCam;
        //OutputArray descriptors_object = des1;

        OpenCvSharp.Mat des1mat = des1.GetMat();
        OpenCvSharp.Mat des2mat = des2.GetMat();
        //OpenCvSharp.Mat des3mat = des3.GetMat();
        //Debug.Log("des1mat: "+des1mat);

        OpenCvSharp.DMatch[] dmatch1 = descriptorMatcher.Match(des1mat, desCammat);
        OpenCvSharp.DMatch[] dmatch2 = descriptorMatcher.Match(des2mat, desCammat);
        //OpenCvSharp.DMatch[] dmatch3 = descriptorMatcher.Match(des3mat, desCammat);

        //Debug.Log("damtch1[0]: "+dmatch1[0].ToString());
        //}
        //else {
        //OpenCvSharp.Mat desFirstCatchmat = desFirstCatch.GetMat();

        // OpenCvSharp.DMatch[] dmatch = descriptorMatcher.Match(desFirstCatchmat, desCammat);
        // OutputArray descriptors_object = desFirstCatch;
        //}

        double max_dist1 = 0;
        double min_dist1 = 100;
        double max_dist2 = 0;
        double min_dist2 = 100;

        //double max_dist3 = 0;
        //double min_dist3 = 100;

        //Cv2.ImShow("img", mat); 爆炸

        //Quick calculation of max and min distances between keypoints
        foreach (OpenCvSharp.DMatch d in dmatch1)
        {
            double dist = d.Distance;
            if (dist < min_dist1)
            {
                min_dist1 = dist;
            }
            if (dist > max_dist1)
            {
                max_dist1 = dist;
            }
        }

        foreach (OpenCvSharp.DMatch d in dmatch2)
        {
            double dist = d.Distance;
            if (dist < min_dist2)
            {
                min_dist2 = dist;
            }
            if (dist > max_dist2)
            {
                max_dist2 = dist;
            }
        }

        /*foreach (OpenCvSharp.DMatch d in dmatch3){
         *  double dist = d.Distance;
         *  if( dist < min_dist3 ) min_dist3 = dist;
         *  if( dist > max_dist3 ) max_dist3 = dist;
         * }*/

        //Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
        List <OpenCvSharp.DMatch> goodMatch1 = new List <OpenCvSharp.DMatch>();

        foreach (OpenCvSharp.DMatch d in dmatch1)
        {
            if (d.Distance < 3 * min_dist1)
            {
                goodMatch1.Add(d);
            }
        }

        List <OpenCvSharp.DMatch> goodMatch2 = new List <OpenCvSharp.DMatch>();

        foreach (OpenCvSharp.DMatch d in dmatch2)
        {
            if (d.Distance < 3 * min_dist2)
            {
                goodMatch2.Add(d);
            }
        }

        /*List<OpenCvSharp.DMatch> goodMatch3 = new List<OpenCvSharp.DMatch>();
         * foreach (OpenCvSharp.DMatch d in dmatch3){
         *  if( d.Distance < 3*min_dist3 )
         *      goodMatch3.Add(d);
         * }*/

        List <OpenCvSharp.Point2f> srcPts1 = new List <OpenCvSharp.Point2f>();
        List <OpenCvSharp.Point2f> dstPts1 = new List <OpenCvSharp.Point2f>();

        foreach (OpenCvSharp.DMatch d in goodMatch1)
        {
            //-- Get the keypoints from the good matches
            srcPts1.Add(kp1[d.QueryIdx].Pt);
            dstPts1.Add(kpCam[d.TrainIdx].Pt);
            //Debug.Log("kp1[d.QueryIdx].Pt: "+kp1[d.QueryIdx].Pt);
        }

        List <OpenCvSharp.Point2f> srcPts2 = new List <OpenCvSharp.Point2f>();
        List <OpenCvSharp.Point2f> dstPts2 = new List <OpenCvSharp.Point2f>();

        foreach (OpenCvSharp.DMatch d in goodMatch2)
        {
            //-- Get the keypoints from the good matches
            srcPts2.Add(kp2[d.QueryIdx].Pt);
            dstPts2.Add(kpCam[d.TrainIdx].Pt);
            //Debug.Log("kp1[d.QueryIdx].Pt: "+kp1[d.QueryIdx].Pt);
        }

        /*List<OpenCvSharp.Point2f> srcPts3 = new List<OpenCvSharp.Point2f>();
         * List<OpenCvSharp.Point2f> dstPts3 = new List<OpenCvSharp.Point2f>();
         * foreach (OpenCvSharp.DMatch d in goodMatch3){
         *  //-- Get the keypoints from the good matches
         *  srcPts3.Add(kp3[d.QueryIdx].Pt);
         *  dstPts3.Add(kpCam[d.TrainIdx].Pt);
         *  //Debug.Log("kp1[d.QueryIdx].Pt: "+kp1[d.QueryIdx].Pt);
         * }*/

        // Stop this pass if fewer than 200 keypoints matched for both reference images
        if (srcPts1.Count < 200 && srcPts2.Count < 200)
        {
            yield break;
        }

        if (srcPts1.Count >= srcPts2.Count)
        {
            srcPts        = new List <OpenCvSharp.Point2f>(srcPts1);
            dstPts        = new List <OpenCvSharp.Point2f>(dstPts1);
            text1.enabled = true;
            text2.enabled = false;
            num1++;
            //text3.enabled = false;
        }

        /*else if(srcPts2.Count >= srcPts1.Count && srcPts2.Count >= srcPts3.Count){
         *  srcPts = new List<OpenCvSharp.Point2f>(srcPts2);
         *  dstPts = new List<OpenCvSharp.Point2f>(dstPts2);
         *  text2.enabled = true;
         *  text1.enabled = false;
         *  text3.enabled = false;
         * }*/
        else
        {
            srcPts        = new List <OpenCvSharp.Point2f>(srcPts2);
            dstPts        = new List <OpenCvSharp.Point2f>(dstPts2);
            text2.enabled = true;
            text1.enabled = false;
            num2++;
            //text2.enabled = false;
        }

        if (num1 > num2 + 10)
        {
            text1.enabled = true;
            text2.enabled = false;
        }

        if (num2 > num1 + 10)
        {
            text2.enabled = true;
            text1.enabled = false;
        }

        if (num1 > 60 || num2 > 60)
        {
            num1 = 0;
            num2 = 0;
        }
        //OpenCvSharp.Mat mat2 = mat;

        //Cv2.DrawKeypoints(mat, kpCam, mat2);

        //Cv2.ImShow("img", mat); 亂碼圖

        //Texture2D tex2 = new Texture2D(8, 8);
        //tex2 = Unity.MatToTexture(mat);
        //rawImageRI.texture = tex2;
        //myCam.Pause();

        //Cv2.ImShow("img", mat2); 亂碼圖

        Texture2D emptyTex = new Texture2D(8, 8);

        OpenCvSharp.Mat outputImg = Unity.TextureToMat(emptyTex);
        //Debug.Log("outputImg: "+outputImg.ToString());

        InputArray  srcArr = InputArray.Create <OpenCvSharp.Point2f>(srcPts);
        InputArray  dstArr = InputArray.Create <OpenCvSharp.Point2f>(dstPts);
        OutputArray mask   = OutputArray.Create(outputImg);

        OpenCvSharp.Mat M = Cv2.FindHomography(srcArr, dstArr, HomographyMethods.Ransac, 5, mask);

        OpenCVForUnity.Mat transMat = new OpenCVForUnity.Mat(3, 3, CvType.CV_32FC1);
        transMat.put(0, 0, M.Get <double>(0, 0), M.Get <double>(0, 1), M.Get <double>(0, 2),
                     M.Get <double>(1, 0), M.Get <double>(1, 1), M.Get <double>(1, 2),
                     M.Get <double>(2, 0), M.Get <double>(2, 1), M.Get <double>(2, 2));
        //Debug.Log("transMat: "+transMat.dump());

        //Debug.Log("mask: "+mask);
        //OpenCvSharp.Mat maskMat = mask.GetMat();
        //Debug.Log("maskMat: "+maskMat.ToString());
        //maskMoB = new OpenCvSharp.MatOfByte(maskMat);

        //-- Get the corners from the image_1 ( the object to be "detected" )

        /*OpenCvSharp.Point2f[] obj_corners = new OpenCvSharp.Point2f[4];
         * obj_corners[0] = new OpenCvSharp.Point2f(0, 0);
         * obj_corners[1] = new OpenCvSharp.Point2f(inputTex.width, 0);
         * obj_corners[2] = new OpenCvSharp.Point2f(inputTex.width, inputTex.height);
         * obj_corners[3] = new OpenCvSharp.Point2f(0, inputTex.height);
         *
         * //OpenCvSharp.Point2f[] scene_corners = new OpenCvSharp.Point2f[4];
         * //scene_corners = Cv2.PerspectiveTransform(obj_corners, M);
         *
         * //if (!M) if homography M has not been computed yet {
         * //Cv2.DrawMatches(inputImg, kp1, mat, kpCam, goodMatch, outputImg, OpenCvSharp.Scalar.All(-1),
         * //OpenCvSharp.Scalar.All(-1), maskMoB.ToArray(), DrawMatchesFlags.NotDrawSinglePoints);
         * //else {
         *
         * //Texture2D outputTex = Unity.MatToTexture(outputImg);
         * //rawImageRI.texture = outputTex;
         *
         * //-- Draw lines between the corners (the mapped object in the scene - image_2 )
         * //Cv2.Line(outputImg, scene_corners[0] + obj_corners[1], scene_corners[1] + obj_corners[1], OpenCvSharp.Scalar.LightBlue, 4);
         * //Cv2.Line(outputImg, scene_corners[1] + obj_corners[1], scene_corners[2] + obj_corners[1], OpenCvSharp.Scalar.LightBlue, 4);
         * //Cv2.Line(outputImg, scene_corners[2] + obj_corners[1], scene_corners[3] + obj_corners[1], OpenCvSharp.Scalar.LightBlue, 4);
         * //Cv2.Line(outputImg, scene_corners[3] + obj_corners[1], scene_corners[0] + obj_corners[1], OpenCvSharp.Scalar.LightBlue, 4);
         *
         * //OpenCvSharp.Mat outimg = Unity.TextureToMat(emptyTex);
         * //inputImg = Unity.TextureToMat(emptyTex);
         * //Cv2.DrawKeypoints(mat, kpCam, outimg, OpenCvSharp.Scalar.LightBlue);
         *
         * //show image with text after homography
         * /*string imgPath2 = "../signboard-rectangle/test-IMG_0204-text.PNG";
         * textTex = new Texture2D(2,2);
         * byte [] binaryImageData2 = File.ReadAllBytes(imgPath2);
         * textTex.LoadImage(binaryImageData2);
         * rawImageRI.texture = textTex;*/

        /*OpenCVForUnity.Mat inputTextImg = new OpenCVForUnity.Mat(new OpenCVForUnity.Size(textTex.width, textTex.height), CvType.CV_8UC4);
         * Utils.texture2DToMat(textTex, inputTextImg);
         * OpenCVForUnity.Mat outputTextImg = new OpenCVForUnity.Mat(new OpenCVForUnity.Size(textTex.width, textTex.height), CvType.CV_8UC4);
         *
         * Imgproc.warpPerspective(inputTextImg, outputTextImg, transMat, new OpenCVForUnity.Size(textTex.width, textTex.height));
         *
         * Texture2D outputTex = new Texture2D((int)textTex.width, (int)textTex.height, TextureFormat.RGB24, false);
         * Utils.matToTexture2D(outputTextImg, outputTex);*/

        //TextureScale.Bilinear(outputTex, outputTex.width/5, outputTex.height/5);
        //rawImageRI.texture = outputTex;

        //text.enabled = true;

        /*Vector3 scale;
         * scale.x = new Vector4((float)M.Get<double>(0,0), (float)M.Get<double>(1,0), (float)M.Get<double>(2,0), 0).magnitude;
         * scale.y = new Vector4((float)M.Get<double>(0,1), (float)M.Get<double>(1,1), (float)M.Get<double>(2,1), 0).magnitude;
         * scale.z = new Vector4((float)M.Get<double>(0,2), (float)M.Get<double>(1,2), (float)M.Get<double>(2,2), 0).magnitude;
         *
         * Vector3 forward;
         * forward.x = (float)M.Get<double>(0,2);
         * forward.y = (float)M.Get<double>(1,2);
         * forward.z = (float)M.Get<double>(2,2);
         *
         * Vector3 upwards;
         * upwards.x = (float)M.Get<double>(0,1);
         * upwards.y = (float)M.Get<double>(1,1);
         * upwards.z = (float)M.Get<double>(2,1);
         *
         * //textRT.localScale = scale;
         * //textRT.rotation = Quaternion.LookRotation(forward, upwards);*/

        Matrix4x4 matrix = new Matrix4x4();

        /*matrix.SetRow(0, new Vector4((float)M.Get<double>(0,0), (float)M.Get<double>(0,1), (float)M.Get<double>(0,2),0));
         * matrix.SetRow(1, new Vector4((float)M.Get<double>(1,0), (float)M.Get<double>(1,1), (float)M.Get<double>(1,2),0));
         * matrix.SetRow(2, new Vector4(0,0,1,0));
         * matrix.SetRow(3, new Vector4(0,0,0,1));*/

        // Using the inverse works reasonably well
        matrix.SetRow(0, new Vector4((float)M.Get <double>(0, 0), (float)M.Get <double>(0, 1), 0, (float)M.Get <double>(0, 2)));
        matrix.SetRow(1, new Vector4((float)M.Get <double>(1, 0), (float)M.Get <double>(1, 1), 0, (float)M.Get <double>(1, 2)));
        matrix.SetRow(2, new Vector4(0, 0, 1, 0));
        matrix.SetRow(3, new Vector4(0, 0, 0, 1));

        Matrix4x4 inverse = matrix.inverse;

        //textRT.localScale = matrix.lossyScale;
        //textRT.rotation = matrix.rotation; // rotation and eulerAngles behave the same
        textRT1.rotation = inverse.rotation;
        textRT2.rotation = inverse.rotation;
        //textRT3.rotation = inverse.rotation;

        Destroy(emptyTex);

        //calculate euler angle

        /*double angleX = Math.Asin(-M.Get<double>(2,1));
         * double angleY = Math.Atan2(M.Get<double>(2,0), M.Get<double>(2,2));
         * double angleZ = Math.Atan2(M.Get<double>(0,1), M.Get<double>(1,1));
         * //textRT.eulerAngles = new Vector3((float)angleX, (float)angleY, (float)angleZ);
         * //Debug.Log("textRT.eulerAngles: "+textRT.eulerAngles.ToString());
         *
         * //calculate quaternion
         * double w = Math.Sqrt(1 + M.Get<double>(0,0) + M.Get<double>(1,1) + M.Get<double>(2,2))/2;
         * double w4 = w*4;
         * double qx = (M.Get<double>(2,1) - M.Get<double>(1,2))/w4 ;
         * double qy = (M.Get<double>(0,2) - M.Get<double>(2,0))/w4 ;
         * double qz = (M.Get<double>(1,0) - M.Get<double>(0,1))/w4 ;
         * //textRT.rotation = new Quaternion((float)qx, (float)qy, (float)qz, 1);
         *
         * double tr = M.Get<double>(0,0) + M.Get<double>(1,1) + M.Get<double>(2,2);
         * Debug.Log("tr: "+tr);*/

        //Cv2.ImShow("img", mat);
        //myCam.Pause();
    }
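    // A minimal helper sketch (hypothetical; not part of the original script): projects the
    // reference image corners through the homography M, as the commented-out obj_corners
    // block above sketches.
    OpenCvSharp.Point2f[] ProjectCorners(OpenCvSharp.Mat M, int width, int height)
    {
        OpenCvSharp.Point2f[] corners =
        {
            new OpenCvSharp.Point2f(0, 0),
            new OpenCvSharp.Point2f(width, 0),
            new OpenCvSharp.Point2f(width, height),
            new OpenCvSharp.Point2f(0, height)
        };
        // PerspectiveTransform maps each corner with the 3x3 homography
        return OpenCvSharp.Cv2.PerspectiveTransform(corners, M);
    }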
    void Start()
    {
        //thread = new Thread(new ThreadStart(ThreadMainFunc));
        //thread = new Thread(ThreadMainFunc);
        //thread.IsBackground = true;
        //thread = Loom.RunAsync(ThreadMainFunc);
        //thread.Start();
        //textTex = Resources.Load<Texture2D>("test-IMG_0204-text");
        //rawImageRI.texture = textTex;

        //TextAsset binary = (TextAsset)AssetDatabase.LoadAssetAtPath("Assets/img1.bytes", typeof(TextAsset));

        //inputTex = Resources.Load<Texture2D>("test-IMG_0204");
        //rawImageRI.texture = inputTex;
        //Debug.Log("inputTex.width: "+inputTex.width);
        //Debug.Log("inputTex.height: "+inputTex.height);

        //tex.LoadImage(binary.bytes);
        //Texture2D tex = (Texture2D)AssetDatabase.LoadAssetAtPath("Assets/img.PNG", typeof(Texture2D));

        //StartCoroutine(GetTextImg());

        //Size texSize = new Size(tex.width, tex.height);
        //Mat mat = new Mat(texSize, CvType.CV_8UC4);
        //Utils.texture2DToMat(tex, mat);

        //inputImg = Cv2.ImRead(imgPath);
        //Cv2.ImShow("inputImg", inputImg);

        //tex = Unity.MatToTexture(inputImg);
        //rawImageRI.texture = tex;

        /*inputTex = new Texture2D(2,2);
         * string imgPath = "../signboard-rectangle/test-IMG_0204.PNG";
         * byte [] binaryImageData = File.ReadAllBytes(imgPath);
         * inputTex.LoadImage(binaryImageData);*/

        //inputTex = Resources.Load<Texture2D>("forAddText");

        // Required: prevents memory from blowing up
        Texture2D inputTex1 = TextureGray.ToGray("1");
        Texture2D inputTex2 = TextureGray.ToGray("2");

        //Texture2D inputTex3 = TextureGray.ToGray("3");
        //Debug.Log("inputTex.width: "+inputTex.width);
        //Debug.Log("inputTex.height: "+inputTex.height);

        //rawImageRI.texture = inputTex;

        OpenCvSharp.Mat inputImg1 = Unity.TextureToMat(inputTex1);
        OpenCvSharp.Mat inputImg2 = Unity.TextureToMat(inputTex2);
        //OpenCvSharp.Mat inputImg3 = Unity.TextureToMat(inputTex3);
        //OpenCvSharp.Mat inputImg2 = Unity.TextureToMat(inputTex);
        //Cv2.ImShow("img", inputImg);

        InputArray img1 = InputArray.Create(inputImg1);
        InputArray img2 = InputArray.Create(inputImg2);

        //InputArray img3 = InputArray.Create(inputImg3);
        //Debug.Log("inputImg: "+inputImg.ToString());
        //InputArray mask = null;
        //OpenCvSharp.KeyPoint[] kp1 = null;
        des1 = OutputArray.Create(inputImg1);
        des2 = OutputArray.Create(inputImg2);
        //des3 = OutputArray.Create(inputImg3);
        //Debug.Log("des1: "+des1);

        //Initialize the SIFT detector and extractor
        //siftDetect = FeatureDetector.create(3);
        //siftExtract = DescriptorExtractor.create(1);

        sift = SIFT.Create();
        //surf = SURF.Create((double)100);
        //orb = OpenCvSharp.ORB.Create();
        //brief = OpenCvSharp.XFeatures2D.BriefDescriptorExtractor.Create();

        // If the image is too large, the app is terminated due to a memory error
        kp1 = sift.Detect(inputImg1);
        kp2 = sift.Detect(inputImg2);
        //kp3 = sift.Detect(inputImg3);
        //kp1 = surf.Detect(inputImg);
        //kp1 = orb.Detect(inputImg);
        //kp1 = brief.Detect(inputImg);

        //Cv2.ImShow("img", inputImg); ok

        sift.Compute(img1, ref kp1, des1);
        sift.Compute(img2, ref kp2, des2);
        //sift.Compute(img3, ref kp3, des3);
        //surf.Compute(img1, ref kp1, des1);
        //orb.Compute(img1, ref kp1, des1);
        //brief.Compute(img1, ref kp1, des1);

        //Cv2.ImShow("img", inputImg); 亂碼圖
        //Cv2.ImShow("img", inputImg2); ok

        //foreach (OpenCvSharp.KeyPoint kp in kp1)
        // Debug.Log("kp: "+kp.ToString());

        // FlannBased makes Unity crash, so use BruteForce
        descriptorMatcher = OpenCvSharp.DescriptorMatcher.Create("BruteForce");

        //sift.DetectAndCompute(img1, mask, out kp1, des1);

        //MatOfKeyPoint kp1 = new MatOfKeyPoint();
        //Mat des1 = new Mat();
        //siftDetect.detect(inputImg, kp1);
        //siftExtract.compute(inputImg, kp1, des1);

        //StartCoroutine(OpenCamera()); // open the camera
        //StartCoroutine(CalculateHomography());

        /*Texture2D sourceTex = ScreenCapture.CaptureScreenshotAsTexture();
         * Color[] pix = sourceTex.GetPixels((int)rectBotLeft.x, (int)rectBotLeft.y, width, height);
         * tex = new Texture2D(width, height);
         * tex.SetPixels(pix);
         * tex.Apply();
         *
         * tex = TextureGray.ToGray(tex);
         *
         * mat = Unity.TextureToMat(tex);
         *
         * InputArray img2 = InputArray.Create(mat);
         * desCam = OutputArray.Create(mat);
         *
         * kpCam = sift.Detect(mat);*/
    }
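
    // Invocation sketch (assumption: recognition is driven per frame, as the commented-out
    // StartCoroutine(CalculateHomography()) calls above suggest):
    // void Update()
    // {
    //     StartCoroutine(CalculateHomography());
    // }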
        private List <Result> DetectBanknotesTrain(Mat image, float minimumMatchAllowed = 0.07f, float minimumTargetAreaPercentage = 0.05f, float maxDistanceRatio = 0.75f, float reprojectionThresholdPercentage = 0.01f,
                                                   double confidence = 0.99, int maxIters = 5000, int minimumNumberInliers = 8)
        {
            object locker = new object();

            List <Result> detectorResults = new List <Result>();

            VectorOfKeyPoint keypointsEvalImage = new VectorOfKeyPoint();
            Mat descriptorsEvalImage            = new Mat();

            using (SIFT sift = new SIFT())
            {
                keypointsEvalImage.Push(sift.Detect(image));

                if (keypointsEvalImage.Size < 4)
                {
                    return(detectorResults);
                }

                sift.Compute(image, keypointsEvalImage, descriptorsEvalImage);
            }

            Features2DToolbox.DrawKeypoints(image, keypointsEvalImage, image, new Bgr(0, 0, 255), Features2DToolbox.KeypointDrawType.Default);

            float  bestMatch          = 0;
            Result bestDetectorResult = new Result();

            int   trainDetectorsSize    = DetectedBanknotes.Count;
            bool  validDetection        = true;
            float reprojectionThreshold = image.Cols * reprojectionThresholdPercentage;

            do
            {
                bestMatch = 0;

                Parallel.For(0, trainDetectorsSize, i =>
                {
                    DetectedBanknotes[(int)i].UpdateCurrentLODIndex(ref image, 0.6999999881F);
                    Result detectorResult = DetectedBanknotes[(int)i].AnalyzeImageEval(ref keypointsEvalImage, ref descriptorsEvalImage, maxDistanceRatio, reprojectionThreshold, confidence, maxIters, minimumNumberInliers);
                    if (detectorResult.GetBestROIMatch() > minimumMatchAllowed)
                    {
                        float contourArea           = (float)CvInvoke.ContourArea(detectorResult.GetTrainContour());
                        float imageArea             = (float)(image.Cols * image.Rows);
                        float contourAreaPercentage = contourArea / imageArea;

                        if (contourAreaPercentage > minimumTargetAreaPercentage)
                        {
                            double contourAspectRatio = _util.ComputeContourAspectRatio(detectorResult.GetTrainContour());
                            if (contourAspectRatio > _contourAspectRatioRange.X && contourAspectRatio < _contourAspectRatioRange.Y)
                            {
                                double contourCircularity = _util.ComputeContourCircularity(detectorResult.GetTrainContour());
                                if (contourCircularity > _contourCircularityRange.X && contourCircularity < _contourCircularityRange.Y)
                                {
                                    if (CvInvoke.IsContourConvex(detectorResult.GetTrainContour()))
                                    {
                                        lock (locker)
                                        {
                                            if (detectorResult.GetBestROIMatch() > bestMatch)
                                            {
                                                bestMatch          = detectorResult.GetBestROIMatch();
                                                bestDetectorResult = detectorResult;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                });

                validDetection = bestMatch > minimumMatchAllowed && bestDetectorResult.GetInliers().Size > minimumNumberInliers;

                if (bestDetectorResult != null && validDetection)
                {
                    detectorResults.Add(bestDetectorResult);
                    _util.RemoveInliersFromKeypointsAndDescriptors(bestDetectorResult.GetInliers(), ref keypointsEvalImage, ref descriptorsEvalImage);
                }
            } while (validDetection);

            return(detectorResults);
        }
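
        // Usage sketch (assumptions: DetectedBanknotes, _util and the contour range fields
        // are initialized elsewhere; the file name is hypothetical):
        // Mat eval = CvInvoke.Imread(@"eval.jpg");
        // foreach (Result r in DetectBanknotesTrain(eval))
        // {
        //     Console.WriteLine(r.GetBestROIMatch());
        // }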
Example #10
        public static Mat GetDescriptors(float[][] matrix)
        {
            int numRows = matrix.Length;
            int numCols = matrix[0].Length;

            float fmin = float.MaxValue;
            float fmax = float.MinValue;

            for (int i = 0; i < numRows; i++)
            {
                for (int j = 0; j < numCols; j++)
                {
                    if (matrix[i][j] < fmin)
                    {
                        fmin = matrix[i][j];
                    }

                    if (matrix[i][j] > fmax)
                    {
                        fmax = matrix[i][j];
                    }
                }
            }

            Mat mat = new Mat();

            // Normalize the float matrix to 8-bit grayscale (guard against a flat matrix)
            float range = Math.Max(fmax - fmin, float.Epsilon);

            using (var matraw = new Mat(numRows, numCols, MatType.CV_8U)) {
                for (int i = 0; i < numRows; i++)
                {
                    for (int j = 0; j < numCols; j++)
                    {
                        byte val = (byte)(255f * (matrix[i][j] - fmin) / range);
                        matraw.At <byte>(i, j) = val;
                    }
                }

                // Downscale so the shorter side is at most 512 px; smaller images are kept as-is
                var f = 512f / Math.Min(matraw.Width, matraw.Height);
                if (f < 1f)
                {
                    Cv2.Resize(matraw, mat, new Size(0, 0), f, f, InterpolationFlags.Cubic);
                }
                else
                {
                    mat = matraw.Clone();
                }
            }

            // Keep at most AppConsts.MaxDescriptors keypoints, preferring the largest ones
            var keypoints = _sift.Detect(mat);

            keypoints = keypoints.OrderByDescending(e => e.Size).ThenBy(e => e.Response).Take(AppConsts.MaxDescriptors).ToArray();
            var matdescriptors = new Mat();

            _sift.Compute(mat, ref keypoints, matdescriptors);

            /*
             * using (var matkeypoints = new Mat()) {
             *  Cv2.DrawKeypoints(mat, keypoints, matkeypoints, null, DrawMatchesFlags.DrawRichKeypoints);
             *  matkeypoints.SaveImage("matkeypoints.png");
             * }
             */

            // Also compute descriptors for the horizontally mirrored image so matching
            // becomes invariant to left-right flips
            using (var matflip = new Mat()) {
                Cv2.Flip(mat, matflip, FlipMode.Y);
                keypoints = _sift.Detect(matflip);
                keypoints = keypoints.OrderByDescending(e => e.Size).ThenBy(e => e.Response).Take(AppConsts.MaxDescriptors).ToArray();
                using (var matdescriptorsflip = new Mat()) {
                    _sift.Compute(matflip, ref keypoints, matdescriptorsflip);
                    matdescriptors.PushBack(matdescriptorsflip);
                }
            }

            mat.Dispose();
            return(matdescriptors);
        }
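
        // Usage sketch (assumptions: _sift is a shared SIFT instance, e.g. _sift = SIFT.Create(),
        // and AppConsts.MaxDescriptors caps the keypoint count; the loader is hypothetical):
        // float[][] gray = LoadGrayMatrix("image.jpg");
        // using (Mat descriptors = GetDescriptors(gray)) {
        //     Console.WriteLine($"{descriptors.Rows} descriptors of length {descriptors.Cols}");
        // }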