Code example #1
        public void RunTest()
        {
            LinearIndexParams ip = new LinearIndexParams();
            SearchParams      sp = new SearchParams();

            using (var descriptorExtractor = SIFT.Create(500))
                //using (var descriptorMatcher = new FlannBasedMatcher(ip, sp))
                using (var descriptorMatcher = new BFMatcher())
                    using (var img = Image("lenna.png"))
                    {
                        KeyPoint[] keypoints;
                        Mat        dictionary;
                        var        tc = new TermCriteria(CriteriaType.MaxIter, 100, 0.001d);
                        using (var bowTrainer = new BOWKMeansTrainer(200, tc, 1, KMeansFlags.PpCenters))
                        {
                            var descriptors = new Mat();
                            descriptorExtractor.DetectAndCompute(img, null, out keypoints, descriptors);

                            Mat featuresUnclustered = new Mat();
                            featuresUnclustered.PushBack(descriptors);
                            featuresUnclustered.ConvertTo(featuresUnclustered, MatType.CV_32F);
                            dictionary = bowTrainer.Cluster(featuresUnclustered);
                        }

                        using (var bowDE = new BOWImgDescriptorExtractor(descriptorExtractor, descriptorMatcher))
                        {
                            bowDE.SetVocabulary(dictionary);

                            try
                            {
                                int[][] arr;
                                Mat     descriptors = new Mat();
                                descriptorExtractor.Compute(img, ref keypoints, descriptors);
                                descriptors.ConvertTo(descriptors, MatType.CV_32F);
                                bowDE.Compute(img, ref keypoints, descriptors, out arr); // the first argument is the input image, as in example #6
                                Console.WriteLine(arr.Length);
                                Console.WriteLine(arr[0].Length);
                            }
                            catch (OpenCVException ex)
                            {
                                Console.WriteLine(ex.FileName);
                                Console.WriteLine(ex.FuncName);
                                Console.WriteLine(ex.Line);
                                throw;
                            }
                        }
                    }
        }
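
For reference, the TermCriteria passed to BOWKMeansTrainer above caps k-means at 100 iterations. A minimal sketch of the three constructions that recur throughout these examples (OpenCvSharp, where recent versions name the enum CriteriaTypes; the values are illustrative, not taken from any one example):

        var byIteration = new TermCriteria(CriteriaTypes.MaxIter, 100, 0);  // stop after 100 iterations
        var byAccuracy  = new TermCriteria(CriteriaTypes.Eps, 0, 0.001);    // stop once the change falls below 0.001
        var either      = TermCriteria.Both(30, 0.1);                       // stop at whichever limit is hit first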
Code example #2
 internal static extern double cvStereoCalibrate(
     Mat object_points,
     Mat image_points1,
     Mat image_points2,
     Mat npoints,
     Mat camera_matrix1,
     Mat dist_coeffs1,
     Mat camera_matrix2,
     Mat dist_coeffs2,
     Size image_size,
     Mat R,
     Mat T,
     Mat E,
     Mat F,
     TermCriteria term_crit,
     StereoCalibrationFlags flags);
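
This is only the extern signature; in an OpenCvSharp-style wrapper it would sit behind a DllImport attribute. A hedged sketch of the surrounding declaration (the library name and calling convention are assumptions, not taken from the source):

     using System.Runtime.InteropServices;

     // Assumed binding details; the actual native library name depends on the wrapper build.
     [DllImport("OpenCvSharpExtern", CallingConvention = CallingConvention.Cdecl)]
     internal static extern double cvStereoCalibrate(/* parameters as listed above */);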
Code example #3
        /// <summary>
        /// "Posterizes" image and returns new Mat with result. Implementation is color
        /// quantization with K-Means algorithm
        ///
        /// Note: it's slow, don't use on big images
        /// </summary>
        /// <param name="colors">Desired output color count</param>
        /// <returns>New Mat with requested colors count</returns>
        public static Mat PosterizedImage(this Mat img, int colors)
        {
            // basics
            int          attempts = 5;
            double       eps      = 0.01;
            TermCriteria criteria = new TermCriteria(CriteriaType.Eps | CriteriaType.MaxIter, attempts, eps);

            // prepare
            Mat labels = new Mat(), centers = new Mat();
            Mat samples = new Mat(img.Rows * img.Cols, 3, MatType.CV_32F);

            for (int y = 0; y < img.Rows; y++)
            {
                for (int x = 0; x < img.Cols; x++)
                {
                    Vec3b color = img.At <Vec3b>(y, x);
                    for (int z = 0; z < 3; z++)
                    {
                        samples.Set <float>(y + x * img.Rows, z, color[z]);
                    }
                }
            }

            // run k-means
            Cv2.Kmeans(samples, colors, labels, criteria, attempts, KMeansFlags.PpCenters, centers);

            // restore original image
            Mat new_image = new Mat(img.Size(), img.Type());

            for (int y = 0; y < img.Rows; y++)
            {
                for (int x = 0; x < img.Cols; x++)
                {
                    int   cluster_idx = labels.At <int>(y + x * img.Rows, 0);
                    Vec3b color       = new Vec3b(
                        (byte)centers.At <float>(cluster_idx, 0),
                        (byte)centers.At <float>(cluster_idx, 1),
                        (byte)centers.At <float>(cluster_idx, 2)
                        );

                    new_image.Set(y, x, color);
                }
            }

            return(new_image);
        }
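
A possible call site for this extension method (the file names are illustrative):

        using (var src = Cv2.ImRead("photo.jpg"))
        using (var poster = src.PosterizedImage(8)) // quantize to 8 colors
        {
            Cv2.ImWrite("photo_posterized.jpg", poster);
        }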
Code example #4
        public static double CalibrateCamera(Std.VectorVectorPoint3f objectPoints, Std.VectorVectorPoint2f imagePoints,
                                             Size imageSize,
                                             Mat cameraMatrix, Mat distCoeffs, out Std.VectorVec3d rvecs, out Std.VectorVec3d tvecs, Calib flags,
                                             TermCriteria criteria)
        {
            var    exception = new Exception();
            IntPtr rvecsPtr, tvecsPtr;

            var error = au_cv_calib3d_calibrateCamera2(objectPoints.CppPtr, imagePoints.CppPtr, imageSize.CppPtr,
                                                       cameraMatrix.CppPtr,
                                                       distCoeffs.CppPtr, out rvecsPtr, out tvecsPtr, (int)flags, criteria.CppPtr, exception.CppPtr);

            rvecs = new Std.VectorVec3d(rvecsPtr);
            tvecs = new Std.VectorVec3d(tvecsPtr);

            exception.Check();
            return(error);
        }
Code example #5
File: Video.cs Project: Raniot/AR_Assignment3
        //
        // C++:  int cv::meanShift(Mat probImage, Rect& window, TermCriteria criteria)
        //

        //javadoc: meanShift(probImage, window, criteria)
        public static int meanShift(Mat probImage, Rect window, TermCriteria criteria)
        {
            if (probImage != null)
            {
                probImage.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            double[] window_out = new double[4];
            int      retVal     = video_Video_meanShift_10(probImage.nativeObj, window.x, window.y, window.width, window.height, window_out, criteria.type, criteria.maxCount, criteria.epsilon);
            if (window != null)
            {
                window.x = (int)window_out[0]; window.y = (int)window_out[1]; window.width = (int)window_out[2]; window.height = (int)window_out[3];
            }
            return(retVal);
#else
            return(-1);
#endif
        }
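
A hedged usage sketch for this wrapper (OpenCVForUnity): meanShift is normally driven by a histogram back-projection, and the Rect is updated in place with the shifted window. The backProj Mat below is an assumption standing in for real tracker state:

        TermCriteria criteria = new TermCriteria(TermCriteria.EPS | TermCriteria.COUNT, 10, 1);
        Rect window = new Rect(100, 100, 60, 80);                     // initial search window
        int iterations = Video.meanShift(backProj, window, criteria); // 'window' now holds the object's new location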
Code example #6
        public void RunTest()
        {
            using var descriptorExtractor = SIFT.Create(500);
            using var descriptorMatcher   = new BFMatcher();
            using var img = Image("lenna.png");
            KeyPoint[] keypoints;
            Mat        dictionary;
            var        tc = new TermCriteria(CriteriaTypes.MaxIter, 100, 0.001d);

            using (var bowTrainer = new BOWKMeansTrainer(200, tc, 1, KMeansFlags.PpCenters))
            {
                var descriptors = new Mat();
                descriptorExtractor.DetectAndCompute(img, null, out keypoints, descriptors);

                using var featuresUnclustered = new Mat();
                featuresUnclustered.PushBack(descriptors);
                featuresUnclustered.ConvertTo(featuresUnclustered, MatType.CV_32F);
                dictionary = bowTrainer.Cluster(featuresUnclustered);
            }

            using (var bowDe = new BOWImgDescriptorExtractor(descriptorExtractor, descriptorMatcher))
            {
                bowDe.SetVocabulary(dictionary);

                try
                {
                    using var descriptors = new Mat();
                    descriptorExtractor.Compute(img, ref keypoints, descriptors);
                    descriptors.ConvertTo(descriptors, MatType.CV_32F);
                    bowDe.Compute(img, ref keypoints, descriptors, out var arr);
                    testOutputHelper.WriteLine(arr.Length.ToString(CultureInfo.InvariantCulture));
                    testOutputHelper.WriteLine(arr[0].Length.ToString(CultureInfo.InvariantCulture));
                }
                catch (OpenCVException ex)
                {
                    testOutputHelper.WriteLine(ex.FileName);
                    testOutputHelper.WriteLine(ex.FuncName);
                    testOutputHelper.WriteLine(ex.Line.ToString(CultureInfo.InvariantCulture));
                    throw;
                }
            }

            dictionary.Dispose();
        }
Code example #7
File: SceneController.cs Project: drywitte/sherpa
    // quantizes colors
    Color kmeansColor(Mat submat)
    {
        Mat          kScores = new Mat();
        Mat          centers = new Mat();
        TermCriteria end     = new TermCriteria();

        end.type     = TermCriteria.COUNT;
        end.maxCount = 2;
        Core.kmeans(submat, 4, kScores, end, 2, 0, centers); // flags = 0, i.e. KMEANS_RANDOM_CENTERS
        // Reads only the first cluster center; assumes submat rows are 4-column (RGBA) float samples normalized to [0, 1]
        Color col = new Color32((byte)(255 * centers.get(0, 0)[0]), (byte)(255 * centers.get(0, 1)[0]), (byte)(255 * centers.get(0, 2)[0]), (byte)(255 * centers.get(0, 3)[0]));

        Debug.Log("Center has # cols: " + centers.cols().ToString());
        Debug.Log("color is " + centers.get(0, 1)[0].ToString() + " " + centers.get(0, 1)[0].ToString() + " " + centers.get(0, 2)[0].ToString() + " ");
        Debug.Log("Centers height: " + centers.rows() + " and width: " + centers.cols());
        submat.Dispose();
        kScores.Dispose();
        centers.Dispose();
        return(col);
    }
Code example #8
File: Program.cs Project: shimat/opencvsharp
        private static void BowTest()
        {
            DescriptorMatcher matcher = new BFMatcher();
            Feature2D extractor = AKAZE.Create();
            Feature2D detector = AKAZE.Create();

            TermCriteria criteria = new TermCriteria(CriteriaType.Count | CriteriaType.Eps, 10, 0.001);
            BOWKMeansTrainer bowTrainer = new BOWKMeansTrainer(200, criteria, 1);
            BOWImgDescriptorExtractor bowDescriptorExtractor = new BOWImgDescriptorExtractor(extractor, matcher);
            
            Mat img = null;

            KeyPoint[] keypoint = detector.Detect(img);
            Mat features = new Mat();
            extractor.Compute(img, ref keypoint, features);
            bowTrainer.Add(features);

            throw new NotImplementedException();
        }
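
The method above stops at NotImplementedException before clustering; a hedged sketch of the remaining bag-of-words steps, following the flow of examples #1 and #6:

            // Hedged continuation (not part of the original): build the vocabulary
            // from the pooled features and attach it to the BOW extractor.
            using (Mat vocabulary = bowTrainer.Cluster())
            {
                bowDescriptorExtractor.SetVocabulary(vocabulary);
                // bowDescriptorExtractor.Compute(img, ref keypoint, ...) would then
                // yield a histogram over the 200 visual words for each image.
            }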
Code example #9
File: Program.cs Project: umitkok/opencvsharp
        private static void BowTest()
        {
            DescriptorMatcher matcher   = new BFMatcher();
            Feature2D         extractor = AKAZE.Create();
            Feature2D         detector  = AKAZE.Create();

            TermCriteria              criteria               = new TermCriteria(CriteriaType.Count | CriteriaType.Eps, 10, 0.001);
            BOWKMeansTrainer          bowTrainer             = new BOWKMeansTrainer(200, criteria, 1);
            BOWImgDescriptorExtractor bowDescriptorExtractor = new BOWImgDescriptorExtractor(extractor, matcher);

            Mat img = null;

            KeyPoint[] keypoint = detector.Detect(img);
            Mat        features = new Mat();

            extractor.Compute(img, ref keypoint, features);
            bowTrainer.Add(features);

            throw new NotImplementedException();
        }
Code example #10
File: Ccalib.cs Project: holyris/CodeMiner
      public static double StereoCalibrate(Std.VectorVectorPoint3f objectPoints, Std.VectorVectorPoint2f imagePoints1,
        Std.VectorVectorPoint2f imagePoints2, Size imageSize1, Size imageSize2, Mat cameraMatrix1, Mat xi1, Mat distCoeffs1, Mat cameraMatrix2,
        Mat xi2, Mat distCoeffs2, out Vec3d rvec, out Vec3d tvec, out Std.VectorVec3d rvecsL, out Std.VectorVec3d tvecsL, Calib flags,
        TermCriteria criteria, out Mat idx)
      {
        Exception exception = new Exception();
        IntPtr rvecPtr, tvecPtr, rvecsLPtr, tvecsLPtr, idxPtr;

        double error = au_cv_ccalib_omnidir_stereoCalibrate(objectPoints.CppPtr, imagePoints1.CppPtr, imagePoints2.CppPtr, imageSize1.CppPtr,
          imageSize2.CppPtr, cameraMatrix1.CppPtr, xi1.CppPtr, distCoeffs1.CppPtr, cameraMatrix2.CppPtr, xi2.CppPtr, distCoeffs2.CppPtr, out rvecPtr,
          out tvecPtr, out rvecsLPtr, out tvecsLPtr, (int)flags, criteria.CppPtr, out idxPtr, exception.CppPtr);
        rvec = new Vec3d(rvecPtr);
        tvec = new Vec3d(tvecPtr);
        rvecsL = new Std.VectorVec3d(rvecsLPtr);
        tvecsL = new Std.VectorVec3d(tvecsLPtr);
        idx = new Mat(idxPtr);

        exception.Check();
        return error;
      }
Code example #11
    //Capture a rendered texture frame and register the checkerboard pattern data
    public void RegisterCurrentCalib()
    {
        corners.Clear();
        obj.Clear();
        //imagePoints.Clear();
        //objPoints.Clear();

        bool b = false;

        //find the corners and populate the data for one square
        b = Cv2.FindChessboardCorners(mat, boardSize, OutputArray.Create(corners),
                                      ChessboardFlags.AdaptiveThresh | ChessboardFlags.NormalizeImage | ChessboardFlags.FastCheck);

        if (!b)
        {
            return;
        }

        Cv2.CornerSubPix(grayMat, corners, new Size(5, 5), new Size(-1, -1), TermCriteria.Both(30, 0.1));
        Debug.Log(b);

        // for debug draw the found squares
        Cv2.DrawChessboardCorners(mat, boardSize, corners, b);

        for (int i = 0; i < boardSize.Height; i++)
        {
            for (int j = 0; j < boardSize.Width; j++)
            {
                //add the space coordinates of the squares. Z = 0 since it's a flat plane.
                obj.Add(new Point3f((float)j * squareSizeMeters, (float)i * squareSizeMeters, 0));
            }
        }

        //register the data once per captured frame; adding inside the per-square loop
        //would duplicate the same corner set boardSize.Width * boardSize.Height times
        CornerPoints.Add(corners);
        objPoints.Add(obj);
    }
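
The lists registered here are the usual inputs to camera calibration; a hedged sketch of the follow-up call (OpenCvSharp; the point-list field types are assumed from their usage above):

        var cameraMatrix = new double[3, 3];
        var distCoeffs   = new double[5];
        double rms = Cv2.CalibrateCamera(objPoints, CornerPoints, mat.Size(),
                                         cameraMatrix, distCoeffs, out var rvecs, out var tvecs);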
Code example #12
        public static double StereoCalibrate(Std.VectorVectorPoint3f objectPoints, Std.VectorVectorPoint2f imagePoints1,
                                             Std.VectorVectorPoint2f imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2,
                                             Mat distCoeffs2, Size imageSize,
                                             out Mat rotationMatrix, out Vec3d tvec, out Mat essentialMatrix, out Mat fundamentalMatrix, Calib flags,
                                             TermCriteria criteria)
        {
            var    exception = new Exception();
            IntPtr rotationMatrixPtr, tvecPtr, essentialMatrixPtr, fundamentalMatrixPtr;

            var error = au_cv_calib3d_stereoCalibrate(objectPoints.CppPtr, imagePoints1.CppPtr, imagePoints2.CppPtr,
                                                      cameraMatrix1.CppPtr,
                                                      distCoeffs1.CppPtr, cameraMatrix2.CppPtr, distCoeffs2.CppPtr, imageSize.CppPtr, out rotationMatrixPtr,
                                                      out tvecPtr, out essentialMatrixPtr,
                                                      out fundamentalMatrixPtr, (int)flags, criteria.CppPtr, exception.CppPtr);

            rotationMatrix    = new Mat(rotationMatrixPtr);
            tvec              = new Vec3d(tvecPtr);
            essentialMatrix   = new Mat(essentialMatrixPtr);
            fundamentalMatrix = new Mat(fundamentalMatrixPtr);

            exception.Check();
            return(error);
        }
Code example #13
File: cuda_imgproc.cs Project: yangyouji/opencvsharp
        /// <summary>
        /// Performs a mean-shift segmentation of the source image and eliminates small segments.
        /// </summary>
        /// <param name="src">Source image. Only CV_8UC4 images are supported for now.</param>
        /// <param name="dst">Segmented image with the same size and type as src (host or gpu memory).</param>
        /// <param name="sp">Spatial window radius.</param>
        /// <param name="sr">Color window radius.</param>
        /// <param name="criteria">Termination criteria. See TermCriteria.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        public static void meanShiftSegmentation(InputArray src, OutputArray dst, int sp, int sr, int minsize,
                                                 TermCriteria?criteria = null, Stream stream = null)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }

            src.ThrowIfDisposed();
            dst.ThrowIfNotReady();

            TermCriteria criteria0 = criteria.GetValueOrDefault(
                TermCriteria.Both(5, 1));

            NativeMethods.cuda_imgproc_meanShiftSegmentation(src.CvPtr, dst.CvPtr, sp, sr, minsize, criteria0, stream?.CvPtr ?? Stream.Null.CvPtr);
            GC.KeepAlive(src);
            GC.KeepAlive(dst);
            dst.Fix();
        }
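
A hedged usage sketch, assuming a CUDA-enabled OpenCvSharp build with this method in scope; per the doc comment, the input must be CV_8UC4:

        using (var src = Cv2.ImRead("scene.png"))
        using (var bgra = new Mat())
        using (var dst = new Mat())
        {
            Cv2.CvtColor(src, bgra, ColorConversionCodes.BGR2BGRA); // only CV_8UC4 is supported
            meanShiftSegmentation(bgra, dst, 10, 10, 100, TermCriteria.Both(5, 1));
        }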
Code example #14
        Expression VisitCriteriaEqualsForFields(ConstantMemberPair constantMemberPair, bool equal = true)
        {
            if (Mapping.TryGetFieldName(SourceType, constantMemberPair.Expression, out string fieldName))
            {
                var propertyMappings = Mapping.ElasticPropertyMappings();

                ICriteria criteria;

                if (IsPropertyTypeText(fieldName, propertyMappings))
                {
                    if (propertyMappings.ContainsKey($"{fieldName}.keyword"))
                    {
                        fieldName = $"{fieldName}.keyword";
                        criteria  = new TermCriteria(fieldName,
                                                     constantMemberPair.GetMemberFromExpression(), constantMemberPair.ConstantExpression.Value);
                    }
                    else
                    {
                        criteria = new MatchCriteria(fieldName,
                                                     constantMemberPair.GetMemberFromExpression(), constantMemberPair.ConstantExpression.Value);
                    }
                }
                else
                {
                    criteria = new TermCriteria(fieldName,
                                                constantMemberPair.GetMemberFromExpression(), constantMemberPair.ConstantExpression.Value);
                }

                if (!equal)
                {
                    criteria = NotCriteria.Create(criteria);
                }
                return(new CriteriaExpression(criteria));
            }

            return(null);
        }
Code example #15
    void Start()
    {
        Tracking.arrayGameObj = this.gameObject;
        Debug.Log("There are " + cameraArray.Count + " cameras");

        cam.Camera = CurrentCamera();

        if (CurrentCamera() == null)
        {
            throw new Exception("Error, no camera in the array");
        }
        GetControllerScript(CurrentCamera()).WakeUp();

        roiRect = null;

        if (saveVideo)
        {
            CreateVideoTargetDirectory(savesFolderName, frameRate);
        }

        backgroundSubstractorMOG2 = Video.createBackgroundSubtractorMOG2();

        terminationCriteria = new TermCriteria(TermCriteria.EPS | TermCriteria.COUNT, 10, 1);
    }
Code example #16
                // Static methods

                public static double Calibrate(Std.VectorVectorPoint3f objectPoints, Std.VectorVectorPoint2f imagePoints, Size imageSize,
                                               Mat cameraMatrix, Mat xi, Mat distCoeffs, out Std.VectorVec3d rvecs, out Std.VectorVec3d tvecs, Calib flags, TermCriteria criteria,
                                               out Mat idx)
                {
                    Exception exception = new Exception();

                    System.IntPtr rvecsPtr, tvecsPtr, idxPtr;

                    double error = au_cv_ccalib_omnidir_calibrate(objectPoints.CppPtr, imagePoints.CppPtr, imageSize.CppPtr, cameraMatrix.CppPtr,
                                                                  xi.CppPtr, distCoeffs.CppPtr, out rvecsPtr, out tvecsPtr, (int)flags, criteria.CppPtr, out idxPtr, exception.CppPtr);

                    rvecs = new Std.VectorVec3d(rvecsPtr);
                    tvecs = new Std.VectorVec3d(tvecsPtr);
                    idx   = new Mat(idxPtr);

                    exception.Check();
                    return(error);
                }
Code example #17
                public static double StereoCalibrate(Std.VectorVectorPoint3f objectPoints, Std.VectorVectorPoint2f imagePoints1,
                                                     Std.VectorVectorPoint2f imagePoints2, Size imageSize1, Size imageSize2, Mat cameraMatrix1, Mat xi1, Mat distCoeffs1, Mat cameraMatrix2,
                                                     Mat xi2, Mat distCoeffs2, out Vec3d rvec, out Vec3d tvec, out Mat rvecsL, out Mat tvecsL, Calib flags, TermCriteria criteria)
                {
                    Mat idx;

                    return(StereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize1, imageSize2, cameraMatrix1, xi1, distCoeffs1, cameraMatrix2,
                                           xi2, distCoeffs2, out rvec, out tvec, out rvecsL, out tvecsL, flags, criteria, out idx));
                }
Code example #18
    void handleCalibration()
    {
        for (int i = 0; i < AK_receiver.GetComponent <akplay>().camInfoList.Count; i++)
        {
            //create color mat:
            byte[]   colorBytes = ((Texture2D)(AK_receiver.GetComponent <akplay>().camInfoList[i].colorCube.GetComponent <Renderer>().material.mainTexture)).GetRawTextureData();
            GCHandle ch         = GCHandle.Alloc(colorBytes, GCHandleType.Pinned);
            Mat      colorMat   = new Mat(AK_receiver.GetComponent <akplay>().camInfoList[i].color_height, AK_receiver.GetComponent <akplay>().camInfoList[i].color_width, CvType.CV_8UC4);
            Utils.copyToMat(ch.AddrOfPinnedObject(), colorMat);
            ch.Free();

            //OpenCVForUnity.CoreModule.Core.flip(colorMat, colorMat, 0);

            //detect a chessboard in the image, and refine the points, and save the pixel positions:
            MatOfPoint2f positions = new MatOfPoint2f();
            int          resizer   = 4;
            resizer = 1;                   //noresize!
            Mat colorMatSmall = new Mat(); //~27 ms each
            Imgproc.resize(colorMat, colorMatSmall, new Size(colorMat.cols() / resizer, colorMat.rows() / resizer));
            bool success = Calib3d.findChessboardCorners(colorMatSmall, new Size(7, 7), positions);
            for (int ss = 0; ss < positions.rows(); ss++)
            {
                double[] data = positions.get(ss, 0);
                data[0] = data[0] * resizer;
                data[1] = data[1] * resizer;

                positions.put(ss, 0, data);
            }

            //subpixel, drawing chessboard, and getting orange blobs takes 14ms
            TermCriteria tc = new TermCriteria(); // default-constructed (type 0): termination falls back to OpenCV's internal defaults
            Imgproc.cornerSubPix(colorMat, positions, new Size(5, 5), new Size(-1, -1), tc);

            Mat chessboardResult = new Mat();
            colorMat.copyTo(chessboardResult);
            Calib3d.drawChessboardCorners(chessboardResult, new Size(7, 7), positions, success);



            //Find the orange blobs:
            Mat       orangeMask = new Mat();
            Vector2[] blobs      = getOrangeBlobs(ref colorMat, ref orangeMask);

            //find blob closest to chessboard
            if (success && (blobs.Length > 0))
            {
                Debug.Log("found a chessboard and blobs for camera: " + i);

                // time to get pin1 and chessboard positions: 27ms
                //find pin1:
                Point closestBlob = new Point();
                int   pin1idx     = getPin1(positions, blobs, ref closestBlob);
                Imgproc.circle(chessboardResult, new Point(positions.get(pin1idx, 0)[0], positions.get(pin1idx, 0)[1]), 10, new Scalar(255, 0, 0), -1);
                Imgproc.circle(chessboardResult, closestBlob, 10, new Scalar(255, 255, 0), -1);


                //get world positions of chessboard
                Point[]  realWorldPointArray  = new Point[positions.rows()];
                Point3[] realWorldPointArray3 = new Point3[positions.rows()];
                Point[]  imagePointArray      = new Point[positions.rows()];
                //getChessBoardWorldPositions(positions, pin1idx, 0.0498f, ref realWorldPointArray, ref realWorldPointArray3, ref imagePointArray); //green and white checkerboard.
                getChessBoardWorldPositions(positions, pin1idx, 0.07522f, ref realWorldPointArray, ref realWorldPointArray3, ref imagePointArray); //black and white checkerboard.


                string text       = "";
                float  decimals   = 1000.0f;
                int    text_red   = 255;
                int    text_green = 0;
                int    text_blue  = 0;
                text = ((int)(realWorldPointArray3[0].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[0].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[0].z * decimals)) / decimals;
                //text = sprintf("%f,%f,%f", realWorldPointArray3[0].x, realWorldPointArray3[0].y, realWorldPointArray3[0].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(0, 0)[0], positions.get(0, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));
                text = ((int)(realWorldPointArray3[6].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[6].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[6].z * decimals)) / decimals;
                //text = sprintf("%f,%f,%f", realWorldPointArray3[0].x, realWorldPointArray3[0].y, realWorldPointArray3[0].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(6, 0)[0], positions.get(6, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));
                text = ((int)(realWorldPointArray3[42].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[42].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[42].z * decimals)) / decimals;
                //text = sprintf("%f,%f,%f", realWorldPointArray3[0].x, realWorldPointArray3[0].y, realWorldPointArray3[0].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(42, 0)[0], positions.get(42, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));
                text = ((int)(realWorldPointArray3[48].x * decimals)) / decimals + "," + ((int)(realWorldPointArray3[48].y * decimals)) / decimals + "," + ((int)(realWorldPointArray3[48].z * decimals)) / decimals;
                //text = sprintf("%2.2f,%2.2f,%2.2f", realWorldPointArray3[48].x, realWorldPointArray3[48].y, realWorldPointArray3[48].z);
                Imgproc.putText(chessboardResult, text, new Point(positions.get(48, 0)[0], positions.get(48, 0)[1]), 0, .6, new Scalar(text_red, text_green, text_blue));



                Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
                cameraMatrix.put(0, 0, AK_receiver.GetComponent <akplay>().camInfoList[i].color_fx);
                cameraMatrix.put(1, 1, AK_receiver.GetComponent <akplay>().camInfoList[i].color_fy);
                cameraMatrix.put(0, 2, AK_receiver.GetComponent <akplay>().camInfoList[i].color_cx);
                cameraMatrix.put(1, 2, AK_receiver.GetComponent <akplay>().camInfoList[i].color_cy);

                double[] distortion = new double[8];

                distortion[0] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k1;
                distortion[1] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k2;
                distortion[2] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_p1;
                distortion[3] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_p2;
                distortion[4] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k3;
                distortion[5] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k4;
                distortion[6] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k5;
                distortion[7] = AK_receiver.GetComponent <akplay>().camInfoList[i].color_k6;


                /*
                 * distortion[0] = 0.0;
                 * distortion[1] = 0.0;
                 * distortion[2] = 0.0;
                 * distortion[3] = 0.0;
                 * distortion[4] = 0.0;
                 * distortion[5] = 0.0;
                 * distortion[6] = 0.0;
                 * distortion[7] = 0.0;
                 */

                //~1 ms to solve for pnp
                Mat  rvec           = new Mat();
                Mat  tvec           = new Mat();
                bool solvepnpSucces = Calib3d.solvePnP(new MatOfPoint3f(realWorldPointArray3), new MatOfPoint2f(imagePointArray), cameraMatrix, new MatOfDouble(distortion), rvec, tvec);

                Mat R = new Mat();
                Calib3d.Rodrigues(rvec, R);


                //calculate unity vectors, and camera transforms
                Mat camCenter     = -R.t() * tvec;
                Mat forwardOffset = new Mat(3, 1, tvec.type());
                forwardOffset.put(0, 0, 0);
                forwardOffset.put(1, 0, 0);
                forwardOffset.put(2, 0, 1);
                Mat upOffset = new Mat(3, 1, tvec.type());
                upOffset.put(0, 0, 0);
                upOffset.put(1, 0, -1);
                upOffset.put(2, 0, 0);

                Mat forwardVectorCV = R.t() * (forwardOffset - tvec);
                forwardVectorCV = forwardVectorCV - camCenter;
                Mat upVectorCV = R.t() * (upOffset - tvec);
                upVectorCV = upVectorCV - camCenter;

                Vector3    forwardVectorUnity = new Vector3((float)forwardVectorCV.get(0, 0)[0], (float)forwardVectorCV.get(2, 0)[0], (float)forwardVectorCV.get(1, 0)[0]); //need to flip y and z due to unity coordinate system
                Vector3    upVectorUnity      = new Vector3((float)upVectorCV.get(0, 0)[0], (float)upVectorCV.get(2, 0)[0], (float)upVectorCV.get(1, 0)[0]);                //need to flip y and z due to unity coordinate system
                Vector3    camCenterUnity     = new Vector3((float)camCenter.get(0, 0)[0], (float)camCenter.get(2, 0)[0], (float)camCenter.get(1, 0)[0]);
                Quaternion rotationUnity      = Quaternion.LookRotation(forwardVectorUnity, upVectorUnity);



                GameObject colorMarker = GameObject.CreatePrimitive(PrimitiveType.Cube);
                //colorMarker.transform.localScale = new Vector3(0.1f, 0.1f, 0.2f);
                //colorMarker.transform.parent = AK_receiver.transform;
                colorMarker.layer = LayerMask.NameToLayer("Debug");
                colorMarker.transform.position = camCenterUnity;
                colorMarker.transform.rotation = Quaternion.LookRotation(forwardVectorUnity, upVectorUnity);
                colorMarker.GetComponent <Renderer>().material.color = Color.blue;

                Vector3    forwardDepth   = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.MultiplyPoint(forwardVectorUnity);
                Vector3    upDepth        = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.MultiplyPoint(upVectorUnity);
                Vector3    camCenterDepth = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.MultiplyPoint(camCenterUnity);
                Quaternion rotationDepth  = Quaternion.LookRotation(forwardDepth, upDepth);

                GameObject depthMarker = GameObject.CreatePrimitive(PrimitiveType.Cube);
                depthMarker.layer            = LayerMask.NameToLayer("Debug");
                depthMarker.transform.parent = colorMarker.transform;
                //depthMarker.transform.localScale = AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.lossyScale;

                depthMarker.transform.localRotation = AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.rotation;

                Vector3 matrixPosition = new Vector3(AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.GetColumn(3).x,
                                                     AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.GetColumn(3).y,
                                                     AK_receiver.GetComponent <akplay>().camInfoList[i].color_extrinsics.inverse.GetColumn(3).z);


                /*
                 * depthMarker.transform.localRotation = AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.rotation;
                 *
                 * Vector3 matrixPosition = new Vector3(AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.GetColumn(3).x,
                 *                                      AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.GetColumn(3).y,
                 *                                      AK_receiver.GetComponent<akplay>().camInfoList[i].color_extrinsics.GetColumn(3).z);
                 */

                depthMarker.transform.localPosition = -matrixPosition;
                depthMarker.transform.parent        = null;

                colorMarker.transform.localScale = new Vector3(0.1f, 0.1f, 0.2f);
                depthMarker.transform.localScale = new Vector3(0.1f, 0.1f, 0.2f);

                //depthMarker.transform.parent = AK_receiver.transform;
                //depthMarker.transform.position = camCenterDepth;
                //depthMarker.transform.rotation = Quaternion.LookRotation(forwardDepth-camCenterDepth, upDepth-camCenterDepth);
                depthMarker.GetComponent <Renderer>().material.color = Color.red;


                AK_receiver.GetComponent <akplay>().camInfoList[i].visualization.transform.position = depthMarker.transform.position; //need to flip y and z due to unity coordinate system
                AK_receiver.GetComponent <akplay>().camInfoList[i].visualization.transform.rotation = depthMarker.transform.rotation;
            }


            //draw chessboard result to calibration ui:
            Texture2D colorTexture = new Texture2D(chessboardResult.cols(), chessboardResult.rows(), TextureFormat.BGRA32, false);
            colorTexture.LoadRawTextureData((IntPtr)chessboardResult.dataAddr(), (int)chessboardResult.total() * (int)chessboardResult.elemSize());
            colorTexture.Apply();
            checkerboard_display_list[i].GetComponent <Renderer>().material.mainTexture = colorTexture;

            //draw threshold to calibration ui:
            Texture2D orangeTexture = new Texture2D(orangeMask.cols(), orangeMask.rows(), TextureFormat.R8, false);
            orangeTexture.LoadRawTextureData((IntPtr)orangeMask.dataAddr(), (int)orangeMask.total() * (int)orangeMask.elemSize());
            orangeTexture.Apply();
            threshold_display_list[i].GetComponent <Renderer>().material.mainTexture = orangeTexture;
        }
    }
Code example #19
            // Static methods

            public static double CalibrateCamera(Std.VectorVectorPoint3f objectPoints, Std.VectorVectorPoint2f imagePoints, Size imageSize,
                                                 Mat cameraMatrix, Mat distCoeffs, out Std.VectorVec3d rvecs, out Std.VectorVec3d tvecs, Std.VectorDouble stdDeviationsIntrinsics,
                                                 Std.VectorDouble stdDeviationsExtrinsics, Std.VectorDouble perViewErrors, Calib flags, TermCriteria criteria)
            {
                Exception exception = new Exception();

                System.IntPtr rvecsPtr, tvecsPtr;

                double error = au_cv_calib3d_calibrateCamera1(objectPoints.CppPtr, imagePoints.CppPtr, imageSize.CppPtr, cameraMatrix.CppPtr,
                                                              distCoeffs.CppPtr, out rvecsPtr, out tvecsPtr, stdDeviationsIntrinsics.CppPtr, stdDeviationsExtrinsics.CppPtr, perViewErrors.CppPtr,
                                                              (int)flags, criteria.CppPtr, exception.CppPtr);

                rvecs = new Std.VectorVec3d(rvecsPtr);
                tvecs = new Std.VectorVec3d(tvecsPtr);
                exception.Check();

                return(error);
            }
Code example #20
		/// <summary>
		/// Recognizes the markers.
		/// </summary>
		/// <param name="grayscale">Grayscale.</param>
		/// <param name="detectedMarkers">Detected markers.</param>
		void recognizeMarkers (Mat grayscale, List<Marker> detectedMarkers)
		{
				List<Marker> goodMarkers = new List<Marker> ();
		
				// Identify the markers
				for (int i=0; i<detectedMarkers.Count; i++) {
						Marker marker = detectedMarkers [i];

			
						// Find the perspective transformation that brings current marker to rectangular form
						Mat markerTransform = Imgproc.getPerspectiveTransform (new MatOfPoint2f (marker.points.toArray ()), m_markerCorners2d);
				

						// Transform image to get a canonical marker image
						Imgproc.warpPerspective (grayscale, canonicalMarkerImage, markerTransform, markerSize);
			
						MatOfInt nRotations = new MatOfInt (0);
						int id = Marker.getMarkerId (canonicalMarkerImage, nRotations, m_markerDesign);
						if (id != - 1) {
								marker.id = id;
//				                Debug.Log ("id " + id);

								//sort the points so that they are always in the same order no matter the camera orientation
								List<Point> MarkerPointsList = marker.points.toList ();

								//				std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());
								MarkerPointsList = MarkerPointsList.Skip (4 - nRotations.toArray () [0]).Concat (MarkerPointsList.Take (4 - nRotations.toArray () [0])).ToList ();

								marker.points.fromList (MarkerPointsList);
				
								goodMarkers.Add (marker);
						}
						nRotations.Dispose ();
				}

//				Debug.Log ("goodMarkers " + goodMarkers.Count);
		
				// Refine marker corners using sub pixel accuracy
				if (goodMarkers.Count > 0) {
						List<Point> preciseCornersPoint = new List<Point> (4 * goodMarkers.Count);
						for (int i = 0; i < preciseCornersPoint.Capacity; i++) {
								preciseCornersPoint.Add (new Point (0, 0));
						}
						

			
						for (int i=0; i<goodMarkers.Count; i++) {
								Marker marker = goodMarkers [i];

								List<Point> markerPointsList = marker.points.toList ();
				
								for (int c = 0; c <4; c++) {
										preciseCornersPoint [i * 4 + c] = markerPointsList [c];
								}
						}

						MatOfPoint2f preciseCorners = new MatOfPoint2f (preciseCornersPoint.ToArray ());

						TermCriteria termCriteria = new TermCriteria (TermCriteria.MAX_ITER | TermCriteria.EPS, 30, 0.01);
						Imgproc.cornerSubPix (grayscale, preciseCorners, new Size (5, 5), new Size (-1, -1), termCriteria);

						preciseCornersPoint = preciseCorners.toList ();
			
						// Copy refined corners position back to markers
						for (int i=0; i<goodMarkers.Count; i++) {
								Marker marker = goodMarkers [i];

								List<Point> markerPointsList = marker.points.toList ();
				
								for (int c=0; c<4; c++) {
										markerPointsList [c] = preciseCornersPoint [i * 4 + c];
								}
						}
						preciseCorners.Dispose ();
				}

				detectedMarkers.Clear ();
				detectedMarkers.AddRange (goodMarkers);

		}
Code example #21
        /// <summary>
        /// Camshift algorithm
        /// </summary>
        /// <param name="probabilityMap">Probability map [0-255].</param>
        /// <param name="roi">Initial Search area</param>
        /// <param name="termCriteria">Mean shift termination criteria (PLEASE DO NOT REMOVE (but you can move it) THIS CLASS; PLEASE!!!)</param>
        /// <param name="centralMoments">Calculated central moments (up to order 2).</param>
        /// <returns>Object position, size and angle packed into a structure.</returns>
        public static Box2D Process(Image<Gray, byte> probabilityMap, Rectangle roi, TermCriteria termCriteria, out CentralMoments centralMoments)
        {
            // Compute mean shift
            Rectangle objArea = Meanshift.Process(probabilityMap, roi, termCriteria, out centralMoments);

            //fit ellipse
            Ellipse ellipse = centralMoments.GetEllipse();
            ellipse.Center = objArea.Center();

            //return an empty structure if the object is lost
            var sz = ellipse.Size;
            if (Single.IsNaN(sz.Width) || Single.IsNaN(sz.Height) ||
                sz.Width < 1 || sz.Height < 1)
            {
                return Box2D.Empty;
            }

            return (Box2D)ellipse;
        }
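
A hedged call-site sketch; probMap and termCriteria are assumptions standing in for real tracker state in this library:

        CentralMoments moments;
        Box2D trackedObject = Process(probMap, new Rectangle(50, 50, 80, 80), termCriteria, out moments);
        // an empty Box2D signals that the object was lost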
Code example #22
                public static double Calibrate(Std.VectorVectorPoint3f objectPoints, Std.VectorVectorPoint2f imagePoints, Size imageSize,
                                               Mat cameraMatrix, Mat xi, Mat distCoeffs, out Std.VectorVec3d rvecs, out Std.VectorVec3d tvecs, Calib flags, TermCriteria criteria)
                {
                    Mat idx;

                    return(Calibrate(objectPoints, imagePoints, imageSize, cameraMatrix, xi, distCoeffs, out rvecs, out tvecs, flags, criteria, out idx));
                }
Code example #23
 public static extern ExceptionStatus ml_RTrees_getTermCriteria(IntPtr obj, out TermCriteria returnValue);
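
This extern follows the OpenCvSharp NativeMethods pattern: the return value reports whether a native exception occurred, and the criteria comes back through the out parameter. A hedged sketch of a call (the rtreesPtr handle is an assumption):

     ExceptionStatus status = ml_RTrees_getTermCriteria(rtreesPtr, out TermCriteria tc);
     // a non-zero status indicates a native-side exception; tc.MaxCount and tc.Epsilon
     // mirror the trained model's termination criteria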
Code example #24
        private static Rectangle process(Image<Gray, byte> probabilityMap, Rectangle roi, TermCriteria termCriteria, out CentralMoments centralMoments)
        {
            Rectangle imageArea = new Rectangle(0, 0, probabilityMap.Width, probabilityMap.Height);

            Rectangle searchWindow = roi;
            RawMoments moments = new RawMoments(order: 1);

            // Mean shift until the termination criteria are met (iteration cap and/or minimum shift)
            int i = 0;
            double shift = Byte.MaxValue;
            while (termCriteria.ShouldTerminate(i, shift) == false && !searchWindow.IsEmptyArea())
            {
                // Locate first order moments
                moments.Compute(probabilityMap.GetSubRect(searchWindow));

                int shiftX = (int)(moments.CenterX - searchWindow.Width / 2f);
                int shiftY = (int)(moments.CenterY - searchWindow.Height / 2f);

                // Shift the mean (centroid)
                searchWindow.X += shiftX;
                searchWindow.Y += shiftY;

                // Keep the search window inside the image
                searchWindow.Intersect(imageArea);

                shift = System.Math.Abs((double)shiftX) + System.Math.Abs((double)shiftY); //for term criteria only
                i++;
            }

            if (searchWindow.IsEmptyArea() == false)
            {
                // Locate second order moments and perform final shift
                moments.Order = 2;
                moments.Compute(probabilityMap.GetSubRect(searchWindow));

                searchWindow.X += (int)(moments.CenterX - searchWindow.Width / 2f);
                searchWindow.Y += (int)(moments.CenterY - searchWindow.Height / 2f);

                // Keep the search window inside the image
                searchWindow.Intersect(imageArea);
            }

            centralMoments = new CentralMoments(moments); // moments to be used by camshift
            return searchWindow;
        }
Code example #25
        //javadoc: stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize1, imageSize2, K1, xi1, D1, K2, xi2, D2, rvec, tvec, rvecsL, tvecsL, flags, criteria)
        public static double stereoCalibrate(List <Mat> objectPoints, List <Mat> imagePoints1, List <Mat> imagePoints2, Size imageSize1, Size imageSize2, Mat K1, Mat xi1, Mat D1, Mat K2, Mat xi2, Mat D2, Mat rvec, Mat tvec, List <Mat> rvecsL, List <Mat> tvecsL, int flags, TermCriteria criteria)
        {
            if (K1 != null)
            {
                K1.ThrowIfDisposed();
            }
            if (xi1 != null)
            {
                xi1.ThrowIfDisposed();
            }
            if (D1 != null)
            {
                D1.ThrowIfDisposed();
            }
            if (K2 != null)
            {
                K2.ThrowIfDisposed();
            }
            if (xi2 != null)
            {
                xi2.ThrowIfDisposed();
            }
            if (D2 != null)
            {
                D2.ThrowIfDisposed();
            }
            if (rvec != null)
            {
                rvec.ThrowIfDisposed();
            }
            if (tvec != null)
            {
                tvec.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat    objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
            Mat    imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
            Mat    imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
            Mat    rvecsL_mat       = new Mat();
            Mat    tvecsL_mat       = new Mat();
            double retVal           = ccalib_Ccalib_stereoCalibrate_11(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, imageSize1.width, imageSize1.height, imageSize2.width, imageSize2.height, K1.nativeObj, xi1.nativeObj, D1.nativeObj, K2.nativeObj, xi2.nativeObj, D2.nativeObj, rvec.nativeObj, tvec.nativeObj, rvecsL_mat.nativeObj, tvecsL_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
            Converters.Mat_to_vector_Mat(objectPoints_mat, objectPoints);
            objectPoints_mat.release();
            Converters.Mat_to_vector_Mat(imagePoints1_mat, imagePoints1);
            imagePoints1_mat.release();
            Converters.Mat_to_vector_Mat(imagePoints2_mat, imagePoints2);
            imagePoints2_mat.release();
            Converters.Mat_to_vector_Mat(rvecsL_mat, rvecsL);
            rvecsL_mat.release();
            Converters.Mat_to_vector_Mat(tvecsL_mat, tvecsL);
            tvecsL_mat.release();
            return(retVal);
#else
            return(-1);
#endif
        }
Code example #26
    public void ApplyPosterizeImage()
    {
        if (_imageMatrix == null)
        {
            return;
        }

        WizardController.instance.ToggleProcessingWindow();

        // Reshape so each of the image's pixels becomes a row with 3 features (R, G, B)
        int numberOfPixels = _imageMatrix.cols() * _imageMatrix.rows();
        Mat _imageReshaped = new Mat();

        _imageMatrix.copyTo(_imageReshaped);
        _imageReshaped = _imageReshaped.reshape(1, numberOfPixels);
        _imageReshaped.convertTo(_imageReshaped, CvType.CV_32F);
        Mat          bestLabels = new Mat();
        Mat          centers    = new Mat();
        TermCriteria criteria   = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 10, 1.0);
        double       ret        = Core.kmeans(_imageReshaped, WizardController.instance.clusterCount, bestLabels, criteria, 20, Core.KMEANS_PP_CENTERS, centers);

        centers.convertTo(centers, CvType.CV_8UC1);


        centers = centers.reshape(_imageReshaped.channels()); // reshape returns a new Mat; the result must be assigned

        WizardController.instance.posterizeResult = new Texture2D(_imageMatrix.height(), _imageMatrix.width(), TextureFormat.RGB24, false, false);

        WizardController.instance.labelsMatrix = bestLabels.clone();
        int rows = 0;

        WizardController.instance.imageProcessingResults.Clear();
        WizardController.instance.clusterList.Clear();
        Resources.UnloadUnusedAssets();


        int h = _imageMatrix.cols();
        int w = _imageMatrix.rows();

        var colorlist = new List <Color>();

        for (var i = 0; i < centers.rows(); i++)
        {
            int r = (int)centers.get(i, 0)[0];
            int g = (int)centers.get(i, 1)[0];
            int b = (int)centers.get(i, 2)[0];
            colorlist.Add(new Color(r / 255.0f, g / 255.0f, b / 255.0f));
            Mat ipr = Mat.zeros(_imageMatrix.size(), CvType.CV_8U);
            WizardController.instance.imageProcessingResults.Add(ipr);
        }
        WizardController.instance.clusterList.AddRange(colorlist);

        WizardController.instance.backgroundWhiteTexture  = Mat.ones(_imageMatrix.size(), CvType.CV_8U);
        WizardController.instance.backgroundWhiteTexture *= 255;


        Color[] pixels = new Color[w * h];

        for (int x = 0; x < _imageMatrix.cols(); x++)
        {
            for (int y = 0; y < _imageMatrix.rows(); y++)
            {
                int label = (int)bestLabels.get(rows, 0)[0];
                WizardController.instance.imageProcessingResults[label].put(y, x, 255);
                pixels[rows] = colorlist[label];
                rows++;
            }
        }
        WizardController.instance.posterizeResult.SetPixels(0, 0, w, h, pixels);

        for (var i = 0; i < centers.rows(); i++)
        {
            WizardController.instance.imageProcessingResults[i] = WizardController.instance.imageProcessingResults[i].t();
        }
        WizardController.instance.backgroundWhiteTexture = WizardController.instance.backgroundWhiteTexture.t();
        WizardController.instance.posterizeResult.Apply();

        GetComponent <RawImage>().texture = WizardController.instance.posterizeResult;
        WizardController.instance.ToggleProcessingWindow();
    }
Code example #27
 /// <summary>
 /// Meanshift algorithm
 /// </summary>
 /// <param name="probabilityMap">Probability map [0-1].</param>
 /// <param name="roi">Initial search area</param>
 /// <param name="termCriteria">Mean shift termination criteria.</param>
 /// <returns>Object area.</returns>
 public static Rectangle Process(Image<Gray, byte> probabilityMap, Rectangle roi, TermCriteria termCriteria)
 {
     CentralMoments centralMoments;
     return process(probabilityMap, roi, termCriteria, out centralMoments);
 }
Code example #28
 protected JObject Build(TermCriteria criteria)
 {
     return(new JObject(
                new JProperty(criteria.Name, new JObject(
                                  new JProperty(criteria.Field, this.FormatValue(criteria.Member, criteria.Value))))));
 }
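
Note that this TermCriteria is the Elasticsearch query criteria from example #14, not the OpenCV struct. A hedged usage sketch (the constructor shape follows example #14; statusMember, the "term" criteria name, and the resulting JSON are illustrative assumptions):

     JObject query = Build(new TermCriteria("status", statusMember, "published"));
     // query.ToString() => {"term":{"status":"published"}}, assuming criteria.Name is "term"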
Code example #29
        // Use this for initialization
        void Start()
        {
            roiPointList = new List<Point> ();
            termination = new TermCriteria (TermCriteria.EPS | TermCriteria.COUNT, 10, 1);

            webCamTextureToMatHelper = gameObject.GetComponent<WebCamTextureToMatHelper> ();
            webCamTextureToMatHelper.Init ();
        }
Code example #30
        public void Run()
        {
            // Training data
            var points    = new CvPoint2D32f[500];
            var responses = new int[points.Length];
            var rand      = new Random();

            for (int i = 0; i < responses.Length; i++)
            {
                double x = rand.Next(0, 300);
                double y = rand.Next(0, 300);
                points[i]    = new CvPoint2D32f(x, y);
                responses[i] = (y > f(x)) ? 1 : 2;
            }

            // Show training data and f(x)
            using (Mat pointsPlot = Mat.Zeros(300, 300, MatType.CV_8UC3))
            {
                for (int i = 0; i < points.Length; i++)
                {
                    int    x     = (int)points[i].X;
                    int    y     = (int)(300 - points[i].Y);
                    int    res   = responses[i];
                    Scalar color = (res == 1) ? Scalar.Red : Scalar.GreenYellow;
                    pointsPlot.Circle(x, y, 2, color, -1);
                }
                // f(x)
                for (int x = 1; x < 300; x++)
                {
                    int y1 = (int)(300 - f(x - 1));
                    int y2 = (int)(300 - f(x));
                    pointsPlot.Line(x - 1, y1, x, y2, Scalar.LightBlue, 1);
                }
                Window.ShowImages(pointsPlot);
            }

            // Train
            var dataMat = new Mat(points.Length, 2, MatType.CV_32FC1, points);
            var resMat  = new Mat(responses.Length, 1, MatType.CV_32SC1, responses);

            using (var svm = new CvSVM())
            {
                // normalize data
                dataMat /= 300.0;

                var criteria = TermCriteria.Both(1000, 0.000001);
                var param    = new CvSVMParams(
                    SVMType.CSvc,
                    SVMKernelType.Rbf,
                    100.0, // degree
                    100.0, // gamma
                    1.0,   // coeff0
                    1.0,   // c
                    0.5,   // nu
                    0.1,   // p
                    null,
                    criteria);
                svm.Train(dataMat, resMat, null, null, param);

                // Predict for each 300x300 pixel
                using (Mat retPlot = Mat.Zeros(300, 300, MatType.CV_8UC3))
                {
                    for (int x = 0; x < 300; x++)
                    {
                        for (int y = 0; y < 300; y++)
                        {
                            float[] sample    = { x / 300f, y / 300f };
                            var     sampleMat = new CvMat(1, 2, MatrixType.F32C1, sample);
                            int     ret       = (int)svm.Predict(sampleMat);
                            var     plotRect  = new CvRect(x, 300 - y, 1, 1);
                            if (ret == 1)
                            {
                                retPlot.Rectangle(plotRect, Scalar.Red);
                            }
                            else if (ret == 2)
                            {
                                retPlot.Rectangle(plotRect, Scalar.GreenYellow);
                            }
                        }
                    }
                    Window.ShowImages(retPlot);
                }
            }
        }
Code example #31
        /// <summary>
        /// Recognizes the markers.
        /// </summary>
        /// <param name="grayscale">Grayscale.</param>
        /// <param name="detectedMarkers">Detected markers.</param>
        void recognizeMarkers(Mat grayscale, List <Marker> detectedMarkers)
        {
            List <Marker> goodMarkers = new List <Marker> ();

            // Identify the markers
            for (int i = 0; i < detectedMarkers.Count; i++)
            {
                Marker marker = detectedMarkers [i];


                // Find the perspective transformation that brings current marker to rectangular form
                Mat markerTransform = Imgproc.getPerspectiveTransform(new MatOfPoint2f(marker.points.toArray()), m_markerCorners2d);


                // Transform image to get a canonical marker image
                Imgproc.warpPerspective(grayscale, canonicalMarkerImage, markerTransform, markerSize);

                for (int p = 0; p < m_markerDesigns.Count; p++)
                {
                    MatOfInt nRotations = new MatOfInt(0);
                    int      id         = Marker.getMarkerId(canonicalMarkerImage, nRotations, m_markerDesigns [p]);
                    if (id != -1)
                    {
                        marker.id = id;
//                              Debug.Log ("id " + id);

                        //sort the points so that they are always in the same order no matter the camera orientation
                        List <Point> MarkerPointsList = marker.points.toList();

                        //              std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());
                        MarkerPointsList = MarkerPointsList.Skip(4 - nRotations.toArray() [0]).Concat(MarkerPointsList.Take(4 - nRotations.toArray() [0])).ToList();

                        marker.points.fromList(MarkerPointsList);

                        goodMarkers.Add(marker);
                    }
                    nRotations.Dispose();
                }
            }

//              Debug.Log ("goodMarkers " + goodMarkers.Count);

            // Refine marker corners using sub pixel accuracy
            if (goodMarkers.Count > 0)
            {
                int          cornerCount         = 4 * goodMarkers.Count;
                List <Point> preciseCornersPoint = new List <Point> (cornerCount);
                for (int i = 0; i < cornerCount; i++)
                {
                    preciseCornersPoint.Add(new Point(0, 0));
                }

                for (int i = 0; i < goodMarkers.Count; i++)
                {
                    Marker marker = goodMarkers [i];

                    List <Point> markerPointsList = marker.points.toList();

                    for (int c = 0; c < 4; c++)
                    {
                        preciseCornersPoint [i * 4 + c] = markerPointsList [c];
                    }
                }

                MatOfPoint2f preciseCorners = new MatOfPoint2f(preciseCornersPoint.ToArray());

                TermCriteria termCriteria = new TermCriteria(TermCriteria.MAX_ITER | TermCriteria.EPS, 30, 0.01);
                Imgproc.cornerSubPix(grayscale, preciseCorners, new Size(5, 5), new Size(-1, -1), termCriteria);

                preciseCornersPoint = preciseCorners.toList();

                // Copy the refined corner positions back to the markers.
                // Note: toList() returns a copy, so the updated list must be
                // written back with fromList() for the refinement to stick.
                for (int i = 0; i < goodMarkers.Count; i++)
                {
                    Marker marker = goodMarkers [i];

                    List <Point> markerPointsList = marker.points.toList();

                    for (int c = 0; c < 4; c++)
                    {
                        markerPointsList [c] = preciseCornersPoint [i * 4 + c];
                    }
                    marker.points.fromList(markerPointsList);
                }
                preciseCorners.Dispose();
            }

            detectedMarkers.Clear();
            detectedMarkers.AddRange(goodMarkers);
        }
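
The Skip/Concat/Take line above is the subtle part of recognizeMarkers: it left-rotates the four corner points so their order is canonical regardless of how many 90-degree turns the marker was detected at. A minimal standalone sketch of that reindexing (the helper name RotateLeft is hypothetical, not part of the sample):

        using System.Collections.Generic;
        using System.Linq;

        static List<T> RotateLeft<T>(List<T> items, int shift)
        {
            // Left-rotate so that items[shift] becomes the first element,
            // matching std::rotate(begin, begin + shift, end).
            shift = ((shift % items.Count) + items.Count) % items.Count;
            return items.Skip(shift).Concat(items.Take(shift)).ToList();
        }

        // recognizeMarkers rotates by (4 - nRotations), i.e. roughly:
        // marker.points.fromList(RotateLeft(marker.points.toList(), 4 - rotations));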
コード例 #32
0
        static void Main(string[] args)
        {
            Mat src  = Cv2.ImRead("dummy.jpg");
            Mat gray = new Mat();
            Mat dst  = src.Clone();

            Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);

            Point2f[] corners     = Cv2.GoodFeaturesToTrack(gray, 100, 0.03, 5, null, 3, false, 0);
            Point2f[] sub_corners = Cv2.CornerSubPix(gray, corners, new Size(3, 3), new Size(-1, -1), TermCriteria.Both(10, 0.03));

            for (int i = 0; i < corners.Length; i++)
            {
                Point pt = new Point((int)corners[i].X, (int)corners[i].Y);
                Cv2.Circle(dst, pt, 5, Scalar.Yellow, Cv2.FILLED);
            }

            for (int i = 0; i < sub_corners.Length; i++)
            {
                Point pt = new Point((int)sub_corners[i].X, (int)sub_corners[i].Y);
                Cv2.Circle(dst, pt, 5, Scalar.Red, Cv2.FILLED);
            }

            Cv2.ImShow("dst", dst);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
        }
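
TermCriteria.Both(10, 0.03) above combines both stopping conditions in one call; assuming current OpenCvSharp, it is shorthand for:

        // Stop CornerSubPix after 10 iterations, or once a corner moves by
        // less than 0.03 pixels between iterations, whichever comes first.
        var criteria = new TermCriteria(type: CriteriaType.MaxIter | CriteriaType.Eps, maxCount: 10, epsilon: 0.03);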
コード例 #33
0
        public static void Kmeans(Mat input, Mat output, int k)
        {
            using (Mat points = new Mat())
            {
                using (Mat labels = new Mat())
                {
                    using (Mat centers = new Mat())
                    {
                        int width  = input.Cols;
                        int height = input.Rows;

                        points.Create(width * height, 1, MatType.CV_32FC3);
                        centers.Create(k, 1, points.Type());
                        output.Create(height, width, input.Type());

                        // Input Image Data
                        int i = 0;
                        for (int y = 0; y < height; y++)
                        {
                            for (int x = 0; x < width; x++, i++)
                            {
                                Vec3f vec3f = new Vec3f
                                {
                                    Item0 = input.At <Vec3b>(y, x).Item0,
                                    Item1 = input.At <Vec3b>(y, x).Item1,
                                    Item2 = input.At <Vec3b>(y, x).Item2
                                };

                                points.Set <Vec3f>(i, vec3f);
                            }
                        }

                        // Criteria:
                        // – Stop the algorithm iteration if specified accuracy, epsilon, is reached.
                        // – Stop the algorithm after the specified number of iterations, MaxIter.
                        var criteria = new TermCriteria(type: CriteriaType.Eps | CriteriaType.MaxIter, maxCount: 10, epsilon: 1.0);

                        // Finds centers of clusters and groups input samples around the clusters.
                        Cv2.Kmeans(data: points, k: k, bestLabels: labels, criteria: criteria, attempts: 3, flags: KMeansFlags.PpCenters, centers: centers);

                        // Output Image Data
                        i = 0;
                        for (int y = 0; y < height; y++)
                        {
                            for (int x = 0; x < width; x++, i++)
                            {
                                int index = labels.Get <int>(i);

                                Vec3b vec3b = new Vec3b();

                                int firstComponent = Convert.ToInt32(Math.Round(centers.At <Vec3f>(index).Item0));
                                firstComponent = firstComponent > 255 ? 255 : firstComponent < 0 ? 0 : firstComponent;
                                vec3b.Item0    = Convert.ToByte(firstComponent);

                                int secondComponent = Convert.ToInt32(Math.Round(centers.At <Vec3f>(index).Item1));
                                secondComponent = secondComponent > 255 ? 255 : secondComponent < 0 ? 0 : secondComponent;
                                vec3b.Item1     = Convert.ToByte(secondComponent);

                                int thirdComponent = Convert.ToInt32(Math.Round(centers.At <Vec3f>(index).Item2));
                                thirdComponent = thirdComponent > 255 ? 255 : thirdComponent < 0 ? 0 : thirdComponent;
                                vec3b.Item2    = Convert.ToByte(thirdComponent);

                                output.Set(y, x, vec3b);
                            }
                        }
                    }
                }
            }
        }
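
A minimal usage sketch for the helper above (the input file name is a placeholder):

        // Quantize a BGR image down to 8 colors and display the result.
        using (Mat src = Cv2.ImRead("input.jpg"))   // hypothetical file name
        using (Mat dst = new Mat())
        {
            Kmeans(src, dst, k: 8);
            Cv2.ImShow("kmeans", dst);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
        }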
コード例 #34
0
 [DllImport("OpenCvSharpExtern", CallingConvention = CallingConvention.Cdecl)] // attribute required for an extern method; library name assumes OpenCvSharp's native binary
 public static extern ExceptionStatus ml_RTrees_setTermCriteria(IntPtr obj, TermCriteria val);
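
This native export backs the managed setter; through the public API the same call is a property assignment. A sketch, assuming OpenCvSharp's ML module:

        using OpenCvSharp;
        using OpenCvSharp.ML;

        // Stop random-forest training after 100 trees, or once the OOB error
        // improvement falls below 0.01, whichever comes first.
        using var rtrees = RTrees.Create();
        rtrees.TermCriteria = TermCriteria.Both(100, 0.01);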
コード例 #35
0
        public override void RunTest()
        {
            // Training data
            var points    = new Point2f[500];
            var responses = new int[points.Length];
            var rand      = new Random();

            for (int i = 0; i < responses.Length; i++)
            {
                float x = rand.Next(0, 300);
                float y = rand.Next(0, 300);
                points[i]    = new Point2f(x, y);
                responses[i] = (y > Function(x)) ? 1 : 2;
            }

            // Show training data and f(x)
            using (Mat pointsPlot = Mat.Zeros(300, 300, MatType.CV_8UC3))
            {
                for (int i = 0; i < points.Length; i++)
                {
                    int    x     = (int)points[i].X;
                    int    y     = (int)(300 - points[i].Y);
                    int    res   = responses[i];
                    Scalar color = (res == 1) ? Scalar.Red : Scalar.GreenYellow;
                    pointsPlot.Circle(x, y, 2, color, -1);
                }
                // f(x)
                for (int x = 1; x < 300; x++)
                {
                    int y1 = (int)(300 - Function(x - 1));
                    int y2 = (int)(300 - Function(x));
                    pointsPlot.Line(x - 1, y1, x, y2, Scalar.LightBlue, 1);
                }
                Window.ShowImages(pointsPlot);
            }

            // Train
            var dataMat = new Mat(points.Length, 2, MatType.CV_32FC1, points);
            var resMat  = new Mat(responses.Length, 1, MatType.CV_32SC1, responses);

            using var svm = SVM.Create();
            // normalize data
            dataMat /= 300.0;

            // SVM parameters
            svm.Type         = SVM.Types.CSvc;
            svm.KernelType   = SVM.KernelTypes.Rbf;
            svm.TermCriteria = TermCriteria.Both(1000, 0.000001);
            svm.Degree       = 100.0;
            svm.Gamma        = 100.0;
            svm.Coef0        = 1.0;
            svm.C            = 1.0;
            svm.Nu           = 0.5;
            svm.P            = 0.1;

            svm.Train(dataMat, SampleTypes.RowSample, resMat);

            // Predict the class of every pixel on a 300x300 plot
            using Mat retPlot = Mat.Zeros(300, 300, MatType.CV_8UC3);
            for (int x = 0; x < 300; x++)
            {
                for (int y = 0; y < 300; y++)
                {
                    float[]   sample    = { x / 300f, y / 300f };
                    using var sampleMat = new Mat(1, 2, MatType.CV_32FC1, sample); // disposed at the end of each iteration
                    int     ret       = (int)svm.Predict(sampleMat);
                    var     plotRect  = new Rect(x, 300 - y, 1, 1);
                    if (ret == 1)
                    {
                        retPlot.Rectangle(plotRect, Scalar.Red);
                    }
                    else if (ret == 2)
                    {
                        retPlot.Rectangle(plotRect, Scalar.GreenYellow);
                    }
                }
            }
            Window.ShowImages(retPlot);
        }
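
Function(x) is defined elsewhere in the sample class and simply draws the decision boundary used to label the training points; a hypothetical stand-in that keeps values inside the 300x300 plot:

        // Hypothetical boundary curve; any function mapping [0, 300) into (0, 300) works.
        static double Function(double x)
        {
            return 150 + 50 * Math.Sin(x / 300.0 * 2 * Math.PI);
        }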
コード例 #36
0
        //javadoc: calibrate(objectPoints, imagePoints, size, K, xi, D, rvecs, tvecs, flags, criteria)
        public static double calibrate(List <Mat> objectPoints, List <Mat> imagePoints, Size size, Mat K, Mat xi, Mat D, List <Mat> rvecs, List <Mat> tvecs, int flags, TermCriteria criteria)
        {
            if (K != null)
            {
                K.ThrowIfDisposed();
            }
            if (xi != null)
            {
                xi.ThrowIfDisposed();
            }
            if (D != null)
            {
                D.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat    objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
            Mat    imagePoints_mat  = Converters.vector_Mat_to_Mat(imagePoints);
            Mat    rvecs_mat        = new Mat();
            Mat    tvecs_mat        = new Mat();
            double retVal           = ccalib_Ccalib_calibrate_11(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, size.width, size.height, K.nativeObj, xi.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
            Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
            rvecs_mat.release();
            Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
            tvecs_mat.release();
            return(retVal);
#else
            return(-1);
#endif
        }
コード例 #37
0
				// Use this for initialization
				void Start ()
				{
						roiPointList = new List<Point> ();
						termination = new TermCriteria (TermCriteria.EPS | TermCriteria.COUNT, 10, 1);
						
						StartCoroutine (init ());

				}
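
The termination criteria built in Start() are later handed to CamShift on every frame; a sketch of that call, assuming OpenCVForUnity's Video module and a back-projection image plus search window prepared elsewhere in this behaviour (backProj and roiRect are hypothetical names):

						// backProj: hue back-projection of the ROI histogram;
						// roiRect: the current search window.
						RotatedRect r = Video.CamShift (backProj, roiRect, termination);
						// r is the oriented box of the tracked object; its bounding
						// rectangle can seed the next frame's search window.
						roiRect = r.boundingRect ();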