Example #1
        /// <summary>
        /// Upsample via neural network of multiple outputs
        /// </summary>
        /// <param name="img">Image to upscale</param>
        /// <param name="imgsNew">Destination upscaled images</param>
        /// <param name="scaleFactors">Scaling factors of the output nodes</param>
        /// <param name="nodeNames">Names of the output nodes in the neural network</param>
        public void UpsampleMultioutput(
            InputArray img, out Mat[] imgsNew, IEnumerable<int> scaleFactors, IEnumerable<string> nodeNames)
        {
            ThrowIfDisposed();
            if (img == null)
            {
                throw new ArgumentNullException(nameof(img));
            }
            if (scaleFactors == null)
            {
                throw new ArgumentNullException(nameof(scaleFactors));
            }
            if (nodeNames == null)
            {
                throw new ArgumentNullException(nameof(nodeNames));
            }

            using var imgsNewVec = new VectorOfMat();
            var scaleFactorsArray = scaleFactors as int[] ?? scaleFactors.ToArray();
            var nodeNamesArray    = nodeNames as string[] ?? nodeNames.ToArray();

            NativeMethods.HandleException(
                NativeMethods.dnn_superres_DnnSuperResImpl_upsampleMultioutput(
                    ptr, img.CvPtr, imgsNewVec.CvPtr,
                    scaleFactorsArray, scaleFactorsArray.Length,
                    nodeNamesArray, nodeNamesArray.Length));

            GC.KeepAlive(this);
            imgsNew = imgsNewVec.ToArray();
        }
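
Usage, as a hedged sketch: LapSRN is the dnn_superres model that supports multi-output upscaling; the default constructor, the model file, the scale factors, and the output node names below are illustrative assumptions, not values taken from the snippet.

    // Hypothetical usage; model path and node names are placeholders.
    using var sr = new DnnSuperResImpl();
    sr.ReadModel("LapSRN_x8.pb");      // pretrained LapSRN weights (assumed path)
    sr.SetModel("lapsrn", 8);
    using var src = Cv2.ImRead("input.png");
    sr.UpsampleMultioutput(src, out Mat[] upscaled,
        new[] { 2, 4, 8 },             // intermediate scales assumed to be exposed by the model
        new[] { "NCHW_output_2x", "NCHW_output_4x", "NCHW_output_8x" });
    foreach (var m in upscaled) m.Dispose();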
Example #2
 /// <summary>
 /// Returns a training set of descriptors.
 /// </summary>
 /// <returns></returns>
 public Mat[] GetDescriptors()
 {
     using (var descriptors = new VectorOfMat())
     {
         NativeMethods.features2d_BOWTrainer_getDescriptors(ptr, descriptors.CvPtr);
         return descriptors.ToArray();
     }
 }
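
A short usage sketch with BOWKMeansTrainer, one concrete BOWTrainer; the cluster count, the input image, and the SIFT-based descriptor source are illustrative assumptions.

    using var bow = new BOWKMeansTrainer(100);
    using var sift = SIFT.Create();
    using var img = Cv2.ImRead("train.png", ImreadModes.Grayscale);
    using var descriptors = new Mat();
    sift.DetectAndCompute(img, null, out _, descriptors);
    bow.Add(descriptors);
    Mat[] accumulated = bow.GetDescriptors();   // everything added so far via Add()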
Example #3
 /// <summary>
 /// Gets the training data projections of the model.
 /// </summary>
 /// <returns></returns>
 public virtual Mat[] GetProjections()
 {
     ThrowIfDisposed();
     using (var resultVector = new VectorOfMat())
     {
         NativeMethods.face_BasicFaceRecognizer_getProjections(ptr, resultVector.CvPtr);
          return resultVector.ToArray();
     }
 }
Example #4
 /// <summary>
 /// Gets the training data projections of the model.
 /// </summary>
 /// <returns></returns>
 public virtual Mat[] GetProjections()
 {
     ThrowIfDisposed();
     using var resultVector = new VectorOfMat();
     NativeMethods.HandleException(
         NativeMethods.face_BasicFaceRecognizer_getProjections(ptr, resultVector.CvPtr));
     GC.KeepAlive(this);
      return resultVector.ToArray();
 }
Example #5
 /// <summary>
 /// Gets the training histograms of the LBPH model.
 /// </summary>
 /// <returns></returns>
 public virtual Mat[] GetHistograms()
 {
     ThrowIfDisposed();
     using (var resultVector = new VectorOfMat())
     {
         NativeMethods.face_LBPHFaceRecognizer_getHistograms(ptr, resultVector.CvPtr);
         GC.KeepAlive(this);
          return resultVector.ToArray();
     }
 }
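
For context, a hedged sketch of how these getters are typically reached; the factory name follows current OpenCvSharp, and the training data is hypothetical.

    // trainImages (IEnumerable<Mat>) and trainLabels (IEnumerable<int>)
    // are assumed to be prepared elsewhere.
    using var model = LBPHFaceRecognizer.Create();
    model.Train(trainImages, trainLabels);
    Mat[] histograms = model.GetHistograms();   // one LBP histogram per training sample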
Example #6
        /// <summary>
        /// Loads a multi-page image from a file. 
        /// </summary>
        /// <param name="filename">Name of file to be loaded.</param>
        /// <param name="mats">A vector of Mat objects holding each page, if more than one.</param>
        /// <param name="flags">Flag that can take values of @ref cv::ImreadModes, default with IMREAD_ANYCOLOR.</param>
        /// <returns></returns>
        public static bool ImReadMulti(string filename, out Mat[] mats, ImreadModes flags = ImreadModes.AnyColor)
        {
            if (filename == null)
                throw new ArgumentNullException(nameof(filename));

            using (var matsVec = new VectorOfMat())
            {
                int ret = NativeMethods.imgcodecs_imreadmulti(filename, matsVec.CvPtr, (int) flags);
                mats = matsVec.ToArray();
                return ret != 0;
            }
        }
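
A usage sketch, assuming a multi-page TIFF; the file name is a placeholder.

    if (Cv2.ImReadMulti("scan.tiff", out Mat[] pages))
    {
        Console.WriteLine($"Loaded {pages.Length} page(s)");
        foreach (var page in pages)
            page.Dispose();
    }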
Example #7
 /// <summary>
 /// Gets the training histograms of the LBPH model.
 /// </summary>
 /// <returns></returns>
 public virtual Mat[] GetHistograms()
 {
     if (disposed)
     {
         throw new ObjectDisposedException(nameof(LBPHFaceRecognizer));
     }
     using (var resultVector = new VectorOfMat())
     {
         NativeMethods.face_LBPHFaceRecognizer_getHistograms(ptr, resultVector.CvPtr);
          return resultVector.ToArray();
     }
 }
Example #8
        /// <summary>
        /// Copies the native vector&lt;Mat&gt; result of an OutputArray into the managed list.
        /// </summary>
        public override void AssignResult()
        {
            if (!IsReady())
                throw new NotSupportedException();

            // Get the result as a vector of Mat
            using (var vectorOfMat = new VectorOfMat())
            {
                NativeMethods.core_OutputArray_getVectorOfMat(ptr, vectorOfMat.CvPtr);
                list.Clear();
                list.AddRange(vectorOfMat.ToArray());
            }
        }
Example #9
 /// <summary>
 /// Both detects and decodes QR codes in the input image.
 /// </summary>
 /// <param name="inputImage">Input image; grayscale or color (BGR) is supported.</param>
 /// <param name="bbox">Quadrangle vertices of each detected QR code.</param>
 /// <param name="results">Decoded text of each QR code.</param>
 public void DetectAndDecode(InputArray inputImage, out Mat[] bbox, out string?[] results)
 {
     if (inputImage == null)
     {
         throw new ArgumentNullException(nameof(inputImage));
     }
     inputImage.ThrowIfDisposed();
     using var bboxVec = new VectorOfMat();
     using var texts   = new VectorOfString();
     NativeMethods.HandleException(
         NativeMethods.wechat_qrcode_WeChatQRCode_detectAndDecode(
             ptr, inputImage.CvPtr, bboxVec.CvPtr, texts.CvPtr));
     bbox    = bboxVec.ToArray();
     results = texts.ToArray();
     GC.KeepAlive(this);
     GC.KeepAlive(inputImage);
 }
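
A hedged usage sketch; the four model paths that WeChatQRCode.Create expects are placeholders.

    using var qr = WeChatQRCode.Create(
        "detect.prototxt", "detect.caffemodel",   // detector model (assumed paths)
        "sr.prototxt", "sr.caffemodel");          // super-resolution model (assumed paths)
    using var input = Cv2.ImRead("qr.png");
    qr.DetectAndDecode(input, out Mat[] bboxes, out string?[] texts);
    foreach (var text in texts)
        Console.WriteLine(text);
    foreach (var b in bboxes) b.Dispose();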
Example #10
        public void VectorOfMat()
        {
            var mats = new Mat[]
            {
                Mat.Eye(2, 2, MatType.CV_8UC1),
                Mat.Ones(2, 2, MatType.CV_64FC1),
            };

            using (var vec = new VectorOfMat(mats))
            {
                Assert.Equal(2, vec.Size);
                var dst = vec.ToArray();
                Assert.Equal(2, dst.Length);

                var eye = dst[0];
                var one = dst[1];

                Assert.Equal(1, eye.Get<byte>(0, 0));
                Assert.Equal(0, eye.Get<byte>(0, 1));
                Assert.Equal(0, eye.Get<byte>(1, 0));
                Assert.Equal(1, eye.Get<byte>(1, 1));

                Assert.Equal(1, one.Get<double>(0, 0), 6);
                Assert.Equal(1, one.Get<double>(0, 1), 6);
                Assert.Equal(1, one.Get<double>(1, 0), 6);
                Assert.Equal(1, one.Get<double>(1, 1), 6);

                foreach (var d in dst)
                {
                    d.Dispose();
                }
            }

            foreach (var mat in mats)
            {
                mat.Dispose();
            }
        }
Example #11
        /// <summary>
        /// Copies each plane of a multi-channel array to a dedicated array
        /// </summary>
        /// <param name="src">The source multi-channel array</param>
        /// <param name="mv">The destination array or vector of arrays; 
        /// The number of arrays must match mtx.channels() . 
        /// The arrays themselves will be reallocated if needed</param>
        public static void Split(Mat src, out Mat[] mv)
        {
            if (src == null)
                throw new ArgumentNullException(nameof(src));
            src.ThrowIfDisposed();

            IntPtr mvPtr;
            NativeMethods.core_split(src.CvPtr, out mvPtr);

            using (var vec = new VectorOfMat(mvPtr))
            {
                mv = vec.ToArray();
            }
            GC.KeepAlive(src);
        }
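
Usage sketch: splitting a BGR image into its planes (file name illustrative).

    using var color = Cv2.ImRead("photo.jpg");   // 3-channel BGR
    Cv2.Split(color, out Mat[] planes);          // planes.Length == color.Channels()
    using var blue  = planes[0];
    using var green = planes[1];
    using var red   = planes[2];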
Example #12
        /// <summary>
        /// Performs images matching.
        /// </summary>
        /// <param name="features">Features of the source images</param>
        /// <param name="mask">Mask indicating which image pairs must be matched</param>
        /// <returns>Found pairwise matches</returns>
        public virtual MatchesInfo[] Apply(
            IEnumerable<ImageFeatures> features, Mat? mask = null)
        {
            if (features == null)
            {
                throw new ArgumentNullException(nameof(features));
            }
            ThrowIfDisposed();

            var featuresArray = features.CastOrToArray();

            if (featuresArray.Length == 0)
            {
                throw new ArgumentException("Empty features array", nameof(features));
            }

            var keypointVecs   = new VectorOfKeyPoint?[featuresArray.Length];
            var wImageFeatures = new WImageFeatures[featuresArray.Length];

            try
            {
                for (int i = 0; i < featuresArray.Length; i++)
                {
                    if (featuresArray[i].Descriptors == null)
                    {
                        throw new ArgumentException("features contain null descriptor mat", nameof(features));
                    }
                    featuresArray[i].Descriptors.ThrowIfDisposed();

                    keypointVecs[i]   = new VectorOfKeyPoint();
                    wImageFeatures[i] = new WImageFeatures
                    {
                        ImgIdx      = featuresArray[i].ImgIdx,
                        ImgSize     = featuresArray[i].ImgSize,
                        Keypoints   = keypointVecs[i]!.CvPtr,
                        Descriptors = featuresArray[i].Descriptors.CvPtr,
                    };
                }

                using var srcImgIndexVecs = new VectorOfInt32();
                using var dstImgIndexVecs = new VectorOfInt32();
                using var matchesVec      = new VectorOfVectorDMatch();
                using var inliersMaskVec  = new VectorOfVectorByte();
                using var numInliersVecs  = new VectorOfInt32();
                using var hVecs           = new VectorOfMat();
                using var confidenceVecs  = new VectorOfDouble();
                NativeMethods.HandleException(
                    NativeMethods.stitching_FeaturesMatcher_apply2(
                        ptr,
                        wImageFeatures, wImageFeatures.Length,
                        mask?.CvPtr ?? IntPtr.Zero,
                        srcImgIndexVecs.CvPtr,
                        dstImgIndexVecs.CvPtr,
                        matchesVec.CvPtr,
                        inliersMaskVec.CvPtr,
                        numInliersVecs.CvPtr,
                        hVecs.CvPtr,
                        confidenceVecs.CvPtr
                        ));

                var srcImgIndices = srcImgIndexVecs.ToArray();
                var dstImgIndices = dstImgIndexVecs.ToArray();
                var matches       = matchesVec.ToArray();
                var inliersMasks  = inliersMaskVec.ToArray();
                var numInliers    = numInliersVecs.ToArray();
                var hs            = hVecs.ToArray();
                var confidences   = confidenceVecs.ToArray();
                var result        = new MatchesInfo[srcImgIndices.Length];
                for (int i = 0; i < srcImgIndices.Length; i++)
                {
                    result[i] = new MatchesInfo(
                        srcImgIndices[i],
                        dstImgIndices[i],
                        matches[i],
                        inliersMasks[i],
                        numInliers[i],
                        hs[i],
                        confidences[i]);
                }
                return result;
            }
            finally
            {
                foreach (var vec in keypointVecs)
                {
                    vec?.Dispose();
                }
                GC.KeepAlive(this);
            }
        }
Example #13
        /// <summary>
        /// Finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
        /// </summary>
        /// <param name="objectPoints">In the new interface it is a vector of vectors of calibration pattern points in the calibration pattern coordinate space. 
        /// The outer vector contains as many elements as the number of the pattern views. If the same calibration pattern is shown in each view and 
        /// it is fully visible, all the vectors will be the same. Although, it is possible to use partially occluded patterns, or even different patterns 
        /// in different views. Then, the vectors will be different. The points are 3D, but since they are in a pattern coordinate system, then, 
        /// if the rig is planar, it may make sense to put the model to a XY coordinate plane so that Z-coordinate of each input object point is 0.
        /// In the old interface all the vectors of object points from different views are concatenated together.</param>
        /// <param name="imagePoints">In the new interface it is a vector of vectors of the projections of calibration pattern points. 
        /// imagePoints.Count() and objectPoints.Count() and imagePoints[i].Count() must be equal to objectPoints[i].Count() for each i.</param>
        /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
        /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. 
        /// If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be 
        /// initialized before calling the function.</param>
        /// <param name="distCoeffs">Output vector of distortion coefficients (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.</param>
        /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues() ) estimated for each pattern view. That is, each k-th rotation vector 
        /// together with the corresponding k-th translation vector (see the next output parameter description) brings the calibration pattern 
        /// from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the 
        /// calibration pattern in the k-th pattern view (k=0.. M -1)</param>
        /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
        /// <param name="flags">Different flags that may be zero or a combination of the CalibrationFlag values</param>
        /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
        /// <returns></returns>
        public static double CalibrateCamera(
            IEnumerable<IEnumerable<Point3d>> objectPoints,
            IEnumerable<IEnumerable<Point2d>> imagePoints,
            Size imageSize,
            double[,] cameraMatrix,
            double[] distCoeffs,
            out Vec3d[] rvecs,
            out Vec3d[] tvecs,
            CalibrationFlag flags = CalibrationFlag.Zero,
            TermCriteria? criteria = null)
        {
            if (objectPoints == null)
                throw new ArgumentNullException(nameof(objectPoints));
            if (imagePoints == null)
                throw new ArgumentNullException(nameof(imagePoints));
            if (cameraMatrix == null)
                throw new ArgumentNullException(nameof(cameraMatrix));
            if (distCoeffs == null)
                throw new ArgumentNullException(nameof(distCoeffs));

            TermCriteria criteria0 = criteria.GetValueOrDefault(
                new TermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 30, Double.Epsilon));

            using (var op = new ArrayAddress2<Point3d>(objectPoints))
            using (var ip = new ArrayAddress2<Point2d>(imagePoints))
            using (var rvecsVec = new VectorOfMat())
            using (var tvecsVec = new VectorOfMat())
            {
                double ret = NativeMethods.calib3d_calibrateCamera_vector(
                    op.Pointer, op.Dim1Length, op.Dim2Lengths,
                    ip.Pointer, ip.Dim1Length, ip.Dim2Lengths,
                    imageSize, cameraMatrix, distCoeffs, distCoeffs.Length,
                    rvecsVec.CvPtr, tvecsVec.CvPtr, (int)flags, criteria0);
                Mat[] rvecsM = rvecsVec.ToArray();
                Mat[] tvecsM = tvecsVec.ToArray();
                rvecs = EnumerableEx.SelectToArray(rvecsM, m => m.Get<Vec3d>(0));
                tvecs = EnumerableEx.SelectToArray(tvecsM, m => m.Get<Vec3d>(0));
                return ret;
            }
        }
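
A hedged sketch of calling this overload; the point lists and image size are assumed to have been collected beforehand (e.g. with Cv2.FindChessboardCorners over several views).

    var cameraMatrix = new double[3, 3];
    var distCoeffs = new double[5];
    double rms = Cv2.CalibrateCamera(
        objectPointsList,     // IEnumerable<IEnumerable<Point3d>>, assumed prepared
        imagePointsList,      // IEnumerable<IEnumerable<Point2d>>, assumed prepared
        new Size(640, 480),
        cameraMatrix, distCoeffs,
        out Vec3d[] rvecs, out Vec3d[] tvecs);
    Console.WriteLine($"RMS reprojection error: {rms}");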
Example #14
        /// <summary>
        /// Finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
        /// </summary>
        /// <param name="objectPoints">In the new interface it is a vector of vectors of calibration pattern points in the calibration pattern coordinate space. 
        /// The outer vector contains as many elements as the number of the pattern views. If the same calibration pattern is shown in each view and 
        /// it is fully visible, all the vectors will be the same. Although, it is possible to use partially occluded patterns, or even different patterns 
        /// in different views. Then, the vectors will be different. The points are 3D, but since they are in a pattern coordinate system, then, 
        /// if the rig is planar, it may make sense to put the model to a XY coordinate plane so that Z-coordinate of each input object point is 0.
        /// In the old interface all the vectors of object points from different views are concatenated together.</param>
        /// <param name="imagePoints">In the new interface it is a vector of vectors of the projections of calibration pattern points. 
        /// imagePoints.Count() and objectPoints.Count() and imagePoints[i].Count() must be equal to objectPoints[i].Count() for each i.</param>
        /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
        /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. 
        /// If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be 
        /// initialized before calling the function.</param>
        /// <param name="distCoeffs">Output vector of distortion coefficients (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.</param>
        /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues() ) estimated for each pattern view. That is, each k-th rotation vector 
        /// together with the corresponding k-th translation vector (see the next output parameter description) brings the calibration pattern 
        /// from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the 
        /// calibration pattern in the k-th pattern view (k=0.. M -1)</param>
        /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
        /// <param name="flags">Different flags that may be zero or a combination of the CalibrationFlag values</param>
        /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
        /// <returns></returns>
        public static double CalibrateCamera(
            IEnumerable<Mat> objectPoints,
            IEnumerable<Mat> imagePoints,
            Size imageSize,
            InputOutputArray cameraMatrix,
            InputOutputArray distCoeffs,
            out Mat[] rvecs, 
            out Mat[] tvecs,
            CalibrationFlag flags = CalibrationFlag.Zero, 
            TermCriteria? criteria = null)
        {
            if (objectPoints == null)
                throw new ArgumentNullException(nameof(objectPoints));
            if (imagePoints == null)
                throw new ArgumentNullException(nameof(imagePoints));
            if (cameraMatrix == null)
                throw new ArgumentNullException(nameof(cameraMatrix));
            if (distCoeffs == null)
                throw new ArgumentNullException(nameof(distCoeffs));
            cameraMatrix.ThrowIfNotReady();
            distCoeffs.ThrowIfNotReady();
            cameraMatrix.ThrowIfNotReady();
            distCoeffs.ThrowIfNotReady();

            TermCriteria criteria0 = criteria.GetValueOrDefault(
                new TermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 30, Double.Epsilon));

            IntPtr[] objectPointsPtrs = EnumerableEx.SelectPtrs(objectPoints);
            IntPtr[] imagePointsPtrs = EnumerableEx.SelectPtrs(imagePoints);

            double ret;
            using (var rvecsVec = new VectorOfMat())
            using (var tvecsVec = new VectorOfMat())
            {
                ret = NativeMethods.calib3d_calibrateCamera_InputArray(
                    objectPointsPtrs, objectPointsPtrs.Length,
                    imagePointsPtrs, imagePointsPtrs.Length,
                    imageSize, cameraMatrix.CvPtr, distCoeffs.CvPtr,
                    rvecsVec.CvPtr, tvecsVec.CvPtr, (int)flags, criteria0);
                rvecs = rvecsVec.ToArray();
                tvecs = tvecsVec.ToArray();
            }

            cameraMatrix.Fix();
            distCoeffs.Fix();
            return ret;
        }
Example #15
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
        /// <return>検出された輪郭.各輪郭は,点のベクトルとして格納されます.</return>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
        /// <returns>Detected contours. Each contour is stored as a vector of points.</returns>
#endif
        public static MatOfPoint[] FindContoursAsMat(InputOutputArray image, 
            ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            image.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr;
            NativeMethods.imgproc_findContours2_OutputArray(image.CvPtr, out contoursPtr, (int)mode, (int)method, offset0);
            image.Fix();

            using (var contoursVec = new VectorOfMat(contoursPtr))
            {
                return contoursVec.ToArray<MatOfPoint>();
            }
        }
Example #16
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="contours">検出された輪郭.各輪郭は,点のベクトルとして格納されます.</param>
        /// <param name="hierarchy">画像のトポロジーに関する情報を含む出力ベクトル.これは,輪郭数と同じ数の要素を持ちます.各輪郭 contours[i] に対して,
        /// 要素 hierarchy[i]のメンバにはそれぞれ,同じ階層レベルに存在する前後の輪郭,最初の子輪郭,および親輪郭の 
        /// contours インデックス(0 基準)がセットされます.また,輪郭 i において,前後,親,子の輪郭が存在しない場合,
        /// それに対応する hierarchy[i] の要素は,負の値になります.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
        /// <param name="hierarchy">Optional output vector, containing information about the image topology. 
        /// It has as many elements as the number of contours. For each i-th contour contours[i], 
        /// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next 
        /// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. 
        /// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
#endif
        public static void FindContours(InputOutputArray image, out Mat[] contours,
            OutputArray hierarchy, ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            if (hierarchy == null)
                throw new ArgumentNullException(nameof(hierarchy));
            image.ThrowIfNotReady();
            hierarchy.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr;
            NativeMethods.imgproc_findContours1_OutputArray(image.CvPtr, out contoursPtr, hierarchy.CvPtr, (int)mode, (int)method, offset0);

            using (var contoursVec = new VectorOfMat(contoursPtr))
            {
                contours = contoursVec.ToArray();
            }
            image.Fix();
            hierarchy.Fix();
        }
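
A usage sketch for the overload above; the input file and threshold are illustrative, and the enum names follow the snippet's own era (ContourRetrieval/ContourChain), so the thresholding enum is likewise an assumption.

    using var gray = Cv2.ImRead("shapes.png", ImreadModes.Grayscale);
    using var bin = new Mat();
    Cv2.Threshold(gray, bin, 128, 255, ThresholdType.Binary);   // enum name assumed for this era
    using var hierarchy = new Mat();
    Cv2.FindContours(bin, out Mat[] contours, hierarchy,
        ContourRetrieval.External, ContourChain.ApproxSimple);
    Console.WriteLine($"Found {contours.Length} contour(s)");
    foreach (var c in contours) c.Dispose();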
Example #17
        /// <summary>
        /// Builds a Gaussian pyramid from the given base image.
        /// </summary>
        /// <param name="baseMat">Base image of the pyramid.</param>
        /// <param name="nOctaves">Number of octaves.</param>
        /// <returns>The constructed Gaussian pyramid.</returns>
        public Mat[] BuildGaussianPyramid(Mat baseMat, int nOctaves)
        {
            ThrowIfDisposed();
            if (baseMat == null)
                throw new ArgumentNullException(nameof(baseMat));
            baseMat.ThrowIfDisposed();

            using (VectorOfMat pyrVec = new VectorOfMat())
            {
                NativeMethods.nonfree_SIFT_buildGaussianPyramid(ptr, baseMat.CvPtr, pyrVec.CvPtr, nOctaves);
                return pyrVec.ToArray();
            }
        }
Example #18
 /// <summary>
 /// Get train descriptors collection.
 /// </summary>
 /// <returns></returns>
 public Mat[] GetTrainDescriptors()
 {
     ThrowIfDisposed();
     using (var matVec = new VectorOfMat())
     {
         NativeMethods.features2d_DescriptorMatcher_getTrainDescriptors(ptr, matVec.CvPtr);
         return matVec.ToArray();
     }
 }
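
A minimal usage sketch; the matcher type string and the descriptor Mat are assumptions.

    using var matcher = DescriptorMatcher.Create("BruteForce");
    matcher.Add(new[] { trainDescriptors });    // trainDescriptors: a Mat computed elsewhere
    Mat[] stored = matcher.GetTrainDescriptors();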
Example #19
        /// <summary>
        /// Builds a difference-of-Gaussians (DoG) pyramid from a Gaussian pyramid.
        /// </summary>
        /// <param name="pyr">Gaussian pyramid (e.g. the result of BuildGaussianPyramid).</param>
        /// <returns>The constructed DoG pyramid.</returns>
        public Mat[] BuildDoGPyramid(IEnumerable<Mat> pyr)
        {
            ThrowIfDisposed();
            if (pyr == null)
                throw new ArgumentNullException(nameof(pyr));

            IntPtr[] pyrPtrs = EnumerableEx.SelectPtrs(pyr);
            using (VectorOfMat dogPyrVec = new VectorOfMat())
            {
                NativeMethods.nonfree_SIFT_buildDoGPyramid(ptr, pyrPtrs, pyrPtrs.Length, dogPyrVec.CvPtr);
                return dogPyrVec.ToArray();
            }
        }
Example #20
        /// <summary>
        /// Returns covariation matrices.
        /// Returns a vector of covariation matrices. The number of matrices equals the number of 
        /// Gaussian mixtures; each matrix is a square floating-point matrix NxN, where N is the space dimensionality.
        /// </summary>
        public Mat[] GetCovs()
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);

            using (var vec = new VectorOfMat())
            {
                NativeMethods.ml_EM_getCovs(ptr, vec.CvPtr);
                return vec.ToArray();
            }
        }
Example #21
        /// <summary>
        /// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
        /// </summary>
        /// <param name="img">8-bit input image.</param>
        /// <param name="pyramid">output pyramid.</param>
        /// <param name="winSize">window size of optical flow algorithm. 
        /// Must be not less than winSize argument of calcOpticalFlowPyrLK(). 
        /// It is needed to calculate required padding for pyramid levels.</param>
        /// <param name="maxLevel">0-based maximal pyramid level number.</param>
        /// <param name="withDerivatives">set to precompute gradients for the every pyramid level. 
        /// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will 
        /// calculate them internally.</param>
        /// <param name="pyrBorder">the border mode for pyramid layers.</param>
        /// <param name="derivBorder">the border mode for gradients.</param>
        /// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible. 
        /// You can pass false to force data copying.</param>
        /// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
        public static int BuildOpticalFlowPyramid(
            InputArray img, out Mat[] pyramid,
            Size winSize, int maxLevel,
            bool withDerivatives = true,
            BorderTypes pyrBorder = BorderTypes.Reflect101,
            BorderTypes derivBorder = BorderTypes.Constant,
            bool tryReuseInputImage = true)
        {
            if (img == null)
                throw new ArgumentNullException(nameof(img));
            img.ThrowIfDisposed();

            using (var pyramidVec = new VectorOfMat())
            {
                int result = NativeMethods.video_buildOpticalFlowPyramid2(
                    img.CvPtr, pyramidVec.CvPtr, winSize, maxLevel, withDerivatives ? 1 : 0,
                    (int) pyrBorder, (int) derivBorder, tryReuseInputImage ? 1 : 0);
                pyramid = pyramidVec.ToArray();
                return result;
            }
        }
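
A hedged usage sketch pairing the pyramid with the pyramidal Lucas-Kanade tracker; the file name and window size are illustrative.

    using var prev = Cv2.ImRead("frame0.png", ImreadModes.Grayscale);
    var winSize = new Size(21, 21);
    int levels = Cv2.BuildOpticalFlowPyramid(prev, out Mat[] pyramid, winSize, maxLevel: 3);
    Console.WriteLine($"Constructed {levels} pyramid level(s)");
    // The pyramid Mats can be passed to Cv2.CalcOpticalFlowPyrLK in place of the raw image.
    foreach (var level in pyramid) level.Dispose();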