Example #1
 /// <summary>
 /// Updates the motion history image by a moving silhouette.
 /// </summary>
 /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs.</param>
 /// <param name="mhi">
 /// Motion history image that is updated by the function (single-channel, 32-bit floating-point).
 /// </param>
 /// <param name="timestamp">Current time in milliseconds or other units.</param>
 /// <param name="duration">Maximal duration of the motion track in the same units as <paramref name="timestamp"/>.</param>
 public static void UpdateMotionHistory(Arr silhouette, Arr mhi, double timestamp, double duration)
 {
     NativeMethods.cvUpdateMotionHistory(silhouette, mhi, timestamp, duration);
 }
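A minimal usage sketch for this wrapper. The IplImage and IplDepth types, the Size struct, and the static CV host class are assumptions based on typical OpenCV .NET bindings; only UpdateMotionHistory itself comes from the example above.
 // Hypothetical usage: accumulate a motion history image (MHI) across frames.
 static IplImage AccumulateMotionHistory(IEnumerable<IplImage> silhouettes, Size size)
 {
     // The MHI must be single-channel, 32-bit floating-point.
     var mhi = new IplImage(size, IplDepth.F32, 1);
     mhi.SetZero();
     double timestamp = 0;
     const double duration = 1000; // forget motion older than 1000 ms
     foreach (var silhouette in silhouettes)
     {
         timestamp += 33; // ~30 fps, timestamps in milliseconds
         CV.UpdateMotionHistory(silhouette, mhi, timestamp, duration);
     }
     return mhi;
 }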
Example #2
 /// <summary>
 /// Finds the object center on a back projection.
 /// </summary>
 /// <param name="probImage">Back projection of object histogram (see <see cref="Histogram.CalcArrBackProject"/>).</param>
 /// <param name="window">Initial search window.</param>
 /// <param name="criteria">Criteria applied to determine when the window search should be finished.</param>
 /// <param name="comp">
 /// Resultant structure that contains the converged search window coordinates (<see cref="ConnectedComp.Rect"/> field)
 /// and the sum of all of the pixels inside the window (<see cref="ConnectedComp.Area"/> field).
 /// </param>
 /// <returns><b>true</b> if the search was successful; <b>false</b> otherwise.</returns>
 public static bool MeanShift(Arr probImage, Rect window, TermCriteria criteria, out ConnectedComp comp)
 {
     return NativeMethods.cvMeanShift(probImage, window, criteria, out comp) > 0;
 }
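A usage sketch, assuming the static host class is named CV and that TermCriteria has a (type, maxIter, epsilon) constructor with a TermCriteriaType flags enumeration; these names are illustrative.
 // Hypothetical usage: refine an object window on a histogram back projection.
 static Rect TrackWindow(Arr backProjection, Rect window)
 {
     // Stop after 10 iterations or when the window moves by less than 1 pixel.
     var criteria = new TermCriteria(TermCriteriaType.MaxIter | TermCriteriaType.Epsilon, 10, 1);
     ConnectedComp comp;
     return CV.MeanShift(backProjection, window, criteria, out comp)
         ? comp.Rect  // converged window: a good starting point for the next frame
         : window;    // search failed; keep the previous window
 }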
Example #3
 /// <summary>
 /// Displays the image in the specified window.
 /// </summary>
 /// <param name="image">The image to be shown.</param>
 public void ShowImage(Arr image)
 {
     NativeMethods.cvShowImage(name, image);
 }
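ShowImage is an instance method on a window wrapper that stores the native window name. A usage sketch, assuming the wrapper class is named NamedWindow, is disposable, and that a CV.WaitKey binding exists (all illustrative):
 // Hypothetical usage: display an image and pump GUI events until a key is pressed.
 static void Preview(Arr image)
 {
     using (var window = new NamedWindow("preview"))
     {
         window.ShowImage(image);
         CV.WaitKey(0); // 0 waits indefinitely
     }
 }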
Example #4
 /// <summary>
 /// Estimates the optimal affine transformation between two images or two point sets.
 /// </summary>
 /// <param name="A">
 /// First input 2D point set stored as a <see cref="Mat"/>, or an image.
 /// </param>
 /// <param name="B">
 /// Second input 2D point set of the same size and the same type as <paramref name="A"/>, or another image.
 /// </param>
 /// <param name="M">The output optimal 2x3 affine transformation matrix.</param>
 /// <param name="fullAffine">
 /// If <b>true</b>, the function finds an optimal affine transformation with no additional restrictions
 /// (6 degrees of freedom). Otherwise, the class of transformations to choose from is limited to
 /// combinations of translation, rotation, and uniform scaling (4 degrees of freedom).
 /// </param>
 /// <returns>
 /// <b>true</b> if the optimal affine transformation was successfully found; <b>false</b> otherwise.
 /// </returns>
 public static bool EstimateRigidTransform(Arr A, Arr B, Mat M, bool fullAffine)
 {
     return NativeMethods.cvEstimateRigidTransform(A, B, M, fullAffine ? 1 : 0) > 0;
 }
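A usage sketch, assuming the static host class is named CV and that Mat has a (rows, columns, depth, channels) constructor with a Depth enumeration (illustrative names):
 // Hypothetical usage: estimate a similarity transform (translation, rotation, uniform scale).
 static Mat EstimateSimilarity(Arr previousPoints, Arr currentPoints)
 {
     var transform = new Mat(2, 3, Depth.F32, 1); // receives the 2x3 affine matrix
     if (!CV.EstimateRigidTransform(previousPoints, currentPoints, transform, fullAffine: false))
     {
         throw new InvalidOperationException("No affine transformation could be estimated.");
     }
     return transform;
 }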
Example #5
 /// <summary>
 /// Computes the disparity map using the block matching algorithm.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image.</param>
 /// <param name="right">The right image of the same size and the same type.</param>
 /// <param name="disparity">
 /// The output single-channel 16-bit signed, or 32-bit floating-point disparity map of the
 /// same size as input images. In the first case the computed disparities are represented
 /// as fixed-point numbers with 4 fractional bits (i.e. the computed disparity values are
 /// multiplied by 16 and rounded to integers).
 /// </param>
 public void FindStereoCorrespondence(Arr left, Arr right, Arr disparity)
 {
     NativeMethods.cvFindStereoCorrespondenceBM(left, right, disparity, this);
 }
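FindStereoCorrespondence is an instance method on the block matcher state object that is passed as the last native argument. A usage sketch, assuming the containing class is named StereoBM and that IplImage/IplDepth are the binding's image types (illustrative names):
 // Hypothetical usage: compute a fixed-point disparity map from a rectified stereo pair.
 static IplImage ComputeDisparity(StereoBM matcher, IplImage left, IplImage right)
 {
     // 16-bit signed output: disparities are scaled by 16 (4 fractional bits).
     var disparity = new IplImage(left.Size, IplDepth.S16, 1);
     matcher.FindStereoCorrespondence(left, right, disparity);
     return disparity;
 }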
Example #6
 /// <summary>
 /// Converts one image to another with an optional vertical flip.
 /// </summary>
 /// <param name="src">Source image.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="flags">The operation flags.</param>
 public static void ConvertImage(Arr src, Arr dst, ConvertImageFlags flags = ConvertImageFlags.None)
 {
     NativeMethods.cvConvertImage(src, dst, flags);
 }
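A usage sketch, assuming the static host class is named CV and that ConvertImageFlags has a Flip member corresponding to the native CV_CVTIMG_FLIP flag (illustrative names):
 // Hypothetical usage: convert an image while flipping it vertically,
 // e.g. when interoperating with bottom-up bitmap buffers.
 static void ConvertFlipped(Arr src, Arr dst)
 {
     CV.ConvertImage(src, dst, ConvertImageFlags.Flip);
 }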
Example #7
 /// <summary>
 /// Finds the best match for each descriptor in <paramref name="queryDescriptors"/>.
 /// </summary>
 /// <param name="queryDescriptors">The set of descriptors for which to find the best match.</param>
 /// <param name="trainDescriptors">The training set of descriptors.</param>
 /// <param name="matches">
 /// The collection of best matches found for each permissible descriptor in
 /// <paramref name="queryDescriptors"/>.
 /// </param>
 /// <param name="mask">
 /// The optional operation mask specifying permissible matches between input query descriptors
 /// and stored training descriptors.
 /// </param>
 public override void Match(Arr queryDescriptors, Arr trainDescriptors, DMatchCollection matches, Arr mask)
 {
     NativeMethods.cv_features2d_BFMatcher_match(this, queryDescriptors, trainDescriptors, matches, mask ?? Arr.Null);
 }
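This override belongs to a brute-force matcher wrapper. A usage sketch, assuming the class is named BFMatcher with a constructor that takes a distance norm (illustrative names):
 // Hypothetical usage: brute-force match two descriptor sets with no mask.
 static DMatchCollection MatchAll(Arr queryDescriptors, Arr trainDescriptors)
 {
     var matcher = new BFMatcher(NormTypes.L2);
     var matches = new DMatchCollection();
     matcher.Match(queryDescriptors, trainDescriptors, matches, null); // null mask permits every pair
     return matches;
 }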
Example #8
 /// <summary>
 /// Inserts an array into the sequence at the specified index.
 /// </summary>
 /// <param name="index">The index at which to insert the array.</param>
 /// <param name="array">The inserted array.</param>
 public void Insert(int index, Arr array)
 {
     NativeMethods.cvSeqInsertSlice(this, index, array);
 }
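A usage sketch for the splice operation, assuming the containing sequence class is named Seq and exposes a Count property (illustrative names):
 // Hypothetical usage: splice the elements of an array into a sequence.
 static void InsertSlice(Seq sequence, Arr slice)
 {
     // The array's element type must match the sequence element type.
     sequence.Insert(0, slice);              // prepend the slice
     sequence.Insert(sequence.Count, slice); // append another copy at the end
 }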
Example #9
 /// <summary>
 /// Computes the descriptors for a set of keypoints in an image.
 /// </summary>
 /// <param name="image">The image from which to extract keypoint descriptors.</param>
 /// <param name="keyPoints">
 /// The keypoints for which to extract descriptors. Keypoints for which a
 /// descriptor cannot be computed are removed.
 /// </param>
 /// <param name="descriptors">
 /// The array of descriptors computed for the specified set of keypoints.
 /// </param>
 public abstract void Compute(Arr image, KeyPointCollection keyPoints, Arr descriptors);
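Compute is abstract; a concrete descriptor extractor provides the implementation. A sketch of the usual detect-then-describe pipeline, assuming the abstract bases are named FeatureDetector and DescriptorExtractor (illustrative names):
 // Hypothetical pipeline: detect keypoints, then compute their descriptors.
 static void DetectAndCompute(FeatureDetector detector, DescriptorExtractor extractor,
                              Arr image, Arr descriptors)
 {
     var keyPoints = new KeyPointCollection();
     detector.Detect(image, keyPoints);                // fill the collection
     extractor.Compute(image, keyPoints, descriptors); // may drop keypoints it cannot describe
 }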
Example #10
 /// <summary>
 /// Detects keypoints in the specified input image.
 /// </summary>
 /// <param name="image">The image on which to detect keypoints.</param>
 /// <param name="keyPoints">The collection that will contain the set of detected keypoints.</param>
 /// <param name="mask">The optional operation mask used to specify where to look for keypoints.</param>
 public abstract void Detect(Arr image, KeyPointCollection keyPoints, Arr mask = null);
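A usage sketch showing the mask parameter, assuming IplImage/IplDepth image types and GetSubRect/Set(Scalar) bindings (all illustrative):
 // Hypothetical usage: detect keypoints only inside a rectangular region of interest.
 static KeyPointCollection DetectInRegion(FeatureDetector detector, IplImage image, Rect roi)
 {
     var mask = new IplImage(image.Size, IplDepth.U8, 1);
     mask.SetZero();
     mask.GetSubRect(roi).Set(Scalar.All(255)); // non-zero pixels enable detection
     var keyPoints = new KeyPointCollection();
     detector.Detect(image, keyPoints, mask);
     return keyPoints;
 }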
Example #11
        /// <summary>
        /// Finds the best match for each descriptor in <paramref name="queryDescriptors"/>.
        /// </summary>
        /// <param name="matcher">The descriptor matcher used to find correspondences between descriptor sets.</param>
        /// <param name="queryDescriptors">The set of descriptors for which to find the best match.</param>
        /// <param name="trainDescriptors">The training set of descriptors.</param>
        /// <param name="mask">
        /// The optional operation mask specifying permissible matches between input query descriptors
        /// and stored training descriptors.
        /// </param>
        /// <returns>
        /// The collection of best matches found for each permissible descriptor in
        /// <paramref name="queryDescriptors"/>.
        /// </returns>
        public static DMatchCollection Match(this IDescriptorMatcher matcher, Arr queryDescriptors, Arr trainDescriptors, Arr mask = null)
        {
            if (matcher == null)
            {
                throw new ArgumentNullException("matcher");
            }

            var matches = new DMatchCollection();

            matcher.Match(queryDescriptors, trainDescriptors, matches, mask);
            return matches;
        }
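A brief usage sketch of the extension method (Count is an assumed property of DMatchCollection):
        // Hypothetical usage: one-line matching via the extension method.
        static int CountMatches(IDescriptorMatcher matcher, Arr query, Arr train)
        {
            var matches = matcher.Match(query, train); // mask omitted: every pair is permissible
            return matches.Count;
        }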
Example #12
        /// <summary>
        /// Detects keypoints in the specified input image.
        /// </summary>
        /// <param name="detector">The feature detector used to find image keypoints.</param>
        /// <param name="image">The image on which to detect keypoints.</param>
        /// <param name="mask">The optional operation mask used to specify where to look for keypoints.</param>
        /// <returns>The collection of detected keypoints.</returns>
        public static KeyPointCollection Detect(this IFeatureDetector detector, Arr image, Arr mask = null)
        {
            if (detector == null)
            {
                throw new ArgumentNullException("detector");
            }

            var keyPoints = new KeyPointCollection();

            detector.Detect(image, keyPoints, mask);
            return keyPoints;
        }
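Together with the Match extension above, this enables a compact end-to-end feature pipeline. A sketch, assuming an IDescriptorExtractor interface that mirrors the Compute method from Example #9 (illustrative name):
        // Hypothetical pipeline: detect, describe, and match two images in a few lines.
        static DMatchCollection MatchImages(IFeatureDetector detector, IDescriptorExtractor extractor,
                                            IDescriptorMatcher matcher, Arr image1, Arr image2,
                                            Arr descriptors1, Arr descriptors2)
        {
            extractor.Compute(image1, detector.Detect(image1), descriptors1);
            extractor.Compute(image2, detector.Detect(image2), descriptors2);
            return matcher.Match(descriptors1, descriptors2);
        }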
Example #13
 /// <summary>
 /// Restores the selected region in an image using the region neighborhood.
 /// </summary>
 /// <param name="src">Input 8-bit 1-channel or 3-channel image.</param>
 /// <param name="inpaintMask">
 /// Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted.
 /// </param>
 /// <param name="dst">Output image with the same size and type as <paramref name="src"/>.</param>
 /// <param name="inpaintRange">
 /// Radius of the circular neighborhood around each point that the algorithm considers when inpainting.
 /// </param>
 /// <param name="flags">Specifies the inpainting method.</param>
 public static void Inpaint(Arr src, Arr inpaintMask, Arr dst, double inpaintRange, InpaintMethod flags)
 {
     NativeMethods.cvInpaint(src, inpaintMask, dst, inpaintRange, flags);
 }
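A usage sketch, assuming the static host class is named CV, that InpaintMethod includes a Telea member (the native API offers the Navier-Stokes and Telea methods), and that IplImage supports Clone (illustrative names):
 // Hypothetical usage: remove scratches marked by non-zero mask pixels.
 static IplImage RemoveDefects(IplImage source, IplImage defectMask)
 {
     var restored = source.Clone(); // same size and type as the source, as required
     CV.Inpaint(source, defectMask, restored, 3, InpaintMethod.Telea);
     return restored;
 }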