Example #1
 /// <summary>
 /// Find the k nearest matches for each query descriptor
 /// </summary>
 /// <param name="queryDescriptors">An n x m matrix of descriptors to be query for nearest neighbors. n is the number of descriptor and m is the size of the descriptor</param>
 /// <param name="k">Number of nearest neighbors to search for</param>
 /// <param name="mask">Can be null if not needed. An n x 1 matrix. If 0, the query descriptor in the corresponding row will be ignored.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="compactResult">Parameter used when the mask (or masks) is not empty. If compactResult is false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the matches vector does not contain matches for fully masked-out query descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 public void KnnMatch(IInputArray queryDescriptors, IInputArray trainDescriptors, VectorOfVectorOfDMatch matches, int k, IInputArray mask = null, bool compactResult = false)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray())
             using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
                 CudaInvoke.cveCudaDescriptorMatcherKnnMatch(_ptr, iaQueryDescriptors, iaTrainDescriptors, matches, k, iaMask, compactResult);
 }
Example #2
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             using (OutputArray oaDescriptors = descriptors.GetOutputArray())
                 Features2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
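A minimal usage sketch for DetectAndCompute, assuming Emgu CV's ORB detector (named ORBDetector in some older releases), the Emgu.CV / Emgu.CV.Features2D / Emgu.CV.Util namespaces, and a hypothetical image path:

 using (Mat image = CvInvoke.Imread("scene.png", Emgu.CV.CvEnum.ImreadModes.Grayscale))
 using (ORB orb = new ORB(500))
 using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
 using (Mat descriptors = new Mat())
 {
     // Detect up to 500 keypoints and compute their descriptors in one pass;
     // mask = null searches the whole image, false runs the detection phase.
     orb.DetectAndCompute(image, null, keyPoints, descriptors, false);
 }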
Example #3
 /// <summary>
 /// Apply filtering to the disparity map.
 /// </summary>
 /// <param name="filter">The disparity filter</param>
 /// <param name="disparityMapLeft">Disparity map of the left view, 1 channel, CV_16S type. Implicitly assumes that disparity values are scaled by 16 (one-pixel disparity corresponds to the value of 16 in the disparity map). Disparity map can have any resolution, it will be automatically resized to fit left_view resolution.</param>
 /// <param name="leftView">Left view of the original stereo-pair to guide the filtering process, 8-bit single-channel or three-channel image.</param>
 /// <param name="filteredDisparityMap">Output disparity map.</param>
 /// <param name="disparityMapRight">Optional argument, some implementations might also use the disparity map of the right view to compute confidence maps, for instance.</param>
 /// <param name="roi">Region of the disparity map to filter. Optional, usually it should be set automatically.</param>
 /// <param name="rightView">Optional argument, some implementations might also use the right view of the original stereo-pair.</param>
 public static void Filter(
     this IDisparityFilter filter,
     IInputArray disparityMapLeft,
     IInputArray leftView,
     IOutputArray filteredDisparityMap,
     IInputArray disparityMapRight = null,
     Rectangle roi         = new Rectangle(),
     IInputArray rightView = null)
 {
     using (InputArray iaDisparityMapLeft = disparityMapLeft.GetInputArray())
         using (InputArray iaLeftView = leftView.GetInputArray())
             using (OutputArray oaFilteredDisparityMap = filteredDisparityMap.GetOutputArray())
                 using (InputArray iaDisparityMapRight = disparityMapRight == null ? InputArray.GetEmpty() : disparityMapRight.GetInputArray())
                     using (InputArray iaRightView = rightView == null ? InputArray.GetEmpty() : rightView.GetInputArray())
                     {
                         cveDisparityFilterFilter(
                             filter.DisparityFilterPtr,
                             iaDisparityMapLeft,
                             iaLeftView,
                             oaFilteredDisparityMap,
                             iaDisparityMapRight,
                             ref roi,
                             iaRightView);
                     }
 }
Example #4
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaKeypoints = keypoints.GetOutputArray())
             using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                 CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, iaMask, stream);
 }
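A hedged sketch of calling this extension: upload a host image to a GpuMat and run detection with the default null stream, which blocks until completion. The concrete IFeature2DAsync detector and the DetectOnGpu helper are assumptions for illustration:

 public static void DetectOnGpu(IFeature2DAsync detector, Mat cpuImage)
 {
     using (GpuMat gpuImage = new GpuMat())
     using (GpuMat gpuKeyPoints = new GpuMat())
     {
         gpuImage.Upload(cpuImage);                    // copy the host image into device memory
         detector.DetectAsync(gpuImage, gpuKeyPoints); // stream = null => synchronous (blocking) call
         // gpuKeyPoints now holds the detector's device-side keypoint buffer.
     }
 }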
Example #5
 /// <summary>
 /// Interpolate position of ChArUco board corners
 /// </summary>
 /// <param name="markerCorners">vector of already detected markers corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4.The order of the corners should be clockwise.</param>
 /// <param name="markerIds">list of identifiers for each marker in corners</param>
 /// <param name="image">input image necesary for corner refinement. Note that markers are not detected and should be sent in corners and ids parameters.</param>
 /// <param name="board">layout of ChArUco board.</param>
 /// <param name="charucoCorners">interpolated chessboard corners</param>
 /// <param name="charucoIds">interpolated chessboard corners identifiers</param>
 /// <param name="cameraMatrix">optional 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">optional vector of distortion coefficients, (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements </param>
 /// <param name="minMarkers">number of adjacent markers that must be detected to return a charuco corner</param>
 /// <returns>The number of interpolated corners.</returns>
 public static int InterpolateCornersCharuco(
     IInputArrayOfArrays markerCorners,
     IInputArray markerIds,
     IInputArray image,
     CharucoBoard board,
     IOutputArray charucoCorners,
     IOutputArray charucoIds,
     IInputArray cameraMatrix = null,
     IInputArray distCoeffs = null,
     int minMarkers = 2)
 {
     using (InputArray iaMarkerCorners = markerCorners.GetInputArray())
     using (InputArray iaMarkerIds = markerIds.GetInputArray())
     using (InputArray iaImage = image.GetInputArray())
     using (OutputArray oaCharucoCorners = charucoCorners.GetOutputArray())
     using (OutputArray oaCharucoIds = charucoIds.GetOutputArray())
     using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
     using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
     {
         return cveArucoInterpolateCornersCharuco(
             iaMarkerCorners, iaMarkerIds, iaImage, board,
             oaCharucoCorners, oaCharucoIds,
             iaCameraMatrix, iaDistCoeffs,
             minMarkers);
     }
 }
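A sketch of the surrounding pipeline, assuming these static methods live on ArucoInvoke (as in Emgu CV's Aruco bindings), hypothetical board dimensions, and an input Mat named image loaded elsewhere. Markers are detected first, then the chessboard corners are interpolated from them:

 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
 using (CharucoBoard board = new CharucoBoard(5, 7, 0.04f, 0.02f, dict))
 using (VectorOfVectorOfPointF markerCorners = new VectorOfVectorOfPointF())
 using (VectorOfInt markerIds = new VectorOfInt())
 using (VectorOfPointF charucoCorners = new VectorOfPointF())
 using (VectorOfInt charucoIds = new VectorOfInt())
 {
     DetectorParameters parameters = DetectorParameters.GetDefault();
     ArucoInvoke.DetectMarkers(image, dict, markerCorners, markerIds, parameters);
     if (markerIds.Size > 0)
     {
         // Without a camera matrix, an approximated pose is used for refinement.
         int corners = ArucoInvoke.InterpolateCornersCharuco(
             markerCorners, markerIds, image, board, charucoCorners, charucoIds);
     }
 }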
Example #6
 /// <summary>
 /// Tries to match the given images and to estimate the rotations of each camera.
 /// </summary>
 /// <param name="images">Input images.</param>
 /// <param name="masks">Masks for each input image specifying where to look for keypoints (optional).</param>
 /// <returns>Status code.</returns>
 public Stitcher.Status EstimateTransform(IInputArrayOfArrays images, IInputArrayOfArrays masks = null)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (InputArray iaMasks = masks == null ? InputArray.GetEmpty() : masks.GetInputArray())
         {
            return StitchingInvoke.cveStitcherEstimateTransform(_ptr, iaImages, iaMasks);
         }
 }
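A hedged usage sketch: estimate the transforms first, then compose the panorama. Assumes two images loaded elsewhere (img1, img2) and that this Stitcher wrapper also exposes a Mode constructor and ComposePanorama, as recent Emgu CV releases do:

 using (Stitcher stitcher = new Stitcher(Stitcher.Mode.Panorama))
 using (VectorOfMat images = new VectorOfMat(img1, img2))
 using (Mat panorama = new Mat())
 {
     Stitcher.Status status = stitcher.EstimateTransform(images);
     if (status == Stitcher.Status.Ok)
         status = stitcher.ComposePanorama(panorama); // uses the rotations estimated above
 }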
Example #7
 /// <summary>
 /// Create DAISY descriptor extractor
 /// </summary>
 /// <param name="radius">Radius of the descriptor at the initial scale.</param>
 /// <param name="qRadius">Amount of radial range division quantity.</param>
 /// <param name="qTheta">Amount of angular range division quantity.</param>
 /// <param name="qHist">Amount of gradient orientations range division quantity.</param>
 /// <param name="norm">Descriptors normalization type.</param>
 /// <param name="H">optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image</param>
 /// <param name="interpolation">Switch to disable interpolation for speed improvement at minor quality loss</param>
 /// <param name="useOrientation">Sample patterns using keypoints orientation, disabled by default.</param>
 public DAISY(float radius       = 15, int qRadius           = 3, int qTheta = 8,
              int qHist          = 8, NormalizationType norm = NormalizationType.None, IInputArray H = null,
              bool interpolation = true, bool useOrientation = false)
 {
     using (InputArray iaH = H == null ? InputArray.GetEmpty() : H.GetInputArray())
         _ptr = XFeatures2DInvoke.cveDAISYCreate(radius, qRadius, qTheta, qHist, norm, iaH, interpolation, useOrientation,
                                                 ref _feature2D);
 }
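DAISY is descriptor-only, so keypoints come from a separate detector. A minimal sketch, assuming the Feature2D DetectRaw/Compute helpers, a FAST detector, and an image loaded elsewhere; with the default parameters each descriptor row is 200 floats:

 using (DAISY daisy = new DAISY())
 using (FastFeatureDetector fast = new FastFeatureDetector())
 using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
 using (Mat descriptors = new Mat())
 {
     fast.DetectRaw(image, keyPoints);             // detect corners to describe
     daisy.Compute(image, keyPoints, descriptors); // one DAISY row per keypoint
 }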
Example #8
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAndComputeAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IInputArray mask, IOutputArray keyPoints,
                                          IOutputArray descriptors, bool useProvidedKeyPoints, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             using (OutputArray oaKeypoints = keyPoints.GetOutputArray())
                 using (OutputArray oaDescriptors = descriptors.GetOutputArray())
                     CudaInvoke.cveCudaFeature2dAsyncDetectAndComputeAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, iaMask, oaKeypoints, oaDescriptors, useProvidedKeyPoints, stream);
 }
Example #9
 /// <summary>
 /// Find the k nearest matches for each query descriptor, using the train descriptors collection that was set by the Add function
 /// </summary>
 /// <param name="queryDescriptor">An n x m matrix of descriptors to be query for nearest neighbours. n is the number of descriptor and m is the size of the descriptor</param>
 /// <param name="k">Number of nearest neighbors to search for</param>
 /// <param name="mask">Can be null if not needed. An n x 1 matrix. If 0, the query descriptor in the corresponding row will be ignored.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="compactResult">
 /// Parameter used when the mask (or masks) is not empty. If compactResult is
 /// false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
 /// the matches vector does not contain matches for fully masked-out query descriptors.
 /// </param>
 public void KnnMatch(
     IInputArray queryDescriptor,
     VectorOfVectorOfDMatch matches,
     int k,
     IInputArray mask   = null,
     bool compactResult = false)
 {
     using (InputArray iaQueryDescriptor = queryDescriptor.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             CvInvoke.cveDescriptorMatcherKnnMatch2(_descriptorMatcherPtr, iaQueryDescriptor, matches, k, iaMask, compactResult);
 }
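A typical follow-up is Lowe's ratio test over the k = 2 neighbors. A sketch, assuming binary (Hamming) descriptors, a BFMatcher whose train collection is filled via Add(), and query/train descriptor Mats computed elsewhere; the 0.75 threshold is a common convention, not part of this API:

 using (BFMatcher matcher = new BFMatcher(DistanceType.Hamming))
 using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
 {
     matcher.Add(trainDescriptors);
     matcher.KnnMatch(queryDescriptors, matches, 2, null);
     for (int i = 0; i < matches.Size; i++)
     {
         using (VectorOfDMatch pair = matches[i])
         {
             // Keep the best match only if it clearly beats the runner-up.
             if (pair.Size == 2 && pair[0].Distance < 0.75f * pair[1].Distance)
             {
                 // pair[0] is a good match
             }
         }
     }
 }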
Example #10
 /// <summary>
 /// Debug draw markers of matched correspondences onto a lineBundle
 /// </summary>
 /// <param name="bundle">the lineBundle</param>
 /// <param name="cols">column coordinates in the line bundle</param>
 /// <param name="colors">colors for the markers. Defaults to white.</param>
 public static void DrawCorrespondencies(
     IInputOutputArray bundle,
     IInputArray cols,
     IInputArray colors = null)
 {
     using (InputOutputArray ioaBundle = bundle.GetInputOutputArray())
         using (InputArray iaCols = cols.GetInputArray())
             using (InputArray iaColors = colors == null ? InputArray.GetEmpty() : colors.GetInputArray())
             {
                 cveDrawCorrespondencies(ioaBundle, iaCols, iaColors);
             }
 }
Example #11
 /// <summary>
 /// Finds the best match for each descriptor from a query set. Train descriptors collection that was set by the Add function is used.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="matches">If a query descriptor is masked out in mask , no match is added for this descriptor. So, matches size may be smaller than the query descriptors count.</param>
 /// <param name="masks">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 public void Match(
     IInputArray queryDescriptors,
     VectorOfDMatch matches,
     IInputArrayOfArrays masks = null
     )
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaMasks = masks == null ? InputArray.GetEmpty() : masks.GetInputArray())
         {
             CvInvoke.cveDescriptorMatcherMatch2(_descriptorMatcherPtr, iaQueryDescriptors, matches, iaMasks);
         }
 }
Example #12
 /// <summary>
 /// Unwraps a 2D phase map.
 /// </summary>
 /// <param name="wrappedPhaseMap">The wrapped phase map that needs to be unwrapped.</param>
 /// <param name="unwrappedPhaseMap">The unwrapped phase map.</param>
 /// <param name="shadowMask">Optional parameter used when some pixels do not hold any phase information in the wrapped phase map.</param>
 public void UnwrapPhaseMap(
     IInputArray wrappedPhaseMap,
     IOutputArray unwrappedPhaseMap,
     IInputArray shadowMask = null)
 {
     using (InputArray iaWrappedPhaseMap = wrappedPhaseMap.GetInputArray())
         using (OutputArray oaUnwrappedPhaseMap = unwrappedPhaseMap.GetOutputArray())
             using (InputArray iaShadowMask = shadowMask == null ? InputArray.GetEmpty() : shadowMask.GetInputArray())
             {
                 PhaseUnwrappingInvoke.cveHistogramPhaseMapUnwrappingUnwrapPhaseMap(_ptr, iaWrappedPhaseMap, oaUnwrappedPhaseMap, iaShadowMask);
             }
 }
Example #13
 /// <summary>
 /// Estimate the Gaussian mixture parameters from a samples set. This variation starts with the Expectation step: the initial means of the mixture components must be provided; initial weights and covariance matrices are optional.
 /// </summary>
 /// <param name="samples">Samples from which the Gaussian mixture model will be estimated, one sample per row.</param>
 /// <param name="means0">Initial means of the mixture components, one row per component.</param>
 /// <param name="covs0">Optional initial covariance matrices of the mixture components; can be null.</param>
 /// <param name="weights0">Optional initial weights of the mixture components; can be null.</param>
 /// <param name="loglikelihoods">Optional output matrix containing a likelihood logarithm value for each sample; can be null.</param>
 /// <param name="labels">Optional output index of the most probable mixture component for each sample; can be null.</param>
 /// <param name="probs">Optional output matrix of posterior probabilities of each mixture component given each sample; can be null.</param>
 /// <returns>True if the model was trained successfully.</returns>
 public bool TrainE(IInputArray samples, IInputArray means0, IInputArray covs0, IInputArray weights0,
                    IOutputArray loglikelihoods, IOutputArray labels, IOutputArray probs)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaMeans0 = means0.GetInputArray())
             using (InputArray iaCovs0 = covs0 == null ? InputArray.GetEmpty() : covs0.GetInputArray())
                 using (InputArray iaWeights = weights0 == null ? InputArray.GetEmpty() : weights0.GetInputArray())
                     using (OutputArray oaLogLikelihood = loglikelihoods == null ? OutputArray.GetEmpty() : loglikelihoods.GetOutputArray())
                         using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                             using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                                 return MlInvoke.CvEMTrainE(_ptr, iaSamples, iaMeans0, iaCovs0, iaWeights, oaLogLikelihood, oaLabels, oaProbs);
 }
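A hedged sketch of calling TrainE on Emgu CV's EM model: only the initial means are required, and the null arguments let the remaining initial parameters and outputs default. Matrix sizes are illustrative and contents are assumed filled elsewhere:

 using (EM em = new EM())
 using (Mat samples = new Mat(100, 2, DepthType.Cv32F, 1)) // 100 two-dimensional samples
 using (Mat means0 = new Mat(2, 2, DepthType.Cv64F, 1))    // one initial mean per component
 using (Mat labels = new Mat())
 {
     em.ClustersNumber = 2;
     bool trained = em.TrainE(samples, means0, null, null, null, labels, null);
 }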
Example #14
 /// <summary>
 /// Draws a set of Charuco corners
 /// </summary>
 /// <param name="image">image input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="charucoCorners">vector of detected charuco corners</param>
 /// <param name="charucoIds">list of identifiers for each corner in charucoCorners</param>
 /// <param name="cornerColor">color of the square surrounding each corner</param>
 public static void DrawDetectedCornersCharuco(
     IInputOutputArray image,
     IInputArray charucoCorners,
     IInputArray charucoIds,
     MCvScalar cornerColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
             using (InputArray iaCharucoIds = charucoIds == null ? InputArray.GetEmpty() : charucoIds.GetInputArray())
             {
                 cveArucoDrawDetectedCornersCharuco(ioaImage, iaCharucoCorners, iaCharucoIds, ref cornerColor);
             }
 }
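For instance, continuing the ChArUco sketch from Example #5 (and again assuming the method lives on ArucoInvoke), the interpolated corners can be painted in red, a BGR scalar, onto the input image:

 ArucoInvoke.DrawDetectedCornersCharuco(image, charucoCorners, charucoIds, new MCvScalar(0, 0, 255));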
Example #15
 /// <summary>
 /// For each query descriptor, finds the training descriptors not farther than the specified distance.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="matches">Found matches.</param>
 /// <param name="maxDistance">Threshold for the distance between matched descriptors. Distance means here metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in Pixels)!</param>
 /// <param name="masks">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="compactResult">Parameter used when the mask (or masks) is not empty. If compactResult is false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the matches vector does not contain matches for fully masked-out query descriptors.</param>
 public void RadiusMatch(
     IInputArray queryDescriptors,
     VectorOfVectorOfDMatch matches,
     float maxDistance,
     IInputArray masks  = null,
     bool compactResult = false)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaMasks = masks == null ? InputArray.GetEmpty() : masks.GetInputArray())
         {
             CvInvoke.cveDescriptorMatcherRadiusMatch2(_descriptorMatcherPtr, iaQueryDescriptors, matches, maxDistance, iaMasks, compactResult);
         }
 }
Example #16
 /// <summary>
 /// Finds the best match for each descriptor from a query set.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">If a query descriptor is masked out in mask , no match is added for this descriptor. So, matches size may be smaller than the query descriptors count.</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 public void Match(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     VectorOfDMatch matches,
     IInputArray mask = null)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptor = trainDescriptors.GetInputArray())
             using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             {
                 CvInvoke.cveDescriptorMatcherMatch1(_descriptorMatcherPtr, iaQueryDescriptors, iaTrainDescriptor, matches, iaMask);
             }
 }
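This one-shot form needs no Add() call. A minimal sketch, assuming float descriptors (e.g. from DAISY), hence L2 distance, with the descriptor Mats computed elsewhere:

 using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
 using (VectorOfDMatch matches = new VectorOfDMatch())
 {
     // One best match per query row; mask = null permits every query/train pair.
     matcher.Match(queryDescriptors, trainDescriptors, matches);
     for (int i = 0; i < matches.Size; i++)
     {
         MDMatch m = matches[i]; // m.QueryIdx, m.TrainIdx, m.Distance
     }
 }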
Example #17
 /// <summary>
 /// Draw a set of detected ChArUco Diamond markers
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="diamondCorners">positions of diamond corners in the same format returned by detectCharucoDiamond(). (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="diamondIds">vector of identifiers for diamonds in diamondCorners, in the same format returned by detectCharucoDiamond() (e.g. VectorOfMat ). Optional, if not provided, ids are not painted. </param>
 /// <param name="borderColor">color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one.</param>
 public static void DrawDetectedDiamonds(
     IInputOutputArray image,
     IInputArrayOfArrays diamondCorners,
     IInputArray diamondIds,
     MCvScalar borderColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaDiamondCorners = diamondCorners.GetInputArray())
             using (InputArray iaDiamondIds = diamondIds == null ? InputArray.GetEmpty() : diamondIds.GetInputArray())
             {
                 cveArucoDrawDetectedDiamonds(ioaImage, iaDiamondCorners, iaDiamondIds, ref borderColor);
             }
 }
Example #18
 /// <summary>
 /// Estimate the Gaussian mixture parameters from a samples set, starting with the Expectation step. Works like the TrainE variant in Example #13, but discards the success flag and re-binds the underlying StatModel and Algorithm pointers.
 /// </summary>
 /// <param name="samples">Samples from which the Gaussian mixture model will be estimated, one sample per row.</param>
 /// <param name="means0">Initial means of the mixture components, one row per component.</param>
 /// <param name="covs0">Optional initial covariance matrices; can be null.</param>
 /// <param name="weights0">Optional initial weights; can be null.</param>
 /// <param name="loglikelihoods">Optional output likelihood logarithm value for each sample; can be null.</param>
 /// <param name="labels">Optional output most-probable component index for each sample; can be null.</param>
 /// <param name="probs">Optional output posterior probabilities of each component given each sample; can be null.</param>
 public void trainE(IInputArray samples, IInputArray means0, IInputArray covs0, IInputArray weights0,
                    IOutputArray loglikelihoods, IOutputArray labels, IOutputArray probs)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaMeans0 = means0.GetInputArray())
             using (InputArray iaCovs0 = covs0 == null ? InputArray.GetEmpty() : covs0.GetInputArray())
                 using (InputArray iaWeights = weights0 == null ? InputArray.GetEmpty() : weights0.GetInputArray())
                     using (OutputArray oaLogLikelihood = loglikelihoods == null ? OutputArray.GetEmpty() : loglikelihoods.GetOutputArray())
                         using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                             using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                             {
                                 MlInvoke.CvEMTrainE(_ptr, iaSamples, iaMeans0, iaCovs0, iaWeights, oaLogLikelihood, oaLabels,
                                                     oaProbs, ref _statModel, ref _algorithm);
                             }
 }
Example #19
 /// <summary>
 /// For each query descriptor, finds the training descriptors not farther than the specified distance.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">Found matches.</param>
 /// <param name="maxDistance">Threshold for the distance between matched descriptors. Distance means here metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in Pixels)!</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="compactResult">Parameter used when the mask (or masks) is not empty. If compactResult is false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the matches vector does not contain matches for fully masked-out query descriptors.</param>
 public void RadiusMatch(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     VectorOfVectorOfDMatch matches,
     float maxDistance,
     IInputArrayOfArrays mask = null,
     bool compactResult       = false)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray())
             using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             {
                 Features2DInvoke.cveDescriptorMatcherRadiusMatch1(_descriptorMatcherPtr, iaQueryDescriptors, iaTrainDescriptors, matches, maxDistance, iaMask, compactResult);
             }
 }
Example #20
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observerdImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 /// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public static void DrawMatches(
     IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
     IInputArray observedImage, VectorOfKeyPoint observedKeyPoints,
     VectorOfVectorOfDMatch matches,
     IInputOutputArray result,
     MCvScalar matchColor, MCvScalar singlePointColor,
     IInputArray mask       = null,
     KeypointDrawType flags = KeypointDrawType.Default)
 {
     using (InputArray iaModelImage = modelImage.GetInputArray())
         using (InputArray iaObservedImage = observedImage.GetInputArray())
             using (InputOutputArray ioaResult = result.GetInputOutputArray())
                 using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                     Features2DInvoke.drawMatchedFeatures(iaObservedImage, observedKeyPoints, iaModelImage,
                                                          modelKeypoints, matches, ioaResult, ref matchColor, ref singlePointColor, iaMask, flags);
 }
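A short usage sketch; in Emgu CV this static method lives on Features2DToolbox, and the images, keypoints, and matches are assumed to come from the earlier detection and matching examples. The colors are BGR scalars chosen for illustration:

 using (Mat result = new Mat())
 {
     Features2DToolbox.DrawMatches(
         modelImage, modelKeypoints, observedImage, observedKeyPoints,
         matches, result,
         new MCvScalar(0, 255, 0),  // green correspondence lines
         new MCvScalar(255, 0, 0)); // blue keypoint markers
     // mask = null draws all matches; flags defaults to KeypointDrawType.Default.
 }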
Example #21
 /// <summary>
 /// Collect corresponding 2d and 3d points based on correspondences and mask
 /// </summary>
 /// <param name="cols">Correspondence-position per line in line-bundle-space</param>
 /// <param name="srcLocations">The source image location</param>
 /// <param name="pts2d">2d points</param>
 /// <param name="pts3d">3d points</param>
 /// <param name="mask">mask containing non-zero values for the elements to be retained</param>
 public static void ConvertCorrespondencies(
     IInputArray cols,
     IInputArray srcLocations,
     IOutputArray pts2d,
     IInputOutputArray pts3d = null,
     IInputArray mask        = null)
 {
     using (InputArray iaCols = cols.GetInputArray())
         using (InputArray iaSrcLocations = srcLocations.GetInputArray())
             using (OutputArray oaPts2d = pts2d.GetOutputArray())
                 using (InputOutputArray ioaPts3d = pts3d == null ? InputOutputArray.GetEmpty() : pts3d.GetInputOutputArray())
                     using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                     {
                         cveConvertCorrespondencies(iaCols, iaSrcLocations, oaPts2d, ioaPts3d, iaMask);
                     }
 }
Example #22
 /// <summary>
 /// Creates training data from in-memory arrays.
 /// </summary>
 /// <param name="samples">Matrix of samples. It should have CV_32F type.</param>
 /// <param name="layoutType">Type of the layout.</param>
 /// <param name="response">Matrix of responses. If the responses are scalar, they should be stored as a single row or as a single column. The matrix should have type CV_32F or CV_32S (in the former case the responses are considered as ordered by default; in the latter case - as categorical)</param>
 /// <param name="varIdx">Vector specifying which variables to use for training. It can be an integer vector (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of active variables.</param>
 /// <param name="sampleIdx">Vector specifying which samples to use for training. It can be an integer vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask of training samples.</param>
 /// <param name="sampleWeight">Optional vector with weights for each sample. It should have CV_32F type.</param>
 /// <param name="varType">Optional vector of type CV_8U and size &lt;number_of_variables_in_samples&gt; + &lt;number_of_variables_in_responses&gt;, containing types of each input and output variable.</param>
 public TrainData(
     IInputArray samples, DataLayoutType layoutType, IInputArray response,
     IInputArray varIdx       = null, IInputArray sampleIdx = null,
     IInputArray sampleWeight = null, IInputArray varType   = null
     )
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaResponse = response.GetInputArray())
             using (InputArray iaVarIdx = varIdx == null ? InputArray.GetEmpty() : varIdx.GetInputArray())
                 using (InputArray iaSampleIdx = sampleIdx == null ? InputArray.GetEmpty() : sampleIdx.GetInputArray())
                     using (InputArray iaSampleWeight = sampleWeight == null ? InputArray.GetEmpty() : sampleWeight.GetInputArray())
                         using (InputArray iaVarType = varType == null ? InputArray.GetEmpty() : varType.GetInputArray())
                         {
                             _ptr = MlInvoke.cveTrainDataCreate(iaSamples, layoutType, iaResponse, iaVarIdx, iaSampleIdx, iaSampleWeight,
                                                                iaVarType);
                         }
 }
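A hedged sketch: wrap in-memory arrays as TrainData and hand it to a model, assuming the StatModel Train overload that accepts TrainData and RowSample layout; sizes are illustrative and contents are assumed filled elsewhere:

 using (Mat samples = new Mat(150, 4, DepthType.Cv32F, 1))   // one sample per row
 using (Mat responses = new Mat(150, 1, DepthType.Cv32S, 1)) // categorical labels (CV_32S)
 using (TrainData trainData = new TrainData(samples, DataLayoutType.RowSample, responses))
 using (SVM svm = new SVM())
 {
     svm.Train(trainData);
 }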
Example #23
 /// <summary>
 /// Unwrap the wrapped phase map to remove phase ambiguities.
 /// </summary>
 /// <param name="wrappedPhaseMap">The wrapped phase map computed from the pattern.</param>
 /// <param name="unwrappedPhaseMap">The unwrapped phase map used to find correspondences between the two devices.</param>
 /// <param name="camSize">Resolution of the camera.</param>
 /// <param name="shadowMask">Mask used to discard shadow regions.</param>
 public void UnwrapPhaseMap(
     IInputArray wrappedPhaseMap,
     IOutputArray unwrappedPhaseMap,
     Size camSize,
     IInputArray shadowMask = null)
 {
     using (InputArray iaWrappedPhaseMap = wrappedPhaseMap.GetInputArray())
         using (OutputArray oaUnwrappedPhaseMap = unwrappedPhaseMap.GetOutputArray())
             using (InputArray iaShadowMask = shadowMask == null ? InputArray.GetEmpty() : shadowMask.GetInputArray())
             {
                 StructuredLightInvoke.cveSinusoidalPatternUnwrapPhaseMap(
                     _ptr,
                     iaWrappedPhaseMap,
                     oaUnwrappedPhaseMap,
                     ref camSize,
                     iaShadowMask);
             }
 }
Example #24
 /// <summary>
 /// Compute a wrapped phase map from sinusoidal patterns.
 /// </summary>
 /// <param name="patternImages">Input data to compute the wrapped phase map.</param>
 /// <param name="wrappedPhaseMap">Wrapped phase map obtained through one of the three methods.</param>
 /// <param name="shadowMask">Mask used to discard shadow regions.</param>
 /// <param name="fundamental">Fundamental matrix used to compute epipolar lines and ease the matching step.</param>
 public void ComputePhaseMap(
     IInputArrayOfArrays patternImages,
     IOutputArray wrappedPhaseMap,
     IOutputArray shadowMask = null,
     IInputArray fundamental = null)
 {
     using (InputArray iaPatternImages = patternImages.GetInputArray())
         using (OutputArray oaWrappedPhaseMap = wrappedPhaseMap.GetOutputArray())
             using (OutputArray oaShadowMask = shadowMask == null ? OutputArray.GetEmpty() : shadowMask.GetOutputArray())
                 using (InputArray iaFundamental = fundamental == null ? InputArray.GetEmpty() : fundamental.GetInputArray())
                 {
                     StructuredLightInvoke.cveSinusoidalPatternComputePhaseMap(
                         _ptr,
                         iaPatternImages,
                         oaWrappedPhaseMap,
                         oaShadowMask,
                         iaFundamental);
                 }
 }
Example #25
 /// <summary>
 /// For each query descriptor, finds the training descriptors not farther than the specified distance (asynchronous version).
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">Matches array stored in GPU memory. Internal representation is not defined.</param>
 /// <param name="maxDistance">Threshold for the distance between matched descriptors. Distance means here metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in Pixels)!</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="stream">CUDA stream.</param>
 public void RadiusMatchAsync(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     IOutputArray matches,
     float maxDistance,
     IInputArray mask = null,
     Stream stream    = null)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray())
             using (OutputArray oaMatches = matches.GetOutputArray())
                 using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                     CudaInvoke.cveCudaDescriptorMatcherRadiusMatchAsync1(
                         _ptr,
                         iaQueryDescriptors,
                         iaTrainDescriptors,
                         oaMatches,
                         maxDistance,
                         iaMask,
                         stream);
 }
Example #26
 /// <summary>
 /// Detect ChArUco Diamond markers
 /// </summary>
 /// <param name="image">input image necessary for corner subpixel.</param>
 /// <param name="markerCorners">list of detected marker corners from detectMarkers function.</param>
 /// <param name="markerIds">list of marker ids in markerCorners.</param>
 /// <param name="squareMarkerLengthRate">rate between square and marker length: squareMarkerLengthRate = squareLength / markerLength.The real units are not necessary.</param>
 /// <param name="diamondCorners">output list of detected diamond corners (4 corners per diamond). The order is the same than in marker corners: top left, top right, bottom right and bottom left. Similar format than the corners returned by detectMarkers(e.g VectorOfVectorOfPointF ).</param>
 /// <param name="diamondIds">ids of the diamonds in diamondCorners. The id of each diamond is in fact of type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the diamond.</param>
 /// <param name="cameraMatrix">Optional camera calibration matrix.</param>
 /// <param name="distCoeffs">Optional camera distortion coefficients.</param>
 public static void DetectCharucoDiamond(
     IInputArray image,
     IInputArray markerCorners,
     IInputArray markerIds,
     float squareMarkerLengthRate,
     IOutputArray diamondCorners,
     IOutputArray diamondIds,
     IInputArray cameraMatrix = null,
     IInputArray distCoeffs = null)
 {
     using (InputArray iaImage = image.GetInputArray())
     using (InputArray iaMarkerCorners = markerCorners.GetInputArray())
     using (InputArray iaMarkerIds = markerIds.GetInputArray())
     using (OutputArray oaDiamondCorners = diamondCorners.GetOutputArray())
     using (OutputArray oaDiamondIds = diamondIds.GetOutputArray())
     using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
     using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
     {
         cveArucoDetectCharucoDiamond(iaImage, iaMarkerCorners, iaMarkerIds, squareMarkerLengthRate, oaDiamondCorners, oaDiamondIds, iaCameraMatrix, iaDistCoeffs);
     }
 }
Example #27
 /// <summary>
 /// Decodes the structured light pattern, generating a disparity map.
 /// </summary>
 /// <param name="structuredLightPattern">The strucutred light pattern</param>
 /// <param name="patternImages">The acquired pattern images to decode VectorOfVectorOfMat), loaded as grayscale and previously rectified.</param>
 /// <param name="disparityMap">The decoding result: a CV_64F Mat at image resolution, storing the computed disparity map.</param>
 /// <param name="blackImages">The all-black images needed for shadowMasks computation.</param>
 /// <param name="whiteImages">The all-white images needed for shadowMasks computation.</param>
 /// <param name="flags">Flags setting decoding algorithms.</param>
 /// <returns>True if successful.</returns>
 public static bool Decode(
     this IStructuredLightPattern structuredLightPattern,
     VectorOfVectorOfMat patternImages,
     IOutputArray disparityMap,
     IInputArrayOfArrays blackImages = null,
     IInputArrayOfArrays whiteImages = null,
     DecodeFlag flags = DecodeFlag.Decode3dUnderworld)
 {
     using (OutputArray oaDisparityMap = disparityMap.GetOutputArray())
         using (InputArray iaBlackImages = blackImages == null ? InputArray.GetEmpty() : blackImages.GetInputArray())
             using (InputArray iaWhiteImages = whiteImages == null ? InputArray.GetEmpty() : whiteImages.GetInputArray())
             {
                 return cveStructuredLightPatternDecode(
                            structuredLightPattern.StructuredLightPatternPtr,
                            patternImages,
                            oaDisparityMap,
                            iaBlackImages,
                            iaWhiteImages,
                            flags);
             }
 }
Example #28
 /// <summary>
 /// Finds the best match for each descriptor from a query set (asynchronous version).
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">Matches array stored in GPU memory. Internal representation is not defined. Use DescriptorMatcher::matchConvert method to retrieve results in standard representation.</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="stream">CUDA stream.</param>
 public void MatchAsync(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     IOutputArray matches,
     IInputArray mask = null,
     Stream stream    = null)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptor = trainDescriptors.GetInputArray())
             using (OutputArray oaMatches = matches.GetOutputArray())
                 using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                 {
                     CudaInvoke.cveCudaDescriptorMatcherMatchAsync1(
                         _ptr,
                         iaQueryDescriptors,
                         iaTrainDescriptor,
                         oaMatches,
                         iaMask,
                         stream
                         );
                 }
 }
Example #29
 /// <summary>
 /// Finds the k best matches for each descriptor from a query set.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="k">Count of best matches found per each query descriptor or less if a query descriptor has less than k possible matches in total.</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="compactResult">Parameter used when the mask (or masks) is not empty. If compactResult is false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the matches vector does not contain matches for fully masked-out query descriptors.</param>
 public void KnnMatch(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     VectorOfVectorOfDMatch matches,
     int k,
     IInputArray mask   = null,
     bool compactResult = false)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray())
             using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             {
                 Features2DInvoke.cveDescriptorMatcherKnnMatch1(
                     _descriptorMatcherPtr,
                     iaQueryDescriptors,
                     iaTrainDescriptors,
                     matches,
                     k,
                     iaMask,
                     compactResult);
             }
 }
Example #30
 /// <summary>
 /// Calculates Optical Flow using NVIDIA Optical Flow SDK.
 /// NVIDIA GPUs starting with Turing contain a dedicated hardware accelerator for computing optical flow vectors between pairs of images.
 /// The optical flow hardware accelerator generates block-based optical flow vectors.
 /// The size of the block depends on hardware in use, and can be queried using the function getGridSize().
 /// The block-based flow vectors generated by the hardware can be converted to dense representation (i.e. per-pixel flow vectors) using the upSampler() helper function, if needed.
 /// The flow vectors are stored in CV_16SC2 format with x and y components of each flow vector in 16-bit signed fixed point representation S10.5.
 /// </summary>
 /// <param name="nvidiaOpticalFlow">The nvidia optical flow object</param>
 /// <param name="inputImage">Input image</param>
 /// <param name="referenceImage">Reference image of the same size and the same type as input image.</param>
 /// <param name="flow">A buffer consisting of inputImage.Size() / getGridSize() flow vectors in CV_16SC2 format.</param>
 /// <param name="stream">Stream for the asynchronous version.</param>
 /// <param name="hint">Hint buffer if client provides external hints. Must have same size as flow buffer. Caller can provide flow vectors as hints for optical flow calculation.</param>
 /// <param name="cost">Cost buffer contains numbers indicating the confidence associated with each of the generated flow vectors. Higher the cost, lower the confidence. Cost buffer is of type CV_32SC1.</param>
 public static void Calc(
     this INvidiaOpticalFlow nvidiaOpticalFlow,
     IInputArray inputImage,
     IInputArray referenceImage,
     IInputOutputArray flow,
     Stream stream     = null,
     IInputArray hint  = null,
     IOutputArray cost = null)
 {
     using (InputArray iaInputImage = inputImage.GetInputArray())
         using (InputArray iaReferenceImage = referenceImage.GetInputArray())
             using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
                 using (InputArray iaHint = (hint == null ? InputArray.GetEmpty() : hint.GetInputArray()))
                     using (OutputArray oaCost = (cost == null ? OutputArray.GetEmpty() : cost.GetOutputArray()))
                         cudaNvidiaOpticalFlowCalc(
                             nvidiaOpticalFlow.NvidiaOpticalFlowPtr,
                             iaInputImage,
                             iaReferenceImage,
                             ioaFlow,
                             (stream == null) ? IntPtr.Zero : stream.Ptr,
                             iaHint,
                             oaCost);
 }