Example No. 1
 /// <summary>
 /// Applies an affine transformation to an image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="mapMatrix">2x3 transformation matrix</param>
 /// <param name="dsize">Size of the output image.</param>
 /// <param name="interpMethod">Interpolation method</param>
 /// <param name="warpMethod">Warp method</param>
 /// <param name="borderMode">Pixel extrapolation method</param>
 /// <param name="borderValue">A value used to fill outliers</param>
 public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaMapMatrix = mapMatrix.GetInputArray())
       cveWarpAffine(iaSrc, oaDst, iaMapMatrix, ref dsize, (int)interpMethod | (int)warpMethod, borderMode, ref borderValue);
 }
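A minimal usage sketch for the wrapper above, assuming Emgu CV's CvInvoke.Imread and CvInvoke.GetRotationMatrix2D and a hypothetical input file name; it rotates an image 30 degrees about its center:
 // Usage sketch (assumptions noted above): build a 2x3 rotation matrix and warp.
 using (Mat src = CvInvoke.Imread("input.png", ImreadModes.Color))
 using (Mat rotation = new Mat())
 using (Mat dst = new Mat())
 {
    PointF center = new PointF(src.Width / 2f, src.Height / 2f);
    CvInvoke.GetRotationMatrix2D(center, 30, 1.0, rotation); // 2x3 affine matrix
    CvInvoke.WarpAffine(src, dst, rotation, src.Size);       // defaults: linear interpolation
 }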
 /// <summary>
 /// Apply the filter to the disparity image
 /// </summary>
 /// <param name="disparity">The input disparity map</param>
 /// <param name="image">The image</param>
 /// <param name="dst">The output disparity map, should have the same size as the input disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray disparity, IInputArray image, IOutputArray dst, Stream stream = null)
 {
    using (InputArray iaDisparity = disparity.GetInputArray())
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       CudaInvoke.cudaDisparityBilateralFilterApply(this, iaDisparity, iaImage, oaDst, stream);
 }
Example No. 3
 /// <summary>
 /// Detects objects of different sizes in the input image.
 /// </summary>
 /// <param name="image">Matrix of type CV_8U containing an image where objects should be detected.</param>
 /// <param name="objects">Buffer to store detected objects (rectangles).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void DetectMultiScale(IInputArray image, IOutputArray objects, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaObjects = objects.GetOutputArray())
       CudaInvoke.cudaCascadeClassifierDetectMultiScale(_ptr, iaImage, oaObjects,
          stream == null ? IntPtr.Zero : stream.Ptr);
 }
 /// <summary>
 /// Find the good features to track
 /// </summary>
 /// <param name="image">The input image</param>
 /// <param name="corners">The output vector of detected corners</param>
 /// <param name="mask">The optional region of interest mask</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Detect(IInputArray image, IOutputArray corners, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaCorners = corners.GetOutputArray())
    using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
       CudaInvoke.cudaCornersDetectorDetect(_ptr, iaImage, oaCorners, iaMask, stream);
 }
Example No. 5
 /// <summary>
 /// Grab and decode the next frame from the video source.
 /// </summary>
 /// <param name="frame">The output array into which the next frame is decoded</param>
 /// <returns>False if no more frames are available</returns>
 public bool NextFrame(IOutputArray frame)
 {
    using (OutputArray oaFrame = frame.GetOutputArray())
    {
       return CudaInvoke.cudaVideoReaderNextFrame(_ptr, oaFrame);
    }
 }
Example No. 6
      /*
      /// <summary>
      /// Create an auto-tuned flann index
      /// </summary>
      /// <param name="values">A row by row matrix of descriptors</param>
      /// <param name="targetPrecision">Precision desired, use 0.9 if not sure</param>
      /// <param name="buildWeight">build tree time weighting factor, use 0.01 if not sure</param>
      /// <param name="memoryWeight">index memory weighting factor, use 0 if not sure</param>
      /// <param name="sampleFraction">what fraction of the dataset to use for autotuning, use 0.1 if not sure</param>
      public Index(IInputArray values, float targetPrecision, float buildWeight, float memoryWeight, float sampleFraction)
      {
         using (InputArray iaValues = values.GetInputArray())
            _ptr = CvFlannIndexCreateAutotuned(iaValues, targetPrecision, buildWeight, memoryWeight, sampleFraction);
      }*/
      #endregion

      /// <summary>
      /// Perform k-nearest-neighbours (KNN) search
      /// </summary>
      /// <param name="queries">A row by row matrix of descriptors to be query for nearest neighbours</param>
      /// <param name="indices">The result of the indices of the k-nearest neighbours</param>
      /// <param name="squareDistances">The square of the Eculidean distance between the neighbours</param>
      /// <param name="knn">Number of nearest neighbors to search for</param>
      /// <param name="checks">The number of times the tree(s) in the index should be recursively traversed. A
      /// higher value for this parameter would give better search precision, but also take more
      /// time. If automatic configuration was used when the index was created, the number of
      /// checks required to achieve the specified precision was also computed, in which case
      /// this parameter is ignored </param>
      public void KnnSearch(IInputArray queries, IOutputArray indices, IOutputArray squareDistances, int knn, int checks)
      {
         using (InputArray iaQueries = queries.GetInputArray())
         using (OutputArray oaIndices = indices.GetOutputArray())
         using (OutputArray oaSquareDistances = squareDistances.GetOutputArray())
             CvFlannIndexKnnSearch(_ptr, iaQueries, oaIndices, oaSquareDistances, knn, checks);
      }
Example No. 7
 /// <summary>
 ///  This function is similar to cvCalcBackProjectPatch. It slides through the image, compares overlapped patches of size wxh with templ using the specified method and stores the comparison results to result
 /// </summary>
 /// <param name="image">Image where the search is running. It should be 8-bit or 32-bit floating-point</param>
 /// <param name="templ">Searched template; must be not greater than the source image and the same data type as the image</param>
 /// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be W-w+1xH-h+1.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>  
 public void Match(IInputArray image, IInputArray templ, IOutputArray result, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaTempl = templ.GetInputArray())
    using (OutputArray oaResult = result.GetOutputArray())
       CudaInvoke.cudaTemplateMatchingMatch(_ptr, iaImage, iaTempl, oaResult, stream);
 }
Example No. 8
 /// <summary>
 /// Reconstructs the selected image area from the pixel near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video.
 /// </summary>
 /// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
 /// <param name="mask">The inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted</param>
 /// <param name="dst">The output image of the same format and the same size as input</param>
 /// <param name="flags">The inpainting method</param>
 /// <param name="inpaintRadius">The radius of circular neighborhood of each point inpainted that is considered by the algorithm</param>
 public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaMask = mask.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveInpaint(iaSrc, iaMask, oaDst, inpaintRadius, flags);
 }
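A short usage sketch, assuming Emgu CV's CvInvoke.Imread and hypothetical file names; white pixels in the mask mark the area to reconstruct:
 // Usage sketch: remove the masked region using the Telea inpainting method.
 using (Mat damaged = CvInvoke.Imread("photo.png", ImreadModes.Color))
 using (Mat mask = CvInvoke.Imread("mask.png", ImreadModes.Grayscale))
 using (Mat restored = new Mat())
 {
    CvInvoke.Inpaint(damaged, mask, restored, 3.0, CvEnum.InpaintType.Telea);
 }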
Example No. 9
 /// <summary>
 /// Finds perspective transformation H=||h_ij|| between the source and the destination planes
 /// </summary>
 /// <param name="srcPoints">Point coordinates in the original plane</param>
 /// <param name="dstPoints">Point coordinates in the destination plane</param>
 /// <param name="homography">The output homography matrix</param>
 /// <param name="method">FindHomography method</param>
 /// <param name="ransacReprojThreshold">
 /// The maximum allowed reprojection error to treat a point pair as an inlier. 
 /// The parameter is only used in RANSAC-based homography estimation. 
 /// E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3
 /// </param>
 /// <param name="mask">Optional output mask set by a robust method ( CV_RANSAC or CV_LMEDS ). Note that the input mask values are ignored.</param>
 public static void FindHomography(
    PointF[] srcPoints,
    PointF[] dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
 {
    GCHandle srcHandle = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
    GCHandle dstHandle = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
    try
    {
       using (
          Mat srcPointMatrix = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, srcHandle.AddrOfPinnedObject(), 8))
       using (
          Mat dstPointMatrix = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, dstHandle.AddrOfPinnedObject(), 8))
       {
          CvInvoke.FindHomography(srcPointMatrix, dstPointMatrix, homography, method, ransacReprojThreshold, mask);
       }
    }
    finally
    {
       srcHandle.Free();
       dstHandle.Free();
    }
 }
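A usage sketch with four hand-picked correspondences (real code would use matched feature points); only the method shown above is assumed:
 // Usage sketch: estimate a homography with RANSAC from point pairs.
 PointF[] srcPts = { new PointF(0, 0), new PointF(100, 0), new PointF(100, 100), new PointF(0, 100) };
 PointF[] dstPts = { new PointF(10, 12), new PointF(108, 8), new PointF(112, 110), new PointF(8, 105) };
 using (Mat homography = new Mat())
 {
    CvInvoke.FindHomography(srcPts, dstPts, homography, CvEnum.HomographyMethod.Ransac, 3);
 }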
Example No. 10
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaKeypoints = keypoints.GetOutputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
       CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, iaMask, stream);
 }
Example No. 11
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
       Feature2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
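A usage sketch, assuming an ORB feature detector (the class is named ORBDetector in Emgu CV 3.x and ORB in 4.x) and a hypothetical image file:
 // Usage sketch: detect keypoints and compute their descriptors in one call.
 using (ORB orb = new ORB())
 using (Mat img = CvInvoke.Imread("scene.png", ImreadModes.Grayscale))
 using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
 using (Mat descriptors = new Mat())
 {
    orb.DetectAndCompute(img, null, keyPoints, descriptors, false);
 }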
Example No. 12
 /// <summary>
 /// Computes disparity map for the input rectified stereo pair.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(IInputArray left, IInputArray right, IOutputArray disparity, Stream stream = null)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CudaInvoke.cudaStereoBMFindStereoCorrespondence(_ptr, iaLeft, iaRight, oaDisparity, stream);
 }
Example No. 13
 /// <summary>
 /// Computes disparity map for the specified stereo pair
 /// </summary>
 /// <param name="matcher">The stereo matcher</param>
 /// <param name="left">Left 8-bit single-channel image.</param>
 /// <param name="right">Right image of the same size and the same type as the left one.</param>
 /// <param name="disparity">Output disparity map. It has the same size as the input images. Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map</param>
 public static void Compute(this IStereoMatcher matcher, IInputArray left, IInputArray right, IOutputArray disparity)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CvStereoMatcherCompute(matcher.StereoMatcherPtr, iaLeft, iaRight, oaDisparity);
 }
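A usage sketch, assuming Emgu CV's StereoBM block matcher (constructor arguments: number of disparities, block size) and a rectified image pair:
 // Usage sketch: compute a disparity map from a rectified stereo pair.
 using (StereoBM stereo = new StereoBM(64, 21))
 using (Mat left = CvInvoke.Imread("left.png", ImreadModes.Grayscale))
 using (Mat right = CvInvoke.Imread("right.png", ImreadModes.Grayscale))
 using (Mat disparity = new Mat())
 {
    stereo.Compute(left, right, disparity); // 16-bit fixed-point, 4 fractional bits
 }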
Example No. 14
 /// <summary>
 /// Simple one-line Adaptive Manifold Filter call.
 /// </summary>
 /// <param name="joint">Joint (also called guide) image with any number of channels</param>
 /// <param name="src">Filtering image with any number of channels</param>
 /// <param name="dst">Output image</param>
 /// <param name="sigmaS">Spatial standard deviation</param>
 /// <param name="sigmaR">Color space standard deviation, similar to the color sigma of the bilateral filter</param>
 /// <param name="adjustOutliers">If true, the filter performs the outlier adjustment described in the original paper</param>
 public static void AmFilter(IInputArray joint, IInputArray src, IOutputArray dst, double sigmaS, double sigmaR,
    bool adjustOutliers)
 {
    using (InputArray iaJoint = joint.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveAmFilter(iaJoint, iaSrc, oaDst, sigmaS, sigmaR, adjustOutliers);
 }
Example No. 15
 /// <summary>
 /// Simple one-line Domain Transform filter call.
 /// </summary>
 /// <param name="guide">Guide image (also called joint image) with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels</param>
 /// <param name="src">Filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels</param>
 /// <param name="dst">Destination image</param>
 /// <param name="sigmaSpatial">Filter sigma in the coordinate space, similar to the spatial sigma of the bilateral filter</param>
 /// <param name="sigmaColor">Filter sigma in the color space, similar to the color sigma of the bilateral filter</param>
 /// <param name="mode">One of the three filtering modes (NC, IC or RF) described in the original article</param>
 /// <param name="numIters">Number of iterations used for filtering, 3 is usually enough</param>
 public static void DtFilter(IInputArray guide, IInputArray src, IOutputArray dst,
    double sigmaSpatial, double sigmaColor, int mode, int numIters)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveDtFilter(iaGuide, iaSrc, oaDst, sigmaSpatial, sigmaColor, mode, numIters);
 }
Example No. 16
 /// <summary>
 /// Simple one-line Guided Filter call.
 /// </summary>
 /// <param name="guide">Guide image with up to 3 channels</param>
 /// <param name="src">Filtering image with any number of channels</param>
 /// <param name="dst">Output image</param>
 /// <param name="radius">Radius of the guided filter</param>
 /// <param name="eps">Regularization term of the guided filter; eps squared is similar to the color sigma of the bilateral filter</param>
 /// <param name="dDepth">Optional depth of the output image</param>
 public static void GuidedFilter(IInputArray guide, IInputArray src, IOutputArray dst, int radius, double eps,
    int dDepth)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveGuidedFilter(iaGuide, iaSrc, oaDst, radius, eps, dDepth);
 }
Example No. 17
 /// <summary>
 /// Extracts pixels from src:
 /// dst(x, y) = src(x + center.x - (width(dst)-1)*0.5, y + center.y - (height(dst)-1)*0.5)
 /// where the values of pixels at non-integer coordinates are retrieved using bilinear interpolation. Every channel of multiple-channel images is processed independently. Whereas the rectangle center must be inside the image, the whole rectangle may be partially occluded. In this case, the replication border mode is used to get pixel values beyond the image boundaries.
 /// </summary>
 /// <param name="image">Source image</param>
 /// <param name="patchSize">Size of the extracted patch.</param>
 /// <param name="patch">Extracted rectangle</param>
 /// <param name="patchType">Depth of the extracted pixels. By default, they have the same depth as <paramref name="image"/>.</param>
 /// <param name="center">Floating point coordinates of the extracted rectangle center within the source image. The center must be inside the image.</param>
 public static void GetRectSubPix(IInputArray image, Size patchSize, PointF center, IOutputArray patch, DepthType patchType = DepthType.Default)
 {
    using (InputArray iaSrc = image.GetInputArray())
    using (OutputArray oaPatch = patch.GetOutputArray())
    {
       cveGetRectSubPix(iaSrc, ref patchSize, ref center, oaPatch, patchType);
    }
 }
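A usage sketch with a hypothetical sub-pixel center; bilinear interpolation fills the patch:
 // Usage sketch: extract a 21x21 patch centered at a sub-pixel location.
 using (Mat img = CvInvoke.Imread("input.png", ImreadModes.Grayscale))
 using (Mat patch = new Mat())
 {
    CvInvoke.GetRectSubPix(img, new Size(21, 21), new PointF(123.4f, 56.7f), patch);
 }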
Example No. 18
 /// <summary>
 /// Predicts response(s) for the provided sample(s).
 /// </summary>
 /// <param name="model">The statistical model</param>
 /// <param name="samples">The input samples, floating-point matrix</param>
 /// <param name="results">The optional output matrix of results</param>
 /// <param name="flags">The optional model-dependent flags</param>
 /// <returns>The predicted response for the first sample</returns>
 public static float Predict(this IStatModel model, IInputArray samples, IOutputArray results = null, int flags = 0)
 {
    using (InputArray iaSamples = samples.GetInputArray())
    using (OutputArray oaResults = results == null ? OutputArray.GetEmpty() : results.GetOutputArray())
    {
       return MlInvoke.StatModelPredict(model.StatModelPtr, iaSamples, oaResults, flags);
    }
 }
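A usage sketch; "svm" stands in for any trained model implementing IStatModel (for example Emgu.CV.ML.SVM), and the 1x2 CV_32F row is a hypothetical two-feature sample:
 // Usage sketch: classify a single sample with a trained statistical model.
 using (Matrix<float> sample = new Matrix<float>(1, 2))
 {
    sample[0, 0] = 1.5f;
    sample[0, 1] = -0.5f;
    float response = svm.Predict(sample); // svm: a trained IStatModel (hypothetical)
 }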
Example No. 19
 /// <summary>
 /// Converts objects array from internal representation to standard vector.
 /// </summary>
 /// <param name="objects">Objects array in internal representation.</param>
 /// <returns>Resulting array.</returns>
 public Rectangle[] Convert(IOutputArray objects)
 {
    using (OutputArray oaObjects = objects.GetOutputArray())
    using (VectorOfRect vr = new VectorOfRect())
    {
       CudaInvoke.cudaCascadeClassifierConvert(_ptr, oaObjects, vr);
       return vr.ToArray();
    }
 }
Example No. 20
 /// <summary>
 /// Creates kernel from basic functions.
 /// </summary>
 /// <param name="A">Basic function used in axis x.</param>
 /// <param name="B">Basic function used in axis y.</param>
 /// <param name="kernel">Final 32-b kernel derived from A and B.</param>
 /// <param name="chn">Number of kernel channels.</param>
 public static void CreateKernel(IInputArray A, IInputArray B, IOutputArray kernel, int chn = 1)
 {
    using (InputArray iaA = A.GetInputArray())
    using (InputArray iaB = B.GetInputArray())
    using (OutputArray oaKernel = kernel.GetOutputArray())
    {
       cveFtCreateKernel(iaA, iaB, oaKernel, chn);
    }
 }
Example No. 21
 /// <summary>
 /// Compute the descriptors on the image from the given keypoint locations.
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="image">The image to compute descriptors from</param>
 /// <param name="keypoints">The keypoints where the descriptor computation is perfromed</param>
 /// <param name="descriptors">The descriptors from the given keypoints</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void ComputeAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IOutputArray descriptors, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaKeypoints = keypoints.GetOutputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
    {
       CudaInvoke.cveCudaFeature2dAsyncComputeAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, oaDescriptors, stream);
    }
 }
Example No. 22
 /// <summary>
 /// Recovers inverse camera response.
 /// </summary>
 /// <param name="src">Vector of input images</param>
 /// <param name="dst">256x1 matrix with inverse camera response function</param>
 /// <param name="times">Vector of exposure time values for each image</param>
 public void Process(IInputArray src, IOutputArray dst, IInputArray times)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaTimes = times.GetInputArray())
    {
       CvInvoke.cveCalibrateCRFProcess(_calibrateCRFPtr, iaSrc, oaDst, iaTimes);
    }
 }
Example No. 23
 /// <summary>
 /// Produce domain transform filtering operation on source image.
 /// </summary>
 /// <param name="src">Filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="dDepth">Optional depth of the output image. dDepth can be set to Default, which will be equivalent to src.depth().</param>
 public void Filter(IInputArray src, IOutputArray dst, DepthType dDepth = DepthType.Default)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    {
       XimgprocInvoke.cveDTFilterFilter(_ptr, iaSrc, oaDst, dDepth);
    }
 }
Example No. 24
      /// <summary>
      /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
      /// </summary>
      /// <param name="feature2DAsync">The Feature2DAsync object</param>
      /// <param name="image">The image</param>
      /// <param name="mask">The optional mask, can be null if not needed</param>
      /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
      /// <param name="descriptors">The descriptors from the keypoints</param>
      /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
      /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
      public static void DetectAndComputeAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IInputArray mask, IOutputArray keyPoints,
         IOutputArray descriptors, bool useProvidedKeyPoints, Stream stream = null)
      {
         using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
         using (OutputArray oaKeypoints = keyPoints.GetOutputArray())
         using (OutputArray oaDescriptors = descriptors.GetOutputArray())
             CudaInvoke.cveCudaFeature2dAsyncDetectAndComputeAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, iaMask, oaKeypoints, oaDescriptors, useProvidedKeyPoints, stream);
       }
Example No. 25
 /// <summary>
 /// Applies the joint bilateral filter to an image.
 /// </summary>
 /// <param name="joint">Joint 8-bit or floating-point, 1-channel or 3-channel image.</param>
 /// <param name="src">Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint image.</param>
 /// <param name="dst">Destination image of the same size and type as src .</param>
 /// <param name="d">Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace .</param>
 /// <param name="sigmaColor">Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger areas of semi-equal color.</param>
 /// <param name="sigmaSpace">Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor ). When d&gt;0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is proportional to sigmaSpace .</param>
 /// <param name="borderType">Border type</param>
 public static void JointBilateralFilter(
    IInputArray joint, IInputArray src, IOutputArray dst, int d,
    double sigmaColor, double sigmaSpace, CvEnum.BorderType borderType = BorderType.Reflect101)
 {
    using (InputArray iaJoint = joint.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveJointBilateralFilter(iaJoint, iaSrc,
          oaDst, d, sigmaColor, sigmaSpace, borderType);
 }
Example No. 26
 /// <summary>
 /// Simple one-line Fast Global Smoother filter call.
 /// </summary>
 /// <param name="guide">image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.</param>
 /// <param name="src">source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">destination image.</param>
 /// <param name="lambda">parameter defining the amount of regularization</param>
 /// <param name="sigmaColor">parameter, that is similar to color space sigma in bilateralFilter.</param>
 /// <param name="lambdaAttenuation">internal parameter, defining how much lambda decreases after each iteration. Normally, it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.</param>
 /// <param name="numIter">number of iterations used for filtering, 3 is usually enough.</param>
 public static void FastGlobalSmootherFilter(IInputArray guide, IInputArray src, IOutputArray dst, double lambda,
    double sigmaColor, double lambdaAttenuation = 0.25, int numIter = 3)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    {
       cveFastGlobalSmootherFilter(iaGuide, iaSrc, oaDst, lambda, sigmaColor, lambdaAttenuation, numIter);
    }
 }
Example No. 27
 /// <summary>
 /// Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing filters are used in many different applications.
 /// </summary>
 /// <param name="src">Input 8-bit 3-channel image</param>
 /// <param name="dst">Output 8-bit 3-channel image</param>
 /// <param name="flags">Edge preserving filters</param>
 /// <param name="sigmaS">Range between 0 to 200</param>
 /// <param name="sigmaR">Range between 0 to 1</param>
 public static void EdgePreservingFilter(
    IInputArray src, IOutputArray dst,
    CvEnum.EdgePreservingFilterFlag flags = CvEnum.EdgePreservingFilterFlag.RecursFilter,
    float sigmaS = 60.0f,
    float sigmaR = 0.4f)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveEdgePreservingFilter(iaSrc, oaDst, flags, sigmaS, sigmaR);
 }
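A one-call usage sketch relying on the defaults shown in the signature above and a hypothetical input file:
 // Usage sketch: smooth the image while preserving edges.
 using (Mat src = CvInvoke.Imread("portrait.png", ImreadModes.Color))
 using (Mat dst = new Mat())
 {
    CvInvoke.EdgePreservingFilter(src, dst); // recursive filter, sigmaS=60, sigmaR=0.4
 }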
Example No. 28
 /// <summary>
 /// Merges images.
 /// </summary>
 /// <param name="src">Vector of input images</param>
 /// <param name="dst">Result image</param>
 /// <param name="times">Vector of exposure time values for each image</param>
 /// <param name="response">256x1 matrix with inverse camera response function for each pixel value, it should have the same number of channels as images.</param>
 public void Process(IInputArray src, IOutputArray dst, IInputArray times, IInputArray response)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaTimes = times.GetInputArray())
    using (InputArray iaResponse = response.GetInputArray())
    {
       CvInvoke.cveMergeExposuresProcess(_mergeExposuresPtr, iaSrc, oaDst, iaTimes, iaResponse);
    }
 }
Example No. 29
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="sparseFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
 /// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
 {
    using (InputArray iaPrevImg = prevImg.GetInputArray())
    using (InputArray iaNextImg = nextImg.GetInputArray())
    using (InputArray iaPrevPts = prevPts.GetInputArray())
    using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
    using (OutputArray oaStatus = (status == null ? OutputArray.GetEmpty() : status.GetOutputArray()))
    using (OutputArray oaErr = (err == null ? OutputArray.GetEmpty() : err.GetOutputArray()))
       cudaSparseOpticalFlowCalc(sparseFlow.SparseOpticalFlowPtr, iaPrevImg, iaNextImg, iaPrevPts, ioaNextPts,
          oaStatus, oaErr, (stream == null) ? IntPtr.Zero : stream.Ptr);
 }
Example No. 30
 /// <summary>
 /// Projects the image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="K">Camera intrinsic parameters</param>
 /// <param name="R">Camera rotation matrix</param>
 /// <param name="interpMode">Interpolation mode</param>
 /// <param name="borderMode">Border extrapolation mode</param>
 /// <param name="dst">Projected image</param>
 /// <returns>Projected image top-left corner</returns>
 public Point Warp(IInputArray src, IInputArray K, IInputArray R, CvEnum.Inter interpMode, CvEnum.BorderType borderMode, IOutputArray dst)
 {
    Point corner = new Point();
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaK = K.GetInputArray())
    using (InputArray iaR = R.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    {
       StitchingInvoke.cveRotationWarperWarp(_rotationWarper, iaSrc, iaK, iaR, interpMode, borderMode, oaDst, ref corner);
       return corner;
    }
 }
Example No. 31
 /// <summary>
 /// Given an input color image, enhance low-light images using the BIMEF method
 /// </summary>
 /// <param name="input">Input color image.</param>
 /// <param name="output">Resulting image.</param>
 /// <param name="mu">Enhancement ratio.</param>
 /// <param name="a">a-parameter in the Camera Response Function (CRF).</param>
 /// <param name="b">b-parameter in the Camera Response Function (CRF).</param>
 public static void BIMEF(IInputArray input, IOutputArray output, float mu = 0.5f, float a = -0.3293f, float b = 1.1258f)
 {
     using (InputArray iaInput = input.GetInputArray())
         using (OutputArray oaOutput = output.GetOutputArray())
             cveBIMEF(iaInput, oaOutput, mu, a, b);
 }
Example No. 32
 /// <summary>
 /// Draw a GridBoard.
 /// </summary>
 /// <param name="outSize">size of the output image in pixels.</param>
 /// <param name="img">output image with the board. The size of this image will be outSize and the board will be on the center, keeping the board proportions.</param>
 /// <param name="margindSize">minimum margins (in pixels) of the board in the output image</param>
 /// <param name="borderBits">width of the marker borders.</param>
 public void Draw(Size outSize, IOutputArray img, int margindSize = 0, int borderBits = 1)
 {
     using (OutputArray oaImg = img.GetOutputArray())
         ArucoInvoke.cveArucoGridBoardDraw(_ptr, ref outSize, oaImg, margindSize, borderBits);
 }
Example No. 33
 /// <summary>
 /// First calls the Grab() function, followed by Retrieve().
 /// </summary>
 /// <param name="m">The output array where the image will be read into.</param>
 /// <returns>False if no frame has been grabbed</returns>
 public bool Read(IOutputArray m)
 {
     using (OutputArray oaM = m.GetOutputArray())
         return(CvInvoke.cveVideoCaptureRead(Ptr, oaM));
 }
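A typical frame-grabbing loop built on the method above, assuming Emgu CV's VideoCapture and a hypothetical video file:
 // Usage sketch: read frames until the stream is exhausted.
 using (VideoCapture capture = new VideoCapture("video.mp4"))
 using (Mat frame = new Mat())
 {
    while (capture.Read(frame))
    {
       // process the frame here
    }
 }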
Example No. 34
 /// <summary>
 /// Transform the image using the lookup table
 /// </summary>
 /// <param name="image">The image to be transformed</param>
 /// <param name="dst">The transformation result</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Transform(IInputArray image, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaLookUpTableTransform(_ptr, iaImage, oaDst, stream);
 }
Example No. 35
 /// <summary>
 /// Converts an array to another data type with optional scaling.
 /// </summary>
 /// <param name="m">Output matrix; if it does not have a proper size or type before the operation, it is reallocated.</param>
 /// <param name="rtype">Desired output matrix type or, rather, the depth since the number of channels are the same as the input has; if rtype is negative, the output matrix will have the same type as the input.</param>
 /// <param name="alpha">Optional scale factor.</param>
 /// <param name="beta">Optional delta added to the scaled values.</param>
 public void ConvertTo(IOutputArray m, CvEnum.DepthType rtype, double alpha = 1.0, double beta = 0.0)
 {
     using (OutputArray oaM = m.GetOutputArray())
         UMatInvoke.cveUMatConvertTo(Ptr, oaM, rtype, alpha, beta);
 }
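A usage sketch: convert an 8-bit UMat to 32-bit float scaled into [0, 1] (the alpha factor does the scaling); the matrix dimensions are arbitrary:
 // Usage sketch: depth conversion with scaling.
 using (UMat u8 = new UMat(480, 640, DepthType.Cv8U, 1))
 using (UMat f32 = new UMat())
 {
    u8.ConvertTo(f32, DepthType.Cv32F, 1.0 / 255.0);
 }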
Example No. 36
 /// <summary>
 /// Update the background model
 /// </summary>
 /// <param name="image">The image that is used to update the background model</param>
 /// <param name="learningRate">Use -1 for default</param>
 /// <param name="fgMask">The output forground mask</param>
 public void Apply(IInputArray image, IOutputArray fgMask, double learningRate = -1)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaFgMask = fgMask.GetOutputArray())
             CvInvoke.CvBackgroundSubtractorUpdate(_ptr, iaImage, oaFgMask, learningRate);
 }
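A usage sketch, assuming Emgu CV's BackgroundSubtractorMOG2 and a hypothetical frame source:
 // Usage sketch: update the model with one frame and obtain the foreground mask.
 using (BackgroundSubtractorMOG2 subtractor = new BackgroundSubtractorMOG2())
 using (Mat frame = CvInvoke.Imread("frame.png", ImreadModes.Color))
 using (Mat fgMask = new Mat())
 {
    subtractor.Apply(frame, fgMask); // learningRate defaults to -1 (automatic)
 }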
Example No. 37
 /// <summary>
 /// Applies the Paillou filter to compute the gradient of the image along the x axis.
 /// </summary>
 /// <param name="op">Source 8-bit or 16-bit image</param>
 /// <param name="dst">Resulting 32-bit floating-point gradient image</param>
 /// <param name="alpha">Filter parameter alpha (see the original paper)</param>
 /// <param name="omega">Filter parameter omega (see the original paper)</param>
 public static void GradientPaillouX(IInputArray op, IOutputArray dst, double alpha, double omega)
 {
     using (InputArray iaOp = op.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveGradientPaillouX(iaOp, oaDst, alpha, omega);
 }
Example No. 38
 /// <summary>
 /// Compute the descriptors on the image from the given keypoint locations.
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="image">The image to compute descriptors from</param>
 /// <param name="keypoints">The keypoints where the descriptor computation is perfromed</param>
 /// <param name="descriptors">The descriptors from the given keypoints</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void ComputeAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IOutputArray descriptors, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaKeypoints = keypoints.GetOutputArray())
             using (OutputArray oaDescriptors = descriptors.GetOutputArray())
             {
                 CudaInvoke.cveCudaFeature2dAsyncComputeAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, oaDescriptors, stream);
             }
 }
Example No. 39
 /// <summary>
 /// Finds lines in a binary image using the classical Hough transform.
 /// </summary>
 /// <param name="image">8-bit, single-channel binary source image</param>
 /// <param name="lines">Output vector of lines. Each line is represented by a two-element vector.
 /// The first element is the distance from the coordinate origin (top-left corner of the image).
 /// The second element is the line rotation angle in radians.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Detect(IInputArray image, IOutputArray lines, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaLines = lines.GetOutputArray())
             CudaInvoke.cudaHoughLinesDetectorDetect(_ptr, iaImage, oaLines, stream);
 }
Example No. 40
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaKeypoints = keypoints.GetOutputArray())
             using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                 CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, iaMask, stream);
 }
Example No. 41
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAndComputeAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IInputArray mask, IOutputArray keyPoints,
                                          IOutputArray descriptors, bool useProvidedKeyPoints, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             using (OutputArray oaKeypoints = keyPoints.GetOutputArray())
                 using (OutputArray oaDescriptors = descriptors.GetOutputArray())
                     CudaInvoke.cveCudaFeature2dAsyncDetectAndComputeAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, iaMask, oaKeypoints, oaDescriptors, useProvidedKeyPoints, stream);
 }
Example No. 42
 /// <summary>
 /// Returns the mask of the superpixel segmentation stored in SuperpixelSEEDS object.
 /// </summary>
 /// <param name="image">Return: CV_8UC1 image mask where -1 indicates that the pixel is a superpixel border, and 0 otherwise.</param>
 /// <param name="thickLine">If false, the border is only one pixel wide, otherwise all pixels at the border are masked.</param>
 public void GetLabelContourMask(IOutputArray image, bool thickLine = false)
 {
     using (OutputArray oaImage = image.GetOutputArray())
         XImgprocInvoke.cveSuperpixelSEEDSGetLabelContourMask(_ptr, oaImage, thickLine);
 }
Example No. 43
 /// <summary>
 /// Compute the descriptors on the image from the given keypoint locations.
 /// </summary>
 /// <param name="image">The image to compute descriptors from</param>
 /// <param name="keyPoints">The keypoints where the descriptor computation is perfromed</param>
 /// <param name="descriptors">The descriptors from the given keypoints</param>
 public void Compute(IInputArray image, VectorOfKeyPoint keyPoints, IOutputArray descriptors)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaDescriptors = descriptors.GetOutputArray())
             Features2DInvoke.CvFeature2DCompute(_feature2D, iaImage, keyPoints.Ptr, oaDescriptors);
 }
Example No. 44
 /// <summary>
 /// Converts image from one color space to another
 /// </summary>
 /// <param name="src">The source GpuMat</param>
 /// <param name="dst">The destination GpuMat</param>
 /// <param name="code">The color conversion code</param>
 /// <param name="dcn">Number of channels in the destination image. If the parameter is 0, the number of the channels is derived automatically from src and the code .</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void CvtColor(IInputArray src, IOutputArray dst, CvEnum.ColorConversion code, int dcn = 0, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cudaCvtColor(iaSrc, oaDst, code, dcn, stream);
 }
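A usage sketch, assuming Emgu CV's GpuMat upload/download round trip and a hypothetical input file:
 // Usage sketch: upload to the GPU, convert BGR to grayscale, download the result.
 using (Mat src = CvInvoke.Imread("input.png", ImreadModes.Color))
 using (GpuMat gpuSrc = new GpuMat(src))
 using (GpuMat gpuGray = new GpuMat())
 using (Mat gray = new Mat())
 {
    CudaInvoke.CvtColor(gpuSrc, gpuGray, ColorConversion.Bgr2Gray);
    gpuGray.Download(gray);
 }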
Example No. 45
 /// <summary>
 /// Computes the estimated covariance matrix of an image using the sliding window formulation.
 /// </summary>
 /// <param name="src">The source image. Input image must be of a complex type.</param>
 /// <param name="dst">The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).</param>
 /// <param name="windowRows">The number of rows in the window.</param>
 /// <param name="windowCols">The number of cols in the window. The window size parameters control the accuracy of the estimation. The sliding window moves over the entire image from the top-left corner to the bottom right corner. Each location of the window represents a sample. If the window is the size of the image, then this gives the exact covariance matrix. For all other cases, the sizes of the window will impact the number of samples and the number of elements in the estimated covariance matrix.</param>
 public static void CovarianceEstimation(IInputArray src, IOutputArray dst, int windowRows, int windowCols)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveCovarianceEstimation(iaSrc, oaDst, windowRows, windowCols);
 }
Example No. 46
 /// <summary>
 /// Converts an image from Bayer pattern to RGB or grayscale.
 /// </summary>
 /// <param name="src">Source image (8-bit or 16-bit single channel).</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="code">Color space conversion code (see the description below).</param>
 /// <param name="dcn">Number of channels in the destination image. If the parameter is 0, the number of the channels is derived automatically from src and the code .</param>
 /// <param name="stream">Stream for the asynchronous version.</param>
 public static void Demosaicing(IInputArray src, IOutputArray dst, DemosaicTypes code, int dcn = -1, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cudaDemosaicing(iaSrc, oaDst, code, dcn, stream);
 }
Example No. 47
 /// <summary>
 /// Applies the Deriche filter to compute the gradient of the image along the x axis.
 /// </summary>
 /// <param name="op">Source 8-bit or 16-bit image</param>
 /// <param name="dst">Resulting 32-bit floating-point gradient image</param>
 /// <param name="alphaDerive">Smoothing parameter for the derivative part of the filter</param>
 /// <param name="alphaMean">Smoothing parameter for the averaging part of the filter</param>
 public static void GradientDericheX(IInputArray op, IOutputArray dst, double alphaDerive, double alphaMean)
 {
     using (InputArray iaOp = op.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveGradientDericheX(iaOp, oaDst, alphaDerive, alphaMean);
 }
Example No. 48
 /// <summary>
 /// Routines for correcting image color gamma
 /// </summary>
 /// <param name="src">Source image (3- or 4-channel 8 bit).</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="forward">True for forward gamma correction or false for inverse gamma correction.</param>
 /// <param name="stream">Stream for the asynchronous version.</param>
 public static void GammaCorrection(IInputArray src, IOutputArray dst, bool forward = true, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cudaGammaCorrection(iaSrc, oaDst, forward, stream);
 }
Example No. 49
 /// <summary>
 /// Copy the data in this UMat to the destination array
 /// </summary>
 /// <param name="m">The destination array where the data will be copied to</param>
 /// <param name="mask">Operation mask. Its non-zero elements indicate which matrix elements need to be copied.</param>
 public void CopyTo(IOutputArray m, IInputArray mask = null)
 {
     using (OutputArray oaM = m.GetOutputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             UMatInvoke.cveUMatCopyTo(this, oaM, iaMask);
 }
Example No. 50
 /// <summary>
 /// Equalizes the histogram of a grayscale image.
 /// </summary>
 /// <param name="src">Source image with CV_8UC1 type.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="stream">Stream for the asynchronous version.</param>
 public static void EqualizeHist(IInputArray src, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cudaEqualizeHist(iaSrc, oaDst, stream);
 }
Example No. 51
 /// <summary>
 /// Updates the background model
 /// </summary>
 /// <param name="frame">Next video frame.</param>
 /// <param name="learningRate">The learning rate, use -1.0f for default value.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 /// <param name="foregroundMask">The foregroundMask</param>
 public void Update(IInputArray frame, IOutputArray foregroundMask, double learningRate, Stream stream = null)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         using (OutputArray oaForegroundMask = foregroundMask.GetOutputArray())
             CudaInvoke.cudaBackgroundSubtractorMOGApply(_ptr, iaFrame, oaForegroundMask, learningRate, stream);
 }
Example No. 52
 /// <summary>
 /// Calculates histogram with evenly distributed bins for single channel source.
 /// </summary>
 /// <param name="src">The source GpuMat. Supports CV_8UC1, CV_16UC1 and CV_16SC1 types.</param>
 /// <param name="hist">Histogram with evenly distributed bins. A GpuMat&lt;int&gt; type.</param>
 /// <param name="histSize">The size of histogram (number of levels)</param>
 /// <param name="lowerLevel">The lower level</param>
 /// <param name="upperLevel">The upper level</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 /// <returns>Histogram with evenly distributed bins</returns>
 public static void HistEven(IInputArray src, IOutputArray hist, int histSize, int lowerLevel, int upperLevel, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaHist = hist.GetOutputArray())
             cudaHistEven(iaSrc, oaHist, histSize, lowerLevel, upperLevel, stream);
 }
Example No. 53
 /// <summary>
 /// Computes features by input image.
 /// </summary>
 /// <param name="image">Input image (CV_32FC1)</param>
 /// <param name="features">Feature vector (CV_32FC1)</param>
 public void Compute(IInputArray image, IOutputArray features)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaFeatures = features.GetOutputArray())
             ContribInvoke.cveBIFCompute(_ptr, iaImage, oaFeatures);
 }
Example No. 54
 /// <summary>
 /// Performs linear blending of two images.
 /// </summary>
 /// <param name="img1">First image. Supports only CV_8U and CV_32F depth.</param>
 /// <param name="img2">Second image. Must have the same size and the same type as img1 .</param>
 /// <param name="weights1">Weights for first image. Must have tha same size as img1. Supports only CV_32F type.</param>
 /// <param name="weights2">Weights for second image. Must have tha same size as img2. Supports only CV_32F type.</param>
 /// <param name="result">Destination image.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void BlendLinear(IInputArray img1, IInputArray img2, IInputArray weights1, IInputArray weights2, IOutputArray result,
                                Stream stream = null)
 {
     using (InputArray iaImg1 = img1.GetInputArray())
         using (InputArray iaImg2 = img2.GetInputArray())
             using (InputArray iaWeights1 = weights1.GetInputArray())
                 using (InputArray iaWeights2 = weights2.GetInputArray())
                     using (OutputArray oaResult = result.GetOutputArray())
                         cudaBlendLinear(iaImg1, iaImg2, iaWeights1, iaWeights2, oaResult, stream);
 }
Example No. 55
 /// <summary>
 /// Get the next frame
 /// </summary>
 public void NextFrame(IOutputArray frame)
 {
     using (OutputArray oaFrame = frame.GetOutputArray())
         SuperresInvoke.cvSuperresFrameSourceNextFrame(_frameSourcePtr, oaFrame);
 }
Example No. 56
 /// <summary>
 /// Applies bilateral filter to the image.
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="dst">The destination image; should have the same size and the same type as src</param>
 /// <param name="kernelSize">The diameter of each pixel neighborhood, that is used during filtering.</param>
 /// <param name="sigmaColor">Filter sigma in the color space. Larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color</param>
 /// <param name="sigmaSpatial">Filter sigma in the coordinate space. Larger value of the parameter means that farther pixels will influence each other (as long as their colors are close enough; see sigmaColor). Then d&gt;0, it specifies the neighborhood size regardless of sigmaSpace, otherwise d is proportional to sigmaSpace.</param>
 /// <param name="borderType">Pixel extrapolation method, use DEFAULT for default</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void BilateralFilter(IInputArray src, IOutputArray dst, int kernelSize, float sigmaColor, float sigmaSpatial, CvEnum.BorderType borderType = BorderType.Default, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cudaBilateralFilter(iaSrc, oaDst, kernelSize, sigmaColor, sigmaSpatial, borderType, stream);
 }
Example No. 57
 /// <summary>
 /// Given an input color image, enhance low-light images using the BIMEF method
 /// </summary>
 /// <param name="input">Input color image.</param>
 /// <param name="output">Resulting image.</param>
 /// <param name="k">Exposure ratio.</param>
 /// <param name="mu">Enhancement ratio.</param>
 /// <param name="a">a-parameter in the Camera Response Function (CRF).</param>
 /// <param name="b">b-parameter in the Camera Response Function (CRF).</param>
 public static void BIMEF(IInputArray input, IOutputArray output, float k, float mu, float a, float b)
 {
     using (InputArray iaInput = input.GetInputArray())
         using (OutputArray oaOutput = output.GetOutputArray())
             cveBIMEF2(iaInput, oaOutput, k, mu, a, b);
 }
Example No. 58
 /// <summary>
 /// Simple one-line Fast Global Smoother filter call.
 /// </summary>
 /// <param name="guide">image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.</param>
 /// <param name="src">source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">destination image.</param>
 /// <param name="lambda">parameter defining the amount of regularization</param>
 /// <param name="sigmaColor">parameter, that is similar to color space sigma in bilateralFilter.</param>
 /// <param name="lambdaAttenuation">internal parameter, defining how much lambda decreases after each iteration. Normally, it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.</param>
 /// <param name="numIter">number of iterations used for filtering, 3 is usually enough.</param>
 public static void FastGlobalSmootherFilter(IInputArray guide, IInputArray src, IOutputArray dst, double lambda,
                                             double sigmaColor, double lambdaAttenuation = 0.25, int numIter = 3)
 {
     using (InputArray iaGuide = guide.GetInputArray())
         using (InputArray iaSrc = src.GetInputArray())
             using (OutputArray oaDst = dst.GetOutputArray())
             {
                 cveFastGlobalSmootherFilter(iaGuide, iaSrc, oaDst, lambda, sigmaColor, lambdaAttenuation, numIter);
             }
 }
Example No. 59
 /// <summary>
 /// Returns the segmentation labeling of the image.
 /// Each label represents a superpixel, and each pixel is assigned to one superpixel label.
 /// </summary>
 /// <param name="labels">Return: A CV_32UC1 integer array containing the labels of the superpixel segmentation. The labels are in the range [0, NumberOfSuperpixels].</param>
 public void GetLabels(IOutputArray labels)
 {
     using (OutputArray oaLabels = labels.GetOutputArray())
         XImgprocInvoke.cveSuperpixelSEEDSGetLabels(_ptr, oaLabels);
 }
Example No. 60
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             using (OutputArray oaDescriptors = descriptors.GetOutputArray())
                 Features2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }