Example #1
 /// <summary>
 /// Applies an affine transformation to an image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="mapMatrix">2x3 transformation matrix</param>
 /// <param name="dsize">Size of the output image.</param>
 /// <param name="interpMethod">Interpolation method</param>
 /// <param name="warpMethod">Warp method</param>
 /// <param name="borderMode">Pixel extrapolation method</param>
 /// <param name="borderValue">A value used to fill outliers</param>
 public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaMapMatrix = mapMatrix.GetInputArray())
       cveWarpAffine(iaSrc, oaDst, iaMapMatrix, ref dsize, (int)interpMethod | (int)warpMethod, borderMode, ref borderValue);
 }
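A minimal usage sketch for WarpAffine, assuming the Emgu CV CvInvoke entry points and a hypothetical input file:

 // Rotate an image 30 degrees about its center; GetRotationMatrix2D fills the 2x3 map matrix consumed by WarpAffine.
 Mat src = CvInvoke.Imread("input.jpg");
 Mat rotation = new Mat();
 CvInvoke.GetRotationMatrix2D(new PointF(src.Width / 2f, src.Height / 2f), 30, 1.0, rotation);
 Mat dst = new Mat();
 CvInvoke.WarpAffine(src, dst, rotation, src.Size);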
Example #2
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
       Feature2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
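A usage sketch for DetectAndCompute, assuming an ORB-style detector (the class name, e.g. ORBDetector, varies across Emgu CV versions) and a hypothetical image file:

 using (Mat img = CvInvoke.Imread("scene.jpg"))
 using (ORBDetector orb = new ORBDetector())
 using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
 using (Mat descriptors = new Mat())
 {
    // null mask: search the whole image; false: detect keypoints instead of reusing provided ones
    orb.DetectAndCompute(img, null, keyPoints, descriptors, false);
 }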
 /// <summary>
 ///  This function is similar to cvCalcBackProjectPatch. It slides through the image, compares the overlapped patches of size wxh against templ using the specified method, and stores the comparison results in result.
 /// </summary>
 /// <param name="image">Image where the search is running. It should be 8-bit or 32-bit floating-point</param>
 /// <param name="templ">Searched template; must be not greater than the source image and the same data type as the image</param>
 /// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be W-w+1xH-h+1.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>  
 public void Match(IInputArray image, IInputArray templ, IOutputArray result, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaTempl = templ.GetInputArray())
    using (OutputArray oaResult = result.GetOutputArray())
       CudaInvoke.cudaTemplateMatchingMatch(_ptr, iaImage, iaTempl, oaResult, stream);
 }
 /// <summary>
 /// Grab, decode and return the next video frame.
 /// </summary>
 /// <param name="frame">The output array into which the decoded frame is stored</param>
 /// <returns>False if no more frames are available</returns>
 public bool NextFrame(IOutputArray frame)
 {
    using (OutputArray oaFrame = frame.GetOutputArray())
    {
       return CudaInvoke.cudaVideoReaderNextFrame(_ptr, oaFrame);
    }
 }
 /// <summary>
 /// Apply the filter to the disparity image
 /// </summary>
 /// <param name="disparity">The input disparity map</param>
 /// <param name="image">The image</param>
 /// <param name="dst">The output disparity map, should have the same size as the input disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray disparity, IInputArray image, IOutputArray dst, Stream stream = null)
 {
    using (InputArray iaDisparity = disparity.GetInputArray())
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       CudaInvoke.cudaDisparityBilateralFilterApply(this, iaDisparity, iaImage, oaDst, stream);
 }
 /// <summary>
 /// Find the good features to track
 /// </summary>
 /// <param name="image">The image from which the corners are detected</param>
 /// <param name="corners">The detected corners</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Detect(IInputArray image, IOutputArray corners, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaCorners = corners.GetOutputArray())
     using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
       CudaInvoke.cudaCornersDetectorDetect(_ptr, iaImage, oaCorners, iaMask, stream);
 }
Example #7
 /// <summary>
 /// Reconstructs the selected image area from the pixel near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video.
 /// </summary>
 /// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
 /// <param name="mask">The inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted</param>
 /// <param name="dst">The output image of the same format and the same size as input</param>
 /// <param name="flags">The inpainting method</param>
 /// <param name="inpaintRadius">The radius of circular neighborhood of each point inpainted that is considered by the algorithm</param>
 public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaMask = mask.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveInpaint(iaSrc, iaMask, oaDst, inpaintRadius, flags);
 }
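A usage sketch for Inpaint with hypothetical file names; the mask must be an 8-bit single-channel image whose non-zero pixels mark the region to reconstruct (enum member names may differ slightly between Emgu CV versions):

 Mat photo = CvInvoke.Imread("scratched.jpg");
 Mat mask = CvInvoke.Imread("scratch_mask.png", ImreadModes.Grayscale);
 Mat repaired = new Mat();
 // 3-pixel inpainting radius, Telea's fast-marching method
 CvInvoke.Inpaint(photo, mask, repaired, 3, CvEnum.InpaintType.Telea);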
Example #8
 /// <summary>
 /// Computes disparity map for the input rectified stereo pair.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(IInputArray left, IInputArray right, IOutputArray disparity, Stream stream = null)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CudaInvoke.cudaStereoBMFindStereoCorrespondence(_ptr, iaLeft, iaRight, oaDisparity, stream);
 }
Example #9
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaKeypoints = keypoints.GetOutputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
       CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, iaMask, stream);
 }
 /// <summary>
 /// Detects objects of different sizes in the input image.
 /// </summary>
 /// <param name="image">Matrix of type CV_8U containing an image where objects should be detected.</param>
 /// <param name="objects">Buffer to store detected objects (rectangles).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void DetectMultiScale(IInputArray image, IOutputArray objects, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaObjects = objects.GetOutputArray())
       CudaInvoke.cudaCascadeClassifierDetectMultiScale(_ptr, iaImage, oaObjects,
          stream == null ? IntPtr.Zero : stream.Ptr);
 }
 /// <summary>
 /// Computes disparity map for the specified stereo pair
 /// </summary>
 /// <param name="matcher">The stereo matcher</param>
 /// <param name="left">Left 8-bit single-channel image.</param>
 /// <param name="right">Right image of the same size and the same type as the left one.</param>
 /// <param name="disparity">Output disparity map. It has the same size as the input images. Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map</param>
 public static void Compute(this IStereoMatcher matcher, IInputArray left, IInputArray right, IOutputArray disparity)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CvStereoMatcherCompute(matcher.StereoMatcherPtr, iaLeft, iaRight, oaDisparity);
 }
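A usage sketch for the Compute extension with the block-matching implementation, assuming Emgu CV's StereoBM class and hypothetical rectified input files:

 using (Mat left = CvInvoke.Imread("left.png", ImreadModes.Grayscale))
 using (Mat right = CvInvoke.Imread("right.png", ImreadModes.Grayscale))
 using (StereoBM bm = new StereoBM(64, 21)) // 64 disparities, 21x21 block
 using (Mat disparity = new Mat())
 {
    bm.Compute(left, right, disparity); // 16-bit fixed-point disparity with 4 fractional bits
 }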
Example #12
      /*
      /// <summary>
      /// Create an auto-tuned flann index
      /// </summary>
      /// <param name="values">A row by row matrix of descriptors</param>
      /// <param name="targetPrecision">Precision desired, use 0.9 if not sure</param>
      /// <param name="buildWeight">build tree time weighting factor, use 0.01 if not sure</param>
      /// <param name="memoryWeight">index memory weighting factor, use 0 if not sure</param>
      /// <param name="sampleFraction">what fraction of the dataset to use for autotuning, use 0.1 if not sure</param>
      public Index(IInputArray values, float targetPrecision, float buildWeight, float memoryWeight, float sampleFraction)
      {
         using (InputArray iaValues = values.GetInputArray())
            _ptr = CvFlannIndexCreateAutotuned(iaValues, targetPrecision, buildWeight, memoryWeight, sampleFraction);
      }*/
      #endregion

      /// <summary>
      /// Perform k-nearest-neighbours (KNN) search
      /// </summary>
      /// <param name="queries">A row by row matrix of descriptors to be query for nearest neighbours</param>
      /// <param name="indices">The result of the indices of the k-nearest neighbours</param>
      /// <param name="squareDistances">The square of the Eculidean distance between the neighbours</param>
      /// <param name="knn">Number of nearest neighbors to search for</param>
      /// <param name="checks">The number of times the tree(s) in the index should be recursively traversed. A
      /// higher value for this parameter would give better search precision, but also take more
      /// time. If automatic configuration was used when the index was created, the number of
      /// checks required to achieve the specified precision was also computed, in which case
      /// this parameter is ignored </param>
      public void KnnSearch(IInputArray queries, IOutputArray indices, IOutputArray squareDistances, int knn, int checks)
      {
         using (InputArray iaQueries = queries.GetInputArray())
         using (OutputArray oaIndices = indices.GetOutputArray())
         using (OutputArray oaSquareDistances = squareDistances.GetOutputArray())
         CvFlannIndexKnnSearch(_ptr, iaQueries, oaIndices, oaSquareDistances, knn, checks);
      }
Example #13
 /// <summary>
 /// Simple one-line Adaptive Manifold Filter call.
 /// </summary>
 /// <param name="joint">Joint (also called guided) image or array of images with any number of channels.</param>
 /// <param name="src">Filtering image with any number of channels.</param>
 /// <param name="dst">Output image.</param>
 /// <param name="sigmaS">Spatial standard deviation.</param>
 /// <param name="sigmaR">Color space standard deviation, similar to the sigma in the color space of bilateralFilter.</param>
 /// <param name="adjustOutliers">If true, perform the outlier adjustment operation.</param>
 public static void AmFilter(IInputArray joint, IInputArray src, IOutputArray dst, double sigmaS, double sigmaR,
    bool adjustOutliers)
 {
    using (InputArray iaJoint = joint.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveAmFilter(iaJoint, iaSrc, oaDst, sigmaS, sigmaR, adjustOutliers);
 }
Example #14
 /// <summary>
 /// Simple one-line Guided Filter call.
 /// </summary>
 /// <param name="guide">Guided image (or array of images) with up to 3 channels; if it has more than 3 channels, only the first 3 are used.</param>
 /// <param name="src">Filtering image with any number of channels.</param>
 /// <param name="dst">Output image.</param>
 /// <param name="radius">Radius of the Guided Filter.</param>
 /// <param name="eps">Regularization term of the Guided Filter; eps^2 is similar to the sigma in the color space of bilateralFilter.</param>
 /// <param name="dDepth">Optional depth of the output image.</param>
 public static void GuidedFilter(IInputArray guide, IInputArray src, IOutputArray dst, int radius, double eps,
    int dDepth)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveGuidedFilter(iaGuide, iaSrc, oaDst, radius, eps, dDepth);
 }
Example #15
 /// <summary>
 /// Simple one-line Domain Transform filter call.
 /// </summary>
 /// <param name="guide">Guided image, used to build the transformed distance that describes the edge structure of the guide.</param>
 /// <param name="src">Filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="sigmaSpatial">Similar to the sigma in the coordinate space of bilateralFilter.</param>
 /// <param name="sigmaColor">Similar to the sigma in the color space of bilateralFilter.</param>
 /// <param name="mode">Filtering mode.</param>
 /// <param name="numIters">Number of iterations used for filtering; 3 is usually enough.</param>
 public static void DtFilter(IInputArray guide, IInputArray src, IOutputArray dst,
    double sigmaSpatial, double sigmaColor, int mode, int numIters)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveDtFilter(iaGuide, iaSrc, oaDst, sigmaSpatial, sigmaColor, mode, numIters);
 }
Example #16
 /// <summary>
 /// Extracts pixels from src:
 /// dst(x, y) = src(x + center.x - (width(dst)-1)*0.5, y + center.y - (height(dst)-1)*0.5)
 /// where the values of pixels at non-integer coordinates are retrieved using bilinear interpolation. Every channel of multiple-channel images is processed independently. Whereas the rectangle center must be inside the image, the whole rectangle may be partially occluded. In this case, the replication border mode is used to get pixel values beyond the image boundaries.
 /// </summary>
 /// <param name="image">Source image</param>
 /// <param name="patchSize">Size of the extracted patch.</param>
 /// <param name="patch">Extracted rectangle</param>
 /// <param name="patchType">Depth of the extracted pixels. By default, they have the same depth as <paramref name="image"/>.</param>
 /// <param name="center">Floating point coordinates of the extracted rectangle center within the source image. The center must be inside the image.</param>
 public static void GetRectSubPix(IInputArray image, Size patchSize, PointF center, IOutputArray patch, DepthType patchType = DepthType.Default)
 {
    using (InputArray iaSrc = image.GetInputArray())
    using (OutputArray oaPatch = patch.GetOutputArray())
    {
       cveGetRectSubPix(iaSrc, ref patchSize, ref center, oaPatch, patchType);
    }
 }
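A short sketch for GetRectSubPix, assuming image is an existing Mat; the center may lie at sub-pixel coordinates:

 Mat patch = new Mat();
 // Extract a 32x32 patch centered at (100.5, 60.25) using bilinear interpolation
 CvInvoke.GetRectSubPix(image, new Size(32, 32), new PointF(100.5f, 60.25f), patch);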
Example #17
 /// <summary>
 /// Predicts response(s) for the provided sample(s).
 /// </summary>
 /// <param name="model">The stat model</param>
 /// <param name="samples">The input samples</param>
 /// <param name="results">The optional output matrix of results</param>
 /// <param name="flags">The optional flags, model-dependent</param>
 /// <returns>The prediction result</returns>
 public static float Predict(this IStatModel model, IInputArray samples, IOutputArray results = null, int flags = 0)
 {
    using (InputArray iaSamples = samples.GetInputArray())
    using (OutputArray oaResults = results == null ? OutputArray.GetEmpty() : results.GetOutputArray())
    {
       return MlInvoke.StatModelPredict(model.StatModelPtr, iaSamples, oaResults, flags);
    }
 }
Example #18
 /// <summary>
 /// Produce domain transform filtering operation on source image.
 /// </summary>
 /// <param name="src">Filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="dDepth">Optional depth of the output image. dDepth can be set to Default, which will be equivalent to src.depth().</param>
 public void Filter(IInputArray src, IOutputArray dst, DepthType dDepth = DepthType.Default)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    {
       XimgprocInvoke.cveDTFilterFilter(_ptr, iaSrc, oaDst, dDepth);
    }
 }
Example #19
 /// <summary>
 /// Compute the descriptors on the image from the given keypoint locations.
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="image">The image to compute descriptors from</param>
 /// <param name="keypoints">The keypoints where the descriptor computation is perfromed</param>
 /// <param name="descriptors">The descriptors from the given keypoints</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void ComputeAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IOutputArray descriptors, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaKeypoints = keypoints.GetOutputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
    {
       CudaInvoke.cveCudaFeature2dAsyncComputeAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, oaDescriptors, stream);
    }
 }
 /// <summary>
 /// Converts objects array from internal representation to standard vector.
 /// </summary>
 /// <param name="objects">Objects array in internal representation.</param>
 /// <returns>Resulting array.</returns>
 public Rectangle[] Convert(IOutputArray objects)
 {
    using (OutputArray oaObjects = objects.GetOutputArray())
    using (VectorOfRect vr = new VectorOfRect())
    {
       CudaInvoke.cudaCascadeClassifierConvert(_ptr, oaObjects, vr);
       return vr.ToArray();
    }
 }
Example #21
 /// <summary>
 /// Creates kernel from basic functions.
 /// </summary>
 /// <param name="A">Basic function used in axis x.</param>
 /// <param name="B">Basic function used in axis y.</param>
 /// <param name="kernel">Final 32-b kernel derived from A and B.</param>
 /// <param name="chn">Number of kernel channels.</param>
 public static void CreateKernel(IInputArray A, IInputArray B, IOutputArray kernel, int chn = 1)
 {
    using (InputArray iaA = A.GetInputArray())
    using (InputArray iaB = B.GetInputArray())
    using (OutputArray oaKernel = kernel.GetOutputArray())
    {
       cveFtCreateKernel(iaA, iaB, oaKernel, chn);
    }
 }
Example #22
 /// <summary>
 /// Recovers inverse camera response.
 /// </summary>
 /// <param name="src">Vector of input images</param>
 /// <param name="dst">256x1 matrix with inverse camera response function</param>
 /// <param name="times">Vector of exposure time values for each image</param>
 public void Process(IInputArray src, IOutputArray dst, IInputArray times)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaTimes = times.GetInputArray())
    {
       CvInvoke.cveCalibrateCRFProcess(_calibrateCRFPtr, iaSrc, oaDst, iaTimes);
    }
 }
Example #23
      /// <summary>
      /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
      /// </summary>
      /// <param name="feature2DAsync">The Feature2DAsync object</param>
      /// <param name="image">The image</param>
      /// <param name="mask">The optional mask, can be null if not needed</param>
      /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
      /// <param name="descriptors">The descriptors from the keypoints</param>
      /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
      /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
      public static void DetectAndComputeAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IInputArray mask, IOutputArray keyPoints,
         IOutputArray descriptors, bool useProvidedKeyPoints, Stream stream = null)
      {
         using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
         using (OutputArray oaKeypoints = keyPoints.GetOutputArray())
         using (OutputArray oaDescriptors = descriptors.GetOutputArray())
             CudaInvoke.cveCudaFeature2dAsyncDetectAndComputeAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, iaMask, oaKeypoints, oaDescriptors, useProvidedKeyPoints, stream);
       }
Example #24
 /// <summary>
 /// Simple one-line Fast Global Smoother filter call.
 /// </summary>
 /// <param name="guide">image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.</param>
 /// <param name="src">source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">destination image.</param>
 /// <param name="lambda">parameter defining the amount of regularization</param>
 /// <param name="sigmaColor">parameter, that is similar to color space sigma in bilateralFilter.</param>
 /// <param name="lambdaAttenuation">internal parameter, defining how much lambda decreases after each iteration. Normally, it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.</param>
 /// <param name="numIter">number of iterations used for filtering, 3 is usually enough.</param>
 public static void FastGlobalSmootherFilter(IInputArray guide, IInputArray src, IOutputArray dst, double lambda,
    double sigmaColor, double lambdaAttenuation = 0.25, int numIter = 3)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    {
       cveFastGlobalSmootherFilter(iaGuide, iaSrc, oaDst, lambda, sigmaColor, lambdaAttenuation, numIter);
    }
 }
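A sketch calling the wrapper above, using the source image as its own guide; src is an assumed existing 8-bit image and the lambda/sigmaColor values are illustrative:

 Mat smoothed = new Mat();
 FastGlobalSmootherFilter(src, src, smoothed, 100, 5);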
Example #25
 /// <summary>
 /// Applies the joint bilateral filter to an image.
 /// </summary>
 /// <param name="joint">Joint 8-bit or floating-point, 1-channel or 3-channel image.</param>
 /// <param name="src">Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint image.</param>
 /// <param name="dst">Destination image of the same size and type as src .</param>
 /// <param name="d">Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace .</param>
 /// <param name="sigmaColor">Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger areas of semi-equal color.</param>
 /// <param name="sigmaSpace">Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor ). When d&gt;0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is proportional to sigmaSpace .</param>
 /// <param name="borderType">Border type</param>
 public static void JointBilateralFilter(
    IInputArray joint, IInputArray src, IOutputArray dst, int d,
    double sigmaColor, double sigmaSpace, CvEnum.BorderType borderType = BorderType.Reflect101)
 {
    using (InputArray iaJoint = joint.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveJointBilateralFilter(iaJoint, iaSrc,
          oaDst, d, sigmaColor, sigmaSpace, borderType);
 }
Example #26
 /// <summary>
 /// Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing filters are used in many different applications.
 /// </summary>
 /// <param name="src">Input 8-bit 3-channel image</param>
 /// <param name="dst">Output 8-bit 3-channel image</param>
 /// <param name="flags">Edge preserving filters</param>
 /// <param name="sigmaS">Range between 0 to 200</param>
 /// <param name="sigmaR">Range between 0 to 1</param>
 public static void EdgePreservingFilter(
    IInputArray src, IOutputArray dst,
    CvEnum.EdgePreservingFilterFlag flags = CvEnum.EdgePreservingFilterFlag.RecursFilter,
    float sigmaS = 60.0f,
    float sigmaR = 0.4f)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveEdgePreservingFilter(iaSrc, oaDst, flags, sigmaS, sigmaR);
 }
Example #27
 /// <summary>
 /// Merges images.
 /// </summary>
 /// <param name="src">Vector of input images</param>
 /// <param name="dst">Result image</param>
 /// <param name="times">Vector of exposure time values for each image</param>
 /// <param name="response">256x1 matrix with inverse camera response function for each pixel value, it should have the same number of channels as images.</param>
 public void Process(IInputArray src, IOutputArray dst, IInputArray times, IInputArray response)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaTimes = times.GetInputArray())
    using (InputArray iaResponse = response.GetInputArray())
    {
       CvInvoke.cveMergeExposuresProcess(_mergeExposuresPtr, iaSrc, oaDst, iaTimes, iaResponse);
    }
 }
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="sparseFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
 /// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
 {
    using (InputArray iaPrevImg = prevImg.GetInputArray())
    using (InputArray iaNextImg = nextImg.GetInputArray())
    using (InputArray iaPrevPts = prevPts.GetInputArray())
    using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
    using (OutputArray oaStatus = (status == null ? OutputArray.GetEmpty() : status.GetOutputArray()))
    using (OutputArray oaErr = (err == null ? OutputArray.GetEmpty() : err.GetOutputArray()))
       cudaSparseOpticalFlowCalc(sparseFlow.SparseOpticalFlowPtr, iaPrevImg, iaNextImg, iaPrevPts, ioaNextPts,
          oaStatus, oaErr, (stream == null) ? IntPtr.Zero : stream.Ptr);
 }
Example #29
 /// <summary>
 /// Builds the projection maps according to the given camera data.
 /// </summary>
 /// <param name="srcSize">Source image size</param>
 /// <param name="K">Camera intrinsic parameters</param>
 /// <param name="R">Camera rotation matrix</param>
 /// <param name="xmap">Projection map for the x axis</param>
 /// <param name="ymap">Projection map for the y axis</param>
 /// <returns>Projected image minimum bounding box</returns>
 public Rectangle BuildMaps(Size srcSize, IInputArray K, IInputArray R, IOutputArray xmap, IOutputArray ymap)
 {
    Rectangle result = new Rectangle();
    using (InputArray iaK = K.GetInputArray())
    using (InputArray iaR = R.GetInputArray())
    using (OutputArray oaXmap = xmap.GetOutputArray())
    using (OutputArray oaYmap = ymap.GetOutputArray())
    {
       StitchingInvoke.cveRotationWarperBuildMaps(_rotationWarper, ref srcSize, iaK, iaR, oaXmap, oaYmap, ref result);
       return result;
    }
 }
Example #30
 /// <summary>
 /// Finds perspective transformation H=||hij|| between the source and the destination planes
 /// </summary>
 /// <param name="srcPoints">Point coordinates in the original plane, 2xN, Nx2, 3xN or Nx3 array (the latter two are for representation in homogeneous coordinates), where N is the number of points. </param>
 /// <param name="dstPoints">Point coordinates in the destination plane, 2xN, Nx2, 3xN or Nx3 array (the latter two are for representation in homogeneous coordinates) </param>
 /// <param name="method">The type of the method</param>
 /// <param name="ransacReprojThreshold">The maximum allowed re-projection error to treat a point pair as an inlier. The parameter is only used in RANSAC-based homography estimation. E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3</param>
 /// <param name="mask">The optional output mask set by a robust method (RANSAC or LMEDS). </param>
 /// <param name="homography">Output 3x3 homography matrix. Homography matrix is determined up to a scale, thus it is normalized to make h33=1</param>
 public static void FindHomography(
    IInputArray srcPoints,
    IInputArray dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method = CvEnum.HomographyMethod.Default,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
 {
    using (InputArray iaSrcPoints = srcPoints.GetInputArray())
    using (InputArray iaDstPoints = dstPoints.GetInputArray())
    using (OutputArray oaHomography = homography.GetOutputArray())
    using (OutputArray oaMask = mask == null ? OutputArray.GetEmpty() : mask.GetOutputArray())
       cveFindHomography(iaSrcPoints, iaDstPoints, oaHomography, method, ransacReprojThreshold, oaMask);
 }
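A sketch for FindHomography estimating the transform from matched point pairs with RANSAC; VectorOfPointF implements IInputArray, so point arrays can be passed directly (coordinates below are illustrative):

 PointF[] srcPts = { new PointF(0, 0), new PointF(100, 0), new PointF(100, 100), new PointF(0, 100), new PointF(50, 50) };
 PointF[] dstPts = { new PointF(10, 10), new PointF(108, 12), new PointF(112, 110), new PointF(8, 105), new PointF(60, 58) };
 Mat homography = new Mat();
 using (VectorOfPointF vpSrc = new VectorOfPointF(srcPts))
 using (VectorOfPointF vpDst = new VectorOfPointF(dstPts))
    FindHomography(vpSrc, vpDst, homography, CvEnum.HomographyMethod.Ransac, 3);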
Example #31
 /// <summary>
 /// The function computes orientation from edge image.
 /// </summary>
 /// <param name="src">Edge image.</param>
 /// <param name="dst">Orientation image.</param>
 public void ComputeOrientation(IInputArray src, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             XImgprocInvoke.cveStructuredEdgeDetectionComputeOrientation(_ptr, iaSrc, oaDst);
 }
Example #32
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="opticalFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as prevImg.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found.Otherwise, it is set to 0.</param>
 /// <param name="error">Optional output vector that contains error response for each point (inverse confidence).</param>
 public static void Calc(
     this ISparseOpticalFlow opticalFlow,
     IInputArray prevImg, IInputArray nextImg,
     IInputArray prevPts, IInputOutputArray nextPts,
     IOutputArray status,
     IOutputArray error = null
     )
 {
     using (InputArray iaPreImg = prevImg.GetInputArray())
         using (InputArray iaNextImg = nextImg.GetInputArray())
             using (InputArray iaPrevPts = prevPts.GetInputArray())
                 using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
                     using (OutputArray oaStatus = status.GetOutputArray())
                         using (OutputArray oaError = error == null ? OutputArray.GetEmpty() : error.GetOutputArray())
                             CvInvoke.cveSparseOpticalFlowCalc(
                                 opticalFlow.SparseOpticalFlowPtr,
                                 iaPreImg, iaNextImg,
                                 iaPrevPts, ioaNextPts,
                                 oaStatus, oaError
                                 );
 }
Example #33
 /// <summary>
 /// Converts an array to another data type with optional scaling.
 /// </summary>
 /// <param name="m">Output matrix; if it does not have a proper size or type before the operation, it is reallocated.</param>
 /// <param name="rtype">Desired output matrix type or, rather, the depth since the number of channels are the same as the input has; if rtype is negative, the output matrix will have the same type as the input.</param>
 /// <param name="alpha">Optional scale factor.</param>
 /// <param name="beta">Optional delta added to the scaled values.</param>
 public void ConvertTo(IOutputArray m, CvEnum.DepthType rtype, double alpha = 1.0, double beta = 0.0)
 {
     using (OutputArray oaM = m.GetOutputArray())
         UMatInvoke.cveUMatConvertTo(Ptr, oaM, rtype, alpha, beta);
 }
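A sketch for ConvertTo scaling an assumed existing 8-bit UMat (byteImg) into a float matrix in [0, 1]:

 UMat floatImg = new UMat();
 byteImg.ConvertTo(floatImg, CvEnum.DepthType.Cv32F, 1.0 / 255.0);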
 /// <summary>
 /// Computes a background image.
 /// </summary>
 /// <param name="backgroundImage">The output background image</param>
 /// <remarks>Sometimes the background image can be very blurry, as it contains the average background statistics.</remarks>
 public static void GetBackgroundImage(this IBackgroundSubtractor substractor, IOutputArray backgroundImage)
 {
     using (OutputArray oaBackgroundImage = backgroundImage.GetOutputArray())
         CvInvoke.cveBackgroundSubtractorGetBackgroundImage(substractor.BackgroundSubtractorPtr, oaBackgroundImage);
 }
Example #35
 /// <summary>
 /// Render the plot to the resulting Mat
 /// </summary>
 /// <param name="result">The output plot</param>
 public void Render(IOutputArray result)
 {
     using (OutputArray oaResult = result.GetOutputArray())
         PlotInvoke.cvePlot2dRender(_ptr, oaResult);
 }
Example #36
        /// <summary>
        /// This function is an extended version of cvInitUndistortMap. That is, in addition to the correction of lens distortion, the function can also apply an arbitrary perspective transformation R and, finally, scale and shift the image according to the new camera matrix
        /// </summary>
        /// <param name="cameraMatrix">The camera matrix A=[fx 0 cx; 0 fy cy; 0 0 1]</param>
        /// <param name="distCoeffs">The vector of distortion coefficients, 4x1, 1x4, 5x1 or 1x5</param>
        /// <param name="R">The rectification transformation in object space (3x3 matrix). R1 or R2, computed by cvStereoRectify can be passed here. If the parameter is IntPtr.Zero, the identity matrix is used</param>
        /// <param name="newCameraMatrix">The new camera matrix A'=[fx' 0 cx'; 0 fy' cy'; 0 0 1]</param>
        /// <param name="depthType">Depth type of the first output map that can be CV_32FC1 or CV_16SC2 .</param>
        /// <param name="map1">The first output map.</param>
        /// <param name="map2">The second output map.</param>
        /// <param name="size">Undistorted image size.</param>
        public static void InitUndistortRectifyMap(
            IInputArray cameraMatrix,
            IInputArray distCoeffs,
            IInputArray R,
            IInputArray newCameraMatrix,
            Size size,
            CvEnum.DepthType depthType,
            IOutputArray map1,
            IOutputArray map2 = null)
        {
            int channels = map2 == null ? 2 : 1;

            using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
                using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
                    using (InputArray iaR = R == null ? InputArray.GetEmpty() : R.GetInputArray())
                        using (InputArray iaNewCameraMatrix = newCameraMatrix.GetInputArray())
                            using (OutputArray oaMap1 = map1.GetOutputArray())
                                using (OutputArray oaMap2 = map2 == null ? OutputArray.GetEmpty() : map2.GetOutputArray())
                                    cveInitUndistortRectifyMap(
                                        iaCameraMatrix,
                                        iaDistCoeffs,
                                        iaR,
                                        iaNewCameraMatrix,
                                        ref size,
                                        CvInvoke.MakeType(depthType, channels),
                                        oaMap1, oaMap2);
        }
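A sketch of the intended pattern for InitUndistortRectifyMap: build the maps once, then undistort every frame with CvInvoke.Remap; cameraMatrix, distCoeffs, imageSize and frame are assumed to come from a prior calibration and capture:

    Mat map1 = new Mat(), map2 = new Mat();
    // null R: identity rectification; passing map2 selects single-channel maps
    InitUndistortRectifyMap(cameraMatrix, distCoeffs, null, cameraMatrix, imageSize, CvEnum.DepthType.Cv32F, map1, map2);
    Mat undistorted = new Mat();
    CvInvoke.Remap(frame, undistorted, map1, map2, CvEnum.Inter.Linear);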
Example #37
 /// <summary>
 /// Apply the cuda filter
 /// </summary>
 /// <param name="image">The source CudaImage where the filter will be applied to</param>
 /// <param name="dst">The destination CudaImage</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray image, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaFilterApply(_ptr, iaImage, oaDst, stream);
 }
Example #38
 /// <summary>
 /// Finds the neighbors and predicts responses for input vectors.
 /// </summary>
 /// <param name="samples">Input samples stored by rows. It is a single-precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <param name="k">Number of used nearest neighbors. Should be greater than 1.</param>
 /// <param name="results">Vector with results of prediction (regression or classification) for each input sample. It is a single-precision floating-point vector with &lt;number_of_samples&gt; elements.</param>
 /// <param name="neighborResponses">Optional output values for corresponding neighbors. It is a single- precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <param name="dist">Optional output distances from the input vectors to the corresponding neighbors. It is a single-precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <returns>If only a single input vector is passed, the predicted value is returned by the method.</returns>
 public float FindNearest(
     IInputArray samples,
     int k,
     IOutputArray results,
     IOutputArray neighborResponses = null,
     IOutputArray dist = null)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (OutputArray oaResults = results.GetOutputArray())
             using (OutputArray oaNeighborResponses = neighborResponses == null ? OutputArray.GetEmpty() : neighborResponses.GetOutputArray())
                 using (OutputArray oaDist = dist == null ? OutputArray.GetEmpty() : dist.GetOutputArray())
                 {
                     return(MlInvoke.cveKNearestFindNearest(
                                _ptr,
                                iaSamples,
                                k,
                                oaResults,
                                oaNeighborResponses,
                                oaDist));
                 }
 }
Example #39
 /// <summary>
 /// Draw a GridBoard.
 /// </summary>
 /// <param name="outSize">size of the output image in pixels.</param>
 /// <param name="img">output image with the board. The size of this image will be outSize and the board will be on the center, keeping the board proportions.</param>
 /// <param name="marginSize">minimum margins (in pixels) of the board in the output image</param>
 /// <param name="borderBits">width of the marker borders.</param>
 public void Draw(Size outSize, IOutputArray img, int marginSize = 0, int borderBits = 1)
 {
     using (OutputArray oaImg = img.GetOutputArray())
         ArucoInvoke.cveArucoGridBoardDraw(_ptr, ref outSize, oaImg, marginSize, borderBits);
 }
 /// <summary>
 /// Get the next frame
 /// </summary>
 public void NextFrame(IOutputArray frame)
 {
     using (OutputArray oaFrame = frame.GetOutputArray())
         SuperresInvoke.cvSuperresFrameSourceNextFrame(_frameSourcePtr, oaFrame);
 }
Example #41
 /// <summary>
 /// Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
 /// </summary>
 /// <param name="source">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray source, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaSource = source.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaCLAHEApply(_ptr, iaSource, oaDst, stream);
 }
Example #42
 /// <summary>
 /// First calls the Grab() function, followed by Retrieve()
 /// </summary>
 /// <param name="m">The output array where the image will be read into.</param>
 /// <returns>False if no frames have been grabbed</returns>
 public bool Read(IOutputArray m)
 {
     using (OutputArray oaM = m.GetOutputArray())
         return(CvInvoke.cveVideoCaptureRead(Ptr, oaM));
 }
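A sketch for Read pulling frames from the default camera:

 using (VideoCapture capture = new VideoCapture(0))
 using (Mat frame = new Mat())
 {
    while (capture.Read(frame))
    {
       // process frame here; add your own exit condition for live cameras
    }
 }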
 /// <summary>
 /// Update the background model
 /// </summary>
 /// <param name="image">The image that is used to update the background model</param>
 /// <param name="learningRate">Use -1 for default</param>
 /// <param name="fgMask">The output foreground mask</param>
 public static void Apply(this IBackgroundSubtractor substractor, IInputArray image, IOutputArray fgMask, double learningRate = -1)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaFgMask = fgMask.GetOutputArray())
             CvInvoke.cveBackgroundSubtractorUpdate(substractor.BackgroundSubtractorPtr, iaImage, oaFgMask, learningRate);
 }
Example #44
 /// <summary>
 /// Finds centers in the grid of circles
 /// </summary>
 /// <param name="image">Source chessboard view</param>
 /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
 /// <param name="flags">Various operation flags</param>
 /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
 /// <param name="centers">output array of detected centers.</param>
 /// <returns>True if grid found.</returns>
 public static bool FindCirclesGrid(IInputArray image, Size patternSize, IOutputArray centers, CvEnum.CalibCgType flags, Feature2D featureDetector)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCenters = centers.GetOutputArray())
             return(cveFindCirclesGrid(iaImage, ref patternSize, oaCenters, flags, featureDetector.Feature2DPtr));
 }
Example #45
 /// <summary>
 /// Returns the mask of the superpixel segmentation stored in SuperpixelLSC object.
 /// </summary>
 /// <param name="image">Return: CV_8U1 image mask where -1 indicates that the pixel is a superpixel border, and 0 otherwise.</param>
 /// <param name="thickLine">If false, the border is only one pixel wide, otherwise all pixels at the border are masked.</param>
 public void GetLabelContourMask(IOutputArray image, bool thickLine = true)
 {
     using (OutputArray oaImage = image.GetOutputArray())
         XImgprocInvoke.cveSuperpixelLSCGetLabelContourMask(_ptr, oaImage, thickLine);
 }
Example #46
 /// <summary>
 /// Computes features by input image.
 /// </summary>
 /// <param name="image">Input image (CV_32FC1)</param>
 /// <param name="features">Feature vector (CV_32FC1)</param>
 public void Compute(IInputArray image, IOutputArray features)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaFeatures = features.GetOutputArray())
             FaceInvoke.cveBIFCompute(_ptr, iaImage, oaFeatures);
 }
Example #47
 /// <summary>
 /// Returns the segmentation labeling of the image.
 /// Each label represents a superpixel, and each pixel is assigned to one superpixel label.
 /// </summary>
 /// <param name="labels">A CV_32SC1 integer array containing the labels of the superpixel segmentation. The labels are in the range [0, NumberOfSuperpixels].</param>
 public void GetLabels(IOutputArray labels)
 {
     using (OutputArray oaLabels = labels.GetOutputArray())
         XImgprocInvoke.cveSuperpixelLSCGetLabels(_ptr, oaLabels);
 }
Example #48
 /// <summary>
 /// Read point cloud from file
 /// </summary>
 /// <param name="file">The point cloud file</param>
 /// <param name="colors">The color of the points</param>
 /// <param name="normals">The normal of the points</param>
 /// <returns>The points</returns>
 public static Mat ReadCloud(String file, IOutputArray colors = null, IOutputArray normals = null)
 {
     using (CvString cs = new CvString(file))
         using (OutputArray oaColors = colors == null ? OutputArray.GetEmpty() : colors.GetOutputArray())
             using (OutputArray oaNormals = normals == null ? OutputArray.GetEmpty() : normals.GetOutputArray())
             {
                 Mat cloud = new Mat();
                 cveReadCloud(cs, cloud, oaColors, oaNormals);
                 return(cloud);
             }
 }
Example #49
        /// <summary>
        /// Computes an optimal limited affine transformation with 4 degrees of freedom between two 2D point sets.
        /// </summary>
        /// <param name="from">First input 2D point set.</param>
        /// <param name="to">Second input 2D point set.</param>
        /// <param name="inliners">Output vector indicating which points are inliers.</param>
        /// <param name="method">Robust method used to compute transformation.</param>
        /// <param name="ransacReprojThreshold">Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. Applies only to RANSAC.</param>
        /// <param name="maxIters">The maximum number of robust method iterations.</param>
        /// <param name="confidence">Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</param>
        /// <param name="refineIters">Maximum number of iterations of refining algorithm (Levenberg-Marquardt). Passing 0 will disable refining, so the output matrix will be output of robust method.</param>
        /// <returns>Output 2D affine transformation (4 degrees of freedom) matrix 2×3 or empty matrix if transformation could not be estimated.</returns>
        public static Mat EstimateAffinePartial2D(
            IInputArray from, IInputArray to,
            IOutputArray inliners,
            CvEnum.RobustEstimationAlgorithm method,
            double ransacReprojThreshold,
            int maxIters, double confidence,
            int refineIters)
        {
            Mat affine = new Mat();

            using (InputArray iaFrom = from.GetInputArray())
                using (InputArray iaTo = to.GetInputArray())
                    using (OutputArray oaInliners = inliners == null ? OutputArray.GetEmpty() : inliners.GetOutputArray())
                    {
                        cveEstimateAffinePartial2D(
                            iaFrom,
                            iaTo,
                            oaInliners,
                            method,
                            ransacReprojThreshold,
                            maxIters,
                            confidence,
                            refineIters,
                            affine
                            );
                    }
            return(affine);
        }
Example #50
 /// <summary>
 /// Compute the panoramic images given the images
 /// </summary>
 /// <param name="images">The input images. This can be, for example, a VectorOfMat</param>
 /// <param name="pano">The panoramic image</param>
 /// <returns>true if successful</returns>
 public bool Stitch(IInputArray images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return(StitchingInvoke.CvStitcherStitch(_ptr, iaImages, oaPano));
 }
Example #51
 /// <summary>
 /// Applies a GNU Octave/MATLAB equivalent colormap on a given image.
 /// </summary>
 /// <param name="src"> The source image, grayscale or colored of type CV_8UC1 or CV_8UC3</param>
 /// <param name="dst"> The result is the colormapped source image</param>
 /// <param name="colorMapType">The type of color map</param>
 public static void ApplyColorMap(IInputArray src, IOutputArray dst, CvEnum.ColorMapType colorMapType)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveApplyColorMap1(iaSrc, oaDst, colorMapType);
 }
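A one-line sketch for ApplyColorMap, assuming gray is an existing 8-bit single-channel Mat:

 Mat colored = new Mat();
 CvInvoke.ApplyColorMap(gray, colored, CvEnum.ColorMapType.Jet);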
 /// <summary>
 /// Updates the background model
 /// </summary>
 /// <param name="frame">Next video frame.</param>
 /// <param name="learningRate">The learning rate, use -1.0f for default value.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray frame, IOutputArray forgroundMask, double learningRate = -1, Stream stream = null)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         using (OutputArray oaForgroundMask = forgroundMask.GetOutputArray())
             CudaInvoke.cudaBackgroundSubtractorGMGApply(_ptr, iaFrame, oaForgroundMask, learningRate, stream);
 }
Example #53
 /// <summary>
 /// Copy the data in this UMat to another array
 /// </summary>
 /// <param name="m">The destination array to copy to</param>
 /// <param name="mask">Operation mask. Its non-zero elements indicate which matrix elements need to be copied.</param>
 public void CopyTo(IOutputArray m, IInputArray mask = null)
 {
     using (OutputArray oaM = m.GetOutputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             UMatInvoke.cveUMatCopyTo(this, oaM, iaMask);
 }
 /// <summary>
 /// Finds line segments in a binary image using the probabilistic Hough transform.
 /// </summary>
 /// <param name="image">8-bit, single-channel binary source image</param>
 /// <param name="lines">Output vector of lines. Each line is represented by a 4-element vector (x1, y1, x2, y2) , where (x1, y1) and (x2, y2) are the ending points of each detected line segment.</param>
 public void Detect(IInputArray image, IOutputArray lines, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaLines = lines.GetOutputArray())
             CudaInvoke.cudaHoughSegmentDetectorDetect(_ptr, iaImage, oaLines, stream);
 }
Example #55
 /// <summary>
 /// Applies white balancing to the input image.
 /// </summary>
 /// <param name="src">Input image</param>
 /// <param name="dst">White balancing result</param>
 public void BalanceWhite(IInputArray src, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             XPhotoInvoke.cveWhiteBalancerBalanceWhite(_whiteBalancerPtr, iaSrc, oaDst);
 }
Example #56
 /// <summary>
 /// Transform the image using the lookup table
 /// </summary>
 /// <param name="image">The image to be transformed</param>
 /// <param name="dst">The transformation result</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Transform(IInputArray image, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaLookUpTableTransform(_ptr, iaImage, oaDst, stream);
 }
 /// <summary>
 /// Get the reliability map computed from the wrapped phase map.
 /// </summary>
 /// <param name="reliabilityMap">Image where the reliability map is stored.</param>
 public void GetInverseReliabilityMap(IOutputArray reliabilityMap)
 {
     using (OutputArray oaReliabilityMap = reliabilityMap.GetOutputArray())
         PhaseUnwrappingInvoke.cveHistogramPhaseUnwrappingGetInverseReliabilityMap(_ptr, oaReliabilityMap);
 }
Example #58
 /// <summary>
 /// Calculate the square root of each source array element. In the case of multichannel
 /// arrays each channel is processed independently. The function accuracy is approximately
 /// the same as of the built-in std::sqrt.
 /// </summary>
 /// <param name="src">The source floating-point array</param>
 /// <param name="dst">The destination array; will have the same size and the same type as src</param>
 public static void Sqrt(IInputArray src, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveSqrt(iaSrc, oaDst);
 }
Example #59
 /// <summary>
 /// Contrast Limited Adaptive Histogram Equalization (CLAHE)
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="clipLimit">Clip Limit, use 40 for default</param>
 /// <param name="tileGridSize">Tile grid size, use (8, 8) for default</param>
 /// <param name="dst">The destination image</param>
 public static void CLAHE(IInputArray src, double clipLimit, Size tileGridSize, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveCLAHE(iaSrc, clipLimit, ref tileGridSize, oaDst);
 }
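A sketch for CLAHE spelling out the documented defaults, assuming gray is an existing 8-bit single-channel Mat:

 Mat equalized = new Mat();
 CvInvoke.CLAHE(gray, 40, new Size(8, 8), equalized);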
Example #60
 /// <summary>
 /// Draw a canonical marker image.
 /// </summary>
 /// <param name="dict">dictionary of markers indicating the type of markers</param>
 /// <param name="id">identifier of the marker that will be returned. It has to be a valid id in the specified dictionary.</param>
 /// <param name="sidePixels">size of the image in pixels</param>
 /// <param name="img">output image with the marker</param>
 /// <param name="borderBits">width of the marker border.</param>
 public static void DrawMarker(Dictionary dict, int id, int sidePixels, IOutputArray img, int borderBits = 1)
 {
     using (OutputArray oaImg = img.GetOutputArray())
         cveArucoDrawMarker(dict, id, sidePixels, oaImg, borderBits);
 }
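A sketch for DrawMarker rendering a single marker, assuming Emgu CV's Aruco Dictionary wrapper and its predefined-dictionary enum:

 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_100))
 using (Mat marker = new Mat())
 {
    DrawMarker(dict, 23, 200, marker); // 200x200-pixel image of marker id 23
 }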