Example #1
 /// <summary>
 /// Applies an affine transformation to an image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="mapMatrix">2x3 transformation matrix</param>
 /// <param name="dsize">Size of the output image.</param>
 /// <param name="interpMethod">Interpolation method</param>
 /// <param name="warpMethod">Warp method</param>
 /// <param name="borderMode">Pixel extrapolation method</param>
 /// <param name="borderValue">A value used to fill outliers</param>
 public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaMapMatrix = mapMatrix.GetInputArray())
       cveWarpAffine(iaSrc, oaDst, iaMapMatrix, ref dsize, (int)interpMethod | (int)warpMethod, borderMode, ref borderValue);
 }
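A hedged usage sketch for the call above: rotate an image 30 degrees about its center. The file name is a placeholder, and CvInvoke.GetRotationMatrix2D is assumed to be available for building the 2x3 matrix.
 // Usage sketch (assumptions noted above): rotate an image about its center.
 using (Mat src = CvInvoke.Imread("input.png"))
 using (Mat dst = new Mat())
 using (Mat rotation = new Mat())
 {
    PointF center = new PointF(src.Width / 2f, src.Height / 2f);
    CvInvoke.GetRotationMatrix2D(center, 30, 1.0, rotation); // build the 2x3 affine matrix
    CvInvoke.WarpAffine(src, dst, rotation, src.Size);       // defaults: linear interpolation, constant border
 }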
Example #2
 /// <summary>
 /// Reconstructs the selected image area from the pixel near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video.
 /// </summary>
 /// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
 /// <param name="mask">The inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted</param>
 /// <param name="dst">The output image of the same format and the same size as input</param>
 /// <param name="flags">The inpainting method</param>
 /// <param name="inpaintRadius">The radius of circular neighborhood of each point inpainted that is considered by the algorithm</param>
 public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaMask = mask.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveInpaint(iaSrc, iaMask, oaDst, inpaintRadius, flags);
 }
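A hedged usage sketch: build an 8-bit single-channel mask that is non-zero over the defect and run Telea inpainting. The file name and the rectangle are placeholders.
 // Usage sketch: reconstruct a small rectangular defect.
 using (Mat src = CvInvoke.Imread("photo.png"))
 using (Mat mask = new Mat(src.Size, DepthType.Cv8U, 1))
 using (Mat dst = new Mat())
 {
    mask.SetTo(new MCvScalar(0));                                                    // start with an empty mask
    CvInvoke.Rectangle(mask, new Rectangle(40, 40, 30, 30), new MCvScalar(255), -1); // mark the area to repair
    CvInvoke.Inpaint(src, mask, dst, 3, CvEnum.InpaintType.Telea);                   // 3-pixel neighborhood radius
 }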
Example #3
      /// <summary>
      /// Saves the image to the specified file. The image format is chosen depending on the filename extension, see cvLoadImage. Only 8-bit single-channel or 3-channel (with 'BGR' channel order) images can be saved using this function. If the format, depth or channel order is different, use cvCvtScale and cvCvtColor to convert it before saving, or use universal cvSave to save the image to XML or YAML format
      /// </summary>
      /// <param name="filename">The name of the file to be saved to</param>
      /// <param name="image">The image to be saved</param>
      /// <param name="parameters">The parameters</param>
      /// <returns>true if success</returns>
      public static bool Imwrite(String filename, IInputArray image, params int[] parameters)
      {
         using (Util.VectorOfInt vec = new Util.VectorOfInt())
         {
            if (parameters.Length > 0)
               vec.Push(parameters);
            using (CvString s = new CvString(filename))
            using (InputArray iaImage = image.GetInputArray())
            {
#if !(__IOS__ || __ANDROID__ || NETFX_CORE)
               bool containsUnicode = (s.Length != filename.Length);
               if (containsUnicode &&
                   (Emgu.Util.Platform.OperationSystem != OS.MacOSX) &&
                   (Emgu.Util.Platform.OperationSystem != OS.Linux))
               {
                  //Handle unicode in Windows platform
                  //Work around for Open CV ticket:
                  //https://github.com/Itseez/opencv/issues/4292
                  //https://github.com/Itseez/opencv/issues/4866     
                  System.IO.FileInfo fi = new System.IO.FileInfo(filename);

                  using (VectorOfByte vb = new VectorOfByte())
                  {
                     CvInvoke.Imencode(fi.Extension, image, vb, parameters);
                     byte[] arr = vb.ToArray();
                     System.IO.File.WriteAllBytes(filename, arr);
                     return true;
                  }
               }
               else
#endif
                  return cveImwrite(s, iaImage, vec);
            }
         }
      }
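The params array maps directly to OpenCV's imwrite flags. A hedged sketch, assuming img is an existing Mat and that CvEnum.ImwriteFlags exposes JpegQuality; the file name is a placeholder.
 // Usage sketch: save as JPEG with quality 90.
 bool ok = CvInvoke.Imwrite("output.jpg", img, (int)CvEnum.ImwriteFlags.JpegQuality, 90);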
 /// <summary>
 /// Set the SVM detector 
 /// </summary>
 /// <param name="detector">The SVM detector</param>
 public void SetSVMDetector(IInputArray detector)
 {
    using (InputArray iaDetector = detector.GetInputArray())
    {
       CudaInvoke.cudaHOGSetSVMDetector(_ptr, iaDetector);
    }
 }
Example #5
 /// <summary>
 /// Detects objects of different sizes in the input image.
 /// </summary>
 /// <param name="image">Matrix of type CV_8U containing an image where objects should be detected.</param>
 /// <param name="objects">Buffer to store detected objects (rectangles).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void DetectMultiScale(IInputArray image, IOutputArray objects, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaObjects = objects.GetOutputArray())
       CudaInvoke.cudaCascadeClassifierDetectMultiScale(_ptr, iaImage, oaObjects,
          stream == null ? IntPtr.Zero : stream.Ptr);
 }
 /// <summary>
 /// Computes disparity map for the specified stereo pair
 /// </summary>
 /// <param name="matcher">The stereo matcher</param>
 /// <param name="left">Left 8-bit single-channel image.</param>
 /// <param name="right">Right image of the same size and the same type as the left one.</param>
 /// <param name="disparity">Output disparity map. It has the same size as the input images. Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map</param>
 public static void Compute(this IStereoMatcher matcher, IInputArray left, IInputArray right, IOutputArray disparity)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CvStereoMatcherCompute(matcher.StereoMatcherPtr, iaLeft, iaRight, oaDisparity);
 }
 public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
 {
    using (InputArray iaI0 = i0.GetInputArray())
    using (InputArray iaI1 = i1.GetInputArray())
    using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
       cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, iaI0, iaI1, ioaFlow, (stream == null) ?  IntPtr.Zero : stream.Ptr);
 }
Example #8
      /// <summary>
       /// Given the left and right image, compute the disparity map and the 3D point cloud.
      /// </summary>
      /// <param name="left">The left image</param>
      /// <param name="right">The right image</param>
      /// <param name="outputDisparityMap">The left disparity map</param>
      /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
      private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, Mat points)
      {
         Size size;
         using (InputArray ia = left.GetInputArray())
            size = ia.GetSize();

         using (StereoBM stereoSolver = new StereoBM())
         {
            stereoSolver.Compute(left, right, outputDisparityMap);

            float scale = Math.Max(size.Width, size.Height);

            //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead
            using (Matrix<double> q = new Matrix<double>(
               new double[,]
               {
                  {1.0, 0.0, 0.0, -size.Width/2}, //shift the x origin to image center
                  {0.0, -1.0, 0.0, size.Height/2}, //shift the y origin to image center and flip it upside down
                  {0.0, 0.0, -1.0, 0.0}, //Multiply the z value by -1.0, 
                  {0.0, 0.0, 0.0, scale}
               })) //scale the object's coordinate to within a [-0.5, 0.5] cube
            {
               
               CvInvoke.ReprojectImageTo3D(outputDisparityMap, points, q, false, DepthType.Cv32F);
               
            }
            //points = PointCollection.ReprojectImageTo3D(outputDisparityMap, q);
         }
      }
Example #9
      /*
      /// <summary>
      /// Create an auto-tuned flann index
      /// </summary>
      /// <param name="values">A row by row matrix of descriptors</param>
      /// <param name="targetPrecision">Precision desired, use 0.9 if not sure</param>
      /// <param name="buildWeight">build tree time weighting factor, use 0.01 if not sure</param>
      /// <param name="memoryWeight">index memory weighting factor, use 0 if not sure</param>
      /// <param name="sampleFraction">what fraction of the dataset to use for autotuning, use 0.1 if not sure</param>
      public Index(IInputArray values, float targetPrecision, float buildWeight, float memoryWeight, float sampleFraction)
      {
         using (InputArray iaValues = values.GetInputArray())
            _ptr = CvFlannIndexCreateAutotuned(iaValues, targetPrecision, buildWeight, memoryWeight, sampleFraction);
      }*/
      #endregion

      /// <summary>
      /// Perform k-nearest-neighbours (KNN) search
      /// </summary>
      /// <param name="queries">A row by row matrix of descriptors to be query for nearest neighbours</param>
      /// <param name="indices">The result of the indices of the k-nearest neighbours</param>
      /// <param name="squareDistances">The square of the Eculidean distance between the neighbours</param>
      /// <param name="knn">Number of nearest neighbors to search for</param>
      /// <param name="checks">The number of times the tree(s) in the index should be recursively traversed. A
      /// higher value for this parameter would give better search precision, but also take more
      /// time. If automatic configuration was used when the index was created, the number of
      /// checks required to achieve the specified precision was also computed, in which case
      /// this parameter is ignored </param>
      public void KnnSearch(IInputArray queries, IOutputArray indices, IOutputArray squareDistances, int knn, int checks)
      {
         using (InputArray iaQueries = queries.GetInputArray())
         using (OutputArray oaIndices = indices.GetOutputArray())
         using (OutputArray oaSquareDistances = squareDistances.GetOutputArray())
         CvFlannIndexKnnSearch(_ptr, iaQueries, oaIndices, oaSquareDistances, knn, checks);
      }
 /// <summary>
 /// Apply the filter to the disparity image
 /// </summary>
 /// <param name="disparity">The input disparity map</param>
 /// <param name="image">The image</param>
 /// <param name="dst">The output disparity map, should have the same size as the input disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray disparity, IInputArray image, IOutputArray dst, Stream stream = null)
 {
    using (InputArray iaDisparity = disparity.GetInputArray())
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       CudaInvoke.cudaDisparityBilateralFilterApply(this, iaDisparity, iaImage, oaDst, stream);
 }
Example #11
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
       Feature2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
Example #12
 /// <summary>
 /// Calculates an optical flow.
 /// </summary>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as prev.</param>
 /// <param name="flow">Computed flow image that has the same size as prev and type CV_32FC2 </param>
 /// <param name="opticalFlow">The dense optical flow object</param>
 public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
    using (InputArray iaI0 = i0.GetInputArray())
    using (InputArray iaI1 = i1.GetInputArray())
    using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
       CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, iaI0, iaI1, ioaFlow);
 }
Example #13
 /// <summary>
  ///  This function is similar to cvCalcBackProjectPatch. It slides through the image, compares overlapped patches of size w x h with templ using the specified method and stores the comparison results in result
 /// </summary>
 /// <param name="image">Image where the search is running. It should be 8-bit or 32-bit floating-point</param>
 /// <param name="templ">Searched template; must be not greater than the source image and the same data type as the image</param>
 /// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be W-w+1xH-h+1.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>  
 public void Match(IInputArray image, IInputArray templ, IOutputArray result, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaTempl = templ.GetInputArray())
    using (OutputArray oaResult = result.GetOutputArray())
       CudaInvoke.cudaTemplateMatchingMatch(_ptr, iaImage, iaTempl, oaResult, stream);
 }
 /// <summary>
 /// Find the k-nearest match
 /// </summary>
 /// <param name="queryDescriptors">An n x m matrix of descriptors to be query for nearest neighbors. n is the number of descriptor and m is the size of the descriptor</param>
 /// <param name="k">Number of nearest neighbors to search for</param>
 /// <param name="mask">Can be null if not needed. An n x 1 matrix. If 0, the query descriptor in the corresponding row will be ignored.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public void KnnMatch(IInputArray queryDescriptors, IInputArray trainDescriptors, VectorOfVectorOfDMatch matches, int k, IInputArray mask = null, bool compactResult = false)
 {
    using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
    using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray() )
    using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
       CudaInvoke.cveCudaDescriptorMatcherKnnMatch(_ptr, iaQueryDescriptors, iaTrainDescriptors, matches, k, iaMask, compactResult);
 }
Example #15
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaKeypoints = keypoints.GetOutputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
       CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, iaMask, stream);
 }
Example #16
 /// <summary>
 /// Computes disparity map for the input rectified stereo pair.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(IInputArray left, IInputArray right, IOutputArray disparity, Stream stream = null)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CudaInvoke.cudaStereoBMFindStereoCorrespondence(_ptr, iaLeft, iaRight, oaDisparity, stream);
 }
  /// <summary>
  /// Find the good features to track
  /// </summary>
  /// <param name="image">The image from which the corners will be detected</param>
  /// <param name="corners">The detected corners</param>
  /// <param name="mask">The optional mask, can be null if not needed</param>
  /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Detect(IInputArray image, IOutputArray corners, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaCorners = corners.GetOutputArray())
     using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
       CudaInvoke.cudaCornersDetectorDetect(_ptr, iaImage, oaCorners, iaMask, stream);
 }
Example #18
 public static void Convert(this IFeature2DAsync feature2DAsync, IInputArray gpuKeypoints,
    VectorOfKeyPoint keypoints)
 {
    using (InputArray iaGpuKeypoints = gpuKeypoints.GetInputArray())
    {
       CudaInvoke.cveCudaFeature2dAsyncConvert(feature2DAsync.Feature2DAsyncPtr, iaGpuKeypoints, keypoints);
    }
 }
Example #19
 /// <summary>
 /// Create DAISY descriptor extractor
 /// </summary>
 /// <param name="radius">Radius of the descriptor at the initial scale.</param>
 /// <param name="qRadius">Amount of radial range division quantity.</param>
 /// <param name="qTheta">Amount of angular range division quantity.</param>
 /// <param name="qHist">Amount of gradient orientations range division quantity.</param>
 /// <param name="norm">Descriptors normalization type.</param>
 /// <param name="H">optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image</param>
 /// <param name="interpolation">Switch to disable interpolation for speed improvement at minor quality loss</param>
 /// <param name="useOrientation">Sample patterns using keypoints orientation, disabled by default.</param>
 public DAISY(float radius = 15, int qRadius = 3, int qTheta = 8,
    int qHist = 8, NormalizationType norm = NormalizationType.None, IInputArray H = null,
    bool interpolation = true, bool useOrientation = false)
 {
    using (InputArray iaH = H == null ? InputArray.GetEmpty() : H.GetInputArray())
       _ptr = ContribInvoke.cveDAISYCreate(radius, qRadius, qTheta, qHist, norm, iaH, interpolation, useOrientation,
          ref _feature2D);
 }
Example #20
 /// <summary>
 /// Iterates to find the object center given its back projection and initial position of search window. The iterations are made until the search window center moves by less than the given value and/or until the function has done the maximum number of iterations. 
 /// </summary>
 /// <param name="probImage">Back projection of object histogram</param>
 /// <param name="window">Initial search window</param>
 /// <param name="criteria">Criteria applied to determine when the window search should be finished. </param>
 /// <returns>The number of iterations made</returns>
 public static int MeanShift(
    IInputArray probImage,
    ref Rectangle window,
    MCvTermCriteria criteria)
 {
    using (InputArray iaProbImage = probImage.GetInputArray())
       return cveMeanShift(iaProbImage, ref window, ref criteria);
 }
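A minimal tracking sketch for one frame, assuming backProjection is an 8-bit back-projection image computed elsewhere (e.g. with CvInvoke.CalcBackProject) and the initial window is known:
 // Usage sketch: refine the object window on the current frame.
 Rectangle window = new Rectangle(100, 100, 60, 80);      // initial search window (placeholder values)
 MCvTermCriteria criteria = new MCvTermCriteria(10, 1.0);  // stop after 10 iterations or < 1 px movement
 int iterations = CvInvoke.MeanShift(backProjection, ref window, criteria);
 // window now holds the refined object location for this frame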
Example #21
 /// <summary>
 /// Extracts pixels from src:
 /// dst(x, y) = src(x + center.x - (width(dst)-1)*0.5, y + center.y - (height(dst)-1)*0.5)
 /// where the values of pixels at non-integer coordinates are retrieved using bilinear interpolation. Every channel of multiple-channel images is processed independently. Whereas the rectangle center must be inside the image, the whole rectangle may be partially occluded. In this case, the replication border mode is used to get pixel values beyond the image boundaries.
 /// </summary>
 /// <param name="image">Source image</param>
 /// <param name="patchSize">Size of the extracted patch.</param>
 /// <param name="patch">Extracted rectangle</param>
 /// <param name="patchType">Depth of the extracted pixels. By default, they have the same depth as <paramref name="image"/>.</param>
 /// <param name="center">Floating point coordinates of the extracted rectangle center within the source image. The center must be inside the image.</param>
 public static void GetRectSubPix(IInputArray image, Size patchSize, PointF center, IOutputArray patch, DepthType patchType = DepthType.Default)
 {
    using (InputArray iaSrc = image.GetInputArray())
    using (OutputArray oaPatch = patch.GetOutputArray())
    {
       cveGetRectSubPix(iaSrc, ref patchSize, ref center, oaPatch, patchType);
    }
 }
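A short usage sketch, assuming image is an existing Mat; the patch size and the sub-pixel center are placeholder values:
 // Usage sketch: extract a 32x32 patch centered at a sub-pixel location.
 using (Mat patch = new Mat())
 {
    CvInvoke.GetRectSubPix(image, new Size(32, 32), new PointF(120.5f, 80.25f), patch);
 }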
Example #22
 public static void DtFilter(IInputArray guide, IInputArray src, IOutputArray dst,
    double sigmaSpatial, double sigmaColor, int mode, int numIters)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveDtFilter(iaGuide, iaSrc, oaDst, sigmaSpatial, sigmaColor, mode, numIters);
 }
Example #23
 public static void GuidedFilter(IInputArray guide, IInputArray src, IOutputArray dst, int radius, double eps,
    int dDepth)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveGuidedFilter(iaGuide, iaSrc, oaDst, radius, eps, dDepth);
 }
Example #24
 /// <summary>
 /// Trains the statistical model.
 /// </summary>
 /// <param name="model">The stat model.</param>
 /// <param name="samples">The training samples.</param>
 /// <param name="layoutType">Type of the layout.</param>
 /// <param name="responses">Vector of responses associated with the training samples.</param>
 /// <returns></returns>
 public static bool Train(this IStatModel model, IInputArray samples, DataLayoutType layoutType, IInputArray responses)
 {
    using (InputArray iaSamples = samples.GetInputArray())
    using (InputArray iaResponses = responses.GetInputArray())
    {
       return MlInvoke.StatModelTrain(model.StatModelPtr, iaSamples, layoutType, iaResponses);
    }
 }
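For instance, the extension can train an SVM from in-memory matrices. A minimal sketch with a made-up toy dataset; default SVM parameters are assumed to be acceptable here:
 // Usage sketch: train an SVM on four 2D samples with the Train extension above.
 using (SVM svm = new SVM())
 using (Matrix<float> samples = new Matrix<float>(new float[,] { { 0, 0 }, { 1, 1 }, { 8, 8 }, { 9, 9 } }))
 using (Matrix<int> responses = new Matrix<int>(new int[,] { { 0 }, { 0 }, { 1 }, { 1 } }))
 {
    bool trained = svm.Train(samples, DataLayoutType.RowSample, responses);
 }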
Example #25
 public static float Predict(this IStatModel model, IInputArray samples, IOutputArray results = null, int flags = 0)
 {
    using (InputArray iaSamples = samples.GetInputArray())
    using (OutputArray oaResults = results == null ? OutputArray.GetEmpty() : results.GetOutputArray())
    {
       return MlInvoke.StatModelPredict(model.StatModelPtr, iaSamples, oaResults, flags);
    }
 }
Example #26
 /// <summary>
 /// Shows the image in the specified window
 /// </summary>
 /// <param name="name">Name of the window</param>
 /// <param name="image">Image to be shown</param>
 public static void Imshow(String name, IInputArray image)
 {
    using (CvString s = new CvString(name))
    using (InputArray iaImage = image.GetInputArray())
    {
       cveImshow(s, iaImage);
    }
 }
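Typical usage pairs this with WaitKey so the window stays open; a hedged sketch (the file name is a placeholder):
 // Usage sketch: load, show, wait for a key press, then close the window.
 using (Mat img = CvInvoke.Imread("lena.jpg"))
 {
    CvInvoke.Imshow("Preview", img);
    CvInvoke.WaitKey(0);                // block until a key is pressed
    CvInvoke.DestroyWindow("Preview");
 }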
Example #27
 public static void AmFilter(IInputArray joint, IInputArray src, IOutputArray dst, double sigmaS, double sigmaR,
    bool adjustOutliers)
 {
    using (InputArray iaJoint = joint.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveAmFilter(iaJoint, iaSrc, oaDst, sigmaS, sigmaR, adjustOutliers);
 }
Example #28
 /// <summary>
 /// Produce domain transform filtering operation on source image.
 /// </summary>
 /// <param name="src">Filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="dDepth">Optional depth of the output image. dDepth can be set to Default, which will be equivalent to src.depth().</param>
 public void Filter(IInputArray src, IOutputArray dst, DepthType dDepth = DepthType.Default)
 {
    using (InputArray iaSrc = src.GetInputArray())
       using (OutputArray oaDst = dst.GetOutputArray())
       {
          XimgprocInvoke.cveDTFilterFilter(_ptr, iaSrc, oaDst, dDepth);
       }
    
 }
Example #29
 /// <summary>
 /// Recovers inverse camera response.
 /// </summary>
 /// <param name="src">Vector of input images</param>
 /// <param name="dst">256x1 matrix with inverse camera response function</param>
 /// <param name="times">Vector of exposure time values for each image</param>
 public void Process(IInputArray src, IOutputArray dst, IInputArray times)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaTimes = times.GetInputArray())
    {
       CvInvoke.cveCalibrateCRFProcess(_calibrateCRFPtr, iaSrc, oaDst, iaTimes);
    }
 }
Example #30
 /// <summary>
 /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for and returns those regions as a sequence of rectangles. 
 /// The function scans the image several times at different scales. Each time it considers overlapping regions in the image. 
  /// It may also apply some heuristics to reduce the number of analyzed regions, such as Canny pruning. 
  /// After it has collected the candidate rectangles (regions that passed the classifier cascade), it groups them and returns a sequence of average rectangles for each large enough group. 
 /// </summary>
 /// <param name="image">The image where the objects are to be detected from</param>
 /// <param name="scaleFactor">The factor by which the search window is scaled between the subsequent scans, for example, 1.1 means increasing window by 10%</param>
 /// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All the groups of a smaller number of rectangles than min_neighbors-1 are rejected. If min_neighbors is 0, the function does not any grouping at all and returns all the detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure. Use 3 for default.</param>
 /// <param name="minSize">Minimum window size. Use Size.Empty for default, where it is set to the size of samples the classifier has been trained on (~20x20 for face detection)</param>
 /// <param name="maxSize">Maximum window size. Use Size.Empty for default, where the parameter will be ignored.</param>
 /// <returns>The objects detected, one array per channel</returns>
 public Rectangle[] DetectMultiScale(IInputArray image, double scaleFactor = 1.1, int minNeighbors = 3, Size minSize = new Size(), Size maxSize = new Size())
 {
    using (Util.VectorOfRect rectangles = new Util.VectorOfRect())
    using (InputArray iaImage = image.GetInputArray())
    {
       CvCascadeClassifierDetectMultiScale(_ptr, iaImage, rectangles, scaleFactor, minNeighbors, 0, ref minSize, ref maxSize);
       return rectangles.ToArray();
    }
 }
Example #31
 /// <summary>
 /// Finds the edges on the input <paramref name="src"/> and marks them in the output image edges using the Canny algorithm.
 /// </summary>
 /// <param name="src">Input image</param>
 /// <param name="edges">Image to store the edges found by the function</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Detect(IInputArray src, IOutputArray edges, Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaEdges = edges.GetOutputArray())
             CudaInvoke.cudaCannyEdgeDetectorDetect(_ptr, iaSrc, oaEdges, stream);
 }
Example #32
 /// <summary>
 /// Refine not detected markers based on the already detected and the board layout.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="board">Layout of markers in the board.</param>
 /// <param name="detectedCorners">Vector of already detected marker corners.</param>
 /// <param name="detectedIds">Vector of already detected marker identifiers.</param>
 /// <param name="rejectedCorners">Vector of rejected candidates during the marker detection process</param>
 /// <param name="cameraMatrix">Optional input 3x3 floating-point camera matrix </param>
 /// <param name="distCoeffs">Optional vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="minRepDistance">Minimum distance between the corners of the rejected candidate and the reprojected marker in order to consider it as a correspondence. (default 10)</param>
 /// <param name="errorCorrectionRate">Rate of allowed erroneous bits respect to the error correction capability of the used dictionary. -1 ignores the error correction step. (default 3)</param>
 /// <param name="checkAllOrders">Consider the four posible corner orders in the rejectedCorners array. If it set to false, only the provided corner order is considered (default true).</param>
 /// <param name="recoveredIdxs">Optional array to returns the indexes of the recovered candidates in the original rejectedCorners array.</param>
 /// <param name="parameters">marker detection parameters</param>
 public static void RefineDetectedMarkers(
     IInputArray image, IBoard board, IInputOutputArray detectedCorners,
     IInputOutputArray detectedIds, IInputOutputArray rejectedCorners,
     IInputArray cameraMatrix, IInputArray distCoeffs,
     float minRepDistance, float errorCorrectionRate,
     bool checkAllOrders,
     IOutputArray recoveredIdxs, DetectorParameters parameters)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputOutputArray ioaDetectedCorners = detectedCorners.GetInputOutputArray())
             using (InputOutputArray ioaDetectedIds = detectedIds.GetInputOutputArray())
                 using (InputOutputArray ioaRejectedCorners = rejectedCorners.GetInputOutputArray())
                     using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                         using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                             using (
                                 OutputArray oaRecovervedIdx = recoveredIdxs == null
           ? OutputArray.GetEmpty()
           : recoveredIdxs.GetOutputArray())
                             {
                                 cveArucoRefineDetectedMarkers(iaImage, board.BoardPtr, ioaDetectedCorners, ioaDetectedIds, ioaRejectedCorners,
                                                               iaCameraMatrix, iaDistCoeffs, minRepDistance, errorCorrectionRate, checkAllOrders, oaRecovervedIdx, ref parameters);
                             }
 }
Example #33
 /// <summary>
 /// Interpolate position of ChArUco board corners
 /// </summary>
 /// <param name="markerCorners">vector of already detected markers corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4.The order of the corners should be clockwise.</param>
 /// <param name="markerIds">list of identifiers for each marker in corners</param>
 /// <param name="image">input image necesary for corner refinement. Note that markers are not detected and should be sent in corners and ids parameters.</param>
 /// <param name="board">layout of ChArUco board.</param>
 /// <param name="charucoCorners">interpolated chessboard corners</param>
 /// <param name="charucoIds">interpolated chessboard corners identifiers</param>
 /// <param name="cameraMatrix">optional 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">optional vector of distortion coefficients, (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements </param>
 /// <param name="minMarkers">number of adjacent markers that must be detected to return a charuco corner</param>
 /// <returns>The number of interpolated corners.</returns>
 public static int InterpolateCornersCharuco(
     IInputArrayOfArrays markerCorners,
     IInputArray markerIds,
     IInputArray image,
     CharucoBoard board,
     IOutputArray charucoCorners,
     IOutputArray charucoIds,
     IInputArray cameraMatrix = null,
     IInputArray distCoeffs   = null,
     int minMarkers           = 2)
 {
     using (InputArray iaMarkerCorners = markerCorners.GetInputArray())
         using (InputArray iaMarkerIds = markerIds.GetInputArray())
             using (InputArray iaImage = image.GetInputArray())
                 using (OutputArray oaCharucoCorners = charucoCorners.GetOutputArray())
                     using (OutputArray oaCharucoIds = charucoIds.GetOutputArray())
                         using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                             using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                             {
                                 return(cveArucoInterpolateCornersCharuco(
                                            iaMarkerCorners, iaMarkerIds, iaImage, board,
                                            oaCharucoCorners, oaCharucoIds,
                                            iaCameraMatrix, iaDistCoeffs,
                                            minMarkers));
                             }
 }
Example #34
 /// <summary>
 /// Draws a set of Charuco corners
 /// </summary>
 /// <param name="image">image input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="charucoCorners">vector of detected charuco corners</param>
 /// <param name="charucoIds">list of identifiers for each corner in charucoCorners</param>
 /// <param name="cornerColor">color of the square surrounding each corner</param>
 public static void DrawDetectedCornersCharuco(
     IInputOutputArray image,
     IInputArray charucoCorners,
     IInputArray charucoIds,
     MCvScalar cornerColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
             using (InputArray iaCharucoIds = charucoIds == null ? InputArray.GetEmpty() : charucoIds.GetInputArray())
             {
                 cveArucoDrawDetectedCornersCharuco(ioaImage, iaCharucoCorners, iaCharucoIds, ref cornerColor);
             }
 }
Example #35
 /// <summary>
 /// Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
 /// </summary>
 /// <param name="source">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray source, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaSource = source.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaCLAHEApply(_ptr, iaSource, oaDst, stream);
 }
Example #36
 /// <summary>
 /// Applies X Deriche filter to an image.
 /// </summary>
 /// <param name="op">Source 8-bit or 16bit image, 1-channel or 3-channel image.</param>
 /// <param name="dst">result CV_32FC image with same number of channel than _op.</param>
 /// <param name="alphaDerive">see paper</param>
 /// <param name="alphaMean">see paper</param>
 /// <remarks>For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&amp;rep=rep1&amp;type=pdf </remarks>
 public static void GradientDericheX(IInputArray op, IOutputArray dst, double alphaDerive, double alphaMean)
 {
     using (InputArray iaOp = op.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveGradientDericheX(iaOp, oaDst, alphaDerive, alphaMean);
 }
Example #37
 /// <summary>
 /// Performs anisotropic diffusion on an image.
 /// </summary>
 /// <param name="src">Grayscale Source image.</param>
 /// <param name="dst">Destination image of the same size and the same number of channels as src .</param>
 /// <param name="alpha">The amount of time to step forward by on each iteration (normally, it's between 0 and 1).</param>
 /// <param name="K">sensitivity to the edges</param>
 /// <param name="niters">The number of iterations</param>
 public static void AnisotropicDiffusion(IInputArray src, IOutputArray dst, float alpha, float K, int niters)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveAnisotropicDiffusion(iaSrc, oaDst, alpha, K, niters);
 }
Example #38
 /// <summary>
 /// Detects QR codes in image and returns the vector of the quadrangles containing the codes.
 /// </summary>
 /// <param name="img">Grayscale or color (BGR) image containing (or not) QR codes</param>
 /// <param name="points">Output vector of vector of vertices of the minimum-area quadrangle containing the codes.</param>
 /// <returns>True if a QRCode is found.</returns>
 public bool DetectMulti(IInputArray img, IOutputArray points)
 {
     using (InputArray iaInput = img.GetInputArray())
         using (OutputArray oaPoints = points.GetOutputArray())
             return(CvInvoke.cveQRCodeDetectorDetectMulti(_ptr, iaInput, oaPoints));
 }
Example #39
 /// <summary>
  /// The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image.
 /// </summary>
 /// <param name="image">Image to segment</param>
 /// <param name="algorithm">Chooses the algorithm variant to use</param>
 /// <param name="regionSize">Chooses an average superpixel size measured in pixels</param>
 /// <param name="ruler">Chooses the enforcement of superpixel smoothness factor of superpixel</param>
 public SupperpixelSLIC(IInputArray image, Algorithm algorithm, int regionSize, float ruler)
 {
     using (InputArray iaImage = image.GetInputArray())
         _ptr = XImgprocInvoke.cveSuperpixelSLICCreate(iaImage, algorithm, regionSize, ruler);
 }
Example #40
 private static Size InputArrGetSize(IInputArray arr)
 {
     using (InputArray ia = arr.GetInputArray())
         return(ia.GetSize());
 }
Example #41
 /// <summary>
 /// Perform a binary map of given saliency map
 /// </summary>
 /// <param name="saliencyMap">the saliency map obtained through one of the specialized algorithms</param>
 /// <param name="binaryMap">the binary map</param>
 /// <param name="saliency">The StatucSaliency object</param>
 /// <returns>True if the binary map is sucessfully computed</returns>
 public static bool ComputeBinaryMap(this IStaticSaliency saliency, IInputArray saliencyMap, IOutputArray binaryMap)
 {
     using (InputArray iaSaliencyMap = saliencyMap.GetInputArray())
         using (OutputArray oaBinaryMap = binaryMap.GetOutputArray())
             return(cveStaticSaliencyComputeBinaryMap(saliency.StaticSaliencyPtr, iaSaliencyMap, oaBinaryMap));
 }
Example #42
 /// <summary>
 /// Unwraps a 2D phase map.
 /// </summary>
 /// <param name="wrappedPhaseMap">The wrapped phase map that needs to be unwrapped.</param>
 /// <param name="unwrappedPhaseMap">The unwrapped phase map.</param>
 /// <param name="shadowMask">Optional parameter used when some pixels do not hold any phase information in the wrapped phase map.</param>
 public void UnwrapPhaseMap(
     IInputArray wrappedPhaseMap,
     IOutputArray unwrappedPhaseMap,
     IInputArray shadowMask = null)
 {
     using (InputArray iaWrappedPhaseMap = wrappedPhaseMap.GetInputArray())
         using (OutputArray oaUnwrappedPhaseMap = unwrappedPhaseMap.GetOutputArray())
             using (InputArray iaShadowMask = shadowMask == null ? InputArray.GetEmpty() : shadowMask.GetInputArray())
             {
                 PhaseUnwrappingInvoke.cveHistogramPhaseMapUnwrappingUnwrapPhaseMap(_ptr, iaWrappedPhaseMap, oaUnwrappedPhaseMap, iaShadowMask);
             }
 }
 /// <summary>
  /// Updates the background model and computes the foreground mask
  /// </summary>
  /// <param name="frame">Next video frame.</param>
  /// <param name="forgroundMask">The output foreground mask as an 8-bit image.</param>
  /// <param name="learningRate">The learning rate, use -1.0f for default value.</param>
 public void Apply(IInputArray frame, IOutputArray forgroundMask, double learningRate = -1.0)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         using (OutputArray oaForgroundMask = forgroundMask.GetOutputArray())
             CudaInvoke.cudaBackgroundSubtractorFGDApply(_ptr, iaFrame, oaForgroundMask, learningRate);
 }
Example #44
 /// <summary>
 /// Write a single frame to the video writer
 /// </summary>
 /// <param name="frame">The frame to be written to the video writer</param>
 public void Write(IInputArray frame)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         CvInvoke.cveVideoWriterWrite(_ptr, iaFrame);
 }
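Write is called once per frame on an already configured writer. A hedged sketch; the codec, fps, frame size, file name and the VideoWriter constructor overload shown are assumptions to adapt to your setup:
 // Usage sketch: write frames to an MJPG-encoded AVI file.
 int fourcc = VideoWriter.Fourcc('M', 'J', 'P', 'G');
 using (VideoWriter writer = new VideoWriter("out.avi", fourcc, 30, new Size(640, 480), true))
 {
    writer.Write(frame);  // "frame" is an assumed Mat matching the size given above
 }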
Example #45
 /// <summary>
 /// Estimate the Gaussian mixture parameters from a samples set. This variation starts with Expectation step. You need to provide initial means of mixture components. Optionally you can pass initial weights and covariance matrices of mixture components.
 /// </summary>
 /// <param name="samples">Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it will be converted to the inner matrix of such type for the further computing.</param>
 /// <param name="means0">Initial means of mixture components. It is a one-channel matrix of nclusters x dims size. If the matrix does not have CV_64F type it will be converted to the inner matrix of such type for the further computing.</param>
 /// <param name="covs0">The vector of initial covariance matrices of mixture components. Each of covariance matrices is a one-channel matrix of dims x dims size. If the matrices do not have CV_64F type they will be converted to the inner matrices of such type for the further computing.</param>
 /// <param name="weights0">Initial weights of mixture components. It should be a one-channel floating-point matrix with 1 x nclusters or nclusters x 1 size.</param>
 /// <param name="loglikelihoods">The optional output matrix that contains a likelihood logarithm value for each sample. It has nsamples x 1 size and CV_64FC1 type.</param>
 /// <param name="labels">The optional output "class label" (indices of the most probable mixture component for each sample). It has nsamples x 1 size and CV_32SC1 type.</param>
 /// <param name="probs">The optional output matrix that contains posterior probabilities of each Gaussian mixture component given the each sample. It has nsamples x nclusters size and CV_64FC1 type.</param>
 public void trainE(
     IInputArray samples,
     IInputArray means0,
     IInputArray covs0           = null,
     IInputArray weights0        = null,
     IOutputArray loglikelihoods = null,
     IOutputArray labels         = null,
     IOutputArray probs          = null)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaMeans0 = means0.GetInputArray())
             using (InputArray iaCovs0 = covs0 == null ? InputArray.GetEmpty() : covs0.GetInputArray())
                 using (InputArray iaWeights = weights0 == null ? InputArray.GetEmpty() : weights0.GetInputArray())
                     using (OutputArray oaLogLikelihood = loglikelihoods == null ? OutputArray.GetEmpty() : loglikelihoods.GetOutputArray())
                         using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                             using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                             {
                                 MlInvoke.CvEMTrainE(_ptr, iaSamples, iaMeans0, iaCovs0, iaWeights, oaLogLikelihood, oaLabels,
                                                     oaProbs, ref _statModel, ref _algorithm);
                             }
 }
Example #46
 /// <summary>
 /// Draw a set of detected ChArUco Diamond markers
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="diamondCorners">positions of diamond corners in the same format returned by detectCharucoDiamond(). (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="diamondIds">vector of identifiers for diamonds in diamondCorners, in the same format returned by detectCharucoDiamond() (e.g. VectorOfMat ). Optional, if not provided, ids are not painted. </param>
 /// <param name="borderColor">color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one.</param>
 public static void DrawDetectedDiamonds(
     IInputOutputArray image,
     IInputArrayOfArrays diamondCorners,
     IInputArray diamondIds,
     MCvScalar borderColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaDiamondCorners = diamondCorners.GetInputArray())
             using (InputArray iaDiamondIds = diamondIds == null ? InputArray.GetEmpty() : diamondIds.GetInputArray())
             {
                 cveArucoDrawDetectedDiamonds(ioaImage, iaDiamondCorners, iaDiamondIds, ref borderColor);
             }
 }
Example #47
 /// <summary>
 /// Applies a binary blob thinning operation, to achieve a skeletization of the input image.
 /// The function transforms a binary blob image into a skeletized form using the technique of Zhang-Suen.
 /// </summary>
 /// <param name="src">Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.</param>
 /// <param name="dst">Destination image of the same size and the same type as src. The function can work in-place.</param>
 /// <param name="thinningType">Value that defines which thinning algorithm should be used.</param>
 public static void Thinning(IInputArray src, IOutputArray dst, ThinningTypes thinningType)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveThinning(iaSrc, oaDst, thinningType);
 }
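A hedged usage sketch, assuming binary is an existing 8-bit single-channel image with blobs at 255 and that the wrapper above is in scope:
 // Usage sketch: skeletonize a binary blob image.
 using (Mat skeleton = new Mat())
 {
    Thinning(binary, skeleton, ThinningTypes.ZhangSuen);
 }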
Example #48
 /// <summary>
 /// Detect ChArUco Diamond markers
 /// </summary>
 /// <param name="image">input image necessary for corner subpixel.</param>
 /// <param name="markerCorners">list of detected marker corners from detectMarkers function.</param>
 /// <param name="markerIds">list of marker ids in markerCorners.</param>
 /// <param name="squareMarkerLengthRate">rate between square and marker length: squareMarkerLengthRate = squareLength / markerLength.The real units are not necessary.</param>
 /// <param name="diamondCorners">output list of detected diamond corners (4 corners per diamond). The order is the same than in marker corners: top left, top right, bottom right and bottom left. Similar format than the corners returned by detectMarkers(e.g VectorOfVectorOfPointF ).</param>
 /// <param name="diamondIds">ids of the diamonds in diamondCorners. The id of each diamond is in fact of type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the diamond.</param>
 /// <param name="cameraMatrix">Optional camera calibration matrix.</param>
 /// <param name="distCoeffs">Optional camera distortion coefficients.</param>
 public static void DetectCharucoDiamond(
     IInputArray image,
     IInputArray markerCorners,
     IInputArray markerIds,
     float squareMarkerLengthRate,
     IOutputArray diamondCorners,
     IOutputArray diamondIds,
     IInputArray cameraMatrix = null,
     IInputArray distCoeffs   = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMarkerCorners = markerCorners.GetInputArray())
             using (InputArray iaMarkerIds = markerIds.GetInputArray())
                 using (OutputArray oaDiamondCorners = diamondCorners.GetOutputArray())
                     using (OutputArray oaDiamondIds = diamondIds.GetOutputArray())
                         using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                             using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                             {
                                 cveArucoDetectCharucoDiamond(iaImage, iaMarkerCorners, iaMarkerIds, squareMarkerLengthRate, oaDiamondCorners, oaDiamondIds, iaCameraMatrix, iaDistCoeffs);
                             }
 }
Example #49
 /// <summary>
 /// Applies Paillou filter to an image.
 /// </summary>
 /// <param name="op">Source 8-bit or 16bit image, 1-channel or 3-channel image.</param>
 /// <param name="dst">result CV_32F image with same number of channel than op.</param>
 /// <param name="alpha">see paper</param>
 /// <param name="omega">see paper</param>
 /// <remarks>For more details about this implementation, please see: Philippe Paillou. Detecting step edges in noisy sar images: a new linear operator. IEEE transactions on geoscience and remote sensing, 35(1):191–196, 1997.</remarks>
 public static void GradientPaillouX(IInputArray op, IOutputArray dst, double alpha, double omega)
 {
     using (InputArray iaOp = op.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveGradientPaillouX(iaOp, oaDst, alpha, omega);
 }
Example #50
 /// <summary>
  /// Computes the estimated covariance matrix of an image using the sliding window formulation.
 /// </summary>
 /// <param name="src">The source image. Input image must be of a complex type.</param>
 /// <param name="dst">The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).</param>
 /// <param name="windowRows">The number of rows in the window.</param>
 /// <param name="windowCols">The number of cols in the window. The window size parameters control the accuracy of the estimation. The sliding window moves over the entire image from the top-left corner to the bottom right corner. Each location of the window represents a sample. If the window is the size of the image, then this gives the exact covariance matrix. For all other cases, the sizes of the window will impact the number of samples and the number of elements in the estimated covariance matrix.</param>
 public static void CovarianceEstimation(IInputArray src, IOutputArray dst, int windowRows, int windowCols)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveCovarianceEstimation(iaSrc, oaDst, windowRows, windowCols);
 }
        public static void Detect(
            IInputArray image,
            string faceFileName,
            string eyeFileName,
            List <Rectangle> faces,
            List <Rectangle> eyes,
            out long detectionTime)
        {
            Stopwatch watch;

            using (var iaImage = image.GetInputArray())
            {
                if (Controller.Instance.Cuda)
                {
                    using (CudaCascadeClassifier face = new CudaCascadeClassifier(faceFileName))
                        using (CudaCascadeClassifier eye = new CudaCascadeClassifier(eyeFileName))
                        {
                            face.ScaleFactor   = 1.1;
                            face.MinNeighbors  = 10;
                            face.MinObjectSize = Size.Empty;
                            eye.ScaleFactor    = 1.1;
                            eye.MinNeighbors   = 10;
                            eye.MinObjectSize  = Size.Empty;
                            watch = Stopwatch.StartNew();
                            using (CudaImage <Bgr, byte> gpuImage = new CudaImage <Bgr, byte>(image))
                                using (CudaImage <Gray, byte> gpuGray = gpuImage.Convert <Gray, byte>())
                                    using (GpuMat region = new GpuMat())
                                    {
                                        face.DetectMultiScale(gpuGray, region);
                                        Rectangle[] faceRegion = face.Convert(region);
                                        faces.AddRange(faceRegion);
                                        foreach (Rectangle f in faceRegion)
                                        {
                                            using (CudaImage <Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                                            {
                                                //For some reason a clone is required.
                                                //Might be a bug of CudaCascadeClassifier in opencv
                                                using (CudaImage <Gray, byte> clone = faceImg.Clone(null))
                                                    using (GpuMat eyeRegionMat = new GpuMat())
                                                    {
                                                        eye.DetectMultiScale(clone, eyeRegionMat);
                                                        Rectangle[] eyeRegion = eye.Convert(eyeRegionMat);
                                                        foreach (Rectangle e in eyeRegion)
                                                        {
                                                            Rectangle eyeRect = e;
                                                            eyeRect.Offset(f.X, f.Y);
                                                            eyes.Add(eyeRect);
                                                        }
                                                    }
                                            }
                                        }
                                    }
                            watch.Stop();
                        }
                }
                else
                {
                    //Read the HaarCascade objects
                    using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                        using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
                        {
                            watch = Stopwatch.StartNew();

                            using (UMat ugray = new UMat())
                            {
                                CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                                //normalizes brightness and increases contrast of the image
                                CvInvoke.EqualizeHist(ugray, ugray);

                                //Detect the faces  from the gray scale image and store the locations as rectangle
                                //The first dimensional is the channel
                                //The second dimension is the index of the rectangle in the specific channel
                                Rectangle[] facesDetected = face.DetectMultiScale(
                                    ugray,
                                    1.1,
                                    10,
                                    new Size(20, 20));

                                faces.AddRange(facesDetected);

                                foreach (Rectangle f in facesDetected)
                                {
                                    //Get the region of interest on the faces
                                    using (UMat faceRegion = new UMat(ugray, f))
                                    {
                                        Rectangle[] eyesDetected = eye.DetectMultiScale(
                                            faceRegion,
                                            1.1,
                                            10,
                                            new Size(20, 20));

                                        foreach (Rectangle e in eyesDetected)
                                        {
                                            Rectangle eyeRect = e;
                                            eyeRect.Offset(f.X, f.Y);
                                            eyes.Add(eyeRect);
                                        }
                                    }
                                }
                            }
                            watch.Stop();
                        }
                }
                detectionTime = watch.ElapsedMilliseconds;
            }
        }
Example #52
 /// <summary>
 /// Create a flann index
 /// </summary>
 /// <param name="values">A row by row matrix of descriptors</param>
 /// <param name="ip">The index parameter</param>
 /// <param name="distType">The distance type</param>
 public Index(IInputArray values, IIndexParams ip, DistType distType = DistType.L2)
 {
     using (InputArray iaValues = values.GetInputArray())
         _ptr = FlannInvoke.cveFlannIndexCreate(iaValues, ip.IndexParamPtr, distType);
 }
        public static Rectangle[] FindPerson(IInputArray image, out long processingTime)
        {
            Stopwatch watch = new Stopwatch();

            Rectangle[] regions = null;


            if (Controller.Instance.Cuda)
            {
                 //Dispose the GpuMat when done to release the GPU memory promptly
                 using (GpuMat gpuImage = new GpuMat(image))
                 using (InputArray iaImage = gpuImage.GetInputArray())
                 {
                    //if the input array is a GpuMat
                    //check if there is a compatible Cuda device to run pedestrian detection
                    if (iaImage.Kind == InputArray.Type.CudaGpuMat)
                    {
                        //this is the Cuda version
                        using (CudaHOG des = new CudaHOG(
                                   new Size(64, 128),
                                   new Size(16, 16),
                                   new Size(8, 8),
                                   new Size(8, 8)))
                        {
                            des.SetSVMDetector(des.GetDefaultPeopleDetector());

                            watch = Stopwatch.StartNew();
                            using (GpuMat cudaBgra = new GpuMat())
                                using (VectorOfRect vr = new VectorOfRect())
                                {
                                    CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
                                    des.DetectMultiScale(cudaBgra, vr);
                                    regions = vr.ToArray();
                                }
                             watch.Stop();
                         }
                    }
                }
            }
            else
            {
                using (InputArray iaImage = image.GetInputArray())
                {
                    //this is the CPU/OpenCL version
                    using (HOGDescriptor des = new HOGDescriptor())
                    {
                        des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                        watch = Stopwatch.StartNew();

                        MCvObjectDetection[] results = des.DetectMultiScale(image);
                        regions = new Rectangle[results.Length];
                        for (int i = 0; i < results.Length; i++)
                        {
                            regions[i] = results[i].Rect;
                        }
                        watch.Stop();
                    }
                }
            }


            processingTime = watch.ElapsedMilliseconds;

            return(regions);
        }
Example #54
 /// <summary>
 /// Compute the panoramic images given the images
 /// </summary>
 /// <param name="images">The input images. This can be, for example, a VectorOfMat</param>
 /// <param name="pano">The panoramic image</param>
 /// <returns>The stitching status</returns>
 public Status Stitch(IInputArray images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return(StitchingInvoke.cveStitcherStitch(_ptr, iaImages, oaPano));
 }
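A hedged end-to-end sketch; the file names are placeholders, and the parameterless Stitcher constructor plus the Stitcher.Status enum nesting are assumptions that may differ between Emgu CV versions:
 // Usage sketch: stitch two overlapping photos into a panorama.
 using (Mat img1 = CvInvoke.Imread("left.jpg"))
 using (Mat img2 = CvInvoke.Imread("right.jpg"))
 using (VectorOfMat images = new VectorOfMat())
 using (Mat pano = new Mat())
 using (Stitcher stitcher = new Stitcher())
 {
    images.Push(new Mat[] { img1, img2 });
    Stitcher.Status status = stitcher.Stitch(images, pano);
    if (status == Stitcher.Status.Ok)
       CvInvoke.Imwrite("pano.jpg", pano);
 }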
Example #55
 public static bool IsUmat(this IInputArray arr)
 {
     using (InputArray ia = arr.GetInputArray())
         return(ia.IsUMat);
 }
 /// <summary>
 /// Computes an image descriptor using the set visual vocabulary.
 /// </summary>
 /// <param name="image">Image, for which the descriptor is computed</param>
 /// <param name="keypoints">Key points detected in the input image.</param>
 /// <param name="imgDescriptors">The output image descriptors.</param>
 public void Compute(IInputArray image, VectorOfKeyPoint keypoints, Mat imgDescriptors)
 {
     using (InputArray iaImage = image.GetInputArray())
         Features2DInvoke.cveBOWImgDescriptorExtractorCompute(_ptr, iaImage, keypoints, imgDescriptors);
 }