Example #1
      public StopSignDetector(IInputArray stopSignModel)
      {
         _detector = new SURF(500);
         using (Mat redMask = new Mat())
         {
            GetRedPixelMask(stopSignModel, redMask);
            _modelKeypoints = new VectorOfKeyPoint();
            _modelDescriptors = new Mat();
            _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
            if (_modelKeypoints.Size == 0)
               throw new Exception("No image feature has been found in the stop sign model");
         }

         _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
         _modelDescriptorMatcher.Add(_modelDescriptors);

         _octagon = new VectorOfPoint(
            new Point[]
            {
               new Point(1, 0),
               new Point(2, 0),
               new Point(3, 1),
               new Point(3, 2),
               new Point(2, 3),
               new Point(1, 3),
               new Point(0, 2),
               new Point(0, 1)
            });

      }
Example #2
 /// <summary>
 /// Applies an affine transformation to an image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="mapMatrix">2x3 transformation matrix</param>
 /// <param name="dsize">Size of the output image.</param>
 /// <param name="interpMethod">Interpolation method</param>
 /// <param name="warpMethod">Warp method</param>
 /// <param name="borderMode">Pixel extrapolation method</param>
 /// <param name="borderValue">A value used to fill outliers</param>
 public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaMapMatrix = mapMatrix.GetInputArray())
       cveWarpAffine(iaSrc, oaDst, iaMapMatrix, ref dsize, (int)interpMethod | (int)warpMethod, borderMode, ref borderValue);
 }
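A minimal usage sketch for the wrapper above. The file name is a placeholder, and ImreadModes is the enum name used by recent Emgu CV releases (older ones call it LoadImageType): build a 2x3 rotation matrix with GetRotationMatrix2D and warp the image around its center.

 using (Mat src = CvInvoke.Imread("input.jpg", ImreadModes.Color))
 using (Mat rotation = new Mat())
 using (Mat dst = new Mat())
 {
    // 30 degree rotation about the image center, no scaling
    PointF center = new PointF(src.Width / 2f, src.Height / 2f);
    CvInvoke.GetRotationMatrix2D(center, 30, 1.0, rotation);
    CvInvoke.WarpAffine(src, dst, rotation, src.Size);
 }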
Example #3
 /// <summary>
 /// Computes disparity map for the specified stereo pair
 /// </summary>
 /// <param name="matcher">The stereo matcher</param>
 /// <param name="left">Left 8-bit single-channel image.</param>
 /// <param name="right">Right image of the same size and the same type as the left one.</param>
 /// <param name="disparity">Output disparity map. It has the same size as the input images. Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map</param>
 public static void Compute(this IStereoMatcher matcher, IInputArray left, IInputArray right, IOutputArray disparity)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CvStereoMatcherCompute(matcher.StereoMatcherPtr, iaLeft, iaRight, oaDisparity);
 }
Example #4
 /// <summary>
 ///  This function is similar to cvCalcBackProjectPatch. It slides through the image, compares the overlapped patches of size wxh against templ using the specified method and stores the comparison results in result
 /// </summary>
 /// <param name="image">Image where the search is running. It should be 8-bit or 32-bit floating-point</param>
 /// <param name="templ">Searched template; must be not greater than the source image and the same data type as the image</param>
 /// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be W-w+1xH-h+1.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>  
 public void Match(IInputArray image, IInputArray templ, IOutputArray result, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaTempl = templ.GetInputArray())
    using (OutputArray oaResult = result.GetOutputArray())
       CudaInvoke.cudaTemplateMatchingMatch(_ptr, iaImage, iaTempl, oaResult, stream);
 }
Example #5
      /// <summary>
      /// Given the left and right images, compute the disparity map and the 3D point cloud.
      /// </summary>
      /// <param name="left">The left image</param>
      /// <param name="right">The right image</param>
      /// <param name="outputDisparityMap">The left disparity map</param>
      /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
      private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, Mat points)
      {
         Size size;
         using (InputArray ia = left.GetInputArray())
            size = ia.GetSize();

         using (StereoBM stereoSolver = new StereoBM())
         {
            stereoSolver.Compute(left, right, outputDisparityMap);

            float scale = Math.Max(size.Width, size.Height);

            //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead
            using (Matrix<double> q = new Matrix<double>(
               new double[,]
               {
                  {1.0, 0.0, 0.0, -size.Width/2}, //shift the x origin to image center
                  {0.0, -1.0, 0.0, size.Height/2}, //shift the y origin to image center and flip it upside down
                  {0.0, 0.0, -1.0, 0.0}, //Multiply the z value by -1.0, 
                  {0.0, 0.0, 0.0, scale}
               })) //scale the object's coordinate to within a [-0.5, 0.5] cube
            {
               
               CvInvoke.ReprojectImageTo3D(outputDisparityMap, points, q, false, DepthType.Cv32F);
               
            }
            //points = PointCollection.ReprojectImageTo3D(outputDisparityMap, q);
         }
      }
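A usage sketch for the helper above, assuming a rectified stereo pair on disk (the file names are placeholders); StereoBM expects 8-bit single-channel input, so the frames are loaded as grayscale.

 using (Mat left = CvInvoke.Imread("left.png", ImreadModes.Grayscale))
 using (Mat right = CvInvoke.Imread("right.png", ImreadModes.Grayscale))
 using (Mat disparityMap = new Mat())
 using (Mat pointCloud = new Mat())
 {
    Computer3DPointsFromStereoPair(left, right, disparityMap, pointCloud);
    // disparityMap now holds the block-matching disparities,
    // pointCloud the reprojected 3D coordinates scaled into the [-0.5, 0.5] cube
 }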
Example #6
 /// <summary>
 /// Computes disparity map for the input rectified stereo pair.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(IInputArray left, IInputArray right, IOutputArray disparity, Stream stream = null)
 {
    using (InputArray iaLeft = left.GetInputArray())
    using (InputArray iaRight = right.GetInputArray())
    using (OutputArray oaDisparity = disparity.GetOutputArray())
       CudaInvoke.cudaStereoBMFindStereoCorrespondence(_ptr, iaLeft, iaRight, oaDisparity, stream);
 }
 /// <summary>
 /// Apply the filter to the disparity image
 /// </summary>
 /// <param name="disparity">The input disparity map</param>
 /// <param name="image">The image</param>
 /// <param name="dst">The output disparity map, should have the same size as the input disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray disparity, IInputArray image, IOutputArray dst, Stream stream = null)
 {
    using (InputArray iaDisparity = disparity.GetInputArray())
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       CudaInvoke.cudaDisparityBilateralFilterApply(this, iaDisparity, iaImage, oaDst, stream);
 }
Example #8
      /*
      /// <summary>
      /// Create an auto-tuned flann index
      /// </summary>
      /// <param name="values">A row by row matrix of descriptors</param>
      /// <param name="targetPrecision">Precision desired, use 0.9 if not sure</param>
      /// <param name="buildWeight">build tree time weighting factor, use 0.01 if not sure</param>
      /// <param name="memoryWeight">index memory weighting factor, use 0 if not sure</param>
      /// <param name="sampleFraction">what fraction of the dataset to use for autotuning, use 0.1 if not sure</param>
      public Index(IInputArray values, float targetPrecision, float buildWeight, float memoryWeight, float sampleFraction)
      {
         using (InputArray iaValues = values.GetInputArray())
            _ptr = CvFlannIndexCreateAutotuned(iaValues, targetPrecision, buildWeight, memoryWeight, sampleFraction);
      }*/
      #endregion

      /// <summary>
      /// Perform k-nearest-neighbours (KNN) search
      /// </summary>
      /// <param name="queries">A row by row matrix of descriptors to be query for nearest neighbours</param>
      /// <param name="indices">The result of the indices of the k-nearest neighbours</param>
      /// <param name="squareDistances">The square of the Eculidean distance between the neighbours</param>
      /// <param name="knn">Number of nearest neighbors to search for</param>
      /// <param name="checks">The number of times the tree(s) in the index should be recursively traversed. A
      /// higher value for this parameter would give better search precision, but also take more
      /// time. If automatic configuration was used when the index was created, the number of
      /// checks required to achieve the specified precision was also computed, in which case
      /// this parameter is ignored </param>
      public void KnnSearch(IInputArray queries, IOutputArray indices, IOutputArray squareDistances, int knn, int checks)
      {
         using (InputArray iaQueries = queries.GetInputArray())
         using (OutputArray oaIndices = indices.GetOutputArray())
         using (OutputArray oaSquareDistances = squareDistances.GetOutputArray())
             CvFlannIndexKnnSearch(_ptr, iaQueries, oaIndices, oaSquareDistances, knn, checks);
      }
Example #9
      /// <summary>
      /// Compute the red pixel mask for the given image. 
      /// A red pixel is a pixel where: hue &lt; 20 OR hue &gt; 160, AND saturation &gt; 10
      /// </summary>
      /// <param name="image">The color image to find red mask from</param>
      /// <param name="mask">The red pixel mask</param>
      private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
      {
         bool useUMat;
         using (InputOutputArray ia = mask.GetInputOutputArray())
            useUMat = ia.IsUMat;

         using (IImage hsv = useUMat ? (IImage)new UMat() : (IImage)new Mat())
         using (IImage s = useUMat ? (IImage)new UMat() : (IImage)new Mat())
         {
            CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
            CvInvoke.ExtractChannel(hsv, mask, 0);
            CvInvoke.ExtractChannel(hsv, s, 1);

            //the mask for hue less than 20 or larger than 160
            using (ScalarArray lower = new ScalarArray(20))
            using (ScalarArray upper = new ScalarArray(160))
               CvInvoke.InRange(mask, lower, upper, mask);
            CvInvoke.BitwiseNot(mask, mask);

            //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
            CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
            CvInvoke.BitwiseAnd(mask, s, mask, null);

         }
      }
Example #10
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void DetectAsync(this IFeature2DAsync feature2DAsync, IInputArray image, IOutputArray keypoints, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaKeypoints = keypoints.GetOutputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
       CudaInvoke.cveCudaFeature2dAsyncDetectAsync(feature2DAsync.Feature2DAsyncPtr, iaImage, oaKeypoints, iaMask, stream);
 }
Example #11
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
       Feature2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
 /// <summary>
 /// Find the k-nearest match
 /// </summary>
 /// <param name="queryDescriptors">An n x m matrix of descriptors to be query for nearest neighbors. n is the number of descriptor and m is the size of the descriptor</param>
 /// <param name="k">Number of nearest neighbors to search for</param>
 /// <param name="mask">Can be null if not needed. An n x 1 matrix. If 0, the query descriptor in the corresponding row will be ignored.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public void KnnMatch(IInputArray queryDescriptors, IInputArray trainDescriptors, VectorOfVectorOfDMatch matches, int k, IInputArray mask = null, bool compactResult = false)
 {
    using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
    using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray() )
    using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
       CudaInvoke.cveCudaDescriptorMatcherKnnMatch(_ptr, iaQueryDescriptors, iaTrainDescriptors, matches, k, iaMask, compactResult);
 }
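The two methods above are the core of a typical feature-matching pipeline. A CPU-side sketch under assumed inputs (placeholder file names, an arbitrary SURF threshold, and the BFMatcher/KnnMatch overloads shown elsewhere in this listing); the CUDA matcher follows the same Add/KnnMatch pattern.

 using (SURF surf = new SURF(400))
 using (Mat modelImage = CvInvoke.Imread("model.png", ImreadModes.Grayscale))
 using (Mat sceneImage = CvInvoke.Imread("scene.png", ImreadModes.Grayscale))
 using (VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint())
 using (VectorOfKeyPoint sceneKeyPoints = new VectorOfKeyPoint())
 using (Mat modelDescriptors = new Mat())
 using (Mat sceneDescriptors = new Mat())
 using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
 using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
 {
    // detect keypoints and compute descriptors on both images
    surf.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);
    surf.DetectAndCompute(sceneImage, null, sceneKeyPoints, sceneDescriptors, false);

    // match the scene descriptors against the model, keeping the 2 best candidates each
    matcher.Add(modelDescriptors);
    matcher.KnnMatch(sceneDescriptors, matches, 2, null);
 }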
Example #13
 /// <summary>
 /// Detects objects of different sizes in the input image.
 /// </summary>
 /// <param name="image">Matrix of type CV_8U containing an image where objects should be detected.</param>
 /// <param name="objects">Buffer to store detected objects (rectangles).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void DetectMultiScale(IInputArray image, IOutputArray objects, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaObjects = objects.GetOutputArray())
       CudaInvoke.cudaCascadeClassifierDetectMultiScale(_ptr, iaImage, oaObjects,
          stream == null ? IntPtr.Zero : stream.Ptr);
 }
 /// <summary>
 /// Find the good features to track
 /// </summary>
 public void Detect(IInputArray image, IOutputArray corners, IInputArray mask = null, Stream stream = null)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaCorners = corners.GetOutputArray())
     using (InputArray iaMask = (mask == null ? InputArray.GetEmpty() : mask.GetInputArray()))
       CudaInvoke.cudaCornersDetectorDetect(_ptr, iaImage, oaCorners, iaMask, stream);
 }
Example #15
 /// <summary>
 /// Set the SVM detector 
 /// </summary>
 /// <param name="detector">The SVM detector</param>
 public void SetSVMDetector(IInputArray detector)
 {
    using (InputArray iaDetector = detector.GetInputArray())
    {
       CudaInvoke.cudaHOGSetSVMDetector(_ptr, iaDetector);
    }
 }
Example #16
      /// <summary>
      /// Saves the image to the specified file. The image format is chosen depending on the filename extension, see cvLoadImage. Only 8-bit single-channel or 3-channel (with 'BGR' channel order) images can be saved using this function. If the format, depth or channel order is different, use cvCvtScale and cvCvtColor to convert it before saving, or use universal cvSave to save the image to XML or YAML format
      /// </summary>
      /// <param name="filename">The name of the file to be saved to</param>
      /// <param name="image">The image to be saved</param>
      /// <param name="parameters">The parameters</param>
      /// <returns>true if success</returns>
      public static bool Imwrite(String filename, IInputArray image, params int[] parameters)
      {
         using (Util.VectorOfInt vec = new Util.VectorOfInt())
         {
            if (parameters.Length > 0)
               vec.Push(parameters);
            using (CvString s = new CvString(filename))
            using (InputArray iaImage = image.GetInputArray())
            {
#if !(__IOS__ || __ANDROID__ || NETFX_CORE)
               bool containsUnicode = (s.Length != filename.Length);
               if (containsUnicode &&
                   (Emgu.Util.Platform.OperationSystem != OS.MacOSX) &&
                   (Emgu.Util.Platform.OperationSystem != OS.Linux))
               {
                  //Handle unicode in Windows platform
                  //Work around for Open CV ticket:
                  //https://github.com/Itseez/opencv/issues/4292
                  //https://github.com/Itseez/opencv/issues/4866     
                  System.IO.FileInfo fi = new System.IO.FileInfo(filename);

                  using (VectorOfByte vb = new VectorOfByte())
                  {
                     CvInvoke.Imencode(fi.Extension, image, vb, parameters);
                     byte[] arr = vb.ToArray();
                     System.IO.File.WriteAllBytes(filename, arr);
                     return true;
                  }
               }
               else
#endif
                  return cveImwrite(s, iaImage, vec);
            }
         }
      }
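A usage sketch for the encoder parameters; ImwriteFlags is assumed to be the Emgu CV enum that mirrors OpenCV's imwrite flags, and the output file name is a placeholder. This saves a JPEG at quality 90.

 using (Mat img = new Mat(240, 320, DepthType.Cv8U, 3))
 {
    img.SetTo(new MCvScalar(255, 0, 0)); // solid blue (BGR) test image
    bool success = CvInvoke.Imwrite("result.jpg", img, (int)CvEnum.ImwriteFlags.JpegQuality, 90);
 }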
Example #17
 /// <summary>
 /// Reconstructs the selected image area from the pixel near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video.
 /// </summary>
 /// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
 /// <param name="mask">The inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted</param>
 /// <param name="dst">The output image of the same format and the same size as input</param>
 /// <param name="flags">The inpainting method</param>
 /// <param name="inpaintRadius">The radius of circular neighborhood of each point inpainted that is considered by the algorithm</param>
 public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaMask = mask.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveInpaint(iaSrc, iaMask, oaDst, inpaintRadius, flags);
 }
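A usage sketch for Inpaint under assumed inputs (placeholder file name and a hand-drawn damage mask): non-zero mask pixels mark the region to reconstruct.

 using (Mat photo = CvInvoke.Imread("scratched.jpg", ImreadModes.Color))
 using (Mat damageMask = new Mat(photo.Size, DepthType.Cv8U, 1))
 using (Mat repaired = new Mat())
 {
    damageMask.SetTo(new MCvScalar(0));
    // mark a known scratch region; in practice this mask comes from thresholding or user input
    CvInvoke.Rectangle(damageMask, new Rectangle(100, 80, 60, 8), new MCvScalar(255), -1);
    CvInvoke.Inpaint(photo, damageMask, repaired, 3, CvEnum.InpaintType.Telea);
 }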
 /// <summary>
 /// Calculates a dense optical flow on the GPU.
 /// </summary>
 /// <param name="denseFlow">The CUDA dense optical flow object</param>
 /// <param name="i0">First input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as i0.</param>
 /// <param name="flow">Computed flow image that has the same size as i0 and type CV_32FC2.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
 {
    using (InputArray iaI0 = i0.GetInputArray())
    using (InputArray iaI1 = i1.GetInputArray())
    using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
       cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, iaI0, iaI1, ioaFlow, (stream == null) ?  IntPtr.Zero : stream.Ptr);
 }
Example #19
 /// <summary>
 /// Calculates an optical flow.
 /// </summary>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as prev.</param>
 /// <param name="flow">Computed flow image that has the same size as prev and type CV_32FC2 </param>
 /// <param name="opticalFlow">The dense optical flow object</param>
 public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
    using (InputArray iaI0 = i0.GetInputArray())
    using (InputArray iaI1 = i1.GetInputArray())
    using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
       CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, iaI0, iaI1, ioaFlow);
 }
Example #20
 /// <summary>
 /// Converts keypoints from their GPU representation to a vector of KeyPoint.
 /// </summary>
 /// <param name="feature2DAsync">The Feature2DAsync object</param>
 /// <param name="gpuKeypoints">The keypoints in GPU representation</param>
 /// <param name="keypoints">The output vector of keypoints</param>
 public static void Convert(this IFeature2DAsync feature2DAsync, IInputArray gpuKeypoints,
    VectorOfKeyPoint keypoints)
 {
    using (InputArray iaGpuKeypoints = gpuKeypoints.GetInputArray())
    {
       CudaInvoke.cveCudaFeature2dAsyncConvert(feature2DAsync.Feature2DAsyncPtr, iaGpuKeypoints, keypoints);
    }
 }
Example #21
 /// <summary>
 /// Apply the Guided Filter to the source image, using the guide image to preserve edges.
 /// </summary>
 /// <param name="guide">Guide image, should have the same size as src.</param>
 /// <param name="src">Filtering image.</param>
 /// <param name="dst">Output image.</param>
 /// <param name="radius">Radius of the Guided Filter.</param>
 /// <param name="eps">Regularization term of the Guided Filter.</param>
 /// <param name="dDepth">Optional depth of the output image.</param>
 public static void GuidedFilter(IInputArray guide, IInputArray src, IOutputArray dst, int radius, double eps,
    int dDepth)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveGuidedFilter(iaGuide, iaSrc, oaDst, radius, eps, dDepth);
 }
Example #22
 /// <summary>
 /// Apply the Adaptive Manifold Filter to the source image, using the joint image as guidance.
 /// </summary>
 /// <param name="joint">Joint (also called guided) image.</param>
 /// <param name="src">Filtering image.</param>
 /// <param name="dst">Output image.</param>
 /// <param name="sigmaS">Spatial standard deviation.</param>
 /// <param name="sigmaR">Color space standard deviation.</param>
 /// <param name="adjustOutliers">If true, adjust outliers towards the source image.</param>
 public static void AmFilter(IInputArray joint, IInputArray src, IOutputArray dst, double sigmaS, double sigmaR,
    bool adjustOutliers)
 {
    using (InputArray iaJoint = joint.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveAmFilter(iaJoint, iaSrc, oaDst, sigmaS, sigmaR, adjustOutliers);
 }
Example #23
 /// <summary>
 /// Apply the Domain Transform filter to the source image.
 /// </summary>
 /// <param name="guide">Guide image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="src">Filtering image.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="sigmaSpatial">Similar to the sigma in the coordinate space of a bilateral filter.</param>
 /// <param name="sigmaColor">Similar to the sigma in the color space of a bilateral filter.</param>
 /// <param name="mode">The filtering mode (NC, RF or IC variant of the domain transform).</param>
 /// <param name="numIters">Number of iterations used for filtering, 3 is usually enough.</param>
 public static void DtFilter(IInputArray guide, IInputArray src, IOutputArray dst,
    double sigmaSpatial, double sigmaColor, int mode, int numIters)
 {
    using (InputArray iaGuide = guide.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveDtFilter(iaGuide, iaSrc, oaDst, sigmaSpatial, sigmaColor, mode, numIters);
 }
Example #24
 /// <summary>
 /// Create DAISY descriptor extractor
 /// </summary>
 /// <param name="radius">Radius of the descriptor at the initial scale.</param>
 /// <param name="qRadius">Amount of radial range division quantity.</param>
 /// <param name="qTheta">Amount of angular range division quantity.</param>
 /// <param name="qHist">Amount of gradient orientations range division quantity.</param>
 /// <param name="norm">Descriptors normalization type.</param>
 /// <param name="H">optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image</param>
 /// <param name="interpolation">Switch to disable interpolation for speed improvement at minor quality loss</param>
 /// <param name="useOrientation">Sample patterns using keypoints orientation, disabled by default.</param>
 public DAISY(float radius = 15, int qRadius = 3, int qTheta = 8,
    int qHist = 8, NormalizationType norm = NormalizationType.None, IInputArray H = null,
    bool interpolation = true, bool useOrientation = false)
 {
    using (InputArray iaH = H == null ? InputArray.GetEmpty() : H.GetInputArray())
       _ptr = ContribInvoke.cveDAISYCreate(radius, qRadius, qTheta, qHist, norm, iaH, interpolation, useOrientation,
          ref _feature2D);
 }
Example #25
 /// <summary>
 /// Iterates to find the object center given its back projection and initial position of search window. The iterations are made until the search window center moves by less than the given value and/or until the function has done the maximum number of iterations. 
 /// </summary>
 /// <param name="probImage">Back projection of object histogram</param>
 /// <param name="window">Initial search window</param>
 /// <param name="criteria">Criteria applied to determine when the window search should be finished. </param>
 /// <returns>The number of iterations made</returns>
 public static int MeanShift(
    IInputArray probImage,
    ref Rectangle window,
    MCvTermCriteria criteria)
 {
    using (InputArray iaProbImage = probImage.GetInputArray())
       return cveMeanShift(iaProbImage, ref window, ref criteria);
 }
Example #26
 /// <summary>
 /// Extracts pixels from src:
 /// dst(x, y) = src(x + center.x - (width(dst)-1)*0.5, y + center.y - (height(dst)-1)*0.5)
 /// where the values of pixels at non-integer coordinates are retrieved using bilinear interpolation. Every channel of multiple-channel images is processed independently. Whereas the rectangle center must be inside the image, the whole rectangle may be partially occluded. In this case, the replication border mode is used to get pixel values beyond the image boundaries.
 /// </summary>
 /// <param name="image">Source image</param>
 /// <param name="patchSize">Size of the extracted patch.</param>
 /// <param name="patch">Extracted rectangle</param>
 /// <param name="patchType">Depth of the extracted pixels. By default, they have the same depth as <paramref name="image"/>.</param>
 /// <param name="center">Floating point coordinates of the extracted rectangle center within the source image. The center must be inside the image.</param>
 public static void GetRectSubPix(IInputArray image, Size patchSize, PointF center, IOutputArray patch, DepthType patchType = DepthType.Default)
 {
    using (InputArray iaSrc = image.GetInputArray())
    using (OutputArray oaPatch = patch.GetOutputArray())
    {
       cveGetRectSubPix(iaSrc, ref patchSize, ref center, oaPatch, patchType);
    }
 }
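A short usage sketch (the file name is a placeholder): extract a 64x64 patch centered on a sub-pixel location.

 using (Mat img = CvInvoke.Imread("frame.png", ImreadModes.Color))
 using (Mat patch = new Mat())
 {
    CvInvoke.GetRectSubPix(img, new Size(64, 64), new PointF(123.4f, 56.7f), patch);
 }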
Example #27
 /// <summary>
 /// Trains the statistical model.
 /// </summary>
 /// <param name="model">The stat model.</param>
 /// <param name="samples">The training samples.</param>
 /// <param name="layoutType">Type of the layout.</param>
 /// <param name="responses">Vector of responses associated with the training samples.</param>
 /// <returns></returns>
 public static bool Train(this IStatModel model, IInputArray samples, DataLayoutType layoutType, IInputArray responses)
 {
    using (InputArray iaSamples = samples.GetInputArray())
    using (InputArray iaResponses = responses.GetInputArray())
    {
       return MlInvoke.StatModelTrain(model.StatModelPtr, iaSamples, layoutType, iaResponses);
    }
 }
Example #28
 /// <summary>
 /// Predicts response(s) for the provided sample(s).
 /// </summary>
 /// <param name="model">The stat model.</param>
 /// <param name="samples">The input samples.</param>
 /// <param name="results">The optional output matrix of results.</param>
 /// <param name="flags">The optional, model-dependent prediction flags.</param>
 /// <returns>The predicted value.</returns>
 public static float Predict(this IStatModel model, IInputArray samples, IOutputArray results = null, int flags = 0)
 {
    using (InputArray iaSamples = samples.GetInputArray())
    using (OutputArray oaResults = results == null ? OutputArray.GetEmpty() : results.GetOutputArray())
    {
       return MlInvoke.StatModelPredict(model.StatModelPtr, iaSamples, oaResults, flags);
    }
 }
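A toy sketch tying the Train and Predict extensions together, assuming the Emgu.CV.ML.SVM model with its default parameters and made-up 2D samples: each row is a sample, and the responses are integer class labels.

 using (Matrix<float> samples = new Matrix<float>(new float[,] { { 0f, 0f }, { 1f, 1f }, { 4f, 4f }, { 5f, 5f } }))
 using (Matrix<int> responses = new Matrix<int>(new int[] { 0, 0, 1, 1 }))
 using (Matrix<float> query = new Matrix<float>(new float[,] { { 4.5f, 4.5f } }))
 using (SVM svm = new SVM())
 {
    bool trained = svm.Train(samples, DataLayoutType.RowSample, responses);
    float predictedLabel = svm.Predict(query); // expected to be 1 for this toy data
 }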
Example #29
 /// <summary>
 /// Shows the image in the specified window
 /// </summary>
 /// <param name="name">Name of the window</param>
 /// <param name="image">Image to be shown</param>
 public static void Imshow(String name, IInputArray image)
 {
    using (CvString s = new CvString(name))
    using (InputArray iaImage = image.GetInputArray())
    {
       cveImshow(s, iaImage);
    }
 }
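A minimal display loop using the wrapper above together with WaitKey (the file name is a placeholder):

 using (Mat img = CvInvoke.Imread("result.jpg", ImreadModes.Color))
 {
    CvInvoke.Imshow("Preview", img);
    CvInvoke.WaitKey(0);              // block until a key is pressed
    CvInvoke.DestroyWindow("Preview");
 }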
Example #30
 /// <summary>
 /// Produce domain transform filtering operation on source image.
 /// </summary>
 /// <param name="src">Filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.</param>
 /// <param name="dst">Destination image.</param>
 /// <param name="dDepth">Optional depth of the output image. dDepth can be set to Default, which will be equivalent to src.depth().</param>
 public void Filter(IInputArray src, IOutputArray dst, DepthType dDepth = DepthType.Default)
 {
    using (InputArray iaSrc = src.GetInputArray())
       using (OutputArray oaDst = dst.GetOutputArray())
       {
          XimgprocInvoke.cveDTFilterFilter(_ptr, iaSrc, oaDst, dDepth);
       }
    
 }
Example #31
 /// <summary>
 /// Create a GPU image from a regular image
 /// </summary>
 /// <param name="img">The image to be converted to GPU image</param>
 public CudaImage(IInputArray img)
     : base(img)
 {
 }
Example #32
 /// <summary>
 /// Find the k-nearest match
 /// </summary>
 /// <param name="queryDescriptor">An n x m matrix of descriptors to be query for nearest neighbours. n is the number of descriptor and m is the size of the descriptor</param>
 /// <param name="k">Number of nearest neighbors to search for</param>
 /// <param name="mask">Can be null if not needed. An n x 1 matrix. If 0, the query descriptor in the corresponding row will be ignored.</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public void KnnMatch(IInputArray queryDescriptor, VectorOfVectorOfDMatch matches, int k, IInputArray mask)
 {
     using (InputArray iaQueryDesccriptor = queryDescriptor.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             DescriptorMatcherInvoke.CvDescriptorMatcherKnnMatch(_descriptorMatcherPtr, iaQueryDesccriptor, matches, k, iaMask);
 }
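The k-nearest matches returned above are usually pruned with a Lowe-style ratio test before any geometric verification. A small helper sketch, assuming KnnMatch was called with k = 2 (the 0.75 threshold is conventional, not part of this API):

 private static List<MDMatch> FilterByRatio(VectorOfVectorOfDMatch matches, float ratio = 0.75f)
 {
    List<MDMatch> good = new List<MDMatch>();
    for (int i = 0; i < matches.Size; i++)
    {
       using (VectorOfDMatch pair = matches[i])
       {
          // keep a match only when it is clearly better than the second-best candidate
          if (pair.Size >= 2 && pair[0].Distance < ratio * pair[1].Distance)
             good.Add(pair[0]);
       }
    }
    return good;
 }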
Example #33
 /// <summary>
 /// Apply color map to the image
 /// </summary>
 /// <param name="src">
 /// The source image.
 /// This function expects Image&lt;Bgr, Byte&gt; or Image&lt;Gray, Byte&gt;. If the wrong image type is given, the original image
 /// will be returned.</param>
 /// <param name="dst">The destination image</param>
 /// <param name="colorMapType">The type of color map</param>
 public static void ApplyColorMap(IInputArray src, IOutputArray dst, CvEnum.ColorMapType colorMapType)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveApplyColorMap(iaSrc, oaDst, colorMapType);
 }
Example #34
 /// <summary>
 /// Perform image denoising using Non-local Means Denoising algorithm (modified for color image):
 /// http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/
 /// with several computational optimizations. Noise expected to be a Gaussian white noise.
 /// The function converts image to CIELAB colorspace and then separately denoise L and AB components with given h parameters using fastNlMeansDenoising function.
 /// </summary>
 /// <param name="src">Input 8-bit 1-channel, 2-channel or 3-channel image.</param>
 /// <param name="dst">Output image with the same size and type as src.</param>
 /// <param name="h">Parameter regulating filter strength. Big h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise.</param>
 /// <param name="hColor">The same as h but for color components. For most images value equals 10 will be enought to remove colored noise and do not distort colors.</param>
 /// <param name="templateWindowSize">Size in pixels of the template patch that is used to compute weights. Should be odd.</param>
 /// <param name="searchWindowSize">Size in pixels of the window that is used to compute weighted average for given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time.</param>
 public static void FastNlMeansDenoisingColored(IInputArray src, IOutputArray dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveFastNlMeansDenoisingColored(iaSrc, oaDst, h, hColor, templateWindowSize, searchWindowSize);
 }
Example #35
 /// <summary>
 /// This filter enhances the details of a particular image.
 /// </summary>
 /// <param name="src">Input 8-bit 3-channel image</param>
 /// <param name="dst">Output image with the same size and type as src</param>
 /// <param name="sigmaS">Range between 0 to 200</param>
 /// <param name="sigmaR">Range between 0 to 1</param>
 public static void DetailEnhance(IInputArray src, IOutputArray dst, float sigmaS = 10.0f, float sigmaR = 0.15f)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveDetailEnhance(iaSrc, oaDst, sigmaS, sigmaR);
 }
Example #36
        public static void Detect(
            IInputArray image, String faceFileName, String eyeFileName,
            List <Rectangle> faces, List <Rectangle> eyes,
            out long detectionTime)
        {
            Stopwatch watch;

            using (InputArray iaImage = image.GetInputArray())
            {
#if !(__IOS__ || NETFX_CORE)
                if (iaImage.Kind == InputArray.Type.CudaGpuMat && CudaInvoke.HasCuda)
                {
                    using (CudaCascadeClassifier face = new CudaCascadeClassifier(faceFileName))
                        using (CudaCascadeClassifier eye = new CudaCascadeClassifier(eyeFileName))
                        {
                            face.ScaleFactor   = 1.1;
                            face.MinNeighbors  = 10;
                            face.MinObjectSize = Size.Empty;
                            eye.ScaleFactor    = 1.1;
                            eye.MinNeighbors   = 10;
                            eye.MinObjectSize  = Size.Empty;
                            watch = Stopwatch.StartNew();
                            using (CudaImage <Bgr, Byte> gpuImage = new CudaImage <Bgr, byte>(image))
                                using (CudaImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                                    using (GpuMat region = new GpuMat())
                                    {
                                        face.DetectMultiScale(gpuGray, region);
                                        Rectangle[] faceRegion = face.Convert(region);
                                        faces.AddRange(faceRegion);
                                        foreach (Rectangle f in faceRegion)
                                        {
                                            using (CudaImage <Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                                            {
                                                //For some reason a clone is required.
                                                //Might be a bug of CudaCascadeClassifier in opencv
                                                using (CudaImage <Gray, Byte> clone = faceImg.Clone(null))
                                                    using (GpuMat eyeRegionMat = new GpuMat())
                                                    {
                                                        eye.DetectMultiScale(clone, eyeRegionMat);
                                                        Rectangle[] eyeRegion = eye.Convert(eyeRegionMat);
                                                        foreach (Rectangle e in eyeRegion)
                                                        {
                                                            Rectangle eyeRect = e;
                                                            eyeRect.Offset(f.X, f.Y);
                                                            eyes.Add(eyeRect);
                                                        }
                                                    }
                                            }
                                        }
                                    }
                            watch.Stop();
                        }
                }
                else
#endif
                {
                    //Read the HaarCascade objects
                    using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                        using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
                        {
                            watch = Stopwatch.StartNew();

                            using (UMat ugray = new UMat())
                            {
                                CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                                //normalizes brightness and increases contrast of the image
                                CvInvoke.EqualizeHist(ugray, ugray);

                                //Detect the faces  from the gray scale image and store the locations as rectangle
                                //The first dimensional is the channel
                                //The second dimension is the index of the rectangle in the specific channel
                                Rectangle[] facesDetected = face.DetectMultiScale(
                                    ugray,
                                    1.1,
                                    10,
                                    new Size(20, 20));

                                faces.AddRange(facesDetected);

                                foreach (Rectangle f in facesDetected)
                                {
                                    //Get the region of interest on the faces
                                    using (UMat faceRegion = new UMat(ugray, f))
                                    {
                                        Rectangle[] eyesDetected = eye.DetectMultiScale(
                                            faceRegion,
                                            1.1,
                                            10,
                                            new Size(20, 20));

                                        foreach (Rectangle e in eyesDetected)
                                        {
                                            Rectangle eyeRect = e;
                                            eyeRect.Offset(f.X, f.Y);
                                            eyes.Add(eyeRect);
                                        }
                                    }
                                }
                            }
                            watch.Stop();
                        }
                }
                detectionTime = watch.ElapsedMilliseconds;
            }
        }
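A usage sketch for the detector above; the cascade file names are the stock OpenCV Haar cascades and the image path is a placeholder. Detected rectangles are drawn back onto the frame.

 List<Rectangle> faces = new List<Rectangle>();
 List<Rectangle> eyes = new List<Rectangle>();
 long detectionTime;
 using (Mat frame = CvInvoke.Imread("people.jpg", ImreadModes.Color))
 {
    Detect(frame, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml",
       faces, eyes, out detectionTime);
    foreach (Rectangle face in faces)
       CvInvoke.Rectangle(frame, face, new MCvScalar(0, 0, 255), 2); // red boxes (BGR)
    foreach (Rectangle eye in eyes)
       CvInvoke.Rectangle(frame, eye, new MCvScalar(255, 0, 0), 2);  // blue boxes
 }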
Example #37
        public void SetImage(IInputArray image)
        {
            #region display the size of the image
            if (image != null)
            {
                using (InputArray iaImage = image.GetInputArray())
                {
                    Size size = iaImage.GetSize();
                    widthTextbox.Text  = size.Width.ToString();
                    heightTextBox.Text = size.Height.ToString();
                }
            }
            else
            {
                widthTextbox.Text  = String.Empty;
                heightTextBox.Text = string.Empty;
            }
            #endregion

            #region display the color type of the image
            if (image != null)
            {
                Type     colorType       = Reflection.ReflectIImage.GetTypeOfColor(image);
                Object[] colorAttributes = colorType.GetCustomAttributes(typeof(ColorInfoAttribute), true);
                if (colorAttributes.Length > 0)
                {
                    ColorInfoAttribute info = (ColorInfoAttribute)colorAttributes[0];
                    typeOfColorTexbox.Text = info.ConversionCodename;
                }
                else
                {
                    typeOfColorTexbox.Text = "Unknown";
                }

                Type colorDepth = Reflection.ReflectIImage.GetTypeOfDepth(image);
                typeOfDepthTextBox.Text = colorDepth.Name;
            }
            else
            {
                typeOfColorTexbox.Text  = string.Empty;
                typeOfDepthTextBox.Text = string.Empty;
            }
            #endregion

            #region check if image is a subclass of CvArr type
            if (image != null)
            {
                Type imgType = image.GetType();
                if (IsSubTypeOf(imgType, typeof(CvArray <>)))
                {
                    _imageType = typeof(CvArray <>);
                }
                else if (IsSubTypeOf(imgType, typeof(Mat)))
                {
                    _imageType = typeof(Mat);
                }
                else if (IsSubTypeOf(imgType, typeof(UMat)))
                {
                    _imageType = typeof(UMat);
                }
                else
                {
                    _imageType = null;
                }
            }
            else
            {
                _imageType = null;
            }
            #endregion

            UpdateHistogram();
            UpdateZoomScale();
        }
Example #38
 /// <summary>
 /// Calculate the square root of each source array element. In the case of multichannel
 /// arrays each channel is processed independently. The function accuracy is approximately
 /// the same as of the built-in std::sqrt.
 /// </summary>
 /// <param name="src">The source floating-point array</param>
 /// <param name="dst">The destination array; will have the same size and the same type as src</param>
 public static void Sqrt(IInputArray src, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveSqrt(iaSrc, oaDst);
 }
Example #39
 /// <summary>
 /// Contrast Limited Adaptive Histogram Equalization (CLAHE)
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="clipLimit">Clip Limit, use 40 for default</param>
 /// <param name="tileGridSize">Tile grid size, use (8, 8) for default</param>
 /// <param name="dst">The destination image</param>
 public static void CLAHE(IInputArray src, double clipLimit, Size tileGridSize, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveCLAHE(iaSrc, clipLimit, ref tileGridSize, oaDst);
 }
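A usage sketch with the suggested defaults (the file name is a placeholder); CLAHE is typically applied to a single-channel image.

 using (Mat gray = CvInvoke.Imread("scan.png", ImreadModes.Grayscale))
 using (Mat equalized = new Mat())
 {
    CvInvoke.CLAHE(gray, 40, new Size(8, 8), equalized);
 }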
Example #40
 /// <summary>
 /// Trains the statistical model.
 /// </summary>
 /// <param name="model">The stat model.</param>
 /// <param name="samples">The training samples.</param>
 /// <param name="layoutType">Type of the layout.</param>
 /// <param name="responses">Vector of responses associated with the training samples.</param>
 /// <returns>True if the training is successful.</returns>
 public static bool Train(this IStatModel model, IInputArray samples, DataLayoutType layoutType, IInputArray responses)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaResponses = responses.GetInputArray())
         {
             return(MlInvoke.StatModelTrain(model.StatModelPtr, iaSamples, layoutType, iaResponses));
         }
 }
Example #41
 /// <summary>
 /// Detect MSER regions
 /// </summary>
 /// <param name="image">input image (8UC1, 8UC3 or 8UC4, must be greater or equal than 3x3)</param>
 /// <param name="msers">resulting list of point sets</param>
 /// <param name="bboxes">resulting bounding boxes</param>
 public void DetectRegions(IInputArray image, VectorOfVectorOfPoint msers, VectorOfRect bboxes)
 {
     using (InputArray iaImage = image.GetInputArray())
         Features2DInvoke.cveMserDetectRegions(_ptr, iaImage, msers, bboxes);
 }
Example #42
 /// <summary>
 /// Detects edges and prepares them to detect lines and ellipses.
 /// </summary>
 /// <param name="src">Input image</param>
 public void DetectEdges(IInputArray src)
 {
     using (InputArray iaSrc = src.GetInputArray())
         XImgprocInvoke.cveEdgeDrawingDetectEdges(_ptr, iaSrc);
 }
Example #43
 /// <summary>
 /// Updates the motion history image as following:
 /// mhi(x,y) = timestamp   if silhouette(x,y) != 0
 ///            0           if silhouette(x,y) == 0 and mhi(x,y) &lt; timestamp - duration
 ///            mhi(x,y)    otherwise
 /// That is, MHI pixels where motion occurs are set to the current timestamp, while the pixels where motion happened far ago are cleared.
 /// </summary>
 /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs. </param>
 /// <param name="mhi">Motion history image, that is updated by the function (single-channel, 32-bit floating-point) </param>
 /// <param name="timestamp">Current time in milliseconds or other units. </param>
 /// <param name="duration">Maximal duration of motion track in the same units as timestamp. </param>
 public static void UpdateMotionHistory(IInputArray silhouette, IInputOutputArray mhi, double timestamp, double duration)
 {
     using (InputArray iaSilhouette = silhouette.GetInputArray())
         using (InputOutputArray ioaMhi = mhi.GetInputOutputArray())
             cveUpdateMotionHistory(iaSilhouette, ioaMhi, timestamp, duration);
 }
Example #44
 /// <summary>
 /// Returns the result of each individual tree in the forest.
 /// In case the model is a regression problem, the method will return each of the trees'
 /// results for each of the sample cases. If the model is a classifier, it will return
 /// a Mat with samples + 1 rows, where the first row gives the class number and the
 /// following rows return the votes each class had for each sample.
 /// </summary>
 /// <param name="samples">Array containing the samples for which votes will be calculated.</param>
 /// <param name="results">Array where the result of the calculation will be written.</param>
 /// <param name="flags">Flags for defining the type of RTrees.</param>
 public void GetVotes(IInputArray samples, IOutputArray results, DTrees.Flags flags)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (OutputArray oaResults = results.GetOutputArray())
             MlInvoke.cveRTreesGetVotes(_ptr, iaSamples, oaResults, flags);
 }
Example #45
 /// <summary>
 /// Segment an image and store output in dst.
 /// </summary>
 /// <param name="src">The input image. Any number of channel (1 (Eg: Gray), 3 (Eg: RGB), 4 (Eg: RGB-D)) can be provided</param>
 /// <param name="dst">The output segmentation. It's a CV_32SC1 Mat with the same number of cols and rows as input image, with an unique, sequential, id for each pixel.</param>
 public void ProcessImage(IInputArray src, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             XImgprocInvoke.cveGraphSegmentationProcessImage(_ptr, iaSrc, oaDst);
 }
Example #46
 /// <summary>
 /// Train the face recognizer with the specific images and labels
 /// </summary>
 /// <param name="images">The images used in the training. This can be a VectorOfMat</param>
 /// <param name="labels">The labels of the images. This can be a VectorOfInt</param>
 public void Train(IInputArray images, IInputArray labels)
 {
     using (InputArray iaImage = images.GetInputArray())
         using (InputArray iaLabels = labels.GetInputArray())
             CvFaceRecognizerTrain(_ptr, iaImage, iaLabels);
 }
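A training sketch under assumed inputs: LBPHFaceRecognizer is used here as one of the Emgu CV face recognizers exposing this Train method, the image paths are placeholders, and each label identifies the person shown in the corresponding grayscale image.

 using (VectorOfMat trainingImages = new VectorOfMat())
 using (VectorOfInt trainingLabels = new VectorOfInt())
 using (LBPHFaceRecognizer recognizer = new LBPHFaceRecognizer())
 {
    trainingImages.Push(CvInvoke.Imread("person0_sample.png", ImreadModes.Grayscale));
    trainingImages.Push(CvInvoke.Imread("person1_sample.png", ImreadModes.Grayscale));
    trainingLabels.Push(new int[] { 0, 1 });
    recognizer.Train(trainingImages, trainingLabels);
 }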
Example #47
        /// <summary>
        /// Convert an input array to texture 2D
        /// </summary>
        /// <param name="image">The input image, if 3 channel, we assume it is Bgr, if 4 channels, we assume it is Bgra</param>
        /// <param name="flipType"></param>
        /// <param name="buffer">Optional buffer for the texture conversion, should be big enough to hold the image data. e.g. width*height*pixel_size</param>
        /// <returns>The texture 2D</returns>
        public static Texture2D InputArrayToTexture2D(IInputArray image, Emgu.CV.CvEnum.FlipType flipType = FlipType.Vertical, byte[] buffer = null)
        {
            using (InputArray iaImage = image.GetInputArray())
            {
                Size size = iaImage.GetSize();

                if (iaImage.GetChannels() == 3 && iaImage.GetDepth() == DepthType.Cv8U && SystemInfo.SupportsTextureFormat(TextureFormat.RGB24))
                {
                    //assuming 3 channel image is of BGR color
                    Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);

                    byte[] data;
                    int    bufferLength = size.Width * size.Height * 3;
                    if (buffer != null)
                    {
                        if (buffer.Length < bufferLength)
                        {
                            throw new ArgumentException(String.Format("Buffer size ({0}) is not big enough for the RBG24 texture, width * height * 3 = {1} is required.", buffer.Length, bufferLength));
                        }
                        data = buffer;
                    }
                    else
                    {
                        data = new byte[bufferLength];
                    }
                    GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                    using (
                        Image <Rgb, byte> rgb = new Image <Rgb, byte>(size.Width, size.Height, size.Width * 3,
                                                                      dataHandle.AddrOfPinnedObject()))
                    {
                        rgb.ConvertFrom(image);
                        if (flipType != FlipType.None)
                        {
                            CvInvoke.Flip(rgb, rgb, flipType);
                        }
                    }
                    dataHandle.Free();
                    texture.LoadRawTextureData(data);
                    texture.Apply();
                    return(texture);
                }
                else if (SystemInfo.SupportsTextureFormat(TextureFormat.RGBA32))
                {
                    Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);
                    byte[]    data;
                    int       bufferLength = size.Width * size.Height * 4;
                    if (buffer != null)
                    {
                        if (buffer.Length < bufferLength)
                        {
                            throw new ArgumentException(
                                      String.Format(
                                          "Buffer size ({0}) is not big enough for the RGBA32 texture, width * height * 4 = {1} is required.",
                                          buffer.Length, bufferLength));
                        }
                        data = buffer;
                    }
                    else
                    {
                        data = new byte[bufferLength];
                    }
                    GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                    using (
                        Image <Rgba, byte> rgba = new Image <Rgba, byte>(size.Width, size.Height, size.Width * 4,
                                                                         dataHandle.AddrOfPinnedObject()))
                    {
                        rgba.ConvertFrom(image);
                        if (flipType != FlipType.None)
                        {
                            CvInvoke.Flip(rgba, rgba, flipType);
                        }
                    }
                    dataHandle.Free();
                    texture.LoadRawTextureData(data);

                    texture.Apply();
                    return(texture);
                }
                else
                {
                    throw new Exception("TextureFormat of RGBA32 is not supported on this device");
                }
            }
        }
Example #48
 /// <summary>
 /// Create a Morphology filter.
 /// </summary>
 /// <param name="op">Type of morphological operation</param>
 /// <param name="kernel">2D 8-bit structuring element for the morphological operation.</param>
 /// <param name="anchor">Anchor position within the structuring element. Negative values mean that the anchor is at the center.</param>
 /// <param name="iterations">Number of times erosion and dilation to be applied.</param>
 /// <param name="srcDepth">The depth type of the source image</param>
 /// <param name="srcChannels">The number of channels in the source image</param>
 public CudaMorphologyFilter(CvEnum.MorphOp op, DepthType srcDepth, int srcChannels, IInputArray kernel, Point anchor, int iterations)
 {
     using (InputArray iaKernel = kernel.GetInputArray())
         _ptr = CudaInvoke.cudaCreateMorphologyFilter(op, CvInvoke.MakeType(srcDepth, srcChannels), iaKernel, ref anchor, iterations);
 }
Example #49
 /// <summary>
 /// Finds centers in the grid of circles
 /// </summary>
 /// <param name="image">Source chessboard view</param>
 /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
 /// <param name="flags">Various operation flags</param>
 /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
 /// <param name="centers">output array of detected centers.</param>
 /// <returns>True if grid found.</returns>
 public static bool FindCirclesGrid(IInputArray image, Size patternSize, IOutputArray centers, CvEnum.CalibCgType flags, Feature2D featureDetector)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCenters = centers.GetOutputArray())
             return(cveFindCirclesGrid(iaImage, ref patternSize, oaCenters, flags, featureDetector.Feature2DPtr));
 }
Example #50
 /// <summary>
 /// The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image.
 /// </summary>
 /// <param name="image">Image to segment</param>
 /// <param name="algorithm">Chooses the algorithm variant to use</param>
 /// <param name="regionSize">Chooses an average superpixel size measured in pixels</param>
 /// <param name="ruler">Chooses the enforcement of superpixel smoothness factor of superpixel</param>
 public SupperpixelSLIC(IInputArray image, Algorithm algorithm, int regionSize, float ruler)
 {
     using (InputArray iaImage = image.GetInputArray())
         _ptr = XImgprocInvoke.cveSuperpixelSLICCreate(iaImage, algorithm, regionSize, ruler, ref _sharedPtr);
 }
Example #51
 /// <summary>
 /// Finds circles in a grayscale image using the Hough transform.
 /// </summary>
 /// <param name="image">8-bit, single-channel grayscale input image.</param>
 /// <param name="circles">Output vector of found circles. Each vector is encoded as a 3-element floating-point vector.</param>
 public void Detect(IInputArray image, IOutputArray circles, Stream stream = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCircles = circles.GetOutputArray())
             CudaInvoke.cudaHoughCirclesDetectorDetect(_ptr, iaImage, oaCircles, stream);
 }
Example #52
 /// <summary>
 /// Create 2D plot from data
 /// </summary>
 /// <param name="data">The data to be plotted</param>
 public Plot2d(IInputArray data)
 {
     using (InputArray iaData = data.GetInputArray())
         _ptr = PlotInvoke.cvePlot2dCreateFrom(iaData);
 }
Example #53
 /// <summary>
 /// Updates a FaceRecognizer with given data and associated labels.
 /// </summary>
 /// <param name="images">The training images, that means the faces you want to learn. The data has to be given as a VectorOfMat.</param>
 /// <param name="labels">The labels corresponding to the images</param>
 public void Update(IInputArray images, IInputArray labels)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (InputArray iaLabels = labels.GetInputArray())
             CvFaceRecognizerUpdate(_ptr, iaImages, iaLabels);
 }
Example #54
 private float[] FeatureExtractionVerfication(IInputArray patch)
 {
     return(hog_verfication.Compute(patch, winStride, trainPadding, null));
 }
Example #55
 /// <summary>
 /// Create 2D plot for data
 /// </summary>
 /// <param name="dataX">The data for the X-axis</param>
 /// <param name="dataY">The data for the Y-axis</param>
 public Plot2d(IInputArray dataX, IInputArray dataY)
 {
     using (InputArray iaDataX = dataX.GetInputArray())
         using (InputArray iaDataY = dataY.GetInputArray())
             _ptr = PlotInvoke.cvePlot2dCreateFromXY(iaDataX, iaDataY);
 }
Example #56
 /// <summary>
 /// Stylization aims to produce digital imagery with a wide variety of effects not focused on photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low contrast while preserving, or enhancing, high-contrast features.
 /// </summary>
 /// <param name="src">Input 8-bit 3-channel image.</param>
 /// <param name="dst">Output image with the same size and type as src.</param>
 /// <param name="sigmaS">Range between 0 to 200.</param>
 /// <param name="sigmaR"> Range between 0 to 1.</param>
 public static void Stylization(IInputArray src, IOutputArray dst, float sigmaS = 60, float sigmaR = 0.45f)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveStylization(iaSrc, oaDst, sigmaS, sigmaR);
 }
Example #57
 /// <summary>
 /// Set the image for optical character recognition
 /// </summary>
 /// <param name="image">The image where detection took place</param>
 public void SetImage(IInputArray image)
 {
     using (InputArray iaImage = image.GetInputArray())
         OcrInvoke.TessBaseAPISetImage(_ptr, iaImage);
 }
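A usage sketch of the OCR flow around SetImage; the tessdata path, language, and image name are placeholders, and OcrEngineMode.Default is assumed to be available in the Emgu.CV.OCR wrapper.

 using (Tesseract ocr = new Tesseract("./tessdata", "eng", OcrEngineMode.Default))
 using (Mat page = CvInvoke.Imread("receipt.png", ImreadModes.Grayscale))
 {
    ocr.SetImage(page);
    if (ocr.Recognize() == 0)           // 0 indicates success
    {
       string text = ocr.GetUTF8Text();
    }
 }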
Example #58
 /// <summary>
 /// Add the model descriptors
 /// </summary>
 /// <param name="modelDescriptors">The model descriptors</param>
 public void Add(IInputArray modelDescriptors)
 {
     using (InputArray iaModelDescriptors = modelDescriptors.GetInputArray())
         DescriptorMatcherInvoke.CvDescriptorMatcherAdd(_descriptorMatcherPtr, iaModelDescriptors);
 }
Example #59
 /// <summary>
 /// Update the background model
 /// </summary>
 /// <param name="image">The image that is used to update the background model</param>
 /// <param name="learningRate">Use -1 for default</param>
 /// <param name="fgMask">The output forground mask</param>
 public void Apply(IInputArray image, IOutputArray fgMask, double learningRate = -1)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaFgMask = fgMask.GetOutputArray())
             CvInvoke.cveBackgroundSubtractorUpdate(_ptr, iaImage, oaFgMask, learningRate);
 }
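A usage sketch for the subtractor in a video loop; BackgroundSubtractorMOG2 is used as a concrete implementation and the video path is a placeholder.

 using (VideoCapture capture = new VideoCapture("traffic.mp4"))
 using (BackgroundSubtractorMOG2 subtractor = new BackgroundSubtractorMOG2())
 using (Mat frame = new Mat())
 using (Mat foregroundMask = new Mat())
 {
    while (capture.Read(frame) && !frame.IsEmpty)
    {
       // a learningRate of -1 lets the subtractor pick its own update rate
       subtractor.Apply(frame, foregroundMask, -1);
    }
 }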
Example #60
 /// <summary>
 /// Returns the min / max locations and values for the matrix
 /// </summary>
 public void MinMax(out double minValue, out double maxValue, out Point minLocation, out Point maxLocation, IInputArray mask = null)
 {
     //minValue = 0; maxValue = 0;
     minLocation = new Point(); maxLocation = new Point();
     int[] minArr = new int[2], maxArr = new int[2];
     CvInvoke.MinMaxIdx(this, out minValue, out maxValue, minArr, maxArr, mask);
     minLocation.X = minArr[1]; minLocation.Y = minArr[0];
     maxLocation.X = maxArr[1]; maxLocation.Y = maxArr[0];
     //CvInvoke.cvMinMaxLoc(Ptr, ref minValue, ref maxValue, ref minLocation, ref maxLocation, IntPtr.Zero);
 }