Example #1
 /// <summary>
 /// Applies an affine transformation to an image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="mapMatrix">2x3 transformation matrix</param>
 /// <param name="dsize">Size of the output image.</param>
 /// <param name="interpMethod">Interpolation method</param>
 /// <param name="warpMethod">Warp method</param>
 /// <param name="borderMode">Pixel extrapolation method</param>
 /// <param name="borderValue">A value used to fill outliers</param>
 public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    using (InputArray iaMapMatrix = mapMatrix.GetInputArray())
       cveWarpAffine(iaSrc, oaDst, iaMapMatrix, ref dsize, (int)interpMethod | (int)warpMethod, borderMode, ref borderValue);
 }
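A minimal usage sketch (not part of the original example): rotate an image about its center with the WarpAffine wrapper above. The file name is a placeholder, and CvInvoke.Imread / CvInvoke.GetRotationMatrix2D with ImreadModes naming are assumed to match the newer Emgu CV API.
 // Assumes: using Emgu.CV; using Emgu.CV.CvEnum; using System.Drawing;
 using (Mat src = CvInvoke.Imread("input.jpg", CvEnum.ImreadModes.Color))   // placeholder file name
 using (Mat rotationMatrix = new Mat())
 using (Mat dst = new Mat())
 {
    PointF center = new PointF(src.Cols / 2f, src.Rows / 2f);
    CvInvoke.GetRotationMatrix2D(center, 30, 1.0, rotationMatrix);          // 2x3 affine matrix for a 30 degree rotation
    CvInvoke.WarpAffine(src, dst, rotationMatrix, src.Size);                // defaults: linear interpolation, constant border
 }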
Example #2
 public static extern int cvFindFundamentalMat(IntPtr points1,
    IntPtr points2,
    IntPtr fundamentalMatrix,
    CvEnum.CV_FM method,
    double param1,
    double param2,
    IntPtr status);
Example #3
 /// <summary>
 /// Reconstructs the selected image area from the pixels near the area boundary. The function may be used to remove dust and scratches from a scanned photo, or to remove undesirable objects from still images or video.
 /// </summary>
 /// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
 /// <param name="mask">The inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted</param>
 /// <param name="dst">The output image of the same format and the same size as input</param>
 /// <param name="flags">The inpainting method</param>
 /// <param name="inpaintRadius">The radius of circular neighborhood of each point inpainted that is considered by the algorithm</param>
 public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaMask = mask.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveInpaint(iaSrc, iaMask, oaDst, inpaintRadius, flags);
 }
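A minimal usage sketch (not part of the original example): mark a damaged region in a mask and reconstruct it with Inpaint. The file name and coordinates are placeholders.
 using (Mat photo = CvInvoke.Imread("scanned_photo.jpg", CvEnum.ImreadModes.Color))  // placeholder file name
 using (Mat mask = new Mat(photo.Size, DepthType.Cv8U, 1))
 using (Mat repaired = new Mat())
 {
    mask.SetTo(new MCvScalar(0));                                            // start with an empty mask
    CvInvoke.Circle(mask, new Point(120, 80), 15, new MCvScalar(255), -1);   // mark the scratch (placeholder location)
    CvInvoke.Inpaint(photo, mask, repaired, 5, CvEnum.InpaintType.Telea);    // 5-pixel inpainting radius
 }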
Example #4
 /// <summary>
 /// Finds perspective transformation H=||h_ij|| between the source and the destination planes
 /// </summary>
 /// <param name="srcPoints">Point coordinates in the original plane</param>
 /// <param name="dstPoints">Point coordinates in the destination plane</param>
 /// <param name="homography">The output homography matrix</param>
 /// <param name="method">FindHomography method</param>
 /// <param name="ransacReprojThreshold">
 /// The maximum allowed reprojection error to treat a point pair as an inlier. 
 /// The parameter is only used in RANSAC-based homography estimation. 
 /// E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3
 /// </param>
 /// <param name="mask">Optional output mask set by a robust method ( CV_RANSAC or CV_LMEDS ). Note that the input mask values are ignored.</param>
 /// <remarks>The 3x3 homography matrix, if found, is written to <paramref name="homography"/>.</remarks>
 public static void FindHomography(
    PointF[] srcPoints,
    PointF[] dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
 {
    GCHandle srcHandle = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
    GCHandle dstHandle = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
    try
    {
       using (
          Mat srcPointMatrix = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, srcHandle.AddrOfPinnedObject(), 8))
       using (
          Mat dstPointMatrix = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, dstHandle.AddrOfPinnedObject(), 8))
       {
          CvInvoke.FindHomography(srcPointMatrix, dstPointMatrix, homography, method, ransacReprojThreshold, mask);
       }
    }
    finally
    {
       srcHandle.Free();
       dstHandle.Free();
    }
 }
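A minimal usage sketch (not part of the original example): estimate a homography from four matched point pairs with the overload above. The coordinates are placeholders for real feature matches.
 PointF[] srcPts = { new PointF(0, 0), new PointF(100, 0), new PointF(100, 100), new PointF(0, 100) };
 PointF[] dstPts = { new PointF(12, 5), new PointF(108, 9), new PointF(104, 112), new PointF(3, 107) };
 using (Mat homography = new Mat())
 {
    FindHomography(srcPts, dstPts, homography, CvEnum.HomographyMethod.Ransac, 3);
    // homography now holds the 3x3 perspective transform (empty if estimation failed)
 }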
Example #5
 /// <summary>
 /// Decode image stored in the buffer
 /// </summary>
 /// <param name="buf">The buffer</param>
 /// <param name="loadType">The image loading type</param>
 /// <param name="dst">The output placeholder for the decoded matrix.</param>
 public static void Imdecode(byte[] buf, CvEnum.LoadImageType loadType, Mat dst)
 {
    using (VectorOfByte vb = new VectorOfByte(buf))
    {
       Imdecode(vb, loadType, dst);
    }
 }
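A minimal usage sketch (not part of the original example): decode an encoded image held in a byte array, e.g. read from disk or received over the network. The file name is a placeholder.
 byte[] encoded = System.IO.File.ReadAllBytes("photo.jpg");   // placeholder source of the buffer
 using (Mat decoded = new Mat())
 {
    CvInvoke.Imdecode(encoded, CvEnum.LoadImageType.Color, decoded);
    // decoded is a BGR Mat, or empty if the buffer could not be decoded
 }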
Example #6
 public static extern int cvFindHomography(
    IntPtr srcPoints,
    IntPtr dstPoints,
    IntPtr homography,
    CvEnum.HOMOGRAPHY_METHOD method,
    double ransacReprojThreshold,
    IntPtr mask);
Example #7
      private IntPtr AllocateData(CvEnum.DepthType type, int channels, int totalInBytes)
      {
         FreeData();

         switch (type)
         {
            //case CvEnum.DepthType.Cv8U:
            //   _data = new byte[totalInBytes];
            //   break;
            case CvEnum.DepthType.Cv8S:
               _data = new SByte[totalInBytes];
               break;
            case CvEnum.DepthType.Cv16U:
               _data = new UInt16[totalInBytes >> 1];
               break;
            case CvEnum.DepthType.Cv16S:
               _data = new Int16[totalInBytes >> 1];
               break;
            case CvEnum.DepthType.Cv32S:
               _data = new Int32[totalInBytes >> 2];
               break;
            case CvEnum.DepthType.Cv32F:
               _data = new float[totalInBytes >> 2];
               break;
            case CvEnum.DepthType.Cv64F:
               _data = new double[totalInBytes >> 3];
               break;
            default:
               _data = new byte[totalInBytes];
               break;
         }

         _dataHandle = GCHandle.Alloc(_data, GCHandleType.Pinned);
         return _dataHandle.AddrOfPinnedObject();
      }
Example #8
         /// <summary>
        /// Computes dense optical flow using Gunnar Farneback's algorithm
        /// </summary>
        /// <param name="prev0">The first 8-bit single-channel input image</param>
        /// <param name="next0">The second input image of the same size and the same type as prevImg</param>
        /// <param name="flowX">The computed flow image for x-velocity; will have the same size as prevImg</param>
        /// <param name="flowY">The computed flow image for y-velocity; will have the same size as prevImg</param>
        /// <param name="pyrScale">Specifies the image scale (!1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous</param>
        /// <param name="levels">The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used</param>
        /// <param name="winSize">The averaging window size; The larger values increase the algorithm robustness to image noise and give more chances for fast motion detection, but yield more blurred motion field</param>
        /// <param name="iterations">The number of iterations the algorithm does at each pyramid level</param>
        /// <param name="polyN">Size of the pixel neighborhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly n=5 or 7</param>
        /// <param name="polySigma">Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly n=5 you can set poly sigma=1.1, for poly n=7 a good value would be poly sigma=1.5</param>
        /// <param name="flags">The operation flags</param>
        public static void Farneback(
         Image<Gray, Byte> prev0,
         Image<Gray, Byte> next0,
         Image<Gray, Single> flowX,
         Image<Gray, Single> flowY,
         double pyrScale,
         int levels,
         int winSize,
         int iterations,
         int polyN,
         double polySigma,
         CvEnum.OPTICALFLOW_FARNEBACK_FLAG flags)
         {
            IntPtr flow0 = CvInvoke.cvCreateImage(prev0.Size, Emgu.CV.CvEnum.IPL_DEPTH.IPL_DEPTH_32F, 2);
            try
            {
               if ((int)(flags & Emgu.CV.CvEnum.OPTICALFLOW_FARNEBACK_FLAG.USE_INITIAL_FLOW) != 0)
               {  // use initial flow
                  CvInvoke.cvMerge(flowX.Ptr, flowY.Ptr, IntPtr.Zero, IntPtr.Zero, flow0);
               }

               CvInvoke.cvCalcOpticalFlowFarneback(prev0, next0, flow0, pyrScale, levels, winSize, iterations, polyN, polySigma, flags);
               CvInvoke.cvSplit(flow0, flowX.Ptr, flowY.Ptr, IntPtr.Zero, IntPtr.Zero);
            }
            finally
            {
               CvInvoke.cvReleaseImage(ref flow0);
            }
         }
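A minimal usage sketch (not part of the original example) for the legacy Farneback wrapper above: compute dense flow between two consecutive grayscale frames. The file names are placeholders, and OPTICALFLOW_FARNEBACK_FLAG.DEFAULT is assumed to be the zero flag of this legacy enum.
 using (Image<Gray, Byte> prev = new Image<Gray, Byte>("frame0.png"))     // placeholder file names
 using (Image<Gray, Byte> next = new Image<Gray, Byte>("frame1.png"))
 using (Image<Gray, Single> flowX = new Image<Gray, Single>(prev.Size))
 using (Image<Gray, Single> flowY = new Image<Gray, Single>(prev.Size))
 {
    Farneback(prev, next, flowX, flowY, 0.5, 3, 15, 3, 5, 1.1,
       Emgu.CV.CvEnum.OPTICALFLOW_FARNEBACK_FLAG.DEFAULT);
    // flowX/flowY now hold the per-pixel x/y displacements
 }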
Example #9
 private static extern void cveGrabCut(
    IntPtr img,
    IntPtr mask,
    ref Rectangle rect,
    IntPtr bgdModel,
    IntPtr fgdModel,
    int iterCount,
    CvEnum.GrabcutInitType type);
Example #10
 public static extern void cvAdaptiveThreshold(
     IntPtr src,
     IntPtr dst,
     double maxValue,
     CvEnum.ADAPTIVE_THRESHOLD_TYPE adaptiveType,
     CvEnum.THRESH thresholdType,
     int blockSize,
     double param1);
Example #11
 public extern static void CvGrabCut(
    IntPtr img,
    IntPtr mask,
    ref Rectangle rect,
    IntPtr bgdModel,
    IntPtr fgdModel,
    int iterCount,
    CvEnum.GRABCUT_INIT_TYPE type);
Example #12
 public static extern IntPtr cvHaarDetectObjects(
  IntPtr image,
  IntPtr cascade,
  IntPtr storage,
  double scaleFactor,
  int minNeighbors,
  CvEnum.HAAR_DETECTION_TYPE flags,
  Size minSize);
Example #13
 /// <summary>
 /// Create a Sobel filter.
 /// </summary>
 /// <param name="srcDepth">The depth type of the source image</param>
 /// <param name="srcChannels">The number of channels in the source image</param>
 /// <param name="dstDepth">The depth type of the destination image</param>
 /// <param name="dstChannels">The number of channels in the destination image</param>
 /// <param name="dx">Order of the derivative x</param>
 /// <param name="dy">Order of the derivative y</param>
 /// <param name="ksize">Size of the extended Sobel kernel</param>
 /// <param name="scale">Optional scale, use 1 for default.</param>
 /// <param name="rowBorderType">The row border type.</param>
 /// <param name="columnBorderType">The column border type.</param>
 public CudaSobelFilter(
    DepthType srcDepth, int srcChannels, 
    DepthType dstDepth, int dstChannels,
    int dx, int dy, int ksize = 3, double scale = 1.0, 
    CvEnum.BorderType rowBorderType = BorderType.Default, CvEnum.BorderType columnBorderType = BorderType.NegativeOne)
 {
    _ptr = CudaInvoke.cudaCreateSobelFilter(CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels), 
       dx, dy, ksize, scale, rowBorderType, columnBorderType);
 }
Example #14
 public static extern double cvCalibrateCamera2(
   IntPtr objectPoints,
   IntPtr imagePoints,
   IntPtr pointCounts,
   Size imageSize,
   IntPtr intrinsicMatrix,
   IntPtr distortionCoeffs,
   IntPtr rotationVectors,
   IntPtr translationVectors,
   CvEnum.CALIB_TYPE flags);
Example #15
 /// <summary>
 /// Applies the joint bilateral filter to an image.
 /// </summary>
 /// <param name="joint">Joint 8-bit or floating-point, 1-channel or 3-channel image.</param>
 /// <param name="src">Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint image.</param>
 /// <param name="dst">Destination image of the same size and type as src .</param>
 /// <param name="d">Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace .</param>
 /// <param name="sigmaColor">Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger areas of semi-equal color.</param>
 /// <param name="sigmaSpace">Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor ). When d&gt;0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is proportional to sigmaSpace .</param>
 /// <param name="borderType">Border type</param>
 public static void JointBilateralFilter(
    IInputArray joint, IInputArray src, IOutputArray dst, int d,
    double sigmaColor, double sigmaSpace, CvEnum.BorderType borderType = BorderType.Reflect101)
 {
    using (InputArray iaJoint = joint.GetInputArray())
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveJointBilateralFilter(iaJoint, iaSrc,
          oaDst, d, sigmaColor, sigmaSpace, borderType);
 }
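A minimal usage sketch (not part of the original example): smooth a noisy image while taking the edges from a cleaner guide image of the same size and depth. The file names are placeholders.
 using (Mat guide = CvInvoke.Imread("guide.png", CvEnum.ImreadModes.Color))   // placeholder file names
 using (Mat noisy = CvInvoke.Imread("noisy.png", CvEnum.ImreadModes.Color))
 using (Mat smoothed = new Mat())
 {
    JointBilateralFilter(guide, noisy, smoothed, 9, 25, 25);   // d = 9, sigmaColor = sigmaSpace = 25
 }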
Example #16
 /// <summary>
 /// Create a Laplacian filter.
 /// </summary>
 /// <param name="srcDepth">The depth type of the source image</param>
 /// <param name="srcChannels">The number of channels in the source image</param>
 /// <param name="dstDepth">The depth type of the destination image</param>
 /// <param name="dstChannels">The number of channels in the destination image</param>
 /// <param name="ksize">Either 1 or 3</param>
 /// <param name="scale">Optional scale. Use 1.0 for default</param>
 /// <param name="borderType">The border type.</param>
 /// <param name="borderValue">The border value.</param>
 public CudaLaplacianFilter(
    DepthType srcDepth, int srcChannels,
    DepthType dstDepth, int dstChannels,
    int ksize = 1, double scale = 1.0, 
    CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
 {
    _ptr = CudaInvoke.cudaCreateLaplacianFilter(
       CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels), 
       ksize, scale, borderType, ref borderValue);
 }
Example #17
 /// <summary>
 /// Applies an edge-preserving smoothing filter to the source image. Filtering is a fundamental operation in image and video processing, and edge-preserving smoothing filters are useful in many different applications.
 /// </summary>
 /// <param name="src">Input 8-bit 3-channel image</param>
 /// <param name="dst">Output 8-bit 3-channel image</param>
 /// <param name="flags">Edge preserving filters</param>
 /// <param name="sigmaS">Range between 0 to 200</param>
 /// <param name="sigmaR">Range between 0 to 1</param>
 public static void EdgePreservingFilter(
    IInputArray src, IOutputArray dst,
    CvEnum.EdgePreservingFilterFlag flags = CvEnum.EdgePreservingFilterFlag.RecursFilter,
    float sigmaS = 60.0f,
    float sigmaR = 0.4f)
 {
    using (InputArray iaSrc = src.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveEdgePreservingFilter(iaSrc, oaDst, flags, sigmaS, sigmaR);
 }
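A minimal usage sketch (not part of the original example): run the edge-preserving filter with its default parameters. The file name is a placeholder.
 using (Mat photo = CvInvoke.Imread("portrait.jpg", CvEnum.ImreadModes.Color))   // placeholder file name
 using (Mat smoothed = new Mat())
 {
    CvInvoke.EdgePreservingFilter(photo, smoothed);   // defaults: RecursFilter, sigmaS = 60, sigmaR = 0.4
 }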
Example #18
 public extern static void cvCalcOpticalFlowFarneback(
    IntPtr prev0,
    IntPtr next0,
    IntPtr flow0,
    double pyrScale,
    int levels,
    int winSize,
    int iterations,
    int polyN,
    double polySigma,
    CvEnum.OPTICALFLOW_FARNEBACK_FLAG flags);
Example #19
 /// <summary>
 /// Create a Gaussian filter.
 /// </summary>
 /// <param name="ksize">The size of the kernel</param>
 /// <param name="sigma1">This parameter may specify Gaussian sigma (standard deviation). If it is zero, it is calculated from the kernel size.</param>
 /// <param name="sigma2">In case of non-square Gaussian kernel the parameter may be used to specify a different (from param3) sigma in the vertical direction. Use 0 for default</param>
 /// <param name="rowBorderType">The row border type.</param>
 /// <param name="columnBorderType">The column border type.</param>
 /// <param name="srcDepth">The depth type of the source image</param>
 /// <param name="srcChannels">The number of channels in the source image</param>
 /// <param name="dstDepth">The depth type of the destination image</param>
 /// <param name="dstChannels">The number of channels in the destination image</param>
 public CudaGaussianFilter(
    DepthType srcDepth, int srcChannels,
    DepthType dstDepth, int dstChannels,
    Size ksize, 
    double sigma1, double sigma2 = 0, 
    CvEnum.BorderType rowBorderType = BorderType.Default, CvEnum.BorderType columnBorderType = BorderType.NegativeOne)
 {
    _ptr = CudaInvoke.cudaCreateGaussianFilter(
       CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels), 
       ref ksize, sigma1, sigma2, (int)rowBorderType, (int)columnBorderType);
 }
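A minimal usage sketch (not part of the original example): blur an 8-bit single-channel GpuMat with the filter created above. It assumes the created filter exposes the usual Emgu CudaFilter.Apply method and that a CUDA-capable device is present; the file name is a placeholder.
 using (Mat cpuSrc = CvInvoke.Imread("input.png", CvEnum.ImreadModes.Grayscale))   // placeholder file name
 using (GpuMat gpuSrc = new GpuMat())
 using (GpuMat gpuDst = new GpuMat())
 using (CudaGaussianFilter gaussian = new CudaGaussianFilter(
    DepthType.Cv8U, 1, DepthType.Cv8U, 1, new Size(5, 5), 1.5))
 {
    gpuSrc.Upload(cpuSrc);            // copy to device memory
    gaussian.Apply(gpuSrc, gpuDst);   // filtered result stays on the GPU in gpuDst
 }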
Example #20
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="flags">Flags</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints,
         Size imageSize,
         IntrinsicCameraParameters intrinsicParam,
         CvEnum.CALIB_TYPE flags,
         out ExtrinsicCameraParameters[] extrinsicParams)
         {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for object points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            #region get the array that represents the point counts
            int[] pointCounts = new int[objectPoints.Length];
            for (int i = 0; i < objectPoints.Length; i++)
            {
               Debug.Assert(objectPoints[i].Length == imagePoints[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
               pointCounts[i] = objectPoints[i].Length;
            }
            #endregion

            double reprojectionError = -1;
            using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
            using (Matrix<float> imagePointMatrix = ToMatrix(imagePoints))
            using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
            using (Matrix<double> rotationVectors = new Matrix<double>(imageCount, 3))
            using (Matrix<double> translationVectors = new Matrix<double>(imageCount, 3))
            {
               reprojectionError = CvInvoke.cvCalibrateCamera2(
                  objectPointMatrix.Ptr,
                  imagePointMatrix.Ptr,
                  pointCountsMatrix.Ptr,
                  imageSize,
                  intrinsicParam.IntrinsicMatrix.Ptr,
                  intrinsicParam.DistortionCoeffs.Ptr,
                  rotationVectors.Ptr,
                  translationVectors.Ptr,
                  flags);

               extrinsicParams = new ExtrinsicCameraParameters[imageCount];
               IntPtr matPtr = Marshal.AllocHGlobal(StructSize.MCvMat);
               for (int i = 0; i < imageCount; i++)
               {
                  ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                  CvInvoke.cvGetRow(rotationVectors.Ptr, matPtr, i);
                  CvInvoke.cvTranspose(matPtr, p.RotationVector.Ptr);
                  CvInvoke.cvGetRow(translationVectors.Ptr, matPtr, i);
                  CvInvoke.cvTranspose(matPtr, p.TranslationVector.Ptr);
                  extrinsicParams[i] = p;
               }
               Marshal.FreeHGlobal(matPtr);
            }
            return reprojectionError;
         }
Example #21
 /// <summary>
 /// Projects the image.
 /// </summary>
 /// <param name="src">Source image</param>
 /// <param name="K">Camera intrinsic parameters</param>
 /// <param name="R">Camera rotation matrix</param>
 /// <param name="interpMode">Interpolation mode</param>
 /// <param name="borderMode">Border extrapolation mode</param>
 /// <param name="dst">Projected image</param>
 /// <returns>Projected image top-left corner</returns>
 public Point Warp(IInputArray src, IInputArray K, IInputArray R, CvEnum.Inter interpMode, CvEnum.BorderType borderMode, IOutputArray dst)
 {
    Point corner = new Point();
    using (InputArray iaSrc = src.GetInputArray())
    using (InputArray iaK = K.GetInputArray())
    using (InputArray iaR = R.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
    {
       StitchingInvoke.cveRotationWarperWarp(_rotationWarper, iaSrc, iaK, iaR, interpMode, borderMode, oaDst, ref corner);
       return corner;
    }
 }
Example #22
 /// <summary>
 /// Create a blob detector of specific type
 /// </summary>
 /// <param name="type">The type of the detector</param>
 public BlobDetector(CvEnum.BLOB_DETECTOR_TYPE type)
 {
    switch (type)
    {
       case Emgu.CV.CvEnum.BLOB_DETECTOR_TYPE.Simple:
          _ptr = CvInvoke.CvCreateBlobDetectorSimple();
          break;
       case Emgu.CV.CvEnum.BLOB_DETECTOR_TYPE.CC:
          _ptr = CvInvoke.CvCreateBlobDetectorCC();
          break;
    }
 }
Example #23
 /// <summary>
 /// Create a Gpu LinearFilter
 /// </summary>
 /// <param name="srcDepth">The depth type of the source image</param>
 /// <param name="srcChannels">The number of channels in the source image</param>
 /// <param name="dstDepth">The depth type of the destination image</param>
 /// <param name="dstChannels">The number of channels in the destination image</param>
 /// <param name="kernel">Convolution kernel, single-channel floating point matrix (e.g. Emgu.CV.Matrix). If you want to apply different kernels to different channels, split the gpu image into separate color planes and process them individually</param>
 /// <param name="anchor">The anchor of the kernel that indicates the relative position of a filtered point within the kernel. The anchor shoud lie within the kernel. The special default value (-1,-1) means that it is at the kernel center</param>
 /// <param name="borderType">Border type. Use REFLECT101 as default.</param>
 /// <param name="borderValue">The border value</param>
 public CudaLinearFilter(
    DepthType srcDepth, int srcChannels,
    DepthType dstDepth, int dstChannels,
    IInputArray kernel,
    System.Drawing.Point anchor,
    CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
 {
    using (InputArray iaKernel = kernel.GetInputArray())
       _ptr = CudaInvoke.cudaCreateLinearFilter(
          CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels),
          iaKernel, ref anchor, borderType, ref borderValue);
 }
Example #24
 /// <summary>
 /// Finds perspective transformation H=||hij|| between the source and the destination planes
 /// </summary>
 /// <param name="srcPoints">Point coordinates in the original plane, 2xN, Nx2, 3xN or Nx3 array (the latter two are for representation in homogeneous coordinates), where N is the number of points. </param>
 /// <param name="dstPoints">Point coordinates in the destination plane, 2xN, Nx2, 3xN or Nx3 array (the latter two are for representation in homogeneous coordinates) </param>
 /// <param name="method">The type of the method</param>
 /// <param name="ransacReprojThreshold">The maximum allowed re-projection error to treat a point pair as an inlier. The parameter is only used in RANSAC-based homography estimation. E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3</param>
 /// <param name="mask">The optional output mask set by a robust method (RANSAC or LMEDS). </param>
 /// <param name="homography">Output 3x3 homography matrix. Homography matrix is determined up to a scale, thus it is normalized to make h33=1</param>
 public static void FindHomography(
    IInputArray srcPoints,
    IInputArray dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method = CvEnum.HomographyMethod.Default,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
 {
    using (InputArray iaSrcPoints = srcPoints.GetInputArray())
    using (InputArray iaDstPoints = dstPoints.GetInputArray())
    using (OutputArray oaHomography = homography.GetOutputArray())
    using (OutputArray oaMask = mask == null ? OutputArray.GetEmpty() : mask.GetOutputArray())
       cveFindHomography(iaSrcPoints, iaDstPoints, oaHomography, method, ransacReprojThreshold, oaMask);
 }
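A minimal usage sketch (not part of the original example): the IInputArray overload also accepts point containers such as VectorOfPointF, and the optional mask reports the RANSAC inliers. srcMatches and dstMatches stand for matched PointF[] arrays from a feature matcher.
 using (VectorOfPointF src = new VectorOfPointF(srcMatches))   // srcMatches, dstMatches: PointF[] placeholders
 using (VectorOfPointF dst = new VectorOfPointF(dstMatches))
 using (Mat homography = new Mat())
 using (Mat inlierMask = new Mat())
 {
    FindHomography(src, dst, homography, CvEnum.HomographyMethod.Ransac, 3, inlierMask);
    int inlierCount = CvInvoke.CountNonZero(inlierMask);   // non-zero mask entries mark inlier pairs
 }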
Example #25
 /// <summary>
 /// Create a blob tracking post process module of the specific type
 /// </summary>
 /// <param name="type"></param>
 public BlobTrackPostProc(CvEnum.BLOB_POST_PROCESS_TYPE type)
 {
    switch (type)
    {
       case Emgu.CV.CvEnum.BLOB_POST_PROCESS_TYPE.Kalman:
          _ptr = CvInvoke.CvCreateModuleBlobTrackPostProcKalman();
          break;
       case Emgu.CV.CvEnum.BLOB_POST_PROCESS_TYPE.TimeAverExp:
          _ptr = CvInvoke.CvCreateModuleBlobTrackPostProcTimeAverExp();
          break;
       case Emgu.CV.CvEnum.BLOB_POST_PROCESS_TYPE.TimeAverRect:
          _ptr = CvInvoke.CvCreateModuleBlobTrackPostProcTimeAverRect();
          break;
    }
 }
Example #26
 /// <summary>
 /// The grab cut algorithm for segmentation
 /// </summary>
 /// <param name="img">The 8-bit 3-channel image to be segmented</param>
 /// <param name="mask">Input/output 8-bit single-channel mask. The mask is initialized by the function
 /// when mode is set to GC_INIT_WITH_RECT. Its elements may have one of following values:
 /// 0 (GC_BGD) defines an obvious background pixel.
 /// 1 (GC_FGD) defines an obvious foreground (object) pixel.
 /// 2 (GC_PR_BGD) defines a possible background pixel.
 /// 3 (GC_PR_FGD) defines a possible foreground pixel.
 ///</param>
 /// <param name="rect">The rectangle to initialize the segmentation</param>
 /// <param name="bgdModel">
 /// Temporary array for the background model. Do not modify it while you are
 /// processing the same image.
 /// </param>
 /// <param name="fgdModel">
 /// Temporary array for the foreground model. Do not modify it while you are
 /// processing the same image.
 /// </param>
 /// <param name="iterCount">The number of iterations</param>
 /// <param name="type">The initialization type</param>
 public static void GrabCut(
    IInputArray img,
    IInputOutputArray mask,
    Rectangle rect,
    IInputOutputArray bgdModel,
    IInputOutputArray fgdModel,
    int iterCount,
    CvEnum.GrabcutInitType type)
 {
    using (InputArray iaImg = img.GetInputArray())
    using (InputOutputArray ioaMask = mask == null ? InputOutputArray.GetEmpty() : mask.GetInputOutputArray())
    using (InputOutputArray ioaBgdModel = bgdModel.GetInputOutputArray())
    using (InputOutputArray ioaFgdModel = fgdModel.GetInputOutputArray())
       cveGrabCut(iaImg, ioaMask, ref rect, ioaBgdModel, ioaFgdModel, iterCount, type);
 }
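A minimal usage sketch (not part of the original example): segment an object by initializing GrabCut from a bounding rectangle. The file name and rectangle are placeholders; bgdModel and fgdModel are scratch buffers the algorithm fills on the first call.
 using (Mat img = CvInvoke.Imread("object.jpg", CvEnum.ImreadModes.Color))   // placeholder file name
 using (Mat mask = new Mat(img.Size, DepthType.Cv8U, 1))
 using (Mat bgdModel = new Mat())
 using (Mat fgdModel = new Mat())
 {
    Rectangle roi = new Rectangle(50, 50, 200, 150);   // placeholder rectangle around the object
    CvInvoke.GrabCut(img, mask, roi, bgdModel, fgdModel, 5, CvEnum.GrabcutInitType.InitWithRect);
    // mask values 1 (GC_FGD) and 3 (GC_PR_FGD) mark foreground pixels
 }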
Example #27
      /// <summary>
      /// The function imreadmulti loads a multi-page image from the specified file into a vector of Mat objects.
      /// </summary>
      /// <param name="filename">Name of file to be loaded.</param>
      /// <param name="flags">Read flags</param>
      /// <returns>Null if the reading fails, otherwise, an array of Mat from the file</returns>
      public static Mat[] Imreadmulti(String filename, CvEnum.ImreadModes flags = ImreadModes.AnyColor)
      {
         using (VectorOfMat vm = new VectorOfMat())
          using (CvString strFilename = new CvString(filename))
         {
            if (!cveImreadmulti(strFilename, vm, flags))
               return null;
            Mat[] result = new Mat[vm.Size];

            for (int i = 0; i < result.Length; i++)
            {
               Mat m = new Mat();
               CvInvoke.Swap(m, vm[i]);
               result[i] = m;
            }
            return result;
         }
      }
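A minimal usage sketch (not part of the original example): load every page of a multi-page image, e.g. a TIFF scan. The file name is a placeholder.
 Mat[] pages = CvInvoke.Imreadmulti("scans.tif");   // placeholder file name; null if the read fails
 if (pages != null)
 {
    foreach (Mat page in pages)
    {
       // ... process the page ...
       page.Dispose();
    }
 }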
Example #28
         /// <summary>
        /// Finds convex hull of 2D point set using Sklansky's algorithm
        /// </summary>
        /// <param name="points">The points to find convex hull from</param>
        /// <param name="storage">the storage used by the resulting sequence</param>
        /// <param name="orientation">The orientation of the convex hull</param>
        /// <returns>The convex hull of the points</returns>
        public static Seq<PointF> ConvexHull(PointF[] points, MemStorage storage, CvEnum.ORIENTATION orientation)
         {
            IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvSeq);
            IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
            GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
            CvInvoke.cvMakeSeqHeaderForArray(
               CvInvoke.CV_MAKETYPE((int)CvEnum.MAT_DEPTH.CV_32F, 2),
               StructSize.MCvSeq,
               StructSize.PointF,
               handle.AddrOfPinnedObject(),
               points.Length,
               seq,
               block);

            Seq<PointF> convexHull = new Seq<PointF>(CvInvoke.cvConvexHull2(seq, storage.Ptr, orientation, 1), storage);
            handle.Free();
            Marshal.FreeHGlobal(seq);
            Marshal.FreeHGlobal(block);
            return convexHull;
         }
Example #29
         /// <summary>
        /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for and returns those regions as a sequence of rectangles. 
        /// The function scans the image several times at different scales (see cvSetImagesForHaarClassifierCascade). Each time it considers overlapping regions in the image and applies the classifiers to the regions using cvRunHaarClassifierCascade. 
         /// It may also apply some heuristics to reduce the number of analyzed regions, such as Canny pruning. 
        /// After it has proceeded and collected the candidate rectangles (regions that passed the classifier cascade), it groups them and returns a sequence of average rectangles for each large enough group. 
        /// The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned for accurate yet slow object detection. 
        /// For a faster operation on real video images the settings are: scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, min_size=&lt;minimum possible face size&gt; 
        /// (for example, ~1/4 to 1/16 of the image area in case of video conferencing). 
        /// </summary>
        /// <param name="image">The image where the objects are to be detected from</param>
        /// <param name="scaleFactor">The factor by which the search window is scaled between the subsequent scans, for example, 1.1 means increasing window by 10%</param>
        /// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All the groups of a smaller number of rectangles than min_neighbors-1 are rejected. If min_neighbors is 0, the function does not any grouping at all and returns all the detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure</param>
        /// <param name="flag">Mode of operation. Currently the only flag that may be specified is CV_HAAR_DO_CANNY_PRUNING. If it is set, the function uses Canny edge detector to reject some image regions that contain too few or too much edges and thus can not contain the searched object. The particular threshold values are tuned for face detection and in this case the pruning speeds up the processing.</param>
        /// <param name="minSize">Minimum window size. By default, it is set to the size of samples the classifier has been trained on (~20x20 for face detection)</param>
         /// <returns>The objects detected, as an array of MCvAvgComp</returns>
        public MCvAvgComp[] Detect(Image<Gray, Byte> image, double scaleFactor, int minNeighbors, CvEnum.HAAR_DETECTION_TYPE flag, Size minSize)
         {
            using (MemStorage stor = new MemStorage())
            {
               IntPtr objects = CvInvoke.cvHaarDetectObjects(
                  image.Ptr,
                  Ptr,
                  stor.Ptr,
                  scaleFactor,
                  minNeighbors,
                  flag,
                  minSize);

               if (objects == IntPtr.Zero)
                  return new MCvAvgComp[0];

               Seq<MCvAvgComp> rects = new Seq<MCvAvgComp>(objects, stor);
               return rects.ToArray();
            }
         }
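A minimal usage sketch (not part of the original example) for the legacy detector above. It assumes the Detect method lives on the legacy HaarCascade class and that a trained cascade XML file is available; both file names are placeholders.
 using (HaarCascade cascade = new HaarCascade("haarcascade_frontalface_default.xml"))   // placeholder cascade file
 using (Image<Gray, Byte> gray = new Image<Gray, Byte>("group_photo.jpg"))              // placeholder image
 {
    // the fast-video settings recommended in the summary above
    MCvAvgComp[] faces = cascade.Detect(gray, 1.2, 2,
       Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
    // each MCvAvgComp describes one detected face rectangle and its neighbor count
 }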
Example #30
      /*
      /// <summary>
      /// A comparator which compares only the X value of the point
      /// </summary>
      private class XValueOfPointComparator : IComparer<PointF>
      {
         public int Compare(PointF p1, PointF p2)
         {
            return p1.X.CompareTo(p2.X);
         }
      }

      /// <summary>
      /// Perform a first degree interpolation to lookup the y coordinate given the x coordinate
      /// </summary>
      /// <param name="points">The collection of points. Must be sorted by the x value.</param>
      /// <param name="index">the x coordinate</param>
      /// <returns>the y coordinate as the result of the first degree interpolation</returns>
      public static float FirstDegreeInterpolate(PointF[] points, float index)
      {
         XValueOfPointComparator comparator = new XValueOfPointComparator();
         int idx = Array.BinarySearch<PointF>(points, new PointF(index, 0.0f), comparator);
         
         if (idx >= 0) // an exact index is matched
            return points[idx].Y;

         // the index fall into a range, in this case we do interpolation
         idx = -idx;

         if (idx == 1)
            // the specific index is smaller than all indexes
            idx = 0;
         else if (idx == points.Length + 1)
            // the specific index is larger than all indexes
            idx = points.Length - 2;
         else
            idx -= 2;

         LineSegment2DF line = new LineSegment2DF(points[idx], points[idx + 1]);
         return line.YByX(index);         
      }

      /// <summary>
      /// Perform a first degree interpolation to lookup the y coordinates given the x coordinates
      /// </summary>
      /// <param name="points">The collection of points, Must be sorted by x value</param>
      /// <param name="indexes">the x coordinates</param>
      /// <returns>The y coordinates as the result of the first degree interpolation</returns>
      public static float[] FirstDegreeInterpolate(PointF[] points, float[] indexes)
      {
         return Array.ConvertAll<float, float>(
             indexes,
             delegate(float d) { return FirstDegreeInterpolate(points, d); });
      }*/

      /// <summary>
      /// Fit a line to the points collection
      /// </summary>
      /// <param name="points">The points to be fitted</param>
      /// <param name="type">The type of the fitting</param>
      /// <param name="normalizedDirection">The normalized direction of the fitted line</param>
      /// <param name="aPointOnLine">A point on the fitted line</param>
      public static void Line2DFitting(PointF[] points, CvEnum.DIST_TYPE type, out PointF normalizedDirection, out PointF aPointOnLine)
      {
         float[] data = new float[6];
         IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvSeq);
         IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
         GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);

         CvInvoke.cvMakeSeqHeaderForArray(
            CvInvoke.CV_MAKETYPE((int)CvEnum.MAT_DEPTH.CV_32F, 2),
            StructSize.MCvSeq,
            StructSize.PointF,
            handle.AddrOfPinnedObject(),
            points.Length,
            seq,
            block); 

         CvInvoke.cvFitLine(seq, type, 0.0, 0.01, 0.01, data);

         handle.Free();
         Marshal.FreeHGlobal(seq);
         Marshal.FreeHGlobal(block);
         normalizedDirection = new PointF(data[0], data[1]);
         aPointOnLine = new PointF(data[2], data[3]);
      }
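A minimal usage sketch (not part of the original example): fit a least-squares (L2) line to a few sample points with the method above.
 PointF[] samples =
 {
    new PointF(0, 1.0f), new PointF(1, 3.1f), new PointF(2, 4.9f), new PointF(3, 7.2f)
 };
 PointF direction, pointOnLine;
 Line2DFitting(samples, CvEnum.DIST_TYPE.CV_DIST_L2, out direction, out pointOnLine);
 // the fitted line passes through pointOnLine with unit direction vector 'direction'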