Code example #1
File: AutoTestOpenCL.cs Project: neutmute/emgucv
      public void TestOclKernel()
      {
         if (CvInvoke.HaveOpenCL && CvInvoke.UseOpenCL)
         {

            Ocl.Device defaultDevice = Ocl.Device.Default;

            Mat img = EmguAssert.LoadMat("lena.jpg");
            Mat imgGray = new Mat();
            CvInvoke.CvtColor(img, imgGray, ColorConversion.Bgr2Gray);
            Mat imgFloat = new Mat();
            imgGray.ConvertTo(imgFloat, DepthType.Cv32F, 1.0/255);
            UMat umat = imgFloat.GetUMat(AccessType.Read, UMat.Usage.AllocateDeviceMemory);
            UMat umatDst = new UMat();
            umatDst.Create(umat.Rows, umat.Cols, DepthType.Cv32F, umat.NumberOfChannels, UMat.Usage.AllocateDeviceMemory);
            
            // Define dstT via a build option so the kernel source below is
            // compiled for the destination element type (32-bit float here).
            String buildOpts = String.Format("-D dstT={0}", Ocl.OclInvoke.TypeToString(umat.Depth));
    
            String sourceStr = @"
__constant sampler_t samplerLN = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;
__kernel void shift(const image2d_t src, float shift_x, float shift_y, __global uchar* dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)
{
   int x = get_global_id(0);
   int y = get_global_id(1);
   if (x >= dst_cols) return;
   int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(dstT), dst_offset));
   __global dstT *dstf = (__global dstT *)(dst + dst_index);
   float2 coord = (float2)((float)x+0.5f+shift_x, (float)y+0.5f+shift_y);
   dstf[0] = (dstT)read_imagef(src, samplerLN, coord).x;
}";

            using (CvString errorMsg = new CvString())
            using (Ocl.ProgramSource ps = new Ocl.ProgramSource(sourceStr))
            using (Ocl.Kernel kernel = new Ocl.Kernel())
            using (Ocl.Image2D image2d = new Ocl.Image2D(umat))
            using (Ocl.KernelArg ka = new Ocl.KernelArg(Ocl.KernelArg.Flags.ReadWrite, umatDst))
            {
               float shiftX = 100.5f;
               float shiftY = -50.0f;

               bool success = kernel.Create("shift", ps, buildOpts, errorMsg);
               EmguAssert.IsTrue(success, errorMsg.ToString());
               // Kernel arguments must be bound in the order they are declared:
               // source image, the two shift values, then the destination buffer.
               int idx = 0;
               idx = kernel.Set(idx, image2d);
               idx = kernel.Set(idx, ref shiftX);
               idx = kernel.Set(idx, ref shiftY);
               idx = kernel.Set(idx, ka);
               IntPtr[] globalThreads = new IntPtr[] {new IntPtr(umat.Cols), new IntPtr(umat.Rows), new IntPtr(1) };
               success = kernel.Run(globalThreads, null, true);
               EmguAssert.IsTrue(success, "Failed to run the kernel");
               using (Mat matDst = umatDst.GetMat(AccessType.Read))
               using (Mat saveMat = new Mat())
               {
                  matDst.ConvertTo(saveMat, DepthType.Cv8U, 255.0);
                  saveMat.Save("tmp.jpg");
               }
            }
         }
      }
Code example #2
File: BOWKMeansTrainer.cs Project: KaganRoman/Eval
 /// <summary>
 /// Cluster the descriptors and return the cluster centers
 /// </summary>
 /// <returns>The cluster centers</returns>
 public Matrix<float> Cluster()
 {
    using (Mat m = new Mat())
    {
       CvInvoke.CvBOWKMeansTrainerCluster(_ptr, m);
       Matrix<float> result = new Matrix<float>(m.Size);
       CvInvoke.cvMatCopyToCvArr(m, result);
       return result;
    }
 }
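
Usage note: a minimal sketch of how the trainer fits into a bag-of-visual-words workflow, assuming a BOWKMeansTrainer instance named trainer and a hypothetical collection descriptorsPerImage of CV_32F descriptor Mats (Add is shown in example #19):

 // Sketch: accumulate descriptors, then cluster them into a vocabulary.
 foreach (Mat descriptors in descriptorsPerImage)
    trainer.Add(descriptors);                         // see example #19
 using (Matrix<float> vocabulary = trainer.Cluster())
 {
    // Each row of the vocabulary is one cluster center (one visual word).
    Console.WriteLine("Vocabulary size: " + vocabulary.Rows);
 }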
Code example #3
        /// <summary>
        /// Computes an optimal affine transformation between two 3D point sets.
        /// </summary>
        /// <param name="src">First input 3D point set.</param>
        /// <param name="dst">Second input 3D point set.</param>
        /// <param name="estimate">Output 3D affine transformation matrix.</param>
        /// <param name="inliers">Output vector indicating which points are inliers.</param>
        /// <param name="ransacThreshold">Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier.</param>
        /// <param name="confidence">Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</param>
        /// <returns></returns>
        public static int CvEstimateAffine3D(MCvPoint3D32f[] src, MCvPoint3D32f[] dst, out Matrix<double> estimate, out Byte[] inliers, double ransacThreshold, double confidence)
        {
            GCHandle srcHandle = GCHandle.Alloc(src, GCHandleType.Pinned);
            GCHandle dstHandle = GCHandle.Alloc(dst, GCHandleType.Pinned);
            int result;

            estimate = new Matrix<double>(3, 4);
            using (Util.Mat affineEstimate = new Util.Mat())
            using (Matrix<float> srcMat = new Matrix<float>(1, src.Length, 3, srcHandle.AddrOfPinnedObject(), Marshal.SizeOf(typeof(MCvPoint3D32f)) * src.Length))
            using (Matrix<float> dstMat = new Matrix<float>(1, dst.Length, 3, dstHandle.AddrOfPinnedObject(), Marshal.SizeOf(typeof(MCvPoint3D32f)) * dst.Length))
            using (Util.VectorOfByte vectorOfByte = new Util.VectorOfByte())
            {
                result = _CvEstimateAffine3D(srcMat, dstMat, affineEstimate, vectorOfByte, ransacThreshold, confidence);
                inliers = vectorOfByte.ToArray();
                CvInvoke.cvMatCopyToCvArr(affineEstimate, estimate);
            }

            srcHandle.Free();
            dstHandle.Free();

            return result;
        }
Code example #4
File: Tracker.cs Project: neutmute/emgucv
 /// <summary>
 /// Initialize the tracker with a known bounding box that surrounds the target.
 /// </summary>
 /// <param name="image">The initial frame</param>
 /// <param name="boundingBox">The initial bounding box</param>
 /// <returns>True if the tracker was initialized successfully</returns>
 public bool Init(Mat image, Rectangle boundingBox)
 {
    return ContribInvoke.cveTrackerInit(_ptr, image, ref boundingBox);
 }
Code example #5
File: Tracker.cs Project: neutmute/emgucv
 /// <summary>
 /// Update the tracker, find the new most likely bounding box for the target.
 /// </summary>
 /// <param name="image">The current frame</param>
 /// <param name="boundingBox">The boundig box that represent the new target location, if true was returned, not modified otherwise</param>
 /// <returns>True means that target was located and false means that tracker cannot locate target in current frame. Note, that latter does not imply that tracker has failed, maybe target is indeed missing from the frame (say, out of sight)</returns>
 public bool Update(Mat image, out Rectangle boundingBox)
 {
    boundingBox = new Rectangle();
    return ContribInvoke.cveTrackerUpdate(_ptr, image, ref boundingBox);
 }
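
Usage note: a hedged sketch combining Init (example #4) and Update (example #5); the tracker construction and the VideoCapture-based frame source are assumptions, since the concrete Tracker subclasses vary across Emgu CV versions:

 // Sketch: initialize on the first frame, then track frame by frame.
 // tracker is any Emgu CV Tracker; capture is an assumed VideoCapture.
 Mat frame = new Mat();
 capture.Read(frame);
 tracker.Init(frame, new Rectangle(100, 100, 50, 50)); // initial target ROI
 while (capture.Read(frame) && !frame.IsEmpty)
 {
    Rectangle located;
    if (tracker.Update(frame, out located))
       CvInvoke.Rectangle(frame, located, new MCvScalar(0, 255, 0)); // draw the hit
 }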
Code example #6
File: MultiTracker.cs Project: neutmute/emgucv
 /// <summary>
 /// Update the current tracking status. The result will be saved in the internal storage.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="boundingBox">the tracking result, represent a list of ROIs of the tracked objects.</param>
 /// <returns>True id update success</returns>
 public bool Update(Mat image, VectorOfRect boundingBox)
 {
    return ContribInvoke.cveMultiTrackerUpdate(_ptr, image, boundingBox);
 }
Code example #7
File: LSDDetector.cs Project: neutmute/emgucv
 /// <summary>
 /// Detect lines inside an image.
 /// </summary>
 /// <param name="image">	input image</param>
 /// <param name="keylines">vector that will store extracted lines for one or more images</param>
 /// <param name="scale">scale factor used in pyramids generation</param>
 /// <param name="numOctaves">number of octaves inside pyramid</param>
 /// <param name="mask">	mask matrix to detect only KeyLines of interest</param>
 public void Detect(Mat image, VectorOfKeyLine keylines, int scale, int numOctaves, Mat mask = null)
 {
    LineDescriptorInvoke.cveLineDescriptorLSDDetectorDetect(_ptr, image, keylines, scale, numOctaves, mask);
 }
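
Usage note: a sketch of calling the detector above; the parameterless LSDDetector constructor is an assumption, and the scale/octave values follow common line_descriptor samples:

 // Sketch: detect line segments in a grayscale image.
 using (Mat img = CvInvoke.Imread("building.jpg"))
 using (Mat gray = new Mat())
 using (VectorOfKeyLine lines = new VectorOfKeyLine())
 using (LSDDetector detector = new LSDDetector())      // assumed default ctor
 {
    CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
    detector.Detect(gray, lines, 2, 1);                // scale = 2, numOctaves = 1, no mask
    Console.WriteLine("Lines found: " + lines.Size);
 }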
Code example #8
File: BinaryDescriptor.cs Project: neutmute/emgucv
 /// <summary>
 /// Line detection.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="keylines">Vector that will store extracted lines for one or more images</param>
 /// <param name="mask">Mask matrix to detect only KeyLines of interest</param>
 public void Detect(Mat image, VectorOfKeyLine keylines, Mat mask = null)
 {
    LineDescriptorInvoke.cveLineDescriptorBinaryDescriptorDetect(_ptr, image, keylines, mask);
 }
Code example #9
File: MultiTracker.cs Project: neutmute/emgucv
 /// <summary>
 /// Add a new object to be tracked
 /// </summary>
 /// <param name="trackerType">The name of the tracker algorithm to be used</param>
 /// <param name="image">Input image</param>
 /// <param name="boundingBox">S rectangle represents ROI of the tracked object</param>
 /// <returns>True if sucessfully added</returns>
 public bool Add(String trackerType, Mat image, Rectangle boundingBox)
 {
    using (CvString trackerTypeStr = new CvString(trackerType))
       return ContribInvoke.cveMultiTrackerAddType(_ptr, trackerTypeStr, image, ref boundingBox);
 }
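
Usage note: a sketch tying Add (this example) to Update (example #6); the parameterless MultiTracker constructor, the "KCF" algorithm name, and the firstFrame/nextFrame Mats are assumptions:

 // Sketch: track two objects at once with named tracker algorithms.
 using (MultiTracker tracker = new MultiTracker())     // assumed default ctor
 using (VectorOfRect results = new VectorOfRect())
 {
    tracker.Add("KCF", firstFrame, new Rectangle(10, 10, 40, 40));
    tracker.Add("KCF", firstFrame, new Rectangle(200, 80, 40, 40));
    if (tracker.Update(nextFrame, results))            // see example #6
    {
       Rectangle[] rois = results.ToArray();           // one ROI per tracked object
    }
 }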
Code example #10
 /// <summary>
 /// The function detects edges in src and draws them to dst. The algorithm underlying this function is much more robust to the presence of texture than common approaches, e.g. Sobel.
 /// </summary>
 /// <param name="src">source image (RGB, float, in [0;1]) to detect edges</param>
 /// <param name="dst">destination image (grayscale, float, in [0;1]) where edges are drawn</param>
 public void DetectEdges(Mat src, Mat dst)
 {
    XimgprocInvoke.cveStructuredEdgeDetectionDetectEdges(_ptr, src, dst);
 }
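
Usage note: the detector expects an RGB float image in [0;1], so a typical call converts first. A sketch, assuming the constructor takes the path of a pretrained model (shipped with OpenCV's ximgproc samples) and an optional feature getter:

 // Sketch: run structured edge detection on an 8-bit BGR input.
 using (StructuredEdgeDetection detector = new StructuredEdgeDetection("model.yml.gz", null)) // assumed ctor
 using (Mat bgr = CvInvoke.Imread("scene.jpg"))
 using (Mat rgb = new Mat())
 using (Mat rgbFloat = new Mat())
 using (Mat edges = new Mat())
 {
    CvInvoke.CvtColor(bgr, rgb, ColorConversion.Bgr2Rgb);
    rgb.ConvertTo(rgbFloat, DepthType.Cv32F, 1.0 / 255); // scale into [0;1]
    detector.DetectEdges(rgbFloat, edges);               // edges: grayscale float in [0;1]
 }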
Code example #11
File: Plot2d.cs Project: neutmute/emgucv
 /// <summary>
 /// Create 2D plot for data
 /// </summary>
 /// <param name="dataX">The data for the X-axis</param>
 /// <param name="dataY">The data for the Y-axis</param>
 public Plot2d(Mat dataX, Mat dataY)
 {
    _ptr = PlotInvoke.cvePlot2dCreateFromXY(dataX, dataY);
 }
Code example #12
         /// <summary>
          /// Filter the matched features, rejecting any match that is not unique.
          /// </summary>
          /// <param name="uniquenessThreshold">The distance-ratio threshold at or below which a match is considered unique; a good value is 0.8</param>
          /// <param name="mask">This is both input and output. This matrix indicates which rows are valid for the matches.</param>
          /// <param name="matches">Matches. Each matches[i] is k or fewer matches for the same query descriptor.</param>
         public static void VoteForUniqueness(VectorOfVectorOfDMatch matches, double uniquenessThreshold, Mat mask)
         {
            MDMatch[][] mArr = matches.ToArrayOfArray();
            byte[] maskData = new byte[mArr.Length];
            GCHandle maskHandle = GCHandle.Alloc(maskData, GCHandleType.Pinned);
            using (Mat m = new Mat(mArr.Length, 1, DepthType.Cv8U, 1, maskHandle.AddrOfPinnedObject(), 1))
            {
               mask.CopyTo(m);
               for (int i = 0; i < mArr.Length; i++)
               {
                  // A match is unique when the best distance is at most
                  // uniquenessThreshold times the second-best distance;
                  // reject everything else by zeroing its mask entry.
                  if (maskData[i] != 0 && (mArr[i][0].Distance / mArr[i][1].Distance) <= uniquenessThreshold)
                     maskData[i] = (byte)255;
                  else
                     maskData[i] = 0;
               }

               m.CopyTo(mask);
            }
            maskHandle.Free();

         }
Code example #13
File: CvInvokeCvextern.cs Project: KaganRoman/Eval
      /// <summary>
      /// Computes an optimal affine transformation between two 3D point sets.
      /// </summary>
      /// <param name="src">First input 3D point set.</param>
      /// <param name="dst">Second input 3D point set.</param>
      /// <param name="estimate">Output 3D affine transformation matrix.</param>
      /// <param name="inliers">Output vector indicating which points are inliers.</param>
      /// <param name="ransacThreshold">Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier.</param>
      /// <param name="confidence">Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</param>
      /// <returns></returns>
      public static int CvEstimateAffine3D(MCvPoint3D32f[] src, MCvPoint3D32f[] dst, out Matrix<double> estimate, out Byte[] inliers, double ransacThreshold, double confidence)
      {
         GCHandle srcHandle = GCHandle.Alloc(src, GCHandleType.Pinned);
         GCHandle dstHandle = GCHandle.Alloc(dst, GCHandleType.Pinned);
         int result;

         estimate = new Matrix<double>(3, 4);
         using (Util.Mat affineEstimate = new Util.Mat())
         using (Matrix<float> srcMat = new Matrix<float>(1, src.Length, 3, srcHandle.AddrOfPinnedObject(), Marshal.SizeOf(typeof(MCvPoint3D32f)) * src.Length))
         using (Matrix<float> dstMat = new Matrix<float>(1, dst.Length, 3, dstHandle.AddrOfPinnedObject(), Marshal.SizeOf(typeof(MCvPoint3D32f)) * dst.Length))
         using (Util.VectorOfByte vectorOfByte = new Util.VectorOfByte())
         {
            result = _CvEstimateAffine3D(srcMat, dstMat, affineEstimate, vectorOfByte, ransacThreshold, confidence);
            inliers = vectorOfByte.ToArray();
            CvInvoke.cvMatCopyToCvArr(affineEstimate, estimate);  
         }

         srcHandle.Free();
         dstHandle.Free();

         return result;
      }
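
Usage note: a short sketch of calling the wrapper above; the point data is illustrative (dst is src translated by (1, 1, 0), and the four points are non-coplanar as the estimation requires):

      MCvPoint3D32f[] srcPts = { new MCvPoint3D32f(0, 0, 0), new MCvPoint3D32f(1, 0, 0),
                                 new MCvPoint3D32f(0, 1, 0), new MCvPoint3D32f(0, 0, 1) };
      MCvPoint3D32f[] dstPts = { new MCvPoint3D32f(1, 1, 0), new MCvPoint3D32f(2, 1, 0),
                                 new MCvPoint3D32f(1, 2, 0), new MCvPoint3D32f(1, 1, 1) };
      Matrix<double> affine;
      byte[] inlierFlags;
      CvEstimateAffine3D(srcPts, dstPts, out affine, out inlierFlags, 3.0, 0.99);
      // affine is the 3x4 matrix [R | t]; inlierFlags[i] != 0 marks point i as an inlier.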
Code example #14
 /// <summary>
 /// Recover the homography matrix using RANSAC. If the matrix cannot be recovered, null is returned.
 /// </summary>
 /// <param name="model">The model keypoints</param>
 /// <param name="observed">The observed keypoints</param>
 /// <param name="ransacReprojThreshold">
 /// The maximum allowed reprojection error to treat a point pair as an inlier. 
 /// If srcPoints and dstPoints are measured in pixels, it usually makes sense to set this parameter somewhere in the range 1 to 10.
 /// </param>
 /// <param name="mask">
 /// The mask matrix of which the value might be modified by the function. 
 /// As input, if the value is 0, the corresponding match will be ignored when computing the homography matrix. 
 /// If the value is 1 and RANSAC determine the match is an outlier, the value will be set to 0.
 /// </param>
 /// <returns>The homography matrix, if it cannot be found, null is returned</returns>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public static Mat GetHomographyMatrixFromMatchedFeatures(VectorOfKeyPoint model,
    VectorOfKeyPoint observed, VectorOfVectorOfDMatch matches, Mat mask, double ransacReprojThreshold)
 {
    Mat homography = new Mat();
    bool found = CvInvoke.getHomographyMatrixFromMatchedFeatures(model, observed, matches, mask,
       ransacReprojThreshold, homography);
    if (found)
    {
       return homography;
    }
    else
    {
       homography.Dispose();
       return null;
    }
 }
Code example #15
 /// <summary>
 /// Eliminate the matched features whose scale and rotation do not agree with the majority's scale and rotation.
 /// </summary>
 /// <param name="rotationBins">The number of bins for rotation; a good value might be 20 (which means each bin covers 18 degrees)</param>
 /// <param name="scaleIncrement">This determines the difference in scale between neighboring bins; a good value might be 1.5 (which means matched features in bin i+1 are scaled 1.5 times larger than matched features in bin i)</param>
 /// <param name="modelKeyPoints">The keypoints from the model image</param>
 /// <param name="observedKeyPoints">The keypoints from the observed image</param>
 /// <param name="mask">This is both input and output. This matrix indicates which rows are valid for the matches.</param>
 /// <param name="matches">Matches. Each matches[i] is k or fewer matches for the same query descriptor.</param>
 /// <returns>The number of non-zero elements in the resulting mask</returns>
 public static int VoteForSizeAndOrientation(VectorOfKeyPoint modelKeyPoints, VectorOfKeyPoint observedKeyPoints,
    VectorOfVectorOfDMatch matches, Mat mask, double scaleIncrement, int rotationBins)
 {
    return CvInvoke.voteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, scaleIncrement,
       rotationBins);
 }
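
Usage note: examples #12, #14, and #15 are typically chained. A sketch of the usual pipeline, assuming these statics live on Emgu CV's Features2DToolbox and that modelKeyPoints, observedKeyPoints, and matches come from an earlier detect/knn-match step with k = 2 (so each match has a second-best distance for the uniqueness ratio):

 using (Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1))
 {
    mask.SetTo(new MCvScalar(255));                    // start with every match valid
    Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
    int nonZero = Features2DToolbox.VoteForSizeAndOrientation(
       modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
    Mat homography = null;
    if (nonZero >= 4)                                  // a homography needs at least 4 correspondences
       homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(
          modelKeyPoints, observedKeyPoints, matches, mask, 2);
 }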
Code example #16
File: FuzzyInvoke.cs Project: neutmute/emgucv
 /// <summary>
 /// Image filtering.
 /// </summary>
 /// <param name="image">Input image.</param>
 /// <param name="kernel">Final 32-b kernel.</param>
 /// <param name="output">Output 32-bit image.</param>
 public static void Filter(Mat image, Mat kernel, Mat output)
 {
    cveFtFilter(image, kernel, output);
 }
Code example #17
File: FuzzyInvoke.cs Project: neutmute/emgucv
 /// <summary>
 /// Image inpainting.
 /// </summary>
 /// <param name="image">Input image.</param>
 /// <param name="mask">Mask used for unwanted area marking.</param>
 /// <param name="output">Output 32-bit image.</param>
 /// <param name="radius">Radius of the basic function.</param>
 /// <param name="function">Function type</param>
 /// <param name="algorithm">Algorithm type</param>
 public static void Inpaint(Mat image, Mat mask, Mat output, int radius = 2, Function function = Function.Linear, InpaintAlgorithm algorithm = InpaintAlgorithm.OneStep)
 {
    cveFtInpaint(image, mask, output, radius, function, algorithm);
 }
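
Usage note: a sketch of calling Inpaint above; the file names are illustrative, and the mask is reduced to a single channel with the unwanted area marked as the parameter description requires:

 using (Mat damaged = CvInvoke.Imread("damaged.png"))
 using (Mat maskBgr = CvInvoke.Imread("mask.png"))
 using (Mat mask = new Mat())
 using (Mat restored = new Mat())
 {
    CvInvoke.CvtColor(maskBgr, mask, ColorConversion.Bgr2Gray);
    Inpaint(damaged, mask, restored, 2, Function.Linear, InpaintAlgorithm.OneStep);
 }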
Code example #18
File: BinaryDescriptor.cs Project: neutmute/emgucv
 /// <summary>
 /// Descriptors computation.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="keylines">Vector containing lines for which descriptors must be computed</param>
 /// <param name="descriptors">Computed descriptors will be stored here</param>
 /// <param name="returnFloatDescr">When true, original non-binary descriptors are returned</param>
 public void Compute(Mat image, VectorOfKeyLine keylines, Mat descriptors, bool returnFloatDescr = false)
 {
    LineDescriptorInvoke.cveLineDescriptorBinaryDescriptorCompute(_ptr, image, keylines, descriptors, returnFloatDescr);
 }
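
Usage note: Detect (example #8) and Compute above are normally used back to back; a sketch, where the parameterless BinaryDescriptor constructor and the grayImage input Mat are assumptions:

 using (BinaryDescriptor bd = new BinaryDescriptor())  // assumed default ctor
 using (VectorOfKeyLine keylines = new VectorOfKeyLine())
 using (Mat descriptors = new Mat())
 {
    bd.Detect(grayImage, keylines);                    // see example #8
    bd.Compute(grayImage, keylines, descriptors);      // one descriptor row per KeyLine
 }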
Code example #19
 /// <summary>
 /// Add the descriptors to the trainer
 /// </summary>
 /// <param name="descriptors">The descriptors to be added to the trainer</param>
 public void Add(Mat descriptors)
 {
    CvBOWKMeansTrainerAdd(_ptr, descriptors);
 }
Code example #20
 /// <summary>
 /// Returns coefficients of the classifier trained for people detection (for default window size).
 /// </summary>
 /// <returns>The default people detector</returns>
 public Mat GetDefaultPeopleDetector()
 {
    Mat m = new Mat();
    CudaInvoke.cudaHOGGetDefaultPeopleDetector(_ptr, m);
    return m;
 }
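
Usage note: the returned coefficients are meant to be fed back into the descriptor as its SVM detector; a sketch, where hog is an existing CudaHOG instance and SetSVMDetector is an assumed member (check the Emgu CV version you build against):

 using (Mat detector = hog.GetDefaultPeopleDetector())
 {
    hog.SetSVMDetector(detector);  // assumed API: pairs the people-detector coefficients with the HOG
 }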
Code example #21
File: Plot2d.cs Project: neutmute/emgucv
 /// <summary>
 /// Create 2D plot from data
 /// </summary>
 /// <param name="data">The data to be plotted</param>
 public Plot2d(Mat data)
 {
    _ptr = PlotInvoke.cvePlot2dCreateFromX(data);
 }
Code example #22
File: MultiTracker.cs Project: neutmute/emgucv
 /// <summary>
 /// Add a new object to be tracked. The default algorithm will be used for the newly added tracker.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="boundingBox">A rectangle that represents the ROI of the tracked object</param>
 /// <returns>True if successfully added</returns>
 public bool Add(Mat image, Rectangle boundingBox)
 {
    return ContribInvoke.cveMultiTrackerAdd(_ptr, image, ref boundingBox);
 }
Code example #23
File: Plot2d.cs Project: neutmute/emgucv
 /// <summary>
 /// Render the plot to the resulting Mat
 /// </summary>
 /// <param name="result">The output plot</param>
 public void Render(Mat result)
 {
    PlotInvoke.cvePlot2dRender(_ptr, result);
 }
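
Usage note: a sketch combining the constructors (examples #11 and #21) with Render; the data values are illustrative, the plot module expects 64-bit float data, and the Matrix<T>.Mat accessor is an assumption:

 // Sketch: plot y = x^2 over a few sample points and save the rendering.
 using (Matrix<double> x = new Matrix<double>(new double[] { 0, 1, 2, 3, 4 }))
 using (Matrix<double> y = new Matrix<double>(new double[] { 0, 1, 4, 9, 16 }))
 using (Plot2d plot = new Plot2d(x.Mat, y.Mat))
 using (Mat canvas = new Mat())
 {
    plot.Render(canvas);
    CvInvoke.Imwrite("plot.png", canvas);
 }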
Code example #24
File: Blob.cs Project: neutmute/emgucv
 /// <summary>
 /// Returns a reference to the Mat containing the blob data.
 /// </summary>
 /// <returns>A reference to the Mat containing the blob data.</returns>
 public Mat MatRef()
 {
    Mat m = new Mat();
    ContribInvoke.cveDnnBlobMatRef(_ptr, m);
    return m;
 }