Example #1
        public void TestSimpleBlobDetector()
        {
            Mat box = EmguAssert.LoadMat("box.png");

            //Detect blobs using the default parameter set.
            SimpleBlobDetectorParams p        = new SimpleBlobDetectorParams();
            SimpleBlobDetector       detector = new SimpleBlobDetector(p);

            MKeyPoint[] keypoints = detector.Detect(box);
        }
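The parameterless setup above accepts whatever the default filters let through. Below is a minimal sketch of tuning the area filter and drawing the result; the SimpleBlobDetectorParams property names are assumed to mirror OpenCV's SimpleBlobDetector::Params.

        public void DrawBlobsSketch()
        {
            Mat box = EmguAssert.LoadMat("box.png");

            //Keep only blobs between 50 and 5000 pixels in area.
            SimpleBlobDetectorParams p = new SimpleBlobDetectorParams();
            p.FilterByArea = true;
            p.MinArea = 50;
            p.MaxArea = 5000;

            using (SimpleBlobDetector detector = new SimpleBlobDetector(p))
                using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
                    using (Mat result = new Mat())
                    {
                        detector.DetectRaw(box, keypoints);
                        Features2DToolbox.DrawKeypoints(box, keypoints, result, new Bgr(0, 0, 255), Features2DToolbox.KeypointDrawType.Default);
                    }
        }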
Example #2
        public void TestOclKernel()
        {
            if (CvInvoke.HaveOpenCL && CvInvoke.UseOpenCL)
            {
                Ocl.Device defaultDevice = Ocl.Device.Default;

                //Convert the input to a single-channel floating point image in the [0, 1] range.
                Mat img     = EmguAssert.LoadMat("lena.jpg");
                Mat imgGray = new Mat();
                CvInvoke.CvtColor(img, imgGray, ColorConversion.Bgr2Gray);
                Mat imgFloat = new Mat();
                imgGray.ConvertTo(imgFloat, DepthType.Cv32F, 1.0 / 255);
                UMat umat    = imgFloat.GetUMat(AccessType.Read, UMat.Usage.AllocateDeviceMemory);
                UMat umatDst = new UMat();
                umatDst.Create(umat.Rows, umat.Cols, DepthType.Cv32F, umat.NumberOfChannels, UMat.Usage.AllocateDeviceMemory);

                //"dstT" in the kernel source is defined at build time; for a Cv32F UMat this expands to "-D dstT=float".
                String buildOpts = String.Format("-D dstT={0}", Ocl.OclInvoke.TypeToString(umat.Depth));

                String sourceStr = @"
__constant sampler_t samplerLN = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;
__kernel void shift(const image2d_t src, float shift_x, float shift_y, __global uchar* dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)
{
   int x = get_global_id(0);
   int y = get_global_id(1);
   if (x >= dst_cols) return;
   int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(dstT), dst_offset));
   __global dstT *dstf = (__global dstT *)(dst + dst_index);
   float2 coord = (float2)((float)x+0.5f+shift_x, (float)y+0.5f+shift_y);
   dstf[0] = (dstT)read_imagef(src, samplerLN, coord).x;
}";

                using (CvString errorMsg = new CvString())
                    using (Ocl.ProgramSource ps = new Ocl.ProgramSource(sourceStr))
                        using (Ocl.Kernel kernel = new Ocl.Kernel())
                            using (Ocl.Image2D image2d = new Ocl.Image2D(umat))
                                using (Ocl.KernelArg ka = new Ocl.KernelArg(Ocl.KernelArg.Flags.ReadWrite, umatDst))
                                {
                                    float shiftX = 100.5f;
                                    float shiftY = -50.0f;

                                    //The name passed to Create must match the __kernel function in the source.
                                    bool success = kernel.Create("shift", ps, buildOpts, errorMsg);
                                    EmguAssert.IsTrue(success, errorMsg.ToString());

                                    //Arguments are set in the same order as the kernel signature.
                                    int idx = 0;
                                    idx = kernel.Set(idx, image2d);
                                    idx = kernel.Set(idx, ref shiftX);
                                    idx = kernel.Set(idx, ref shiftY);
                                    idx = kernel.Set(idx, ka);
                                    IntPtr[] globalThreads = new IntPtr[] { new IntPtr(umat.Cols), new IntPtr(umat.Rows), new IntPtr(1) };
                                    success = kernel.Run(globalThreads, null, true);
                                    EmguAssert.IsTrue(success, "Failed to run the kernel");
                                    using (Mat matDst = umatDst.GetMat(AccessType.Read))
                                        using (Mat saveMat = new Mat())
                                        {
                                            matDst.ConvertTo(saveMat, DepthType.Cv8U, 255.0);
                                            saveMat.Save("tmp.jpg");
                                        }
                                }
            }
        }
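When the guard above skips the body, it helps to know what the OpenCL runtime actually sees. A hedged sketch follows, assuming Ocl.Device exposes a Name property and that CvInvoke.UseOpenCL is settable (both are used this way elsewhere in the Emgu tests):

        public void ReportOpenCLDeviceSketch()
        {
            if (!CvInvoke.HaveOpenCL)
            {
                EmguAssert.WriteLine("No OpenCL runtime found; UMat operations will run on the CPU.");
                return;
            }

            EmguAssert.WriteLine(String.Format("Default OpenCL device: {0}", Ocl.Device.Default.Name));

            //OpenCL can also be switched off explicitly to force the CPU code path:
            //CvInvoke.UseOpenCL = false;
        }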
Example #3
        public void TestChessboardCalibrationSolvePnPRansac()
        {
            Size patternSize = new Size(9, 6);

            Mat chessboardImage = EmguAssert.LoadMat("left01.jpg", ImreadModes.Grayscale);

            Util.VectorOfPointF corners = new Util.VectorOfPointF();
            bool patternWasFound        = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            //Refine the detected corners to sub-pixel accuracy.
            CvInvoke.CornerSubPix(
                chessboardImage,
                corners,
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            //Object-space coordinates of the corners, in units of one chessboard square.
            MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);

            using (VectorOfVectorOfPoint3D32F ptsVec = new VectorOfVectorOfPoint3D32F(new MCvPoint3D32f[][] { objectPts }))
                using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(corners))
                    using (Mat cameraMatrix = new Mat())
                        using (Mat distortionCoeff = new Mat())
                            using (VectorOfMat rotations = new VectorOfMat())
                                using (VectorOfMat translations = new VectorOfMat())
                                {
                                    Mat            calMat  = CvInvoke.InitCameraMatrix2D(ptsVec, imgPtsVec, chessboardImage.Size, 0);
                                    Matrix<double> calMatF = new Matrix<double>(calMat.Rows, calMat.Cols, calMat.NumberOfChannels);
                                    calMat.CopyTo(calMatF);
                                    double error = CvInvoke.CalibrateCamera(ptsVec, imgPtsVec, chessboardImage.Size, cameraMatrix,
                                                                            distortionCoeff,
                                                                            rotations, translations, CalibType.Default, new MCvTermCriteria(30, 1.0e-10));
                                    using (Mat rotation = new Mat())
                                        using (Mat translation = new Mat())
                                            using (VectorOfPoint3D32F vpObject = new VectorOfPoint3D32F(objectPts))
                                            {
                                                CvInvoke.SolvePnPRansac(
                                                    vpObject,
                                                    corners,
                                                    cameraMatrix,
                                                    distortionCoeff,
                                                    rotation,
                                                    translation);
                                            }

                                    CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
                                    using (Mat undistorted = new Mat())
                                    {
                                        CvInvoke.Undistort(chessboardImage, undistorted, cameraMatrix, distortionCoeff);
                                        String title = String.Format("Reprojection error: {0}", error);
                                        //CvInvoke.NamedWindow(title);
                                        //CvInvoke.Imshow(title, undistorted);
                                        //CvInvoke.WaitKey();
                                        //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
                                    }
                                }
        }
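SolvePnPRansac reports the pose as a 3x1 Rodrigues rotation vector plus a translation vector. A minimal sketch of converting that vector into a 3x3 rotation matrix with the standard CvInvoke.Rodrigues call (the method and variable names here are ours):

        public void RotationVectorToMatrixSketch(Mat rotationVector)
        {
            using (Mat rotationMatrix = new Mat())
            {
                CvInvoke.Rodrigues(rotationVector, rotationMatrix);
                //rotationMatrix is now 3x3; together with the translation it maps
                //chessboard coordinates into the camera frame.
            }
        }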
Example #4
        public async Task TestWeChatQRCode()
        {
            using (Mat m = EmguAssert.LoadMat("link_github_ocv.jpg"))
                using (Emgu.CV.Models.WeChatQRCodeDetector detector = new WeChatQRCodeDetector())
                {
                    //Init downloads the required model files on first use.
                    await detector.Init(DownloadManager_OnDownloadProgressChanged);

                    //Decode the QR code and render the detection onto the same Mat.
                    String text = detector.ProcessAndRender(m, m);
                }
        }
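The Emgu.CV.Models wrapper downloads its model files and renders for you; the underlying wechat_qrcode module can also be called directly. A hedged sketch, assuming the four model files are already on disk (the file names below are placeholders):

        public void DecodeQRCodeSketch(Mat image)
        {
            using (WeChatQRCode qrDetector = new WeChatQRCode(
                       "detect.prototxt", "detect.caffemodel", "sr.prototxt", "sr.caffemodel"))
                using (VectorOfMat points = new VectorOfMat())
                {
                    //One decoded string per QR code found; "points" receives the corners of each code.
                    String[] codes = qrDetector.DetectAndDecode(image, points);
                    foreach (String code in codes)
                        EmguAssert.WriteLine(code);
                }
        }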
Example #5
        public async Task TestPedestrianDetector()
        {
            using (Mat m = EmguAssert.LoadMat("pedestrian"))
                using (Emgu.CV.Models.PedestrianDetector detector = new PedestrianDetector())
                {
                    await detector.Init(DownloadManager_OnDownloadProgressChanged);

                    String text = detector.ProcessAndRender(m, m);
                }
        }
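If downloading a DNN model is not an option, the classic HOG plus linear SVM people detector that ships with OpenCV is a self-contained alternative. A minimal sketch:

        public void HogPedestrianSketch(Mat image)
        {
            using (HOGDescriptor hog = new HOGDescriptor())
            {
                hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                //Each detection carries a bounding rectangle and a confidence score.
                MCvObjectDetection[] detections = hog.DetectMultiScale(image);
                foreach (MCvObjectDetection detection in detections)
                    CvInvoke.Rectangle(image, detection.Rect, new MCvScalar(0, 0, 255), 2);
            }
        }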
Example #6
        public void TestMatPixelAccess()
        {
            Mat m1 = EmguAssert.LoadMat("lena.jpg");

            byte[]   data   = new byte[m1.Width * m1.Height * 3]; //3-channel BGR image data
            GCHandle handle = GCHandle.Alloc(data, GCHandleType.Pinned);

            using (Mat m2 = new Mat(m1.Size, DepthType.Cv8U, 3, handle.AddrOfPinnedObject(), m1.Width * 3))
                CvInvoke.BitwiseNot(m1, m2);
            handle.Free();
            //The data array now contains the pixel data of the inverted lena image.
            //Note that if m2 is allocated with the wrong size, the data[] array will contain all 0s and no exception will be thrown,
            //so be careful when wrapping pinned managed memory in a Mat like this.
        }
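For occasional pixel reads and writes, the generic Image&lt;TColor, TDepth&gt; wrapper is a safer route than pinning your own array, since it owns its buffer and checks bounds. A minimal sketch:

        public void PixelAccessSketch()
        {
            using (Image<Bgr, Byte> img = EmguAssert.LoadImage<Bgr, Byte>("lena.jpg"))
            {
                //Data is indexed as [row, column, channel]; channel 0 is blue in a Bgr image.
                byte blue = img.Data[10, 20, 0];
                img.Data[10, 20, 0] = (byte)(255 - blue);
            }
        }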
Example #7
        public static bool TestFeature2DTracker(Feature2D keyPointDetector, Feature2D descriptorGenerator)
        {
            //for (int k = 0; k < 1; k++)
            {
                Feature2D feature2D = null;
                if (keyPointDetector == descriptorGenerator)
                {
                    //A single Feature2D instance can both detect keypoints and compute descriptors.
                    feature2D = keyPointDetector;
                }

                Mat modelImage = EmguAssert.LoadMat("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch        stopwatch      = Stopwatch.StartNew();
                VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
                Mat modelDescriptors            = new Mat();
                if (feature2D != null)
                {
                    feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
                }
                else
                {
                    keyPointDetector.DetectRaw(modelImage, modelKeypoints);
                    descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
                }
                stopwatch.Stop();
                EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image<Gray, Byte> observedImage = EmguAssert.LoadImage<Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();
                #region extract features from the observed image
                stopwatch.Reset();
                stopwatch.Start();
                VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
                using (Mat observedDescriptors = new Mat())
                {
                    if (feature2D != null)
                    {
                        feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
                    }
                    else
                    {
                        keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                        descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
                    }

                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                    #endregion

                    //Merge the object image and the observed image into one big image for display
                    Image<Gray, Byte> res = modelImage.ToImage<Gray, Byte>().ConcateVertical(observedImage);

                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    Mat homography = null;

                    stopwatch.Reset();
                    stopwatch.Start();

                    int          k  = 2;
                    DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
                    //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
                    //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                        using (BFMatcher matcher = new BFMatcher(dt))
                        {
                            //ParamDef[] parameterDefs = matcher.GetParams();
                            matcher.Add(modelDescriptors);
                            matcher.KnnMatch(observedDescriptors, matches, k, null);

                            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                            mask.SetTo(new MCvScalar(255));
                            //mask.SetValue(255);
                            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                {
                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                                }
                            }
                        }
                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                    bool success = false;
                    if (homography != null)
                    {
                        PointF[] points = pts.Clone() as PointF[];
                        points = CvInvoke.PerspectiveTransform(points, homography);
                        //homography.ProjectPoints(points);

                        for (int i = 0; i < points.Length; i++)
                        {
                            points[i].Y += modelImage.Height;
                        }

                        res.DrawPolyline(
#if NETFX_CORE
                            Extensions.
#else
                            Array.
#endif
                            ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                        success = true;
                    }
                    //Emgu.CV.UI.ImageViewer.Show(res);
                    return success;
                }



                /*
                 * stopwatch.Reset(); stopwatch.Start();
                 * //set the initial region to be the whole image
                 * using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
                 * {
                 * priorMask.SetValue(1.0);
                 * homography = tracker.CamShiftTrack(
                 *    observedFeatures,
                 *    (RectangleF)observedImage.ROI,
                 *    priorMask);
                 * }
                 * Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                 *
                 * if (homography != null) //set the initial tracking window to be the whole image
                 * {
                 * PointF[] points = pts.Clone() as PointF[];
                 * homography.ProjectPoints(points);
                 *
                 * for (int i = 0; i < points.Length; i++)
                 *    points[i].Y += modelImage.Height;
                 * res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                 * return true;
                 * }
                 * else
                 * {
                 * return false;
                 * }*/
            }
        }
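A typical invocation passes a single Feature2D instance as both the detector and the descriptor generator, which takes the DetectAndCompute fast path above. A sketch using ORB; ORBDetector is the wrapper name used in the Emgu 4.x test suite, so treat it as an assumption for other versions:

        public void RunOrbTrackerSketch()
        {
            using (ORBDetector orb = new ORBDetector(700))
            {
                bool success = TestFeature2DTracker(orb, orb);
                EmguAssert.IsTrue(success, "ORB failed to locate the model image in the scene");
            }
        }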
Example #8
        public void TestOclKernel()
        {
            if (CvInvoke.HaveOpenCL && CvInvoke.UseOpenCL)
            {
                Ocl.Device defaultDevice = Ocl.Device.Default;

                //The kernel below operates on 8-bit gray scale pixels, so no float conversion is needed.
                Mat img     = EmguAssert.LoadMat("lena.jpg");
                Mat imgGray = new Mat();
                CvInvoke.CvtColor(img, imgGray, ColorConversion.Bgr2Gray);
                UMat umat    = imgGray.GetUMat(AccessType.Read, UMat.Usage.AllocateDeviceMemory);
                UMat umatDst = new UMat();
                umatDst.Create(umat.Rows, umat.Cols, DepthType.Cv8U, umat.NumberOfChannels, UMat.Usage.AllocateDeviceMemory);

                String sourceStr = @"
__kernel void magnitude_filter_8u(
       __global const uchar* src, int src_step, int src_offset,
       __global uchar* dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,
       float scale)
{
   int x = get_global_id(0);
   int y = get_global_id(1);
   if (x < dst_cols && y < dst_rows)
   {
       int dst_idx = y * dst_step + x + dst_offset;
       if (x > 0 && x < dst_cols - 1 && y > 0 && y < dst_rows - 2)
       {
           int src_idx = y * src_step + x + src_offset;
           int dx = (int)src[src_idx]*2 - src[src_idx - 1]          - src[src_idx + 1];
           int dy = (int)src[src_idx]*2 - src[src_idx - 1*src_step] - src[src_idx + 1*src_step];
           dst[dst_idx] = convert_uchar_sat(sqrt((float)(dx*dx + dy*dy)) * scale);
       }
       else
       {
           dst[dst_idx] = 0;
       }
   }
}";

                using (CvString errorMsg = new CvString())
                    using (Ocl.ProgramSource ps = new Ocl.ProgramSource(sourceStr))
                        using (Ocl.Kernel kernel = new Ocl.Kernel())
                            using (Ocl.KernelArg srcArg = new Ocl.KernelArg(Ocl.KernelArg.Flags.ReadOnly | Ocl.KernelArg.Flags.NoSize, umat))
                                using (Ocl.KernelArg dstArg = new Ocl.KernelArg(Ocl.KernelArg.Flags.WriteOnly, umatDst))
                                {
                                    float scale = 1.0f;

                                    //The name passed to Create must match the __kernel function in the source.
                                    bool success = kernel.Create("magnitude_filter_8u", ps, String.Empty, errorMsg);
                                    EmguAssert.IsTrue(success, errorMsg.ToString());

                                    //Arguments are set in the same order as the kernel signature; the KernelArg flags
                                    //(mirroring cv::ocl::KernelArg) expand each UMat into ptr/step/offset and, for
                                    //WriteOnly, rows/cols, matching the src and dst parameter lists above.
                                    int idx = 0;
                                    idx = kernel.Set(idx, srcArg);
                                    idx = kernel.Set(idx, dstArg);
                                    idx = kernel.Set(idx, ref scale);
                                    IntPtr[] globalThreads = new IntPtr[] { new IntPtr(umat.Cols), new IntPtr(umat.Rows), new IntPtr(1) };
                                    success = kernel.Run(globalThreads, null, true);
                                    EmguAssert.IsTrue(success, "Failed to run the kernel");
                                    using (Mat matDst = umatDst.GetMat(AccessType.Read))
                                        matDst.Save("tmp.jpg");
                                }
            }
        }
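A custom kernel is only worth the ceremony when no built-in function fits; for built-ins, the same UMat runs through OpenCL transparently. A minimal sketch timing one built-in with the OpenCL path on and off, assuming CvInvoke.UseOpenCL is settable as in the guard above:

        public void TimeBuiltInWithAndWithoutOpenCLSketch()
        {
            using (Mat m = EmguAssert.LoadMat("lena.jpg"))
                using (UMat src = m.GetUMat(AccessType.Read, UMat.Usage.AllocateDeviceMemory))
                    using (UMat dst = new UMat())
                    {
                        foreach (bool useOpenCL in new bool[] { true, false })
                        {
                            CvInvoke.UseOpenCL = useOpenCL && CvInvoke.HaveOpenCL;
                            Stopwatch watch = Stopwatch.StartNew();
                            CvInvoke.GaussianBlur(src, dst, new Size(15, 15), 3);
                            watch.Stop();
                            EmguAssert.WriteLine(String.Format("OpenCL {0}: {1} milli-sec", CvInvoke.UseOpenCL, watch.ElapsedMilliseconds));
                        }
                    }
        }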