Example #1
 public void TestHughCircle()
 {
     if (CudaInvoke.HasCuda)
     {
         Mat m = new Mat(480, 480, DepthType.Cv8U, 1);
         m.SetTo(new MCvScalar(0));
         CvInvoke.Circle(m, new Point(240, 240), 100, new MCvScalar(150), 10);
         GpuMat gm = new GpuMat();
         gm.Upload(m);
         using (CudaHoughCirclesDetector detector = new CudaHoughCirclesDetector(1, 30, 120, 30, 10, 400))
             using (GpuMat circlesGpu = new GpuMat())
                 using (Mat circlesMat = new Mat())
                 {
                     detector.Detect(gm, circlesGpu);
                     circlesGpu.Download(circlesMat);
                     CircleF[] circles       = new CircleF[circlesMat.Cols];
                     GCHandle  circlesHandle = GCHandle.Alloc(circles, GCHandleType.Pinned);
                     Emgu.CV.Util.CvToolbox.Memcpy(circlesHandle.AddrOfPinnedObject(), circlesMat.DataPointer, Marshal.SizeOf(typeof(CircleF)) * circles.Length);
                     circlesHandle.Free();
                     foreach (var circle in circles)
                     {
                         CvInvoke.Circle(m, Point.Round(circle.Center), (int)circle.Radius, new MCvScalar(255));
                     }
                 }
     }
 }
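The pinned-handle copy above can be factored into a reusable helper. A minimal sketch, assuming the same Emgu CV API as the test (DownloadStructs is a hypothetical name, not an Emgu method):

 // Hypothetical helper: copy a single-row result Mat (one blittable struct
 // per column, e.g. CircleF) into a managed array via a pinned GCHandle.
 public static T[] DownloadStructs<T>(Mat resultMat) where T : struct
 {
     T[] result = new T[resultMat.Cols];
     GCHandle handle = GCHandle.Alloc(result, GCHandleType.Pinned);
     try
     {
         Emgu.CV.Util.CvToolbox.Memcpy(
             handle.AddrOfPinnedObject(),
             resultMat.DataPointer,
             Marshal.SizeOf(typeof(T)) * result.Length);
     }
     finally
     {
         handle.Free(); // always unpin, even if the copy throws
     }
     return result;
 }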
Example #2
        public void cuda_MeanShift()
        {
            Mat  temp = Image("lenna.png", ImreadModes.AnyColor);
            Mat  src  = temp.CvtColor(ColorConversionCodes.RGB2RGBA);
            Size size = src.Size();

            int spatialRad = 30;
            int colorRad   = 30;

            using (GpuMat g_src = new GpuMat(size, MatType.CV_8UC4))
                using (GpuMat d_dst = new GpuMat()) {
                    g_src.Upload(src);

                    Cuda.cuda.meanShiftFiltering(g_src, d_dst, spatialRad, colorRad);

                    Mat dst_gold = new Mat();
                    Cv2.PyrMeanShiftFiltering(temp, dst_gold, spatialRad, colorRad);

                    Mat dst = new Mat();
                    d_dst.Download(dst);

                    Mat result = new Mat();
                    Cv2.CvtColor(dst, result, ColorConversionCodes.RGBA2RGB);

                    ImageEquals(result, dst_gold, 3);
                    ShowImagesWhenDebugMode(dst, dst_gold);
                }
        }
Example #3
        public void TestCudaUploadDownload()
        {
            if (!CudaInvoke.HasCuda)
            {
                return;
            }

            Mat m = new Mat(new Size(480, 320), DepthType.Cv8U, 3);

            CvInvoke.Randu(m, new MCvScalar(), new MCvScalar(255, 255, 255));

            #region test for async download & upload
            Stream stream = new Stream();
            GpuMat gm1    = new GpuMat();
            gm1.Upload(m, stream);

            Mat m2 = new Mat();
            gm1.Download(m2, stream);

            stream.WaitForCompletion();
            EmguAssert.IsTrue(m.Equals(m2));
            #endregion

            #region test for blocking download & upload
            GpuMat gm2 = new GpuMat();
            gm2.Upload(m);
            Mat m3 = new Mat();
            gm2.Download(m3);
            EmguAssert.IsTrue(m.Equals(m3));
            #endregion
        }
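The upload and download above are queued on the same Stream, so m2 is only safe to read after WaitForCompletion. Independent transfers can be queued on separate streams; a sketch (whether the copies actually overlap depends on the driver and on whether the host memory is page-locked):

            // Sketch: two independent round-trips queued on separate streams.
            // m is a source Mat, as in the test above.
            using (Stream s1 = new Stream())
            using (Stream s2 = new Stream())
            using (GpuMat ga = new GpuMat())
            using (GpuMat gb = new GpuMat())
            {
                Mat a = new Mat(), b = new Mat();
                ga.Upload(m, s1);
                gb.Upload(m, s2);
                ga.Download(a, s1);
                gb.Download(b, s2);
                s1.WaitForCompletion(); // sync each stream before reading a or b
                s2.WaitForCompletion();
            }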
Example #4
        // uses cuda if available
        public static Bitmap ResizeImage(Bitmap imgToResize, Size size)
        {
            // emgu cuda detection can be a little finicky sometimes, but a try/catch costs virtually nothing compared to actually changing the size of the img
            if (CudaInvoke.HasCuda)
            {
                try {
                    // determine ratio between current and desired size

                    double            ratio       = (double)size.Height / (imgToResize.Height);
                    Mat               dst         = new Mat();
                    Image <Bgr, Byte> imageCV     = new Image <Bgr, byte>(imgToResize);
                    var               result      = imageCV.CopyBlank();
                    var               handle      = GCHandle.Alloc(result);
                    Mat               matToResize = imageCV.Mat;
                    using (GpuMat gMatSrc = new GpuMat())
                        using (GpuMat gMatDst = new GpuMat()) {
                            gMatSrc.Upload(matToResize);
                            Emgu.CV.Cuda.CudaInvoke.Resize(gMatSrc, gMatDst, new Size(0, 0), ratio, ratio, Inter.Area);
                            gMatDst.Download(dst);
                        }
                    handle.Free();
                    return(dst.Bitmap);
                }
                catch {
                    return(ResizeImageNoCuda(imgToResize, size));
                }
            }
            else
            {
                return(ResizeImageNoCuda(imgToResize, size));
            }
        }
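ResizeImageNoCuda is referenced above but not shown. A plausible CPU fallback, sketched on the assumption that it mirrors the CUDA path (only the name comes from the code above; the body is an illustration):

        // Hypothetical CPU fallback: same height-based ratio, CPU Resize.
        public static Bitmap ResizeImageNoCuda(Bitmap imgToResize, Size size)
        {
            double ratio = (double)size.Height / imgToResize.Height;
            Image <Bgr, Byte> imageCV = new Image <Bgr, byte>(imgToResize);
            Mat dst = new Mat();
            CvInvoke.Resize(imageCV.Mat, dst, new Size(0, 0), ratio, ratio, Inter.Area);
            return(dst.Bitmap);
        }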
Example #5
 /// <summary>
 /// Converts to UIImage.
 /// </summary>
 /// <returns>The UIImage.</returns>
 public static UIImage ToUIImage(this GpuMat gpuMat)
 {
     using (Mat tmp = new Mat())
     {
         gpuMat.Download(tmp);
         return(tmp.ToUIImage());
     }
 }
Example #6
 /// <summary>
 /// Convert the gpuMat into Bitmap, the pixel values are copied over to the Bitmap
 /// </summary>
 public static Bitmap ToBitmap(this GpuMat gpuMat)
 {
     using (Mat tmp = new Mat())
     {
         gpuMat.Download(tmp);
         return(tmp.ToBitmap());
     }
 }
Example #7
        private void ProcessFrame(object sender, EventArgs e)
        {
            if (_capture != null && _capture.Ptr != IntPtr.Zero)
            {
                _capture.Retrieve(frame, 0);
                gpuFrame.Upload(frame);
                cudaBgMOG2.Apply(gpuFrame, gpuSub);
                CudaInvoke.Threshold(gpuSub, gpuSub, 12, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
                gpuSub.Download(outSub);

                CvInvoke.FindContours(outSub, contours, hiererachy, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone);

                for (int i = 0; i < contours.Size; i++)
                {
                    if (CvInvoke.ContourArea(contours[i]) > 50)
                    {
                        contoursGood.Push(contours[i]);
                    }
                }

                grayImage = new Image <Gray, byte>(frame.Width, frame.Height, new Gray(0));
                grayImage.SetZero();
                CvInvoke.DrawContours(grayImage, contoursGood, -1, new MCvScalar(255, 255, 255), -1);
                CvInvoke.Dilate(grayImage, grayImage, element, new Point(-1, -1), 6, Emgu.CV.CvEnum.BorderType.Constant, new MCvScalar(255, 255, 255));
                contoursGood.Clear();

                CvInvoke.FindContours(grayImage, contours, hiererachy, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone);

                List <Point> points = new List <Point>();

                for (int i = 0; i < contours.Size; i++)
                {
                    MCvMoments moments          = CvInvoke.Moments(contours[i], false);
                    Point      WeightedCentroid = new Point((int)(moments.M10 / moments.M00), (int)(moments.M01 / moments.M00));
                    points.Add(WeightedCentroid);
                }

                blobList.AssignToBlobs(points);
                blobList.Draw(frame);
                blobList.Draw(mask);
                blobList.Update();

                CvInvoke.DrawContours(frame, contours, -1, new MCvScalar(0, 0, 255));

                imageBox1.Image = frame;
                imageBox2.Image = mask;

                grayImage.Dispose();

                indexFrame++;
            }
        }
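The fields used by ProcessFrame are declared elsewhere in the form. A plausible set of declarations, assuming Emgu CV 3.x (the constructor arguments are illustrative; the blob tracker, mask and image boxes are omitted):

        private VideoCapture _capture;                        // opened elsewhere
        private Mat frame  = new Mat();
        private Mat outSub = new Mat();
        private Mat element = CvInvoke.GetStructuringElement(
            ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
        private GpuMat gpuFrame = new GpuMat();
        private GpuMat gpuSub   = new GpuMat();
        private CudaBackgroundSubtractorMOG2 cudaBgMOG2 =
            new CudaBackgroundSubtractorMOG2(500, 16, true);  // history, varThreshold, shadows
        private VectorOfVectorOfPoint contours     = new VectorOfVectorOfPoint();
        private VectorOfVectorOfPoint contoursGood = new VectorOfVectorOfPoint();
        private Mat hiererachy = new Mat();                   // (sic) spelling matches the method body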
Example #8
        public static Bitmap ToBitmap(this GpuMat gpuMat)
        {
            Mat val = (Mat)(object)new Mat();

            try
            {
                gpuMat.Download((IOutputArray)(object)val, (Stream)null);
                return(val.ToBitmap());
            }
            finally
            {
                ((IDisposable)val)?.Dispose();
            }
        }
Example #9
        public override Mat ComputeDepthMap(Image <Bgr, byte> leftImage, Image <Bgr, byte> rightImage)
        {
            CudaStereoBM _cudaStereoBM = CreateCudaStereoBM();

            ConvertImageToGray(leftImage, rightImage);

            GpuMat            imageDisparity  = new GpuMat();
            GpuMat            imageToSave     = new GpuMat();
            Image <Bgr, byte> disparityToSave = new Image <Bgr, byte>(leftImage.Size);
            Mat disparity = new Mat();

            _cudaStereoBM.FindStereoCorrespondence(LeftGrayImage.ImageToGpuMat(), RightGrayImage.ImageToGpuMat(), imageDisparity);

            imageDisparity.ConvertTo(imageToSave, DepthType.Cv8U);
            imageToSave.Download(disparityToSave);
            imageDisparity.Download(disparity);

            return(disparity);
        }
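CreateCudaStereoBM is not shown; a minimal sketch (the numDisparities/blockSize values are assumptions):

        // Hypothetical factory: GPU block-matching stereo.
        // numDisparities should be a multiple of 8; the block size must be odd.
        private CudaStereoBM CreateCudaStereoBM()
        {
            return(new CudaStereoBM(64, 19));
        }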
Example #10
        public void TestCudaPyrLKOpticalFlow()
        {
            if (!CudaInvoke.HasCuda)
            {
                return;
            }
            Image <Gray, Byte> prevImg, currImg;

            AutoTestVarious.OpticalFlowImage(out prevImg, out currImg);
            Mat flow = new Mat();
            CudaDensePyrLKOpticalFlow opticalflow = new CudaDensePyrLKOpticalFlow(new Size(21, 21), 3, 30, false);

            using (CudaImage <Gray, Byte> prevGpu = new CudaImage <Gray, byte>(prevImg))
                using (CudaImage <Gray, byte> currGpu = new CudaImage <Gray, byte>(currImg))
                    using (GpuMat flowGpu = new GpuMat())
                    {
                        opticalflow.Calc(prevGpu, currGpu, flowGpu);

                        flowGpu.Download(flow);
                    }
        }
Example #11
        public void TestCudaBroxOpticalFlow()
        {
            if (!CudaInvoke.HasCuda)
            {
                return;
            }
            Image <Gray, Byte> prevImg, currImg;

            AutoTestVarious.OpticalFlowImage(out prevImg, out currImg);
            Mat flow = new Mat();
            CudaBroxOpticalFlow opticalflow = new CudaBroxOpticalFlow();

            using (CudaImage <Gray, float> prevGpu = new CudaImage <Gray, float>(prevImg.Convert <Gray, float>()))
                using (CudaImage <Gray, float> currGpu = new CudaImage <Gray, float>(currImg.Convert <Gray, float>()))
                    using (GpuMat flowGpu = new GpuMat())
                    {
                        opticalflow.Calc(prevGpu, currGpu, flowGpu);

                        flowGpu.Download(flow);
                    }
        }
Example #12
        public void cuda_buildWarpPerspectiveMaps()
        {
            Mat src = Image("lenna.png", ImreadModes.Grayscale);

            Size   srcSize = src.Size();
            double angle   = Cv2.PI / 4;


            Mat M = new Mat(3, 3, MatType.CV_64FC1);

            M.Set <double>(0, 0, System.Math.Cos(angle));
            M.Set <double>(0, 1, -System.Math.Sin(angle));
            M.Set <double>(0, 2, srcSize.Width / 2.0);
            M.Set <double>(1, 0, System.Math.Sin(angle));
            M.Set <double>(1, 1, System.Math.Cos(angle));
            M.Set <double>(1, 2, 0.0);
            M.Set <double>(2, 0, 0.0);
            M.Set <double>(2, 1, 0.0);
            M.Set <double>(2, 2, 1.0);

            using (Mat dst = new Mat())
                using (Mat dst_gold = new Mat()) {
                    GpuMat g_xmap = new GpuMat();
                    GpuMat g_ymap = new GpuMat();

                    Cuda.cuda.buildWarpPerspectiveMaps(M, false, srcSize, g_xmap, g_ymap);


                    Cv2.WarpPerspective(src, dst_gold, M, srcSize, InterpolationFlags.Nearest, BorderTypes.Constant);

                    Mat xmap = new Mat();
                    Mat ymap = new Mat();
                    g_xmap.Download(xmap);
                    g_ymap.Download(ymap);
                    Cv2.Remap(src, dst, xmap, ymap, InterpolationFlags.Nearest, BorderTypes.Constant);

                    ShowImagesWhenDebugMode(dst_gold, dst);
                    ImageEquals(dst_gold, dst);
                }
        }
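For reference, the matrix assembled element-by-element above is

    M = | cos(a)  -sin(a)  w/2 |
        | sin(a)   cos(a)   0  |
        |   0        0      1  |

with a = PI/4 and w the source width: a rotation about the origin plus a half-width horizontal shift. buildWarpPerspectiveMaps precomputes the per-pixel xmap/ymap tables for this homography, so Remap with those maps should match WarpPerspective with M, which is exactly what the test asserts.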
Example #13
        public void TestSplitMerge()
        {
            if (CudaInvoke.HasCuda)
            {
                using (Image <Bgr, Byte> img1 = new Image <Bgr, byte>(1200, 640))
                {
                    img1.SetRandUniform(new MCvScalar(0, 0, 0), new MCvScalar(255, 255, 255));

                    using (GpuMat gpuImg1 = new GpuMat(img1))
                    {
                        GpuMat[] channels = gpuImg1.Split(null);

                        for (int i = 0; i < channels.Length; i++)
                        {
                            Mat imgL = channels[i].ToMat();
                            Image <Gray, Byte> imgR = img1[i];
                            Assert.IsTrue(imgL.Equals(imgR.Mat), "failed split GpuMat");
                        }

                        using (GpuMat gpuImg2 = new GpuMat())
                        {
                            gpuImg2.MergeFrom(channels, null);
                            using (Image <Bgr, byte> img2 = new Image <Bgr, byte>(img1.Size))
                            {
                                gpuImg2.Download(img2);
                                Assert.IsTrue(img2.Equals(img1), "failed split and merge test");
                            }
                        }

                        for (int i = 0; i < channels.Length; i++)
                        {
                            channels[i].Dispose();
                        }
                    }
                }
            }
        }
Example #14
        // calculates the optical flow according to the Farneback algorithm
        public Bitmap Dense_Optical_Flow(Bitmap bmp, OpticalFlowVariable optiVariables, Camera cam)
        {
            frameReduction = optiVariables.frameReduction < 1 ? 1 : optiVariables.frameReduction;
            // the current frame becomes the previous frame (prev_frame now holds the frame processed on the last call)
            prev_frame = matframe;

            Image <Bgr, Byte> imageCV = new Image <Bgr, byte>(bmp); //Image Class from Emgu.CV

            matframe = imageCV.Mat;                                 //This is your Image converted to Mat

            if (prev_frame == null)
            {
                return(bmp);
            }

            // increment the frame counter
            frame_nr += 1;


            // initialize this Image Matrix before resizing (see below), so it remains at original size
            img_average_vectors = new Image <Bgr, byte>(matframe.Width, matframe.Height);

            orig_height = matframe.Height;

            Size n_size = new Size(matframe.Width / frameReduction,
                                   matframe.Height / frameReduction);

            // Resize the current frame (smaller to reduce processing load);
            // the previous frame was already reduced on the last call

            Mat matFramDst = new Mat();

            using (GpuMat gMatSrc = new GpuMat())
                using (GpuMat gMatDst = new GpuMat()) {
                    gMatSrc.Upload(matframe);
                    Emgu.CV.Cuda.CudaInvoke.Resize(gMatSrc, gMatDst, new Size(0, 0), (double)1 / frameReduction, (double)1 / frameReduction);
                    gMatDst.Download(matFramDst);
                }

            matframe = matFramDst;

            if (prev_frame.Height != matframe.Height)
            {
                return(bmp);
            }



            // images that are compared during the flow operations (see below)
            // these need to be greyscale images
            Image <Gray, Byte> prev_grey_img, curr_grey_img;

            prev_grey_img = new Image <Gray, byte>(prev_frame.Width, prev_frame.Height);
            curr_grey_img = new Image <Gray, byte>(matframe.Width, matframe.Height);

            // Image arrays to store information of flow vectors (one image array for each direction, which is x and y)
            Image <Gray, float> flow_x;
            Image <Gray, float> flow_y;

            flow_x = new Image <Gray, float>(matframe.Width, matframe.Height);
            flow_y = new Image <Gray, float>(matframe.Width, matframe.Height);

            // convert the current and previous frames to greyscale (CvtColor works here without the Convert function)
            CvInvoke.CvtColor(matframe, curr_grey_img, ColorConversion.Bgr2Gray);
            CvInvoke.CvtColor(prev_frame, prev_grey_img, ColorConversion.Bgr2Gray);


            // Apply Farneback dense optical flow
            // parameters are the two greyscale images (these are compared)
            // and the two image arrays that receive the flow information
            // the rest of the parameters are:
            // pyrScale: image scale to build pyramids; 0.5 means each next layer is half the size of the previous one
            // levels: number of pyramid levels; 1 means no extra layers
            // winSize: the averaging window size; larger values = more robust to noise but more blur
            // iterations: number of iterations at each pyramid level
            // polyN: size of the pixel neighbourhood used for the per-pixel polynomial expansion; higher = more precision but more blur
            // polySigma: standard deviation of the Gaussian used to smooth the polynomial expansion derivatives
            // flags: operation flags (0 = none)
            CvInvoke.CalcOpticalFlowFarneback(prev_grey_img, curr_grey_img, flow_x, flow_y, 0.5, 3, 10, 3, 6, 1.3, 0);


            // call function that shows results of Farneback algorithm
            Image <Bgr, Byte> farnebackImg = Draw_Farneback_flow_map(matframe.ToImage <Bgr, Byte>(), flow_x, flow_y, optiVariables);// given in global variables section

            // Release memory
            prev_grey_img.Dispose();
            curr_grey_img.Dispose();
            flow_x.Dispose();
            flow_y.Dispose();

            //return farnebackImg.ToBitmap();

            Image <Bgra, Byte> alphaImgShape = new Image <Bgra, byte>(imageCV.Size.Width, imageCV.Size.Height, new Bgra(0, 0, 0, .5));

            CvInvoke.AddWeighted(alphaImgShape, .5, BlackTransparent(farnebackImg), .5, 0, alphaImgShape);

            Mat alphaimg = new Mat();

            CvInvoke.CvtColor(imageCV, alphaimg, ColorConversion.Bgr2Bgra);

            if (CudaInvoke.HasCuda)
            {
                using (GpuMat gMatSrc = new GpuMat())
                    using (GpuMat gMatSrc2 = new GpuMat())
                        using (GpuMat gMatDst = new GpuMat()) {
                            gMatSrc.Upload(alphaimg);
                            gMatSrc2.Upload(alphaImgShape);
                            CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                            gMatDst.Download(alphaimg);
                        }
                return(alphaimg.Bitmap);
            }
            else
            {
                return(Overlay(imageCV, alphaImgShape).ToBitmap());
            }
        }
Example #15
        // function that depicts the results of the optical flow operations
        // requires the image being processed and the Farneback results stored in flow_x and flow_y
        // optiVars.stepRate gives the distance between sampled pixels; optiVars.shiftThatCounts is the minimum vector length that counts as motion
        private Image <Bgr, Byte> Draw_Farneback_flow_map(Image <Bgr, Byte> img_curr, Image <Gray, float> flow_x, Image <Gray, float> flow_y, OpticalFlowVariable optiVars)
        {
            // NOTE: flow Images (flow_x and flow_y) are organized like this:
            // at index (is position of pixel before optical flow operation) of Image array
            // the shift of this specific pixel after the flow operation is stored
            // if no shift has occurred the value stored at the index is zero
            // (i.e., pixel[index] = 0)
            GC.Collect(0, GCCollectionMode.Forced);

            Image <Bgr, Byte> blackFrame = new Image <Bgr, Byte>(new Bitmap(1280 / frameReduction, 720 / frameReduction));

            System.Drawing.Point from_dot_xy = new System.Drawing.Point(); // point variable to draw lines between dots before and after flow (=vectors)
            System.Drawing.Point to_dot_xy   = new System.Drawing.Point(); // point variable, which will be endpoint of line between dots before and after flow

            MCvScalar col;                                                 // variable to store color values of lines representing flow vectors

            col.V0 = 100;
            col.V1 = 255;
            col.V2 = 0;
            col.V3 = 100;


            // for drawing central line based on window size
            System.Drawing.Point[] window_centre = new System.Drawing.Point[2];

            window_centre[0].X = img_curr.Width / 2;// * Convert.ToInt32(txt_resize_factor.Text)/ 2;
            window_centre[0].Y = 0;

            window_centre[1].X = img_curr.Width / 2; //* Convert.ToInt32(txt_resize_factor.Text) / 2;
            window_centre[1].Y = orig_height;


            // Point variables that constitute starting point for drawing summed and mean vectors onto image
            System.Drawing.Point vector_right = new System.Drawing.Point();
            System.Drawing.Point vector_left  = new System.Drawing.Point();

            // variables used for summing vectors to left and to the right of the window's centre
            System.Drawing.Point vector_right_end_window = new System.Drawing.Point();
            System.Drawing.Point vector_left_end_window  = new System.Drawing.Point();


            // determine centre of output window (needed for summed vectors)
            int mid_point_horz = 1280 * frameReduction / 2; // width
            int mid_point_vert = 720 * frameReduction / 2;  // height

            // landmark coordinates that are origin of direction vectors
            // near centre of image window; to depict motion of left and right half of "body" (or more precisely, window)
            vector_right.X = (mid_point_horz + 10) * optiVars.stepRate;
            vector_right.Y = mid_point_vert * optiVars.stepRate;

            vector_left.X = (mid_point_horz - 10) * optiVars.stepRate;
            vector_left.Y = mid_point_vert * optiVars.stepRate;


            // counting landmarks in flow field that exceed a certain value (shift_that_counts); left and right is based on centre of window (half of width)
            double count_X_right = 0;
            double count_Y_right = 0;

            double count_X_left = 0;
            double count_Y_left = 0;

            // loops over image matrix; position of dots before and after optical flow operations are compared and vector is drawn between the old and the new position
            for (int i = 0; i < flow_x.Rows; i += optiVars.stepRate)     // NOTE: steps are given by step variable in arguments of method
            {
                for (int j = 0; j < flow_x.Cols; j += optiVars.stepRate) // BEGIN FOR

                // pixel shift measured by optical flow is transferred to point variables
                // storing starting point of motion (from_dot..) and its end points (to_dot...)

                {
                    to_dot_xy.X = (int)flow_x.Data[i, j, 0]; // access single pixel of flow matrix where x-coords of pixel after flow are stored; only gives the shift
                    to_dot_xy.Y = (int)flow_y.Data[i, j, 0]; // access single pixel of flow matrix where y-coords of pixel after flow are stored; only gives the shift

                    from_dot_xy.X = j;                       // index of loop is position on image (here: x-coord); X is cols
                    from_dot_xy.Y = i;                       // index of loop is position on image (here: y-coord); Y is rows


                    // LEFT SIDE OF WINDOW BASED CENTRE
                    if (j < window_centre[0].X)
                    {
                        //  count the x and y indices and sum them when they exceed the value given by shiftThatCounts
                        if (Math.Abs(to_dot_xy.X) > optiVars.shiftThatCounts)
                        {
                            count_X_left++;
                        }
                        if (Math.Abs(to_dot_xy.Y) > optiVars.shiftThatCounts)
                        {
                            count_Y_left++;
                        }
                        // sum up vectors
                        vector_left_end_window.Y += to_dot_xy.Y;
                        vector_left_end_window.X += to_dot_xy.X;
                    }
                    else //(j > window_centre[0].X)// WINDOW BASED CENTRE
                    {
                        //  like above; count the x and y indices and sum them
                        if (Math.Abs(to_dot_xy.X) > optiVars.shiftThatCounts)
                        {
                            count_X_right++;
                        }

                        if (Math.Abs(to_dot_xy.Y) > optiVars.shiftThatCounts)
                        {
                            count_Y_right++;
                        }

                        // sum  vectors
                        vector_right_end_window.Y += to_dot_xy.Y;
                        vector_right_end_window.X += to_dot_xy.X;
                    }

                    to_dot_xy.X = from_dot_xy.X + to_dot_xy.X; // new x-coord position of pixel (taking into account distance from the origin)
                    to_dot_xy.Y = from_dot_xy.Y + to_dot_xy.Y; // new y-coord position of pixel

                    // draw a line between the original pixel position and the shifted position stored in the flow field
                    if (GetDistance(from_dot_xy.X, from_dot_xy.Y, to_dot_xy.X, to_dot_xy.Y) > optiVars.shiftThatCounts)
                    {
                        CvInvoke.Line(blackFrame, from_dot_xy, to_dot_xy, col, 1);
                    }


                    //CvInvoke.Imshow("Flow field vectors", img_curr); // show image with flow depicted as lines
                } // END of inner for loop
            }
            Mat blackDst = new Mat();
            Mat BlackMat = blackFrame.Mat;

            using (GpuMat gMatSrc = new GpuMat())
                using (GpuMat gMatDst = new GpuMat()) {
                    gMatSrc.Upload(BlackMat);
                    Emgu.CV.Cuda.CudaInvoke.Resize(gMatSrc, gMatDst, new Size(0, 0), frameReduction, frameReduction, Inter.Area);
                    gMatDst.Download(blackDst);
                }

            GC.Collect();

            return(blackDst.ToImage <Bgr, Byte>());
        }
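GetDistance, BlackTransparent and Overlay are helpers defined elsewhere in the class. GetDistance is presumably plain Euclidean distance; a sketch (an assumption):

        // Hypothetical helper: Euclidean distance between two points.
        private double GetDistance(double x1, double y1, double x2, double y2)
        {
            return(Math.Sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1)));
        }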
Example #16
        public static void FindMatch(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix <int> indices, out Matrix <byte> mask, out HomographyMatrix homography)
        {
            int          k = 2;
            double       uniquenessThreshold = 0.8;
            SURFDetector surfCPU             = new SURFDetector(500, false);
            Stopwatch    watch;

            homography = null;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))
                                                        using (Stream stream = new Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //gpu implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                                                                }

                                                            observedKeyPoints = new VectorOfKeyPoint();
                                                            surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                            //wait for the stream to complete its tasks
                                                            //We can perform other CPU-intensive work here while waiting for the stream to complete.
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                                                                }
                                                            }

                                                            watch.Stop();
                                                        }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix <float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix <float>            observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
                BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix <int>(observedDescriptors.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                }

                watch.Stop();
            }
            matchTime = watch.ElapsedMilliseconds;
        }
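A typical call site for FindMatch, using only the signature shown above (the two input images are assumed to be loaded elsewhere):

            long             matchTime;
            VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
            Matrix <int>     indices;
            Matrix <byte>    mask;
            HomographyMatrix homography;

            FindMatch(modelImage, observedImage, out matchTime,
                      out modelKeyPoints, out observedKeyPoints,
                      out indices, out mask, out homography);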
Example #17
        private void ProcessFrame(object sender, EventArgs arg)
        {
            Image <Bgr, Byte>  frame         = _capture.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            Image <Gray, Byte> grayframe     = frame.Convert <Gray, Byte>();
            Image <Gray, Byte> modelImage    = new Image <Gray, byte>("DataPlate/" + 10 + ".jpg");
            Image <Gray, Byte> observedImage = grayframe;
            Stopwatch          watch;
            HomographyMatrix   homography = null;
            SURFDetector       surfCPU    = new SURFDetector(500, false);
            VectorOfKeyPoint   modelKeyPoints;
            VectorOfKeyPoint   observedKeyPoints;
            Matrix <int>       indices;
            Matrix <float>     dist;
            Matrix <byte>      mask;


            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))

                    #region SURF
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, 2, 1))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuMatchIndices.Size, 1))
                                                {
                                                    observedKeyPoints = new VectorOfKeyPoint();
                                                    surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);
                                                    matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);
                                                    indices = new Matrix <int>(gpuMatchIndices.Size);
                                                    dist    = new Matrix <float>(indices.Size);
                                                    gpuMatchIndices.Download(indices);
                                                    gpuMatchDist.Download(dist);

                                                    mask = new Matrix <byte>(dist.Rows, 1);

                                                    mask.SetValue(255);

                                                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                                                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                                                    if (nonZeroCount >= 4)
                                                    {
                                                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                        if (nonZeroCount >= 4)
                                                        {
                                                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                                                        }
                                                    }

                                                    watch.Stop();
                                                }
                            }
                #endregion
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                //MKeyPoint[] kpts = modelKeyPoints.ToArray();
                Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);
                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                Matrix <float>    observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
                BruteForceMatcher matcher             = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
                matcher.Add(modelDescriptors);
                int k = 2;
                indices = new Matrix <int>(observedDescriptors.Rows, k);

                dist = new Matrix <float>(observedDescriptors.Rows, k);
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

                mask = new Matrix <byte>(dist.Rows, 1);

                mask.SetValue(255);

                Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 20)
                {
                    nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 20)
                    {
                        homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                        XMLData();
                    }
                    else
                    {
                        textBox1.Text = string.Empty;
                        textBox2.Text = string.Empty;
                        textBox3.Text = string.Empty;
                        textBox4.Text = string.Empty;
                        textBox5.Text = string.Empty;
                    }
                }
                watch.Stop();
                #region draw the projected region on the image
                if (homography != null)
                {  //draw a rectangle along the projected model
                    Rectangle rect = modelImage.ROI;
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    homography.ProjectPoints(pts);
                    frame.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 2);
                }
                #endregion
                CaptureImageBox.Image = frame;
                DataImageBox.Image    = modelImage;
            }
        }
Example #19
        public static Bitmap PerformShapeDetection(Bitmap frame, ShapeDetectionVariables detectionVars)
        {
            StringBuilder msgBuilder = new StringBuilder("Performance: ");

            Image <Bgr, Byte> img = new Image <Bgr, byte>(frame);
            Mat MatImg            = img.Mat;

            Mat outputImg = new Mat();

            if (CudaInvoke.HasCuda)
            {
                using (GpuMat gMatSrc = new GpuMat())
                    using (GpuMat gMatDst = new GpuMat()) {
                        gMatSrc.Upload(MatImg);
                        CudaGaussianFilter noiseReduction = new CudaGaussianFilter(MatImg.Depth, img.NumberOfChannels, MatImg.Depth, img.NumberOfChannels, new Size(1, 1), 0);
                        noiseReduction.Apply(gMatSrc, gMatDst);
                        gMatDst.Download(outputImg);
                    }
            }
            else
            {
                Mat pyrDown = new Mat();
                CvInvoke.PyrDown(img, pyrDown);
                CvInvoke.PyrUp(pyrDown, img);
                outputImg = img.Mat;
            }

            UMat uimage = new UMat();

            CvInvoke.CvtColor(outputImg, uimage, ColorConversion.Bgr2Gray);

            CircleF[] circles = new CircleF[0];
            if (detectionVars.calcCircles)
            {
                circles = CvInvoke.HoughCircles(
                    uimage,
                    HoughType.Gradient, 1.0, 20.0,
                    detectionVars.circleCannyThreshold,
                    detectionVars.circleAccumulatorThreshold == 0 ? 1 : detectionVars.circleAccumulatorThreshold,
                    detectionVars.minradius,
                    detectionVars.maxRadius);
            }

            #region Canny and edge detection
            UMat cannyEdges = new UMat();
            CvInvoke.Canny(uimage, cannyEdges, detectionVars.lineCannyThreshold, detectionVars.cannyThresholdLinking);

            LineSegment2D[] lines = new LineSegment2D[0];
            if (detectionVars.calcLines)
            {
                lines = CvInvoke.HoughLinesP(
                    cannyEdges,
                    1,                           //Distance resolution in pixel-related units
                    Math.PI / 45.0,              //Angle resolution measured in radians.
                    detectionVars.lineThreshold, //threshold
                    detectionVars.minLineWidth,  //min Line width
                    10);                         //gap between lines
            }
            #endregion

            #region Find triangles and rectangles

            List <RotatedRect> boxList = new List <RotatedRect>(); //a box is a rotated rectangle

            if (detectionVars.calcRectTri)
            {
                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint()) {
                    CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
                    int count = contours.Size;
                    for (int i = 0; i < count; i++)
                    {
                        using (VectorOfPoint contour = contours[i])
                            using (VectorOfPoint approxContour = new VectorOfPoint()) {
                                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                                if (CvInvoke.ContourArea(approxContour, false) > 250)   //only consider contours with area greater than 250
                                {
                                    if (approxContour.Size == 4)                        //The contour has 4 vertices.
                                    {
                                        #region determine if all the angles in the contour are within [80, 100] degree
                                        bool            isRectangle = true;
                                        Point[]         pts         = approxContour.ToArray();
                                        LineSegment2D[] edges       = PointCollection.PolyLine(pts, true);

                                        for (int j = 0; j < edges.Length; j++)
                                        {
                                            double angle = Math.Abs(
                                                edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                                            if (angle < 80 || angle > 100)
                                            {
                                                isRectangle = false;
                                                break;
                                            }
                                        }
                                        #endregion

                                        if (isRectangle)
                                        {
                                            boxList.Add(CvInvoke.MinAreaRect(approxContour));
                                        }
                                    }
                                }
                            }
                    }
                }
            }

            #endregion

            Image <Bgra, Byte> alphaImgShape = new Image <Bgra, byte>(img.Size.Width, img.Size.Height, new Bgra(0, 0, 0, .5));
            Mat alphaimg = new Mat();
            CvInvoke.CvtColor(img, alphaimg, ColorConversion.Bgr2Bgra);
            #region draw rectangles and triangles
            if (detectionVars.calcRectTri)
            {
                Image <Bgr, Byte> triangleRectangleImage = new Image <Bgr, Byte>(img.Size);

                foreach (RotatedRect box in boxList)
                {
                    CvInvoke.Polylines(triangleRectangleImage, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(0, 255, 0).MCvScalar, 2);
                }

                CvInvoke.AddWeighted(alphaImgShape, .5, BlackTransparent(triangleRectangleImage), .5, 0, alphaImgShape);


                if (CudaInvoke.HasCuda)
                {
                    using (GpuMat gMatSrc = new GpuMat())
                        using (GpuMat gMatSrc2 = new GpuMat())
                            using (GpuMat gMatDst = new GpuMat()) {
                                gMatSrc.Upload(alphaimg);
                                gMatSrc2.Upload(alphaImgShape);
                                CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                                gMatDst.Download(alphaimg);
                            }
                }
                else
                {
                    img = Overlay(img, alphaImgShape);
                }
            }
            #endregion

            #region draw circles
            if (detectionVars.calcCircles)
            {
                Image <Bgr, Byte> circleImage = new Image <Bgr, Byte>(img.Size);
                foreach (CircleF circle in circles.Take(10))
                {
                    CvInvoke.Circle(circleImage, Point.Round(circle.Center), (int)circle.Radius, new Bgr(0, 255, 0).MCvScalar, 2);
                }

                alphaImgShape = new Image <Bgra, byte>(img.Size.Width, img.Size.Height, new Bgra(0, 0, 0, .5));
                CvInvoke.AddWeighted(alphaImgShape, .7, BlackTransparent(circleImage), .5, 0, alphaImgShape);
                if (CudaInvoke.HasCuda)
                {
                    using (GpuMat gMatSrc = new GpuMat())
                        using (GpuMat gMatSrc2 = new GpuMat())
                            using (GpuMat gMatDst = new GpuMat()) {
                                gMatSrc.Upload(alphaimg);
                                gMatSrc2.Upload(alphaImgShape);
                                CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                                gMatDst.Download(alphaimg);
                            }
                }
                else
                {
                    img = Overlay(img, alphaImgShape);
                }
            }
            #endregion

            #region draw lines

            if (detectionVars.calcLines)
            {
                Image <Bgr, Byte> lineImage = new Image <Bgr, Byte>(img.Size);
                foreach (LineSegment2D line in lines)
                {
                    CvInvoke.Line(lineImage, line.P1, line.P2, new Bgr(0, 255, 0).MCvScalar, 2);
                }

                alphaImgShape = new Image <Bgra, byte>(img.Size.Width, img.Size.Height, new Bgra(0, 0, 0, .5));
                CvInvoke.AddWeighted(alphaImgShape, .5, BlackTransparent(lineImage), .5, 0, alphaImgShape);
                if (CudaInvoke.HasCuda)
                {
                    using (GpuMat gMatSrc = new GpuMat())
                        using (GpuMat gMatSrc2 = new GpuMat())
                            using (GpuMat gMatDst = new GpuMat()) {
                                gMatSrc.Upload(alphaimg);
                                gMatSrc2.Upload(alphaImgShape);
                                CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                                gMatDst.Download(alphaimg);
                            }
                }
                else
                {
                    img = Overlay(img, alphaImgShape);
                }
            }
            #endregion

            GC.Collect();   // first time I've had to use this, but this program will otherwise use as much memory as possible, resulting in corruptions

            return(alphaimg.Bitmap ?? frame);
        }
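ShapeDetectionVariables is a parameter bag that is not shown; its members can be inferred from the method body. A sketch (field names are taken from the usage above; the types are assumptions consistent with the CvInvoke signatures):

        // Hypothetical parameter holder inferred from PerformShapeDetection.
        public class ShapeDetectionVariables
        {
            public bool   calcCircles, calcLines, calcRectTri;
            public double circleCannyThreshold;               // HoughCircles Canny threshold
            public double circleAccumulatorThreshold;         // HoughCircles accumulator threshold
            public int    minradius, maxRadius;               // circle radius bounds in pixels
            public double lineCannyThreshold;                 // Canny low threshold for lines
            public double cannyThresholdLinking;              // Canny high (linking) threshold
            public int    lineThreshold;                      // HoughLinesP vote threshold
            public double minLineWidth;                       // HoughLinesP minimum line length
        }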
Example #20
        public Image<Gray, byte> Solve(Image<Gray, byte> left, Image<Gray, byte> right)
        {
            var size = left.Size;

            using (var leftGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var rightGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var disparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparity16S = new Mat(size, DepthType.Cv16S, 1))
            using (var filteredDisparity8U = new Mat(size, DepthType.Cv8U, 1))
            {
                leftGpu.Upload(left.Mat);
                rightGpu.Upload(right.Mat);

                algorithm.FindStereoCorrespondence(leftGpu, rightGpu, disparityGpu);

                filter.Apply(disparityGpu, leftGpu, filteredDisparityGpu);

                filteredDisparityGpu.Download(filteredDisparity16S);

                CvInvoke.MinMaxLoc(filteredDisparity16S, ref min, ref max, ref minPosition, ref maxPosition);

                filteredDisparity16S.ConvertTo(filteredDisparity8U, DepthType.Cv8U, 255.0/(Max - Min));

                return new Image<Gray, byte>(filteredDisparity8U.Bitmap);
            }
        }
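The matcher, the disparity filter and the min/max members used by Solve are class fields. A plausible declaration set, assuming Emgu CV 3.x (the filter type and all constructor arguments are assumptions):

        private readonly CudaStereoBM algorithm = new CudaStereoBM(64, 19);
        private readonly CudaDisparityBilateralFilter filter =
            new CudaDisparityBilateralFilter(64, 3, 1);       // ndisp, radius, iterations
        private double min, max;                              // filled by CvInvoke.MinMaxLoc
        private Point  minPosition, maxPosition;
        private double Min { get { return min; } }
        private double Max { get { return max; } }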
Example #21
        static void Run()
        {
            Image <Gray, Byte> modelImage    = new Image <Gray, byte>("box.png");
            Image <Gray, Byte> observedImage = new Image <Gray, byte>("box_in_scene.png");
            Stopwatch          watch;
            HomographyMatrix   homography = null;

            SURFDetector surfCPU = new SURFDetector(500, false);

            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;
            Matrix <float>   dist;
            Matrix <byte>    mask;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, 2, 1))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuMatchIndices.Size, 1))
                                                {
                                                    observedKeyPoints = new VectorOfKeyPoint();
                                                    surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                    matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                                                    indices = new Matrix <int>(gpuMatchIndices.Size);
                                                    dist    = new Matrix <float>(indices.Size);
                                                    gpuMatchIndices.Download(indices);
                                                    gpuMatchDist.Download(dist);

                                                    mask = new Matrix <byte>(dist.Rows, 1);

                                                    mask.SetValue(255);

                                                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                                                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                                                    if (nonZeroCount >= 4)
                                                    {
                                                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                        if (nonZeroCount >= 4)
                                                        {
                                                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                                                        }
                                                    }

                                                    watch.Stop();
                                                }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                //MKeyPoint[] kpts = modelKeyPoints.ToArray();
                Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                Matrix <float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

                BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
                matcher.Add(modelDescriptors);
                int k = 2;
                indices = new Matrix <int>(observedDescriptors.Rows, k);
                dist    = new Matrix <float>(observedDescriptors.Rows, k);
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

                mask = new Matrix <byte>(dist.Rows, 1);

                mask.SetValue(255);

                Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                    }
                }

                watch.Stop();
            }

            //Draw the matched keypoints
            Image <Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                                                     indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

            #region draw the projected region on the image
            if (homography != null)
            { //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);

                result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
            }
            #endregion

            ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
        }
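The 0.8 passed to VoteForUniqueness above is Lowe's ratio-test threshold: with k = 2, a match is kept only when the best distance is clearly smaller than the second best. A conceptual sketch of that filter over the dist/mask matrices used above (illustrative only, not the library's implementation):

static void RatioTest(Matrix<float> dist, double threshold, Matrix<byte> mask)
{
    for (int i = 0; i < dist.Rows; i++)
    {
        // dist[i, 0] is the best match distance, dist[i, 1] the second best
        if (dist[i, 0] > threshold * dist[i, 1])
            mask[i, 0] = 0; // ambiguous match, reject it
    }
}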
Beispiel #22
0
      /*
      public void TestPyrLK()
      {
         const int MAX_CORNERS = 500;
         Capture c = new Capture();
         ImageViewer viewer = new ImageViewer();
         Image<Gray, Byte> oldImage = null;
         Image<Gray, Byte> currentImage = null;
         Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
         {
            if (oldImage == null)
            {
               oldImage = c.QueryGrayFrame();
            }

            currentImage = c.QueryGrayFrame();
            Features2D.GFTTDetector detector = new Features2D.GFTTDetector(MAX_CORNERS, 0.05, 3, 3);
            
            PointF[] features = oldImage.GoodFeaturesToTrack(MAX_CORNERS, 0.05, 3.0, 3, false, 0.04)[0];
            PointF[] shiftedFeatures;
            Byte[] status;
            float[] trackErrors;
            CvInvoke.CalcOpticalFlowPyrLK(oldImage, currentImage, features, new Size(9, 9), 3, new MCvTermCriteria(20, 0.05),
               out shiftedFeatures, out status, out trackErrors);

            Image<Gray, Byte> displayImage = currentImage.Clone();
            for (int i = 0; i < features.Length; i++)
               displayImage.Draw(new LineSegment2DF(features[i], shiftedFeatures[i]), new Gray(), 2);

            oldImage = currentImage;
            viewer.Image = displayImage;
         });
         viewer.ShowDialog();
      }*/

     
      public void TestPyrLKGPU()
      {
         if (!CudaInvoke.HasCuda)
            return;

         const int MAX_CORNERS = 500;
         Capture c = new Capture();
         ImageViewer viewer = new ImageViewer();
         GpuMat oldImage = null;
         GpuMat currentImage = null;
         using (CudaGoodFeaturesToTrackDetector detector = new CudaGoodFeaturesToTrackDetector(DepthType.Cv8U, 1, MAX_CORNERS, 0.05, 3.0, 3, false, 0.04))
         using (CudaDensePyrLKOpticalFlow flow = new CudaDensePyrLKOpticalFlow(new Size(21, 21), 3, 30, false))
         {
            Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
            {
               if (oldImage == null)
               {
                  Mat bgrFrame = c.QueryFrame();
                  using (GpuMat oldBgrImage = new GpuMat(bgrFrame))
                  {
                     oldImage = new GpuMat();
                     CudaInvoke.CvtColor(oldBgrImage, oldImage, ColorConversion.Bgr2Gray);
                  }
               }

               using (Mat tmpFrame = c.QueryFrame())
               using (GpuMat tmp = new GpuMat(tmpFrame))
               {
                  currentImage = new GpuMat();
                  CudaInvoke.CvtColor(tmp, currentImage, ColorConversion.Bgr2Gray);
               }
               using (GpuMat f = new GpuMat())
               using (GpuMat vertex = new GpuMat())
               using (GpuMat colors = new GpuMat())
               using (GpuMat corners = new GpuMat())
               {
                  flow.Calc(oldImage, currentImage, f);

                  //CudaInvoke.CreateOpticalFlowNeedleMap(u, v, vertex, colors);
                  detector.Detect(oldImage, corners, null);
                  //GpuMat<float> detector.Detect(oldImage, null);
                  /*
                  //PointF[] features = oldImage.GoodFeaturesToTrack(MAX_CORNERS, 0.05, 3.0, 3, false, 0.04)[0];
                  PointF[] shiftedFeatures;
                  Byte[] status;
                  float[] trackErrors;
                  OpticalFlow.PyrLK(oldImage, currentImage, features, new Size(9, 9), 3, new MCvTermCriteria(20, 0.05),
                     out shiftedFeatures, out status, out trackErrors);
                  */

                  Mat displayImage = new Mat();
                  currentImage.Download(displayImage);
                      
                  /*
                  for (int i = 0; i < features.Length; i++)
                     displayImage.Draw(new LineSegment2DF(features[i], shiftedFeatures[i]), new Gray(), 2);
                  */
                  oldImage = currentImage;
                  viewer.Image = displayImage;
               }
            });
            viewer.ShowDialog();
         }
      }
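TestPyrLKGPU above computes a dense flow field and detects corners but never actually tracks them; the sparse tracking that the commented-out code gestures at would look roughly like the following. This is a sketch under the assumption that the Emgu.CV.Cuda wrapper CudaSparsePyrLKOpticalFlow exposes a Calc overload taking previous points, next points, and a status array; verify the exact signature against your Emgu version.

// hypothetical continuation inside TestPyrLKGPU, after detector.Detect(oldImage, corners, null)
using (CudaSparsePyrLKOpticalFlow sparseFlow = new CudaSparsePyrLKOpticalFlow(new Size(21, 21), 3, 30, false))
using (GpuMat nextPts = new GpuMat())
using (GpuMat status = new GpuMat())
{
    // track the detected corners from the old frame into the current one
    sparseFlow.Calc(oldImage, currentImage, corners, nextPts, status);
}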
Beispiel #24
0
      public void TestCudaPyrLKOpticalFlow()
      {
         if (!CudaInvoke.HasCuda)
            return;
         Image<Gray, Byte> prevImg, currImg;
         AutoTestVarious.OpticalFlowImage(out prevImg, out currImg);
         Mat flow = new Mat();
         CudaDensePyrLKOpticalFlow opticalflow = new CudaDensePyrLKOpticalFlow(new Size(21, 21), 3, 30, false);
         using (CudaImage<Gray, Byte> prevGpu = new CudaImage<Gray, byte>(prevImg))
         using (CudaImage<Gray, byte> currGpu = new CudaImage<Gray, byte>(currImg))
         using (GpuMat flowGpu = new GpuMat())
         {
            opticalflow.Calc(prevGpu, currGpu, flowGpu);

            flowGpu.Download(flow);
         }
         
      }
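The downloaded flow Mat is 2-channel 32-bit float (x and y displacement per pixel). A minimal sketch for turning it into something viewable, assuming the standard CvInvoke.Split and CartToPolar APIs and the flow Mat from the test above:

using (VectorOfMat channels = new VectorOfMat())
using (Mat magnitude = new Mat())
using (Mat angle = new Mat())
{
    CvInvoke.Split(flow, channels); // channels[0] = x flow, channels[1] = y flow
    CvInvoke.CartToPolar(channels[0], channels[1], magnitude, angle);
    // magnitude can now be min-max scaled to 8U and shown as a grayscale image
}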
Beispiel #25
0
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)
        {
            Stopwatch watch;
            HomographyMatrix homography = null;

            SURFDetector surfCPU = new SURFDetector (500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;
            if (GpuInvoke.HasCuda) {
                GpuSURFDetector surfGPU = new GpuSURFDetector (surfCPU.SURFParams, 0.01f);
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte> (modelImage))
                    //extract features from the object image
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw (gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw (gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float> (DistanceType.L2)) {
                    modelKeyPoints = new VectorOfKeyPoint ();
                    surfGPU.DownloadKeypoints (gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew ();

                    // extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte> (observedImage))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw (gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw (gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte> (gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream ()) {
                        matcher.KnnMatchSingle (gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int> (gpuMatchIndices.Size);
                        mask = new Matrix<byte> (gpuMask.Size);

                        //gpu implementation of VoteForUniqueness
                        using (GpuMat<float> col0 = gpuMatchDist.Col (0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col (1)) {
                            GpuInvoke.Multiply (col1, new MCvScalar (uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare (col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint ();
                        surfGPU.DownloadKeypoints (gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks
                        //we can perform other CPU-intensive work here while waiting for the stream to complete
                        stream.WaitForCompletion ();

                        gpuMask.Download (mask);
                        gpuMatchIndices.Download (indices);

                        if (GpuInvoke.CountNonZero (gpuMask) >= 4) {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                        watch.Stop ();
                    }
                }
            } else {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw (modelImage, null);
                Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw (modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew ();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw (observedImage, null);
                Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw (observedImage, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float> (DistanceType.L2);
                matcher.Add (modelDescriptors);

                indices = new Matrix<int> (observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float> (observedDescriptors.Rows, k)) {
                    matcher.KnnMatch (observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte> (dist.Rows, 1);
                    mask.SetValue (255);
                    Features2DToolbox.VoteForUniqueness (dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero (mask);
                if (nonZeroCount >= 4) {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }

                watch.Stop ();
            }

            //Draw the matched keypoints
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches (modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                          indices, new Bgr (255, 255, 255), new Bgr (255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null) {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF (rect.Left, rect.Bottom),
                    new PointF (rect.Right, rect.Bottom),
                    new PointF (rect.Right, rect.Top),
                    new PointF (rect.Left, rect.Top)
                };
                homography.ProjectPoints (pts);

                result.DrawPolyline (Array.ConvertAll<PointF, Point> (pts, Point.Round), true, new Bgr (Color.Red), 5);
            }
            #endregion

            matchTime = watch.ElapsedMilliseconds;

            return result;
        }
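A minimal usage sketch for the Draw helper above, reusing the box/box_in_scene test images that appear earlier in this collection (file availability is an assumption):

long matchTime;
using (Image<Gray, Byte> model = new Image<Gray, byte>("box.png"))
using (Image<Gray, byte> observed = new Image<Gray, byte>("box_in_scene.png"))
{
    Image<Bgr, Byte> result = Draw(model, observed, out matchTime);
    ImageViewer.Show(result, String.Format("Matched in {0} milliseconds", matchTime));
}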
        public static void FindMatch(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix<int> indices, out Matrix<byte> mask, out HomographyMatrix homography)
        {
            int k = 2;
            double uniquenessThreshold = 0.8;
            SURFDetector surfCPU = new SURFDetector(500, false);
            Stopwatch watch;
            homography = null;
            #if !IOS
            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
                //extract features from the object image
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))
                {
                    modelKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte>(gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream())
                    {
                        matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int>(gpuMatchIndices.Size);
                        mask = new Matrix<byte>(gpuMask.Size);

                        //gpu implementation of VoteForUniqueness
                        using (GpuMat<float> col0 = gpuMatchDist.Col(0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col(1))
                        {
                            GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint();
                        surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks
                        //we can perform other CPU-intensive work here while waiting for the stream to complete
                        stream.WaitForCompletion();

                        gpuMask.Download(mask);
                        gpuMatchIndices.Download(indices);

                        if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                        {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                        watch.Stop();
                    }
                }
            }
            else
            #endif
            {
                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix<int>(observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }

                watch.Stop();
            }
            matchTime = watch.ElapsedMilliseconds;
        }
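FindMatch returns everything Draw needs through out parameters; a hedged usage sketch, given two Image<Gray, Byte> inputs (variable names are illustrative):

long matchTime;
VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
Matrix<int> indices;
Matrix<byte> mask;
HomographyMatrix homography;
FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, out indices, out mask, out homography);
if (homography != null)
{
    // at least 4 matches survived the uniqueness and size/orientation votes
}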
Beispiel #28
0
        public static bool FindModelImageInObservedImage(Image <Gray, byte> modelImage, Image <Gray, byte> observedImage)
        {
            var surfCpu = new SURFDetector(500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;

            Matrix <byte> mask;
            int           k = 2;
            double        uniquenessThreshold = 0.8;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGpu = new GpuSURFDetector(surfCpu.SURFParams, 0.01f);
                using (GpuImage <Gray, byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGpu.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGpu.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGpu.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);

                                // extract features from the observed image
                                using (GpuImage <Gray, byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGpu.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGpu.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))
                                                        using (var stream = new Emgu.CV.GPU.Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //gpu implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                                                                }

                                                            observedKeyPoints = new VectorOfKeyPoint();
                                                            surfGpu.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                            //wait for the stream to complete its tasks
                                                            //we can perform other CPU-intensive work here while waiting for the stream to complete
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                                                                }
                                                                if ((double)nonZeroCount / mask.Height > 0.02)
                                                                {
                                                                    return(true);
                                                                }
                                                            }
                                                        }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCpu.DetectKeyPointsRaw(modelImage, null);
                Matrix <float> modelDescriptors = surfCpu.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                // extract features from the observed image
                observedKeyPoints = surfCpu.DetectKeyPointsRaw(observedImage, null);
                Matrix <float>            observedDescriptors = surfCpu.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
                BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix <int>(observedDescriptors.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                }

                if ((double)nonZeroCount / mask.Height > 0.02)
                {
                    return(true);
                }
            }

            //Draw the matched keypoints
            //var result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, indices, new Bgr(0, 0, 255), new Bgr(255, 0, 0), mask, Features2DToolbox.KeypointDrawType.DEFAULT);
            //result.Save( @"C:\Users\D.Markachev\Desktop\bleh-keypoints.jpg" );

            return(false);
        }
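FindModelImageInObservedImage reduces the whole matching pipeline to a boolean: it returns true only when the surviving matches exceed 2% of the mask height. A hedged usage sketch with the test images used elsewhere in this collection:

bool found = FindModelImageInObservedImage(
    new Image<Gray, byte>("box.png"),
    new Image<Gray, byte>("box_in_scene.png"));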
        private Image <Bgr, byte> Match(Image <Bgr, byte> image1, Image <Bgr, byte> image2, int flag)
        {
            HomographyMatrix homography      = null;
            SURFDetector     surfDetectorCPU = new SURFDetector(500, false);

            int    k = 2;           //number of nearest matches to find between image1 and image2
            double uniquenessThreshold = 0.8;

            Matrix <int>  indices;
            Matrix <byte> mask;

            VectorOfKeyPoint KeyPointsImage1;
            VectorOfKeyPoint KeyPointsImage2;

            Image <Gray, Byte> Image1G = image1.Convert <Gray, Byte>();
            Image <Gray, Byte> Image2G = image2.Convert <Gray, Byte>();

            if (GpuInvoke.HasCuda)      //with CUDA the GPU can be used for general-purpose processing (not just graphics), speeding things up
            {
                Console.WriteLine("Using CUDA");
                GpuSURFDetector surfDetectorGPU = new GpuSURFDetector(surfDetectorCPU.SURFParams, 0.01f);

                // extract features from Image1
                using (GpuImage <Gray, Byte> gpuImage1 = new GpuImage <Gray, byte>(Image1G))                                                     //convert CPU input image to GPUImage(greyscale)
                    using (GpuMat <float> gpuKeyPointsImage1 = surfDetectorGPU.DetectKeyPointsRaw(gpuImage1, null))                              //find key points for image
                        using (GpuMat <float> gpuDescriptorsImage1 = surfDetectorGPU.ComputeDescriptorsRaw(gpuImage1, null, gpuKeyPointsImage1)) //calculate descriptor for each key point
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))                     //create a new matcher object
                            {
                                KeyPointsImage1 = new VectorOfKeyPoint();
                                surfDetectorGPU.DownloadKeypoints(gpuKeyPointsImage1, KeyPointsImage1);                                 //copy the Matrix from GPU to CPU

                                // extract features from Image2
                                using (GpuImage <Gray, Byte> gpuImage2 = new GpuImage <Gray, byte>(Image2G))
                                    using (GpuMat <float> gpuKeyPointsImage2 = surfDetectorGPU.DetectKeyPointsRaw(gpuImage2, null))
                                        using (GpuMat <float> gpuDescriptorsImage2 = surfDetectorGPU.ComputeDescriptorsRaw(gpuImage2, null, gpuKeyPointsImage2))

                                            //for each descriptor of each image2 , we find k best matching points and their distances from image1 descriptors

                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuDescriptorsImage2.Size.Height, k, 1, true))      //stores indices of k best mathces
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuDescriptorsImage2.Size.Height, k, 1, true)) //stores distance of k best matches

                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))               //stores result of comparison
                                                        using (Stream stream = new Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuDescriptorsImage2, gpuDescriptorsImage1, gpuMatchIndices, gpuMatchDist, k, null, stream); //matching descriptors of image2 to image1 and storing the k best indices and corresponding distances

                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //gpu implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream); //passing a stream makes the call asynchronous
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);         //a match survives only if col0 <= 0.8 * col1, i.e. the best distance is clearly smaller than the second best
                                                                }

                                                            KeyPointsImage2 = new VectorOfKeyPoint();
                                                            surfDetectorGPU.DownloadKeypoints(gpuKeyPointsImage2, KeyPointsImage2);

                                                            //wait for the stream to complete its tasks
                                                            //we can perform other CPU-intensive work here while waiting for the stream to complete
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(KeyPointsImage1, KeyPointsImage2, indices, mask, 1.5, 20); //vote on consistent scale and rotation; returns how many matches remain in the mask
                                                                //we can create a homography matrix only if we have at least 4 matching points
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(KeyPointsImage1, KeyPointsImage2, indices, mask, 2);
                                                                }
                                                            }
                                                        }
                            }
            }
            else
            {
                Console.WriteLine("No CUDA");
                //extract features from image2
                KeyPointsImage1 = new VectorOfKeyPoint();
                Matrix <float> DescriptorsImage1 = surfDetectorCPU.DetectAndCompute(Image1G, null, KeyPointsImage1);

                //extract features from image1
                KeyPointsImage2 = new VectorOfKeyPoint();
                Matrix <float>            DescriptorsImage2 = surfDetectorCPU.DetectAndCompute(Image2G, null, KeyPointsImage2);
                BruteForceMatcher <float> matcher           = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(DescriptorsImage1);

                indices = new Matrix <int>(DescriptorsImage2.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(DescriptorsImage2.Rows, k))
                {
                    matcher.KnnMatch(DescriptorsImage2, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(KeyPointsImage1, KeyPointsImage2, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(KeyPointsImage1, KeyPointsImage2, indices, mask, 2);
                    }
                }
            }
            Image <Bgr, Byte> mImage = image1.Convert <Bgr, Byte>();
            Image <Bgr, Byte> oImage = image2.Convert <Bgr, Byte>();
            Image <Bgr, Byte> result = new Image <Bgr, byte>(mImage.Width + oImage.Width, mImage.Height);

            //Image<Bgr, Byte> temp = Features2DToolbox.DrawMatches(image1, KeyPointsImage1, image2, KeyPointsImage2, indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = image1.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };

                homography.ProjectPoints(pts);

                HomographyMatrix origin = new HomographyMatrix();                //copy the left image into the mosaic using an identity homography (no real shift of the origin)
                origin.SetIdentity();
                origin.Data[0, 2] = 0;
                origin.Data[1, 2] = 0;
                Image <Bgr, Byte> mosaic = new Image <Bgr, byte>(mImage.Width + oImage.Width, mImage.Height * 2);

                Image <Bgr, byte> warp_image = mosaic.Clone();
                mosaic = mImage.WarpPerspective(origin, mosaic.Width, mosaic.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_DEFAULT, new Bgr(0, 0, 0));

                warp_image = oImage.WarpPerspective(homography, warp_image.Width, warp_image.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Bgr(200, 0, 0));
                Image <Gray, byte> warp_image_mask = oImage.Convert <Gray, byte>();
                warp_image_mask.SetValue(new Gray(255));
                Image <Gray, byte> warp_mosaic_mask = mosaic.Convert <Gray, byte>();
                warp_mosaic_mask.SetZero();
                warp_mosaic_mask = warp_image_mask.WarpPerspective(homography, warp_mosaic_mask.Width, warp_mosaic_mask.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Gray(0));

                warp_image.Copy(mosaic, warp_mosaic_mask);
                if (flag == 1)
                {
                    Console.WriteLine("Using Image Blending");
                    return(blend(mosaic, warp_image, warp_mosaic_mask, 2));
                }
                else
                {
                    Console.WriteLine("No Image Blending");
                    return(mosaic);
                }
            }
            return(null);
        }
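A hedged usage sketch for Match above (it is an instance method, so it is called on the containing class; the file names are assumptions). Passing flag == 1 runs the blend step, any other value returns the raw mosaic, and null comes back when no homography could be estimated:

Image<Bgr, byte> left = new Image<Bgr, byte>("left.jpg");
Image<Bgr, byte> right = new Image<Bgr, byte>("right.jpg");
Image<Bgr, byte> mosaic = Match(left, right, 1);
if (mosaic != null)
    ImageViewer.Show(mosaic, "Stitched mosaic");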
Beispiel #31
0
      public void TestCudaBroxOpticalFlow()
      {
         if (!CudaInvoke.HasCuda)
            return;
         Image<Gray, Byte> prevImg, currImg;
         AutoTestVarious.OpticalFlowImage(out prevImg, out currImg);
         Mat flow = new Mat();
         CudaBroxOpticalFlow opticalflow = new CudaBroxOpticalFlow();
         using (CudaImage<Gray, float> prevGpu = new CudaImage<Gray, float>(prevImg.Convert<Gray, float>()))
         using (CudaImage<Gray, float> currGpu = new CudaImage<Gray, float>(currImg.Convert<Gray, float>()))
         using (GpuMat flowGpu = new GpuMat())
         {
            opticalflow.Calc(prevGpu, currGpu, flowGpu);

            flowGpu.Download(flow);
         }
      }