Exemplo n.º 1
0
 private static void ConvertColor(IInputArray src, IOutputArray dest, Type srcColor, Type destColor, int dcn, Size size, Stream stream)
 {
     try
     {
         // Fast path: OpenCV has a single-step conversion code for this color pair.
         CudaInvoke.CvtColor(src, dest, CvToolbox.GetColorCvtCode(srcColor, destColor), dcn, stream);
     }
     catch
     {
         // No direct conversion — fall back to a two-step route through Bgr.
         try
         {
             // The intermediate Bgr buffer must outlive the work queued on the stream,
             // so we block on the stream before the using-block releases it.
             using (CudaImage<Bgr, TDepth> intermediate = new CudaImage<Bgr, TDepth>(size))
             {
                 CudaInvoke.CvtColor(src, intermediate, CvToolbox.GetColorCvtCode(srcColor, typeof(Bgr)), 3, stream);
                 CudaInvoke.CvtColor(intermediate, dest, CvToolbox.GetColorCvtCode(typeof(Bgr), destColor), dcn, stream);
                 stream.WaitForCompletion();
             }
         }
         catch (Exception excpt)
         {
             // Neither the direct nor the two-step route exists for this pair.
             throw new NotSupportedException(String.Format(
                                                 "Conversion from CudaImage<{0}, {1}> to CudaImage<{2}, {3}> is not supported by OpenCV: {4}",
                                                 srcColor.ToString(),
                                                 typeof(TDepth).ToString(),
                                                 destColor.ToString(),
                                                 typeof(TDepth).ToString(),
                                                 excpt.Message));
         }
     }
 }
Exemplo n.º 2
0
        /// <summary>
        /// Finds matching points in the faces using SURF (CUDA path only; no-op when CUDA is unavailable)
        /// </summary>
        /// <param name="modelImage">
        /// The model image.
        /// </param>
        /// <param name="observedImage">
        /// The observed image.
        /// </param>
        /// <param name="matchTime">
        /// Elapsed matching time in milliseconds (0 when the CUDA path is not taken).
        /// </param>
        /// <param name="modelKeyPoints">
        /// The model key points.
        /// </param>
        /// <param name="observedKeyPoints">
        /// The observed key points.
        /// </param>
        /// <param name="matches">
        /// The matches.
        /// </param>
        /// <param name="mask">
        /// The mask.
        /// </param>
        /// <param name="homography">
        /// The homography (null when fewer than 4 consistent matches survive voting).
        /// </param>
        /// <param name="score">
        /// The score (total number of knn matches found).
        /// </param>
        private void FindMatch(
            Mat modelImage,
            Mat observedImage,
            out long matchTime,
            out VectorOfKeyPoint modelKeyPoints,
            out VectorOfKeyPoint observedKeyPoints,
            VectorOfVectorOfDMatch matches,
            out Mat mask,
            out Mat homography,
            out long score)
        {
            const int k = 2;                      // nearest neighbours per query descriptor
            const double uniquenessThreshold = 5; // threshold handed to VoteForUniqueness

            homography = null;
            mask       = null;
            score      = 0;
            matchTime  = 0;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            if (Controller.Instance.Cuda)
            {
                // CudaSURF is IDisposable — the original leaked it. The unused
                // gpuMatchIndices/gpuMatchDist buffers and the idle Stream (KnnMatch was
                // invoked with a null stream) have been removed: they allocated GPU memory
                // that only commented-out code ever referenced.
                using (CudaSURF surfGPU = new CudaSURF(700f, 4, 2, false))
                using (CudaImage<Gray, byte> gpuModelImage = new CudaImage<Gray, byte>(modelImage))
                using (GpuMat gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                {
                    surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                    Stopwatch watch = Stopwatch.StartNew();

                    // Extract features from the observed image and match against the model.
                    using (CudaImage<Gray, byte> gpuObservedImage = new CudaImage<Gray, byte>(observedImage))
                    using (GpuMat gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    {
                        matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k, null);

                        // Start with every match enabled; voting below clears the bad ones.
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));

                        surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        // A homography needs at least 4 point correspondences.
                        if (CudaInvoke.CountNonZero(mask) >= 4)
                        {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(
                                modelKeyPoints,
                                observedKeyPoints,
                                matches,
                                mask,
                                1.5,
                                20);
                            if (nonZeroCount >= 4)
                            {
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(
                                    modelKeyPoints,
                                    observedKeyPoints,
                                    matches,
                                    mask,
                                    2);
                            }
                        }

                        watch.Stop();
                    }

                    // Originally an O(n) loop incrementing score once per match.
                    score = matches.Size;

                    // Originally the stopwatch result was discarded and matchTime was always 0.
                    matchTime = watch.ElapsedMilliseconds;
                }
            }
        }
Exemplo n.º 3
0
      /// <summary>
      /// Verifies that a GPU box-min (erode) followed by box-max (dilate) matches the
      /// CPU erode/dilate result. GPU work is queued asynchronously on a stream while the
      /// CPU version runs, then the two results are compared.
      /// </summary>
      public void TestErodeDilate()
      {
         if (!CudaInvoke.HasCuda)
            return;

         int morphIter = 2;
         Size ksize = new Size(morphIter * 2 + 1, morphIter * 2 + 1);

         // Image<> is IDisposable; the original never released it.
         using (Image<Gray, Byte> image = new Image<Gray, byte>(640, 320))
         {
            image.Draw(new CircleF(new PointF(200, 200), 30), new Gray(255.0), 4);

            using (GpuMat cudaImage = new GpuMat(image))
            using (GpuMat cudaTemp = new GpuMat())
            using (Stream stream = new Stream())
            using (CudaBoxMaxFilter dilate = new CudaBoxMaxFilter(DepthType.Cv8U, 1, ksize, new Point(-1, -1), CvEnum.BorderType.Default, new MCvScalar()))
            using (CudaBoxMinFilter erode = new CudaBoxMinFilter(DepthType.Cv8U, 1, ksize, new Point(-1, -1), CvEnum.BorderType.Default, new MCvScalar()))
            {
               // Queue the GPU version asynchronously on the stream.
               erode.Apply(cudaImage, cudaTemp, stream);
               dilate.Apply(cudaTemp, cudaImage, stream);

               // Run the CPU version in parallel with the queued GPU work.
               using (Image<Gray, Byte> temp = new Image<Gray, byte>(image.Size))
               {
                  CvInvoke.Erode(image, temp, null, new Point(-1, -1), morphIter, CvEnum.BorderType.Constant, new MCvScalar());
                  CvInvoke.Dilate(temp, image, null, new Point(-1, -1), morphIter, CvEnum.BorderType.Constant, new MCvScalar());
               }

               // Synchronize with the GPU version before comparing.
               stream.WaitForCompletion();

               // ToMat() creates a new Mat; dispose it too (the original leaked it).
               using (Mat gpuResult = cudaImage.ToMat())
               {
                  Assert.IsTrue(gpuResult.Equals(image.Mat));
               }
            }
         }
      }
Exemplo n.º 4
0
      /// <summary>
      /// Verifies that a GPU linear filter with a 3x3 Laplacian kernel produces the same
      /// result as the dedicated CUDA Laplacian filter on a random image.
      /// </summary>
      public void TestConvolutionAndLaplace()
      {
         // Guard clause for consistency with the other CUDA tests in this file.
         if (!CudaInvoke.HasCuda)
            return;

         // Image<> is IDisposable; the original never released it.
         using (Image<Gray, Byte> image = new Image<Gray, byte>(300, 400))
         {
            image.SetRandUniform(new MCvScalar(0.0), new MCvScalar(255.0));

            // 3x3 Laplacian kernel — must match what CudaLaplacianFilter(ksize=1) applies.
            float[,] k = { {0, 1, 0},
                        {1, -4, 1},
                        {0, 1, 0}};
            using (ConvolutionKernelF kernel = new ConvolutionKernelF(k))
            using (Stream s = new Stream())
            using (GpuMat cudaImg = new GpuMat(image))
            using (GpuMat cudaLaplace = new GpuMat())
            using (CudaLinearFilter cudaLinear = new CudaLinearFilter(DepthType.Cv8U, 1, DepthType.Cv8U, 1, kernel, kernel.Center))
            using (GpuMat cudaConv = new GpuMat())
            using (CudaLaplacianFilter laplacian = new CudaLaplacianFilter(DepthType.Cv8U, 1, DepthType.Cv8U, 1, 1, 1.0))
            {
               // Queue both filters on the same stream, then wait once.
               cudaLinear.Apply(cudaImg, cudaConv, s);
               laplacian.Apply(cudaImg, cudaLaplace, s);
               s.WaitForCompletion();
               Assert.IsTrue(cudaLaplace.Equals(cudaConv));
            }
         }
      }
Exemplo n.º 5
0
      /// <summary>
      /// Round-trips a random Mat through GPU memory twice — once asynchronously via a
      /// stream and once with the blocking API — and checks the data survives intact.
      /// </summary>
      public void TestCudaUploadDownload()
      {
         if (!CudaInvoke.HasCuda)
            return;

         // Mat, GpuMat and Stream are all IDisposable; the original leaked every one.
         using (Mat m = new Mat(new Size(480, 320), DepthType.Cv8U, 3))
         {
            CvInvoke.Randu(m, new MCvScalar(), new MCvScalar(255, 255, 255));

            #region test for async download & upload
            using (Stream stream = new Stream())
            using (GpuMat gm1 = new GpuMat())
            using (Mat m2 = new Mat())
            {
               gm1.Upload(m, stream);
               gm1.Download(m2, stream);

               // Async copies are only guaranteed complete after synchronizing the stream.
               stream.WaitForCompletion();
               EmguAssert.IsTrue(m.Equals(m2));
            }
            #endregion

            #region test for blocking download & upload
            using (GpuMat gm2 = new GpuMat())
            using (Mat m3 = new Mat())
            {
               gm2.Upload(m);
               gm2.Download(m3);
               EmguAssert.IsTrue(m.Equals(m3));
            }
            #endregion
         }
      }
Exemplo n.º 6
-1
      /// <summary>
      /// Builds a synthetic image containing a random patch at a known location, then
      /// checks that CPU and GPU Sqdiff template matching both locate the patch at the
      /// same position.
      /// </summary>
      public void TestMatchTemplate()
      {
         if (!CudaInvoke.HasCuda)
            return;

         int templWidth = 50;
         int templHeight = 50;
         Point templCenter = new Point(120, 100);

         // Image<> / GpuMat are IDisposable; the original leaked all of them.
         using (Image<Bgr, Byte> randomObj = new Image<Bgr, byte>(templWidth, templHeight))
         using (Image<Bgr, Byte> img = new Image<Bgr, byte>(300, 200))
         {
            // Create a random object to serve as the template.
            randomObj.SetRandUniform(new MCvScalar(), new MCvScalar(255, 255, 255));

            // Paste the object into img, centered at templCenter, via a temporary ROI.
            Rectangle objectLocation = new Rectangle(templCenter.X - (templWidth >> 1), templCenter.Y - (templHeight >> 1), templWidth, templHeight);
            img.ROI = objectLocation;
            randomObj.Copy(img, null);
            img.ROI = Rectangle.Empty;

            // CPU reference result.
            using (Image<Gray, Single> match = img.MatchTemplate(randomObj, Emgu.CV.CvEnum.TemplateMatchingType.Sqdiff))
            {
               double[] minVal, maxVal;
               Point[] minLoc, maxLoc;
               match.MinMax(out minVal, out maxVal, out minLoc, out maxLoc);

               double gpuMinVal = 0, gpuMaxVal = 0;
               Point gpuMinLoc = Point.Empty, gpuMaxLoc = Point.Empty;
               using (GpuMat cudaImage = new GpuMat(img))
               using (GpuMat gpuRandomObj = new GpuMat(randomObj))
               using (GpuMat gpuMatch = new GpuMat())
               using (CudaTemplateMatching buffer = new CudaTemplateMatching(DepthType.Cv8U, 3, CvEnum.TemplateMatchingType.Sqdiff))
               using (Stream stream = new Stream())
               {
                  buffer.Match(cudaImage, gpuRandomObj, gpuMatch, stream);
                  stream.WaitForCompletion();
                  CudaInvoke.MinMaxLoc(gpuMatch, ref gpuMinVal, ref gpuMaxVal, ref gpuMinLoc, ref gpuMaxLoc, null);
               }

               // For Sqdiff the best match is the minimum, expected at the patch's top-left corner.
               EmguAssert.AreEqual(minLoc[0].X, templCenter.X - templWidth / 2);
               EmguAssert.AreEqual(minLoc[0].Y, templCenter.Y - templHeight / 2);
               EmguAssert.IsTrue(minLoc[0].Equals(gpuMinLoc));
               EmguAssert.IsTrue(maxLoc[0].Equals(gpuMaxLoc));
            }
         }
      }