Example #1
        public static Rectangle[] Detect(Image <Bgr, Byte> image, string cascadeFile,
                                         double scaleFactor = 1.3, int minNeighbors = 10,
                                         Emgu.CV.CvEnum.HAAR_DETECTION_TYPE detectionType = Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                         int minSize = 20, int maxSize = 0)
        {
            string cascadeFilePath = CascadeManager.GetCascade(cascadeFile);

            Size minimumSize;

            if (minSize == 0)
            {
                minimumSize = Size.Empty;
            }
            else
            {
                minimumSize = new Size(minSize, minSize);
            }

            Size maximumSize;

            if (maxSize == 0)
            {
                maximumSize = Size.Empty;
            }
            else
            {
                maximumSize = new Size(maxSize, maxSize);
            }

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier cascade = new GpuCascadeClassifier(cascadeFilePath))
                    using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                        {
                            return(cascade.DetectMultiScale(gpuGray, scaleFactor, minNeighbors, minimumSize));
                        }
            }
            else
            {
                using (HaarCascade cascade = new HaarCascade(cascadeFilePath))
                    using (Image <Gray, Byte> gray = image.Convert <Gray, Byte>())
                    {
                        gray._EqualizeHist();

                        MCvAvgComp[] detected = cascade.Detect(gray,
                                                               scaleFactor, minNeighbors,
                                                               detectionType,
                                                               minimumSize, maximumSize);

                        return((from x in detected
                                select x.rect).ToArray());
                    }
            }
        }
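A minimal sketch of a call site for the Detect wrapper above. The image path, the cascade file name, and the ImageViewer call are illustrative assumptions, not part of the original example:

        // Hypothetical caller (assumed file names); draws each detection and shows the result
        static void Main()
        {
            using (Image<Bgr, Byte> image = new Image<Bgr, Byte>("people.jpg"))
            {
                Rectangle[] faces = Detect(image, "haarcascade_frontalface_default.xml",
                                           scaleFactor: 1.2, minNeighbors: 8);
                foreach (Rectangle face in faces)
                {
                    image.Draw(face, new Bgr(Color.Red), 2);
                }
                ImageViewer.Show(image, String.Format("{0} face(s) detected", faces.Length));
            }
        }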
Example #2
        public static Rectangle[] Detect(Image<Bgr, Byte> image, string cascadeFile,
            double scaleFactor = 1.3, int minNeighbors = 10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE detectionType = Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            int minSize = 20, int maxSize = 0)
        {
            string cascadeFilePath = CascadeManager.GetCascade(cascadeFile);

            Size minimumSize;
            if (minSize == 0)
            {
                minimumSize = Size.Empty;
            }
            else
            {
                minimumSize = new Size(minSize, minSize);
            }

            Size maximumSize;
            if (maxSize == 0)
            {
                maximumSize = Size.Empty;
            }
            else
            {
                maximumSize = new Size(maxSize, maxSize);
            }

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier cascade = new GpuCascadeClassifier(cascadeFilePath))
                using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
                using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                {
                    return cascade.DetectMultiScale(gpuGray, scaleFactor, minNeighbors, minimumSize);
                }
            }
            else
            {
                using (HaarCascade cascade = new HaarCascade(cascadeFilePath))
                using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
                {
                    gray._EqualizeHist();

                    MCvAvgComp[] detected = cascade.Detect(gray,
                        scaleFactor, minNeighbors,
                        detectionType,
                        minimumSize, maximumSize);

                    return (from x in detected
                            select x.rect).ToArray();
                }
            }
        }
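Example #3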
        public IImage GetResult(IImage source, PrepareImageParameter parameter)
        {
            if (source is Image <Bgr, byte> == false)
            {
                return(null);
            }

            using (var image = new GpuImage <Bgr, byte>((Image <Bgr, byte>)source))
            {
                return(image.Convert <Gray, byte>());
            }
        }
Example #4
 public SkinDetectorGpu(int width, int height)
 {
     this.width = width;
     this.height = height;
     // GpuImage takes (rows, cols), i.e. (height, width); Image takes (width, height)
     bgrImageGpu = new GpuImage<Bgr, Byte>(height, width);
     yccImageGpu = new GpuImage<Ycc, byte>(height, width);
     skinGpu = new GpuImage<Gray, byte>(height, width);
     buffer1 = new GpuImage<Gray, byte>(height, width);
     buffer2 = new GpuImage<Gray, byte>(height, width);
     bgrImage = new Image<Bgr, Byte>(width, height);
     skin = new Image<Gray, byte>(width, height);
 }
Example #5
 public SkinDetectorGpu(int width, int height)
 {
     this.width  = width;
     this.height = height;
     bgrImageGpu = new GpuImage <Bgr, Byte>(height, width);
     yccImageGpu = new GpuImage <Ycc, byte>(height, width);
     skinGpu     = new GpuImage <Gray, byte>(height, width);
     buffer1     = new GpuImage <Gray, byte>(height, width);
     buffer2     = new GpuImage <Gray, byte>(height, width);
     bgrImage    = new Image <Bgr, Byte>(width, height);
     skin        = new Image <Gray, byte>(width, height);
 }
Example #6
        private Rectangle[] ProcessOnGpu(Image <Gray, byte> grayImage)
        {
            if (FGpuCascadeClassifier == null)
            {
                Status = "Can't load Haar file";
                return(new Rectangle[0]);
            }

            using (var gpuImage = new GpuImage <Gray, byte>(grayImage))
            {
                return(FGpuCascadeClassifier.DetectMultiScale(gpuImage, ScaleFactor, MinNeighbors, MinSize));
            }
        }
Example #7
        public Image<Gray, short> GetDispMapGPU(Image<Gray, byte> leftImg, Image<Gray, byte> rightImg, DispMapFounderParameters parameters)
        {
            var ap = (GpuStereoBMDispMapFounderParameters)parameters;

            using(var leftGpuImg = new GpuImage<Gray, byte>(leftImg))
            using(var rightGpuImg = new GpuImage<Gray, byte>(rightImg))
            using (GpuStereoBM sbm = new GpuStereoBM(
                numberOfDisparities: ap.NumberOfDisparities,
                blockSize: ap.BlockSize))
            {
                using (var dispMap = new GpuImage<Gray, byte>(leftGpuImg.Size))
                {
                    sbm.FindStereoCorrespondence(leftGpuImg, rightGpuImg, dispMap, null);
                    // download the GPU disparity map and widen it to 16-bit
                    return dispMap.ToImage().Convert<Gray, short>();
                }
            }
        }
Example #8
        static void Run()
        {
            Image <Bgr, Byte> image = new Image <Bgr, byte>("pedestrian.png");

            Stopwatch watch;

            Rectangle[] regions;

            //check if there is a compatible GPU to run pedestrian detection
            if (GpuInvoke.HasCuda)
            { //this is the GPU version
                using (GpuHOGDescriptor des = new GpuHOGDescriptor())
                {
                    des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
                        {
                            regions = des.DetectMultiScale(gpuBgra);
                        }
                }
            }
            else
            { //this is the CPU version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                    watch   = Stopwatch.StartNew();
                    regions = des.DetectMultiScale(image);
                }
            }
            watch.Stop();

            foreach (Rectangle pedestrian in regions)
            {
                image.Draw(pedestrian, new Bgr(Color.Red), 1);
            }

            ImageViewer.Show(
                image,
                String.Format("Pedestrian detection using {0} in {1} milliseconds.",
                              GpuInvoke.HasCuda ? "GPU" : "CPU",
                              watch.ElapsedMilliseconds));
        }
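Example #9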
        public static Image<Bgr, Byte> Find(Image<Bgr, Byte> image, out long processingTime)
        {
            Stopwatch watch;
            Rectangle[] regions;

            if (GpuInvoke.HasCuda)
            {
                using (GpuHOGDescriptor des = new GpuHOGDescriptor())
                {
                    des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();

                    using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
                    using (GpuImage<Bgra, Byte> gpuBgraImage = gpuImage.Convert<Bgra, Byte>())
                    {
                        regions = des.DetectMultiScale(gpuBgraImage);
                    }
                }
            }
            else
            {
                using (HOGDescriptor des = new HOGDescriptor())
                {

                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                    watch = Stopwatch.StartNew();
                    regions = des.DetectMultiScale(image);
                }
            }
            watch.Stop();

            processingTime = watch.ElapsedMilliseconds;

            foreach (Rectangle rect in regions)
            {
                image.Draw(rect, new Bgr(Color.Red), 1);
            }

            return image;
        }
Example #10
        public static void Detect(Image<Bgr, Byte> image, String faceFileName, List<Rectangle> faces, out long detectionTime)
        {
            Stopwatch watch;

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
                    using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                    {
                        Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                        faces.AddRange(faceRegion);
                    }
                    watch.Stop();
                }
            }
            else
            {
                //Read the HaarCascade objects
                using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
                    {
                        //normalizes brightness and increases contrast of the image
                        gray._EqualizeHist();

                        //Detect the faces from the gray scale image and store the locations as rectangles
                        Rectangle[] facesDetected = face.DetectMultiScale(
                           gray,
                           1.1,
                           10,
                           new Size(20, 20),
                           Size.Empty);
                        faces.AddRange(facesDetected);
                    }
                    watch.Stop();
                }
            }
            detectionTime = watch.ElapsedMilliseconds;
        }
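A possible call site for this Detect overload; the file names below are assumptions for illustration:

        // Hypothetical usage of the Detect method above (assumed paths)
        static void DetectDemo()
        {
            var faces = new List<Rectangle>();
            long detectionTime;
            using (var image = new Image<Bgr, Byte>("group.jpg"))
            {
                Detect(image, "haarcascade_frontalface_default.xml", faces, out detectionTime);
                Console.WriteLine("Found {0} face(s) in {1} ms", faces.Count, detectionTime);
            }
        }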
Example #11
        public static void Detect(Image <Bgr, Byte> image, String faceFileName, List <Rectangle> faces, out long detectionTime)
        {
            Stopwatch watch;

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                        {
                            Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                            faces.AddRange(faceRegion);
                        }
                    watch.Stop();
                }
            }
            else
            {
                //Read the HaarCascade objects
                using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (Image <Gray, Byte> gray = image.Convert <Gray, Byte>()) //Convert it to Grayscale
                    {
                        //normalizes brightness and increases contrast of the image
                        gray._EqualizeHist();

                        //Detect the faces from the gray scale image and store the locations as rectangles
                        Rectangle[] facesDetected = face.DetectMultiScale(
                            gray,
                            1.1,
                            10,
                            new Size(20, 20),
                            Size.Empty);
                        faces.AddRange(facesDetected);
                    }
                    watch.Stop();
                }
            }
            detectionTime = watch.ElapsedMilliseconds;
        }
Example #12
 public CameraParameters(double[,] A, double[,] kc, double[,] fc, double[,] cc, int Width, int Height)
 {
     Fc = new Matrix<double>(fc);
     Cc = new Matrix<double>(cc);
     CameraMatrix = new Matrix<double>(A);
     distCoeffs = new Matrix<double>(4, 1);
     for (int i = 0; i < distCoeffs.Rows; i++)
     {
         distCoeffs[i, 0] = kc[i, 0];
     }
     RectificationTransform = new Matrix<double>(3, 3);
     ProjectionMatrix = new Matrix<double>(3, 4);
     ImageWidth = Width;
     ImageHeight = Height;
     ImageRawGPU = new GpuImage<Gray, byte>(ImageHeight, ImageWidth);
     ImageRectifiedGPU = new GpuImage<Gray, byte>(ImageHeight, ImageWidth);
     ImageRectified = new Image<Gray, byte>(ImageWidth, ImageHeight);
 }
Example #13
        public Bitmap GetNextDisparityMap()
        {
            Image<Bgr, byte> leftFrame;
            Image<Bgr, byte> rightFrame;

            this.GetNextLRPair(out leftFrame, out rightFrame);

            //this.StereoMapper and this.StereoBMMapper are CPU alternatives to the GPU mapper used below
            using (var grayLeft = leftFrame.Convert<Gray, byte>())
            using (var grayRight = rightFrame.Convert<Gray, byte>())
            using (var gpuLeft = new GpuImage<Gray, byte>(grayLeft))
            using (var gpuRight = new GpuImage<Gray, byte>(grayRight))
            using (var gpuDispMap = new GpuImage<Gray, byte>(cols: leftFrame.Width, rows: leftFrame.Height))
            {
                this.GpuStereoMapper.FindStereoCorrespondence(gpuLeft, gpuRight, gpuDispMap, null);
                return gpuDispMap.ToImage().ToBitmap();
            }
        }
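Example #14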
        public static Image <Bgr, Byte> Find(Image <Bgr, Byte> image, out long processingTime)
        {
            Stopwatch watch;

            Rectangle[] regions;

            if (GpuInvoke.HasCuda)
            {
                using (GpuHOGDescriptor des = new GpuHOGDescriptor())
                {
                    des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();

                    using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
                    {
                        using (GpuImage <Bgra, Byte> gpuBgraImage = gpuImage.Convert <Bgra, Byte>())
                        {
                            regions = des.DetectMultiScale(gpuBgraImage);
                        }
                        }
                    }
                }
            }
            else
            {
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                    watch   = Stopwatch.StartNew();
                    regions = des.DetectMultiScale(image);
                }
            }
            watch.Stop();

            processingTime = watch.ElapsedMilliseconds;

            foreach (Rectangle rect in regions)
            {
                image.Draw(rect, new Bgr(Color.Red), 1);
            }

            return(image);
        }
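Example #15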
        public static Image <Bgr, Byte> findPerson(Image <Bgr, Byte> image, out int detections, out List <PointF> positions)
        {
            imageToProcess = new Image <Bgr, byte>(image.Bitmap);                   //Value is copied so the reference of the input image is not changed outside of this class
            Person_Detector.positions.Clear();

            //If the gpu has nvidia CUDA
            if (GpuInvoke.HasCuda)
            {
                using (GpuHOGDescriptor descriptor = new GpuHOGDescriptor())
                {
                    descriptor.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(imageToProcess))                  //Create gpuImage from image
                    {
                        using (GpuImage <Bgra, Byte> bgraImage = gpuImage.Convert <Bgra, Byte>())
                        {
                            regeions = descriptor.DetectMultiScale(bgraImage);                             //Returns all detected regions in a rectangle array
                        }
                    }
                }
            }
            else
            {
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());                   //The CPU path also needs the people detector before DetectMultiScale
                    regeions = des.DetectMultiScale(imageToProcess);
                }
            }


            detections = regeions.Length;

            //Draws detected rectangles onto the image being returned
            foreach (Rectangle ped in regeions)
            {
                imageToProcess.Draw(ped, new Bgr(Color.Red), 5);
                imageToProcess.Draw(new Cross2DF(new PointF(ped.Location.X + (ped.Width / 2), ped.Location.Y + (ped.Height / 2)), 30, 30), new Bgr(Color.Green), 3);
                Person_Detector.positions.Add(new PointF(ped.Location.X + (ped.Width / 2), ped.Location.Y + (ped.Height / 2)));                 //Sets the output variable
            }


            positions = Person_Detector.positions;
            return(imageToProcess);
        }
Example #16
        /// <summary>
        /// Find the pedestrian in the image
        /// </summary>
        /// <param name="imageFileName">The name of the image</param>
        /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
        /// <returns>The image with pedestrian highlighted.</returns>
        public static Image <Bgr, Byte> Find(Image <Bgr, byte> source, out long processingTime)
        {
            Image <Bgr, Byte> image = source.Copy();

            Stopwatch watch;

            Rectangle[] regions;

            //check if there is a compatible GPU to run pedestrian detection
            if (GpuInvoke.HasCuda)
            { //this is the GPU version
                using (GpuHOGDescriptor des = new GpuHOGDescriptor())
                {
                    des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
                        {
                            regions = des.DetectMultiScale(gpuBgra);
                        }
                }
            }
            else
            { //this is the CPU version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                    watch   = Stopwatch.StartNew();
                    regions = des.DetectMultiScale(image);
                }
            }
            watch.Stop();

            processingTime = watch.ElapsedMilliseconds;

            foreach (Rectangle pedestrian in regions)
            {
                image.Draw(pedestrian, new Bgr(Color.Red), 1);
            }
            return(image);
        }
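Example #17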
        private void DetectFace(Image <Bgr, Byte> image, List <Rectangle> faces)
        {
#if !IOS
            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier face = new GpuCascadeClassifier(_faceFileName))
                {
                    using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                        {
                            Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                            faces.AddRange(faceRegion);
                        }
                }
            }
            else
#endif
            {
                //Read the HaarCascade objects
                using (CascadeClassifier face = new CascadeClassifier(_faceFileName))
                {
                    using (Image <Gray, Byte> gray = image.Convert <Gray, Byte>()) //Convert it to Grayscale
                    {
                        //normalizes brightness and increases contrast of the image
                        gray._EqualizeHist();

                        //Detect the faces from the gray scale image and store the locations as rectangles
                        Rectangle[] facesDetected = face.DetectMultiScale(
                            gray,
                            1.1,
                            10,
                            new Size(20, 20),
                            Size.Empty);
                        faces.AddRange(facesDetected);
                    }
                }
            }
        }
Example #18
        /// <summary>
        /// Find the pedestrian in the image
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
        /// <returns>The region where pedestrians are detected</returns>
        public static Rectangle[] Find(Image <Bgr, Byte> image, out long processingTime)
        {
            Stopwatch watch;

            Rectangle[] regions;
#if !IOS
            //check if there is a compatible GPU to run pedestrian detection
            if (GpuInvoke.HasCuda)
            {  //this is the GPU version
                using (GpuHOGDescriptor des = new GpuHOGDescriptor())
                {
                    des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
                        {
                            regions = des.DetectMultiScale(gpuBgra);
                        }
                }
            }
            else
#endif
            {  //this is the CPU version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                    watch   = Stopwatch.StartNew();
                    regions = des.DetectMultiScale(image);
                }
            }
            watch.Stop();

            processingTime = watch.ElapsedMilliseconds;

            return(regions);
        }
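A minimal sketch of how this Find variant might be driven; the frame path is an assumption:

        // Hypothetical caller (assumed image path); draws the returned regions
        static void FindDemo()
        {
            long processingTime;
            using (var frame = new Image<Bgr, Byte>("street.png"))
            {
                Rectangle[] pedestrians = Find(frame, out processingTime);
                foreach (Rectangle r in pedestrians)
                {
                    frame.Draw(r, new Bgr(Color.Red), 2);
                }
                Console.WriteLine("{0} region(s) in {1} ms", pedestrians.Length, processingTime);
            }
        }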
Example #19
        //=================================================== Feature Descriptor (HOG) Chair Training Data ===========================================
        public static Rectangle[] FindObject(Image<Bgr, Byte> image, out long processingTime, Size winSizeObject, string dataFile)
        {
            Stopwatch watch;
            Rectangle[] regions;
            //check if there is a compatible GPU to run pedestrian detection
            if (GpuInvoke.HasCuda)
            {  //this is the GPU version
                using (GpuHOGDescriptor des = new GpuHOGDescriptor())
                {
                    des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuImage<Bgr, Byte> gpuImg = new GpuImage<Bgr, byte>(image))
                    using (GpuImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
                    {
                        regions = des.DetectMultiScale(gpuBgra);
                    }
                }
            }
            else
            {  //this is the CPU version
                using (HOGDescriptor des = new HOGDescriptor(winSizeObject, blockSize, blockStride, cellSize, nbins, 1, -1, 0.2, true))
                {
                    des.SetSVMDetector(GetDataObjects(dataFile));
                    //des.SetSVMDetector(GetData2());

                    watch = Stopwatch.StartNew();
                    regions = des.DetectMultiScale(image);
                }
            }
            watch.Stop();

            processingTime = watch.ElapsedMilliseconds;

            return regions;
        }
Example #20
        //public static Image<Bgr, Byte> DetectFeatures()
        //{
        //    Stopwatch watch = Stopwatch.StartNew();
        //    Rectangle[] regions = GetBodies("Untitled7.jpg");
        //    MCvAvgComp[][] facesDetected = GetFaces("Untitled7.jpg");

        //    Image<Bgr, Byte> image = new Image<Bgr, byte>("Untitled7.jpg");
        //    foreach (Rectangle pedestrain in regions)
        //    {
        //        image.Draw(pedestrain, new Bgr(Color.Red), 1);
        //    }

        //    foreach (MCvAvgComp f in facesDetected[0])
        //    {
        //        //draw the face detected in the 0th (gray) channel with blue color
        //        image.Draw(f.rect, new Bgr(Color.Blue), 2);
        //    }
        //    return image;
        //}

        // Body Function
        public static Rectangle[] GetBodies(string fileName)
        {
            Image <Bgr, Byte> image = new Image <Bgr, byte>(fileName);

            Stopwatch watch;

            Rectangle[] regions;

            //check if there is a compatible GPU to run pedestrian detection
            if (GpuInvoke.HasCuda)
            {  //this is the GPU version
                using (GpuHOGDescriptor des = new GpuHOGDescriptor())
                {
                    des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
                        {
                            regions = des.DetectMultiScale(gpuBgra);
                        }
                }
            }
            else
            {  //this is the CPU version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                    watch   = Stopwatch.StartNew();
                    regions = des.DetectMultiScale(image);
                }
            }
            watch.Stop();

            return(regions);
        }
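Example #21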
        //
        // try this generic function could be better
        //
        // if image is not in BGRA format, convert it...
        //

        private Rectangle[] GPUDetectPedestrianTest <Tcol, Tdepth>(Image <Tcol, Tdepth> image)
            where Tcol : struct, IColor
            where Tdepth : new()
        {
            //if (image.GetType() == typeof(TColorFormat) )
            //{
            //    // test format or convert
            //}

            using (var bgraImage = image.Convert <Bgra, byte>())
            {
                try
                {
                    using (var gpuImage = new GpuImage <Bgra, byte>(bgraImage))
                    {
                        return(gpuhog.DetectMultiScale(gpuImage));
                    }
                }
                catch (Exception e)
                {
                    Status = "Exception: " + e;
                    return(new Rectangle[0]);
                }
            }
        }
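Example #22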
        public static void Detect(Image<Bgr, Byte> image, String faceFileName, String eyeFileName, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
        {
            Stopwatch watch;

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
                using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
                    using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                    {
                        Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                        faces.AddRange(faceRegion);
                        foreach (Rectangle f in faceRegion)
                        {
                            using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                            {
                                //For some reason a clone is required.
                                //Might be a bug of GpuCascadeClassifier in opencv
                                using (GpuImage<Gray, Byte> clone = faceImg.Clone())
                                {
                                    Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);

                                    foreach (Rectangle e in eyeRegion)
                                    {
                                        Rectangle eyeRect = e;
                                        eyeRect.Offset(f.X, f.Y);
                                        eyes.Add(eyeRect);
                                    }
                                }
                            }
                        }
                    }
                    watch.Stop();
                }
            }
            else
            {
                //Read the HaarCascade objects
                using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
                    {
                        //normalizes brightness and increases contrast of the image
                        gray._EqualizeHist();

                        //Detect the faces from the gray scale image and store the locations as rectangles
                        Rectangle[] facesDetected = face.DetectMultiScale(
                            gray,
                            1.1,
                            10,
                            new Size(20, 20),
                            Size.Empty);
                        faces.AddRange(facesDetected);

                        foreach (Rectangle f in facesDetected)
                        {
                            //Set the region of interest on the faces
                            gray.ROI = f;
                            Rectangle[] eyesDetected = eye.DetectMultiScale(
                                gray,
                                1.1,
                                10,
                                new Size(20, 20),
                                Size.Empty);
                            gray.ROI = Rectangle.Empty;

                            foreach (Rectangle e in eyesDetected)
                            {
                                Rectangle eyeRect = e;
                                eyeRect.Offset(f.X, f.Y);
                                eyes.Add(eyeRect);
                            }
                        }
                    }
                    watch.Stop();
                }
            }
            detectionTime = watch.ElapsedMilliseconds;
        }
Example #23
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)
        {
            Stopwatch watch;
            HomographyMatrix homography = null;

            SURFDetector surfCPU = new SURFDetector (500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;
            if (GpuInvoke.HasCuda) {
                GpuSURFDetector surfGPU = new GpuSURFDetector (surfCPU.SURFParams, 0.01f);
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte> (modelImage))
                    //extract features from the object image
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw (gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw (gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float> (DistanceType.L2)) {
                    modelKeyPoints = new VectorOfKeyPoint ();
                    surfGPU.DownloadKeypoints (gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew ();

                    // extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte> (observedImage))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw (gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw (gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte> (gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream ()) {
                        matcher.KnnMatchSingle (gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int> (gpuMatchIndices.Size);
                        mask = new Matrix<byte> (gpuMask.Size);

                        //gpu implementation of voteForUniquess
                        using (GpuMat<float> col0 = gpuMatchDist.Col (0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col (1)) {
                            GpuInvoke.Multiply (col1, new MCvScalar (uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare (col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint ();
                        surfGPU.DownloadKeypoints (gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks
                        //We can perform some other CPU intensive work here while we are waiting for the stream to complete.
                        stream.WaitForCompletion ();

                        gpuMask.Download (mask);
                        gpuMatchIndices.Download (indices);

                        if (GpuInvoke.CountNonZero (gpuMask) >= 4) {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                        watch.Stop ();
                    }
                }
            } else {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw (modelImage, null);
                Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw (modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew ();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw (observedImage, null);
                Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw (observedImage, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float> (DistanceType.L2);
                matcher.Add (modelDescriptors);

                indices = new Matrix<int> (observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float> (observedDescriptors.Rows, k)) {
                    matcher.KnnMatch (observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte> (dist.Rows, 1);
                    mask.SetValue (255);
                    Features2DToolbox.VoteForUniqueness (dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero (mask);
                if (nonZeroCount >= 4) {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }

                watch.Stop ();
            }

            //Draw the matched keypoints
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches (modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                          indices, new Bgr (255, 255, 255), new Bgr (255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null) {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF (rect.Left, rect.Bottom),
                    new PointF (rect.Right, rect.Bottom),
                    new PointF (rect.Right, rect.Top),
                    new PointF (rect.Left, rect.Top)
                };
                homography.ProjectPoints (pts);

                result.DrawPolyline (Array.ConvertAll<PointF, Point> (pts, Point.Round), true, new Bgr (Color.Red), 5);
            }
            #endregion

            matchTime = watch.ElapsedMilliseconds;

            return result;
        }
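A short sketch of calling the Draw helper above, reusing the box.png / box_in_scene.png pair from the SURF example later in this list:

        // Hypothetical caller; the two file names follow the later SURF example
        static void DrawDemo()
        {
            long matchTime;
            using (var model = new Image<Gray, Byte>("box.png"))
            using (var scene = new Image<Gray, Byte>("box_in_scene.png"))
            using (Image<Bgr, Byte> result = Draw(model, scene, out matchTime))
            {
                ImageViewer.Show(result, String.Format("Matched in {0} ms", matchTime));
            }
        }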
Example #24
        private void ProcessFrameFindFaces()
        {
            var stereoCalibration = Options.StereoCalibrationOptions;

            if (stereoCalibration == null)
            {
                return;
            }

            var leftImageR  = new Image <Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height);
            var rightImageR = new Image <Gray, byte>(_cameras[1].Image.Width, _cameras[1].Image.Height);

            try
            {
                CvInvoke.cvRemap(_cameras[0].Image.Ptr, leftImageR.Ptr,
                                 stereoCalibration.MapXLeft, stereoCalibration.MapYLeft, 0, new MCvScalar(0));
            }
            catch (Exception)
            {
                // remap failed; leave leftImageR blank
            }

            CvInvoke.cvRemap(_cameras[1].Image.Ptr, rightImageR.Ptr,
                             stereoCalibration.MapXRight, stereoCalibration.MapYRight, 0, new MCvScalar(0));

            // find first face points
            var leftFaceRegions  = Helper2D.GetFaceRegion2Ds(leftImageR, FaceWidth, FaceHeight, true, true);
            var rightFaceRegions = Helper2D.GetFaceRegion2Ds(rightImageR, FaceWidth, FaceHeight, true, true);

            FaceRegion2D leftFace;
            FaceRegion2D rightFace;

            if (leftFaceRegions != null &&
                rightFaceRegions != null &&
                (leftFace = leftFaceRegions.FirstOrDefault()) != null &&
                (rightFace = rightFaceRegions.FirstOrDefault()) != null)
            {
                if (leftFace.EyeAngle != 0)
                {
                    _leftRoll = leftFace.EyeAngle;
                }
                if (rightFace.EyeAngle != 0)
                {
                    _rightRoll = rightFace.EyeAngle;
                }

                var leftPoints  = new Point[4]; // face location, left eye, right eye, mouth
                var rightPoints = new Point[4];

                #region Points

                // face
                leftPoints[0]  = new Point(leftFace.Face.Location.X + leftFace.Face.Width / 2, leftFace.Face.Location.Y + leftFace.Face.Height / 2);
                rightPoints[0] = new Point(rightFace.Face.Location.X + rightFace.Face.Width / 2, rightFace.Face.Location.Y + rightFace.Face.Height / 2);

                // left eye
                if (leftFace.LeftEye != null && rightFace.LeftEye != null)
                {
                    leftPoints[1] = new Point(leftFace.Face.Location.X + leftFace.LeftEye.Location.X + leftFace.LeftEye.Width / 2,
                                              leftFace.Face.Location.Y + leftFace.LeftEye.Location.Y + leftFace.LeftEye.Height / 2);

                    rightPoints[1] = new Point(rightFace.Face.Location.X + rightFace.LeftEye.Location.X + rightFace.LeftEye.Width / 2,
                                               rightFace.Face.Location.Y + rightFace.LeftEye.Location.Y + rightFace.LeftEye.Height / 2);
                }

                // right eye
                if (leftFace.RightEye != null && rightFace.RightEye != null)
                {
                    leftPoints[2] = new Point(leftFace.Face.Location.X + leftFace.RightEye.Location.X + leftFace.RightEye.Width / 2,
                                              leftFace.Face.Location.Y + leftFace.RightEye.Location.Y + leftFace.RightEye.Height / 2);

                    rightPoints[2] = new Point(rightFace.Face.Location.X + rightFace.RightEye.Location.X + rightFace.RightEye.Width / 2,
                                               rightFace.Face.Location.Y + rightFace.RightEye.Location.Y + rightFace.RightEye.Height / 2);
                }

                // mouth
                if (leftFace.Mouth != null && rightFace.Mouth != null)
                {
                    leftPoints[3] = new Point(leftFace.Face.Location.X + leftFace.Mouth.Location.X + leftFace.Mouth.Width / 2,
                                              leftFace.Face.Location.Y + leftFace.Mouth.Location.Y + leftFace.Mouth.Height / 2);

                    rightPoints[3] = new Point(rightFace.Face.Location.X + rightFace.Mouth.Location.X + rightFace.Mouth.Width / 2,
                                               rightFace.Face.Location.Y + rightFace.Mouth.Location.Y + rightFace.Mouth.Height / 2);
                }

                #endregion

                #region Manual Point Cloud Calculation

                {
                    var pointCloud = new MCvPoint3D64f[leftPoints.Length];

                    #region Calculate Point Cloud

                    for (int i = 0; i < leftPoints.Length; i++)
                    {
                        if (leftPoints[i].X == 0 && leftPoints[i].Y == 0)
                        {
                            continue;
                        }

                        var d = rightPoints[i].X - leftPoints[i].X;

                        var X = leftPoints[i].X * stereoCalibration.Q[0, 0] + stereoCalibration.Q[0, 3];
                        var Y = leftPoints[i].Y * stereoCalibration.Q[1, 1] + stereoCalibration.Q[1, 3];
                        var Z = stereoCalibration.Q[2, 3];
                        var W = d * stereoCalibration.Q[3, 2] + stereoCalibration.Q[3, 3];

                        X = X / W;
                        Y = Y / W;
                        Z = Z / W;

                        leftImageR.Draw(string.Format("{0:0.0} {1:0.0} {2:0.0}", X, Y, Z), ref _font, leftPoints[i], new Gray(255));
                        rightImageR.Draw(string.Format("{0:0.0} {1:0.0} {2:0.0}", X, Y, Z), ref _font, rightPoints[i], new Gray(255));

                        pointCloud[i] = new MCvPoint3D64f(X, Y, Z);
                    }

                    #endregion


                    _foundFace3d = new Face3D()
                    {
                        Location = pointCloud[0].x == 0 && pointCloud[0].y == 0 && pointCloud[0].z == 0 ? (MCvPoint3D64f?)null : pointCloud[0],
                        LeftEye  = pointCloud[1].x == 0 && pointCloud[1].y == 0 && pointCloud[1].z == 0 ? (MCvPoint3D64f?)null : pointCloud[1],
                        RightEye = pointCloud[2].x == 0 && pointCloud[2].y == 0 && pointCloud[2].z == 0 ? (MCvPoint3D64f?)null : pointCloud[2],
                        Mouth    = pointCloud[3].x == 0 && pointCloud[3].y == 0 && pointCloud[3].z == 0 ? (MCvPoint3D64f?)null : pointCloud[3],
                    };

                    if (_foundFace3d.LeftEye != null &&
                        _foundFace3d.RightEye != null &&
                        _foundFace3d.Mouth != null)
                    {
                        var srcMatrix = new Matrix <float>(3, 4);

                        srcMatrix[0, 0] = (float)_foundFace3d.LeftEye.Value.x;
                        srcMatrix[1, 0] = (float)_foundFace3d.LeftEye.Value.y;
                        srcMatrix[2, 0] = (float)_foundFace3d.LeftEye.Value.z;

                        srcMatrix[0, 1] = (float)_foundFace3d.RightEye.Value.x;
                        srcMatrix[1, 1] = (float)_foundFace3d.RightEye.Value.y;
                        srcMatrix[2, 1] = (float)_foundFace3d.RightEye.Value.z;

                        srcMatrix[0, 2] = (float)_foundFace3d.Mouth.Value.x;
                        srcMatrix[1, 2] = (float)_foundFace3d.Mouth.Value.y;
                        srcMatrix[2, 2] = (float)_foundFace3d.Mouth.Value.z;

                        srcMatrix[0, 3] = (float)_foundFace3d.Location.Value.x;
                        srcMatrix[1, 3] = (float)_foundFace3d.Location.Value.y;
                        srcMatrix[2, 3] = (float)_foundFace3d.Location.Value.z;


                        var dstMatrix = new Matrix <float>(3, 4);

                        dstMatrix[0, 0] = (float)_foundFace3d.LeftEye.Value.x;
                        dstMatrix[1, 0] = (float)_foundFace3d.LeftEye.Value.y;
                        dstMatrix[2, 0] = (float)30;

                        dstMatrix[0, 1] = (float)_foundFace3d.RightEye.Value.x;
                        dstMatrix[1, 1] = (float)_foundFace3d.RightEye.Value.y;
                        dstMatrix[2, 1] = (float)30;

                        dstMatrix[0, 2] = (float)_foundFace3d.Mouth.Value.x;
                        dstMatrix[1, 2] = (float)_foundFace3d.Mouth.Value.y;
                        dstMatrix[2, 2] = (float)30;

                        dstMatrix[0, 3] = (float)_foundFace3d.Location.Value.x;
                        dstMatrix[1, 3] = (float)_foundFace3d.Location.Value.y;
                        dstMatrix[2, 3] = (float)30;

                        HomographyMatrix homographyMatrix = CameraCalibration.FindHomography(srcMatrix, dstMatrix, HOMOGRAPHY_METHOD.DEFAULT, 1);

                        if (homographyMatrix != null)
                        {
                            try
                            {
                                leftImageR = leftImageR.WarpPerspective(homographyMatrix, INTER.CV_INTER_LINEAR, WARP.CV_WARP_DEFAULT, new Gray(0));
                            }
                            catch (Exception)
                            {
                                // warp failed; keep the unwarped image
                            }
                        }
                    }
                }

                #endregion

                #region Automatic Point Cloud

                {
                    _imagePointsDisparity = new Image <Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height);
                    _imagePointsLeft      = new Image <Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height, new Gray(255));
                    _imagePointsRight     = new Image <Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height, new Gray(255));

                    for (int i = 0; i < leftPoints.Length; i++)
                    {
                        if (leftPoints[i].X == 0 && leftPoints[i].Y == 0)
                        {
                            continue;
                        }

                        _imagePointsLeft.Draw(new Rectangle(new Point(leftPoints[i].X, leftPoints[i].Y), new Size(10, 10)), new Gray(0), 10);
                        _imagePointsRight.Draw(new Rectangle(new Point(rightPoints[i].X, rightPoints[i].Y), new Size(10, 10)), new Gray(0), 10);
                    }

                    using (var imagePointsDisparityGpu = new GpuImage <Gray, byte>(_imagePointsDisparity))
                    using (var gpuPointsLeft = new GpuImage <Gray, byte>(_imagePointsLeft))
                    using (var gpuPointsRight = new GpuImage <Gray, byte>(_imagePointsRight))
                    {
                        _stereoSolver.FindStereoCorrespondence(gpuPointsLeft, gpuPointsRight,
                                                               imagePointsDisparityGpu, null);

                        _imagePointsDisparity = imagePointsDisparityGpu.ToImage();
                    }



                    //MCvPoint3D32f[] pointCloud = PointCollection.ReprojectImageTo3D(_imagePointsDisparity, stereoCalibration.Q);

                    //var filteredPointCloud = pointCloud.
                    //    Where(item => item.z != 10000).
                    //    GroupBy(item => item.z).
                    //    Select(item => new
                    //    {
                    //        z = item.Key,
                    //        x = item.Average(point => point.x),
                    //        y = item.Average(point => point.y)
                    //    }).ToArray();

                    //for (int i = 0; i < filteredPointCloud.Length; i++)
                    //{
                    //    _imagePointsDisparity.Draw(string.Format("{0:0.0} {1:0.0} {2:0.0}", filteredPointCloud[i].x, filteredPointCloud[i].y, filteredPointCloud[i].z),
                    //        ref _font, new Point((int)filteredPointCloud[i].x, (int)filteredPointCloud[i].y), new Gray(255));
                    //}
                }

                #endregion
            }

            var oldLeft  = _cameras[0].Image;
            var oldRight = _cameras[1].Image;

            _cameras[0].Image = leftImageR;
            _cameras[1].Image = rightImageR;

            oldLeft.Dispose();
            oldRight.Dispose();
        }
Example #25
        static void Run()
        {
            Image <Gray, Byte> modelImage    = new Image <Gray, byte>("box.png");
            Image <Gray, Byte> observedImage = new Image <Gray, byte>("box_in_scene.png");
            Stopwatch          watch;
            HomographyMatrix   homography = null;

            SURFDetector surfCPU = new SURFDetector(500, false);

            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;
            Matrix <float>   dist;
            Matrix <byte>    mask;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, 2, 1))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuMatchIndices.Size, 1))
                                                {
                                                    observedKeyPoints = new VectorOfKeyPoint();
                                                    surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                    matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                                                    indices = new Matrix <int>(gpuMatchIndices.Size);
                                                    dist    = new Matrix <float>(indices.Size);
                                                    gpuMatchIndices.Download(indices);
                                                    gpuMatchDist.Download(dist);

                                                    mask = new Matrix <byte>(dist.Rows, 1);

                                                    mask.SetValue(255);

                                                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                                                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                                                    if (nonZeroCount >= 4)
                                                    {
                                                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                        if (nonZeroCount >= 4)
                                                        {
                                                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                                                        }
                                                    }

                                                    watch.Stop();
                                                }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                //MKeyPoint[] kpts = modelKeyPoints.ToArray();
                Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                Matrix <float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

                BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
                matcher.Add(modelDescriptors);
                int k = 2;
                indices = new Matrix <int>(observedDescriptors.Rows, k);
                dist    = new Matrix <float>(observedDescriptors.Rows, k);
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

                mask = new Matrix <byte>(dist.Rows, 1);

                mask.SetValue(255);

                Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                    }
                }

                watch.Stop();
            }

            //Draw the matched keypoints
            Image <Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                                                     indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

            #region draw the projected region on the image
            if (homography != null)
            { //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);

                result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
            }
            #endregion

            ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
        }
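All of the SURF examples in this listing rely on the same k = 2 ratio test (VoteForUniqueness with a 0.8 threshold): a match survives only if its best distance is clearly smaller than its second-best. A minimal sketch of the equivalent test, assuming a two-column distance matrix and a mask pre-filled with 255:

        static void RatioTest(Matrix<float> dist, Matrix<byte> mask, double uniquenessThreshold)
        {
            for (int i = 0; i < dist.Rows; i++)
            {
                //mask out ambiguous matches whose two nearest neighbors are too close in distance
                if (dist[i, 0] >= uniquenessThreshold * dist[i, 1])
                {
                    mask[i, 0] = 0;
                }
            }
        }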
Example #26
        public static bool FindModelImageInObservedImage(Image <Gray, byte> modelImage, Image <Gray, byte> observedImage)
        {
            var surfCpu = new SURFDetector(500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;

            Matrix <byte> mask;
            int           k = 2;
            double        uniquenessThreshold = 0.8;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGpu = new GpuSURFDetector(surfCpu.SURFParams, 0.01f);
                using (GpuImage <Gray, byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGpu.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGpu.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGpu.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);

                                // extract features from the observed image
                                using (GpuImage <Gray, byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGpu.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGpu.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))
                                                        using (var stream = new Emgu.CV.GPU.Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //GPU implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                                                                }

                                                            observedKeyPoints = new VectorOfKeyPoint();
                                                            surfGpu.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                            //wait for the stream to complete its tasks
                                                            //We can perform other CPU-intensive work here while we are waiting for the stream to complete.
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                                                                }
                                                                if ((double)nonZeroCount / mask.Height > 0.02)
                                                                {
                                                                    return(true);
                                                                }
                                                            }
                                                        }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCpu.DetectKeyPointsRaw(modelImage, null);
                Matrix <float> modelDescriptors = surfCpu.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                // extract features from the observed image
                observedKeyPoints = surfCpu.DetectKeyPointsRaw(observedImage, null);
                Matrix <float>            observedDescriptors = surfCpu.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
                BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix <int>(observedDescriptors.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                }

                if ((double)nonZeroCount / mask.Height > 0.02)
                {
                    return(true);
                }
            }

            //Draw the matched keypoints
            //var result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, indices, new Bgr(0, 0, 255), new Bgr(255, 0, 0), mask, Features2DToolbox.KeypointDrawType.DEFAULT);
            //result.Save( @"C:\Users\D.Markachev\Desktop\bleh-keypoints.jpg" );

            return(false);
        }
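A possible call site for the FindModelImageInObservedImage method above; the image paths are placeholders:

        static void CheckForModel()
        {
            using (Image<Gray, byte> model = new Image<Gray, byte>("model.png"))
            using (Image<Gray, byte> scene = new Image<Gray, byte>("scene.png"))
            {
                Console.WriteLine(FindModelImageInObservedImage(model, scene)
                    ? "Model located in scene"
                    : "Model not found");
            }
        }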
Example #27
        public void classify(BitmapSource frame)
        {
            Console.WriteLine(relativeURI);

            //byte[] classifiedImage = frame;
            //WriteableBitmap frameImage = new WriteableBitmap(frameWidth, frameHeight, 96, 96, PixelFormats.Bgr32, null);

            //BitmapSource frameImage = BitmapSource.Create(frameWidth, frameHeight, 96, 96, PixelFormats.Bgr32, null, frame, stride);

            /*
             * resultsPtr = CvInvoke.cvHaarDetectObjects(
             *  Marshal.GetIUnknownForObject(frame),
             *  classifier,
             *  resultsPtr,
             *  1.1,
             *  3,
             *  Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
             *  new System.Drawing.Size(0,0),
             *  new System.Drawing.Size(0,0)
             * );
             *
             * Console.WriteLine("Classified?!? Pointer below: ");
             * Console.WriteLine(resultsPtr.ToString());
             */
            //return classifiedImage;
            Console.WriteLine(" - - - Converting Bitmap...");
            System.Drawing.Bitmap bitmapFrame;
            using (MemoryStream outStream = new MemoryStream())
            {
                BitmapEncoder enc = new BmpBitmapEncoder();
                enc.Frames.Add(BitmapFrame.Create(frame));
                enc.Save(outStream);
                bitmapFrame = new System.Drawing.Bitmap(outStream);
            }
            Console.WriteLine(" - - - Bitmap converted!");

            Image <Bgr, Byte> image = new Image <Bgr, Byte>(bitmapFrame);

            Console.WriteLine(" - - - Image set");
            Console.WriteLine(" - - - Check CUDA...");

            if (GpuInvoke.HasCuda)
            {
                Console.WriteLine(" - - - Has CUDA!");
                using (GpuCascadeClassifier target = new GpuCascadeClassifier(classifierURI))
                {
                    using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
                        using (GpuImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                        {
                            Console.WriteLine(" - - - Detecting!");
                            Rectangle[] targetSet = target.DetectMultiScale(gpuGray, 1.1, 10, System.Drawing.Size.Empty);
                            Console.WriteLine(" - - - Detected :D :D :D Printing rectangle set: ");
                            foreach (Rectangle f in targetSet)
                            {
                                Console.WriteLine("Rectangle found at: " + f.ToString());
                                //draw the face detected in the 0th (gray) channel with blue color
                                image.Draw(f, new Bgr(System.Drawing.Color.Blue), 2);
                            }
                            Console.WriteLine(" - - - DONE");
                        }
                }
            }
            else
            {
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    //des.SetSVMDetector
                }

                Console.WriteLine(" - - - No CUDA  :( ");
                Console.WriteLine(" - - - Devices available: " + GpuInvoke.GetCudaEnabledDeviceCount());
            }
        }
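Note that the conversion above disposes the MemoryStream while bitmapFrame may still reference it; GDI+ requires the stream backing a Bitmap to stay open for the bitmap's lifetime. A defensive variant of the same conversion, cloning the decoded bitmap before the stream is released:

        static System.Drawing.Bitmap ToBitmap(BitmapSource source)
        {
            using (MemoryStream outStream = new MemoryStream())
            {
                BitmapEncoder enc = new BmpBitmapEncoder();
                enc.Frames.Add(BitmapFrame.Create(source));
                enc.Save(outStream);
                //clone the decoded bitmap so it no longer depends on outStream
                using (var decoded = new System.Drawing.Bitmap(outStream))
                {
                    return new System.Drawing.Bitmap(decoded);
                }
            }
        }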
Example #28
        static void Run()
        {
            Image <Bgr, Byte> image = new Image <Bgr, byte>("lena.jpg"); //Read the files as an 8-bit Bgr image

            Stopwatch watch;
            String    faceFileName = "haarcascade_frontalface_default.xml";
            String    eyeFileName  = "haarcascade_eye.xml";

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
                    using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
                    {
                        watch = Stopwatch.StartNew();
                        using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
                            using (GpuImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                            {
                                Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                                foreach (Rectangle f in faceRegion)
                                {
                                    //draw the face detected in the 0th (gray) channel with blue color
                                    image.Draw(f, new Bgr(Color.Blue), 2);
                                    using (GpuImage <Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                                    {
                                        //For some reason a clone is required.
                                        //Might be a bug of GpuCascadeClassifier in opencv
                                        using (GpuImage <Gray, Byte> clone = faceImg.Clone())
                                        {
                                            Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);

                                            foreach (Rectangle e in eyeRegion)
                                            {
                                                Rectangle eyeRect = e;
                                                eyeRect.Offset(f.X, f.Y);
                                                image.Draw(eyeRect, new Bgr(Color.Red), 2);
                                            }
                                        }
                                    }
                                }
                            }
                        watch.Stop();
                    }
            }
            else
            {
                //Read the HaarCascade objects
                using (HaarCascade face = new HaarCascade(faceFileName))
                    using (HaarCascade eye = new HaarCascade(eyeFileName))
                    {
                        watch = Stopwatch.StartNew();
                        using (Image <Gray, Byte> gray = image.Convert <Gray, Byte>()) //Convert it to Grayscale
                        {
                            //normalizes brightness and increases contrast of the image
                            gray._EqualizeHist();

                            //Detect the faces from the gray scale image and store the locations as rectangles
                            //The first dimension is the channel
                            //The second dimension is the index of the rectangle in the specific channel
                            MCvAvgComp[] facesDetected = face.Detect(
                                gray,
                                1.1,
                                10,
                                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                new Size(20, 20));

                            foreach (MCvAvgComp f in facesDetected)
                            {
                                //draw the face detected in the 0th (gray) channel with blue color
                                image.Draw(f.rect, new Bgr(Color.Blue), 2);

                                //Set the region of interest on the faces
                                gray.ROI = f.rect;
                                MCvAvgComp[] eyesDetected = eye.Detect(
                                    gray,
                                    1.1,
                                    10,
                                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                    new Size(20, 20));
                                gray.ROI = Rectangle.Empty;

                                foreach (MCvAvgComp e in eyesDetected)
                                {
                                    Rectangle eyeRect = e.rect;
                                    eyeRect.Offset(f.rect.X, f.rect.Y);
                                    image.Draw(eyeRect, new Bgr(Color.Red), 2);
                                }
                            }
                        }
                        watch.Stop();
                    }
            }

            //display the image
            ImageViewer.Show(image, String.Format(
                                 "Completed face and eye detection using {0} in {1} milliseconds",
                                 GpuInvoke.HasCuda ? "GPU": "CPU",
                                 watch.ElapsedMilliseconds));
        }
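The CPU branch above searches for eyes only inside each detected face by setting the image ROI. The same pattern in isolation, with the ROI reset guaranteed even if detection throws; the returned rectangles are face-relative, so callers still need to Offset them as the example does:

        static MCvAvgComp[] DetectEyesInFace(Image<Gray, Byte> gray, HaarCascade eye, Rectangle faceRect)
        {
            gray.ROI = faceRect; //restrict the search to the face
            try
            {
                return eye.Detect(gray, 1.1, 10,
                                  Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                  new Size(20, 20));
            }
            finally
            {
                gray.ROI = Rectangle.Empty; //always reset, or later operations stay cropped
            }
        }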
 /// <summary>
 /// Apply the filter to the disparity image
 /// </summary>
 /// <param name="disparity">The input disparity map</param>
 /// <param name="image">The image</param>
 /// <param name="dst">The output disparity map, should have the same size as the input disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(GpuImage<Gray, Byte> disparity, GpuImage<Gray, Byte> image, GpuImage<Gray, byte> dst, Stream stream)
 {
     GpuDisparityBilateralFilterApply(_ptr, disparity, image, dst, stream);
 }
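A hedged usage sketch for the Apply fragment above. The enclosing class name, GpuDisparityBilateralFilter, is inferred from the native call and is an assumption, as is prior allocation of the three images with matching sizes:

 static void RefineDisparity(GpuDisparityBilateralFilter filter,
                             GpuImage<Gray, Byte> disparity,
                             GpuImage<Gray, Byte> image,
                             GpuImage<Gray, Byte> dst)
 {
     //passing null for the stream makes the call synchronous, per the XML docs above
     filter.Apply(disparity, image, dst, null);
 }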
Example #30
File: DetectFace.cs Project: Raptek/STEM
 //        public static void detectFaceGPU(Image<Bgr, Byte> image, String faceFileName, String eyesFileName, List<Rectangle> facesList, List<Rectangle> eyesList, out long detectionTime)
 public static void detectFaceGPU(Image<Bgr, Byte> image, String faceFileName, String eyesFileName, List<Rectangle> facesList, out Rectangle eyeL, out Rectangle eyeR, out long detectionTime)
 {
     Stopwatch watch;
     Rectangle eyeLtmp = new Rectangle(), eyeRtmp = new Rectangle();
     using (GpuCascadeClassifier faceCascade = new GpuCascadeClassifier(faceFileName))
     using (GpuCascadeClassifier eyesCascade = new GpuCascadeClassifier(eyesFileName))
     {
         watch = Stopwatch.StartNew();
         using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr,byte>(image))
         using (GpuImage<Gray, Byte> grayImage = gpuImage.Convert<Gray, Byte>())
         {
             Rectangle[] facesRegion = faceCascade.DetectMultiScale(grayImage, 1.1, 10, new Size(image.Width / 8, image.Height / 8));
             facesList.AddRange(facesRegion);
             foreach (Rectangle f in facesRegion)
             {
                  #region Eye ROI
                 Point eyesBoxTopLeft = new Point(f.X, f.Top + f.Height / 4);
                 Size eyesBoxArea = new Size(f.Width, f.Height / 4);
                 Rectangle eyesBox = new Rectangle(eyesBoxTopLeft, eyesBoxArea);
                 Size rightEyeBoxArea = new Size(eyesBox.Width / 2, eyesBox.Height);
                 Rectangle rightEyeBox = new Rectangle(eyesBoxTopLeft, rightEyeBoxArea);
                 Size leftEyeBoxArea = new Size(eyesBox.Width / 2, eyesBox.Height);
                 Rectangle leftEyeBox = new Rectangle(new Point(eyesBoxTopLeft.X + leftEyeBoxArea.Width, eyesBoxTopLeft.Y), leftEyeBoxArea);
                 #endregion
                  #region Right eye
                 using (GpuImage<Gray, Byte> faceImg = grayImage.GetSubRect(rightEyeBox))
                 {
                     using (GpuImage<Gray, Byte> clone = faceImg.Clone())
                     {
                         Rectangle[] eyeRegion = eyesCascade.DetectMultiScale(clone, 1.1, 10, new Size(30, 30));
                         foreach (Rectangle e in eyeRegion)
                         {
                             Rectangle eyeRect = e;
                             eyeRect.Offset(rightEyeBox.X, rightEyeBox.Y);
                             eyeRtmp = eyeRect;
                         }
                     }
                 }
                 #endregion
                  #region Left eye
                 using (GpuImage<Gray, Byte> faceImg = grayImage.GetSubRect(leftEyeBox))
                 {
                     using (GpuImage<Gray, Byte> clone = faceImg.Clone())
                     {
                         Rectangle[] eyeRegion = eyesCascade.DetectMultiScale(clone, 1.1, 10, new Size(30, 30));
                         foreach (Rectangle e in eyeRegion)
                         {
                             Rectangle eyeRect = e;
                             eyeRect.Offset(leftEyeBox.X, leftEyeBox.Y);
                             eyeLtmp = eyeRect;
                         }
                     }
                 }
                 #endregion
             }
         }
         watch.Stop();
     }
     detectionTime = watch.ElapsedMilliseconds;
     eyeL = eyeLtmp;
     eyeR = eyeRtmp;
 }
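A hypothetical call site for detectFaceGPU above; the cascade file names are the stock OpenCV models and the image is assumed to be loaded already:

 static void RunFaceAndEyeDetection(Image<Bgr, Byte> image)
 {
     List<Rectangle> faces = new List<Rectangle>();
     Rectangle eyeL, eyeR;
     long detectionTime;
     detectFaceGPU(image, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml",
                   faces, out eyeL, out eyeR, out detectionTime);
     Console.WriteLine("{0} face(s) in {1} ms; eyes: L={2}, R={3}",
                       faces.Count, detectionTime, eyeL, eyeR);
 }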
 /// <summary>
 /// Detect keypoints in the GpuImage
 /// </summary>
 /// <param name="img">The image where keypoints will be detected from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(GpuImage<Gray, Byte> img, GpuImage<Gray, Byte> mask)
 {
     using (GpuMat<float> tmp = DetectKeyPointsRaw(img, mask))
     using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
     {
         DownloadKeypoints(tmp, kpts);
         return kpts.ToArray();
     }
 }
Example #32
        private static Rectangle[] DetectFace(Image<Bgr, Byte> image, string faceFileName)
        {
            try
            {
                if (GpuInvoke.HasCuda)
                {
                    using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
                    {
                        using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
                        using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                        {
                            Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);

                            return faceRegion;
                        }
                    }
                }
                else
                {
                    //Read the HaarCascade objects
                    using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                    {

                        using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
                        {
                            //normalizes brightness and increases contrast of the image
                            gray._EqualizeHist();

                            //Detect the faces from the gray scale image and store the locations as rectangles
                            //The first dimension is the channel
                            //The second dimension is the index of the rectangle in the specific channel
                            Rectangle[] facesDetected = face.DetectMultiScale(
                               gray,
                               1.1,
                               10,
                               new Size(filterWidth, filterHeight),
                               Size.Empty);

                            return facesDetected;
                        }
                    }
                }
            }
            catch (Exception)
            {
                //use a bare throw so the original stack trace is preserved ("throw ex" would reset it)
                throw;
            }
        }
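A short usage sketch for the DetectFace method above; the cascade path is a placeholder:

        private static void DrawDetectedFaces(Image<Bgr, Byte> image)
        {
            foreach (Rectangle face in DetectFace(image, "haarcascade_frontalface_default.xml"))
            {
                //draw each detected face in blue
                image.Draw(face, new Bgr(Color.Blue), 2);
            }
        }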
 /// <summary>
 /// Compute the descriptor given the image and the point location
 /// </summary>
 /// <param name="image">The image where the descriptor will be computed from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The keypoints where the descriptors will be computed from. The order of the keypoints might be changed unless the GPU_SURF detector is UP-RIGHT.</param>
 /// <returns>The image features found at the keypoint locations</returns>
 public GpuMat<float> ComputeDescriptorsRaw(GpuImage<Gray, Byte> image, GpuImage<Gray, byte> mask, GpuMat<float> keyPoints)
 {
     GpuMat<float> descriptors = new GpuMat<float>(keyPoints.Size.Height, DescriptorSize, 1);
     gpuSURFDetectorCompute(_ptr, image, mask, keyPoints, descriptors, true);
     return descriptors;
 }
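A sketch chaining the raw GPU SURF calls shown in these fragments (DetectKeyPointsRaw, ComputeDescriptorsRaw, DownloadKeypoints) and downloading the descriptors to a CPU matrix; it is written as a method of the same detector class:

 public Matrix<float> DetectAndDescribe(GpuImage<Gray, Byte> image, out VectorOfKeyPoint keyPoints)
 {
     keyPoints = new VectorOfKeyPoint();
     using (GpuMat<float> gpuKeyPoints = DetectKeyPointsRaw(image, null))
     using (GpuMat<float> gpuDescriptors = ComputeDescriptorsRaw(image, null, gpuKeyPoints))
     {
         DownloadKeypoints(gpuKeyPoints, keyPoints);
         //copy the descriptors from GPU memory into a CPU-side matrix
         Matrix<float> descriptors = new Matrix<float>(gpuDescriptors.Size);
         gpuDescriptors.Download(descriptors);
         return descriptors;
     }
 }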
Example #35
        public static FaceRegion2D[] GetFaceRegion2Ds(IImage gray,
                                                      int faceWidth, int faceHeight, bool findEyes = true, bool findMouthRegions = true)
        {
            var ret = new List <FaceRegion2D>();

            var parameter = new PrepareImageParameter
            {
            };

            using (var grayGpu = new GpuImage <Gray, byte>((Image <Gray, byte>)gray))
            {
                IEnumerable <Region2D> faceRegions = FilterInstances.FaceFilter.GetResult(grayGpu, new FacePartParameter
                {
                    MinWidth  = faceWidth,
                    MinHeight = faceHeight
                });

                if (faceRegions == null)
                {
                    return(ret.ToArray());
                }


                foreach (Region2D faceRegion in faceRegions)
                {
                    Region2D leftEyeRegion  = null;
                    Region2D rightEyeRegion = null;
                    Region2D mouthRegion    = null;
                    double   angle          = 0;
                    IImage   faceImage      = null;

                    using (IImage cropFace = FilterInstances.CropFilter.GetResult(grayGpu, new CropParameter {
                        Region = faceRegion
                    }))
                    {
                        #region Face parts detection

                        if (findEyes)
                        {
                            #region Detect Eyes

                            #region Left

                            var leftRegion = new Region2D
                            {
                                Location = new Point
                                {
                                    X = 0,
                                    Y = 0
                                },
                                Width  = faceRegion.Width / 2,
                                Height = faceRegion.Height / 2
                            };

                            IEnumerable <Region2D> leftEyes = FilterInstances.EyeFilter.GetResult
                                                                  (cropFace, new FacePartParameter {
                                Region = leftRegion
                            });

                            if (leftEyes.Any())
                            {
                                leftEyeRegion = new Region2D
                                {
                                    Location = new Point
                                    {
                                        X = (int)leftEyes.Average(item => item.Location.X) + leftRegion.Location.X,
                                        Y = (int)leftEyes.Average(item => item.Location.Y) + leftRegion.Location.Y
                                    },
                                    Width  = (int)leftEyes.Average(item => item.Width),
                                    Height = (int)leftEyes.Average(item => item.Height)
                                };
                            }

                            #endregion

                            #region Right

                            var rightRegion = new Region2D
                            {
                                Location = new Point
                                {
                                    X = faceRegion.Width / 2,
                                    Y = 0,
                                },
                                Width  = faceRegion.Width / 2,
                                Height = faceRegion.Height / 2
                            };

                            IEnumerable <Region2D> rightEyes = FilterInstances.EyeFilter.GetResult
                                                                   (cropFace, new FacePartParameter {
                                Region = rightRegion
                            });

                            if (rightEyes.Any())
                            {
                                rightEyeRegion = new Region2D
                                {
                                    Location = new Point
                                    {
                                        X = (int)rightEyes.Average(item => item.Location.X) + rightRegion.Location.X,
                                        Y = (int)rightEyes.Average(item => item.Location.Y) + rightRegion.Location.Y
                                    },
                                    Width  = (int)rightEyes.Average(item => item.Width),
                                    Height = (int)rightEyes.Average(item => item.Height)
                                };
                            }

                            #endregion

                            #endregion

                            #region Calculate Face Angle

                            if (leftEyeRegion != null &&
                                rightEyeRegion != null)
                            {
                                int yCoord = (rightEyeRegion.Location.Y + rightEyeRegion.Height / 2)
                                             - (leftEyeRegion.Location.Y + leftEyeRegion.Height / 2);

                                int xCoord = (rightEyeRegion.Location.X + rightEyeRegion.Width / 2 + leftRegion.Width)
                                             - (leftEyeRegion.Location.X + leftEyeRegion.Width / 2);

                                // rotation angle between the eye centers, converted to degrees
                                angle = -Math.Atan2(yCoord, xCoord) * (180.0 / Math.PI);
                            }

                            #endregion
                        }

                        if (findMouthRegions)
                        {
                            #region Mouth

                            var mouthSearchRegion = new Region2D
                            {
                                Location = new Point
                                {
                                    X = 0,
                                    Y = faceRegion.Height / 2
                                },
                                Width  = faceRegion.Width,
                                Height = faceRegion.Height / 2
                            };

                            IEnumerable <Region2D> mouths = FilterInstances.MouthFilter.GetResult(cropFace, new FacePartParameter
                            {
                                Region = mouthSearchRegion
                            });

                            if (mouths.Any())
                            {
                                mouthRegion = new Region2D
                                {
                                    Location = new Point
                                    {
                                        X = (int)mouths.Average(item => item.Location.X) + mouthSearchRegion.Location.X,
                                        Y = (int)mouths.Average(item => item.Location.Y) + mouthSearchRegion.Location.Y
                                    },
                                    Width  = (int)mouths.Average(item => item.Width),
                                    Height = (int)mouths.Average(item => item.Height)
                                };
                            }

                            #endregion
                        }

                        #endregion

                        faceImage = FilterInstances.ResizeFilter.GetResult(cropFace, new ResizeParameter {
                            NewHeight = faceHeight, NewWidth = faceWidth
                        });
                    }

                    ret.Add(new FaceRegion2D
                    {
                        Face        = faceRegion,
                        LeftEye     = leftEyeRegion,
                        RightEye    = rightEyeRegion,
                        Mouth       = mouthRegion,
                        EyeAngle    = angle,
                        FaceImage   = (Image <Gray, byte>)faceImage,
                        SourceImage = (gray as Image <Gray, byte>) ?? ((GpuImage <Gray, byte>)grayGpu).ToImage()
                    });
                }
            }

            return(ret.ToArray());
        }
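A possible call site for GetFaceRegion2Ds above; the 100x100 face size is an arbitrary choice:

        public static void PrintFaceRegions(Image<Gray, byte> gray)
        {
            FaceRegion2D[] regions = GetFaceRegion2Ds(gray, 100, 100);
            foreach (FaceRegion2D region in regions)
            {
                Console.WriteLine("Face at {0}, eye angle {1:F1} degrees",
                                  region.Face.Location, region.EyeAngle);
            }
        }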
 /// <summary>
 /// Computes disparity map for the input rectified stereo pair.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(GpuImage<Gray, Byte> left, GpuImage<Gray, Byte> right, GpuImage<Gray, Byte> disparity, Stream stream)
 {
     GpuStereoBMFindStereoCorrespondence(_ptr, left, right, disparity, stream);
 }
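A hedged usage sketch for the stereo fragment above. The enclosing class name, GpuStereoBM, is inferred from the native call and is an assumption, as is prior allocation of the rectified pair and the disparity image:

 static void ComputeDisparity(GpuStereoBM stereo,
                              GpuImage<Gray, Byte> left,
                              GpuImage<Gray, Byte> right,
                              GpuImage<Gray, Byte> disparity)
 {
     //null stream => synchronous (blocking) call, as documented above
     stereo.FindStereoCorrespondence(left, right, disparity, null);
 }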
Example #38
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image <Bgr, Byte> Draw(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage, out long matchTime)
        {
            Stopwatch        watch;
            HomographyMatrix homography = null;

            SURFDetector     surfCPU = new SURFDetector(500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;

            Matrix <byte> mask;
            int           k = 2;
            double        uniquenessThreshold = 0.8;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))
                                                        using (Stream stream = new Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //GPU implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                                                                }

                                                            observedKeyPoints = new VectorOfKeyPoint();
                                                            surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                            //wait for the stream to complete its tasks
                                                            //We can perform other CPU-intensive work here while we are waiting for the stream to complete.
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                                                                }
                                                            }

                                                            watch.Stop();
                                                        }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                Matrix <float>            observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
                BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix <int>(observedDescriptors.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                }

                watch.Stop();
            }

            //Draw the matched keypoints
            Image <Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                                                     indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);

                result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
            }
            #endregion

            matchTime = watch.ElapsedMilliseconds;

            return(result);
        }
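A minimal usage sketch for the method above. The method header is not visible in this excerpt, so the name `Draw`, its exact signature, and the image file names are assumptions; the tail only guarantees that the helper returns the DrawMatches result and reports the match time in milliseconds.

        long matchTime;
        using (Image<Gray, Byte> model = new Image<Gray, Byte>("box.png"))          // placeholder file
        using (Image<Gray, Byte> scene = new Image<Gray, Byte>("box_in_scene.png")) // placeholder file
        {
            Image<Bgr, Byte> match = Draw(model, scene, out matchTime);             // hypothetical name/signature
            ImageViewer.Show(match, String.Format("Matched in {0} ms", matchTime));
        }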
Example #39
0
        public StereoSystem(string CalibrationMatFile, string CalibrationMatFile_Rectified)
        {
            MatFileReader mfr = new MatFileReader(CalibrationMatFile);
            MatFileReader mfr2 = new MatFileReader(CalibrationMatFile_Rectified);
            double[,] A1 = MLDoubleParser("KK_left_new", mfr2);
            double[,] A2 = MLDoubleParser("KK_right_new", mfr2);
            double[,] kc1 = MLDoubleParser("kc_left", mfr);
            double[,] kc2 = MLDoubleParser("kc_right", mfr);
            double[,] fc1 = MLDoubleParser("fc_left", mfr);
            double[,] fc2 = MLDoubleParser("fc_right", mfr);
            double[,] cc1 = MLDoubleParser("cc_left", mfr);
            double[,] cc2 = MLDoubleParser("cc_right", mfr);
            double[,] R = MLDoubleParser("R", mfr);
            double[,] T = MLDoubleParser("T", mfr);
            double[,] nx = MLDoubleParser("nx", mfr);
            double[,] ny = MLDoubleParser("ny", mfr);
            int ImageWidth = (int)nx[0, 0];
            int ImageHeight = (int)ny[0, 0];

            LeftCam = new CameraParameters(A1, kc1, fc1, cc1, ImageWidth, ImageHeight);
            RightCam = new CameraParameters(A2, kc2, fc2, cc2, ImageWidth, ImageHeight);
            TranslationVector = new Matrix<double>(T);
            RotationMatrix = new Matrix<double>(R);
            DisparityDepthMapMatrix = new Matrix<double>(4, 4);
            ImageSize = new Size(ImageWidth, ImageHeight);

            CvInvoke.cvStereoRectify(
                LeftCam.CameraMatrix.Ptr,
                RightCam.CameraMatrix.Ptr,
                LeftCam.distCoeffs.Ptr,
                RightCam.distCoeffs.Ptr,
                ImageSize,
                RotationMatrix.Ptr,
                TranslationVector.Ptr,
                LeftCam.RectificationTransform.Ptr,
                RightCam.RectificationTransform.Ptr,
                LeftCam.ProjectionMatrix.Ptr,
                RightCam.ProjectionMatrix.Ptr,
                DisparityDepthMapMatrix.Ptr,
                Emgu.CV.CvEnum.STEREO_RECTIFY_TYPE.DEFAULT,
                1,
                Size.Empty,
                ref LeftCam.ROI,
                ref RightCam.ROI);

            LeftCam.UndistortRectifyMap();
            RightCam.UndistortRectifyMap();

            var Variables = new List<MLArray>();
            int[] Dims = {ImageHeight, ImageWidth};
            Variables.Add(MLVariableCreator("LeftMapX", LeftCam.mapx));
            Variables.Add(MLVariableCreator("RightMapX", RightCam.mapx));
            Variables.Add(MLVariableCreator("LeftMapY", LeftCam.mapy));
            Variables.Add(MLVariableCreator("RightMapY", RightCam.mapy));
            var Writer = new MatFileWriter("ImageMap.mat", Variables, false);
            DisparityMapGPU = new GpuImage<Gray, byte>(ImageHeight, ImageWidth); // GpuImage takes (rows, cols)
            DisparityMap = new Image<Gray, byte>(ImageWidth, ImageHeight);       // Image takes (width, height)
            GPU_Block_Matcher = new GpuStereoBM(NumDisparities, BlockSize);
        }
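A hypothetical construction call for the StereoSystem class above. The .mat file names are placeholders; judging by the variable names (KK, kc, fc, cc, R, T), the constructor expects a MATLAB stereo-calibration result file plus its rectified counterpart, and as a side effect it writes the four remap tables to "ImageMap.mat".

        // Both file names below are placeholders.
        var stereo = new StereoSystem("Calib_Results_stereo.mat",
                                      "Calib_Results_stereo_rectified.mat");
        // After construction the rectification maps, the reprojection matrix
        // (DisparityDepthMapMatrix) and the GPU block matcher are ready to use.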
Example #40
0
        private void ProcessFrame(object sender, EventArgs arg)
        {
            Image <Bgr, Byte>  frame         = _capture.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            Image <Gray, Byte> grayframe     = frame.Convert <Gray, Byte>();
            Image <Gray, Byte> modelImage    = new Image <Gray, byte>("DataPlate/" + 10 + ".jpg");
            Image <Gray, Byte> observedImage = grayframe;
            Stopwatch          watch;
            HomographyMatrix   homography = null;
            SURFDetector       surfCPU    = new SURFDetector(500, false);
            VectorOfKeyPoint   modelKeyPoints;
            VectorOfKeyPoint   observedKeyPoints;
            Matrix <int>       indices;
            Matrix <float>     dist;
            Matrix <byte>      mask;


            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))

                    #region SURF
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, 2, 1))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuMatchIndices.Size, 1))
                                                {
                                                    observedKeyPoints = new VectorOfKeyPoint();
                                                    surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);
                                                    matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);
                                                    indices = new Matrix <int>(gpuMatchIndices.Size);
                                                    dist    = new Matrix <float>(indices.Size);
                                                    gpuMatchIndices.Download(indices);
                                                    gpuMatchDist.Download(dist);

                                                    mask = new Matrix <byte>(dist.Rows, 1);

                                                    mask.SetValue(255);

                                                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                                                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                                                    if (nonZeroCount >= 4)
                                                    {
                                                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                        if (nonZeroCount >= 4)
                                                        {
                                                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                                                        }
                                                    }

                                                    watch.Stop();
                                                }
                            }
                #endregion
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                //MKeyPoint[] kpts = modelKeyPoints.ToArray();
                Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);
                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                Matrix <float>    observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
                BruteForceMatcher matcher             = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
                matcher.Add(modelDescriptors);
                int k = 2;
                indices = new Matrix <int>(observedDescriptors.Rows, k);

                dist = new Matrix <float>(observedDescriptors.Rows, k);
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

                mask = new Matrix <byte>(dist.Rows, 1);

                mask.SetValue(255);

                Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 20)
                {
                    nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 20)
                    {
                        homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                        XMLData();
                    }
                    else
                    {
                        textBox1.Text = string.Empty;
                        textBox2.Text = string.Empty;
                        textBox3.Text = string.Empty;
                        textBox4.Text = string.Empty;
                        textBox5.Text = string.Empty;
                    }
                }
                watch.Stop();
                #region draw the projected region on the image
                if (homography != null)
                {  //draw a rectangle along the projected model
                    Rectangle rect = modelImage.ROI;
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    homography.ProjectPoints(pts);
                    frame.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 2);
                }
                #endregion
                CaptureImageBox.Image = frame;
                DataImageBox.Image    = modelImage;
            }
        }
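A sketch of how a handler with this (object, EventArgs) signature is typically driven in Emgu 2.x WinForms samples; the `_capture` initialization and the use of Application.Idle are assumptions, since only the handler body is shown here.

        _capture = new Capture();         // assumed: open the default camera
        Application.Idle += ProcessFrame; // assumed: re-run the handler whenever the UI is idle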
Example #41
0
        private void DetectFaceAndEyes(Image <Bgr, byte> image, List <Rectangle> faces, List <Rectangle> eyes)
        {
#if !IOS
            if (GpuInvoke.HasCuda)
            {
                using (var face = new GpuCascadeClassifier(_faceFileName))
                    using (var eye = new GpuCascadeClassifier(_eyeFileName))
                    {
                        using (var gpuImage = new GpuImage <Bgr, byte>(image))
                            using (var gpuGray = gpuImage.Convert <Gray, byte>())
                            {
                                var faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                                faces.AddRange(faceRegion);
                                foreach (var f in faceRegion)
                                {
                                    using (var faceImg = gpuGray.GetSubRect(f))
                                    {
                                        //For some reason a clone is required.
                                        //Might be a bug of GpuCascadeClassifier in opencv
                                        using (var clone = faceImg.Clone())
                                        {
                                            var eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);

                                            foreach (var e in eyeRegion)
                                            {
                                                var eyeRect = e;
                                                eyeRect.Offset(f.X, f.Y);
                                                eyes.Add(eyeRect);
                                            }
                                        }
                                    }
                                }
                            }
                    }
            }
            else
#endif
            using (var face = new CascadeClassifier(_faceFileName))
                using (var eye = new CascadeClassifier(_eyeFileName))
                {
                    using (var gray = image.Convert <Gray, byte>()) //Convert it to Grayscale
                    {
                        //normalizes brightness and increases contrast of the image
                        gray._EqualizeHist();

                        //Detect the faces from the gray-scale image and store the locations as rectangles
                        //The first dimension is the channel
                        //The second dimension is the index of the rectangle in the specific channel
                        var facesDetected = face.DetectMultiScale(
                            gray,
                            1.1,
                            10,
                            new Size(20, 20),
                            Size.Empty);
                        faces.AddRange(facesDetected);

                        foreach (var f in facesDetected)
                        {
                            //Set the region of interest on the faces
                            gray.ROI = f;
                            var eyesDetected = eye.DetectMultiScale(
                                gray,
                                1.1,
                                10,
                                new Size(20, 20),
                                Size.Empty);
                            gray.ROI = Rectangle.Empty;

                            foreach (var e in eyesDetected)
                            {
                                var eyeRect = e;
                                eyeRect.Offset(f.X, f.Y);
                                eyes.Add(eyeRect);
                            }
                        }
                    }
                }
        }
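A short usage sketch for the detector above; the image path and drawing colors are placeholders, and _faceFileName/_eyeFileName are assumed to be set elsewhere in the class.

        var faces = new List<Rectangle>();
        var eyes = new List<Rectangle>();
        using (var img = new Image<Bgr, byte>("group_photo.jpg")) // placeholder path
        {
            DetectFaceAndEyes(img, faces, eyes);
            foreach (Rectangle f in faces)
                img.Draw(f, new Bgr(Color.Red), 2);  // outline each face
            foreach (Rectangle e in eyes)
                img.Draw(e, new Bgr(Color.Blue), 2); // outline each eye
        }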
Example #42
0
        public static void FindMatch(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix <int> indices, out Matrix <byte> mask, out HomographyMatrix homography)
        {
            int          k = 2;
            double       uniquenessThreshold = 0.8;
            SURFDetector surfCPU             = new SURFDetector(500, false);
            Stopwatch    watch;

            homography = null;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))
                                                        using (Stream stream = new Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //GPU implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                                                                }

                                                            observedKeyPoints = new VectorOfKeyPoint();
                                                            surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                            //wait for the stream to complete its tasks
                                                            //We can perform some other CPU-intensive work here while we are waiting for the stream to complete.
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                                                                }
                                                            }

                                                            watch.Stop();
                                                        }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix <float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix <float>            observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
                BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix <int>(observedDescriptors.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                }

                watch.Stop();
            }
            matchTime = watch.ElapsedMilliseconds;
        }
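A usage sketch that consumes FindMatch the same way the match-drawing example at the top of this section does, feeding its outputs to Features2DToolbox.DrawMatches; the file names are placeholders.

        long matchTime;
        VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
        Matrix<int> indices;
        Matrix<byte> mask;
        HomographyMatrix homography;

        using (Image<Gray, Byte> model = new Image<Gray, Byte>("box.png"))          // placeholder
        using (Image<Gray, Byte> scene = new Image<Gray, Byte>("box_in_scene.png")) // placeholder
        {
            FindMatch(model, scene, out matchTime, out modelKeyPoints, out observedKeyPoints,
                      out indices, out mask, out homography);

            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(model, modelKeyPoints,
                scene, observedKeyPoints, indices, new Bgr(255, 255, 255),
                new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);
        }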
Example #43
0
 /// <summary>
 /// Performs object detection with an increasing detection window.
 /// </summary>
 /// <param name="image">The GpuImage to search in</param>
 /// <param name="hitThreshold">The threshold for the distance between features and classifying plane.</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride.</param>
 /// <param name="padding">Mock parameter to keep CPU interface compatibility. Must be (0,0).</param>
 /// <param name="scale">Coefficient of the detection window increase.</param>
 /// <param name="groupThreshold">After detection some objects could be covered by many rectangles. This coefficient regulates similarity threshold. 0 means don't perform grouping.</param>
 /// <returns>The regions where positives are found</returns>
 public Rectangle[] DetectMultiScale(
  GpuImage<Gray, Byte> image,
  double hitThreshold,
  Size winStride,
  Size padding,
  double scale,
  int groupThreshold)
 {
     gpuHOGDescriptorDetectMultiScale(_ptr, image, _rectSeq, hitThreshold, winStride, padding, scale, groupThreshold);
     return _rectSeq.ToArray();
 }
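A hypothetical call site for the wrapper above. `hog`, `frame`, and `grayFrame` are assumed to exist (a descriptor whose SVM detector has already been set, plus a BGR frame and its grayscale copy); the argument values are the same defaults that the parameterless overload in example #45 below passes through.

        using (GpuImage<Gray, Byte> gpuGray = new GpuImage<Gray, byte>(grayFrame)) // grayFrame: assumed Image<Gray, Byte>
        {
            Rectangle[] people = hog.DetectMultiScale(gpuGray, 0, new Size(8, 8),
                                                      new Size(0, 0), 1.05, 2);
            foreach (Rectangle r in people)
                frame.Draw(r, new Bgr(Color.Green), 2); // frame: assumed Image<Bgr, Byte>
        }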
Example #44
0
        private void ProcessFrameFindFaces()
        {
            var stereoCalibration = Options.StereoCalibrationOptions;

            if (stereoCalibration == null)
            {
                return;
            }

            var leftImageR = new Image<Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height);
            var rightImageR = new Image<Gray, byte>(_cameras[1].Image.Width, _cameras[1].Image.Height);

            try
            {
                CvInvoke.cvRemap(_cameras[0].Image.Ptr, leftImageR.Ptr,
                    stereoCalibration.MapXLeft, stereoCalibration.MapYLeft, 0, new MCvScalar(0));

            }
            catch (Exception)
            {
                // cvRemap can fail before the calibration maps are valid; ignore and keep the blank image
            }

            CvInvoke.cvRemap(_cameras[1].Image.Ptr, rightImageR.Ptr,
                stereoCalibration.MapXRight, stereoCalibration.MapYRight, 0, new MCvScalar(0));

            // find first face points
            var leftFaceRegions = Helper2D.GetFaceRegion2Ds(leftImageR, FaceWidth, FaceHeight, true, true);
            var rightFaceRegions = Helper2D.GetFaceRegion2Ds(rightImageR, FaceWidth, FaceHeight, true, true);

            FaceRegion2D leftFace;
            FaceRegion2D rightFace;

            if (leftFaceRegions != null
                && rightFaceRegions != null
                && (leftFace = leftFaceRegions.FirstOrDefault()) != null
                && (rightFace = rightFaceRegions.FirstOrDefault()) != null)
            {

                if (leftFace.EyeAngle != 0)
                {
                    _leftRoll = leftFace.EyeAngle;
                }
                if (rightFace.EyeAngle != 0)
                {
                    _rightRoll = rightFace.EyeAngle;
                }

                var leftPoints = new Point[4]; // face location, left eye, right eye, mouth
                var rightPoints = new Point[4];

                #region Points

                // face
                leftPoints[0] = new Point(leftFace.Face.Location.X + leftFace.Face.Width / 2, leftFace.Face.Location.Y + leftFace.Face.Height / 2);
                rightPoints[0] = new Point(rightFace.Face.Location.X + rightFace.Face.Width / 2, rightFace.Face.Location.Y + rightFace.Face.Height / 2);

                // left eye
                if (leftFace.LeftEye != null && rightFace.LeftEye != null)
                {
                    leftPoints[1] = new Point(leftFace.Face.Location.X + leftFace.LeftEye.Location.X + leftFace.LeftEye.Width / 2,
                        leftFace.Face.Location.Y + leftFace.LeftEye.Location.Y + leftFace.LeftEye.Height / 2);

                    rightPoints[1] = new Point(rightFace.Face.Location.X + rightFace.LeftEye.Location.X + rightFace.LeftEye.Width / 2,
                        rightFace.Face.Location.Y + rightFace.LeftEye.Location.Y + rightFace.LeftEye.Height / 2);
                }

                // right eye
                if (leftFace.RightEye != null && rightFace.RightEye != null)
                {
                    leftPoints[2] = new Point(leftFace.Face.Location.X + leftFace.RightEye.Location.X + leftFace.RightEye.Width / 2,
                        leftFace.Face.Location.Y + leftFace.RightEye.Location.Y + leftFace.RightEye.Height / 2);

                    rightPoints[2] = new Point(rightFace.Face.Location.X + rightFace.RightEye.Location.X + rightFace.RightEye.Width / 2,
                        rightFace.Face.Location.Y + rightFace.RightEye.Location.Y + rightFace.RightEye.Height / 2);
                }

                // mouth
                if (leftFace.Mouth != null && rightFace.Mouth != null)
                {
                    leftPoints[3] = new Point(leftFace.Face.Location.X + leftFace.Mouth.Location.X + leftFace.Mouth.Width / 2,
                        leftFace.Face.Location.Y + leftFace.Mouth.Location.Y + leftFace.Mouth.Height / 2);

                    rightPoints[3] = new Point(rightFace.Face.Location.X + rightFace.Mouth.Location.X + rightFace.Mouth.Width / 2,
                        rightFace.Face.Location.Y + rightFace.Mouth.Location.Y + rightFace.Mouth.Height / 2);
                }

                #endregion

                #region Manual Point Cloud Calculation

                {
                    var pointCloud = new MCvPoint3D64f[leftPoints.Length];

                    #region Calculate Point Cloud

                    for (int i = 0; i < leftPoints.Length; i++)
                    {
                        if (leftPoints[i].X == 0 && leftPoints[i].Y == 0)
                        {
                            continue;
                        }

                        var d = rightPoints[i].X - leftPoints[i].X;

                        var X = leftPoints[i].X * stereoCalibration.Q[0, 0] + stereoCalibration.Q[0, 3];
                        var Y = leftPoints[i].Y * stereoCalibration.Q[1, 1] + stereoCalibration.Q[1, 3];
                        var Z = stereoCalibration.Q[2, 3];
                        var W = d * stereoCalibration.Q[3, 2] + stereoCalibration.Q[3, 3];

                        X = X / W;
                        Y = Y / W;
                        Z = Z / W;

                        leftImageR.Draw(string.Format("{0:0.0} {1:0.0} {2:0.0}", X, Y, Z), ref _font, leftPoints[i], new Gray(255));
                        rightImageR.Draw(string.Format("{0:0.0} {1:0.0} {2:0.0}", X, Y, Z), ref _font, rightPoints[i], new Gray(255));

                        pointCloud[i] = new MCvPoint3D64f(X, Y, Z);
                    }

                    #endregion
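
                    // Note: the loop above is the per-point form of OpenCV's
                    // reprojectImageTo3D, which computes [X Y Z W]^T = Q * [x y d 1]^T
                    // and returns (X/W, Y/W, Z/W). Q comes from cvStereoRectify; its
                    // off-diagonal terms in the first two rows are zero, which is why
                    // only diagonal and last-column entries appear in the expressions above.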

                    _foundFace3d = new Face3D()
                    {
                        Location = pointCloud[0].x == 0 && pointCloud[0].y == 0 && pointCloud[0].z == 0 ? (MCvPoint3D64f?)null : pointCloud[0],
                        LeftEye = pointCloud[1].x == 0 && pointCloud[1].y == 0 && pointCloud[1].z == 0 ? (MCvPoint3D64f?)null : pointCloud[1],
                        RightEye = pointCloud[2].x == 0 && pointCloud[2].y == 0 && pointCloud[2].z == 0 ? (MCvPoint3D64f?)null : pointCloud[2],
                        Mouth = pointCloud[3].x == 0 && pointCloud[3].y == 0 && pointCloud[3].z == 0 ? (MCvPoint3D64f?)null : pointCloud[3],
                    };

                    if (_foundFace3d.LeftEye != null
                        && _foundFace3d.RightEye != null
                        && _foundFace3d.Mouth != null)
                    {
                        var srcMatrix = new Matrix<float>(3,4);

                        srcMatrix[0, 0] = (float)_foundFace3d.LeftEye.Value.x;
                        srcMatrix[1, 0] = (float)_foundFace3d.LeftEye.Value.y;
                        srcMatrix[2, 0] = (float)_foundFace3d.LeftEye.Value.z;

                        srcMatrix[0, 1] = (float)_foundFace3d.RightEye.Value.x;
                        srcMatrix[1, 1] = (float)_foundFace3d.RightEye.Value.y;
                        srcMatrix[2, 1] = (float)_foundFace3d.RightEye.Value.z;

                        srcMatrix[0, 2] = (float)_foundFace3d.Mouth.Value.x;
                        srcMatrix[1, 2] = (float)_foundFace3d.Mouth.Value.y;
                        srcMatrix[2, 2] = (float)_foundFace3d.Mouth.Value.z;

                        srcMatrix[0, 3] = (float)_foundFace3d.Location.Value.x;
                        srcMatrix[1, 3] = (float)_foundFace3d.Location.Value.y;
                        srcMatrix[2, 3] = (float)_foundFace3d.Location.Value.z;

                        var dstMatrix = new Matrix<float>(3, 4);

                        dstMatrix[0, 0] = (float)_foundFace3d.LeftEye.Value.x;
                        dstMatrix[1, 0] = (float)_foundFace3d.LeftEye.Value.y;
                        dstMatrix[2, 0] = (float)30;

                        dstMatrix[0, 1] = (float)_foundFace3d.RightEye.Value.x;
                        dstMatrix[1, 1] = (float)_foundFace3d.RightEye.Value.y;
                        dstMatrix[2, 1] = (float)30;

                        dstMatrix[0, 2] = (float)_foundFace3d.Mouth.Value.x;
                        dstMatrix[1, 2] = (float)_foundFace3d.Mouth.Value.y;
                        dstMatrix[2, 2] = (float)30;

                        dstMatrix[0, 3] = (float)_foundFace3d.Location.Value.x;
                        dstMatrix[1, 3] = (float)_foundFace3d.Location.Value.y;
                        dstMatrix[2, 3] = (float)30;

                        HomographyMatrix homographyMatrix = CameraCalibration.FindHomography(srcMatrix, dstMatrix, HOMOGRAPHY_METHOD.DEFAULT, 1);

                        if (homographyMatrix != null)
                        {
                            try
                            {
                                leftImageR = leftImageR.WarpPerspective(homographyMatrix, INTER.CV_INTER_LINEAR, WARP.CV_WARP_DEFAULT, new Gray(0));
                            }
                            catch (Exception)
                            {
                                // WarpPerspective can fail for a degenerate homography; keep the unwarped image
                            }
                        }

                    }
                }

                #endregion

                #region Automatic Point Cloud

                {
                    _imagePointsDisparity = new Image<Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height);
                    _imagePointsLeft = new Image<Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height, new Gray(255));
                    _imagePointsRight = new Image<Gray, byte>(_cameras[0].Image.Width, _cameras[0].Image.Height, new Gray(255));

                    for (int i = 0; i < leftPoints.Length; i++)
                    {
                        if (leftPoints[i].X == 0 && leftPoints[i].Y == 0)
                        {
                            continue;
                        }

                        _imagePointsLeft.Draw(new Rectangle(new Point(leftPoints[i].X, leftPoints[i].Y), new Size(10, 10)), new Gray(0), 10);
                        _imagePointsRight.Draw(new Rectangle(new Point(rightPoints[i].X, rightPoints[i].Y), new Size(10, 10)), new Gray(0), 10);
                    }

                    var imagePointsDisparityGpu = new GpuImage<Gray, byte>(_imagePointsDisparity);

                    _stereoSolver.FindStereoCorrespondence(new GpuImage<Gray, byte>(_imagePointsLeft), new GpuImage<Gray, byte>(_imagePointsRight),
                        imagePointsDisparityGpu, null);

                    _imagePointsDisparity = imagePointsDisparityGpu.ToImage();

                    //MCvPoint3D32f[] pointCloud = PointCollection.ReprojectImageTo3D(_imagePointsDisparity, stereoCalibration.Q);

                    //var filteredPointCloud = pointCloud.
                    //    Where(item => item.z != 10000).
                    //    GroupBy(item => item.z).
                    //    Select(item => new
                    //    {
                    //        z = item.Key,
                    //        x = item.Average(point => point.x),
                    //        y = item.Average(point => point.y)
                    //    }).ToArray();

                    //for (int i = 0; i < filteredPointCloud.Length; i++)
                    //{
                    //    _imagePointsDisparity.Draw(string.Format("{0:0.0} {1:0.0} {2:0.0}", filteredPointCloud[i].x, filteredPointCloud[i].y, filteredPointCloud[i].z),
                    //        ref _font, new Point((int)filteredPointCloud[i].x, (int)filteredPointCloud[i].y), new Gray(255));
                    //}
                }

                #endregion
            }

            var oldLeft = _cameras[0].Image;
            var oldRight = _cameras[1].Image;

            _cameras[0].Image = leftImageR;
            _cameras[1].Image = rightImageR;

            oldLeft.Dispose();
            oldRight.Dispose();
        }
Example #45
0
 /// <summary>
 /// Performs object detection with an increasing detection window.
 /// </summary>
 /// <param name="image">The GpuImage to search in</param>
 /// <returns>The regions where positives are found</returns>
 public Rectangle[] DetectMultiScale(GpuImage<Gray, Byte> image)
 {
     return DetectMultiScale(image, 0, new Size(8, 8), new Size(0, 0), 1.05, 2);
 }
        public void classify(BitmapSource frame)
        {
            Console.WriteLine(relativeURI);

            //byte[] classifiedImage = frame;
            //WriteableBitmap frameImage = new WriteableBitmap(frameWidth, frameHeight, 96, 96, PixelFormats.Bgr32, null);

            //BitmapSource frameImage = BitmapSource.Create(frameWidth, frameHeight, 96, 96, PixelFormats.Bgr32, null, frame, stride);

            /*
            resultsPtr = CvInvoke.cvHaarDetectObjects(
                Marshal.GetIUnknownForObject(frame),
                classifier,
                resultsPtr,
                1.1,
                3,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new System.Drawing.Size(0,0),
                new System.Drawing.Size(0,0)
            );

            Console.WriteLine("Classified?!? Pointer below: ");
            Console.WriteLine(resultsPtr.ToString());
            */
            //return classifiedImage;
            Console.WriteLine(" - - - Converting Bitmap...");
            System.Drawing.Bitmap bitmapFrame;
            using (MemoryStream outStream = new MemoryStream())
            {
                BitmapEncoder enc = new BmpBitmapEncoder();
                enc.Frames.Add(BitmapFrame.Create(frame));
                enc.Save(outStream);
                bitmapFrame = new System.Drawing.Bitmap(outStream);
            }
            Console.WriteLine(" - - - Bitmap converted!");

            Image<Bgr, Byte> image = new Image<Bgr, Byte>(bitmapFrame);

            Console.WriteLine(" - - - Image set");
            Console.WriteLine(" - - - Check CUDA...");

            if (GpuInvoke.HasCuda)
            {
                Console.WriteLine(" - - - Has CUDA!");
                using (GpuCascadeClassifier target = new GpuCascadeClassifier(classifierURI))
                {
                    using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
                    using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                    {
                        Console.WriteLine(" - - - Detecting!");
                        Rectangle[] targetSet = target.DetectMultiScale(gpuGray, 1.1, 10, System.Drawing.Size.Empty);
                        Console.WriteLine(" - - - Detected :D :D :D Printing rectangle set: ");
                        foreach (Rectangle f in targetSet)
                        {
                            Console.WriteLine("Rectangle found at: " + f.ToString());
                            //draw the face detected in the 0th (gray) channel with blue color
                            image.Draw(f, new Bgr(System.Drawing.Color.Blue), 2);
                        }
                        Console.WriteLine(" - - - DONE");
                    }
                }

            }
            else
            {

                using (HOGDescriptor des = new HOGDescriptor())
                {
                    //des.SetSVMDetector
                }

                Console.WriteLine(" - - - No CUDA  :( ");
                Console.WriteLine(" - - - Devices available: " + GpuInvoke.GetCudaEnabledDeviceCount());
            }
        }
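A sketch of invoking classify with a WPF image source: BitmapImage derives from BitmapSource, so a file-backed image works for a quick test. The class name, instance name, and path below are placeholders.

            var detector = new Classifier();  // placeholder for whatever class hosts classify()
            var src = new BitmapImage(new Uri("test_frame.jpg", UriKind.Relative)); // placeholder path
            detector.classify(src);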
        public static void FindMatch(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix<int> indices, out Matrix<byte> mask, out HomographyMatrix homography)
        {
            int k = 2;
            double uniquenessThreshold = 0.8;
            SURFDetector surfCPU = new SURFDetector(500, false);
            Stopwatch watch;
            homography = null;
#if !IOS
            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
                //extract features from the object image
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))
                {
                    modelKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte>(gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream())
                    {
                        matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int>(gpuMatchIndices.Size);
                        mask = new Matrix<byte>(gpuMask.Size);

                        //GPU implementation of VoteForUniqueness
                        using (GpuMat<float> col0 = gpuMatchDist.Col(0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col(1))
                        {
                            GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint();
                        surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks
                        //We can perform some other CPU-intensive work here while we are waiting for the stream to complete.
                        stream.WaitForCompletion();

                        gpuMask.Download(mask);
                        gpuMatchIndices.Download(indices);

                        if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                        {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                        watch.Stop();
                    }
                }
            }
            else
#endif
            {
                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix<int>(observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }

                watch.Stop();
            }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #48
0
        public static void Detect(Image <Bgr, Byte> image, String faceFileName, String eyeFileName, List <Rectangle> faces, List <Rectangle> eyes, out long detectionTime)
        {
            Stopwatch watch;

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
                    using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
                    {
                        watch = Stopwatch.StartNew();
                        using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
                            using (GpuImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
                            {
                                Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.4, 4, Size.Empty);
                                faces.AddRange(faceRegion);
                                foreach (Rectangle f in faceRegion)
                                {
                                    using (GpuImage <Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                                    {
                                        //For some reason a clone is required.
                                        //Might be a bug of GpuCascadeClassifier in opencv
                                        using (GpuImage <Gray, Byte> clone = faceImg.Clone(null))
                                        {
                                            Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.4, 4, Size.Empty);

                                            foreach (Rectangle e in eyeRegion)
                                            {
                                                //Rectangle eyeRect = e;
                                                //eyeRect.Offset(f.X, f.Y);
                                                //eyes.Add(eyeRect);
                                            }
                                        }
                                    }
                                }
                            }
                        watch.Stop();
                    }
            }
            else
            {
                //Read the HaarCascade objects
                using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                    using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
                    {
                        watch = Stopwatch.StartNew();
                        using (Image <Gray, Byte> gray = image.Convert <Gray, Byte>()) //Convert it to Grayscale
                        {
                            //normalizes brightness and increases contrast of the image
                            gray._EqualizeHist();

                            //Detect the faces from the gray-scale image and store the locations as rectangles
                            //The first dimension is the channel
                            //The second dimension is the index of the rectangle in the specific channel
                            Rectangle[] facesDetected = face.DetectMultiScale(
                                gray,
                                1.3,
                                4,
                                new Size(20, 20),
                                Size.Empty);
                            faces.AddRange(facesDetected);

                            foreach (Rectangle f in facesDetected)
                            {
                                //Set the region of interest on the faces
                                gray.ROI = f;
                                Rectangle[] eyesDetected = eye.DetectMultiScale(
                                    gray,
                                    1.4,
                                    10,
                                    new Size(20, 20),
                                    Size.Empty);
                                gray.ROI = Rectangle.Empty;

                                foreach (Rectangle e in eyesDetected)
                                {
                                    //Rectangle eyeRect = e;
                                    //eyeRect.Offset(f.X, f.Y);
                                    //eyes.Add(eyeRect);
                                }
                            }
                        }
                        watch.Stop();
                    }
            }
            detectionTime = watch.ElapsedMilliseconds;
        }
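A brief usage sketch for Detect above; the cascade file names are the standard ones shipped with OpenCV and serve as placeholders here. Note that as written the eye rectangles are never collected, since the offset/add lines in both branches are commented out.

        List<Rectangle> faces = new List<Rectangle>();
        List<Rectangle> eyes = new List<Rectangle>();
        long detectionTime;
        using (Image<Bgr, Byte> img = new Image<Bgr, Byte>("lena.jpg")) // placeholder path
        {
            Detect(img, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml",
                   faces, eyes, out detectionTime);
            // faces now holds the detected face rectangles; eyes stays empty (see note above)
        }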
Example #49
0
        private Image <Bgr, byte> Match(Image <Bgr, byte> image1, Image <Bgr, byte> image2, int flag)
        {
            HomographyMatrix homography      = null;
            SURFDetector     surfDetectorCPU = new SURFDetector(500, false);

            int    k = 2;           //number of matches that we want to find between image1 and image2
            double uniquenessThreshold = 0.8;

            Matrix <int>  indices;
            Matrix <byte> mask;

            VectorOfKeyPoint KeyPointsImage1;
            VectorOfKeyPoint KeyPointsImage2;

            Image <Gray, Byte> Image1G = image1.Convert <Gray, Byte>();
            Image <Gray, Byte> Image2G = image2.Convert <Gray, Byte>();

            if (GpuInvoke.HasCuda)      //Using CUDA, the GPU can be used for general-purpose processing (i.e., not exclusively graphics), which speeds up performance
            {
                Console.WriteLine("Here");
                GpuSURFDetector surfDetectorGPU = new GpuSURFDetector(surfDetectorCPU.SURFParams, 0.01f);

                // extract features from Image1
                using (GpuImage <Gray, Byte> gpuImage1 = new GpuImage <Gray, byte>(Image1G))                                                     //convert CPU input image to GPUImage(greyscale)
                    using (GpuMat <float> gpuKeyPointsImage1 = surfDetectorGPU.DetectKeyPointsRaw(gpuImage1, null))                              //find key points for image
                        using (GpuMat <float> gpuDescriptorsImage1 = surfDetectorGPU.ComputeDescriptorsRaw(gpuImage1, null, gpuKeyPointsImage1)) //calculate descriptor for each key point
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))                     //create a new matcher object
                            {
                                KeyPointsImage1 = new VectorOfKeyPoint();
                                surfDetectorGPU.DownloadKeypoints(gpuKeyPointsImage1, KeyPointsImage1);                                 //copy the Matrix from GPU to CPU

                                // extract features from Image2
                                using (GpuImage <Gray, Byte> gpuImage2 = new GpuImage <Gray, byte>(Image2G))
                                    using (GpuMat <float> gpuKeyPointsImage2 = surfDetectorGPU.DetectKeyPointsRaw(gpuImage2, null))
                                        using (GpuMat <float> gpuDescriptorsImage2 = surfDetectorGPU.ComputeDescriptorsRaw(gpuImage2, null, gpuKeyPointsImage2))

                                            //for each descriptor of image2, we find the k best matching points and their distances from image1's descriptors

                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuDescriptorsImage2.Size.Height, k, 1, true))      //stores indices of k best matches
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuDescriptorsImage2.Size.Height, k, 1, true)) //stores distance of k best matches

                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))               //stores result of comparison
                                                        using (Stream stream = new Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuDescriptorsImage2, gpuDescriptorsImage1, gpuMatchIndices, gpuMatchDist, k, null, stream); //matching descriptors of image2 to image1 and storing the k best indices and corresponding distances

                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //GPU implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream); //passing the stream makes the call run asynchronously
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);         //a match is kept only when col0 <= 0.8 * col1, i.e. the best match is clearly better than the second best
                                                                }

                                                            KeyPointsImage2 = new VectorOfKeyPoint();
                                                            surfDetectorGPU.DownloadKeypoints(gpuKeyPointsImage2, KeyPointsImage2);

                                                            //wait for the stream to complete its tasks
                                                            //We can perform some other CPU intesive stuffs here while we are waiting for the stream to complete.
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4) //count the surviving matches in the mask (the result of the col0 <= uniquenessThreshold * col1 comparison)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(KeyPointsImage1, KeyPointsImage2, indices, mask, 1.5, 20);
                                                                //a homography matrix can be created only if we have at least 4 matching points
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(KeyPointsImage1, KeyPointsImage2, indices, mask, 2);
                                                                }
                                                            }
                                                        }
                            }
            }
            else
            {
                Console.WriteLine("No CUDA");
                //extract features from image1
                KeyPointsImage1 = new VectorOfKeyPoint();
                Matrix <float> DescriptorsImage1 = surfDetectorCPU.DetectAndCompute(Image1G, null, KeyPointsImage1);

                //extract features from image2
                KeyPointsImage2 = new VectorOfKeyPoint();
                Matrix <float>            DescriptorsImage2 = surfDetectorCPU.DetectAndCompute(Image2G, null, KeyPointsImage2);
                BruteForceMatcher <float> matcher           = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(DescriptorsImage1);

                indices = new Matrix <int>(DescriptorsImage2.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(DescriptorsImage2.Rows, k))
                {
                    matcher.KnnMatch(DescriptorsImage2, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(KeyPointsImage1, KeyPointsImage2, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(KeyPointsImage1, KeyPointsImage2, indices, mask, 2);
                    }
                }
            }
            Image <Bgr, Byte> mImage = image1.Convert <Bgr, Byte>();
            Image <Bgr, Byte> oImage = image2.Convert <Bgr, Byte>();
            Image <Bgr, Byte> result = new Image <Bgr, byte>(mImage.Width + oImage.Width, mImage.Height);

            //Image<Bgr, Byte> temp = Features2DToolbox.DrawMatches(image1, KeyPointsImage1, image2, KeyPointsImage2, indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = image1.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };

                homography.ProjectPoints(pts);

                HomographyMatrix origin = new HomographyMatrix();                //copy the left image using an identity homography (no actual shift applied at the origin)
                origin.SetIdentity();
                origin.Data[0, 2] = 0;
                origin.Data[1, 2] = 0;
                Image <Bgr, Byte> mosaic = new Image <Bgr, byte>(mImage.Width + oImage.Width, mImage.Height * 2);

                Image <Bgr, byte> warp_image = mosaic.Clone();
                mosaic = mImage.WarpPerspective(origin, mosaic.Width, mosaic.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_DEFAULT, new Bgr(0, 0, 0));

                warp_image = oImage.WarpPerspective(homography, warp_image.Width, warp_image.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Bgr(200, 0, 0));
                Image <Gray, byte> warp_image_mask = oImage.Convert <Gray, byte>();
                warp_image_mask.SetValue(new Gray(255));
                Image <Gray, byte> warp_mosaic_mask = mosaic.Convert <Gray, byte>();
                warp_mosaic_mask.SetZero();
                warp_mosaic_mask = warp_image_mask.WarpPerspective(homography, warp_mosaic_mask.Width, warp_mosaic_mask.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Gray(0));

                warp_image.Copy(mosaic, warp_mosaic_mask);
                if (flag == 1)
                {
                    Console.WriteLine("Using Image Blending");
                    return(blend(mosaic, warp_image, warp_mosaic_mask, 2));
                }
                else
                {
                    Console.WriteLine("No Image Blending");
                    return(mosaic);
                }
            }
            return(null);
        }
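
The GPU branch above implements the uniqueness (ratio) test with GpuInvoke.Multiply and GpuInvoke.Compare. For reference, here is a minimal CPU-side sketch of the equivalent test, assuming a k = 2 distance matrix; the method name is illustrative and not part of the sample:

        static void VoteForUniquenessCpu(Matrix<float> dist, double uniquenessThreshold, Matrix<byte> mask)
        {
            //keep a match only when its best distance is sufficiently
            //smaller than the second best: dist0 <= threshold * dist1
            for (int i = 0; i < dist.Rows; i++)
            {
                mask.Data[i, 0] = (dist.Data[i, 0] <= uniquenessThreshold * dist.Data[i, 1])
                    ? (byte)255
                    : (byte)0;
            }
        }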
Example #50
0
        static void Run()
        {
            Image<Bgr, Byte> image = new Image<Bgr, byte>("lena.jpg"); //Read the file as an 8-bit Bgr image

            Stopwatch watch;
            String faceFileName = "haarcascade_frontalface_default.xml";
            String eyeFileName = "haarcascade_eye.xml";

            if (GpuInvoke.HasCuda)
            {
                using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
                using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
                    using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                    {
                        Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                        foreach (Rectangle f in faceRegion)
                        {
                            //draw the face detected in the 0th (gray) channel with blue color
                            image.Draw(f, new Bgr(Color.Blue), 2);
                            using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                            {
                                //For some reason a clone is required.
                                //Might be a bug of GpuCascadeClassifier in opencv
                                using (GpuImage<Gray, Byte> clone = faceImg.Clone())
                                {
                                    Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);

                                    foreach (Rectangle e in eyeRegion)
                                    {
                                        Rectangle eyeRect = e;
                                        eyeRect.Offset(f.X, f.Y);
                                        image.Draw(eyeRect, new Bgr(Color.Red), 2);
                                    }
                                }
                            }
                        }
                    }
                    watch.Stop();
                }
            }
            else
            {
                //Read the HaarCascade objects
                using (HaarCascade face = new HaarCascade(faceFileName))
                using (HaarCascade eye = new HaarCascade(eyeFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to grayscale
                    {
                        //normalizes brightness and increases contrast of the image
                        gray._EqualizeHist();

                        //Detect the faces from the grayscale image and store the locations as rectangles
                        //The first dimension is the channel
                        //The second dimension is the index of the rectangle in that channel
                        MCvAvgComp[] facesDetected = face.Detect(
                            gray,
                            1.1,
                            10,
                            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                            new Size(20, 20));

                        foreach (MCvAvgComp f in facesDetected)
                        {
                            //draw the face detected in the 0th (gray) channel with blue color
                            image.Draw(f.rect, new Bgr(Color.Blue), 2);

                            //Set the region of interest to the face
                            gray.ROI = f.rect;
                            MCvAvgComp[] eyesDetected = eye.Detect(
                                gray,
                                1.1,
                                10,
                                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                new Size(20, 20));
                            gray.ROI = Rectangle.Empty;

                            foreach (MCvAvgComp e in eyesDetected)
                            {
                                Rectangle eyeRect = e.rect;
                                eyeRect.Offset(f.rect.X, f.rect.Y);
                                image.Draw(eyeRect, new Bgr(Color.Red), 2);
                            }
                        }
                    }
                    watch.Stop();
                }
            }

            //display the image
            ImageViewer.Show(image, String.Format(
                "Completed face and eye detection using {0} in {1} milliseconds",
                GpuInvoke.HasCuda ? "GPU" : "CPU",
                watch.ElapsedMilliseconds));
        }
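
HaarCascade is the legacy 1.x-style detection API. Below is a minimal sketch of the same CPU path using the newer CascadeClassifier wrapper, assuming Emgu CV 2.4+; the file and image names are illustrative:

        //minimal sketch, assuming Emgu CV 2.4+ where CascadeClassifier
        //wraps cv::CascadeClassifier and returns Rectangle[] directly
        using (CascadeClassifier face = new CascadeClassifier("haarcascade_frontalface_default.xml"))
        using (Image<Bgr, Byte> img = new Image<Bgr, byte>("lena.jpg"))
        using (Image<Gray, Byte> gray = img.Convert<Gray, Byte>())
        {
            gray._EqualizeHist(); //normalize brightness before detection
            Rectangle[] faces = face.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
            foreach (Rectangle f in faces)
                img.Draw(f, new Bgr(Color.Blue), 2);
        }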
 /// <summary>
 /// Detect keypoints in the GpuImage
 /// </summary>
 /// <param name="img">The image where keypoints will be detected from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>
 /// The keypoints GpuMat that will have 1 row.
 /// keypoints.at&lt;float[6]&gt;(0, i) contains the i'th keypoint,
 /// format: (x, y, size, response, angle, octave)
 /// </returns>
 public GpuMat<float> DetectKeyPointsRaw(GpuImage<Gray, Byte> img, GpuImage<Gray, Byte> mask)
 {
     GpuMat<float> result = new GpuMat<float>();
     gpuSURFDetectorDetectKeyPoints(_ptr, img, mask, result);
     return result;
 }
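
A short usage sketch for DetectKeyPointsRaw, mirroring the stitching example above; the 6-argument GpuSURFDetector constructor and its argument values are assumptions based on the Emgu CV 2.4 API, and the file name is illustrative:

     //usage sketch; constructor arguments are illustrative assumptions
     using (GpuSURFDetector surf = new GpuSURFDetector(500f, 4, 2, false, 0.01f, false))
     using (Image<Gray, Byte> img = new Image<Gray, byte>("scene.jpg"))
     using (GpuImage<Gray, Byte> gpuImg = new GpuImage<Gray, byte>(img))
     using (GpuMat<float> gpuKeyPoints = surf.DetectKeyPointsRaw(gpuImg, null)) //mask is optional, null here
     {
         VectorOfKeyPoint keyPoints = new VectorOfKeyPoint();
         surf.DownloadKeypoints(gpuKeyPoints, keyPoints); //copy the raw GPU result back to the CPU
     }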