private void DetectFaces(Mat image)
        {
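            // Notify listeners that face detection has started and mark the detector as busy.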
            OnFaceDetectionStateChanged(true);
            detectingInProgress = true;
            List<FaceFeatures> detectedFaces = new List<FaceFeatures>();

            //Many OpenCL functions require OpenCL compatible GPU devices.
            //As of OpenCV 3.0-alpha, OpenCV will crash if OpenCL is enabled and only an OpenCL compatible CPU device is present.
            //So we need to call CvInvoke.HaveOpenCLCompatibleGpuDevice instead of CvInvoke.HaveOpenCL (which also returns true on a system that only has CPU OpenCL devices).
            //CvInvoke.UseOpenCL = TryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice;

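            // Time the whole detection pass so the elapsed milliseconds can be reported with the results.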
            var watch = Stopwatch.StartNew();

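            // UMat lets OpenCV run these operations on an OpenCL device when OpenCL is enabled; otherwise it falls back to the CPU.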
            using (var grayImage = new UMat())
            {
                // Convert image to grayscale
                CvInvoke.CvtColor(image, grayImage, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                //normalizes brightness and increases contrast of the image
                CvInvoke.EqualizeHist(grayImage, grayImage);

                //Detect the faces in the grayscale image and store each location as a rectangle in a FaceFeatures instance

                Rectangle[] facesDetected = faceClassifier.DetectMultiScale(grayImage, 1.1 /*the scale factor opencv uses to increase the window each pass, default 1.1*/,
                                                                            3 /*minNeighbors, default: 3 (the min. number of rects to group together to call it a face)*/,
                                                                            new Size(40, 40) /*min rect check size */);

                foreach (Rectangle facerect in facesDetected)
                {
                    FaceFeatures face = new FaceFeatures(facerect, grayImage.Cols, grayImage.Rows);
                    //Detect nose, eyes and mouth only inside the regions of interest derived from this face rectangle
                    using (var probableNoseRegion = new UMat(grayImage, face.ProbableNoseLocation))
                    {
                        face.AddNose(noseClassifier.DetectMultiScale(probableNoseRegion, 1.13, 3, new Size(10, 10)));
                    }

                    using (var probableEyesRegion = new UMat(grayImage, face.ProbableEyeLocation))
                    {
                        face.AddEyes(eyeClassifier.DetectMultiScale(probableEyesRegion, 1.13, 3, new Size(10, 10)));
                    }

                    using (var probableMouthRegion = new UMat(grayImage, face.ProbableMouthLocation))
                    {
                        face.AddMouth(mouthClassifier.DetectMultiScale(probableMouthRegion, 1.13, 3, new Size(10, 20)));
                    }

                    detectedFaces.Add(face);
                }
            }

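            // Publish the detected faces together with the elapsed detection time, then clear the busy flag.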
            watch.Stop();
            lastDetectedFaces = detectedFaces;
            OnFaceDetectionStateChanged(false, detectedFaces, (int)watch.ElapsedMilliseconds);
            detectingInProgress = false;
        }