Example #1
    public virtual int ProcessTexture(T texture, OpenCvSharp.Unity.TextureConversionParams texParams, bool detect = true)
    {
        // convert Unity texture to OpenCV Mat
        ImportTexture(texture, texParams);

        // detect
        if (detect)
        {
            double invF = 1.0 / appliedFactor;
            DataStabilizer.ThresholdFactor = invF;

            // convert to grayscale and normalize
            Mat gray = new Mat();
            Cv2.CvtColor(processingImage, gray, ColorConversionCodes.BGR2GRAY);

            // fix shadows
            Cv2.EqualizeHist(gray, gray);

            /*Mat normalized = new Mat();
             * CLAHE clahe = CLAHE.Create();
             * clahe.TilesGridSize = new Size(8, 8);
             * clahe.Apply(gray, normalized);
             * gray = normalized;*/

            // detect matching regions (meat bounding boxes)
            Rect[] rawMeats = cascadeMeats.DetectMultiScale(gray, 1.2, 6);
            if (Meats.Count != rawMeats.Length)
            {
                Meats.Clear();
            }

            for (int i = 0; i < rawMeats.Length; ++i)
            {
                Rect meatRect       = rawMeats[i];
                Rect meatRectScaled = meatRect * invF;
                using (Mat grayMeat = new Mat(gray, meatRect))
                {
                    // get meat object
                    DetectedMeat meat = null;
                    if (Meats.Count < i + 1)
                    {
                        meat = new DetectedMeat(DataStabilizer, meatRectScaled);
                        Meats.Add(meat);
                    }
                    else
                    {
                        meat = Meats[i];
                        meat.SetRegion(meatRectScaled);
                    }
                }
            }
            // log
            //UnityEngine.Debug.Log(String.Format("Found {0} meats", Meats.Count));
        }

        return Meats.Count;
    }
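A minimal usage sketch for this detector (the names `processor`, `webCamTexture` and `TextureParameters` are assumptions, not part of the snippet above):

    // hypothetical driver: call the detector once per new camera frame
    void Update()
    {
        if (webCamTexture.didUpdateThisFrame)
        {
            // TextureParameters is assumed to be prepared elsewhere (see Example #5)
            int count = processor.ProcessTexture(webCamTexture, TextureParameters);
            // processor.Meats now holds one stabilized DetectedMeat per detected region
        }
    }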
Example #2
 /// <summary>
 /// Creates an OpenCV Mat from a Unity texture
 /// </summary>
 /// <param name="texture">Texture instance, must be either Texture2D or WebCamTexture</param>
 /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
 /// <returns>Newly created Mat object, ready to use with OpenCV</returns>
 protected virtual Mat MatFromTexture(T texture, OpenCvSharp.Unity.TextureConversionParams texParams)
 {
     if (texture is UnityEngine.Texture2D)
     {
         return OpenCvSharp.Unity.TextureToMat(texture as UnityEngine.Texture2D, texParams);
     }
     else if (texture is UnityEngine.WebCamTexture)
     {
         return OpenCvSharp.Unity.TextureToMat(texture as UnityEngine.WebCamTexture, texParams);
     }
     else
     {
         throw new Exception("FaceProcessor: incorrect input texture type, must be Texture2D or WebCamTexture");
     }
 }
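Worth noting: the caller owns the returned Mat and should dispose it once done, since Mat wraps native OpenCV memory. A hedged sketch of a safe call pattern:

    Mat frame = MatFromTexture(texture, texParams);
    try
    {
        // ... run OpenCV processing on frame ...
    }
    finally
    {
        frame.Dispose(); // release the native buffer promptly instead of waiting for the finalizer
    }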
Example #3
        /// <summary>
        /// Imports a Unity texture into the FaceProcessor and can pre-process it (white balance, resize etc.)
        /// Fills a few properties and fields: Image, processingImage
        /// </summary>
        /// <param name="texture">Input texture</param>
        /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
        protected virtual void ImportTexture(T texture, Unity.TextureConversionParams texParams)
        {
            // free currently used textures
            if (null != processingImage)
            {
                processingImage.Dispose();
            }
            if (null != Image)
            {
                Image.Dispose();
            }

            // convert and prepare
            Image = MatFromTexture(texture, texParams);

            processingImage = Image;
        }
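Note that `processingImage` ends up as an alias of `Image` here, so the dispose block above calls `Dispose` on the same Mat twice on the next frame. OpenCvSharp's disposable wrappers appear to tolerate repeated disposal, but a defensive variant could guard the alias explicitly (a sketch, not the original code):

    // free currently used textures, guarding against the processingImage == Image alias
    if (processingImage != null && !ReferenceEquals(processingImage, Image))
    {
        processingImage.Dispose();
    }
    if (Image != null)
    {
        Image.Dispose();
    }
    processingImage = null;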
Example #4
    /// <summary>
    /// Imports a Unity texture into the FaceProcessor and can pre-process it (white balance, resize etc.)
    /// Fills a few properties and fields: Image, processingImage, appliedFactor
    /// </summary>
    /// <param name="texture">Input texture</param>
    /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
    protected virtual void ImportTexture(T texture, OpenCvSharp.Unity.TextureConversionParams texParams)
    {
        // free currently used textures
        if (null != processingImage)
        {
            processingImage.Dispose();
        }
        if (null != Image)
        {
            Image.Dispose();
        }

        // convert and prepare
        Image = MatFromTexture(texture, texParams);
        if (Performance.Downscale > 0 && (Performance.Downscale < Image.Width || Performance.Downscale < Image.Height))
        {
            // compute aspect-respective scaling factor
            int w = Image.Width;
            int h = Image.Height;

            // scale by max side
            if (w >= h)
            {
                appliedFactor = (double)Performance.Downscale / (double)w;
                w             = Performance.Downscale;
                h             = (int)(h * appliedFactor + 0.5);
            }
            else
            {
                appliedFactor = (double)Performance.Downscale / (double)h;
                h             = Performance.Downscale;
                w             = (int)(w * appliedFactor + 0.5);
            }

            // resize
            processingImage = new Mat();
            Cv2.Resize(Image, processingImage, new Size(w, h));
        }
        else
        {
            appliedFactor   = 1.0;
            processingImage = Image;
        }
    }
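The `appliedFactor` computed here is what makes the `rect * invF` rescaling in the detectors work: a region found on the downscaled `processingImage` maps back to `Image` coordinates when multiplied by `invF = 1.0 / appliedFactor`. A standalone sketch of that mapping (the `ScaleRect` helper is hypothetical; the samples above rely on a `Rect * double` operator instead):

    // map a rect detected on the downscaled image back to original image space
    static Rect ScaleRect(Rect r, double factor)
    {
        return new Rect(
            (int)(r.X * factor + 0.5),
            (int)(r.Y * factor + 0.5),
            (int)(r.Width * factor + 0.5),
            (int)(r.Height * factor + 0.5));
    }

    // e.g. with Performance.Downscale = 640 on a 1280x720 frame:
    // appliedFactor = 640.0 / 1280 = 0.5, invF = 2.0,
    // so a 100x80 rect at (50, 40) maps back to a 200x160 rect at (100, 80)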
Example #5
    private bool shouldSetTrackingAreaSize = true;


    /// <summary>
    /// Scans the source device parameters (flip, rotation, front-camera status etc.) and
    /// prepares a TextureConversionParams that compensates for them on the OpenCV side
    /// </summary>
    private void ReadTextureConversionParameters()
    {
        OpenCvSharp.Unity.TextureConversionParams parameters = new OpenCvSharp.Unity.TextureConversionParams();

        // frontal camera - we must flip around Y axis to make it mirror-like
        parameters.FlipHorizontally = forceFrontalCamera || webCamDevice.Value.isFrontFacing;

        // TODO:
        // actually, code below should work, however, on our devices tests every device except iPad
        // returned "false", iPad said "true" but the texture wasn't actually flipped

        // compensate vertical flip
        //parameters.FlipVertically = webCamTexture.videoVerticallyMirrored;

        // deal with rotation
        if (0 != webCamTexture.videoRotationAngle)
        {
            parameters.RotationAngle = webCamTexture.videoRotationAngle; // cw -> ccw
        }
        // apply
        TextureParameters = parameters;

        //UnityEngine.Debug.Log (string.Format("front = {0}, vertMirrored = {1}, angle = {2}", webCamDevice.isFrontFacing, webCamTexture.videoVerticallyMirrored, webCamTexture.videoRotationAngle));
    }
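A plausible per-frame call order, assuming this method sits in the same component as a `processor` field and the `webCamTexture` used above (the `Update` body is a sketch):

    void Update()
    {
        if (webCamTexture != null && webCamTexture.didUpdateThisFrame)
        {
            // rotation/flip can change at runtime (e.g. device orientation), so refresh each frame
            ReadTextureConversionParameters();
            processor.ProcessTexture(webCamTexture, TextureParameters);
        }
    }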
Example #6
    /// <summary>
    /// Detector entry point: throttles detection according to Performance.SkipRate
    /// </summary>
    /// <param name="texture">Input Unity texture</param>
    /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
    /// <param name="detect">Flag signalling whether we need detection on this frame</param>
    public override void ProcessTexture(T texture, OpenCvSharp.Unity.TextureConversionParams texParams, bool detect = true)
    {
        bool acceptedFrame = (0 == Performance.SkipRate || 0 == frameCounter++ % Performance.SkipRate);

        base.ProcessTexture(texture, texParams, detect && acceptedFrame);
    }
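The gating expression accepts every frame when no skip rate is set, and otherwise only frames whose running index is a multiple of `Performance.SkipRate`; on rejected frames the base class still imports the texture but reuses the previous detection results. A tiny illustration of the same condition:

    // sketch: which frames pass the gate for a given skip rate
    bool Accepted(int frameIndex, int skipRate)
    {
        return skipRate == 0 || frameIndex % skipRate == 0;
    }
    // Accepted(n, 3) is true for n = 0, 3, 6, ... so detection runs on every third frame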
Example #7
    /// <summary>
    /// Detector: runs cascade face detection (and optional landmark fitting) on the frame
    /// </summary>
    /// <param name="texture">Input Unity texture</param>
    /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
    /// <param name="detect">Flag signalling whether we need detection on this frame</param>
    public virtual void ProcessTexture(T texture, OpenCvSharp.Unity.TextureConversionParams texParams, bool detect = true)
    {
        // convert Unity texture to OpenCV Mat
        ImportTexture(texture, texParams);

        // detect
        if (detect)
        {
            double invF = 1.0 / appliedFactor;
            DataStabilizer.ThresholdFactor = invF;

            // convert to grayscale and normalize
            Mat gray = new Mat();
            Cv2.CvtColor(processingImage, gray, ColorConversionCodes.BGR2GRAY);

            // fix shadows
            Cv2.EqualizeHist(gray, gray);

            /*Mat normalized = new Mat();
             * CLAHE clahe = CLAHE.Create();
             * clahe.TilesGridSize = new Size(8, 8);
             * clahe.Apply(gray, normalized);
             * gray = normalized;*/

            // detect matching regions (face bounding boxes)
            Rect[] rawFaces = cascadeFaces.DetectMultiScale(gray, 1.2, 6);
            if (Faces.Count != rawFaces.Length)
            {
                Faces.Clear();
            }

            // now per each detected face draw a marker and detect eyes inside the face rect
            int facesCount = 0;
            for (int i = 0; i < rawFaces.Length; ++i)
            {
                Rect faceRect       = rawFaces[i];
                Rect faceRectScaled = faceRect * invF;
                using (Mat grayFace = new Mat(gray, faceRect))
                {
                    // another trick: confirm the face with eye detector, will cut some false positives
                    if (cutFalsePositivesWithEyesSearch && null != cascadeEyes)
                    {
                        Rect[] eyes = cascadeEyes.DetectMultiScale(grayFace);
                        if (eyes.Length == 0 || eyes.Length > 2)
                        {
                            continue;
                        }
                    }

                    // get face object
                    DetectedFace face = null;
                    if (Faces.Count < i + 1)
                    {
                        face = new DetectedFace(DataStabilizer, faceRectScaled);
                        Faces.Add(face);
                    }
                    else
                    {
                        face = Faces[i];
                        face.SetRegion(faceRectScaled);
                    }

                    // shape
                    facesCount++;
                    if (null != shapeFaces)
                    {
                        Point[] marks = shapeFaces.DetectLandmarks(gray, faceRect);

                        // we have 68-point predictor
                        if (marks.Length == 68)
                        {
                            // transform landmarks to the original image space
                            List<Point> converted = new List<Point>();
                            foreach (Point pt in marks)
                            {
                                converted.Add(pt * invF);
                            }

                            // save and parse landmarks
                            face.SetLandmarks(converted.ToArray());
                        }
                    }
                }
            }

            // log
            //UnityEngine.Debug.Log(String.Format("Found {0} faces", Faces.Count));
        }
    }
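After a call with `detect = true`, `Faces` holds stabilized `DetectedFace` objects whose regions and 68-point landmarks are already mapped back to full-resolution image space via `invF`. A hedged consumer sketch (the `Region` accessor on `DetectedFace` is an assumption):

    processor.ProcessTexture(webCamTexture, TextureParameters);
    foreach (DetectedFace face in processor.Faces)
    {
        Rect region = face.Region; // assumed accessor matching SetRegion above
        // draw or track the face here, e.g. Cv2.Rectangle(displayMat, region, Scalar.Red);
    }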
Example #8
        /// <summary>
        /// Detector: finds the largest face in the frame and returns its landmarks
        /// </summary>
        /// <param name="texture">Input Unity texture</param>
        /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
        /// <returns>68 landmark points of the largest detected face, or the previous result when nothing is found</returns>
        public virtual Point[] ProcessTexture(T texture, Unity.TextureConversionParams texParams)
        {
            // convert Unity texture to OpenCV Mat
            ImportTexture(texture, texParams);

            DataStabilizer.ThresholdFactor = 1;

            // convert to grayscale and normalize
            Mat gray = new Mat();

            Cv2.CvtColor(processingImage, gray, ColorConversionCodes.BGR2GRAY);
            Cv2.Blur(gray, gray, new Size(10, 10));

            // fix shadows
            // Cv2.EqualizeHist(gray, gray);

            // detect matching regions (face bounding boxes)
            Rect[] rawFaces = cascadeFaces.DetectMultiScale(gray, 1.2, 6);
            if (Faces.Count != rawFaces.Length)
            {
                Faces.Clear();
            }

            // now per each detected face draw a marker and detect eyes inside the face rect
            int facesCount = 0;

            Point[] maxFace     = lastFace;
            double  maxFaceSize = 0;

            for (int i = 0; i < rawFaces.Length; ++i)
            {
                Rect faceRect = rawFaces[i];
                using (Mat grayFace = new Mat(gray, faceRect))
                {
                    // another trick: confirm the face with eye detector, will cut some false positives
                    if (cutFalsePositivesWithEyesSearch && null != cascadeEyes)
                    {
                        Rect[] eyes = cascadeEyes.DetectMultiScale(grayFace);
                        if (eyes.Length == 0 || eyes.Length > 2)
                        {
                            continue;
                        }
                    }

                    // get face object
                    Demo.DetectedFace face = null;
                    if (Faces.Count < i + 1)
                    {
                        face = new Demo.DetectedFace(DataStabilizer, faceRect);
                        Faces.Add(face);
                    }
                    else
                    {
                        face = Faces[i];
                        face.SetRegion(faceRect);
                    }

                    // shape
                    facesCount++;
                    if (null != shapeFaces)
                    {
                        Point[] marks = shapeFaces.DetectLandmarks(gray, faceRect);

                        // we have 68-point predictor
                        if (marks.Length == 68)
                        {
                            double size = Point.DistancePow2(marks[0], marks[16]);
                            if (size > maxFaceSize)
                            {
                                maxFaceSize = size;
                                maxFace     = marks;
                            }
                        }
                    }
                }
            }

            if (maxFaceSize != 0)
            {
                lastFace = maxFace;
            }

            return maxFace;
        }
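The size heuristic compares `Point.DistancePow2(marks[0], marks[16])`, the squared distance between the two jawline endpoints of the 68-point dlib-style model, i.e. roughly the squared face width; skipping the square root is fine because only the ordering matters. A short consumer sketch (`processor` is an assumed field holding this detector):

    Point[] marks = processor.ProcessTexture(webCamTexture, TextureParameters);
    if (marks != null && marks.Length == 68)
    {
        // marks[0] and marks[16] span the jawline, a proxy for face width
        double widthSq = Point.DistancePow2(marks[0], marks[16]);
    }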