Example #1
        /// <summary>
        /// Reads the source device parameters (flip, rotation, front-camera status etc.) and
        /// prepares TextureConversionParameters that compensate for them on the OpenCV side
        /// </summary>
        private void ReadTextureConversionParameters()
        {
            Unity.TextureConversionParams parameters = new Unity.TextureConversionParams();

            // front camera - we must flip around the Y axis to get a mirror-like image
            parameters.FlipHorizontally = forceFrontalCamera || webCamDevice.Value.isFrontFacing;
            //parameters.FlipHorizontally = false;

            // TODO:
            // the code below should work; however, in our device tests every device except the iPad
            // returned "false", and the iPad reported "true" even though the texture wasn't actually flipped

            // compensate vertical flip
            //parameters.FlipVertically = webCamTexture.videoVerticallyMirrored;

            // deal with rotation
            if (0 != webCamTexture.videoRotationAngle)
            {
                parameters.RotationAngle = webCamTexture.videoRotationAngle;                 // cw -> ccw
            }
            // apply
            TextureParameters = parameters;

            //UnityEngine.Debug.Log (string.Format("front = {0}, vertMirrored = {1}, angle = {2}", webCamDevice.isFrontFacing, webCamTexture.videoVerticallyMirrored, webCamTexture.videoRotationAngle));
        }
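
Since webCamTexture.videoRotationAngle can change at runtime (for instance when the device is rotated), a caller typically refreshes these parameters on every new frame. A minimal sketch of such a caller, assuming the same webCamTexture field and a hypothetical downstream 'processor' consumer:

        private void Update()
        {
            // re-read each frame: flip/rotation state may change with device orientation
            if (webCamTexture != null && webCamTexture.didUpdateThisFrame)
            {
                ReadTextureConversionParameters();

                // hand the fresh frame and its compensation parameters downstream
                // ('processor' is an assumed FaceProcessor-like consumer, see Examples #7-8)
                processor.ProcessTexture(webCamTexture, TextureParameters);
            }
        }
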
Example #2
        private void ReadTextureConversionParameters()
        {
            Unity.TextureConversionParams parameters = new Unity.TextureConversionParams();
            parameters.FlipHorizontally = currentDeviceAndTexture.device.isFrontFacing;

            if (0 != currentDeviceAndTexture.texture.videoRotationAngle)
            {
                parameters.RotationAngle = currentDeviceAndTexture.texture.videoRotationAngle;                 // cw -> ccw
            }
            TextureParameters = parameters;
        }
Example #3
 /// <summary>
 /// Creates an OpenCV Mat from a Unity texture
 /// </summary>
 /// <param name="texture">Texture instance, must be either Texture2D or WebCamTexture</param>
 /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
 /// <returns>Newly created Mat object, ready to use with OpenCV</returns>
 protected virtual Mat MatFromTexture(T texture, Unity.TextureConversionParams texParams)
 {
     if (texture is UnityEngine.Texture2D)
     {
         return(Unity.TextureToMat(texture as UnityEngine.Texture2D, texParams));
     }
     else if (texture is UnityEngine.WebCamTexture)
     {
         return(Unity.TextureToMat(texture as UnityEngine.WebCamTexture, texParams));
     }
     else
     {
         throw new Exception("FaceProcessor: incorrect input texture type, must be Texture2D or WebCamTexture");
     }
 }
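
A minimal usage sketch: the returned Mat is owned by the caller, so it should be disposed once consumed. The dump-to-disk call is just an illustration:

     // hypothetical caller: convert one frame, inspect it, release it
     using (Mat frame = MatFromTexture(webCamTexture, TextureParameters))
     {
         Cv2.ImWrite("frame.png", frame);     // e.g. dump the converted frame for a visual check
     }
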
Example #4
        /// <summary>
        /// Imports a Unity texture into the FaceProcessor and may pre-process it (white balance, resize etc.)
        /// Fills a few properties and fields: Image, processingImage, appliedFactor
        /// </summary>
        /// <param name="texture">Input texture</param>
        /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
        protected virtual void ImportTexture(T texture, Unity.TextureConversionParams texParams)
        {
            // free currently used textures
            if (null != processingImage)
            {
                processingImage.Dispose();
            }
            if (null != Image)
            {
                Image.Dispose();
            }

            // convert and prepare
            Image = MatFromTexture(texture, texParams);
            if (Performance.Downscale > 0 && (Performance.Downscale < Image.Width || Performance.Downscale < Image.Height))
            {
                // compute aspect-respective scaling factor
                int w = Image.Width;
                int h = Image.Height;

                // scale by max side
                if (w >= h)
                {
                    appliedFactor = (double)Performance.Downscale / (double)w;
                    w             = Performance.Downscale;
                    h             = (int)(h * appliedFactor + 0.5);
                }
                else
                {
                    appliedFactor = (double)Performance.Downscale / (double)h;
                    h             = Performance.Downscale;
                    w             = (int)(w * appliedFactor + 0.5);
                }

                // resize
                processingImage = new Mat();
                Cv2.Resize(Image, processingImage, new Size(w, h));
            }
            else
            {
                appliedFactor   = 1.0;
                processingImage = Image;
            }
        }
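
For example, with Performance.Downscale = 480 and a 1280x720 frame, the width is the larger side, so appliedFactor = 480 / 1280 = 0.375 and the processing image becomes 480x270. The same size computation, isolated as a hypothetical standalone helper (not part of the original class):

        // mirrors the downscale math above: scale the longer side to 'target'
        // pixels while preserving the aspect ratio
        private static void ComputeDownscale(int width, int height, int target,
                                             out int w, out int h, out double factor)
        {
            factor = (double)target / Math.Max(width, height);
            w      = width >= height ? target : (int)(width * factor + 0.5);
            h      = width >= height ? (int)(height * factor + 0.5) : target;
        }
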
Example #5
        public OpenCvSharp.Rect DrawRect(RectTransform imageTransform, Mat image, Mat downscaled, Unity.TextureConversionParams TextureParameters, ref Texture2D output)
        {
            // screen space -> image space
            Vector2 sp       = ConvertToImageSpace(imageTransform, startPoint, image.Size(), TextureParameters);
            Vector2 ep       = ConvertToImageSpace(imageTransform, endPoint, image.Size(), TextureParameters);
            Point   location = new Point(Math.Min(sp.x, ep.x), Math.Min(sp.y, ep.y));
            Size    size     = new Size(Math.Abs(ep.x - sp.x), Math.Abs(ep.y - sp.y));
            var     areaRect = new OpenCvSharp.Rect(location, size);
            Rect2d  obj      = Rect2d.Empty;

            // If not dragged - show the tracking data
            if (!isDragging)
            {
                // drop the tracker if the frame size has changed; necessary because the tracker doesn't handle size changes well
                if (frameSize.Height != 0 && frameSize.Width != 0 && downscaled.Size() != frameSize)
                {
                    DropTracking();
                }

                // we have no tracker yet - let's initialize one
                if (null == tracker)
                {
                    // but only if the "area of interest" is big enough; this avoids "tracking" tiny 1x2-pixel areas
                    if ((ep - sp).magnitude >= minimumAreaDiagonal)
                    {
                        obj = new Rect2d(areaRect.X, areaRect.Y, areaRect.Width, areaRect.Height);

                        // initialize the tracker with the current image and the given rect; one can experiment with tracker types here
                        tracker = Tracker.Create(TrackerTypes.MIL);
                        tracker.Init(downscaled, obj);

                        frameSize = downscaled.Size();
                    }
                }
                // if we already have an active tracker - just update it with the new frame and check whether it still tracks the object
                else
                {
                    if (!tracker.Update(downscaled, ref obj))
                    {
                        obj = Rect2d.Empty;
                    }
                }

                // save tracked object location
                if (0 != obj.Width && 0 != obj.Height)
                {
                    areaRect = new OpenCvSharp.Rect((int)obj.X, (int)obj.Y, (int)obj.Width, (int)obj.Height);
                }
            }

            // render the rect we've tracked or the one being drawn by the user
            if (isDragging || (null != tracker && obj.Width != 0))
            {
                Cv2.Rectangle((InputOutputArray)image, areaRect * (1.0 / downScale), isDragging ? Scalar.Red : Scalar.Blue, 4);
            }

            // result; passing the output texture as a parameter allows re-using its buffer,
            // should the output texture be null a new texture will be created
            if (!isTracking)
            {
                output = Unity.MatToTexture(image, output);
            }
            return(areaRect);
        }
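
DropTracking() is referenced above but not part of this listing; a plausible minimal implementation, assumed from the fields the method uses (tracker, frameSize), would release the tracker and reset the cached frame size:

        // assumed sketch of DropTracking(), not from the original listing
        private void DropTracking()
        {
            if (null != tracker)
            {
                tracker.Dispose();          // trackers wrap native OpenCV resources
                tracker = null;
            }
            frameSize = new Size(0, 0);     // a zero size is ignored by the size-change check above
        }
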
Example #6
        protected Vector2 ConvertToImageSpace(RectTransform imageTransform, Vector2 coord, Size size, Unity.TextureConversionParams TextureParameters)
        {
            Vector2 output = new Vector2();

            RectTransformUtility.ScreenPointToLocalPointInRectangle(imageTransform, coord, null, out output);

            // the pivot is in the center of the rectTransform, but we need a { 0, 0 } origin
            output.x += size.Width / 2;
            output.y += size.Height / 2;

            // now our image might have various transformations of its own
            if (!TextureParameters.FlipVertically)
            {
                output.y = size.Height - output.y;
            }

            // downscaling
            output.x *= downScale;
            output.y *= downScale;

            return(output);
        }
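
A worked example of the coordinate chain: with a 640x480 image and a local point of (-100, 50) relative to the centered pivot, the origin shift gives (220, 290); with FlipVertically unset, the Y inversion yields (220, 480 - 290) = (220, 190), and both components are then multiplied by downScale to land in the downscaled frame used for tracking.
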
Example #7
        /// <summary>
        /// Detector entry point: throttles detection according to Performance.SkipRate
        /// </summary>
        /// <param name="texture">Input Unity texture</param>
        /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
        /// <param name="detect">Flag signalling whether we need detection on this frame</param>
        public override void ProcessTexture(T texture, Unity.TextureConversionParams texParams, bool detect = true)
        {
            bool acceptedFrame = (0 == Performance.SkipRate || 0 == frameCounter++ % Performance.SkipRate);

            base.ProcessTexture(texture, texParams, detect && acceptedFrame);
        }
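
For example, with Performance.SkipRate = 3 the post-incremented counter makes acceptedFrame true on frames 0, 3, 6 and so on, so detection runs on every third frame, while SkipRate = 0 short-circuits the check and detects on every frame. Note that skipped frames are still imported, since base.ProcessTexture always runs; only the detection pass is suppressed.
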
Example #8
        /// <summary>
        /// Detector: runs face (and optional landmark) detection on the given texture
        /// </summary>
        /// <param name="texture">Input Unity texture</param>
        /// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
        /// <param name="detect">Flag signalling whether we need detection on this frame</param>
        public virtual void ProcessTexture(T texture, Unity.TextureConversionParams texParams, bool detect = true)
        {
            // convert the Unity texture to an OpenCV Mat
            ImportTexture(texture, texParams);

            // detect
            if (detect)
            {
                double invF = 1.0 / appliedFactor;
                DataStabilizer.ThresholdFactor = invF;

                // convert to grayscale and normalize
                Mat gray = new Mat();
                Cv2.CvtColor(processingImage, gray, ColorConversionCodes.BGR2GRAY);

                // equalize the histogram to soften shadows and uneven lighting
                Cv2.EqualizeHist(gray, gray);

                /*
                Mat normalized = new Mat();
                CLAHE clahe = CLAHE.Create();
                clahe.TilesGridSize = new Size(8, 8);
                clahe.Apply(gray, normalized);
                gray = normalized;
                */

                // detect matching regions (face bounding boxes)
                Rect[] rawFaces = cascadeFaces.DetectMultiScale(gray, 1.2, 6);
                if (Faces.Count != rawFaces.Length)
                {
                    Faces.Clear();
                }

                // now per each detected face draw a marker and detect eyes inside the face rect
                int facesCount = 0;
                for (int i = 0; i < rawFaces.Length; ++i)
                {
                    Rect faceRect       = rawFaces[i];
                    Rect faceRectScaled = faceRect * invF;
                    using (Mat grayFace = new Mat(gray, faceRect))
                    {
                        // another trick: confirm the face with the eye detector; this cuts some false positives
                        if (cutFalsePositivesWithEyesSearch && null != cascadeEyes)
                        {
                            Rect[] eyes = cascadeEyes.DetectMultiScale(grayFace);
                            if (eyes.Length == 0 || eyes.Length > 2)
                            {
                                continue;
                            }
                        }

                        // get face object
                        DetectedFace face = null;
                        if (Faces.Count < i + 1)
                        {
                            face = new DetectedFace(DataStabilizer, faceRectScaled);
                            Faces.Add(face);
                        }
                        else
                        {
                            face = Faces[i];
                            face.SetRegion(faceRectScaled);
                        }

                        // shape
                        facesCount++;
                        if (null != shapeFaces)
                        {
                            Point[] marks = shapeFaces.DetectLandmarks(gray, faceRect);

                            // we have a 68-point predictor
                            if (marks.Length == 68)
                            {
                                // transform landmarks to the original image space
                                List <Point> converted = new List <Point>();
                                foreach (Point pt in marks)
                                {
                                    converted.Add(pt * invF);
                                }

                                // save and parse landmarks
                                face.SetLandmarks(converted.ToArray());
                            }
                        }
                    }
                }

                // log
                //UnityEngine.Debug.Log(String.Format("Found {0} faces", Faces.Count));
            }
        }
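
A hypothetical per-frame driver for this detector, matching the commented-out log line above ('processor' and its wiring are assumptions):

        // run detection on the current frame, then report how many faces are tracked
        processor.ProcessTexture(webCamTexture, TextureParameters, detect: true);
        UnityEngine.Debug.Log(string.Format("Found {0} faces", processor.Faces.Count));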