Example #1
    public override void CalculateWeights(Mat image, ImageFeatureMap target)
    {
        DetectionTime = 0;
        if (!Enabled)
        {
            return;
        }
        List <Rectangle> objects     = new List <Rectangle> ();
        List <Rect>      normObjects = new List <Rect> ();

        DetectObjects(image, objects, Params.DetectorParameters);

        int W = image.Width;
        int H = image.Height;

        //fill the detected features into the feature map
        foreach (Rectangle o in objects)
        {
            //expand the detected area by a fraction of its size (ExpansionRect is relative)
            Rectangle r = new Rectangle();
            r.X      = o.X + (int)(Params.DetectorParameters.ExpansionRect.x * o.Width);
            r.Y      = o.Y + (int)(Params.DetectorParameters.ExpansionRect.y * o.Height);
            r.Width  = o.Width + (int)((Params.DetectorParameters.ExpansionRect.width) * o.Width);
            r.Height = o.Height + (int)((Params.DetectorParameters.ExpansionRect.height) * o.Height);
            //fill detected face rectangle with full weight
            target.FillRectangle((float)r.X / (float)W, (float)r.Y / (float)H,
                                 (float)r.Width / (float)W, (float)r.Height / (float)H, 1);
            normObjects.Add(new Rect(r.Left / (float)W, r.Top / (float)H, r.Width / (float)W, r.Height / (float)H));
        }


        lock (_objectsLock) {
            _detectedObjects = normObjects;
        }
    }
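For a concrete sense of the expansion-and-normalization step above, here is a minimal sketch with hypothetical ExpansionRect values (x: -0.1, y: -0.1, width: 0.2, height: 0.2) and a hypothetical 640x480 frame; Rectangle and Rect are the same System.Drawing and UnityEngine types used in the example, and all numbers are illustrative only:

    //hypothetical ExpansionRect = (-0.1, -0.1, 0.2, 0.2):
    //grow a 100x100 detection at (200, 150) by 10% on every side
    Rectangle o = new Rectangle(200, 150, 100, 100);
    Rectangle r = new Rectangle(
        o.X + (int)(-0.1f * o.Width),           //190
        o.Y + (int)(-0.1f * o.Height),          //140
        o.Width + (int)(0.2f * o.Width),        //120
        o.Height + (int)(0.2f * o.Height));     //120
    //normalize against the hypothetical 640x480 frame, as FillRectangle expects
    float W = 640f, H = 480f;
    Rect norm = new Rect(r.X / W, r.Y / H, r.Width / W, r.Height / H);
    //norm = (0.296875, 0.2916667, 0.1875, 0.25)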
Example #2
    public override void CalculateWeights(GstImageInfo image, ImageFeatureMap target)
    {
        DetectionTime = 0;
        if (!Enabled)
        {
            return;
        }
        List <Rect> normObjects = new List <Rect> ();

        _detector.BindImage(image);

        if (!_featuresDetected)
        {
            return;
        }

        _featuresDetected = false;


        int W = image.Width;
        int H = image.Height;

        //fill the detected features into the feature map
        foreach (Rect o in _faces)
        {
            //expand the detected area by a fraction of its size (ExpansionRect is relative)
            Rect r = new Rect();
            r.x      = o.x + (int)(Params.FaceConfig.ExpansionRect.x * o.width);
            r.y      = o.y + (int)(Params.FaceConfig.ExpansionRect.y * o.height);
            r.width  = o.width + (int)((Params.FaceConfig.ExpansionRect.width) * o.width);
            r.height = o.height + (int)((Params.FaceConfig.ExpansionRect.height) * o.height);
            //fill detected face rectangle with full weight
            target.FillRectangle((float)r.x / (float)W, (float)r.y / (float)H,
                                 (float)r.width / (float)W, (float)r.height / (float)H, 1);
            normObjects.Add(new Rect(r.x / (float)W, r.y / (float)H, r.width / (float)W, r.height / (float)H));
        }


        _detectedObjects = normObjects;
    }
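Note that, unlike Example #1, this variant publishes _detectedObjects without taking _objectsLock. If another thread reads the list (as Example #1 assumes), the final assignment should be guarded the same way; a minimal sketch, reusing the lock object from Example #1:

    lock (_objectsLock) {
        _detectedObjects = normObjects;
    }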
Example #3
    void Start()
    {
        Gaze                  = GameObject.FindObjectOfType <GazeFollowComponent> ();
        _processor            = new OffscreenProcessor();
        _processor.ShaderName = "GazeBased/Blend";


        Image <Gray, byte> cache = null;

        EmguImageUtil.UnityTextureToOpenCVImageGray(TargetTexture, ref cache);        //convert the Unity texture to an 8-bit grayscale OpenCV image
        long             detectionTime;
        List <Rectangle> faces = new List <Rectangle>();
        List <Rectangle> eyes  = new List <Rectangle>();

        //the CUDA cascade classifier doesn't seem to be able to load the "haarcascade_frontalface_default.xml" file in this release,
        //so the CUDA module is disabled for now
        bool tryUseCuda   = false;
        bool tryUseOpenCL = false;

        DetectObjectCL.DetectFace(
            cache.Mat, false,
            faces, eyes,
            tryUseCuda,
            tryUseOpenCL,
            out detectionTime);

        foreach (Rectangle face in faces)
        {
            CvInvoke.Rectangle(cache.Mat, face, new Bgr(0, 0, 1).MCvScalar, 2);
        }
        foreach (Rectangle eye in eyes)
        {
            CvInvoke.Rectangle(cache.Mat, eye, new Bgr(1, 0, 0).MCvScalar, 2);
        }

        Debug.Log("detected faces:" + faces.Count);
        Debug.Log("face detection time:" + detectionTime.ToString() + "ms");

        //display the image
        /*
        ImageViewer.Show(image, String.Format(
            "Completed face and eye detection using {0} in {1} milliseconds",
            (tryUseCuda && CudaInvoke.HasCuda) ? "GPU"
            : (tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice) ? "OpenCL"
            : "CPU",
            detectionTime));
        */

        _map = new ImageFeatureMap(128, 128);
        foreach (Rectangle face in faces)
        {
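            //hand-tuned expansion offsets for this test scene; note the large
            //bottom expansion (+520 px) pushes the weight well below the face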
            Rectangle r = new Rectangle();
            r.X      = face.X - 50;
            r.Y      = face.Y - 5;
            r.Width  = face.Width + 50;
            r.Height = face.Height + 520;
            _map.FillRectangle((float)r.X / (float)cache.Mat.Width, (float)r.Y / (float)cache.Mat.Height,
                               (float)r.Width / (float)cache.Mat.Width, (float)r.Height / (float)cache.Mat.Height, 1);
        }

        _map.Blur();
        _map.Blur();
        _map.Blur();
        _map.Blur();
        Texture2D tex = new Texture2D(1, 1);

        tex.filterMode = FilterMode.Point;

        _map.ConvertToTexture(tex, true);

        _processor.ProcessingMaterial.SetTexture("_TargetMask", tex);
        _processor.ProcessingMaterial.SetTexture("_MainTex", TargetTexture);

        GetComponent <UITexture> ().mainTexture = tex;       //_processor.ProcessTexture (TargetTexture);
    }
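The map-building steps in Start() generalize to any detector output: normalize rectangles by the frame size, fill them into the map, blur to soften the edges, then upload to a texture. A minimal sketch assuming the same ImageFeatureMap API used above (FillRectangle, Blur, ConvertToTexture) and a hypothetical frameWidth/frameHeight:

    ImageFeatureMap map = new ImageFeatureMap(128, 128);
    float frameWidth = 640f, frameHeight = 480f;     //hypothetical frame size
    foreach (Rectangle face in faces)
    {
        //FillRectangle takes coordinates normalized to [0,1]
        map.FillRectangle(face.X / frameWidth, face.Y / frameHeight,
                          face.Width / frameWidth, face.Height / frameHeight, 1);
    }
    map.Blur();                                      //repeat to widen the falloff
    Texture2D mask = new Texture2D(1, 1) { filterMode = FilterMode.Point };
    map.ConvertToTexture(mask, true);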
    public override void CalculateWeights(Mat image, ImageFeatureMap target)
    {
        DetectionTime = 0;
        if (!Enabled)
        {
            return;
        }
        byte[]   status;
        float[]  errTracker;
        PointF[] features;



        float W = image.Width;
        float H = image.Height;

        if (_isFirstFrame ||
            _prevImage.Width != image.Width ||
            _prevImage.Height != image.Height)
        {
            _prevImage    = image.Clone();
            _isFirstFrame = false;
            return;
        }

        DateTime t = DateTime.Now;

        if (_currPoints == null || _currPoints.Length < 50 ||
            (t - _time).TotalSeconds > Params.OFParameters.FeaturesUpdateTime)
        {
            _time = t;
            UnityEngine.Debug.Log("Recalculating feature points");

            //re-detect good features to track, disposing the native detector afterwards
            using (GFTTDetector detector = new GFTTDetector(Params.OFParameters.MaxFeaturesCount))
            {
                MKeyPoint[] featPoints = detector.Detect(image, null);

                _prevPoints = new PointF[featPoints.Length];
                int i = 0;
                foreach (var k in featPoints)
                {
                    _prevPoints [i] = k.Point;
                    ++i;
                }
            }

            _currPoints = _prevPoints;
        }

        Stopwatch watch = Stopwatch.StartNew();
        try {
            _criteria.Type    = Params.OFParameters.CriteriaType;
            _criteria.MaxIter = Params.OFParameters.Iterations;
            _criteria.Epsilon = Params.OFParameters.Epsilon;
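            //track _prevPoints from the previous frame into the current image;
            //status[i] != 0 marks features that were tracked successfully and
            //errTracker[i] holds the per-feature tracking error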
            CvInvoke.CalcOpticalFlowPyrLK(_prevImage, image, _prevPoints, new Size((int)Params.OFParameters.SearchWindow.x, (int)Params.OFParameters.SearchWindow.y),
                                          Params.OFParameters.Level, _criteria, out features, out status, out errTracker);

            //calculate homography matrix
            CvInvoke.FindHomography(_prevPoints, features, _homography, Emgu.CV.CvEnum.HomographyMethod.Default);
        } catch (Exception e) {
            UnityEngine.Debug.Log(e.Message);
            return;
        }
        watch.Stop();
        DetectionTime = watch.ElapsedMilliseconds;

        //embed the 3x3 homography in a 4x4 matrix so the camera motion can be removed from the tracked points
        Matrix4x4 m = new Matrix4x4();

        m.SetRow(0, new Vector4((float)_homography[0, 0], (float)_homography[0, 1], 0, (float)_homography[0, 2]));
        m.SetRow(1, new Vector4((float)_homography[1, 0], (float)_homography[1, 1], 0, (float)_homography[1, 2]));
        m.SetRow(2, new Vector4(0, 0, 1, 0));
        m.SetRow(3, new Vector4((float)_homography[2, 0], (float)_homography[2, 1], 0, (float)_homography[2, 2]));
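        //rows 0, 1 and 3 carry the homography terms; MultiplyPoint sets w = 1 and
        //divides by the transformed w, so the full projective mapping is applied to (x, y)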
        Matrix4x4 homographyInverse = Matrix4x4.Inverse(m);         //maps current-frame points back into the previous frame


        //next, fill the weight map
        Vector2 direction = new Vector2((float)_homography [0, 2], (float)_homography [1, 2]);
        direction.Normalize();          //dominant camera translation direction (used by the disabled filter below)
        _opticalFlow.Clear();
        int count = 0;

        for (int i = 0; i < features.Length; ++i)
        {
            //map the tracked point back into the previous frame with the inverse homography;
            //whatever displacement remains is object motion rather than camera motion
            Vector3 dp   = homographyInverse.MultiplyPoint(new Vector3(features [i].X, features [i].Y, 0));
            float   dist = (dp.x - _prevPoints [i].X) * (dp.x - _prevPoints [i].X) +
                           (dp.y - _prevPoints [i].Y) * (dp.y - _prevPoints [i].Y);
            if (dist > Params.OFParameters.MinDistance * Params.OFParameters.MinDistance &&
                dist < Params.OFParameters.MaxDistance * Params.OFParameters.MaxDistance)
            {
                //check whether the point belongs to object motion or to camera motion
                //(two alternative filters, currently disabled):
                //
                //float len = Mathf.Sqrt(dist);
                //if (len < Params.OFParameters.FeatureSimilarityThreshold) {
                //    continue;    //skip this point, it correlates with camera motion
                //}
                //
                //Vector3 d = new Vector3 (features [i].X - _currPoints [i].X, features [i].Y - _currPoints [i].Y, 0);
                //d.Normalize ();
                //float dot = Vector2.Dot (d, direction);
                //if (dot > Params.OFParameters.FeatureSimilarityThreshold) {
                //    continue;    //skip this point, it correlates with camera motion
                //}
                //accept this point
                float x = features [i].X / W;
                float y = features [i].Y / H;
                if (x > 1 || x < 0 || y > 1 || y < 0)
                {
                    continue;
                }
                ++count;
                float w = 20 / W;              //fixed 20-pixel window around the feature, normalized
                float h = 20 / H;
                Rect  r = new Rect(x - w / 2.0f, y - h / 2.0f, w, h);
                target.FillRectangle(r.x, r.y, r.width, r.height, 1);

                TrackedFeature f = new TrackedFeature();
                f.v1 = new Vector2(_currPoints[i].X / W, _currPoints[i].Y / H);
                f.v2 = new Vector2(features [i].X / W, features [i].Y / H);
                _opticalFlow.Add(f);
            }
        }

        //motion counts as detected only when enough features moved independently of the camera
        _featuresDetected = count > features.Length / 10;


        if (features != null)
        {
            lock (_objectsLock) {
                _prevPoints = _currPoints;
                _currPoints = features;
            }
        }

        _prevImage.Dispose();           //release the previous frame's native buffer
        _prevImage = image.Clone();
    }
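The per-feature motion collected in _opticalFlow (v1 = previous position, v2 = current position, both normalized) can be consumed downstream, for example to estimate a dominant motion vector. A minimal sketch, assuming TrackedFeature is just the v1/v2 pair used above:

    Vector2 meanFlow = Vector2.zero;
    foreach (TrackedFeature f in _opticalFlow)
    {
        meanFlow += f.v2 - f.v1;    //per-feature displacement in normalized units
    }
    if (_opticalFlow.Count > 0)
    {
        meanFlow /= _opticalFlow.Count;
    }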