Example #1
0
        /// <summary>
        /// Estimates the head position and rotation from the latest camera frame and
        /// the detected 2D face landmarks (68-, 17-, or 6-point models).
        /// Results are written to headPosition / headRotation;
        /// didUpdateHeadPositionAndRotation is true only when a valid,
        /// in-viewport pose was produced during this call.
        /// </summary>
        public override void UpdateValue()
        {
            // Both source interfaces must be wired up before we can do anything.
            if (matSourceGetterInterface == null || faceLandmarkGetterInterface == null)
            {
                return;
            }

            Mat rgbaMat = matSourceGetterInterface.GetMatSource();
            if (rgbaMat == null)
            {
                return;
            }

            // Rebuild the intrinsic camera matrix whenever the frame size changes.
            if (rgbaMat.width() != imageWidth || rgbaMat.height() != imageHeight)
            {
                imageWidth  = rgbaMat.width();
                imageHeight = rgbaMat.height();
                SetCameraMatrix(camMatrix, imageWidth, imageHeight);
            }

            didUpdateHeadPositionAndRotation = false;

            List<Vector2> points = faceLandmarkGetterInterface.GetFaceLanmarkPoints();
            if (points == null)
            {
                return;
            }

            MatOfPoint3f objectPoints = null;

            if (points.Count == 68)
            {
                objectPoints = objectPoints68;

                imagePoints.fromArray(
                    new Point((points[38].x + points[41].x) / 2, (points[38].y + points[41].y) / 2), //l eye (Interpupillary breadth)
                    new Point((points[43].x + points[46].x) / 2, (points[43].y + points[46].y) / 2), //r eye (Interpupillary breadth)
                    new Point(points[30].x, points[30].y),                                           //nose (Tip)
                    new Point(points[33].x, points[33].y),                                           //nose (Subnasale)
                    new Point(points[0].x, points[0].y),                                             //l ear (Bitragion breadth)
                    new Point(points[16].x, points[16].y)                                            //r ear (Bitragion breadth)
                    );
            }
            else if (points.Count == 17)
            {
                objectPoints = objectPoints17;

                imagePoints.fromArray(
                    new Point((points[2].x + points[3].x) / 2, (points[2].y + points[3].y) / 2), //l eye (Interpupillary breadth)
                    new Point((points[4].x + points[5].x) / 2, (points[4].y + points[5].y) / 2), //r eye (Interpupillary breadth)
                    new Point(points[0].x, points[0].y),                                         //nose (Tip)
                    new Point(points[1].x, points[1].y),                                         //nose (Subnasale)
                    new Point(points[6].x, points[6].y),                                         //l ear (Bitragion breadth)
                    new Point(points[8].x, points[8].y)                                          //r ear (Bitragion breadth)
                    );
            }
            else if (points.Count == 6)
            {
                objectPoints = objectPoints6;

                imagePoints.fromArray(
                    new Point((points[2].x + points[3].x) / 2, (points[2].y + points[3].y) / 2), //l eye (Interpupillary breadth)
                    new Point((points[4].x + points[5].x) / 2, (points[4].y + points[5].y) / 2), //r eye (Interpupillary breadth)
                    new Point(points[0].x, points[0].y),                                         //nose (Tip)
                    new Point(points[1].x, points[1].y)                                          //nose (Subnasale)
                    );
            }

            // Unsupported landmark count: there is no 3D model to match against, so
            // skip this frame. (Previously this fell through and passed a null
            // objectPoints into solvePnP, which throws.)
            if (objectPoints == null)
            {
                return;
            }

            // First call: allocate the solver outputs and run an initial, guess-free solve.
            if (rvec == null || tvec == null)
            {
                rvec = new Mat(3, 1, CvType.CV_64FC1);
                tvec = new Mat(3, 1, CvType.CV_64FC1);
                Calib3d.solvePnP(objectPoints, imagePoints, camMatrix, distCoeffs, rvec, tvec);
            }

            double tvec_x = tvec.get(0, 0)[0], tvec_y = tvec.get(1, 0)[0], tvec_z = tvec.get(2, 0)[0];

            // Project the current translation through the view-projection matrix and
            // test whether it lies inside the canonical clip volume.
            bool    isNotInViewport = false;
            Vector4 pos             = VP * new Vector4((float)tvec_x, (float)tvec_y, (float)tvec_z, 1.0f);
            if (pos.w != 0)
            {
                float x = pos.x / pos.w, y = pos.y / pos.w, z = pos.z / pos.w;
                if (x < -1.0f || x > 1.0f || y < -1.0f || y > 1.0f || z < -1.0f || z > 1.0f)
                {
                    isNotInViewport = true;
                }
            }

            if (double.IsNaN(tvec_z) || isNotInViewport)
            { // if tvec is wrong data, do not use extrinsic guesses. (the estimated object is not in the camera field of view)
                Calib3d.solvePnP(objectPoints, imagePoints, camMatrix, distCoeffs, rvec, tvec);
            }
            else
            {
                // Normal tracking path: refine the previous frame's solution iteratively.
                Calib3d.solvePnP(objectPoints, imagePoints, camMatrix, distCoeffs, rvec, tvec, true, Calib3d.SOLVEPNP_ITERATIVE);
            }

            //Debug.Log (tvec.dump () + " " + isNotInViewport);

            if (!isNotInViewport)
            {
                // Convert to unity pose data.
                double[] rvecArr = new double[3];
                rvec.get(0, 0, rvecArr);
                double[] tvecArr = new double[3];
                tvec.get(0, 0, tvecArr);
                PoseData poseData = ARUtils.ConvertRvecTvecToPoseData(rvecArr, tvecArr);

                // adjust the position to the scale of real-world space ([mm] -> [m]).
                poseData.pos = new Vector3(poseData.pos.x * 0.001f, poseData.pos.y * 0.001f, poseData.pos.z * 0.001f);

                // Changes in pos/rot below these thresholds are ignored.
                if (enableLowPassFilter)
                {
                    ARUtils.LowpassPoseData(ref oldPoseData, ref poseData, positionLowPass, rotationLowPass);
                }
                oldPoseData = poseData;

                Matrix4x4 transformationM = Matrix4x4.TRS(poseData.pos, poseData.rot, Vector3.one);

                // right-handed coordinates system (OpenCV) to left-handed one (Unity)
                // https://stackoverflow.com/questions/30234945/change-handedness-of-a-row-major-4x4-transformation-matrix
                transformationM = invertYM * transformationM * invertYM;

                // Apply Y-axis and Z-axis reflection matrix. (Adjust the posture of the AR object)
                transformationM = transformationM * invertYM * invertZM;

                headPosition = ARUtils.ExtractTranslationFromMatrix(ref transformationM);
                headRotation = ARUtils.ExtractRotationFromMatrix(ref transformationM);

                didUpdateHeadPositionAndRotation = true;
            }
        }
Example #2
0
        //NOTE: Passing a List here is not ideal, but we prioritize following Dlib's conventions.
        /// <summary>
        /// Updates the head pose estimate from a set of facial landmark points.
        /// Does nothing until the image dimensions have been initialized.
        /// </summary>
        /// <param name="landmarks">Detected 2D facial landmark positions.</param>
        public void EstimatePose(List<Vector2> landmarks)
        {
            // Hard stop while the image size is still unset.
            if (_width < 1 || _height < 1)
            {
                return;
            }

            HasValidPoseData = false;

            SetImagePoints(landmarks);

            // First call: allocate the solver outputs and run an initial, guess-free solve.
            if (_rVec == null || _tVec == null)
            {
                _rVec = new Mat(3, 1, CvType.CV_64FC1);
                _tVec = new Mat(3, 1, CvType.CV_64FC1);
                Calib3d.solvePnP(_objPoints, _imagePoints, _camMatrix, _distCoeffs, _rVec, _tVec);
            }

            _tVec.get(0, 0, _tVecArrayTemp);
            float tVecX = (float)_tVecArrayTemp[0];
            float tVecY = (float)_tVecArrayTemp[1];
            float tVecZ = (float)_tVecArrayTemp[2];

            // Project the current translation through the view-projection matrix and
            // check whether it lands inside the canonical clip volume.
            bool outsideViewport = false;
            var  clipPos         = _vp * new Vector4(tVecX, tVecY, tVecZ, 1.0f);

            if (Mathf.Abs(clipPos.w) > 0.0001)
            {
                float ndcX = clipPos.x / clipPos.w;
                float ndcY = clipPos.y / clipPos.w;
                float ndcZ = clipPos.z / clipPos.w;
                outsideViewport = Mathf.Abs(ndcX) > 1.0f || Mathf.Abs(ndcY) > 1.0f || Mathf.Abs(ndcZ) > 1.0f;
            }

            //NOTE: In short, if the current estimate is too unreliable, redo the computation from scratch.
            if (float.IsNaN(tVecZ) || outsideViewport)
            {
                Calib3d.solvePnP(_objPoints, _imagePoints, _camMatrix, _distCoeffs, _rVec, _tVec);
            }
            else
            {
                // Normal tracking path: refine the previous solution iteratively.
                Calib3d.solvePnP(
                    _objPoints, _imagePoints, _camMatrix, _distCoeffs, _rVec, _tVec,
                    true, Calib3d.SOLVEPNP_ITERATIVE
                    );
            }

            if (outsideViewport)
            {
                return;
            }

            // Convert the final t/r vectors into a Unity-usable Position/Rotation.
            _rVec.get(0, 0, _rVecArray);
            _tVec.get(0, 0, _tVecArray);
            var poseData = ARUtils.ConvertRvecTvecToPoseData(_rVecArray, _tVecArray);

            // 0.001f converts from [mm] to [m].
            // _invertYM: converts the right-handed frame to Unity's left-handed one.
            // _invertZM: mirrors front/back (presumably).
            var transformationM =
                _invertYM *
                Matrix4x4.TRS(0.001f * poseData.pos, poseData.rot, Vector3.one) *
                _invertZM;

            HeadPosition     = ARUtils.ExtractTranslationFromMatrix(ref transformationM);
            HeadRotation     = ARUtils.ExtractRotationFromMatrix(ref transformationM);
            HasValidPoseData = true;
        }