// Example #1
    /// <summary>
    /// Pulls the latest tracking values from the face module each time a
    /// frame has been processed, smooths them, and flags the data as ready.
    /// </summary>
    /// <param name="sender">Event source (unused).</param>
    /// <param name="args">Frame-processed event data (unused).</param>
    void FaceModule_FrameProcessed(object sender, FrameProcessedEventArgs args)
    {
        FaceData.Update();
        var face = FaceData.QueryFaceByIndex(0);

        // Nothing to do until at least one face is being tracked.
        if (face == null)
        {
            return;
        }

        // Raw detection values for this frame.
        FaceRect = face.Detection.BoundingRect;
        Landmark = face.Landmarks.Points;
        FaceExp  = face.Expressions.ExpressionResults;

        // Body position, derived from the face rectangle.
        BodyPos = SmoothBody.SmoothValue(GetBodyPos(FaceRect));

        // Head angle from the landmark points.
        HeadAng = SmoothHead.SmoothValue(GetHeadAng(Landmark));

        // Gaze / eye position from the landmark points.
        EyesPos = SmoothEyes.SmoothValue(GetEyesPos(Landmark));

        // Blink: take the stronger of the two eyes, then remap so values
        // below 50 count as fully open and 50..100 stretches to 0..100.
        float closedLeft  = FaceExp[FaceExpression.EXPRESSION_EYES_CLOSED_LEFT].intensity;
        float closedRight = FaceExp[FaceExpression.EXPRESSION_EYES_CLOSED_RIGHT].intensity;
        EyesClose = SmoothEyesClose.SmoothValue(Mathf.Max(closedLeft, closedRight));
        EyesClose = EyesClose < 50 ? 0 : (EyesClose - 50) * 2;

        // Brow raise: stronger of the two brows.
        float raisedLeft  = FaceExp[FaceExpression.EXPRESSION_BROW_RAISER_LEFT].intensity;
        float raisedRight = FaceExp[FaceExpression.EXPRESSION_BROW_RAISER_RIGHT].intensity;
        BrowRai = SmoothBrowRai.SmoothValue(Mathf.Max(raisedLeft, raisedRight));

        // Brow lower: stronger of the two brows.
        float loweredLeft  = FaceExp[FaceExpression.EXPRESSION_BROW_LOWERER_LEFT].intensity;
        float loweredRight = FaceExp[FaceExpression.EXPRESSION_BROW_LOWERER_RIGHT].intensity;
        BrowLow = SmoothBrowLow.SmoothValue(Mathf.Max(loweredLeft, loweredRight));

        // Smile.
        Smile = SmoothSmile.SmoothValue(FaceExp[FaceExpression.EXPRESSION_SMILE].intensity);

        // Kiss (slightly conflicts with mouth-open).
        Kiss = SmoothKiss.SmoothValue(FaceExp[FaceExpression.EXPRESSION_KISS].intensity);

        // Mouth open (slightly conflicts with kiss).
        Mouth = SmoothMouth.SmoothValue(FaceExp[FaceExpression.EXPRESSION_MOUTH_OPEN].intensity);

        // Tongue out (conflicts with mouth-open).
        Tongue = SmoothTongue.SmoothValue(FaceExp[FaceExpression.EXPRESSION_TONGUE_OUT].intensity);

        Ready = true;
    }
        /// <summary>
        /// Rebuilds <c>landmarkBoundingBoxes</c>: one fixed-size box per
        /// landmark set, centred on the centroid of that set's image points.
        /// </summary>
        private void CreateLandmarkBoundingBoxes()
        {
            landmarkBoundingBoxes.Clear();

            for (int i = 0; i < landmarks.Count; i++)
            {
                LandmarkPoint[] points = landmarks.ElementAt(i);

                // Guard: an empty (or missing) landmark set would otherwise
                // cause a divide-by-zero when computing the centroid.
                if (points == null || points.Length == 0)
                {
                    continue;
                }

                // Average first, cast last: the original cast the float sum to
                // int before an integer division, discarding sub-pixel
                // precision of the coordinates.
                int centroidX = (int)(points.Sum(point => point.image.x) / points.Length);
                int centroidY = (int)(points.Sum(point => point.image.y) / points.Length);

                landmarkBoundingBoxes.Add(new RectI32(
                    centroidX - (boxWidth / 2),
                    centroidY - (boxHeight / 2),
                    boxWidth,
                    boxHeight));
            }
        }
// Example #3
    /// <summary>
    /// Derives the body position from the face bounding rectangle.
    /// </summary>
    /// <param name="rect">Face rectangle in image pixels.</param>
    /// <returns>Smoothable body position (x mirrored, y offset, z from face size).</returns>
    /// <remarks>
    /// https://software.intel.com/sites/landingpage/realsense/camera-sdk/v2016r3/documentation/html/index.html?doc_face_face_location_data.html
    /// </remarks>
    Vector3 GetBodyPos(RectI32 rect)
    {
        // Head position: centre of the face rectangle.
        float xMax = Resolution.width;
        float yMax = Resolution.height;
        float xPos = rect.x + (rect.w / 2);
        // BUG FIX: vertical centre must start from the rect's top coordinate
        // rect.y, not its height rect.h (mirrors xPos above). The original
        // made yPos independent of where the face actually was in the frame.
        float yPos = rect.y + (rect.h / 2);
        // Face height shrinks with distance, so use it as a depth proxy.
        float zPos = (yMax - rect.h);

        // Normalize to [-1, 1] about the image centre; the trailing divisors
        // (BodyPosX/Y/Z) are tuning factors.
        xPos = (xPos - (xMax / 2)) / (xMax / 2) / BodyPosX;
        yPos = (yPos - (yMax / 2)) / (yMax / 2) / BodyPosY;
        zPos = zPos / BodyPosZ;

        // Apply the configured resting-position offset.
        yPos += BodyPosYOffset;

        // X is mirrored so on-screen motion matches the user's motion.
        return(new Vector3(-xPos, yPos, zPos));
    }