/// <summary>
    /// Get the angle, in radians, of the vector from a reference point to the landmark at the given index.
    /// </summary>
    /// <param name="shape">The detected face landmarks.</param>
    /// <param name="pointIndex">Index of the landmark to measure to.</param>
    /// <param name="point">The reference point to measure from.</param>
    /// <returns>The angle in radians.</returns>
    static float GetAngleBetween(ref FullObjectDetection shape, uint pointIndex, Point point)
    {
        var pointI = shape.GetPart(pointIndex);

        var vectorBetweenPoints           = pointI - point;
        var vectorBetweenPointsNormalized = new Vector2((float)(vectorBetweenPoints.X / vectorBetweenPoints.Length), (float)(vectorBetweenPoints.Y / vectorBetweenPoints.Length));

        // Multiply by (180f / (float)Math.PI) here to return degrees instead of radians.
        return((float)Math.Atan2(vectorBetweenPointsNormalized.Y, vectorBetweenPointsNormalized.X));
    }
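Note that Math.Atan2 depends only on the direction of its argument vector, not its magnitude, so the normalization above is redundant. A minimal equivalent sketch (illustration only, not part of the original code):

    // Equivalent sketch: Atan2 is scale-invariant, so the vector between the
    // two points does not need to be normalized first.
    static float GetAngleBetweenSketch(ref FullObjectDetection shape, uint pointIndex, Point point)
    {
        var v = shape.GetPart(pointIndex) - point;
        return((float)Math.Atan2(v.Y, v.X));
    }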
Example #2
        public static FaceLandmarkDetail From(FullObjectDetection faceLandmark)
        {
            var points = new DlibDotNet.Point[faceLandmark.Parts];

            for (uint index = 0; index < faceLandmark.Parts; index++)
            {
                points[index] = faceLandmark.GetPart(index);
            }

            return(new FaceLandmarkDetail(points));
        }
    /// <summary>
    /// Sum the distances from <paramref name="refPoint"/> to each eyebrow point,
    /// normalized by the distance between <paramref name="refPoint"/> and <paramref name="refPoint2"/>.
    /// </summary>
    static double GetTotalFeatureLength(Point refPoint, Point[] eyebrowPoints, Point refPoint2)
    {
        var    tempPoints = eyebrowPoints.Clone() as Point[];
        double total      = 0.0;

        for (int i = 0; i < tempPoints.Length; ++i)
        {
            tempPoints[i] -= refPoint;
            total         += tempPoints[i].Length;
        }

        return(total / (refPoint2 - refPoint).Length);
    }
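A hypothetical usage example, assuming the standard 68-point layout where indices 17-21 are the left eyebrow (as in GetFaceDataPoints3 below), 27 the top of the nasal bridge, and 33 the nose tip:

    // Hypothetical call: total eyebrow spread measured from the nose tip,
    // normalized by the nasal bridge length for scale invariance.
    var eyebrow = new Point[5];
    for (uint i = 17; i <= 21; i++)
    {
        eyebrow[i - 17] = shape.GetPart(i);
    }
    var spread = GetTotalFeatureLength(shape.GetPart(33), eyebrow, shape.GetPart(27));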
    /// <summary>
    /// Extract the features from a shape and place them into a <see cref="FaceData3"/>.
    /// </summary>
    /// <param name="shape">The detected face landmarks.</param>
    /// <param name="label">The emotion/label of the face.</param>
    /// <returns>The populated <see cref="FaceData3"/>.</returns>
    static FaceData3 GetFaceDataPoints3(ref FullObjectDetection shape, string label)
    {
        // Get average point
        float avgx = 0f, avgy = 0f;

        for (uint i = 0; i < shape.Parts; ++i)
        {
            avgx += shape.GetPart(i).X;
            avgy += shape.GetPart(i).Y;
        }
        avgx /= shape.Parts;
        avgy /= shape.Parts;

        // Get the normalization distance: squared length from the top of the
        // nasal bridge (landmark 27) to the average point.
        var middle        = new Point((int)avgx, (int)avgy);
        var normalization = (float)(shape.GetPart(27) - middle).LengthSquared;

        FaceData3 fd3 = new FaceData3();

        fd3.Emotion             = label;
        fd3.LeftEyebrowDistance = (GetDistance(ref shape, middle, 17, normalization) +   // landmarks 17-21: left eyebrow
                                   GetDistance(ref shape, middle, 18, normalization) +
                                   GetDistance(ref shape, middle, 19, normalization) +
                                   GetDistance(ref shape, middle, 20, normalization) +
                                   GetDistance(ref shape, middle, 21, normalization)) / 5;
        fd3.RightEyebrowDistance = (GetDistance(ref shape, middle, 22, normalization) +  // landmarks 22-26: right eyebrow
                                    GetDistance(ref shape, middle, 23, normalization) +
                                    GetDistance(ref shape, middle, 24, normalization) +
                                    GetDistance(ref shape, middle, 25, normalization) +
                                    GetDistance(ref shape, middle, 26, normalization)) / 5;
        fd3.LeftEyeWidth   = GetDistanceBetween(ref shape, 36, 39, normalization);
        fd3.RightEyeWidth  = GetDistanceBetween(ref shape, 42, 45, normalization);
        fd3.LeftEyeHeight  = GetDistanceBetween(ref shape, 40, 38, normalization);
        fd3.RightEyeHeight = GetDistanceBetween(ref shape, 46, 44, normalization);
        fd3.OuterLipWidth  = GetDistanceBetween(ref shape, 48, 54, normalization);
        fd3.InnerLipWidth  = GetDistanceBetween(ref shape, 60, 64, normalization);
        fd3.OuterLipHeight = GetDistanceBetween(ref shape, 52, 58, normalization);
        fd3.InnerLipHeight = GetDistanceBetween(ref shape, 63, 67, normalization);

        fd3.LeftLipEdgeAngle  = GetAngleBetween(ref shape, 48, middle);
        fd3.RightLipEdgeAngle = GetAngleBetween(ref shape, 54, middle);

        return(fd3);
    }
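A minimal end-to-end sketch showing how a FaceData3 could be produced, assuming DlibDotNet's frontal face detector and a 68-point shape predictor (the file names are placeholders, not taken from this example):

    // Sketch: detect a face, fit the 68 landmarks, then extract the features.
    using (var detector = Dlib.GetFrontalFaceDetector())
    using (var predictor = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
    using (var img = Dlib.LoadImage<RgbPixel>("face.jpg"))
    {
        foreach (var face in detector.Operator(img))
        {
            var shape    = predictor.Detect(img, face);
            var features = GetFaceDataPoints3(ref shape, "neutral");
        }
    }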
    /// <summary>
    /// Extract the features from a shape and place them into a <see cref="FaceData2"/>.
    /// </summary>
    /// <param name="shape">The detected face landmarks.</param>
    /// <param name="label">The emotion/label of the face.</param>
    /// <returns>The populated <see cref="FaceData2"/>.</returns>
    static FaceData2 GetFaceDataPoints2(ref FullObjectDetection shape, string label)
    {
        //http://www.paulvangent.com/2016/08/05/emotion-recognition-using-facial-landmarks/#more-565

        float avgx = 0f, avgy = 0f;

        float[] x = new float[shape.Parts];
        float[] y = new float[shape.Parts];

        Point[] distToCentres = new Point[shape.Parts];
        for (uint i = 0; i < shape.Parts; ++i)
        {
            avgx += shape.GetPart(i).X;
            x[i]  = shape.GetPart(i).X;
            avgy += shape.GetPart(i).Y;
            y[i]  = shape.GetPart(i).Y;
        }
        avgx /= shape.Parts;
        avgy /= shape.Parts;

        for (var i = 0; i < distToCentres.Length; i++)
        {
            distToCentres[i] = new Point(Convert.ToInt32(x[i] - avgx), Convert.ToInt32(y[i] - avgy));
        }

        FaceData2 fd = new FaceData2();

        fd.Emotion = label;

        // Nose bridge points (unused here); the linked article derives a
        // head-roll correction for the angles from this nose line.
        var middlePoint   = shape.GetPart(33);
        var topNasalPoint = shape.GetPart(27);

        for (uint i = 0; i < shape.Parts; ++i)
        {
            fd.RawCoordiantesX[i] = x[i];
            fd.RawCoordiantesY[i] = y[i];
            // Distance and angle of each landmark relative to the face centre.
            fd.LengthBetweenFeatures[i] = (float)distToCentres[i].LengthSquared;
            fd.AngleBetweenFeatures[i]  = Convert.ToSingle(Math.Atan2(y[i] - avgy, x[i] - avgx) * 360 / (2 * Math.PI));
        }

        return(fd);
    }
 /// <summary>
 /// Get the squared distance between the landmark at the given index and the middle of the face,
 /// divided by the normalization value.
 /// </summary>
 /// <param name="shape">The detected face landmarks.</param>
 /// <param name="middle">The middle (average) point of the face.</param>
 /// <param name="pointIndex">Index of the landmark.</param>
 /// <param name="normalization">Value used to make the distance scale-invariant.</param>
 /// <returns>The normalized squared distance.</returns>
 static float GetDistance(ref FullObjectDetection shape, Point middle, uint pointIndex, float normalization)
 {
     return((float)(shape.GetPart(pointIndex) - middle).LengthSquared / normalization);
 }
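GetDistanceBetween is called by GetFaceDataPoints3 above but is not included in this listing; by analogy with GetDistance, a plausible reconstruction (an assumption, not the original source) is:

 // Hypothetical reconstruction: squared distance between two landmarks,
 // divided by the same normalization value as GetDistance.
 static float GetDistanceBetween(ref FullObjectDetection shape, uint indexA, uint indexB, float normalization)
 {
     return((float)(shape.GetPart(indexA) - shape.GetPart(indexB)).LengthSquared / normalization);
 }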
Example #7
        ///<summary>Start face detection asynchronously after initialization</summary>
        async void Start()
        {
            if (caputure == null)
            {
                Debug.LogError("Video is null");

#if UNITY_EDITOR
                UnityEditor.EditorApplication.isPlaying = false;
#else
                Application.Quit();
#endif
                return;
            }

            // Variables that must be set first
            if (debug_eye_image)
            {
                thread = 2;
            }
            if (debug_face_image)
            {
#if VRM_EXIST
                if (gameObject.GetComponent <ApplyToVRM>() != null)
                {
                    gameObject.GetComponent <ApplyToVRM>().enabled = false;
                }
#endif
#if LIVE2D_EXIST
                if (gameObject.GetComponent <ApplyToLive2D>() != null)
                {
                    gameObject.GetComponent <ApplyToLive2D>().enabled = false;
                }
#endif
            }
            else
            {
#if VRM_EXIST
                if (gameObject.GetComponent <ApplyToVRM>() != null)
                {
                    gameObject.GetComponent <ApplyToVRM>().enabled = true;
                }
#endif
#if LIVE2D_EXIST
                if (gameObject.GetComponent <ApplyToLive2D>() != null)
                {
                    gameObject.GetComponent <ApplyToLive2D>().enabled = true;
                }
#endif
            }
            _mainContext = SynchronizationContext.Current;
            System.Diagnostics.Process process = System.Diagnostics.Process.GetCurrentProcess();
            process.PriorityClass = System.Diagnostics.ProcessPriorityClass.BelowNormal;
            await caputure.WaitOpen();

            Mat image_r = caputure.Read();
            Mat image   = new Mat();
            if (resolution == 1)
            {
                image = image_r.Clone();
            }
            else
            {
                Cv2.Resize(image_r, image, new Size(image_r.Cols / resolution, image_r.Rows / resolution));
            }
            // Variables that are set per detection mode
            switch (mode)
            {
            case DetectMode.OpenCV:
                if (!File.Exists(cascade_file))
                {
                    Debug.LogError("Path for cascade file is invalid");
#if UNITY_EDITOR
                    UnityEditor.EditorApplication.isPlaying = false;
#else
                    Application.Quit();
#endif
                    return;
                }

                cascade = new CascadeClassifier();
                ptr     = new IntPtr[thread - 1];
                try
                {
                    cascade.Load(cascade_file);
                }
                catch (Exception e)
                {
                    Debug.LogError(e.ToString());
                    Quit();
                    while (true)
                    {
                    }
                }
                break;

            case DetectMode.Dlib5:
                if (!File.Exists(shape_file_5))
                {
                    Debug.LogError("Path for 5 face landmarks is invalid");
#if UNITY_EDITOR
                    UnityEditor.EditorApplication.isPlaying = false;
#else
                    Application.Quit();
#endif
                    return;
                }
                ptr = new IntPtr[thread - 1];
                if (un_safe)
                {
                    detector    = new FrontalFaceDetector[1];
                    detector[0] = Dlib.GetFrontalFaceDetector();
                }
                else
                {
                    detector = new FrontalFaceDetector[thread - 1];
                    for (int i = 0; i < thread - 1; i++)
                    {
                        detector[i] = Dlib.GetFrontalFaceDetector();
                    }
                }
                try
                {
                    shape = ShapePredictor.Deserialize(shape_file_5);
                }
                catch (Exception e)
                {
                    Debug.LogError(e.ToString());
                    Quit();
                    while (true)
                    {
                    }
                }

                break;

            case DetectMode.Dlib68:
                if (!File.Exists(shape_file_68))
                {
                    Debug.LogError("Path for 68 face landmarks is invalid");
#if UNITY_EDITOR
                    UnityEditor.EditorApplication.isPlaying = false;
#else
                    Application.Quit();
#endif
                    return;
                }
                if (un_safe)
                {
                    detector    = new FrontalFaceDetector[1];
                    detector[0] = Dlib.GetFrontalFaceDetector();
                }
                else
                {
                    detector = new FrontalFaceDetector[thread - 1];
                    for (int i = 0; i < thread - 1; i++)
                    {
                        detector[i] = Dlib.GetFrontalFaceDetector();
                    }
                }
                landmark_detection = new DlibDotNet.Point[68];
                landmarks          = new Vector2[68];
                ptr         = new IntPtr[thread - 1];
                proj        = new double[thread - 1][];
                pos_double  = new double[thread - 1][];
                eye_point_L = new DlibDotNet.Point[thread - 1][];
                eye_ratio_L = new float[thread - 1];
                eye_point_R = new DlibDotNet.Point[thread - 1][];
                eye_ratio_R = new float[thread - 1];
                try
                {
                    shape = ShapePredictor.Deserialize(shape_file_68);
                }
                catch (Exception e)
                {
                    Debug.LogError(e.ToString());
                    Quit();
                    while (true)
                    {
                    }
                }
                dist_coeffs_mat = new Mat(4, 1, MatType.CV_64FC1, 0);
                var focal_length  = image.Cols;
                var center        = new Point2d(image.Cols / 2, image.Rows / 2);
                var camera_matrix = new double[3, 3] {
                    { focal_length, 0, center.X }, { 0, focal_length, center.Y }, { 0, 0, 1 }
                };
                camera_matrix_mat = new Mat(3, 3, MatType.CV_64FC1, camera_matrix);
                SetmodelPoints();
                for (int i = 0; i < thread - 1; i++)
                {
                    proj[i]        = new double[9];
                    pos_double[i]  = new double[3];
                    eye_point_L[i] = new DlibDotNet.Point[6];
                    eye_point_R[i] = new DlibDotNet.Point[6];
                }
                break;

            case DetectMode.Mixed:
                ptr     = new IntPtr[thread - 1];
                cascade = new CascadeClassifier();
                try
                {
                    cascade.Load(cascade_file);
                    shape = ShapePredictor.Deserialize(shape_file_68);
                }
                catch (Exception e)
                {
                    Debug.LogError(e.ToString());
                    Quit();
                    while (true)
                    {
                    }
                }
                landmark_detection = new DlibDotNet.Point[68];
                landmarks          = new Vector2[68];
                proj        = new double[thread - 1][];
                pos_double  = new double[thread - 1][];
                eye_point_L = new DlibDotNet.Point[thread - 1][];
                eye_ratio_L = new float[thread - 1];
                eye_point_R = new DlibDotNet.Point[thread - 1][];
                eye_ratio_R = new float[thread - 1];

                dist_coeffs_mat = new Mat(4, 1, MatType.CV_64FC1, 0);
                var focal_length2  = image.Cols;
                var center2        = new Point2d(image.Cols / 2, image.Rows / 2);
                var camera_matrix2 = new double[3, 3] {
                    { focal_length2, 0, center2.X }, { 0, focal_length2, center2.Y }, { 0, 0, 1 }
                };
                camera_matrix_mat = new Mat(3, 3, MatType.CV_64FC1, camera_matrix2);
                SetmodelPoints();
                for (int i = 0; i < thread - 1; i++)
                {
                    proj[i]        = new double[9];
                    pos_double[i]  = new double[3];
                    eye_point_L[i] = new DlibDotNet.Point[6];
                    eye_point_R[i] = new DlibDotNet.Point[6];
                }
                break;
            }
            // Other settings
            ptr[0] = image_r.Data;
            if (logToFile)
            {
                if (!Directory.Exists(Application.dataPath + "/DebugData"))
                {
                    Directory.CreateDirectory(Application.dataPath + "/DebugData");
                }
                fps_writer     = new StreamWriter(Application.dataPath + "/DebugData/F_FPS_LOG" + DateTime.Now.ToString("yyyyMMddHHmmss") + ".csv");
                pos_rot_writer = new StreamWriter(Application.dataPath + "/DebugData/F_POS_ROT_LOG" + DateTime.Now.ToString("yyyyMMddHHmmss") + ".csv");
                final_writer   = new StreamWriter(Application.dataPath + "/DebugData/F_FINAL_LOG" + DateTime.Now.ToString("yyyyMMddHHmmss") + ".csv");
                fps_writer.WriteLine("FPS");
                pos_rot_writer.WriteLine("POS_X,POS_Y,POS_Z,ROT_X,ROT_Y,ROT_Z");
                final_writer.WriteLine("POS_X,POS_Y,POS_Z,ROT_X,ROT_Y,ROT_Z,EYE_CLOSE_L,EYE_CLOSE_R,EYE_ROT_L_X,EYE_ROT_L_Y,EYE_ROT_R_X,EYE_ROT_R_Y");
                if (mode == DetectMode.Dlib68 || mode == DetectMode.Mixed)
                {
                    if (eye_tracking)
                    {
                        eye_rot_writer = new StreamWriter(Application.dataPath + "/DebugData/F_EYE_ROT_LOG" + DateTime.Now.ToString("yyyyMMddHHmmss") + ".csv");
                        eye_rot_writer.WriteLine("EYE_ROT_L_X,EYE_ROT_L_Y,EYE_ROT_R_X,EYE_ROT_R_Y");
                    }

                    if (blink_tracking)
                    {
                        eye_ratio_writer = new StreamWriter(Application.dataPath + "/DebugData/F_EYE_RATIO_LOG" + DateTime.Now.ToString("yyyyMMddHHmmss") + ".csv");
                        eye_ratio_writer.WriteLine("EYE_RATIO_L,EYE_RATIO_R");
                    }
                }
            }

            pos = transform.position - pos_offset;
            rot = transform.eulerAngles;

            for (int i = 0; i < smooth; i++)
            {
                pos_chain.AddLast(pos);
                rot_chain.AddLast(rot);
            }

            for (int i = 0; i < 8; i++)
            {
                eye_L.AddLast(0.0f);
                eye_R.AddLast(0.0f);
                eye_rot_L.AddLast(Vector3.zero);
                eye_rot_R.AddLast(Vector3.zero);
            }
            if (debug_face_image)
            {
                out_mat       = new Mat[thread - 1];
                out_texture2D = new Texture2D(image.Width, image.Height);
            }
            bytes           = new byte[thread - 1][];
            lock_imagebytes = new object[thread - 1];
            lock_out_mat    = new object[thread - 1];
            for (int i = 0; i < thread - 1; i++)
            {
                bytes[i]           = new byte[image.Width * image.Height * image.ElemSize()];
                lock_imagebytes[i] = new object();
                lock_out_mat[i]    = new object();
                if (debug_face_image)
                {
                    out_mat[i] = new Mat();
                }
            }
            if (image.IsEnabledDispose)
            {
                image.Dispose();
            }
            if (model == null)
            {
                model = transform;
            }

            // Start face tracking
            _ = Task.Run(DetectAsync);
        }
Example #8
        //--------------------------------------------------------------------------------------------------------
        /// <summary>
        /// Estimation that combines OpenCV and Dlib68. Faster than Dlib68 alone, but with a lower face detection rate.
        /// </summary>
        /// <param name="threadNo">Thread number to run on</param>
        /// <param name="est_pos">Estimated position</param>
        /// <param name="est_rot">Estimated rotation</param>
        /// <returns>Whether the estimation succeeded</returns>
        private bool Mixed(int threadNo, out Vector3 est_pos, out Vector3 est_rot)
        {
            Mat image_r = new Mat();
            Mat image   = new Mat();

            try
            {
                lock (lock_capture)
                {
                    image_r = caputure.Read();
                    if (image_r.Data == null)
                    {
                        throw new NullReferenceException("capture is null");
                    }

                    if (ptr.Contains(image_r.Data))
                    {
                        throw new InvalidOperationException("taken same data");
                    }
                    else
                    {
                        ptr[threadNo] = image_r.Data;
                    }
                }

                if (resolution == 1)
                {
                    image = image_r.Clone();
                }
                else
                {
                    Cv2.Resize(image_r, image, new Size(image_r.Cols / resolution, image_r.Rows / resolution));
                }

                GC.KeepAlive(image_r);

                var faces = cascade.DetectMultiScale(image);

                if (!faces.Any())
                {
                    throw new InvalidOperationException("this contains no elements");
                }

                Array2D <RgbPixel> array2D = new Array2D <RgbPixel>();

                lock (lock_imagebytes[threadNo])
                {
                    Marshal.Copy(image.Data, bytes[threadNo], 0, bytes[threadNo].Length);
                    array2D = Dlib.LoadImageData <RgbPixel>(bytes[threadNo], (uint)image.Height, (uint)image.Width, (uint)(image.Width * image.ElemSize()));
                }

                var rectangles = new Rectangle(faces.First().Left, faces.First().Top, faces.First().Right, faces.First().Bottom);

                DlibDotNet.Point[] points = new DlibDotNet.Point[68];

                using (FullObjectDetection shapes = shape.Detect(array2D, rectangles))
                {
                    for (uint i = 0; i < 68; i++)
                    {
                        points[i] = shapes.GetPart(i);
                    }
                    lock (lock_landmarks)
                    {
                        landmark_detection = points;
                    }
                }

                array2D.Dispose();

                Point2f[] image_points = new Point2f[6];
                image_points[0] = new Point2f(points[30].X, points[30].Y);
                image_points[1] = new Point2f(points[8].X, points[8].Y);
                image_points[2] = new Point2f(points[45].X, points[45].Y);
                image_points[3] = new Point2f(points[36].X, points[36].Y);
                image_points[4] = new Point2f(points[54].X, points[54].Y);
                image_points[5] = new Point2f(points[48].X, points[48].Y);
                var image_points_mat = new Mat(image_points.Length, 1, MatType.CV_32FC2, image_points);
                // Six points per eye, mirrored between right (42-47) and left (36-41):
                // [0]/[1] are the inner/outer corners, [2]/[3] and [4]/[5] the lid pairs.
                eye_point_R[threadNo][0] = points[42]; eye_point_L[threadNo][0] = points[39];
                eye_point_R[threadNo][1] = points[45]; eye_point_L[threadNo][1] = points[36];
                eye_point_R[threadNo][2] = points[43]; eye_point_L[threadNo][2] = points[38];
                eye_point_R[threadNo][3] = points[47]; eye_point_L[threadNo][3] = points[40];
                eye_point_R[threadNo][4] = points[44]; eye_point_L[threadNo][4] = points[37];
                eye_point_R[threadNo][5] = points[46]; eye_point_L[threadNo][5] = points[41];

                Mat rvec_mat       = new Mat();
                Mat tvec_mat       = new Mat();
                Mat projMatrix_mat = new Mat();
                Cv2.SolvePnP(model_points_mat, image_points_mat, camera_matrix_mat, dist_coeffs_mat, rvec_mat, tvec_mat);
                Marshal.Copy(tvec_mat.Data, pos_double[threadNo], 0, 3);
                Cv2.Rodrigues(rvec_mat, projMatrix_mat);
                Marshal.Copy(projMatrix_mat.Data, proj[threadNo], 0, 9);

                est_pos.x = -(float)pos_double[threadNo][0];
                est_pos.y = (float)pos_double[threadNo][1];
                est_pos.z = (float)pos_double[threadNo][2];

                est_rot = RotMatToQuatanion(proj[threadNo]).eulerAngles;

                if (blink_tracking)
                {
                    BlinkTracker(threadNo, eye_point_L[threadNo], eye_point_R[threadNo], est_rot);
                }
                if (eye_tracking)
                {
                    EyeTracker(threadNo, image, points.Skip(42).Take(6), points.Skip(36).Take(6));
                }

                image_points_mat.Dispose();
                rvec_mat.Dispose();
                tvec_mat.Dispose();
                projMatrix_mat.Dispose();
                GC.KeepAlive(image);
            }
            catch (Exception e)
            {
                Debug.Log(e.ToString());
                est_pos = pos; est_rot = rot;
                if (image.IsEnabledDispose)
                {
                    image.Dispose();
                }
                return(false);
            }
            lock (lock_imagebytes[threadNo])
            {
                if (image.IsEnabledDispose)
                {
                    image.Dispose();
                }
            }

            return(true);
        }
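RotMatToQuatanion is referenced above but not shown in these examples; a sketch of such a helper, assuming the nine doubles in proj[] hold the row-major 3x3 rotation matrix written by Cv2.Rodrigues:

        // Hypothetical sketch: convert a row-major 3x3 rotation matrix to a
        // Unity Quaternion. Valid while the matrix trace is greater than -1;
        // a robust version would branch on the largest diagonal element.
        private static Quaternion RotMatToQuatanionSketch(double[] m)
        {
            float w = Mathf.Sqrt(Mathf.Max(0f, 1f + (float)(m[0] + m[4] + m[8]))) / 2f;
            return new Quaternion(
                (float)(m[7] - m[5]) / (4f * w),
                (float)(m[2] - m[6]) / (4f * w),
                (float)(m[3] - m[1]) / (4f * w),
                w);
        }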
Example #9
        //--------------------------------------------------------------------------------------------------------
        /// <summary>
        /// Face detection using Dlib5
        /// </summary>
        /// <param name="threadNo">Thread number</param>
        /// <param name="est_pos">Estimated position</param>
        /// <param name="est_rot">Estimated rotation</param>
        /// <returns>Whether the estimation succeeded</returns>
        private bool Dlib5(int threadNo, out Vector3 est_pos, out Vector3 est_rot)
        {
            est_rot = rot;
            Mat image_r = new Mat();
            Array2D <RgbPixel> array2D = new Array2D <RgbPixel>();
            Mat image = new Mat();

            try
            {
                lock (lock_capture)
                {
                    image_r = caputure.Read();
                    if (image_r.Data == null)
                    {
                        throw new NullReferenceException("capture is null");
                    }

                    if (ptr.Contains(image_r.Data))
                    {
                        throw new InvalidOperationException("taken same data");
                    }
                    else
                    {
                        ptr[threadNo] = image_r.Data;
                    }
                }
                if (resolution == 1)
                {
                    image = image_r.Clone();
                }
                else
                {
                    Cv2.Resize(image_r, image, new Size(image_r.Cols / resolution, image_r.Rows / resolution));
                }

                GC.KeepAlive(image_r);

                lock (lock_imagebytes[threadNo])
                {
                    Marshal.Copy(image.Data, bytes[threadNo], 0, bytes[threadNo].Length);
                    array2D = Dlib.LoadImageData <RgbPixel>(bytes[threadNo], (uint)image.Height, (uint)image.Width, (uint)(image.Width * image.ElemSize()));
                }

                Rectangle rectangles = default;
                if (un_safe)
                {
                    rectangles = detector[0].Operator(array2D).FirstOrDefault();
                }
                else
                {
                    rectangles = detector[threadNo].Operator(array2D).FirstOrDefault();
                }

                DlibDotNet.Point[] points = new DlibDotNet.Point[5];
                if (rectangles == default)
                {
                    throw new InvalidOperationException("this contains no elements.");
                }

                using (FullObjectDetection shapes = shape.Detect(array2D, rectangles))
                {
                    for (uint i = 0; i < 5; i++)
                    {
                        points[i] = shapes.GetPart(i);
                    }
                }

                est_pos.x = -(image.Width / 2 - points[4].X) / (float)image.Width;
                est_pos.y = (image.Height / 2 - points[4].Y) / (float)image.Height;
                est_pos.z = (points[0].X - points[2].X) / (float)image.Width + (points[0].Y - points[2].Y) / (float)image.Height - z_offset;

                // Mathf.Atan2 accepts (0, 0) without throwing for floats, so
                // this catch is purely defensive.
                try
                {
                    est_rot.z = Mathf.Rad2Deg * Mathf.Atan2(points[0].Y - points[2].Y, points[0].X - points[2].X);
                }
                catch (DivideByZeroException)
                {
                    est_rot.z = points[0].Y - points[2].Y < 0 ? -90 : 90;
                }
                if (debug_face_image)
                {
                    DetectDebug(threadNo, image, points: points);
                }
                GC.KeepAlive(image);
            }
            catch (Exception e)
            {
                Debug.Log(e.ToString());
                est_pos = pos;
                if (array2D.IsEnableDispose)
                {
                    array2D.Dispose();
                }
                if (image.IsEnabledDispose)
                {
                    image.Dispose();
                }

                return(false);
            }

            if (array2D.IsEnableDispose)
            {
                array2D.Dispose();
            }
            lock (lock_imagebytes[threadNo])
            {
                if (image.IsEnabledDispose)
                {
                    image.Dispose();
                }
            }
            return(true);
        }
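BlinkTracker is likewise not part of this listing; a common metric for the six eye points collected in Mixed() is the eye aspect ratio, sketched here under the assumption that indices 0/1 hold the eye corners and 2/3 and 4/5 the upper/lower lid pairs:

        // Hypothetical blink metric: lid opening relative to eye width.
        // The ratio falls toward zero as the eye closes.
        private static float EyeAspectRatio(DlibDotNet.Point[] eye)
        {
            double vertical   = (eye[2] - eye[3]).Length + (eye[4] - eye[5]).Length;
            double horizontal = (eye[0] - eye[1]).Length;
            return((float)(vertical / (2.0 * horizontal)));
        }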