Example No. 1
        public async Task <string> FaceDetect(string filePath)
        {
            return(await Task.Run(() =>
            {
                try
                {
                    var image = Convert.ToBase64String(File.ReadAllBytes(filePath));

                    var imageType = "BASE64";

                    // Face detection may throw network or other exceptions; wrap the call in try/catch
                    var result = _client.Detect(image, imageType);
                    Console.WriteLine(result);
                    // If optional parameters are needed
                    //var options = new Dictionary<string, object>
                    //{
                    //    {"face_field", "age"},
                    //    {"max_face_num", 2},
                    //    {"face_type", "LIVE"},
                    //    {"liveness_control", "LOW"}
                    //};
                    //// Call face detection with parameters
                    //result = _client.Detect(image, imageType, options);
                    return result.ToString();
                }
                catch (Exception e)
                {
                    _logger.LogError(e, "Face detection failed");
                    throw;
                }
            }));
        }
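A minimal usage sketch for the wrapper above (not part of the original example): it assumes the V3 JSON layout that the later examples read, i.e. a top-level error_code plus result.face_list, and the caller name PrintFaceCountAsync is illustrative only.

        // Hypothetical caller: parses the JSON string returned by FaceDetect above.
        // Assumes using Newtonsoft.Json.Linq; and the V3 response layout (error_code, result.face_list).
        public async Task PrintFaceCountAsync(string filePath)
        {
            var json = await FaceDetect(filePath);
            var obj  = JObject.Parse(json);

            if ((int?)obj["error_code"] == 0)
            {
                var faces = (JArray)obj["result"]["face_list"];
                Console.WriteLine("Faces detected: " + faces.Count);
            }
            else
            {
                Console.WriteLine("Detect failed: " + obj["error_msg"]);
            }
        }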
Example No. 2
        /// <summary>
        /// Face detection
        /// Detects faces in the request image and returns face positions, 72 landmark coordinates, and face attribute information. Response time scales with the number of faces in the image; more faces mean a slightly longer response.
        /// </summary>
        /// <param name="imageByte">Image file bytes</param>
        /// <param name="FaceDetectOptions">Optional fields to return; by default only the face box, confidence and rotation angle are returned.</param>
        /// <param name="max_face_num">Number of faces to detect; defaults to 1</param>
        /// <returns>
        /// Field  Type  Required  Description
        /// log_id  number  yes  Log id
        /// result_num  number  yes  Number of faces
        /// result  array  yes  Collection of face attribute objects
        /// +age  number  no  Age. Returned when face_fields contains age
        /// +beauty  number  no  Attractiveness score, 0-100, higher means more attractive. Returned when face_fields contains beauty
        /// +location  object  yes  Position of the face in the image
        /// ++left  number  yes  Distance of the face region from the left edge
        /// ++top  number  yes  Distance of the face region from the top edge
        /// ++width  number  yes  Width of the face region
        /// ++height  number  yes  Height of the face region
        /// +face_probability  number  yes  Face confidence, 0-1
        /// +rotation_angle  number  yes  Clockwise rotation of the face box relative to vertical, [-180, 180]
        /// +yaw  number  yes  3D yaw (left/right) angle, [-90 (left), 90 (right)]
        /// +pitch  number  yes  3D pitch angle, [-90 (up), 90 (down)]
        /// +roll  number  yes  In-plane rotation, [-180 (counter-clockwise), 180 (clockwise)]
        /// +expression  number  no  Expression: 0 not smiling; 1 smiling; 2 laughing. Returned when face_fields contains expression
        /// +expression_probability  number  no  Expression confidence, 0-1. Returned when face_fields contains expression
        /// +faceshape  array  no  Face shape confidences. Returned when face_fields contains faceshape
        /// ++type  string  yes  Face shape: square/triangle/oval/heart/round
        /// ++probability  number  yes  Confidence, 0-1
        /// +gender  string  no  male or female. Returned when face_fields contains gender
        /// +gender_probability  number  no  Gender confidence, 0-1. Returned when face_fields contains gender
        /// +glasses  number  no  Glasses: 0 none, 1 ordinary glasses, 2 sunglasses. Returned when face_fields contains glasses
        /// +glasses_probability  number  no  Glasses confidence, 0-1. Returned when face_fields contains glasses
        /// +landmark  array  no  4 key points: left eye center, right eye center, nose tip, mouth center. Returned when face_fields contains landmark
        /// ++x  number  no  x coordinate
        /// ++y  number  no  y coordinate
        /// +landmark72  array  no  72 landmark positions (see the sample diagram). Returned when face_fields contains landmark
        /// ++x  number  no  x coordinate
        /// ++y  number  no  y coordinate
        /// +race  string  no  yellow, white, black or arabs. Returned when face_fields contains race
        /// +race_probability  number  no  Race confidence, 0-1. Returned when face_fields contains race
        /// +qualities  object  no  Face quality information. Returned when face_fields contains qualities
        /// ++occlusion  object  yes  Occlusion probability of each face part, [0, 1]; 0 means unoccluded, 1 means fully occluded
        /// +++left_eye  number  yes  Left eye
        /// +++right_eye  number  yes  Right eye
        /// +++nose  number  yes  Nose
        /// +++mouth  number  yes  Mouth
        /// +++left_cheek  number  yes  Left cheek
        /// +++right_cheek  number  yes  Right cheek
        /// +++chin  number  yes  Chin
        /// ++blur  number  yes  Blur level, [0, 1]; 0 is sharp, 1 is blurred
        /// ++illumination  number  yes  Illumination level of the face region, [0, 255]
        /// ++completeness  number  yes  Face completeness, 0 or 1; 0 means the face extends past the image border, 1 means the face lies entirely inside the image
        /// ++type  object  yes  Real face / cartoon face confidence
        /// +++human  number  yes  Real face confidence, [0, 1]
        /// +++cartoon  number  yes  Cartoon face confidence, [0, 1]
        ///
        /// The face detection response can be used for quality checks against the fields and thresholds below, to ensure that face quality meets the requirements of downstream operations.
        ///
        /// Occlusion
        /// occlusion (0-1): 0 means no occlusion, 1 means fully occluded. It contains several sub-fields for individual face parts and is typically used to detect occlusion by hair, sunglasses, masks, etc.
        /// left_eye : 0.6, # occlusion threshold for the left eye
        /// right_eye : 0.6, # occlusion threshold for the right eye
        /// nose : 0.7, # occlusion threshold for the nose
        /// mouth : 0.7, # occlusion threshold for the mouth
        /// left_cheek : 0.8, # occlusion threshold for the left cheek
        /// right_cheek : 0.8, # occlusion threshold for the right cheek
        /// chin_contour : 0.6, # occlusion threshold for the chin
        ///
        /// Blur
        /// blur (0-1): 0 is the sharpest, 1 is the most blurred
        /// should be less than 0.7
        ///
        /// Illumination
        /// illumination (0-255): gray-level illumination of the face region; 0 means poor lighting (corresponds to the Y component of YUV in the client SDKs)
        /// should be greater than 40
        ///
        /// Pose angles
        /// pitch: 3D pitch angle, [-90 (up), 90 (down)]
        /// roll: in-plane rotation, [-180 (counter-clockwise), 180 (clockwise)]
        /// yaw: 3D yaw angle, [-90 (left), 90 (right)]
        /// each should be less than 20 degrees
        ///
        /// Face completeness
        /// completeness (0-1): 0 means complete, 1 means incomplete
        /// should be less than 0.4
        ///
        /// Face size
        /// Recommended face size: width and height between 80*80 and 200*200 pixels
        /// The face region should be at least 100*100 pixels
        /// </returns>
        public static BaseResponse FaceDetect(byte[] imageByte, List <FaceDetectOptions> FaceDetectOptions, int max_face_num = 1)
        {
            if (imageByte.Length > 1024 * 1024 * 10)
            {
                throw new Exception("Image must be smaller than 10 MB");
            }

            var    imageBase64         = Convert.ToBase64String(imageByte);
            // Join the requested optional fields into a comma-separated list
            string face_fields_options = string.Join(",", FaceDetectOptions);

            var options = new Dictionary <string, object>()
            {
                { "face_fields", face_fields_options },
                { "max_face_num", max_face_num }
            };

            return(Execute(client.Detect(imageBase64, ImageType, options)));
        }
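A hedged sketch of applying the quality thresholds documented above to one entry of the result array (not part of the original example): the field names follow the V2-style table in the comment, the cut-offs are the recommended values listed there, and completeness / face-size checks are omitted.

        // Illustrative only: quality gate for a single face object from the V2-style response above.
        // Field names (qualities.occlusion.*, qualities.blur, qualities.illumination, pitch/roll/yaw)
        // and thresholds are taken from the documentation comment; adjust them to your own requirements.
        public static bool IsFaceQualityAcceptable(JObject face)
        {
            var q = face["qualities"];
            if (q == null) return false;                       // qualities must be requested via face_fields

            var occ = q["occlusion"];
            bool occlusionOk =
                (double)occ["left_eye"]   < 0.6 && (double)occ["right_eye"]   < 0.6 &&
                (double)occ["nose"]       < 0.7 && (double)occ["mouth"]       < 0.7 &&
                (double)occ["left_cheek"] < 0.8 && (double)occ["right_cheek"] < 0.8 &&
                (double)occ["chin"]       < 0.6;

            bool blurOk         = (double)q["blur"] < 0.7;
            bool illuminationOk = (double)q["illumination"] > 40;

            bool poseOk =
                Math.Abs((double)face["pitch"]) < 20 &&
                Math.Abs((double)face["roll"])  < 20 &&
                Math.Abs((double)face["yaw"])   < 20;

            return occlusionOk && blurOk && illuminationOk && poseOk;
        }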
Example No. 3
        /// <summary>
        /// Face recognition
        /// </summary>
        /// <param name="image">Face image</param>
        /// <returns></returns>
        public JObject NetRecognition(Image image)
        {
            try
            {
                var client = new Baidu.Aip.Face.Face(API_KEY, SECRET_KEY)
                {
                    Timeout = 60000  // adjust the timeout
                };

                // Convert the image to Base64 (dispose the bitmap and stream when done)
                byte[] arr;
                using (var bmp = new Bitmap(image))
                using (var ms = new MemoryStream())
                {
                    bmp.Save(ms, System.Drawing.Imaging.ImageFormat.Jpeg);
                    arr = ms.ToArray();
                }
                var base64 = Convert.ToBase64String(arr);

                string imageType = "BASE64";
                // Optional parameters
                var options = new Dictionary <string, object> {
                    { "face_field", "age,beauty,gender" },
                    { "max_face_num", 1 },
                    { "face_type", "LIVE" }
                };
                JObject result = client.Detect(base64, imageType, options);
                return(result);
            }
            catch (Exception ex)
            {
                MessageBox.Show("连接人脸识别API出错:" + ex);
                return(new JObject());
            }
        }
Example No. 4
        protected static void Detect()
        {
            // Set the APPID/AK/SK
            var client = new Baidu.Aip.Face.Face(API_KEY, SECRET_KEY);

            client.Timeout = 60000;  // adjust the timeout


            var image = ComFunc.StreamToBase64String(new FileStream("C:/Users/admin/Desktop/微信图片_20190104172518.jpg", FileMode.Open));

            var imageType = "BASE64";

            // Face detection may throw network or other exceptions; wrap the call in try/catch
            //var result = client.Detect(image, imageType);
            //Console.WriteLine(result);
            // If optional parameters are needed
            var options = new Dictionary <string, object> {
                { "face_field", "age,beauty,expression,faceshape,gender,glasses,landmark,race,quality,facetype" },
                { "max_face_num", 2 },
                { "face_type", "LIVE" }
            };
            // Call face detection with parameters
            var result = client.Detect(image, imageType, options);

            Console.WriteLine(result);
        }
Example No. 5
        public JObject FaceJianCe(Stream faceStream)
        {
            // Read the whole stream into a byte array (a single Read call is not guaranteed to fill the buffer)
            byte[] buffer;
            using (var ms = new MemoryStream())
            {
                faceStream.CopyTo(ms);
                buffer = ms.ToArray();
            }
            // Face detection may throw network or other exceptions; wrap the call in try/catch
            // Optional parameters
            var options = new Dictionary <string, object> {
                { "max_face_num", 1 },
                { "face_fields", "beauty" }
            };

            // Call face detection with parameters
            try
            {
                return(Client.Detect(buffer, options));
            }
            catch (Exception)
            {
                throw;      // rethrow without losing the stack trace
            }
        }
Example No. 6
        /// <summary>
        /// Face detection
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void skinButton1_Click(object sender, EventArgs e)
        {
            if (picbPreview.Image is null)
            {
                MessageBox.Show("请先拍照或者选取一张图片,再进行检测操作");
                return;
            }
            // var APP_ID = "17894506";
            Thread threadadd = new Thread(() =>
            {
                try
                {
                    var client = new Baidu.Aip.Face.Face(API_KEY, SECRET_KEY)
                    {
                        Timeout = 60000  // adjust the timeout
                    };
                    var image     = ImgToBase64String((Bitmap)this.picbPreview.Image);
                    var imageType = "BASE64";

                    // Face detection may throw network or other exceptions; wrap the call in try/catch
                    // Optional parameters
                    var options = new Dictionary <string, object>
                    {
                        { "face_field", "age,beauty,expression,face_shape,gender,glasses," +
                          "race,quality,eye_status,emotion,face_type" },
                        { "max_face_num", Max_face_num },
                        { "face_type", "LIVE" },
                        { "liveness_control", "LOW" }
                    };
                    // Call face detection with parameters
                    var result = client.Detect(image, imageType, options);
                    if (result != null && result.ToString() != null && result.ToString().Length > 0)
                    {
                        var json             = JsonConvert.SerializeObject(result);
                        FaceCheckModel model = DeserializeJsonToObject <GetApiJson <FaceCheckModel> >(json)?.Result ?? new FaceCheckModel();
                        FaceTestBind(model);
                    }


                    var picclient = new Baidu.Aip.ImageClassify.ImageClassify("NpBGfUR6qBGtFo5bIFbiPCO9", "S0L7LXAewfW7BBKmbXd0EQ8iRzEYRGqc")
                    {
                        Timeout = 60000  // adjust the timeout
                    };
                    Image img        = this.picbPreview.Image;
                    MemoryStream ms  = new MemoryStream();
                    byte[] imagedata = null;
                    img.Save(ms, System.Drawing.Imaging.ImageFormat.Jpeg);
                    imagedata = ms.ToArray();  // ToArray returns only the written bytes (GetBuffer may include unused capacity)

                    var picoptions = new Dictionary <string, object> {
                        { "baike_num", Result_num }
                    };

                    var results = picclient.AdvancedGeneral(imagedata, picoptions);
                    if (results != null && results.ToString() != null && results.ToString().Length > 0)
                    {
                        var json = JsonConvert.SerializeObject(results);
                        ImageRecognitionModel model = DeserializeJsonToObject <ImageRecognitionModel>(json) ?? new ImageRecognitionModel();
                        ImageRecognitionBind(model);
                    }
                }
                catch (Exception ex)
                {
                    MessageBox.Show("网络错误!错误信息:" + ex.Message);
                }
            });

            threadadd.Start();
        }
Example No. 7
        /// <summary>
        /// Face recognition
        /// </summary>
        /// <returns></returns>
        public JsonResult FaceDistinguish()
        {
            // Set the APPID/AK/SK
            var API_KEY    = "mxzzu1vLxca9KjnLwBCgOZs5";                // your Api Key
            var SECRET_KEY = "D9CkVbdziW9GrHiAZDENt8rOf0tVw9im";        // your Secret Key
            var client     = new Baidu.Aip.Face.Face(API_KEY, SECRET_KEY);

            client.Timeout = 60000;      // adjust the timeout

            var    imageType = "BASE64"; //BASE64   URL
            string imgData64 = Request["imgData64"];

            imgData64 = imgData64.Substring(imgData64.IndexOf(",") + 1);       // strip everything before the ',' (the data-URL prefix)

            ResultInfo result = new ResultInfo();

            try
            {
                var groupId = "TestGroupA";
                var userId  = "TestUserA";


                //var result323 = client.Detect(imgData64, imageType);    // detection result is not used below


                //What is the liveness threshold?
                //0.05  live-face false rejection rate: 1 in 10,000; rejection rate: 63.9%
                //0.3   live-face false rejection rate: 1 in 1,000;  rejection rate: 90.3%
                //0.9   live-face false rejection rate: 1 in 100;    rejection rate: 97.6%
                //1. False rejection rate: the probability of classifying a real person as a fake. The higher the threshold, the higher the security and the stricter the requirement, and the higher the false rejection rate.
                //2. Pass rate = 1 - false rejection rate
                //So compare face_liveness with the recommended value returned in the thresholds field: if it is greater, the face is live.

                ////Liveness check
                var faces = new JArray
                {
                    new JObject
                    {
                        { "image", imgData64 },
                        { "image_type", "BASE64" }
                    }
                };
                var Living     = client.Faceverify(faces); // liveness verification response
                var LivingJson = Newtonsoft.Json.JsonConvert.SerializeObject(Living);
                var LivingObj  = Newtonsoft.Json.JsonConvert.DeserializeObject(LivingJson) as JObject;
                if (LivingObj["error_code"].ToString() == "0" && LivingObj["error_msg"].ToString() == "SUCCESS")
                {
                    var    Living_result = Newtonsoft.Json.JsonConvert.DeserializeObject(LivingObj["result"].ToString()) as JObject;
                    var    Living_list   = Living_result["thresholds"];
                    double face_liveness = Convert.ToDouble(Living_result["face_liveness"]);
                    var    frr           = Newtonsoft.Json.JsonConvert.SerializeObject(Living_list.ToString());
                    var    frr_1eObj     = Newtonsoft.Json.JsonConvert.DeserializeObject(Living_list.ToString()) as JObject;
                    double frr_1e4       = Convert.ToDouble(frr_1eObj["frr_1e-4"]);
                    if (face_liveness < frr_1e4)
                    {
                        result.info = "识别失败:这是相片之类的非活体!";
                        return(Json(result, JsonRequestBehavior.AllowGet));
                    }
                }

                //First, check whether this face exists in the group
                var result2 = client.Search(imgData64, imageType, groupId);
                var strJson = Newtonsoft.Json.JsonConvert.SerializeObject(result2);
                var o2      = Newtonsoft.Json.JsonConvert.DeserializeObject(strJson) as JObject;


                //Check whether the face exists and whether the similarity score is above 80
                if (o2["error_code"].ToString() == "0" && o2["error_msg"].ToString() == "SUCCESS")
                {
                    var result_list = Newtonsoft.Json.JsonConvert.DeserializeObject(o2["result"].ToString()) as JObject;
                    var user_list   = result_list["user_list"];
                    var Obj         = JArray.Parse(user_list.ToString());
                    foreach (var item in Obj)
                    {
                        //A score above 80 can be treated as the same person; this score corresponds to a false acceptance rate of 1 in 10,000
                        var score = Convert.ToInt32(item["score"]);
                        if (score > 80)
                        {
                            result.info      = result2.ToString();
                            result.res       = true;
                            result.startcode = 221;
                            return(Json(result, JsonRequestBehavior.AllowGet));
                        }
                    }
                }
                else
                {
                    result.info = strJson.ToString();
                    result.res  = false;
                    return(Json(result, JsonRequestBehavior.AllowGet));
                }
            }
            catch (Exception e)
            {
                result.info = e.Message;
            }
            return(Json(result, JsonRequestBehavior.AllowGet));
        }
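The threshold comments in the example above compare face_liveness only against frr_1e-4. Below is a hedged sketch of making the security level configurable; the frr_1e-3 and frr_1e-2 key names are assumptions matching the three FRR levels listed in those comments, not confirmed by this page.

        // Illustrative only: pick a recommended threshold from the Faceverify result and
        // compare it with face_liveness. Keys other than frr_1e-4 are assumed; a higher
        // threshold means higher security but a higher false rejection rate.
        private static bool IsLiveFace(JObject faceverifyResponse, string securityLevel = "frr_1e-4")
        {
            var result       = faceverifyResponse["result"];
            double liveness  = Convert.ToDouble(result["face_liveness"]);
            double threshold = Convert.ToDouble(result["thresholds"][securityLevel]);
            return liveness >= threshold;
        }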
Example No. 8
        private void button_recongnition2_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                OpenFileDialog fdlg = new OpenFileDialog();
                fdlg.Title = "Select an image file to recognize";
                //fdlg.InitialDirectory = @"c:\";
                fdlg.Filter = "Image files(*.jpg,*.jpeg,*.png,*.bmp)|*.jpg;*.jpeg;*.png;*.bmp";

                fdlg.FilterIndex      = 0;
                fdlg.RestoreDirectory = false;
                if (fdlg.ShowDialog() == true)
                {
                    m_imgPath2 = fdlg.FileName;
                }
                else
                {
                    return;     // user cancelled the dialog
                }


                string image = Convert.ToBase64String(File.ReadAllBytes(m_imgPath2));


                // Face detection may throw network or other exceptions; wrap the call in try/catch
                // Optional parameters
                var options = new Dictionary <string, object>
                {
                    { "face_field", "beauty,age,expression,face_shape,gender,race" },
                };


                // Call face detection with parameters
                var result        = mClient.Detect(image, "BASE64", options);
                var list_faceinfo = new List <string>();
                var faces         = result["result"]["face_list"];
                foreach (var face in faces)
                {
                    var age = (int)face["age"];
                    list_faceinfo.Add("年龄:" + age);
                    var beauty = (string)face["beauty"];
                    list_faceinfo.Add("样貌评分:" + beauty);
                    var expression    = (string)face["expression"]["type"];
                    var expression_pb = (string)face["expression"]["probability"];
                    list_faceinfo.Add("表情:" + expression + ",置信度:" + expression_pb);
                    var face_shape    = (string)face["face_shape"]["type"];
                    var face_shape_pb = (string)face["face_shape"]["probability"];
                    list_faceinfo.Add("脸型:" + face_shape + ",置信度:" + face_shape_pb);
                    var gender    = (string)face["gender"]["type"];
                    var gender_pb = (string)face["gender"]["probability"];
                    list_faceinfo.Add("性别:" + gender + ",置信度:" + gender_pb);
                    var race    = (string)face["race"]["type"];
                    var race_pb = (string)face["race"]["probability"];
                    list_faceinfo.Add("种族:" + race + ",置信度:" + race_pb);

                    var left   = (int)face["location"]["left"];
                    var top    = (int)face["location"]["top"];
                    var width  = (int)face["location"]["width"];
                    var height = (int)face["location"]["height"];
                    //var rotation = (int) face["location"]["rotation"];

                    Rectangle rect = new Rectangle(left, top, width, height);

                    Image    change_img = Image.FromFile(m_imgPath2);
                    Graphics plate      = Graphics.FromImage(change_img);

                    plate.DrawRectangle(new System.Drawing.Pen(Color.BlueViolet, 2), rect);
                    plate.Save();
                    plate.Dispose();
                    MemoryStream ms = new MemoryStream();
                    change_img.Save(ms, ImageFormat.Bmp);
                    change_img.Dispose();
                    BitmapImage bi = new BitmapImage();
                    bi.BeginInit();
                    bi.StreamSource = ms;
                    bi.EndInit();
                    image_recongnition2.Source = bi;
                    image_recongnition2.InvalidateVisual();
                    break;
                }

                textBox_recongnition2.Text = string.Join("\n", list_faceinfo);
            }
            catch (Exception exception)
            {
                MessageBox.Show(exception.Message, "Error");
            }
        }