public FaceImageControl( FaceImage Face, FaceImageListControl FaceControlParent )
{
    this.Face = Face;
    this.FaceControlParent = FaceControlParent;
    InitializeComponent();

    // Size the control to exactly fit the face bitmap.
    SetBounds( 0, 0, Face.Face.Width, Face.Face.Height );

    // Build the right-click context menu (method-group handlers instead of
    // explicit EventHandler wrappers — the compiler emits the same delegate).
    List<MenuItem> items = new List<MenuItem>();

    MenuItem editUrl = new MenuItem( "Edit URL" );
    editUrl.Click += mi_EditURL_Click;
    items.Add( editUrl );

    MenuItem editName = new MenuItem( "Edit Name" );
    editName.Click += mi_EditName_Click;
    items.Add( editName );

    MenuItem delete = new MenuItem( "Delete" );
    delete.Click += mi_Delete_Click;
    items.Add( delete );

    RightClickMenuItems = items.ToArray();
    RightClickMenu = new System.Windows.Forms.ContextMenu( RightClickMenuItems );

    // Drawing state used when rendering the face image.
    ImageAttrs = new System.Drawing.Imaging.ImageAttributes();
    ColorMatrix = new System.Drawing.Imaging.ColorMatrix();
}
/// <summary>
/// Loads all face images recorded for the given section from NEUROSKY_IMAGENES.
/// </summary>
/// <param name="section">Section identifier to filter by.</param>
/// <returns>The images found (possibly empty).</returns>
public List<FaceImage> GetImages(int section)
{
    List<FaceImage> images = new List<FaceImage>();
    SqlConnection dbConnection = DbConnection.GetConnection();
    try
    {
        dbConnection.Open();
        // Parameterized query instead of string interpolation — never build SQL
        // from input, even when the value is an int.
        using (SqlCommand cmd = new SqlCommand(
            "SELECT [SECCION], [IDENTIFICADOR], [DATE], [IMAGENWEBCAM] FROM NEUROSKY_IMAGENES WHERE SECCION = @section",
            dbConnection))
        {
            cmd.Parameters.AddWithValue("@section", section);
            using (SqlDataReader dr = cmd.ExecuteReader())
            {
                while (dr.Read())
                {
                    FaceImage img = new FaceImage();
                    img.Id = int.Parse(Convert.ToString(dr["IDENTIFICADOR"]));
                    img.Section = int.Parse(Convert.ToString(dr["SECCION"]));
                    img.Date = DateHelper.FormatDate((Convert.ToString(dr["DATE"])), DateHelper.FULL_DATE_HOUR_PERIOD);
                    img.Path = Convert.ToString(dr["IMAGENWEBCAM"]);
                    images.Add(img);
                }
            }
        }
    }
    catch (Exception e)
    {
        string message = "Error recuperando imagenes de la base de datos";
        Console.WriteLine(message + e.Message);
        // 'throw;' preserves the original stack trace ('throw e;' resets it).
        throw;
    }
    finally
    {
        // Close unconditionally — the original leaked the connection on exception.
        dbConnection.Close();
    }
    return images;
}
/// <summary>
/// Records <paramref name="fullName"/> on the face and writes the image to disk
/// if no file exists there yet.
/// </summary>
/// <param name="savedFace">Face whose FullName is updated.</param>
/// <param name="fullName">Target file path.</param>
/// <returns>The face on success; null when the save failed and the exception
/// policy chose not to rethrow.</returns>
private FaceImage SaveToFile(FaceImage savedFace, string fullName)
{
    try
    {
        savedFace.FullName = fullName;
        // Only write when the file is not already present.
        if (!File.Exists(fullName))
        {
            // NOTE(review): this saves the _face field, not savedFace — confirm intended.
            _face.Save(savedFace.FullName);
        }
        return savedFace;
    }
    catch (Exception ex)
    {
        bool rethrow = ExceptionPolicy.HandleException(ex, "IO Policy");
        if (rethrow)
        {
            throw;
        }
        MessageBox.Show(string.Format("Failed to save {0} file", savedFace.FullName));
    }
    return null;
}
/// <summary>
/// Scans every selected database directory and registers the image files found.
/// Directory layout: database folder / person folder / image files.
/// </summary>
/// <param name="selectedDataBasesRep">Databases to populate, indexed in iteration order.</param>
private void LoadImages(FaceDataBases selectedDataBasesRep)
{
    int dbIndex = -1;
    foreach (FaceDataBase database in selectedDataBasesRep)
    {
        dbIndex++;
        var databaseName = Helper.GetShortName(database.FullName);
        var personDirs = Directory.EnumerateDirectories(database.FullName);
        var collected = new FaceImages();

        foreach (var personDir in personDirs)
        {
            foreach (var filePath in Directory.EnumerateFiles(personDir))
            {
                // Accept the file as soon as one valid extension matches.
                foreach (var validExtension in Constants.ValidExtensions)
                {
                    if (filePath.EndsWith(validExtension))
                    {
                        var fileName = Helper.GetShortName(filePath);
                        var personId = Helper.GetShortName(personDir);
                        collected.Add(new FaceImage(fileName, filePath, personId, databaseName));
                        break;
                    }
                }
            }
        }

        selectedDataBasesRep[dbIndex].Add(collected);
    }
}
/// <summary>
/// Returns a JSON array of random faces. Query string: numFaces (default 20, capped at 100).
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    Status.RecordHit();

    String numFacesString = Request.QueryString["numFaces"];
    int numFaces = numFacesString == null ? 20 : int.Parse(numFacesString);
    if (numFaces > 100)
    {
        numFaces = 100;
    }

    var faces = FacesLite.GetRandomFaces(numFaces).ToArray();
    Message mess = new Message();
    mess.FaceImages = new List<FaceImage>();
    // Length property instead of LINQ Count() on an array.
    for (int i = 0; i < faces.Length; i++)
    {
        FaceImage fi = new FaceImage() { ukey = faces[i].Item1, path = faces[i].Item2 };
        mess.FaceImages.Add(fi);
    }

    string json;
    // using ensures the stream is disposed even if serialization throws.
    using (MemoryStream ms = new MemoryStream())
    {
        DataContractJsonSerializer ser = new DataContractJsonSerializer(typeof(FaceImage[]));
        ser.WriteObject(ms, mess.FaceImages.ToArray());
        json = Encoding.UTF8.GetString(ms.ToArray());
    }

    Response.Clear();
    Response.ContentType = "application/json; charset=utf-8";
    Response.Write(json);
    Response.End();
}
/// <summary>
/// Clarity (sharpness) evaluation, traditional method: measures the information
/// lost after a second blur pass.
/// <para>
/// Level mapping: [0, low) => <see cref="QualityLevel.Low"/>;
/// [low, high) => <see cref="QualityLevel.Medium"/>;
/// [high, ~) => <see cref="QualityLevel.High"/>.
/// Defaults: {low, high} = {0.1, 0.2}.
/// </para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="level">Receives the quality level.</param>
/// <param name="score">Receives the score.</param>
/// <param name="low">Lower threshold.</param>
/// <param name="high">Upper threshold.</param>
/// <returns>Result of the native call.</returns>
public static bool QualityOfClarity(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, int pointsLength, ref int level, ref float score, float low = 0.1f, float high = 0.2f)
{
    return ViewFacePlusNative.QualityOfClarity(imgData, ref img, faceRect, points, pointsLength, ref level, ref score, low, high);
}
/// <summary>
/// Pose evaluation (deep learning): regresses the head's yaw/pitch/roll angles
/// to judge whether the face is frontal.
/// <para>Requires the model <see langword="pose_estimation.csta"/>.</para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="level">Receives the quality level.</param>
/// <param name="score">Receives the score.</param>
/// <param name="yawLow">Yaw low-score threshold.</param>
/// <param name="yawHigh">Yaw high-score threshold.</param>
/// <param name="pitchLow">Pitch low-score threshold.</param>
/// <param name="pitchHigh">Pitch high-score threshold.</param>
/// <param name="rollLow">Roll low-score threshold.</param>
/// <param name="rollHigh">Roll high-score threshold.</param>
/// <returns>Result of the native call.</returns>
public static bool QualityOfPoseEx(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, int pointsLength, ref int level, ref float score, float yawLow = 25, float yawHigh = 10, float pitchLow = 20, float pitchHigh = 10, float rollLow = 33.33f, float rollHigh = 16.67f)
{
    return ViewFacePlusNative.QualityOfPoseEx(imgData, ref img, faceRect, points, pointsLength, ref level, ref score, yawLow, yawHigh, pitchLow, pitchHigh, rollLow, rollHigh);
}
/// <summary>
/// Clarity evaluation (deep learning).
/// <para>
/// Requires the models <see langword="quality_lbn.csta"/> and
/// <see langword="face_landmarker_pts68.csta"/>.
/// Default: {blur_thresh} = {0.8}.
/// </para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="level">Receives the quality level.</param>
/// <param name="score">Receives the score.</param>
/// <param name="blur_thresh">Blur threshold.</param>
/// <returns>Result of the native call.</returns>
public static bool QualityOfClarityEx(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, int pointsLength, ref int level, ref float score, float blur_thresh = 0.8f)
{
    return ViewFacePlusNative.QualityOfClarityEx(imgData, ref img, faceRect, points, pointsLength, ref level, ref score, blur_thresh);
}
/// <summary>Renders the bitmap cached for this face onto the visual at the origin.</summary>
/// <param name="img">Face whose string form keys the FacesImage lookup.</param>
public void Draw(FaceImage img)
{
    // NOTE(review): no null check after the 'as' cast — a missing or mistyped
    // cache entry will throw NullReferenceException below; confirm entries are
    // always BitmapSource.
    var bitmap = FacesImage[img.ToString()] as BitmapSource;
    DrawingContext dc = Visual.RenderOpen();
    dc.DrawImage(bitmap, new Rect(0, 0, bitmap.Width, bitmap.Height));
    dc.Close();
}
/// <summary>Maps a domain face image to its database representation.</summary>
/// <param name="faceImage">Source face image.</param>
/// <returns>A new <see cref="DbFaceImage"/> carrying the nickname and raw bytes.</returns>
private DbFaceImage ConvertToDbFaceImage(FaceImage faceImage)
{
    var dbImage = new DbFaceImage();
    dbImage.Nickname = faceImage.Nickname;
    dbImage.Bytes = faceImage.Bytes;
    return dbImage;
}
/// <summary>
/// Resolution evaluation: judges the resolution of the face region.
/// <para>
/// Level mapping: [0, low) => <see cref="QualityLevel.Low"/>;
/// [low, high) => <see cref="QualityLevel.Medium"/>;
/// [high, ~) => <see cref="QualityLevel.High"/>.
/// Defaults: {low, high} = {80, 120}.
/// </para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="level">Receives the quality level.</param>
/// <param name="score">Receives the score.</param>
/// <param name="low">Lower threshold.</param>
/// <param name="high">Upper threshold.</param>
/// <returns>Result of the native call.</returns>
public static bool QualityOfResolution(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, int pointsLength, ref int level, ref float score, float low = 80, float high = 120)
{
    return ViewFacePlusNative.QualityOfResolution(imgData, ref img, faceRect, points, pointsLength, ref level, ref score, low, high);
}
/// <summary>
/// POST: deletes the face image with the given id.
/// </summary>
/// <param name="id">Primary key of the image to delete.</param>
/// <returns>Redirect to Index, or 404 when the id does not exist.</returns>
public async Task<ActionResult> DeleteConfirmed(int id)
{
    FaceImage faceImage = await db.FaceImages.FindAsync(id);
    // FindAsync returns null for an unknown id; Remove(null) would throw.
    if (faceImage == null)
    {
        return HttpNotFound();
    }
    db.FaceImages.Remove(faceImage);
    await db.SaveChangesAsync();
    return RedirectToAction("Index");
}
/// <summary>
/// Persists a face image into the application's face-database folder.
/// </summary>
/// <param name="faceImage">Face to save.</param>
/// <returns>The saved face, as returned by <c>SaveToFile</c>.</returns>
private FaceImage SaveFace(FaceImage faceImage)
{
    // Resolve the target folder, compose the file path, then write.
    var targetDir = CreateDirByShortName(Strings.Face_Database_Folder_Name);
    var targetPath = CreateFaceFileFullName(faceImage, targetDir);
    return SaveToFile(faceImage, targetPath);
}
/// <summary>
/// Adds a face to the named category, creating the category list on first use.
/// </summary>
/// <param name="Category">Category key.</param>
/// <param name="Face">Face to append.</param>
public void Add( String Category, FaceImage Face )
{
    // TryGetValue does one lookup; the original did Keys.Contains + indexer.
    List<FaceImage> list;
    if ( !Categories.TryGetValue( Category, out list ) )
    {
        list = new List<FaceImage>();
        Categories.Add( Category, list );
    }
    list.Add( Face );
}
/// <summary>
/// Adds a face to the tracked list (when present) and creates a child control
/// for it inside a suspended layout pass to avoid flicker.
/// </summary>
/// <param name="Face">Face to display.</param>
public void Add( FaceImage Face )
{
    // Null-conditional add: ImageList may legitimately be absent.
    ImageList?.Add( Face );

    flowLayoutPanel1.SuspendLayout();
    flowLayoutPanel1.Controls.Add( new FaceImageControl( Face, this ) );
    flowLayoutPanel1.ResumeLayout( true );
    flowLayoutPanel1.Refresh();
}
/// <summary>
/// Age prediction.
/// <para>Requires the model <see langword="age_predictor.csta"/>.</para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <returns>Result of the native call.</returns>
public static int AgePredictor(byte[] imgData, ref FaceImage img, FaceMarkPoint[] points, int pointsLength)
{
    // Dispatch to the native library matching the process bitness.
    return Platform64
        ? ViewFacePlus64.AgePredictor(imgData, ref img, points, pointsLength)
        : ViewFacePlus32.AgePredictor(imgData, ref img, points, pointsLength);
}
/// <summary>
/// Round-trips the TestFace property: the getter must return what the setter stored.
/// </summary>
public void TestFaceTest()
{
    var viewModel = new RecognizeClassViewModel(); // TODO: Initialize to an appropriate value
    FaceImage expected = null;                     // TODO: Initialize to an appropriate value

    viewModel.TestFace = expected;
    FaceImage actual = viewModel.TestFace;

    Assert.AreEqual(expected, actual);
    Assert.Inconclusive("Verify the correctness of this test method.");
}
/// <summary>
/// Gender prediction.
/// <para>Requires the model <see langword="gender_predictor.csta"/>.</para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <returns>Result of the native call.</returns>
public static int GenderPredictor(byte[] imgData, ref FaceImage img, FaceMarkPoint[] points, int pointsLength)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.GenderPredictor(imgData, ref img, points, pointsLength)
        : ViewFacePlus32.GenderPredictor(imgData, ref img, points, pointsLength);
}
/// <summary>
/// POST: saves edits to a face image. On validation failure the form is
/// redisplayed with the Face dropdown repopulated.
/// </summary>
/// <param name="faceImage">Bound form values.</param>
public async Task<ActionResult> Edit([Bind(Include = "FaceImageID,FaceURL,Content,TimeStamp,FaceID")] FaceImage faceImage)
{
    // Guard clause: invalid model short-circuits back to the view.
    if (!ModelState.IsValid)
    {
        ViewBag.FaceID = new SelectList(db.Faces, "FaceID", "FaceID", faceImage.FaceID);
        return View(faceImage);
    }

    db.Entry(faceImage).State = EntityState.Modified;
    await db.SaveChangesAsync();
    return RedirectToAction("Index");
}
/// <summary>
/// Liveness (anti-spoofing) detector — video frame mode.
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="global">Whether to enable global detection.</param>
/// <returns>
/// <para>
/// One of <see cref="AntiSpoofingStatus.Real"/>, <see cref="AntiSpoofingStatus.Spoof"/>,
/// <see cref="AntiSpoofingStatus.Fuzzy"/> or <see cref="AntiSpoofingStatus.Detecting"/>.
/// While the video has not yet supplied enough frames, the status is
/// <see cref="AntiSpoofingStatus.Detecting"/>.
/// </para>
/// </returns>
public static int AntiSpoofingVideo(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, bool global)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.AntiSpoofingVideo(imgData, ref img, faceRect, points, global)
        : ViewFacePlus32.AntiSpoofingVideo(imgData, ref img, faceRect, points, global);
}
/// <summary>
/// Extracts face feature values.
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="features">Receives the face feature values.</param>
/// <param name="type">Model type: 0 = face_recognizer; 1 = face_recognizer_mask; 2 = face_recognizer_light.</param>
/// <returns>Result of the native call.</returns>
public static bool Extract(byte[] imgData, ref FaceImage img, FaceMarkPoint[] points, float[] features, int type = 0)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.Extract(imgData, ref img, points, features, type)
        : ViewFacePlus32.Extract(imgData, ref img, points, features, type);
}
/// <summary>
/// Gets the face landmark points.
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="pointX">Receives the landmark x coordinates.</param>
/// <param name="pointY">Receives the landmark y coordinates.</param>
/// <param name="type">Model type: 0 = face_landmarker_pts68; 1 = face_landmarker_mask_pts5; 2 = face_landmarker_pts5.</param>
/// <returns>Result of the native call.</returns>
public static bool FaceMark(byte[] imgData, ref FaceImage img, FaceRect faceRect, double[] pointX, double[] pointY, int type = 0)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.FaceMark(imgData, ref img, faceRect, pointX, pointY, type)
        : ViewFacePlus32.FaceMark(imgData, ref img, faceRect, pointX, pointY, type);
}
/// <summary>
/// Eye state detection.
/// <para>Requires the model <see langword="eye_state.csta"/>.</para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="left_eye">Receives the left-eye state.</param>
/// <param name="right_eye">Receives the right-eye state.</param>
/// <returns>Result of the native call.</returns>
public static bool EyeStateDetector(byte[] imgData, ref FaceImage img, FaceMarkPoint[] points, int pointsLength, ref int left_eye, ref int right_eye)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.EyeStateDetector(imgData, ref img, points, pointsLength, ref left_eye, ref right_eye)
        : ViewFacePlus32.EyeStateDetector(imgData, ref img, points, pointsLength, ref left_eye, ref right_eye);
}
/// <summary>
/// Number of faces found by the face detector.
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceSize">Minimum face size in pixels (default 20). Larger values let the
/// detector work on a downscaled image and run much faster — e.g. at 80 the image can be
/// reduced to 1/4 size, roughly 16x faster than at 20 — so set it as large as the
/// application allows. The detector is trained with BindingBox Regression.</param>
/// <param name="threshold">Detector threshold, default 0.9, valid range [0, 1]. Usually left
/// alone; lower values reduce misses but increase false detections.</param>
/// <param name="maxWidth">Maximum detectable image width, default 2000.</param>
/// <param name="maxHeight">Maximum detectable image height, default 2000.</param>
/// <param name="type">Model type: 0 = face_detector; 1 = mask_detector; 2 = mask_detector.</param>
/// <returns>Result of the native call.</returns>
public static int DetectorSize(byte[] imgData, ref FaceImage img, double faceSize = 20, double threshold = 0.9, double maxWidth = 2000, double maxHeight = 2000, int type = 0)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.DetectorSize(imgData, ref img, faceSize, threshold, maxWidth, maxHeight, type)
        : ViewFacePlus32.DetectorSize(imgData, ref img, faceSize, threshold, maxWidth, maxHeight, type);
}
/// <summary>
/// Liveness (anti-spoofing) detector — single frame mode.
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="global">Whether to enable global detection.</param>
/// <returns>One of <see cref="AntiSpoofingStatus.Real"/>,
/// <see cref="AntiSpoofingStatus.Spoof"/> or <see cref="AntiSpoofingStatus.Fuzzy"/>.</returns>
public static int AntiSpoofing(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, bool global)
{
    // Dispatch to the native library matching the process bitness.
    return Platform64
        ? ViewFacePlus64.AntiSpoofing(imgData, ref img, faceRect, points, global)
        : ViewFacePlus32.AntiSpoofing(imgData, ref img, faceRect, points, global);
}
/// <summary>
/// Initializes the face recognition subsystem: detection-only mode,
/// minimum face size 50 px, roll tolerance 10 degrees.
/// </summary>
static void InitFRS()
{
    FRSParam param = new FRSParam
    {
        nMinFaceSize = 50,
        nRollAngle = 10,
        bOnlyDetect = true
    };
    FaceImage.Create(1, param);
    Feature.Init(1);
}
/// <summary>
/// Brightness evaluation: checks whether the brightness over the face region is
/// uniform and normal; partially or fully over/under-exposed faces rate Low.
/// <para>
/// The combined brightness (grey value) maps to a level as:
/// [0, v0), [v3, ~) => <see cref="QualityLevel.Low"/>;
/// [v0, v1), [v2, v3) => <see cref="QualityLevel.Medium"/>;
/// [v1, v2) => <see cref="QualityLevel.High"/>.
/// Defaults: {v0, v1, v2, v3} = {70, 100, 210, 230}.
/// </para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="level">Receives the quality level.</param>
/// <param name="score">Receives the score.</param>
/// <param name="v0">Lowest boundary.</param>
/// <param name="v1">Low-medium boundary.</param>
/// <param name="v2">Medium-high boundary.</param>
/// <param name="v3">Highest boundary.</param>
/// <returns>Result of the native call.</returns>
public static bool QualityOfBrightness(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, int pointsLength, ref int level, ref float score, float v0 = 70, float v1 = 100, float v2 = 210, float v3 = 230)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.QualityOfBrightness(imgData, ref img, faceRect, points, pointsLength, ref level, ref score, v0, v1, v2, v3)
        : ViewFacePlus32.QualityOfBrightness(imgData, ref img, faceRect, points, pointsLength, ref level, ref score, v0, v1, v2, v3);
}
/// <summary>
/// Gets the number of tracked faces.
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="stable">Stabilization flag.</param>
/// <param name="interval">Detection interval.</param>
/// <param name="faceSize">Minimum face size.</param>
/// <param name="threshold">Detector threshold.</param>
/// <param name="type">Model type: 0 = face_detector; 1 = mask_detector; 2 = mask_detector.</param>
/// <returns>Result of the native call.</returns>
public static int FaceTrackSize(byte[] imgData, ref FaceImage img, bool stable = false, int interval = 10, double faceSize = 20, double threshold = 0.9, int type = 0)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.FaceTrackSize(imgData, ref img, stable, interval, faceSize, threshold, type)
        : ViewFacePlus32.FaceTrackSize(imgData, ref img, stable, interval, faceSize, threshold, type);
}
/// <summary>
/// POST: detects face details in an uploaded image via the Azure Face API.
/// </summary>
/// <param name="file">Uploaded image file.</param>
/// <returns>200 with the detected face details, or 400 when no file was supplied.</returns>
public async Task<IActionResult> GetFaceDetails([FromForm] IFormFile file)
{
    if (file == null)
    {
        return BadRequest();
    }

    // Credentials come from configuration, never from source.
    string subscriptionKey = _config.GetValue<string>("Keys:SUBSCRIPTION_KEY");
    string endpoint = _config.GetValue<string>("Keys:ENDPOINT");
    IFaceClient client = Authenticate(endpoint, subscriptionKey);

    FaceImage details = await DetectFaceExtract(client, file, RECOGNITION_MODEL);
    return Ok(details);
}
/// <summary>
/// Occlusion (mask) evaluation over the face region.
/// <para>Requires the model <see langword="face_landmarker_mask_pts5.csta"/>.</para>
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="level">Receives the quality level.</param>
/// <param name="score">Receives the score.</param>
/// <returns>Result of the native call.</returns>
public static bool QualityOfNoMask(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, int pointsLength, ref int level, ref float score)
{
    // Dispatch to the native library matching the process bitness.
    return Is64BitProcess
        ? ViewFacePlus64.QualityOfNoMask(imgData, ref img, faceRect, points, pointsLength, ref level, ref score)
        : ViewFacePlus32.QualityOfNoMask(imgData, ref img, faceRect, points, pointsLength, ref level, ref score);
}
// GET: FaceImages/Details/5
/// <summary>
/// Shows the details view for one face image.
/// </summary>
/// <param name="id">Primary key; null yields 400, unknown yields 404.</param>
public async Task<ActionResult> Details(int? id)
{
    // A missing id is a malformed request, not a missing record.
    if (id == null)
    {
        return new HttpStatusCodeResult(HttpStatusCode.BadRequest);
    }

    FaceImage faceImage = await db.FaceImages.FindAsync(id);
    return faceImage == null ? (ActionResult)HttpNotFound() : View(faceImage);
}
/// <summary>
/// Pose evaluation (traditional method): judges whether the face is frontal
/// from the 5-point landmark coordinates.
/// </summary>
/// <param name="imgData">Image BGR data.</param>
/// <param name="img">Image width/height/channel info.</param>
/// <param name="faceRect">Face location.</param>
/// <param name="points">Face landmark array.</param>
/// <param name="pointsLength">Landmark array length.</param>
/// <param name="level">Receives the quality level.</param>
/// <param name="score">Receives the score.</param>
/// <returns>Result of the native call.</returns>
public static bool QualityOfPose(byte[] imgData, ref FaceImage img, FaceRect faceRect, FaceMarkPoint[] points, int pointsLength, ref int level, ref float score)
{
    // Dispatch to the native library matching the process bitness.
    return Platform64
        ? ViewFacePlus64.QualityOfPose(imgData, ref img, faceRect, points, pointsLength, ref level, ref score)
        : ViewFacePlus32.QualityOfPose(imgData, ref img, faceRect, points, pointsLength, ref level, ref score);
}
// GET: FaceImages/Edit/5
/// <summary>
/// Shows the edit form for one face image, with the Face dropdown pre-selected.
/// </summary>
/// <param name="id">Primary key; null yields 400, unknown yields 404.</param>
public async Task<ActionResult> Edit(int? id)
{
    // A missing id is a malformed request, not a missing record.
    if (id == null)
    {
        return new HttpStatusCodeResult(HttpStatusCode.BadRequest);
    }

    FaceImage faceImage = await db.FaceImages.FindAsync(id);
    if (faceImage == null)
    {
        return HttpNotFound();
    }

    // Populate the Face selector, pre-selecting this image's FaceID.
    ViewBag.FaceID = new SelectList(db.Faces, "FaceID", "FaceID", faceImage.FaceID);
    return View(faceImage);
}
/// <summary>
/// Smoke test: configures the detector (min face 50 px, up to 60 degrees roll,
/// detection only), runs it on a fixed local file, and prints the first face's
/// left edge.
/// </summary>
static void TestDetectFace()
{
    FRSParam param = new FRSParam
    {
        nMinFaceSize = 50,
        nRollAngle = 60,
        bOnlyDetect = true
    };
    FaceImage.Create(1, param);
    Feature.Init(1);

    // Hard-coded local test image.
    Image img = Image.FromFile("E:/照片/3.jpg");
    FRSFacePos[] faces = new FRSFacePos[1];
    FaceImage.DetectFace(0, img, 24, faces, 1);
    Console.WriteLine(faces[0].rcFace.left);
}
/// <summary>
/// Returns a JSON array of carousel faces around a given (or random) ukey,
/// always appending the mean face (ukey 19400) as an anchor.
/// Query string: ukey, numFaces.
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    Status.RecordHit();

    // (Removed unused 'greyscale' local — it was computed but never read.)
    String ukeyString = Request.QueryString["ukey"];
    int inukey = ukeyString == null ? -1 : int.Parse(ukeyString);
    String numFacesString = Request.QueryString["numFaces"];
    int numFaces = numFacesString == null ? -1 : int.Parse(numFacesString);
    inukey = FacesLite.GetRandomUkey(inukey);

    List<System.Tuple<int /* face ukey */, double /* distance */, List<double> /* evs */>> faces =
        FacesLite.GetFaceUKeysForCarousel(inukey, numFaces, 3);

    // 19400 is the mean face; always ship it so the client has an anchor.
    int meanFaceUKey = 19400;
    System.Tuple<int, double, List<double>> meanFace =
        FacesLite.GetFaceUKeysForCarousel(meanFaceUKey, 3, 3).Where(x => x.Item1 == meanFaceUKey).Single();
    faces.Add(meanFace);

    Message mess = new Message();
    mess.FaceImages = new List<FaceImage>();
    // Count property instead of LINQ Count() extension.
    for (int i = 0; i < faces.Count; i++)
    {
        FaceImage fi = new FaceImage()
        {
            ukey = faces[i].Item1,
            path = String.Format("https://s3.amazonaws.com/FaceSpace/Faces/{0}.grey.jpg", faces[i].Item1),
            antipath = String.Format("https://s3.amazonaws.com/FaceSpace/Faces/{0}.anti.jpg", faces[i].Item1),
            // 1 = the requested face, -1 = the mean face, 0 = everything else.
            selected = faces[i].Item1 == inukey ? 1 : faces[i].Item1 == meanFaceUKey ? -1 : 0,
            eigenVectors = faces[i].Item3.ToList()
        };
        mess.FaceImages.Add(fi);
    }

    string json;
    // using ensures the stream is disposed even if serialization throws.
    using (MemoryStream ms = new MemoryStream())
    {
        DataContractJsonSerializer ser = new DataContractJsonSerializer(typeof(FaceImage[]));
        ser.WriteObject(ms, mess.FaceImages.ToArray());
        json = Encoding.UTF8.GetString(ms.ToArray());
    }

    Response.Clear();
    Response.ContentType = "application/json; charset=utf-8";
    Response.Write(json);
    Response.End();
}
/// <summary>
/// Returns a JSON array of faces sampled from the 2D cloud around a given
/// (or random) ukey. Query string: numFaces (default 20, capped at 100),
/// ukey, distance (default 20).
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    Status.RecordHit();

    String numFacesString = Request.QueryString["numFaces"];
    int numFaces = numFacesString == null ? 20 : int.Parse(numFacesString);
    if (numFaces > 100)
    {
        numFaces = 100;
    }

    String ukeyString = Request.QueryString["ukey"];
    int inukey = ukeyString == null ? -1 : int.Parse(ukeyString);
    inukey = FacesLite.GetRandomUkey(inukey);

    String distanceString = Request.QueryString["distance"];
    // BUG FIX: the original used int.Parse for a double, so fractional
    // query values (e.g. "20.5") threw FormatException.
    double distance = distanceString == null ? 20 : double.Parse(distanceString);

    var faces = FacesLite.Get2DCloudSample(_requestId++, inukey, distance, numFaces).ToArray();

    Message mess = new Message();
    mess.FaceImages = new List<FaceImage>();
    // Length property instead of LINQ Count() on an array.
    for (int i = 0; i < faces.Length; i++)
    {
        FaceImage fi = new FaceImage()
        {
            path = faces[i].Item1,
            xpos = faces[i].Item3,
            ypos = faces[i].Item4,
            coords = faces[i].Item2.ToList()
        };
        mess.FaceImages.Add(fi);
    }

    string json;
    // using ensures the stream is disposed even if serialization throws.
    using (MemoryStream ms = new MemoryStream())
    {
        DataContractJsonSerializer ser = new DataContractJsonSerializer(typeof(FaceImage[]));
        ser.WriteObject(ms, mess.FaceImages.ToArray());
        json = Encoding.UTF8.GetString(ms.ToArray());
    }

    Response.Clear();
    Response.ContentType = "application/json; charset=utf-8";
    Response.Write(json);
    Response.End();
}
/// <summary>
/// Returns a JSON array of 32 carousel faces around a given (or random) ukey.
/// Query string: ukey.
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    Status.RecordHit();

    // (Removed unused 'greyscale' local and dead commented-out code.)
    String ukeyString = Request.QueryString["ukey"];
    int inukey = ukeyString == null ? -1 : int.Parse(ukeyString);
    inukey = FacesLite.GetRandomUkey(inukey);

    var faces = FacesLite.GetFaceUKeysForCarousel(inukey, 32, 10).ToArray();

    Message mess = new Message();
    mess.FaceImages = new List<FaceImage>();
    // Length property instead of LINQ Count() on an array.
    for (int i = 0; i < faces.Length; i++)
    {
        FaceImage fi = new FaceImage()
        {
            ukey = faces[i].Item1,
            path = String.Format("https://s3.amazonaws.com/FaceSpace/Faces/{0}.grey.jpg", faces[i].Item1),
            // 1 = the requested/selected face, 0 = everything else.
            selected = faces[i].Item1 == inukey ? 1 : 0,
        };
        mess.FaceImages.Add(fi);
    }

    string json;
    // using ensures the stream is disposed even if serialization throws.
    using (MemoryStream ms = new MemoryStream())
    {
        DataContractJsonSerializer ser = new DataContractJsonSerializer(typeof(FaceImage[]));
        ser.WriteObject(ms, mess.FaceImages.ToArray());
        json = Encoding.UTF8.GetString(ms.ToArray());
    }

    Response.Clear();
    Response.ContentType = "application/json; charset=utf-8";
    Response.Write(json);
    Response.End();
}
/// <summary>
/// Adds one face per non-empty line of a pasted URL list to the current
/// category tab and to the all-faces view.
/// </summary>
private void buttonAddMultiUrl_Click( object sender, EventArgs e )
{
    // The first tab is not a category tab; nothing to add there.
    if ( tabControl1.SelectedTab == tabPage1 )
        return;

    InputBoxResult input = InputBox.Show( "Copy-paste URL list from Rightload:", "Image URLs", "", null, true );
    if ( !input.OK )
        return;

    String category = tabControl1.SelectedTab.Text;
    AllFacesControl.SuspendLayout();
    FaceControls[category].SuspendLayout();

    string[] urls = input.Text.Split( new string[] { Environment.NewLine }, StringSplitOptions.RemoveEmptyEntries );
    foreach ( string url in urls )
    {
        FaceImage image = new FaceImage( category, url, "", "" );
        AllFacesControl.Add( image );
        FaceControls[category].Add( image );
    }

    AllFacesControl.ResumeLayout( true );
    AllFacesControl.Refresh();
    FaceControls[category].ResumeLayout( true );
    FaceControls[category].Refresh();
    this.Refresh();
}
/// <summary>
/// Lets the user pick a local image file and a URL, then adds the resulting
/// face to the current category tab and to the all-faces view.
/// </summary>
private void buttonAddImage_Click( object sender, EventArgs e )
{
    // The first tab is not a category tab; nothing to add there.
    if ( tabControl1.SelectedTab == tabPage1 )
        return;

    OpenFileDialog dialog = new System.Windows.Forms.OpenFileDialog();
    dialog.Filter = "Images (*.png, *.jpg, *.gif)|*.png;*.jpg;*.jpeg;*.gif|All Files|*.*";
    if ( dialog.ShowDialog() != DialogResult.OK )
        return;

    InputBoxResult input = InputBox.Show( "URL:", "Image URL", "", null, false );
    if ( !input.OK )
        return;

    String category = tabControl1.SelectedTab.Text;
    FaceImage image = new FaceImage( category, input.Text, dialog.FileName, "" );

    AllFacesControl.SuspendLayout();
    AllFacesControl.Add( image );
    AllFacesControl.ResumeLayout( true );
    AllFacesControl.Refresh();

    FaceControls[category].SuspendLayout();
    FaceControls[category].Add( image );
    FaceControls[category].ResumeLayout( true );
    FaceControls[category].Refresh();
    this.Refresh();
}
/// <summary>
/// Removes a face image from the named category's list.
/// Throws KeyNotFoundException if the category does not exist; a no-op
/// (List.Remove returns false) if the image is not in the list.
/// </summary>
/// <param name="Category">Category key to look up.</param>
/// <param name="faceImage">Face to remove from that category.</param>
internal void Remove( string Category, FaceImage faceImage ) { Categories[Category].Remove( faceImage ); }
/// <summary>
/// Face endpoint with two modes:
/// POST — body is a JSON array of eigen-coordinates; a face image is generated
/// from them (ukey -2). With ?firstTime, per-coordinate min/max bounds from a
/// random stored face are included so the client can build its sliders.
/// GET — looks up (or randomly picks) a stored face by ?ukey and returns its
/// coordinates and bounds.
/// Responds with a single FaceImage serialized as JSON.
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    Status.RecordHit();

    FaceImage fi;
    if (Page.Request.ContentLength > 0)
    {
        // POST: synthesize a face from the uploaded coordinates.
        var jsonSerializer = new DataContractJsonSerializer(typeof(double[]));
        double[] coords = (double[])jsonSerializer.ReadObject(Page.Request.InputStream);
        var path = FacesLite.GenerateFaceFroomCoords(coords);

        if (Request.QueryString["firstTime"] != null)
        {
            // First call: also ship slider bounds taken from a random stored face.
            var temp = FacesLite.GetCoordsForUKey(FacesLite.GetRandomUkey(-1));
            fi = new FaceImage()
            {
                ukey = -2,
                path = path,
                coords = coords.Select(x => (int)x).ToList(),
                mins = temp.Item3.Select(x => (int)x).ToList(),
                maxs = temp.Item4.Select(x => (int)x).ToList()
            };
        }
        else
        {
            fi = new FaceImage()
            {
                ukey = -2,
                path = path,
            };
        }
    }
    else
    {
        // GET: resolve the requested (or a random) stored face.
        String ukeyString = Request.QueryString["ukey"];
        int inukey = ukeyString == null ? -1 : int.Parse(ukeyString);
        inukey = FacesLite.GetRandomUkey(inukey);
        var face = FacesLite.GetCoordsForUKey(inukey);
        fi = new FaceImage()
        {
            ukey = face.Item1,
            // NOTE(review): sibling endpoints use https for these S3 URLs — confirm http is intended here.
            path = String.Format("http://s3.amazonaws.com/FaceSpace/Faces/{0}.reconstituted.jpg", face.Item1),
            coords = face.Item2.Select(x => (int)x).ToList(),
            mins = face.Item3.Select(x => (int)x).ToList(),
            maxs = face.Item4.Select(x => (int)x).ToList(),
        };
    }

    string json;
    // using ensures the stream is disposed even if serialization throws.
    using (MemoryStream ms = new MemoryStream())
    {
        DataContractJsonSerializer ser = new DataContractJsonSerializer(typeof(FaceImage));
        ser.WriteObject(ms, fi);
        json = Encoding.UTF8.GetString(ms.ToArray());
    }

    Response.Clear();
    Response.ContentType = "application/json; charset=utf-8";
    Response.Write(json);
    Response.End();
}