/// <summary>
/// Detects the first face in the given photo via the (legacy) Cognitive Services
/// Face API and returns its age, gender and smile attributes.
/// </summary>
/// <param name="photoPath">Path to the photo file to analyze.</param>
/// <returns>A <see cref="FaceDetectResult"/> for the first detected face.</returns>
/// <exception cref="InvalidOperationException">Thrown when no face is detected.</exception>
public static async Task<FaceDetectResult> DetectFaceAsync(string photoPath)
{
    // Prepare the Face API client.
    // NOTE(review): API key and endpoint are hard-coded placeholders — move them
    // to secure configuration (e.g. app settings / key vault) before shipping.
    var apiKey = "YOUR_API_KEY";
    var apiEndpoint = "https://YOUR_API_LOCATION.api.cognitive.microsoft.com/face/v1.0";
    var client = new FaceServiceClient(apiKey, apiEndpoint);

    // Call the Face API. The stream must be disposed once the request completes;
    // the original code leaked it.
    var file = new FileFromPath(photoPath);
    using (var imageStream = file.Open(FileAccess.Read))
    {
        var attributes = new FaceAttributeType[]
        {
            FaceAttributeType.Age,
            FaceAttributeType.Gender,
            FaceAttributeType.Smile,
        };
        var result = await client.DetectAsync(imageStream, false, false, attributes);

        // Guard: the API returns an empty result when no face is found;
        // indexing [0] unguarded would throw instead of failing meaningfully.
        if (result == null || result.Length == 0)
        {
            throw new InvalidOperationException("No face was detected in the image.");
        }

        // Copy the attributes of the first detected face into the result DTO.
        var detectResult = new FaceDetectResult();
        detectResult.Age = result[0].FaceAttributes.Age;
        detectResult.Gender = result[0].FaceAttributes.Gender;
        detectResult.Smile = result[0].FaceAttributes.Smile;
        return detectResult;
    }
}
/// <summary>
/// Detects the first face in the given photo via the Azure Cognitive Services
/// Face API (Microsoft.Azure.CognitiveServices.Vision.Face SDK) and returns its
/// age, gender and smile attributes.
/// </summary>
/// <param name="photoPath">Path to the photo file to analyze.</param>
/// <returns>A <see cref="FaceDetectResult"/> for the first detected face.</returns>
/// <exception cref="InvalidOperationException">Thrown when no face is detected.</exception>
public static async Task<FaceDetectResult> DetectFaceAsync(string photoPath)
{
    // Prepare the Face API client.
    // NOTE(review): API key and endpoint are hard-coded placeholders — move them
    // to secure configuration (e.g. app settings / key vault) before shipping.
    var apiKey = "YOUR_API_KEY";
    var apiEndpoint = "https://YOUR_API_LOCATION.cognitiveservices.azure.com/";
    var client = new FaceClient(new ApiKeyServiceClientCredentials(apiKey))
    {
        Endpoint = apiEndpoint
    };

    // Call the Face API. The stream must be disposed once the request completes;
    // the original code leaked it.
    var file = new FileFromPath(photoPath);
    using (var imageStream = file.Open(FileAccess.Read))
    {
        var attributes = new FaceAttributeType[]
        {
            FaceAttributeType.Age,
            FaceAttributeType.Gender,
            FaceAttributeType.Smile,
        };
        var result = await client.Face.DetectWithStreamAsync(imageStream, false, false, attributes);

        // Guard: the API returns an empty list when no face is found;
        // indexing [0] unguarded would throw instead of failing meaningfully.
        if (result == null || result.Count == 0)
        {
            throw new InvalidOperationException("No face was detected in the image.");
        }

        // Copy the attributes of the first detected face into the result DTO.
        // Age/Smile are nullable in this SDK, hence the explicit casts (as in the original).
        var detectResult = new FaceDetectResult();
        detectResult.Age = (double)result[0].FaceAttributes.Age;
        detectResult.Gender = result[0].FaceAttributes.Gender.ToString();
        detectResult.Smile = (double)result[0].FaceAttributes.Smile;
        return detectResult;
    }
}