Example #1
        private async Task startDetection(MediaFile file)
        {
            // Call the FaceDetect service with the image stream (rotated for external storage)
            faceDetectResult = await CognitiveService.FaceDetect(file.GetStreamWithImageRotatedForExternalStorage());

            processing = false;
        }
Example #2
        public static async Task<FaceDetectResult> DetectFaceAsync(string photoPath)
        {
            // Prepare the Face API call
            var apiKey      = "YOUR_API_KEY";
            var apiEndpoint = "https://YOUR_API_LOCATION.api.cognitive.microsoft.com/face/v1.0";
            var client      = new FaceServiceClient(apiKey, apiEndpoint);

            // Run detection with the Face API
            var file        = new FileFromPath(photoPath);
            var imageStream = file.Open(FileAccess.Read);

            var attributes = new FaceAttributeType[] {
                FaceAttributeType.Age, FaceAttributeType.Gender, FaceAttributeType.Smile,
            };
            // The second and third arguments are returnFaceId and returnFaceLandmarks
            var result = await client.DetectAsync(imageStream, false, false, attributes);

            // Copy the detection results
            var detectResult = new FaceDetectResult();

            detectResult.Age    = result[0].FaceAttributes.Age;
            detectResult.Gender = result[0].FaceAttributes.Gender;
            detectResult.Smile  = result[0].FaceAttributes.Smile;

            return detectResult;
        }
Example #3
        public static async Task<FaceDetectResult> DetectFaceAsync(string photoPath)
        {
            // Prepare the Face API call
            var apiKey      = "YOUR_API_KEY";
            var apiEndpoint = "https://YOUR_API_LOCATION.cognitiveservices.azure.com/";

            var client = new FaceClient(new ApiKeyServiceClientCredentials(apiKey))
            {
                Endpoint = apiEndpoint
            };

            // Run detection with the Face API
            var file        = new FileFromPath(photoPath);
            var imageStream = file.Open(FileAccess.Read);

            var attributes = new FaceAttributeType[] {
                FaceAttributeType.Age, FaceAttributeType.Gender, FaceAttributeType.Smile,
            };
            // The second and third arguments are returnFaceId and returnFaceLandmarks
            var result = await client.Face.DetectWithStreamAsync(imageStream, false, false, attributes);

            // Copy the detection results
            var detectResult = new FaceDetectResult();

            detectResult.Age    = (double)result[0].FaceAttributes.Age;
            detectResult.Gender = result[0].FaceAttributes.Gender.ToString();
            detectResult.Smile  = (double)result[0].FaceAttributes.Smile;

            return detectResult;
        }
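Examples #2 and #3 assume a small FaceDetectResult DTO that is not shown on this page. A minimal sketch of what it might look like, inferred only from the assignments above (the property names come from the code; everything else is an assumption):

        // Hypothetical DTO matching the assignments in DetectFaceAsync above (not the actual source).
        public class FaceDetectResult
        {
            public double Age    { get; set; }
            public string Gender { get; set; }
            public double Smile  { get; set; }
        }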
Example #4
        private async Task startDetection(MediaFile file)
        {
            // Call the FaceDetect function from the Services folder to analyze the face
            faceDetectResult = await CognitiveService.FaceDetect(file.GetStreamWithImageRotatedForExternalStorage());

            processing = false;
        }
Example #5
        private async void RunButton_OnClicked(object sender, EventArgs e)
        {
            // Select the image
            var photo             = "";
            var imageChoiceResult = await DisplayAlert("どちらの画像を使いますか", "", "カメラ", "ローカルフォルダー");

            try
            {
                if (imageChoiceResult)
                {
                    photo = await TakePhotoAsync();
                }
                else
                {
                    photo = await PickPhotoAsync();
                }
            }
            catch (Exception exception)
            {
                await DisplayAlert("エラーが発生しました", exception.Message, "OK");
            }

            // Analyze the image
            ImagePreview.Source = photo;
            var faceResult = new FaceDetectResult();

            try
            {
                faceResult = await DetectFaceAsync(photo);
            }
            catch (Exception exception)
            {
                await DisplayAlert("Face API の呼び出しに失敗しました", exception.Message, "OK");

                return;
            }

            // Display the detection results
            Age.Text    = "年齢 : " + faceResult.Age;
            Gender.Text = "性別 : " + faceResult.Gender;
            Smile.Text  = "笑顔 : " + (faceResult.Smile * 100).ToString("##0") + "点";
        }
        public static async Task<FaceDetectResult> DetectFaceAsync(string photo)
        {
            // Prepare the Face API call
            var subKey = "Your_FaceAPISubKey";
            var client = new FaceServiceClient(subKey);

            // Run detection with the Face API
            var file = await FileSystem.Current.GetFileFromPathAsync(photo);

            var imageStream = await file.OpenAsync(FileAccess.Read);

            var result = await client.DetectAsync(imageStream, false, false, Enum.GetValues(typeof(FaceAttributeType)).OfType<FaceAttributeType>().ToArray());

            // Copy the detection results
            var detectResult = new FaceDetectResult();

            var topEmotion = result[0].FaceAttributes.Emotion.ToRankedList().First();

            detectResult.age          = result[0].FaceAttributes.Age;
            detectResult.gender       = result[0].FaceAttributes.Gender;
            detectResult.emotionKey   = topEmotion.Key;
            detectResult.emotionValue = topEmotion.Value;

            return detectResult;
        }
        public static async Task<bool> RunDetect(Guid requestID, string apis, string name, string source,
                                                 Stream incomingPicture, string sourceContainerName, string resultContainerName, IAsyncCollector<object> outputItem, TraceWriter log, string videoName = null)
        {
            string apikey = string.Empty;

            try
            {
                string[] apiArr    = apis.Split(',');
                int      randomApi = FaceHelper.Instance.Next(0, apiArr.Length);
                apikey = apiArr[randomApi];
                log.Info($"RunDetect request id: {requestID} apiKey: {apikey} ticks: {DateTime.Now.Ticks}");
                Tuple<HttpClient, PolicyWrap<HttpResponseMessage>> tuple = FaceHelper.HttpClientList.GetOrAdd(apikey, new Tuple<HttpClient, PolicyWrap<HttpResponseMessage>>(
                                                                                                                  new HttpClient(),
                                                                                                                  FaceHelper.DefineAndRetrieveResiliencyStrategy(log)));
                HttpClient client = tuple.Item1;
                PolicyWrap<HttpResponseMessage> policy = tuple.Item2;
                // Resolve the Redis database for this API key (the key-to-database mapping is kept in database 1)
                IDatabase cache = FaceHelper.Connection.GetDatabase(int.Parse(FaceHelper.Connection.GetDatabase(1).StringGet(apikey)));

                // the large person group id is based on the MAC address we receive - each MAC address can map to a different Face API group
                string largegroupid = ConfigurationManager.AppSettings[source];
                if (videoName == null)
                {
                    log.Info($"Detecting {name} requestId: {requestID} apiKey: {apikey} ticks: {DateTime.Now.Ticks}");
                }
                else
                {
                    log.Info($"Detecting thumbnail {name} from {videoName} requestId: {requestID} apiKey: {apikey} ticks: {DateTime.Now.Ticks}");
                }
                byte[] pictureImage;
                // Convert the incoming image stream to a byte array.
                using (var br = new BinaryReader(incomingPicture))
                {
                    pictureImage = br.ReadBytes((int)incomingPicture.Length);
                }
                var detectionResult = await new FaceDetect(log, client).DetectFaces(pictureImage, apikey, requestID, policy);
                if ((detectionResult != null) && (detectionResult.Length > 0))
                {
                    // prepare the identify request
                    int    maxCandidate    = int.Parse(ConfigurationManager.AppSettings["maxNumOfCandidatesReturned"]);
                    double threshold       = double.Parse(ConfigurationManager.AppSettings["confidenceThreshold"]);
                    var    identifyRequest = new FaceIdentifyRequest()
                    {
                        ConfidenceThreshold        = threshold,
                        MaxNumOfCandidatesReturned = maxCandidate,
                        LargePersonGroupId         = largegroupid,
                        FaceIds = detectionResult.Select(s => s.FaceId).ToArray()
                    };
                    var identifyResult = await new FaceIdentify(log, client).IdentifyFaces(identifyRequest, apikey, requestID, policy);
                    if ((identifyResult == null) || (identifyResult.Length == 0))
                    {
                        log.Info($"No identification result requestId: {requestID}, apiKey:{apikey} ticks: {DateTime.Now.Ticks}");
                    }
                    var personResult = new PersonDetails(log, client);
                    // merge results and find the person name
                    for (int i = 0; i < detectionResult.Length; i++)
                    {
                        if (videoName == null)
                        {
                            detectionResult[i].FaceBlobName = string.Concat(detectionResult[i].FaceId, "_", name);
                        }
                        else
                        {
                            detectionResult[i].FaceBlobName = videoName + "/" + name;
                        }
                        if ((identifyResult != null) && (identifyResult.Length > 0))
                        {
                            detectionResult[i].Candidates = identifyResult[i].Candidates;
                            for (int j = 0; j < detectionResult[i].Candidates.Length; j++)
                            {
                                string personid   = detectionResult[i].Candidates[j].PersonId.ToString();
                                string personName = cache.StringGet(largegroupid + "-" + personid);
                                if (string.IsNullOrEmpty(personName))
                                {
                                    log.Info($"Missing cache requestId: {requestID} apiKey: {apikey} personId: {personid} ticks: {DateTime.Now.Ticks}");
                                    var tPerson = await personResult.GetPersonName(personid, apikey, largegroupid, requestID, policy);

                                    personName = tPerson.Name;
                                    cache.StringSet(largegroupid + "-" + personid, personName, null, When.Always);
                                }
                                detectionResult[i].Candidates[j].PersonName = new InternalPersonDetails()
                                {
                                    PersonId = detectionResult[i].Candidates[j].PersonId,
                                    Name     = personName
                                };
                            }
                        }
                    }
                }
                else
                {
                    log.Info($"No dectection result requestId: {requestID}, apiKey:{apikey} ticks: {DateTime.Now.Ticks}");
                    //in case of video, we want to create a link to the face detected by AMS (Azure Media Services) although face api didn't recognize it
                    if (videoName != null)
                    {
                        detectionResult = new FaceDetectResult[1] {
                            new FaceDetectResult()
                            {
                                FaceBlobName = videoName + "/" + name
                            }
                        };
                    }
                }
                string blobname     = videoName ?? name;
                var    actionResult = new FaceIdentifyResult()
                {
                    BlobName            = blobname,
                    ContainerName       = sourceContainerName,
                    ResultContainerName = resultContainerName,
                    BlobLength          = incomingPicture.Length,
                    CreatedDateTime     = DateTime.UtcNow,
                    RequestId           = requestID,
                    ApiKey              = apikey,
                    LargeGroupId        = largegroupid,
                    Source              = source,
                    DetectResultList    = detectionResult
                };
                string strResult = JsonConvert.SerializeObject(actionResult);
                await outputItem.AddAsync(strResult);
            }
            catch (Exception ex)
            {
                log.Error($"Exception Message: {ex.Message}, requestId: {requestID}, apiKey:{apikey} ticks: {DateTime.Now.Ticks}", ex);
                return false;
            }
            return true;
        }
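RunDetect above is written in the style of an Azure Functions (v1) helper: it receives the trigger payload plus an IAsyncCollector output binding and a TraceWriter. A hypothetical caller might look like the sketch below; the function name, binding paths, the "faceApiKeys" setting and the "camera-01" source value are all assumptions, not part of the original code.

        // Hypothetical Azure Function (v1-style) wiring for RunDetect; bindings and names are illustrative only.
        [FunctionName("DetectFaces")]
        public static async Task Run(
            [BlobTrigger("photos/{name}")] Stream photo,
            string name,
            [Queue("face-results")] IAsyncCollector<object> outputItem,
            TraceWriter log)
        {
            var apiKeys = ConfigurationManager.AppSettings["faceApiKeys"]; // comma-separated Face API keys (assumed setting)
            await RunDetect(Guid.NewGuid(), apiKeys, name, "camera-01",
                            photo, "photos", "face-results", outputItem, log);
        }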
        private async Task starDetection(Stream data)
        {
            faceDetectResult = await CognitiveService.FaceDetect(data);

            processing = false;
        }
        private async void RunButton_OnClicked(object sender, EventArgs e)
        {
            // Select the image
            var photo             = "";
            var imageChoiceResult = await DisplayAlert("どちらの画像を使いますか", "", "カメラ", "ローカルフォルダー");

            try
            {
                if (imageChoiceResult)
                {
                    photo = await TakePhotoAsync();
                }
                else
                {
                    photo = await PickPhotoAsync();
                }
            }
            catch (Exception exception)
            {
                await DisplayAlert("エラーが発生しました", exception.Message, "OK");
            }

            // Analyze the image
            ImagePreview.Source = photo;
            var faceResult = new FaceDetectResult();

            try
            {
                faceResult = await DetectFaceAsync(photo);
            }
            catch (Exception exception)
            {
                await DisplayAlert("Face API の呼び出しに失敗しました", exception.Message, "OK");

                return;
            }

            // Display the detection results
            Age.Text    = "年齢 : " + faceResult.age;
            Gender.Text = "性別 : " + faceResult.gender;

            Emotion.Text = "表情 : ";
            switch (faceResult.emotionKey)
            {
            case "Anger":
                Emotion.Text = Emotion.Text + "怒り";
                break;

            case "Contempt":
                Emotion.Text = Emotion.Text + "軽蔑";
                break;

            case "Disgust":
                Emotion.Text = Emotion.Text + "むかつき";
                break;

            case "Fear":
                Emotion.Text = Emotion.Text + "恐れ";
                break;

            case "Happiness":
                Emotion.Text = Emotion.Text + "喜び";
                break;

            case "Neutral":
                Emotion.Text = Emotion.Text + "無表情";
                break;

            case "Sadness":
                Emotion.Text = Emotion.Text + "悲しみ";
                break;

            case "Surprise":
                Emotion.Text = Emotion.Text + "驚き";
                break;

            default:
                break;
            }
            Emotion.Text = Emotion.Text + "(" + faceResult.emotionValue.ToString("0.00%") + ")";
        }
        private async Task FaceDetectAsync(FaceDetector detector, MediaCapture capture, CancellationToken token)
        {
            if (detector == null || capture == null || token == null)
            {
                throw new ArgumentNullException();
            }

            var previewProperties = capture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            var videoFrame        = new VideoFrame(BitmapPixelFormat.Bgra8, (int)previewProperties.Width, (int)previewProperties.Height);

            int width  = (int)previewProperties.Width;
            int height = (int)previewProperties.Height;

            FaceDetectResult result = null;

            var stopWatch = Stopwatch.StartNew();

            {
                using (var currentFrame = await capture.GetPreviewFrameAsync(videoFrame))
                    using (var softwareBitmap = currentFrame.SoftwareBitmap)
                    {
                        if (softwareBitmap == null)
                        {
                            return;
                        }

                        // SoftwareBitmap -> byte array
                        var buffer = new byte[4 * width * height];
                        softwareBitmap.CopyToBuffer(buffer.AsBuffer());

                        token.ThrowIfCancellationRequested();

                        // Detect face
                        result = detector.Detect(buffer, width, height);

                        token.ThrowIfCancellationRequested();
                    }
            }
            stopWatch.Stop();

            videoFrame.Dispose();

            // Draw result to Canvas
            await Dispatcher.RunAsync(CoreDispatcherPriority.High, () =>
            {
                FaceDrawCanvas.Width  = width;
                FaceDrawCanvas.Height = height;

                // Draw fps
                FpsTextBlock.Text = (1000 / stopWatch.ElapsedMilliseconds) + "fps";

                // Draw face point
                if (_faceDrawer != null && result != null)
                {
                    List<FaceDetectData> faces = new List<FaceDetectData>();
                    foreach (var f in result.Faces)
                    {
                        FaceDetectData data = new FaceDetectData();
                        data.FaceRect       = f.FaceRect;

                        foreach (var p in f.FacePoints)
                        {
                            data.FaceLandmarks.Add(p);
                        }

                        faces.Add(data);
                    }

                    _faceDrawer.DrawToCanvas(FaceDrawCanvas, faces);
                }
            });
        }
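FaceDetectAsync above processes a single preview frame. A minimal sketch of a caller that runs it repeatedly until cancelled (the _detector, _capture and _cts fields are assumptions; they are not shown in the example):

        // Hypothetical loop driving FaceDetectAsync once per preview frame until cancellation is requested.
        private async Task RunDetectionLoopAsync()
        {
            _cts = new CancellationTokenSource();
            try
            {
                while (!_cts.IsCancellationRequested)
                {
                    await FaceDetectAsync(_detector, _capture, _cts.Token);
                }
            }
            catch (OperationCanceledException)
            {
                // Expected when cancellation is requested mid-frame.
            }
        }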