Example #1
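This example detects faces in a captured frame with an OpenCvSharp Haar cascade, crops each detection with some padding, and sends the crops to the Google Cloud Vision API (images:annotate) to obtain per-face emotion likelihoods.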
        private FaceCropResult DetectFaces(ImageCapture capture)
        {
            // Run the Haar cascade over the frame; the 1.08 scale step and the
            // 30x30 minimum window trade detection speed for small-face sensitivity.
            Rect[] rects = _classifier.DetectMultiScale(capture.image, 1.08, 2, HaarDetectionType.ScaleImage, new Size(30, 30));

            FaceCropResult faces = new FaceCropResult
            {
                CaptureTime = capture.captureTime,
                Faces       = new FaceCropResult.Face[rects.Length]
            };

            for (int i = 0; i < rects.Length; i++)
            {
                // Inflate grows the rectangle on every side, so this adds 25%
                // of the width/height as padding around the detected face.
                rects[i].Inflate((int)(rects[i].Width * 0.25), (int)(rects[i].Height * 0.25));

                // Clamp the padded rectangle to the frame so SubMat below
                // cannot read outside the image bounds.
                rects[i] = rects[i].Intersect(new Rect(0, 0, capture.image.Width, capture.image.Height));

                FaceCropResult.Face face = new FaceCropResult.Face();
                face.Id          = i.ToString();
                // Crop the padded region and JPEG-encode it as a base64 string,
                // the inline image format the Vision API expects.
                face.ImageBase64 = Convert.ToBase64String(capture.image.SubMat(rects[i]).ToBytes(".jpg"));

                faces.Faces[i] = face;
            }

            return faces;
        }
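
FaceCropResult is defined elsewhere in the project. A minimal sketch matching the members used above (CaptureTime, Faces, Id, ImageBase64) could look like the following; the DateTime type of CaptureTime is an assumption:

        public class FaceCropResult
        {
            public DateTime CaptureTime { get; set; }  // assumed to mirror capture.captureTime
            public Face[]   Faces       { get; set; }

            public class Face
            {
                public string Id          { get; set; }
                public string ImageBase64 { get; set; }  // base64-encoded JPEG crop
            }
        }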
        public async Task<RecognitionResult.Face[]> DetectEmotions(FaceCropResult.Face[] faces)
        {
            AnnotationRequest request = new AnnotationRequest();

            request.Requests = new AnnotationRequest.AnnotateImageRequest[faces.Length];

            for (int i = 0; i < faces.Length; i++)
            {
                FaceCropResult.Face face = faces[i];

                var r = new AnnotationRequest.AnnotateImageRequest
                {
                    ImageData = new AnnotationRequest.AnnotateImageRequest.Image
                    {
                        Content = face.ImageBase64
                    },
                    Features = new AnnotationRequest.AnnotateImageRequest.Feature[]
                    {
                        // FACE_DETECTION annotations include per-face emotion
                        // likelihoods (joy, sorrow, anger, surprise).
                        new AnnotationRequest.AnnotateImageRequest.Feature
                        {
                            Type       = "FACE_DETECTION",
                            MaxResults = 5
                        }
                    }
                };

                request.Requests[i] = r;
            }

            try
            {
                // NOTE: for production code, reuse a single shared HttpClient
                // instead of allocating one per call (avoids socket exhaustion).
                HttpClient  client  = new HttpClient();

                // Send the body as application/json (requires using System.Text);
                // StringContent defaults to text/plain, which the Vision API
                // does not expect.
                HttpContent content = new StringContent(JsonConvert.SerializeObject(request), Encoding.UTF8, "application/json");

                var httpResponse = await client.PostAsync("https://vision.googleapis.com/v1/images:annotate?key=" + _apiKey, content);

                string responseData = await httpResponse.Content.ReadAsStringAsync();

                if (httpResponse.IsSuccessStatusCode)
                {
                    AnnotationResponse response = JsonConvert.DeserializeObject<AnnotationResponse>(responseData);

                    List<RecognitionResult.Face> faceResults = new List<RecognitionResult.Face>();

                    for (int i = 0; i < response.Responses.Length && i < faces.Length; i++)
                    {
                        // A crop in which the API found no face comes back without
                        // annotations; skip it instead of indexing an empty array.
                        if (response.Responses[i].FaceAnnotationData == null || response.Responses[i].FaceAnnotationData.Length == 0)
                        {
                            continue;
                        }

                        AnnotationResponse.AnnotateImageResponse.FaceAnnotations faceAnnotations = response.Responses[i].FaceAnnotationData[0];
                        RecognitionResult.Face faceResult = new RecognitionResult.Face
                        {
                            Emotion = new RecognitionResult.Emotion
                            {
                                Anger     = FromLikelyhood(faceAnnotations.Anger),
                                Happiness = FromLikelyhood(faceAnnotations.Joy),
                                Neutral   = 0.0,  // the Vision API reports no neutral likelihood
                                Sadness   = FromLikelyhood(faceAnnotations.Sorrow),
                                Surprise  = FromLikelyhood(faceAnnotations.Surprise)
                            }
                        };

                        faceResults.Add(faceResult);
                    }

                    return faceResults.ToArray();
                }
            }
            catch (Exception)
            {
                // Swallow network and parsing errors and fall through to the
                // null return below; callers must treat null as "no result".
            }

            return null;
        }
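
The AnnotationRequest and AnnotationResponse contract classes are likewise defined elsewhere. A minimal sketch compatible with the Vision API v1 images:annotate JSON shape is shown below; the JsonProperty names follow the documented REST fields, while the mapping onto the original class names is an assumption:

        // requires: using Newtonsoft.Json;
        public class AnnotationRequest
        {
            [JsonProperty("requests")]
            public AnnotateImageRequest[] Requests { get; set; }

            public class AnnotateImageRequest
            {
                [JsonProperty("image")]
                public Image ImageData { get; set; }

                [JsonProperty("features")]
                public Feature[] Features { get; set; }

                public class Image
                {
                    [JsonProperty("content")]
                    public string Content { get; set; }  // base64 image bytes
                }

                public class Feature
                {
                    [JsonProperty("type")]
                    public string Type { get; set; }     // e.g. "FACE_DETECTION"

                    [JsonProperty("maxResults")]
                    public int MaxResults { get; set; }
                }
            }
        }

        public class AnnotationResponse
        {
            [JsonProperty("responses")]
            public AnnotateImageResponse[] Responses { get; set; }

            public class AnnotateImageResponse
            {
                [JsonProperty("faceAnnotations")]
                public FaceAnnotations[] FaceAnnotationData { get; set; }

                public class FaceAnnotations
                {
                    [JsonProperty("joyLikelihood")]
                    public string Joy { get; set; }

                    [JsonProperty("sorrowLikelihood")]
                    public string Sorrow { get; set; }

                    [JsonProperty("angerLikelihood")]
                    public string Anger { get; set; }

                    [JsonProperty("surpriseLikelihood")]
                    public string Surprise { get; set; }
                }
            }
        }

FromLikelyhood (the spelling follows the original identifier) is a private helper that is also not shown; presumably it converts the API's likelihood strings into a numeric score. A plausible sketch, with the 0..1 scale being an assumption:

        // Assumed helper: maps Vision API likelihood strings to a 0..1 score.
        private static double FromLikelyhood(string likelihood)
        {
            switch (likelihood)
            {
                case "VERY_LIKELY":   return 1.0;
                case "LIKELY":        return 0.75;
                case "POSSIBLE":      return 0.5;
                case "UNLIKELY":      return 0.25;
                case "VERY_UNLIKELY": return 0.0;
                default:              return 0.0;  // includes "UNKNOWN"
            }
        }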