/// <summary>
/// Runs Amazon Rekognition DetectFaces on an in-memory image and returns the first detected face.
/// </summary>
/// <param name="rekognitionClient">Configured Rekognition client used to issue the request.</param>
/// <param name="stream">Image bytes (e.g. JPEG/PNG) wrapped in a MemoryStream; sent as Image.Bytes.</param>
/// <returns>The first FaceDetail found, or null when no face is detected or the service call fails.</returns>
public async static Task <FaceDetail> GetFaceDetailFromStream(IAmazonRekognition rekognitionClient, MemoryStream stream)
        {
            FaceDetail result = null;

            // Request the full attribute set (emotions, age range, etc.), not just the default subset.
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image {
                    Bytes = stream
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                PrintFaceDetails(detectFacesResponse.FaceDetails);

                if (detectFacesResponse.FaceDetails.Count > 0)
                {
                    result = detectFacesResponse.FaceDetails[0]; // take the 1st face only
                }
            }
            catch (AmazonRekognitionException rekognitionException)
            {
                // BUG FIX: the previous WriteLine(Message, InnerException) call used the format-string
                // overload, which throws FormatException when Message contains '{' or '}' and otherwise
                // silently drops the inner exception. Log both values explicitly instead.
                Console.WriteLine($"{rekognitionException.Message} {rekognitionException.InnerException}");
            }
            return(result);
        }
// Example #2
        /// <summary>
        /// Runs Amazon Rekognition DetectLabels on an in-memory image and prints the detected labels.
        /// </summary>
        /// <param name="rekognitionClient">Configured Rekognition client used to issue the request.</param>
        /// <param name="stream">Image bytes (e.g. JPEG/PNG) wrapped in a MemoryStream; sent as Image.Bytes.</param>
        /// <returns>
        /// Always null. The signature mirrors GetFaceDetailFromStream, but DetectLabels yields Labels,
        /// not FaceDetails, so there is no face to return.
        /// </returns>
        public async static Task <FaceDetail> GetObjectDetailFromStream(IAmazonRekognition rekognitionClient, MemoryStream stream)
        {
            // Ask for at most 10 labels with at least 75% confidence.
            DetectLabelsRequest detectLabelsRequest = new DetectLabelsRequest()
            {
                Image = new Image {
                    Bytes = stream
                },
                MaxLabels     = 10,
                MinConfidence = 75F
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = await rekognitionClient.DetectLabelsAsync(detectLabelsRequest);

                PrintObjectDetails(detectLabelsResponse.Labels);
            }
            catch (AmazonRekognitionException rekognitionException)
            {
                // BUG FIX: avoid the WriteLine(format, arg) overload — it treated Message as a format
                // string (FormatException risk on braces) and otherwise ignored the inner exception.
                Console.WriteLine($"{rekognitionException.Message} {rekognitionException.InnerException}");
            }

            // A label-detection call produces no FaceDetail; kept for signature compatibility.
            return(null);
        }
        /// <summary>
        /// Runs Amazon Rekognition DetectFaces on an image stored in S3 and returns the first detected face.
        /// </summary>
        /// <param name="rekognitionClient">Configured Rekognition client used to issue the request.</param>
        /// <param name="bucketName">S3 bucket containing the image.</param>
        /// <param name="keyName">S3 object key of the image.</param>
        /// <returns>The first FaceDetail found, or null when no face is detected or the service call fails.</returns>
        /// <remarks>
        /// BUG FIX: this was declared 'async void', which makes exceptions unobservable to callers and
        /// discarded the computed result. Returning Task&lt;FaceDetail&gt; is backward compatible for
        /// existing fire-and-forget callers.
        /// </remarks>
        public async static Task <FaceDetail> GetFaceDetailFromS3(IAmazonRekognition rekognitionClient, string bucketName, string keyName)
        {
            FaceDetail result = null;

            // Request the full attribute set for the S3-hosted image.
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image {
                    S3Object = new S3Object {
                        Bucket = bucketName,
                        Name   = keyName
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                PrintFaceDetails(detectFacesResponse.FaceDetails);

                if (detectFacesResponse.FaceDetails.Count > 0)
                {
                    result = detectFacesResponse.FaceDetails[0]; // take the 1st face only
                }
            }
            catch (AmazonRekognitionException rekognitionException)
            {
                // BUG FIX: avoid the WriteLine(format, arg) overload (FormatException risk; see above).
                Console.WriteLine($"{rekognitionException.Message} {rekognitionException.InnerException}");
            }
            return(result);
        }
// Example #4
        /// <summary>
        /// Flattens the boolean facial attributes of a FaceDetail into a flat list of
        /// name / value / confidence entries.
        /// </summary>
        /// <param name="face">Face analysis result whose attributes are copied out.</param>
        /// <returns>One RecognitionItem per tracked attribute (Beard, Eyeglasses, EyesOpen, ...).</returns>
        private List <RecognitionItem> GetRecognitionItems(FaceDetail face)
        {
            var items = new List <RecognitionItem>();

            items.Add(new RecognitionItem
            {
                Name       = "Beard",
                Value      = face.Beard.Value.ToString(),
                Confidence = face.Beard.Confidence
            });

            items.Add(new RecognitionItem
            {
                Name       = "Eyeglasses",
                Value      = face.Eyeglasses.Value.ToString(),
                Confidence = face.Eyeglasses.Confidence
            });

            items.Add(new RecognitionItem
            {
                Name       = "EyesOpen",
                Value      = face.EyesOpen.Value.ToString(),
                Confidence = face.EyesOpen.Confidence
            });

            items.Add(new RecognitionItem
            {
                Name       = "MouthOpen",
                Value      = face.MouthOpen.Value.ToString(),
                Confidence = face.MouthOpen.Confidence
            });

            items.Add(new RecognitionItem
            {
                Name       = "Mustache",
                Value      = face.Mustache.Value.ToString(),
                Confidence = face.Mustache.Confidence
            });

            items.Add(new RecognitionItem
            {
                Name       = "Smile",
                Value      = face.Smile.Value.ToString(),
                Confidence = face.Smile.Confidence
            });

            items.Add(new RecognitionItem
            {
                Name       = "Sunglasses",
                Value      = face.Sunglasses.Value.ToString(),
                Confidence = face.Sunglasses.Confidence
            });

            return(items);
        }
        /// <summary>
        /// Decodes a base64 image and runs Rekognition DetectFaces on it.
        /// </summary>
        /// <param name="imageData">Carries the base64 payload (base64Data) and the original file name.</param>
        /// <param name="imgData">Optional scratch buffer; always overwritten with the decoded bytes.</param>
        /// <returns>
        /// Currently always the empty string: the summary builder is never populated (work in progress).
        /// </returns>
        /// <exception cref="Exception">Rethrows any failure after logging it.</exception>
        public string GetImageInfo(ImageData imageData, byte[] imgData = null)
        {
            try
            {
                // Decode the posted image and keep it in the instance-level stream used by the request.
                imgData = Convert.FromBase64String(imageData.base64Data);

                _imageData = new MemoryStream(imgData);
                DetectFacesRequest detectFaces = new DetectFacesRequest()
                {
                    Image = new Image()
                    {
                        Bytes = _imageData
                    }
                };

                // NOTE(review): blocking on .Result can deadlock on frameworks with a synchronization
                // context and wraps failures in AggregateException; prefer making this method async
                // (that would change the public signature, so it is only flagged here).
                DetectFacesResponse facesResponse = _rekognitionClient.DetectFacesAsync(detectFaces).Result;

                // TODO: build the summary from facesResponse.FaceDetails. The builder is never appended
                // to, so the returned string is always empty. (Removed: unused 'path', 'faceDetail',
                // 'lstCelebrities' locals and a dead commented-out switch block.)
                StringBuilder sbCelebrities = new StringBuilder();

                string Celebrities = sbCelebrities.ToString().TrimEnd(',');

                return(Celebrities);
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
                throw;
            }
        }
// Example #6
        /// <summary>
        /// Detects the emotions of the first face found in an S3-hosted image.
        /// </summary>
        /// <param name="_image">S3 object key of the photo to analyze.</param>
        /// <param name="bucket">
        /// S3 bucket holding the photo. Defaults to the previously hard-coded bucket so existing
        /// callers are unaffected.
        /// </param>
        /// <returns>The Emotions of the first detected face, or null when no face is found or the call fails.</returns>
        public List <Emotion> EmotionDetect(string _image, string bucket = "ngankhanh98")
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = _image,
                        Bucket = bucket
                    },
                },
                // Request the full attribute set so Emotions is populated.
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);

                // Guard against "no face detected" instead of letting the indexer throw.
                if (detectFacesResponse.FaceDetails.Count == 0)
                {
                    return(null);
                }

                return(detectFacesResponse.FaceDetails[0].Emotions);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
                return(null);
            }
        }
// Example #7
        /// <summary>
        /// Validates a face-detection job's result: exactly one confident, eyes-open, well-lit face
        /// showing none of the disallowed emotions.
        /// </summary>
        /// <param name="context">Lambda context (currently unused; kept for signature compatibility).</param>
        /// <param name="jobId">Rekognition face-detection job whose results are fetched.</param>
        /// <returns>True when the single detected face passes every quality gate; otherwise false.</returns>
        private async Task <bool> ProcessLabels(ILambdaContext context, string jobId)
        {
            // BUG FIX: the original do/while pagination loop was dead code — every branch of the first
            // iteration returned, so NextToken was never consumed. Fetch a single page instead; the
            // observable behavior is unchanged.
            GetFaceDetectionRequest request = new GetFaceDetectionRequest()
            {
                JobId      = jobId,
                MaxResults = MaxResults,
            };

            GetFaceDetectionResponse response = await this.rekClient.GetFaceDetectionAsync(request).ConfigureAwait(false);

            // Require exactly one face, detected with better-than-60% confidence.
            if (response.Faces.Count != 1 || !(response.Faces[0].Face.Confidence > 60))
            {
                return(false);
            }

            FaceDetail face = response.Faces[0].Face;

            // Reject the face when any of its detected emotions is on the deny list.
            if (face.Emotions.Any(x => this.invalidEmotions.Any(y => y == x.Type)))
            {
                return(false);
            }

            // Eyes must be open, and the model must be reasonably sure of it.
            if (!face.EyesOpen.Value || face.EyesOpen.Confidence < 60)
            {
                return(false);
            }

            // Basic image-quality gate on brightness and sharpness.
            if (face.Quality.Brightness < 50.00f || face.Quality.Sharpness < 50.00f)
            {
                return(false);
            }

            return(true);
        }
        /// <summary>
        /// Receives a base64 stage image, detects the face, crops it, uploads the crop to S3, stores a
        /// StageLog row, and returns the log with a presigned URL for the uploaded image.
        /// </summary>
        /// <param name="dto">Game id, action type, and base64-encoded image payload.</param>
        /// <returns>200 with the saved StageLog (file_loc replaced by a presigned URL), or 400 on bad input.</returns>
        public async Task <IActionResult> Post([FromBody] GameStagePostImageDTO dto)
        {
            Console.WriteLine("PostImage entered.");

            string bucketName = "reinvent-gottalent";

            // Retrieving image data
            // ex: game/10/Happiness.jpg
            string keyName        = string.Format("game/{0}/{1}.jpg", dto.gameId, dto.actionType);
            // NOTE(review): croppedKeyName is computed but the cropped image is uploaded under keyName —
            // confirm which key is intended.
            string croppedKeyName = string.Format("game/{0}/{1}_cropped.jpg", dto.gameId, dto.actionType);

            byte[] imageByteArray = Convert.FromBase64String(dto.base64Image);
            if (imageByteArray.Length == 0)
            {
                return(BadRequest("Image length is 0."));
            }

            StageLog newStageLog = null;

            using (MemoryStream ms = new MemoryStream(imageByteArray))
            {
                // call Rekognition API
                FaceDetail faceDetail = await RekognitionUtil.GetFaceDetailFromStream(this.RekognitionClient, ms);

                // Guard: without a detected face, every faceDetail dereference below would throw.
                if (faceDetail == null)
                {
                    return(BadRequest("No face detected in image."));
                }

                // Crop image to get face only.
                // BUG FIX: both GDI+ images and the cropped stream are IDisposable and were leaked.
                using (System.Drawing.Image originalImage = System.Drawing.Image.FromStream(ms))
                using (System.Drawing.Image croppedImage = GetCroppedFaceImage(originalImage, faceDetail.BoundingBox))
                using (MemoryStream croppedms = new MemoryStream())
                {
                    croppedImage.Save(croppedms, ImageFormat.Jpeg);

                    // Upload the cropped image to the S3 bucket.
                    await Task.Run(() => S3Util.UploadToS3(this.S3Client, bucketName, keyName, croppedms));
                }

                // Get a specific emotion score ("Profile" shots are not scored).
                double emotionScore = 0.0;
                if (dto.actionType != "Profile")
                {
                    emotionScore = RekognitionUtil.GetEmotionScore(faceDetail.Emotions, dto.actionType);
                }

                // Midpoint of the detected age range.
                int    evaluatedAge    = (faceDetail.AgeRange.High + faceDetail.AgeRange.Low) / 2;
                string evaluatedGender = faceDetail.Gender.Value;

                // Database update
                newStageLog = new StageLog {
                    game_id     = dto.gameId,
                    action_type = dto.actionType,
                    score       = emotionScore,
                    file_loc    = keyName,
                    age         = evaluatedAge,
                    gender      = evaluatedGender,
                    log_date    = DateTime.Now
                };

                _context.StageLog.Add(newStageLog);
                await _context.SaveChangesAsync();
            }

            // Send response: hand back a presigned URL instead of the raw S3 key.
            string signedURL = S3Util.GetPresignedURL(this.S3Client, bucketName, keyName);

            newStageLog.file_loc = signedURL;

            return(Ok(newStageLog));
        }