public async Task <IEnumerable <Rectangle> > ExtractFacesAsync()
        {
            if (_facesResponse == null)
            {
                var facesRequest = new DetectFacesRequest()
                {
                    Image = _rekognitionImage
                };
                _facesResponse = await _client.DetectFacesAsync(facesRequest);
            }

            return(ExtractFaces());
        }
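
The method above caches the DetectFaces response and defers to a private ExtractFaces() helper that is not part of this listing. A minimal sketch of what such a helper might look like, assuming the class also stores the pixel dimensions of the source image (Rekognition bounding boxes are ratios of the image width and height); the _imageWidth and _imageHeight fields are illustrative, not from the original:

        // Hypothetical helper: converts the cached FaceDetails into pixel-space rectangles.
        // Assumes _imageWidth and _imageHeight hold the source image dimensions.
        private IEnumerable<Rectangle> ExtractFaces()
        {
            foreach (var face in _facesResponse.FaceDetails)
            {
                var box = face.BoundingBox;
                yield return new Rectangle(
                    (int)(box.Left * _imageWidth),
                    (int)(box.Top * _imageHeight),
                    (int)(box.Width * _imageWidth),
                    (int)(box.Height * _imageHeight));
            }
        }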
Example #2
        public async Task <bool> ProcessAsync(string bucketName, string fileName)
        {
            // form the request
            var detectRequest = new DetectFacesRequest
            {
                Image = new Image {
                    S3Object = new S3Object {
                        Bucket = bucketName, Name = fileName
                    }
                }
            };

            // detect any possible faces
            var response = await _rekognitionClient.DetectFacesAsync(detectRequest);

            if (response == null)
            {
                throw new ApplicationException(Messages.RESPONSE_NULL);
            }

            if (response.FaceDetails.Any())
            {
                return(true);
            }

            return(false);
        }
Example #3
        public static async Task <string> FunctionHandler(String photo)
        {
            String bucket = "moodanalysis";
            //ArrayList result = new ArrayList();

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },

                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

            int count = detectFacesResponse.FaceDetails.Count;

            //return false;
            return (count == 1).ToString();
        }
        public async Task <FindFacesResponse> DetectFacesAsync(string sourceImage)
        {
            // Convert the source image into a MemoryStream object
            var imageSource = new Image();

            imageSource.Bytes = _serviceUtils.ConvertImageToMemoryStream(sourceImage);

            // Configure the object that will make the request to AWS Rekognition
            var request = new DetectFacesRequest
            {
                Attributes = new List <string> {
                    "DEFAULT"
                },
                Image = imageSource
            };

            // Call the DetectFaces service
            var response = await _rekognitionClient.DetectFacesAsync(request);

            // Call the box-drawing function and get the generated URL
            var fileName = _serviceUtils.Drawing(imageSource.Bytes, response.FaceDetails);

            // Return the object with the generated URL
            return(new FindFacesResponse(fileName));
        }
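
The _serviceUtils helper is not included in the listing. A plausible sketch of ConvertImageToMemoryStream, assuming sourceImage is a path to a local file (an assumption; in the original it could equally be a base64 string):

        // Hypothetical helper: reads a local image file into a MemoryStream
        // so it can be supplied as Image.Bytes (DetectFaces accepts up to 5 MB of image bytes).
        public MemoryStream ConvertImageToMemoryStream(string sourceImage)
        {
            return new MemoryStream(File.ReadAllBytes(sourceImage));
        }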
Example #5
        private static async Task <DetectFacesResponse> IdentifyFaces(Image image)
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            DetectFacesRequest request = new DetectFacesRequest();

            request.Image = image;
            return(await rekognitionClient.DetectFacesAsync(request));
        }
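
IdentifyFaces leaves building the Image to the caller. A minimal usage sketch that loads a local file into Image.Bytes; the method name and file-path parameter are illustrative:

        // Illustrative caller: loads a local file, detects faces, and prints the count.
        private static async Task ShowFaceCountAsync(string path)
        {
            var image = new Image { Bytes = new MemoryStream(File.ReadAllBytes(path)) };
            DetectFacesResponse response = await IdentifyFaces(image);
            Console.WriteLine($"Detected {response.FaceDetails.Count} face(s).");
        }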
Example #6
        async void Analyze_Clicked(System.Object sender, System.EventArgs e)
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(AWSAccessKeyID, AWSSecretAccessKey, Amazon.RegionEndpoint.USEast1);

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image()
                {
                    S3Object = new S3Object
                    {
                        Bucket = "babytech-images",
                        Name   = "baby-eyes-mouth-open.jpg"
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    const float confidence_threshold = 80F; // Rekognition confidence values are percentages (0-100)

                    // check if mouth is open
                    if ((face.MouthOpen != null) && (face.MouthOpen.Confidence > confidence_threshold))
                    {
                        FacialAnalysisData += (face.MouthOpen.Value) ? "\n✔ Baby's mouth is open." : "\n❌ Baby's mouth is not open.";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Unable to determine if baby's mouth is open.";
                    }

                    // check if eyes are open
                    if ((face.EyesOpen != null) && (face.EyesOpen.Confidence > confidence_threshold))
                    {
                        FacialAnalysisData += (face.EyesOpen.Value) ? "\n✔ Baby's eyes are open." : "\n❌ Baby's eyes are not open.";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Unable to determine if baby's eyes are open.";
                    }
                }

                await DisplayAlert("Analysis Results", FacialAnalysisData, "OK");
            }
            catch (Exception exception)
            {
                Console.WriteLine(exception.Message);
            }
        }
Example #7
        public async Task <List <DetectedFaceDetail> > GetFaceDetailsFromLocalFileAsync(string filePath)
        {
            // Create a MemoryStream from the image file
            var imageStream = await GenerateImageStreamFromLocalFileAsync(filePath);

            if (imageStream == null)
            {
                return(null);
            }

            try
            {
                // Create the AWS Rekognition client
                using (var rekognitionClient = new AmazonRekognitionClient(Secrets.AccessKey,
                                                                           Secrets.SecretKey,
                                                                           RegionEndpoint.APNortheast1))
                {
                    var request = new DetectFacesRequest
                    {
                        Image = new Image
                        {
                            Bytes = imageStream
                        },
                        Attributes = new List <string> {
                            "ALL"
                        }
                    };

                    // Receive the response and extract the required information
                    var response = await rekognitionClient.DetectFacesAsync(request);

                    var faceList = new List <DetectedFaceDetail>();
                    foreach (var face in response.FaceDetails)
                    {
                        faceList.Add(new DetectedFaceDetail
                        {
                            Gender              = face.Gender.Value,
                            GenderConfidence    = face.Gender.Confidence,
                            HappinessConfidence = face.Emotions.Find(x => x.Type.Value == EmotionName.HAPPY).Confidence,
                            AgeRangeHigh        = face.AgeRange.High,
                            AgeRangeLow         = face.AgeRange.Low
                        });
                    }

                    return(faceList);
                }
            }
            catch (Exception e)
            {
                Debug.WriteLine(e.Message);
            }

            return(null);
        }
Example #8
        /// <summary>
        /// Identifies faces in the image file. If faces are found, the
        /// method adds bounding boxes.
        /// </summary>
        /// <param name="client">The Rekognition client used to call
        /// RecognizeCelebritiesAsync.</param>
        /// <param name="filename">The name of the file that potentially
        /// contins images of celebrities.</param>
        public static async Task IdentifyFaces(AmazonRekognitionClient client, string filename)
        {
            // Request needs image bytes, so read and add to request.
            byte[] data = File.ReadAllBytes(filename);

            DetectFacesRequest request = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data),
                },
            };

            DetectFacesResponse response = await client.DetectFacesAsync(request);

            if (response.FaceDetails.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles.
                Bitmap facesHighlighted = new Bitmap(filename);
                Pen    pen = new Pen(Color.Black, 3);

                // Create a graphics context.
                using (var graphics = System.Drawing.Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in response.FaceDetails)
                    {
                        // Get the bounding box.
                        BoundingBox bb = fd.BoundingBox;
                        Console.WriteLine($"Bounding box = ({bb.Left}, {bb.Top}, {bb.Height}, {bb.Width})");

                        // Draw the rectangle using the bounding box values.
                        // They are percentages so scale them to the picture.
                        graphics.DrawRectangle(
                            pen,
                            x: facesHighlighted.Width * bb.Left,
                            y: facesHighlighted.Height * bb.Top,
                            width: facesHighlighted.Width * bb.Width,
                            height: facesHighlighted.Height * bb.Height);
                    }
                }

                // Save the image with highlights as PNG.
                string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);

                Console.WriteLine(">>> " + response.FaceDetails.Count + " face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No faces found");
            }
        }
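
A minimal caller for the method above; the region and file name are illustrative:

        public static async Task Main()
        {
            // Illustrative caller: create a client and highlight faces in a local file.
            var client = new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1);
            await IdentifyFaces(client, "group-photo.jpg");
        }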
Example #9
        // snippet-start:[Rekognition.dotnetv3.DetectFacesExample]
        public static async Task Main()
        {
            string photo  = "input.jpg";
            string bucket = "bucket";

            var rekognitionClient = new AmazonRekognitionClient();

            var detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket,
                    },
                },

                // Attributes can be "ALL" or "DEFAULT".
                // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
                Attributes = new List <string>()
                {
                    "ALL"
                },
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    Console.WriteLine($"BoundingBox: top={face.BoundingBox.Left} left={face.BoundingBox.Top} width={face.BoundingBox.Width} height={face.BoundingBox.Height}");
                    Console.WriteLine($"Confidence: {face.Confidence}");
                    Console.WriteLine($"Landmarks: {face.Landmarks.Count}");
                    Console.WriteLine($"Pose: pitch={face.Pose.Pitch} roll={face.Pose.Roll} yaw={face.Pose.Yaw}");
                    Console.WriteLine($"Brightness: {face.Quality.Brightness}\tSharpness: {face.Quality.Sharpness}");

                    if (hasAll)
                    {
                        Console.WriteLine($"Estimated age is between {face.AgeRange.Low} and {face.AgeRange.High} years old.");
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
Example #10
 public static IAsyncOperation <string> GetFaceDetails(string base64, string AccessKey, string SecretKey)
 {
     return(Task.Run <string>(async() =>
     {
         byte[] imageBytes;
         try
         {
             base64 = base64.Substring(base64.IndexOf(',') + 1).Trim('\0');
             imageBytes = System.Convert.FromBase64String(base64);
         }
         catch (Exception e) {
             return e.Message;
         }
         string sJSONResponse = "";
         AWSCredentials credentials;
         try
         {
             credentials = new BasicAWSCredentials(AccessKey, SecretKey);
         }
         catch (Exception e)
         {
             throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                                             + "Please make sure that your credentials file is at the correct "
                                             + "location (/Users/<userid>/.aws/credentials), and is in a valid format.", e);
         }
         DetectFacesRequest request = new DetectFacesRequest {
             Attributes = new List <string>(new string[] { "ALL" })
         };
         DetectFacesResponse result = null;
         request.Image = new Image {
             Bytes = new MemoryStream(imageBytes, 0, imageBytes.Length)
         };
         AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(credentials, RegionEndpoint.USWest2);
         try
         {
             result = await rekognitionClient.DetectFacesAsync(request);
         }
          catch (AmazonRekognitionException)
          {
              throw;
          }
          // Serialize the detected face details to JSON and return them.
         sJSONResponse = JsonConvert.SerializeObject(result.FaceDetails);
         return sJSONResponse;
     }).AsAsyncOperation());
 }
        public async Task <RecognitionResult> Recognise(string bucketName, string key, RecognitionRequest request)
        {
            var client       = new AmazonRekognitionClient(_configuration.GetSection("AWS:AccessKey").Value.ToString(), _configuration.GetSection("AWS:SecretKey").Value.ToString(), Amazon.RegionEndpoint.APSoutheast2);
            var faceResponse = await client.DetectFacesAsync(new DetectFacesRequest
            {
                Image = new Image
                {
                    S3Object = new Amazon.Rekognition.Model.S3Object
                    {
                        Bucket = bucketName,
                        Name   = key
                    }
                },
                Attributes = new List <string> {
                    "ALL"
                }
            });

            List <FaceDetail> faceDetails = faceResponse?.FaceDetails;

            if (faceDetails != null)
            {
                var results = FilterFaces(faceDetails, request);
                if (results.Any())
                {
                    Console.WriteLine($"Found at least one person in {key.GetFileFromKey()} meeting the criteria.");
                    return(new RecognitionResult()
                    {
                        IsFound = true,
                        IsAlone = results.Count() == 1 && faceDetails.Count() == 1
                    });
                }
            }

            return(new RecognitionResult()
            {
                IsFound = false,
                IsAlone = false
            });
        }
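
FilterFaces and the RecognitionRequest type are not shown in this example. A hypothetical sketch (using System.Linq), assuming the request carries an age range to match; the MinAge and MaxAge property names are invented for illustration:

        // Hypothetical helper: keeps only faces whose estimated age range overlaps the
        // requested range. RecognitionRequest.MinAge/MaxAge are illustrative properties.
        private static List<FaceDetail> FilterFaces(List<FaceDetail> faceDetails, RecognitionRequest request)
        {
            return faceDetails
                   .Where(f => f.AgeRange != null &&
                               f.AgeRange.High >= request.MinAge &&
                               f.AgeRange.Low <= request.MaxAge)
                   .ToList();
        }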
Example #12
        // For Image analysis
        public List <FaceDetail> DetectFaces(MemoryStream stream, string target, out string message)
        {
            string outMessage = "";
            var    response   = _client.DetectFacesAsync(new DetectFacesRequest
            {
                Attributes = { "ALL" },
                Image      = new Image {
                    Bytes = stream
                }
            }).Result;

            int faceCounter = 1;

            foreach (var faceDetail in response.FaceDetails)
            {
                float  emotionConfidence = 0;
                string emotionName       = string.Empty;
                //Determines dominant emotion
                foreach (var emotion in faceDetail.Emotions)
                {
                    if (emotion.Confidence > emotionConfidence)
                    {
                        emotionConfidence = emotion.Confidence;
                        emotionName       = emotion.Type.Value;
                    }
                }
                if (faceDetail.Gender.Value.ToString().ToLower() == target.ToLower())
                {
                    outMessage = "The Object '" + target.ToUpper() + "' in your watchlist has been found in live stream with '" + Convert.ToInt32(faceDetail.Gender.Confidence) + "%' confidence.";
                }
                faceCounter++;
            }
            message = outMessage;
            LogResponse(GetIndentedJson(response), "DetectFaces");
            return(response.FaceDetails);
        }
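
The dominant-emotion loop above can also be written with LINQ (System.Linq); a minimal helper sketch, with an illustrative method name:

        // LINQ alternative to the dominant-emotion loop: returns the name of the
        // highest-confidence emotion, or an empty string if none were returned.
        private static string GetDominantEmotion(FaceDetail faceDetail)
        {
            var dominant = faceDetail.Emotions.OrderByDescending(e => e.Confidence).FirstOrDefault();
            return dominant?.Type?.Value ?? string.Empty;
        }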
Example #13
        public static async Task <string> FunctionHandler(String photo)
        {
            String bucket = "moodanalysis";
            //ArrayList result = new ArrayList();
            string result = "";

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            // Recognizes User's face
            CompareFacesRequest CFR = new CompareFacesRequest()
            {
                //SimilarityThreshold = 50,

                SourceImage = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = "referencePhoto.jpg",
                        Bucket = bucket
                    },
                },

                TargetImage = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },
            };

            CompareFacesResponse compareFacesResponse = await rekognitionClient.CompareFacesAsync(CFR);

            string howManyFaces = "";

            if (compareFacesResponse.FaceMatches.Count == 0)
            {
                return("");
            }

            //int index = 0, bestIndex = 0;
            var         bestMatch       = compareFacesResponse.FaceMatches[0];
            float       bestMatchResult = compareFacesResponse.FaceMatches[0].Similarity;
            BoundingBox bestBoundingBox = compareFacesResponse.FaceMatches[0].Face.BoundingBox;

            foreach (var faceMatch in compareFacesResponse.FaceMatches)
            {
                howManyFaces += faceMatch.Similarity + ",";

                if (bestMatchResult < faceMatch.Similarity)
                {
                    bestMatch       = faceMatch;
                    bestBoundingBox = faceMatch.Face.BoundingBox;
                    bestMatchResult = faceMatch.Similarity;
                }
            }

            // Detects emotions of faces in photo
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },

                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

            //int i = 0;
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                if (face.BoundingBox.Height == bestBoundingBox.Height &&
                    face.BoundingBox.Left == bestBoundingBox.Left &&
                    face.BoundingBox.Top == bestBoundingBox.Top &&
                    face.BoundingBox.Width == bestBoundingBox.Width)
                {
                    //var emotQuery = FilterEmotions(face, IsLowConfidence);

                    FilterEmotions filter = delegate(FaceDetail faceFilter, ConfidenceFilterDelegate confFilter)
                    {
                        return(faceFilter.Emotions.FindAll(n => confFilter(n)).ToList());
                    };

                    var emotQuery = filter(face, IsLowConfidence);

                    //IEnumerable<Emotion> emotQuery =
                    //    from faceEmotion in face.Emotions
                    //    where faceEmotion.Confidence > 10
                    //    select faceEmotion;

                    // GRAB THE EMOTION
                    foreach (Emotion emot in emotQuery)
                    {
                        result += emot.Type + ",";
                    }

                    break;
                }
            }

            //delete the last ,
            if (result.Length != 0)
            {
                result = result.Substring(0, result.Length - 1);
            }

            return(result);
        }
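
The FilterEmotions and ConfidenceFilterDelegate delegate types and the IsLowConfidence predicate are referenced above but not defined in the listing. A plausible set of declarations; the 10-percent cut-off mirrors the commented-out LINQ query (emotion confidence is a 0-100 percentage), and despite its name the predicate keeps emotions above that cut-off:

        // Hypothetical declarations matching the usage above.
        private delegate bool ConfidenceFilterDelegate(Emotion emotion);
        private delegate List<Emotion> FilterEmotions(FaceDetail face, ConfidenceFilterDelegate filter);

        private static bool IsLowConfidence(Emotion emotion)
        {
            return emotion.Confidence > 10F;
        }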
Example #14
        // snippet-start:[Rekognition.dotnetv3.ImageOrientationBoundingBox]
        public static async Task Main()
        {
            string photo = @"D:\Development\AWS-Examples\Rekognition\target.jpg"; // "photo.jpg";

            var rekognitionClient = new AmazonRekognitionClient();

            var image = new Amazon.Rekognition.Model.Image();

            try
            {
                using var fs = new FileStream(photo, FileMode.Open, FileAccess.Read);
                byte[] data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
            catch (Exception)
            {
                Console.WriteLine("Failed to load file " + photo);
                return;
            }

            int height;
            int width;

            // Used to extract original photo width/height
            using (var imageBitmap = new Bitmap(photo))
            {
                height = imageBitmap.Height;
                width  = imageBitmap.Width;
            }

            Console.WriteLine("Image Information:");
            Console.WriteLine(photo);
            Console.WriteLine("Image Height: " + height);
            Console.WriteLine("Image Width: " + width);

            try
            {
                var detectFacesRequest = new DetectFacesRequest()
                {
                    Image      = image,
                    Attributes = new List <string>()
                    {
                        "ALL"
                    },
                };

                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                detectFacesResponse.FaceDetails.ForEach(face =>
                {
                    Console.WriteLine("Face:");
                    ShowBoundingBoxPositions(
                        height,
                        width,
                        face.BoundingBox,
                        detectFacesResponse.OrientationCorrection);

                    Console.WriteLine($"BoundingBox: top={face.BoundingBox.Left} left={face.BoundingBox.Top} width={face.BoundingBox.Width} height={face.BoundingBox.Height}");
                    Console.WriteLine($"The detected face is estimated to be between {face.AgeRange.Low} and {face.AgeRange.High} years old.\n");
                });
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
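
ShowBoundingBoxPositions is not part of this listing. A simplified sketch that handles the common un-rotated case, where the bounding-box ratios map directly onto the stored pixels; the rotated cases (ROTATE_90/180/270) would need the coordinates remapped before scaling:

        // Simplified sketch: prints pixel coordinates for an un-rotated image.
        // Bounding box values are ratios of the image width and height.
        public static void ShowBoundingBoxPositions(int imageHeight, int imageWidth, BoundingBox box, OrientationCorrection correction)
        {
            if (correction != null && correction.Value != "ROTATE_0")
            {
                Console.WriteLine($"Orientation correction {correction.Value} is not handled by this sketch.");
                return;
            }

            Console.WriteLine($"Left: {(int)(box.Left * imageWidth)}, Top: {(int)(box.Top * imageHeight)}");
        }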
Example #15
        async void Analyze_Clicked(System.Object sender, System.EventArgs e)
        {
            CognitoAWSCredentials credentials = new CognitoAWSCredentials(
                AWS.IdentityPoolId,
                RegionEndpoint.USEast1
                );

            S3Uploader uploader   = new S3Uploader(credentials);
            string     bucketName = "babytech-images";
            string     keyName    = patient.PatientID + ".jpg";
            await uploader.UploadFileAsync(bucketName, keyName, PhotoPath);

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(credentials, Amazon.RegionEndpoint.USEast1);

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image()
                {
                    S3Object = new S3Object
                    {
                        Bucket = bucketName,
                        Name   = keyName
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    // check if mouth is open (Rekognition confidence values are percentages, 0-100)
                    if ((face.MouthOpen != null) && (face.MouthOpen.Value))
                    {
                        FacialAnalysisData += "\n❌ Baby's mouth should be closed";
                    }
                    if ((face.MouthOpen != null) && (!face.MouthOpen.Value) && (face.MouthOpen.Confidence > 88F))
                    {
                        FacialAnalysisData += "\n✔ Baby's mouth should be closed";
                    }

                    // check if eyes are open

                    if ((face.EyesOpen != null) && (face.EyesOpen.Value))
                    {
                        FacialAnalysisData += "\n✔ Baby's eyes should be open";
                    }
                    if ((face.EyesOpen != null) && (!face.EyesOpen.Value) && (face.EyesOpen.Confidence > 93F))
                    {
                        FacialAnalysisData += "\n❌ Baby's eyes should be open";
                    }

                    // check for eyeglasses
                    if ((face.Eyeglasses != null) && (face.Eyeglasses.Value))
                    {
                        FacialAnalysisData += "\n❌ Baby should not be wearing eyeglasses";
                    }
                    if ((face.Eyeglasses != null) && (!face.Eyeglasses.Value))
                    {
                        FacialAnalysisData += "\n✔ Baby should not be wearing eyeglasses";
                    }

                    //check brightness (Quality values range from 0 to 100)
                    if ((face.Quality.Brightness != null) && (face.Quality.Brightness > 61F) && (face.Quality.Brightness < 97F))
                    {
                        FacialAnalysisData += "\n✔ Picture brightness is acceptable";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Picture brightness is not acceptable";
                    }

                    //check sharpness
                    if ((face.Quality.Sharpness != null) && (face.Quality.Sharpness > 67F))
                    {
                        FacialAnalysisData += "\n✔ Picture sharpness is acceptable";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Picture sharpness is not acceptable";
                    }


                    // check for smile
                    if ((face.Smile != null) && (face.Smile.Value) && (face.Smile.Confidence > 83F))
                    {
                        FacialAnalysisData += "\n❌ Baby should not be smiling";
                    }
                    if ((face.Smile != null) && (!face.Smile.Value))
                    {
                        FacialAnalysisData += "\n✔ Baby should not be smiling";
                    }


                    // check for calm expression
                    Emotion calmEmotion = face.Emotions.Find(emotion => emotion.Type == "CALM");

                    if ((calmEmotion != null) && (calmEmotion.Confidence > 93F))
                    {
                        FacialAnalysisData += "\n✔ Baby should have a neutral facial expression";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Baby should have a neutral facial expression";
                    }


                    //check sharpness
                    if ((face.Quality.Sharpness != null) && (face.Quality.Sharpness > 67F))
                    {
                        FacialAnalysisData += "\n✔ Picture sharpness is acceptable";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Picture sharpness is not acceptable";
                    }

                    //check brightness
                    if ((face.Quality.Brightness != null) && (face.Quality.Brightness > 61F) && (face.Quality.Brightness < 97F))
                    {
                        FacialAnalysisData += "\n✔ Picture brightness is acceptable";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Picture brightness is not acceptable";
                    }
                }

                await DisplayAlert("Analysis Results", FacialAnalysisData, "OK");
            }
            catch (Exception exception)
            {
                Console.WriteLine(exception.Message);
            }
        }