Example #1
        /// <summary>
        /// Adds the face detected in an image to a specific collection.
        /// The image must contain exactly one face.
        /// </summary>
        /// <param name="collectionId">The ID of the collection to add the face to.</param>
        /// <param name="image">The image containing the face to index.</param>
        public FaceRecord AddImageToCollection(string collectionId, Amazon.Rekognition.Model.Image image)
        {
            AmazonRekognitionClient rekognitionClient = AmazonClient.GetInstance();

            //Validate that image contains only one face.
            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(new Amazon.Rekognition.Model.DetectFacesRequest
            {
                Attributes = new List <string> {
                    "ALL"
                },
                Image = image
            });

            if (null != detectFacesResponse.FaceDetails && detectFacesResponse.FaceDetails.Count > 1)
            {
                throw new ArgumentException("More than one face was detected in the image.", nameof(image));
            }

            IndexFacesRequest indexFacesRequest = new IndexFacesRequest()
            {
                Image               = image,
                CollectionId        = collectionId,
                DetectionAttributes = new List <String>()
                {
                    "ALL"
                }
            };

            IndexFacesResponse indexFacesResponse = rekognitionClient.IndexFaces(indexFacesRequest);

            return(indexFacesResponse.FaceRecords.FirstOrDefault());
        }
        public void ScoreFaces(DetectFacesResponse detectFacesResponse)
        {
            LogInfo(JsonConvert.SerializeObject(detectFacesResponse));

            // LEVEL 4: choose one or more categories to build criteria from
            // ageRange, beard, boundingBox, eyeglasses, eyesOpen, gender, mouthOpen, mustache, pose, quality, smile, sunglasses
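            // A minimal scoring sketch (hypothetical criteria, not part of the original sample):
            // score each face on a few of the categories listed above. Adjust the
            // categories and thresholds to match your own LEVEL 4 criteria.
            int score = 0;
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                if (face.EyesOpen != null && face.EyesOpen.Value && face.EyesOpen.Confidence > 90F)
                {
                    score++;
                }
                if (face.Smile != null && face.Smile.Value && face.Smile.Confidence > 90F)
                {
                    score++;
                }
                if (face.Sunglasses != null && !face.Sunglasses.Value)
                {
                    score++;
                }
            }
            LogInfo($"Face score: {score}");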
        }
        /// <summary>
        /// Unmarshalls the response from the service into the response class.
        /// </summary>
        /// <param name="context"></param>
        /// <returns></returns>
        public override AmazonWebServiceResponse Unmarshall(JsonUnmarshallerContext context)
        {
            DetectFacesResponse response = new DetectFacesResponse();

            context.Read();
            int targetDepth = context.CurrentDepth;

            while (context.ReadAtDepth(targetDepth))
            {
                if (context.TestExpression("FaceDetails", targetDepth))
                {
                    var unmarshaller = new ListUnmarshaller <FaceDetail, FaceDetailUnmarshaller>(FaceDetailUnmarshaller.Instance);
                    response.FaceDetails = unmarshaller.Unmarshall(context);
                    continue;
                }
                if (context.TestExpression("OrientationCorrection", targetDepth))
                {
                    var unmarshaller = StringUnmarshaller.Instance;
                    response.OrientationCorrection = unmarshaller.Unmarshall(context);
                    continue;
                }
            }

            return(response);
        }
        public async static Task <FaceDetail> GetFaceDetailFromS3(IAmazonRekognition rekognitionClient, string bucketName, string keyName)
        {
            FaceDetail         result             = null;
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image {
                    S3Object = new S3Object {
                        Bucket = bucketName,
                        Name   = keyName
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                Task <DetectFacesResponse> detectTask          = rekognitionClient.DetectFacesAsync(detectFacesRequest);
                DetectFacesResponse        detectFacesResponse = await detectTask;

                PrintFaceDetails(detectFacesResponse.FaceDetails);

                if (detectFacesResponse.FaceDetails.Count > 0)
                {
                    result = detectFacesResponse.FaceDetails[0]; // take the 1st face only
                }
            }
            catch (AmazonRekognitionException rekognitionException)
            {
                Console.WriteLine(rekognitionException.Message, rekognitionException.InnerException);
            }
            return(result);
        }
Example #5
        public static async Task <string> FunctionHandler(String photo)
        {
            String bucket = "moodanalysis";
            //ArrayList result = new ArrayList();

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },

                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

            int count = detectFacesResponse.FaceDetails.Count;

            //return false;
            return((count == 1) + "");
        }
        public async static Task <FaceDetail> GetFaceDetailFromStream(IAmazonRekognition rekognitionClient, MemoryStream stream)
        {
            FaceDetail         result             = null;
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image {
                    Bytes = stream
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                Task <DetectFacesResponse> detectTask          = rekognitionClient.DetectFacesAsync(detectFacesRequest);
                DetectFacesResponse        detectFacesResponse = await detectTask;

                PrintFaceDetails(detectFacesResponse.FaceDetails);

                if (detectFacesResponse.FaceDetails.Count > 0)
                {
                    result = detectFacesResponse.FaceDetails[0]; // take the 1st face only
                }
            }
            catch (AmazonRekognitionException rekognitionException)
            {
                Console.WriteLine(rekognitionException.Message, rekognitionException.InnerException);
            }
            return(result);
        }
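Both GetFaceDetailFromS3 and GetFaceDetailFromStream call a PrintFaceDetails helper that is not shown in this listing; a minimal sketch, assuming it only writes a short summary of each detected face to the console, might look like this:

        // Hypothetical helper assumed by the two methods above: prints a short
        // summary of each detected face.
        public static void PrintFaceDetails(List <FaceDetail> faceDetails)
        {
            foreach (FaceDetail face in faceDetails)
            {
                Console.WriteLine($"Face confidence: {face.Confidence}");
                Console.WriteLine($"BoundingBox: top={face.BoundingBox.Top} left={face.BoundingBox.Left} width={face.BoundingBox.Width} height={face.BoundingBox.Height}");
            }
        }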
        public DetectFacesResponse DetectFaces(DetectFaceParams dfp)
        {
            DetectFacesResponse resp = null;
            var conf = new AmazonRekognitionConfig()
            {
                RegionEndpoint = dfp.RegEndpoint
            };

            using (recClient = new AmazonRekognitionClient(awsAccessKeyId, awsSecretAccessKey, conf))
            {
                DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
                {
                    Image = new Image()
                    {
                        S3Object = new S3Object()
                        {
                            Name   = dfp.PhotoName,
                            Bucket = dfp.BucketName
                        },
                    },
                    // Attributes can be "ALL" or "DEFAULT".
                    // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                    // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
                    Attributes = new List <String>()
                    {
                        "ALL"
                    }
                };

                try
                {
                    resp = recClient.DetectFaces(detectFacesRequest);

                    if (resp == null)
                    {
                        throw new Exception("AmazonRekognitionClient DetectFaces method call returned null.");
                    }
                    //bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
                    //foreach (FaceDetail face in resp.Result.FaceDetails)
                    //{
                    //    Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                    //        face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                    //    Console.WriteLine("Confidence: {0}\nLandmarks: {1}\nPose: pitch={2} roll={3} yaw={4}\nQuality: {5}",
                    //        face.Confidence, face.Landmarks.Count, face.Pose.Pitch,
                    //        face.Pose.Roll, face.Pose.Yaw, face.Quality);
                    //    if (hasAll)
                    //        Console.WriteLine("The detected face is estimated to be between " +
                    //            face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                    //}
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.Message);
                }
            }

            return(resp);
        }
Example #8
        async void Analyze_Clicked(System.Object sender, System.EventArgs e)
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(AWSAccessKeyID, AWSSecretAccessKey, Amazon.RegionEndpoint.USEast1);

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image()
                {
                    S3Object = new S3Object
                    {
                        Bucket = "babytech-images",
                        Name   = "baby-eyes-mouth-open.jpg"
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    // Rekognition confidence values range from 0 to 100.
                    const float confidence_threshold = 80F;

                    // check if mouth is open
                    if ((face.MouthOpen != null) && (face.MouthOpen.Confidence > confidence_threshold))
                    {
                        FacialAnalysisData += (face.MouthOpen.Value) ? "\n✔ Baby's mouth is open." : "\n❌ Baby's mouth is not open.";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Unable to determine if baby's mouth is open.";
                    }

                    // check if eyes are open
                    if ((face.EyesOpen != null) && (face.EyesOpen.Confidence > confidence_threshold))
                    {
                        FacialAnalysisData += (face.EyesOpen.Value) ? "\n✔ Baby's eyes are open." : "\n❌ Baby's eyes are not open.";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Unable to determine if baby's eyes are open.";
                    }
                }

                DisplayAlert("Analysis Results", FacialAnalysisData, "OK");
            }
            catch (Exception exception)
            {
                Console.WriteLine(exception.Message);
            }
        }
Example #9
        public async Task <State> Run(State state, ILambdaContext context)
        {
            var photo = new Image
            {
                S3Object = new Amazon.Rekognition.Model.S3Object
                {
                    Bucket = state.Bucket,
                    Name   = state.ImageKey
                }
            };

            Console.WriteLine($"Analyzing image {state.Bucket}:{state.ImageKey}");
            var detectLabels = await this.RekognitionClient.DetectLabelsAsync(new DetectLabelsRequest
            {
                MinConfidence = MinConfidence,
                Image         = photo
            });

            var tags = new Dictionary <string, string>();

            foreach (var label in detectLabels.Labels.Take(10))
            {
                Console.WriteLine($"\tFound Label {label.Name} with confidence {label.Confidence}");
                tags.Add(label.Name, label.Confidence.ToString());
            }

            // Now detect faces
            DetectFacesResponse detectFaces = await this.RekognitionClient.DetectFacesAsync(new DetectFacesRequest
            {
                Attributes = new List <string> {
                    "ALL"
                },
                Image = photo
            });

            foreach (var face in detectFaces.FaceDetails)
            {
                Console.WriteLine($"\tFound face {face.BoundingBox.ToString()} with age {face.AgeRange.ToString()}");
            }

            var celebrityResult = await this.RekognitionClient.RecognizeCelebritiesAsync(new RecognizeCelebritiesRequest
            {
                Image = photo
            });

            if (celebrityResult.CelebrityFaces.Count > 0)
            {
                state.Celebrity = celebrityResult.CelebrityFaces.First();
            }

            state.Labels = tags;
            state.Faces  = detectFaces.FaceDetails;

            return(state);
        }
Example #10
        static void IdentifyFaces(string filename)
        {
            // Using USWest2, not the default region
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            // Request needs image bytes, so read and add to request
            byte[] data = File.ReadAllBytes(filename);

            DetectFacesRequest dfr = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data)
                }
            };

            DetectFacesResponse outcome = rekoClient.DetectFaces(dfr);

            if (outcome.FaceDetails.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles
                Bitmap facesHighlighted = new Bitmap(filename);
                Pen    pen = new Pen(Color.Black, 3);

                // Create a graphics context
                using (var graphics = Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in outcome.FaceDetails)
                    {
                        // Get the bounding box
                        BoundingBox bb = fd.BoundingBox;
                        Console.WriteLine($"Bounding box = ({bb.Left}, {bb.Top}, {bb.Height}, {bb.Width})");

                        // Draw the rectangle using the bounding box values
                        // They are percentages so scale them to picture
                        graphics.DrawRectangle(pen,
                                               x: facesHighlighted.Width * bb.Left,
                                               y: facesHighlighted.Height * bb.Top,
                                               width: facesHighlighted.Width * bb.Width,
                                               height: facesHighlighted.Height * bb.Height);
                    }
                }

                // Save the image with highlights as PNG
                string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);

                Console.WriteLine(">>> " + outcome.FaceDetails.Count + " face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No faces found");
            }
        }
Example #11
        /// <summary>
        /// Identifies faces in the image file. If faces are found, the
        /// method draws bounding boxes around them and saves the result.
        /// </summary>
        /// <param name="client">The Rekognition client used to call
        /// DetectFacesAsync.</param>
        /// <param name="filename">The name of the file that potentially
        /// contains images of faces.</param>
        public static async Task IdentifyFaces(AmazonRekognitionClient client, string filename)
        {
            // Request needs image bytes, so read and add to request.
            byte[] data = File.ReadAllBytes(filename);

            DetectFacesRequest request = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data),
                },
            };

            DetectFacesResponse response = await client.DetectFacesAsync(request);

            if (response.FaceDetails.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles.
                Bitmap facesHighlighted = new Bitmap(filename);
                Pen    pen = new Pen(Color.Black, 3);

                // Create a graphics context.
                using (var graphics = System.Drawing.Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in response.FaceDetails)
                    {
                        // Get the bounding box.
                        BoundingBox bb = fd.BoundingBox;
                        Console.WriteLine($"Bounding box = ({bb.Left}, {bb.Top}, {bb.Height}, {bb.Width})");

                        // Draw the rectangle using the bounding box values.
                        // They are percentages so scale them to the picture.
                        graphics.DrawRectangle(
                            pen,
                            x: facesHighlighted.Width * bb.Left,
                            y: facesHighlighted.Height * bb.Top,
                            width: facesHighlighted.Width * bb.Width,
                            height: facesHighlighted.Height * bb.Height);
                    }
                }

                // Save the image with highlights as PNG.
                string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);

                Console.WriteLine(">>> " + response.FaceDetails.Count + " face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No faces found");
            }
        }
        public async Task <IEnumerable <Rectangle> > ExtractFacesAsync()
        {
            if (_facesResponse == null)
            {
                var facesRequest = new DetectFacesRequest()
                {
                    Image = _rekognitionImage
                };
                _facesResponse = await _client.DetectFacesAsync(facesRequest);
            }

            return(ExtractFaces());
        }
        // snippet-start:[Rekognition.dotnetv3.DetectFacesExample]
        public static async Task Main()
        {
            string photo  = "input.jpg";
            string bucket = "bucket";

            var rekognitionClient = new AmazonRekognitionClient();

            var detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket,
                    },
                },

                // Attributes can be "ALL" or "DEFAULT".
                // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
                Attributes = new List <string>()
                {
                    "ALL"
                },
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    Console.WriteLine($"BoundingBox: top={face.BoundingBox.Left} left={face.BoundingBox.Top} width={face.BoundingBox.Width} height={face.BoundingBox.Height}");
                    Console.WriteLine($"Confidence: {face.Confidence}");
                    Console.WriteLine($"Landmarks: {face.Landmarks.Count}");
                    Console.WriteLine($"Pose: pitch={face.Pose.Pitch} roll={face.Pose.Roll} yaw={face.Pose.Yaw}");
                    Console.WriteLine($"Brightness: {face.Quality.Brightness}\tSharpness: {face.Quality.Sharpness}");

                    if (hasAll)
                    {
                        Console.WriteLine($"Estimated age is between {face.AgeRange.Low} and {face.AgeRange.High} years old.");
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
        private List <Mat> detectFace(Bitmap bitmap)
        {
            Mat src = null;

            try
            {
                src = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap);
            }catch (Exception e)
            {
            }
            Amazon.Rekognition.Model.Image image = Utils.bitmapToAWSImage(bitmap);

            DetectFacesRequest request = new DetectFacesRequest()
            {
                Image = image
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(request);

                float bitmapWidth  = (float)bitmap.Width;
                float bitmapHeight = (float)bitmap.Height;

                List <Mat> matList = new List <Mat>();

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    int faceLeft   = (int)(face.BoundingBox.Left * bitmapWidth);
                    int faceTop    = (int)(face.BoundingBox.Top * bitmapHeight);
                    int faceWidth  = (int)(face.BoundingBox.Width * bitmapWidth);
                    int faceHeight = (int)(face.BoundingBox.Height * bitmapHeight);

                    Rect rectCrop = new Rect(faceLeft, faceTop, faceWidth, faceHeight);
                    //Console.WriteLine("Confidence : {0}\nAge :" + face.Confidence + ", " + face.BoundingBox.Top + ", " + face.BoundingBox.Left + ", " +
                    //    face.BoundingBox.Height + ", " + face.BoundingBox.Width);

                    Mat img = new Mat(src, rectCrop);
                    matList.Add(img);
                }

                return(matList);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }

            return(null);
        }
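detectFace relies on a Utils.bitmapToAWSImage helper that is not included above; a minimal sketch, assuming it simply serializes the bitmap into the request's byte stream, could be:

        // Hypothetical helper assumed by detectFace above: converts a System.Drawing.Bitmap
        // into an Amazon.Rekognition.Model.Image by writing it to a memory stream.
        public static Amazon.Rekognition.Model.Image bitmapToAWSImage(Bitmap bitmap)
        {
            var stream = new MemoryStream();
            bitmap.Save(stream, System.Drawing.Imaging.ImageFormat.Jpeg);
            stream.Position = 0;

            return new Amazon.Rekognition.Model.Image {
                Bytes = stream
            };
        }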
        public IEnumerable <Rectangle> ExtractFaces()
        {
            if (_facesResponse == null)
            {
                var facesRequest = new DetectFacesRequest()
                {
                    Image = _rekognitionImage
                };
                _facesResponse = _client.DetectFacesAsync(facesRequest).Result;
            }

            return(_facesResponse.FaceDetails.Select(f =>
                                                     AmazonRekognitionCoordinateTranslator.RelativeBoxToAbsolute(f.BoundingBox, _width, _height)));
        }
Example #16
    public static void Example()
    {
        String photo  = "input.jpg";
        String bucket = "bucket";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
        {
            Image = new Image()
            {
                S3Object = new S3Object()
                {
                    Name   = photo,
                    Bucket = bucket
                },
            },
            // Attributes can be "ALL" or "DEFAULT".
            // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
            // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
            Attributes = new List <String>()
            {
                "ALL"
            }
        };

        try
        {
            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
            bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                                  face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                Console.WriteLine("Confidence: {0}\nLandmarks: {1}\nPose: pitch={2} roll={3} yaw={4}\nQuality: {5}",
                                  face.Confidence, face.Landmarks.Count, face.Pose.Pitch,
                                  face.Pose.Roll, face.Pose.Yaw, face.Quality);
                if (hasAll)
                {
                    Console.WriteLine("The detected face is estimated to be between " +
                                      face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                }
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }
Example #17
 public static IAsyncOperation <string> GetFaceDetails(string base64, string AccessKey, string SecretKey)
 {
     return(Task.Run <string>(async() =>
     {
         byte[] imageBytes;
         try
         {
             base64 = base64.Substring(base64.IndexOf(',') + 1).Trim('\0');
             imageBytes = System.Convert.FromBase64String(base64);
         }
         catch (Exception e) {
             return e.Message;
         }
         string sJSONResponse = "";
         AWSCredentials credentials;
         try
         {
             credentials = new BasicAWSCredentials(AccessKey, SecretKey);
         }
         catch (Exception e)
         {
             throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                                             + "Please make sure that your credentials file is at the correct "
                                             + "location (/Users/<userid>/.aws/credentials), and is in a valid format.", e);
         }
         DetectFacesRequest request = new DetectFacesRequest {
             Attributes = new List <string>(new string[] { "ALL" })
         };
         DetectFacesResponse result = null;
         request.Image = new Image {
             Bytes = new MemoryStream(imageBytes, 0, imageBytes.Length)
         };
         AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(credentials, RegionEndpoint.USWest2);
         try
         {
             result = await rekognitionClient.DetectFacesAsync(request);
         }
          catch (AmazonRekognitionException)
          {
              throw;
          }
          // Serialize the detected face details to JSON for the response.
         sJSONResponse = JsonConvert.SerializeObject(result.FaceDetails);
         return sJSONResponse;
     }).AsAsyncOperation());
 }
        public string GetImageInfo(ImageData imageData, byte[] imgData = null)
        {
            try
            {
                var path = Path.Combine(
                    Directory.GetCurrentDirectory(), "wwwroot",
                    imageData.fileName);
                imgData = Convert.FromBase64String(imageData.base64Data);

                _imageData = new MemoryStream(imgData);
                DetectFacesRequest detectFaces = new DetectFacesRequest()
                {
                    Image = new Image()
                    {
                        Bytes = _imageData
                    }
                };

                DetectFacesResponse facesResponse = _rekognitionClient.DetectFacesAsync(detectFaces).Result;

                List <FaceDetail> lstCelebrities = facesResponse.FaceDetails;

                FaceDetail faceDetail = new FaceDetail();

                StringBuilder sbCelebrities = new StringBuilder();
                //foreach (var item in lstCelebrities)
                //{
                //    switch (switch_on)
                //    {
                //        default:
                //    }
                //}
                string Celebrities = sbCelebrities.ToString().TrimEnd(',');

                return(Celebrities);
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
                throw;
            }
        }
Example #19
        public List <Emotion> EmotionDetect(string _image)
        {
            {
                String         photo  = _image;
                String         bucket = "ngankhanh98";
                List <Emotion> response;

                AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

                DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
                {
                    Image = new Amazon.Rekognition.Model.Image()
                    {
                        S3Object = new S3Object()
                        {
                            Name   = photo,
                            Bucket = bucket
                        },
                    },
                    Attributes = new List <String>()
                    {
                        "ALL"
                    }
                };

                try
                {
                    DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
                    bool       hasAll = detectFacesRequest.Attributes.Contains("ALL");
                    FaceDetail face   = detectFacesResponse.FaceDetails[0];

                    return(face.Emotions);
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.Message);
                    return(null);
                }
            }
        }
Example #20
        private static void detectFace(Amazon.Rekognition.Model.Image image, AmazonRekognitionClient rekognitionClient)
        {
            DetectFacesRequest request = new DetectFacesRequest()
            {
                Image = image
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(request);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    Console.WriteLine("Confidence : {0}\nAge :" + face.Confidence + ", " + face.BoundingBox.Top + ", " + face.BoundingBox.Left + ", " +
                                      face.BoundingBox.Height + ", " + face.BoundingBox.Width);
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
Example #21
        public async Task IdentifyFacesTests()
        {
            var mockClient = new Mock <AmazonRekognitionClient>();

            mockClient.Setup(client => client.DetectFacesAsync(
                                 It.IsAny <DetectFacesRequest>(),
                                 It.IsAny <CancellationToken>()
                                 )).Returns((DetectFacesRequest r, CancellationToken token) =>
            {
                return(Task.FromResult(new DetectFacesResponse()
                {
                    HttpStatusCode = System.Net.HttpStatusCode.OK,
                }));
            });

            byte[] data = File.ReadAllBytes(_filename);

            DetectFacesRequest request = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data),
                },
            };

            var client = mockClient.Object;
            DetectFacesResponse response = await client.DetectFacesAsync(request);

            bool gotResult = response is not null;

            Assert.True(gotResult, "DetectFacesAsync returned a response.");

            bool ok = response.HttpStatusCode == System.Net.HttpStatusCode.OK;

            Assert.True(ok, $"Successfully searched image for faces.");
        }
Example #22
        public static async Task <string> FunctionHandler(String photo)
        {
            String bucket = "moodanalysis";
            //ArrayList result = new ArrayList();
            string result = "";

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            // Recognizes User's face
            CompareFacesRequest CFR = new CompareFacesRequest()
            {
                //SimilarityThreshold = 50,

                SourceImage = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = "referencePhoto.jpg",
                        Bucket = bucket
                    },
                },

                TargetImage = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },
            };

            CompareFacesResponse compareFacesResponse = await rekognitionClient.CompareFacesAsync(CFR);

            string howManyFaces = "";

            if (compareFacesResponse.FaceMatches.Count == 0)
            {
                return("");
            }

            //int index = 0, bestIndex = 0;
            var         bestMatch       = compareFacesResponse.FaceMatches[0];
            float       bestMatchResult = compareFacesResponse.FaceMatches[0].Similarity;
            BoundingBox bestBoundingBox = compareFacesResponse.FaceMatches[0].Face.BoundingBox;

            foreach (var faceMatch in compareFacesResponse.FaceMatches)
            {
                howManyFaces += faceMatch.Similarity + ",";

                if (bestMatchResult < faceMatch.Similarity)
                {
                    bestMatch       = faceMatch;
                    bestBoundingBox = faceMatch.Face.BoundingBox;
                    bestMatchResult = faceMatch.Similarity;
                }
            }

            // Detects emotions of faces in photo
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },

                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

            //int i = 0;
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                if (face.BoundingBox.Height == bestBoundingBox.Height &&
                    face.BoundingBox.Left == bestBoundingBox.Left &&
                    face.BoundingBox.Top == bestBoundingBox.Top &&
                    face.BoundingBox.Width == bestBoundingBox.Width)
                {
                    //var emotQuery = FilterEmotions(face, IsLowConfidence);

                    FilterEmotions filter = delegate(FaceDetail faceFilter, ConfidenceFilterDelegate confFilter)
                    {
                        return(faceFilter.Emotions.FindAll(n => confFilter(n)).ToList());
                    };

                    var emotQuery = filter(face, IsLowConfidence);

                    //IEnumerable<Emotion> emotQuery =
                    //    from faceEmotion in face.Emotions
                    //    where faceEmotion.Confidence > 10
                    //    select faceEmotion;

                    // GRAB THE EMOTION
                    foreach (Emotion emot in emotQuery)
                    {
                        result += emot.Type + ",";
                    }

                    break;
                }
            }

            //delete the last ,
            if (result.Length != 0)
            {
                result = result.Substring(0, result.Length - 1);
            }

            return(result);
        }
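This handler also uses a FilterEmotions delegate, a ConfidenceFilterDelegate, and an IsLowConfidence method declared elsewhere in the class; a minimal sketch consistent with how they are used above (and with the commented-out query's Confidence > 10 cutoff) might be:

        // Hypothetical declarations assumed by the handler above.
        public delegate bool ConfidenceFilterDelegate(Emotion emotion);
        public delegate List <Emotion> FilterEmotions(FaceDetail face, ConfidenceFilterDelegate confidenceFilter);

        // Keeps emotions whose confidence clears a low cutoff, mirroring the
        // commented-out LINQ query in the handler.
        private static bool IsLowConfidence(Emotion emotion)
        {
            return emotion.Confidence > 10;
        }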
Example #23
        public EmotionalData DetectImage(byte[] image)
        {
            var awsImage = new Image();

            try
            {
                //using (FileStream stream = new FileStream(image, FileMode.Open, FileAccess.Read))
                //{
                //    byte[] data = new byte[stream.Length];
                //    stream.Read(data, 0, (int)stream.Length);
                //    awsImage.Bytes = new MemoryStream(data);
                //}
                awsImage.Bytes = new MemoryStream(image);
            }
            catch (Exception ex)
            {
            }

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(
                "", "", RegionEndpoint.APSoutheast1
                );
            DetectLabelsRequest detectlabelsRequest = new DetectLabelsRequest()
            {
                Image         = awsImage,
                MaxLabels     = 10,
                MinConfidence = 77F
            };

            var faceAttr = new List <string>();

            //faceAttr.Add("Emotions");
            faceAttr.Add("ALL");

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest
            {
                Attributes = faceAttr,
                Image      = awsImage
            };

            try
            {
                //DetectLabelsResponse detectLabelsResponse =
                //rekognitionClient.DetectLabels(detectlabelsRequest);
                //foreach (Label label in detectLabelsResponse.Labels)
                //    Console.WriteLine("{0}: {1}", label.Name, label.Confidence);

                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
                if (detectFacesResponse.FaceDetails != null && detectFacesResponse.FaceDetails.Count > 0)
                {
                    var customerFace        = detectFacesResponse.FaceDetails.OrderByDescending(x => x.Confidence).FirstOrDefault();
                    var customerEmotionData = customerFace.Emotions.OrderByDescending(x => x.Confidence).FirstOrDefault();

                    return(new EmotionalData
                    {
                        Emotion = customerEmotionData.Type,
                        Probability = Convert.ToDecimal(customerEmotionData.Confidence)
                    });
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
            return(new EmotionalData());
        }
Example #24
        // Face detection method
        private async Task FacialRecognitionScan(ApplicationUser user, UsersInGymDetail currentFacilityDetail)
        {
            // initialize similarity threshold for accepting face match, source and target img.
            // S3 bucket img, dynamically selected based on user currently logged in.
            float  similarityThreshold = 70F;
            string photo       = $"{user.FirstName}_{user.Id}.jpg";
            String targetImage = $"{user.FirstName}_{user.Id}_Target.jpg";

            try
            {
                // create image objects
                Image imageSource = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                };
                Image imageTarget = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = targetImage,
                        Bucket = bucket
                    },
                };
                // create a compare face request object
                CompareFacesRequest compareFacesRequest = new CompareFacesRequest()
                {
                    SourceImage         = imageSource,
                    TargetImage         = imageTarget,
                    SimilarityThreshold = similarityThreshold
                };

                // detect face features of img scanned
                CompareFacesResponse compareFacesResponse = await AmazonRekognition.CompareFacesAsync(compareFacesRequest);

                // Display results
                foreach (CompareFacesMatch match in compareFacesResponse.FaceMatches)
                {
                    ComparedFace face = match.Face;
                    // if confidence for similarity is over 90 then grant access
                    if (match.Similarity > 90)
                    {
                        // if there is a match set scan success
                        user.IsCameraScanSuccessful = true;
                    }
                    else
                    {
                        ViewBag.MatchResult = "Facial Match Failed!";
                    }
                }
            }
            catch (Exception e)
            {
                _logger.LogInformation(e.Message);
            }

            // now add get facial details to display in the view.
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = targetImage,
                        Bucket = bucket
                    },
                },
                // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await AmazonRekognition.DetectFacesAsync(detectFacesRequest);

                bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    // if the face found has all attributes within a Detect Face object then save these values to the database.
                    if (hasAll)
                    {
                        currentFacilityDetail.IsSmiling    = face.Smile.Value;
                        currentFacilityDetail.Gender       = face.Gender.Value.ToString();
                        currentFacilityDetail.AgeRangeLow  = face.AgeRange.Low;
                        currentFacilityDetail.AgeRangeHigh = face.AgeRange.High;
                    }
                }
            }
            catch (Exception e)
            {
                _logger.LogInformation(e.Message);
            }
        }
Example #25
    public static void Example()
    {
        String photo = "photo.jpg";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        Image image = new Image();

        try
        {
            using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
            {
                byte[] data = null;
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
        }
        catch (Exception)
        {
            Console.WriteLine("Failed to load file " + photo);
            return;
        }

        int height;
        int width;

        // Used to extract original photo width/height
        using (System.Drawing.Bitmap imageBitmap = new System.Drawing.Bitmap(photo))
        {
            height = imageBitmap.Height;
            width  = imageBitmap.Width;
        }

        Console.WriteLine("Image Information:");
        Console.WriteLine(photo);
        Console.WriteLine("Image Height: " + height);
        Console.WriteLine("Image Width: " + width);

        try
        {
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image      = image,
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                Console.WriteLine("Face:");
                ShowBoundingBoxPositions(height, width,
                                         face.BoundingBox, detectFacesResponse.OrientationCorrection);
                Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                                  face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                Console.WriteLine("The detected face is estimated to be between " +
                                  face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                Console.WriteLine();
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }
Example #26
        async void Analyze_Clicked(System.Object sender, System.EventArgs e)
        {
            CognitoAWSCredentials credentials = new CognitoAWSCredentials(
                AWS.IdentityPoolId,
                RegionEndpoint.USEast1
                );

            S3Uploader uploader   = new S3Uploader(credentials);
            string     bucketName = "babytech-images";
            string     keyName    = patient.PatientID + ".jpg";
            await uploader.UploadFileAsync(bucketName, keyName, PhotoPath);

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(credentials, Amazon.RegionEndpoint.USEast1);

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image()
                {
                    S3Object = new S3Object
                    {
                        Bucket = bucketName,
                        Name   = keyName
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    // check if mouth is open
                    if ((face.MouthOpen != null) && (face.MouthOpen.Value))
                    {
                        FacialAnalysisData += "\n❌ Baby's mouth should be closed";
                    }
                    if ((face.MouthOpen != null) && (!face.MouthOpen.Value) && (face.MouthOpen.Confidence > 88F))
                    {
                        FacialAnalysisData += "\n✔ Baby's mouth should be closed";
                    }

                    // check if eyes are open

                    if ((face.EyesOpen != null) && (face.EyesOpen.Value))
                    {
                        FacialAnalysisData += "\n✔ Baby's eyes should be open";
                    }
                    if ((face.EyesOpen != null) && (!face.EyesOpen.Value) && (face.EyesOpen.Confidence > 93F))
                    {
                        FacialAnalysisData += "\n❌ Baby's eyes should be open";
                    }

                    // check for eyeglasses
                    if ((face.Eyeglasses != null) && (face.Eyeglasses.Value))
                    {
                        FacialAnalysisData += "\n❌ Baby should not be wearing eyeglasses";
                    }
                    if ((face.Eyeglasses != null) && (!face.Eyeglasses.Value))
                    {
                        FacialAnalysisData += "\n✔ Baby should not be wearing eyeglasses";
                    }

                    //check brightness (quality values are on a 0-100 scale)
                    if ((face.Quality != null) && (face.Quality.Brightness > 61F) && (face.Quality.Brightness < 97F))
                    {
                        FacialAnalysisData += "\n✔  Picture is acceptable brightness";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌  Picture is acceptable brightness";
                    }

                    //check sharpness
                    if ((face.Quality != null) && (face.Quality.Sharpness > 67F))
                    {
                        FacialAnalysisData += "\n✔  Picture is acceptable sharpness";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Picture is acceptable sharpness";
                    }


                    // check for smile
                    if ((face.Smile != null) && (face.Smile.Value))
                    {
                        FacialAnalysisData += "\n❌ Baby should not be smiling";
                    }
                    if ((face.Smile != null) && (!face.Smile.Value))
                    {
                        FacialAnalysisData += "\n✔ Baby should not be smiling";
                    }


                    // check for calm expression
                    Emotion calmEmotion = face.Emotions.Find(emotion => emotion.Type == "CALM");

                    if ((calmEmotion != null) && (calmEmotion.Confidence > 93F))
                    {
                        FacialAnalysisData += "\n ✔ Baby should have a neutral facial expression";
                    }
                    else
                    {
                        FacialAnalysisData += "\n ❌ Baby should have a neutral facial expression";
                    }


                }

                await DisplayAlert("Analysis Results", FacialAnalysisData, "OK");
            }
            catch (Exception exception)
            {
                Console.WriteLine(exception.Message);
            }
        }
Example #27
        // snippet-start:[Rekognition.dotnetv3.ImageOrientationBoundingBox]
        public static async Task Main()
        {
            string photo = @"D:\Development\AWS-Examples\Rekognition\target.jpg"; // "photo.jpg";

            var rekognitionClient = new AmazonRekognitionClient();

            var image = new Amazon.Rekognition.Model.Image();

            try
            {
                using var fs = new FileStream(photo, FileMode.Open, FileAccess.Read);
                byte[] data = null;
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
            catch (Exception)
            {
                Console.WriteLine("Failed to load file " + photo);
                return;
            }

            int height;
            int width;

            // Used to extract original photo width/height
            using (var imageBitmap = new Bitmap(photo))
            {
                height = imageBitmap.Height;
                width  = imageBitmap.Width;
            }

            Console.WriteLine("Image Information:");
            Console.WriteLine(photo);
            Console.WriteLine("Image Height: " + height);
            Console.WriteLine("Image Width: " + width);

            try
            {
                var detectFacesRequest = new DetectFacesRequest()
                {
                    Image      = image,
                    Attributes = new List <string>()
                    {
                        "ALL"
                    },
                };

                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                detectFacesResponse.FaceDetails.ForEach(face =>
                {
                    Console.WriteLine("Face:");
                    ShowBoundingBoxPositions(
                        height,
                        width,
                        face.BoundingBox,
                        detectFacesResponse.OrientationCorrection);

                    Console.WriteLine($"BoundingBox: top={face.BoundingBox.Left} left={face.BoundingBox.Top} width={face.BoundingBox.Width} height={face.BoundingBox.Height}");
                    Console.WriteLine($"The detected face is estimated to be between {face.AgeRange.Low} and {face.AgeRange.High} years old.\n");
                });
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
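Both of the bounding-box examples above call a ShowBoundingBoxPositions helper that is not reproduced here; a minimal sketch, assuming the orientation value is available as a string (pass OrientationCorrection.Value if your SDK version exposes it as a constant class), could be:

        // Hypothetical helper assumed by the two examples above. For an image that
        // needs no correction it prints the box in pixel coordinates; otherwise it
        // just reports which orientation correction applies.
        public static void ShowBoundingBoxPositions(int imageHeight, int imageWidth, BoundingBox box, string rotation)
        {
            if (rotation == null || rotation == "ROTATE_0")
            {
                Console.WriteLine($"Left: {(int)(imageWidth * box.Left)}, Top: {(int)(imageHeight * box.Top)}, " +
                                  $"Width: {(int)(imageWidth * box.Width)}, Height: {(int)(imageHeight * box.Height)}");
            }
            else
            {
                Console.WriteLine($"Orientation correction {rotation} applies; rotate the coordinates accordingly.");
            }
        }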