Example 1
        public async Task <bool> ProcessAsync(string bucketName, string fileName)
        {
            // form the request
            var detectRequest = new DetectFacesRequest
            {
                Image = new Image {
                    S3Object = new S3Object {
                        Bucket = bucketName, Name = fileName
                    }
                }
            };

            // detect any possible faces
            var response = await _rekognitionClient.DetectFacesAsync(detectRequest);

            if (response == null)
            {
                throw new ApplicationException(Messages.RESPONSE_NULL);
            }

            if (response.FaceDetails.Any())
            {
                return(true);
            }

            return(false);
        }
Example 2
        public static async Task <string> FunctionHandler(String photo)
        {
            String bucket = "moodanalysis";
            //ArrayList result = new ArrayList();

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },

                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

            int count = detectFacesResponse.FaceDetails.Count;

            //return false;
            return((count == 1) + "");
        }
Example 3
        public async Task <FindFacesResponse> DetectFacesAsync(string sourceImage)
        {
            // Convert the source image into a MemoryStream object
            var imageSource = new Image();

            imageSource.Bytes = _serviceUtils.ConvertImageToMemoryStream(sourceImage);

            // Build the request object for the AWS Rekognition call
            var request = new DetectFacesRequest
            {
                Attributes = new List <string> {
                    "DEFAULT"
                },
                Image = imageSource
            };

            // Call the DetectFaces service
            var response = await _rekognitionClient.DetectFacesAsync(request);

            // Call the box-drawing helper and get the generated URL
            var fileName = _serviceUtils.Drawing(imageSource.Bytes, response.FaceDetails);

            // Return the object containing the generated URL
            return(new FindFacesResponse(fileName));
        }
Example 4
        public async static Task <FaceDetail> GetFaceDetailFromS3(IAmazonRekognition rekognitionClient, string bucketName, string keyName)
        {
            FaceDetail         result             = null;
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image {
                    S3Object = new S3Object {
                        Bucket = bucketName,
                        Name   = keyName
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                Task <DetectFacesResponse> detectTask          = rekognitionClient.DetectFacesAsync(detectFacesRequest);
                DetectFacesResponse        detectFacesResponse = await detectTask;

                PrintFaceDetails(detectFacesResponse.FaceDetails);

                if (detectFacesResponse.FaceDetails.Count > 0)
                {
                    result = detectFacesResponse.FaceDetails[0]; // take the 1st face only
                }
            }
            catch (AmazonRekognitionException rekognitionException)
            {
                Console.WriteLine(rekognitionException.Message, rekognitionException.InnerException);
            }
            return(result);
        }
Example 5
        internal DetectFacesResponse DetectFaces(DetectFacesRequest request)
        {
            var marshaller   = new DetectFacesRequestMarshaller();
            var unmarshaller = DetectFacesResponseUnmarshaller.Instance;

            return(Invoke <DetectFacesRequest, DetectFacesResponse>(request, marshaller, unmarshaller));
        }
Example 6
        public async static Task <FaceDetail> GetFaceDetailFromStream(IAmazonRekognition rekognitionClient, MemoryStream stream)
        {
            FaceDetail         result             = null;
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image {
                    Bytes = stream
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                Task <DetectFacesResponse> detectTask          = rekognitionClient.DetectFacesAsync(detectFacesRequest);
                DetectFacesResponse        detectFacesResponse = await detectTask;

                PrintFaceDetails(detectFacesResponse.FaceDetails);

                if (detectFacesResponse.FaceDetails.Count > 0)
                {
                    result = detectFacesResponse.FaceDetails[0]; // take the 1st face only
                }
            }
            catch (AmazonRekognitionException rekognitionException)
            {
                Console.WriteLine(rekognitionException.Message, rekognitionException.InnerException);
            }
            return(result);
        }
Example 7
        private Dictionary <string, float> getemotion(MemoryStream image)
        {
            Dictionary <string, float> emos = new Dictionary <string, float>();
            IAmazonRekognition         reg  = new AmazonRekognitionClient(ConfigurationManager.AppSettings["AWSAccessKey"], ConfigurationManager.AppSettings["AWSSecretKey"], RegionEndpoint.EUWest1);

            var request = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image {
                    Bytes = image
                },
                Attributes = new List <string>
                {
                    "ALL"
                }
            };

            var respFace = reg.DetectFaces(request);

            foreach (var detail in respFace.FaceDetails)
            {
                foreach (var item in detail.Emotions)
                {
                    if (!emos.ContainsKey(item.Type))
                    {
                        emos.Add(item.Type, item.Confidence);
                    }
                }
            }
            return(emos);
        }
Example 8
        /// <summary>
        /// Initiates the asynchronous execution of the DetectFaces operation.
        /// </summary>
        ///
        /// <param name="request">Container for the necessary parameters to execute the DetectFaces operation.</param>
        /// <param name="cancellationToken">
        ///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
        /// </param>
        /// <returns>The task object representing the asynchronous operation.</returns>
        public Task <DetectFacesResponse> DetectFacesAsync(DetectFacesRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
        {
            var marshaller   = new DetectFacesRequestMarshaller();
            var unmarshaller = DetectFacesResponseUnmarshaller.Instance;

            return(InvokeAsync <DetectFacesRequest, DetectFacesResponse>(request, marshaller,
                                                                         unmarshaller, cancellationToken));
        }
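A minimal usage sketch for this async operation, assuming valid AWS credentials are configured and that the code runs inside an async method; the bucket and object names below are placeholders:

        var client = new AmazonRekognitionClient();
        using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30)))
        {
            // Pass the cancellation token so the call can be abandoned if it runs too long.
            DetectFacesResponse response = await client.DetectFacesAsync(new DetectFacesRequest
            {
                Image = new Image { S3Object = new S3Object { Bucket = "my-bucket", Name = "photo.jpg" } }
            }, cts.Token);

            Console.WriteLine($"Faces detected: {response.FaceDetails.Count}");
        }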
Example 9
        public DetectFacesResponse DetectFaces(DetectFaceParams dfp)
        {
            DetectFacesResponse resp = null;
            var conf = new AmazonRekognitionConfig()
            {
                RegionEndpoint = dfp.RegEndpoint
            };

            using (recClient = new AmazonRekognitionClient(awsAccessKeyId, awsSecretAccessKey, conf))
            {
                DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
                {
                    Image = new Image()
                    {
                        S3Object = new S3Object()
                        {
                            Name   = dfp.PhotoName,
                            Bucket = dfp.BucketName
                        },
                    },
                    // Attributes can be "ALL" or "DEFAULT".
                    // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                    // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
                    Attributes = new List <String>()
                    {
                        "ALL"
                    }
                };

                try
                {
                    resp = recClient.DetectFaces(detectFacesRequest);

                    if (resp == null)
                    {
                        throw new Exception("AmazonRekognitionClient DetectFaces method call return null.");
                    }
                    //bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
                    //foreach (FaceDetail face in resp.Result.FaceDetails)
                    //{
                    //    Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                    //        face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                    //    Console.WriteLine("Confidence: {0}\nLandmarks: {1}\nPose: pitch={2} roll={3} yaw={4}\nQuality: {5}",
                    //        face.Confidence, face.Landmarks.Count, face.Pose.Pitch,
                    //        face.Pose.Roll, face.Pose.Yaw, face.Quality);
                    //    if (hasAll)
                    //        Console.WriteLine("The detected face is estimated to be between " +
                    //            face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                    //}
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.Message);
                }
            }

            return(resp);
        }
Example 10
        private static async Task <DetectFacesResponse> IdentifyFaces(Image image)
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            DetectFacesRequest request = new DetectFacesRequest();

            request.Image = image;
            return(await rekognitionClient.DetectFacesAsync(request));
        }
Example 11
        async void Analyze_Clicked(System.Object sender, System.EventArgs e)
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(AWSAccessKeyID, AWSSecretAccessKey, Amazon.RegionEndpoint.USEast1);

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image()
                {
                    S3Object = new S3Object
                    {
                        Bucket = "babytech-images",
                        Name   = "baby-eyes-mouth-open.jpg"
                    }
                },
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    // Rekognition confidence values are percentages in the 0-100 range.
                    const float confidence_threshold = 80F;

                    // check if mouth is open
                    if ((face.MouthOpen != null) && (face.MouthOpen.Confidence > confidence_threshold))
                    {
                        FacialAnalysisData += (face.MouthOpen.Value) ? "\n✔ Baby's mouth is open." : "\n❌ Baby's mouth is not open.";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Unable to determine if baby's mouth is open.";
                    }

                    // check if eyes are open
                    if ((face.EyesOpen != null) && (face.EyesOpen.Confidence > confidence_threshold))
                    {
                        FacialAnalysisData += (face.EyesOpen.Value) ? "\n✔ Baby's eyes are open." : "\n❌ Baby's eyes are not open.";
                    }
                    else
                    {
                        FacialAnalysisData += "\n❌ Unable to determine if baby's eyes are open.";
                    }
                }

                DisplayAlert("Analysis Results", FacialAnalysisData, "OK");
            }
            catch (Exception exception)
            {
                Console.WriteLine(exception.Message);
            }
        }
Example 12
        static void IdentifyFaces(string filename)
        {
            // Using USWest2, not the default region
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            // Request needs image bytes, so read and add to request
            byte[] data = File.ReadAllBytes(filename);

            DetectFacesRequest dfr = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data)
                }
            };

            DetectFacesResponse outcome = rekoClient.DetectFaces(dfr);

            if (outcome.FaceDetails.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles
                Bitmap facesHighlighted = new Bitmap(filename);
                Pen    pen = new Pen(Color.Black, 3);

                // Create a graphics context
                using (var graphics = Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in outcome.FaceDetails)
                    {
                        // Get the bounding box
                        BoundingBox bb = fd.BoundingBox;
                        Console.WriteLine($"Bounding box = ({bb.Left}, {bb.Top}, {bb.Height}, {bb.Width})");

                        // Draw the rectangle using the bounding box values
                        // They are percentages so scale them to picture
                        graphics.DrawRectangle(pen,
                                               x: facesHighlighted.Width * bb.Left,
                                               y: facesHighlighted.Height * bb.Top,
                                               width: facesHighlighted.Width * bb.Width,
                                               height: facesHighlighted.Height * bb.Height);
                    }
                }

                // Save the image with highlights as PNG
                string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);

                Console.WriteLine(">>> " + outcome.FaceDetails.Count + " face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No faces found");
            }
        }
Example 13
        public async Task <List <DetectedFaceDetail> > GetFaceDetailsFromLocalFileAsync(string filePath)
        {
            // Create a MemoryStream from the image file
            var imageStream = await GenerateImageStreamFromLocalFileAsync(filePath);

            if (imageStream == null)
            {
                return(null);
            }

            try
            {
                // Create the AWS Rekognition client
                using (var rekognitionClient = new AmazonRekognitionClient(Secrets.AccessKey,
                                                                           Secrets.SecretKey,
                                                                           RegionEndpoint.APNortheast1))
                {
                    var request = new DetectFacesRequest
                    {
                        Image = new Image
                        {
                            Bytes = imageStream
                        },
                        Attributes = new List <string> {
                            "ALL"
                        }
                    };

                    // Receive the response and extract the required details
                    var response = await rekognitionClient.DetectFacesAsync(request);

                    var faceList = new List <DetectedFaceDetail>();
                    foreach (var face in response.FaceDetails)
                    {
                        faceList.Add(new DetectedFaceDetail
                        {
                            Gender              = face.Gender.Value,
                            GenderConfidence    = face.Gender.Confidence,
                            HappinessConfidence = face.Emotions.Find(x => x.Type.Value == EmotionName.HAPPY).Confidence,
                            AgeRangeHigh        = face.AgeRange.High,
                            AgeRangeLow         = face.AgeRange.Low
                        });
                    }

                    return(faceList);
                }
            }
            catch (Exception e)
            {
                Debug.WriteLine(e.Message);
            }

            return(null);
        }
Example 14
        /// <summary>
        /// Identifies faces in the image file. If faces are found, the
        /// method adds bounding boxes.
        /// </summary>
        /// <param name="client">The Rekognition client used to call
        /// DetectFacesAsync.</param>
        /// <param name="filename">The name of the file that potentially
        /// contains one or more faces.</param>
        public static async Task IdentifyFaces(AmazonRekognitionClient client, string filename)
        {
            // Request needs image bytes, so read and add to request.
            byte[] data = File.ReadAllBytes(filename);

            DetectFacesRequest request = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data),
                },
            };

            DetectFacesResponse response = await client.DetectFacesAsync(request);

            if (response.FaceDetails.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles.
                Bitmap facesHighlighted = new Bitmap(filename);
                Pen    pen = new Pen(Color.Black, 3);

                // Create a graphics context.
                using (var graphics = System.Drawing.Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in response.FaceDetails)
                    {
                        // Get the bounding box.
                        BoundingBox bb = fd.BoundingBox;
                        Console.WriteLine($"Bounding box = ({bb.Left}, {bb.Top}, {bb.Height}, {bb.Width})");

                        // Draw the rectangle using the bounding box values.
                        // They are percentages so scale them to the picture.
                        graphics.DrawRectangle(
                            pen,
                            x: facesHighlighted.Width * bb.Left,
                            y: facesHighlighted.Height * bb.Top,
                            width: facesHighlighted.Width * bb.Width,
                            height: facesHighlighted.Height * bb.Height);
                    }
                }

                // Save the image with highlights as PNG.
                string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);

                Console.WriteLine(">>> " + response.FaceDetails.Count + " face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No faces found");
            }
        }
Example 15
        public DetectFacesResponse IdentifyFaces(byte[] request)
        {
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(_credentials, Amazon.RegionEndpoint.USEast2);

            DetectFacesRequest dfr = new DetectFacesRequest();

            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();

            img.Bytes = new MemoryStream(request);
            dfr.Image = img;

            return(rekoClient.DetectFaces(dfr));
        }
Example 16
        public async Task <IEnumerable <Rectangle> > ExtractFacesAsync()
        {
            if (_facesResponse == null)
            {
                var facesRequest = new DetectFacesRequest()
                {
                    Image = _rekognitionImage
                };
                _facesResponse = await _client.DetectFacesAsync(facesRequest);
            }

            return(ExtractFaces());
        }
Example 17
        // snippet-start:[Rekognition.dotnetv3.DetectFacesExample]
        public static async Task Main()
        {
            string photo  = "input.jpg";
            string bucket = "bucket";

            var rekognitionClient = new AmazonRekognitionClient();

            var detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket,
                    },
                },

                // Attributes can be "ALL" or "DEFAULT".
                // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
                Attributes = new List <string>()
                {
                    "ALL"
                },
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

                bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    Console.WriteLine($"BoundingBox: top={face.BoundingBox.Left} left={face.BoundingBox.Top} width={face.BoundingBox.Width} height={face.BoundingBox.Height}");
                    Console.WriteLine($"Confidence: {face.Confidence}");
                    Console.WriteLine($"Landmarks: {face.Landmarks.Count}");
                    Console.WriteLine($"Pose: pitch={face.Pose.Pitch} roll={face.Pose.Roll} yaw={face.Pose.Yaw}");
                    Console.WriteLine($"Brightness: {face.Quality.Brightness}\tSharpness: {face.Quality.Sharpness}");

                    if (hasAll)
                    {
                        Console.WriteLine($"Estimated age is between {face.AgeRange.Low} and {face.AgeRange.High} years old.");
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
Example 18
        private List <Mat> detectFace(Bitmap bitmap)
        {
            Mat src = null;

            try
            {
                src = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap);
            }
            catch (Exception e)
            {
                // Without a valid source Mat there is nothing to crop from, so stop here.
                Console.WriteLine(e.Message);
                return null;
            }
            Amazon.Rekognition.Model.Image image = Utils.bitmapToAWSImage(bitmap);

            DetectFacesRequest request = new DetectFacesRequest()
            {
                Image = image
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(request);

                float bitmapWidth  = (float)bitmap.Width;
                float bitmapHeight = (float)bitmap.Height;

                List <Mat> matList = new List <Mat>();

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    int faceLeft   = (int)(face.BoundingBox.Left * bitmapWidth);
                    int faceTop    = (int)(face.BoundingBox.Top * bitmapHeight);
                    int faceWidth  = (int)(face.BoundingBox.Width * bitmapWidth);
                    int faceHeight = (int)(face.BoundingBox.Height * bitmapHeight);

                    Rect rectCrop = new Rect(faceLeft, faceTop, faceWidth, faceHeight);
                    //Console.WriteLine("Confidence : {0}\nAge :" + face.Confidence + ", " + face.BoundingBox.Top + ", " + face.BoundingBox.Left + ", " +
                    //    face.BoundingBox.Height + ", " + face.BoundingBox.Width);

                    Mat img = new Mat(src, rectCrop);
                    matList.Add(img);
                }

                return(matList);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }

            return(null);
        }
Example 19
        public IEnumerable <Rectangle> ExtractFaces()
        {
            if (_facesResponse == null)
            {
                var facesRequest = new DetectFacesRequest()
                {
                    Image = _rekognitionImage
                };
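                // Note: .Result blocks the calling thread on the async call; the async variant shown above avoids this.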
                _facesResponse = _client.DetectFacesAsync(facesRequest).Result;
            }

            return(_facesResponse.FaceDetails.Select(f =>
                                                     AmazonRekognitionCoordinateTranslator.RelativeBoxToAbsolute(f.BoundingBox, _width, _height)));
        }
Example 20
    public static void Example()
    {
        String photo  = "input.jpg";
        String bucket = "bucket";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
        {
            Image = new Image()
            {
                S3Object = new S3Object()
                {
                    Name   = photo,
                    Bucket = bucket
                },
            },
            // Attributes can be "ALL" or "DEFAULT".
            // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
            // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
            Attributes = new List <String>()
            {
                "ALL"
            }
        };

        try
        {
            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
            bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                                  face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                Console.WriteLine("Confidence: {0}\nLandmarks: {1}\nPose: pitch={2} roll={3} yaw={4}\nQuality: {5}",
                                  face.Confidence, face.Landmarks.Count, face.Pose.Pitch,
                                  face.Pose.Roll, face.Pose.Yaw, face.Quality);
                if (hasAll)
                {
                    Console.WriteLine("The detected face is estimated to be between " +
                                      face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                }
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }
Example 21
 public static IAsyncOperation <string> GetFaceDetails(string base64, string AccessKey, string SecretKey)
 {
     return(Task.Run <string>(async() =>
     {
         byte[] imageBytes;
         try
         {
             base64 = base64.Substring(base64.IndexOf(',') + 1).Trim('\0');
             imageBytes = System.Convert.FromBase64String(base64);
         }
         catch (Exception e) {
             return e.Message;
         }
         string sJSONResponse = "";
         AWSCredentials credentials;
         try
         {
             credentials = new BasicAWSCredentials(AccessKey, SecretKey);
         }
         catch (Exception e)
         {
             throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                                             + "Please make sure that your credentials file is at the correct "
                                             + "location (/Users/<userid>/.aws/credentials), and is in a valid format.", e);
         }
         DetectFacesRequest request = new DetectFacesRequest {
             Attributes = new List <string>(new string[] { "ALL" })
         };
         DetectFacesResponse result = null;
         request.Image = new Image {
             Bytes = new MemoryStream(imageBytes, 0, imageBytes.Length)
         };
         AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(credentials, RegionEndpoint.USWest2);
         try
         {
             result = await rekognitionClient.DetectFacesAsync(request);
         }
          catch (AmazonRekognitionException)
          {
              throw;
          }
          // Serialize the detected face details to JSON and return them
         sJSONResponse = JsonConvert.SerializeObject(result.FaceDetails);
         return sJSONResponse;
     }).AsAsyncOperation());
 }
Example 22
        static void IdentifyFaces(string filename)
        {
            // Using USWest2, not the default region
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            DetectFacesRequest dfr = new DetectFacesRequest();

            // Request needs image bytes, so read and add to request
            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
            byte[] data = null;
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
            }
            img.Bytes = new MemoryStream(data);
            dfr.Image = img;
            var outcome = rekoClient.DetectFaces(dfr);

            // Load a bitmap to modify with face bounding box rectangles
            System.Drawing.Bitmap facesHighlighted = new System.Drawing.Bitmap(filename);
            Pen pen = new Pen(Color.Black, 3);

            // Create a graphics context
            using (var graphics = Graphics.FromImage(facesHighlighted))
            {
                foreach (var fd in outcome.FaceDetails)
                {
                    // Get the bounding box
                    BoundingBox bb = fd.BoundingBox;
                    Console.WriteLine("Bounding box = (" + fd.BoundingBox.Left + ", " + fd.BoundingBox.Top + ", " +
                                      fd.BoundingBox.Height + ", " + fd.BoundingBox.Width + ")");
                    // Draw the rectangle using the bounding box values
                    // They are percentages so scale them to picture
                    graphics.DrawRectangle(pen, x: facesHighlighted.Width * bb.Left,
                                           y: facesHighlighted.Height * bb.Top,
                                           width: facesHighlighted.Width * bb.Width,
                                           height: facesHighlighted.Height * bb.Height);
                }
            }
            // Save the image with highlights as PNG
            string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");

            facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);
            Console.WriteLine(">>> Faces highlighted in file " + fileout);
        }
Example 23
        public string GetImageInfo(ImageData imageData, byte[] imgData = null)
        {
            try
            {
                var path = Path.Combine(
                    Directory.GetCurrentDirectory(), "wwwroot",
                    imageData.fileName);
                imgData = Convert.FromBase64String(imageData.base64Data);

                _imageData = new MemoryStream(imgData);
                DetectFacesRequest detectFaces = new DetectFacesRequest()
                {
                    Image = new Image()
                    {
                        Bytes = _imageData
                    }
                };

                DetectFacesResponse facesResponse = _rekognitionClient.DetectFacesAsync(detectFaces).Result;

                List <FaceDetail> lstCelebrities = facesResponse.FaceDetails;

                FaceDetail faceDetail = new FaceDetail();

                StringBuilder sbCelebrities = new StringBuilder();
                //foreach (var item in lstCelebrities)
                //{
                //    switch (switch_on)
                //    {
                //        default:
                //    }
                //}
                string Celebrities = sbCelebrities.ToString().TrimEnd(',');

                return(Celebrities);
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
                throw;
            }
        }
Example 24
        public async Task <float> DetectFaceInS3Object(string s3ObjectName)
        {
            var detectFacesRequest = new DetectFacesRequest()
            {
                Image = GetImageDefinition(s3ObjectName),
                // Attributes can be "ALL" or "DEFAULT".
                // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                var detectFacesResponse = await _client.DetectFacesAsync(detectFacesRequest);

                bool hasAll = detectFacesRequest.Attributes.Contains("ALL");

                var face = detectFacesResponse.FaceDetails.SingleOrDefault();
                if (face == null)
                {
                    return(0);
                }

                System.Diagnostics.Debug.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left, face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                System.Diagnostics.Debug.WriteLine("Confidence: {0}\nLandmarks: {1}\nPose: pitch={2} roll={3} yaw={4}\nQuality: {5}", face.Confidence, face.Landmarks.Count, face.Pose.Pitch, face.Pose.Roll, face.Pose.Yaw, face.Quality);

                if (hasAll)
                {
                    System.Diagnostics.Debug.WriteLine("The detected face is estimated to be between " + face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                }

                return(face.Confidence);
            }
            catch (Exception e)
            {
                System.Diagnostics.Debug.WriteLine(e.Message);
                throw;
            }
        }
Example 25
        public List <Emotion> EmotionDetect(string _image)
        {
            {
                String         photo  = _image;
                String         bucket = "ngankhanh98";
                List <Emotion> response;

                AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

                DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
                {
                    Image = new Amazon.Rekognition.Model.Image()
                    {
                        S3Object = new S3Object()
                        {
                            Name   = photo,
                            Bucket = bucket
                        },
                    },
                    Attributes = new List <String>()
                    {
                        "ALL"
                    }
                };

                try
                {
                    DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
                    bool       hasAll = detectFacesRequest.Attributes.Contains("ALL");
                    FaceDetail face   = detectFacesResponse.FaceDetails[0];

                    return(face.Emotions);
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.Message);
                    return(null);
                }
            }
        }
Example 26
        private static void detectFace(Amazon.Rekognition.Model.Image image, AmazonRekognitionClient rekognitionClient)
        {
            DetectFacesRequest request = new DetectFacesRequest()
            {
                Image = image
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(request);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    Console.WriteLine("Confidence : {0}\nAge :" + face.Confidence + ", " + face.BoundingBox.Top + ", " + face.BoundingBox.Left + ", " +
                                      face.BoundingBox.Height + ", " + face.BoundingBox.Width);
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
Example 27
        /// <summary>
        /// This method will detect  the input image is face or not
        /// </summary>
        /// <param name="filename"></param>
        /// <returns></returns>
        public bool DetectFaces(string filename)
        {
            IAmazonRekognition rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1);
            DetectFacesRequest dfr        = new DetectFacesRequest();

            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();



            // Request needs image bytes, so read and add to request



            byte[] data = null;

            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                data = new byte[fs.Length];

                fs.Read(data, 0, (int)fs.Length);
            }

            img.Bytes = new MemoryStream(data);

            dfr.Image = img;

            var outcome = rekoClient.DetectFaces(dfr);

            if (outcome.FaceDetails.Count > 0)
            {
                return(true);
            }
            else
            {
                return(false);
            }
        }
Example 28
        public async Task IdentifyFacesTests()
        {
            var mockClient = new Mock <AmazonRekognitionClient>();

            mockClient.Setup(client => client.DetectFacesAsync(
                                 It.IsAny <DetectFacesRequest>(),
                                 It.IsAny <CancellationToken>()
                                 )).Returns((DetectFacesRequest r, CancellationToken token) =>
            {
                return(Task.FromResult(new DetectFacesResponse()
                {
                    HttpStatusCode = System.Net.HttpStatusCode.OK,
                }));
            });

            byte[] data = File.ReadAllBytes(_filename);

            DetectFacesRequest request = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data),
                },
            };

            var client = mockClient.Object;
            DetectFacesResponse response = await client.DetectFacesAsync(request);

            bool gotResult = response is not null;

            Assert.True(gotResult, "DetectFacesAsync returned a response.");

            bool ok = response.HttpStatusCode == System.Net.HttpStatusCode.OK;

            Assert.True(ok, $"Successfully searched image for faces.");
        }
Example 29
        public static async Task <string> FunctionHandler(String photo)
        {
            String bucket = "moodanalysis";
            //ArrayList result = new ArrayList();
            string result = "";

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            // Recognizes User's face
            CompareFacesRequest CFR = new CompareFacesRequest()
            {
                //SimilarityThreshold = 50,

                SourceImage = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = "referencePhoto.jpg",
                        Bucket = bucket
                    },
                },

                TargetImage = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },
            };

            CompareFacesResponse compareFacesResponse = await rekognitionClient.CompareFacesAsync(CFR);

            string howManyFaces = "";

            if (compareFacesResponse.FaceMatches.Count == 0)
            {
                return("");
            }

            //int index = 0, bestIndex = 0;
            var         bestMatch       = compareFacesResponse.FaceMatches[0];
            float       bestMatchResult = compareFacesResponse.FaceMatches[0].Similarity;
            BoundingBox bestBoundingBox = compareFacesResponse.FaceMatches[0].Face.BoundingBox;

            foreach (var faceMatch in compareFacesResponse.FaceMatches)
            {
                howManyFaces += faceMatch.Similarity + ",";

                if (bestMatchResult < faceMatch.Similarity)
                {
                    bestMatch       = faceMatch;
                    bestBoundingBox = faceMatch.Face.BoundingBox;
                    bestMatchResult = faceMatch.Similarity;
                }
            }

            // Detects emotions of faces in photo
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },

                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = await rekognitionClient.DetectFacesAsync(detectFacesRequest);

            //int i = 0;
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                if (face.BoundingBox.Height == bestBoundingBox.Height &&
                    face.BoundingBox.Left == bestBoundingBox.Left &&
                    face.BoundingBox.Top == bestBoundingBox.Top &&
                    face.BoundingBox.Width == bestBoundingBox.Width)
                {
                    //var emotQuery = FilterEmotions(face, IsLowConfidence);

                    FilterEmotions filter = delegate(FaceDetail faceFilter, ConfidenceFilterDelegate confFilter)
                    {
                        return(faceFilter.Emotions.FindAll(n => confFilter(n)).ToList());
                    };

                    var emotQuery = filter(face, IsLowConfidence);

                    //IEnumerable<Emotion> emotQuery =
                    //    from faceEmotion in face.Emotions
                    //    where faceEmotion.Confidence > 10
                    //    select faceEmotion;

                    // GRAB THE EMOTION
                    foreach (Emotion emot in emotQuery)
                    {
                        result += emot.Type + ",";
                    }

                    break;
                }
            }

            //delete the last ,
            if (result.Length != 0)
            {
                result = result.Substring(0, result.Length - 1);
            }

            return(result);
        }
Example 30
    public static void Example()
    {
        String photo = "photo.jpg";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        Image image = new Image();

        try
        {
            using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
            {
                byte[] data = null;
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
        }
        catch (Exception)
        {
            Console.WriteLine("Failed to load file " + photo);
            return;
        }

        int height;
        int width;

        // Used to extract original photo width/height
        using (System.Drawing.Bitmap imageBitmap = new System.Drawing.Bitmap(photo))
        {
            height = imageBitmap.Height;
            width  = imageBitmap.Width;
        }

        Console.WriteLine("Image Information:");
        Console.WriteLine(photo);
        Console.WriteLine("Image Height: " + height);
        Console.WriteLine("Image Width: " + width);

        try
        {
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image      = image,
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                Console.WriteLine("Face:");
                ShowBoundingBoxPositions(height, width,
                                         face.BoundingBox, detectFacesResponse.OrientationCorrection);
                Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                                  face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                Console.WriteLine("The detected face is estimated to be between " +
                                  face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                Console.WriteLine();
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }
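The ShowBoundingBoxPositions helper called above is not included in this example. A hedged sketch of what it could look like, based on the orientation handling shown in AWS's documentation samples (parameter names are illustrative, not taken from this example's codebase):

    public static void ShowBoundingBoxPositions(int imageHeight, int imageWidth, BoundingBox box, OrientationCorrection rotation)
    {
        float left;
        float top;

        // The bounding box is relative (0-1), so scale it by the image dimensions,
        // adjusting for the orientation correction reported by the service.
        switch (rotation?.Value)
        {
            case "ROTATE_0":
                left = imageWidth * box.Left;
                top = imageHeight * box.Top;
                break;
            case "ROTATE_90":
                left = imageHeight * (1 - (box.Top + box.Height));
                top = imageWidth * box.Left;
                break;
            case "ROTATE_180":
                left = imageWidth - (imageWidth * (box.Left + box.Width));
                top = imageHeight * (1 - (box.Top + box.Height));
                break;
            case "ROTATE_270":
                left = imageHeight * box.Top;
                top = imageWidth * (1 - (box.Left + box.Width));
                break;
            default:
                Console.WriteLine("No estimated orientation. Check the image's Exif metadata.");
                return;
        }

        Console.WriteLine("Left: " + (int)left);
        Console.WriteLine("Top: " + (int)top);
        Console.WriteLine("Face Width: " + (int)(imageWidth * box.Width));
        Console.WriteLine("Face Height: " + (int)(imageHeight * box.Height));
    }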