Example #1
        private Dictionary<string, float> GetEmotions(MemoryStream image)
        {
            var emotions = new Dictionary<string, float>();

            // Credentials are read from configuration; the client targets the eu-west-1 region.
            IAmazonRekognition rekognition = new AmazonRekognitionClient(
                ConfigurationManager.AppSettings["AWSAccessKey"],
                ConfigurationManager.AppSettings["AWSSecretKey"],
                RegionEndpoint.EUWest1);

            var request = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = image
                },
                // "ALL" is required to get emotion attributes; the default set omits them.
                Attributes = new List<string> { "ALL" }
            };

            var response = rekognition.DetectFaces(request);

            foreach (var detail in response.FaceDetails)
            {
                foreach (var emotion in detail.Emotions)
                {
                    // Keep the first confidence seen per emotion type; later faces do not overwrite it.
                    if (!emotions.ContainsKey(emotion.Type))
                    {
                        emotions.Add(emotion.Type, emotion.Confidence);
                    }
                }
            }

            return emotions;
        }
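A minimal way to call the helper above, assuming the image is a local file (the path "face.jpg" is a placeholder, not part of the original example):

        // Sketch: load a local image into a MemoryStream and print the detected emotions.
        byte[] bytes = File.ReadAllBytes("face.jpg");
        using (var stream = new MemoryStream(bytes))
        {
            Dictionary<string, float> emotions = GetEmotions(stream);
            foreach (var pair in emotions)
            {
                Console.WriteLine("{0}: {1:F1}%", pair.Key, pair.Value);
            }
        }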
Example #2
        /// <summary>
        /// Adds the face detected in the image to the specified collection.
        /// The image is expected to contain at most one face.
        /// </summary>
        /// <param name="collectionId">Identifier of the target Rekognition collection.</param>
        /// <param name="image">Image to index.</param>
        /// <returns>The indexed face record, or null if no face was detected.</returns>
        public FaceRecord AddImageToCollection(string collectionId, Amazon.Rekognition.Model.Image image)
        {
            AmazonRekognitionClient rekognitionClient = AmazonClient.GetInstance();

            // Validate that the image contains no more than one face.
            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(new DetectFacesRequest
            {
                Attributes = new List<string> { "ALL" },
                Image      = image
            });

            if (detectFacesResponse.FaceDetails != null && detectFacesResponse.FaceDetails.Count > 1)
            {
                throw new ArgumentException("The image contains more than one face.", nameof(image));
            }

            IndexFacesRequest indexFacesRequest = new IndexFacesRequest()
            {
                Image               = image,
                CollectionId        = collectionId,
                DetectionAttributes = new List<string> { "ALL" }
            };

            IndexFacesResponse indexFacesResponse = rekognitionClient.IndexFaces(indexFacesRequest);

            // FaceRecords is empty when no face was detected, so this returns null in that case.
            return indexFacesResponse.FaceRecords.FirstOrDefault();
        }
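IndexFaces fails with a ResourceNotFoundException if the collection does not exist yet, so the collection is typically created once up front. A minimal sketch using the same client helper; the collection id "my-faces" is illustrative:

        // Sketch: create the collection before indexing faces into it.
        AmazonRekognitionClient client = AmazonClient.GetInstance();
        var createResponse = client.CreateCollection(new CreateCollectionRequest
        {
            CollectionId = "my-faces"
        });
        Console.WriteLine("CreateCollection status: " + createResponse.StatusCode);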
Example #3
        static void IdentifyFaces(string filename)
        {
            // Using USWest2, not the default region
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            // Request needs image bytes, so read and add to request
            byte[] data = File.ReadAllBytes(filename);

            DetectFacesRequest dfr = new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    Bytes = new MemoryStream(data)
                }
            };

            DetectFacesResponse outcome = rekoClient.DetectFaces(dfr);

            if (outcome.FaceDetails.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles
                Bitmap facesHighlighted = new Bitmap(filename);
                Pen    pen = new Pen(Color.Black, 3);

                // Create a graphics context
                using (var graphics = Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in outcome.FaceDetails)
                    {
                        // Get the bounding box
                        BoundingBox bb = fd.BoundingBox;
                        Console.WriteLine($"Bounding box = ({bb.Left}, {bb.Top}, {bb.Height}, {bb.Width})");

                        // Draw the rectangle using the bounding box values
                        // They are percentages so scale them to picture
                        graphics.DrawRectangle(pen,
                                               x: facesHighlighted.Width * bb.Left,
                                               y: facesHighlighted.Height * bb.Top,
                                               width: facesHighlighted.Width * bb.Width,
                                               height: facesHighlighted.Height * bb.Height);
                    }
                }

                // Save the image with highlights as PNG
                string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);

                Console.WriteLine(">>> " + outcome.FaceDetails.Count + " face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No faces found");
            }
        }
        public DetectFacesResponse IdentifyFaces(byte[] request)
        {
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(_credentials, Amazon.RegionEndpoint.USEast2);

            DetectFacesRequest dfr = new DetectFacesRequest();

            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();

            img.Bytes = new MemoryStream(request);
            dfr.Image = img;

            return(rekoClient.DetectFaces(dfr));
        }
        private List<Mat> detectFace(Bitmap bitmap)
        {
            Mat src;

            try
            {
                src = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap);
            }
            catch (Exception e)
            {
                // If the bitmap cannot be converted there is nothing to crop from.
                Console.WriteLine(e.Message);
                return null;
            }

            Amazon.Rekognition.Model.Image image = Utils.bitmapToAWSImage(bitmap);

            DetectFacesRequest request = new DetectFacesRequest()
            {
                Image = image
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(request);

                float bitmapWidth  = (float)bitmap.Width;
                float bitmapHeight = (float)bitmap.Height;

                List<Mat> matList = new List<Mat>();

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    // Bounding box values are ratios of the image size, so scale them to pixels.
                    int faceLeft   = (int)(face.BoundingBox.Left * bitmapWidth);
                    int faceTop    = (int)(face.BoundingBox.Top * bitmapHeight);
                    int faceWidth  = (int)(face.BoundingBox.Width * bitmapWidth);
                    int faceHeight = (int)(face.BoundingBox.Height * bitmapHeight);

                    // Crop the face region from the source Mat (assumes the box lies inside the image).
                    Rect rectCrop = new Rect(faceLeft, faceTop, faceWidth, faceHeight);
                    Mat  img      = new Mat(src, rectCrop);
                    matList.Add(img);
                }

                return matList;
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }

            return null;
        }
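A possible follow-up to detectFace above, writing each cropped face to disk with OpenCvSharp; it assumes a System.Drawing.Bitmap named bitmap is in scope, and the output file names are placeholders:

            // Sketch: save each cropped face region returned by detectFace.
            List<Mat> faces = detectFace(bitmap);
            if (faces != null)
            {
                for (int i = 0; i < faces.Count; i++)
                {
                    Cv2.ImWrite($"face_{i}.png", faces[i]);
                    faces[i].Dispose();
                }
            }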
Example #6
    public static void Example()
    {
        String photo  = "input.jpg";
        String bucket = "bucket";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
        {
            Image = new Image()
            {
                S3Object = new S3Object()
                {
                    Name   = photo,
                    Bucket = bucket
                },
            },
            // Attributes can be "ALL" or "DEFAULT".
            // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
            // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
            Attributes = new List <String>()
            {
                "ALL"
            }
        };

        try
        {
            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
            bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                                  face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                Console.WriteLine("Confidence: {0}\nLandmarks: {1}\nPose: pitch={2} roll={3} yaw={4}\nQuality: {5}",
                                  face.Confidence, face.Landmarks.Count, face.Pose.Pitch,
                                  face.Pose.Roll, face.Pose.Yaw, face.Quality);
                if (hasAll)
                {
                    Console.WriteLine("The detected face is estimated to be between " +
                                      face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                }
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }
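The synchronous DetectFaces call above is only available on .NET Framework targets of the SDK; on .NET Core the client exposes async operations instead. A rough async equivalent of Example #6 (a sketch, assuming System.Threading.Tasks is imported):

    public static async Task ExampleAsync()
    {
        var client = new AmazonRekognitionClient();

        var request = new DetectFacesRequest()
        {
            Image = new Image()
            {
                S3Object = new S3Object()
                {
                    Name   = "input.jpg",
                    Bucket = "bucket"
                },
            },
            Attributes = new List<String>() { "ALL" }
        };

        // Same request as Example #6, issued through the async API surface.
        DetectFacesResponse response = await client.DetectFacesAsync(request);
        Console.WriteLine("Faces detected: " + response.FaceDetails.Count);
    }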
Example #7
        static void IdentifyFaces(string filename)
        {
            // Using USWest2, not the default region
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            DetectFacesRequest dfr = new DetectFacesRequest();

            // Request needs image bytes, so read and add to request
            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
            byte[] data = File.ReadAllBytes(filename);
            img.Bytes = new MemoryStream(data);
            dfr.Image = img;
            var outcome = rekoClient.DetectFaces(dfr);

            // Load a bitmap to modify with face bounding box rectangles
            System.Drawing.Bitmap facesHighlighted = new System.Drawing.Bitmap(filename);
            Pen pen = new Pen(Color.Black, 3);

            // Create a graphics context
            using (var graphics = Graphics.FromImage(facesHighlighted))
            {
                foreach (var fd in outcome.FaceDetails)
                {
                    // Get the bounding box
                    BoundingBox bb = fd.BoundingBox;
                    Console.WriteLine("Bounding box = (" + fd.BoundingBox.Left + ", " + fd.BoundingBox.Top + ", " +
                                      fd.BoundingBox.Height + ", " + fd.BoundingBox.Width + ")");
                    // Draw the rectangle using the bounding box values
                    // They are percentages so scale them to picture
                    graphics.DrawRectangle(pen, x: facesHighlighted.Width * bb.Left,
                                           y: facesHighlighted.Height * bb.Top,
                                           width: facesHighlighted.Width * bb.Width,
                                           height: facesHighlighted.Height * bb.Height);
                }
            }
            // Save the image with highlights as PNG
            string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");

            facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);
            Console.WriteLine(">>> Faces highlighted in file " + fileout);
        }
Example #8
        public void RekognitionDetectFaces()
        {
            #region to-detect-faces-in-an-image-1481841782793

            var client   = new AmazonRekognitionClient();
            var response = client.DetectFaces(new DetectFacesRequest
            {
                Image = new Image {
                    S3Object = new S3Object {
                        Bucket = "mybucket",
                        Name   = "myphoto"
                    }
                }
            });

            List<FaceDetail> faceDetails = response.FaceDetails;
            string orientationCorrection = response.OrientationCorrection;

            #endregion
        }
Example #9
        public List<Emotion> EmotionDetect(string _image)
        {
            String photo  = _image;
            String bucket = "ngankhanh98";

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Amazon.Rekognition.Model.Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                },
                // "ALL" is required for emotion attributes.
                Attributes = new List<String>() { "ALL" }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);

                if (detectFacesResponse.FaceDetails.Count == 0)
                {
                    return null;
                }

                FaceDetail face = detectFacesResponse.FaceDetails[0];
                return face.Emotions;
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
                return null;
            }
        }
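One way to consume the list Example #9 returns, picking the most confident emotion (a sketch; "photo.jpg" stands in for the S3 object key the method expects):

            // Sketch: report the dominant emotion for an image stored in the S3 bucket.
            List<Emotion> emotions = EmotionDetect("photo.jpg");
            if (emotions != null && emotions.Count > 0)
            {
                Emotion top = emotions.OrderByDescending(e => e.Confidence).First();
                Console.WriteLine("Dominant emotion: {0} ({1:F1}%)", top.Type, top.Confidence);
            }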
Example #10
        private static void detectFace(Amazon.Rekognition.Model.Image image, AmazonRekognitionClient rekognitionClient)
        {
            DetectFacesRequest request = new DetectFacesRequest()
            {
                Image = image
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(request);

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    Console.WriteLine("Confidence : {0}\nAge :" + face.Confidence + ", " + face.BoundingBox.Top + ", " + face.BoundingBox.Left + ", " +
                                      face.BoundingBox.Height + ", " + face.BoundingBox.Width);
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
Example #11
        public DetectFacesResponse Recognize(Bitmap image)
        {
            MemoryStream memoryStream = new MemoryStream();

            image.Save(memoryStream, System.Drawing.Imaging.ImageFormat.Jpeg);

            var result = _client.DetectFaces(new DetectFacesRequest()
            {
                Attributes = new List <string> {
                    "ALL"
                },
                Image = new Amazon.Rekognition.Model.Image()
                {
                    Bytes = memoryStream
                }
            });

            var serialized = JsonConvert.SerializeObject(result);

            File.WriteAllText("real_result.json", serialized);

            return(result);
        }
        /// <summary>
        /// Detects whether the input image contains at least one face.
        /// </summary>
        /// <param name="filename">Path of the image file to check.</param>
        /// <returns>True if at least one face is detected; otherwise false.</returns>
        public bool DetectFaces(string filename)
        {
            IAmazonRekognition rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1);
            DetectFacesRequest dfr        = new DetectFacesRequest();

            // Request needs image bytes, so read and add to request
            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
            byte[] data = File.ReadAllBytes(filename);
            img.Bytes = new MemoryStream(data);
            dfr.Image = img;

            var outcome = rekoClient.DetectFaces(dfr);

            return outcome.FaceDetails.Count > 0;
        }
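A trivial usage sketch for the boolean check above; the file path is a placeholder:

            // Sketch: gate further processing on whether the image contains a face.
            if (DetectFaces(@"C:\images\profile.jpg"))
            {
                Console.WriteLine("At least one face detected.");
            }
            else
            {
                Console.WriteLine("No faces detected.");
            }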
Example #13
    public static void Example()
    {
        String photo = "photo.jpg";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        Image image = new Image();

        try
        {
            using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
            {
                byte[] data = null;
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
        }
        catch (Exception)
        {
            Console.WriteLine("Failed to load file " + photo);
            return;
        }

        int height;
        int width;

        // Used to extract original photo width/height
        using (System.Drawing.Bitmap imageBitmap = new System.Drawing.Bitmap(photo))
        {
            height = imageBitmap.Height;
            width  = imageBitmap.Width;
        }

        Console.WriteLine("Image Information:");
        Console.WriteLine(photo);
        Console.WriteLine("Image Height: " + height);
        Console.WriteLine("Image Width: " + width);

        try
        {
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image      = image,
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
            foreach (FaceDetail face in detectFacesResponse.FaceDetails)
            {
                Console.WriteLine("Face:");
                ShowBoundingBoxPositions(height, width,
                                         face.BoundingBox, detectFacesResponse.OrientationCorrection);
                Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}", face.BoundingBox.Left,
                                  face.BoundingBox.Top, face.BoundingBox.Width, face.BoundingBox.Height);
                Console.WriteLine("The detected face is estimated to be between " +
                                  face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
                Console.WriteLine();
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }
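Example #13 calls a ShowBoundingBoxPositions helper that is not included in the snippet. A simplified sketch of what such a helper could look like, handling only the un-rotated case (the rotated orientations would need their own coordinate transforms):

    // Sketch: print pixel coordinates for a bounding box on an un-rotated image.
    public static void ShowBoundingBoxPositions(int imageHeight, int imageWidth, BoundingBox box, string rotation)
    {
        if (rotation != null && rotation != "ROTATE_0")
        {
            Console.WriteLine("Rotation " + rotation + " is not handled in this sketch.");
            return;
        }

        // Bounding box values are ratios of the image dimensions; convert to pixels.
        Console.WriteLine("Left: " + imageWidth * box.Left +
                          ", Top: " + imageHeight * box.Top +
                          ", Width: " + imageWidth * box.Width +
                          ", Height: " + imageHeight * box.Height);
    }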
        public void IdentifyFaces(string filename, string path, string bucketName)
        {
            // Credentials come from the default AWS credentials chain; the client targets us-east-1.
            IAmazonRekognition rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1);

            // Detect faces in the S3 object; 'filename' is assumed to be a local copy of the same image.
            var response = rekoClient.DetectFaces(new DetectFacesRequest
            {
                Image = new Amazon.Rekognition.Model.Image
                {
                    S3Object = new Amazon.Rekognition.Model.S3Object
                    {
                        Bucket = bucketName,
                        Name   = path,
                    }
                }
            });

            string orientationCorrection = response.OrientationCorrection;

            if (response.FaceDetails.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles
                System.Drawing.Bitmap facesHighlighted = new System.Drawing.Bitmap(filename);
                Pen pen = new Pen(Color.Black, 3);

                // Create a graphics context
                using (var graphics = Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in response.FaceDetails)
                    {
                        // Get the bounding box
                        BoundingBox bb = fd.BoundingBox;
                        Console.WriteLine("Bounding box = (" + bb.Left + ", " + bb.Top + ", " +
                                          bb.Height + ", " + bb.Width + ")");
                        // Draw the rectangle using the bounding box values
                        // They are percentages so scale them to picture
                        graphics.DrawRectangle(pen, x: facesHighlighted.Width * bb.Left,
                                               y: facesHighlighted.Height * bb.Top,
                                               width: facesHighlighted.Width * bb.Width,
                                               height: facesHighlighted.Height * bb.Height);
                    }
                }
                // Save the image with highlights as PNG
                string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);
                Console.WriteLine(">>> " + response.FaceDetails.Count + " face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No faces found");
            }
        }
Example #15
        public EmotionalData DetectImage(byte[] image)
        {
            var awsImage = new Image
            {
                Bytes = new MemoryStream(image)
            };

            // Credentials come from the default AWS credentials chain; the client targets ap-southeast-1.
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(RegionEndpoint.APSoutheast1);

            DetectFacesRequest detectFacesRequest = new DetectFacesRequest
            {
                // "ALL" is required to get emotion attributes back.
                Attributes = new List<string> { "ALL" },
                Image      = awsImage
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);

                if (detectFacesResponse.FaceDetails != null && detectFacesResponse.FaceDetails.Count > 0)
                {
                    // Take the most confident face, then its most confident emotion.
                    var customerFace        = detectFacesResponse.FaceDetails.OrderByDescending(x => x.Confidence).First();
                    var customerEmotionData = customerFace.Emotions.OrderByDescending(x => x.Confidence).FirstOrDefault();

                    if (customerEmotionData != null)
                    {
                        return new EmotionalData
                        {
                            Emotion     = customerEmotionData.Type,
                            Probability = Convert.ToDecimal(customerEmotionData.Confidence)
                        };
                    }
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }

            return new EmotionalData();
        }
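Example #15 returns a project-specific EmotionalData type that is not shown in the snippet. A minimal shape consistent with how it is used above; the property types are assumptions inferred from the assignments:

        // Sketch: assumed EmotionalData holder; Emotion receives the EmotionName (converts to string),
        // Probability receives the confidence converted to decimal.
        public class EmotionalData
        {
            public string  Emotion     { get; set; }
            public decimal Probability { get; set; }
        }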