Provides the source image either as bytes or an S3 object.
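The examples below all populate the Bytes property from local image data. For the S3 route, a minimal sketch (the bucket and key names are hypothetical) would look like:

        static Amazon.Rekognition.Model.Image FromS3(string bucket, string key)
        {
            // Reference an object already stored in S3 instead of uploading bytes.
            return new Amazon.Rekognition.Model.Image
            {
                S3Object = new Amazon.Rekognition.Model.S3Object
                {
                    Bucket = bucket, // e.g. "my-bucket" (hypothetical)
                    Name   = key     // e.g. "photos/photo.jpg" (hypothetical)
                }
            };
        }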
Example #1
        static void Main(string[] args)
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();


            MemoryStream ms = null;

            try
            {
                // A Bitmap is not base64; AWS ultimately needs the image bytes base64-encoded
                // (the SDK does the encoding), so re-encode the bitmap as JPEG into a MemoryStream.
                // Base64 represents three 8-bit bytes as four 6-bit characters.
                Bitmap bitmap = new Bitmap(@"D:\picture\insup.bmp");
                ms = new MemoryStream();
                bitmap.Save(ms, ImageFormat.Jpeg);
                ms.Position = 0; // rewind so the stream is read from the beginning
                image.Bytes = ms;
            }
            catch (Exception e)
            {
                Console.WriteLine("Failed to load image: " + e.Message);
                return;
            }

            searchFace(image, rekognitionClient);

            //using (FileStream fs = new FileStream(@"D:\picture\paul.bmp", FileMode.Open, FileAccess.Read))
            //{
            //    byte[] data = null;
            //    data = new byte[fs.Length];
            //    fs.Read(data, 0, (int)fs.Length);
            //    image.Bytes = new MemoryStream(data);
            //}
        }
Example #2
        /// <summary>
        /// Constructor for a feature extractor using the Amazon Web Services SDK, taking a given Rekognition client,
        /// optimized for use with a batch of images.
        /// </summary>
        /// <param name="imagePath"></param>
        /// <param name="client"></param>
        internal AwsFeatureExtractor(string imagePath, AmazonRekognitionClient client)
        {
            var size        = System.Drawing.Image.FromFile(imagePath).Size;
            var orientation = SKCodec.Create(imagePath).EncodedOrigin;

            if ((int)orientation % 2 == 1)
            {
                // use standard bounding if image is not rotated or rotated by 180 degrees
                _height = size.Height;
                _width  = size.Width;
            }
            else
            {
                // flip height and width if image is rotated by 90 or 270 degrees
                _height = size.Width;
                _width  = size.Height;
            }

            _rekognitionImage = new Image();

            using FileStream fs = new FileStream(imagePath, FileMode.Open, FileAccess.Read);
            var data = new byte[fs.Length];

            fs.Read(data, 0, (int)fs.Length);
            _rekognitionImage.Bytes = new MemoryStream(data);

            _client = client;
        }
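A hypothetical call site for this batch-oriented constructor (the client is created once and shared across images):

            // Hypothetical usage; other members of AwsFeatureExtractor are not shown above.
            var client    = new AmazonRekognitionClient();
            var extractor = new AwsFeatureExtractor(@"D:\picture\insup.bmp", client);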
Example #3
        public IHttpActionResult Get(string imageURl)
        {
            //Get Image
            WebClient webClient = new WebClient();

            byte[] imageBinary = webClient.DownloadData(imageURl);

            //Create Image Model ...
            Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
            image.Bytes = new MemoryStream(imageBinary);

            List <FaceRecord> blackListResult = _rekognitionService.Recognize(Constants.BlackListCollectionId, image);

            List <FaceRecord> missingListResult = _rekognitionService.Recognize(Constants.WhiteListCollectionId, image);

            RecognizResult result = new RecognizResult
            {
                BlackListFaces   = blackListResult,
                MissingListFaces = missingListResult
            };

            //Create Cache Control Header...
            CacheControlHeaderValue cacheControlHeader = new CacheControlHeaderValue()
            {
                Public = true,
                MaxAge = new TimeSpan(0, 0, 1)
            };

            return(new OkResultWithCaching <RecognizResult>(result, this)
            {
                CacheControlHeader = cacheControlHeader
            });
        }
Example #4
        // snippet-start:[Rekognition.dotnetv3.CompareFacesExample]
        public static async Task Main()
        {
            float  similarityThreshold = 70F;
            string sourceImage         = "source.jpg";
            string targetImage         = "target.jpg";

            var rekognitionClient = new AmazonRekognitionClient();

            Amazon.Rekognition.Model.Image imageSource = new Amazon.Rekognition.Model.Image();

            try
            {
                using FileStream fs = new FileStream(sourceImage, FileMode.Open, FileAccess.Read);
                byte[] data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                imageSource.Bytes = new MemoryStream(data);
            }
            catch (Exception)
            {
                Console.WriteLine($"Failed to load source image: {sourceImage}");
                return;
            }

            Amazon.Rekognition.Model.Image imageTarget = new Amazon.Rekognition.Model.Image();

            try
            {
                using FileStream fs = new FileStream(targetImage, FileMode.Open, FileAccess.Read);
                byte[] data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                imageTarget.Bytes = new MemoryStream(data);
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Failed to load target image: {targetImage}");
                Console.WriteLine(ex.Message);
                return;
            }

            var compareFacesRequest = new CompareFacesRequest
            {
                SourceImage         = imageSource,
                TargetImage         = imageTarget,
                SimilarityThreshold = similarityThreshold,
            };

            // Call operation
            var compareFacesResponse = await rekognitionClient.CompareFacesAsync(compareFacesRequest);

            // Display results
            compareFacesResponse.FaceMatches.ForEach(match =>
            {
                ComparedFace face    = match.Face;
                BoundingBox position = face.BoundingBox;
                Console.WriteLine($"Face at {position.Left} {position.Top} matches with {match.Similarity}% confidence.");
            });

            Console.WriteLine($"Found {compareFacesResponse.UnmatchedFaces.Count} face(s) that did not match.");
        }
Example #5
        /// <summary>
        /// Adds all detected faces to a specific collection.
        /// </summary>
        /// <param name="collectionId"></param>
        /// <param name="image"></param>
        public FaceRecord AddImageToCollection(string collectionId, Amazon.Rekognition.Model.Image image)
        {
            AmazonRekognitionClient rekognitionClient = AmazonClient.GetInstance();

            //Validate that image contains only one face.
            DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(new Amazon.Rekognition.Model.DetectFacesRequest
            {
                Attributes = new List <string> {
                    "ALL"
                },
                Image = image
            });

            if (null != detectFacesResponse.FaceDetails && detectFacesResponse.FaceDetails.Count > 1)
            {
                throw new ArgumentException("More than one face in the image");
            }

            IndexFacesRequest indexFacesRequest = new IndexFacesRequest()
            {
                Image               = image,
                CollectionId        = collectionId,
                DetectionAttributes = new List <String>()
                {
                    "ALL"
                }
            };

            IndexFacesResponse indexFacesResponse = rekognitionClient.IndexFaces(indexFacesRequest);

            return(indexFacesResponse.FaceRecords.FirstOrDefault());
        }
Example #6
        /// <summary>
        /// Constructor for a feature extractor using Amazon Web Services SDK, optimized for analysing a single image.
        /// </summary>
        /// <param name="imageStream"></param>
        /// <param name="accessKey"></param>
        /// <param name="secretKey"></param>
        /// <param name="endpoint"></param>
        public AwsFeatureExtractor(Stream imageStream, string accessKey, string secretKey, RegionEndpoint endpoint)
        {
            var size        = System.Drawing.Image.FromStream(imageStream).Size;

            // Rewind before reading the stream again (assumes a seekable stream).
            imageStream.Position = 0;
            var orientation = SKCodec.Create(imageStream).EncodedOrigin;

            if ((int)orientation % 2 == 1)
            {
                // use standard bounding if image is not rotated or rotated by 180 degrees
                _height = size.Height;
                _width  = size.Width;
            }
            else
            {
                // flip height and width if image is rotated by 90 or 270 degrees
                _height = size.Width;
                _width  = size.Height;
            }

            _rekognitionImage = new Image();

            // Rewind once more before copying the raw bytes.
            imageStream.Position = 0;
            var data = new byte[imageStream.Length];

            imageStream.Read(data, 0, (int)imageStream.Length);
            _rekognitionImage.Bytes = new MemoryStream(data);

            _client = new AmazonRekognitionClient(new BasicAWSCredentials(accessKey, secretKey), endpoint);
        }
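A hypothetical call site for this single-image overload, with placeholder credentials:

            // Hypothetical usage; the credentials and region are placeholders.
            using (var stream = File.OpenRead(@"D:\picture\insup.bmp"))
            {
                var extractor = new AwsFeatureExtractor(stream, "YOUR_ACCESS_KEY_ID", "YOUR_SECRET_ACCESS_KEY", RegionEndpoint.USWest2);
            }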
Example #7
        private void btnLoad_Click(object sender, RoutedEventArgs e)
        {
            OpenFileDialog op = new OpenFileDialog();

            op.Title  = "Select a picture";
            op.Filter = "All supported graphics|*.jpg;*.jpeg;*.png|" +
                        "JPEG (*.jpg;*.jpeg)|*.jpg;*.jpeg|" +
                        "Portable Network Graphic (*.png)|*.png";
            if (op.ShowDialog() != true)
            {
                return; // no file selected
            }

            imgPhoto.Source = new BitmapImage(new Uri(op.FileName));

            // Strip the "file:///" scheme prefix (8 characters) to get a local path.
            String photo = imgPhoto.Source.ToString().Substring(8);

            Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
            try
            {
                using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
                {
                    byte[] data = new byte[fs.Length];
                    fs.Read(data, 0, (int)fs.Length);
                    image.Bytes = new MemoryStream(data);
                }
            }
            catch (Exception)
            {
                return;
            }

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectModerationLabelsRequest detectModerationLabelsRequest = new DetectModerationLabelsRequest()
            {
                Image         = image,
                MinConfidence = 60F
            };

            try
            {
                DetectModerationLabelsResponse detectModerationLabelsResponse = rekognitionClient.DetectModerationLabels(detectModerationLabelsRequest);
                List <Moderation> items = new List <Moderation>();

                foreach (ModerationLabel label in detectModerationLabelsResponse.ModerationLabels)
                {
                    items.Add(new Moderation()
                    {
                        Name = label.Name, Confidence = label.Confidence, ParentName = label.ParentName
                    });
                }
                lvModeration.ItemsSource = items;
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                return;
            }
        }
Example #8
        public async Task <IActionResult> Login(IFormFile file)
        {
            CelebrityModel celeb = new CelebrityModel();

            Directory.Delete(_appEnvironment.WebRootPath + "/resources/", true);
            Directory.CreateDirectory(_appEnvironment.WebRootPath + "/resources/");

            if (null != file && file.Length > 0)
            {
                string speechFileName = "notjeff.mp3";
                string speechText     = "Nice try. You're not Jeff, I can't let you in.";

                AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

                RecognizeCelebritiesRequest    recognizeCelebritiesRequest = new RecognizeCelebritiesRequest();
                Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();

                MemoryStream memStream = new MemoryStream();
                file.CopyTo(memStream);
                memStream.Position = 0; // rewind after the copy
                img.Bytes = memStream;
                recognizeCelebritiesRequest.Image = img;

                RecognizeCelebritiesResponse recognizeCelebritiesResponse = await rekognitionClient.RecognizeCelebritiesAsync(recognizeCelebritiesRequest);

                if (null != recognizeCelebritiesResponse && recognizeCelebritiesResponse.CelebrityFaces.Count > 0)
                {
                    celeb.CelebrityName = recognizeCelebritiesResponse.CelebrityFaces[0].Name;
                    celeb.Confidence    = recognizeCelebritiesResponse.CelebrityFaces[0].MatchConfidence;

                    if (celeb.CelebrityName == "Jeff Bezos")
                    {
                        speechText     = "Hello Boss, Welcome to the Deployment Bot. Please continue to start the deployment.";
                        celeb.IsJeff   = true;
                        speechFileName = "jeff.mp3";
                    }
                }
                else
                {
                    celeb.CelebrityName = "Sure, you're popular among your friends. But that doesn't make you a celebrity.";
                    celeb.Confidence    = 0;
                }

                AmazonPollyClient pollyclient = new AmazonPollyClient();
                Amazon.Polly.Model.SynthesizeSpeechResponse speechResponse =
                    await pollyclient.SynthesizeSpeechAsync(new Amazon.Polly.Model.SynthesizeSpeechRequest()
                {
                    OutputFormat = OutputFormat.Mp3, Text = speechText, VoiceId = VoiceId.Joanna
                });

                using (var stream = new FileStream(_appEnvironment.WebRootPath + "/resources/" + speechFileName, FileMode.Create))
                {
                    await speechResponse.AudioStream.CopyToAsync(stream);
                }
            }

            return(View("Login", celeb));
        }
Example #9
        private void btnLoad1_Click(object sender, RoutedEventArgs e)
        {
            OpenFileDialog op = new OpenFileDialog();

            op.Title  = "Select a picture";
            op.Filter = "All supported graphics|*.jpg;*.jpeg;*.png|" +
                        "JPEG (*.jpg;*.jpeg)|*.jpg;*.jpeg|" +
                        "Portable Network Graphic (*.png)|*.png";
            if (op.ShowDialog() != true)
            {
                return; // no file selected
            }

            imgPhoto1.Source = new BitmapImage(new Uri(op.FileName));

            // Strip the "file:///" scheme prefix (8 characters) to get a local path.
            String photo = imgPhoto1.Source.ToString().Substring(8);

            Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
            try
            {
                using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
                {
                    byte[] data = new byte[fs.Length];
                    fs.Read(data, 0, (int)fs.Length);
                    image.Bytes = new MemoryStream(data);
                }
            }
            catch (Exception)
            {
                return;
            }

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectTextRequest detectTextRequest = new DetectTextRequest()
            {
                Image = image,
            };

            try
            {
                DetectTextResponse detectTextResponse = rekognitionClient.DetectText(detectTextRequest);

                List <Moderation1> items = new List <Moderation1>();
                foreach (TextDetection text in detectTextResponse.TextDetections)
                {
                    items.Add(new Moderation1()
                    {
                        Confidence = text.Confidence, Detected = text.DetectedText, Type = text.Type
                    });
                }
                lvModeration1.ItemsSource = items;
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
Example #10
        private List <Guid> SearchOneFace(string faceFileName)
        {
            List <Guid> facesData = null;

            try
            {
                Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image()
                {
                    Bytes = new MemoryStream(System.IO.File.ReadAllBytes(faceFileName))
                };
                SearchFacesByImageRequest searchFacesByImageRequest = new SearchFacesByImageRequest()
                {
                    CollectionId = awsCollectionId, Image = image, FaceMatchThreshold = awsFaceMatchThreshold, MaxFaces = 1000
                };
                using (AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(awsAccessKeyId, awsSecretAccessKey, awsRegionEndpoint))
                {
                    SearchFacesByImageResponse searchFacesByImageResponse = rekognitionClient.SearchFacesByImageAsync(searchFacesByImageRequest).Result;
                    if (searchFacesByImageResponse != null && searchFacesByImageResponse.FaceMatches.Count > 0)
                    {
                        facesData = new List <Guid>();
                        for (int f = 0; f < searchFacesByImageResponse.FaceMatches.Count; f++)
                        {
                            // The first 8 characters of ExternalImageId are assumed to encode a date stamp.
                            string dateMask = searchFacesByImageResponse.FaceMatches[f].Face.ExternalImageId;
                            if (dateMask.Length > 7)
                            {
                                dateMask = dateMask.Substring(0, 8);
                                if (dates != null && dates.Contains(dateMask))
                                {
                                    if (searchFacesByImageResponse.FaceMatches[f].Similarity >= awsSimilarityLevel)
                                    {
                                        Guid faceId;
                                        if (Guid.TryParse(searchFacesByImageResponse.FaceMatches[f].Face.FaceId, out faceId))
                                        {
                                            if (!facesData.Contains(faceId))
                                            {
                                                facesData.Add(faceId);
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            catch (Exception)
            {
                // Swallow errors; a failed search falls through and returns null.
            }

            if (facesData != null && facesData.Count == 0)
            {
                facesData = null;
            }
            return(facesData);
        }
Example #11
        public async Task <bool> AuthenticateUserByFace(byte[] targetImage) //FileStream targetImage
        {
            float  similarityThreshold = 90F;
            String sourceImage         = "https://hackathonimagedump.s3.us-east-2.amazonaws.com/123456.jpeg";
            // String targetImage = "https://hackathonimagedump.s3.us-east-2.amazonaws.com/HappyFace.jpeg";


            // Never hard-code real credentials; placeholders shown here.
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient("YOUR_ACCESS_KEY_ID", "YOUR_SECRET_ACCESS_KEY", RegionEndpoint.USWest2);

            Amazon.Rekognition.Model.Image imageSource = new Amazon.Rekognition.Model.Image();
            try
            {
                var    webClient  = new WebClient();
                byte[] imageBytes = webClient.DownloadData(sourceImage);
                imageSource.Bytes = new MemoryStream(imageBytes);
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Failed to download source image: {ex.Message}");
            }

            Amazon.Rekognition.Model.Image imageTarget = new Amazon.Rekognition.Model.Image();
            try
            {
                imageTarget.Bytes = new MemoryStream(targetImage);
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Failed to read target image: {ex.Message}");
            }

            CompareFacesRequest compareFacesRequest = new CompareFacesRequest()
            {
                SourceImage         = imageSource,
                TargetImage         = imageTarget,
                SimilarityThreshold = similarityThreshold
            };

            // Call operation
            CompareFacesResponse compareFacesResponse = await rekognitionClient.CompareFacesAsync(compareFacesRequest);

            if (compareFacesResponse.HttpStatusCode == HttpStatusCode.OK)
            {
                // SourceImageFace.Confidence only says a face was detected in the source
                // image; authenticate on the actual match similarity instead.
                // (Requires a using directive for System.Linq.)
                return(compareFacesResponse.FaceMatches.Any(m => m.Similarity >= similarityThreshold));
            }
            else
            {
                return(false);
            }
        }
Example #12
        private void ImageForm_Load(object sender, EventArgs e)
        {
            // Load images

            var image = new Amazon.Rekognition.Model.Image();

            try
            {
                using (var fs = new FileStream(imageUrl, FileMode.Open, FileAccess.Read))
                {
                    byte[] data = new byte[fs.Length];
                    fs.Read(data, 0, (int)fs.Length);
                    image.Bytes = new MemoryStream(data);
                }
            }
            catch (Exception)
            {
                MessageBox.Show("Không thể mở tệp " + imageUrl, "Thông báo", MessageBoxButtons.OK, MessageBoxIcon.Error);
                return;
            }

            var rekognitionClient = new AmazonRekognitionClient();

            var detectlabelsRequest = new DetectLabelsRequest()
            {
                Image         = image,
                MaxLabels     = 10,
                MinConfidence = 77F
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = rekognitionClient.DetectLabels(detectlabelsRequest);
                //MessageBox.Show("Detected labels for " + imageUrl);
                foreach (var label in detectLabelsResponse.Labels)
                {
                    //MessageBox.Show(label.Name + " : " + label.Confidence);
                    foreach (var item in label.Instances)
                    {
                        //MessageBox.Show("Left : " + item.BoundingBox.Left);
                        boundingEnum.Add(new PositionClass(
                                             item.BoundingBox.Top * bmpHeight,
                                             item.BoundingBox.Left * bmpWidth,
                                             item.BoundingBox.Width * bmpWidth,
                                             item.BoundingBox.Height * bmpHeight)
                                         );
                    }
                }
            }
            catch (Exception)
            {
                MessageBox.Show("Không thể phân tích hình ảnh", "Lỗi", MessageBoxButtons.OK, MessageBoxIcon.Error);
            }
        }
Example #13
        static void IdentifyCelebrityFaces(string filename)
        {
            // Using USWest2, not the default region
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            RecognizeCelebritiesRequest dfr = new RecognizeCelebritiesRequest();

            // The request needs the image bytes, so read the file and add them to the request
            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
            byte[] data = null;
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
            }
            img.Bytes = new MemoryStream(data);
            dfr.Image = img;
            var outcome = rekoClient.RecognizeCelebrities(dfr);

            if (outcome.CelebrityFaces.Count > 0)
            {
                // Load a bitmap to modify with face bounding box rectangles
                System.Drawing.Bitmap facesHighlighted = new System.Drawing.Bitmap(filename);
                Pen  pen      = new Pen(Color.Black, 3);
                Font drawFont = new Font("Arial", 12);

                // Create a graphics context
                using (var graphics = Graphics.FromImage(facesHighlighted))
                {
                    foreach (var fd in outcome.CelebrityFaces)
                    {
                        // Get the bounding box
                        BoundingBox bb = fd.Face.BoundingBox;
                        Console.WriteLine("Bounding box = (" + bb.Left + ", " + bb.Top + ", " +
                                          bb.Height + ", " + bb.Width + ")");
                        // Draw the rectangle using the bounding box values
                        // They are percentages so scale them to picture
                        graphics.DrawRectangle(pen, x: facesHighlighted.Width * bb.Left,
                                               y: facesHighlighted.Height * bb.Top,
                                               width: facesHighlighted.Width * bb.Width,
                                               height: facesHighlighted.Height * bb.Height);
                        graphics.DrawString(fd.Name, drawFont, Brushes.White, facesHighlighted.Width * bb.Left,
                                            facesHighlighted.Height * bb.Top + facesHighlighted.Height * bb.Height);
                    }
                }
                // Save the image with highlights as PNG
                string fileout = filename.Replace(Path.GetExtension(filename), "_celebrityfaces.png");
                facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);
                Console.WriteLine(">>> " + outcome.CelebrityFaces.Count + " celebrity face(s) highlighted in file " + fileout);
            }
            else
            {
                Console.WriteLine(">>> No celebrity faces found");
            }
        }
Example #14
        static Amazon.Rekognition.Model.Image ToBytesStream(string filename)
        {
            var image = new Amazon.Rekognition.Model.Image();

            using (var fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
            return(image);
        }
Example #15
        public void FindObject()
        {
            Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
            try
            {
                using (FileStream fs = new FileStream(filePath, FileMode.Open, FileAccess.Read))
                {
                    byte[] data = new byte[fs.Length];
                    fs.Read(data, 0, (int)fs.Length);
                    image.Bytes = new MemoryStream(data);
                }
            }
            catch (Exception)
            {
                Console.WriteLine("Failed to load file " + filePath);
                return;
            }

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectLabelsRequest detectlabelsRequest = new DetectLabelsRequest()
            {
                Image         = image,
                MaxLabels     = 10,
                MinConfidence = 77F
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = rekognitionClient.DetectLabels(detectlabelsRequest);

                double width  = imgPictureFrame.Width;
                double height = imgPictureFrame.Height;

                foreach (Label label in detectLabelsResponse.Labels)
                {
                    List <Instance> instances = label.Instances;
                    foreach (Instance instance in instances)
                    {
                        string      data        = $"{label.Name}: {Math.Round(instance.Confidence,2)}%";
                        BoundingBox boundingBox = instance.BoundingBox;
                        BindingBox  bindingBox  = new BindingBox(width * boundingBox.Width, height * boundingBox.Height, height * boundingBox.Top, width * boundingBox.Left, data);
                        gContainer.Children.Add(bindingBox);
                    }
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
Example #16
        public DetectFacesResponse IdentifyFaces(byte[] request)
        {
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(_credentials, Amazon.RegionEndpoint.USEast2);

            DetectFacesRequest dfr = new DetectFacesRequest();

            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();

            img.Bytes = new MemoryStream(request);
            dfr.Image = img;

            return(rekoClient.DetectFaces(dfr));
        }
Example #17
        private List <Mat> detectFace(Bitmap bitmap)
        {
            Mat src = null;

            try
            {
                src = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap);
            }
            catch (Exception e)
            {
                // Without a valid Mat, the face crops below cannot be produced.
                Console.WriteLine(e.Message);
                return(null);
            }
            Amazon.Rekognition.Model.Image image = Utils.bitmapToAWSImage(bitmap);

            DetectFacesRequest request = new DetectFacesRequest()
            {
                Image = image
            };

            try
            {
                DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(request);

                float bitmapWidth  = (float)bitmap.Width;
                float bitmapHeight = (float)bitmap.Height;

                List <Mat> matList = new List <Mat>();

                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    int faceLeft   = (int)(face.BoundingBox.Left * bitmapWidth);
                    int faceTop    = (int)(face.BoundingBox.Top * bitmapHeight);
                    int faceWidth  = (int)(face.BoundingBox.Width * bitmapWidth);
                    int faceHeight = (int)(face.BoundingBox.Height * bitmapHeight);

                    Rect rectCrop = new Rect(faceLeft, faceTop, faceWidth, faceHeight);
                    //Console.WriteLine("Confidence : {0}\nAge :" + face.Confidence + ", " + face.BoundingBox.Top + ", " + face.BoundingBox.Left + ", " +
                    //    face.BoundingBox.Height + ", " + face.BoundingBox.Width);

                    Mat img = new Mat(src, rectCrop);
                    matList.Add(img);
                }

                return(matList);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }

            return(null);
        }
Example #18
        public static DetectLabelsResponse GetInfo(string filePath)
        {
            String photo = filePath;

            Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
            try
            {
                using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
                {
                    byte[] data = new byte[fs.Length];
                    fs.Read(data, 0, (int)fs.Length);
                    image.Bytes = new MemoryStream(data);
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine("Failed to load file " + photo + ": " + ex.Message);
                return(null);
            }

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectLabelsRequest detectlabelsRequest = new DetectLabelsRequest()
            {
                Image         = image,
                MaxLabels     = 10,
                MinConfidence = 77F
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = rekognitionClient.DetectLabels(detectlabelsRequest);

                Console.WriteLine("Detected labels for " + photo);
                foreach (Label label in detectLabelsResponse.Labels)
                {
                    Console.WriteLine("{0}: {1}", label.Name, label.Confidence);
                }

                return(detectLabelsResponse);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }

            return(null);
        }
Example #19
 public static Amazon.Rekognition.Model.Image matTOAWSImage(Mat mat)
 {
     try
     {
         // Encode the Mat as JPEG into a MemoryStream for Rekognition.
         MemoryStream ms = mat.ToMemoryStream(".jpg");
         ms.Position = 0; // make sure the stream is read from the beginning
         Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
         image.Bytes = ms;
         return(image);
     }
     catch (Exception e)
     {
         Console.WriteLine(e.Message);
     }
     return(null);
 }
Example #20
 public static Amazon.Rekognition.Model.Image bitmapToAWSImage(Bitmap bitmap)
 {
     try
     {
         // Re-encode the bitmap as JPEG into a MemoryStream for Rekognition.
         MemoryStream ms = new MemoryStream();
         bitmap.Save(ms, ImageFormat.Jpeg);
         ms.Position = 0; // rewind so the stream is read from the beginning
         Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
         image.Bytes = ms;
         return(image);
     }
     catch (Exception e)
     {
         Console.WriteLine(e.Message);
     }
     return(null);
 }
Example #21
    public static void Example()
    {
        String photo = "moviestars.jpg";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        RecognizeCelebritiesRequest recognizeCelebritiesRequest = new RecognizeCelebritiesRequest();

        Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
        byte[] data = null;
        try
        {
            using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
            {
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
            }
        }
        catch (Exception)
        {
            Console.WriteLine("Failed to load file " + photo);
            return;
        }

        img.Bytes = new MemoryStream(data);
        recognizeCelebritiesRequest.Image = img;

        Console.WriteLine("Looking for celebrities in image " + photo + "\n");

        RecognizeCelebritiesResponse recognizeCelebritiesResponse = rekognitionClient.RecognizeCelebrities(recognizeCelebritiesRequest);

        Console.WriteLine(recognizeCelebritiesResponse.CelebrityFaces.Count + " celebrity(s) were recognized.\n");
        foreach (Celebrity celebrity in recognizeCelebritiesResponse.CelebrityFaces)
        {
            Console.WriteLine("Celebrity recognized: " + celebrity.Name);
            Console.WriteLine("Celebrity ID: " + celebrity.Id);
            BoundingBox boundingBox = celebrity.Face.BoundingBox;
            Console.WriteLine("position: " +
                              boundingBox.Left + " " + boundingBox.Top);
            Console.WriteLine("Further information (if available):");
            foreach (String url in celebrity.Urls)
            {
                Console.WriteLine(url);
            }
        }
        Console.WriteLine(recognizeCelebritiesResponse.UnrecognizedFaces.Count + " face(s) were unrecognized.");
    }
Example #22
        public List <FaceRecord> Recognize(string collectionId, Amazon.Rekognition.Model.Image image)
        {
            //1- Detect faces in the input image and add them to the specified collection.
            AmazonRekognitionClient rekognitionClient = AmazonClient.GetInstance();

            IndexFacesRequest indexFacesRequest = new IndexFacesRequest()
            {
                Image               = image,
                CollectionId        = collectionId,
                DetectionAttributes = new List <String>()
                {
                    "DEFAULT"
                }
            };

            IndexFacesResponse indexFacesResponse = rekognitionClient.IndexFaces(indexFacesRequest);

            //2- Search all detected faces in the collection
            SearchFacesResponse searchFacesResponse = null;

            List <FaceRecord> matchedFaces = new List <FaceRecord>();

            if (null != indexFacesResponse && null != indexFacesResponse.FaceRecords && 0 != indexFacesResponse.FaceRecords.Count)
            {
                foreach (FaceRecord face in indexFacesResponse.FaceRecords)
                {
                    searchFacesResponse = rekognitionClient.SearchFaces(new SearchFacesRequest
                    {
                        CollectionId       = collectionId,
                        FaceId             = face.Face.FaceId,
                        FaceMatchThreshold = 70F,
                        MaxFaces           = 2
                    });

                    if (searchFacesResponse.FaceMatches != null && searchFacesResponse.FaceMatches.Count != 0)
                    {
                        matchedFaces.Add(face);
                    }
                }

                //Remove newly added faces from the collection

                _collectionService.RemoveFacesFromCollection(collectionId, indexFacesResponse.FaceRecords.Select(x => x.Face.FaceId).ToList());
            }

            return(matchedFaces);
        }
Example #23
        // snippet-start:[Rekognition.dotnetv3.CelebritiesInImageExample]
        public static async Task Main(string[] args)
        {
            string photo = "moviestars.jpg";

            var rekognitionClient = new AmazonRekognitionClient();

            var recognizeCelebritiesRequest = new RecognizeCelebritiesRequest();

            var img = new Amazon.Rekognition.Model.Image();

            byte[] data = null;
            try
            {
                using var fs = new FileStream(photo, FileMode.Open, FileAccess.Read);
                data         = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
            }
            catch (Exception)
            {
                Console.WriteLine($"Failed to load file {photo}");
                return;
            }

            img.Bytes = new MemoryStream(data);
            recognizeCelebritiesRequest.Image = img;

            Console.WriteLine($"Looking for celebrities in image {photo}\n");

            var recognizeCelebritiesResponse = await rekognitionClient.RecognizeCelebritiesAsync(recognizeCelebritiesRequest);

            Console.WriteLine($"{recognizeCelebritiesResponse.CelebrityFaces.Count} celebrity(s) were recognized.\n");
            recognizeCelebritiesResponse.CelebrityFaces.ForEach(celeb =>
            {
                Console.WriteLine($"Celebrity recognized: {celeb.Name}");
                Console.WriteLine($"Celebrity ID: {celeb.Id}");
                BoundingBox boundingBox = celeb.Face.BoundingBox;
                Console.WriteLine($"position: {boundingBox.Left} {boundingBox.Top}");
                Console.WriteLine("Further information (if available):");
                celeb.Urls.ForEach(url =>
                {
                    Console.WriteLine(url);
                });
            });

            Console.WriteLine($"{recognizeCelebritiesResponse.UnrecognizedFaces.Count} face(s) were unrecognized.");
        }
Example #24
        private static void searchFace(Amazon.Rekognition.Model.Image image, AmazonRekognitionClient rekognitionClient)
        {
            String collectionId = "MyCollection";

            SearchFacesByImageRequest request = new SearchFacesByImageRequest()
            {
                CollectionId = collectionId,
                Image        = image
            };

            SearchFacesByImageResponse response = rekognitionClient.SearchFacesByImage(request);

            foreach (FaceMatch face in response.FaceMatches)
            {
                Console.WriteLine("FaceId: " + face.Face.FaceId + ", Similarity: " + face.Similarity);
            }
        }
Example #25
        private string CompareFaces(string strPersonName, MemoryStream msCapture, MemoryStream msFacePic)
        {
            // Credentials left blank in the original example; supply real ones via configuration.
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient("", "", Amazon.RegionEndpoint.USEast1);


            CompareFacesRequest req = new CompareFacesRequest();

            Amazon.Rekognition.Model.Image src = new Amazon.Rekognition.Model.Image();
            src.Bytes       = msCapture;
            req.SourceImage = src;


            Amazon.Rekognition.Model.Image trg = new Amazon.Rekognition.Model.Image();
            trg.Bytes = msFacePic;

            req.TargetImage = trg;
            try
            {
                CompareFacesResponse     compareFacesResult = rekognitionClient.CompareFaces(req);
                List <CompareFacesMatch> faceDetails        = compareFacesResult.FaceMatches;


                ComparedFace face = null;
                foreach (CompareFacesMatch match in faceDetails)
                {
                    face = match.Face;
                    BoundingBox position = face.BoundingBox;
                    System.Diagnostics.Debug.Write("Face at " + position.Left
                                                   + " " + position.Top
                                                   + " matches with " + match.Similarity
                                                   + "% similarity.");
                    if (match.Similarity > 75)
                    {
                        return(strPersonName);
                    }
                }
            }
            catch (Exception ex)
            {
                return("Fail");
            }


            return("Unknown");
        }
Example #26
        // snippet-start:[Rekognition.dotnetv3.DetectLabelsLocalFile]
        public static async Task Main()
        {
            string photo = "input.jpg";

            var image = new Amazon.Rekognition.Model.Image();

            try
            {
                using var fs = new FileStream(photo, FileMode.Open, FileAccess.Read);
                byte[] data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
            catch (Exception)
            {
                Console.WriteLine("Failed to load file " + photo);
                return;
            }

            var rekognitionClient = new AmazonRekognitionClient();

            var detectlabelsRequest = new DetectLabelsRequest
            {
                Image         = image,
                MaxLabels     = 10,
                MinConfidence = 77F,
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = await rekognitionClient.DetectLabelsAsync(detectlabelsRequest);

                Console.WriteLine($"Detected labels for {photo}");
                foreach (Label label in detectLabelsResponse.Labels)
                {
                    Console.WriteLine($"{label.Name}: {label.Confidence}");
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
Example #27
        public CompareFacesResponse CompareFaces(byte[] source, byte[] target)
        {
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(_credentials, Amazon.RegionEndpoint.USWest2);

            CompareFacesRequest cfr = new CompareFacesRequest();

            Amazon.Rekognition.Model.Image sourceImage = new Amazon.Rekognition.Model.Image();
            Amazon.Rekognition.Model.Image targetImage = new Amazon.Rekognition.Model.Image();

            var sourceStream = new MemoryStream(source);
            var targetStream = new MemoryStream(target);

            sourceImage.Bytes = sourceStream;
            targetImage.Bytes = targetStream;

            cfr.SourceImage = sourceImage;
            cfr.TargetImage = targetImage;

            return(rekoClient.CompareFaces(cfr));
        }
Example #28
        public async Task <FaceMatchResponse> CompareFacesAsync(string sourceImage, string targetImage)
        {
            // Convert the source image into a MemoryStream object
            var imageSource = new Amazon.Rekognition.Model.Image();

            imageSource.Bytes = _serviceUtils.ConvertImageToMemoryStream(sourceImage);

            // Convert the target image into a MemoryStream object
            var imageTarget = new Amazon.Rekognition.Model.Image();

            imageTarget.Bytes = _serviceUtils.ConvertImageToMemoryStream(targetImage);

            // Build the request object for AWS Rekognition.
            // The SimilarityThreshold property sets the similarity level required when comparing the images.
            var request = new CompareFacesRequest
            {
                SourceImage         = imageSource,
                TargetImage         = imageTarget,
                SimilarityThreshold = 80f
            };

            // Call the CompareFaces service
            var response = await _rekognitionClient.CompareFacesAsync(request);

            // Check whether the images produced any face matches
            var hasMatch = response.FaceMatches.Any();

            // If there was no match, return a not-found result
            if (!hasMatch)
            {
                return(new FaceMatchResponse(hasMatch, null, string.Empty));
            }

            // Using the source image and the returned match data, outline the face found in the image
            var fileName = _serviceUtils.Drawing(imageSource.Bytes, response.SourceImageFace);
            // Get the similarity percentage of the matched face
            var similarity = response.FaceMatches.FirstOrDefault().Similarity;

            // Return the result with the match details and the URL for viewing the image
            return(new FaceMatchResponse(hasMatch, similarity, fileName));
        }
Example #29
        static Amazon.Rekognition.Model.Image ToBytesStream(string filename)
        {
            var image = new Amazon.Rekognition.Model.Image();

            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                byte[] data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }

            return(image);
        }
Example #30
        public DetectTextResponse ImageToText()
        {
            Image image = new Image();

            using (FileStream fs = new FileStream(filePath, FileMode.Open, FileAccess.Read))
            {
                byte[] data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }

            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

            DetectTextRequest detectTextRequest = new DetectTextRequest()
            {
                Image = image,
            };

            DetectTextResponse detectTextResponse = rekognitionClient.DetectText(detectTextRequest);

            double width  = imgPictureFrame.Width;
            double height = imgPictureFrame.Height;

            foreach (TextDetection text in detectTextResponse.TextDetections)
            {
                bool isLine = text.Type == TextTypes.LINE;

                BoundingBox boundingBox = text.Geometry.BoundingBox;
                BindingBox  bindingBox  = new BindingBox(width * boundingBox.Width, height * boundingBox.Height, height * boundingBox.Top, width * boundingBox.Left, text.Id.ToString(), isLine);
                gContainer.Children.Add(bindingBox);
                bindingBoxes.Add(bindingBox);
            }

            return(detectTextResponse);
        }