Example No. 1
        public void DetectLabelsTest()
        {
            //Arrange
            var param = new DetectLabelParams()
            {
                BucketName    = "nvirginiadekanybucket",
                PhotoName     = "",
                PhotoVersion  = "1",
                MaxLabels     = 10,
                MinConfidence = 75F
            };

            //Act
            AmazonRekognition service = new AmazonRekognition(awsAccessKeyId, awsSecretAccessKey);
            var resp = service.DetectLabels(param);

            //Assert
            Assert.IsNotNull(resp); // basic sanity check on the response
        }
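The test above drives DetectLabels through the project's AmazonRekognition wrapper, whose internals are not shown. As a rough sketch, assuming the wrapper simply forwards its parameters to the AWS SDK for .NET, the equivalent direct call would look roughly like this (the class and method names below are illustrative, not part of the examples):

using System.Threading.Tasks;
using Amazon;
using Amazon.Rekognition;
using Amazon.Rekognition.Model;

public static class DetectLabelsSketch
{
    // Hypothetical stand-in for what the wrapper's DetectLabels call might forward to.
    public static async Task<DetectLabelsResponse> DetectLabelsAsync(
        string awsAccessKeyId, string awsSecretAccessKey,
        string bucketName, string photoName, string photoVersion,
        int maxLabels, float minConfidence)
    {
        using (var client = new AmazonRekognitionClient(awsAccessKeyId, awsSecretAccessKey, RegionEndpoint.USEast1))
        {
            var request = new DetectLabelsRequest
            {
                Image = new Image
                {
                    S3Object = new S3Object
                    {
                        Bucket  = bucketName,
                        Name    = photoName,
                        Version = photoVersion
                    }
                },
                MaxLabels     = maxLabels,
                MinConfidence = minConfidence
            };

            // each returned Label carries a Name and a Confidence score
            return await client.DetectLabelsAsync(request);
        }
    }
}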
Example No. 2
        public void DetectFacesTest()
        {
            //Arrange
            var param = new DetectFaceParams()
            {
                BucketName   = "nvirginiadekanybucket",
                PhotoName    = "steve",
                PhotoVersion = String.Empty
            };

            AmazonRekognition service = new AmazonRekognition(awsAccessKeyId, awsSecretAccessKey);

            //Act
            var  resp    = service.DetectFaces(param);
            bool hasItem = resp.FaceDetails.Any();

            //Assert
            Assert.IsTrue(hasItem);
        }
Example No. 3
        public void RecogniseTest()
        {
            //Arrange
            Stream stream = new MemoryStream(GetFileBytes());

            var req = new RecogniseParams()
            {
                PhotoName    = "steve",
                PhotoVersion = String.Empty,
                BucketName   = "nvirginiadekanybucket",
                RegEndpoint  = RegionEndpoint.USEast1,
                InputStream  = stream,
                ContentType  = "image/jpeg"
            };

            //Act
            AmazonRekognition service = new AmazonRekognition(awsAccessKeyId, awsSecretAccessKey);
            var resp = service.Recognise(req);

            //Assert
            Assert.AreEqual(System.Net.HttpStatusCode.OK, resp.HttpStatusCode);
        }
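GetFileBytes() is not shown in the example above; presumably it just loads the raw bytes of a local test image. A minimal stand-in inside the test class, assuming System.IO is imported and a JPEG fixture is copied to the test output directory (the file name below is made up), could be:

        // Hypothetical replacement for the GetFileBytes() helper used in RecogniseTest;
        // "steve.jpg" is an assumed fixture name, not part of the original example.
        private static byte[] GetFileBytes()
        {
            var path = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "steve.jpg");
            return File.ReadAllBytes(path);
        }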
Example No. 4
        //public IHttpActionResult Recognize([FromBody]String base64String)
        public IHttpActionResult Recognize(RecognizeParam param)
        {
            if (param == null || String.IsNullOrEmpty(param.Base64String))
            {
                return(BadRequest("Bad request: base64String parameter must have value."));
            }

            var stream     = new MemoryStream();
            var photoBytes = Convert.FromBase64String(param.Base64String);

            // re-encode the posted bytes as a JPEG into the upload stream
            using (var ms = new MemoryStream(photoBytes, 0, photoBytes.Length))
            using (Image image = Image.FromStream(ms, true))
            {
                image.Save(stream, ImageFormat.Jpeg);
            }

            // rewind so the subsequent upload reads the stream from the start
            stream.Position = 0;

            var awsAccessKeyId     = ConfigurationManager.AppSettings["awsAccessKeyId"];
            var awsSecretAccessKey = ConfigurationManager.AppSettings["awsSecretAccessKey"];
            var bucketName         = ConfigurationManager.AppSettings["bucketName"];
            // sortable timestamp avoids culture-specific separators (slashes, colons) in the S3 key
            var photoName          = $"{DateTime.Now:yyyyMMddHHmmss}_{Guid.NewGuid()}";

            var req = new RecogniseParams()
            {
                PhotoName    = photoName,
                PhotoVersion = "1",
                BucketName   = bucketName,
                RegEndpoint  = RegionEndpoint.USEast1,
                InputStream  = stream,
                ContentType  = "image/jpeg"
            };

            AmazonRekognition service = new AmazonRekognition(awsAccessKeyId, awsSecretAccessKey);
            var resp = service.Recognise(req);

            return(Ok(resp));
        }
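One practical note on the controller above: browsers often post images as a data URI ("data:image/jpeg;base64,..."), which Convert.FromBase64String rejects. If that applies, a small helper along these lines (hypothetical, not part of the original code) can strip the prefix before decoding:

using System;

internal static class Base64Image
{
    // Hypothetical helper: strips an optional "data:<mime>;base64," prefix so the
    // remainder can safely be passed to Convert.FromBase64String.
    public static byte[] Decode(string base64String)
    {
        var commaIndex = base64String.IndexOf(',');
        if (base64String.StartsWith("data:", StringComparison.OrdinalIgnoreCase) && commaIndex >= 0)
        {
            base64String = base64String.Substring(commaIndex + 1);
        }

        return Convert.FromBase64String(base64String);
    }
}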
Example No. 5
        // Facial recognition scan: compares the user's stored reference photo
        // against the freshly captured target image, then records facial attributes.
        private async Task FacialRecognitionScan(ApplicationUser user, UsersInGymDetail currentFacilityDetail)
        {
            // Similarity threshold for accepting a face match, plus the source and
            // target image keys in the S3 bucket, derived from the logged-in user.
            float  similarityThreshold = 70F;
            string photo       = $"{user.FirstName}_{user.Id}.jpg";
            string targetImage = $"{user.FirstName}_{user.Id}_Target.jpg";

            try
            {
                // create image objects
                Image imageSource = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = photo,
                        Bucket = bucket
                    },
                };
                Image imageTarget = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = targetImage,
                        Bucket = bucket
                    },
                };
                // create a compare face request object
                CompareFacesRequest compareFacesRequest = new CompareFacesRequest()
                {
                    SourceImage         = imageSource,
                    TargetImage         = imageTarget,
                    SimilarityThreshold = similarityThreshold
                };

                // compare the stored reference face against the scanned target image
                CompareFacesResponse compareFacesResponse = await AmazonRekognition.CompareFacesAsync(compareFacesRequest);

                // evaluate the matches returned by Rekognition
                foreach (CompareFacesMatch match in compareFacesResponse.FaceMatches)
                {
                    ComparedFace face = match.Face;
                    // accept the scan only when the similarity score exceeds 90
                    if (match.Similarity > 90)
                    {
                        // record the successful camera scan for this user
                        user.IsCameraScanSuccessful = true;
                    }
                    }
                    else
                    {
                        ViewBag.MatchResult = "Facial Match Failed!";
                    }
                }
            }
            catch (Exception e)
            {
                _logger.LogInformation(e.Message);
            }

            // fetch facial details from the target image to display in the view
            DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
            {
                Image = new Image()
                {
                    S3Object = new S3Object()
                    {
                        Name   = targetImage,
                        Bucket = bucket
                    },
                },
                // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
                Attributes = new List <String>()
                {
                    "ALL"
                }
            };

            try
            {
                DetectFacesResponse detectFacesResponse = await AmazonRekognition.DetectFacesAsync(detectFacesRequest);

                bool hasAll = detectFacesRequest.Attributes.Contains("ALL");
                foreach (FaceDetail face in detectFacesResponse.FaceDetails)
                {
                    // the attributes below are only populated when "ALL" was requested,
                    // so persist them to the database in that case only
                    if (hasAll)
                    {
                        currentFacilityDetail.IsSmiling    = face.Smile.Value;
                        currentFacilityDetail.Gender       = face.Gender.Value.ToString();
                        currentFacilityDetail.AgeRangeLow  = face.AgeRange.Low;
                        currentFacilityDetail.AgeRangeHigh = face.AgeRange.High;
                    }
                }
            }
            catch (Exception e)
            {
                _logger.LogInformation(e.Message);
            }
        }
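To isolate the compare step shown above, here is a hedged sketch of the same flow against the SDK's IAmazonRekognition client; the helper name and signature are invented, but it mirrors the split between the 70F request threshold and the stricter 90 acceptance check used in the example:

using System.Linq;
using System.Threading.Tasks;
using Amazon.Rekognition;
using Amazon.Rekognition.Model;

public static class FaceMatchSketch
{
    // Hypothetical helper: returns true when any face match meets the acceptance threshold.
    public static async Task<bool> IsSamePersonAsync(
        IAmazonRekognition rekognition,
        string bucket, string sourceKey, string targetKey,
        float acceptThreshold = 90F)
    {
        var request = new CompareFacesRequest
        {
            SourceImage = new Image { S3Object = new S3Object { Bucket = bucket, Name = sourceKey } },
            TargetImage = new Image { S3Object = new S3Object { Bucket = bucket, Name = targetKey } },
            // SimilarityThreshold only filters which matches Rekognition returns;
            // the acceptance decision below applies a stricter cut-off.
            SimilarityThreshold = 70F
        };

        CompareFacesResponse response = await rekognition.CompareFacesAsync(request);

        return response.FaceMatches.Any(match => match.Similarity >= acceptThreshold);
    }
}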