Example #1
 public StreamManager(
     String spName,
     String kvStreamArn,
     String kdStreamArn,
     String iamRoleArn,
     String collId,
     float threshold)
 {
     streamProcessorName   = spName;
     kinesisVideoStreamArn = kvStreamArn;
     kinesisDataStreamArn  = kdStreamArn;
     roleArn           = iamRoleArn;
     collectionId      = collId;
     matchThreshold    = threshold;
     rekognitionClient = new AmazonRekognitionClient(MyAWSConfigs.faceCollectionRegion);
 }
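The snippet above is only a constructor; the fields it assigns are declared elsewhere in the class. A minimal sketch of those declarations, with types inferred from the constructor parameters (an assumption, since the original class body is not shown), might look like this:

 public class StreamManager
 {
     private String streamProcessorName;
     private String kinesisVideoStreamArn;
     private String kinesisDataStreamArn;
     private String roleArn;
     private String collectionId;
     private float matchThreshold;
     private AmazonRekognitionClient rekognitionClient;

     // ... constructor as shown above ...
 }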
Example #2
    public static void Example()
    {
        String photo = "moviestars.jpg";

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        RecognizeCelebritiesRequest recognizeCelebritiesRequest = new RecognizeCelebritiesRequest();

        Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
        byte[] data = null;
        try
        {
            using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
            {
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
            }
        }
        catch (Exception)
        {
            Console.WriteLine("Failed to load file " + photo);
            return;
        }

        img.Bytes = new MemoryStream(data);
        recognizeCelebritiesRequest.Image = img;

        Console.WriteLine("Looking for celebrities in image " + photo + "\n");

        RecognizeCelebritiesResponse recognizeCelebritiesResponse = rekognitionClient.RecognizeCelebrities(recognizeCelebritiesRequest);

        Console.WriteLine(recognizeCelebritiesResponse.CelebrityFaces.Count + " celebrity(s) were recognized.\n");
        foreach (Celebrity celebrity in recognizeCelebritiesResponse.CelebrityFaces)
        {
            Console.WriteLine("Celebrity recognized: " + celebrity.Name);
            Console.WriteLine("Celebrity ID: " + celebrity.Id);
            BoundingBox boundingBox = celebrity.Face.BoundingBox;
            Console.WriteLine("position: " +
                              boundingBox.Left + " " + boundingBox.Top);
            Console.WriteLine("Further information (if available):");
            foreach (String url in celebrity.Urls)
            {
                Console.WriteLine(url);
            }
        }
        Console.WriteLine(recognizeCelebritiesResponse.UnrecognizedFaces.Count + " face(s) were unrecognized.");
    }
        /// <summary>
        /// This method is called for every Lambda invocation. This method takes in an S3 event object and can be used
        /// to respond to S3 notifications.
        /// </summary>
        /// <param name="evnt"></param>
        /// <param name="context"></param>
        /// <returns></returns>
        public async Task <string> FunctionHandler(S3Event evnt, ILambdaContext context)
        {
            var s3Event = evnt.Records?[0].S3;

            if (s3Event == null)
            {
                return(null);
            }

            try
            {
                AmazonRekognitionClient client = new AmazonRekognitionClient(RegionEndpoint.USEast1);
                // get the file's name from event
                string            imageTitle = s3Event.Object.Key;
                DetectTextRequest q          = new DetectTextRequest();
                // get the file from S3
                Image img = new Image()
                {
                    S3Object = getObject(imageTitle)
                };
                q.Image = img;
                // detect text from the image
                // Await the async call instead of blocking on Task.Wait()/Task.Result.
                DetectTextResponse r = await client.DetectTextAsync(q);
                string             plate = "";
                // filter recognized text
                foreach (TextDetection t in r.TextDetections)
                {
                    if (isCapitaLettersNumbers(t.DetectedText))
                    {
                        plate = t.DetectedText;
                        //send message to plate's owner
                        sendMessage(plate);
                    }
                }
            }
            catch (Exception e)
            {
                context.Logger.LogLine($"Error getting object {s3Event.Object.Key} from bucket {s3Event.Bucket.Name}. Make sure they exist and your bucket is in the same region as this function.");
                context.Logger.LogLine(e.Message);
                context.Logger.LogLine(e.StackTrace);
                throw;
            }

            return("Lamda has returned");
        }
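The handler above calls three helpers that the snippet does not include (getObject, isCapitaLettersNumbers, and sendMessage). A hypothetical sketch of what they might look like, assuming a fixed source bucket, a capital-letters-and-digits pattern, and a console placeholder for the notification:

        // Hypothetical helpers; names match the calls above, bodies are assumptions.
        private static Amazon.Rekognition.Model.S3Object getObject(string key)
        {
            return new Amazon.Rekognition.Model.S3Object
            {
                Bucket = "my-plate-images-bucket", // assumed bucket name
                Name   = key
            };
        }

        private static bool isCapitaLettersNumbers(string text)
        {
            // Assumption: a license plate is a run of capital letters and digits.
            return System.Text.RegularExpressions.Regex.IsMatch(text, "^[A-Z0-9]+$");
        }

        private static void sendMessage(string plate)
        {
            // Placeholder: the original presumably notifies the plate's owner (for example via Amazon SNS).
            Console.WriteLine($"Notifying owner of plate {plate}");
        }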
        public async Task <ModerationResponse> AnalyzeImage(MemoryStream imageStream)
        {
            using (var client = new AmazonRekognitionClient(Endpoint))
            {
                var request = new DetectModerationLabelsRequest()
                {
                    Image = new Image()
                    {
                        Bytes = imageStream
                    },
                    MinConfidence = 0 // Set to 0 so that all labels and their scores are returned.
                };

                var awsResponse = await client.DetectModerationLabelsAsync(request);

                var response = new ModerationResponse();

                if (awsResponse.HttpStatusCode != System.Net.HttpStatusCode.OK)
                {
                    response.Pass             = false;
                    response.ModerationScores = new[]
                    {
                        new ModerationScore()
                        {
                            Category = $"ServerError:{awsResponse.HttpStatusCode}",
                            Score    = 100
                        }
                    };
                }
                else
                {
                    if (awsResponse.ModerationLabels.Any(s => s.Confidence >= 50))
                    {
                        response.Pass = false;
                    }
                    else
                    {
                        response.Pass = true;
                    }

                    response.ModerationScores = awsResponse.ModerationLabels
                                                .Select(m => new ModerationScore()
                    {
                        Category = $"{m.ParentName}:{m.Name}",
                        Score    = m.Confidence
                    });
                }

                return(response);
            }
        }
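The ModerationResponse and ModerationScore types returned above are not part of the Rekognition SDK. The shapes below are an assumption, inferred only from the properties the method sets (System.Collections.Generic and System.Linq are assumed to be imported):

        public class ModerationResponse
        {
            public bool Pass { get; set; }
            public IEnumerable<ModerationScore> ModerationScores { get; set; }
        }

        public class ModerationScore
        {
            public string Category { get; set; }
            public float Score { get; set; }
        }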
Example #5
        public List <FaceRecord> Recognize(string collectionId, Amazon.Rekognition.Model.Image image)
        {
            // 1. Detect faces in the input image and add them to the specified collection.
            AmazonRekognitionClient rekognitionClient = AmazonClient.GetInstance();

            IndexFacesRequest indexFacesRequest = new IndexFacesRequest()
            {
                Image               = image,
                CollectionId        = collectionId,
                DetectionAttributes = new List <String>()
                {
                    "DEFAULT"
                }
            };

            IndexFacesResponse indexFacesResponse = rekognitionClient.IndexFaces(indexFacesRequest);

            // 2. Search the collection for each detected face.
            SearchFacesResponse searchFacesResponse = null;

            List <FaceRecord> matchedFaces = new List <FaceRecord>();

            if (null != indexFacesResponse && null != indexFacesResponse.FaceRecords && 0 != indexFacesResponse.FaceRecords.Count)
            {
                foreach (FaceRecord face in indexFacesResponse.FaceRecords)
                {
                    searchFacesResponse = rekognitionClient.SearchFaces(new SearchFacesRequest
                    {
                        CollectionId       = collectionId,
                        FaceId             = face.Face.FaceId,
                        FaceMatchThreshold = 70F,
                        MaxFaces           = 2
                    });

                    if (searchFacesResponse.FaceMatches != null && searchFacesResponse.FaceMatches.Count != 0)
                    {
                        matchedFaces.Add(face);
                    }
                }

                // Remove the newly indexed faces from the collection.

                _collectionService.RemoveFacesFromCollection(collectionId, indexFacesResponse.FaceRecords.Select(x => x.Face.FaceId).ToList());
            }

            return(matchedFaces);
        }
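The AmazonClient.GetInstance() helper used above is not shown. A minimal singleton sketch, assuming the default credential chain and a placeholder region:

        public static class AmazonClient
        {
            private static AmazonRekognitionClient _instance;

            public static AmazonRekognitionClient GetInstance()
            {
                // Assumption: one shared client, default credentials, placeholder region.
                return _instance ?? (_instance = new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1));
            }
        }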
Example #6
        private static void searchFace(Amazon.Rekognition.Model.Image image, AmazonRekognitionClient rekognitionClient)
        {
            String collectionId = "MyCollection";

            SearchFacesByImageRequest request = new SearchFacesByImageRequest()
            {
                CollectionId = collectionId,
                Image        = image
            };

            SearchFacesByImageResponse response = rekognitionClient.SearchFacesByImage(request);

            foreach (FaceMatch face in response.FaceMatches)
            {
                Console.WriteLine("FaceId: " + face.Face.FaceId + ", Similarity: " + face.Similarity);
            }
        }
Example #7
        private void btn_DetectAdultContent_Click(object sender, EventArgs e)
        {
            txt_adultContent.Text = "";
            var source  = ToBytesStream($"{sourceAdultContent}");
            var client  = new AmazonRekognitionClient();
            var request = new DetectModerationLabelsRequest
            {
                Image = source
            };
            var response = client.DetectModerationLabels(request);

            txt_adultContent.Text = ($"Found {response.ModerationLabels.Count} labels: \n");
            foreach (var label in response.ModerationLabels)
            {
                txt_adultContent.Text += $"- {label.Name}\n";
            }
        }
Example #8
        // snippet-start:[Rekognition.dotnetv3.DeleteCollectionExample]
        public static async Task Main()
        {
            var rekognitionClient = new AmazonRekognitionClient();

            string collectionId = "MyCollection";

            Console.WriteLine("Deleting collection: " + collectionId);

            var deleteCollectionRequest = new DeleteCollectionRequest()
            {
                CollectionId = collectionId,
            };

            var deleteCollectionResponse = await rekognitionClient.DeleteCollectionAsync(deleteCollectionRequest);

            Console.WriteLine($"{collectionId}: {deleteCollectionResponse.StatusCode}");
        }
Example #9
 public static IAsyncOperation <string> GetFaceDetails(string base64, string AccessKey, string SecretKey)
 {
     return(Task.Run <string>(async() =>
     {
         byte[] imageBytes;
         try
         {
             base64 = base64.Substring(base64.IndexOf(',') + 1).Trim('\0');
             imageBytes = System.Convert.FromBase64String(base64);
         }
         catch (Exception e) {
             return e.Message;
         }
         string sJSONResponse = "";
         AWSCredentials credentials;
         try
         {
             credentials = new BasicAWSCredentials(AccessKey, SecretKey);
         }
         catch (Exception e)
         {
             throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                                             + "Please make sure that your credentials file is at the correct "
                                             + "location (/Users/<userid>/.aws/credentials), and is in a valid format.", e);
         }
         DetectFacesRequest request = new DetectFacesRequest {
             Attributes = new List <string>(new string[] { "ALL" })
         };
         DetectFacesResponse result = null;
         request.Image = new Image {
             Bytes = new MemoryStream(imageBytes, 0, imageBytes.Length)
         };
         AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(credentials, RegionEndpoint.USWest2);
         try
         {
             result = await rekognitionClient.DetectFacesAsync(request);
         }
          catch (AmazonRekognitionException)
          {
              // Rethrow, preserving the original stack trace.
              throw;
          }
          // Serialize the detected face details to JSON and return them.
          sJSONResponse = JsonConvert.SerializeObject(result.FaceDetails);
         return sJSONResponse;
     }).AsAsyncOperation());
 }
Example #10
        public static string Create(string _collectionId)
        {
            if (GetFaceCollectionList().Contains(_collectionId))
            {
                return("");
            }

            string collectionId  = _collectionId;
            string collectionArn = "";

            try
            {
                using (rekognitionClient = new AmazonRekognitionClient(collectionRegion))
                {
                    CreatingCollection();
                }

                void CreatingCollection()
                {
                    Console.WriteLine("Creating collection: " + collectionId);

                    CreateCollectionRequest createCollectionRequest = new CreateCollectionRequest()
                    {
                        CollectionId = collectionId
                    };

                    CreateCollectionResponse createCollectionResponse = rekognitionClient.CreateCollection(createCollectionRequest);

                    collectionArn = createCollectionResponse.CollectionArn;

                    Console.WriteLine("Status code : " + createCollectionResponse.StatusCode);
                }
            }
            catch (AmazonRekognitionException e)
            {
                Console.WriteLine("AmazonRekognitionException: " + e);
                collectionArn = "error";
            }
            catch (Exception e)
            {
                Console.WriteLine("Error: " + e);
                collectionArn = "error";
            }

            return(collectionArn);
        }
        /// <summary>
        /// Get Face Matches for Celebrities from Amazon service
        /// </summary>
        /// <param name="photo"></param>
        /// <returns></returns>
        public async Task <ResponseDTO> GetMatches(IFormFile photo)
        {
            try
            {
                AmazonRekognitionClient     rekognitionClient           = new AmazonRekognitionClient(_awsCredentials.Value.Id, _awsCredentials.Value.Key, Amazon.RegionEndpoint.USEast2);
                RecognizeCelebritiesRequest recognizeCelebritiesRequest = new RecognizeCelebritiesRequest();
                Image  img  = new Image();
                byte[] data = null;
                try
                {
                    if (photo.Length > 0)
                    {
                        using (var ms = new MemoryStream())
                        {
                            photo.CopyTo(ms);
                            data = ms.ToArray();
                        }
                    }
                }
                catch (Exception)
                {
                    // Rethrow, preserving the original stack trace.
                    throw;
                }
                img.Bytes = new MemoryStream(data);
                recognizeCelebritiesRequest.Image = img;
                RecognizeCelebritiesResponse recognizeCelebritiesResponse = await rekognitionClient.RecognizeCelebritiesAsync(recognizeCelebritiesRequest);

                foreach (Celebrity celebrity in recognizeCelebritiesResponse.CelebrityFaces)
                {
                    CelebrityDetail.Name = celebrity.Name;
                    foreach (string url in celebrity.Urls)
                    {
                        CelebrityDetail.Url = url;
                    }
                    CelebrityDetails.Add(CelebrityDetail);
                    CelebrityDetail = new CelebrityDetail();
                }
                ResponseDetail.CelebrityDetails = CelebrityDetails;
                ResponseDetail.UnMatchCount     = recognizeCelebritiesResponse.UnrecognizedFaces.Count;
                return(ResponseDetail);
            }
            catch (Exception)
            {
                // Rethrow, preserving the original stack trace.
                throw;
            }
        }
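The CelebrityDetail and ResponseDTO types used above are application-specific and not shown; the sketch below is an assumption based only on the properties the method assigns:

        public class CelebrityDetail
        {
            public string Name { get; set; }
            public string Url { get; set; }
        }

        public class ResponseDTO
        {
            public List<CelebrityDetail> CelebrityDetails { get; set; }
            public int UnMatchCount { get; set; }
        }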
Example #12
        static void IdentifyFaces(string filename)
        {
            // Using USWest2, not the default region
            AmazonRekognitionClient rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USWest2);

            DetectFacesRequest dfr = new DetectFacesRequest();

            // Request needs image bytes, so read them and add them to the request
            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
            byte[] data = null;
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
            }
            img.Bytes = new MemoryStream(data);
            dfr.Image = img;
            var outcome = rekoClient.DetectFaces(dfr);

            // Load a bitmap to modify with face bounding box rectangles
            System.Drawing.Bitmap facesHighlighted = new System.Drawing.Bitmap(filename);
            Pen pen = new Pen(Color.Black, 3);

            // Create a graphics context
            using (var graphics = Graphics.FromImage(facesHighlighted))
            {
                foreach (var fd in outcome.FaceDetails)
                {
                    // Get the bounding box
                    BoundingBox bb = fd.BoundingBox;
                    Console.WriteLine("Bounding box = (" + fd.BoundingBox.Left + ", " + fd.BoundingBox.Top + ", " +
                                      fd.BoundingBox.Height + ", " + fd.BoundingBox.Width + ")");
                    // Draw the rectangle using the bounding box values
                    // They are ratios of the overall image size, so scale them to the picture dimensions
                    graphics.DrawRectangle(pen, x: facesHighlighted.Width * bb.Left,
                                           y: facesHighlighted.Height * bb.Top,
                                           width: facesHighlighted.Width * bb.Width,
                                           height: facesHighlighted.Height * bb.Height);
                }
            }
            // Save the image with highlights as PNG
            string fileout = filename.Replace(Path.GetExtension(filename), "_faces.png");

            facesHighlighted.Save(fileout, System.Drawing.Imaging.ImageFormat.Png);
            Console.WriteLine(">>> Faces highlighted in file " + fileout);
        }
        // snippet-start:[Rekognition.dotnetv3.CelebritiesInImageExample]
        public static async Task Main(string[] args)
        {
            string photo = "moviestars.jpg";

            var rekognitionClient = new AmazonRekognitionClient();

            var recognizeCelebritiesRequest = new RecognizeCelebritiesRequest();

            var img = new Amazon.Rekognition.Model.Image();

            byte[] data = null;
            try
            {
                using var fs = new FileStream(photo, FileMode.Open, FileAccess.Read);
                data         = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
            }
            catch (Exception)
            {
                Console.WriteLine($"Failed to load file {photo}");
                return;
            }

            img.Bytes = new MemoryStream(data);
            recognizeCelebritiesRequest.Image = img;

            Console.WriteLine($"Looking for celebrities in image {photo}\n");

            var recognizeCelebritiesResponse = await rekognitionClient.RecognizeCelebritiesAsync(recognizeCelebritiesRequest);

            Console.WriteLine($"{recognizeCelebritiesResponse.CelebrityFaces.Count} celebrity(s) were recognized.\n");
            recognizeCelebritiesResponse.CelebrityFaces.ForEach(celeb =>
            {
                Console.WriteLine($"Celebrity recognized: {celeb.Name}");
                Console.WriteLine($"Celebrity ID: {celeb.Id}");
                BoundingBox boundingBox = celeb.Face.BoundingBox;
                Console.WriteLine($"position: {boundingBox.Left} {boundingBox.Top}");
                Console.WriteLine("Further information (if available):");
                celeb.Urls.ForEach(url =>
                {
                    Console.WriteLine(url);
                });
            });

            Console.WriteLine($"{recognizeCelebritiesResponse.UnrecognizedFaces.Count} face(s) were unrecognized.");
        }
Example #14
        public void RekognitionDeleteFaces()
        {
            #region to-delete-a-face-1482182799377

            var client   = new AmazonRekognitionClient();
            var response = client.DeleteFaces(new DeleteFacesRequest
            {
                CollectionId = "myphotos",
                FaceIds      = new List <string> {
                    "ff43d742-0c13-5d16-a3e8-03d3f58e980b"
                }
            });

            List <string> deletedFaces = response.DeletedFaces;

            #endregion
        }
Example #15
    public static void Example()
    {
        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        String collectionId = "MyCollection";

        Console.WriteLine("Deleting collection: " + collectionId);

        DeleteCollectionRequest deleteCollectionRequest = new DeleteCollectionRequest()
        {
            CollectionId = collectionId
        };

        DeleteCollectionResponse deleteCollectionResponse = rekognitionClient.DeleteCollection(deleteCollectionRequest);

        Console.WriteLine(collectionId + ": " + deleteCollectionResponse.StatusCode);
    }
Example #16
        private string CompareFaces(string strPersonName, MemoryStream msCapture, MemoryStream msFacePic)
        {
            AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient("", "", Amazon.RegionEndpoint.USEast1);


            CompareFacesRequest req = new CompareFacesRequest();

            Amazon.Rekognition.Model.Image src = new Amazon.Rekognition.Model.Image();
            src.Bytes       = msCapture;
            req.SourceImage = src;


            Amazon.Rekognition.Model.Image trg = new Amazon.Rekognition.Model.Image();
            trg.Bytes = msFacePic;

            req.TargetImage = trg;
            try
            {
                CompareFacesResponse     compareFacesResult = rekognitionClient.CompareFaces(req);
                List <CompareFacesMatch> faceDetails        = compareFacesResult.FaceMatches;


                ComparedFace face = null;
                foreach (CompareFacesMatch match in faceDetails)
                {
                    face = match.Face;
                    BoundingBox position = face.BoundingBox;
                    System.Diagnostics.Debug.Write("Face at " + position.Left
                                                   + " " + position.Top
                                                   + " matches with " + face.Confidence
                                                   + "% confidence.");
                    if (face.Confidence > 75)
                    {
                        return(strPersonName);
                    }
                }
            }
            catch (Exception ex)
            {
                return("Fail");
            }


            return("Unknown");
        }
Example #17
    public static void Example()
    {
        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        String collectionId = "MyCollection";

        Console.WriteLine("Creating collection: " + collectionId);

        CreateCollectionRequest createCollectionRequest = new CreateCollectionRequest()
        {
            CollectionId = collectionId
        };

        CreateCollectionResponse createCollectionResponse = rekognitionClient.CreateCollection(createCollectionRequest);

        Console.WriteLine("CollectionArn : " + createCollectionResponse.CollectionArn);
        Console.WriteLine("Status code : " + createCollectionResponse.StatusCode);
    }
 public void SearchFaces(string fileName, string bucketName)
 {
     IAmazonRekognition rekoClient = new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1);
     var response = rekoClient.SearchFacesByImage(new SearchFacesByImageRequest
     {
         CollectionId       = "myphotos",
         FaceMatchThreshold = 95,
         Image = new Amazon.Rekognition.Model.Image
         {
             S3Object = new Amazon.Rekognition.Model.S3Object
             {
                 Bucket = bucketName,
                 Name   = fileName,
             }
         },
         MaxFaces = 5
     });
 }
Example #19
        // snippet-start:[Rekognition.dotnetv3.CreateCollectionExample]
        public static async Task Main()
        {
            var rekognitionClient = new AmazonRekognitionClient();

            string collectionId = "MyCollection";

            Console.WriteLine("Creating collection: " + collectionId);

            var createCollectionRequest = new CreateCollectionRequest
            {
                CollectionId = collectionId,
            };

            CreateCollectionResponse createCollectionResponse = await rekognitionClient.CreateCollectionAsync(createCollectionRequest);

            Console.WriteLine($"CollectionArn : {createCollectionResponse.CollectionArn}");
            Console.WriteLine($"Status code : {createCollectionResponse.StatusCode}");
        }
Example #20
        private async Task <object> GetImageLabels(string fileName, AmazonRekognitionClient rekognitionClient)
        {
            var detectResponses = await rekognitionClient.DetectLabelsAsync(new DetectLabelsRequest
            {
                MinConfidence = 50,

                Image = new Image
                {
                    S3Object = new Amazon.Rekognition.Model.S3Object
                    {
                        Bucket = bucketName,
                        Name   = fileName
                    }
                }
            });

            return(detectResponses.Labels);
        }
Example #21
        public void RekognitionSearchFaces()
        {
            #region to-delete-a-face-1482182799377

            var client   = new AmazonRekognitionClient();
            var response = client.SearchFaces(new SearchFacesRequest
            {
                CollectionId       = "myphotos",
                FaceId             = "70008e50-75e4-55d0-8e80-363fb73b3a14",
                FaceMatchThreshold = 90,
                MaxFaces           = 10
            });

            List <FaceMatch> faceMatches    = response.FaceMatches;
            string           searchedFaceId = response.SearchedFaceId;

            #endregion
        }
Example #22
        // snippet-start:[Rekognition.dotnetv3.DetectLabelsLocalFile]
        public static async Task Main()
        {
            string photo = "input.jpg";

            var image = new Amazon.Rekognition.Model.Image();

            try
            {
                using var fs = new FileStream(photo, FileMode.Open, FileAccess.Read);
                byte[] data = null;
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
            catch (Exception)
            {
                Console.WriteLine("Failed to load file " + photo);
                return;
            }

            var rekognitionClient = new AmazonRekognitionClient();

            var detectlabelsRequest = new DetectLabelsRequest
            {
                Image         = image,
                MaxLabels     = 10,
                MinConfidence = 77F,
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = await rekognitionClient.DetectLabelsAsync(detectlabelsRequest);

                Console.WriteLine($"Detected labels for {photo}");
                foreach (Label label in detectLabelsResponse.Labels)
                {
                    Console.WriteLine($"{label.Name}: {label.Confidence}");
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
Example #23
    public static void Example()
    {
        String photo = "input.jpg";

        Amazon.Rekognition.Model.Image image = new Amazon.Rekognition.Model.Image();
        try
        {
            using (FileStream fs = new FileStream(photo, FileMode.Open, FileAccess.Read))
            {
                byte[] data = null;
                data = new byte[fs.Length];
                fs.Read(data, 0, (int)fs.Length);
                image.Bytes = new MemoryStream(data);
            }
        }
        catch (Exception)
        {
            Console.WriteLine("Failed to load file " + photo);
            return;
        }

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

        DetectLabelsRequest detectlabelsRequest = new DetectLabelsRequest()
        {
            Image         = image,
            MaxLabels     = 10,
            MinConfidence = 77F
        };

        try
        {
            DetectLabelsResponse detectLabelsResponse = rekognitionClient.DetectLabels(detectlabelsRequest);
            Console.WriteLine("Detected labels for " + photo);
            foreach (Label label in detectLabelsResponse.Labels)
            {
                Console.WriteLine("{0}: {1}", label.Name, label.Confidence);
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e.Message);
        }
    }
Example #24
        static void Main(string[] args)
        {
            //string filePath = "banner.png";
            string filePath = "banner1.jpg";
            Image  image    = new Image();

            try
            {
                using (FileStream fs = new FileStream(filePath, FileMode.Open, FileAccess.Read))
                {
                    byte[] data = null;
                    data = new byte[fs.Length];
                    fs.Read(data, 0, (int)fs.Length);
                    image.Bytes = new MemoryStream(data);
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                Console.WriteLine("Failed to load file " + filePath);
            }

            AmazonRekognitionClient client  = new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1);
            DetectTextRequest       request = new DetectTextRequest
            {
                Image = image
            };

            try
            {
                DetectTextResponse response = client.DetectText(request);
                foreach (var text in response.TextDetections)
                {
                    Console.WriteLine(text.DetectedText);
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }

            Console.ReadLine();
        }
Example #25
        public bool AddCollection(string collectionId)
        {
            try
            {
                AmazonRekognitionClient rekognitionClient = AmazonClient.GetInstance();

                CreateCollectionRequest createCollectionRequest = new CreateCollectionRequest()
                {
                    CollectionId = collectionId
                };

                CreateCollectionResponse createCollectionResponse = rekognitionClient.CreateCollection(createCollectionRequest);
                return(true);
            }
            catch (Exception ex)
            {
                return(false);
            }
        }
        public static void Example()
        {
            var rekognitionClient = new AmazonRekognitionClient(RegionEndpoint.APSouth1);

            var collectionId = "BhavCollection";

            Console.WriteLine("Creating collection: " + collectionId);

            var createCollectionRequest = new CreateCollectionRequest()
            {
                CollectionId = collectionId
            };

            // CreateCollectionAsync returns a Task; accessing .Result below blocks until it completes.
            var createCollectionResponse =
                rekognitionClient.CreateCollectionAsync(createCollectionRequest);

            Console.WriteLine("CollectionArn : " + createCollectionResponse.Result.CollectionArn);
            Console.WriteLine("Status code : " + createCollectionResponse.Result.StatusCode);
        }
Example #27
        // Uses the Amazon Rekognition service to detect labels within an image.
        public async Task <List <WorkItem> > DetectLabels(string bucketName, string photo)
        {
            var rekognitionClient   = new AmazonRekognitionClient(RegionEndpoint.USWest2);
            var labelList           = new List <WorkItem>();
            var detectlabelsRequest = new DetectLabelsRequest
            {
                Image = new Image()
                {
                    S3Object = new Amazon.Rekognition.Model.S3Object()
                    {
                        Name   = photo,
                        Bucket = bucketName,
                    },
                },
                MaxLabels     = 10,
                MinConfidence = 75F,
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = await rekognitionClient.DetectLabelsAsync(detectlabelsRequest);

                Console.WriteLine("Detected labels for " + photo);
                WorkItem workItem;
                foreach (Label label in detectLabelsResponse.Labels)
                {
                    workItem            = new WorkItem();
                    workItem.Key        = photo;
                    workItem.Confidence = label.Confidence.ToString();
                    workItem.Name       = label.Name;
                    labelList.Add(workItem);
                }

                return(labelList);
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }

            return(null);
        }
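The WorkItem type returned above is application-specific; a minimal sketch of its assumed shape, based on the properties the method sets:

        public class WorkItem
        {
            public string Key { get; set; }
            public string Name { get; set; }
            public string Confidence { get; set; }
        }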
Example #28
        static void Main(string[] args)
        {
            String bucket = ExtFunc.Read("\nAmazon S3 Bucket-Name with a picture:");
            String photo  = ExtFunc.Read("\nPicture Filename in Bucket:");

            AmazonRekognitionClient rekognitionClient =
                new AmazonRekognitionClient(Amazon.RegionEndpoint.USEast1);

            var analyzed_image = new Image()
            {
                S3Object = new Amazon.Rekognition.Model.S3Object()
                {
                    Name   = photo,
                    Bucket = bucket
                },
            };

            DetectLabelsRequest detectlabelsRequest = new DetectLabelsRequest()
            {
                Image         = analyzed_image,
                MaxLabels     = 10,
                MinConfidence = 75F
            };

            try
            {
                DetectLabelsResponse detectLabelsResponse = rekognitionClient.DetectLabels(detectlabelsRequest);
                Console.WriteLine("Detected labels for " + photo);

                foreach (Label label in detectLabelsResponse.Labels)
                {
                    Console.WriteLine("{0}: {1}", label.Name, label.Confidence);
                }

                var tempUrl = GetS3Url(RegionEndpoint.USEast1, photo, bucket, 1);
                System.Diagnostics.Process.Start(tempUrl);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
Example #29
        private static async Task <object> GetVideoLabelsResult(string fileName, AmazonRekognitionClient rekognitionClient)
        {
            var startRequest = new StartLabelDetectionRequest
            {
                MinConfidence = 50,
                Video         = new Video
                {
                    S3Object = new Amazon.Rekognition.Model.S3Object
                    {
                        Bucket = bucketName,
                        Name   = fileName
                    }
                },
                JobTag = "DetectingLabels"
            };

            var startLabelDetectionResult = await rekognitionClient.StartLabelDetectionAsync(startRequest);

            return(startLabelDetectionResult.JobId);
        }
Example #30
        public void RekognitionDetectFaces()
        {
            #region to-detect-faces-in-an-image-1481841782793

            var client   = new AmazonRekognitionClient();
            var response = client.DetectFaces(new DetectFacesRequest
            {
                Image = new Image {
                    S3Object = new S3Object {
                        Bucket = "mybucket",
                        Name   = "myphoto"
                    }
                }
            });

            List <FaceDetail> faceDetails           = response.FaceDetails;
            string            orientationCorrection = response.OrientationCorrection;

            #endregion
        }