/// <summary>
/// Lazily runs Rekognition text detection on the instance's image and returns
/// the extracted rectangles. The service response is memoized in
/// <c>_textResponse</c>, so the remote call happens at most once per instance.
/// </summary>
public async Task<IEnumerable<Rectangle>> ExtractTextAsync()
{
    if (_textResponse is null)
    {
        var request = new DetectTextRequest { Image = _rekognitionImage };
        _textResponse = await _client.DetectTextAsync(request);
    }

    return ExtractText();
}
/// <summary>
/// Sends a base64-encoded image to Rekognition text detection and logs every
/// detected text element (text, confidence, id, parent id, type).
/// </summary>
/// <param name="bs4">Base64-encoded image payload (e.g. JPEG/PNG bytes).</param>
/// <returns>A task callers can await to observe completion or failures.</returns>
public async Task SendImage(string bs4)
{
    // BUG FIX: the base64 payload must be *decoded* to raw image bytes.
    // Encoding.UTF8.GetBytes(bs4) would transmit the base64 text itself,
    // which Rekognition cannot parse as an image.
    DetectTextRequest detectTextRequest = new DetectTextRequest()
    {
        Image = new Image()
        {
            Bytes = new MemoryStream(Convert.FromBase64String(bs4))
        }
    };

    try
    {
        Debug.Log("starting detectText");
        DetectTextResponse detectTextResponse = await _rekClient.DetectTextAsync(detectTextRequest);
        Debug.Log("Response for Image: ");
        foreach (TextDetection text in detectTextResponse.TextDetections)
        {
            Debug.Log("Detected: " + text.DetectedText);
            Debug.Log("Confidence: " + text.Confidence);
            Debug.Log("Id : " + text.Id);
            Debug.Log("Parent Id: " + text.ParentId);
            Debug.Log("Type: " + text.Type);
        }
    }
    finally
    {
        // Logged even when the request faults, matching the original contract.
        Debug.Log("completed detectText");
    }
}
/// <summary>
/// Detects text lines in a bitmap via Rekognition. Bounding-box geometry is
/// written to the console for each detected line.
/// </summary>
/// <param name="b">The bitmap image to scan for text.</param>
/// <returns>The detected LINE-level strings; an empty list on failure.</returns>
public async Task <List <string> > TextDetectionAsync(Bitmap b)
{
    List<string> res = new List<string>();
    DetectTextRequest detectText = new DetectTextRequest()
    {
        Image = new Amazon.Rekognition.Model.Image()
        {
            Bytes = new MemoryStream(ImageToBytes(b))
        }
    };

    try
    {
        DetectTextResponse detectTextResponse = await rekognitionClient.DetectTextAsync(detectText);
        foreach (TextDetection text in detectTextResponse.TextDetections)
        {
            // Only LINE detections are collected; WORD entries are skipped.
            if (text.Type == TextTypes.LINE)
            {
                res.Add(text.DetectedText);
                Console.WriteLine("Left : " + text.Geometry.BoundingBox.Left);
                Console.WriteLine("Top " + text.Geometry.BoundingBox.Top);
                Console.WriteLine("Height " + text.Geometry.BoundingBox.Height);
                Console.WriteLine("width " + text.Geometry.BoundingBox.Width);
            }
            Console.WriteLine("------------------------------------");
        }
        return res;
    }
    catch (Exception ex)
    {
        // FIX: the original bare catch swallowed every failure silently.
        // Keep the best-effort empty-list contract but surface the cause.
        Console.WriteLine("Text detection failed: " + ex.Message);
        return new List<string>();
    }
}
// For Image analysis
/// <summary>
/// Runs Rekognition text detection on the supplied image stream and reports
/// whether <paramref name="target"/> appears either as detected text or as a
/// detection type (LINE/WORD).
/// </summary>
/// <param name="stream">Raw image bytes to analyze.</param>
/// <param name="target">Watchlist term to match (case-insensitive).</param>
/// <param name="message">Set to a found-notification string, or empty when no match.</param>
/// <returns>All text detections returned by the service.</returns>
public List <TextDetection> DetectText(MemoryStream stream, string target, out string message)
{
    string outMessage = "";
    DetectTextRequest detectTextRequest = new DetectTextRequest()
    {
        Image = new Image() { Bytes = stream }
    };

    // FIX: GetAwaiter().GetResult() instead of .Result — failures surface as
    // the original exception rather than an AggregateException. (The `out`
    // parameter prevents making this method async.)
    DetectTextResponse response = _client.DetectTextAsync(detectTextRequest).GetAwaiter().GetResult();

    foreach (TextDetection text in response.TextDetections)
    {
        // FIX: use culture-independent, case-insensitive comparison rather
        // than ToLower() on both sides.
        if (string.Equals(text.DetectedText, target, StringComparison.OrdinalIgnoreCase)
            || string.Equals(text.Type.ToString(), target, StringComparison.OrdinalIgnoreCase))
        {
            outMessage = "The Object '" + target.ToUpper() + "' in your watchlist has been found in live stream with '" + Convert.ToInt32(text.Confidence) + "%' confidence";
        }
    }

    message = outMessage;
    LogResponse(JsonConvert.SerializeObject(response, Formatting.Indented), "DetectText");
    return response.TextDetections;
}
/// <summary>
/// This method is called for every Lambda invocation. This method takes in an S3 event object and can be used
/// to respond to S3 notifications.
/// </summary>
/// <param name="evnt">The S3 event that triggered this invocation.</param>
/// <param name="context">Lambda execution context, used for logging.</param>
/// <returns>A completion string, or null when the event has no S3 record.</returns>
public async Task <string> FunctionHandler(S3Event evnt, ILambdaContext context)
{
    var s3Event = evnt.Records?[0].S3;
    if (s3Event == null)
    {
        return null;
    }

    try
    {
        AmazonRekognitionClient client = new AmazonRekognitionClient(RegionEndpoint.USEast1);

        // get the file's name from event
        string imageTitle = s3Event.Object.Key;

        // get the file from S3 and build the detection request
        DetectTextRequest q = new DetectTextRequest
        {
            Image = new Image() { S3Object = getObject(imageTitle) }
        };

        // FIX: await the call instead of task.Wait()/.Result — blocking inside
        // an async method ties up the thread and wraps failures in
        // AggregateException.
        DetectTextResponse r = await client.DetectTextAsync(q);

        // filter recognized text: keep only capital-letter/number strings
        // (license plates) and notify the plate's owner.
        foreach (TextDetection t in r.TextDetections)
        {
            if (isCapitaLettersNumbers(t.DetectedText))
            {
                // send message to plate's owner
                sendMessage(t.DetectedText);
            }
        }
    }
    catch (Exception e)
    {
        context.Logger.LogLine($"Error getting object {s3Event.Object.Key} from bucket {s3Event.Bucket.Name}. Make sure they exist and your bucket is in the same region as this function.");
        context.Logger.LogLine(e.Message);
        context.Logger.LogLine(e.StackTrace);
        throw;
    }

    return "Lamda has returned";
}
/// <summary>
/// Runs Rekognition OCR over a local image file, bypassing any cache, and maps
/// the raw response into an <c>AwsOcrResponse</c>.
/// </summary>
/// <param name="filePath">Path of the image file to analyze.</param>
/// <param name="language">Unused by this implementation; kept for interface compatibility.</param>
/// <param name="runAnywayWithBadLanguage">Unused by this implementation; kept for interface compatibility.</param>
public async Task <AwsOcrResponse> GetOcrResultWithoutCacheAsync(string filePath, string language = null, bool runAnywayWithBadLanguage = true)
{
    // FIX: read the file asynchronously instead of blocking on FileStream.CopyTo.
    // Wrapping the byte[] in a fresh MemoryStream also guarantees the stream
    // position starts at 0 for the SDK, and the stream is disposed afterwards.
    byte[] imageBytes = await File.ReadAllBytesAsync(filePath);
    using (var ms = new MemoryStream(imageBytes))
    {
        var request = new DetectTextRequest
        {
            Image = new Image() { Bytes = ms }
        };
        DetectTextResponse result = await rekognitionClient.DetectTextAsync(request);
        return AwsResponseMapper.Get(result);
    }
}
/// <summary>
/// Runs Rekognition text detection on the supplied image bytes.
/// On any failure the error message is written to the console and an empty
/// response object is returned instead of propagating the exception.
/// </summary>
/// <param name="data">Stream holding the raw image bytes.</param>
public async Task<DetectTextResponse> ConvertToTextAsync(MemoryStream data)
{
    try
    {
        var request = new DetectTextRequest
        {
            Image = new Image { Bytes = data }
        };
        return await rekognitionClient.DetectTextAsync(request);
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
        return new DetectTextResponse();
    }
}
/// <summary>
/// Runs Rekognition OCR over a local image file and returns the detections
/// serialized as JSON.
/// </summary>
/// <param name="imageFilePath">Path of the image file to analyze.</param>
/// <returns>JSON array of text detections, or the sentinel string "error" on failure.</returns>
public static async Task <string> MakeOCRRequest(string imageFilePath)
{
    try
    {
        Image image = new Image
        {
            Bytes = ImageRequest.GetImageAsMemoryStream(imageFilePath)
        };
        DetectTextRequest detectTextRequest = new DetectTextRequest() { Image = image };
        DetectTextResponse detectTextResponse = await client.DetectTextAsync(detectTextRequest);
        return System.Text.Json.JsonSerializer.Serialize(detectTextResponse.TextDetections);
    }
    catch (Exception ex)
    {
        // FIX: the original discarded the exception entirely. Keep the "error"
        // sentinel for callers, but log the cause so failures are diagnosable.
        Console.Error.WriteLine($"OCR request failed for '{imageFilePath}': {ex.Message}");
        return "error";
    }
}
/// <summary>
/// Detects LINE-level text in the supplied image bytes and builds a spoken-style
/// summary sentence. Any failure (or an empty/unsuccessful response) yields the
/// apology fallback message; exceptions are logged to the Lambda context.
/// </summary>
private async Task<string> DetectText(MemoryStream memoryStream, AmazonRekognitionClient client, ILambdaContext context)
{
    try
    {
        DetectTextResponse response = await client.DetectTextAsync(
            new DetectTextRequest { Image = new Image { Bytes = memoryStream } });

        // Guard clause: anything other than a successful, non-empty result
        // falls through to the fallback message.
        if (response == null
            || response.HttpStatusCode != HttpStatusCode.OK
            || response.TextDetections.Count == 0)
        {
            return "Sorry, no text was detected. ";
        }

        var detectedLines = new List<string>();
        foreach (TextDetection detection in response.TextDetections)
        {
            if (detection.Type == TextTypes.LINE)
            {
                detectedLines.Add(detection.DetectedText);
            }
        }

        return "There is a text written as " + string.Join(", ", detectedLines.ToArray());
    }
    catch (Exception ex)
    {
        context.Logger.LogLine(ex.Message);
        return "Sorry, no text was detected. ";
    }
}
// snippet-start:[Rekognition.dotnetv3.DetectTextExample]
/// <summary>
/// Detects text in an S3-hosted photo with Amazon Rekognition and prints each
/// detected line/word with its confidence, id, parent id, and type.
/// </summary>
public static async Task Main()
{
    string photo = "Dad_photographer.jpg"; // "input.jpg";
    string bucket = "igsmiths3photos"; // "bucket";

    var rekognitionClient = new AmazonRekognitionClient();
    var detectTextRequest = new DetectTextRequest()
    {
        Image = new Image()
        {
            S3Object = new S3Object()
            {
                Name = photo,
                Bucket = bucket,
            },
        },
    };

    try
    {
        DetectTextResponse response = await rekognitionClient.DetectTextAsync(detectTextRequest);
        Console.WriteLine($"Detected lines and words for {photo}");
        foreach (TextDetection text in response.TextDetections)
        {
            Console.WriteLine($"Detected: {text.DetectedText}");
            Console.WriteLine($"Confidence: {text.Confidence}");
            Console.WriteLine($"Id : {text.Id}");
            Console.WriteLine($"Parent Id: {text.ParentId}");
            Console.WriteLine($"Type: {text.Type}");
        }
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
    }
}
/// <summary>
/// Detects text in a fixed S3-hosted image and returns the highest-confidence
/// LINE detection above 90% confidence, or an exception message on failure.
/// </summary>
public IActionResult GetImageText()
{
    string photo = "image_2020_03_09T10_13_27_425Z.png";
    string bucket = "imagebucket080304";
    string imageText = "";
    AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(RegionEndpoint.USEast2);
    DetectTextRequest detectTextRequest = new DetectTextRequest()
    {
        Image = new Image()
        {
            S3Object = new S3Object() { Name = photo, Bucket = bucket }
        }
    };

    try
    {
        // FIX: GetAwaiter().GetResult() instead of .Result so a failure surfaces
        // as the original exception, not an AggregateException.
        var imageResponse = rekognitionClient.DetectTextAsync(detectTextRequest).GetAwaiter().GetResult();

        // BUG FIX: the service reports line detections as TextTypes.LINE
        // (value "LINE"); the original compared against "Line" and never matched.
        var image = imageResponse.TextDetections
            .Where(te => te.Type == TextTypes.LINE && te.Confidence > 90)
            .OrderByDescending(t => t.Confidence)
            .FirstOrDefault();
        if (image != null)
        {
            imageText = image.DetectedText;
        }
    }
    catch (Exception e)
    {
        imageText = $"exception occured { e.Message }";
    }

    return Ok(imageText);
}
/// <summary>
/// Kicks off Rekognition processing of a video stored in S3, wiring an SNS
/// notification channel, then waits on the resulting messages to validate the
/// video's length.
/// </summary>
/// <param name="fileInfo">S3 bucket and key of the uploaded video.</param>
/// <param name="context">Lambda execution context.</param>
/// <returns>True when ProcessVideoMessages judges the video length valid.</returns>
// NOTE(review): DetectTextRequest has no Video/NotificationChannel members and
// DetectTextAsync does not return StartPersonTrackingResponse — as written this
// looks like it cannot compile against the Rekognition SDK. The intent appears
// to be one of the asynchronous video APIs (e.g. StartTextDetectionRequest /
// StartTextDetectionAsync, or StartPersonTrackingRequest /
// StartPersonTrackingAsync, which DOES return StartPersonTrackingResponse).
// Confirm against the SDK version in use before changing.
public async Task <bool> FunctionHandler(FileInfo fileInfo, ILambdaContext context)
{
    DetectTextRequest request = new DetectTextRequest
    {
        // Source video in S3 (bucket + object key supplied by the caller).
        Video = new Video { S3Object = new S3Object { Bucket = fileInfo.Bucket, Name = fileInfo.Key } },
        // Role/topic Rekognition uses to publish job-completion notifications.
        // NOTE(review): hard-coded account-specific ARNs — consider configuration.
        NotificationChannel = new NotificationChannel
        {
            RoleArn = "arn:aws:iam::518495728486:role/hackathon-rek-role",
            SNSTopicArn = "arn:aws:sns:us-east-1:518495728486:AmazonRekognition-hackathon-2018"
        }
    };
    StartPersonTrackingResponse response = await rekClient.DetectTextAsync(request).ConfigureAwait(false);
    // Consume the job's notification messages and validate the video length.
    bool validLength = await ProcessVideoMessages(context, response);
    return(validLength);
}
/// <summary>
/// Runs Rekognition text detection over raw image bytes and converts the
/// normalized (0..1) bounding boxes into pixel-coordinate text blocks.
/// </summary>
/// <param name="image">Encoded image bytes (any format System.Drawing can decode).</param>
/// <returns>The detected text blocks with pixel-space corner coordinates.</returns>
public TextExtractionResults Extract(byte[] image)
{
    TextExtractionResults rs = new TextExtractionResults();
    List<TextBlock> rsTextBoxes = new List<TextBlock>();
    AmazonRekognitionClient rekognitionClient = CreateAwsClient();

    // The bitmap is decoded only to learn the pixel dimensions, which are
    // needed to scale Rekognition's relative bounding boxes.
    double imgWidth, imgHeight;
    using (var stm = new System.IO.MemoryStream(image))
    using (var bmp = new System.Drawing.Bitmap(stm))
    {
        imgWidth = bmp.Width;
        imgHeight = bmp.Height;
    }

    var req = new Amazon.Rekognition.Model.DetectTextRequest
    {
        Image = new Amazon.Rekognition.Model.Image
        {
            Bytes = new System.IO.MemoryStream(image)
        }
    };

    // FIX: GetAwaiter().GetResult() instead of .Result so failures surface as
    // the original exception rather than an AggregateException.
    var detectTextResponse = rekognitionClient.DetectTextAsync(req).GetAwaiter().GetResult();

    foreach (Amazon.Rekognition.Model.TextDetection textResult in detectTextResponse.TextDetections)
    {
        Amazon.Rekognition.Model.BoundingBox box = textResult.Geometry.BoundingBox;
        TextBlock rsText = new TextBlock();
        rsText.Text = textResult.DetectedText;
        // Scale the normalized box to pixel coordinates.
        rsText.X1 = box.Left * imgWidth;
        rsText.X2 = rsText.X1 + box.Width * imgWidth;
        rsText.Y1 = box.Top * imgHeight;
        rsText.Y2 = rsText.Y1 + box.Height * imgHeight;
        rsTextBoxes.Add(rsText);
    }

    rs.Blocks = rsTextBoxes.ToArray();
    return rs;
}
/// <summary>
/// Demo pipeline over three local images: (1) checks whether the document back
/// looks like a valid document via label detection, (2) compares the selfie
/// against the document front via face comparison, and (3) scans the back for
/// a CPF marker via text detection. Results are reported on the console.
/// </summary>
private static async Task Main(string[] args)
{
    // NOTE(review): placeholder credential constants — load real credentials
    // from configuration/environment; never hard-code secrets in source.
    const string AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID";
    const string AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY";

    Console.WriteLine("Hello World!");

    var self = await File.ReadAllBytesAsync("assets\\self.jpg");
    var front = await File.ReadAllBytesAsync("assets\\front.png");
    var back = await File.ReadAllBytesAsync("assets\\back.png");
    var command = new AnalizeDocumentCommand { Self = self, Back = back, Front = front };

    var region = RegionEndpoint.USEast1;
    var client = new AmazonRekognitionClient(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, region);

    #region Analiza se é documento
    using (var stream = new MemoryStream(command.Back))
    {
        var request = new DetectLabelsRequest { Image = new Image { Bytes = stream } };
        var response = await client.DetectLabelsAsync(request);
        // Report on the first label that is recognized as a document type.
        foreach (var label in response.Labels)
        {
            var accuracy = Accuracy.GetAccuracy(label.Confidence);
            if (DocumentTypes.IsValidDocument(label.Name))
            {
                if (accuracy.IsLow) { Console.WriteLine("Não é um documento"); }
                if (accuracy.IsMedium) { Console.WriteLine("Pode ser que seja um documento"); }
                if (accuracy.IsHigh) { Console.WriteLine("É muito provável que seja um documento"); }
                break;
            }
        }
    }
    #endregion

    #region Compara com a self
    using (var source = new MemoryStream(command.Self))
    using (var target = new MemoryStream(command.Front))
    {
        var request = new CompareFacesRequest
        {
            SourceImage = new Image { Bytes = source },
            TargetImage = new Image { Bytes = target }
        };
        var response = await client.CompareFacesAsync(request);
        var faces = response.FaceMatches;
        if (faces.Count != 1)
        {
            Console.WriteLine("Resultado inconsistente");
        }

        // BUG FIX: the original called faces.First() unconditionally, which
        // throws InvalidOperationException when no face matched. Only score a
        // similarity when at least one match exists.
        if (faces.Count > 0)
        {
            var accuracy = Accuracy.GetAccuracy(faces.First().Similarity);
            if (accuracy.IsLow) { Console.WriteLine("Esse documento não da mesma pessoa"); }
            if (accuracy.IsMedium) { Console.WriteLine("Pode ser que este documento seja da mesma pessoa"); }
            if (accuracy.IsHigh) { Console.WriteLine("É muito provável que este documento seja da mesma pessoa"); }
        }
    }
    #endregion

    #region Verifica se é do portador válido
    using (var stream = new MemoryStream(command.Back))
    {
        var request = new DetectTextRequest { Image = new Image { Bytes = stream } };
        var response = await client.DetectTextAsync(request);
        // Look for a literal "CPF" label among the detected text fragments.
        foreach (var text in response.TextDetections)
        {
            var accuracy = Accuracy.GetAccuracy(text.Confidence);
            if ("CPF".Equals(text.DetectedText, StringComparison.InvariantCultureIgnoreCase))
            {
                if (accuracy.IsLow) { Console.WriteLine("não contém um número de CPF"); }
                if (accuracy.IsMedium) { Console.WriteLine("Pode ser que contenha um número de CPF"); }
                if (accuracy.IsHigh) { Console.WriteLine("É muito provável que contenha um número de CPF"); }
                break;
            }
        }
    }
    #endregion

    Console.WriteLine("That's all folks!");
}
/// <summary>
/// Reads a picture and returns the data obtained from it into a dictionary with key-value pairs.
/// For this specifically, items in a picture are connected (ie. fruit : apple), except its for
/// (judgement, values) in the game. This will return the appropriate pairing. Until I rework this
/// to be able to grab both the judgement and its value into one var.
/// </summary>
/// <param name="photo">photo file to be analyzed</param>
/// <param name="tupleList">list containing what to search for in the image</param>
/// <returns>A dictionary containing values sought out by the keys in ProcessorOptions</returns>
public async Task <IDictionary <string, string> > ProcessPictureInfoAsync(FileResult photo, IEnumerable <ProcessorOptions> tupleList)
{
    try
    {
        DetectTextRequest req = new DetectTextRequest { Image = new Image() };

        // BUG FIX: the original's `using` block disposed the MemoryStream
        // BEFORE DetectTextAsync consumed it. Keep the stream alive until the
        // request completes, and dispose the photo's read stream too.
        using (var memoryStream = new MemoryStream())
        {
            using (var stream = await photo.OpenReadAsync())
            {
                await stream.CopyToAsync(memoryStream);
            }
            memoryStream.Position = 0; // rewind so the SDK reads from the start
            req.Image.Bytes = memoryStream;

            DetectTextResponse res = await arClient.DetectTextAsync(req);

            foreach (TextDetection text in res.TextDetections)
            {
                if (text.Confidence < confidenceThreshold) // Yeet the words its not confident in
                {
                    continue;
                }

                // Try each configured regex against this detection.
                foreach (ProcessorOptions option in tupleList)
                {
                    Match match = option.Rgx.Match(text.DetectedText);
                    if (match.Success)
                    {
                        // First match wins; never overwrite an existing key.
                        if (!HashMap.ContainsKey(option.Key))
                        {
                            HashMap.Add(option.Key, match.Groups[1].Value);
                            Debug.WriteLine($"{option.Key} IS NOW SET TO: {HashMap[option.Key]}"); // Debug Statement
                            option.Found = true;
                        }
                        // BUG FIX: the original `break` sat OUTSIDE the success
                        // check, so only the first option was ever tried for
                        // each detection. Stop only once a match is consumed.
                        break;
                    }
                }
            }
        }
    }
    catch (Exception e)
    {
        Debug.WriteLine("Could not process picture into a song.\n");
        Debug.WriteLine(e.Message);
    }

    return HashMap; // return this as a read only dictionary
}