Code Example #1
        /// <summary>
        /// Perform the work for this scenario.
        /// </summary>
        /// <param name="imageUri">The URI of the image to run against the scenario.</param>
        /// <param name="upload">Upload the image to Cognitive Services if [true]; submit the Uri as a remote URL if [false].</param>
        /// <returns>Awaitable OCR result.</returns>
        protected override async Task DoWorkAsync(Uri imageUri, bool upload)
        {
            _status.Text = "Performing OCR...";

            OcrLanguages languageCode = (languageComboBox.SelectedItem as RecognizeLanguage).OcrEnum;

            //
            // Either upload an image, or supply a URL.
            //
            OcrResult ocrResult;

            if (upload)
            {
                ocrResult = await UploadAndRecognizeImageAsync(imageUri.LocalPath, languageCode);
            }
            else
            {
                ocrResult = await RecognizeUrlAsync(imageUri.AbsoluteUri, languageCode);
            }
            _status.Text = "OCR Done";

            //
            // Log analysis result in the log window.
            //
            Log("");
            Log("OCR Result:");
            LogOcrResults(ocrResult);
        }
Code Example #2
        /// <summary>
        /// Sends a URL to Cognitive Services and performs OCR.
        /// </summary>
        /// <param name="imageUrl">The image URL for which to perform recognition.</param>
        /// <param name="language">The language code to recognize.</param>
        /// <returns>Awaitable OCR result.</returns>
        private async Task <OcrResult> RecognizeUrlAsync(string imageUrl, OcrLanguages language)
        {
            // -----------------------------------------------------------------------
            // KEY SAMPLE CODE STARTS HERE
            // -----------------------------------------------------------------------

            //
            // Create Cognitive Services Vision API Service client.
            //
            using (var client = new ComputerVisionClient(Credentials)
            {
                Endpoint = Endpoint
            })
            {
                Log("ComputerVisionClient is created");

                //
                // Perform OCR on the given URL.
                //
                Log("Calling ComputerVisionClient.RecognizePrintedTextAsync()...");
                OcrResult ocrResult = await client.RecognizePrintedTextAsync(!DetectOrientation, imageUrl, language);

                return(ocrResult);
            }

            // -----------------------------------------------------------------------
            // KEY SAMPLE CODE ENDS HERE
            // -----------------------------------------------------------------------
        }
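
The OcrResult returned above is a tree of regions, lines, and words rather than plain text. A minimal sketch of flattening it into a string (the helper name is ours, not part of the sample), mirroring the walk that Code Example #16 performs:

        // Minimal sketch: flatten an OcrResult into plain text, one output line per OcrLine.
        // Assumes: using System.Linq; and the ComputerVision.Models namespace for OcrResult.
        private static string OcrResultToPlainText(OcrResult ocrResult)
        {
            return string.Join("\n",
                               ocrResult.Regions.SelectMany(region => region.Lines)
                                                .Select(line => string.Join(" ", line.Words.Select(word => word.Text))));
        }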
Code Example #3
        /// <summary>
        /// Recognizes text in a <see cref="byte[]"/> array of an image.
        /// </summary>
        /// <param name="imageBytes">A <see cref="byte[]"/> of an image</param>
        /// <param name="language">A <see cref="OcrLanguages"/> language</param>
        /// <param name="detectOrientation">Explicitly requests to detect image orientation</param>
        /// <returns>The <see cref="OcrResponse"/> with the structured text</returns>
        public async Task <OcrResponse> RecognizeAsync(byte[] imageBytes, OcrLanguages language = OcrLanguages.NotSet, bool?detectOrientation = null)
        {
            PrepareQueryString(language, detectOrientation);

            using (var content = new ByteArrayContent(imageBytes))
            {
                content.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue("application/octet-stream");

                return(await GetResponse(content));
            }
        }
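
A hedged usage sketch for the byte[] overload above: read a local image into memory and pass it in, assuming an OcrClient configured as in Code Example #8. The file path is illustrative.

        // Sketch only: call the byte[] overload shown above from an async context.
        // Requires: using System.IO; "client" is an OcrClient as in Code Example #8.
        byte[] imageBytes = File.ReadAllBytes(@"C:\samples\printed-text.png");
        OcrResponse response = await client.RecognizeAsync(imageBytes, OcrLanguages.En, detectOrientation: true);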
Code Example #4
        /// <summary>
        /// Sends a URL to Project Oxford and performs OCR.
        /// </summary>
        /// <param name="imageUrl">The image URL for which to perform recognition.</param>
        /// <param name="language">The language code to recognize.</param>
        /// <returns>Awaitable OCR result.</returns>
        private async Task <OcrResult> RecognizeUrl(string imageUrl, OcrLanguages language)
        {
            Log("Calling VisionServiceClient.RecognizeTextAsync()...");
            var ocrResult = await VisionServiceClient.RecognizePrintedTextAsync(true, imageUrl, language);

            return(ocrResult);

            // -----------------------------------------------------------------------
            // KEY SAMPLE CODE ENDS HERE
            // -----------------------------------------------------------------------
        }
Code Example #5
        /// <summary>
        /// Adds the optional "language" and "detectOrientation" values to the query string
        /// when they are set.
        /// </summary>
        protected virtual void PrepareQueryString(OcrLanguages language, bool?detectOrientation = null)
        {
            if (language != OcrLanguages.NotSet)
            {
                QueryStringValues.Add("language", Globalization.Languages.FirstOrDefault((KeyValuePair <string, string> l) => l.Value == language.ToString()).Key);
            }

            if (detectOrientation.HasValue)
            {
                QueryStringValues.Add("detectOrientation", detectOrientation.Value.ToString());
            }
        }
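
PrepareQueryString only accumulates values; how they end up on the request URL is not shown here. A sketch of the likely composition, assuming QueryStringValues is an IDictionary<string, string> and using an illustrative endpoint path:

        // Sketch only: render the accumulated values into a query string.
        // Assumes QueryStringValues is an IDictionary<string, string>; requires using System.Linq;
        string query = string.Join("&", QueryStringValues.Select(kv =>
                                        $"{Uri.EscapeDataString(kv.Key)}={Uri.EscapeDataString(kv.Value)}"));
        string requestUri = $"vision/v1.0/ocr?{query}";   // e.g. "vision/v1.0/ocr?language=en&detectOrientation=True"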
Code Example #6
        /// <summary>
        /// Uploads the image to Cognitive Services and performs OCR.
        /// </summary>
        /// <param name="inputImage">The image byte[] to be analized.</param>
        /// <param name="language">The language code to recognize. Use Unk for automatic detection</param>
        /// <returns>Awaitable OCR result.</returns>
        public async Task <OcrResult> RecognizeImageOCRAsync(byte[] inputImage, OcrLanguages language)
        {
            // Create Cognitive Services Vision API Service client.
            using (var client = new ComputerVisionClient(Credentials)
            {
                Endpoint = Endpoint
            })
            {
                Log.LogInformation("ComputerVisionClient is created");

                // Upload an image and perform OCR.
                Log.LogInformation("Calling ComputerVisionClient.RecognizePrintedTextInStreamAsync()...");
                OcrResult ocrResult = await client.RecognizePrintedTextInStreamAsync(!DetectOrientation, new MemoryStream(inputImage), language);

                return(ocrResult);
            }
        }
Code Example #7
        /// <summary>
        /// Uploads the image to Cognitive Services and performs full image analysis.
        /// </summary>
        /// <param name="inputImage">The image byte[] to be analyzed.</param>
        /// <param name="language">The OCR language code (not used by the analysis call).</param>
        /// <returns>Awaitable image analysis result.</returns>
        public async Task <ImageAnalysis> AnalyzeImageAsync(byte[] inputImage, OcrLanguages language)
        {
            // Create Cognitive Services Vision API Service client.
            using (var client = new ComputerVisionClient(Credentials)
            {
                Endpoint = Endpoint
            })
            {
                Log.LogInformation("ComputerVisionClient is created");

                // Upload an image and analyze it.
                Log.LogInformation("Calling ComputerVisionClient.AnalyzeImageInStreamAsync()...");
                ImageAnalysis cognitiveResult = await client.AnalyzeImageInStreamAsync(new MemoryStream(inputImage));

                return(cognitiveResult);
            }
        }
Code Example #8
        public void RecognizesWithLanguage(OcrLanguages language)
        {
            // Arrange
            OcrClient   client = new OcrClient(API_KEY, REGION);
            OcrResponse response;

            // Act
            response = Task.Run(() => client.RecognizeAsync(new Uri(@"https://oxfordportal.blob.core.windows.net/vision/doc-vision-overview-ocr01.png"), language: language)).Result;

            // Assert
            Assert.NotNull(response);
            Assert.Null(response.ErrorCode);
            Assert.Equal(response.Language, "en");
            Assert.NotNull(response.Regions);
            Assert.NotEmpty(response.Regions);
            Assert.NotEqual(response.Regions.First().BoundingBox.Left, default(int));
            Assert.NotEqual(response.Regions.First().BoundingBox.Top, default(int));
            Assert.NotEqual(response.Regions.First().BoundingBox.Width, default(int));
            Assert.NotEqual(response.Regions.First().BoundingBox.Height, default(int));
            Assert.NotNull(response.Regions.First().Lines);
            Assert.NotEmpty(response.Regions.First().Lines);
            Assert.NotNull(response.Regions.First().Lines.First().Words);
        }
Code Example #9
        /*
         * READ FILE - URL
         * Extracts text.
         */
        public static async Task <OcrResult> RecognizeTextFromImageUrl(ComputerVisionClient client, string urlFile, bool detectOrientation, OcrLanguages language = OcrLanguages.En)
        {
            Console.WriteLine("----------------------------------------------------------");
            Console.WriteLine("READ FILE FROM URL");
            Console.WriteLine();

            // Read text from URL
            var ocrResults = await client.RecognizePrintedTextAsync(detectOrientation, urlFile, language);

            return(ocrResults);
        }
Code Example #10
 /// <summary>
 /// Optical Character Recognition (OCR) detects text in an image and extracts
 /// the recognized characters into a machine-usable character stream.
 /// Upon success, the OCR results will be returned.
 /// Upon failure, the error code together with an error message will be
 /// returned. The error code can be one of InvalidImageUrl, InvalidImageFormat,
 /// InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
 /// InternalServerError.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='detectOrientation'>
 /// Whether to detect the text orientation in the image. With
 /// detectOrientation=true the OCR service tries to detect the image
 /// orientation and correct it before further processing (e.g. if it's
 /// upside-down).
 /// </param>
 /// <param name='image'>
 /// An image stream.
 /// </param>
 /// <param name='language'>
 /// The BCP-47 language code of the text to be detected in the image. The
 /// default value is 'unk'. Possible values include: 'unk', 'zh-Hans',
 /// 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it',
 /// 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
 /// 'sr-Cyrl', 'sr-Latn', 'sk'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <OcrResult> RecognizePrintedTextInStreamAsync(this IComputerVisionClient operations, bool detectOrientation, Stream image, OcrLanguages language = default(OcrLanguages), CancellationToken cancellationToken = default(CancellationToken))
 {
     using (var _result = await operations.RecognizePrintedTextInStreamWithHttpMessagesAsync(detectOrientation, image, language, null, cancellationToken).ConfigureAwait(false))
     {
         return(_result.Body);
     }
 }
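
A hedged usage sketch for the extension method above: open a local file stream and handle the documented failure modes, which the client library surfaces as ComputerVisionErrorException (the same type Code Example #13 catches). The file path and variable names are illustrative.

 // Sketch only: "client" is an IComputerVisionClient configured as in the other examples.
 using (var stream = File.OpenRead(@"C:\samples\printed-text.png"))
 {
     try
     {
         OcrResult result = await client.RecognizePrintedTextInStreamAsync(true, stream, OcrLanguages.Unk);
         Console.WriteLine($"Detected language: {result.Language}");
     }
     catch (ComputerVisionErrorException ex)
     {
         // Error codes such as InvalidImageFormat or NotSupportedLanguage arrive in the response body,
         // read the same way Code Example #13 reads them.
         Console.WriteLine($"OCR failed: {ex.Response.StatusCode} {ex.Response.Content}");
     }
 }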
Code Example #11
 public static async Task <OcrResult> RecognizeTextFromImageUri(ComputerVisionClient client, Uri imageUri, bool detectOrientation, OcrLanguages language = OcrLanguages.En)
 {
     if (imageUri.Scheme == Uri.UriSchemeHttps || imageUri.Scheme == Uri.UriSchemeHttp)
     {
         return(await RecognizeTextFromImageUrl(client, imageUri.ToString(), detectOrientation, language));
     }
     else if (imageUri.Scheme == Uri.UriSchemeFile)
     {
         return(await RecognizeTextFromImageLocal(client, imageUri.AbsolutePath, detectOrientation, language));
     }
     return(null);
 }
Code Example #12
 private async Task <OcrResult> ComputerVisionRecognizedPrintedTextByStreamAsync(Stream imageStream, OcrLanguages ocrLanguage)
 {
     return(await _computerVisionClient.RecognizePrintedTextInStreamAsync(true, imageStream, ocrLanguage));
 }
Code Example #13
        /// <summary>
        /// Runs image analysis, area-of-interest detection, Read, and printed-text OCR in parallel
        /// against either a remote image URL or an uploaded file, and combines the results.
        /// </summary>
        public async Task <ComputerVisionAnalyzeResponse> AnalyzeAsync(string imageUrl, IFormFile file,
                                                                       AnalysisLanguage analysisLanguage, OcrLanguages ocrLanguage, ReadLanguage readLanguage)
        {
            // Setup
            _computerVisionClient = new ComputerVisionClient(new ApiKeyServiceClientCredentials(_subscriptionKey))
            {
                Endpoint = _endpoint
            };

            // Computer vision
            if (!string.IsNullOrWhiteSpace(imageUrl))
            {
                var imageAnalysis         = ComputerVisionAnalyzeImageByUrlAsync(imageUrl, analysisLanguage);
                var areaOfInterest        = ComputerVisionGetAreaOfInterestByUrlAsync(imageUrl);
                var read                  = ComputerVisionReadByUrlAsync(imageUrl, readLanguage);
                var recognizedPrintedText = ComputerVisionRecognizedPrintedTextByUrlAsync(imageUrl, ocrLanguage);

                // Combine
                var task = Task.WhenAll(imageAnalysis, areaOfInterest, read, recognizedPrintedText);

                try
                {
                    await task;

                    return(new ComputerVisionAnalyzeResponse
                    {
                        ImageInfo = new ImageInfo
                        {
                            Src = imageUrl,
                            Description = imageAnalysis.Result.Description?.Captions?.FirstOrDefault()?.Text.ToSentence(),

                            Width = imageAnalysis.Result.Metadata.Width,
                            Height = imageAnalysis.Result.Metadata.Height
                        },

                        AnalyzeVisualFeatureTypes = AnalyzeVisualFeatureTypes,
                        AnalyzeDetails = AnalyzeDetails,

                        AnalysisResult = imageAnalysis.Result,
                        AreaOfInterestResult = areaOfInterest.Result,

                        OcrResult = recognizedPrintedText.Result,
                        ReadResult = read.Result
                    });
                }
                catch (ComputerVisionErrorException ex)
                {
                    var exceptionMessage = ex.Response.Content;
                    var parsedJson       = JToken.Parse(exceptionMessage);

                    if (ex.Response.StatusCode == System.Net.HttpStatusCode.BadRequest)
                    {
                        return(new ComputerVisionAnalyzeResponse
                        {
                            ApiRequestErrorMessage = $"Bad request thrown by the underlying API from Microsoft:",
                            ApiRequestErrorContent = parsedJson.ToString(Formatting.Indented)
                        });
                    }
                    else
                    {
                        return(new ComputerVisionAnalyzeResponse
                        {
                            OtherErrorMessage = $"Error thrown by the underlying API from Microsoft:",
                            OtherErrorContent = parsedJson.ToString(Formatting.Indented)
                        });
                    }
                }
            }
            else
            {
                using (var analyzeStream = new MemoryStream())
                    using (var areaOfInterestStream = new MemoryStream())
                        using (var readStream = new MemoryStream())
                            using (var ocrStream = new MemoryStream())
                                using (var outputStream = new MemoryStream())
                                {
                                    // Get initial value
                                    await file.CopyToAsync(analyzeStream);

                                    // Duplicate for parallel access to the streams
                                    analyzeStream.Seek(0, SeekOrigin.Begin);
                                    await analyzeStream.CopyToAsync(areaOfInterestStream);

                                    analyzeStream.Seek(0, SeekOrigin.Begin);
                                    await analyzeStream.CopyToAsync(readStream);

                                    analyzeStream.Seek(0, SeekOrigin.Begin);
                                    await analyzeStream.CopyToAsync(ocrStream);

                                    analyzeStream.Seek(0, SeekOrigin.Begin);
                                    await analyzeStream.CopyToAsync(outputStream);

                                    // Reset the stream for consumption
                                    analyzeStream.Seek(0, SeekOrigin.Begin);
                                    areaOfInterestStream.Seek(0, SeekOrigin.Begin);
                                    readStream.Seek(0, SeekOrigin.Begin);
                                    ocrStream.Seek(0, SeekOrigin.Begin);
                                    outputStream.Seek(0, SeekOrigin.Begin);

                                    var imageAnalysis         = ComputerVisionAnalyzeImageByStreamAsync(analyzeStream, analysisLanguage);
                                    var areaOfInterest        = ComputerVisionGetAreaOfInterestByStreamAsync(areaOfInterestStream);
                                    var read                  = ComputerVisionReadByStreamAsync(readStream, readLanguage);
                                    var recognizedPrintedText = ComputerVisionRecognizedPrintedTextByStreamAsync(ocrStream, ocrLanguage);

                                    // Combine
                                    var task = Task.WhenAll(imageAnalysis, areaOfInterest, read, recognizedPrintedText);

                                    try
                                    {
                                        await task;

                                        // Get image for display
                                        var fileBytes = outputStream.ToArray();
                                        var imageData = $"data:{file.ContentType};base64,{Convert.ToBase64String(fileBytes)}";

                                        return(new ComputerVisionAnalyzeResponse
                                        {
                                            ImageInfo = new ImageInfo
                                            {
                                                Src = imageData,
                                                Description = imageAnalysis.Result.Description?.Captions?.FirstOrDefault()?.Text.ToSentence(),

                                                Width = imageAnalysis.Result.Metadata.Width,
                                                Height = imageAnalysis.Result.Metadata.Height
                                            },

                                            AnalyzeVisualFeatureTypes = AnalyzeVisualFeatureTypes,
                                            AnalyzeDetails = AnalyzeDetails,

                                            AnalysisResult = imageAnalysis.Result,
                                            AreaOfInterestResult = areaOfInterest.Result,

                                            OcrResult = recognizedPrintedText.Result,
                                            ReadResult = read.Result
                                        });
                                    }
                                    catch (ComputerVisionErrorException ex)
                                    {
                                        var exceptionMessage = ex.Response.Content;
                                        var parsedJson       = JToken.Parse(exceptionMessage);

                                        if (ex.Response.StatusCode == System.Net.HttpStatusCode.BadRequest)
                                        {
                                            return(new ComputerVisionAnalyzeResponse
                                            {
                                                ApiRequestErrorMessage = $"Bad request thrown by the underlying API from Microsoft:",
                                                ApiRequestErrorContent = parsedJson.ToString(Formatting.Indented)
                                            });
                                        }
                                        else
                                        {
                                            return(new ComputerVisionAnalyzeResponse
                                            {
                                                OtherErrorMessage = $"Error thrown by the underlying API from Microsoft:",
                                                OtherErrorContent = parsedJson.ToString(Formatting.Indented)
                                            });
                                        }
                                    }
                                }
            }
        }
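
The file branch above has to duplicate the uploaded stream because each parallel Computer Vision call consumes its own copy. A condensed sketch of that fan-out pattern (the method name is illustrative):

        // Sketch only: copy one uploaded IFormFile into independent, rewound MemoryStreams
        // so that parallel SDK calls do not compete for a single stream position.
        // Requires: using System.IO; using System.Threading.Tasks; using Microsoft.AspNetCore.Http;
        private static async Task<MemoryStream[]> DuplicateUploadAsync(IFormFile file, int copies)
        {
            var source = new MemoryStream();
            await file.CopyToAsync(source);

            var result = new MemoryStream[copies];
            for (int i = 0; i < copies; i++)
            {
                source.Seek(0, SeekOrigin.Begin);
                var copy = new MemoryStream();
                await source.CopyToAsync(copy);
                copy.Seek(0, SeekOrigin.Begin);
                result[i] = copy;
            }
            return result;
        }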
Code Example #14
 internal RecognizeLanguage(string shortCode, OcrLanguages ocrEnum, string longName)
 {
     ShortCode = shortCode;
     OcrEnum   = ocrEnum;
     LongName  = longName;
 }
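
The constructor above backs the language drop-down used in Code Example #1. A hedged sketch of populating such a list (the entries and the ItemsSource binding are illustrative, not the sample's full list):

 // Sketch only: a few illustrative RecognizeLanguage entries for the language combo box.
 // Requires: using System.Collections.Generic; the real sample defines many more languages.
 var languages = new List<RecognizeLanguage>
 {
     new RecognizeLanguage("unk", OcrLanguages.Unk, "AutoDetect"),
     new RecognizeLanguage("en", OcrLanguages.En, "English"),
     new RecognizeLanguage("ja", OcrLanguages.Ja, "Japanese"),
 };
 languageComboBox.ItemsSource = languages;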
Code Example #15
 private async Task <OcrResult> ComputerVisionRecognizedPrintedTextByUrlAsync(string imageUrl, OcrLanguages ocrLanguage)
 {
     return(await _computerVisionClient.RecognizePrintedTextAsync(true, imageUrl, ocrLanguage));
 }
Code Example #16
File: OCR.cs  Project: nicogis/SnippingOCR
        public static async Task <ResultOCR> Process(Image image, OcrLanguages ocrLanguages, FormatImageCognitiveService formatImage)
        {
            bool   result           = false;
            string error            = null;
            string text             = null;
            Bitmap destinationImage = null;

            try
            {
                IComputerVisionClient client = new ComputerVisionClient(new ApiKeyServiceClientCredentials(key))
                {
                    Endpoint = OCR.endPoint
                };

                if ((image.Height < dimensioneMin) || (image.Width < dimensioneMin))
                {
                    int newW = image.Width < dimensioneMin ? dimensioneMin + delta : image.Width;
                    int newH = image.Height < dimensioneMin ? dimensioneMin + delta : image.Height;

                    destinationImage = new Bitmap(newW, newH);

                    using (Graphics g = Graphics.FromImage(destinationImage))
                    {
                        g.DrawImage(image, new Rectangle((newW - image.Width) / 2, (newH - image.Height) / 2, image.Width, image.Height), new Rectangle(0, 0, image.Width, image.Height), GraphicsUnit.Pixel);
                    }

                    image = destinationImage;

                    //image.Save(@"c:\temp\prova.png",ImageFormat.Png);
                }

                if ((image.Height > dimensioneMax) || (image.Width > dimensioneMax))
                {
                    throw new ApplicationException("Capture a smaller area!");
                }

                ImageFormat format = ImageFormat.Png;
                if (formatImage == FormatImageCognitiveService.jpeg)
                {
                    format = ImageFormat.Jpeg;
                }
                else if (formatImage == FormatImageCognitiveService.bmp)
                {
                    format = ImageFormat.Bmp;
                }

                using (Stream imageFileStream = new MemoryStream())
                {
                    image.Save(imageFileStream, format);

                    if (Helper.ConvertBytesToMegabytes(imageFileStream.Length) > 4d)
                    {
                        throw new ApplicationException("Capture a smaller area!");
                    }

                    imageFileStream.Seek(0, SeekOrigin.Begin);

                    OcrResult t = await client.RecognizePrintedTextInStreamAsync(false, imageFileStream, ocrLanguages);

                    if (t.Regions.Count == 0)
                    {
                        throw new ApplicationException("Failed to convert the text!");
                    }


                    text = string.Join("\n", t.Regions.Select(region =>
                               string.Join("\n", region.Lines.Select(line =>
                                   string.Join(" ", line.Words.Select(word => word.Text))))));


                    if (!string.IsNullOrWhiteSpace(text))
                    {
                        Clipboard.SetText(text);
                    }

                    result = true;
                }
            }
            catch (Exception ex)
            {
                error = ex.Message;
            }
            finally
            {
                destinationImage?.Dispose();
            }

            return(new ResultOCR()
            {
                Success = result, Error = error, Text = text
            });
        }
Code Example #17
        /// <summary>
        /// Recognizes text in an image that's available via a public URI.
        /// </summary>
        /// <param name="imageUri">A <see cref="Uri"/> of the image</param>
        /// <param name="imageFormat">A <see cref="OcrImageFormats"/> format</param>
        /// <param name="language">A <see cref="OcrLanguages"/> language</param>
        /// <param name="detectOrientation">Explicitly requests to detect image orientation</param>
        /// <returns>The <see cref="OcrResponse"/> with the structured text</returns>
        public async Task <OcrResponse> RecognizeAsync(Uri imageUri, OcrImageFormats imageFormat = OcrImageFormats.NotSet, OcrLanguages language = OcrLanguages.NotSet, bool?detectOrientation = null)
        {
            PrepareQueryString(language, detectOrientation);

            var urlContent = new UrlContent
            {
                Url = imageUri.AbsoluteUri
            };

            using (var content = new StringContent(await Json.SerializeAsync(urlContent)))
            {
                content.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue("application/json");

                return(await GetResponse(content));
            }
        }
Code Example #18
        private async Task <OcrResult> UploadAndRecognizeImageAsync(string imageFilePath, OcrLanguages language)
        {
            //
            // Create Cognitive Services Vision API Service client.
            //
            using (var client = new ComputerVisionClient(Credentials)
            {
                Endpoint = "https://westcentralus.api.cognitive.microsoft.com/vision/v1.0"
            })
            {
                using (Stream imageFileStream = File.OpenRead(imageFilePath))
                {
                    //
                    // Upload an image and perform OCR.
                    //

                    OcrResult ocrResult = await client.RecognizePrintedTextInStreamAsync(!DetectOrientation, imageFileStream, language);

                    return(ocrResult);
                }
            }
        }
Code Example #19
        public static async Task <OcrResult> RecognizeTextFromImageLocal(ComputerVisionClient client, string localFile, bool detectOrientation, OcrLanguages language = OcrLanguages.En)
        {
            Console.WriteLine("----------------------------------------------------------");
            Console.WriteLine($"Recognize Printed Text (OCR) From a local image FILE: {Path.GetFileName(localFile)} ");
            Console.WriteLine();

            // Read text from the local image file
            var ocrResults = await client.RecognizePrintedTextInStreamAsync(detectOrientation, File.OpenRead(localFile), language);

            return(ocrResults);
        }
Code Example #20
        internal static async Task <OcrResult> UploadAndRecognizeImageAsync(string imageFilePath, OcrLanguages language)
        {
            string key         = ConfigurationManager.AppSettings["ComputerVisionApiKey"];
            string endPoint    = ConfigurationManager.AppSettings["ComputerVisionEndpoint"];
            var    credentials = new ApiKeyServiceClientCredentials(key);

            using (var client = new ComputerVisionClient(credentials)
            {
                Endpoint = endPoint
            })
            {
                using (Stream imageFileStream = File.OpenRead(imageFilePath))
                {
                    OcrResult ocrResult = await client.RecognizePrintedTextInStreamAsync(false, imageFileStream, language);

                    return(ocrResult);
                }
            }
        }
Code Example #21
File: OCRServices.cs  Project: cvera310/TuCredito_WPF
        internal static async Task <OcrResult> UploadAndRecognizeImageAsync(string imageFilePath, OcrLanguages language)
        {
            string key         = "0d8a60b23e9b4441b748d01c93c8a88f";
            string endPoint    = "https://pruebaiavision.cognitiveservices.azure.com/";
            var    credentials = new ApiKeyServiceClientCredentials(key);

            using (var client = new ComputerVisionClient(credentials)
            {
                Endpoint = endPoint
            })
            {
                using (Stream imageFileStream = File.OpenRead(imageFilePath))
                {
                    OcrResult ocrResult = await client.RecognizePrintedTextInStreamAsync(false, imageFileStream, language);

                    return(ocrResult);
                }
            }
        }
Code Example #22
        private async Task <OcrResult> UploadAndRecognizeImage(StorageFile imageFile, OcrLanguages language)
        {
            var stream = await imageFile.OpenStreamForReadAsync();

            Log("Calling VisionServiceClient.RecognizeTextAsync()...");
            var ocrResult = await VisionServiceClient.RecognizePrintedTextInStreamAsync(true, stream, language);

            return(ocrResult);
        }
Code Example #23
        internal static string ToSerializedValue(this OcrLanguages value)
        {
            switch (value)
            {
            case OcrLanguages.Unk:
                return("unk");

            case OcrLanguages.ZhHans:
                return("zh-Hans");

            case OcrLanguages.ZhHant:
                return("zh-Hant");

            case OcrLanguages.Cs:
                return("cs");

            case OcrLanguages.Da:
                return("da");

            case OcrLanguages.Nl:
                return("nl");

            case OcrLanguages.En:
                return("en");

            case OcrLanguages.Fi:
                return("fi");

            case OcrLanguages.Fr:
                return("fr");

            case OcrLanguages.De:
                return("de");

            case OcrLanguages.El:
                return("el");

            case OcrLanguages.Hu:
                return("hu");

            case OcrLanguages.It:
                return("it");

            case OcrLanguages.Ja:
                return("ja");

            case OcrLanguages.Ko:
                return("ko");

            case OcrLanguages.Nb:
                return("nb");

            case OcrLanguages.Pl:
                return("pl");

            case OcrLanguages.Pt:
                return("pt");

            case OcrLanguages.Ru:
                return("ru");

            case OcrLanguages.Es:
                return("es");

            case OcrLanguages.Sv:
                return("sv");

            case OcrLanguages.Tr:
                return("tr");

            case OcrLanguages.Ar:
                return("ar");

            case OcrLanguages.Ro:
                return("ro");

            case OcrLanguages.SrCyrl:
                return("sr-Cyrl");

            case OcrLanguages.SrLatn:
                return("sr-Latn");

            case OcrLanguages.Sk:
                return("sk");
            }
            return(null);
        }
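
ToSerializedValue above maps each OcrLanguages member to its BCP-47 wire code. When a response echoes the language back as a string, the reverse mapping can be handy; a hypothetical inverse helper (not part of the SDK), built on the method above:

        // Sketch only: hypothetical inverse of ToSerializedValue; falls back to Unk
        // when the wire code is not recognized. Not provided by the client library.
        internal static OcrLanguages ParseOcrLanguage(string value)
        {
            foreach (OcrLanguages candidate in Enum.GetValues(typeof(OcrLanguages)))
            {
                if (candidate.ToSerializedValue() == value)
                {
                    return candidate;
                }
            }
            return OcrLanguages.Unk;
        }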
Code Example #24
        /// <summary>
        /// Uploads the image to Cognitive Services and performs OCR.
        /// </summary>
        /// <param name="imageFilePath">The image file path.</param>
        /// <param name="language">The language code to recognize.</param>
        /// <returns>Awaitable OCR result.</returns>
        private async Task <OcrResult> UploadAndRecognizeImageAsync(string imageFilePath, OcrLanguages language)
        {
            // -----------------------------------------------------------------------
            // KEY SAMPLE CODE STARTS HERE
            // -----------------------------------------------------------------------

            //
            // Create Cognitive Services Vision API Service client.
            //
            using (var client = new ComputerVisionClient(Credentials)
            {
                Endpoint = Endpoint
            })
            {
                Log("ComputerVisionClient is created");

                using (Stream imageFileStream = File.OpenRead(imageFilePath))
                {
                    //
                    // Upload an image and perform OCR.
                    //
                    Log("Calling ComputerVisionClient.RecognizePrintedTextInStreamAsync()...");
                    OcrResult ocrResult = await client.RecognizePrintedTextInStreamAsync(!DetectOrientation, imageFileStream, language);

                    return(ocrResult);
                }
            }

            // -----------------------------------------------------------------------
            // KEY SAMPLE CODE ENDS HERE
            // -----------------------------------------------------------------------
        }