コード例 #1
0
        public void TagImageTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "TagImageTest");

                string imageUrl = GetTestImageUrl("house.jpg");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    TagResult result = client.TagImageAsync(imageUrl).Result;

                    // Model version is a date, optionally with a "-preview" suffix.
                    Assert.Matches("^\\d{4}-\\d{2}-\\d{2}(-preview)?$", result.ModelVersion);

                    var expects = new string[] { "grass", "outdoor", "building", "plant", "property", "home",
                                                 "house", "real estate", "sky", "siding", "porch", "yard", "cottage", "garden buildings",
                                                 "door", "lawn", "window", "farmhouse", "tree", "backyard", "driveway", "shed", "roof", "land lot" };

                    var intersect = expects.Intersect(result.Tags.Select(tag => tag.Name).ToArray()).ToArray();

                    // Assert.Equal (rather than Assert.True on a bool) so a failure
                    // reports the differing counts instead of just "expected True".
                    Assert.Equal(expects.Length, intersect.Length);

                    // Confirm tags are in descending confidence order
                    var originalConfidences = result.Tags.Select(tag => tag.Confidence).ToArray();
                    var sortedConfidences   = originalConfidences.OrderByDescending(c => c).ToArray();
                    Assert.Equal(sortedConfidences, originalConfidences);
                }
            }
        }
コード例 #2
0
        public void AnalyzeCelebritiesDomainImageTest()
        {
            using (MockContext context = MockContext.Start(this.GetType().FullName))
            {
                HttpMockServer.Initialize(this.GetType().FullName, "AnalyzeCelebritiesDomainTest");

                string celebrityUrl = GetTestImageUrl("satya.jpg");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    // Run the domain-specific "celebrities" model against the image URL.
                    DomainModelResults domainResults = client.AnalyzeImageByDomainAsync("celebrities", celebrityUrl).Result;

                    // The domain result payload is loosely typed; it should arrive as a JObject.
                    var resultObject = domainResults.Result as JObject;
                    Assert.NotNull(resultObject);

                    // Project the raw JSON onto the strongly typed celebrity model.
                    var celebrityResults = resultObject.ToObject <CelebrityResults>();
                    Assert.NotNull(celebrityResults);
                    Assert.Equal(1, celebrityResults.Celebrities.Count);

                    // Exactly one confident match with a non-degenerate face rectangle.
                    var match = celebrityResults.Celebrities[0];
                    Assert.Equal("Satya Nadella", match.Name);
                    Assert.True(match.Confidence > 0.98);
                    Assert.True(match.FaceRectangle.Width > 0);
                    Assert.True(match.FaceRectangle.Height > 0);
                }
            }
        }
コード例 #3
0
        public void AnalyzeImageInvalidUrlTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "AnalyzeImageInvalidUrlTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    // Track that the expected exception actually surfaced; without
                    // this flag the test passes vacuously when the call succeeds.
                    bool caughtExpectedException = false;

                    try
                    {
                        ImageAnalysis result = client.AnalyzeImageAsync(
                            "https://invalidurl",
                            new List <VisualFeatureTypes?>()
                        {
                            VisualFeatureTypes.Categories
                        })
                                               .Result;
                    }
                    catch (Exception ex) when(ex.InnerException is ComputerVisionErrorResponseException cverex)
                    {
                        caughtExpectedException = true;
                        Assert.Equal("InvalidImageUrl", cverex.Body.Error.Innererror.Code);
                    }

                    Assert.True(caughtExpectedException);
                }
            }
        }
コード例 #4
0
        public void DescribeImageInStreamTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "DescribeImageInStreamTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                    using (FileStream stream = new FileStream(GetTestImagePath("house.jpg"), FileMode.Open))
                    {
                        ImageDescription result = client.DescribeImageInStreamAsync(stream).Result;

                        // Model version is a date, optionally with a "-preview" suffix.
                        Assert.Matches("^\\d{4}-\\d{2}-\\d{2}(-preview)?$", result.ModelVersion);
                        // xUnit's Assert.Equal takes (expected, actual) — the original
                        // had the arguments reversed, which garbles failure messages.
                        Assert.Equal(new string[] {
                            "grass",
                            "outdoor",
                            "sky",
                            "house",
                            "building",
                            "green",
                            "lawn",
                            "residential",
                            "grassy"
                        }, result.Tags);
                        Assert.Equal(1, result.Captions.Count);
                        Assert.Equal("a house with a flag on the front", result.Captions[0].Text);
                        Assert.True(result.Captions[0].Confidence > 0.41);
                    }
            }
        }
コード例 #5
0
        public void RecognizeTextInStreamTest()
        {
            using (MockContext context = MockContext.Start(this.GetType().FullName))
            {
                HttpMockServer.Initialize(this.GetType().FullName, "RecognizeTextInStreamTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                    using (FileStream imageStream = new FileStream(GetTestImagePath("whiteboard.jpg"), FileMode.Open))
                    {
                        // Kick off handwriting recognition; the service responds with an
                        // Operation-Location header that must be polled for the result.
                        RecognizeTextInStreamHeaders recognizeHeaders = client.RecognizeTextInStreamAsync(imageStream, TextRecognitionMode.Handwritten).Result;

                        Assert.NotNull(recognizeHeaders.OperationLocation);

                        // Poll the async operation until it completes.
                        TextRecognitionResult textResult = GetRecognitionResultWithPolling(client, recognizeHeaders.OperationLocation);

                        Assert.NotNull(textResult);

                        // Two lines of handwritten text with known word counts.
                        Assert.Equal(
                            new string[] { "You must be the change", "you want to see in the world!!" },
                            textResult.Lines.Select(line => line.Text));
                        Assert.Equal(2, textResult.Lines.Count);
                        Assert.Equal(5, textResult.Lines[0].Words.Count);
                        Assert.Equal(7, textResult.Lines[1].Words.Count);
                    }
            }
        }
コード例 #6
0
        /// <summary>
        /// Generates a thumbnail of the requested width and height from an image
        /// stream. By default the service identifies the region of interest and
        /// smart-crops when the requested aspect ratio differs from the source
        /// image's. A successful response carries the thumbnail binary; on failure
        /// the service reports an error code (one of InvalidImageUrl,
        /// InvalidImageFormat, InvalidImageSize, InvalidThumbnailSize,
        /// NotSupportedImage, FailedToProcess, Timeout, or InternalServerError)
        /// and a message.
        /// </summary>
        /// <param name='operations'>
        /// The operations group for this extension method.
        /// </param>
        /// <param name='width'>
        /// Thumbnail width in pixels; must be between 1 and 1024 (recommended
        /// minimum 50).
        /// </param>
        /// <param name='height'>
        /// Thumbnail height in pixels; must be between 1 and 1024 (recommended
        /// minimum 50).
        /// </param>
        /// <param name='image'>
        /// An image stream.
        /// </param>
        /// <param name='smartCropping'>
        /// Boolean flag for enabling smart cropping.
        /// </param>
        /// <param name='cancellationToken'>
        /// The cancellation token.
        /// </param>
        public static async Task <Stream> GenerateThumbnailInStreamAsync(this IComputerVisionClient operations, int width, int height, Stream image, bool?smartCropping = false, CancellationToken cancellationToken = default(CancellationToken))
        {
            var response = await operations.GenerateThumbnailInStreamWithHttpMessagesAsync(width, height, image, smartCropping, null, cancellationToken).ConfigureAwait(false);

            // Release the request message only; the response cannot be disposed
            // here because its body stream is handed back to the caller.
            response.Request.Dispose();
            return response.Body;
        }
コード例 #7
0
 /// <summary>
 /// Extracts a rich set of visual features from the image at the given URL.
 /// An optional parameter selects which features to return; image categories
 /// are returned by default. A successful response is JSON; on failure the
 /// response carries an error code and message.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='url'>
 /// Publicly reachable URL of an image.
 /// </param>
 /// <param name='visualFeatures'>
 /// Visual feature types to return (comma-separated when multiple). Valid
 /// values: Categories (taxonomy-based categorization), Tags (detailed word
 /// list), Description (complete English sentence), Faces (coordinates,
 /// gender and age when present), ImageType (clipart or line drawing), Color
 /// (accent/dominant colors and black&amp;white detection), Adult (nudity,
 /// sex acts and sexually suggestive content), Objects (object detection
 /// with approximate location, English only), Brands (brand detection with
 /// approximate location, English only).
 /// </param>
 /// <param name='details'>
 /// Domain-specific details to return (comma-separated when multiple):
 /// Celebrities, Landmarks.
 /// </param>
 /// <param name='language'>
 /// Output language; defaults to &amp;quot;en&amp;quot;. Possible values
 /// include: 'en', 'es', 'ja', 'pt', 'zh'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <ImageAnalysis> AnalyzeImageAsync(this IComputerVisionClient operations, string url, IList <VisualFeatureTypes> visualFeatures = default(IList <VisualFeatureTypes>), IList <Details> details = default(IList <Details>), string language = default(string), CancellationToken cancellationToken = default(CancellationToken))
 {
     // Dispose of the HTTP response once the deserialized body is extracted.
     using (var response = await operations.AnalyzeImageWithHttpMessagesAsync(url, visualFeatures, details, language, null, cancellationToken).ConfigureAwait(false))
     {
         return response.Body;
     }
 }
        /// <summary>
        /// Runs the Read (OCR) operation on a blob stream and returns the
        /// recognized text, one entry per line.
        /// </summary>
        /// <param name='client'>Computer Vision client used for the Read calls.</param>
        /// <param name='blobStream'>Stream of the blob content to analyze.</param>
        /// <param name='log'>Logger for progress and timeout diagnostics.</param>
        public static async Task <List <string> > ExtractTextFromBlob(IComputerVisionClient client, Stream blobStream,
                                                                      ILogger log)
        {
            // Operation IDs are GUIDs, which are always 36 characters long.
            const int numberOfCharsInOperationId = 36;
            const int maxRetries = 10;

            log.LogInformation("Extract text from blob");

            var blobTextHeaders = await client.ReadInStreamAsync(blobStream);

            // The operation ID is the trailing GUID of the Operation-Location URL.
            var operationLocation = blobTextHeaders.OperationLocation;
            var operationId       = operationLocation.Substring(operationLocation.Length - numberOfCharsInOperationId);
            var operationGuid     = Guid.Parse(operationId);

            ReadOperationResult results = await client.GetReadResultAsync(operationGuid);

            // Poll while the operation is still pending, waiting one second
            // between attempts, for at most maxRetries attempts. Unlike the
            // previous version, no delay is incurred once the operation is done,
            // and the retry limit is not duplicated as a magic number.
            for (var attempt = 0;
                 (results.Status == OperationStatusCodes.Running ||
                  results.Status == OperationStatusCodes.NotStarted) && attempt < maxRetries;
                 attempt++)
            {
                log.LogInformation("Server status: {0}, waiting {1} seconds...", results.Status, attempt);
                await Task.Delay(1000);

                results = await client.GetReadResultAsync(operationGuid);
            }

            if (results.Status == OperationStatusCodes.Running ||
                results.Status == OperationStatusCodes.NotStarted)
            {
                // Report the timeout only when polling actually gave up,
                // instead of unconditionally on a fixed iteration count.
                log.LogInformation("Server timed out.");
            }

            var lines = results.AnalyzeResult.ReadResults.SelectMany(x => x.Lines).Select(x => x.Text).ToList();

            return(lines);
        }
コード例 #9
0
        public void BatchReadPdfFileInStreamTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "BatchReadPdfFileInStreamTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                    using (FileStream pdfStream = new FileStream(GetTestImagePath("menu.pdf"), FileMode.Open))
                    {
                        // Submit the PDF for batch reading and grab the polling URL.
                        ReadInStreamHeaders readHeaders = client.ReadInStreamWithHttpMessagesAsync(pdfStream).Result.Headers;

                        Assert.NotNull(readHeaders.OperationLocation);

                        // Poll the operation until a terminal status is reached.
                        ReadOperationResult operationResult = GetRecognitionResultWithPolling(client, readHeaders.OperationLocation);

                        Assert.NotNull(operationResult);
                        Assert.Equal(OperationStatusCodes.Succeeded, operationResult.Status);

                        Assert.NotNull(operationResult.AnalyzeResult);
                        Assert.Equal(1, operationResult.AnalyzeResult.ReadResults.Count);

                        // Single letter-sized page, measured in inches.
                        var firstPage = operationResult.AnalyzeResult.ReadResults[0];

                        Assert.Equal(1, firstPage.Page);
                        Assert.Equal(8.5, firstPage.Width);
                        Assert.Equal(11, firstPage.Height);
                        Assert.Equal(TextRecognitionResultDimensionUnit.Inch, firstPage.Unit);

                        Assert.Equal(28, firstPage.Lines.Count);
                        Assert.Equal("Microsoft", firstPage.Lines[0].Text);
                    }
            }
        }
コード例 #10
0
ファイル: VisionAnalyzeTests.cs プロジェクト: sjh37/aznetsdk
        public void AnalyzeImageTest()
        {
            using (MockContext context = MockContext.Start(this.GetType().FullName))
            {
                HttpMockServer.Initialize(this.GetType().FullName, "AnalyzeImageTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    // Request several visual features for the house test image.
                    ImageAnalysis analysis = client.AnalyzeImageAsync(
                        GetTestImageUrl("house.jpg"),
                        new List <VisualFeatureTypes>()
                    {
                        VisualFeatureTypes.Adult,
                        VisualFeatureTypes.Categories,
                        VisualFeatureTypes.Color,
                        VisualFeatureTypes.Faces,
                        VisualFeatureTypes.ImageType,
                        VisualFeatureTypes.Tags
                    })
                                           .Result;

                    // Tags, metadata, adult scores, category and colors all have
                    // known values for this recorded image.
                    Assert.Equal("grass", analysis.Tags[0].Name);
                    Assert.True(analysis.Tags[0].Confidence > 0.9);
                    Assert.Equal("Jpeg", analysis.Metadata.Format);
                    Assert.False(analysis.Adult.IsAdultContent);
                    Assert.False(analysis.Adult.IsRacyContent);
                    Assert.True(analysis.Adult.AdultScore < 0.1);
                    Assert.True(analysis.Adult.RacyScore < 0.1);
                    Assert.Equal("building_", analysis.Categories[0].Name);
                    Assert.True(analysis.Categories[0].Score > 0.5);
                    Assert.Equal("Green", analysis.Color.DominantColorBackground);
                    Assert.Equal("Green", analysis.Color.DominantColorForeground);
                }
            }
        }
コード例 #11
0
        public void ThumbnailImageInStreamTest()
        {
            using (MockContext context = MockContext.Start(this.GetType().FullName))
            {
                HttpMockServer.Initialize(this.GetType().FullName, "ThumbnailImageInStreamTest");

                // Dispose the client like every other test in this suite does.
                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                    using (FileStream stream = new FileStream(GetTestImagePath("house.jpg"), FileMode.Open))
                        using (Stream result = client.GenerateThumbnailInStreamAsync(64, 64, stream).Result)
                        {
                            // Note - .NET Core 2.0 doesn't support System.Drawing.Bitmap

                            byte[] expected = File.ReadAllBytes(GetTestImagePath("house_thumbnail.jpg"));
                            byte[] actual   = new byte[expected.Length];

                            // Stream.Read may return fewer bytes than requested, so
                            // loop until the buffer is full or the stream ends.
                            int totalRead = 0;
                            int bytesRead;
                            while (totalRead < expected.Length &&
                                   (bytesRead = result.Read(actual, totalRead, expected.Length - totalRead)) > 0)
                            {
                                totalRead += bytesRead;
                            }

                            // Reinstate for playback when HttpRecorder is fixed
                            if (HttpMockServer.Mode == HttpRecorderMode.Record)
                            {
                                Assert.Equal(expected.Length, totalRead);
                                Assert.Equal(EOF, result.ReadByte());
                                Assert.Equal(expected, actual);
                            }
                        }
            }
        }
コード例 #12
0
 public void SetupComputerVisionClient(string key, string endpoint)
 {
     // Authenticate with the subscription key, then point the client at the
     // caller-supplied regional endpoint.
     var credentials = new Microsoft.Azure.CognitiveServices.Vision.ComputerVision.ApiKeyServiceClientCredentials(key);

     computerVisionClient = new ComputerVisionClient(credentials, new System.Net.Http.DelegatingHandler[] { })
     {
         Endpoint = endpoint,
     };
 }
コード例 #13
0
        public void OcrImageTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "OcrImageTest");

                string germanTextUrl = GetTestImageUrl("achtung.jpg");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    // Run printed-text OCR in German with orientation detection on.
                    const bool DetectOrientation = true;
                    OcrResult  ocrResult         = client.RecognizePrintedTextAsync(DetectOrientation, germanTextUrl, OcrLanguages.De).Result;

                    // Model version is a date, optionally with a "-preview" suffix.
                    Assert.Matches("^\\d{4}-\\d{2}-\\d{2}(-preview)?$", ocrResult.ModelVersion);
                    Assert.Equal("de", ocrResult.Language);
                    Assert.Equal("Up", ocrResult.Orientation);
                    Assert.True(ocrResult.TextAngle > 0);
                    // A single region containing one line with one word.
                    Assert.Equal(1, ocrResult.Regions.Count);
                    Assert.Equal(1, ocrResult.Regions[0].Lines.Count);
                    Assert.Equal(1, ocrResult.Regions[0].Lines[0].Words.Count);
                    Assert.Equal("ACHTUNG", ocrResult.Regions[0].Lines[0].Words[0].Text);
                    Assert.True(ocrResult.Regions[0].BoundingBox == ocrResult.Regions[0].Lines[0].BoundingBox);
                }
            }
        }
コード例 #14
0
        public void OcrImageInStreamTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "OcrImageInStreamTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                    using (FileStream imageStream = new FileStream(GetTestImagePath("signage.jpg"), FileMode.Open))
                    {
                        // Run printed-text OCR on the uploaded stream with
                        // orientation detection enabled.
                        const bool DetectOrientation = true;
                        OcrResult  ocrResult         = client.RecognizePrintedTextInStreamAsync(DetectOrientation, imageStream).Result;

                        // Model version is a date, optionally with a "-preview" suffix.
                        Assert.Matches("^\\d{4}-\\d{2}-\\d{2}(-preview)?$", ocrResult.ModelVersion);
                        Assert.Equal("en", ocrResult.Language);
                        Assert.Equal("Up", ocrResult.Orientation);
                        Assert.True(ocrResult.TextAngle < 0);
                        // One region of three lines; check the first word of each.
                        Assert.Equal(1, ocrResult.Regions.Count);
                        Assert.Equal(3, ocrResult.Regions[0].Lines.Count);
                        Assert.Equal(1, ocrResult.Regions[0].Lines[0].Words.Count);
                        Assert.Equal("WEST", ocrResult.Regions[0].Lines[0].Words[0].Text);
                        Assert.Equal("520", ocrResult.Regions[0].Lines[1].Words[0].Text);
                        Assert.Equal("Seattle", ocrResult.Regions[0].Lines[2].Words[0].Text);
                    }
            }
        }
コード例 #15
0
 /// <summary>
 /// Produces one or more human-readable, full-sentence English descriptions
 /// of the uploaded image, ordered by confidence score and based on the
 /// content tags that are returned alongside them. A successful response is
 /// JSON; on failure the response carries an error code and message.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='image'>
 /// An image stream.
 /// </param>
 /// <param name='maxCandidates'>
 /// Maximum number of candidate descriptions to be returned.  The default is 1.
 /// </param>
 /// <param name='language'>
 /// Output language; defaults to &amp;quot;en&amp;quot;. Possible values
 /// include: 'en', 'es', 'ja', 'pt', 'zh'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <ImageDescription> DescribeImageInStreamAsync(this IComputerVisionClient operations, Stream image, int?maxCandidates = 1, string language = default(string), CancellationToken cancellationToken = default(CancellationToken))
 {
     // Dispose of the HTTP response once the deserialized body is extracted.
     using (var response = await operations.DescribeImageInStreamWithHttpMessagesAsync(image, maxCandidates, language, null, cancellationToken).ConfigureAwait(false))
     {
         return response.Body;
     }
 }
コード例 #16
0
 /// <summary>
 /// Produces one or more human-readable, full-sentence descriptions of the
 /// image at the given URL, ordered by confidence score and based on the
 /// content tags that are returned alongside them. Descriptions may include
 /// celebrity and landmark domain-model results where applicable. A
 /// successful response is JSON; on failure the response carries an error
 /// code and message.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='url'>
 /// Publicly reachable URL of an image.
 /// </param>
 /// <param name='maxCandidates'>
 /// Maximum number of candidate descriptions to be returned.  The default is 1.
 /// </param>
 /// <param name='language'>
 /// Output language; defaults to &amp;quot;en&amp;quot;. Possible values
 /// include: 'en', 'es', 'ja', 'pt', 'zh'
 /// </param>
 /// <param name='descriptionExclude'>
 /// Turn off specified domain models when generating the description.
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <ImageDescription> DescribeImageAsync(this IComputerVisionClient operations, string url, int?maxCandidates = 1, string language = default(string), IList <DescriptionExclude?> descriptionExclude = default(IList <DescriptionExclude?>), CancellationToken cancellationToken = default(CancellationToken))
 {
     // Dispose of the HTTP response once the deserialized body is extracted.
     using (var response = await operations.DescribeImageWithHttpMessagesAsync(url, maxCandidates, language, descriptionExclude, null, cancellationToken).ConfigureAwait(false))
     {
         return response.Body;
     }
 }
コード例 #17
0
 /// <summary>
 /// Performs object detection on the uploaded image stream. A successful
 /// response is JSON; on failure the response carries an error code and a
 /// message describing what went wrong.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='image'>
 /// An image stream.
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <DetectResult> DetectObjectsInStreamAsync(this IComputerVisionClient operations, Stream image, CancellationToken cancellationToken = default(CancellationToken))
 {
     // Dispose of the HTTP response once the deserialized body is extracted.
     using (var response = await operations.DetectObjectsInStreamWithHttpMessagesAsync(image, null, cancellationToken).ConfigureAwait(false))
     {
         return response.Body;
     }
 }
コード例 #18
0
 /// <summary>
 /// Submits the image at the given URL to the Read operation, which uses
 /// state-of-the-art OCR algorithms optimized for text-heavy documents. The
 /// response's 'Operation-Location' header contains the URL to pass to
 /// 'GetReadResult' to retrieve the OCR results.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='url'>
 /// Publicly reachable URL of an image.
 /// </param>
 /// <param name='language'>
 /// BCP-47 language code of the text in the document. Read auto-detects
 /// languages and supports multi-language documents, so only supply a code
 /// to force processing as that language. Possible values include: 'en',
 /// 'es', 'fr', 'de', 'it', 'nl', 'pt'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <ReadHeaders> ReadAsync(this IComputerVisionClient operations, string url, string language = default(string), CancellationToken cancellationToken = default(CancellationToken))
 {
     // Only the response headers (Operation-Location) are of interest here;
     // dispose of the HTTP response after extracting them.
     using (var response = await operations.ReadWithHttpMessagesAsync(url, language, null, cancellationToken).ConfigureAwait(false))
     {
         return response.Headers;
     }
 }
コード例 #19
0
 /// <summary>
 /// Runs Optical Character Recognition (OCR) on the uploaded image stream,
 /// extracting the recognized characters into a machine-usable character
 /// stream. On failure the returned error code is one of InvalidImageUrl,
 /// InvalidImageFormat, InvalidImageSize, NotSupportedImage,
 /// NotSupportedLanguage, or InternalServerError.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='detectOrientation'>
 /// Whether to detect the text orientation in the image. With
 /// detectOrientation=true the OCR service tries to detect and correct the
 /// image orientation before further processing (e.g. if it's upside-down).
 /// </param>
 /// <param name='image'>
 /// An image stream.
 /// </param>
 /// <param name='language'>
 /// BCP-47 language code of the text to be detected; defaults to 'unk'.
 /// Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl',
 /// 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt',
 /// 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <OcrResult> RecognizePrintedTextInStreamAsync(this IComputerVisionClient operations, bool detectOrientation, Stream image, OcrLanguages language = default(OcrLanguages), CancellationToken cancellationToken = default(CancellationToken))
 {
     // Dispose of the HTTP response once the deserialized body is extracted.
     using (var response = await operations.RecognizePrintedTextInStreamWithHttpMessagesAsync(detectOrientation, image, language, null, cancellationToken).ConfigureAwait(false))
     {
         return response.Body;
     }
 }
コード例 #20
0
        public void DetectImageInStreamTest()
        {
            using (MockContext context = MockContext.Start(this.GetType().FullName))
            {
                HttpMockServer.Initialize(this.GetType().FullName, "DetectImageInStreamTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                    using (FileStream stream = new FileStream(GetTestImagePath("people.jpg"), FileMode.Open))
                    {
                        DetectResult result = client.DetectObjectsInStreamAsync(stream).Result;

                        Assert.NotNull(result.Objects);
                        Assert.Equal(5, result.Objects.Count);
                        Assert.Equal("person", result.Objects[0].ObjectProperty);
                        Assert.Equal("person", result.Objects[1].ObjectProperty);
                        Assert.Equal("person", result.Objects[2].ObjectProperty);
                        Assert.Equal("person", result.Objects[3].ObjectProperty);

                        var firstObject = result.Objects[0];
                        Assert.Equal(0, firstObject.Rectangle.X);
                        Assert.Equal(46, firstObject.Rectangle.Y);
                        Assert.Equal(698, firstObject.Rectangle.H);
                        Assert.Equal(229, firstObject.Rectangle.W);
                        // Compare confidences to 3 decimal places instead of exact
                        // double equality, which is brittle for floating point.
                        Assert.Equal(0.554, firstObject.Confidence, 3);

                        var secondObject = result.Objects[1];
                        Assert.Equal(5, secondObject.Rectangle.X);
                        Assert.Equal(71, secondObject.Rectangle.Y);
                        Assert.Equal(671, secondObject.Rectangle.H);
                        Assert.Equal(532, secondObject.Rectangle.W);
                        Assert.Equal(0.953, secondObject.Confidence, 3);
                    }
            }
        }
コード例 #21
0
 /// <summary>
 /// Generates a list of words, or tags, relevant to the content of the
 /// supplied image stream. Tags may describe objects, living beings, scenery
 /// or actions found in the image; unlike categories they form a flat list
 /// rather than a hierarchy, and a tag may carry a disambiguating hint (for
 /// example "cello" hinted as "musical instrument"). All tags are in English.
 /// A successful response is returned in JSON; on failure the response
 /// contains an error code and a descriptive message.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='image'>
 /// An image stream.
 /// </param>
 /// <param name='language'>
 /// The desired language for output generation; defaults to &quot;en&quot;
 /// when not specified. Possible values include: 'en', 'es', 'ja', 'pt', 'zh'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <TagResult> TagImageInStreamAsync(this IComputerVisionClient operations, Stream image, string language = default(string), CancellationToken cancellationToken = default(CancellationToken))
 {
     // Delegate to the HTTP-message overload and dispose the wrapper once the
     // body has been extracted.
     var httpResponse = await operations.TagImageInStreamWithHttpMessagesAsync(image, language, null, cancellationToken).ConfigureAwait(false);
     using (httpResponse)
     {
         return httpResponse.Body;
     }
 }
コード例 #22
0
 /// <summary>
 /// Retrieves the list of domain-specific models supported by the Computer
 /// Vision API (currently the celebrity and landmark recognizers).
 /// A successful response is returned in JSON; on failure the response
 /// contains an error code and a descriptive message.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <ListModelsResult> ListModelsAsync(this IComputerVisionClient operations, CancellationToken cancellationToken = default(CancellationToken))
 {
     // Delegate to the HTTP-message overload and dispose the wrapper once the
     // body has been extracted.
     var httpResponse = await operations.ListModelsWithHttpMessagesAsync(null, cancellationToken).ConfigureAwait(false);
     using (httpResponse)
     {
         return httpResponse.Body;
     }
 }
コード例 #23
0
        public void DescribeImageTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "DescribeImageTest");

                string imageUrl = GetTestImageUrl("dog.jpg");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    const int maxCandidates = 2;

                    ImageDescription result = client.DescribeImageAsync(imageUrl, maxCandidates).Result;

                    Assert.Matches("^\\d{4}-\\d{2}-\\d{2}(-preview)?$", result.ModelVersion);
                    // FIX: xUnit's Assert.Equal signature is (expected, actual); the
                    // expected tag list was previously passed as the second argument,
                    // which inverts the expected/actual labels in failure messages.
                    Assert.Equal(
                        new string[] {
                        "dog",
                        "tree",
                        "outdoor",
                        "sitting",
                        "ground",
                        "animal",
                        "mammal",
                        "close"
                    },
                        result.Tags);
                    Assert.Equal(1, result.Captions.Count);
                    Assert.Equal("a dog with its mouth open", result.Captions[0].Text);
                    Assert.True(result.Captions[0].Confidence > 0.5);
                }
            }
        }
コード例 #24
0
 /// <summary>
 /// Recognizes content within an image by applying a domain-specific model.
 /// The available models can be listed via the /models GET request; currently
 /// the API provides the celebrities and landmarks models.
 /// Two input methods are supported -- (1) uploading an image or (2)
 /// specifying an image URL.
 /// A successful response is returned in JSON; on failure the response
 /// contains an error code and a descriptive message.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='model'>
 /// The domain-specific content to recognize.
 /// </param>
 /// <param name='url'>
 /// Publicly reachable URL of an image.
 /// </param>
 /// <param name='language'>
 /// The desired language for output generation; defaults to &quot;en&quot;
 /// when not specified. Possible values include: 'en', 'es', 'ja', 'pt', 'zh'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <DomainModelResults> AnalyzeImageByDomainAsync(this IComputerVisionClient operations, string model, string url, string language = default(string), CancellationToken cancellationToken = default(CancellationToken))
 {
     // Delegate to the HTTP-message overload and dispose the wrapper once the
     // body has been extracted.
     var httpResponse = await operations.AnalyzeImageByDomainWithHttpMessagesAsync(model, url, language, null, cancellationToken).ConfigureAwait(false);
     using (httpResponse)
     {
         return httpResponse.Body;
     }
 }
コード例 #25
0
        public void RecognizeTextTest()
        {
            using (MockContext context = MockContext.Start(this.GetType().FullName))
            {
                HttpMockServer.Initialize(this.GetType().FullName, "RecognizeTextTest");

                string imageUrl = GetTestImageUrl("signage.jpg");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    // Kick off the asynchronous text-recognition operation; the
                    // service hands back a status URL in Operation-Location.
                    RecognizeTextHeaders headers = client.RecognizeTextAsync(imageUrl, TextRecognitionMode.Printed).Result;

                    Assert.NotNull(headers.OperationLocation);

                    // Poll the status URL until a terminal result is available.
                    TextRecognitionResult recognitionResult = GetRecognitionResultWithPolling(client, headers.OperationLocation);

                    Assert.NotNull(recognitionResult);

                    // Each line happens to contain exactly one word here, so the
                    // per-line and per-word projections yield the same sequence.
                    var expectedText = new string[] { "520", "WEST", "Seattle" };
                    Assert.Equal(
                        expectedText,
                        recognitionResult.Lines.Select(line => line.Text));
                    Assert.Equal(
                        expectedText,
                        recognitionResult.Lines.SelectMany(line => line.Words).Select(word => word.Text));
                    Assert.Equal(3, recognitionResult.Lines.Count);
                }
            }
        }
コード例 #26
0
 /// <summary>
 /// Submits an image for a Read operation, which applies Optical Character
 /// Recognition (OCR) algorithms optimized for text-heavy documents. The
 /// response carries an "Operation-Location" header whose URL must be used
 /// with the "Read Operation Result" operation to retrieve the OCR results.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='url'>
 /// Publicly reachable URL of an image.
 /// </param>
 /// <param name='mode'>
 /// Type of text to recognize. Possible values include: 'Handwritten',
 /// 'Printed'
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <BatchReadFileHeaders> BatchReadFileAsync(this IComputerVisionClient operations, string url, TextRecognitionMode mode, CancellationToken cancellationToken = default(CancellationToken))
 {
     // Delegate to the HTTP-message overload; only the response headers are
     // of interest here (they contain Operation-Location).
     var httpResponse = await operations.BatchReadFileWithHttpMessagesAsync(url, mode, null, cancellationToken).ConfigureAwait(false);
     using (httpResponse)
     {
         return httpResponse.Headers;
     }
 }
コード例 #27
0
        public void AnalyzeLandmarksDomainImageTest()
        {
            using (MockContext context = MockContext.Start(this.GetType().FullName))
            {
                HttpMockServer.Initialize(this.GetType().FullName, "AnalyzeLandmarksDomainImageTest");

                string       landmarksUrl = GetTestImageUrl("spaceneedle.jpg");
                const string Portuguese   = "pt";

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                {
                    // Run the domain-specific "landmarks" model with Portuguese output.
                    DomainModelResults results = client.AnalyzeImageByDomainAsync("landmarks", landmarksUrl, Portuguese).Result;

                    // The raw result is untyped JSON; project it onto the
                    // strongly-typed landmark model.
                    var payload = results.Result as JObject;
                    Assert.NotNull(payload);

                    var landmarkResults = payload.ToObject <LandmarkResults>();
                    Assert.NotNull(landmarkResults);
                    Assert.Equal(1, landmarkResults.Landmarks.Count);

                    // "Obelisco Espacial" is the Portuguese name of the Space Needle.
                    var recognized = landmarkResults.Landmarks[0];
                    Assert.Equal("Obelisco Espacial", recognized.Name);
                    Assert.True(recognized.Confidence > 0.99);
                }
            }
        }
コード例 #28
0
 /// <summary>
 /// Retrieves the OCR results of a Read operation. The URL for this call
 /// should be taken from the "Operation-Location" header returned by the
 /// Batch Read File interface.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='operationId'>
 /// Id of read operation returned in the response of the "Batch Read File"
 /// interface.
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task <ReadOperationResult> GetReadOperationResultAsync(this IComputerVisionClient operations, string operationId, CancellationToken cancellationToken = default(CancellationToken))
 {
     // Delegate to the HTTP-message overload and dispose the wrapper once the
     // body has been extracted.
     var httpResponse = await operations.GetReadOperationResultWithHttpMessagesAsync(operationId, null, cancellationToken).ConfigureAwait(false);
     using (httpResponse)
     {
         return httpResponse.Body;
     }
 }
コード例 #29
0
        public void AnalyzeBrandsTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "AnalyzeBrandsTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                using (FileStream imageStream = new FileStream(GetTestImagePath("microsoft.jpg"), FileMode.Open))
                {
                    // Request brand detection only.
                    var features = new List <VisualFeatureTypes?>()
                    {
                        VisualFeatureTypes.Brands
                    };
                    ImageAnalysis result = client.AnalyzeImageInStreamAsync(imageStream, features).Result;

                    Assert.Matches("^\\d{4}-\\d{2}-\\d{2}(-preview)?$", result.ModelVersion);

                    var brand = result.Brands[0];
                    Assert.Equal("Microsoft", brand.Name);
                    Assert.True(brand.Confidence > 0.7);

                    // The brand's bounding box must lie entirely inside the image.
                    Assert.True(brand.Rectangle.X >= 0);
                    Assert.True(brand.Rectangle.W >= 0);
                    Assert.True(brand.Rectangle.X + brand.Rectangle.W <= result.Metadata.Width);
                    Assert.True(brand.Rectangle.Y >= 0);
                    Assert.True(brand.Rectangle.H >= 0);
                    Assert.True(brand.Rectangle.Y + brand.Rectangle.H <= result.Metadata.Height);
                }
            }
        }
コード例 #30
0
        public void TagImageInStreamTest()
        {
            using (MockContext context = MockContext.Start(this.GetType()))
            {
                HttpMockServer.Initialize(this.GetType(), "TagImageInStreamTest");

                using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
                using (FileStream imageStream = new FileStream(GetTestImagePath("house.jpg"), FileMode.Open))
                {
                    // Request Simplified Chinese tag names for the uploaded image.
                    const string Chinese = "zh";

                    TagResult result = client.TagImageInStreamAsync(imageStream, Chinese).Result;

                    Assert.Matches("^\\d{4}-\\d{2}-\\d{2}(-preview)?$", result.ModelVersion);

                    // Every expected (Chinese) tag must be present in the response;
                    // extra tags beyond this list are tolerated.
                    var expects = new string[] { "草", "户外", "建筑", "植物", "财产", "家", "屋子", "不动产", "天空",
                                                 "护墙板", "门廊", "院子", "小别墅", "花园建筑", "门", "草坪", "窗户/车窗", "农舍", "树", "后院",
                                                 "车道", "小屋", "屋顶", "地段" };

                    var intersect = expects.Intersect(result.Tags.Select(tag => tag.Name).ToArray()).ToArray();

                    Assert.True(intersect.Length == expects.Length);
                }
            }
        }