Example #1
 public AllergyDataController(AllergyContext context, IAllergySpotterService allergySpotterService, ComputerVisionClient client, ComputerVisionService cvService)
 {
     _context = context;
     this.allergySpotterService = allergySpotterService;
     _cvService = cvService;
     _client    = client;
 }
Example #2
        public static async Task<bool> CarPictureValidator(PromptValidatorContext<IList<Attachment>> promptContext, string carType)
        {
            // Computer Vision: verify that the picture actually shows a car
            var computerVisionService = new ComputerVisionService();
            var detectResult          = await computerVisionService.Detect(promptContext.Recognized.Value[0].ContentUrl);

            if (!detectResult.IsCar)
            {
                await promptContext.Context.SendActivityAsync($"That doesn't look like a car. It looks more like {detectResult.Description}.");

                return false;
            }

            // Custom Vision: verify that the car matches the requested type
            var customVisionService = new CustomVisionService();
            var predictedCarType    = await customVisionService.Analyze(promptContext.Recognized.Value[0].ContentUrl);

            var isRightCarType = string.Equals(predictedCarType, carType, StringComparison.OrdinalIgnoreCase);

            if (!isRightCarType)
            {
                await promptContext.Context.SendActivityAsync($"That doesn't look like a {carType}.");

                return false;
            }

            return true;
        }
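The validator above returns Task<bool>, which matches the Bot Framework v4 PromptValidator<IList<Attachment>> delegate apart from the extra carType argument, so it is usually bound through a lambda when the prompt is registered. A minimal sketch under that assumption (the CarDialog class and the "convertible" value are illustrative, not from the source; CarPictureValidator is assumed to be in scope):

        // Sketch: wiring CarPictureValidator into an AttachmentPrompt inside a
        // hypothetical ComponentDialog. The lambda fixes the carType argument so
        // the method fits the PromptValidator<IList<Attachment>> signature.
        public class CarDialog : ComponentDialog
        {
            public CarDialog() : base(nameof(CarDialog))
            {
                AddDialog(new AttachmentPrompt(
                              nameof(AttachmentPrompt),
                              (promptContext, cancellationToken) => CarPictureValidator(promptContext, "convertible")));
            }
        }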
Example #3
        private async Task ResumeAfterPictureClarification(IDialogContext context, IAwaitable<IEnumerable<Attachment>> result)
        {
            await context.PostAsync(Response.CrowdInsights_PictureSent);

            try
            {
                var attachments = await result;
                var contentUrl  = attachments.First().ContentUrl;

                // Let the Cognitive Services do their work
                var detectFacesAndGenderTask = FaceApiService.DetectFacesAndGenderAsync(contentUrl);
                var visionTask = ComputerVisionService.DescribeAsync(contentUrl);
                await Task.WhenAll(detectFacesAndGenderTask, visionTask);

                var facesAndGender = await detectFacesAndGenderTask;
                var vision         = await visionTask;

                // Parse the result
                await context.PostAsync($"I think you're looking at _{vision.Text}_ , neat! I'm about _{Math.Floor(vision.Confidence * 100)}_% sure.");

                await context.PostAsync($"Your crowd consists of *{facesAndGender.Length}* people, from which *{facesAndGender.Where(x => x.FaceAttributes.Gender.Equals("male")).Count()}* are male and *{facesAndGender.Where(x => x.FaceAttributes.Gender.Equals("female")).Count()}* are female.");
            }
            catch (Exception ex)
            {
                await context.PostAsync($"ERROR: {ex.Message}");

                await context.PostAsync(Response.Error);
            }

            context.Done<object>(null);
        }
Example #4
        static void Main(string[] args)
        {
            var faceImage  = "https://pbs.twimg.com/profile_images/747601253266395136/2HeCGdiG_400x400.jpg";
            var ocrImage   = "https://pbs.twimg.com/media/DtdfaSeVsAAeRis.jpg";
            var colluptUrl = "xxxxxxxx";

            Console.WriteLine("Cognitive Services - Face - DetectFace\n");

            var faceClient = new FaceService();
            var faces      = faceClient.GetRemoteEmotionsAsync(corruptUrl).Result;

            Console.WriteLine($"Detected: {faces.Count} Person.");
            foreach (var face in faces)
            {
                Console.WriteLine($"Emotion Result: \nAge:{face.Age} Gender:{face.Gender} Happiness:{face.Happiness}%\n\n");
            }


            Console.WriteLine("Cognitive Services - ComputerVision - OCR\n");

            var computerVisionClient = new ComputerVisionService();
            var regions = computerVisionClient.ExtractRemoteTextAsync(ocrImage).Result;

            Console.WriteLine($"Detedted: {regions.Count} Regions");
            foreach (var region in regions)
            {
                Console.WriteLine($"OCR Result:\n{region}\n\n");
            }


            Console.ReadLine();
        }
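The .Result calls above block the console's main thread; that is tolerable in a small demo but deadlock-prone if the same code is moved into a UI or ASP.NET context. Since C# 7.1 the same flow can use an async entry point instead. A sketch of the OCR half, assuming the same ComputerVisionService wrapper as above:

        // Sketch: an async Main avoids blocking on .Result.
        static async Task Main(string[] args)
        {
            var ocrImage = "https://pbs.twimg.com/media/DtdfaSeVsAAeRis.jpg";

            var computerVisionClient = new ComputerVisionService();
            var regions = await computerVisionClient.ExtractRemoteTextAsync(ocrImage);

            Console.WriteLine($"Detected: {regions.Count} Regions");
            foreach (var region in regions)
            {
                Console.WriteLine($"OCR Result:\n{region}\n");
            }
        }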
Example #5
        public async Task<string> GetResponseStringCVAsync()
        {
            var client = ComputerVisionService.GetCVClient();

            VMimageResult = await ComputerVisionService
                            .ExtractUrlLocal(client, VMimagePath);

            return VMimageResult;
        }
Example #6
        public DialogAnalyzerClient(string computerVisionApiRegion, string computerVisionSubscriptionKey,
                                    string textAnalyticsApiRegion, string textAnalyticsSubscriptionKey)
        {
            // Computer Vision Service
            this.ComputerVisionService = new ComputerVisionService(computerVisionApiRegion, computerVisionSubscriptionKey);

            // Text Analytics Service
            this.TextAnalyticsService = new TextAnalyticsService(textAnalyticsApiRegion, textAnalyticsSubscriptionKey);
        }
Example #7
        public static async Task<IActionResult> Run(
            //HTTP Trigger (Functions allow only a single trigger)
            [HttpTrigger(AuthorizationLevel.Function, "post", Route = "NewCognitiveThumbnail/{tileWidth}/{tileHeight}/{iconWidth}/{iconHeight}")] NewRequest <SmartDoc> newRequest,

            // Inputs
            [Blob("smartdocs/{RequestItem.DocName}", FileAccess.Read, Connection = "SmartDocsStorageConnection")] byte[] smartDocImage,
            int tileWidth,
            int tileHeight,
            int iconWidth,
            int iconHeight,

            // Outputs
            [Blob("smartdocs-tile/{RequestItem.DocName}", FileAccess.Write)] Stream tileImage,
            [Blob("smartdocs-icon/{RequestItem.DocName}", FileAccess.Write)] Stream iconImage,

            // Logger
            ILogger log)
        {
            log.LogInformation($"New Direct-HTTP Thumbnail Request triggered: {JsonConvert.SerializeObject(newRequest)}");

            string stepName = InstructionFlag.Thumbnail.ToString();

            if (httpClient == null)
            {
                httpClient = new HttpClient();
            }

            try
            {
                var tileResult = await ComputerVisionService.GetThumbnailAsync(httpClient, smartDocImage, tileImage, tileWidth, tileHeight);

                var iconResult = await ComputerVisionService.GetThumbnailAsync(httpClient, smartDocImage, iconImage, iconWidth, iconHeight);

                //Update the request information with the newly processed data
                newRequest.RequestItem.CognitivePipelineActions.Add(new ProcessingStep
                {
                    StepName      = stepName,
                    LastUpdatedAt = DateTime.UtcNow,
                    Output        = JsonConvert.SerializeObject(new Thumbnail[] { tileResult, iconResult }),
                    Status        = SmartDocStatus.ProccessedSuccessfully.ToString()
                });

                return (ActionResult)new OkObjectResult(newRequest);
            }
            catch (Exception ex)
            {
                newRequest.RequestItem.CognitivePipelineActions.Add(new ProcessingStep
                {
                    StepName      = stepName,
                    LastUpdatedAt = DateTime.UtcNow,
                    Output        = ex.Message,
                    Status        = SmartDocStatus.ProcessedUnsuccessfully.ToString()
                });
                return (ActionResult)new BadRequestObjectResult(newRequest);
            }
        }
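ComputerVisionService.GetThumbnailAsync is not shown in this example. One plausible implementation, sketched here as an assumption, posts the image bytes to the Computer Vision generateThumbnail REST endpoint and copies the returned thumbnail into the output blob stream; the endpoint/key setting names and the Thumbnail result shape are placeholders inferred from the call site:

        // Sketch only; the signature is taken from the call site above.
        public static async Task<Thumbnail> GetThumbnailAsync(
            HttpClient httpClient, byte[] image, Stream output, int width, int height)
        {
            // Setting names are illustrative, not from the source.
            var endpoint = Environment.GetEnvironmentVariable("ComputerVisionEndpoint");
            var key      = Environment.GetEnvironmentVariable("ComputerVisionKey");
            var uri      = $"{endpoint}/vision/v2.0/generateThumbnail?width={width}&height={height}&smartCropping=true";

            using (var request = new HttpRequestMessage(HttpMethod.Post, uri))
            {
                request.Headers.Add("Ocp-Apim-Subscription-Key", key);
                request.Content = new ByteArrayContent(image);
                request.Content.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");

                var response = await httpClient.SendAsync(request);
                response.EnsureSuccessStatusCode();

                // The endpoint returns the thumbnail as raw image bytes.
                await response.Content.CopyToAsync(output);
            }

            // Assumed result type, matching the JSON-serialized output above.
            return new Thumbnail { Width = width, Height = height };
        }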
Example #8
        async void OnConvertToTextClicked(object sender, EventArgs e)
        {
            (sender as Button).IsEnabled = false;
            var client = ComputerVisionService.GetCVClient();

            viewModel.VMimageResult = await ComputerVisionService
                                      .ExtractUrlLocal(client, viewModel.VMimagePath);

            ConversionResult.Text        = viewModel.VMimageResult;
            (sender as Button).IsEnabled = true;
        }
Example #9
        static void Main(string[] args)
        {
            IComputerVisionService   computerVisionClient    = new ComputerVisionService();
            IKeywordGeneratorService keywordGeneratorService = new KeywordGeneratorService();

            AnalysisResult analysisResult = computerVisionClient.AnalyzeImageAsync("https://www.polyvore.com/cgi/img-thing?.out=jpg&size=l&tid=8389163").Result;

            List<string> keywords = keywordGeneratorService.GenerateMetaVisionTags(analysisResult);

            Console.WriteLine(analysisResult);
            Console.ReadLine();
        }
Example #10
        public static async Task<IActionResult> Run(
            //HTTP Trigger (Functions allow only a single trigger)
            [HttpTrigger(AuthorizationLevel.Function, "post", Route = "NewCognitiveFaceAuth/{personId}")] NewRequest <SmartDoc> newRequest,

            // Inputs
            [Blob("smartdocs/{RequestItem.DocName}", FileAccess.Read, Connection = "SmartDocsStorageConnection")] byte[] smartDocImage,
            string personId,

            // Logger
            ILogger log)
        {
            string stepName = InstructionFlag.FaceAuthentication.ToString();

            log.LogInformation($"***New {stepName} Direct-HTTP Request triggered: {JsonConvert.SerializeObject(newRequest)}");

            if (httpClient == null)
            {
                httpClient = new HttpClient();
            }

            try
            {
                var result = await ComputerVisionService.GetFaceAuthAsync(httpClient, smartDocImage, personId);

                var resultJson = JsonConvert.SerializeObject(result);
                //Update the request information with the newly processed data
                newRequest.RequestItem.CognitivePipelineActions.Add(new ProcessingStep
                {
                    StepName      = stepName,
                    LastUpdatedAt = DateTime.UtcNow,
                    Output        = resultJson,
                    Status        = SmartDocStatus.ProccessedSuccessfully.ToString()
                });

                return (ActionResult)new OkObjectResult(newRequest);
            }
            catch (Exception ex)
            {
                newRequest.RequestItem.CognitivePipelineActions.Add(new ProcessingStep
                {
                    StepName      = stepName,
                    LastUpdatedAt = DateTime.UtcNow,
                    Output        = ex.Message,
                    Status        = SmartDocStatus.ProcessedUnsuccessfully.ToString()
                });

                return (ActionResult)new BadRequestObjectResult(newRequest);
            }
        }
Example #11
        async void OcrButton_Clicked(object sender, EventArgs e)
        {
            var client  = new ComputerVisionService();
            var regions = await client.ExtractLocalTextAsync(file.Path);

            var sb = new StringBuilder();

            sb.Append($"Extracted Regions: {regions.Count}\n\n");
            foreach (var region in regions)
            {
                sb.Append($"OCR Result:\n{region}\n");
            }

            await DisplayAlert("OCR", sb.ToString(), "OK");
        }
Example #12
        private async Task RunCognitiveServicesOnNewsFeedPostAsync(Stream stream, NewsFeedPost newsFeedPost)
        {
            Task<ContentModeratorTextResults> contentModeratorTextTask = null;

            if (IsMessageEditorTextValid(newsFeedPost.Message))
            {
                contentModeratorTextTask = ContentModeratorService.ScreenTextAsync(newsFeedPost.Message, TermListId);
            }

            var computerVisionTask = ComputerVisionService.GetComputerVisionResultsAsync(stream);

            var tasks = new List<Task>()
            {
                computerVisionTask
            };

            if (contentModeratorTextTask != null)
            {
                tasks.Add(contentModeratorTextTask);
            }

            await Task.WhenAll(tasks);

            if (computerVisionTask.IsFaulted)
            {
                throw new Exception("Something went wrong with your image upload.  Please try again later.");
            }

            ValidatePostImage(computerVisionTask.Result);
            newsFeedPost.ImageVisionResults = computerVisionTask.Result;
            PopulateCaptionProperty(newsFeedPost);
            PopulateDescriptionTagsProperty(newsFeedPost);
            PopulateCelebritiesProperty(newsFeedPost);
            PopulateLandmarksProperty(newsFeedPost);
            PopulateBrandsProperty(newsFeedPost);

            if (contentModeratorTextTask != null)
            {
                if (contentModeratorTextTask.IsFaulted)
                {
                    throw new Exception("Something went wrong with your message post.  Please try again later.");
                }

                ValidatePostMessage(contentModeratorTextTask.Result);
                newsFeedPost.MessageModeratorResults = contentModeratorTextTask.Result;
            }
        }
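One subtlety in the pattern above: await Task.WhenAll(...) rethrows the first exception of any faulted task, so as written the IsFaulted checks only execute when every task has already succeeded. For the task-specific error messages to fire, the WhenAll is typically wrapped in a try/catch that swallows the aggregate failure, for example (a sketch, not from the source):

            try
            {
                await Task.WhenAll(tasks);
            }
            catch
            {
                // Ignore here: each task's IsFaulted status is inspected below,
                // so every failure can be mapped to its own user-facing message.
            }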
Example #13
        public async Task<ActionResult<string>> UploadImageAsync(IList<IFormFile> files)
        {
            IFormFile file = files[0];
            string    response;

            if (file == null || file.Length == 0)
            {
                return BadRequest();
            }

            using (var memoryStream = new MemoryStream())
            {
                await file.CopyToAsync(memoryStream);

                byte[] imageBytes = memoryStream.ToArray();

                response = await ComputerVisionService.Analyze(imageBytes);
            }

            return response;
        }
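The ComputerVisionService.Analyze helper is not part of this snippet. A plausible shape, using the Microsoft.Azure.CognitiveServices.Vision.ComputerVision SDK, is sketched below; the client setup, the chosen visual features, and the returned caption string are all assumptions:

        // Sketch: analyze raw image bytes and return the top caption.
        public static class ComputerVisionService
        {
            // Key and endpoint are placeholders; in practice they come from configuration.
            private static readonly ComputerVisionClient Client =
                new ComputerVisionClient(new ApiKeyServiceClientCredentials("<subscription-key>"))
                {
                    Endpoint = "https://<resource-name>.cognitiveservices.azure.com"
                };

            public static async Task<string> Analyze(byte[] imageBytes)
            {
                using (var stream = new MemoryStream(imageBytes))
                {
                    // Newer SDK versions take IList<VisualFeatureTypes?>; older ones
                    // use the non-nullable list.
                    var analysis = await Client.AnalyzeImageInStreamAsync(
                        stream,
                        new List<VisualFeatureTypes?> { VisualFeatureTypes.Description, VisualFeatureTypes.Tags });

                    return analysis.Description?.Captions?.FirstOrDefault()?.Text
                           ?? "No description available.";
                }
            }
        }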
Example #14
 public FingerPaintPage()
 {
     InitializeComponent();
     _computerVisionService = new ComputerVisionService();
 }
Example #15
        /// <summary>
        /// POST: api/Messages
        /// Receive a message from a user and reply to it
        /// </summary>
        public async Task<HttpResponseMessage> Post([FromBody] Activity activity)
        {
            if (activity != null && activity.GetActivityType() == ActivityTypes.Message)
            {
                ConnectorClient connector = new ConnectorClient(new Uri(activity.ServiceUrl));

                // Get the saved profile values
                //http://aihelpwebsite.com/Blog/EntryId/8/Introduction-To-FormFlow-With-The-Microsoft-Bot-Framework

                // Get any saved values
                StateClient sc       = activity.GetStateClient();
                BotData     userData = sc.BotState.GetPrivateConversationData(activity.ChannelId, activity.Conversation.Id, activity.From.Id);

                var boolDataComplete = userData.GetProperty<bool>("DataComplete");

                if (!boolDataComplete)
                {
                    // Call our FormFlow by calling MakeRootDialog
                    await Conversation.SendAsync(activity, MakeRootDialog);
                }
                else
                {
                    var height        = userData.GetProperty<Int64>("Height");
                    var width         = userData.GetProperty<Int64>("Width");
                    var smartCropping = userData.GetProperty<bool>("SmartCrop");

                    if (activity.Attachments.Count > 0)
                    {
                        //get the source image
                        var sourceImage = await connector.HttpClient.GetStreamAsync(activity.Attachments.FirstOrDefault().ContentUrl);

                        //resize the image using the cognitive services computer vision api
                        var resizedImage = await ComputerVisionService.GetImageThumbnail(sourceImage, height, width, smartCropping);

                        //construct reply
                        var replyText = smartCropping ?
                                        "I smartly resized an image for you, I'm good like that" :
                                        "I resized an image for you, I'm good like that";
                        Activity replyToConversation = activity.CreateReply(replyText);
                        replyToConversation.Recipient   = activity.From;
                        replyToConversation.Type        = "message";
                        replyToConversation.Attachments = new List<Attachment>();

                        //add attachment to reply (the image is inlined as a base64 data URI)
                        var image = "data:image/png;base64," + Convert.ToBase64String(resizedImage);
                        replyToConversation.Attachments.Add(new Attachment {
                            ContentUrl = image, ContentType = "image/png"
                        });

                        //send reply
                        var reply = await connector.Conversations.SendToConversationAsync(replyToConversation);

                        //reset user data
                        await sc.BotState.DeleteStateForUserAsync(activity.ChannelId, activity.From.Id);
                    }
                    else
                    {
                        Activity noPictureReply = activity.CreateReply($"Please send me an image.");
                        await connector.Conversations.SendToConversationAsync(noPictureReply);
                    }
                }
            }
            else
            {
                HandleSystemMessage(activity);
            }
            var response = Request.CreateResponse(HttpStatusCode.OK);

            return response;
        }
Example #16
 public screen_new_image(BlobService blobService, ComputerVisionService computerVisionService, QueueService queueService)
 {
     _blobService           = blobService;
     _computerVisionService = computerVisionService;
     _queueService          = queueService;
 }
Example #17
 public ComputerVisionController(ComputerVisionService computerVision)
 {
     _computerVision = computerVision;
 }