Example #1
 public Item(string[] tag, Color color, string imageName)
 {
     this.tag       = tag;
     this.color     = color;
     this.imageName = imageName;
     this.image     = Google.Cloud.Vision.V1.Image.FromFile("DatabaseIMG/" + imageName);
 }
Example #2
        public List<string> getMatches(string filePath)
        {
            string path = MapPath("~/My Project 25680-6d24c8bb9131.json");
            var output = new List<string>();

            // Authenticate with the service-account key file and build a client
            // over an explicit gRPC channel.
            GoogleCredential googleCredential;
            using (Stream m = new FileStream(path, FileMode.Open))
                googleCredential = GoogleCredential.FromStream(m);
            var channel = new Grpc.Core.Channel(ImageAnnotatorClient.DefaultEndpoint.Host,
                                                googleCredential.ToChannelCredentials());
            var client = ImageAnnotatorClient.Create(channel);

            var image    = Image.FromFile(filePath);
            var response = client.DetectLogos(image);

            foreach (var annotation in response)
            {
                if (annotation.Description != null)
                {
                    output.Add(annotation.Description);
                }
            }
            return output;
        }
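Note: newer releases of the Google.Cloud.Vision.V1 package deprecate hand-built Grpc.Core.Channel instances. A minimal sketch of the equivalent setup using ImageAnnotatorClientBuilder (the builder pattern used by Examples #8 and #15 below), assuming path still points at the service-account JSON key:

            // Sketch: same credentials, no manual channel construction.
            var client = new ImageAnnotatorClientBuilder
            {
                CredentialsPath = path
            }.Build();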
Example #3
        public IEnumerable<IMachineTag> GetTagsForImageUrl(string imageUrl)
        {
            var image = Image.FromUri(imageUrl);
            IReadOnlyList<EntityAnnotation> labels = null;
            WebDetection webInfos = null;

            try
            {
                labels   = this.client.DetectLabels(image);
                webInfos = this.client.DetectWebInformation(image);
            }
            catch (Exception e)
            {
                // The Vision API reports an unreachable URL only through the
                // exception message, so match on the known message texts.
                if (e.Message.Contains("The URL does not appear to be accessible by us.") ||
                    e.Message.Contains("We can not access the URL currently."))
                {
                    yield break;
                }

                Console.WriteLine(e);
            }

            if (labels == null || webInfos == null)
            {
                yield break;
            }

            foreach (var machineTag in ToMTags(labels, webInfos))
            {
                yield return machineTag;
            }
        }
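Example #4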
        public override async Task RunCommand(object sender)
        {
            var engine          = (IAutomationEngineInstance)sender;
            var vAPICredentials = (string)await v_APICredentials.EvaluateCode(engine);

            string vFilePath = null;

            if (v_ImageType == "Filepath")
            {
                vFilePath = (string)await v_FilePath.EvaluateCode(engine);
            }
            else
            {
                Bitmap vBitmap = (Bitmap)await v_Bitmap.EvaluateCode(engine);

                vFilePath = engine.EngineContext.ProjectPath + "\\tempOCRBitmap.bmp";
                // FileMode.Create truncates any stale temp file; the using block
                // releases the stream even if Save throws.
                using (FileStream imageStream = new FileStream(vFilePath, FileMode.Create))
                {
                    vBitmap.Save(imageStream, ImageFormat.Bmp);
                }
            }
            Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", vAPICredentials);

            string foundText = "";
            GoogleImage image = GoogleImage.FromFile(vFilePath);
            ImageAnnotatorClient client = ImageAnnotatorClient.Create();
            IReadOnlyList<EntityAnnotation> textAnnotations = client.DetectText(image);

            // The first annotation, when present, holds the full detected text.
            if (textAnnotations.Count > 0)
            {
                foundText = textAnnotations[0].Description;
            }
            foundText.SetVariableValue(engine, v_OutputUserVariableName);
        }
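Note: SetEnvironmentVariable mutates process-wide state, which is fragile when several commands run with different keys. A minimal alternative sketch, reusing the same builder pattern and assuming vAPICredentials holds a path to the key file:

            // Sketch: scope the credentials to this client instead of the process.
            ImageAnnotatorClient client = new ImageAnnotatorClientBuilder
            {
                CredentialsPath = vAPICredentials
            }.Build();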
Example #5
        /// <summary>
        /// Constructor for a feature extractor using the Google Cloud Platform SDK, using a given annotator client,
        /// optimized for use with a batch of images.
        /// </summary>
        /// <param name="imageStream">A seekable stream containing the image.</param>
        /// <param name="client">The shared annotator client to reuse across the batch.</param>
        internal GcpFeatureExtractor(Stream imageStream, ImageAnnotatorClient client)
        {
            SetRotation(SKCodec.Create(imageStream).EncodedOrigin);

            // Each consumer reads from the stream's current position, so rewind
            // between them (this assumes the stream is seekable).
            imageStream.Position = 0;
            var size = System.Drawing.Image.FromStream(imageStream).Size;

            Width  = size.Width;
            Height = size.Height;

            imageStream.Position = 0;
            _image  = Image.FromStream(imageStream);
            _client = client;
        }
Example #6
        Image ConvertBitmapToGoogleImage(Bitmap bitmap)
        {
            using (var bitmapStream = new MemoryStream())
            {
                bitmap.Save(bitmapStream, ImageFormat.Bmp);
                bitmapStream.Position = 0;

                return Image.FromStream(bitmapStream);
            }
        }
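For context, a hypothetical call site for this helper (myBitmap is an assumption, any System.Drawing.Bitmap):

        Image googleImage = ConvertBitmapToGoogleImage(myBitmap);
        var labels = ImageAnnotatorClient.Create().DetectLabels(googleImage);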
Example #7
        public IEnumerable<IMachineTag> GetTagsForImageBytes(byte[] bytes)
        {
            var image = Image.FromBytes(bytes);

            var labels   = this.client.DetectLabels(image);
            var webInfos = this.client.DetectWebInformation(image);

            foreach (var machineTag in ToMTags(labels, webInfos))
            {
                yield return machineTag;
            }
        }
Example #8
        /// <summary>
        /// Constructor for a feature extractor using the Google Cloud Platform SDK, optimized for analysing a single image.
        /// </summary>
        /// <param name="imageStream">A seekable stream containing the image.</param>
        /// <param name="credentialsPath">Path to the service-account JSON key file.</param>
        public GcpFeatureExtractor(Stream imageStream, string credentialsPath)
        {
            SetRotation(SKCodec.Create(imageStream).EncodedOrigin);

            // Rewind between consumers; each reads from the current position.
            imageStream.Position = 0;
            var size = System.Drawing.Image.FromStream(imageStream).Size;

            Width  = size.Width;
            Height = size.Height;

            _client = new ImageAnnotatorClientBuilder()
            {
                CredentialsPath = credentialsPath
            }.Build();

            imageStream.Position = 0;
            _image = Image.FromStream(imageStream);
        }
Example #9
        public Item(string line)
        {
            var l = line.Split('\t');

            image     = Google.Cloud.Vision.V1.Image.FromFile(l[0]);
            imageName = l[0];
            color     = Color.FromArgb(
                int.Parse(l[1]),
                int.Parse(l[2]),
                int.Parse(l[3]));

            // Allocate the tag array (declared as string[] in Example #1) to hold
            // the remaining tab-separated columns; it is never sized otherwise.
            tag = new string[l.Length - 4];
            for (int i = 4; i < l.Length; i++)
            {
                tag[i - 4] = l[i];
            }
        }
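For reference, the constructor expects tab-separated lines of the form: image path, three color components, then any number of tags. A hypothetical line "DatabaseIMG/cup.jpg\t128\t64\t32\tcup\tceramic" would load the image from DatabaseIMG/cup.jpg, set color to Color.FromArgb(128, 64, 32), and fill tag with { "cup", "ceramic" }.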
Example #10
        private async Task<IReadOnlyList<EntityAnnotation>> OcrFromGoogle(Stream image, string? inputLanguage = null)
        {
            image.Position = 0;
            var googleImage = await Image.FromStreamAsync(image);

            var imageContext = new ImageContext();

            if (inputLanguage != null)
            {
                imageContext.LanguageHints.Add(inputLanguage);
            }

            var response = await _ocrClientLazy.Value.DetectTextAsync(googleImage, imageContext);

            return response;
        }
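A hypothetical call site, passing an ISO-639 language hint to bias recognition (the file name and hint are assumptions):

            using (var file = File.OpenRead("receipt.png"))
            {
                var annotations = await OcrFromGoogle(file, "en");
            }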
Example #11
        protected void Page_Load(object sender, EventArgs e)
        {
            // Instantiates a client
            var client = ImageAnnotatorClient.Create();
            // Load the image file into memory
            var image = Image.FromFile(@"C:\Users\NecC\Downloads\69597698_2263657030613474_151965357500792832_n.jpg");
            // Performs label detection on the image file
            var response = client.DetectLabels(image);

            foreach (var annotation in response)
            {
                if (annotation.Description != null)
                {
                    Console.WriteLine(annotation.Description);
                }
            }
        }
Example #12
        public string doc_text_dection(string GVA_File_Path, string Credential_Path)
        {
            // Point the client at the service-account key supplied by the caller.
            Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", Credential_Path);
            //Load the image file into memory
            var image = Image.FromFile(GVA_File_Path);

            // Instantiates a client
            ImageAnnotatorClient client = ImageAnnotatorClient.Create();

            TextAnnotation text = client.DetectDocumentText(image);

            return $"Text: {text.Text}";
        }
Example #13
        public ResultFromOCRBindingModel GetData(
            ResultFromOCRBindingModel model, IMatFileUploadEntry file)
        {
            ResultFromOCRBindingModel result =
                new ResultFromOCRBindingModel();

            Regex snRegex = SerialNumberRegexes
                            .GetSNRegex(model.ApplianceBrand);

            Regex modelRegex = LGModels
                               .GetModelRegex(model.ApplianceType);

            ImageAnnotatorClient client =
                ImageAnnotatorClient.Create();

            MemoryStream stream = new MemoryStream();

            // WriteToStreamAsync must complete before the stream is read;
            // block here since this method is synchronous.
            file.WriteToStreamAsync(stream).GetAwaiter().GetResult();
            stream.Position = 0;

            Google.Cloud.Vision.V1.Image image =
                Google.Cloud.Vision.V1.Image.FromStream(stream);

            foreach (var annotation in annotations)
            {
                if (snRegex.Match(annotation.Description)
                    .Success)
                {
                    result.ApplianceSerialNumber =
                        annotation.Description;
                }
                else if (modelRegex.Match(annotation.Description)
                         .Success)
                {
                    result.ApplianceModel = annotation.Description;
                }
            }

            return result;
        }
Example #14
        // Separate thread that submits snapshots for analysis
        private void backgroundWorkerDetection_DoWork(object sender, DoWorkEventArgs e)
        {
            // Running counter of submitted snapshots
            int cases = 1;
            // Client for the Google Vision API
            ImageAnnotatorClient client = ImageAnnotatorClient.Create();
            // List that accumulates the analysis results
            List<FaceDetectionData> faceDetectionData = new List<FaceDetectionData>();

            // Paths of all files to be submitted for analysis
            string[] allfiles = Directory.GetFiles(textBoxDirectory.Text + "\\snapshots");
            // Submit each file for analysis and collect the results
            foreach (string filename in allfiles)
            {
                // Load the snapshot from disk
                Google.Cloud.Vision.V1.Image image = Google.Cloud.Vision.V1.Image.FromFile(filename);
                // Run face detection
                IReadOnlyList<FaceAnnotation> result = client.DetectFaces(image);
                // Record each detected face
                foreach (FaceAnnotation face in result)
                {
                    faceDetectionData.Add(new FaceDetectionData()
                    {
                        Case          = $"{cases++}",
                        RecordingTime = Path.GetFileNameWithoutExtension(filename),
                        Joy           = $"{face.JoyLikelihood}",
                        Surprise      = $"{face.SurpriseLikelihood}",
                        Sorrow        = $"{face.SorrowLikelihood}",
                        Anger         = $"{face.AngerLikelihood}"
                    });
                }
            }
            // Write the accumulated results to the CSV file once, after all snapshots are processed
            using (StreamWriter streamWriter = new StreamWriter($"{textBoxDirectory.Text}\\table.csv"))
            using (CsvWriter csvWriter = new CsvWriter(streamWriter, CultureInfo.InvariantCulture))
            {
                csvWriter.Configuration.Delimiter = ",";
                csvWriter.WriteRecords(faceDetectionData);
            }
            // Notify that the analysis is complete and the results are saved
            MessageBox.Show("Results saved");
            // Request cancellation; returning from DoWork is what actually ends the worker
            backgroundWorkerDetection.CancelAsync();
        }
Example #15
        private async Task DoOCR()
        {
            try
            {
                string Identifier = Utility.RandomHex();
                DebugLog.Log("Making OCR request [" + Identifier + "]");

                if (!File.Exists(Properties.Settings.Default.apiKeyPath))
                {
                    throw new FileNotFoundException("Keyfile not present at " + Properties.Settings.Default.apiKeyPath);
                }

                // Wait for rate limiter before starting the clock
                GoogleAsyncStatic.rate.Check();
                Stopwatch sw = new Stopwatch();

                // Dump the provided image to a memory stream
                var stream = new MemoryStream();
                image.Save(stream, ImageFormat.Png);
                stream.Position = 0;

                // Load the stream as a gimage
                GImage gimage = GImage.FromStream(stream);

                // Make our connection client
                ImageAnnotatorClient client = new ImageAnnotatorClientBuilder
                {
                    CredentialsPath = Properties.Settings.Default.apiKeyPath,
                }.Build();

                // Ask for OCR
                sw.Start();
                var response = await client.DetectTextAsync(gimage);

                sw.Stop();

                // If we didn't get anything back
                if (response.Count == 0)
                {
                    _bigBox     = OCRBox.ErrorBigBox();
                    _smallBoxes = new OCRBox[] { };
                }
                else
                {
                    // First result is the big box
                    _bigBox = new OCRBox(response.First());

                    // Following results are the small boxes
                    _smallBoxes = response.Skip(1)
                                  .Select(ann => new OCRBox(ann))
                                  .ToArray();
                }

                _timeStamp = string.Format("{0:00}:{1:00}:{2:00}.{3:000}",
                                           sw.Elapsed.Hours,
                                           sw.Elapsed.Minutes,
                                           sw.Elapsed.Seconds,
                                           sw.Elapsed.Milliseconds);

                isDone = true;
                callback?.Invoke(this);

                DebugLog.Log("Finished OCR request [" + Identifer + "]");
            }
            catch (Grpc.Core.RpcException e)
            {
                string url = "";

                // Regular expression that extracts a URL from the error message.
                Regex rx = new Regex(@"(http\S*)",
                                     RegexOptions.Compiled | RegexOptions.IgnoreCase);

                // Find matches.
                MatchCollection matches = rx.Matches(e.Message);

                if (matches.Count > 0)
                {
                    url = matches[0].Groups[0].Value;
                }

                Form.Invoke(Form.SafeLogWorkerError, new object[] { e.Message, url });
            }
            catch (Exception e)
            {
                Form.Invoke(Form.SafeLogWorkerError, new object[] { e.Message, "" });
            }
        }
Example #16
        // Some crucial methods
        private void analyze()
        {
            //Do different image segmentations, depending on the choices
            if (comboBox1.SelectedIndex > -1)
            {
                String select = comboBox1.Text;

                if (select == "K Clustering")
                {
                    //do the K clustering method

                    segmentation = "K Cluster";
                    //System.Windows.Forms.MessageBox.Show("No image imported");
                }

                else if (select == "Edge Detection")
                {
                    //have this done in edge detection

                    segmentation = "Edge Detection";
                }
                else
                {
                    System.Windows.Forms.MessageBox.Show("No Segmentation Type Selected.");
                    return;
                }
            }
            // Base case: the user didn't supply a picture
            if (pic == null)
            {
                System.Windows.Forms.MessageBox.Show("No image imported");
                return;
            }
            else
            {
                if (segmentation == "K Cluster")
                {
                    //before segmenting, check whether "t3mp.jpg" and the "S3gmented Image" folder already exist
                    //using the method from KMC.cs
                    if (searched == 0)
                    {
                        Bitmap temp = (Bitmap)pic;
                        //set the name of the files created from the segmentation
                        temp.Save("t3mp.jpg", System.Drawing.Imaging.ImageFormat.Jpeg);
                        //temp.Dispose();
                        searched = 1;
                    }
                    string path = Directory.GetCurrentDirectory();
                    //Now the color segmented images are in a folder called "Segmented Image"
                    string directory = path + "\\S3gmented Image";
                    //if there has not been K-clustering, skip the process and go to the API
                    if (!(System.IO.Directory.Exists(directory)))
                    {
                        //Trying Amanda's method
                        ImageSegmentation.Compute("t3mp.jpg");
                    }


                    files = Directory.GetFiles(directory, "*", SearchOption.AllDirectories);
                    int numFiles = files.GetLength(0);

                    //if the user enters another term after the images were already sent to API, we don't need to send to API again
                    if (API_sent == 0)
                    {
                        ImageCollections = new string[numFiles];

                        int tempCounter = 0;
                        //array that collects the terms returned for each segment
                        API_results = new List<string>[numFiles];
                        //Now we go inside the Segmented Image folder and loop
                        foreach (string file in files)
                        {
                            ImageCollections[tempCounter] = file;
                            Google.Cloud.Vision.V1.Image segmentImage = Google.Cloud.Vision.V1.Image.FromFile(ImageCollections[tempCounter]);
                            //keeps track of the terms returned for this segment
                            API_results[tempCounter] = Vision.containsElement(segmentImage);
                            tempCounter++;
                        }
                        //so we don't have to loop over again. We already have the information
                        API_sent = 1;
                    }

                    // Algorithm: loop over each segmented image; when user_query matches one of its
                    // API terms, keep the image whose match has the best (lowest) rank seen so far.
                    int rank = 999;

                    for (int i = 1; i < numFiles; i++)
                    {
                        int count = API_results[i].Count;
                        for (int k = 0; k < count; k++)
                        {
                            if (String.Equals(user_query, API_results[i][k]))
                            {
                                //found a better and more accurate image. higher ranked
                                if (rank > k)
                                {
                                    //want to free up our files, so we can delete later
                                    if (resultBitmap != null)
                                    {
                                        resultBitmap.Dispose();
                                    }
                                    resultBitmap = new Bitmap(ImageCollections[i]);
                                    rank         = k;
                                }
                            }
                        }
                    }
                    // no API term found
                    if (rank == 999)
                    {
                        System.Windows.Forms.MessageBox.Show("No search term of \"" + user_query + "\" found");
                    }


                    if (resultBitmap != null)
                    {
                        pictureBox1.Image = resultBitmap;
                    }
                }
                //System.Windows.Forms.MessageBox.Show(segmentation);

                if (segmentation == "Edge Detection")
                {
                    if (resultBitmap != null)
                    {
                        resultBitmap.Dispose();
                    }
                    //System.Windows.Forms.MessageBox.Show("Here");

                    resultBitmap = Filter.ApplyFilter((Bitmap)pic, EdgeFilter);

                    //Image finaloutput = (Image)resultBitmap;
                    //pictureBox2.SizeMode = PictureBoxSizeMode.Zoom;
                    //resultBitmap.UnlockBits
                    pictureBox1.SizeMode = PictureBoxSizeMode.Zoom;

                    pictureBox1.Image = resultBitmap;
                }
            }
        }
Example #17
        public Embed GetBeatmapInfoFromImage(string username = null)
        {
            Google.Cloud.Vision.V1.Image image = new Google.Cloud.Vision.V1.Image();
            try
            {
                using (var temporaryStream = new MemoryStream())
                    using (Sd.Bitmap croppedBeatmapImage = AcquireAndCropBeatmapImage(lastAttachment))
                    {
                        croppedBeatmapImage.Save(temporaryStream, Sd.Imaging.ImageFormat.Png);
                        temporaryStream.Position = 0;
                        image = Google.Cloud.Vision.V1.Image.FromStream(temporaryStream);
                    }
            }
            catch (Exception e)
            {
                throw new BeatmapAnalysisException("Failed to save image", e);
            }

            // Assumes ImageAnnotatorClient here names a client instance exposed by the
            // containing class (DetectTextAsync is an instance method on the SDK client).
            var textList = ImageAnnotatorClient.DetectTextAsync(image).Result;

            string[] beatmapInformation       = textList.First().Description.Split('\n');
            string   beatmapNameAndDifficulty = beatmapInformation[0];
            int      locationOfBy             = beatmapInformation[1].IndexOf("by");
            // Note: Substring(locationOfBy) keeps the leading "by" in the mapper name
            // unless a username is supplied.
            string   beatmapper = username ?? beatmapInformation[1].Substring(locationOfBy);
            IEnumerable<BeatmapSetResult> sortedBeatmaps = GetBeatmapsByMapper(beatmapper);
            BeatmapSetResult beatmapResult = sortedBeatmaps.FirstOrDefault();

            if (beatmapResult == null)
            {
                throw new BeatmapAnalysisException("Failed to detect creator. Try the command again by specifiying the creator.");
            }

            var splitIndex     = -1;
            var bestSimilarity = 0.0;

            for (var candidateSplitIndex = 0; candidateSplitIndex <= beatmapNameAndDifficulty.Length; candidateSplitIndex++)
            {
                var candidateSimilarity = Extensions.CalculateSimilarity(beatmapResult.Name, beatmapNameAndDifficulty.Substring(0, candidateSplitIndex));
                if (candidateSimilarity > bestSimilarity)
                {
                    splitIndex     = candidateSplitIndex;
                    bestSimilarity = candidateSimilarity;
                }
            }
            var beatmapName    = beatmapNameAndDifficulty.Substring(0, splitIndex);
            var difficultyName = beatmapNameAndDifficulty.Substring(splitIndex);

            IEnumerable<Beatmap> potentialBeatmaps = Enumerable.Empty<Beatmap>();

            foreach (BeatmapSetResult potentialBeatmapResult in sortedBeatmaps.TakeWhile(result => Extensions.CalculateSimilarity(result.Name, beatmapName) / bestSimilarity > 0.99))
            {
                potentialBeatmaps = potentialBeatmaps.Concat(OsuApi.GetBeatmapSet.WithSetId(potentialBeatmapResult.SetId).Results(20).Result);
            }
            var selectedBeatmap = potentialBeatmaps.OrderByDescending(beatmap => Extensions.CalculateSimilarity(beatmap.Difficulty, difficultyName)).FirstOrDefault();

            if (selectedBeatmap == null)
            {
                throw new BeatmapAnalysisException("Failed to retrieve beatmap");
            }

            return FormatBeatmapInfo(selectedBeatmap);
        }
Example #18
        public void ProcessImage(Bitmap bitmap, IExtractionResultBuilder builder)
        {
            Image image = ConvertBitmapToGoogleImage(bitmap);
            ImageAnnotatorClient client = ImageAnnotatorClient.Create();

            TextAnnotation response = client.DetectDocumentText(image);

            if (response == null)
            {
                return;
            }

            foreach (Page page in response.Pages)
            {
                foreach (Block block in page.Blocks)
                {
                    foreach (Paragraph paragraph in block.Paragraphs)
                    {
                        foreach (Word word in paragraph.Words)
                        {
                            foreach (Symbol symbol in word.Symbols)
                            {
                                Character s = new Character();

                                s.Text       = symbol.Text;
                                s.Confidence = symbol.Confidence;

                                // Copy the four bounding-box corners.
                                for (int i = 0; i < 4; i++)
                                {
                                    s.Bound[i] = new Vertices
                                    {
                                        X = symbol.BoundingBox.Vertices[i].X,
                                        Y = symbol.BoundingBox.Vertices[i].Y
                                    };
                                }

                                Rectangle bounds = new Rectangle(s.Bound[0].X, s.Bound[0].Y, s.Bound[1].X - s.Bound[0].X, s.Bound[3].Y - s.Bound[0].Y);
                                builder.AddNewCharacter(s.Text, (int)(Math.Round(s.Confidence * 100)), bounds);

                                if (symbol.Property?.DetectedBreak != null)
                                {
                                    switch (symbol.Property.DetectedBreak.Type)
                                    {
                                    case TextAnnotation.Types.DetectedBreak.Types.BreakType.EolSureSpace:
                                        builder.AddNewLine();
                                        break;

                                    case TextAnnotation.Types.DetectedBreak.Types.BreakType.Hyphen:
                                        break;

                                    case TextAnnotation.Types.DetectedBreak.Types.BreakType.LineBreak:
                                        builder.AddNewLine();
                                        break;

                                    case TextAnnotation.Types.DetectedBreak.Types.BreakType.Space:
                                        builder.AddWhiteSpace();
                                        break;

                                    case TextAnnotation.Types.DetectedBreak.Types.BreakType.SureSpace:
                                        builder.AddWhiteSpace();
                                        break;

                                    case TextAnnotation.Types.DetectedBreak.Types.BreakType.Unknown:
                                        builder.AddWhiteSpace();
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
Example #19
        public async Task<IHttpActionResult> describeImageWithVoice([FromBody] ImageToVoice imageToVoice)
        {
            try
            {
                var user = await UserManager.FindByIdAsync(imageToVoice.userId);

                var faceDescription = new FaceDescription();
                var googleClient    = await ImageAnnotatorClient.CreateAsync();

                var byteImage = Convert.FromBase64String(imageToVoice.base64Image);
                var image     = Image.FromBytes(byteImage);

                var responseForFacesGoogle = await googleClient.DetectFacesAsync(image);

                var responseForLabels = await googleClient.DetectLabelsAsync(image);

                var responseForLandmark = await googleClient.DetectLandmarksAsync(image);

                var responseForLogo = await googleClient.DetectLogosAsync(image);

                var analyzeImage = new AnalyzeImage();
                analyzeImage.responseFaceGoogle     = responseForFacesGoogle;
                analyzeImage.responseForLabels      = responseForLabels;
                analyzeImage.responseForLogoGoogle  = responseForLogo;
                analyzeImage.responseLandMarkGoogle = responseForLandmark;

                var responseFormMicrosoftFace = new List<CognitiveMicrosoft>();
                if (responseForFacesGoogle.Count > 0)
                {
                    responseFormMicrosoftFace = await faceDescription.MakeAnalysisRequestAsync(byteImage);

                    analyzeImage.responseForFacesMicrosft = responseFormMicrosoftFace;
                }

                string base64Voice = analyzeImage.describeImageWithVoice();

                rela.ImagesProceseds.Add(new ImagesProcesed {
                    UserId = user.Id, date = DateTime.Now, image = imageToVoice.base64Image
                });
                await rela.SaveChangesAsync();

                int imageId = rela.ImagesProceseds.OrderByDescending(img => img.date).ToList()[0].imageId;

                if (responseForFacesGoogle.Count > 0)
                {
                    rela.GoogleFaces.Add(new GoogleFace {
                        GoogleFace1 = JsonConvert.SerializeObject(responseForFacesGoogle), imageId = imageId
                    });
                }

                if (responseForLabels.Count > 0)
                {
                    rela.GoogleLabels.Add(new GoogleLabel {
                        GoogleLabel1 = JsonConvert.SerializeObject(responseForLabels), imageId = imageId
                    });
                }

                if (responseForLandmark.Count > 0)
                {
                    rela.GoogleLandmarks.Add(new GoogleLandmark {
                        GoogleLandamark = JsonConvert.SerializeObject(responseForLandmark), imageId = imageId
                    });
                }

                if (responseForLogo.Count > 0)
                {
                    rela.GoogleLogoes.Add(new GoogleLogo {
                        GoogleLogo1 = JsonConvert.SerializeObject(responseForLogo), imageId = imageId
                    });
                }

                if (responseFormMicrosoftFace.Count > 0)
                {
                    rela.MicrosoftFaces.Add(new MicrosoftFace {
                        imageId = imageId, MicrosoftFace1 = JsonConvert.SerializeObject(responseFormMicrosoftFace)
                    });
                }

                rela.Voices.Add(new Voice {
                    imageId = imageId, GoogleVoice = base64Voice
                });

                await rela.SaveChangesAsync();

                DescribeImage describeImage = new DescribeImage()
                {
                    googleFace    = responseForFacesGoogle,
                    label         = responseForLabels,
                    landmark      = responseForLandmark,
                    logo          = responseForLogo,
                    voiceBase64   = base64Voice,
                    microsoftFace = responseFormMicrosoftFace
                };

                return Ok(describeImage);
            }
            catch (Exception)
            {
                return BadRequest("Error");
            }
        }
Example #20
        public static async System.Threading.Tasks.Task Run(
            [BlobTrigger("pending/{name}")] Stream image,
            [Queue(Constants.QUEUE_NAME)] IAsyncCollector<string> applicationQueue,
            string name,
            ILogger log,
            ExecutionContext executionContext)
        {
            var sourceStream = new MemoryStream();
            await image.CopyToAsync(sourceStream);

            var bitmap = new Bitmap(sourceStream);

            var customVisionPredictionClient = new CustomVisionPredictionClient
            {
                ApiKey   = Environment.GetEnvironmentVariable("CustomVisionPredictionClient_ApiKey"),
                Endpoint = Environment.GetEnvironmentVariable("CustomVisionPredictionClient_Endpoint")
            };

            sourceStream.Position = 0;

            var response = await customVisionPredictionClient.DetectImageAsync(Guid.Parse(Environment.GetEnvironmentVariable("CustomVisionPredictionClient_ProjectId")), "Completed Route",
                                                                               sourceStream);

            var routes = new List<string>();

            foreach (var predictionModel in response.Predictions)
            {
                if (predictionModel.TagName == "Completed Route" && predictionModel.Probability > 0.85)
                {
                    var cropped = CropBitmap(bitmap,
                                             predictionModel.BoundingBox.Left,
                                             predictionModel.BoundingBox.Top,
                                             predictionModel.BoundingBox.Width,
                                             predictionModel.BoundingBox.Height);

                    var memoryStream = new MemoryStream();
                    //ONLY FOR DEBUG
                    //cropped.Save(Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.Desktop),Guid.NewGuid().ToString()));
                    cropped.Save(memoryStream, System.Drawing.Imaging.ImageFormat.Jpeg);
                    memoryStream.Position = 0;

                    //https://stackoverflow.com/questions/53367132/where-to-store-files-for-azure-function
                    var path = Path.Combine(executionContext.FunctionAppDirectory, "Zwift-5c2367dfe003.json");

                    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", path);

                    Image tmpImage = await Image.FromStreamAsync(memoryStream);

                    var client = await ImageAnnotatorClient.CreateAsync();

                    var tmp = await client.DetectTextAsync(tmpImage);

                    var annotation = tmp.FirstOrDefault();

                    if (annotation?.Description != null)
                    {
                        routes.Add(annotation.Description.Replace("\n", " ").Trim());
                    }
                }
            }

            if (routes.Count > 0)
            {
                var user = name.Split("_").First();
                await applicationQueue.AddAsync(JsonConvert.SerializeObject(new MultipleRoutesCompletedModel
                {
                    UserId = user,
                    Routes = routes
                }));

                await applicationQueue.FlushAsync();
            }
        }
Example #21
        public async Task<AnalysisResult> AnalyzeAsync(Image image, ICollection<string> categories)
        {
            var client = await ImageAnnotatorClient.CreateAsync().ConfigureAwait(false);

            var labels = await client.DetectLabelsAsync(image).ConfigureAwait(false);

            var suggestions    = new List <string>();
            var category       = Constants.NoValueString;
            var categoryFound  = false;
            var bestLabel      = Constants.NoValueString;
            var bestLabelFound = false;

            foreach (var label in labels)
            {
                if (!bestLabelFound)
                {
                    var invalid = false;

                    foreach (var word in Constants.InvalidSuggestions)
                    {
                        if (label.Description.ToLower().Equals(word.ToLower()))
                        {
                            invalid = true;
                        }
                    }

                    if (!invalid)
                    {
                        bestLabel      = label.Description;
                        bestLabelFound = true;
                    }
                }

                suggestions.Add(label.Description);

                if (!categoryFound)
                {
                    foreach (var cat in categories)
                    {
                        if (!Constants.AllCategoriesToKeywords.ContainsKey(cat))
                        {
                            throw new ArgumentException(Constants.InvalidCategories);
                        }
                        else
                        {
                            foreach (var keyword in Constants.AllCategoriesToKeywords[cat])
                            {
                                if (label.Description.ToLower().Contains(keyword.ToLower()) && !categoryFound)
                                {
                                    category      = cat;
                                    categoryFound = true;
                                }
                            }
                        }
                    }
                }
            }

            if (!categoryFound)
            {
                category = Constants.NoCategory;
            }

            // Refactor: apply logic to read labels
            if (category.Equals(Constants.NoCategory) || category.Equals(Constants.ManufacturedCategory))
            {
                bestLabel = "";
            }

            Console.WriteLine(category);

            var name = bestLabel;

            AnalysisResult result = new AnalysisResult(suggestions, category, name);

            return result;
        }