        /// <summary>
        /// Transcribes the provided audio file.
        /// </summary>
        /// <remarks>WAV format is currently required.</remarks>
        /// <param name="filepath">The path to the audio file.</param>
        /// <returns>The transcript retrieved, if any.</returns>
        public string SpeechToText(string filepath)
        {
            if (string.IsNullOrEmpty(filepath))
            {
                throw new ArgumentNullException(nameof(filepath));
            }

            if (!File.Exists(filepath))
            {
                throw new ArgumentException((this as ILocalizedService <SpeechToTextService>).GetLocalized("FileNotFoundError", filepath), nameof(filepath));
            }

            // TODO: Now that the front end has a polyfill for support, check whether a lighter format would be just as effective.
            SpeechClient      speech   = SpeechClient.Create();
            RecognizeResponse response = speech.Recognize(
                new RecognitionConfig()
            {
                Encoding        = AudioEncoding.Linear16,
                SampleRateHertz = 48000,
                LanguageCode    = this.appSettings.Google.SpeechToText.LanguageCode,
            },
                RecognitionAudio.FromFile(filepath));

            foreach (SpeechRecognitionResult result in response.Results)
            {
                foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                {
                    return(alternative.Transcript);
                }
            }

            return(null);
        }
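A possible follow-up for the TODO above: the Speech API also accepts FLAC, which is lossless but lighter on the wire than LINEAR16 WAV. A minimal sketch, assuming the front end can produce FLAC at the same sample rate (the method name is hypothetical):

        public string SpeechToTextFlac(string filepath, string languageCode)
        {
            SpeechClient speech = SpeechClient.Create();
            RecognizeResponse response = speech.Recognize(
                new RecognitionConfig()
                {
                    Encoding        = AudioEncoding.Flac,
                    SampleRateHertz = 48000,
                    LanguageCode    = languageCode,
                },
                RecognitionAudio.FromFile(filepath));

            // Return the first transcript, if any, mirroring SpeechToText above.
            foreach (SpeechRecognitionResult result in response.Results)
            {
                foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                {
                    return alternative.Transcript;
                }
            }
            return null;
        }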
Example #2
        /// <summary>
        /// Transcribe a short audio file with punctuation
        /// </summary>
        /// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
        public static void SampleRecognize(string localFilePath)
        {
            SpeechClient speechClient = SpeechClient.Create();
            // string localFilePath = "resources/commercial_mono.wav"
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    // When enabled, transcription results may include punctuation (available for select languages).
                    EnableAutomaticPunctuation = true,
                    // The language of the supplied audio. Even though additional languages are
                    // provided by alternative_language_codes, a primary language is still required.
                    LanguageCode = "en-US",
                },
                Audio = new RecognitionAudio
                {
                    Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
                },
            };
            RecognizeResponse response = speechClient.Recognize(request);

            foreach (var result in response.Results)
            {
                // First alternative is the most probable result
                SpeechRecognitionAlternative alternative = result.Alternatives[0];
                Console.WriteLine($"Transcript: {alternative.Transcript}");
            }
        }
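The same request shape can be awaited; a minimal sketch of an async variant using RecognizeAsync (the method name is hypothetical):

        public static async Task SampleRecognizeAsync(string localFilePath)
        {
            SpeechClient speechClient = await SpeechClient.CreateAsync();
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    EnableAutomaticPunctuation = true,
                    LanguageCode = "en-US",
                },
                Audio = RecognitionAudio.FromFile(localFilePath),
            };
            RecognizeResponse response = await speechClient.RecognizeAsync(request);

            foreach (var result in response.Results)
            {
                Console.WriteLine($"Transcript: {result.Alternatives[0].Transcript}");
            }
        }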
        public static int Main(string[] args)
        {
            // Create client
            SpeechClient client = SpeechClient.Create();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig
            {
                LanguageCode    = "en-US",
                SampleRateHertz = 44100,
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
            };
            RecognitionAudio audio = new RecognitionAudio
            {
                Uri = "gs://gapic-toolkit/hello.flac",
            };

            // Call API method
            RecognizeResponse response = client.Recognize(config, audio);

            // Show the result
            Console.WriteLine(response);

            // Success
            Console.WriteLine("Smoke test passed OK");
            return(0);
        }
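Synchronous Recognize only supports short clips (roughly up to a minute of audio); longer files go through LongRunningRecognize instead. A minimal sketch against a GCS URI (the method name is a placeholder):

        public static void RecognizeLongAudio(string storageUri)
        {
            SpeechClient client = SpeechClient.Create();
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz = 44100,
                LanguageCode    = "en-US",
            };
            RecognitionAudio audio = RecognitionAudio.FromStorageUri(storageUri);

            // Start the operation, then block until the server finishes processing.
            var operation = client.LongRunningRecognize(config, audio);
            var completed = operation.PollUntilCompleted();

            foreach (var result in completed.Result.Results)
            {
                Console.WriteLine(result.Alternatives[0].Transcript);
            }
        }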
Example #4
        // This function calls the Google Speech API and transcribes the audio from the path provided.
        // The text is then returned as a string for further processing.
        // It will return "NO RESPONSE" if Google could not detect anything.

        public string Send_Value(string path)
        {
            string            file_path = path;
            RecognitionAudio  audio1    = RecognitionAudio.FromFile(file_path);
            SpeechClient      client    = SpeechClient.Create();
            RecognitionConfig config    = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 44100,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            RecognizeResponse response = client.Recognize(config, audio1);

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            var output = response.Results;

            if (output.Count != 0)
            {
                var finaloutput = output[0].Alternatives;
                return(finaloutput[0].Transcript);
            }

            else
            {
                return("NO RESPONSE");
            }
        }
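Send_Value only returns the first result segment, but longer clips commonly come back as several results. A minimal sketch (hypothetical name) that joins the top alternative of every segment instead:

        public string Send_Value_All(string path)
        {
            SpeechClient client = SpeechClient.Create();
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 44100,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            RecognizeResponse response = client.Recognize(config, RecognitionAudio.FromFile(path));

            // Top alternative of each result segment, joined in order.
            string transcript = string.Join(" ", response.Results.Select(r => r.Alternatives[0].Transcript));
            return transcript.Length != 0 ? transcript : "NO RESPONSE";
        }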
Example #5
        /// <summary>Snippet for RecognizeAsync</summary>
        public async Task RecognizeAsync_RequestObject()
        {
            // Snippet: RecognizeAsync(RecognizeRequest,CallSettings)
            // Additional: RecognizeAsync(RecognizeRequest,CancellationToken)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                    SampleRateHertz = 44100,
                    LanguageCode    = "en-US",
                },
                Audio = new RecognitionAudio
                {
                    Uri = "gs://bucket_name/file_name.flac",
                },
            };
            // Make the request
            RecognizeResponse response = await speechClient.RecognizeAsync(request);

            // End snippet
        }
        /// <summary>
        /// Performs synchronous speech recognition on an audio file.
        /// </summary>
        /// <param name="sampleRateHertz">Sample rate in Hertz of the audio data sent in all `RecognitionAudio`
        /// messages. Valid values are: 8000-48000.</param>
        /// <param name="languageCode">The language of the supplied audio.</param>
        /// <param name="uriPath">Path to the audio file stored on GCS.</param>
        public static void SampleRecognize(int sampleRateHertz, string languageCode, string uriPath)
        {
            SpeechClient speechClient = SpeechClient.Create();
            // int sampleRateHertz = 44100
            // string languageCode = "en-US"
            // string uriPath = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Encoding = RecognitionConfig.Types.AudioEncoding.Mp3,
                    // Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are:
                    // 8000-48000.
                    SampleRateHertz = 44100,
                    // The language of the supplied audio.
                    LanguageCode = "en-US",
                },
                Audio = new RecognitionAudio
                {
                    // Path to the audio file stored on GCS.
                    Uri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3",
                },
            };
            RecognizeResponse response = speechClient.Recognize(request);

            foreach (var result in response.Results)
            {
                string transcript = result.Alternatives[0].Transcript;
                Console.WriteLine($"Transcript: {transcript}");
            }
        }
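The commented defaults above double as a ready-made invocation (note the sample body hardcodes the same values):

        SampleRecognize(44100, "en-US", "gs://cloud-samples-data/speech/brooklyn_bridge.mp3");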
Example #7
        /// <summary>
        /// Adds additional metadata details about the short audio file included in this recognition request
        /// </summary>
        /// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
        public static void SampleRecognize(string localFilePath)
        {
            SpeechClient speechClient = SpeechClient.Create();
            // string localFilePath = "resources/commercial_mono.wav"
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Metadata = new RecognitionMetadata
                    {
                        InteractionType     = RecognitionMetadata.Types.InteractionType.VoiceSearch,
                        RecordingDeviceType = RecognitionMetadata.Types.RecordingDeviceType.Smartphone,
                        RecordingDeviceName = "Pixel 3",
                    },
                    // The language of the supplied audio. Even though additional languages are
                    // provided by alternative_language_codes, a primary language is still required.
                    LanguageCode = "en-US",
                },
                Audio = new RecognitionAudio
                {
                    Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
                },
            };
            RecognizeResponse response = speechClient.Recognize(request);

            foreach (var result in response.Results)
            {
                // First alternative is the most probable result
                SpeechRecognitionAlternative alternative = result.Alternatives[0];
                Console.WriteLine($"Transcript: {alternative.Transcript}");
            }
        }
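RecognitionMetadata is advisory: it supplies hints that can improve recognition but does not change the request contract. Other enum values cover different scenarios; a small sketch for dictation recorded on a PC (assuming the same request shape as above):

        var dictationMetadata = new RecognitionMetadata
        {
            InteractionType     = RecognitionMetadata.Types.InteractionType.Dictation,
            RecordingDeviceType = RecognitionMetadata.Types.RecordingDeviceType.Pc,
        };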
Example #8
        /// <summary>
        /// Transcribe a short audio file with language detected from a list of possible languages
        /// </summary>
        /// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
        public static void SampleRecognize(string localFilePath)
        {
            SpeechClient speechClient = SpeechClient.Create();
            // string localFilePath = "resources/brooklyn_bridge.flac"
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    // The language of the supplied audio. Even though additional languages are
                    // provided by alternative_language_codes, a primary language is still required.
                    LanguageCode             = "fr",
                    AlternativeLanguageCodes =
                    {
                        "es",
                        "en",
                    },
                },
                Audio = new RecognitionAudio
                {
                    Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
                },
            };
            RecognizeResponse response = speechClient.Recognize(request);

            foreach (var result in response.Results)
            {
                // The language code detected as most likely being spoken in the audio
                Console.WriteLine($"Detected language: {result.LanguageCode}");
                // First alternative is the most probable result
                SpeechRecognitionAlternative alternative = result.Alternatives[0];
                Console.WriteLine($"Transcript: {alternative.Transcript}");
            }
        }
Example #9
        public string Trans()
        {
            string ret       = "";
            string DEMO_FILE = "audio.wav";
            var    speech    = SpeechClient.Create();
            var    response  = speech.Recognize(new RecognitionConfig()
            {
                Encoding          = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz   = 41000, // NOTE: unusual rate; this must match the WAV file's actual sample rate (44100 is typical).
                AudioChannelCount = 2,
                //LanguageCode = "ko-KR",
                LanguageCode = LanguageCodes.Korean.SouthKorea,
            }, RecognitionAudio.FromFile(DEMO_FILE));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                    ret = alternative.Transcript;
                }
            }
            return(ret);
        }
        public void Recognize()
        {
            moq::Mock <Speech.SpeechClient> mockGrpcClient = new moq::Mock <Speech.SpeechClient>(moq::MockBehavior.Strict);

            mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock <lro::Operations.OperationsClient>().Object);
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig(),
                Audio  = new RecognitionAudio(),
            };
            RecognizeResponse expectedResponse = new RecognizeResponse
            {
                Results =
                {
                    new SpeechRecognitionResult(),
                },
                TotalBilledTime = new wkt::Duration(),
            };

            mockGrpcClient.Setup(x => x.Recognize(request, moq::It.IsAny <grpccore::CallOptions>())).Returns(expectedResponse);
            SpeechClient      client   = new SpeechClientImpl(mockGrpcClient.Object, null);
            RecognizeResponse response = client.Recognize(request.Config, request.Audio);

            xunit::Assert.Same(expectedResponse, response);
            mockGrpcClient.VerifyAll();
        }
        public async stt::Task RecognizeRequestObjectAsync()
        {
            moq::Mock <Speech.SpeechClient> mockGrpcClient = new moq::Mock <Speech.SpeechClient>(moq::MockBehavior.Strict);

            mockGrpcClient.Setup(x => x.CreateOperationsClient()).Returns(new moq::Mock <lro::Operations.OperationsClient>().Object);
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig(),
                Audio  = new RecognitionAudio(),
            };
            RecognizeResponse expectedResponse = new RecognizeResponse
            {
                Results =
                {
                    new SpeechRecognitionResult(),
                },
                TotalBilledTime = new wkt::Duration(),
            };

            mockGrpcClient.Setup(x => x.RecognizeAsync(request, moq::It.IsAny <grpccore::CallOptions>())).Returns(new grpccore::AsyncUnaryCall <RecognizeResponse>(stt::Task.FromResult(expectedResponse), null, null, null, null));
            SpeechClient      client = new SpeechClientImpl(mockGrpcClient.Object, null);
            RecognizeResponse responseCallSettings = await client.RecognizeAsync(request, gaxgrpc::CallSettings.FromCancellationToken(st::CancellationToken.None));

            xunit::Assert.Same(expectedResponse, responseCallSettings);
            RecognizeResponse responseCancellationToken = await client.RecognizeAsync(request, st::CancellationToken.None);

            xunit::Assert.Same(expectedResponse, responseCancellationToken);
            mockGrpcClient.VerifyAll();
        }
        public async Task RecognizeAsync2()
        {
            Mock <Speech.SpeechClient> mockGrpcClient = new Mock <Speech.SpeechClient>(MockBehavior.Strict);

            mockGrpcClient.Setup(x => x.CreateOperationsClient())
            .Returns(new Mock <Operations.OperationsClient>().Object);
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                    SampleRateHertz = 44100,
                    LanguageCode    = "en-US",
                },
                Audio = new RecognitionAudio
                {
                    Uri = "gs://bucket_name/file_name.flac",
                },
            };
            RecognizeResponse expectedResponse = new RecognizeResponse();

            mockGrpcClient.Setup(x => x.RecognizeAsync(request, It.IsAny <CallOptions>()))
            .Returns(new Grpc.Core.AsyncUnaryCall <RecognizeResponse>(Task.FromResult(expectedResponse), null, null, null, null));
            SpeechClient      client   = new SpeechClientImpl(mockGrpcClient.Object, null);
            RecognizeResponse response = await client.RecognizeAsync(request);

            Assert.Same(expectedResponse, response);
            mockGrpcClient.VerifyAll();
        }
        /// <summary>
        /// Print confidence level for individual words in a transcription of a short audio file
        /// </summary>
        /// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
        public static void SampleRecognize(string localFilePath)
        {
            SpeechClient speechClient = SpeechClient.Create();
            // string localFilePath = "resources/brooklyn_bridge.flac"
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    // When enabled, the first result returned by the API will include a list
                    // of words and the confidence level for each of those words.
                    EnableWordConfidence = true,
                    // The language of the supplied audio
                    LanguageCode = "en-US",
                },
                Audio = new RecognitionAudio
                {
                    Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
                },
            };
            RecognizeResponse response = speechClient.Recognize(request);
            // The first result includes confidence levels per word
            SpeechRecognitionResult result = response.Results[0];
            // First alternative is the most probable result
            SpeechRecognitionAlternative alternative = result.Alternatives[0];

            Console.WriteLine($"Transcript: {alternative.Transcript}");
            // Print the confidence level of each word
            foreach (var word in alternative.Words)
            {
                Console.WriteLine($"Word: {word.Word}");
                Console.WriteLine($"Confidence: {word.Confidence}");
            }
        }
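The per-word confidences can be aggregated into a rough quality signal, e.g. to flag transcripts for manual review. A minimal sketch (helper name and threshold are illustrative):

        static double AverageWordConfidence(SpeechRecognitionAlternative alternative) =>
            alternative.Words.Count == 0 ? 0.0 : alternative.Words.Average(w => w.Confidence);

        // Usage: if (AverageWordConfidence(alternative) < 0.8) { /* flag for review */ }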
Example #14
        /// <summary>
        /// Sends the voice audio to Google's API and runs HandleSpeech with the transcription.
        /// </summary>
        private void TranscribeSpeech(Message m)
        {
            if (m.voice == null)
            {
                throw new Exception.EmptyVoiceMessageException(m);
            }
            if (m.voice.Duration > maxDur)
            {
                MaxDurationExceeded(m);
                return;
            }

            SpeechClient speech = SpeechClient.Create();

            RecognitionConfig config = new RecognitionConfig();

            config.Encoding        = SpeechHandler.VoiceTypeToGoogleType(m.voice.type);
            config.SampleRateHertz = m.voice.sampleRate;
            config.LanguageCode    = languageCode;
            config.ProfanityFilter = false;


            RecognizeResponse resp = speech.Recognize(config, RecognitionAudio.FromStream(m.voice.AudioStream));

            foreach (var result in resp.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    HandleSpeech(m, alternative.Transcript);
                }
            }
        }
Example #15
        public RecognizeResponse translate([FromBody] string filename)
        {
            string           path  = "C:\\Users\\Dell\\Downloads\\" + filename;
            RecognitionAudio audio = RecognitionAudio.FromFile(path);
            //RecognitionAudio audio2 = RecognitionAudio.FetchFromUri("https://storage.googleapis.com/cloud-samples-tests/speech/brooklyn.flac");
            //RecognitionAudio audio3 = RecognitionAudio.FromStorageUri("gs://my-bucket/my-file");

            /* byte[] bytes = ReadAudioData(); // For example, from a database
             * RecognitionAudio audio4 = RecognitionAudio.FromBytes(bytes);
             *
             * using (Stream stream = OpenAudioStream()) // Any regular .NET stream
             * {
             *   RecognitionAudio audio5 = RecognitionAudio.FromStream(stream);
             * }*/

            SpeechClient      client = SpeechClient.Create();
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding        = AudioEncoding.Linear16,
                SampleRateHertz = 48000,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            RecognizeResponse response = client.Recognize(config, audio);

            return(response);
        }
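One caveat with the controller above: concatenating user input into a Windows path allows directory traversal ("..\"). A minimal hardening sketch (base directory as in the original):

        string baseDir = "C:\\Users\\Dell\\Downloads\\";
        // Path.GetFileName strips any directory components the caller supplied.
        string safePath = Path.Combine(baseDir, Path.GetFileName(filename));
        RecognitionAudio audio = RecognitionAudio.FromFile(safePath);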
Example #16
        public Namecard()
        {
            var tempFile = Path.Combine(Config.Config.TempFileDir, Guid.NewGuid() + ".tmp");

            // Dispose the stream File.Create returns, otherwise the file stays locked.
            File.Create(tempFile).Dispose();

            Post["Recognize"] = _ =>
            {
                var stream = this.Request.Body;
                RecognizeRequest req;

                using (var streamreader = new StreamReader(stream))
                {
                    req = JsonConvert.DeserializeObject <RecognizeRequest>(HttpUtility.UrlDecode(streamreader.ReadToEnd()));
                }

                var task = new NamecardRecognizer().Recognize(req.Data, tempFile);

                // polling task status sync
                if (PollingTaskStatus(task) != TaskStatus.Completed.ToString())
                {
                    return(this.Response.AsJson(new RecognizeResponse()
                    {
                        TaskStatus = task.TaskStatus,
                    }));
                }

                var response = RecognizeFieldParser.ParseRecognizeResponse(tempFile);
                if (File.Exists(tempFile))
                {
                    File.Delete(tempFile);
                }

                response.TaskStatus = TaskStatus.Completed.ToString();
                return(this.Response.AsJson(response));
            };

            // Just for testing
            Post["RecognizeMock"] = _ =>
            {
                var response = new RecognizeResponse()
                {
                    Name       = "Jane Doe",
                    Address    = "Awesome Street",
                    Company    = "Awesome Company",
                    Email      = "*****@*****.**",
                    Job        = "Web designer",
                    Phone      = "012345678",
                    Web        = "http://wwww.awesome.com",
                    Text       = "This is mock data",
                    TaskStatus = TaskStatus.Completed.ToString(),
                };

                return(this.Response.AsJson(response));
            };
        }
 /// <summary>Snippet for Recognize</summary>
 public void Recognize()
 {
     // Snippet: Recognize(RecognitionConfig, RecognitionAudio, CallSettings)
     // Create client
     SpeechClient speechClient = SpeechClient.Create();
     // Initialize request argument(s)
     RecognitionConfig config = new RecognitionConfig();
     RecognitionAudio  audio  = new RecognitionAudio();
     // Make the request
     RecognizeResponse response = speechClient.Recognize(config, audio);
     // End snippet
 }
Example #18
        // https://developers.google.com/admin-sdk/directory/v1/languages
        /// <summary>
        /// Transcribes the specified URL.
        /// </summary>
        /// <param name="url">The URL.</param>
        /// <param name="languageCode">The language code.</param>
        /// <returns></returns>
        public async Task <TranscriptionViewModel> Transcribe(string url, string languageCode = "en-US", List <string> altLanguages = null)
        {
            // Initialize GA Speech Client
            Channel channel = new Channel(
                SpeechClient.DefaultEndpoint.Host, _googleCredential.ToChannelCredentials());
            SpeechClient speech = SpeechClient.Create(channel);

            RecognitionAudio audio = await RecognitionAudio.FetchFromUriAsync(url);

            RecognitionConfig config = new RecognitionConfig
            {
                Encoding     = AudioEncoding.Linear16,
                LanguageCode = languageCode,
            };

            if (altLanguages != null)
            {
                foreach (string altLang in altLanguages)
                {
                    config.AlternativeLanguageCodes.Add(altLang);
                }
            }

            RecognizeResponse response = speech.Recognize(config, audio);

            string transcript = "";
            float  confidence = 0f;
            string language   = "";

            // Parse results
            foreach (var res in response.Results)
            {
                // Take only the highest confidence transcription
                foreach (var alternative in res.Alternatives)
                {
                    if (alternative.Confidence > confidence)
                    {
                        transcript = alternative.Transcript;
                        confidence = alternative.Confidence;
                    }
                }
                language = res.LanguageCode;
            }

            await channel.ShutdownAsync();

            return(new TranscriptionViewModel()
            {
                Transcript = transcript, Confidence = confidence, Language = language
            });
        }
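Newer versions of the client library replace manual Channel management with SpeechClientBuilder (also used in Example #29 below); a minimal sketch, assuming a service-account key file:

        SpeechClient speech = new SpeechClientBuilder
        {
            // Any GAX credential option works here; CredentialsPath is just one of them.
            CredentialsPath = "service-account.json",
        }.Build();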
        /// <summary>Snippet for RecognizeAsync</summary>
        public async Task RecognizeAsync()
        {
            // Snippet: RecognizeAsync(RecognitionConfig, RecognitionAudio, CallSettings)
            // Additional: RecognizeAsync(RecognitionConfig, RecognitionAudio, CancellationToken)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig();
            RecognitionAudio  audio  = new RecognitionAudio();
            // Make the request
            RecognizeResponse response = await speechClient.RecognizeAsync(config, audio);

            // End snippet
        }
 /// <summary>Snippet for Recognize</summary>
 public void Recognize_RequestObject()
 {
     // Snippet: Recognize(RecognizeRequest, CallSettings)
     // Create client
     SpeechClient speechClient = SpeechClient.Create();
     // Initialize request argument(s)
     RecognizeRequest request = new RecognizeRequest
     {
         Config = new RecognitionConfig(),
         Audio  = new RecognitionAudio(),
     };
     // Make the request
     RecognizeResponse response = speechClient.Recognize(request);
     // End snippet
 }
        /// <summary>
        /// Performs synchronous speech recognition with speech adaptation.
        /// </summary>
        /// <param name="sampleRateHertz">Sample rate in Hertz of the audio data sent in all `RecognitionAudio`
        /// messages. Valid values are: 8000-48000.</param>
        /// <param name="languageCode">The language of the supplied audio.</param>
        /// <param name="phrase">Phrase "hints" help Speech-to-Text API recognize the specified phrases from
        /// your audio data.</param>
        /// <param name="boost">Positive value will increase the probability that a specific phrase will be
        /// recognized over other similar sounding phrases.</param>
        /// <param name="uriPath">Path to the audio file stored on GCS.</param>
        public static void SampleRecognize(int sampleRateHertz, string languageCode, string phrase, float boost, string uriPath)
        {
            SpeechClient speechClient = SpeechClient.Create();
            // int sampleRateHertz = 44100
            // string languageCode = "en-US"
            // string phrase = "Brooklyn Bridge"
            // float boost = 20f
            // string uriPath = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Encoding = RecognitionConfig.Types.AudioEncoding.Mp3,
                    // Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are:
                    // 8000-48000.
                    SampleRateHertz = 44100,
                    // The language of the supplied audio.
                    LanguageCode   = "en-US",
                    SpeechContexts =
                    {
                        new SpeechContext
                        {
                            Phrases =
                            {
                                "Brooklyn Bridge",
                            },
                            // Positive value will increase the probability that a specific phrase will be recognized over other
                            // similar sounding phrases.
                            Boost = 20f,
                        },
                    },
                },
                Audio = new RecognitionAudio
                {
                    // Path to the audio file stored on GCS.
                    Uri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3",
                },
            };
            RecognizeResponse response = speechClient.Recognize(request);

            foreach (var result in response.Results)
            {
                // First alternative is the most probable result
                SpeechRecognitionAlternative alternative = result.Alternatives[0];
                Console.WriteLine($"Transcript: {alternative.Transcript}");
            }
        }
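Boost is optional and scoped to its SpeechContext, so unweighted hints can sit alongside boosted ones; a small sketch:

        // Hints without Boost are also valid; omitting it uses the service's default weighting.
        var plainHints = new SpeechContext { Phrases = { "Brooklyn Bridge" } };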
Example #22
        public async Task <string> RecognizeFromFile(byte[] audio)
        {
            if (_disabled)
            {
                return("Speech recognition is currently disabled");
            }

            var recognitionAudio = RecognitionAudio.FromBytes(audio);

            RecognizeResponse response = await _speechClient.RecognizeAsync(_config, recognitionAudio);

            // string.Join tolerates an empty result set, where Aggregate((x, y) => ...) would throw.
            var recognized = string.Join(" ", response.Results
                                        .SelectMany(result => result.Alternatives.Select(alternative => alternative.Transcript)));

            return(recognized);
        }
Example #23
        /// <summary>
        /// Performs synchronous speech recognition with static context classes.
        /// </summary>
        /// <param name="sampleRateHertz">Sample rate in Hertz of the audio data sent in all `RecognitionAudio`
        /// messages. Valid values are: 8000-48000.</param>
        /// <param name="languageCode">The language of the supplied audio.</param>
        /// <param name="phrase">Phrase "hints" help Speech-to-Text API recognize the specified phrases from
        /// your audio data. In this sample we are using a static class phrase ($TIME). Classes represent
        /// groups of words that represent common concepts that occur in natural language. We recommend
        /// checking out the docs page for more info on static classes.</param>
        /// <param name="uriPath">Path to the audio file stored on GCS.</param>
        public static void SampleRecognize(int sampleRateHertz, string languageCode, string phrase, string uriPath)
        {
            SpeechClient speechClient = SpeechClient.Create();
            // int sampleRateHertz = 24000
            // string languageCode = "en-US"
            // string phrase = "$TIME"
            // string uriPath = "gs://cloud-samples-data/speech/time.mp3"
            RecognizeRequest request = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Encoding = RecognitionConfig.Types.AudioEncoding.Mp3,
                    // Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are:
                    // 8000-48000.
                    SampleRateHertz = 24000,
                    // The language of the supplied audio.
                    LanguageCode   = "en-US",
                    SpeechContexts =
                    {
                        new SpeechContext
                        {
                            Phrases =
                            {
                                "$TIME",
                            },
                        },
                    },
                },
                Audio = new RecognitionAudio
                {
                    // Path to the audio file stored on GCS.
                    Uri = "gs://cloud-samples-data/speech/time.mp3",
                },
            };
            RecognizeResponse response = speechClient.Recognize(request);

            foreach (var result in response.Results)
            {
                // First alternative is the most probable result
                SpeechRecognitionAlternative alternative = result.Alternatives[0];
                Console.WriteLine($"Transcript: {alternative.Transcript}");
            }
        }
Example #24
 /// <summary>Snippet for Recognize</summary>
 public void Recognize()
 {
     // Snippet: Recognize(RecognitionConfig,RecognitionAudio,CallSettings)
     // Create client
     SpeechClient speechClient = SpeechClient.Create();
     // Initialize request argument(s)
     RecognitionConfig config = new RecognitionConfig
     {
         Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
         SampleRateHertz = 44100,
         LanguageCode    = "en-US",
     };
     RecognitionAudio audio = new RecognitionAudio
     {
         Uri = "gs://bucket_name/file_name.flac",
     };
     // Make the request
     RecognizeResponse response = speechClient.Recognize(config, audio);
     // End snippet
 }
Example #25
        static string STT(string sIn)
        {
            while (!File.Exists(sIn))
            {
                System.Threading.Thread.Sleep(2000);
            }
            var speech = SpeechClient.Create();
            RecognizeResponse response = new RecognizeResponse();
            bool TryAgain = true;

            while (TryAgain)
            {
                try
                {
                    response = speech.Recognize(new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                        SampleRateHertz = 48000,
                        LanguageCode    = "en",
                    }, RecognitionAudio.FromFile(sIn));
                    TryAgain = false;
                }
                catch (Exception err)
                {
                    if (!err.Message.Contains("because it is being used by another process."))
                    {
                        TryAgain = false;
                    }
                    else
                    {
                        // The file is still locked by its writer; wait before retrying.
                        System.Threading.Thread.Sleep(2000);
                    }
                }
            }
            StringBuilder oSb = new StringBuilder("");

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    oSb.AppendLine(alternative.Transcript);
                }
            }
            return(oSb.ToString());
        }
Example #26
        public string Recognize()
        {
            if (Recognizer.longerAudioList.Count < 3200)
            {
                return("ERROR");
            }
            RecognitionAudio  audio5   = RecognitionAudio.FromBytes(Recognizer.longerAudioList.ToArray());
            RecognizeResponse response = client.Recognize(config, audio5);

            Console.WriteLine(response);
            Recognizer.longerAudioList.Clear();

            try
            {
                return(response.Results[0].Alternatives[0].Transcript);
            }
            catch (Exception)
            {
                return("ERROR");
            }
        }
        public void Recognize()
        {
            var audio = LoadResourceAudio("speech.raw");
            // Sample: Recognize
            // Additional: Recognize(*,*,*)
            SpeechClient      client = SpeechClient.Create();
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding        = AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            RecognizeResponse response = client.Recognize(config, audio);

            Console.WriteLine(response);
            // End sample

            Assert.Equal(
                "this is a test file for the google cloud speech api",
                response.Results[0].Alternatives[0].Transcript,
                true);
        }
        public void Recognize()
        {
            Mock <Speech.SpeechClient> mockGrpcClient = new Mock <Speech.SpeechClient>(MockBehavior.Strict);

            mockGrpcClient.Setup(x => x.CreateOperationsClient())
            .Returns(new Mock <Operations.OperationsClient>().Object);
            RecognizeRequest expectedRequest = new RecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                    SampleRateHertz = 44100,
                    LanguageCode    = "en-US",
                },
                Audio = new RecognitionAudio
                {
                    Uri = "gs://bucket_name/file_name.flac",
                },
            };
            RecognizeResponse expectedResponse = new RecognizeResponse();

            mockGrpcClient.Setup(x => x.Recognize(expectedRequest, It.IsAny <CallOptions>()))
            .Returns(expectedResponse);
            SpeechClient      client = new SpeechClientImpl(mockGrpcClient.Object, null);
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz = 44100,
                LanguageCode    = "en-US",
            };
            RecognitionAudio audio = new RecognitionAudio
            {
                Uri = "gs://bucket_name/file_name.flac",
            };
            RecognizeResponse response = client.Recognize(config, audio);

            Assert.Same(expectedResponse, response);
            mockGrpcClient.VerifyAll();
        }
Example #29
        private void Recognize()
        {
            SpeechClientBuilder builder = new SpeechClientBuilder();

            builder.CredentialsPath = GOOGLE_API_CREDS_PATH;

            SpeechClient     client  = builder.Build();
            RecognizeRequest request = new RecognizeRequest()
            {
                Audio  = RecognitionAudio.FromFile(TEMP_AUDIO_PATH),
                Config = new RecognitionConfig()
                {
                    Encoding              = RecognitionConfig.Types.AudioEncoding.EncodingUnspecified,
                    LanguageCode          = "ru-RU",
                    EnableWordTimeOffsets = false
                }
            };
            RecognizeResponse response = client.Recognize(request);

            Result.Text = string.Join("\n", response.Results.Select(
                                          result => result.Alternatives[0].Transcript
                                          ));
        }
 TranscribeResponse GetShortTranscribeResponse(RecognizeResponse response)
 {
     return(GetTranscribeResponse(response.Results));
 }