/// <summary>
/// Transcribe a short audio file with language detected from a list of possible languages
/// </summary>
/// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
public static void SampleRecognize(string localFilePath)
{
    SpeechClient speechClient = SpeechClient.Create();
    // string localFilePath = "resources/brooklyn_bridge.flac"

    // Build the request: French is the primary language, with Spanish and
    // English offered as alternatives for automatic detection.
    RecognitionConfig config = new RecognitionConfig
    {
        // Even though additional languages are provided via
        // AlternativeLanguageCodes, a primary language is still required.
        LanguageCode = "fr",
        AlternativeLanguageCodes = { "es", "en", },
    };
    RecognitionAudio audio = new RecognitionAudio
    {
        Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
    };
    RecognizeRequest request = new RecognizeRequest { Config = config, Audio = audio };

    RecognizeResponse response = speechClient.Recognize(request);
    foreach (var result in response.Results)
    {
        // The language code detected as the most likely being spoken in the audio.
        Console.WriteLine($"Detected language: {result.LanguageCode}");
        // The first alternative is the most probable result.
        SpeechRecognitionAlternative best = result.Alternatives[0];
        Console.WriteLine($"Transcript: {best.Transcript}");
    }
}
/// <summary>
/// Print confidence level for individual words in a transcription of a short audio file
/// </summary>
/// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
public static void SampleRecognize(string localFilePath)
{
    SpeechClient speechClient = SpeechClient.Create();
    // string localFilePath = "resources/brooklyn_bridge.flac"

    RecognitionConfig config = new RecognitionConfig
    {
        // Ask the API to attach a per-word confidence list to the first result.
        EnableWordConfidence = true,
        // The language of the supplied audio.
        LanguageCode = "en-US",
    };
    RecognitionAudio audio = new RecognitionAudio
    {
        Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
    };
    RecognizeRequest request = new RecognizeRequest { Config = config, Audio = audio };

    RecognizeResponse response = speechClient.Recognize(request);

    // The first result carries the per-word confidence levels.
    SpeechRecognitionResult firstResult = response.Results[0];
    // The first alternative is the most probable result.
    SpeechRecognitionAlternative best = firstResult.Alternatives[0];
    Console.WriteLine($"Transcript: {best.Transcript}");

    // Show the confidence level word by word.
    foreach (var word in best.Words)
    {
        Console.WriteLine($"Word: {word.Word}");
        Console.WriteLine($"Confidence: {word.Confidence}");
    }
}
/// <summary>
/// Transcribes the named audio file (expected in the user's Downloads folder)
/// using the Google Cloud Speech API and returns the raw API response.
/// </summary>
/// <param name="filename">File name (not a full path) of the audio to transcribe.</param>
/// <returns>The <see cref="RecognizeResponse"/> produced by the API.</returns>
public RecognizeResponse translate([FromBody] string filename)
{
    // FIX: build the path with Path.Combine rather than raw string
    // concatenation; also removed a block of dead commented-out code.
    // NOTE(review): the folder is still hard-coded — consider moving it to
    // configuration, and validating "filename" against path traversal.
    string path = Path.Combine(@"C:\Users\Dell\Downloads", filename);
    RecognitionAudio audio = RecognitionAudio.FromFile(path);

    SpeechClient client = SpeechClient.Create();
    RecognitionConfig config = new RecognitionConfig
    {
        // 48 kHz LINEAR16 (wave) input, US English.
        Encoding = AudioEncoding.Linear16,
        SampleRateHertz = 48000,
        LanguageCode = LanguageCodes.English.UnitedStates
    };
    RecognizeResponse response = client.Recognize(config, audio);
    return response;
}
/// <summary>
/// Sends the voice audio to Google's API and runs HandleSpeech with transcription.
/// </summary>
private void TranscribeSpeech(Message m)
{
    // A message without voice content cannot be transcribed.
    if (m.voice == null)
    {
        throw new Exception.EmptyVoiceMessageException(m);
    }
    // Refuse audio longer than the configured maximum.
    if (m.voice.Duration > maxDur)
    {
        MaxDurationExceeded(m);
        return;
    }

    SpeechClient speech = SpeechClient.Create();
    RecognitionConfig config = new RecognitionConfig
    {
        Encoding = SpeechHandler.VoiceTypeToGoogleType(m.voice.type),
        SampleRateHertz = m.voice.sampleRate,
        LanguageCode = languageCode,
        ProfanityFilter = false,
    };

    RecognizeResponse resp = speech.Recognize(config, RecognitionAudio.FromStream(m.voice.AudioStream));

    // Hand every alternative of every result to the speech handler.
    foreach (var result in resp.Results)
    {
        foreach (var alternative in result.Alternatives)
        {
            HandleSpeech(m, alternative.Transcript);
        }
    }
}
/// <summary>
/// Adds additional details short audio file included in this recognition request
/// </summary>
/// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
public static void SampleRecognize(string localFilePath)
{
    SpeechClient speechClient = SpeechClient.Create();
    // string localFilePath = "resources/commercial_mono.wav"

    // Describe the recording context so the API can tune recognition.
    RecognitionMetadata metadata = new RecognitionMetadata
    {
        InteractionType = RecognitionMetadata.Types.InteractionType.VoiceSearch,
        RecordingDeviceType = RecognitionMetadata.Types.RecordingDeviceType.Smartphone,
        RecordingDeviceName = "Pixel 3",
    };
    RecognizeRequest request = new RecognizeRequest
    {
        Config = new RecognitionConfig
        {
            Metadata = metadata,
            // Even though additional languages can be provided via
            // alternative_language_codes, a primary language is still required.
            LanguageCode = "en-US",
        },
        Audio = new RecognitionAudio
        {
            Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
        },
    };

    RecognizeResponse response = speechClient.Recognize(request);
    foreach (var result in response.Results)
    {
        // The first alternative is the most probable result.
        SpeechRecognitionAlternative best = result.Alternatives[0];
        Console.WriteLine($"Transcript: {best.Transcript}");
    }
}
/// <summary>
/// Runs synchronous recognition on the given WAV file (8 kHz LINEAR16, zh-TW)
/// and appends each raw result to textBox2.
/// </summary>
/// <param name="file">Path to the WAV file to transcribe.</param>
private void RecognizeFile(string file)
{
    // Guard clauses: require a non-empty path that points to an existing file.
    if (string.IsNullOrEmpty(file))
    {
        MessageBox.Show("Please select a wav file first!");
        return;
    }
    // FIX: idiomatic negation instead of "== false"; corrected the grammar
    // of the user-facing error message.
    if (!File.Exists(file))
    {
        MessageBox.Show("Specified WAV file does NOT exist! Please try to select another file...");
        return;
    }

    SpeechClient client = SpeechClient.Create();
    RecognitionConfig config = new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
        SampleRateHertz = 8000,
        LanguageCode = "zh-TW",
    };
    RecognitionAudio audio = RecognitionAudio.FromFile(file);

    var response = client.Recognize(config, audio);
    foreach (var item in response.Results)
    {
        textBox2.AppendLine(item.ToString());
    }
}
// Calls the Google Speech API to transcribe the audio file at the given path.
// Every alternative transcript is echoed to the console; the top transcript of
// the first result is returned, or "NO RESPONSE" when Google detected nothing.
public string Send_Value(string path)
{
    string file_path = path;
    RecognitionAudio audio1 = RecognitionAudio.FromFile(file_path);
    SpeechClient client = SpeechClient.Create();
    RecognitionConfig config = new RecognitionConfig
    {
        // 44.1 kHz LINEAR16 (wave) input, US English.
        Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
        SampleRateHertz = 44100,
        LanguageCode = LanguageCodes.English.UnitedStates
    };
    RecognizeResponse response = client.Recognize(config, audio1);

    // Log everything the API returned.
    foreach (var result in response.Results)
    {
        foreach (var alternative in result.Alternatives)
        {
            Console.WriteLine(alternative.Transcript);
        }
    }

    // Return the most probable transcript, if any result came back.
    if (response.Results.Count == 0)
    {
        return "NO RESPONSE";
    }
    return response.Results[0].Alternatives[0].Transcript;
}
/// <summary>
/// Transcripts the provided audio file.
/// </summary>
/// <remarks>WAV format is currently required.</remarks>
/// <param name="filepath">The path to the audio file.</param>
/// <returns>The transcript retrieved, if any; otherwise null.</returns>
public string SpeechToText(string filepath)
{
    // Guard clauses: require a non-empty path to an existing file.
    if (string.IsNullOrEmpty(filepath))
    {
        throw new ArgumentNullException(nameof(filepath));
    }
    if (!File.Exists(filepath))
    {
        throw new ArgumentException((this as ILocalizedService <SpeechToTextService>).GetLocalized("FileNotFoundError", filepath), nameof(filepath));
    }

    // TODO: Now that the front end has a polyfill for support, check whether a
    // lighter audio format would be just as effective.
    SpeechClient speech = SpeechClient.Create();
    RecognitionConfig config = new RecognitionConfig()
    {
        Encoding = AudioEncoding.Linear16,
        SampleRateHertz = 48000,
        LanguageCode = this.appSettings.Google.SpeechToText.LanguageCode,
    };
    RecognizeResponse response = speech.Recognize(config, RecognitionAudio.FromFile(filepath));

    // Return the first alternative of the first result, if the API produced any.
    foreach (SpeechRecognitionResult result in response.Results)
    {
        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
        {
            return alternative.Transcript;
        }
    }

    return null;
}
/// <summary>
/// Transcribe a short audio file with punctuation
/// </summary>
/// <param name="localFilePath">Path to local audio file, e.g. /path/audio.wav</param>
public static void SampleRecognize(string localFilePath)
{
    SpeechClient speechClient = SpeechClient.Create();
    // string localFilePath = "resources/commercial_mono.wav"

    RecognitionConfig config = new RecognitionConfig
    {
        // When enabled, transcription results may include punctuation
        // (available for select languages).
        EnableAutomaticPunctuation = true,
        // Even though additional languages can be provided via
        // alternative_language_codes, a primary language is still required.
        LanguageCode = "en-US",
    };
    RecognitionAudio audio = new RecognitionAudio
    {
        Content = ByteString.CopyFrom(File.ReadAllBytes(localFilePath)),
    };
    RecognizeRequest request = new RecognizeRequest { Config = config, Audio = audio };

    RecognizeResponse response = speechClient.Recognize(request);
    foreach (var result in response.Results)
    {
        // The first alternative is the most probable result.
        SpeechRecognitionAlternative best = result.Alternatives[0];
        Console.WriteLine($"Transcript: {best.Transcript}");
    }
}
/// <summary>
/// Performs synchronous speech recognition on an audio file.
/// </summary>
/// <param name="sampleRateHertz">Sample rate in Hertz of the audio data sent in all `RecognitionAudio`
/// messages. Valid values are: 8000-48000.</param>
/// <param name="languageCode">The language of the supplied audio.</param>
/// <param name="uriPath">Path to the audio file stored on GCS.</param>
public static void SampleRecognize(int sampleRateHertz, string languageCode, string uriPath)
{
    SpeechClient speechClient = SpeechClient.Create();
    // Example argument values:
    // int sampleRateHertz = 44100
    // string languageCode = "en-US"
    // string uriPath = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"
    RecognizeRequest request = new RecognizeRequest
    {
        Config = new RecognitionConfig
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Mp3,
            // BUG FIX: the method previously ignored its parameters and used
            // hard-coded sample values; the caller-supplied arguments are now used.
            SampleRateHertz = sampleRateHertz,
            LanguageCode = languageCode,
        },
        Audio = new RecognitionAudio
        {
            // Path to the audio file stored on GCS.
            Uri = uriPath,
        },
    };
    RecognizeResponse response = speechClient.Recognize(request);
    foreach (var result in response.Results)
    {
        // The first alternative is the most probable result.
        string transcript = result.Alternatives[0].Transcript;
        Console.WriteLine($"Transcript: {transcript}");
    }
}
/// <summary>
/// Smoke test: recognizes a known FLAC sample on GCS and prints the response.
/// </summary>
public static int Main(string[] args)
{
    // Create the client.
    SpeechClient client = SpeechClient.Create();

    // Build the request arguments for the known test file.
    RecognitionConfig recognitionConfig = new RecognitionConfig
    {
        LanguageCode = "en-US",
        SampleRateHertz = 44100,
        Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
    };
    RecognitionAudio recognitionAudio = new RecognitionAudio
    {
        Uri = "gs://gapic-toolkit/hello.flac",
    };

    // Call the API and show the result.
    RecognizeResponse response = client.Recognize(recognitionConfig, recognitionAudio);
    Console.WriteLine(response);

    // Reaching this point without an exception means the smoke test succeeded.
    Console.WriteLine("Smoke test passed OK");
    return(0);
}
/// <summary>
/// Recognizes the audio file given as the first argument, using either the
/// default Google endpoint or an msspeech-bridge endpoint given as the second
/// argument, and prints every alternative transcript.
/// </summary>
public static void Main(string[] args)
{
    // BUG FIX: the original crashed with IndexOutOfRangeException when run
    // with no arguments (args[0] / args[1] were read unconditionally).
    if (args.Length == 0)
    {
        Console.WriteLine("Usage: <audio-file> [msspeech-bridge endpoint]");
        return;
    }

    SpeechClient speech = null;
    if (args.Length == 1)
    {
        // use default Google API
        Console.WriteLine("Using Google APIs");
        speech = SpeechClient.Create();
    }
    else
    {
        // use msspeech-gbridge endpoint supplied as the second argument
        Console.WriteLine("Using msspeech-bridge at " + args[1]);
        speech = SpeechClient.Create(new Channel(args[1], ChannelCredentials.Insecure));
    }

    var response = speech.Recognize(new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
        SampleRateHertz = 16000,
        LanguageCode = "en-US",
    }, RecognitionAudio.FromFile(args[0]));

    foreach (var result in response.Results)
    {
        foreach (var alternative in result.Alternatives)
        {
            Console.WriteLine(alternative.Transcript);
        }
    }
}
/// <summary>Snippet for Recognize</summary>
public void Recognize()
{
    // Snippet: Recognize(RecognitionConfig, RecognitionAudio, CallSettings)
    // Create the client used to issue the request.
    SpeechClient client = SpeechClient.Create();
    // Build empty request arguments.
    RecognitionConfig recognitionConfig = new RecognitionConfig();
    RecognitionAudio recognitionAudio = new RecognitionAudio();
    // Invoke the synchronous Recognize RPC.
    RecognizeResponse recognizeResponse = client.Recognize(recognitionConfig, recognitionAudio);
    // End snippet
}
// Data processing functions

/// <summary>
/// Sends the FLAC audio at FilePath to the Speech API with word time offsets
/// enabled, and stores the response in lastResponse.
/// </summary>
/// <param name="args">Unused; kept for interface compatibility.</param>
/// <returns>Always true on completion.</returns>
public static Boolean getGoogleResponse(String[] args)
{
    Console.WriteLine("Getting goodle data...");
    // BUG FIX: Recognize is an instance method, so a client must be created
    // first; the original called it statically on SpeechClient, which does
    // not compile.
    SpeechClient client = SpeechClient.Create();
    lastResponse = client.Recognize(new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
        SampleRateHertz = 44100,
        LanguageCode = "en",
        EnableWordTimeOffsets = true
    }, RecognitionAudio.FromFile(FilePath));
    Console.WriteLine("Data retreved.");
    return(true);
}
// https://developers.google.com/admin-sdk/directory/v1/languages
/// <summary>
/// Transcribes the specified URL.
/// </summary>
/// <param name="url">The URL of the audio to fetch and transcribe.</param>
/// <param name="languageCode">The primary language code.</param>
/// <param name="altLanguages">Optional alternative language codes for auto-detection.</param>
/// <returns>The highest-confidence transcription with its confidence and detected language.</returns>
public async Task <TranscriptionViewModel> Transcribe(string url, string languageCode = "en-US", List <string> altLanguages = null)
{
    // Initialize GA Speech Client
    Channel channel = new Channel(
        SpeechClient.DefaultEndpoint.Host,
        _googleCredential.ToChannelCredentials());
    SpeechClient speech = SpeechClient.Create(channel);

    try
    {
        RecognitionAudio audio = await RecognitionAudio.FetchFromUriAsync(url);
        RecognitionConfig config = new RecognitionConfig
        {
            Encoding = AudioEncoding.Linear16,
            LanguageCode = languageCode,
        };
        if (altLanguages != null)
        {
            foreach (string altLang in altLanguages)
            {
                config.AlternativeLanguageCodes.Add(altLang);
            }
        }

        // FIX: use the async RPC instead of blocking inside an async method.
        RecognizeResponse response = await speech.RecognizeAsync(config, audio);

        string transcript = "";
        float confidence = 0f;
        string language = "";

        // Keep only the highest-confidence alternative across all results.
        foreach (var res in response.Results)
        {
            foreach (var alternative in res.Alternatives)
            {
                if (alternative.Confidence > confidence)
                {
                    transcript = alternative.Transcript;
                    confidence = alternative.Confidence;
                }
            }
            language = res.LanguageCode;
        }

        return new TranscriptionViewModel()
        {
            Transcript = transcript,
            Confidence = confidence,
            Language = language
        };
    }
    finally
    {
        // BUG FIX: shut the channel down even when fetching or recognition
        // throws, so the gRPC channel is never leaked.
        await channel.ShutdownAsync();
    }
}
/// <summary>
/// Button handler: transcribes the bundled FLAC test asset
/// (22.05 kHz, Japanese) and logs the raw API response.
/// </summary>
public void OnClickGCPRecognitionButton()
{
    RecognitionConfig config = new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
        SampleRateHertz = 22050,
        LanguageCode = LanguageCodes.Japanese.Japan,
    };
    RecognitionAudio audio = RecognitionAudio.FromFile(Application.streamingAssetsPath + "/test.flac");

    var response = client.Recognize(config, audio);
    Debug.Log(response);
}
/// <summary>Snippet for Recognize</summary>
public void Recognize_RequestObject()
{
    // Snippet: Recognize(RecognizeRequest, CallSettings)
    // Create the client used to issue the request.
    SpeechClient client = SpeechClient.Create();
    // Build a request object with empty config and audio.
    RecognizeRequest recognizeRequest = new RecognizeRequest
    {
        Config = new RecognitionConfig(),
        Audio = new RecognitionAudio(),
    };
    // Invoke the synchronous Recognize RPC.
    RecognizeResponse recognizeResponse = client.Recognize(recognizeRequest);
    // End snippet
}
/// <summary>
/// Recognizes the recorded audio at outputFilePath, accumulates every
/// alternative transcript into strRecgnResult (separated by "..."),
/// then flags that a result has arrived.
/// </summary>
private void SpeechToText()
{
    strRecgnResult = "";
    RecognitionAudio audio = RecognitionAudio.FromFile(outputFilePath);
    RecognizeResponse response = speech.Recognize(config, audio);
    foreach (var result in response.Results)
    {
        foreach (var alternative in result.Alternatives)
        {
            strRecgnResult += alternative.Transcript + "...";
        }
    }
    isResultRecieved = true;
}
/// <summary>
/// Performs synchronous speech recognition with speech adaptation.
/// </summary>
/// <param name="sampleRateHertz">Sample rate in Hertz of the audio data sent in all `RecognitionAudio`
/// messages. Valid values are: 8000-48000.</param>
/// <param name="languageCode">The language of the supplied audio.</param>
/// <param name="phrase">Phrase "hints" help Speech-to-Text API recognize the specified phrases from
/// your audio data.</param>
/// <param name="boost">Positive value will increase the probability that a specific phrase will be
/// recognized over other similar sounding phrases.</param>
/// <param name="uriPath">Path to the audio file stored on GCS.</param>
public static void SampleRecognize(int sampleRateHertz, string languageCode, string phrase, float boost, string uriPath)
{
    SpeechClient speechClient = SpeechClient.Create();
    // Example argument values:
    // int sampleRateHertz = 44100
    // string languageCode = "en-US"
    // string phrase = "Brooklyn Bridge"
    // float boost = 20f
    // string uriPath = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"
    RecognizeRequest request = new RecognizeRequest
    {
        Config = new RecognitionConfig
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Mp3,
            // BUG FIX: the method previously ignored its parameters and used
            // hard-coded sample values; the caller-supplied arguments are now used.
            SampleRateHertz = sampleRateHertz,
            LanguageCode = languageCode,
            SpeechContexts =
            {
                new SpeechContext
                {
                    Phrases = { phrase, },
                    // Positive value increases the probability that this phrase
                    // is recognized over other similar sounding phrases.
                    Boost = boost,
                },
            },
        },
        Audio = new RecognitionAudio
        {
            // Path to the audio file stored on GCS.
            Uri = uriPath,
        },
    };
    RecognizeResponse response = speechClient.Recognize(request);
    foreach (var result in response.Results)
    {
        // First alternative is the most probable result
        SpeechRecognitionAlternative alternative = result.Alternatives[0];
        Console.WriteLine($"Transcript: {alternative.Transcript}");
    }
}
// Transcribe a local audio file. We can only use this with audios up to 1 minute long.
public Transcribed_Dto TranscribeLocalFile(string fileName, string language)
{
    RecognitionAudio recogAudio = RecognitionAudio.FromFile(fileName);

    // 48 kHz FLAC input with per-word time offsets enabled.
    RecognitionConfig config = new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
        SampleRateHertz = 48000,
        EnableWordTimeOffsets = true,
        LanguageCode = language,
    };
    var response = speechClient.Recognize(config, recogAudio);

    // Simplify the raw API results, then normalize speaker tags before returning.
    Transcribed_Dto simplified = TransformResponse.Simpify(response.Results);
    return TransformResponse.FixSpeakerTags(simplified);
}
/// <summary>
/// Recognizes the given audio bytes and writes the concatenated transcripts
/// into s, separating consecutive results with a single space.
/// </summary>
/// <param name="recording">Raw audio bytes to recognize.</param>
/// <param name="speech">An initialized Speech client.</param>
/// <param name="config">Recognition configuration to use.</param>
/// <param name="s">Receives the concatenated transcripts.</param>
static void Recognize(byte[] recording, SpeechClient speech, RecognitionConfig config, ref string s)
{
    s = "";
    var response = speech.Recognize(config, RecognitionAudio.FromBytes(recording));
    // PERF FIX: Last() was re-enumerating the results on every loop iteration;
    // hoist the loop-invariant lookup. LastOrDefault also tolerates an empty
    // result list (the original only reached Last() inside a non-empty loop).
    var lastResult = response.Results.LastOrDefault();
    foreach (var result in response.Results)
    {
        foreach (var alternative in result.Alternatives)
        {
            s += alternative.Transcript;
        }
        // Separate results with a space, but not after the final one.
        if (!ReferenceEquals(result, lastResult))
        {
            s += " ";
        }
    }
}
/// <summary>
/// Performs synchronous speech recognition with static context classes.
/// </summary>
/// <param name="sampleRateHertz">Sample rate in Hertz of the audio data sent in all `RecognitionAudio`
/// messages. Valid values are: 8000-48000.</param>
/// <param name="languageCode">The language of the supplied audio.</param>
/// <param name="phrase">Phrase "hints" help Speech-to-Text API recognize the specified phrases from
/// your audio data. In this sample we are using a static class phrase ($TIME). Classes represent
/// groups of words that represent common concepts that occur in natural language. We recommend
/// checking out the docs page for more info on static classes.</param>
/// <param name="uriPath">Path to the audio file stored on GCS.</param>
public static void SampleRecognize(int sampleRateHertz, string languageCode, string phrase, string uriPath)
{
    SpeechClient speechClient = SpeechClient.Create();
    // Example argument values:
    // int sampleRateHertz = 24000
    // string languageCode = "en-US"
    // string phrase = "$TIME"
    // string uriPath = "gs://cloud-samples-data/speech/time.mp3"
    RecognizeRequest request = new RecognizeRequest
    {
        Config = new RecognitionConfig
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Mp3,
            // BUG FIX: the method previously ignored its parameters and used
            // hard-coded sample values; the caller-supplied arguments are now used.
            SampleRateHertz = sampleRateHertz,
            LanguageCode = languageCode,
            SpeechContexts =
            {
                new SpeechContext
                {
                    Phrases = { phrase, },
                },
            },
        },
        Audio = new RecognitionAudio
        {
            // Path to the audio file stored on GCS.
            Uri = uriPath,
        },
    };
    RecognizeResponse response = speechClient.Recognize(request);
    foreach (var result in response.Results)
    {
        // First alternative is the most probable result
        SpeechRecognitionAlternative alternative = result.Alternatives[0];
        Console.WriteLine($"Transcript: {alternative.Transcript}");
    }
}
/// <summary>
/// Pipeline function that will handle incoming pipeline packages of audio bytes.
/// Will translate audio bytes to text and send text down the pipeline.
/// </summary>
/// <param name="audio">Buffered audio bytes (16 kHz LINEAR16).</param>
/// <param name="e">Envelope carrying the originating timestamp.</param>
protected override void Receive(AudioBuffer audio, Envelope e)
{
    // Ignore empty buffers.
    if (audio.Data.Length == 0)
    {
        return;
    }

    RecognitionConfig config = new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
        SampleRateHertz = 16000,
        LanguageCode = this.AudioLanguage,
        EnableWordTimeOffsets = true,
    };
    var response = speech.Recognize(config, RecognitionAudio.FromBytes(audio.Data));

    if (response.Results.Count > 0)
    {
        // Post only the top alternative of the first result downstream,
        // stamped with the buffer's originating time.
        string transcribedAudio = response.Results.First().Alternatives.First().Transcript;
        this.Out.Post(transcribedAudio, e.OriginatingTime);
    }
}
/// <summary>
/// This is implementation of SpeechToText function using Google Cloud Speech API, for more information see ISpeechRecognizer.SpeechToText
/// </summary>
/// <param name="waveFileName">Path to a wav voice file containing the voice to be recognized.</param>
/// <returns>All alternative transcriptions returned by Google, in order.</returns>
public List <string> SpeechToText(string waveFileName)
{
    // Collected transcriptions to return to the caller.
    List <string> results = new List <string>();
    try
    {
        // Wave (LINEAR16) input; the language comes from instance state and
        // the sample rate is read from the file itself.
        RecognitionConfig config = new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
            LanguageCode = languageCode,
            SampleRateHertz = sampleRate(waveFileName)
        };
        var response = speechClient.Recognize(config, RecognitionAudio.FromFile(waveFileName));

        // Collect every alternative of every result.
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                results.Add(alternative.Transcript);
            }
        }
    }
    catch (Exception ex)
    {
        // Wrap any failure so callers get a single, descriptive exception.
        throw new Exception("Could not perform Google voice recognition.", ex);
    }
    return(results);
}
/// <summary>Snippet for Recognize</summary>
public void Recognize()
{
    // Snippet: Recognize(RecognitionConfig,RecognitionAudio,CallSettings)
    // Create the client used to issue the request.
    SpeechClient client = SpeechClient.Create();
    // Build the request arguments: 44.1 kHz FLAC audio stored on GCS.
    RecognitionConfig recognitionConfig = new RecognitionConfig
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
        SampleRateHertz = 44100,
        LanguageCode = "en-US",
    };
    RecognitionAudio recognitionAudio = new RecognitionAudio
    {
        Uri = "gs://bucket_name/file_name.flac",
    };
    // Invoke the synchronous Recognize RPC.
    RecognizeResponse recognizeResponse = client.Recognize(recognitionConfig, recognitionAudio);
    // End snippet
}
/// <summary>
/// Recognizes the audio buffered in Recognizer.longerAudioList and returns the
/// top transcript, or "ERROR" when the buffer is too short or no result came back.
/// Clears the buffer after the API call.
/// </summary>
/// <returns>The most probable transcript, or "ERROR".</returns>
public string Recognize()
{
    // Require a minimal amount of buffered audio before calling the API.
    if (Recognizer.longerAudioList.Count < 3200)
    {
        return("ERROR");
    }
    RecognitionAudio audio5 = RecognitionAudio.FromBytes(Recognizer.longerAudioList.ToArray());
    RecognizeResponse response = client.Recognize(config, audio5);
    Console.WriteLine(response);
    Recognizer.longerAudioList.Clear();

    // FIX: check for empty results explicitly instead of using a catch-all
    // exception for control flow; the return values are unchanged.
    if (response.Results.Count > 0 && response.Results[0].Alternatives.Count > 0)
    {
        return(response.Results[0].Alternatives[0].Transcript);
    }
    return("ERROR");
}
/// <summary>
/// Recognizes "speech.wav" synchronously (declared as 16 kHz FLAC, en-US)
/// and prints every alternative transcript to the console.
/// </summary>
private void RecongnizeFromFile()
{
    SpeechClient speechClient = SpeechClient.Create();
    // NOTE(review): the encoding is declared Flac but the file is named
    // "speech.wav" — confirm the file's actual format.
    RecognitionConfig recognitionConfig = new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
        SampleRateHertz = 16000,
        LanguageCode = "en-US"
    };
    RecognitionAudio recognitionAudio = RecognitionAudio.FromFile("speech.wav");

    RecognizeResponse response = speechClient.Recognize(recognitionConfig, recognitionAudio);
    foreach (SpeechRecognitionResult result in response.Results)
    {
        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
        {
            Console.WriteLine(alternative.Transcript);
        }
    }
}
/// <summary>
/// Lets the user pick an audio file, transcribes it with the Speech API using
/// credentials from a local JSON key file, and replaces textEntry's content
/// with the combined transcript.
/// </summary>
private void openAudioBtn_Click(object sender, RoutedEventArgs e)
{
    var fileDiag = new System.Windows.Forms.OpenFileDialog();
    if (fileDiag.ShowDialog() == System.Windows.Forms.DialogResult.OK)
    {
        selectedFileName = fileDiag.FileName;
    }

    if (selectedFileName != "")
    {
        // NOTE(review): hard-coded credentials path — consider moving this to
        // configuration instead of a user-specific absolute path.
        SpeechClientBuilder builder = new SpeechClientBuilder
        {
            CredentialsPath = @"C:\Users\vdmil\Downloads\my_key.json"
        };
        SpeechClient speech = builder.Build();

        var response = speech.Recognize(new RecognitionConfig()
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.EncodingUnspecified,
            SampleRateHertz = 16000,
            LanguageCode = "en",
        }, RecognitionAudio.FromFile(selectedFileName));

        // Replace the text box content with the concatenated transcripts.
        textEntry.Document.Blocks.Clear();
        string combined = "";
        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                combined += alternative.Transcript;
            }
        }
        textEntry.Document.Blocks.Add(new Paragraph(new Run(combined)));
    }

    // Reset the selection so a cancelled dialog next time starts clean.
    selectedFileName = "";
}
/// <summary>
/// Sample/test: recognizes a bundled 16 kHz LINEAR16 resource and asserts the
/// expected transcript (case-insensitively).
/// </summary>
public void Recognize()
{
    var audio = LoadResourceAudio("speech.raw");
    // Sample: Recognize
    // Additional: Recognize(*,*,*)
    SpeechClient client = SpeechClient.Create();
    RecognitionConfig config = new RecognitionConfig
    {
        Encoding = AudioEncoding.Linear16,
        SampleRateHertz = 16000,
        LanguageCode = LanguageCodes.English.UnitedStates
    };
    RecognizeResponse response = client.Recognize(config, audio);
    Console.WriteLine(response);
    // End sample

    // Verify the known transcript, ignoring case.
    Assert.Equal(
        "this is a test file for the google cloud speech api",
        response.Results[0].Alternatives[0].Transcript,
        true);
}
/// <summary>
/// Synchronously recognizes a local audio file using explicit service-account
/// credentials loaded from the given JSON key file, printing every alternative.
/// </summary>
/// <param name="filePath">Path of the local audio file (16 kHz LINEAR16).</param>
/// <param name="credentialsFilePath">Path of the service-account JSON key file.</param>
/// <returns>0 on completion.</returns>
static object SyncRecognizeWithCredentials(string filePath, string credentialsFilePath)
{
    var builder = new SpeechClientBuilder { CredentialsPath = credentialsFilePath };
    SpeechClient speech = builder.Build();

    RecognitionConfig config = new RecognitionConfig()
    {
        Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
        SampleRateHertz = 16000,
        LanguageCode = "en",
    };
    var response = speech.Recognize(config, RecognitionAudio.FromFile(filePath));

    foreach (var result in response.Results)
    {
        foreach (var alternative in result.Alternatives)
        {
            Console.WriteLine(alternative.Transcript);
        }
    }
    return(0);
}