public static int Main(string[] args)
        {
            // Create client
            SpeechClient client = SpeechClient.Create();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig
            {
                LanguageCode    = "en-US",
                SampleRateHertz = 44100,
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
            };
            RecognitionAudio audio = new RecognitionAudio
            {
                Uri = "gs://gapic-toolkit/hello.flac",
            };

            // Call API method
            RecognizeResponse response = client.Recognize(config, audio);

            // Show the result
            Console.WriteLine(response);

            // Success
            Console.WriteLine("Smoke test passed OK");
            return 0;
        }
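The smoke test assumes credentials are already available to the client. One common approach, also used by a later example in this listing, is to point GOOGLE_APPLICATION_CREDENTIALS at a service-account key file before creating the client; the path below is a placeholder, not part of the original sample:

            // Hypothetical: make Application Default Credentials available
            // before SpeechClient.Create() is called. The path is a placeholder.
            Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS",
                                               @"C:\path\to\service-account.json");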
Example #2
        // [END speech_transcribe_enhanced_model]

        // [START speech_transcribe_multichannel_beta]
        static object SyncRecognizeMultipleChannels(string filePath, int channelCount)
        {
            var speech = SpeechClient.Create();

            // Create transcription request
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode = "en",
                // Configure request to enable multiple channels
                EnableSeparateRecognitionPerChannel = true,
                AudioChannelCount = channelCount
            }, RecognitionAudio.FromFile(filePath));

            // Print out the results.
            foreach (var result in response.Results)
            {
                // There can be several transcripts for a chunk of audio.
                // Print out the first (most likely) one here.
                var alternative = result.Alternatives[0];
                Console.WriteLine($"Transcript: {alternative.Transcript}");
                Console.WriteLine($"Channel Tag: {result.ChannelTag}");
            }
            return 0;
        }
Example #3
        public async Task <SpeechToTextViewModel> AsyncRecognize(byte[] file)
        {
            var longOperation = await SpeechProperty().LongRunningRecognizeAsync(new RecognitionConfig()
            {
                Encoding                   = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz            = 16000,
                LanguageCode               = "en",
                EnableAutomaticPunctuation = true,
                EnableWordTimeOffsets      = true
            }, RecognitionAudio.FromBytes(file));

            longOperation = await longOperation.PollUntilCompletedAsync();
            var response = longOperation.Result;

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    var count = alternative.Words.Count;
                    // Note: each alternative overwrites this array, so only the
                    // last alternative's word timings are kept.
                    _model.WordInfo = new WordInfo[count];
                    for (var i = 0; i < count; i++)
                    {
                        _model.WordInfo[i] = alternative.Words[i];
                    }
                }
            }
            return _model;
        }
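SpeechProperty() and _model are referenced above but not shown in this listing. A minimal sketch of what they might look like, assuming the view model only carries the word timings (names and shapes here are assumptions, not the original code):

        // Hypothetical supporting members for the example above (assumed shapes).
        private readonly SpeechToTextViewModel _model = new SpeechToTextViewModel();

        // Returns the client used for recognition. A cached instance would
        // normally be preferred over creating one per call.
        private SpeechClient SpeechProperty() => SpeechClient.Create();

        public class SpeechToTextViewModel
        {
            // Word-level timing info copied from the recognition alternatives.
            public WordInfo[] WordInfo { get; set; }
        }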
Example #4
        private void button1_Click(object sender, EventArgs e)
        {
            int            sampleRate;
            OpenFileDialog ofd = new OpenFileDialog();

            if (ofd.ShowDialog() != DialogResult.OK)
            {
                return;
            }
            using (Mp3FileReader mp3 = new Mp3FileReader(ofd.FileName))
            {
                // Downmix to mono; the Speech API expects single-channel audio
                // for LINEAR16 by default.
                var outFormat = new WaveFormat(mp3.WaveFormat.SampleRate, 1);
                using (var resampler = new MediaFoundationResampler(mp3, outFormat))
                {
                    sampleRate = outFormat.SampleRate;
                    WaveFileWriter.CreateWaveFile("audioteste3.wav", resampler);
                }
            }
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = sampleRate,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile("audioteste3.wav"));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    txtResultado.Text += alternative.Transcript;
                }
            }
        }
Example #5
        public void Read()
        {
            var speech        = SpeechClient.Create();
            var longOperation = speech.LongRunningRecognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 8000,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile(DEMO_FILE));

            longOperation = longOperation.PollUntilCompleted();
            var response = longOperation.Result;

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    OutPut += alternative.Transcript;
                }
            }

            label1.Text = OutPut;
        }
Example #6
        /// <summary>
        /// Transcribes the provided audio file.
        /// </summary>
        /// <remarks>WAV format is currently required.</remarks>
        /// <param name="filepath">The path to the audio file.</param>
        /// <returns>The transcript retrieved, if any.</returns>
        public string SpeechToText(string filepath)
        {
            if (string.IsNullOrEmpty(filepath))
            {
                throw new ArgumentNullException(nameof(filepath));
            }

            if (!File.Exists(filepath))
            {
                throw new ArgumentException((this as ILocalizedService <SpeechToTextService>).GetLocalized("FileNotFoundError", filepath), nameof(filepath));
            }

            // TODO: Now that the front end has a polyfill for support, check whether a lighter format would be just as effective.
            SpeechClient      speech   = SpeechClient.Create();
            RecognizeResponse response = speech.Recognize(
                new RecognitionConfig()
            {
                Encoding        = AudioEncoding.Linear16,
                SampleRateHertz = 48000,
                LanguageCode    = this.appSettings.Google.SpeechToText.LanguageCode,
            },
                RecognitionAudio.FromFile(filepath));

            foreach (SpeechRecognitionResult result in response.Results)
            {
                foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                {
                    return alternative.Transcript;
                }
            }

            return null;
        }
Example #7
        /// <summary>Snippet for LongRunningRecognizeAsync</summary>
        public async Task LongRunningRecognizeAsync()
        {
            // Snippet: LongRunningRecognizeAsync(RecognitionConfig, RecognitionAudio, CallSettings)
            // Additional: LongRunningRecognizeAsync(RecognitionConfig, RecognitionAudio, CancellationToken)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig();
            RecognitionAudio  audio  = new RecognitionAudio();
            // Make the request
            Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response = await speechClient.LongRunningRecognizeAsync(config, audio);

            // Poll until the returned long-running operation is complete
            Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> completedResponse = await response.PollUntilCompletedAsync();

            // Retrieve the operation result
            LongRunningRecognizeResponse result = completedResponse.Result;

            // Or get the name of the operation
            string operationName = response.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> retrievedResponse = await speechClient.PollOnceLongRunningRecognizeAsync(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedResponse.IsCompleted)
            {
                // If it has completed, then access the result
                LongRunningRecognizeResponse retrievedResult = retrievedResponse.Result;
            }
            // End snippet
        }
Example #8
        static object AsyncRecognizeGcsWords(string storageUri)
        {
            var speech        = SpeechClient.Create();
            var longOperation = speech.LongRunningRecognize(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz       = 16000,
                LanguageCode          = "en",
                EnableWordTimeOffsets = true,
            }, RecognitionAudio.FromStorageUri(storageUri));

            longOperation = longOperation.PollUntilCompleted();
            var response = longOperation.Result;

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine($"Transcript: { alternative.Transcript}");
                    Console.WriteLine("Word details:");
                    Console.WriteLine($" Word count:{alternative.Words.Count}");
                    foreach (var item in alternative.Words)
                    {
                        Console.WriteLine($"  {item.Word}");
                        Console.WriteLine($"    WordStartTime: {item.StartTime}");
                        Console.WriteLine($"    WordEndTime: {item.EndTime}");
                    }
                }
            }
            return 0;
        }
Example #9
        public string GetTopResult(string filePath)
        {
            Stopwatch stopwatch = new Stopwatch();

            stopwatch.Start();
            var audio = RecognitionAudio.FromFile(filePath);

            Console.WriteLine("GetAudio:" + stopwatch.ElapsedMilliseconds.ToString() + "ms");
            stopwatch.Restart();
            var audioResult = this.Client.Recognize(Config, audio);

            Console.WriteLine("RecognisedSpeechArray:" + stopwatch.ElapsedMilliseconds.ToString() + "ms");
            stopwatch.Restart();
            // Results.ToString() emits the protobuf JSON representation, which
            // is then re-parsed into plain DTOs.
            var fullResult = JsonSerializer.Deserialize <List <Sequence> >(audioResult.Results.ToString(), new JsonSerializerOptions()
            {
                PropertyNameCaseInsensitive = true
            });

            Console.WriteLine("SerializeResult:" + stopwatch.ElapsedMilliseconds.ToString() + "ms");
            stopwatch.Restart();
            var result = fullResult[0].Alternatives[0].Transcript;

            Console.WriteLine("Return top:" + stopwatch.ElapsedMilliseconds.ToString() + "ms");
            return result;
        }
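Client and Config are fields defined elsewhere in the class, and the Sequence type used for deserialization is not shown either. A plausible minimal shape for the DTOs, matching the protobuf JSON that Results.ToString() emits (class and property names are assumptions):

        // Assumed DTOs mirroring the JSON produced by audioResult.Results.ToString().
        public class Sequence
        {
            public List <Alternative> Alternatives { get; set; }
        }

        public class Alternative
        {
            public string Transcript { get; set; }
            public float  Confidence { get; set; }
        }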
Example #10
        public async Task <string> ConvertSpeechFileToText(string fileName)
        {
            string URI = "gs://eznotes-user-files/" + fileName;

            // Create credential from secret file
            var credential = GoogleCredential.FromFile(this.keypath)
                             .CreateScoped(SpeechClient.DefaultScopes);
            var channel = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.ToString(),
                                                credential.ToChannelCredentials());

            var speech        = SpeechClient.Create(channel);
            var longOperation = await speech.LongRunningRecognizeAsync(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz       = 16000,
                LanguageCode          = "en-US",
                EnableWordTimeOffsets = true,
                Model = "phone_call",
                EnableSpeakerDiarization   = true,
                EnableAutomaticPunctuation = true,
                UseEnhanced = true
            }, RecognitionAudio.FromStorageUri(URI));

            longOperation = await longOperation.PollUntilCompletedAsync();

            string response = JsonConvert.SerializeObject(longOperation.Result.Results);

            return response;
        }
Example #11
        public static void UploadAudio(IFormFile audio)
        {
            // Reference to Google Cloud Speech-to-Text credentials
            string credential_path = @"C:\Users\Billy\workspace\capstones\1000Words\1000Words\words-247918-f13fa4057b4a.json";

            System.Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", credential_path);

            using (Stream stream = audio.OpenReadStream())
            {
                RecognitionAudio recognitionAudio = RecognitionAudio.FromStream(stream);

                var speech   = SpeechClient.Create();
                var response = speech.Recognize(new RecognitionConfig()
                {
                    Encoding     = RecognitionConfig.Types.AudioEncoding.Linear16,
                    LanguageCode = "en",
                }, recognitionAudio);

                Keywords.Clear();

                foreach (var result in response.Results)
                {
                    foreach (var alternative in result.Alternatives)
                    {
                        // Add transcript to list of keywords to be returned
                        Keywords.Add(alternative.Transcript);
                    }
                }
            }
        }
Example #12
        public static string StartTranslate(string path, string lang)
        {
            var builder = new SpeechClientBuilder();

            builder.CredentialsPath = "key.json";
            var speech = builder.Build();

            var config = new RecognitionConfig
            {
                Encoding          = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode      = lang,
                AudioChannelCount = 1
            };

            var audio = RecognitionAudio.FromFile(path);

            var    response = speech.Recognize(config, audio);
            string fullText = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    fullText += alternative.Transcript;
                }
            }
            return fullText;
        }
Example #13
        // [END speech_transcribe_sync]

        // [START speech_sync_recognize_words]
        static object SyncRecognizeWords(string filePath)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz       = 16000,
                LanguageCode          = "en",
                EnableWordTimeOffsets = true,
            }, RecognitionAudio.FromFile(filePath));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine($"Transcript: { alternative.Transcript}");
                    Console.WriteLine("Word details:");
                    Console.WriteLine($" Word count:{alternative.Words.Count}");
                    foreach (var item in alternative.Words)
                    {
                        Console.WriteLine($"  {item.Word}");
                        Console.WriteLine($"    WordStartTime: {item.StartTime}");
                        Console.WriteLine($"    WordEndTime: {item.EndTime}");
                    }
                }
            }
            return 0;
        }
Example #14
        public async Task GetTranscript(string uri, Action <string> callback)
        {
            if (client == null)
            {
                return;
            }
            var context = new SpeechContext()
            {
                Phrases = { File.ReadLines(CloudUtility.SwearList) }
            };
            var speechOperation = await client.LongRunningRecognizeAsync(new RecognitionConfig()
            {
                Encoding = RecognitionConfig.Types.AudioEncoding.Flac,

                LanguageCode          = "en-US",
                EnableWordTimeOffsets = true,
                SpeechContexts        = { context }
            }, RecognitionAudio.FromFile(uri));

            speechOperation = await speechOperation.PollUntilCompletedAsync();

            var    response = speechOperation.Result;
            string builder  = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    builder += alternative.Transcript;
                }
                builder += Environment.NewLine;
            }
            callback(builder);
        }
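The client field checked for null above is not shown in this listing, and CloudUtility.SwearList is presumably a text file with one hint phrase per line. A minimal sketch of the field (an assumption, not the original code):

        // Assumed initialization of the client field used by GetTranscript.
        private readonly SpeechClient client = SpeechClient.Create();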
Example #15
        // [END speech_transcribe_multichannel_beta]

        // [START speech_transcribe_diarization]
        static object SyncRecognizeMultipleSpeakers(string filePath, int numberOfSpeakers)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding          = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode      = "en",
                DiarizationConfig = new SpeakerDiarizationConfig()
                {
                    EnableSpeakerDiarization = true,
                    // Use the caller-supplied speaker count instead of a
                    // hard-coded value.
                    MinSpeakerCount          = numberOfSpeakers,
                    MaxSpeakerCount          = numberOfSpeakers
                }
            }, RecognitionAudio.FromFile(filePath));

            // Print out the results.
            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine($"Transcript: { alternative.Transcript}");
                    Console.WriteLine("Word details:");
                    Console.WriteLine($" Word count:{alternative.Words.Count}");
                    foreach (var item in alternative.Words)
                    {
                        Console.WriteLine($"  {item.Word}");
                        Console.WriteLine($"  Speaker: {item.SpeakerTag}");
                    }
                }
            }

            return 0;
        }
Example #16
        static object SyncRecognizeWithCredentials(string filePath, string credentialsFilePath)
        {
            GoogleCredential googleCredential;

            using (Stream stream = new FileStream(credentialsFilePath, FileMode.Open))
            {
                googleCredential = GoogleCredential.FromStream(stream);
            }

            var channel = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.Host,
                                                googleCredential.ToChannelCredentials());
            var speech   = SpeechClient.Create(channel);
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile(filePath));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            return 0;
        }
Example #17
        static object RecognizeWithContext(string filePath, IEnumerable <string> phrases)
        {
            var speech = SpeechClient.Create();
            var config = new RecognitionConfig()
            {
                SpeechContexts =
                {
                    new SpeechContext()
                    {
                        Phrases = { phrases }
                    }
                },
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            };
            var audio = IsStorageUri(filePath) ?
                        RecognitionAudio.FromStorageUri(filePath) :
                        RecognitionAudio.FromFile(filePath);
            var response = speech.Recognize(config, audio);

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            return 0;
        }
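IsStorageUri is referenced but not defined in this listing. A one-line sketch, assuming it simply tests for the Cloud Storage URI scheme:

        // Assumed helper: treats any gs:// path as a Cloud Storage URI.
        static bool IsStorageUri(string path) =>
            path != null && path.ToLowerInvariant().StartsWith("gs://");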
Example #18
        // [END speech_transcribe_diarization]

        // [START speech_transcribe_recognition_metadata]
        static object SyncRecognizeRecognitionMetadata(string filePath)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Flac,
                LanguageCode = "en",
                Metadata     = new RecognitionMetadata()
                {
                    OriginalMediaType = RecognitionMetadata.Types.OriginalMediaType.Audio,
                    OriginalMimeType  = "audio/mp3",

                    // The kind of device used to capture the audio
                    RecordingDeviceType = RecognitionMetadata.Types.RecordingDeviceType.OtherIndoorDevice,

                    // Use case of the audio, e.g. PHONE_CALL, DISCUSSION, etc
                    InteractionType = RecognitionMetadata.Types.InteractionType.VoiceSearch,

                    // The name of the device used to make the recording.
                    // Arbitrary string, e.g. 'Pixel XL', 'VoIP', or other value
                    RecordingDeviceName = "Pixel XL"
                }
            }, RecognitionAudio.FromFile(filePath));

            foreach (var result in response.Results)
            {
                Console.WriteLine($"Transcript: { result.Alternatives[0].Transcript}");
            }
            return 0;
        }
Example #19
        public string ConvertAudioToText(string path)
        {
            var speech = SpeechClient.Create();
            var config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz = 16000,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            var audio = RecognitionAudio.FromFile(path);

            var response = speech.Recognize(config, audio);

            var sd = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    sd += alternative.Transcript;
                    //Console.WriteLine(alternative.Transcript);
                }
            }
            return(sd);
        }
Example #20
 public void FromFile()
 {
     using (var tempFile = TempFile.Generate(500))
     {
         var audio = RecognitionAudio.FromFile(tempFile.Name);
         Assert.Equal(tempFile.Bytes, audio.Content.ToByteArray());
     }
 }
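TempFile is a test helper that is not shown in this listing. A minimal sketch consistent with its usage above, assuming Generate(n) writes n random bytes to a temporary file and exposes both the path and the bytes:

 // Assumed test helper consistent with the usage above.
 internal sealed class TempFile : IDisposable
 {
     public string Name { get; }
     public byte[] Bytes { get; }

     private TempFile(string name, byte[] bytes)
     {
         Name  = name;
         Bytes = bytes;
     }

     public static TempFile Generate(int byteCount)
     {
         var bytes = new byte[byteCount];
         new Random().NextBytes(bytes);
         var name = Path.GetTempFileName();
         File.WriteAllBytes(name, bytes);
         return new TempFile(name, bytes);
     }

     public void Dispose() => File.Delete(Name);
 }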
Example #21
        public async Task FromFileAsync()
        {
            using (var tempFile = TempFile.Generate(500))
            {
                var audio = await RecognitionAudio.FromFileAsync(tempFile.Name);

                Assert.Equal(tempFile.Bytes, audio.Content.ToByteArray());
            }
        }
Example #22
        private static RecognitionAudio LoadResourceAudio(string name)
        {
            var type = typeof(SpeechClientSnippets);

            using (var stream = type.GetTypeInfo().Assembly.GetManifestResourceStream($"{type.Namespace}.{name}"))
            {
                return RecognitionAudio.FromStream(stream);
            }
        }
Example #23
        public List <RecognitionAudio> ConvertRecognitionAudioFiles(List <string> pathlist)
        {
            List <RecognitionAudio> audioList = new List <RecognitionAudio>();

            foreach (var path in pathlist)
            {
                audioList.Add(RecognitionAudio.FromStorageUri($"gs://{this.baseConnector.BucketName}/{path}"));
            }
            return audioList;
        }
Example #24
        private void SearchButton_Click(object sender, EventArgs e)
        {
            string wolframKey      = "Wolframkeywhichicantuploadongithub";
            string credential_path = @"I:/IgenVoiceRecognition-file.json";

            System.Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", credential_path);
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile("C:/Users/PrAnk/Desktop/test.flac"));


            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    string str = alternative.Transcript;
                    if (str.StartsWith("answer"))
                    {
                        string wolframUrl = "http://api.wolframalpha.com/v2/query?input=" + str + "&appid=" + wolframKey;

                        /*
                         * WolframAlpha wolfram = new WolframAlpha(wolframKey);
                         *
                         * //Then you simply query Wolfram|Alpha like this
                         * //Note that the spelling error will be correct by Wolfram|Alpha
                         * QueryResult results = wolfram.Query("Who is Danald Duck?");
                         *
                         * //The QueryResult object contains the parsed XML from Wolfram|Alpha. Lets look at it.
                         * //The results from wolfram is split into "pods". We just print them.
                         * if (results != null)
                         * {
                         *  foreach (Pod pod in results.Pods)
                         *  {
                         *      Console.WriteLine(pod.Title);
                         *      if (pod.SubPods != null)
                         *      {
                         *          foreach (SubPod subPod in pod.SubPods)
                         *          {
                         *              Console.WriteLine(subPod.Title);
                         *              MessageBox.Show(subPod.Title,subPod.Plaintext);
                         *          }
                         *      }
                         *  }
                         * }
                         */
                    }
                    Process.Start("chrome", alternative.Transcript);
                }
            }
        }
Example #25
        static void Main(string[] args)
        {
            var client  = SpeechClient.Create();
            var results = client.Recognize(new RecognitionConfig()
            {
                Encoding     = AudioEncoding.Flac,
                LanguageCode = "en",
            }, RecognitionAudio.FromFile(@"c:\users\rennie\Music\audio.flac"));

            JsonDumper.Dump(results);
        }
Example #26
        public async Task <dynamic> AnalyzeSpeechAsync(byte[] audio, int frequency, string language, int encoding)
        {
            var client = SpeechClient.Create();

            return await client.RecognizeAsync(new RecognitionConfig
            {
                Encoding = (RecognitionConfig.Types.AudioEncoding)encoding,
                SampleRateHertz = frequency,
                LanguageCode = language
            }, RecognitionAudio.FromBytes(audio));
        }
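A hypothetical call site for the method above, showing how the raw int parameter maps onto the encoding enum (the file name and audio parameters are assumptions):

        // Hypothetical usage: 16 kHz LINEAR16 PCM, US English.
        public async Task CallAnalyzeSpeechAsync()
        {
            byte[] audioBytes = File.ReadAllBytes("speech.raw"); // assumed input file
            var result = await AnalyzeSpeechAsync(audioBytes, 16000, "en-US",
                                                  (int)RecognitionConfig.Types.AudioEncoding.Linear16);
        }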
Example #27
        public string GetSpeechText(string fileUri)
        {
            var response = _client
                           .LongRunningRecognize(_config, RecognitionAudio.FromStorageUri(fileUri))
                           .PollUntilCompleted();

            return response.Result.Results
                   .Select(x => x.Alternatives.First().Transcript)
                   .Aggregate((x, y) =>
                              $"{x} {Environment.NewLine}{y}");
        }
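_client and _config come from elsewhere in the class and are not shown. A minimal sketch of plausible initialization (the encoding and sample rate are assumptions and must match the source audio):

        // Assumed field initialization for GetSpeechText above.
        private readonly SpeechClient _client = SpeechClient.Create();
        private readonly RecognitionConfig _config = new RecognitionConfig
        {
            Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 16000, // assumed; must match the audio at fileUri
            LanguageCode    = "en-US"
        };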
Example #28
        public async Task <string> Recognize(byte[] file)
        {
            var speech   = SpeechClient.Create();
            var response = await speech.RecognizeAsync(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode = "en",
            }, RecognitionAudio.FromBytes(file));

            return response.Results?.FirstOrDefault()?.Alternatives?.FirstOrDefault()?.Transcript;
        }
Example #29
        static void Main(string[] args)
        {
            var client  = SpeechClient.Create();
            var results = client.Recognize(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Flac,
                LanguageCode = "en",
            }, RecognitionAudio.FromFile("GCP_Speech_Input_Sample.flac"));

            JsonDumper.Dump(results);
        }
Example #30
 /// <summary>Snippet for Recognize</summary>
 public void Recognize()
 {
     // Snippet: Recognize(RecognitionConfig, RecognitionAudio, CallSettings)
     // Create client
     SpeechClient speechClient = SpeechClient.Create();
     // Initialize request argument(s)
     RecognitionConfig config = new RecognitionConfig();
     RecognitionAudio  audio  = new RecognitionAudio();
     // Make the request
     RecognizeResponse response = speechClient.Recognize(config, audio);
     // End snippet
 }