/// <summary>
/// Transcribes the FLAC audio file at <paramref name="uri"/> via the Google Cloud
/// Speech long-running recognition API and invokes <paramref name="callback"/> with
/// the assembled transcript (all alternatives of each result concatenated, one line
/// per result). Returns without doing anything if no client is configured.
/// </summary>
/// <param name="uri">Path to a FLAC audio file to transcribe.</param>
/// <param name="callback">Receives the full transcript when recognition completes; ignored if null.</param>
/// <remarks>
/// Changed from <c>async void</c> to <c>async Task</c> so exceptions are observable
/// and callers may await completion; existing fire-and-forget call sites still compile.
/// </remarks>
public async Task GetTranscript(string uri, Action<string> callback)
{
    // No speech client available (e.g. credentials not configured) — nothing to do.
    if (client == null)
    {
        return;
    }

    // Bias recognition toward the phrases listed in the swear-list file.
    // NOTE(review): assumes CloudUtility.SwearList is a readable text file with one phrase per line — confirm.
    var context = new SpeechContext
    {
        Phrases = { File.ReadLines(CloudUtility.SwearList) },
    };

    var speechOperation = await client.LongRunningRecognizeAsync(
        new RecognitionConfig
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
            LanguageCode = "en-US",
            EnableWordTimeOffsets = true,
            SpeechContexts = { context },
        },
        RecognitionAudio.FromFile(uri));

    // Wait (asynchronously) for the long-running operation to finish.
    speechOperation = await speechOperation.PollUntilCompletedAsync();
    var response = speechOperation.Result;

    // StringBuilder avoids the O(n^2) cost of repeated string concatenation
    // the original incurred across many results/alternatives.
    var transcript = new System.Text.StringBuilder();
    foreach (var result in response.Results)
    {
        foreach (var alternative in result.Alternatives)
        {
            transcript.Append(alternative.Transcript);
        }
        // AppendLine appends Environment.NewLine, matching the original output.
        transcript.AppendLine();
    }

    // Null-conditional guards against a null callback instead of throwing NRE.
    callback?.Invoke(transcript.ToString());
}
/// <summary>Snippet for LongRunningRecognizeAsync</summary>
public async Task LongRunningRecognizeAsync_RequestObject()
{
    // Snippet: LongRunningRecognizeAsync(LongRunningRecognizeRequest,CallSettings)
    // Build the client that will issue the recognition call.
    SpeechClient speechClient = await SpeechClient.CreateAsync();

    // Assemble the request: 44.1 kHz FLAC audio in US English, read from a GCS bucket.
    LongRunningRecognizeRequest request = new LongRunningRecognizeRequest
    {
        Config = new RecognitionConfig
        {
            Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
            SampleRateHertz = 44100,
            LanguageCode = "en-US",
        },
        Audio = new RecognitionAudio
        {
            Uri = "gs://bucket_name/file_name.flac",
        },
    };

    // Kick off the long-running recognition operation.
    Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response =
        await speechClient.LongRunningRecognizeAsync(request);

    // Block (asynchronously) until the operation finishes, then read its result.
    Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> completedResponse =
        await response.PollUntilCompletedAsync();
    LongRunningRecognizeResponse result = completedResponse.Result;

    // Alternatively, capture the operation's name so it can be re-fetched later.
    string operationName = response.Name;
    Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> retrievedResponse =
        await speechClient.PollOnceLongRunningRecognizeAsync(operationName);

    // The re-fetched operation may or may not have finished yet.
    if (retrievedResponse.IsCompleted)
    {
        // Finished — its result is now safe to read.
        LongRunningRecognizeResponse retrievedResult = retrievedResponse.Result;
    }
    // End snippet
}
/// <summary>Snippet for LongRunningRecognizeAsync</summary>
public async Task LongRunningRecognizeAsync()
{
    // Snippet: LongRunningRecognizeAsync(RecognitionConfig, RecognitionAudio, CallSettings)
    // Additional: LongRunningRecognizeAsync(RecognitionConfig, RecognitionAudio, CancellationToken)
    // Build the client that will issue the recognition call.
    SpeechClient speechClient = await SpeechClient.CreateAsync();

    // Empty config and audio — placeholders to be filled in by the reader.
    RecognitionConfig config = new RecognitionConfig();
    RecognitionAudio audio = new RecognitionAudio();

    // Kick off the long-running recognition operation.
    Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response =
        await speechClient.LongRunningRecognizeAsync(config, audio);

    // Block (asynchronously) until the operation finishes, then read its result.
    Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> completedResponse =
        await response.PollUntilCompletedAsync();
    LongRunningRecognizeResponse result = completedResponse.Result;

    // Alternatively, capture the operation's name so it can be re-fetched later.
    string operationName = response.Name;
    Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> retrievedResponse =
        await speechClient.PollOnceLongRunningRecognizeAsync(operationName);

    // The re-fetched operation may or may not have finished yet.
    if (retrievedResponse.IsCompleted)
    {
        // Finished — its result is now safe to read.
        LongRunningRecognizeResponse retrievedResult = retrievedResponse.Result;
    }
    // End snippet
}