        /// <summary>Snippet for LongRunningRecognizeAsync</summary>
        public async Task LongRunningRecognizeAsync()
        {
            // Snippet: LongRunningRecognizeAsync(RecognitionConfig,RecognitionAudio,CallSettings)
            // Additional: LongRunningRecognizeAsync(RecognitionConfig,RecognitionAudio,CancellationToken)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz = 44100,
                LanguageCode    = "en-US",
            };
            RecognitionAudio audio = new RecognitionAudio
            {
                Uri = "gs://bucket_name/file_name.flac",
            };
            // Make the request
            Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response =
                await speechClient.LongRunningRecognizeAsync(config, audio);

            // Poll until the returned long-running operation is complete
            Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> completedResponse =
                await response.PollUntilCompletedAsync();

            // Retrieve the operation result
            LongRunningRecognizeResponse result = completedResponse.Result;

            // Or get the name of the operation
            string operationName = response.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> retrievedResponse =
                await speechClient.PollOnceLongRunningRecognizeAsync(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedResponse.IsCompleted)
            {
                // If it has completed, then access the result
                LongRunningRecognizeResponse retrievedResult = retrievedResponse.Result;
            }
            // End snippet
        }
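
        // Not part of the generated snippet above: a minimal sketch of how the
        // completed operation's result is typically consumed, assuming the v1
        // response shape (repeated SpeechRecognitionResult values, each with
        // confidence-ranked alternatives). PrintTranscript is an illustrative
        // name, not part of the library.
        public void PrintTranscript(LongRunningRecognizeResponse result)
        {
            foreach (SpeechRecognitionResult speechResult in result.Results)
            {
                // Alternatives[0] is the hypothesis the service ranks most likely
                SpeechRecognitionAlternative best = speechResult.Alternatives[0];
                Console.WriteLine($"[{best.Confidence:F2}] {best.Transcript}");
            }
        }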
        /// <summary>Snippet for SyncRecognizeAsync</summary>
        public async Task SyncRecognizeAsync()
        {
            // Snippet: SyncRecognizeAsync(RecognitionConfig,RecognitionAudio,CallSettings)
            // Additional: SyncRecognizeAsync(RecognitionConfig,RecognitionAudio,CancellationToken)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding   = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRate = 44100,
            };
            RecognitionAudio audio = new RecognitionAudio
            {
                Uri = "gs://bucket_name/file_name.flac",
            };
            // Make the request
            SyncRecognizeResponse response = await speechClient.SyncRecognizeAsync(config, audio);

            // End snippet
        }
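
        // Not in the original: RecognitionAudio need not reference a Cloud
        // Storage URI as above. A sketch of loading inline audio instead,
        // assuming the client library's RecognitionAudio.FromFile factory;
        // LoadAudio is an illustrative name.
        public RecognitionAudio LoadAudio(string localPath)
        {
            // Embeds the file's bytes directly in the request; fine for short
            // clips, while long audio should be staged in Cloud Storage
            return RecognitionAudio.FromFile(localPath);
        }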
        /// <summary>Snippet for AsyncRecognizeAsync</summary>
        public async Task AsyncRecognizeAsync_RequestObject()
        {
            // Snippet: AsyncRecognizeAsync(AsyncRecognizeRequest,CallSettings)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            AsyncRecognizeRequest request = new AsyncRecognizeRequest
            {
                Config = new RecognitionConfig(),
                Audio  = new RecognitionAudio(),
            };
            // Make the request
            Operation<AsyncRecognizeResponse> response =
                await speechClient.AsyncRecognizeAsync(request);

            // Poll until the returned long-running operation is complete
            Operation<AsyncRecognizeResponse> completedResponse =
                await response.PollUntilCompletedAsync();

            // Retrieve the operation result
            AsyncRecognizeResponse result = completedResponse.Result;

            // Or get the name of the operation
            string operationName = response.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation<AsyncRecognizeResponse> retrievedResponse =
                await speechClient.PollOnceAsyncRecognizeAsync(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedResponse.IsCompleted)
            {
                // If it has completed, then access the result
                AsyncRecognizeResponse retrievedResult = retrievedResponse.Result;
            }
            // End snippet
        }
        /// <summary>Snippet for LongRunningRecognizeAsync</summary>
        public async Task LongRunningRecognizeRequestObjectAsync()
        {
            // Snippet: LongRunningRecognizeAsync(LongRunningRecognizeRequest, CallSettings)
            // Additional: LongRunningRecognizeAsync(LongRunningRecognizeRequest, CancellationToken)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            LongRunningRecognizeRequest request = new LongRunningRecognizeRequest
            {
                Config       = new RecognitionConfig(),
                Audio        = new RecognitionAudio(),
                OutputConfig = new TranscriptOutputConfig(),
            };
            // Make the request
            Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response = await speechClient.LongRunningRecognizeAsync(request);

            // Poll until the returned long-running operation is complete
            Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> completedResponse = await response.PollUntilCompletedAsync();

            // Retrieve the operation result
            LongRunningRecognizeResponse result = completedResponse.Result;

            // Or get the name of the operation
            string operationName = response.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> retrievedResponse = await speechClient.PollOnceLongRunningRecognizeAsync(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedResponse.IsCompleted)
            {
                // If it has completed, then access the result
                LongRunningRecognizeResponse retrievedResult = retrievedResponse.Result;
            }
            // End snippet
        }
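
        // Not in the original: the empty TranscriptOutputConfig above carries
        // no destination. A sketch of how it is typically populated, assuming
        // the v1 message exposes a GcsUri field; the bucket and object names
        // are placeholders.
        public TranscriptOutputConfig BuildOutputConfig()
        {
            return new TranscriptOutputConfig
            {
                // Cloud Storage location where the service writes the transcript
                GcsUri = "gs://bucket_name/transcript_output.json",
            };
        }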
        /// <summary>Snippet for SyncRecognizeAsync</summary>
        public async Task SyncRecognizeAsync_RequestObject()
        {
            // Snippet: SyncRecognizeAsync(SyncRecognizeRequest,CallSettings)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            SyncRecognizeRequest request = new SyncRecognizeRequest
            {
                Config = new RecognitionConfig
                {
                    Encoding   = RecognitionConfig.Types.AudioEncoding.Flac,
                    SampleRate = 44100,
                },
                Audio = new RecognitionAudio
                {
                    Uri = "gs://bucket_name/file_name.flac",
                },
            };
            // Make the request
            SyncRecognizeResponse response = await speechClient.SyncRecognizeAsync(request);

            // End snippet
        }
        /// <summary>
        /// Performs an asynchronous speech recognition of the uploaded file.
        /// </summary>
        private async Task Recognize()
        {
            OnRecognizing(EventArgs.Empty);

            try
            {
                var speech = await SpeechClient.CreateAsync();

                LongRunningRecognizeRequest request = new LongRunningRecognizeRequest
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding                   = RecognitionConfig.Types.AudioEncoding.Flac,
                        AudioChannelCount          = Channels,
                        SampleRateHertz            = SampleRate,
                        LanguageCode               = Language,
                        EnableAutomaticPunctuation = true,
                        DiarizationConfig          = new SpeakerDiarizationConfig()
                        {
                            EnableSpeakerDiarization = true,
                            MinSpeakerCount          = SpeakersCount,
                            MaxSpeakerCount          = SpeakersCount
                        }
                    },
                    Audio = RecognitionAudio.FromStorageUri(GSUri)
                };

                Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response = await speech.LongRunningRecognizeAsync(request);

                Operation<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> completedResponse = await response.PollUntilCompletedAsync();

                var text = "";

                var result = completedResponse.Result.Results;

                if (result.Count > 0)
                {
                    // With diarization enabled, the final result contains every
                    // recognized word annotated with its speaker tag
                    var alternative       = result[result.Count - 1].Alternatives[0];
                    int currentSpeakerTag = 0;

                    for (var i = 0; i < alternative.Words.Count; i++)
                    {
                        var wordInfo = alternative.Words[i];
                        if (currentSpeakerTag == wordInfo.SpeakerTag)
                        {
                            text += $" {wordInfo.Word}";
                        }
                        else
                        {
                            if (text != "")
                            {
                                text += "\n";
                            }
                            if (SpeakersCount > 1)
                            {
                                text += $"- Intervenant {wordInfo.SpeakerTag} : ";
                            }
                            text += wordInfo.Word;
                            currentSpeakerTag = wordInfo.SpeakerTag;
                        }
                    }
                }

                Transcript = text;

                #if DEBUG
                Transcript += "\n\n" + result.ToString();
                #endif

                OnRecognized(EventArgs.Empty);
            }
            catch (Exception e)
            {
                throw new TranscriptException("An error occurred during transcription.", e);
            }
        }
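
        // Not in the original: the loop above grows the transcript with string
        // concatenation, which is quadratic for long recordings. A sketch of
        // the same speaker-grouping logic over a StringBuilder (assumes the
        // usual System.Text and System.Collections.Generic usings); FormatWords
        // is a hypothetical helper name.
        private static string FormatWords(IEnumerable<WordInfo> words, int speakersCount)
        {
            var sb = new StringBuilder();
            int currentSpeakerTag = 0;

            foreach (WordInfo wordInfo in words)
            {
                if (currentSpeakerTag == wordInfo.SpeakerTag)
                {
                    // Same speaker keeps talking: append the word to the line
                    sb.Append(' ').Append(wordInfo.Word);
                }
                else
                {
                    // Speaker changed: start a new line, labeled when there
                    // is more than one speaker
                    if (sb.Length > 0)
                    {
                        sb.Append('\n');
                    }
                    if (speakersCount > 1)
                    {
                        sb.Append($"- Speaker {wordInfo.SpeakerTag}: ");
                    }
                    sb.Append(wordInfo.Word);
                    currentSpeakerTag = wordInfo.SpeakerTag;
                }
            }
            return sb.ToString();
        }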
 public CloudInterface()
 {
     // Fire-and-forget initialization: `client` may still be null if the
     // client is used before this task completes
     Task.Run(async () => client = await SpeechClient.CreateAsync());
 }
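
 // Not in the original: a sketch of a safer variant of the constructor above.
 // Fire-and-forget creation can leave `client` null on first use and swallows
 // any creation failure; storing the Task lets callers await readiness.
 // `_clientTask` and GetClientAsync are hypothetical names.
 private readonly Task<SpeechClient> _clientTask = SpeechClient.CreateAsync();

 private Task<SpeechClient> GetClientAsync() => _clientTask;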