Example #1
        public string ConvertAudioToText(string path)
        {
            var speech = SpeechClient.Create();
            var config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz = 16000,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            var audio = RecognitionAudio.FromFile(path);

            var response = speech.Recognize(config, audio);

            var transcript = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    transcript += alternative.Transcript;
                }
            }
            return transcript;
        }
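A minimal usage sketch for the helper above; the containing Transcriber class name and the audio path are hypothetical, and the file must be 16 kHz FLAC to match the config:

        // Hypothetical usage sketch; class name and path are illustrative.
        var transcriber = new Transcriber();
        string text = transcriber.ConvertAudioToText("speech.flac"); // 16 kHz FLAC expected by the config
        Console.WriteLine(text);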
Example #2
        private void FinishedRecordEventHandler(AudioClip clip)
        {
            if (_startRecordButton.interactable)
            {
                _speechRecognitionState.color = Color.yellow;
            }

            if (clip == null)
            {
                return;
            }

            RecognitionConfig config = RecognitionConfig.GetDefault();

            config.languageCode      = ((Enumerators.LanguageCode)_languageDropdown.value).Parse();
            config.audioChannelCount = clip.channels;
            // Configure other parameters of the config if needed.

            GeneralRecognitionRequest recognitionRequest = new GeneralRecognitionRequest()
            {
                audio = new RecognitionAudioContent()
                {
                    content = clip.ToBase64()
                },
                //audio = new RecognitionAudioUri() // for Google Cloud Storage object
                //{
                //	uri = "gs://bucketName/object_name"
                //},
                config = config
            };

            _speechRecognition.Recognize(recognitionRequest);
        }
Example #3
        //This function calls the Google Speech API and transcribes the audio at the provided path.
        //The text is then returned as a string for further processing.
        //It will return "NO RESPONSE" if Google could not detect anything.

        public string Send_Value(string path)
        {
            string            file_path = path;
            RecognitionAudio  audio1    = RecognitionAudio.FromFile(file_path);
            SpeechClient      client    = SpeechClient.Create();
            RecognitionConfig config    = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 44100,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            RecognizeResponse response = client.Recognize(config, audio1);

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            var output = response.Results;

            if (output.Count != 0)
            {
                var finaloutput = output[0].Alternatives;
                return finaloutput[0].Transcript;
            }
            else
            {
                return "NO RESPONSE";
            }
        }
Example #4
        public void AsyncRecognize()
        {
            // Snippet: AsyncRecognize(RecognitionConfig,RecognitionAudio,CallSettings)
            // Create client
            SpeechClient speechClient = SpeechClient.Create();
            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig();
            RecognitionAudio  audio  = new RecognitionAudio();
            // Make the request
            Operation <AsyncRecognizeResponse> response =
                speechClient.AsyncRecognize(config, audio);

            // Poll until the returned long-running operation is complete
            Operation <AsyncRecognizeResponse> completedResponse =
                response.PollUntilCompleted();
            // Retrieve the operation result
            AsyncRecognizeResponse result = completedResponse.Result;

            // Or get the name of the operation
            string operationName = response.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation <AsyncRecognizeResponse> retrievedResponse =
                speechClient.PollOnceAsyncRecognize(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedResponse.IsCompleted)
            {
                // If it has completed, then access the result
                AsyncRecognizeResponse retrievedResult = retrievedResponse.Result;
            }
            // End snippet
        }
Example #5
 internal SpeechRecognizer()
 {
     try
     {
         client = SpeechClient.Create();
     }
     catch (Exception e)
     {
         Console.ForegroundColor = ConsoleColor.Red;
         Console.WriteLine("SpeechLab server could not start speech recognizer.");
         Console.WriteLine("Perhaps the GOOGLE_APPLICATION_CREDENTIALS environment variable or the corresponding .json file is missing.\n" +
                           "A valid GOOGLE_APPLICATION_CREDENTIALS environment variable is required to run the SpeechLab server.\n\n" +
                           "Full exception message:");
         Console.WriteLine(e.Message);
         Console.WriteLine("\nPress any key to stop the server...");
         Console.ReadKey();
         Environment.Exit(1);
     }
     config = new RecognitionConfig()
     {
         Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
         SampleRateHertz = 44100,
         LanguageCode    = "ru-RU"
     };
 }
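Because this constructor shuts the server down when SpeechClient.Create() cannot find credentials, a preflight check can fail faster with a clearer message. A sketch that assumes only the standard GOOGLE_APPLICATION_CREDENTIALS convention already named in the message above:

 // Sketch: verify the credentials file exists before constructing SpeechRecognizer.
 string credPath = Environment.GetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS");
 if (string.IsNullOrEmpty(credPath) || !File.Exists(credPath))
 {
     Console.WriteLine("GOOGLE_APPLICATION_CREDENTIALS is not set or the file is missing.");
 }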
Example #6
 /// <summary>
 /// Performs asynchronous speech recognition: receive results via the
 /// google.longrunning.Operations interface. Returns either an
 /// `Operation.error` or an `Operation.response` which contains
 /// a `LongRunningRecognizeResponse` message.
 /// </summary>
 /// <param name="config">
 /// *Required* Provides information to the recognizer that specifies how to
 /// process the request.
 /// </param>
 /// <param name="audio">
 /// *Required* The audio data to be recognized.
 /// </param>
 /// <param name="cancellationToken">
 /// A <see cref="CancellationToken"/> to use for this RPC.
 /// </param>
 /// <returns>
 /// A Task containing the RPC response.
 /// </returns>
 public virtual Task <Operation <LongRunningRecognizeResponse> > LongRunningRecognizeAsync(
     RecognitionConfig config,
     RecognitionAudio audio,
     CancellationToken cancellationToken) => LongRunningRecognizeAsync(
     config,
     audio,
     CallSettings.FromCancellationToken(cancellationToken));
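A sketch of calling this overload with a timeout; the client, config, and audio are assumed to be initialized elsewhere, and the five-minute limit is illustrative:

 // Sketch: cancel the long-running RPC if it does not finish within five minutes.
 public static async Task TranscribeWithTimeoutAsync(
     SpeechClient client, RecognitionConfig config, RecognitionAudio audio)
 {
     using (var cts = new CancellationTokenSource(TimeSpan.FromMinutes(5)))
     {
         var operation = await client.LongRunningRecognizeAsync(config, audio, cts.Token);
         var completed = await operation.PollUntilCompletedAsync();
         Console.WriteLine(completed.Result);
     }
 }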
Example #7
        private void RecognizeFile(string file)
        {
            if (string.IsNullOrEmpty(file))
            {
                MessageBox.Show("Please select a wav file first!");
                return;
            }

            if (File.Exists(file) == false)
            {
                MessageBox.Show("The specified WAV file does not exist! Please select another file...");
                return;
            }

            SpeechClient      client = SpeechClient.Create();
            RecognitionConfig config = new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 8000,
                LanguageCode    = "zh-TW",
            };
            RecognitionAudio audio = RecognitionAudio.FromFile(file);
            var response           = client.Recognize(config, audio);

            foreach (var item in response.Results)
            {
                textBox2.AppendLine(item.ToString());
            }
        }
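AppendLine is not a built-in WinForms TextBox member, so the original project presumably defines an extension method; a sketch of what it might look like:

        // Sketch: the AppendLine extension the snippet above appears to rely on.
        public static class TextBoxExtensions
        {
            public static void AppendLine(this TextBox box, string text)
            {
                box.AppendText(text + Environment.NewLine);
            }
        }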
Example #8
        public static void HandleRecognitionCommand(
            uint sampleRate,
            string audioEncoding,
            uint channelsCount,
            uint maxAlternatives,
            bool disableAutomaticPunctuation,
            bool doNotPerformVad,
            float silenceDurationThreshold,
            string audioPath
            )
        {
            RecognitionConfig recognitionConfig = CreateRecognitionConfig(
                sampleRate,
                audioEncoding,
                channelsCount,
                maxAlternatives,
                disableAutomaticPunctuation
                );

            ConfigurateVAD(
                recognitionConfig,
                silenceDurationThreshold,
                doNotPerformVad
                );

            using (var fileStream = GetAudioStream(audioPath, audioEncoding))
            {
                System.Console.WriteLine(_client.Recognize(recognitionConfig, fileStream));
            }
        }
Example #9
        public VoiceManager()
        {
            var outputFolder = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.Desktop), "NAudio");

            Directory.CreateDirectory(outputFolder);
            outputFilePath           = Path.Combine(outputFolder, "recorded.wav");
            waveIn                   = new WaveInEvent();
            waveIn.WaveFormat        = new WaveFormat(44100, 16, 1);
            waveIn.DataAvailable    += waveIn_DataAvailable;
            waveIn.RecordingStopped += waveIn_RecordingStopped;

            writer           = null;
            max_v            = 0;
            isWriting        = false;
            TimeDelay        = 1;  //seconds
            strRecgnResult   = "";
            IdleTimeAmount   = 10; //seconds
            isResultRecieved = false;
            isRecordStarted  = false;

            speech = SpeechClient.Create();
            config = new RecognitionConfig
            {
                LanguageCode = LanguageCodes.Russian.Russia
            };
        }
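The two NAudio event handlers wired in this constructor are defined elsewhere in the original class; a sketch of typical implementations, with the exact bodies being an assumption:

        // Sketch: assumed bodies for the handlers registered above.
        private void waveIn_DataAvailable(object sender, WaveInEventArgs e)
        {
            if (isWriting)
            {
                writer?.Write(e.Buffer, 0, e.BytesRecorded); // append captured PCM to recorded.wav
            }
        }

        private void waveIn_RecordingStopped(object sender, StoppedEventArgs e)
        {
            writer?.Dispose(); // flush and close the WAV file
            writer = null;
        }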
Example #10
        public RecognizeResponse translate([FromBody] string filename)
        {
            string           path  = "C:\\Users\\Dell\\Downloads\\" + filename;
            RecognitionAudio audio = RecognitionAudio.FromFile(path);
            //RecognitionAudio audio2 = RecognitionAudio.FetchFromUri("https://storage.googleapis.com/cloud-samples-tests/speech/brooklyn.flac");
            //RecognitionAudio audio3 = RecognitionAudio.FromStorageUri("gs://my-bucket/my-file");

            /* byte[] bytes = ReadAudioData(); // For example, from a database
             * RecognitionAudio audio4 = RecognitionAudio.FromBytes(bytes);
             *
             * using (Stream stream = OpenAudioStream()) // Any regular .NET stream
             * {
             *   RecognitionAudio audio5 = RecognitionAudio.FromStream(stream);
             * }*/

            SpeechClient      client = SpeechClient.Create();
            RecognitionConfig config = new RecognitionConfig
            {
                Encoding        = AudioEncoding.Linear16,
                SampleRateHertz = 48000,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            RecognizeResponse response = client.Recognize(config, audio);

            return response;
        }
Example #11
        /// <summary>
        /// Sends the voice audio to Google's API and runs HandleSpeech with transcription.
        /// </summary>
        private void TranscribeSpeech(Message m)
        {
            if (m.voice == null)
            {
                throw new Exception.EmptyVoiceMessageException(m);
            }
            if (m.voice.Duration > maxDur)
            {
                MaxDurationExceeded(m);
                return;
            }

            SpeechClient speech = SpeechClient.Create();

            RecognitionConfig config = new RecognitionConfig();

            config.Encoding        = SpeechHandler.VoiceTypeToGoogleType(m.voice.type);
            config.SampleRateHertz = m.voice.sampleRate;
            config.LanguageCode    = languageCode;
            config.ProfanityFilter = false;


            RecognizeResponse resp = speech.Recognize(config, RecognitionAudio.FromStream(m.voice.AudioStream));

            foreach (var result in resp.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    HandleSpeech(m, alternative.Transcript);
                }
            }
        }
Example #12
        private SpeechRecognizer(FileInfo secret)
        {
            if (secret is null)
            {
                throw new ArgumentNullException(nameof(secret));
            }

            if (!secret.Exists)
            {
                throw new FileNotFoundException("secret file not found.", secret.FullName);
            }

            var credential = GoogleCredential.FromFile(secret.FullName);
            var channel    = new Channel(SpeechClient.DefaultEndpoint.Host, credential.ToChannelCredentials());

            this._speech = SpeechClient.Create(channel);
            this._config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16_000,
                LanguageCode    = Thread.CurrentThread.CurrentCulture.Name,
            };
            this._recorder            = new WaveRecorder();
            this._recorder.Completed += this.OnComplete;
        }
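Newer Google.Cloud.Speech.V1 releases favor SpeechClientBuilder over a hand-built gRPC Channel; a sketch of the equivalent construction with the same credential file (compare Example #14 below):

        // Sketch: the same service-account file wired through SpeechClientBuilder.
        var builder = new SpeechClientBuilder
        {
            CredentialsPath = secret.FullName
        };
        SpeechClient speech = builder.Build();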
Example #13
        /// <summary>
        /// Tells the ASR server to start recognition of a buffer
        /// </summary>
        /// <param name="lModel">Language model to be used</param>
        /// <param name="objRecognitionConfig">Parameters to be set only for this recognition</param>
        public void StartRecognition(LanguageModel lModel, RecognitionConfig objRecognitionConfig)
        {
            WriteLog(string.Concat("Thread Id: ", Thread.CurrentThread.ManagedThreadId));
            StringBuilder strCommand = new StringBuilder();

            strCommand.AppendLine(string.Format("{0} {1} {2}", PRODUCT, VERSION, ASR_Command.START_RECOGNITION));

            //Insert the values of the configured parameters
            if (objRecognitionConfig != null)
            {
                strCommand.AppendLine(GetCommand(objRecognitionConfig));
            }

            if (lModel?.Uri != null)
            {
                strCommand.AppendLine(string.Concat("Content-Length: ", lModel.Uri.Length + 8));
                strCommand.AppendLine("Content-Type: " + TEXT_URI_LIST);
                strCommand.AppendLine();
                strCommand.AppendLine(lModel.Uri);
            }
            else if (lModel?.Definition != null)
            {
                strCommand.AppendLine("Content-ID: " + lModel.Id);
                strCommand.AppendLine(string.Concat("Content-Length: ", lModel.Definition.Length + 8));
                strCommand.AppendLine("Content-Type: " + APPLICATION_SRGS);
                strCommand.AppendLine();
                strCommand.AppendLine(lModel.Definition);
            }

            WriteLog("Start Recognition");
            SendCommand(strCommand.ToString());
        }
Example #14
        public static string StartTranslate(string path, string lang)
        {
            var builder = new SpeechClientBuilder();

            builder.CredentialsPath = "key.json";
            var speech = builder.Build();

            var config = new RecognitionConfig
            {
                Encoding          = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode      = lang,
                AudioChannelCount = 1
            };

            var audio = RecognitionAudio.FromFile(path);


            var    response = speech.Recognize(config, audio);
            string fullText = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    fullText += alternative.Transcript;
                }
            }
            return fullText;
        }
Example #15
        public void NoInputTimeOut()
        {
            var recogConfig = new RecognitionConfig
            {
                NoInputTimeoutMilliseconds = 200,
                NoInputTimeoutEnabled      = true
            };

            var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();

            lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
            var audioSource = new FileAudioSource(TestsReferences.AudioSilence, AudioType.DETECT);
            List <RecognitionResult> results = null;

            try
            {
                results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
            }
            catch (Exception ex)
            {
                throw new InternalTestFailureException(ex.Message);
            }

            Assert.IsTrue(results != null && results.Count > 0);
            Assert.AreEqual(RecognitionResultCode.NO_INPUT_TIMEOUT, results[0].ResultCode);
        }
Example #16
        public string Recognize(RecognitionConfig config, Stream audioStream)
        {
            byte[] audioBytes;
            using (MemoryStream buffer = new MemoryStream())
            {
                audioStream.CopyTo(buffer);
                audioBytes = buffer.ToArray();
            }

            RecognizeRequest request = new RecognizeRequest();

            request.Config = config;
            request.Audio  = new RecognitionAudio
            {
                Content = Google.Protobuf.ByteString.CopyFrom(audioBytes, 0, audioBytes.Length)
            };

            var response = _clientSTT.Recognize(request, this.GetMetadataSTT());

            var texts = new List <string>();

            foreach (var result in response.Results)
            {
                foreach (var alt in result.Alternatives)
                {
                    texts.Add(alt.Transcript);
                }
            }

            return string.Join(" ", texts);
        }
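A usage sketch for this wrapper; the recognizer instance, its config, and the WAV path are illustrative:

        // Sketch: run recognition over a local file (names are illustrative).
        using (var file = File.OpenRead("sample.wav"))
        {
            string transcript = recognizer.Recognize(config, file);
            Console.WriteLine(transcript);
        }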
Example #17
        /// <summary>Snippet for LongRunningRecognizeAsync</summary>
        public async Task LongRunningRecognizeAsync()
        {
            // Snippet: LongRunningRecognizeAsync(RecognitionConfig, RecognitionAudio, CallSettings)
            // Additional: LongRunningRecognizeAsync(RecognitionConfig, RecognitionAudio, CancellationToken)
            // Create client
            SpeechClient speechClient = await SpeechClient.CreateAsync();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig();
            RecognitionAudio  audio  = new RecognitionAudio();
            // Make the request
            Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> response = await speechClient.LongRunningRecognizeAsync(config, audio);

            // Poll until the returned long-running operation is complete
            Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> completedResponse = await response.PollUntilCompletedAsync();

            // Retrieve the operation result
            LongRunningRecognizeResponse result = completedResponse.Result;

            // Or get the name of the operation
            string operationName = response.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> retrievedResponse = await speechClient.PollOnceLongRunningRecognizeAsync(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedResponse.IsCompleted)
            {
                // If it has completed, then access the result
                LongRunningRecognizeResponse retrievedResult = retrievedResponse.Result;
            }
            // End snippet
        }
Example #18
        public static int Main(string[] args)
        {
            // Create client
            SpeechClient client = SpeechClient.Create();

            // Initialize request argument(s)
            RecognitionConfig config = new RecognitionConfig
            {
                LanguageCode    = "en-US",
                SampleRateHertz = 44100,
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
            };
            RecognitionAudio audio = new RecognitionAudio
            {
                Uri = "gs://gapic-toolkit/hello.flac",
            };

            // Call API method
            RecognizeResponse response = client.Recognize(config, audio);

            // Show the result
            Console.WriteLine(response);

            // Success
            Console.WriteLine("Smoke test passed OK");
            return 0;
        }
Example #19
        public void Start(RecognitionConfig config)
        {
            _config = config.Windows;
            _stream = new PipeStream(9600);

            if (_speechEngine == null)
            {
                _speechEngine = new SpeechRecognitionEngine();
                _speechEngine.LoadGrammar(new DictationGrammar());

                _speechEngine.SpeechHypothesized += OnSpeechHypothesized;
                _speechEngine.SpeechRecognized   += OnSpeechRecognized;
            }

            var format = new SpeechAudioFormatInfo(48000, AudioBitsPerSample.Sixteen, AudioChannel.Mono);

            _speechEngine.SetInputToAudioStream(_stream, format);

            if (!_recognizing)
            {
                _recognizing = true;
                _speaking    = false;
                _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
        }
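The handlers registered here come from System.Speech; minimal sketch implementations, since the original bodies are not shown:

        // Sketch: assumed bodies for the System.Speech handlers wired above.
        private void OnSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            Console.WriteLine("partial: " + e.Result.Text);
        }

        private void OnSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            Console.WriteLine("final: " + e.Result.Text);
        }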
Example #20
 /// <summary>
 /// Perform asynchronous speech-recognition: receive results via the
 /// google.longrunning.Operations interface. Returns either an
 /// `Operation.error` or an `Operation.response` which contains
 /// an `AsyncRecognizeResponse` message.
 /// </summary>
 /// <param name="config">
 /// [Required] The `config` message provides information to the recognizer
 /// that specifies how to process the request.
 /// </param>
 /// <param name="audio">
 /// [Required] The audio data to be recognized.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// The RPC response.
 /// </returns>
 public virtual Operation AsyncRecognize(
     RecognitionConfig config,
     RecognitionAudio audio,
     CallSettings callSettings = null)
 {
     throw new NotImplementedException();
 }
Example #21
        static object RecognizeWithContext(string filePath, IEnumerable <string> phrases)
        {
            var speech = SpeechClient.Create();
            var config = new RecognitionConfig()
            {
                SpeechContexts = { new SpeechContext { Phrases = { phrases } } },
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            };
            var audio = IsStorageUri(filePath) ?
                        RecognitionAudio.FromStorageUri(filePath) :
                        RecognitionAudio.FromFile(filePath);
            var response = speech.Recognize(config, audio);

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            return 0;
        }
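A usage sketch; the audio path and hint phrases are illustrative:

        // Sketch: bias recognition toward expected phrases (values are illustrative).
        RecognizeWithContext("commercial_mono.wav", new[] { "weather", "whether" });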
Example #22
 /// <summary>
 /// Perform synchronous speech-recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="config">
 /// [Required] The `config` message provides information to the recognizer
 /// that specifies how to process the request.
 /// </param>
 /// <param name="audio">
 /// [Required] The audio data to be recognized.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// A Task containing the RPC response.
 /// </returns>
 public virtual Task <SyncRecognizeResponse> SyncRecognizeAsync(
     RecognitionConfig config,
     RecognitionAudio audio,
     CallSettings callSettings = null)
 {
     throw new NotImplementedException();
 }
Example #23
 /// <summary>
 /// Perform asynchronous speech-recognition: receive results via the
 /// google.longrunning.Operations interface. Returns either an
 /// `Operation.error` or an `Operation.response` which contains
 /// an `AsyncRecognizeResponse` message.
 /// </summary>
 /// <param name="config">
 /// [Required] The `config` message provides information to the recognizer
 /// that specifies how to process the request.
 /// </param>
 /// <param name="audio">
 /// [Required] The audio data to be recognized.
 /// </param>
 /// <param name="cancellationToken">
 /// A <see cref="CancellationToken"/> to use for this RPC.
 /// </param>
 /// <returns>
 /// A Task containing the RPC response.
 /// </returns>
 public virtual Task <Operation> AsyncRecognizeAsync(
     RecognitionConfig config,
     RecognitionAudio audio,
     CancellationToken cancellationToken) => AsyncRecognizeAsync(
     config,
     audio,
     CallSettings.FromCancellationToken(cancellationToken));
Example #24
 /// <summary>
 /// Performs asynchronous speech recognition: receive results via the
 /// google.longrunning.Operations interface. Returns either an
 /// `Operation.error` or an `Operation.response` which contains
 /// a `LongRunningRecognizeResponse` message.
 /// </summary>
 /// <param name="config">
 /// *Required* Provides information to the recognizer that specifies how to
 /// process the request.
 /// </param>
 /// <param name="audio">
 /// *Required* The audio data to be recognized.
 /// </param>
 /// <param name="cancellationToken">
 /// A <see cref="st::CancellationToken"/> to use for this RPC.
 /// </param>
 /// <returns>
 /// A Task containing the RPC response.
 /// </returns>
 public virtual stt::Task <lro::Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> > LongRunningRecognizeAsync(
     RecognitionConfig config,
     RecognitionAudio audio,
     st::CancellationToken cancellationToken) => LongRunningRecognizeAsync(
     config,
     audio,
     gaxgrpc::CallSettings.FromCancellationToken(cancellationToken));
Example #25
 /// <summary>
 /// Performs synchronous speech recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="config">
 /// *Required* Provides information to the recognizer that specifies how to
 /// process the request.
 /// </param>
 /// <param name="audio">
 /// *Required* The audio data to be recognized.
 /// </param>
 /// <param name="cancellationToken">
 /// A <see cref="st::CancellationToken"/> to use for this RPC.
 /// </param>
 /// <returns>
 /// A Task containing the RPC response.
 /// </returns>
 public virtual stt::Task <RecognizeResponse> RecognizeAsync(
     RecognitionConfig config,
     RecognitionAudio audio,
     st::CancellationToken cancellationToken) => RecognizeAsync(
     config,
     audio,
     gaxgrpc::CallSettings.FromCancellationToken(cancellationToken));
Example #26
 /// <summary>
 /// Performs asynchronous speech recognition: receive results via the
 /// google.longrunning.Operations interface. Returns either an
 /// `Operation.error` or an `Operation.response` which contains
 /// a `LongRunningRecognizeResponse` message.
 /// </summary>
 /// <param name="config">
 /// *Required* Provides information to the recognizer that specifies how to
 /// process the request.
 /// </param>
 /// <param name="audio">
 /// *Required* The audio data to be recognized.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// The RPC response.
 /// </returns>
 public virtual lro::Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> LongRunningRecognize(
     RecognitionConfig config,
     RecognitionAudio audio,
     gaxgrpc::CallSettings callSettings = null) => LongRunningRecognize(
     new LongRunningRecognizeRequest
 {
     Config = gax::GaxPreconditions.CheckNotNull(config, nameof(config)),
     Audio  = gax::GaxPreconditions.CheckNotNull(audio, nameof(audio)),
 },
     callSettings);
Example #27
 /// <summary>
 /// Performs synchronous speech recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="config">
 /// *Required* Provides information to the recognizer that specifies how to
 /// process the request.
 /// </param>
 /// <param name="audio">
 /// *Required* The audio data to be recognized.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// The RPC response.
 /// </returns>
 public virtual RecognizeResponse Recognize(
     RecognitionConfig config,
     RecognitionAudio audio,
     gaxgrpc::CallSettings callSettings = null) => Recognize(
     new RecognizeRequest
 {
     Config = gax::GaxPreconditions.CheckNotNull(config, nameof(config)),
     Audio  = gax::GaxPreconditions.CheckNotNull(audio, nameof(audio)),
 },
     callSettings);
Example #28
 /// <summary>
 /// Performs asynchronous speech recognition: receive results via the
 /// google.longrunning.Operations interface. Returns either an
 /// `Operation.error` or an `Operation.response` which contains
 /// a `LongRunningRecognizeResponse` message.
 /// </summary>
 /// <param name="config">
 /// *Required* Provides information to the recognizer that specifies how to
 /// process the request.
 /// </param>
 /// <param name="audio">
 /// *Required* The audio data to be recognized.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// A Task containing the RPC response.
 /// </returns>
 public virtual Task <Operation <LongRunningRecognizeResponse, LongRunningRecognizeMetadata> > LongRunningRecognizeAsync(
     RecognitionConfig config,
     RecognitionAudio audio,
     CallSettings callSettings = null) => LongRunningRecognizeAsync(
     new LongRunningRecognizeRequest
 {
     Config = GaxPreconditions.CheckNotNull(config, nameof(config)),
     Audio  = GaxPreconditions.CheckNotNull(audio, nameof(audio)),
 },
     callSettings);
Example #29
 /// <summary>
 /// Performs synchronous speech recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="config">
 /// *Required* Provides information to the recognizer that specifies how to
 /// process the request.
 /// </param>
 /// <param name="audio">
 /// *Required* The audio data to be recognized.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// A Task containing the RPC response.
 /// </returns>
 public virtual Task <RecognizeResponse> RecognizeAsync(
     RecognitionConfig config,
     RecognitionAudio audio,
     CallSettings callSettings = null) => RecognizeAsync(
     new RecognizeRequest
 {
     Config = GaxPreconditions.CheckNotNull(config, nameof(config)),
     Audio  = GaxPreconditions.CheckNotNull(audio, nameof(audio)),
 },
     callSettings);
Example #30
 /// <summary>
 /// Perform asynchronous speech-recognition: receive results via the
 /// google.longrunning.Operations interface. Returns either an
 /// `Operation.error` or an `Operation.response` which contains
 /// an `AsyncRecognizeResponse` message.
 /// </summary>
 /// <param name="config">
 /// [Required] The `config` message provides information to the recognizer
 /// that specifies how to process the request.
 /// </param>
 /// <param name="audio">
 /// [Required] The audio data to be recognized.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// The RPC response.
 /// </returns>
 public virtual Operation <AsyncRecognizeResponse> AsyncRecognize(
     RecognitionConfig config,
     RecognitionAudio audio,
     CallSettings callSettings = null) => AsyncRecognize(
     new AsyncRecognizeRequest
 {
     Config = config,
     Audio  = audio,
 },
     callSettings);