Example #1
 /// <summary>
 /// Perform synchronous speech-recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="request">
 /// The request object containing all of the parameters for the API call.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// The RPC response.
 /// </returns>
 public override SyncRecognizeResponse SyncRecognize(
     SyncRecognizeRequest request,
     CallSettings callSettings = null)
 {
     Modify_SyncRecognizeRequest(ref request, ref callSettings);
     return _callSyncRecognize.Sync(request, callSettings);
 }
Example #2
        /// <summary>
        /// Perform synchronous speech-recognition: receive results after all audio
        /// has been sent and processed.
        /// </summary>
        /// <param name="config">
        /// [Required] The `config` message provides information to the recognizer
        /// that specifies how to process the request.
        /// </param>
        /// <param name="audio">
        /// [Required] The audio data to be recognized.
        /// </param>
        /// <param name="callSettings">
        /// If not null, applies overrides to this RPC call.
        /// </param>
        /// <returns>
        /// The RPC response.
        /// </returns>
        public override SyncRecognizeResponse SyncRecognize(
            RecognitionConfig config,
            RecognitionAudio audio,
            CallSettings callSettings = null)
        {
            SyncRecognizeRequest request = new SyncRecognizeRequest
            {
                Config = config,
                Audio  = audio,
            };

            Modify_SyncRecognizeRequest(ref request, ref callSettings);
            return _callSyncRecognize.Sync(request, callSettings);
        }
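For comparison with the request-object snippets below, a usage sketch of this (config, audio) convenience overload might look like the following; the encoding, sample rate, and bucket URI are placeholder values borrowed from the snippets further down.
 public void SyncRecognize_ConfigAndAudio()
 {
     // Create client
     SpeechClient speechClient = SpeechClient.Create();
     // Initialize arguments; values are illustrative only
     RecognitionConfig config = new RecognitionConfig
     {
         Encoding = RecognitionConfig.Types.AudioEncoding.Flac,
         SampleRate = 44100,
     };
     RecognitionAudio audio = new RecognitionAudio
     {
         Uri = "gs://bucket_name/file_name.flac",
     };
     // Make the request through the convenience overload
     SyncRecognizeResponse response = speechClient.SyncRecognize(config, audio);
 }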
Example #3
 public void SyncRecognize_RequestObject()
 {
     // Snippet: SyncRecognize(SyncRecognizeRequest,CallSettings)
     // Create client
     SpeechClient speechClient = SpeechClient.Create();
     // Initialize request argument(s)
     SyncRecognizeRequest request = new SyncRecognizeRequest
     {
         Config = new RecognitionConfig(),
         Audio  = new RecognitionAudio(),
     };
     // Make the request
     SyncRecognizeResponse response = speechClient.SyncRecognize(request);
     // End snippet
 }
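The snippets stop at the call itself; a minimal sketch of reading the response is shown below. It assumes the v1beta1 SyncRecognizeResponse exposes a repeated Results field whose SpeechRecognitionResult entries carry Alternatives with Transcript and Confidence.
 // Sketch: iterate the recognition results (type/field names assumed as above)
 foreach (SpeechRecognitionResult result in response.Results)
 {
     foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
     {
         Console.WriteLine($"{alternative.Transcript} (confidence {alternative.Confidence})");
     }
 }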
Example #4
        /// <summary>
        /// Performs synchronous speech recognition: receives results after all audio
        /// has been sent and processed.
        /// Documentation https://developers.google.com/speech/v1beta1/reference/speech/syncrecognize
        /// Generation Note: This does not always build correctly. Google needs to standardise things; I need to figure out which ones are wrong.
        /// </summary>
        /// <param name="service">Authenticated Speech service.</param>
        /// <param name="body">A valid Speech v1beta1 body.</param>
        /// <returns>SyncRecognizeResponse</returns>
        public static SyncRecognizeResponse Syncrecognize(SpeechService service, SyncRecognizeRequest body)
        {
            try
            {
                // Initial validation.
                if (service == null)
                {
                    throw new ArgumentNullException("service");
                }
                if (body == null)
                {
                    throw new ArgumentNullException("body");
                }

                // Make the request.
                return service.Speech.Syncrecognize(body).Execute();
            }
            catch (Exception ex)
            {
                throw new Exception("Request Speech.Syncrecognize failed.", ex);
            }
        }
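A hypothetical call site for this helper is sketched below; it assumes the Google.Apis.Speech.v1beta1 data classes use a string-typed Encoding, and that the SpeechService has already been created with valid credentials elsewhere.
        // Hypothetical usage sketch; "service" is an authenticated SpeechService
        SyncRecognizeRequest body = new SyncRecognizeRequest
        {
            Config = new RecognitionConfig
            {
                Encoding = "FLAC",        // assumed string-typed in the REST data model
                SampleRate = 44100,
            },
            Audio = new RecognitionAudio
            {
                Uri = "gs://bucket_name/file_name.flac",
            },
        };
        SyncRecognizeResponse response = Syncrecognize(service, body);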
Example #5
 public void SyncRecognize_RequestObject()
 {
     // Snippet: SyncRecognize(SyncRecognizeRequest,CallSettings)
     // Create client
     SpeechClient speechClient = SpeechClient.Create();
     // Initialize request argument(s)
     SyncRecognizeRequest request = new SyncRecognizeRequest
     {
         Config = new RecognitionConfig
         {
             Encoding   = RecognitionConfig.Types.AudioEncoding.Flac,
             SampleRate = 44100,
         },
         Audio = new RecognitionAudio
         {
             Uri = "gs://bucket_name/file_name.flac",
         },
     };
     // Make the request
     SyncRecognizeResponse response = speechClient.SyncRecognize(request);
     // End snippet
 }
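The same request can carry inline audio instead of a Cloud Storage URI; the sketch below assumes RecognitionAudio.Content takes a Google.Protobuf.ByteString, and the local file path is a placeholder.
 // Sketch: inline audio content instead of a gs:// URI (path is a placeholder)
 SyncRecognizeRequest request = new SyncRecognizeRequest
 {
     Config = new RecognitionConfig
     {
         Encoding   = RecognitionConfig.Types.AudioEncoding.Flac,
         SampleRate = 44100,
     },
     Audio = new RecognitionAudio
     {
         Content = Google.Protobuf.ByteString.CopyFrom(System.IO.File.ReadAllBytes("file_name.flac")),
     },
 };
 SyncRecognizeResponse response = speechClient.SyncRecognize(request);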
Example #6
 // Partial modifier methods contain '_' to ensure no name conflicts with RPC methods.
 partial void Modify_SyncRecognizeRequest(ref SyncRecognizeRequest request, ref CallSettings settings);
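A partial method like this has no effect unless it is implemented in another file of the same partial class, inside the generated assembly. A minimal sketch of such an implementation follows; treating LanguageCode as present on the v1beta1 RecognitionConfig and defaulting it to "en-US" are assumptions for illustration only.
 // Sketch only: must live in another part of the same partial client class.
 partial void Modify_SyncRecognizeRequest(ref SyncRecognizeRequest request, ref CallSettings settings)
 {
     // Assumption: fill in a default language code when the caller left it empty.
     if (request.Config != null && string.IsNullOrEmpty(request.Config.LanguageCode))
     {
         request.Config.LanguageCode = "en-US";
     }
 }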
Example #7
 /// <summary>
 /// Perform synchronous speech-recognition: receive results after all audio
 /// has been sent and processed.
 /// </summary>
 /// <param name="request">
 /// The request object containing all of the parameters for the API call.
 /// </param>
 /// <param name="callSettings">
 /// If not null, applies overrides to this RPC call.
 /// </param>
 /// <returns>
 /// The RPC response.
 /// </returns>
 public virtual SyncRecognizeResponse SyncRecognize(
     SyncRecognizeRequest request,
     CallSettings callSettings = null)
 {
     throw new NotImplementedException();
 }
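Since this base implementation is virtual and only throws, a test double can derive from SpeechClient and override it with a canned response; a minimal sketch:
 // Minimal fake client for unit tests; never touches the network.
 public class FakeSpeechClient : SpeechClient
 {
     public override SyncRecognizeResponse SyncRecognize(
         SyncRecognizeRequest request,
         CallSettings callSettings = null)
     {
         return new SyncRecognizeResponse();
     }
 }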