// [END authenticating]

        // [START run_application]
        /// <summary>
        /// Entry point: reads an audio file path from the command line, submits it to
        /// the Cloud Speech API for asynchronous recognition, and prints every
        /// alternative transcript to the console.
        /// </summary>
        /// <param name="args">args[0] is the path to the audio file
        /// (LINEAR16-encoded, 16 kHz sample rate — per the hard-coded config).</param>
        public static void Main(string[] args)
        {
            // Guard clause: exactly one argument (the audio file path) is required.
            if (args.Length < 1)
            {
                Console.WriteLine("Usage:\nTranscribe audio_file");
                return;
            }
            var service = CreateAuthorizedClient();
            string audio_file_path = args[0];
            // [END run_application]
            // [START construct_request]
            // The audio payload is sent inline, base64-encoded.
            var request = new Google.Apis.CloudSpeechAPI.v1beta1.Data.AsyncRecognizeRequest()
            {
                Config = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionConfig()
                {
                    Encoding     = "LINEAR16",
                    SampleRate   = 16000,
                    LanguageCode = "en-US"
                },
                Audio = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionAudio()
                {
                    Content = Convert.ToBase64String(File.ReadAllBytes(audio_file_path))
                }
            };
            // [END construct_request]
            // [START send_request]
            // Start the long-running operation, then poll once per second until done.
            var asyncResponse = service.Speech.Asyncrecognize(request).Execute();
            var name = asyncResponse.Name;

            Google.Apis.CloudSpeechAPI.v1beta1.Data.Operation op;
            do
            {
                Console.WriteLine("Waiting for server processing...");
                Thread.Sleep(1000);
                op = service.Operations.Get(name).Execute();
            } while (op.Done != true); // nullable bool: loop while Done is null or false

            dynamic results = op.Response["results"];
            foreach (var result in results)
            {
                foreach (var alternative in result.alternatives)
                {
                    Console.WriteLine(alternative.transcript);
                }
            }
            // [END send_request]
        }
        // [END authenticating]
        /// <summary>
        /// Transcribes a local audio file via the Cloud Speech API asynchronous
        /// recognition endpoint and returns every alternative transcript found.
        /// Blocks (polling once per second) until the server-side operation completes.
        /// </summary>
        /// <param name="audio_file_path">Path to the audio file
        /// (LINEAR16-encoded; the config declares a 16 kHz sample rate).</param>
        /// <returns>All alternative transcripts, in the order the API returns them.</returns>
        public static List<string> Transcribe(string audio_file_path)
        {
            var service = CreateAuthorizedClient();
            // [START construct_request]
            var request = new Google.Apis.CloudSpeechAPI.v1beta1.Data.AsyncRecognizeRequest()
            {
                Config = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionConfig()
                {
                    Encoding     = "LINEAR16",
                    // NOTE(review): assumes the input was recorded at 16 kHz
                    // (an earlier revision used 44100) — confirm against the source audio.
                    SampleRate   = 16000,
                    LanguageCode = "vi-VN"
                },
                Audio = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionAudio()
                {
                    Content = Convert.ToBase64String(File.ReadAllBytes(audio_file_path))
                }
            };
            // [END construct_request]
            // [START send_request]
            var asyncResponse = service.Speech.Asyncrecognize(request).Execute();
            var name = asyncResponse.Name;

            // Poll the long-running operation until the server marks it done.
            Google.Apis.CloudSpeechAPI.v1beta1.Data.Operation op;
            do
            {
                Thread.Sleep(1000);
                op = service.Operations.Get(name).Execute();
            } while (op.Done != true); // nullable bool: loop while Done is null or false

            dynamic results = op.Response["results"];
            var outcome = new List<string>();
            foreach (var result in results)
            {
                foreach (var alternative in result.alternatives)
                {
                    outcome.Add(alternative.transcript);
                }
            }
            // [END send_request]
            return outcome;
        }
 // [END authenticating]
 // [START run_application]
 /// <summary>
 /// Entry point: reads an audio file path from the command line, submits it to the
 /// Cloud Speech API for asynchronous recognition, and prints each transcript.
 /// </summary>
 /// <param name="args">args[0] is the path to the audio file
 /// (LINEAR16-encoded, 16 kHz sample rate — per the hard-coded config).</param>
 public static void Main(string[] args)
 {
     // Guard clause: exactly one argument (the audio file path) is required.
     if (args.Length < 1)
     {
         Console.WriteLine("Usage:\nTranscribe audio_file");
         return;
     }
     var service = CreateAuthorizedClient();
     string audio_file_path = args[0];
     // [END run_application]
     // [START construct_request]
     // The audio payload is sent inline, base64-encoded.
     var request = new Google.Apis.CloudSpeechAPI.v1beta1.Data.AsyncRecognizeRequest()
     {
         Config = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionConfig()
         {
             Encoding = "LINEAR16",
             SampleRate = 16000,
             LanguageCode = "en-US"
         },
         Audio = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionAudio()
         {
             Content = Convert.ToBase64String(File.ReadAllBytes(audio_file_path))
         }
     };
     // [END construct_request]
     // [START send_request]
     // Start the long-running operation, then poll once per second until done.
     var asyncResponse = service.Speech.Asyncrecognize(request).Execute();
     var name = asyncResponse.Name;
     Google.Apis.CloudSpeechAPI.v1beta1.Data.Operation op;
     do
     {
         Console.WriteLine("Waiting for server processing...");
         Thread.Sleep(1000);
         op = service.Operations.Get(name).Execute();
     } while (op.Done != true); // nullable bool: loop while Done is null or false
     dynamic results = op.Response["results"];
     foreach (var result in results)
     {
         foreach (var alternative in result.alternatives)
         {
             Console.WriteLine(alternative.transcript);
         }
     }
     // [END send_request]
 }
Exemple #4
0
 /// <summary>Constructs a new Asyncrecognize request.</summary>
 /// <param name="service">The client service used to execute this request.</param>
 /// <param name="body">The request body (recognition config plus audio payload).</param>
 public AsyncrecognizeRequest(Google.Apis.Services.IClientService service, Google.Apis.CloudSpeechAPI.v1beta1.Data.AsyncRecognizeRequest body)
     : base(service)
 {
     Body = body;
     // NOTE(review): InitParameters presumably registers this request's URL/query
     // parameters with the base request class (generated-client pattern) — confirm.
     InitParameters();
 }
Exemple #5
0
 /// <summary>Performs asynchronous speech recognition. Results are delivered through
 /// the google.longrunning.Operations interface: the returned operation eventually
 /// carries either an `Operation.error` or an `Operation.response` holding an
 /// `AsyncRecognizeResponse` message.</summary>
 /// <param name="body">The body of the request.</param>
 public virtual AsyncrecognizeRequest Asyncrecognize(Google.Apis.CloudSpeechAPI.v1beta1.Data.AsyncRecognizeRequest body)
 {
     var request = new AsyncrecognizeRequest(service, body);
     return request;
 }
Exemple #6
0
        // [END authenticating]

        // [START run_application]
        /// <summary>
        /// Transcribes a fixed Cloud Storage FLAC file (declared as LINEAR16, 44.1 kHz,
        /// Danish) with the Cloud Speech API, printing each transcript to the console
        /// and writing the same lines to the TRANSCRIBED_TEXT file.
        /// </summary>
        public static void audio2text()
        {
            var service = CreateAuthorizedClient();
            // [END run_application]
            // [START construct_request]
            // The audio is referenced by Cloud Storage URI instead of inline content.
            var request = new Google.Apis.CloudSpeechAPI.v1beta1.Data.AsyncRecognizeRequest()
            {
                Config = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionConfig()
                {
                    Encoding     = "LINEAR16",
                    SampleRate   = 44100,
                    LanguageCode = "da-DK"
                },
                Audio = new Google.Apis.CloudSpeechAPI.v1beta1.Data.RecognitionAudio()
                {
                    Uri = "gs://tvtranscribe/de_sorte_spejdere_ep1_2min_mono.flac"
                }
            };
            // [END construct_request]
            // [START send_request]
            // Start the long-running operation, then poll once per second until done.
            var asyncResponse = service.Speech.Asyncrecognize(request).Execute();
            var name = asyncResponse.Name;

            Google.Apis.CloudSpeechAPI.v1beta1.Data.Operation op;
            do
            {
                Console.WriteLine("Waiting for server processing...");
                Thread.Sleep(1000);
                op = service.Operations.Get(name).Execute();
            } while (op.Done != true); // nullable bool: loop while Done is null or false

            dynamic results = op.Response["results"];

            // Single pass over the results: echo each transcript to the console and
            // persist it to the output file. StreamWriter(path) creates the file if it
            // is missing (and truncates it otherwise), so no pre-creation is needed.
            using (var file = new System.IO.StreamWriter(TRANSCRIBED_TEXT))
            {
                foreach (var result in results)
                {
                    foreach (var alternative in result.alternatives)
                    {
                        Console.WriteLine(alternative.transcript);
                        file.WriteLine(alternative.transcript);
                    }
                }
            }
            // [END send_request]
        }