Code Example #1
        // [START speech_transcribe_recognition_metadata]
        static object SyncRecognizeRecognitionMetadata(string filePath)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Flac,
                LanguageCode = "en",
                Metadata     = new RecognitionMetadata()
                {
                    OriginalMediaType = RecognitionMetadata.Types.OriginalMediaType.Audio,
                    OriginalMimeType  = "audio/mp3",

                    // The kind of device used to capture the audio
                    RecordingDeviceType = RecognitionMetadata.Types.RecordingDeviceType.OtherIndoorDevice,

                    // Use case of the audio, e.g. PHONE_CALL, DISCUSSION, etc
                    InteractionType = RecognitionMetadata.Types.InteractionType.VoiceSearch,

                    // The name of the device used to make the recording.
                    // Arbitrary string, e.g. 'Pixel XL', 'VoIP', or other value
                    RecordingDeviceName = "Pixel XL"
                }
            }, RecognitionAudio.FromFile(filePath));

            foreach (var result in response.Results)
            {
                Console.WriteLine($"Transcript: { result.Alternatives[0].Transcript}");
            }
            return 0;
        }
Code Example #2
        // [START speech_transcribe_diarization]
        static object SyncRecognizeMultipleSpeakers(string filePath, int numberOfSpeakers)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding          = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode      = "en",
                DiarizationConfig = new SpeakerDiarizationConfig()
                {
                    EnableSpeakerDiarization = true,
                    MinSpeakerCount          = 2
                }
            }, RecognitionAudio.FromFile(filePath));

            // Print out the results.
            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine($"Transcript: { alternative.Transcript}");
                    Console.WriteLine("Word details:");
                    Console.WriteLine($" Word count:{alternative.Words.Count}");
                    foreach (var item in alternative.Words)
                    {
                        Console.WriteLine($"  {item.Word}");
                        Console.WriteLine($"  Speaker: {item.SpeakerTag}");
                    }
                }
            }

            return 0;
        }
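A short follow-on sketch (not part of the sample above): with diarization enabled, the final result typically carries the complete word list with speaker tags, so consecutive words can be grouped into per-speaker lines. The helper below assumes the same response object and a using System.Linq; directive.

        static void PrintBySpeaker(RecognizeResponse response)
        {
            // With diarization, the last result usually aggregates all words with tags.
            var words = response.Results.Last().Alternatives[0].Words;
            int currentSpeaker = -1;
            var line = new System.Text.StringBuilder();
            foreach (var word in words)
            {
                if (word.SpeakerTag != currentSpeaker)
                {
                    if (line.Length > 0) Console.WriteLine(line);
                    currentSpeaker = word.SpeakerTag;
                    line.Clear().Append($"Speaker {currentSpeaker}:");
                }
                line.Append(' ').Append(word.Word);
            }
            if (line.Length > 0) Console.WriteLine(line);
        }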
Code Example #3
File: Supreme.cs Project: Autom8tic/Autom8tic.io
        public string ConvertAudioToText(string path)
        {
            var speech = SpeechClient.Create();
            var config = new RecognitionConfig
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz = 16000,
                LanguageCode    = LanguageCodes.English.UnitedStates
            };
            var audio = RecognitionAudio.FromFile(path);

            var response = speech.Recognize(config, audio);

            var sd = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    sd += alternative.Transcript;
                    //Console.WriteLine(alternative.Transcript);
                }
            }
            return(sd);
        }
Code Example #4
        public async Task GetTranscript(string uri, Action<string> callback)
        {
            if (client == null)
            {
                return;
            }
            var context = new SpeechContext()
            {
                Phrases = { File.ReadLines(CloudUtility.SwearList) }
            };
            var speechOperation = await client.LongRunningRecognizeAsync(new RecognitionConfig()
            {
                Encoding = RecognitionConfig.Types.AudioEncoding.Flac,

                LanguageCode          = "en-US",
                EnableWordTimeOffsets = true,
                SpeechContexts        = { context }
            }, RecognitionAudio.FromFile(uri));

            speechOperation = await speechOperation.PollUntilCompletedAsync();

            var    response = speechOperation.Result;
            string builder  = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    builder += alternative.Transcript;
                }
                builder += Environment.NewLine;
            }
            callback(builder);
        }
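A hypothetical call site for the method above, from an async context (the file name and handler are illustrative):

        // Sketch: run the transcription and print the result via the callback.
        await GetTranscript("recording.flac", transcript => Console.WriteLine(transcript));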
Code Example #5
        // [START speech_transcribe_multichannel]
        static object SyncRecognizeMultipleChannels(string filePath, int channelCount)
        {
            var speech = SpeechClient.Create();

            // Create transcription request
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode = "en",
                // Configure request to enable multiple channels
                EnableSeparateRecognitionPerChannel = true,
                AudioChannelCount = channelCount
                // Note: this sample uses a local file.
            }, RecognitionAudio.FromFile(filePath));

            // Print out the results.
            foreach (var result in response.Results)
            {
                // There can be several transcripts for a chunk of audio.
                // Print out the first (most likely) one here.
                var alternative = result.Alternatives[0];
                Console.WriteLine($"Transcript: {alternative.Transcript}");
                Console.WriteLine($"Channel Tag: {result.ChannelTag}");
            }
            return 0;
        }
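If per-channel transcripts need to be collected rather than printed, the same response can be grouped by ChannelTag; a minimal sketch, assuming a using System.Linq; directive:

        static void PrintPerChannel(RecognizeResponse response)
        {
            // One concatenated transcript per audio channel.
            var byChannel = response.Results
                .GroupBy(r => r.ChannelTag)
                .ToDictionary(g => g.Key,
                              g => string.Concat(g.Select(r => r.Alternatives[0].Transcript)));
            foreach (var entry in byChannel)
            {
                Console.WriteLine($"Channel {entry.Key}: {entry.Value}");
            }
        }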
Code Example #6
        // [START speech_sync_recognize_words]
        static object SyncRecognizeWords(string filePath)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz       = 16000,
                LanguageCode          = "en",
                EnableWordTimeOffsets = true,
            }, RecognitionAudio.FromFile(filePath));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine($"Transcript: { alternative.Transcript}");
                    Console.WriteLine("Word details:");
                    Console.WriteLine($" Word count:{alternative.Words.Count}");
                    foreach (var item in alternative.Words)
                    {
                        Console.WriteLine($"  {item.Word}");
                        Console.WriteLine($"    WordStartTime: {item.StartTime}");
                        Console.WriteLine($"    WordEndTime: {item.EndTime}");
                    }
                }
            }
            return 0;
        }
Code Example #7
        static object SyncRecognizeWithCredentials(string filePath, string credentialsFilePath)
        {
            GoogleCredential googleCredential;

            using (Stream m = new FileStream(credentialsFilePath, FileMode.Open))
                googleCredential = GoogleCredential.FromStream(m);
            var channel = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.Host,
                                                googleCredential.ToChannelCredentials());
            var speech   = SpeechClient.Create(channel);
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile(filePath));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            return 0;
        }
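In newer releases of Google.Cloud.Speech.V1, the manual Grpc.Core.Channel wiring above is unnecessary: SpeechClientBuilder (used in the next example) accepts a credentials path directly. A minimal sketch:

        // Sketch: build a client from a service-account JSON file without gRPC plumbing.
        var speech = new SpeechClientBuilder
        {
            CredentialsPath = credentialsFilePath
        }.Build();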
Code Example #8
        public static string StartTranslate(string path, string lang)
        {
            var builder = new SpeechClientBuilder();

            builder.CredentialsPath = "key.json";
            var speech = builder.Build();

            var config = new RecognitionConfig
            {
                Encoding          = RecognitionConfig.Types.AudioEncoding.Linear16,
                LanguageCode      = lang,
                AudioChannelCount = 1
            };

            var audio = RecognitionAudio.FromFile(path);

            var    response = speech.Recognize(config, audio);
            string fullText = "";

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    fullText += alternative.Transcript;
                }
            }
            return fullText;
        }
Code Example #9
        static object RecognizeWithContext(string filePath, IEnumerable <string> phrases)
        {
            var speech = SpeechClient.Create();
            var config = new RecognitionConfig()
            {
                SpeechContexts =
                {
                    new SpeechContext() { Phrases = { phrases } }
                },
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            };
            var audio = IsStorageUri(filePath) ?
                        RecognitionAudio.FromStorageUri(filePath) :
                        RecognitionAudio.FromFile(filePath);
            var response = speech.Recognize(config, audio);

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            return 0;
        }
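The IsStorageUri helper referenced above is not part of the client library; a plausible implementation simply checks for the gs:// scheme:

        // Sketch of the helper assumed by the sample above.
        static bool IsStorageUri(string path) =>
            path != null && path.StartsWith("gs://", StringComparison.OrdinalIgnoreCase);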
Code Example #10
 public void FromFile()
 {
     using (var tempFile = TempFile.Generate(500))
     {
         var audio = RecognitionAudio.FromFile(tempFile.Name);
         Assert.Equal(tempFile.Bytes, audio.Content.ToByteArray());
     }
 }
Code Example #11
File: Speech.cs Project: atiq-cs/demos
        static void Main(string[] args)
        {
            var client  = SpeechClient.Create();
            var results = client.Recognize(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Flac,
                LanguageCode = "en",
            }, RecognitionAudio.FromFile(@"c:\users\rennie\Music\audio.flac"));

            JsonDumper.Dump(results);
        }
Code Example #12
        private void SearchButton_Click(object sender, EventArgs e)
        {
            string wolframKey      = "Wolframkeywhichicantuploadongithub";
            string credential_path = @"I:/IgenVoiceRecognition-file.json";

            System.Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", credential_path);
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile("C:/Users/PrAnk/Desktop/test.flac"));


            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    string str = alternative.Transcript;
                    if (str.StartsWith("answer"))
                    {
                        string wolframUrl = "http://api.wolframalpha.com/v2/query?input=" + Uri.EscapeDataString(str) + "&appid=" + wolframKey;

                        /*
                         * WolframAlpha wolfram = new WolframAlpha(wolframKey);
                         *
                         * //Then you simply query Wolfram|Alpha like this
                         * //Note that the spelling error will be corrected by Wolfram|Alpha
                         * QueryResult results = wolfram.Query("Who is Danald Duck?");
                         *
                         * //The QueryResult object contains the parsed XML from Wolfram|Alpha. Let's look at it.
                         * //The results from Wolfram are split into "pods". We just print them.
                         * if (results != null)
                         * {
                         *  foreach (Pod pod in results.Pods)
                         *  {
                         *      Console.WriteLine(pod.Title);
                         *      if (pod.SubPods != null)
                         *      {
                         *          foreach (SubPod subPod in pod.SubPods)
                         *          {
                         *              Console.WriteLine(subPod.Title);
                         *              MessageBox.Show(subPod.Title,subPod.Plaintext);
                         *          }
                         *      }
                         *  }
                         * }
                         */
                    }
                    Process.Start("chrome", alternative.Transcript);
                }
            }
        }
Code Example #13
        static void Main(string[] args)
        {
            var client  = SpeechClient.Create();
            var results = client.Recognize(new RecognitionConfig()
            {
                Encoding     = RecognitionConfig.Types.AudioEncoding.Flac,
                LanguageCode = "en",
            }, RecognitionAudio.FromFile("GCP_Speech_Input_Sample.flac"));

            JsonDumper.Dump(results);
        }
Code Example #14
 // Data processing functions
 public static bool getGoogleResponse(string[] args)
 {
     Console.WriteLine("Getting Google data...");
     // Recognize is an instance method, so create a client first.
     var speech = SpeechClient.Create();
     lastResponse = speech.Recognize(new RecognitionConfig()
     {
         Encoding              = RecognitionConfig.Types.AudioEncoding.Flac,
         SampleRateHertz       = 44100,
         LanguageCode          = "en",
         EnableWordTimeOffsets = true
     }, RecognitionAudio.FromFile(FilePath));
     Console.WriteLine("Data retrieved.");
     return true;
 }
Code Example #15
File: SpeechBehaviour.cs Project: mrk21/sandbox
    public void OnClickGCPRecognitionButton()
    {
        var response = client.Recognize(new RecognitionConfig()
        {
            Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
            SampleRateHertz = 22050,
            LanguageCode    = LanguageCodes.Japanese.Japan,
        }, RecognitionAudio.FromFile(Application.streamingAssetsPath + "/test.flac"));

        Debug.Log(response);
    }
Code Example #16
 private void btnSpeechInfo_Click(object sender, EventArgs e)
 {
     if (File.Exists("audio.raw"))
     {
         var  speech = SpeechClient.Create();
         bool currentAutoPunctuation = currentLanguage == "en";
         var  response = speech.Recognize(new RecognitionConfig()
         {
             Encoding                   = RecognitionConfig.Types.AudioEncoding.Linear16,
             SampleRateHertz            = 8000,
             LanguageCode               = currentLanguage,
             EnableAutomaticPunctuation = currentAutoPunctuation
         }, RecognitionAudio.FromFile("audio.raw"));
         textBox1.Text = "";
         foreach (var result in response.Results)
         {
             foreach (var alternative in result.Alternatives)
             {
                 speechConverter.OutputData = textBox1.Text + " " + alternative.Transcript;
             }
         }
         textBox1.Text = speechConverter.OutputData;
         if (textBox1.Text.Length == 0)
         {
             textBox1.Text = "No data";
         }
         speechConverter.OutputData = textBox1.Text;
     }
     else
     {
         textBox1.Text = "Audio File Missing";
         return;
     }
     if (textBox1.Text != "No data" && SaveFileDialog.FileName != "")
     {
         TextFileCreator textFileCreator = new TextFileCreator(SaveFileDialog.FileName);
         textFileCreator.WriteToFile(textBox1.Text);
     }
     if (openFileDialog1.FileName != "")
     {
         TextFileCreator textFileCreator = new TextFileCreator(openFileDialog1.FileName);
         textFileCreator.WriteToFile(textBox1.Text);
     }
     menuStrip1.Enabled     = true;
     btnRecordVoice.Enabled = true;
     btnSave.Enabled        = false;
     btnSpeechInfo.Enabled  = false;
     btnFindRelated.Enabled = false;
 }
Code Example #17
        private void SpeechToText()
        {
            strRecgnResult = "";
            var audio = RecognitionAudio.FromFile(outputFilePath);

            var response = speech.Recognize(config, audio);

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    strRecgnResult += alternative.Transcript + "...";
                }
            }
            isResultRecieved = true;
        }
Code Example #18
        public string ConvertSpeechToText(string waveFilePath)
        {
            //var fileDetails = new FileInfo(waveFilePath);
            //if (fileDetails.Length > )

            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig
            {
                SampleRateHertz = 44100,
                LanguageCode    = "en"
            }, RecognitionAudio.FromFile(waveFilePath));

            // Order by descending confidence so the best alternative is taken.
            var speechRecognitionAlternative = response.Results.FirstOrDefault()?.Alternatives.OrderByDescending(x => x.Confidence).FirstOrDefault();

            return speechRecognitionAlternative?.Transcript ?? string.Empty;
        }
Code Example #19
        // Not an actual test... just examples
        public void FactoryMethods()
        {
            // Sample: FactoryMethods
            RecognitionAudio audio1 = RecognitionAudio.FromFile("Sound/SpeechSample.flac");
            RecognitionAudio audio2 = RecognitionAudio.FromUri("https://.../HostedSpeech.flac");
            RecognitionAudio audio3 = RecognitionAudio.FromStorageUri("gs://my-bucket/my-file");

            byte[]           bytes  = ReadAudioData(); // For example, from a database
            RecognitionAudio audio4 = RecognitionAudio.FromBytes(bytes);

            using (Stream stream = OpenAudioStream()) // Any regular .NET stream
            {
                RecognitionAudio audio5 = RecognitionAudio.FromStream(stream);
            }
            // End sample
        }
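Whichever factory method is used, the resulting RecognitionAudio is passed to Recognize (or RecognizeAsync) in the same way; a minimal usage sketch reusing audio3 from above:

        // Sketch: recognition looks identical for file-, URI-, byte-, and stream-based audio.
        var speech = SpeechClient.Create();
        var response = speech.Recognize(new RecognitionConfig
        {
            Encoding     = RecognitionConfig.Types.AudioEncoding.Flac,
            LanguageCode = "en-US",
        }, audio3);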
Code Example #20
        public static void Main(string[] args)
        {
            var speech   = SpeechClient.Create();
            // Note: SyncRecognize and SampleRate come from the older V1Beta1 API;
            // in Google.Cloud.Speech.V1 these are Recognize and SampleRateHertz.
            var response = speech.SyncRecognize(new RecognitionConfig()
            {
                Encoding   = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRate = 16000,
            }, RecognitionAudio.FromFile("audio.raw"));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
        }
Code Example #21
        // Transcribe a local audio file. We can only use this with audios up to 1 minute long.
        public Transcribed_Dto TranscribeLocalFile(string fileName, string language)
        {
            // var speechClient = SpeechClient.Create();
            RecognitionAudio recogAudio = RecognitionAudio.FromFile(fileName);

            var response = speechClient.Recognize(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz       = 48000,
                EnableWordTimeOffsets = true,
                LanguageCode          = language,
            }, recogAudio);

            Transcribed_Dto resp = TransformResponse.Simpify(response.Results);

            return TransformResponse.FixSpeakerTags(resp);
        }
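For audio longer than the one-minute limit noted above, the synchronous Recognize call is not suitable. A hedged sketch of a companion method (the method name and gs:// path are hypothetical) using the library's LongRunningRecognize, mirroring the class context above:

        // Sketch: long-running recognition for audio over ~1 minute, read from Cloud Storage.
        public Transcribed_Dto TranscribeStorageFile(string storageUri, string language)
        {
            var operation = speechClient.LongRunningRecognize(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz       = 48000,
                EnableWordTimeOffsets = true,
                LanguageCode          = language,
            }, RecognitionAudio.FromStorageUri(storageUri)); // e.g. "gs://my-bucket/long-audio.flac"

            operation = operation.PollUntilCompleted(); // blocks until the operation finishes
            var response = operation.Result;

            Transcribed_Dto resp = TransformResponse.Simpify(response.Results);
            return TransformResponse.FixSpeakerTags(resp);
        }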
Code Example #22
File: MainForm.cs Project: jergend/VoiceToTextEditor
        private void TranscribeAudio(string fn)
        {
            uxTextbox.Text = "";
            //string path = Path.Combine(Directory.GetParent(Directory.GetCurrentDirectory()).Parent.FullName, @"MyProject31047-87c642b30d06.json");
            Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", "MyProject31047-87c642b30d06.json");
            var speech = SpeechClient.Create();

            if (hasRecorded)
            {
                var response = speech.Recognize(new RecognitionConfig()
                {
                    Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                    SampleRateHertz = 16000,
                    LanguageCode    = "en",
                }, RecognitionAudio.FromFile(uxExportAudioDialog.FileName));
                foreach (var result in response.Results)
                {
                    foreach (var alternative in result.Alternatives)
                    {
                        uxTextbox.Text += alternative.Transcript;
                    }
                }
            }
            else if (fn != "")
            {
                var response = speech.Recognize(new RecognitionConfig()
                {
                    Encoding                   = RecognitionConfig.Types.AudioEncoding.Linear16,
                    SampleRateHertz            = 16000,
                    LanguageCode               = "en",
                    EnableAutomaticPunctuation = true,
                }, RecognitionAudio.FromFile(fn));
                foreach (var result in response.Results)
                {
                    foreach (var alternative in result.Alternatives)
                    {
                        uxTextbox.Text += alternative.Transcript;
                    }
                }
            }
            else
            {
                MessageBox.Show("No Audio File Found");
            }
        }
Code Example #23
        public static void Main(string[] args)
        {
            Console.OutputEncoding = new System.Text.UTF8Encoding();
            DotEnv.Config(true, "../../../.env");

            var speech   = SpeechClient.Create();
            var response = speech.Recognize(
                new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz = 22050,
                LanguageCode    = LanguageCodes.Japanese.Japan,
            },
                RecognitionAudio.FromFile(DEMO_FILE)
                );

            System.Console.WriteLine(response);
        }
Code Example #24
        public static void Main(string[] args)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile(DEMO_FILE));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Debug.Log("Final: " + alternative.Transcript);
                }
            }
        }
Code Example #25
        public static void Audio2Text(string filepath)
        {
            var speechClient = SpeechClient.Create();
            var response     = speechClient.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "ru"
            }, RecognitionAudio.FromFile(filepath));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
        }
Code Example #26
        // Transcribe a local audio file. We can only use this with audios up to 1 minute long.
        public TranscribeResponse TranscribeFile(string fileName, string language)
        {
            var speech = SpeechClient.Create();
            RecognitionAudio recogAudio = RecognitionAudio.FromFile(fileName);

            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Flac,
                SampleRateHertz       = 48000,
                EnableWordTimeOffsets = true,
                LanguageCode          = language,
            }, recogAudio);

            // Transform the Google response into a more usable object.
            TranscribeResponse transcript = GetShortTranscribeResponse(response);

            return transcript;
        }
Code Example #27
        // [START speech_sync_recognize]
        static object SyncRecognize(string filePath)
        {
            var speech   = SpeechClient.Create();
            // Note: SyncRecognize and SampleRate come from the older V1Beta1 API;
            // in Google.Cloud.Speech.V1 these are Recognize and SampleRateHertz.
            var response = speech.SyncRecognize(new RecognitionConfig()
            {
                Encoding   = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRate = 16000,
            }, RecognitionAudio.FromFile(filePath));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            return 0;
        }
Code Example #28
        /// <summary>
        /// Async recognition of file audio with credentials file(json)
        /// </summary>
        /// <param name="filePath"></param>
        /// <param name="credentialsFilePath"></param>
        /// <returns></returns>
        public static object AsyncRecognizeGcsWordsWithCredentials(string filePath, string credentialsFilePath, int samplingRate)
        {
            // for credentials
            GoogleCredential googleCredential;

            using (Stream m = new FileStream(credentialsFilePath, FileMode.Open))
                googleCredential = GoogleCredential.FromStream(m);
            var channel = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.Host,
                                                googleCredential.ToChannelCredentials());

            var speech = SpeechClient.Create(channel);

            //recognition settings
            var longOperation = speech.LongRunningRecognize(new RecognitionConfig()
            {
                Encoding              = RecognitionConfig.Types.AudioEncoding.Linear16, // Linear16 = wav
                SampleRateHertz       = samplingRate,                                   // set to the rate recorded on the iPad
                LanguageCode          = "ja-JP",                                        // English: en, Japanese: ja-JP
                EnableWordTimeOffsets = true,                                           // true: you get timecodes for each word
                //MaxAlternatives = 3, // maximum alternative count; you may get fewer alternatives than requested, and alternatives don't have timecodes
            }, RecognitionAudio.FromFile(filePath));

            longOperation = longOperation.PollUntilCompleted();
            var response = longOperation.Result;

            //recognition result
            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine($"Transcript: { alternative.Transcript}");
                    Console.WriteLine($"Confidence: {alternative.Confidence}");
                    Console.WriteLine("Word details:");
                    Console.WriteLine($" Word count:{alternative.Words.Count}");
                    foreach (var item in alternative.Words)
                    {
                        Console.WriteLine($"  {item.Word}");
                        Console.WriteLine($"    WordStartTime: {item.StartTime}");
                        Console.WriteLine($"    WordEndTime: {item.EndTime}");
                    }
                }
            }
            return 0;
        }
Code Example #29
File: voice.cs Project: klemby/act
    public string HttpUploadFile(string url, string file, string paramName, string contentType)
    {
        var speech   = SpeechClient.Create();
        var response = speech.Recognize(new RecognitionConfig()
        {
            Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
            SampleRateHertz = 48000,
            LanguageCode    = "en",
        }, RecognitionAudio.FromFile(file));

        foreach (var result in response.Results)
        {
            foreach (var alternative in result.Alternatives)
            {
                return alternative.Transcript;
            }
        }
        return("nothing");
    }
Code Example #30
        public static void Main(string[] args)
        {
            var speech   = SpeechClient.Create();
            var response = speech.Recognize(new RecognitionConfig()
            {
                Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode    = "en",
            }, RecognitionAudio.FromFile(DEMO_FILE));

            foreach (var result in response.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
            CreateWebHostBuilder(args).Build().Run();
        }