Example #1
        async void MakeTranscript()
        {
            speaking = true;

            var config = SpeechConfig.FromSubscription("0be2c48d5bf14f51b98a74bcc5e385bf"
                                                       , "westeurope");

            using (var recognizer = new SpeechRecognizer(config))
            {
                // Starts recognizing.
                Console.WriteLine("Say something...");

                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                if (textBox1.InvokeRequired)
                {
                    textBox1.Invoke(new Action(() => textBox1.Text += result.Text + Environment.NewLine));
                }
                else
                {
                    textBox1.Text += result.Text + Environment.NewLine;
                }


                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
            speaking = false;
        }
Example #2
        // Speech recognition from microphone.
        public static async Task RecognitionWithMicrophoneAsync()
        {
            // <recognitionWithMicrophone>
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            // The default language is "en-us".
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer using microphone as audio input.
            using (var recognizer = new SpeechRecognizer(config))
            {
                // Starts recognizing.
                Console.WriteLine("Say something...");

                // Performs recognition. RecognizeOnceAsync() returns when the first utterance has been recognized,
                // so it is suitable only for single shot recognition like command or query. For long-running
                // recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
            // </recognitionWithMicrophone>
        }
        /// <summary>
        /// Method to convert speech to text using the microphone.
        /// </summary>
        /// <returns></returns>
        public static async Task SpeechToTextAsyncwithMicrophone()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "eastus").
            var config = SpeechConfig.FromSubscription(SubscriptionKey, ServiceRegion);

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Say something...");

                // Starts speech recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed.  The task returns the recognition text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.

                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
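The two methods above both note that RecognizeOnceAsync() handles only a single utterance (roughly 15 seconds of audio at most) and point to StartContinuousRecognitionAsync() for longer sessions. The sketch below shows that continuous pattern in minimal form; it is not taken from any example on this page, and "YourSubscriptionKey"/"YourServiceRegion" are placeholders.

        public static async Task ContinuousRecognitionSketchAsync()
        {
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            using (var recognizer = new SpeechRecognizer(config))
            {
                var stopRecognition = new TaskCompletionSource<int>();

                // Fires once per finalized utterance.
                recognizer.Recognized += (s, e) =>
                {
                    if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                    }
                };

                // Stop waiting when recognition is canceled or the session ends.
                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine($"CANCELED: Reason={e.Reason}");
                    stopRecognition.TrySetResult(0);
                };
                recognizer.SessionStopped += (s, e) => stopRecognition.TrySetResult(0);

                // Keeps transcribing until explicitly stopped.
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
                await stopRecognition.Task.ConfigureAwait(false);
                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }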
Example #4
        public async Task <ActionResult> RecognizeSpeechAsync()
        {
            try
            {
                config = SpeechConfig.FromSubscription("ae9492aae8044a4c888a45a45e957d83", "westus");

                using (var recognizer = new SpeechRecognizer(config))
                {
                    var result = await recognizer.RecognizeOnceAsync();



                    if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        translatedWords = translatedWords + result.Text;
                        ViewBag.message = result.Text;
                    }

                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        ViewBag.message = "Not recognized";
                    }
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = CancellationDetails.FromResult(result);


                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            ViewBag.message = $"CANCELED: ErrorCode={cancellation.ErrorCode} ErrorDetails={cancellation.ErrorDetails}";
                        }
                    }
                }
            }

            catch (Exception ex)
            {
                x = ex.StackTrace;
            }


            return(View("Index"));
        }
        // Speech recognition using a customized model.
        public static async Task RecognitionUsingCustomizedModelAsync()
        {
            // <recognitionCustomized>
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("926e97a14b9946b98175f9b740af6579", "westus");

            // Replace with the CRIS endpoint id of your customized model.
            config.EndpointId = "YourEndpointId";

            // Creates a speech recognizer using microphone as audio input.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Say something...");

                // Performs recognition. RecognizeOnceAsync() returns when the first utterance has been recognized,
                // so it is suitable only for single shot recognition like command or query. For long-running
                // recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                // Checks results.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
            // </recognitionCustomized>
        }
        /// <summary>
        /// Recognizes input speech as text.
        /// </summary>
        /// <param name="recognizeBM">Basic settings for speech recognition.</param>
        /// <returns>The recognized text.</returns>
        public async Task <string> SpeakToText(RecognizeBM recognizeBM)
        {
            // Configure the speech service settings
            var config = this.GetSpeechConfig(recognizeBM);

            // Create the speech recognizer
            using (var recognizer = new SpeechRecognizer(config))
            {
                // RecognizeOnceAsync is suitable only for single-shot recognition, such as a command or query,
                // with input shorter than 15 seconds. For long-running, multi-utterance recognition
                // (input longer than 15 seconds), use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync();

                // Check the status of the returned result
                this.CheckReason(result);

                return(recognizeText);
            }
        }
        // Speech recognition using the microphone
        public static async Task RecognitionWithMicrophoneAsync()
        {
            // Create the speech recognition config; the Azure Cognitive Services subscription key and service region are required here
            var config = SpeechConfig.FromSubscription(YourSubscriptionKey, YourServiceRegion);

            // Use US English (en-US) as the recognition language by default
            config.SpeechRecognitionLanguage = "en-us";

            // Create the speech recognizer, using the machine's default microphone as the audio source
            using (var recognizer = new SpeechRecognizer(config, AudioConfig.FromDefaultMicrophoneInput()))
            {
                Console.WriteLine("Say something...");

                // Start speech recognition; the result is returned once the end of the utterance is detected.
                // The end of an utterance is detected from the trailing silence, or after a maximum of 15 seconds of audio.
                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                // Output the recognition result
                switch (result.Reason)
                {
                case ResultReason.RecognizedSpeech:
                    Console.WriteLine($"RECOGNIZED: {result.Text}");
                    break;

                case ResultReason.NoMatch:
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    break;

                case ResultReason.Canceled:
                default:
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                    break;
                }
            }
        }
Example #8
        /// <summary>
        /// Runs recognition: starts it, waits for the final result, then stops.
        /// </summary>
        /// <param name="recognizer">The speech recognizer.</param>
        /// <param name="source">Task completion source that is signaled when recognition has finished.</param>
        private async Task RunRecognizer(SpeechRecognizer recognizer, TaskCompletionSource <int> source)
        {
            // Create the event handlers
            EventHandler <SpeechRecognitionEventArgs> recognizingHandler = (sender, e) => RecognizingEventHandler(e);

            // Attach the handlers to the recognizer
            recognizer.Recognizing += recognizingHandler;


            EventHandler <SpeechRecognitionEventArgs> recognizedHandler = (sender, e) => RecognizedEventHandler(e);

            EventHandler <SpeechRecognitionCanceledEventArgs> canceledHandler = (sender, e) => CanceledEventHandler(e, source);
            EventHandler <SessionEventArgs>     sessionStartedHandler         = (sender, e) => SessionStartedEventHandler(e);
            EventHandler <SessionEventArgs>     sessionStoppedHandler         = (sender, e) => SessionStoppedEventHandler(e, source);
            EventHandler <RecognitionEventArgs> speechStartDetectedHandler    = (sender, e) => SpeechDetectedEventHandler(e, "start");
            EventHandler <RecognitionEventArgs> speechEndDetectedHandler      = (sender, e) => SpeechDetectedEventHandler(e, "end");

            recognizer.Recognized          += recognizedHandler;
            recognizer.Canceled            += canceledHandler;
            recognizer.SessionStarted      += sessionStartedHandler;
            recognizer.SessionStopped      += sessionStoppedHandler;
            recognizer.SpeechStartDetected += speechStartDetectedHandler;
            recognizer.SpeechEndDetected   += speechEndDetectedHandler;

            // Start, wait for, and stop recognition (single-shot recognition)
            //await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
            await recognizer.RecognizeOnceAsync();

            await source.Task.ConfigureAwait(false);

            //await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

            this.EnableButtons();


            recognizer.Recognizing         -= recognizingHandler;
            recognizer.Recognized          -= recognizedHandler;
            recognizer.Canceled            -= canceledHandler;
            recognizer.SessionStarted      -= sessionStartedHandler;
            recognizer.SessionStopped      -= sessionStoppedHandler;
            recognizer.SpeechStartDetected -= speechStartDetectedHandler;
            recognizer.SpeechEndDetected   -= speechEndDetectedHandler;
        }
Example #9
        /// <summary>
        /// Recognizes a WAV file, but only up to about 15 seconds of audio
        /// </summary>
        /// <param name="stream"></param>
        /// <param name="fileName"></param>
        /// <returns></returns>
        public async Task <string> RecognizeShortAudio(Stream stream, string fileName)
        {
            DirectoryInfo info = new DirectoryInfo("audios");

            if (!info.Exists)
            {
                info.Create();
            }

            string path = Path.Combine("audios", fileName);

            using (FileStream outputFileStream = new FileStream(path, FileMode.Create))
            {
                stream.CopyTo(outputFileStream);
            }
            using var audioConfig = AudioConfig.FromWavFileInput(path);
            using var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

            var result = await recognizer.RecognizeOnceAsync();

            switch (result.Reason)
            {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                return(result.Text);

            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                }
                break;
            }
            return(null);
        }
        public static async Task <string> RecognizeSpeechAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                // Starts speech recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed.  The task returns the recognition text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    return(result.Text);
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    return($"NOMATCH. Error code {NoMatchDetails.FromResult(result).ToString()}");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        return($"CANCELED: Error code {cancellation.ErrorCode}");
                    }
                    else
                    {
                        return($"CANCELED: {cancellation.Reason}");
                    }
                }
                else
                {
                    return("Unknown error");
                }
            }
        }
Example #11
        public void RecognizeSpeech()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("c78539a1f1754b37a9d72875a3d19c06", "southeastasia");

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Say something...");

                // Starts speech recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed.  The task returns the recognition text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                // Note: blocking on .Result can deadlock in UI synchronization contexts; prefer awaiting in an async method.
                var result = recognizer.RecognizeOnceAsync().Result;

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    txt_STTText.Text += result.Text + Environment.NewLine;
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
        private async void OnRecordButtonClicked(object sender, EventArgs e)
        {
            try
            {
                var config = SpeechConfig.FromSubscription(speech_key, "westus");
                config.SpeechRecognitionLanguage = config.SpeechSynthesisLanguage =
                    viewModel.LangCodeDictionary[(string)SourceLanguage.SelectedItem];

                using (var recognizer = new SpeechRecognizer(config))
                {
                    var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                    // Checks result.
                    StringBuilder sb = new StringBuilder();
                    if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        sb.AppendLine(result.Text);
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        sb.AppendLine($"NOMATCH: Speech could not be recognized.");
                    }
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = CancellationDetails.FromResult(result);
                        sb.AppendLine($"CANCELED: Reason={cancellation.Reason}");

                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            sb.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            sb.AppendLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                            sb.AppendLine($"CANCELED: Did you update the subscription info?");
                        }
                    }

                    UpdateUI(sb.ToString());
                }
            }
            catch (Exception ex) {
                UpdateUI("Exception: " + ex);
            }
        }
        static async Task RecognizeSpeechAsync(TextBox txt, TextBox txtResponse, Label txtCRMPost, TextBox txtJsonResponse)
        {
            var config =
                SpeechConfig.FromSubscription(
                    "04be3fcaa4c444ca8cc1e64cb113b2f1",
                    "australiaeast");

            var recognizer = new SpeechRecognizer(config);

            var result = await recognizer.RecognizeOnceAsync();

            switch (result.Reason)
            {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"We recognized: {result.Text}");
                txt.Text = result.Text;
                break;

            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
            }
            try
            {
                PostAPICalls(txt, txtResponse, txtCRMPost, txtJsonResponse);
            } catch (Exception e)
            {
                txtCRMPost.Text = e.Message;
            }
        }
Example #14
        public async Task RecognizeSpeechAsync(string path)
        {
            try
            {
                var config = SpeechConfig.FromSubscription("523e137d4e544865b41b7b418dd39ac0", "uksouth");
                config.SpeechRecognitionLanguage = "pt-PT";

                using (var audioInput = AudioConfig.FromWavFileInput(path))
                    using (var recognizer = new SpeechRecognizer(config, audioInput))
                    {
                        Console.WriteLine("Recognizing first result...");
                        var result = await recognizer.RecognizeOnceAsync();

                        switch (result.Reason)
                        {
                        case ResultReason.RecognizedSpeech:
                            this.result_text = result.Text;
                            break;

                        case ResultReason.NoMatch:
                            this.result_text = "NOMATCH: Speech could not be recognized.";
                            break;

                        case ResultReason.Canceled:
                            var cancellation = CancellationDetails.FromResult(result);
                            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                            this.result_text = "CANCELED: Reason={cancellation.Reason}";
                            if (cancellation.Reason == CancellationReason.Error)
                            {
                                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                                Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                                Console.WriteLine($"CANCELED: Did you update the subscription info?");
                            }
                            break;
                        }
                    }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
            }
        }
Example #15
        public static async Task RecognizeSpeechAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("a500a698e9f247eda1b27c379ba3298a", "uksouth");

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Te rog sa spui ceva in engleza in microfon...");

                // Starts speech recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed.  The task returns the recognition text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Nu pot sa recunosc ce spui.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
Example #16
        public async Task <SpeechRecognitionResult> Record()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("2515f320-76bd-4798-adb3-dade8f1db94e", "northeurope");

            //var config = SpeechConfig.FromEndpoint(new Uri("https://northeurope.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1"), "2515f320-76bd-4798-adb3-dade8f1db94e");

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Say something...");

                // Performs recognition. RecognizeOnceAsync() returns when the first utterance has been recognized,
                // so it is suitable only for single shot recognition like command or query. For long-running
                // recognition, use StartContinuousRecognitionAsync() instead.
                SpeechRecognitionResult result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }

                return(result);
            }
        }
Example #17
        public static async Task <string> RecognizeSpeechFromBytesAsync(byte[] bytes, string locale)
        {
            MemoryStream stream = new MemoryStream(bytes);

            var speechApiKey    = Environment.GetEnvironmentVariable("SpeechApiKey");
            var speechApiRegion = Environment.GetEnvironmentVariable("SpeechApiRegion");

            var speechConfig = SpeechConfig.FromSubscription(speechApiKey, speechApiRegion);

            speechConfig.SpeechRecognitionLanguage = locale;

            var audioFormat = AudioStreamFormat.GetWaveFormatPCM(16000, 16, 1);
            var audioStream = new VoiceAudioStream(stream);
            // NOTE: audioFormat is declared but not passed anywhere; if VoiceAudioStream does not carry
            // its own format, supply it via the AudioConfig.FromStreamInput overload that accepts a format.
            var audioConfig = AudioConfig.FromStreamInput(audioStream);

            var recognizer = new SpeechRecognizer(speechConfig, audioConfig);
            var result     = await recognizer.RecognizeOnceAsync();

            return(result.Text);
        }
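For reference, the byte[]-to-text flow above can also be done without the custom VoiceAudioStream class by using the SDK's built-in push stream. The following is a hedged sketch only, not part of the example; it assumes the input bytes are 16 kHz, 16-bit, mono PCM, matching the format declared above, and the method name is illustrative.

        public static async Task <string> RecognizeSpeechFromBytesWithPushStreamAsync(byte[] bytes, string locale)
        {
            var speechApiKey    = Environment.GetEnvironmentVariable("SpeechApiKey");
            var speechApiRegion = Environment.GetEnvironmentVariable("SpeechApiRegion");

            var speechConfig = SpeechConfig.FromSubscription(speechApiKey, speechApiRegion);

            speechConfig.SpeechRecognitionLanguage = locale;

            // Declare the raw format explicitly: 16 kHz sample rate, 16 bits per sample, mono.
            var audioFormat = AudioStreamFormat.GetWaveFormatPCM(16000, 16, 1);

            using var pushStream  = AudioInputStream.CreatePushStream(audioFormat);
            using var audioConfig = AudioConfig.FromStreamInput(pushStream);
            using var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

            // Feed all bytes, then close the stream to signal end of audio.
            pushStream.Write(bytes);
            pushStream.Close();

            var result = await recognizer.RecognizeOnceAsync();

            return(result.Text);
        }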
        //public async Task CodecCompressedAudioRecognition(string fileName)
        //{
        //    var audioFormat = AudioStreamFormat.GetCompressedFormat(AudioStreamContainerFormat.OGG_OPUS);
        //    var audioConfig = AudioConfig.FromStreamInput(myPushStream, audioFormat);


        //    using (m_audioRecognizer = new SpeechRecognizer(m_config, audioConfig))
        //    {
        //        var result = await m_audioRecognizer.RecognizeOnceAsync();

        //        if (result.Reason == ResultReason.RecognizedSpeech)
        //        {
        //            Console.WriteLine($"Recognized: {result.Text}");
        //        }
        //        else
        //        {
        //            Console.WriteLine($"Failed to recognize, the reason is {result.Reason.ToString()}");
        //        }
        //    }
        //}
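A working version of the commented-out method above would look roughly like the sketch below: OGG/OPUS compressed audio is declared via a compressed stream format and fed through a push stream. It reuses the m_config field from this class; the file reading and stream handling are assumptions, and compressed input generally also requires GStreamer to be installed.

        public async Task CompressedAudioRecognitionSketch(string fileName)
        {
            // Compressed container format for the incoming audio (OGG/OPUS).
            var audioFormat = AudioStreamFormat.GetCompressedFormat(AudioStreamContainerFormat.OGG_OPUS);

            using (var pushStream = AudioInputStream.CreatePushStream(audioFormat))
            using (var audioConfig = AudioConfig.FromStreamInput(pushStream))
            using (var audioRecognizer = new SpeechRecognizer(m_config, audioConfig))
            {
                // Push the whole compressed file, then close the stream to signal end of audio.
                pushStream.Write(File.ReadAllBytes(fileName));
                pushStream.Close();

                var result = await audioRecognizer.RecognizeOnceAsync();

                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"Recognized: {result.Text}");
                }
                else
                {
                    Console.WriteLine($"Failed to recognize, the reason is {result.Reason}");
                }
            }
        }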


        public async Task ShortSpeechRecognitionWithFile(string fileName)
        {
            Console.WriteLine("----------------Short Speech With File Start------------------------");
            var audioInput = AudioConfig.FromWavFileInput(fileName);

            using (var audioRecognizer = new SpeechRecognizer(m_config, audioInput))
            {
                var result = await audioRecognizer.RecognizeOnceAsync();

                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"Recognized: {result.Text}");
                }
                else
                {
                    Console.WriteLine($"Faile to recognize, the reason is {result.Reason.ToString()}");
                }
            }
            Console.WriteLine("-----------------Short Speech With File End--------------------------");
        }
Example #19
        async static Task FromStream(SpeechConfig speechConfig)
        {
            var reader = new BinaryReader(File.OpenRead(DEMO_FILE));

            Console.WriteLine(reader.ToString());
            using var audioInputStream = AudioInputStream.CreatePushStream();
            using var audioConfig      = AudioConfig.FromStreamInput(audioInputStream);
            using var recognizer       = new SpeechRecognizer(speechConfig, audioConfig);

            byte[] readBytes;
            do
            {
                readBytes = reader.ReadBytes(1024);
                audioInputStream.Write(readBytes, readBytes.Length);
            } while (readBytes.Length > 0);

            var result = await recognizer.RecognizeOnceAsync();

            Console.WriteLine($"RECOGNIZED: Text={result.Text}");
        }
Example #20
        async static Task FromMic(SpeechConfig speechConfig)
        {
            var checker = true;

            while (checker)
            {
                using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
                using var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

                Console.WriteLine("Speak into your microphone.");
                var result = await recognizer.RecognizeOnceAsync();

                Console.WriteLine($"RECOGNIZED: Text={result.Text}");

                if (result.Text == "end." || result.Text == "End.")
                {
                    break;
                }
            }
        }
Example #21
        public static async Task RecognizeSpeechAsync()
        {
            var speechKey      = ConfigurationManager.AppSettings["Speech:Key"];
            var speechLocation = ConfigurationManager.AppSettings["Speech:Location"];
            var config         = SpeechConfig.FromSubscription(speechKey, speechLocation);
            TextAnalyticsClient client;    // NOTE: must be constructed with textAnalyticsEndpoint and textAnalyticsKey before the sentiment call below.
            var textAnalyticsKey      = ConfigurationManager.AppSettings["TextAnalytics:Key"];
            var textAnalyticsEndpoint = ConfigurationManager.AppSettings["TextAnalytics:Endpoint"];

            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Speak something which I can recognize...");

                while (true)
                {
                    var result = await recognizer.RecognizeOnceAsync();

                    if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        var sentimentResult = client.Sentiment(result.Text, "en");
                        Console.WriteLine(result.Text);
                    }
                    //else if (result.Reason == ResultReason.NoMatch)
                    //{
                    //    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    //}
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = CancellationDetails.FromResult(result);
                        Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }
                    }
                }
            }
        }
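The method above declares a TextAnalyticsClient but never constructs it, and client.Sentiment(...) comes from an older Text Analytics SDK generation. As a hedged sketch only, the current Azure.AI.TextAnalytics package could be wired in roughly as follows, reusing the textAnalyticsEndpoint and textAnalyticsKey settings read above; the helper name is illustrative.

        // Requires: using Azure; using Azure.AI.TextAnalytics;
        private static string AnalyzeSentimentSketch(string textAnalyticsEndpoint, string textAnalyticsKey, string recognizedText)
        {
            var client = new TextAnalyticsClient(new Uri(textAnalyticsEndpoint), new AzureKeyCredential(textAnalyticsKey));

            // Returns Positive / Negative / Neutral / Mixed for the recognized utterance.
            DocumentSentiment sentiment = client.AnalyzeSentiment(recognizedText);

            return(sentiment.Sentiment.ToString());
        }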
        static async Task RecognizeSpeechAsync()
        {
            // Configure the resource information needed to access it.
            // Use key1 or key2 from the Speech Service resource you created.
            var config = SpeechConfig.FromSubscription("TU_KEY_DE_COGNITIVE_SERVICE", "LOCALIZACION_DEL_RECURSO");

            // Load the audio file you will use; in this case, from a local file.
            using (var audioInput = AudioConfig.FromWavFileInput("NOMBRE_ARCHIVO.wav"))

                // Here you pass the parameters the Speech Service needs, together with the audio file.
                using (var recognizer = new SpeechRecognizer(config, audioInput))
                {
                    Console.WriteLine("Recognizing first result...");
                    var result = await recognizer.RecognizeOnceAsync();

                    switch (result.Reason)
                    {
                    case ResultReason.RecognizedSpeech:
                        // If we land here, the audio file was recognized and the transcription is printed to the terminal.
                        Console.WriteLine($"We recognized: {result.Text}");
                        break;

                    case ResultReason.NoMatch:
                        // If we land here, Azure could NOT recognize the audio file and a message says so.
                        Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                        break;

                    case ResultReason.Canceled:
                        // If we land here, the operation was canceled and the reason is printed to the terminal.
                        var cancellation = CancellationDetails.FromResult(result);
                        Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }
                        break;
                    }
                }
        }
Example #23
        public static async Task RecognizeSpeechFromMicrophoneAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription(AzurePrivateData.SUBSCRIPTION_KEY, AzurePrivateData.REGION_STRING);

            //config.OutputFormat = OutputFormat.Detailed;

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Say something...");

                // Performs recognition. RecognizeOnceAsync() returns when the first utterance has been recognized,
                // so it is suitable only for single shot recognition like command or query. For long-running
                // recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
Example #24
        public static async Task RecognizeLng()
        {
            SpeechConfig speechConfig = SpeechConfig.FromEndpoint(new System.Uri(ConfigurationManager.AppSettings.Get("SpeechEndpoint")), ConfigurationManager.AppSettings.Get("TTSKey"));
            AudioConfig  audioConfig  = AudioConfig.FromDefaultMicrophoneInput();
            AutoDetectSourceLanguageConfig autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig
                                                                            .FromLanguages(new string[] { "en-US", "ru-RU" });

            using (var recognizer = new SpeechRecognizer(
                       speechConfig,
                       autoDetectSourceLanguageConfig,
                       audioConfig))
            {
                Console.WriteLine("Say something...");
                var speechRecognitionResult = await recognizer.RecognizeOnceAsync();

                var autoDetectSourceLanguageResult =
                    AutoDetectSourceLanguageResult.FromResult(speechRecognitionResult);
                var detectedLng = autoDetectSourceLanguageResult.Language;
                Console.WriteLine("I recognized " + speechRecognitionResult.Text + " in " + detectedLng);
            }
        }
        public async Task ShortSpeechRecognitionWithPullStream(string fileName)
        {
            Console.WriteLine("-------------------Short Speech With PullStream Start---------------------");
            using (var audioConfig = Helper.OpenWavFile(fileName))
            {
                using (var audioRecognizer = new SpeechRecognizer(m_config, audioConfig))
                {
                    var result = await audioRecognizer.RecognizeOnceAsync();

                    if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Console.WriteLine($"Recognized: {result.Text}");
                    }
                    else
                    {
                        Console.WriteLine($"Failed to recognize, the reason is {result.Reason.ToString()}");
                    }
                }
            }
            Console.WriteLine("-------------------Short Speech With PullStream End-----------------------");
        }
Example #26
        /// <summary>Speech to text: recognize from a memory stream.</summary>
        public static async Task <string> RecognizeFromStreamAsync(string inputFileName)
        {
            var config = SpeechConfig.FromSubscription(subscriptionKey, region);

            var reader = new BinaryReader(File.OpenRead(inputFileName));

            using var audioInputStream = AudioInputStream.CreatePushStream();
            using var audioConfig      = AudioConfig.FromStreamInput(audioInputStream);
            using var recognizer       = new SpeechRecognizer(config, audioConfig);

            byte[] readBytes;
            do
            {
                readBytes = reader.ReadBytes(1024);
                audioInputStream.Write(readBytes, readBytes.Length);
            } while (readBytes.Length > 0);

            var result = await recognizer.RecognizeOnceAsync();

            return(result.Text);
        }
        public async Task <string> RecognizeSpeechAsync(string file)
        {
            var result   = "";
            var filePath = Path.Combine(@"c:\Users\dimab\Desktop\TraraBot\tarabot\TaraBot\", file);
            var wavPath  = Path.ChangeExtension(filePath, ".wav");

            File.Move(filePath, wavPath);

            var config = SpeechConfig.FromSubscription(AzureSpeechToText.SubscriptionKey, AzureSpeechToText.Region);

            using (var audioInput = AudioConfig.FromWavFileInput(wavPath))
            using (var recognizer = new SpeechRecognizer(config, audioInput))
            {
                var resultSpeechToText = await recognizer.RecognizeOnceAsync();

                if (resultSpeechToText.Reason == ResultReason.RecognizedSpeech)
                {
                    result = resultSpeechToText.Text;
                }
            }

            return(result);
        }
        /// <summary>
        /// Short-form speech recognition (real time)
        /// </summary>
        /// <returns></returns>
        public static async Task RecognizeSortSpeechAsync()
        {
            var config = SpeechConfig.FromSubscription(apiKey, region);

            config.SpeechRecognitionLanguage = speechRecognitionLanguage; // Language setting

            // Create the recognizer
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("说点什么...");

                /*
                 * RecognizeOnceAsync() returns after the first utterance has been recognized, so it is suitable
                 * only for single-shot recognition such as a command or query.
                 * For long-running recognition, use StartContinuousRecognitionAsync() instead.
                 */
                var result = await recognizer.RecognizeOnceAsync();

                // Check the result
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"识别结果: {result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"识别失败: 语言无法被识别.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"识别已取消: 原因={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"识别已取消: 错误码={cancellation.ErrorCode}");
                        Console.WriteLine($"识别已取消: 错误详情={cancellation.ErrorDetails}");
                        Console.WriteLine($"识别已取消: 请检查订阅是否正常");
                    }
                }
            }
        }
Example #29
        public static async Task RecognizeSpeechAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("b253e516e5054860a09955c63ceb0558", "westus");

            config.SpeechRecognitionLanguage = "es-MX"; //recognize Spanish voice

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                Console.WriteLine("Say something...");

                // Performs recognition. RecognizeOnceAsync() returns when the first utterance has been recognized,
                // so it is suitable only for single shot recognition like command or query. For long-running
                // recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
Example #30
        public static async Task <string> RecognizeSpeechAsync()
        {
            Debug.WriteLine("Starting Speech2Text service...");
            var config = SpeechConfig.FromSubscription("d882cca2d3b44735b0760cbaece4b340", "westus");

            config.SpeechRecognitionLanguage = "es-MX";
            using (var audioInput = AudioConfig.FromWavFileInput(@"D:\VS Projects\Ignite\Consultant\Consultant\Speech.wav"))
            {
                using (var recognizer = new SpeechRecognizer(config, audioInput))
                {
                    Debug.WriteLine("Recognizing first result...");
                    var result = await recognizer.RecognizeOnceAsync();

                    if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Debug.WriteLine($"We recognized: {result.Text}");
                        return(result.Text);
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        Debug.WriteLine($"NOMATCH: Speech could not be recognized.");
                        return("Not recognized");
                    }
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = CancellationDetails.FromResult(result);
                        Debug.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            Debug.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            Debug.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                            Debug.WriteLine($"CANCELED: Did you update the subscription info?");
                        }
                        return("Canceled");
                    }
                    return("");
                }
            }
        }