Example #1
        // <recognitionCustomized>
        // Speech recognition using a customized model.
        public static async Task RecognitionUsingCustomizedModelAsync()
        {
            // Creates an instance of a speech factory with specified
            // subscription key and service region. Replace with your own subscription key
            // and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer using microphone as audio input.
            using (var recognizer = factory.CreateSpeechRecognizer())
            {
                // Replace with the CRIS deployment id of your customized model.
                recognizer.DeploymentId = "YourDeploymentId";

                Console.WriteLine("Say something...");

                // Starts recognition. It returns when the first utterance has been recognized.
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Checks results.
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"There was an error. Status:{result.RecognitionStatus.ToString()}, Reason:{result.RecognitionFailureReason}");
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
            }
        }
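
A minimal console entry point for trying this sample might look like the following sketch (the surrounding Program class and the exit prompt are assumed additions, not part of the original example):

        static void Main()
        {
            // Blocks until the single-shot recognition above completes.
            RecognitionUsingCustomizedModelAsync().Wait();

            Console.WriteLine("Please press a key to exit.");
            Console.ReadKey();
        }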
Example #2

        private async void SpeechRecognitionFromMicrophone_ButtonClicked(object sender, RoutedEventArgs e)
        {
            if (!AreKeysValid())
            {
                NotifyUser("Subscription Key is missing!", NotifyType.ErrorMessage);
                return;
            }
            // Creates an instance of a speech factory with the specified
            // subscription key and service region.
            var factory = SpeechFactory.FromSubscription(this.SubscriptionKey, this.Region);

            // Creates a speech recognizer using microphone as audio input. The default language is "en-us".
            using (var recognizer = factory.CreateSpeechRecognizer(this.RecognitionLanguage))
            {
                // Starts recognition. It returns when the first utterance has been recognized.
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Checks result.
                string str;
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    str = $"Speech Recognition Failed. '{result.RecognitionStatus.ToString()}'";
                }
                else
                {
                    str = $"Recognized: '{result.Text}'";
                }
                NotifyUser(str, NotifyType.StatusMessage);
            }
        }
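
The snippet references AreKeysValid and NotifyUser without showing them. A plausible minimal sketch of those helpers, assuming simple key/region properties on the page (the bodies are guesses, not the original implementation):

        // Assumed helper: the original implementation is not shown in the snippet.
        private bool AreKeysValid()
        {
            return !string.IsNullOrEmpty(this.SubscriptionKey) && !string.IsNullOrEmpty(this.Region);
        }

        // Assumed helper: surfaces a status or error message to the user.
        // The official UWP samples marshal this to the UI thread via the Dispatcher;
        // shown here only as a placeholder.
        private void NotifyUser(string message, NotifyType type)
        {
            System.Diagnostics.Debug.WriteLine($"{type}: {message}");
        }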
Example #3
        private static async Task<SpeechViewModel> EnableSpeechRecognition()
        {
            SpeechViewModel svm = new SpeechViewModel
            {
                PromptMessage = "Speak."
            };

            SpeechFactory factory = SpeechFactory.FromSubscription("a5a9e9b4c6164808be0c34ccd4d1e598", "westus");

            // Creates a SpeechRecognizer to accept audio input from the user
            SpeechRecognizer recognizer = factory.CreateSpeechRecognizer();

            // Accepts audio input from the user to recognize speech
            SpeechRecognitionResult result = await recognizer.RecognizeAsync();

            // Acts on recognized speech from audio input
            if (result.RecognitionStatus != RecognitionStatus.Recognized)
            {
                Console.WriteLine($"Recognition status: {result.RecognitionStatus.ToString()}");
                if (result.RecognitionStatus == RecognitionStatus.Canceled)
                {
                    svm.ResultMessage = $"There was an error, reason: {result.RecognitionFailureReason}";
                }
                else
                {
                    svm.ResultMessage = "No speech could be recognized.\n";
                }
            }
            else
            {
                svm.ResultMessage = $"{result.Text}";
            }

            return svm;
        }
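
SpeechViewModel is not defined in the snippet. A minimal sketch consistent with how it is used above (the two property names come from the snippet; everything else is assumed):

        // Assumed shape of the view model used above; only the two properties
        // referenced in the snippet are known.
        public class SpeechViewModel
        {
            public string PromptMessage { get; set; }
            public string ResultMessage { get; set; }
        }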
Example #4
        // Speech recognition from microphone.
        public static async Task RecognitionWithMicrophoneAsync()
        {
            // <recognitionWithMicrophone>
            // Creates an instance of a speech factory with specified
            // subscription key and service region. Replace with your own subscription key
            // and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("59a0243e86ae4919aa26f9e839f34b28", "westus");

            // Creates a speech recognizer using microphone as audio input. The default language is "en-us".
            using (var recognizer = factory.CreateSpeechRecognizer())
            {
                // Starts recognizing.
                Console.WriteLine("Say something...");

                // Starts recognition. It returns when the first utterance has been recognized.
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Checks result.
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"There was an error. Status:{result.RecognitionStatus.ToString()}, Reason:{result.RecognitionFailureReason}");
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.RecognizedText}");
                }
            }
            // </recognitionWithMicrophone>
        }
Example #5
        /// <summary>
        ///     Recognizes speech asynchronously.
        /// </summary>
        /// <returns></returns>
        public static async Task RecognizeSpeechAsync()
        {
            var factory = SpeechFactory.FromSubscription("", "global");

            // Creates a speech recognizer.
            using (var recognizer = factory.CreateSpeechRecognizer())
            {
                Console.WriteLine("Say something...");

                var result = await recognizer.RecognizeAsync();

                // Checks result.
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"Recognition status: {result.RecognitionStatus.ToString()}");
                    Console.WriteLine(result.RecognitionStatus == RecognitionStatus.Canceled
                        ? $"There was an error, reason: {result.RecognitionFailureReason}"
                        : "No speech could be recognized.\n");
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
            }
        }
Example #6
        // Speech recognition from file.
        public static async Task RecognitionWithFileAsync()
        {
            // <recognitionFromFile>
            // Creates an instance of a speech factory with specified
            // subscription key and service region. Replace with your own subscription key
            // and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer using file as audio input.
            // Replace with your own audio file name.
            using (var recognizer = factory.CreateSpeechRecognizerWithFileInput(@"YourAudioFile.wav"))
            {
                // Starts recognition. It returns when the first utterance is recognized.
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Checks result.
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"There was an error. Status:{result.RecognitionStatus.ToString()}, Reason:{result.RecognitionFailureReason}");
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
            }
            // </recognitionFromFile>
        }
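
The samples of this SDK generation assume the input WAV is PCM (typically 16 kHz, 16-bit, mono); other formats tend to fail recognition silently. A small sanity check of the RIFF header before handing the file to the recognizer might look like this sketch (the helper name and the exact format requirements are assumptions to verify against your SDK version):

        // Hypothetical helper: quick RIFF/WAVE header check before recognition.
        private static bool LooksLikeWav(string path)
        {
            using (var reader = new System.IO.BinaryReader(System.IO.File.OpenRead(path)))
            {
                var riff = new string(reader.ReadChars(4));   // "RIFF"
                reader.ReadInt32();                           // chunk size
                var wave = new string(reader.ReadChars(4));   // "WAVE"
                return riff == "RIFF" && wave == "WAVE";
            }
        }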
Example #7
        /// <summary>
        /// Initializes the factory object with subscription key and region
        /// Initializes the recognizer object with a TranslationRecognizer
        /// Subscribes the recognizer to recognition Event Handlers
        /// If recognition is running, starts a thread which stops the recognition
        /// </summary>
        private void CreateRecognizer()
        {
            string region       = "eastasia";
            string fromLanguage = "en-US";
            //var toLanguages = new List<string>() { "zh", "de" };
            var toLanguages = new List<string>() { "ja" };
            // var voiceChinese = "zh-CN-Yaoyao";
            var    voiceChinese = "ja-JP-Ayumi";
            string sub_key      = "7620810559eb4d1c96f770c5ee019bd3";

            //string sub_key = "91ad01e1da954931955dc87b6fb71c0c";

            //this.factory = SpeechFactory.FromSubscription(SubscriptionKey, region);
            this.factory = SpeechFactory.FromSubscription(sub_key, region);

            this.recognizer = this.factory.CreateTranslationRecognizer(fromLanguage, toLanguages, voiceChinese);

            this.recognizer.IntermediateResultReceived += this.OnPartialResponseReceivedHandler;
            this.recognizer.FinalResultReceived        += this.OnFinalResponse;
            this.recognizer.SynthesisResultReceived    += this.OnSynthesis;
            this.recognizer.RecognitionErrorRaised     += this.OnError;
        }
Example #8

        public async Task RecognizeSpeechAsync(CNMAudioItem cnmAudioItem)
        {
            var stopRecognition = new TaskCompletionSource<int>();

            var factory = _speechFactory ?? SpeechFactory.FromSubscription("77623f52633c426890a6d2bb11116c8b", "westus");

            using (var recognizer = factory.CreateSpeechRecognizerWithFileInput(cnmAudioItem.Filename, "es-ES"))
            {
                recognizer.FinalResultReceived += (s, e) =>
                {
                    if (e.Result.RecognitionStatus == RecognitionStatus.Recognized)
                    {
                        cnmAudioItem.AppendTextLine(e.Result.Text);
                    }
                };
                recognizer.OnSessionEvent += (s, e) =>
                {
                    if (e.EventType == SessionEventType.SessionStoppedEvent)
                    {
                        cnmAudioItem.IsBusy = false;
                        stopRecognition.TrySetResult(0);
                    }
                };
                recognizer.RecognitionErrorRaised += (s, e) =>
                {
                    Console.WriteLine(e.FailureReason);
                };

                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                await stopRecognition.Task.ConfigureAwait(false);

                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }
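
CNMAudioItem is application-specific and not shown. A minimal sketch matching only the members used above (Filename, IsBusy, AppendTextLine), with assumed implementations:

        // Assumed shape of CNMAudioItem; only the members used above are known.
        public class CNMAudioItem
        {
            private readonly System.Text.StringBuilder _text = new System.Text.StringBuilder();

            public string Filename { get; set; }
            public bool IsBusy { get; set; }

            public void AppendTextLine(string line)
            {
                _text.AppendLine(line);
            }
        }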
Example #9
        static async Task RecoFromMicrophoneAsync()
        {
            var subscriptionKey = "a83dbe5e00c94e7dad4be7786dc32252";
            var region          = "westus";

            var factory = SpeechFactory.FromSubscription(subscriptionKey, region);

            using (var recognizer = factory.CreateSpeechRecognizer())
            {
                Console.WriteLine("Say something...");
                var result = await recognizer.RecognizeAsync();

                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"There was an error, status {result.RecognitionStatus.ToString()}, reason {result.RecognitionFailureReason}");
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
                if (result.Text != "" && result.Text != null)
                {
                    Console.WriteLine("DialogFlow answer :");
                    Console.Write(GetDialogFlowAnswer(result.Text));
                    Console.WriteLine();
                }
                Console.WriteLine("Please press a key to continue.");
                Console.ReadKey();
            }
        }
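
GetDialogFlowAnswer is referenced above but not shown in the snippet. A hypothetical stub illustrating its expected shape; the real method presumably calls the Dialogflow detectIntent API, which is deliberately left out here rather than guessed at:

        // Hypothetical stub for the helper used above; the Dialogflow call itself
        // is intentionally omitted.
        static string GetDialogFlowAnswer(string query)
        {
            // TODO: send `query` to your Dialogflow agent and return the fulfillment text.
            return $"(no agent configured; you said: {query})";
        }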
Example #10
        public static async Task RunAsync()
        {
            // Replace with your service endpoint URI and subscription key.
            var factory = SpeechFactory.FromEndPoint(new Uri(""), "");

            while (true)
            {
                var speechRecognizer = factory.CreateSpeechRecognizerWithStream(new AudioStreamReader(new BinaryReader(File.OpenRead(Path.Combine(Directory.GetCurrentDirectory(), "b0017.wav")))));

                speechRecognizer.FinalResultReceived += OnResult;

                try
                {
                    await speechRecognizer.StartContinuousRecognitionAsync();

                    await Task.Delay(TimeSpan.FromMinutes(0.5));
                }
                catch { /* Ignore transient errors; the recognizer is stopped and retried below. */ }
                finally
                {
                    await speechRecognizer.StopContinuousRecognitionAsync();

                    await Task.Delay(TimeSpan.FromSeconds(10));
                }
            }
        }
Example #11

        // </recognitionCustomized>

        // <recognitionContinuous>
        // Speech recognition with events
        public static async Task ContinuousRecognitionAsync()
        {
            // Creates an instance of a speech factory with specified
            // subscription key and service region. Replace with your own subscription key
            // and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer using microphone as audio input.
            using (var recognizer = factory.CreateSpeechRecognizer())
            {
                // Subscribes to events.
                recognizer.IntermediateResultReceived += (s, e) =>
                { Console.WriteLine($"\n    Partial result: {e.Result.RecognizedText}."); };
                recognizer.FinalResultReceived += (s, e) =>
                { Console.WriteLine($"\n    Final result: Status: {e.Result.RecognitionStatus}, Text: {e.Result.RecognizedText}."); };
                recognizer.RecognitionErrorRaised += (s, e) =>
                { Console.WriteLine($"\n    An error occurred. Status: {e.Status.ToString()}"); };
                recognizer.OnSessionEvent += (s, e) =>
                { Console.WriteLine($"\n    Session event. Event: {e.EventType.ToString()}."); };

                // Starts continuous recognition. Use StopContinuousRecognitionAsync() to stop recognition.
                Console.WriteLine("Say something...");
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                Console.WriteLine("Press any key to stop");
                Console.ReadKey();

                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }
Example #12
        // Speech recognition in the specified spoken language.
        public static async Task RecognitionWithLanguageAsync()
        {
            // <recognitionWithLanguage>
            // Creates an instance of a speech factory with specified
            // subscription key and service region. Replace with your own subscription key
            // and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer for the specified language, using microphone as audio input.
            var lang = "de-de";

            using (var recognizer = factory.CreateSpeechRecognizer(lang))
            {
                // Starts recognizing.
                Console.WriteLine($"Say something in {lang} ...");

                // Starts recognition. It returns when the first utterance has been recognized.
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Checks result.
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"There was an error. Status:{result.RecognitionStatus.ToString()}, Reason:{result.RecognitionFailureReason}");
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.Text}");
                }
            }
            // </recognitionWithLanguage>
        }
Example #13
        private void Form1_Load(object sender, EventArgs e)
        {
            try
            {
                SpeechFactory speechFactory = SpeechFactory.FromSubscription(speechKey, speechRegion);

                // Set the recognition language to Chinese
                recognizer = speechFactory.CreateSpeechRecognizer("zh-CN");

                // Attach recognition event handlers
                // Intermediate result received
                recognizer.IntermediateResultReceived += Recognizer_IntermediateResultReceived;
                // Final result received
                recognizer.FinalResultReceived += Recognizer_FinalResultReceived;
                // Error raised
                recognizer.RecognitionErrorRaised += Recognizer_RecognitionErrorRaised;

                // Start the speech recognizer and continuously listen to audio input
                recognizer.StartContinuousRecognitionAsync();

                // Set up the intent predictor
                LUISRuntimeClient client = new LUISRuntimeClient(new ApiKeyServiceClientCredentials(luisKey));
                client.Endpoint  = luisEndpoint;
                intentPrediction = new Prediction(client);
            }
            catch (Exception ex)
            {
                Log(ex.Message);
            }
        }
Example #14
        private void Form1_Load(object sender, EventArgs e)
        {
            try
            {
                SpeechFactory speechFactory = SpeechFactory.FromSubscription(luisKey, "");
                recognizer = speechFactory.CreateIntentRecognizer("zh-cn");

                // Create the model used by the intent recognizer
                var model = LanguageUnderstandingModel.FromSubscription(luisKey, luisAppId, luisRegion);

                // Add the model's intents to the intent recognizer
                recognizer.AddIntent("None", model, "None");
                recognizer.AddIntent("TurnOn", model, "TurnOn");
                recognizer.AddIntent("TurnOff", model, "TurnOff");

                // Attach recognition event handlers
                // Intermediate result received
                recognizer.IntermediateResultReceived += Recognizer_IntermediateResultReceived;
                // Final result received
                recognizer.FinalResultReceived += Recognizer_FinalResultReceived;
                // Error raised
                recognizer.RecognitionErrorRaised += Recognizer_RecognitionErrorRaised;

                // Start the speech recognizer and continuously listen to audio input
                recognizer.StartContinuousRecognitionAsync();
            }
            catch (Exception ex)
            {
                Log(ex.Message);
            }
        }
Example #15
        // Continuous Recognition with Microphone using Authorization Token
        public static async Task ContinuousRecognitionWithAuthorizationTokenAsync()
        {
            // Gets a fresh authorization token from
            // specified subscription key and service region (e.g., "westus").
            authorizationToken = await GetToken(subscriptionKey, region);

            // Creates an instance of a speech factory with
            // acquired authorization token and service region (e.g., "westus").
            factory = SpeechFactory.FromAuthorizationToken(authorizationToken, region);

            // Define the cancellation token in order to stop the periodic renewal
            // of authorization token after completing recognition.
            CancellationTokenSource source = new CancellationTokenSource();

            // Run task for token renewal in the background.
            var tokenRenewTask = StartTokenRenewTask(source.Token);

            // Creates a speech recognizer using microphone as audio input. The default language is "en-us".
            using (var recognizer = factory.CreateSpeechRecognizer())
            {
                // Subscribe to events.
                recognizer.FinalResultReceived += (s, e) => {
                    var result = e.Result;
                    if (result.RecognitionStatus == RecognitionStatus.Recognized)
                    {
                        Console.WriteLine($"\n    Final result: Status: {result.RecognitionStatus.ToString()}, Text: {result.Text}.");
                    }
                    else
                    {
                        Console.WriteLine($"Recognition status: {result.RecognitionStatus.ToString()}");
                        if (result.RecognitionStatus == RecognitionStatus.Canceled)
                        {
                            Console.WriteLine($"There was an error, reason: {result.RecognitionFailureReason}");
                        }
                        else
                        {
                            Console.WriteLine("No speech could be recognized.\n");
                        }
                    }
                };

                recognizer.RecognitionErrorRaised += (s, e) => {
                    Console.WriteLine($"\n    An error occurred. Status: {e.Status.ToString()}, FailureReason: {e.FailureReason}");
                };

                // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                Console.WriteLine("Say something...");
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                Console.WriteLine("Press any key to stop");
                Console.ReadKey();

                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

                // Cancel cancellationToken to stop the token renewal task.
                source.Cancel();
            }
        }
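
GetToken and StartTokenRenewTask are referenced above but not shown. A plausible sketch modeled on the official token sample's approach of POSTing to the Cognitive Services STS endpoint and renewing slightly before the 10-minute expiry; treat the endpoint path, the renewal interval, the settable factory.AuthorizationToken property, and the System.Net.Http/System.Threading usings as assumptions to verify against your SDK version:

        // Assumed helper: fetches an authorization token from the STS endpoint.
        private static async Task<string> GetToken(string key, string region)
        {
            using (var client = new System.Net.Http.HttpClient())
            {
                client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", key);
                var uri = $"https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken";
                var response = await client.PostAsync(uri, null).ConfigureAwait(false);
                response.EnsureSuccessStatusCode();
                return await response.Content.ReadAsStringAsync().ConfigureAwait(false);
            }
        }

        // Assumed helper: renews the token periodically until cancelled.
        private static async Task StartTokenRenewTask(CancellationToken cancellationToken)
        {
            try
            {
                while (!cancellationToken.IsCancellationRequested)
                {
                    // Tokens expire after 10 minutes; renew a little early.
                    await Task.Delay(TimeSpan.FromMinutes(9), cancellationToken).ConfigureAwait(false);
                    // Assumes the factory exposes a settable AuthorizationToken,
                    // as the token-based samples of this SDK generation do.
                    factory.AuthorizationToken = await GetToken(subscriptionKey, region).ConfigureAwait(false);
                }
            }
            catch (OperationCanceledException)
            {
                // Renewal loop stopped via source.Cancel() above.
            }
        }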
Example #16
        // Speech recognition with audio stream
        public static async Task RecognitionWithAudioStreamAsync()
        {
            stopRecognitionTaskCompletionSource = new TaskCompletionSource<int>();

            // Creates an instance of a speech factory with specified
            // subscription key and service region. Replace with your own subscription key
            // and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Create an audio stream from a wav file.
            // Replace with your own audio file name.
            var stream = Helper.OpenWaveFile(@"YourAudioFile.wav");

            // Creates a speech recognizer using audio stream input.
            using (var recognizer = factory.CreateSpeechRecognizerWithStream(stream))
            {
                // Subscribes to events.
                recognizer.IntermediateResultReceived += (s, e) => {
                    Console.WriteLine($"\n    Partial result: {e.Result.Text}.");
                };

                recognizer.FinalResultReceived += (s, e) => {
                    if (e.Result.RecognitionStatus == RecognitionStatus.Recognized)
                    {
                        Console.WriteLine($"\n    Final result: Status: {e.Result.RecognitionStatus.ToString()}, Text: {e.Result.Text}.");
                    }
                    else
                    {
                        Console.WriteLine($"\n    Final result: Status: {e.Result.RecognitionStatus.ToString()}, FailureReason: {e.Result.RecognitionFailureReason}.");
                    }
                };

                recognizer.RecognitionErrorRaised += (s, e) => {
                    Console.WriteLine($"\n    An error occurred. Status: {e.Status.ToString()}, FailureReason: {e.FailureReason}");
                    stopRecognitionTaskCompletionSource.TrySetResult(0);
                };

                recognizer.OnSessionEvent += (s, e) =>
                {
                    Console.WriteLine($"\nSession event. Event: {e.EventType.ToString()}.");
                    // Stops recognition when the session-stopped event is detected.
                    if (e.EventType == SessionEventType.SessionStoppedEvent)
                    {
                        Console.WriteLine($"\nStop recognition.");
                        stopRecognitionTaskCompletionSource.TrySetResult(0);
                    }
                };

                // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Waits for completion.
                await stopRecognitionTaskCompletionSource.Task.ConfigureAwait(false);

                // Stops recognition.
                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }
Example #17
        public override void OnRequestPermissionsResult(int requestCode, string[] permissions, [GeneratedEnum] Permission[] grantResults)
        {
            base.OnRequestPermissionsResult(requestCode, permissions, grantResults);

            SpeechFactory.ConfigureNativePlatformBindingWithDefaultCertificate();
            factory = SpeechFactory.FromSubscription(ApiKeys.SpeechApiKey, ApiKeys.ServiceRegion);

            ListenForSpeech();
        }
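
This callback fires after a runtime permission request. A hedged sketch of the corresponding request that would trigger it (typically issued in OnCreate), assuming the standard Xamarin.Android support library (Android.Support.V4.App.ActivityCompat); the request code is arbitrary:

            // Sketch: request microphone access so the callback above fires.
            ActivityCompat.RequestPermissions(
                this,
                new string[] { Android.Manifest.Permission.RecordAudio },
                1 /* arbitrary request code */);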
Example #18
        private async Task ProcessFile(string filePath)
        {
            // Creates an instance of a speech factory with specified
            // subscription key and service region. Replace with your own subscription key
            // and service region (e.g., "westus").
            //var fileInfoContainer = CheckValidityAndReturnFileInfo(filePath).GetEnumerator();
            //fileInfoContainer.MoveNext();
            //var filetype = fileInfoContainer.Current.Key;
            //var bitrate = fileInfoContainer.Current.Value;

            var fileName = Path.GetFileNameWithoutExtension(filePath);
            var factory  = SpeechFactory.FromSubscription(_apiKey, _apiRegion);
            var isStop   = false;

            if (!Directory.Exists(TranscriptDestination))
            {
                Directory.CreateDirectory(TranscriptDestination);
            }

            if (File.Exists(TranscriptDestination + "azure_" + fileName + ".txt"))
            {
                File.Delete(TranscriptDestination + "azure_" + fileName + ".txt");
            }


            // Creates a speech recognizer using microphone as audio input.
            using (var recognizer = factory.CreateSpeechRecognizerWithFileInput(filePath, "en-US"))
            {
                recognizer.FinalResultReceived += (s, e) =>
                {
                    if ((e.Result.RecognitionStatus == RecognitionStatus.Recognized))
                    {
                        File.AppendAllText(TranscriptDestination + "azure_" + fileName + ".txt", e.Result.Text);
                    }
                };

                recognizer.OnSessionEvent += (s, e) =>
                {
                    isStop = e.EventType != SessionEventType.SessionStartedEvent;
                };

                // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                await recognizer.StartContinuousRecognitionAsync();

                // Waits until the session stops (flag-based; a TaskCompletionSource
                // variant is sketched below).
                while (!isStop)
                {
                    await Task.Delay(100);
                }

                await recognizer.StopContinuousRecognitionAsync();
            }
        }
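
As in Example #7 above, the session-stopped wait can be expressed with a TaskCompletionSource instead of polling a flag. A sketch of that variant, using only calls already shown in these examples (the isStop field is dropped entirely):

                var sessionStopped = new TaskCompletionSource<int>();
                recognizer.OnSessionEvent += (s, e) =>
                {
                    if (e.EventType == SessionEventType.SessionStoppedEvent)
                    {
                        sessionStopped.TrySetResult(0);
                    }
                };

                await recognizer.StartContinuousRecognitionAsync();
                await sessionStopped.Task;
                await recognizer.StopContinuousRecognitionAsync();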
Example #19
        public MainWindow()
        {
            InitializeComponent();

            var basicFactory = SpeechFactory.FromSubscription("738e230c827f4a13b4406fac6d08c179", "westus");

            recognizer = basicFactory.CreateSpeechRecognizer("zh-CN");
            recognizer.FinalResultReceived += Recognizer_FinalResultReceived;
            recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
        }
Example #20

        /// <summary>
        /// Initializes the factory object with subscription key and region
        /// Initializes the recognizer object with a TranslationRecognizer
        /// Subscribes the recognizer to recognition Event Handlers
        /// If recognition is running, starts a thread which stops the recognition
        /// </summary>
        private void CreateRecognizer()
        {
            this.factory    = SpeechFactory.FromSubscription(SubscriptionKey, Region);
            this.recognizer = this.factory.CreateTranslationRecognizer(FromLanguage, ToLanguages, voice);

            this.recognizer.IntermediateResultReceived += this.OnPartialResponseReceivedHandler;
            this.recognizer.FinalResultReceived        += this.OnFinalResponse;
            this.recognizer.SynthesisResultReceived    += this.OnSynthesis;
            this.recognizer.RecognitionErrorRaised     += this.OnError;
        }
Example #21

        // Intent recognition in the specified language, using microphone.
        public static async Task RecognitionWithMicrophoneUsingLanguageAsync()
        {
            // <intentRecognitionWithLanguage>
            // Creates an instance of a speech factory with specified subscription key
            // and service region. Note that in contrast to other services supported by
            // the Cognitive Service Speech SDK, the Language Understanding service
            // requires a specific subscription key from https://www.luis.ai/.
            // The Language Understanding service calls the required key 'endpoint key'.
            // Once you've obtained it, replace below with your own Language Understanding subscription key
            // and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("YourLanguageUnderstandingSubscriptionKey", "YourLanguageUnderstandingServiceRegion");

            // Creates an intent recognizer in the specified language using microphone as audio input.
            var lang = "de-de";

            using (var recognizer = factory.CreateIntentRecognizer(lang))
            {
                // Creates a Language Understanding model using the app id, and adds specific intents from your model
                var model = LanguageUnderstandingModel.FromAppId("YourLanguageUnderstandingAppId");
                recognizer.AddIntent("id1", model, "YourLanguageUnderstandingIntentName1");
                recognizer.AddIntent("id2", model, "YourLanguageUnderstandingIntentName2");
                recognizer.AddIntent("any-IntentId-here", model, "YourLanguageUnderstandingIntentName3");

                // Starts recognizing.
                Console.WriteLine("Say something in " + lang + "...");

                // Performs recognition.
                // RecognizeAsync() returns when the first utterance has been recognized, so it is suitable
                // only for single shot recognition like command or query. For long-running recognition, use
                // StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Checks result.
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"Recognition status: {result.RecognitionStatus.ToString()}");
                    if (result.RecognitionStatus == RecognitionStatus.Canceled)
                    {
                        Console.WriteLine($"There was an error, reason: {result.RecognitionFailureReason}");
                    }
                    else
                    {
                        Console.WriteLine("No speech could be recognized.\n");
                    }
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.Text}.");
                    Console.WriteLine($"\n    Intent Id: {result.IntentId}.");
                    Console.WriteLine($"\n    Language Understanding JSON: {result.Properties.Get<string>(ResultPropertyKind.LanguageUnderstandingJson)}.");
                }
            }
            // </intentRecognitionWithLanguage>
        }
Example #22
        private async void Window_Loaded(object sender, RoutedEventArgs e)
        {
            factory    = SpeechFactory.FromSubscription(subscriptionKey, region);
            recognizer = factory.CreateSpeechRecognizer();

            recognizer.IntermediateResultReceived += Recognizer_IntermediateResultReceived;
            recognizer.FinalResultReceived        += Recognizer_FinalResultReceived;

            await recognizer.StartContinuousRecognitionAsync();

            LastMessageBlock.Text = "Ready!";
        }
Example #23
 private void Form1_Load(object sender, EventArgs e)
 {
     try
     {
         // Step 1
         // Initialize the Speech SDK and start the recognizer for speech-to-text
         // The key and region can be found at https://azure.microsoft.com/zh-cn/try/cognitive-services/my-apis/?api=speech-services
         // Example key: 5ee7ba6869f44321a40751967accf7a9
         // Example region: westus
         //SpeechFactory speechFactory = SpeechFactory.FromSubscription("Key 1 or Key 2", "Region");
         SpeechFactory speechFactory = SpeechFactory.FromSubscription("5ee7ba6869f44321a40751967accf7a9", "westus");
Example #24
 private void CreateFactory()
 {
     if (string.IsNullOrEmpty(this._subscriptionKey) || string.IsNullOrEmpty(this._region))
     {
         Console.WriteLine("Creating speech factory error: invalid key or region.");
         return;
     }
     else
     {
         Console.WriteLine(string.Format(CultureInfo.InvariantCulture, "Creating speech factory with key of {0} and region of {1}.", this._subscriptionKey, this._region));
         this._factory = SpeechFactory.FromSubscription(_subscriptionKey, _region);
     }
 }
Example #25

        /// <summary>
        /// Creates Recognizer with English language and microphone
        /// Creates a factory with subscription key and selected region
        /// Waits on RunRecognition
        /// </summary>
        private async Task CreateMicrophoneReco()
        {
            thinking = new SoundPlayer(@"../../Resources/SpeechResponse_Thinking.wav");
            // TODO: support specifying a different region.

            var basicFactory = SpeechFactory.FromSubscription(this.MicrosoftSpeechApiKey, this.Region);

            SpeechRecognizer basicRecognizer;

            using (basicRecognizer = basicFactory.CreateSpeechRecognizer(this.DefaultLocale))
            {
                await this.RunRecognizer(basicRecognizer, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
            }
        }
Example #26
        // Speech recognition in the specified spoken language and uses detailed output format.
        public static async Task RecognitionWithLanguageAndDetailedOutputAsync()
        {
            // <recognitionWithLanguageAndDetailedOutputFormat>
            // Creates an instance of a speech factory with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var factory = SpeechFactory.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer for the specified language, using microphone as audio input.
            // Replace the language with your language in BCP-47 format, e.g. en-US.
            var lang = "de-DE";

            // Requests detailed output format.
            using (var recognizer = factory.CreateSpeechRecognizer(lang, OutputFormat.Detailed))
            {
                // Starts recognizing.
                Console.WriteLine($"Say something in {lang} ...");

                // Performs recognition.
                // RecognizeAsync() returns when the first utterance has been recognized, so it is suitable
                // only for single shot recognition like command or query. For long-running recognition, use
                // StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Checks result.
                if (result.RecognitionStatus != RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"Recognition status: {result.RecognitionStatus.ToString()}");
                    if (result.RecognitionStatus == RecognitionStatus.Canceled)
                    {
                        Console.WriteLine($"There was an error, reason: {result.RecognitionFailureReason}");
                    }
                    else
                    {
                        Console.WriteLine("No speech could be recognized.\n");
                    }
                }
                else
                {
                    Console.WriteLine($"We recognized: {result.Text}, Offset: {result.OffsetInTicks}, Duration: {result.Duration}.");
                    Console.WriteLine("Detailed results:");
                    var detailedResults = result.Best();
                    foreach (var item in detailedResults)
                    {
                        Console.WriteLine($"Confidence: {item.Confidence}, Text: {item.Text}, LexicalForm: {item.LexicalForm}, NormalizedForm: {item.NormalizedForm}, MaskedNormalizedForm: {item.MaskedNormalizedForm}");
                    }
                }
            }
            // </recognitionWithLanguageAndDetailedOutputFormat>
        }
Example #27
        public static async Task RecognitionWithLUIS()
        {
            // Create a LUIS endpoint key in the Azure portal, add the key on
            // the LUIS publish page, and use it again here. Do not use a starter key!
            var luisSubscriptionKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
            var luisRegion          = "westus";
            var luisAppId           = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx";
            var speechRegion        = "";

            // region must be empty string
            // must use same LUIS guid in both places
            var factory = SpeechFactory.FromSubscription(luisSubscriptionKey, speechRegion);

            // Create an intent recognizer using microphone as audio input.
            using (var recognizer = factory.CreateIntentRecognizer())
            {
                // Create a LanguageUnderstandingModel to use with the intent recognizer
                var model = Microsoft.CognitiveServices.Speech.Intent.LanguageUnderstandingModel.FromSubscription(luisSubscriptionKey, luisAppId, luisRegion);

                // Add intents from your LU model to your intent recognizer
                // These intents are based on the Human Resources model imported at
                // ../../quickstarts/HumanResources.json
                recognizer.AddIntent("None", model, "None");
                recognizer.AddIntent("FindForm", model, "FindForm");
                recognizer.AddIntent("GetEmployeeBenefits", model, "GetEmployeeBenefits");
                recognizer.AddIntent("GetEmployeeOrgChart", model, "GetEmployeeOrgChart");
                recognizer.AddIntent("MoveAssetsOrPeople", model, "MoveAssetsOrPeople");

                // Prompt the user to speak
                Console.WriteLine("Say something...");

                // Start recognition; will return the first result recognized
                var result = await recognizer.RecognizeAsync().ConfigureAwait(false);

                // Check the reason returned
                if (result.RecognitionStatus == RecognitionStatus.Recognized)
                {
                    Console.WriteLine($"{result.ToString()}");
                }
                else if (result.RecognitionStatus == RecognitionStatus.NoMatch)
                {
                    Console.WriteLine("We didn't hear you say anything...");
                }
                else if (result.RecognitionStatus == RecognitionStatus.Canceled)
                {
                    Console.WriteLine($"There was an error; reason {result.RecognitionStatus}-{result.RecognizedText}");
                }
            }
        }
Example #28
        // spd: optional, speaking rate, 0-15, default 5 (medium)
        // pit: optional, pitch, 0-15, default 5 (medium)
        // vol: optional, volume, 0-15, default 5 (medium)
        // per: optional, voice; 0 = standard female, 1 = standard male, 3 = emotional synthesis (Du Xiaoyao), 4 = emotional synthesis (Du Yaya); default is standard female


        private void Form1_Load(object sender, EventArgs e)
        {
            this.BackgroundImage = Image.FromFile("house.jpg");
            Tts("欢迎您使用智能家居服务"); // "Welcome to the smart home service"
            try
            {
                // Step 1
                // Initialize the Speech SDK and start the recognizer for speech-to-text
                // The key and region can be found at https://azure.microsoft.com/zh-cn/try/cognitive-services/my-apis/?api=speech-services
                // Example key: 5ee7ba6869f44321a40751967accf7a9
                // Example region: westus
                SpeechFactory speechFactory = SpeechFactory.FromSubscription("a566f8b985f842159e97a1f790906c0b", "westus");

                // Recognize Chinese
                recognizer = speechFactory.CreateSpeechRecognizer("zh-CN");

                // Intermediate results during recognition
                recognizer.IntermediateResultReceived += Recognizer_IntermediateResultReceived;
                // Final recognition result
                recognizer.FinalResultReceived += Recognizer_FinalResultReceived;
                // Error handling
                recognizer.RecognitionErrorRaised += Recognizer_RecognitionErrorRaised;
                buttonClick();
            }
            catch (Exception ex)
            {
                if (ex is System.TypeInitializationException)
                {
                    Log("语音SDK不支持Any CPU, 请更改为x64");
                }
                else
                {
                    Log("初始化出错,请确认麦克风工作正常");
                    Log("已降级到文本语言理解模式");

                    TextBox inputBox = new TextBox();
                    inputBox.Text     = "";
                    inputBox.Size     = new Size(300, 26);
                    inputBox.Location = new Point(10, 10);
                    inputBox.KeyDown += inputBox_KeyDown;
                    Controls.Add(inputBox);
                    buttonClick();
                    button1.Visible = false;
                }
            }
        }
Example #29
        bool flag = false; // Whether a second recognition pass is needed
        private void Form1_Load(object sender, EventArgs e)
        {
            entities.Add("location", "null");
            entities.Add("device", "null");

            try
            {
                // Step 1
                // Initialize the Speech SDK and start the recognizer for speech-to-text
                // The key and region can be found at https://azure.microsoft.com/zh-cn/try/cognitive-services/my-apis/?api=speech-services
                // Example key: 5ee7ba6869f44321a40751967accf7a9
                // Example region: westus
                SpeechFactory speechFactory = SpeechFactory.FromSubscription("aee80335bb2049d593c906f5aa208b50", "westus");

                // Recognize Chinese
                recognizer = speechFactory.CreateSpeechRecognizer("zh-CN");

                // Intermediate results during recognition
                recognizer.IntermediateResultReceived += Recognizer_IntermediateResultReceived;
                // Final recognition result
                recognizer.FinalResultReceived += Recognizer_FinalResultReceived;
                // Error handling
                recognizer.RecognitionErrorRaised += Recognizer_RecognitionErrorRaised;
            }
            catch (Exception ex)
            {
                if (ex is System.TypeInitializationException)
                {
                    Log("语音SDK不支持Any CPU, 请更改为x64");
                }
                else
                {
                    Log("初始化出错,请确认麦克风工作正常");
                    Log("已降级到文本语言理解模式");

                    TextBox inputBox = new TextBox();
                    inputBox.Text     = "";
                    inputBox.Size     = new Size(300, 26);
                    inputBox.Location = new Point(10, 10);
                    inputBox.KeyDown += inputBox_KeyDown;
                    Controls.Add(inputBox);

                    button1.Visible = false;
                }
            }
        }
Example #30
        static void Main(string[] args)
        {
            factory = SpeechFactory.FromSubscription(ConfigurationManager.AppSettings["SubscriptionKey"], ConfigurationManager.AppSettings["ServiceRegion"]);
            Console.WriteLine("1: Mic Input \n 2: WAV input");
            Console.Write("Chosse an option:");
            int selectedOption = Convert.ToInt16(Console.ReadLine());

            switch (selectedOption)
            {
            case 1:
                ContinuousRecognitionAsyncMic().Wait();
                break;

            case 2:
                ContinuousRecognitionAsyncWAV().Wait();
                break;
            }
        }
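
Convert.ToInt16 throws on non-numeric input, and the switch silently ignores unknown options. A more defensive sketch of the same menu loop (behavior otherwise unchanged):

        static void Main(string[] args)
        {
            factory = SpeechFactory.FromSubscription(
                ConfigurationManager.AppSettings["SubscriptionKey"],
                ConfigurationManager.AppSettings["ServiceRegion"]);

            Console.WriteLine("1: Mic input\n2: WAV input");
            Console.Write("Choose an option: ");

            // Validate the choice instead of letting Convert.ToInt16 throw.
            if (!int.TryParse(Console.ReadLine(), out var selectedOption))
            {
                Console.WriteLine("Please enter a number.");
                return;
            }

            switch (selectedOption)
            {
            case 1:
                ContinuousRecognitionAsyncMic().Wait();
                break;

            case 2:
                ContinuousRecognitionAsyncWAV().Wait();
                break;

            default:
                Console.WriteLine("Unknown option.");
                break;
            }
        }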