Example #1
        public static async Task <string> ProcessAudioAsync()
        {
            string       key              = "<KEY>";
            string       region           = "<REGION>";
            SpeechConfig configRecognizer = SpeechConfig.FromSubscription(key, region);
            string       processedAudio   = "";

            bool isRecorded = CheckAudioFile();

            if (isRecorded)
            {
                using (AudioConfig audioInput = AudioConfig.FromWavFileInput(_audioFile))
                    using (IntentRecognizer recognizer = new IntentRecognizer(configRecognizer, audioInput))
                    {
                        TaskCompletionSource <int> stopRecognition = new TaskCompletionSource <int>();

                        recognizer.Recognized += (s, e) =>
                        {
                            if (e.Result.Reason == ResultReason.RecognizedSpeech)
                            {
                                processedAudio = e.Result.Text;
                            }
                        };

                        recognizer.Canceled += (s, e) => {
                            if (e.Reason == CancellationReason.Error)
                            {
                                //log
                            }
                            stopRecognition.TrySetResult(0);
                        };

                        recognizer.SessionStarted += (s, e) => {
                            //log
                        };

                        recognizer.SessionStopped += (s, e) => {
                            //log

                            stopRecognition.TrySetResult(0);
                        };

                        await recognizer.StartContinuousRecognitionAsync();

                        Task.WaitAny(new[] { stopRecognition.Task });

                        await recognizer.StopContinuousRecognitionAsync();
                    }

                //log

                return(processedAudio);
            }
            else
            {
                //log

                return(processedAudio);
            }
        }
Example #2
        // Intent recognition using microphone.
        public static async Task RecognitionWithMicrophoneAsync()
        {
            // <intentRecognitionWithMicrophone>
            // Creates an instance of a speech config with specified subscription key
            // and service region. Note that in contrast to other services supported by
            // the Cognitive Services Speech SDK, the Language Understanding service
            // requires a specific subscription key from https://www.luis.ai/.
            // The Language Understanding service calls the required key 'endpoint key'.
            // Once you've obtained it, replace below with your own Language Understanding subscription key
            // and service region (e.g., "westus").
            // The default language is "en-us".
            var config = SpeechConfig.FromSubscription("YourLanguageUnderstandingSubscriptionKey", "YourLanguageUnderstandingServiceRegion");

            // Creates an intent recognizer using microphone as audio input.
            using (var recognizer = new IntentRecognizer(config))
            {
                // Creates a Language Understanding model using the app id, and adds specific intents from your model
                var model = LanguageUnderstandingModel.FromAppId("YourLanguageUnderstandingAppId");
                recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName1", "id1");
                recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName2", "id2");
                recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName3", "any-IntentId-here");

                // Starts recognizing.
                Console.WriteLine("Say something...");

                // Performs recognition. RecognizeOnceAsync() returns when the first utterance has been recognized,
                // so it is suitable only for single shot recognition like command or query. For long-running
                // recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                // Checks result.
                if (result.Reason == ResultReason.RecognizedIntent)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent Id: {result.IntentId}.");
                    Console.WriteLine($"    Language Understanding JSON: {result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}.");
                }
                else if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent not recognized.");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
            // </intentRecognitionWithMicrophone>
        }
Example #3
        private async Task <LuisResult> RecognizeSpeechWithIntentRecognizerAsync(string speechFile)
        {
            var speechConfig = SpeechConfig.FromEndpoint(this.LuisConfiguration.SpeechEndpoint, this.LuisConfiguration.EndpointKey);

            using (var audioInput = AudioConfig.FromWavFileInput(speechFile))
                using (var recognizer = new IntentRecognizer(speechConfig, audioInput))
                {
                    // Add intents to intent recognizer
                    var model = LanguageUnderstandingModel.FromAppId(this.LuisConfiguration.AppId);
                    recognizer.AddIntent(model, "None", "None");
                    var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                    // Checks result.
                    // For some reason, RecognizeOnceAsync always returns ResultReason.RecognizedSpeech
                    // when an intent is recognized. This is because we don't add all possible intents
                    // (note that this IS intentional) in code via the AddIntent method.
                    if (result.Reason == ResultReason.RecognizedSpeech || result.Reason == ResultReason.RecognizedIntent)
                    {
                        var content = result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult);
                        return(JsonConvert.DeserializeObject <LuisResult>(content));
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        Logger.LogWarning("Received 'NoMatch' result from Cognitive Services.");
                        return(null);
                    }
                    else
                    {
                        throw new InvalidOperationException($"Failed to get speech recognition result. Reason = '{result.Reason}'");
                    }
                }
        }
Example #4
        private void Form1_Load(object sender, EventArgs e)
        {
            try
            {
                SpeechFactory speechFactory = SpeechFactory.FromSubscription(luisKey, "");
                recognizer = speechFactory.CreateIntentRecognizer("zh-cn");

                // Create the model used by the intent recognizer
                var model = LanguageUnderstandingModel.FromSubscription(luisKey, luisAppId, luisRegion);

                // Add the intents from the model to the intent recognizer
                recognizer.AddIntent("None", model, "None");
                recognizer.AddIntent("TurnOn", model, "TurnOn");
                recognizer.AddIntent("TurnOff", model, "TurnOff");

                // Hook up recognition events
                // Intermediate result received
                recognizer.IntermediateResultReceived += Recognizer_IntermediateResultReceived;
                // Final result received
                recognizer.FinalResultReceived += Recognizer_FinalResultReceived;
                // An error occurred
                recognizer.RecognitionErrorRaised += Recognizer_RecognitionErrorRaised;

                // Start the speech recognizer and begin continuously listening to audio input
                recognizer.StartContinuousRecognitionAsync();
            }
            catch (Exception ex)
            {
                Log(ex.Message);
            }
        }
Example #5
        public async Task StartAsync(string fileName = null)
        {
            var speechConfig = SpeechConfig.FromSubscription(this.settings.SubscriptionKey, this.settings.Region);

            speechConfig.SpeechRecognitionLanguage = "de-de";
            speechConfig.OutputFormat = OutputFormat.Detailed;

            using (var audioInput = fileName == null ? AudioConfig.FromDefaultMicrophoneInput() : AudioConfig.FromWavFileInput(fileName))
            {
                using (var intentRecognizer = new IntentRecognizer(speechConfig, audioInput))
                {
                    stopRecognition = new TaskCompletionSource <int>();

                    var model = LanguageUnderstandingModel.FromAppId(this.settings.LuisAppId);

                    intentRecognizer.AddAllIntents(model);

                    intentRecognizer.SessionStarted      += IntentRecognizer_SessionStarted;
                    intentRecognizer.Recognized          += IntentRecognizer_Recognized;
                    intentRecognizer.Recognizing         += IntentRecognizer_Recognizing;
                    intentRecognizer.SessionStopped      += IntentRecognizer_SessionStopped;
                    intentRecognizer.SpeechEndDetected   += IntentRecognizer_SpeechEndDetected;
                    intentRecognizer.SpeechStartDetected += IntentRecognizer_SpeechStartDetected;
                    intentRecognizer.Canceled            += IntentRecognizer_Canceled;

                    await intentRecognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    Task.WaitAny(stopRecognition.Task);

                    await intentRecognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
        }
Example #6
        public async Task RecognizeSpeech()
        {
            var audioConfig  = AudioConfig.FromDefaultMicrophoneInput();
            var speechConfig = SpeechConfig.FromSubscription(key, "westus2");

            // Creates a speech recognizer.
            using (var recognizer = new IntentRecognizer(speechConfig, audioConfig))
            {
                // Hide user secrets later
                var model = LanguageUnderstandingModel.FromAppId(Environment.GetEnvironmentVariable("LUIS_APP_ID"));
                recognizer.AddAllIntents(model);

                var stopRecognition = new TaskCompletionSource <int>();
                // Can add logic to exit using voice command, "Thanks see you at the window" etc.
                // Subscribe to appropriate events
                recognizer.Recognizing += (s, e) =>
                {
                    // Use this to send partial responses
                    Console.WriteLine($"Partial: {e.Result.Text}");
                };

                recognizer.Recognized += (s, e) =>
                {
                    var exit = ProcessRecognizedText(s, e);
                    if (exit)
                    {
                        recognizer.StopContinuousRecognitionAsync().Wait(); //ConfigureAwait(false);
                    }
                };

                recognizer.SessionStarted += (s, e) =>
                {
                    Console.WriteLine("Session started event.");
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    Console.WriteLine("Session stopped event.");
                    stopRecognition.TrySetResult(0);
                };

                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine(e.ErrorDetails);
                    stopRecognition.TrySetResult(0);
                };

                // Instantiate new Order object
                _order = new Order();

                Console.WriteLine("Say something to get started, or \"Exit\" to quit.");
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Task.WaitAny blocks here until recognition stops, keeping the recognizer alive inside the using block
                Task.WaitAny(new[] { stopRecognition.Task });
            }
        }
Example #7
        /// <summary>
        /// Recognizer methods are run in the order they are registered.
        /// </summary>
        public IntentRecognizerMiddleware OnRecognize(IntentRecognizer recognizer)
        {
            if (recognizer == null)
                throw new ArgumentNullException(nameof(recognizer));

            _intentRecognizers.AddLast(recognizer);

            return this;
        }
Example #8
        public IntentRegonisor()
        {
            var config = SpeechConfig.FromSubscription(
                Secret.LuisPredictionKey, Secret.Region);

            var model = LanguageUnderstandingModel.FromAppId(Secret.LuisAppId);

            recognizer = new IntentRecognizer(config);

            recognizer.AddAllIntents(model);
        }
Example #9
    /// <summary>
    /// IntentRecognizer & event handlers cleanup after use
    /// </summary>
    private async void StopIntentRecognition()
    {
        if (recognizer != null)
        {
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

            recognizer.Recognizing -= Recognizer_Recognizing;
            recognizer.Recognized  -= Recognizer_Recognized;
            recognizer.Canceled    -= Recognizer_Canceled;
            recognizer.Dispose();
            recognizer = null;
        }
    }
Example #10
File: Program.cs Project: petefield/speech
        private static async Task <string> awaitCommand(IntentRecognizer intentRecognizer, SpeechSynthesizer synth)
        {
            await SayAsync(synth, "Yes sir?");

            var r = await intentRecognizer.RecognizeOnceAsync().ConfigureAwait(false);

            switch (r.Reason)
            {
            case ResultReason.RecognizedIntent:
                return(r.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult));

            default:
                return(null);
            }
        }
Example #11
    /// <summary>
    /// Creates and initializes the IntentRecognizer
    /// </summary>
    void CreateIntentRecognizer()
    {
        if (LUISAppKey.Length == 0 || LUISAppKey == "YourLUISAppKey")
        {
            recognizedString = "You forgot to obtain Cognitive Services LUIS credentials and inserting them in this app." + Environment.NewLine +
                               "See the README file and/or the instructions in the Awake() function for more info before proceeding.";
            errorString = "ERROR: Missing service credentials";
            UnityEngine.Debug.LogFormat(errorString);
            return;
        }
        UnityEngine.Debug.LogFormat("Creating Intent Recognizer.");
        recognizedString = "Initializing intent recognition, please wait...";

        if (intentreco == null)
        {
            // Creates an instance of a speech config with specified subscription key
            // and service region. Note that in contrast to other services supported by
            // the Cognitive Services Speech SDK, the Language Understanding service
            // requires a specific subscription key from https://www.luis.ai/.
            // The Language Understanding service calls the required key 'endpoint key'.
            // Once you've obtained it, replace below with your own Language Understanding subscription key
            // and service region (e.g., "westus").
            // The default language is "en-us".
            var config = SpeechConfig.FromSubscription(LUISAppKey, LUISRegion);
            // Creates an intent recognizer using microphone as audio input.
            intentreco = new IntentRecognizer(config);

            // Creates a Language Understanding model using the app id, and adds specific intents from your model
            var model = LanguageUnderstandingModel.FromAppId(LUISAppId);
            intentreco.AddIntent(model, "ChangeColor", "color");
            intentreco.AddIntent(model, "Transform", "transform");
            intentreco.AddIntent(model, "Help", "help");
            intentreco.AddIntent(model, "None", "none");

            // Subscribes to speech events.
            intentreco.Recognizing         += RecognizingHandler;
            intentreco.Recognized          += RecognizedHandler;
            intentreco.SpeechStartDetected += SpeechStartDetectedHandler;
            intentreco.SpeechEndDetected   += SpeechEndDetectedHandler;
            intentreco.Canceled            += CanceledHandler;
            intentreco.SessionStarted      += SessionStartedHandler;
            intentreco.SessionStopped      += SessionStoppedHandler;
        }
        UnityEngine.Debug.LogFormat("CreateIntentRecognizer exit");
    }
Example #12
File: Program.cs Project: petefield/speech
        async static Task Main(string[] args)
        {
            const string WAKE_WORD    = "hey computer";
            var          speechConfig = SpeechConfig.FromSubscription("e073d2855d604ddda74ba6518ab2e6b3", "westeurope");
            var          Intentconfig = SpeechConfig.FromSubscription("9051c66d5ba949ac84e32b01c37eb9b4", "westus");
            var          audioConfig  = AudioConfig.FromDefaultMicrophoneInput();

            var model = LanguageUnderstandingModel.FromAppId("7f7a9344-69b6-4582-a01d-19ffa3c9bed8");

            var continuousRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
            var intentRecognizer     = new IntentRecognizer(Intentconfig, audioConfig);

            intentRecognizer.AddAllIntents(model);

            var  synthesizer        = new SpeechSynthesizer(speechConfig);
            bool _waitingForCommand = false;

            continuousRecognizer.Recognized += async(s, e) =>
            {
                if (!_waitingForCommand)
                {
                    if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");

                        if (e.Result.Text.Contains(WAKE_WORD, StringComparison.CurrentCultureIgnoreCase))
                        {
                            Console.WriteLine($"RECOGNIZED: {WAKE_WORD}");
                            _waitingForCommand = true;
                            await ParseCommand(synthesizer, await awaitCommand(intentRecognizer, synthesizer));

                            _waitingForCommand = false;
                            Console.WriteLine("Listening for wake word.");
                        }
                    }
                }
            };

            await continuousRecognizer.StartContinuousRecognitionAsync();

            Console.Write("Press any key!");
            Console.Read();
        }
Example #13
    /// <summary>
    /// IntentRecognizer & event handlers cleanup after use
    /// </summary>
    public async void StopIntentRecognition()
    {
        if (intentreco != null)
        {
            await intentreco.StopContinuousRecognitionAsync().ConfigureAwait(false);

            intentreco.Recognizing         -= RecognizingHandler;
            intentreco.Recognized          -= RecognizedHandler;
            intentreco.SpeechStartDetected -= SpeechStartDetectedHandler;
            intentreco.SpeechEndDetected   -= SpeechEndDetectedHandler;
            intentreco.Canceled            -= CanceledHandler;
            intentreco.SessionStarted      -= SessionStartedHandler;
            intentreco.SessionStopped      -= SessionStoppedHandler;
            intentreco.Dispose();
            intentreco       = null;
            recognizedString = "Intent Recognizer is now stopped.";
            UnityEngine.Debug.LogFormat("Intent Recognizer is now stopped.");
        }
    }
Example #14
    private async void RecognizeIntentAsync()
    {
        recognizer = new IntentRecognizer(luisConfig);

        // Creates a Language Understanding model using the app id, and adds specific intents from your model
        var model = LanguageUnderstandingModel.FromAppId(LuisAppId);

        recognizer.AddIntent(model, "changeColor", "changeColor");
        recognizer.AddIntent(model, "moveObject", "moveObject");

        // add event handlers
        recognizer.Recognized  += Recognizer_Recognized;
        recognizer.Recognizing += Recognizer_Recognizing;
        recognizer.Canceled    += Recognizer_Canceled;

        //Start recognizing
        Debug.Log("Say something...");
        await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
    }
Example #15
        private async Task <SpeechLuisResult> RecognizeSpeechWithIntentRecognizerAsync(string speechFile)
        {
            if (this.LuisConfiguration.IsStaging)
            {
                throw new NotSupportedException("Testing LUIS from speech with the Speech SDK does not currently support the LUIS staging endpoint.");
            }

            var speechConfig = SpeechConfig.FromSubscription(this.LuisConfiguration.SpeechKey, this.LuisConfiguration.SpeechRegion);

            using (var audioInput = AudioConfig.FromWavFileInput(speechFile))
                using (var recognizer = new IntentRecognizer(speechConfig, audioInput))
                {
                    // Add intents to intent recognizer
                    var model = LanguageUnderstandingModel.FromAppId(this.LuisConfiguration.AppId);
                    recognizer.AddIntent(model, "None", "None");
                    var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                    // Checks result.
                    // For some reason, RecognizeOnceAsync always returns ResultReason.RecognizedSpeech
                    // when an intent is recognized. This is because we don't add all possible intents
                    // (note that this IS intentional) in code via the AddIntent method.
                    if (result.Reason == ResultReason.RecognizedSpeech || result.Reason == ResultReason.RecognizedIntent)
                    {
                        var content           = result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult);
                        var luisResult        = JsonConvert.DeserializeObject <LuisResult>(content);
                        var speechContent     = result.Properties.GetProperty(PropertyId.SpeechServiceResponse_JsonResult);
                        var speechContentJson = JObject.Parse(speechContent);
                        var textScore         = speechContentJson["NBest"]?.Max(t => t.Value <double?>("Confidence"));
                        return(new SpeechLuisResult(luisResult, textScore));
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        Logger.LogWarning("Received 'NoMatch' result from Cognitive Services.");
                        return(null);
                    }
                    else
                    {
                        throw new InvalidOperationException($"Failed to get speech recognition result. Reason = '{result.Reason}'");
                    }
                }
        }
Example #16
        public async Task <Result <LuisResult> > Recognize(string filePath)
        {
            // LUIS credentials
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourRegion");

            config.SpeechRecognitionLanguage = "pt-br";

            using (var audioInput = AudioConfig.FromWavFileInput(filePath))
            {
                using (var recognizer = new IntentRecognizer(config, audioInput))
                {
                    var model = LanguageUnderstandingModel.FromAppId("YourLuisAppId");
                    recognizer.AddIntent(model, "intent.iot.device_off", "device_off");
                    recognizer.AddIntent(model, "intent.iot.device_on", "device_on");

                    var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                    if (result.Reason == ResultReason.RecognizedIntent)
                    {
                        var js = new DataContractJsonSerializer(typeof(LuisResult));
                        var ms = new MemoryStream(Encoding.UTF8.GetBytes(result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)));
                        return(new Result <LuisResult>((js.ReadObject(ms) as LuisResult)));
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        return(new Result <LuisResult>(null, false, "Falha no reconhecimento do áudio!"));
                    }
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = CancellationDetails.FromResult(result);
                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            return(new Result <LuisResult>(null, false, $"Motivo: {cancellation.Reason}. Detalhes: {cancellation.ErrorDetails}"));
                        }
                        return(new Result <LuisResult>(null, false, $"Motivo: {cancellation.Reason}."));
                    }
                }
            }
            return(new Result <LuisResult>(null, false, "Erro desconhecido!"));
        }
Example #17
        static async Task RecognizeIntentAsync()
        {
            //simple speech recognition with intent
            var config = SpeechConfig.FromSubscription(luisKey, luisRegion);

            using (var recognizer = new IntentRecognizer(config))
            {
                var model = LanguageUnderstandingModel.FromAppId(luisAppId);

                //add LUIS intents, you have the option to add only selected intents
                recognizer.AddAllIntents(model);

                Console.WriteLine("Say something...");

                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                if (result.Reason == ResultReason.RecognizedIntent)
                {
                    Console.WriteLine($"Recognized: Text = {result.Text}");
                    Console.WriteLine($"Language Understanding JSON: {result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}");
                }
                else if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"Recognized: Text = {result.Text}");
                    Console.WriteLine("Intent not recognized.");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine("Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"Canceled. Reason = {cancellation.Reason}");
                }
            }
        }
Example #18
        public static async Task RecognizeIntentAsync()
        {
            // </skeleton_1>
            // Creates an instance of a speech config with specified subscription key
            // and service region. Note that in contrast to other services supported by
            // the Cognitive Services Speech SDK, the Language Understanding service
            // requires a specific subscription key from https://www.luis.ai/.
            // The Language Understanding service calls the required key 'endpoint key'.
            // Once you've obtained it, replace below with your own Language Understanding subscription key
            // and service region (e.g., "westus").
            // The default language is "en-us".
            // <create_speech_configuration>
            var config = SpeechConfig.FromSubscription(
                "YourLanguageUnderstandingSubscriptionKey",
                "YourLanguageUnderstandingServiceRegion");

            // </create_speech_configuration>

            // <create_intent_recognizer_1>
            // Creates an intent recognizer using microphone as audio input.
            using (var recognizer = new IntentRecognizer(config))
            {
                // </create_intent_recognizer_1>

                // <add_intents>
                // Creates a Language Understanding model using the app id, and adds specific intents from your model
                var model = LanguageUnderstandingModel.FromAppId("YourLanguageUnderstandingAppId");
                recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName1", "id1");
                recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName2", "id2");
                recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName3", "any-IntentId-here");
                // </add_intents>

                // To add all of the possible intents from a LUIS model to the recognizer, uncomment the line below:
                // recognizer.AddAllIntents(model);

                // <recognize_intent>
                // Starts recognizing.
                Console.WriteLine("Say something...");

                // Starts intent recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed.  The task returns the recognition text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync();

                // </recognize_intent>

                // <print_results>
                // Checks result.
                switch (result.Reason)
                {
                case ResultReason.RecognizedIntent:
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent Id: {result.IntentId}.");
                    var json = result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult);
                    Console.WriteLine($"    Language Understanding JSON: {json}.");
                    break;

                case ResultReason.RecognizedSpeech:
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent not recognized.");
                    break;

                case ResultReason.NoMatch:
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    break;

                case ResultReason.Canceled:
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                    break;
                }
                // </print_results>
                // <create_intent_recognizer_2>
            }
            // </create_intent_recognizer_2>
            // <skeleton_2>
        }
Example #19
                    public static async Task TextBasedAssistant()
                    {
                        // <intentContinuousRecognitionWithFile>
                        // Creates an instance of a speech config with specified subscription key
                        // and service region. Note that in contrast to other services supported by
                        // the Cognitive Services Speech SDK, the Language Understanding service
                        // requires a specific subscription key from https://www.luis.ai/.
                        // The Language Understanding service calls the required key 'endpoint key'.
                        // Once you've obtained it, replace below with your own Language Understanding subscription key
                        // and service region (e.g., "westus").
                        var config = SpeechConfig.FromSubscription("", "westus");

                        using (var recognizer = new IntentRecognizer(config))
                        {
                            // Creates a Language Understanding model using the app id, and adds specific intents from your model
                            var model = LanguageUnderstandingModel.FromAppId("");
                            recognizer.AddIntent(model, "DiscoverDealsForTheDay", "id1");
                            recognizer.AddIntent(model, "DiscoverLocation", "id2");
                            recognizer.AddIntent(model, "DiscoverPersonalizedDeals", "id3");
                            recognizer.AddIntent(model, "GoodBye", "id4");
                            recognizer.AddIntent(model, "Greeting", "id5");
                            recognizer.AddIntent(model, "HowToGoToTrialRoom", "id6");
                            recognizer.AddIntent(model, "DiscoverPaymentQueueLength", "id7");

                            // Subscribes to events.
                            recognizer.Recognizing += (s, e) => {
                                //Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                            };

                            recognizer.Recognized += (s, e) => {
                                if (e.Result.Reason == ResultReason.RecognizedIntent)
                                {
                                    Console.WriteLine($"\nText={e.Result.Text}");
                                    //Console.WriteLine($"    Intent Id: {e.Result.IntentId}.");
                                    string Response = e.Result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult);
                                    //Console.Write(Response);
                                    dynamic IntentResponse = JObject.Parse(Response);
                                    dynamic IntentResult   = JObject.Parse(IntentResponse.topScoringIntent.ToString());
                                    string  Intent         = IntentResult.intent.ToString();
                                    Console.WriteLine("Intent: " + Intent);
                                    String TextResult = "";
                                    if (Intent == "Greeting")
                                    {
                                        TextResult = "Hello Suneetha, Welcome back to Shopper's Heaven. Last time you have visited us on Jan 1st ,2019 for some new year offers. How can I help you today ?";
                                    }
                                    else if (Intent == "DiscoverDealsForTheDay")
                                    {
                                        TextResult = "Sure...Give me 2 mins.. We have got great offers for you today. Women and Kids Casual Wears having 40 % discount offers. Women Foot wear section had just launched 60 % clearance sale offer. And cosmetics section is having personalized offer for you";
                                    }
                                    else if (Intent == "DiscoverLocation")
                                    {
                                        TextResult = "Please go till Gate 2 and take the elevator to second floor. You can find Woman's casual wear section on your righthand side";
                                    }
                                    else if (Intent == "HowToGoToTrialRoom")
                                    {
                                        TextResult = "Trail toom is just 200 metres away from you on your left hand side. But looks like all trial rooms are busy now. Please try after 10 mins";
                                    }
                                    else if (Intent == "DiscoverPersonalizedDeals")
                                    {
                                        TextResult = "Please proceed to cosmetic sections at the entrance of second floor. Swith to scan mode in your app and just scan the product that you are interested to buy. You will get Virtual vouchers for that product which you can redeem when you are buying it";
                                    }
                                    else if (Intent == "DiscoverPaymentQueueLength")
                                    {
                                        TextResult = "Its looks good Suneetha. You will be 3rd person in the queue. Please proceed";
                                    }
                                    else if (Intent == "GoodBye")
                                    {
                                        TextResult = "Thanks Suneetha for visiting us again. Hope you enjoyed shopping with me today. See you soon. Last but not least, please do not forget to give feedback about your experience at Shopper Heaven today at the link..";
                                    }
                                    else
                                    {
                                        TextResult = "No Intent Found";
                                    }
                                    Console.WriteLine(TextResult + "\n");
                                    //{
                                    //    "query": "hello",
                                    //      "topScoringIntent": {
                                    //        "intent": "Greeting",
                                    //        "score": 0.9444705
                                    //      },
                                    //      "entities": []
                                    //    }
                                }
                                else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                                {
                                    //Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                                    //Console.WriteLine($"    Intent not recognized.");
                                }
                                else if (e.Result.Reason == ResultReason.NoMatch)
                                {
                                    //Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                                }
                            };

                            recognizer.Canceled += (s, e) => {
                                Console.WriteLine($"CANCELED: Reason={e.Reason}");

                                if (e.Reason == CancellationReason.Error)
                                {
                                    Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                                    Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                                }
                            };

                            recognizer.SessionStarted += (s, e) => {
                                //Console.WriteLine("\nSession started");
                            };

                            recognizer.SessionStopped += (s, e) => {
                                //Console.WriteLine("\nSession stopped");
                                //Console.WriteLine("\nStop recognition.");
                            };


                            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                            Console.WriteLine("Say something...");
                            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                            // Waits for the user to press Enter before stopping recognition.

                            do
                            {
                                Console.WriteLine("Press Enter to stop");
                            } while (Console.ReadKey().Key != ConsoleKey.Enter);

                            // Stops recognition.
                            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                        }
                    }
Example #20
        private async static Task RecognizeSpeechAndIntentAsync()
        {
            var config = SpeechConfig.FromEndpoint(
                new Uri("https://eastus2.api.cognitive.microsoft.com/sts/v1.0/issuetoken"),
                "MySuscriptionKey");

            config.SpeechRecognitionLanguage = "es-ES";

            using var speechRecognition = new IntentRecognizer(config);

            var luisModel = LanguageUnderstandingModel.FromAppId("ba417c40-bb51-4704-966a-f9c58afaf1c8");

            speechRecognition.AddAllIntents(luisModel);
            speechRecognition.AddIntent("chao");

            var endRecognition = new TaskCompletionSource <int>();

            speechRecognition.Recognized += (s, e) =>
            {
                switch (e.Result.Reason)
                {
                case ResultReason.NoMatch:
                    if (!endRecognition.Task.IsCompleted)
                    {
                        Console.WriteLine($"No entendí na':{e.Result.Text}");
                    }
                    break;

                case ResultReason.Canceled:
                    Console.WriteLine($"Se canceló la escucha:{e.Result.Text}");
                    break;

                case ResultReason.RecognizingSpeech:
                    Console.WriteLine($"Escuchando:{e.Result.Text}");
                    break;

                case ResultReason.RecognizedSpeech:
                    Console.WriteLine($"Entendí esto:{e.Result.Text}");
                    break;

                case ResultReason.RecognizedIntent:
                    Console.WriteLine($"Detectado comando de voz:{e.Result.Text}");
                    Console.WriteLine($"Saliendo ....");
                    endRecognition.TrySetResult(0);
                    break;

                default:
                    Console.WriteLine($"LLegué aquí porque:{e.Result.Reason}");
                    break;
                }
            };

            speechRecognition.Canceled += (s, e) =>
            {
                if (e.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"ocurrió un error:{e.ErrorCode} => {e.ErrorDetails}");
                }

                endRecognition.TrySetResult(0);
            };

            speechRecognition.SessionStopped += (s, e) =>
            {
                Console.WriteLine("Deteniendo");
                endRecognition.TrySetResult(0);
            };

            Console.WriteLine("Ahora empieza a hablar...");
            await speechRecognition.StartContinuousRecognitionAsync().ConfigureAwait(false);

            Task.WaitAny(new[] { endRecognition.Task });
            await speechRecognition.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
Example #21
        // Continuous intent recognition using file input.
        public static async Task ContinuousRecognitionWithFileAsync()
        {
            // <intentContinuousRecognitionWithFile>
            // Creates an instance of a speech config with specified subscription key
            // and service region. Note that in contrast to other services supported by
            // the Cognitive Services Speech SDK, the Language Understanding service
            // requires a specific subscription key from https://www.luis.ai/.
            // The Language Understanding service calls the required key 'endpoint key'.
            // Once you've obtained it, replace below with your own Language Understanding subscription key
            // and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourLanguageUnderstandingSubscriptionKey", "YourLanguageUnderstandingServiceRegion");

            // Creates an intent recognizer using file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput("whatstheweatherlike.wav"))
            {
                using (var recognizer = new IntentRecognizer(config, audioInput))
                {
                    // The TaskCompletionSource to stop recognition.
                    var stopRecognition = new TaskCompletionSource <int>();

                    // Creates a Language Understanding model using the app id, and adds specific intents from your model
                    var model = LanguageUnderstandingModel.FromAppId("YourLanguageUnderstandingAppId");
                    recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName1", "id1");
                    recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName2", "id2");
                    recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName3", "any-IntentId-here");

                    // Subscribes to events.
                    recognizer.Recognizing += (s, e) => {
                        Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                    };

                    recognizer.Recognized += (s, e) => {
                        if (e.Result.Reason == ResultReason.RecognizedIntent)
                        {
                            Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                            Console.WriteLine($"    Intent Id: {e.Result.IntentId}.");
                            Console.WriteLine($"    Language Understanding JSON: {e.Result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}.");
                        }
                        else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                            Console.WriteLine($"    Intent not recognized.");
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    recognizer.Canceled += (s, e) => {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }

                        stopRecognition.TrySetResult(0);
                    };

                    recognizer.SessionStarted += (s, e) => {
                        Console.WriteLine("\n    Session started event.");
                    };

                    recognizer.SessionStopped += (s, e) => {
                        Console.WriteLine("\n    Session stopped event.");
                        Console.WriteLine("\nStop recognition.");
                        stopRecognition.TrySetResult(0);
                    };


                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    Console.WriteLine("Say something...");
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopRecognition.Task });

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
            // </intentContinuousRecognitionWithFile>
        }
Example #22
        public static async Task RecognizeSpeechAsync()
        {
            initMqttClient(mqttBrokerAddress);
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var intentConfig = SpeechConfig.FromSubscription("", "westus2");

            // Creates a speech recognizer.
            using (var intentRecognizer = new IntentRecognizer(intentConfig))
            {
                // The TaskCompletionSource to stop recognition.
                var stopRecognition = new TaskCompletionSource <int>();

                var model = LanguageUnderstandingModel.FromAppId("");
                intentRecognizer.AddAllIntents(model);

                // Subscribes to events.
                intentRecognizer.Recognizing += (s, e) => {
                    Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                };

                intentRecognizer.Recognized += (s, e) => {
                    if (e.Result.Reason == ResultReason.RecognizedIntent)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                        Console.WriteLine($"    Intent Id: {e.Result.IntentId}.");
                        Console.WriteLine($"    Language Understanding JSON: {e.Result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}.");
                        if (e.Result.IntentId == "FollowPerson")
                        {
                            var     jsonResult = e.Result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult);
                            dynamic stuff      = JObject.Parse(jsonResult);
                            try
                            {
                                string name = stuff.entities[0].entity;
                                Console.WriteLine(name);
                                int id = nameToIdDict.GetValueOrDefault(name);
                                mqttClient.Publish("bcx19-seek-the-geek/tag/control", Encoding.UTF8.GetBytes($"target.{name}"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, false);
                                Console.WriteLine("MQTT Message sent");
                            }
                            catch
                            {
                                Console.WriteLine("Error");
                            }
                        }
                        else if (e.Result.IntentId == "Rover.Stop")
                        {
                            mqttClient.Publish("bcx19-seek-the-geek/tag/control", Encoding.UTF8.GetBytes("rover.stop"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, false);
                            Console.WriteLine("MQTT Message sent");
                        }
                        else if (e.Result.IntentId == "Rover.Start")
                        {
                            mqttClient.Publish("bcx19-seek-the-geek/tag/control", Encoding.UTF8.GetBytes("rover.start"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, false);
                            Console.WriteLine("MQTT Message sent");
                        }
                        else if (e.Result.IntentId == "Rover.Left")
                        {
                            mqttClient.Publish("bcx19-seek-the-geek/tag/control", Encoding.UTF8.GetBytes("rover.left"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, false);
                            Console.WriteLine("MQTT Message sent");
                        }
                        else if (e.Result.IntentId == "Rover.Right")
                        {
                            mqttClient.Publish("bcx19-seek-the-geek/tag/control", Encoding.UTF8.GetBytes("rover.right"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, false);
                            Console.WriteLine("MQTT Message sent");
                        }
                        else if (e.Result.IntentId == "Rover.Exit")
                        {
                            mqttClient.Publish("bcx19-seek-the-geek/tag/control", Encoding.UTF8.GetBytes("rover.exit"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, false);
                            Console.WriteLine("MQTT Message sent");
                        }
                        else if (e.Result.IntentId == "Rover.Back")
                        {
                            mqttClient.Publish("bcx19-seek-the-geek/tag/control", Encoding.UTF8.GetBytes("rover.back"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, false);
                            Console.WriteLine("MQTT Message sent");
                        }
                    }
                    else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                        Console.WriteLine($"    Intent not recognized.");
                    }
                    else if (e.Result.Reason == ResultReason.NoMatch)
                    {
                        Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    }
                };

                intentRecognizer.Canceled += (s, e) => {
                    Console.WriteLine($"CANCELED: Reason={e.Reason}");

                    if (e.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }

                    stopRecognition.TrySetResult(0);
                };

                intentRecognizer.SessionStarted += (s, e) => {
                    Console.WriteLine("\n    Session started event.");
                };

                intentRecognizer.SessionStopped += (s, e) => {
                    Console.WriteLine("\n    Session stopped event.");
                    Console.WriteLine("\nStop recognition.");
                    stopRecognition.TrySetResult(0);
                };


                // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                Console.WriteLine("Say something...");
                await intentRecognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Waits for completion.
                // Use Task.WaitAny to keep the task rooted.
                Task.WaitAny(new[] { stopRecognition.Task });

                // Stops recognition.
                await intentRecognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }
Example #23
        /// <summary>
        /// Use pattern matching for intent recognition from your default microphone input
        /// </summary>
        public static async Task IntentPatternMatchingWithMicrophoneAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service
            // region. Note that in contrast to the other samples this DOES NOT require a LUIS
            // application.
            // The default recognition language is "en-us".
            var config = SpeechConfig.FromSubscription("YourLanguageUnderstandingSubscriptionKey", "YourLanguageUnderstandingServiceRegion");

            // Creates an intent recognizer using microphone as audio input.
            using (var recognizer = new IntentRecognizer(config))
            {
                // Creates a Pattern Matching model and adds specific intents from your model. The
                // Id is used to identify this model from others in the collection.
                var model = new PatternMatchingModel("YourPatternMatchingModelId");

                // Creates a pattern that uses groups of optional words. "[Go | Take me]" will match either "Go", "Take me", or "".
                var patternWithOptionalWords = "[Go | Take me] to [floor|level] {floorName}";

                // Creates a pattern that uses an optional entity and group that could be used to tie commands together.
                var patternWithOptionalEntity = "Go to parking [{parkingLevel}]";

                // You can also have multiple entities of the same name in a single pattern by appending a unique identifier
                // to distinguish between the instances. For example:
                var patternWithTwoOfTheSameEntity = "Go to floor {floorName:1} [and then go to floor {floorName:2}]";
                // NOTE: Both floorName:1 and floorName:2 are tied to the same list of entries. The identifier can be a string
                //       and is separated from the entity name by a ':'
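                // For illustration, utterances these patterns would accept include (assumed behavior,
                // following the optional-group rules described above):
                //   "Take me to floor one"                      -> ChangeFloors, floorName=one
                //   "to level 2"                                -> ChangeFloors ("[Go | Take me]" matched as "")
                //   "Go to parking" or "Go to parking 3"        -> ChangeFloors, parkingLevel optional
                //   "Go to floor one and then go to floor two"  -> ChangeFloors, floorName:1=one, floorName:2=two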

                // Adds some intents to look for specific patterns.
                model.Intents.Add(new PatternMatchingIntent("ChangeFloors", patternWithOptionalWords, patternWithOptionalEntity, patternWithTwoOfTheSameEntity));
                model.Intents.Add(new PatternMatchingIntent("DoorControl", "{action} the doors", "{action} doors", "{action} the door", "{action} door"));

                // Creates the "floorName" entity and set it to type list.
                // Adds acceptable values. NOTE the default entity type is Any and so we do not need
                // to declare the "action" entity.
                model.Entities.Add(PatternMatchingEntity.CreateListEntity("floorName", EntityMatchMode.Strict, "ground floor", "lobby", "1st", "first", "one", "1", "2nd", "second", "two", "2"));

                // Creates the "parkingLevel" entity as a pre-built integer
                model.Entities.Add(PatternMatchingEntity.CreateIntegerEntity("parkingLevel"));

                // Add the model to a new language model collection
                var modelCollection = new LanguageUnderstandingModelCollection();
                modelCollection.Add(model);

                // Apply the language model collection to the recognizer.
                recognizer.ApplyLanguageModels(modelCollection);

                Console.WriteLine("Say something...");

                // Starts intent recognition, and returns after a single utterance is recognized. The
                // end of a single utterance is determined by listening for silence at the end, or until
                // a maximum of 15 seconds of audio is processed. The task returns the recognition
                // text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only
                // for single shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync()
                // instead.
                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedIntent)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"       Intent Id={result.IntentId}.");

                    var entities = result.Entities;
                    switch (result.IntentId)
                    {
                    case "ChangeFloors":
                        if (entities.TryGetValue("floorName", out string floorName))
                        {
                            Console.WriteLine($"       FloorName={floorName}");
                        }

                        if (entities.TryGetValue("floorName:1", out floorName))
                        {
                            Console.WriteLine($"     FloorName:1={floorName}");
                        }

                        if (entities.TryGetValue("floorName:2", out floorName))
                        {
                            Console.WriteLine($"     FloorName:2={floorName}");
                        }

                        if (entities.TryGetValue("parkingLevel", out string parkingLevel))
                        {
                            Console.WriteLine($"    ParkingLevel={parkingLevel}");
                        }

                        break;

                    case "DoorControl":
                        if (entities.TryGetValue("action", out string action))
                        {
                            Console.WriteLine($"          Action={action}");
                        }
                        break;
                    }
                }
                else if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent not recognized.");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
Example #24
        public static async Task RecognizeSpeechAsync()
        {
            // Create an instance of a speech config with the specified app key and region.
            var config = SpeechConfig.FromSubscription(YourAppKey, YourRegion);

            // Use the microphone as input.
            var audioConfig = AudioConfig.FromDefaultMicrophoneInput();

            var stopRecognition = new TaskCompletionSource <int>();

            // Create a new IntentRecognizer, which uses the config and the audioConfig.
            using (var recognizer = new IntentRecognizer(config, audioConfig))
            {
                // Create a Language Understanding model using the app id.
                var model = LanguageUnderstandingModel.FromAppId(YourAppId);

                // Add the intents which are specified by the LUIS app.
                recognizer.AddIntent(model, "HomeAutomation.TurnOff", "off");
                recognizer.AddIntent(model, "HomeAutomation.TurnOn", "on");

                // Subscribe to events.
                recognizer.Recognizing += (s, e) =>
                {
                    Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                };

                recognizer.Recognized += (s, e) =>
                {
                    // The LUIS app recognized an intent.
                    if (e.Result.Reason == ResultReason.RecognizedIntent)
                    {
                        // Get the result from the event arguments and print it into the console.
                        var responseJson = e.Result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult);

                        Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                        Console.WriteLine($"    Intent Id: {e.Result.IntentId}.");
                        Console.WriteLine($"    Language Understanding JSON: {responseJson}.");

                        // Deserialize the JSON result into an object.
                        var responseObject = JsonConvert.DeserializeObject <SpeechResponse>(responseJson);

                        // Get the intent out of the result. This gives us the command.
                        var intent = responseObject.topScoringIntent.intent;
                        if (intent == "HomeAutomation.TurnOn")
                        {
                            intent = "on";
                        }
                        else if (intent == "HomeAutomation.TurnOff")
                        {
                            intent = "off";
                        }

                        // Get the colour entity out of the result.
                        var colourEntity = responseObject.entities.FirstOrDefault(x => x.type == "Colour");
                        var colour       = colourEntity.entity;

                        // Create the request we will send to the web API.
                        var request = new SpeechRequest
                        {
                            Colour  = colour,
                            Command = intent
                        };

                        // Create a new HttpClient and send the request. The handler is synchronous,
                        // so the POST is deliberately fire-and-forget (the response is not awaited).
                        var client  = new HttpClient();
                        var content = new StringContent(JsonConvert.SerializeObject(request), Encoding.UTF8, "application/json");

                        _ = client.PostAsync("http://<your-local-raspberrypi-ip>/api/Speech", content);
                    }

                    // The speech service recognized speech.
                    else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                        Console.WriteLine($"    Intent not recognized.");
                    }

                    // The input has not been recognized.
                    else if (e.Result.Reason == ResultReason.NoMatch)
                    {
                        Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    }
                };

                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine($"CANCELED: Reason={e.Reason}");

                    if (e.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }

                    stopRecognition.TrySetResult(0);
                };

                recognizer.SessionStarted += (s, e) =>
                {
                    Console.WriteLine("\n    Session started event.");
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    Console.WriteLine("\n    Session stopped event.");
                    Console.WriteLine("\nStop recognition.");
                    stopRecognition.TrySetResult(0);
                };

                // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Waits for completion.
                // Use Task.WaitAny to keep the task rooted.
                Task.WaitAny(new[] { stopRecognition.Task });

                // Stops recognition.
                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }
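The SpeechResponse and SpeechRequest types above are not part of the Speech SDK; they belong to the sample project. A minimal sketch, with property names inferred from how the code reads the LUIS v2 JSON (topScoringIntent.intent, entities[].type, entities[].entity), might look like this:

        // Hypothetical DTOs matching the LUIS v2 response shape and the web API payload.
        // Requires: using System.Collections.Generic;
        public class SpeechResponse
        {
            public TopScoringIntent topScoringIntent { get; set; }
            public List <LuisEntity> entities { get; set; }
        }

        public class TopScoringIntent
        {
            public string intent { get; set; }
            public double score { get; set; }
        }

        public class LuisEntity
        {
            public string entity { get; set; }
            public string type { get; set; }
        }

        // Payload posted to the Raspberry Pi web API.
        public class SpeechRequest
        {
            public string Colour { get; set; }
            public string Command { get; set; }
        }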
Example #25
        /// <summary>
        /// Processes an audio file with Microsoft.CognitiveServices and returns the recognized text, or an empty string
        /// </summary>
        /// <returns></returns>
        public async Task <string> ProcessAudioSampleAsync()
        {
            _log.Called();

            string       key              = _config.AudioApiKey;
            string       region           = _config.AudioApiRegion;
            SpeechConfig configRecognizer = SpeechConfig.FromSubscription(key, region);
            string       processedAudio   = "";
            string       audioFile        = _fileService.GetAudioFilePath();
            bool         isRecorded       = _fileService.CheckAudioFile();

            if (!isRecorded)
            {
                return(processedAudio); // nothing was recorded, return empty string
            }
            try
            {
                using (AudioConfig audioInput = AudioConfig.FromWavFileInput(audioFile))
                    using (IntentRecognizer recognizer = new IntentRecognizer(configRecognizer, audioInput))
                    {
                        TaskCompletionSource <int> stopRecognition = new TaskCompletionSource <int>();

                        recognizer.Recognized += (s, e) =>
                        {
                            if (e.Result.Reason == ResultReason.RecognizedSpeech)
                            {
                                processedAudio = e.Result.Text;
                            }
                        };

                        recognizer.Canceled += (s, e) => {
                            if (e.Reason == CancellationReason.Error)
                            {
                                _log.Error($"Recognition canceled: {e.ErrorDetails}");
                            }
                            stopRecognition.TrySetResult(0);
                        };

                        recognizer.SessionStarted += (s, e) => {
                            _log.Info("Start recording.");
                        };

                        recognizer.SessionStopped += (s, e) => {
                            _log.Info("Stop recording.");

                            stopRecognition.TrySetResult(0);
                        };

                        await recognizer.StartContinuousRecognitionAsync();

                        Task.WaitAny(new[] { stopRecognition.Task });

                        await recognizer.StopContinuousRecognitionAsync();
                    }

                return(processedAudio);
            }
            catch (Exception e)
            {
                _log.Error(e.Message);

                return(string.Empty);
            }
        }
Example #26
        public static async Task RecognizeOnceSpeechAsync(SpeechTranslationConfig config)
        {
            var allCultures = CultureInfo.GetCultures(CultureTypes.AllCultures);

            // Creates a speech recognizer.
            using (var recognizer = new IntentRecognizer(config))
            {
                Console.WriteLine("Say something...");

                var model = LanguageUnderstandingModel.FromAppId(ConfigurationManager.AppSettings.Get("LUISId"));
                recognizer.AddAllIntents(model);

                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.RecognizedIntent)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent Id: {result.IntentId}.");
                    Console.WriteLine($"    Language Understanding JSON: {result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}.");
                    if (result.IntentId == "Translate")
                    {
                        var    luisJson  = JObject.Parse(result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult));
                        string targetLng = luisJson["entities"].First(x => x["type"].ToString() == "TargetLanguage")["entity"].ToString();
                        string text      = luisJson["entities"].First(x => x["type"].ToString() == "Text")["entity"].ToString();

                        var lng = allCultures.FirstOrDefault(c => c.DisplayName.ToLower() == targetLng.ToLower()) ??
                                  allCultures.FirstOrDefault(c => c.DisplayName.ToLower() == "english");
                        // NOTE: the translation target is hardcoded to "de-DE"; the recognized
                        // TargetLanguage entity only drives the synthesizer voice selection below.
                        var translated = Translate.TranslateText("de-DE", text);

                        Console.WriteLine("Translation: " + translated);

                        var synth = new System.Speech.Synthesis.SpeechSynthesizer();

                        // Configure the audio output.
                        synth.SetOutputToDefaultAudioDevice();

                        // Speak a string.
                        synth.SelectVoice(synth.GetInstalledVoices().First(x => x.VoiceInfo.Culture.TwoLetterISOLanguageName == lng.TwoLetterISOLanguageName).VoiceInfo.Name);
                        synth.Speak(translated);
                    }
                }
                else if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent not recognized.");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
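The Translate.TranslateText helper called above is project-local, not part of the Speech SDK. A minimal sketch against the Azure Translator Text REST API v3, with a hypothetical subscription key and region, could look like this:

        public static class Translate
        {
            private static readonly HttpClient _http = new HttpClient();

            public static string TranslateText(string toLanguage, string text)
            {
                // Hypothetical key and region; replace with your own Translator resource values.
                _http.DefaultRequestHeaders.Remove("Ocp-Apim-Subscription-Key");
                _http.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", "YourTranslatorKey");
                _http.DefaultRequestHeaders.Remove("Ocp-Apim-Subscription-Region");
                _http.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Region", "YourTranslatorRegion");

                string url  = "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to=" + toLanguage;
                var    body = new StringContent(JsonConvert.SerializeObject(new[] { new { Text = text } }), Encoding.UTF8, "application/json");

                // Blocking calls to match the synchronous signature used by the sample.
                var response = _http.PostAsync(url, body).GetAwaiter().GetResult();
                var json     = response.Content.ReadAsStringAsync().GetAwaiter().GetResult();

                // Response shape: [ { "translations": [ { "text": "...", "to": "..." } ] } ]
                return(JArray.Parse(json)[0]["translations"][0]["text"].ToString());
            }
        }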
Example #27
        /// <summary>
        /// Keyword-triggered intent recognition using microphone. This is useful for when you don't have a push-to-talk feature
        /// and want to activate your device with voice only. A keyword model is used for local recognition and activation.
        /// NOTE: It is possible to still call recognize once during a keyword spotting session if you want to have both
        /// push-to-talk and keyword activation.
        /// Example interaction: "Computer turn on the lights".
        /// </summary>
        public static async Task IntentPatternMatchingWithMicrophoneAndKeywordSpottingAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region. Note that in
            // contrast to the other samples this DOES NOT require a LUIS application.
            // The default recognition language is "en-us".
            var config = SpeechConfig.FromSubscription(
                "YourSubscriptionKey",
                "YourSubscriptionRegion");

            // Creates an instance of a keyword recognition model. Update this to
            // point to the location of your keyword recognition model.
            var keywordModel = KeywordRecognitionModel.FromFile(@"PathToKeywordModel\Keyword.table");
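            // NOTE: a .table keyword model such as this can be created with the Custom Keyword
            // feature in Speech Studio and downloaded for local, offline keyword spotting.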

            // The phrase your keyword recognition model triggers on.
            var keyword = "YourKeyword";

            // Creates an intent recognizer using microphone as audio input.
            using (var recognizer = new IntentRecognizer(config))
            {
                // Create a string containing the keyword with the optional pattern tags on it. This can be useful if you
                // are using push to talk and keyword activation.
                var keywordOptionalPattern = "[" + keyword + "]";

                // Creates a Pattern Matching model and adds specific intents from your model. The Id is used to identify
                // this model from others in the collection.
                var patternMatchingModel = new PatternMatchingModel("YourPatternMatchingModelId");

                // Creates the "floorName" entity and set it to type list.
                // Adds acceptable values. NOTE the default entity type is Any and so we do not need
                // to declare the "action" entity.
                patternMatchingModel.Entities.Add(PatternMatchingEntity.CreateListEntity(
                                                      "floorName",
                                                      EntityMatchMode.Strict,
                                                      "ground floor", "lobby", "1st", "first", "one", "1", "2nd", "second", "two", "2"));

                // Creates the "parkingLevel" entity as a pre-built integer
                patternMatchingModel.Entities.Add(PatternMatchingEntity.CreateIntegerEntity("parkingLevel"));

                // Creates a string with a pattern that uses groups of optional words. Optional phrases in square brackets can
                // select one phrase from several choices by separating them inside the brackets with a pipe '|'. Here,
                // "[Go | Take me]" will match either "Go", "Take me", or "". Note the space after the keyword.
                var patternWithOptionalWords = keywordOptionalPattern + " " + "[Go | Take me] to [floor|level] {floorName}";

                // Creates a string with a pattern that uses an optional entity and group that could be used to tie commands
                // together. Optional patterns in square brackets can also include a reference to an entity. "[{parkingLevel}]"
                // includes a match against the named entity as an optional component in this pattern.
                var patternWithOptionalEntity = keywordOptionalPattern + " " + "Go to parking [{parkingLevel}]";

                // You can also have multiple entities of the same name in a single pattern by appending a unique identifier
                // to distinguish between the instances. For example:
                var patternWithTwoOfTheSameEntity = keywordOptionalPattern + " "
                                                    + "Go to floor {floorName:1} [and then go to floor {floorName:2}]";
                // NOTE: Both floorName:1 and floorName:2 are tied to the same list of entries. The identifier can be a string
                //       and is separated from the entity name by a ':'

                // Adds some intents to look for specific patterns.
                patternMatchingModel.Intents.Add(new PatternMatchingIntent(
                                                     "ChangeFloors", patternWithOptionalWords, patternWithOptionalEntity, patternWithTwoOfTheSameEntity));
                patternMatchingModel.Intents.Add(new PatternMatchingIntent("DoorControl",
                                                                           keywordOptionalPattern + " " + "{action} the doors",
                                                                           keywordOptionalPattern + " " + "{action} doors",
                                                                           keywordOptionalPattern + " " + "{action} the door",
                                                                           keywordOptionalPattern + " " + "{action} door"));

                // Add the model to a new language model collection
                var modelCollection = new LanguageUnderstandingModelCollection();
                modelCollection.Add(patternMatchingModel);

                // Apply the language model collection to the recognizer.
                recognizer.ApplyLanguageModels(modelCollection);

                var stopRecognition = new TaskCompletionSource <int>();

                // Subscribes to events.
                recognizer.Recognizing += (s, e) =>
                {
                    if (e.Result.Reason == ResultReason.RecognizingKeyword)
                    {
                        Console.WriteLine($"RECOGNIZING KEYWORD: Text={e.Result.Text}");
                    }
                    else if (e.Result.Reason == ResultReason.RecognizingSpeech)
                    {
                        Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                    }
                };

                recognizer.Recognized += (s, e) =>
                {
                    // Checks result.
                    var result = e.Result;
                    if (result.Reason == ResultReason.RecognizedKeyword)
                    {
                        Console.WriteLine($"RECOGNIZED KEYWORD: Text={e.Result.Text}");
                    }
                    else if (result.Reason == ResultReason.RecognizedIntent)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                        Console.WriteLine($"{"Intent Id=",13} {result.IntentId}.");

                        var entities = result.Entities;
                        switch (result.IntentId)
                        {
                        case "ChangeFloors":
                            if (entities.TryGetValue("floorName", out string floorName))
                            {
                                Console.WriteLine($"{"FloorName=",17} {floorName}");
                            }

                            if (entities.TryGetValue("floorName:1", out floorName))
                            {
                                Console.WriteLine($"{"FloorName:1=",17} {floorName}");
                            }

                            if (entities.TryGetValue("floorName:2", out floorName))
                            {
                                Console.WriteLine($"{"FloorName:2=",17} {floorName}");
                            }

                            if (entities.TryGetValue("parkingLevel", out string parkingLevel))
                            {
                                Console.WriteLine($"{"ParkingLevel=",17} {parkingLevel}");
                            }
                            break;

                        case "DoorControl":
                            if (entities.TryGetValue("action", out string action))
                            {
                                Console.WriteLine($"{"Action=",17} {action}");
                            }
                            break;

                        default:
                            Console.WriteLine($"Unknown intent ID: {result.IntentId}");
                            break;
                        }
                    }
                    else if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                        Console.WriteLine($"{"Intent not recognized.",17}");
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    }
                };

                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine($"CANCELED: Reason={e.Reason}");

                    if (e.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                    stopRecognition.TrySetResult(0);
                };

                recognizer.SessionStarted += (s, e) =>
                {
                    Console.WriteLine($"{"Session started event.",17}");
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    Console.WriteLine($"{"Session stopped event.",17}");
                    Console.WriteLine($"{"Stop recognition.",17}");
                    stopRecognition.TrySetResult(0);
                };

                // Starts recognizing.
                Console.WriteLine($"Say something starting with the keyword '{keyword}' followed by whatever you want...");

                // Starts continuous recognition using the keyword model. Use
                // StopKeywordRecognitionAsync() to stop recognition.
                await recognizer.StartKeywordRecognitionAsync(keywordModel).ConfigureAwait(false);

                // Waits for a single successful keyword-triggered speech recognition (or error).
                await stopRecognition.Task;

                // Stops recognition.
                await recognizer.StopKeywordRecognitionAsync().ConfigureAwait(false);
            }
        }
Example #28
        static async Task RecognizeIntentAsync()
        {
            // Creates an instance of a speech config with specified subscription key
            // and service region. Note that in contrast to other services supported by
            // the Cognitive Services Speech SDK, the Language Understanding service
            // requires a specific subscription key from https://www.luis.ai/.
            // The Language Understanding service calls the required key 'endpoint key'.
            // Once you've obtained it, replace below with your own Language Understanding subscription key
            // and service region (e.g., "westus").
            // The default language is "en-us".
            var config = SpeechConfig.FromSubscription("YourLanguageUnderstandingSubscriptionKey", "YourLanguageUnderstandingServiceRegion");

            // Creates an intent recognizer using microphone as audio input.
            using (var recognizer = new IntentRecognizer(config))
            {
                // Creates a Language Understanding model using the app id, and adds specific intents from your model
                var model = LanguageUnderstandingModel.FromAppId("YourLanguageUnderstandingAppId");
                recognizer.AddIntent(model, "HomeAutomation.TurnOff", "off");
                recognizer.AddIntent(model, "HomeAutomation.TurnOn", "on");

                // Starts recognizing.
                Console.WriteLine("Say something...");

                // Starts intent recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed.  The task returns the recognition text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                // Checks result.
                if (result.Reason == ResultReason.RecognizedIntent)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent Id: {result.IntentId}.");
                    Console.WriteLine($"    Language Understanding JSON: {result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}.");
                }
                else if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                    Console.WriteLine($"    Intent not recognized.");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
Example #29
        private async void ConfigureIntentRecognizer()
        {
            // NOTE: async void means the caller cannot await this method or observe its exceptions;
            // it is tolerated here only because the method is invoked from UI initialization code.
            var config = SpeechConfig.FromSubscription("e4c237841b7043b9b2c73a432a85416c", "westus");

            config.SpeechRecognitionLanguage = "es-es";
            var stopRecognition = new TaskCompletionSource <int>();

            using (recognizer = new IntentRecognizer(config))
            {
                var model = LanguageUnderstandingModel.FromAppId("ce892100-78a7-48b0-8e09-5dc18b16996d");
                recognizer.AddAllIntents(model, "Id1");

                recognizer.Recognized += async (s, e) =>
                {
                    await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        //recognizedText.Text = e.Result.Text;

                        //var prediction = predictionResult.Prediction;
                        //MyMessageBox(prediction.TopIntent);
                    });

                    try
                    {
                        // Await the prediction instead of blocking on .Result inside an async handler.
                        var predictionResult = await GetPredictionAsync(e.Result.Text);
                        var prediction       = predictionResult.Prediction;


                        if (prediction.TopIntent == "None")
                        {
                            await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                recognizedText.Text = "No entiendo la acción que me pides.";
                            });

                            muestraHora  = false;
                            muestraTexto = false;
                        }
                        else if (prediction.TopIntent == "MuestraHora")
                        {
                            await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                var now = DateTime.Now;
                                // Format the time so 09:05 does not render as "9:5".
                                recognizedText.Text = "Son las " + now.ToString("HH:mm") + ".";
                            });

                            muestraHora  = true;
                            muestraTexto = false;
                        }
                        else if (prediction.TopIntent == "EscribeTexto")
                        {
                            await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                recognizedText.Text = "Esto es un texto.";
                            });

                            muestraTexto = true;
                            muestraHora  = false;
                        }
                        else if (prediction.TopIntent == "OcultaHora")
                        {
                            await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                if (muestraHora)
                                {
                                    recognizedText.Text = "Hora oculta.";
                                    muestraHora         = false;
                                    muestraTexto        = false;
                                }
                            });
                        }
                        else if (prediction.TopIntent == "OcultaTexto")
                        {
                            await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                if (muestraTexto)
                                {
                                    recognizedText.Text = "Texto ocultado.";
                                    muestraHora         = false;
                                    muestraTexto        = false;
                                }
                            });
                        }
                        else if (prediction.TopIntent == "CambioColor")
                        {
                            string color = "";
                            await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                //string item = prediction.Entities.First().Value.ToString();
                                color = prediction.Entities.First().Value.ToString().Replace(System.Environment.NewLine, "");
                                color = color.Replace("[", "");
                                color = color.Replace("]", "");
                                color = color.Replace("\"", "");
                                color = color.Replace(" ", "");
                                recognizedText.Text = "Estableciendo fondo de color " + color + ".";
                                muestraHora         = false;
                                muestraTexto        = false;
                            });

                            try
                            {
                                if (colors.ContainsKey(color))
                                {
                                    await rectangle.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                                    {
                                        rectangle.Fill = new SolidColorBrush(colors[color]);
                                    });
                                }
                            }
                            catch (NullReferenceException) { }
                        }
                    }
                    catch (Exception)
                    {
                        // Swallow prediction failures so continuous recognition keeps running.
                    }

                    /*if (e.Result.Reason == ResultReason.RecognizedIntent)
                     * {
                     *
                     *  var jsonResponse = JObject.Parse(e.Result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult));
                     *  await textJson.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                     *  {
                     *      textJson.Text = jsonResponse.ToString();
                     *  });
                     *  var intent = jsonResponse.SelectToken("topScoringIntent.intent").ToString();
                     *
                     *  if (intent.Equals("CambioColor"))
                     *  {
                     *      try
                     *      {
                     *          string color = jsonResponse.SelectToken("$..entities[?(@.type=='Color')].entity").ToString();
                     *          if (colors.ContainsKey(color))
                     *              await rectangle.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                     *              {
                     *                  rectangle.Fill = new SolidColorBrush(colors[color]);
                     *              });
                     *      }
                     *      catch (NullReferenceException) { }
                     *
                     *
                     *
                     *  }
                     *
                     *
                     * }
                     * else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                     * {
                     *  await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                     *  {
                     *      recognizedText.Text = e.Result.Text;
                     *  });
                     * }
                     * else if (e.Result.Reason == ResultReason.NoMatch)
                     * {
                     *  await recognizedText.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                     *  {
                     *      recognizedText.Text = "Speech could not be recognized.";
                     *  });
                     * }*/
                };

                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine($"CANCELED: Reason={e.Reason}");

                    if (e.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    Console.WriteLine("\n    Session stopped event.");
                    Console.WriteLine("\nStop recognition.");
                    stopRecognition.TrySetResult(0);
                };

                Console.WriteLine("Say something...");
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Waits for completion.
                // Use Task.WaitAny to keep the task rooted.
                Task.WaitAny(new[] { stopRecognition.Task });

                // Stops recognition.
                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }
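The GetPredictionAsync helper this example relies on is not shown. A minimal sketch using the LUIS v3 runtime SDK (the Microsoft.Azure.CognitiveServices.Language.LUIS.Runtime NuGet package), with a hypothetical prediction key and endpoint, might look like this:

        private static async Task <PredictionResponse> GetPredictionAsync(string query)
        {
            // Hypothetical credentials; the sample does not show where these come from.
            var credentials = new ApiKeyServiceClientCredentials("YourLuisPredictionKey");

            using (var client = new LUISRuntimeClient(credentials) { Endpoint = "https://westus.api.cognitive.microsoft.com" })
            {
                // Query the production slot of the same LUIS app id used above.
                var request = new PredictionRequest {
                    Query = query
                };
                return(await client.Prediction.GetSlotPredictionAsync(
                           new Guid("ce892100-78a7-48b0-8e09-5dc18b16996d"), "Production", request));
            }
        }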
Example #30
        public static void Main(string [] args)
        {
            // LUIS configuration
            configLuis = SpeechConfig.FromSubscription("b1ea9c02d3954e9f9cf90592f9062ef6", "westus2");
            configLuis.SpeechRecognitionLanguage = "pt-BR";

            // Speech synthesis configuration
            configSpeech = SpeechConfig.FromSubscription("9184b21db9b743bd83770061b376ee2c", "westus2");
            configSpeech.SpeechSynthesisVoiceName = "Microsoft Server Speech Text to Speech Voice (pt-BR, FranciscaNeural)";

            // Register the intents created in the LUIS app
            recognizer = new IntentRecognizer(configLuis);
            model      = LanguageUnderstandingModel.FromAppId("2940577d-15f2-42bb-a07f-a760cd651c0b");
            recognizer.AddIntent(model, "AvisarChegada", "avisar_chegada");
            recognizer.AddIntent(model, "Cumprimento", "cumprimento");
            recognizer.AddIntent(model, "Despedida", "despedida");
            recognizer.AddIntent(model, "Entregar", "entregar");
            recognizer.AddIntent(model, "Visita", "visitar");
            recognizer.AddIntent(model, "InformarDestino", "destino");
            recognizer.AddIntent(model, "AutorizarVisitante", "autorizado");
            recognizer.AddIntent(model, "NaoAutorizarVisitante", "nao_autorizado");

            synthesizer = new SpeechSynthesizer(configSpeech);

            // Read the rules defined in the XML file
            leitorXML = new Leitor();
            leitorXML.LeArquivoXML();

            // Validate the variables and rules read from the file via the logic processor
            processador = new Processador();

            while (controle_interacao)
            {
                try
                {
                    if (estado == 2 || estado == 3 || estado == 4)
                    {
                        controle_interacao = false;
                        ligacao_morador();
                    }
                    else if (estado == 6)
                    {
                        despedida();
                    }
                    else
                    {
                        RecognizeIntentAsync().Wait();
                    }
                }
                catch
                {
                    // Any failure (including recognition timeouts) lands here and is treated
                    // as silence; after three unanswered prompts the call is ended.
                    if (timeout == 0)
                    {
                        Resposta("Olá, tem alguém me escutando?").Wait();
                    }
                    else if (timeout == 1)
                    {
                        Resposta("Olá, ainda tem alguém aí?").Wait();
                    }
                    else if (timeout == 2)
                    {
                        Resposta("Encerrando a chamada").Wait();
                        controle_interacao = false;
                    }

                    timeout++;
                }
            }
        }
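The Resposta helper used in the loop above is not shown. Assuming it wraps the SpeechSynthesizer created earlier, a minimal sketch might be:

        private static async Task Resposta(string texto)
        {
            // Speak the reply with the pt-BR neural voice configured on configSpeech.
            using (var result = await synthesizer.SpeakTextAsync(texto))
            {
                if (result.Reason == ResultReason.Canceled)
                {
                    var details = SpeechSynthesisCancellationDetails.FromResult(result);
                    Console.WriteLine($"Synthesis canceled: {details.ErrorDetails}");
                }
            }
        }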