// This event handler's code only works in UWP (e.g. HoloLens)
#if WINDOWS_UWP
    /// <summary>
    /// This event is fired after the user pauses, typically at the end of a sentence. The full recognized string is returned here.
    /// </summary>
    /// <param name="text">The text that was heard by the recognizer.</param>
    /// <param name="confidence">A representation of how confident (rejected, low, medium, high) the recognizer is of this recognition.</param>
    private async void DictationRecognizer_DictationResult(string text, ConfidenceLevel confidence)
    {
        StopRecording();

        // Append the latest text to textSoFar
        textSoFar.Append(text);

        // Set DictationDisplay text to textSoFar, as returned by the hypothesis
        //DictationDisplay.text = textSoFar.ToString();

        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            // Display captions for the question
            captionsManager.SetCaptionsText(text);
        }, false);

        string msg    = text;
        string result = "I'm sorry, I'm not sure how to answer that";

        if (await tmsBot.SendMessage(msg))
        {
            ConversationActitvities messages = await tmsBot.GetMessages();

            if (messages.activities.Length > 0)
            {
                result = "";
            }

            // Note that attachments (like cards) are still not supported
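            // Start at index 1: activities[0] is assumed to be the echo of the user's own message.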
            for (int i = 1; i < messages.activities.Length; i++)
            {
                // We focus on the speak tag if the bot was speech-enabled.
                // Otherwise we'll just speak the default text instead.
                if (messages.activities[i].speak?.Length > 0)
                {
                    result += (messages.activities[i].speak + " ");
                }
                else
                {
                    result += (messages.activities[i].text + " ");
                }
            }
        }

        //animator.Play("Happy");
        MyTTS.SpeakText(result);

        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            // Display captions for the answer
            captionsManager.SetCaptionsText(result);
        }, false);
    }
#endif
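A minimal sketch of how this handler is typically wired up with Unity's DictationRecognizer (UnityEngine.Windows.Speech). The StartRecording/StopRecording bodies below are assumptions for illustration; their original implementations are not shown in this snippet:

#if WINDOWS_UWP
    private UnityEngine.Windows.Speech.DictationRecognizer dictationRecognizer;

    private void StartRecording()
    {
        // Create the recognizer and subscribe the result handler above.
        dictationRecognizer = new UnityEngine.Windows.Speech.DictationRecognizer();
        dictationRecognizer.DictationResult += DictationRecognizer_DictationResult;
        dictationRecognizer.Start();
    }

    private void StopRecording()
    {
        // Tear down the recognizer once a full phrase has been captured.
        if (dictationRecognizer != null &&
            dictationRecognizer.Status == UnityEngine.Windows.Speech.SpeechSystemStatus.Running)
        {
            dictationRecognizer.DictationResult -= DictationRecognizer_DictationResult;
            dictationRecognizer.Stop();
            dictationRecognizer.Dispose();
        }
    }
#endif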
Example #2
    /// <summary>
    /// Sends requests to the Bot Framework via the DirectLine v3 API.
    /// The specific bot that gets called is configured via the DirectLine API key
    /// in the BotService class. This function runs in the background to ensure the
    /// application isn't blocked while we wait for the bot response.
    /// </summary>
    /// <param name="message">The recognized user utterance to send to the bot.</param>
    private async void SendBotRequestMessage(string message)
    {
        string result = "I'm sorry, I'm not sure how to answer that";

        // sends the message to the bot and awaits a response.
        if (await tmsBot.SendMessage(message))
        {
            ConversationActitvities messages = await tmsBot.GetMessages();

            if (messages.activities.Length > 0)
            {
                result = "";
            }

            // Note that attachments (like cards) are still not supported
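            // Start at index 1: activities[0] is assumed to be the echo of the user's own message.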
            for (int i = 1; i < messages.activities.Length; i++)
            {
                // We focus on the speak tag if the bot was speech-enabled.
                // Otherwise we'll just speak the default text instead.
                if (messages.activities[i].speak?.Length > 0)
                {
                    result += (messages.activities[i].speak + " ");
                }
                else
                {
                    result += (messages.activities[i].text + " ");
                }
            }
        }

        //animator.Play("Happy");
        recognizedString = result;
        // Use text-to-speech to respond to the user
        UnityDispatcher.InvokeOnAppThread(() => { UpdateUI(); });
        speechTTS.SpeakWithSDKPlugin(result);
    }
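For reference, a minimal sketch of the response shape both loops consume. The field names mirror the DirectLine v3 activity JSON, but the actual ConversationActitvities class (spelling as in the original BotService layer) and the Activity name used here are assumptions and may differ from the real definitions:

    // Hypothetical minimal shape; the real classes may carry more fields.
    [System.Serializable]
    public class ConversationActitvities
    {
        public Activity[] activities;
    }

    [System.Serializable]
    public class Activity
    {
        public string text;   // plain-text reply from the bot
        public string speak;  // speech-friendly reply, if the bot is speech-enabled
    }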