Example #1
    /// <summary>
    /// This event is fired after the user pauses, typically at the end of a sentence. The full recognized string is returned here.
    /// </summary>
    /// <param name="text">The text that was heard by the recognizer.</param>
    /// <param name="confidence">A representation of how confident (rejected, low, medium, high) the recognizer is of this recognition.</param>
    private async void DictationRecognizer_DictationResult(string text, ConfidenceLevel confidence)
    {
        StopRecording();

        // Append textSoFar with latest text
        textSoFar.Append(text + ". ");

        string msg    = text;
        string result = "I'm sorry, I'm not sure how to answer that";

        if (await tmsBot.SendMessage(msg))
        {
            ConversationMessages messages = await tmsBot.GetMessages();

            for (int i = 1; i < messages.messages.Length; i++)
            {
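                // Each pass overwrites 'result', so only the last returned message's text is kept.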
                result = messages.messages[i].text;
            }
        }

        //animator.Play("Happy");
        MyTTS.SpeakText(result);
        // Set DictationDisplay text to be textSoFar
        //DictationDisplay.text = textSoFar.ToString();
    }
Example #2
    private void PlayInstructions()
    {
        switch (instrSwitch)
        {
        case 0:
        {
            print("Nothing to say!");
            break;
        }

        case 1:
        {
            textToSpeechManager.SpeakText("Welcome to our study! The time is " + DateTime.Now.ToString("t"));
            break;
        }

        case 2:
        {
            textToSpeechManager.SpeakText("Explore the room with the HoloLens");
            break;
        }

        case 3:
        {
            textToSpeechManager.SpeakText("Explore the room for two minutes.");
            break;
        }

        default:
        {
            print("Incorrect level; nothing to say");
            break;
        }
        }
    }
Example #3
    // Use this for initialization
    void Start()
    {
        ttsMgr = GetComponent<TextToSpeechManager>();
        if (ttsMgr == null)
        {
            Debug.LogError("TextToSpeechManager Required");
        }

        anchorManager = WorldAnchorManager.Instance;
        if (anchorManager == null)
        {
            Debug.LogError("This script expects that you have a WorldAnchorManager component in your scene.");
        }

        spatialMappingManager = SpatialMappingManager.Instance;
        if (spatialMappingManager == null)
        {
            Debug.LogError("This script expects that you have a SpatialMappingManager component in your scene.");
        }

        if (anchorManager != null && spatialMappingManager != null)
        {
            anchorManager.AttachAnchor(this.gameObject, SavedAnchorFriendlyName);
            ttsMgr.SpeakText("Anchor Locked");
        }
        else
        {
            ttsMgr.SpeakText("Cannot Lock Anchor");
        }
    }
Example #4
    public void pronounceItem()
    {
        ttsManager.audioSource.loop = true;
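        // Debounce: skip this call if a pronunciation played less than 2 seconds ago.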
        if (timeSincePlayed < 2f)
        {
            return;
        }

        if (played)
        {
            ttsManager.audioSource.Play();
        }
        else
        {
            played = true;
            if (string.IsNullOrEmpty(this.ItemName))
            {
                ttsManager.SpeakText("Unknown");
            }
            else
            {
                ttsManager.SpeakText(this.ItemName);
            }
        }
        timeSincePlayed = 0;
    }
Example #5
 // Use this for initialization
 void Start()
 {
     textToSpeechManager.SpeakText("This is the Practice Scene");
     // Set up a GestureRecognizer to detect Select gestures.
     gestureRecognizer              = new GestureRecognizer();
     gestureRecognizer.TappedEvent += GestureRecognizer_TappedEvent;
     gestureRecognizer.StartCapturingGestures();
 }
Example #6
    // Use this for initialization
    void Start()
    {
        var soundManager = gameObject;

        tsm = soundManager.GetComponent<TextToSpeechManager>();

        tsm.SpeakText("Welcome to the Holographic App ! You can use Gaze, Gesture and Voice Command to interact with it!");
        tsm.SpeakText("this is a test!");
    }
Example #7
        private void HandleOnConversationStarted(object sender, string id)
        {
            conversationId   = id;
            isPerformingInit = false;
            var greetingMessage = "Greetings, I can help you with tracked objects. Just ask.";

            textToSpeechManager.SpeakText(greetingMessage);
            messageLabel.text         = greetingMessage;
            dictationButton.IsEnabled = true;
        }
Example #8
 private void Update()
 {
     // Yeah, this is a hack, but the TTS Manager did not seem to initialize fast
     // enough to make this SpeakText() call in Start() above
     if (IsStartup)
     {
         IsStartup = false;
         MyTTS.SpeakText("Welcome to the holographic model showcase. Look at the floor and pick a spot to place the model. Air Tap when ready.");
     }
 }
Example #9
    // This event handler's code only works in UWP (i.e. HoloLens)
#if WINDOWS_UWP
    /// <summary>
    /// This event is fired after the user pauses, typically at the end of a sentence. The full recognized string is returned here.
    /// </summary>
    /// <param name="text">The text that was heard by the recognizer.</param>
    /// <param name="confidence">A representation of how confident (rejected, low, medium, high) the recognizer is of this recognition.</param>
    private async void DictationRecognizer_DictationResult(string text, ConfidenceLevel confidence)
    {
        StopRecording();

        // Append textSoFar with latest text
        textSoFar.Append(text);

        // Set DictationDisplay text to be textSoFar, as returned by the hypothesis
        //DictationDisplay.text = textSoFar.ToString();

        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            // Display captions for the question
            captionsManager.SetCaptionsText(text);
        }, false);

        string msg    = text;
        string result = "I'm sorry, I'm not sure how to answer that";

        if (await tmsBot.SendMessage(msg))
        {
            ConversationActitvities messages = await tmsBot.GetMessages();

            if (messages.activities.Length > 0)
            {
                result = "";
            }

            // Note that attachments (like cards) are still not supported
            for (int i = 1; i < messages.activities.Length; i++)
            {
                // We focus on the speak tag if the bot was speech-enabled.
                // Otherwise we'll just speak the default text instead.
                if (messages.activities[i].speak.Length > 0)
                {
                    result += (messages.activities[i].speak + " ");
                }
                else
                {
                    result += (messages.activities[i].text + " ");
                }
            }
        }

        //animator.Play("Happy");
        MyTTS.SpeakText(result);

        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            // Display captions for the answer
            captionsManager.SetCaptionsText(result);
        }, false);
    }
Example #10
 // Uses Text to Speech Manager to say whatever is in the current open information panel
 public void Say()
 {
     if (textToSpeechManager != null)
     {
         if (!textToSpeechManager.IsSpeaking())
         {
             textToSpeechManager.SpeakText(a.getSpeechText());
         }
         else
         {
             textToSpeechManager.StopSpeaking();
         }
     }
 }
Example #11
    void Update()
    {
        switch (curentState)
        {
        case ControlState.CheckAnchorStatus:
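            // Report any anchors already persisted in the store, then attach the placement anchor.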
            var cnt = anchorStore.anchorCount;
            if (cnt > 0)
            {
                var sb = new StringBuilder("Found Anchor" + (cnt == 1 ? " " : "s "));
                foreach (var ids in anchorStore.GetAllIds())
                {
                    sb.Append(ids);
                }
                Debug.Log(sb.ToString());
                ttsMgr.SpeakText(sb.ToString());
                DisplayUI.Instance.AppendText(sb.ToString());
            }
            else
            {
                ttsMgr.SpeakText("No Anchors Found, Creating Anchor");
                Debug.Log("No Anchors Found, Creating Anchor");
            }
            anchorManager.AttachAnchor(PlacementObject, SavedAnchorFriendlyName);
            curentState = ControlState.Ready;
            break;

        case ControlState.Ready:
            break;

        case ControlState.PlaceAnchor:
            // TODO: Use GazeManager + Cursor Tracking instead of another Raycast
            var        headPosition  = Camera.main.transform.position;
            var        gazeDirection = Camera.main.transform.forward;
            RaycastHit hitInfo;
            if (Physics.Raycast(headPosition, gazeDirection, out hitInfo,
                                30.0f, spatialMappingManager.LayerMask))
            {
                PlacementObject.transform.position = hitInfo.point;

                // Rotate this object to face the user.
                //Quaternion toQuat = Camera.main.transform.localRotation;
                //toQuat.x = 0;
                //toQuat.z = 0;
                //this.transform.rotation = toQuat;
            }
            break;
        }
    }
Example #12
    // Use this for initialization
    void Start()
    {
        TextToSpeechManager textToSpeechManager = this.GetComponent<TextToSpeechManager>();

        textToSpeechManager.Voice = TextToSpeechVoice.Mark;
        textToSpeechManager.SpeakText("Welcome To Explore HoloLens. A Holographic View of a HoloLens device. You can use Gaze, Gesture and Voice Command to explore different components. Walk around and start exploring !");
    }
Example #13
    public void PlayTextToSpeechMessage(MCSComputerVisionOCRDto computerVisionOCR)
    {
        string message = string.Empty;

        if (string.IsNullOrEmpty(computerVisionOCR.text))
        {
            message = "I couldn't detect text";
        }
        else
        {
            message = string.Format("The text says, {0}", computerVisionOCR.text);
        }

        // Try and get a TTS Manager
        TextToSpeechManager tts = null;

        if (photoCaptureManagerGmObj != null)
        {
            tts = photoCaptureManagerGmObj.GetComponent<TextToSpeechManager>();
        }

        if (tts != null)
        {
            //Play voice message
            tts.SpeakText(message);
        }
    }
Example #14
    /// <summary>
    /// This event is fired after the user pauses, typically at the end of a sentence. The full recognized string is returned here.
    /// </summary>
    /// <param name="text">The text that was heard by the recognizer.</param>
    /// <param name="confidence">A representation of how confident (rejected, low, medium, high) the recognizer is of this recognition.</param>
    private void DictationRecognizer_DictationResult(string text, ConfidenceLevel confidence)
    {
        StopRecording();

        // Keep this if you want Spanish

        string          url       = "http://104.41.148.55:8069/trans?text=" + text;
        HttpWebRequest  request   = (HttpWebRequest)WebRequest.Create(url);
        HttpWebResponse response  = (HttpWebResponse)request.GetResponse();
        Stream          resStream = response.GetResponseStream();
        StreamReader    reader    = new StreamReader(resStream);
        string          fin       = reader.ReadToEnd();

        Debug.Log(fin);


        // Append textSoFar with latest text
        textSoFar.Append(text);

        var retText = speakSpan ? fin : text;

        captionsManager.SetCaptionsText(retText);

        //animator.Play("Happy"); // TO DO: Need to fix, not working yet
        MyTTS.SpeakText(text);

        // Set DictationDisplay text to be textSoFar
        //DictationDisplay.text = textSoFar.ToString();
    }
Example #15
    private void GestureRecognizer_TappedEvent(InteractionSourceKind source, int tapCount, Ray headRay)
    {
        GazeManager gm = GazeManager.Instance;

        if (gm.Hit)
        {
            // Get the target object
            GameObject obj = gm.HitInfo.collider.gameObject;

            // Try and get a TTS Manager
            TextToSpeechManager tts = null;
            if (obj != null)
            {
                tts = obj.GetComponent<TextToSpeechManager>();
            }

            // If we have a text to speech manager on the target object, say something.
            // This voice will appear to emanate from the object.
            if (tts != null)
            {
                // Get the name
                var voiceName = Enum.GetName(typeof(TextToSpeechVoice), tts.Voice);

                // Create message
                var msg = string.Format("This is the {0} voice. It should sound like it's coming from the object you clicked. Feel free to walk around and listen from different angles.", voiceName);

                // Speak message
                tts.SpeakText(msg);
            }
        }
    }
Example #16
    /// <summary>
    /// Handle On Click Event for Lense
    /// </summary>
    /// <param name="eventData"></param>
    public void OnInputClicked(InputEventData eventData)
    {
        hit = GazeManager.Instance.HitInfo;
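        // Note: hit.transform is null when the gaze ray hit nothing, so the check below can throw; testing hit.transform first would be safer.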


        if (hit.transform.gameObject != null)
        {
            isTapped = !isTapped;

            if (isTapped)
            {
                TranslateLenseObjects(-5.0f);


                // Attach Sound Manager while  Air Tap
                var soundManager = GameObject.FindWithTag("SoundManager");
                TextToSpeechManager textToSpeech = soundManager.GetComponent<TextToSpeechManager>();
                textToSpeech.Voice = TextToSpeechVoice.Mark;
                textToSpeech.SpeakText("The HoloLens display is basically a set of transparent lenses placed just in front of the eyes.");
            }
            else
            {
                TranslateLenseObjects(5.0f);
            }
        }
    }
Example #17
    private void GestureRecognizer_TappedEvent(InteractionSourceKind source, int tapCount, Ray headRay)
    {
        GazeManager gm = GazeManager.Instance;

        if (gm.Hit)
        {
            // Get the target object
            GameObject obj = gm.HitInfo.collider.gameObject;

            // Try and get a TTS Manager
            TextToSpeechManager tts = null;
            if (obj != null)
            {
                tts = obj.GetComponent<TextToSpeechManager>();
            }

            // If we have a text to speech manager on the target object, say something.
            // This voice will appear to emanate from the object.
            if (tts != null)
            {
//                tts.SpeakText("This voice should sound like it's coming from the object you clicked. Feel free to walk around and listen from different angles.");
                tts.SpeakText("The time is " + DateTime.Now.ToString("t"));
            }
        }
    }
Example #18
    private async void UpdateIpd(float ipd)
    {
        // Update only allowed if signed in
        if (state == FBControllerState.LoggedIn)
        {
            try
            {
                // Set IPD
                await portal.SetInterPupilaryDistance(ipd);

                // Reread values
                ReadValues();

                // Define message to speak
                var speakText = string.Format("IPD set to {0}", ipd);

                // Speak the message
                textToSpeech.SpeakText(speakText);
            }
            catch (Exception ex)
            {
                // Show error on Unity thread
                ShowError(ex.Message);
            }
        }
    }
Example #19
    void IInputHandler.OnInputUp(InputEventData eventData)
    {
        if (eventData.PressType == InteractionSourcePressType.Select)
        {
            GameObject obj = FocusManager.Instance.TryGetFocusedObject(eventData);

            // Try and get a TTS Manager
            TextToSpeechManager tts = (obj == null)
                ? null
                : obj.GetComponent<TextToSpeechManager>();

            if (tts != null)
            {
                // If we have a text to speech manager on the target object, say something.
                // This voice will appear to emanate from the object.
                if (!tts.IsSpeaking())
                {
                    // Get the name
                    var voiceName = Enum.GetName(typeof(TextToSpeechVoice), tts.Voice);

                    // Create message
                    var msg = string.Format("This is the {0} voice. It should sound like it's coming from the object you clicked. Feel free to walk around and listen from different angles.", voiceName);

                    // Speak message
                    tts.SpeakText(msg);
                }
                else
                {
                    tts.StopSpeaking();
                }

                eventData.Use(); // Mark the event as used, so it doesn't fall through to other handlers.
            }
        }
    }
Example #20
    IEnumerator TextRequest(WWW www)
    {
        yield return www;

        // check for errors
        if (www.error == null)
        {
            string s = www.text;
            Debug.Log("Yseop response: " + s);

            int    start = s.IndexOf("text-result") + 13;
            int    end   = s.IndexOf("<p>");
            string text  = s.Substring(start, end - start);
            text = text.Replace("\n", string.Empty);
            text = text.Replace("\r", string.Empty);
            //text = text.Replace("\t", String.Empty);

            Debug.Log("Yseop text: " + text);

            //            textToSpeechManager.SpeakText("Your flow of 14 Patients starts from only one entry, which is ALVEOLOPLASTY. Overall, 28.57 % of Patients end at the main exit which is SURG TOOTH EXTRACT NEC (4 Patients). Then, there are CONT MECH VENT < 96 HRS with 2, and finally TOOTH EXTRACTION NEC with 2.");
            textToSpeechManager.SpeakText(text);
        }
        else
        {
            Debug.Log("WWW Error: " + www.error);
        }
    }
Example #21
    private void GestureRecognizer_TappedEvent(InteractionSourceKind source, int tapCount, Ray headRay)
    {
        GazeManager gm = GazeManager.Instance;

        if (gm.Hit)
        {
            // Get the target object
            GameObject obj = gm.HitInfo.collider.gameObject;

            // Try and get a TTS Manager
            TextToSpeechManager tts = null;
            if (obj != null)
            {
                tts = obj.GetComponent<TextToSpeechManager>();
            }

            // If we have a text to speech manager on the target object, say something.
            // This voice will appear to emanate from the object.
            if (tts != null)
            {
//                tts.SpeakText("This voice should sound like it's coming from the object you clicked. Feel free to walk around and listen from different angles.");
//                tts.SpeakText("The time is " + DateTime.Now.ToString("t"));
                tts.SpeakText("Your flow of 14 Patients starts from only one entry, which is ALVEOLOPLASTY. Overall, 28.57 % of Patients end at the main exit which is SURG TOOTH EXTRACT NEC (4 Patients). Then, there are CONT MECH VENT < 96 HRS with 2, and finally TOOTH EXTRACTION NEC with 2. ALVEOLOPLASTY: The Patients who began at ALVEOLOPLASTY and finished at SURG TOOTH EXTRACT NEC took only one relevant path: 2 Patients began at ALVEOLOPLASTY before ending at SURG TOOTH EXTRACT NEC. Out of the 4 Patients who began at ALVEOLOPLASTY and resulted at SURG TOOTH EXTRACT NEC, 25 % passed at OTHER GASTROSTOMY, REMOV GASTROSTOMY TUBE, OTHER ORTHODONTIC OPERAT. The Patients who began at ALVEOLOPLASTY and finished at PROSTHET DENTAL IMPLANT took only one relevant path: 1 Patients began at ALVEOLOPLASTY before ending, by a five times repetition, at PROSTHET DENTAL IMPLANT. Out of the 2 Patients who began at ALVEOLOPLASTY and resulted at PROSTHET DENTAL IMPLANT, 50 % passed at SURG TOOTH EXTRACT NEC. The Patients who began at ALVEOLOPLASTY and finished at CONT MECH VENT < 96 HRS took only one relevant path: 1 Patients began at ALVEOLOPLASTY, then to TOOTH EXTRACTION NEC and finally ending at CONT MECH VENT < 96 HRS. Out of the 2 Patients who began at ALVEOLOPLASTY and resulted at CONT MECH VENT < 96 HRS, 100 % passed at TOOTH EXTRACTION NEC, SPINAL TAP, AORTA - SUBCLV - CAROT BYPAS, HEAD / NECK VES RESEC-REPL, REP VESS W SYNTH PATCH, VENOUS CATH NEC, EXTRACORPOREAL CIRCULAT, CARDIOPULM RESUSCITA NOS, ARTERIAL CATHETERIZATION, INSERT ENDOTRACHEAL TUBE. The Patients who began at ALVEOLOPLASTY and finished at TOOTH EXTRACTION NEC took only one path: 2 Patients began at ALVEOLOPLASTY before ending at TOOTH EXTRACTION NEC. ALVEOLOPLASTY is the step that should be focused on to reduce the number of Patients who reach SURG TOOTH EXTRACT NEC");
            }
        }
    }
Example #22
        private void WebSocket_MessageReceived(Windows.Networking.Sockets.MessageWebSocket sender, Windows.Networking.Sockets.MessageWebSocketMessageReceivedEventArgs args)
        {
            using (DataReader dataReader = args.GetDataReader())
            {
                byte[] bytes = new byte[dataReader.UnconsumedBufferLength];
                dataReader.ReadBytes(bytes);
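                // Deserialize the Gabriel.ToClient protobuf message from the raw bytes.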

                Gabriel.ToClient toClient = Gabriel.ToClient.Parser.ParseFrom(bytes);

                if (toClient.ResultWrapper == null)
                {
                    // Set num tokens on welcome message
                    lock (_tokenLock)
                    {
                        _numTokens = toClient.NumTokens;
                    }
                    return;
                }

                // We only return one token to avoid race conditions when we have multiple messages in flight
                lock (_tokenLock)
                {
                    _numTokens++;
                }

                Gabriel.ResultWrapper resultWrapper = toClient.ResultWrapper;
                if (resultWrapper.Status != Gabriel.ResultWrapper.Types.Status.Success)
                {
                    UnityEngine.Debug.Log("Output status was: " + resultWrapper.Status);
                    return;
                }

                Instruction.EngineFields newEngineFields = resultWrapper.EngineFields.Unpack <Instruction.EngineFields>();
                if (newEngineFields.UpdateCount <= _engineFields.UpdateCount)
                {
                    // There was no update or there was an update based on a stale frame
                    return;
                }

                for (int i = 0; i < resultWrapper.Results.Count(); i++)
                {
                    Gabriel.ResultWrapper.Types.Result result = resultWrapper.Results[i];

                    if (!result.EngineName.Equals(Const.ENGINE_NAME))
                    {
                        UnityEngine.Debug.LogError("Got result from engine " + result.EngineName);
                    }

                    if (result.PayloadType == Gabriel.PayloadType.Text)
                    {
                        _speechFeedback = result.Payload.ToStringUtf8();
                        textToSpeechManager.SpeakText(_speechFeedback);
                    }
                }

                _engineFields = newEngineFields;
                UpdateHologram(newEngineFields, resultWrapper.FrameId);
            }
        }
Example #23
        private void DoTextToSpeech()
        {
            // Create message
            var msg = "Hello. I am a " + Color + " Cube! My ID is " + ID + ".";

            // Speak message
            TextToSpeech.SpeakText(msg);
        }
Example #24
    // Use this for initialization
    void Start()
    {
        var soundManager = GameObject.Find("Audio Manager");
        TextToSpeechManager textToSpeech = soundManager.GetComponent<TextToSpeechManager>();

        textToSpeech.Voice = TextToSpeechVoice.Mark;
        textToSpeech.SpeakText("Please walk around and map your room to start the game!");
    }
Example #25
 private void Say(string message)
 {
     Debug.Log("Announcement:" + message + "");
     if (Utils.IsHoloLens)
     {
         tts.SpeakText(message);
     }
 }
Example #26
 // Update is called once per frame
 void Update()
 {
     if (speak)
     {
         textMan.SpeakText(textToSay);
         Debug.Log(textToSay);
         speak = false;
     }
 }
Example #27
    IEnumerator InitSpeech()
    {
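        // Short delay before the intro line, presumably to give the TTS manager time to initialize.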
        yield return new WaitForSeconds(0.5f);

        txt2speech.SpeakText("Hello! Welcome to Human anatomy explorer . Please gaze at an empty space and air tap to start the demo");
        Debug.Log("Intro Speech");
        //yield return new WaitForSeconds(4.0f);
        // IntroText.gameObject.SetActive(true);
        //txtAnim.SetBool("trigFade", true);
    }
Example #28
    public void Play()
    {
        // Get the name
        var voiceName = System.Enum.GetName(typeof(TextToSpeechVoice), TextToSpeech.Voice);

        // Create message
        var msg = string.Format("This is the {0} voice. It should sound like it's coming from the object you clicked. Feel free to walk around and listen from different angles.", voiceName);

        // Speak message
        TextToSpeech.SpeakText(msg);
    }
Example #29
    //private TextToSpeech textToSpeech;

    /*private void Awake()
     * {
     *  textToSpeech = GetComponent<TextToSpeech>();
     * }*/

    public void OnInputClicked()
    {
        // Create message
        //var msg = string.Format(
        //"eu", textToSpeech.Voice.ToString());
        // Speak message
        //textToSpeech.StartSpeaking(msg);
        // MonoBehaviours can't be created with 'new'; fetch the manager attached to this GameObject instead (assuming one is present).
        TextToSpeechManager voice = GetComponent<TextToSpeechManager>();

        voice.SpeakText(texto);
    }
Example #30
    // This event handler's code only works in UWP (i.e. HoloLens)
#if WINDOWS_UWP
    /// <summary>
    /// This event is fired after the user pauses, typically at the end of a sentence. The full recognized string is returned here.
    /// </summary>
    /// <param name="text">The text that was heard by the recognizer.</param>
    /// <param name="confidence">A representation of how confident (rejected, low, medium, high) the recognizer is of this recognition.</param>
    private async void DictationRecognizer_DictationResult(string text, ConfidenceLevel confidence)
    {
        StopRecording();

        // Append textSoFar with latest text
        textSoFar.Append(text);

        // Set DictationDisplay text to be textSoFar
        //DictationDisplay.text = textSoFar.ToString();

        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            // Display captions for the question
            captionsManager.SetCaptionsText(text);
        }, false);

        string msg    = text;
        string result = "I'm sorry, I'm not sure how to answer that";

        if (await tmsBot.SendMessage(msg))
        {
            ConversationMessages messages = await tmsBot.GetMessages();

            for (int i = 1; i < messages.messages.Length; i++)
            {
                result = messages.messages[i].text;
            }
        }

        //animator.Play("Happy");
        MyTTS.SpeakText(result);

        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            // Display captions for the answer
            captionsManager.SetCaptionsText(result);
        }, false);
    }