/// <summary>
/// Handles a message from the listen web socket. Text (JSON) frames are
/// checked for an "error" payload; binary frames carry synthesized audio
/// and are forwarded to the listen callback.
/// </summary>
/// <param name="msg">The raw web-socket message received from the service.</param>
private void OnListenMessage(WSConnector.Message msg)
{
    if (msg is WSConnector.TextMessage)
    {
        WSConnector.TextMessage tm = (WSConnector.TextMessage)msg;
        IDictionary json = Json.Deserialize(tm.Text) as IDictionary;
        if (json != null)
        {
            if (json.Contains("error"))
            {
                string error = (string)json["error"];
                Log.Error("TextToSpeech.OnListenMessage()", "Error: {0}", error);
                StopListening();
                if (OnError != null)
                {
                    OnError(error);
                }
            }
        }
        else
        {
            Log.Error("TextToSpeech.OnListenMessage()", "Failed to parse JSON from server: {0}", tm.Text);
        }
    }
    else if (msg is WSConnector.BinaryMessage)
    {
        WSConnector.BinaryMessage message = (WSConnector.BinaryMessage)msg;
        // FIX: guard the delegate before invoking it. The original called
        // listenCallback unconditionally, which throws NullReferenceException
        // if the callback owner was destroyed; stop listening instead,
        // matching the sibling handlers' behavior.
        if (listenCallback != null)
        {
            listenCallback(message.Data);
        }
        else
        {
            StopListening();    // automatically stop listening if our callback is destroyed.
        }
    }
}
/// <summary>
/// Routes a server message from the listen web socket: recognition results,
/// server state notifications, and error payloads.
/// </summary>
/// <param name="msg">The raw web-socket message received from the service.</param>
private void OnListenMessage(WSConnector.Message msg)
{
    WSConnector.TextMessage textMessage = msg as WSConnector.TextMessage;
    if (textMessage == null)
        return;     // non-text frames are ignored by this handler

    IDictionary payload = Json.Deserialize(textMessage.Text) as IDictionary;
    if (payload == null)
    {
        Log.Error("SpeechToText", "Failed to parse JSON from server: {0}", textMessage.Text);
        return;
    }

    if (payload.Contains("results"))
    {
        SpeechResultList parsed = ParseRecognizeResponse(payload);
        if (parsed == null)
        {
            Log.Error("SpeechToText", "Failed to parse results: {0}", textMessage.Text);
            return;
        }

        // when we get results, start listening for the next block ..
        // if continuous is true, then we don't need to do this..
        if (!EnableContinousRecognition && parsed.HasFinalResult())
            SendStart();

        if (m_ListenCallback != null)
            m_ListenCallback(parsed);
        else
            StopListening();    // automatically stop listening if our callback is destroyed.
    }
    else if (payload.Contains("state"))
    {
        string serverState = (string)payload["state"];
#if ENABLE_DEBUGGING
        Log.Debug("SpeechToText", "Server state is {0}", serverState);
#endif
        // Once the server reports "listening", flush any audio queued while
        // we were waiting for that state.
        if (serverState == "listening" && m_IsListening && !m_ListenActive)
        {
            m_ListenActive = true;
            // send all pending audio clips ..
            while (m_ListenRecordings.Count > 0)
            {
                AudioData queued = m_ListenRecordings.Dequeue();
                m_ListenSocket.Send(new WSConnector.BinaryMessage(AudioClipUtil.GetL16(queued.Clip)));
                m_AudioSent = true;
            }
        }
    }
    else if (payload.Contains("error"))
    {
        string serverError = (string)payload["error"];
        Log.Error("SpeechToText", "Error: {0}", serverError);
        StopListening();
        if (OnError != null)
            OnError(serverError);
    }
    else
    {
        Log.Warning("SpeechToText", "Unknown message: {0}", textMessage.Text);
    }
}
/// <summary>
/// Handles a message from the listen web socket. Recognition results are
/// aggregated across utterances into finalResults, and the accumulated
/// results are flushed to the listen callback once the silence counter
/// exceeds the configured pause limit.
/// </summary>
/// <param name="msg">Text (JSON) message from the service; other frame types are ignored.</param>
private void OnListenMessage(WSConnector.Message msg)
{
    if (msg is WSConnector.TextMessage)
    {
        WSConnector.TextMessage tm = (WSConnector.TextMessage)msg;
        IDictionary json = Json.Deserialize(tm.Text) as IDictionary;
        if (json != null)
        {
            if (json.Contains("results"))
            {
                SpeechResultList results = ParseRecognizeResponse(json);
                if (results != null)
                {
                    // First result of a new utterance: broadcast the interim text.
                    if (newSpeech)
                    {
                        newSpeech = false;
                        UnityEngine.Debug.Log("--new: " + new SpeechToTextData(results).Text);
                        Cloudspace.NotificationCenter.DefaultCenter().PostNotification(null, "OnListeningToUser", new SpeechToTextData(results).Text);
                    }

                    // Accumulate results until the pause-limit flush below.
                    if (finalResults == null)
                    {
                        finalResults = results;
                    }
                    else
                    {
                        // Concatenate the new results onto the aggregate
                        // (Array.Copy replaces the original hand-rolled copy loops).
                        SpeechResult[] aggregated = new SpeechResult[finalResults.Results.Length + results.Results.Length];
                        System.Array.Copy(finalResults.Results, aggregated, finalResults.Results.Length);
                        System.Array.Copy(results.Results, 0, aggregated, finalResults.Results.Length, results.Results.Length);
                        finalResults.Results = aggregated;
                    }

                    // when we get results, start listening for the next block ..
                    // if continuous is true, then we don't need to do this..
                    if (!EnableContinousRecognition && results.HasFinalResult())
                        SendStart();

                    // NOTE(review): this variant deliberately stops after every
                    // result batch (the upstream callback-null check was removed).
                    StopListening();
                }
                else
                {
                    Log.Error("SpeechToText", "Failed to parse results: {0}", tm.Text);
                }
            }
            else if (json.Contains("state"))
            {
                string state = (string)json["state"];
#if ENABLE_DEBUGGING
                Log.Debug("SpeechToText", "Server state is {0}", state);
#endif
                // Once the server reports "listening", flush any audio queued
                // while we were waiting for that state.
                if (state == "listening" && m_IsListening && !m_ListenActive)
                {
                    m_ListenActive = true;
                    // send all pending audio clips ..
                    while (m_ListenRecordings.Count > 0)
                    {
                        AudioData clip = m_ListenRecordings.Dequeue();
                        m_ListenSocket.Send(new WSConnector.BinaryMessage(AudioClipUtil.GetL16(clip.Clip)));
                        m_AudioSent = true;
                    }
                }
            }
            else if (json.Contains("error"))
            {
                string error = (string)json["error"];
                Log.Error("SpeechToText", "Error: {0}", error);
                StopListening();
                if (OnError != null)
                {
                    OnError(error);
                }
            }
            else
            {
                Log.Warning("SpeechToText", "Unknown message: {0}", tm.Text);
            }
        }
        else
        {
            Log.Error("SpeechToText", "Failed to parse JSON from server: {0}", tm.Text);
        }
    }

    // Enough silent intervals have elapsed: flush the aggregated results.
    if (silenceCounter > m_PausesLimit)
    {
        silenceCounter = 0;
        if (finalResults != null)
        {
            newSpeech = true;
            // FIX: guard the delegate — the original invoked m_ListenCallback
            // unconditionally, throwing NullReferenceException if the callback
            // owner was destroyed.
            if (m_ListenCallback != null)
            {
                m_ListenCallback(finalResults);
            }
            finalResults = null;
        }
    }
}
/// <summary>
/// Handles a message from the listen web socket: recognition results,
/// speaker labels, server state notifications, and error payloads.
/// </summary>
/// <param name="msg">Text (JSON) message from the service; other frame types are ignored.</param>
private void OnListenMessage(WSConnector.Message msg)
{
    if (msg is WSConnector.TextMessage)
    {
        WSConnector.TextMessage tm = (WSConnector.TextMessage)msg;
        IDictionary json = Json.Deserialize(tm.Text) as IDictionary;
        if (json != null)
        {
            if (json.Contains("results"))
            {
                SpeechRecognitionEvent results = ParseRecognizeResponse(json);
                if (results != null)
                {
                    if (_listenCallback != null)
                    {
                        _listenCallback(results);
                    }
                    else
                    {
                        StopListening();    // automatically stop listening if our callback is destroyed.
                    }
                }
                else
                {
                    Log.Error("SpeechToText.OnListenMessage()", "Failed to parse results: {0}", tm.Text);
                }
            }
            else if (json.Contains("state"))
            {
                string state = (string)json["state"];
#if ENABLE_DEBUGGING
                Log.Debug("SpeechToText.OnListenMessage()", "Server state is {0}", state);
#endif
                // Once the server reports "listening", flush any audio queued
                // while we were waiting for that state.
                if (state == "listening" && _isListening && !_listenActive)
                {
                    _listenActive = true;
                    bool hasAudio = _listenRecordings.Count > 0;
                    // send all pending audio clips ..
                    while (_listenRecordings.Count > 0)
                    {
                        AudioData clip = _listenRecordings.Dequeue();
                        _listenSocket.Send(new WSConnector.BinaryMessage(AudioClipUtil.GetL16(clip.Clip)));
                        _audioSent = true;
                    }

                    // We may have received a stop command while waiting for the listening state.
                    if (_sendStopAfterListening && hasAudio)
                    {
                        SendStop();
                    }
                }
            }
            else if (json.Contains("speaker_labels"))
            {
                SpeakerRecognitionEvent speakerRecognitionEvent = ParseSpeakerRecognitionResponse(json);
                // FIX: guard the delegate — the original invoked
                // _speakerLabelCallback unconditionally, throwing
                // NullReferenceException when no callback is registered;
                // the "results" branch above already guards its delegate.
                if (speakerRecognitionEvent != null && _speakerLabelCallback != null)
                {
                    _speakerLabelCallback(speakerRecognitionEvent);
                }
            }
            else if (json.Contains("error"))
            {
                string error = (string)json["error"];
                Log.Error("SpeechToText.OnListenMessage()", "Error: {0}", error);
                StopListening();
                if (OnError != null)
                {
                    OnError(error);
                }
            }
            else
            {
                Log.Warning("SpeechToText.OnListenMessage()", "Unknown message: {0}", tm.Text);
            }
        }
        else
        {
            Log.Error("SpeechToText.OnListenMessage()", "Failed to parse JSON from server: {0}", tm.Text);
        }
    }
}