Example #1
        private void OnRecognizeResponse(RESTConnector.Request req, RESTConnector.Response resp)
        {
            RecognizeRequest recognizeReq = req as RecognizeRequest;

            if (recognizeReq == null)
            {
                throw new WatsonException("Unexpected request type.");
            }

            SpeechResultList result = null;

            if (resp.Success)
            {
                result = ParseRecognizeResponse(resp.Data);
                if (result == null)
                {
                    Log.Error("SpeechToText", "Failed to parse json response: {0}",
                              resp.Data != null ? Encoding.UTF8.GetString(resp.Data) : "");
                }
                else
                {
                    Log.Status("SpeechToText", "Received Recognize Response, Elapsed Time: {0}, Results: {1}",
                               resp.ElapsedTime, result.Results.Length);
                }
            }
            else
            {
                Log.Error("SpeechToText", "Recognize Error: {0}", resp.Error);
            }

            if (recognizeReq.Callback != null)
            {
                recognizeReq.Callback(result);
            }
        }
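
A minimal caller sketch for Example #1. The service field m_SpeechToText and the Recognize(AudioClip, callback) entry point are assumptions (the code that queues the RecognizeRequest is not shown above); the callback contract, including the null result on failure, follows OnRecognizeResponse.

        // Hypothetical caller, assuming a Recognize(AudioClip, callback) entry point on the
        // service instance; only the callback contract below is taken from Example #1.
        private void OnRecordingComplete(AudioClip clip)
        {
            m_SpeechToText.Recognize(clip, OnRecognizeResult);
        }

        private void OnRecognizeResult(SpeechResultList result)
        {
            // OnRecognizeResponse passes null when the request failed or the JSON could not be parsed.
            if (result == null)
            {
                Log.Warning("ExampleSpeechToText", "Recognize returned no result.");
                return;
            }

            if (result.HasFinalResult())
            {
                Log.Status("ExampleSpeechToText", "Transcript: {0}", new SpeechToTextData(result).Text);
            }
        }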
Example #2
        private void OnListenMessage(WSConnector.Message msg)
        {
            if (msg is WSConnector.TextMessage)
            {
                WSConnector.TextMessage tm = (WSConnector.TextMessage)msg;

                IDictionary json = Json.Deserialize(tm.Text) as IDictionary;
                if (json != null)
                {
                    if (json.Contains("results"))
                    {
                        SpeechResultList results = ParseRecognizeResponse(json);
                        if (results != null)
                        {
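                            // Accumulate streamed results: post a notification once when new speech
                            // begins, then append each block to finalResults until the pause check at
                            // the bottom of this method flushes them to the listen callback.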
                            if (newSpeech)
                            {
                                newSpeech = false;
                                UnityEngine.Debug.Log("--new: " + new SpeechToTextData(results).Text);
                                Cloudspace.NotificationCenter.DefaultCenter().PostNotification(null, "OnListeningToUser",
                                                                                               new SpeechToTextData(results).Text);
                            }
                            if (finalResults == null)
                            {
                                finalResults = results;
                            }
                            else
                            {
                                SpeechResult[] aggregated = new SpeechResult[finalResults.Results.Length + results.Results.Length];
                                for (int i = 0; i < finalResults.Results.Length; i++)
                                {
                                    aggregated[i] = finalResults.Results[i];
                                }
                                for (int i = finalResults.Results.Length; i < finalResults.Results.Length + results.Results.Length; i++)
                                {
                                    aggregated[i] = results.Results[i - finalResults.Results.Length];
                                }
                                finalResults.Results = aggregated;
                            }
                            //UnityEngine.Debug.Log("--agg: " + new SpeechToTextData(finalResults).AllText);

                            // when we get results, start listening for the next block ..
                            // if continuous is true, then we don't need to do this..
                            if (!EnableContinousRecognition && results.HasFinalResult())
                            {
                                SendStart();
                            }

                            // the guard on m_ListenCallback is commented out here, so listening
                            // stops after every results block that parses successfully.
                            //if (m_ListenCallback == null) {
                            StopListening();
                            //}
                        }
                        else
                        {
                            Log.Error("SpeechToText", "Failed to parse results: {0}", tm.Text);
                        }
                    }
                    else if (json.Contains("state"))
                    {
                        string state = (string)json["state"];
#if ENABLE_DEBUGGING
                        Log.Debug("SpeechToText", "Server state is {0}", state);
#endif
                        if (state == "listening")
                        {
                            if (m_IsListening)
                            {
                                if (!m_ListenActive)
                                {
                                    m_ListenActive = true;

                                    // send all pending audio clips ..
                                    while (m_ListenRecordings.Count > 0)
                                    {
                                        AudioData clip = m_ListenRecordings.Dequeue();
                                        m_ListenSocket.Send(new WSConnector.BinaryMessage(AudioClipUtil.GetL16(clip.Clip)));
                                        m_AudioSent = true;
                                    }
                                }
                            }
                        }
                    }
                    else if (json.Contains("error"))
                    {
                        string error = (string)json["error"];
                        Log.Error("SpeechToText", "Error: {0}", error);

                        StopListening();
                        if (OnError != null)
                        {
                            OnError(error);
                        }
                    }
                    else
                    {
                        Log.Warning("SpeechToText", "Unknown message: {0}", tm.Text);
                    }
                }
                else
                {
                    Log.Error("SpeechToText", "Failed to parse JSON from server: {0}", tm.Text);
                }
            }
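            // Once the pause counter exceeds the limit, hand the accumulated results to the
            // listen callback and reset state for the next utterance.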
            if (silenceCounter > m_PausesLimit)
            {
                silenceCounter = 0;

                if (finalResults != null)
                {
                    newSpeech = true;
                    // guard against the callback having been cleared or destroyed
                    if (m_ListenCallback != null)
                    {
                        m_ListenCallback(finalResults);
                    }
                    finalResults = null;
                }
            }
        }
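
The two index loops in Example #2 that merge finalResults and results can be written more compactly with System.Array.Copy; a behavior-equivalent sketch (the helper name Append is illustrative only):

        // Equivalent aggregation using System.Array.Copy instead of manual index loops.
        private static SpeechResult[] Append(SpeechResult[] first, SpeechResult[] second)
        {
            SpeechResult[] aggregated = new SpeechResult[first.Length + second.Length];
            System.Array.Copy(first, 0, aggregated, 0, first.Length);
            System.Array.Copy(second, 0, aggregated, first.Length, second.Length);
            return aggregated;
        }

In the handler above, the two loops would then collapse to finalResults.Results = Append(finalResults.Results, results.Results);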
Example #3
        private void OnListenMessage(WSConnector.Message msg)
        {
            if (msg is WSConnector.TextMessage)
            {
                WSConnector.TextMessage tm = (WSConnector.TextMessage)msg;

                IDictionary json = Json.Deserialize(tm.Text) as IDictionary;
                if (json != null)
                {
                    if (json.Contains("results"))
                    {
                        SpeechResultList results = ParseRecognizeResponse(json);
                        if (results != null)
                        {
                            // when we get results, start listening for the next block ..
                            // if continuous is true, then we don't need to do this..
                            if (!EnableContinousRecognition && results.HasFinalResult())
                            {
                                SendStart();
                            }

                            if (m_ListenCallback != null)
                            {
                                m_ListenCallback(results);
                            }
                            else
                            {
                                StopListening();            // automatically stop listening if our callback is destroyed.
                            }
                        }
                        else
                        {
                            Log.Error("SpeechToText", "Failed to parse results: {0}", tm.Text);
                        }
                    }
                    else if (json.Contains("state"))
                    {
                        string state = (string)json["state"];

#if ENABLE_DEBUGGING
                        Log.Debug("SpeechToText", "Server state is {0}", state);
#endif
                        if (state == "listening")
                        {
                            if (m_IsListening)
                            {
                                if (!m_ListenActive)
                                {
                                    m_ListenActive = true;

                                    // send all pending audio clips ..
                                    while (m_ListenRecordings.Count > 0)
                                    {
                                        AudioData clip = m_ListenRecordings.Dequeue();
                                        m_ListenSocket.Send(new WSConnector.BinaryMessage(AudioClipUtil.GetL16(clip.Clip)));
                                        m_AudioSent = true;
                                    }
                                }
                            }
                        }
                    }
                    else if (json.Contains("error"))
                    {
                        string error = (string)json["error"];
                        Log.Error("SpeechToText", "Error: {0}", error);

                        StopListening();
                        if (OnError != null)
                        {
                            OnError(error);
                        }
                    }
                    else
                    {
                        Log.Warning("SpeechToText", "Unknown message: {0}", tm.Text);
                    }
                }
                else
                {
                    Log.Error("SpeechToText", "Failed to parse JSON from server: {0}", tm.Text);
                }
            }
        }
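
A listening sketch for Example #3. The StartListening(callback) entry point is an assumption about the service's public API (only SendStart, StopListening, and m_ListenCallback appear above); the callback simply consumes final results while the service keeps streaming audio.

        // Hypothetical wiring, assuming a StartListening(callback) entry point on the
        // service instance; StopListening() and the callback signature match Example #3.
        private void BeginListening()
        {
            m_SpeechToText.StartListening(OnListenResult);
        }

        private void OnListenResult(SpeechResultList results)
        {
            // Interim results arrive continuously; only act once a final result is present.
            if (results != null && results.HasFinalResult())
            {
                Log.Status("ExampleSpeechToText", "Heard: {0}", new SpeechToTextData(results).Text);
            }
        }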