Example No. 1
0
        /// <summary>
        /// Handles the REST response for a ToSpeech request: decodes the returned
        /// audio, saves the raw bytes to the speech cache on success, and invokes
        /// the caller's callback (with null when the request failed).
        /// </summary>
        /// <param name="req">The originating request; must be a ToSpeechRequest.</param>
        /// <param name="resp">The REST response containing the audio data or error.</param>
        private void ToSpeechResponse(RESTConnector.Request req, RESTConnector.Response resp)
        {
            ToSpeechRequest request = req as ToSpeechRequest;
            if (request == null)
            {
                throw new WatsonException("Wrong type of request object.");
            }

            Log.Debug("TextToSpeech", "Request completed in {0} seconds.", resp.ElapsedTime);

            // Only attempt to decode the audio when the HTTP call succeeded.
            AudioClip audio = null;
            if (resp.Success)
            {
                audio = ProcessResponse(request.TextId, resp.Data);
            }

            if (audio == null)
            {
                Log.Error("TextToSpeech", "Request Failed: {0}", resp.Error);
            }
            else if (m_SpeechCache != null)
            {
                // Cache the raw response bytes so a later identical request can skip the network.
                m_SpeechCache.Save(request.TextId, resp.Data);
            }

            if (request.Callback != null)
            {
                request.Callback(audio);
            }
        }
Example No. 2
0
        /// <summary>
        /// Converts the given text into an AudioClip that can be played.
        /// </summary>
        /// <param name="text">The text to synthesis into speech.</param>
        /// <param name="callback">The callback to invoke with the AudioClip.</param>
        /// <param name="usePost">If true, then we use post instead of get, this allows for text that exceeds the 5k limit.</param>
        /// <returns>Returns true if the request is sent.</returns>
        public bool ToSpeech(string text, ToSpeechCallback callback, bool usePost = false)
        {
            if (string.IsNullOrEmpty(text))
            {
                throw new ArgumentNullException("text");
            }
            if (callback == null)
            {
                throw new ArgumentNullException("callback");
            }

            // Both the selected output format and voice must be ones we know how to request.
            if (!m_AudioFormats.ContainsKey(m_AudioFormat))
            {
                Log.Error("TextToSpeech", "Unsupported audio format: {0}", m_AudioFormat.ToString());
                return false;
            }
            if (!m_VoiceTypes.ContainsKey(m_Voice))
            {
                Log.Error("TextToSpeech", "Unsupported voice: {0}", m_Voice.ToString());
                return false;
            }

            // Strip markup, then key the cache on a hash of the cleaned text.
            text = Utility.RemoveTags(text);
            string textId = Utility.GetMD5(text);

            // Serve a cache hit locally and skip the network round-trip entirely.
            if (!DisableCache)
            {
                if (m_SpeechCache == null)
                {
                    m_SpeechCache = new DataCache("TTS_" + m_VoiceTypes[m_Voice]);
                }

                byte[] cached = m_SpeechCache.Find(textId);
                if (cached != null)
                {
                    callback(ProcessResponse(textId, cached));
                    return true;
                }
            }

            RESTConnector connector = RESTConnector.GetConnector(SERVICE_ID, "/v1/synthesize");
            if (connector == null)
            {
                Log.Error("TextToSpeech", "Failed to get connector.");
                return false;
            }

            ToSpeechRequest request = new ToSpeechRequest();
            request.TextId = textId;
            request.Text = text;
            request.Callback = callback;
            request.Parameters["accept"] = m_AudioFormats[m_AudioFormat];
            request.Parameters["voice"] = m_VoiceTypes[m_Voice];
            request.OnResponse = ToSpeechResponse;

            if (connector.UsingGateway || usePost)
            {
                // POST carries the text as a JSON body, avoiding the GET query-string length limit.
                Dictionary<string, string> body = new Dictionary<string, string>();
                body["text"] = text;

                request.Send = Encoding.UTF8.GetBytes(Json.Serialize(body));
                request.Headers["Content-Type"] = "application/json";
            }
            else
            {
                request.Parameters["text"] = text;
            }

            return connector.Send(request);
        }