public IEnumerator HandleSpeak(IAction speakAction)
{
    //m_rpc.Perceive(new [] { EventHelper.ActionStart(m_rpc.CharacterName.ToString(), speakAction.Name.ToString(), IATConsts.PLAYER) });
    Name currentState = speakAction.Parameters[0];
    Name nextState = speakAction.Parameters[1];
    Name meaning = speakAction.Parameters[2];
    Name style = speakAction.Parameters[3];
    var Target = speakAction.Target;

    var dialogs = m_iat.GetDialogueActions(currentState, nextState, meaning, style);
    // Debug.Log("Here we go speaking: " + currentState.ToString() + " ns " + nextState.ToString() + " meaning " + meaning.ToString());

    // Pick a random utterance among the dialogue actions matching the given state transition, meaning and style.
    var dialog = dialogs.Shuffle().FirstOrDefault();
    // Debug.Log("Going to say: " + dialog.Utterance);

    if (dialog == null)
    {
        Debug.LogWarning("Unknown dialog action.");
        m_dialogController.AddDialogLine("... (unknown dialogue) ...");
    }
    else
    {
        this.setFloor(false);
        string subFolder = m_scenarioData.TTSFolder;
        if (subFolder != "<none>")
        {
            var path = string.Format("/TTS-Dialogs/{0}/{1}/{2}", subFolder, m_rpc.VoiceName, dialog.UtteranceId);
            var absolutePath = Application.streamingAssetsPath;
#if UNITY_EDITOR || UNITY_STANDALONE
            absolutePath = "file://" + absolutePath;
#endif
            string audioUrl = absolutePath + path + ".wav";
            string xmlUrl = absolutePath + path + ".xml";

            var audio = new WWW(audioUrl);
            var xml = new WWW(xmlUrl);
            yield return audio;
            yield return xml;

            var xmlError = !string.IsNullOrEmpty(xml.error);
            var audioError = !string.IsNullOrEmpty(audio.error);
            if (xmlError)
            {
                Debug.LogError(xml.error);
            }
            if (audioError)
            {
                Debug.LogError(audio.error);
            }

            m_dialogController.AddDialogLine(dialog.Utterance);

            if (xmlError || audioError)
            {
                yield return new WaitForSeconds(2);
            }
            else
            {
                var clip = audio.GetAudioClip(false);
                yield return _body.PlaySpeech(clip, xml.text);
                clip.UnloadAudioData();
            }
        }
        else
        {
            // No TTS folder configured: show the text line and wait instead of playing audio.
            m_dialogController.AddDialogLine(dialog.Utterance);
            yield return new WaitForSeconds(2);
        }

        if (RPC.GetBeliefValue("HasFloor(SELF)") != "True") //todo: replace with a constant
        {
            this.SetDialogueState(Target.ToString(), nextState.ToString());
            reply = dialog;
            just_talked = true;
        }
    }

    if (nextState.ToString() == "Disconnect")
    {
        this.End();
    }
}
private IEnumerator HandleSpeak(IAction speakAction)
{
    Name currentState = speakAction.Parameters[0];
    Name nextState = speakAction.Parameters[1];
    Name meaning = speakAction.Parameters[2];
    Name style = speakAction.Parameters[3];

    var dialogs = m_iat.GetDialogueActions(currentState, nextState, meaning, style);
    // Pick a random utterance among the matching dialogue actions.
    var dialog = dialogs.Shuffle().FirstOrDefault();

    if (dialog == null)
    {
        Debug.LogWarning("Unknown dialog action.");
        m_dialogController.AddDialogLine("... (unknown dialogue) ...");
    }
    else
    {
        string subFolder = m_scenarioData.TTSFolder;
        if (subFolder != "<none>")
        {
            var path = string.Format("/TTS-Dialogs/{0}/{1}/{2}", subFolder, m_rpc.VoiceName, dialog.UtteranceId);
            var absolutePath = Application.streamingAssetsPath;
#if UNITY_EDITOR || UNITY_STANDALONE
            absolutePath = "file://" + absolutePath;
#endif
            string audioUrl = absolutePath + path + ".wav";
            string xmlUrl = absolutePath + path + ".xml";

            var audio = new WWW(audioUrl);
            var xml = new WWW(xmlUrl);
            yield return audio;
            yield return xml;

            var xmlError = !string.IsNullOrEmpty(xml.error);
            var audioError = !string.IsNullOrEmpty(audio.error);
            if (xmlError)
            {
                Debug.LogError(xml.error);
            }
            if (audioError)
            {
                Debug.LogError(audio.error);
            }

            m_dialogController.AddDialogLine(dialog.Utterance);

            if (xmlError || audioError)
            {
                yield return new WaitForSeconds(2);
            }
            else
            {
                var clip = audio.GetAudioClip(false);
                yield return _body.PlaySpeech(clip, xml.text);
                clip.UnloadAudioData();
            }
        }
        else
        {
            // No TTS folder configured: show the text line and wait instead of playing audio.
            m_dialogController.AddDialogLine(dialog.Utterance);
            yield return new WaitForSeconds(2);
        }

        reply = dialog;
        just_talked = true;
    }

    if (nextState.ToString() == "Disconnect")
    {
        this.End();
    }

    AddEvent(EventHelper.ActionEnd(m_rpc.CharacterName.ToString(), speakAction.Name.ToString(), IATConsts.PLAYER).ToString());
}
public IEnumerator HandleSpeak(IAction speakAction)
{
    lastAction = speakAction;
    Name currentState = speakAction.Parameters[0];
    Name nextState = speakAction.Parameters[1];
    Name meaning = speakAction.Parameters[2];
    Name style = speakAction.Parameters[3];

    m_rpc.SaveToFile(m_rpc.CharacterName + "-output" + ".rpc");

    var dialog = m_iat.GetDialogueActions(currentState, nextState, meaning, style).FirstOrDefault();
    if (dialog == null)
    {
        Debug.LogWarning("Unknown dialog action.");
        m_dialogController.AddDialogLine("... (unknown dialogue) ...");
    }
    else
    {
        string subFolder = m_scenarioData.TTSFolder;
        if (subFolder != "<none>")
        {
            var provider = (AssetManager.Instance.Bridge as AssetManagerBridge)._provider;
            var path = string.Format("/TTS-Dialogs/{0}/{1}/{2}", subFolder, m_rpc.VoiceName, dialog.UtteranceId);

            AudioClip clip = null; //Resources.Load<AudioClip>(path);
            string xml = null;     //Resources.Load<TextAsset>(path);

            var xmlPath = path + ".xml";
            if (provider.FileExists(xmlPath))
            {
                // Load the xml metadata that accompanies the audio file.
                try
                {
                    using (var xmlStream = provider.LoadFile(xmlPath, FileMode.Open, FileAccess.Read))
                    {
                        using (var reader = new StreamReader(xmlStream))
                        {
                            xml = reader.ReadToEnd();
                        }
                    }
                }
                catch (Exception e)
                {
                    Debug.LogException(e);
                }

                if (!string.IsNullOrEmpty(xml))
                {
                    var wavPath = path + ".wav";
                    if (provider.FileExists(wavPath))
                    {
                        // Build an AudioClip from the raw samples of the .wav file.
                        try
                        {
                            using (var wavStream = provider.LoadFile(wavPath, FileMode.Open, FileAccess.Read))
                            {
                                var wav = new WavStreamReader(wavStream);
                                clip = AudioClip.Create("tmp", (int)wav.SamplesLength, wav.NumOfChannels, (int)wav.SampleRate, false);
                                clip.SetData(wav.GetRawSamples(), 0);
                            }
                        }
                        catch (Exception e)
                        {
                            Debug.LogException(e);
                            if (clip != null)
                            {
                                clip.UnloadAudioData();
                                clip = null;
                            }
                        }
                    }
                }
            }

            if (clip != null && xml != null)
            {
                yield return _body.PlaySpeech(clip, xml);
                clip.UnloadAudioData();
            }
            else
            {
                Debug.LogWarning("Could not find speech assets for a dialog");
                yield return new WaitForSeconds(2);
            }
        }
        else
        {
            yield return nextframe;
        }

        if (nextState.ToString() != "-") //todo: replace with a constant
        {
            AddEvent(string.Format("Event(Property-change,Suspect,DialogueState(Player),{0})", nextState));
        }
    }

    if (speakAction.Parameters[1].ToString() != "-") //todo: replace with a constant
    {
        var dialogueStateUpdateEvent = string.Format("Event(Property-Change, Suspect ,DialogueState({0}),{1})", speakAction.Target, speakAction.Parameters[1]);
        AddEvent(dialogueStateUpdateEvent);
    }

    if (nextState.ToString() == "Disconnect")
    {
        this.End();
    }

    m_rpc.Perceive(new Name[] { EventHelper.ActionEnd(m_rpc.CharacterName.ToString(), speakAction.Name.ToString(), IATConsts.PLAYER) });
    yield return new WaitForSeconds(0.1f);

    if (dialog != null) // dialog is null when no matching dialogue action was found
    {
        m_dialogController.AddDialogLine(dialog.Utterance);
        reply = dialog;
        just_talked = true;
    }
}
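The variant above relies on a project-specific WavStreamReader to turn the .wav stream into raw samples for AudioClip.Create and AudioClip.SetData. That reader is not shown in this section; purely as an illustration, a minimal reader with the same surface (NumOfChannels, SampleRate, SamplesLength, GetRawSamples), assuming an uncompressed 16-bit PCM file whose fmt chunk precedes its data chunk, could look like the sketch below. The class name and details are hypothetical, not the project's actual implementation.

// Sketch only: a minimal 16-bit PCM WAV reader with the same surface as the
// WavStreamReader used above. Hypothetical code, not the project's implementation.
using System;
using System.IO;

public class SimplePcmWavReader
{
    public int NumOfChannels { get; private set; }
    public uint SampleRate { get; private set; }
    public long SamplesLength { get; private set; }   // samples per channel
    private readonly float[] _samples;                // interleaved samples in [-1, 1]

    public SimplePcmWavReader(Stream stream)
    {
        using (var reader = new BinaryReader(stream))
        {
            reader.ReadBytes(12); // skip the RIFF header: "RIFF", chunk size, "WAVE"
            byte[] data = null;
            while (data == null)  // assumes the fmt chunk comes before the data chunk
            {
                var chunkId = new string(reader.ReadChars(4));
                var chunkSize = reader.ReadInt32();
                if (chunkId == "fmt ")
                {
                    reader.ReadInt16();                 // audio format (assumed to be PCM = 1)
                    NumOfChannels = reader.ReadInt16();
                    SampleRate = (uint)reader.ReadInt32();
                    reader.ReadBytes(chunkSize - 8);    // skip the rest of the fmt chunk
                }
                else if (chunkId == "data")
                {
                    data = reader.ReadBytes(chunkSize);
                }
                else
                {
                    reader.ReadBytes(chunkSize);        // skip any other chunk
                }
            }

            // Convert the 16-bit signed samples to the float range AudioClip.SetData expects.
            _samples = new float[data.Length / 2];
            for (int i = 0; i < _samples.Length; i++)
            {
                _samples[i] = BitConverter.ToInt16(data, i * 2) / 32768f;
            }
            SamplesLength = _samples.Length / NumOfChannels;
        }
    }

    public float[] GetRawSamples()
    {
        return _samples;
    }
}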
// Method that plays the audio file of a specific dialogue, i.e. what makes the agent talk.
private IEnumerator Speak(System.Guid id, Name initiator, Name target)
{
    // The player has no body, so we take a shortcut and ignore the fact that they would have a voice at all.
    if (_playerRpc.CharacterName == initiator)
    {
        yield break;
    }

    // Determine the voice type of the agent.
    var voiceType = _rpcList.Find(x => x.CharacterName == initiator).VoiceName;

    // Each utterance has a unique id, which we use to retrieve its audio file.
    var utteranceID = _iat.GetDialogActionById(id).UtteranceId;

    // This path can be changed; for now it is the path used in this project.
    var textToSpeechPath = "/SingleCharacterv4.0/TTS/" + voiceType + "/" + utteranceID;
    var absolutePath = Application.streamingAssetsPath;
#if UNITY_EDITOR || UNITY_STANDALONE
    absolutePath = "file://" + absolutePath;
#endif

    // Fetch the .wav file along with its xml configuration.
    string audioUrl = absolutePath + textToSpeechPath + ".wav";
    string xmlUrl = absolutePath + textToSpeechPath + ".xml";
    var audio = new WWW(audioUrl);
    var xml = new WWW(xmlUrl);
    yield return audio;
    yield return xml;

    // If either file could not be loaded, log the error and fall back to a short wait.
    var xmlError = !string.IsNullOrEmpty(xml.error);
    var audioError = !string.IsNullOrEmpty(audio.error);
    if (xmlError)
    {
        Debug.LogError(xml.error);
    }
    if (audioError)
    {
        Debug.LogError(audio.error);
    }

    if (xmlError || audioError)
    {
        yield return new WaitForSeconds(2);
    }
    else
    {
        var clip = audio.GetAudioClip(false);
        // The Unity Body Implement script allows us to play sound clips.
        yield return _agentBodyController.PlaySpeech(clip, xml.text);
        clip.UnloadAudioData();
    }
}
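The download-based examples above use the WWW class, which is deprecated in recent Unity versions. If this code were ported to a newer release, the loading step could be replaced with UnityWebRequest roughly as sketched below; the method name and the onLoaded callback are illustrative and not part of the original code, and the rest of each coroutine would stay the same.

// Sketch only: fetches the speech .wav and .xml with UnityWebRequest instead of the
// deprecated WWW class. Requires: using UnityEngine; using UnityEngine.Networking;
// using System.Collections;
private IEnumerator LoadSpeechAssets(string audioUrl, string xmlUrl, System.Action<AudioClip, string> onLoaded)
{
    using (var audioRequest = UnityWebRequestMultimedia.GetAudioClip(audioUrl, AudioType.WAV))
    using (var xmlRequest = UnityWebRequest.Get(xmlUrl))
    {
        yield return audioRequest.SendWebRequest();
        yield return xmlRequest.SendWebRequest();

        var audioError = !string.IsNullOrEmpty(audioRequest.error);
        var xmlError = !string.IsNullOrEmpty(xmlRequest.error);
        if (audioError)
        {
            Debug.LogError(audioRequest.error);
        }
        if (xmlError)
        {
            Debug.LogError(xmlRequest.error);
        }

        if (audioError || xmlError)
        {
            onLoaded(null, null);
        }
        else
        {
            // DownloadHandlerAudioClip decodes the downloaded .wav into an AudioClip.
            onLoaded(DownloadHandlerAudioClip.GetContent(audioRequest), xmlRequest.downloadHandler.text);
        }
    }
}

A caller would start this coroutine in place of the two WWW objects and, on success, pass the resulting clip and xml text to PlaySpeech exactly as the examples above do.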