/// <summary>
/// Builds one entry for the dialog queue.
/// </summary>
/// <param name="spkr">Who is speaking (may be null / the player — see QueueDialogue).</param>
/// <param name="sprs">Portrait sprites shown while this line is displayed.</param>
/// <param name="msg">The text of the line.</param>
/// <param name="isInfo">True when this line is part of an info exchange.</param>
/// <param name="isInfoRequest">True when this line *requests* an info exchange.</param>
/// <param name="audioClip">Voice clip to play alongside the line.</param>
/// <param name="isCustom">True when the line is a player-built custom sentence.</param>
/// <param name="callback">Invoked for custom sentences (null otherwise).</param>
/// <param name="subjects">Optional override list for sentence subjects.</param>
/// <param name="objects">Optional override list for sentence objects.</param>
public DialogEntry(PersonState spkr, Sprite[] sprs, string msg, bool isInfo, bool isInfoRequest, AudioClipIndex audioClip, bool isCustom = false, UISentenceCallback callback = null, List<string> subjects = null, List<string> objects = null)
{
    speaker = spkr;
    sprites = sprs;
    message = msg;
    this.audio = audioClip;
    isInfoExchange = isInfo;
    isInfoExchangeRequest = isInfoRequest;
    isCustomSentence = isCustom;
    customSentenceCallback = callback;
    subjectOverrides = subjects;
    objectOverrides = objects;
}
/// <summary>
/// Plays a one-shot sound on the singleton's AudioSource.
/// Call from anywhere, e.g. <c>AudioPlayer.PlaySound(AudioClipIndex.IMPACT);</c>.
/// </summary>
/// <param name="index">Which clip to play; NONE is silently ignored.</param>
public static void PlaySound(AudioClipIndex index)
{
    // Don't play the same clip twice in a row — be silent instead.
    // Piano notes are the one exception and may repeat freely.
    if (index == prevClip && index != AudioClipIndex.PIANO)
    {
        return;
    }
    // NOTE: recorded before the HMM randomization below, so repeat detection
    // compares against the *requested* clip, not the one actually played.
    prevClip = index;

    // Special case: HMM requests are randomized across the two HMM variants.
    if (index == AudioClipIndex.HMM || index == AudioClipIndex.HMM2)
    {
        index = Random.Range(0, 2) == 0 ? AudioClipIndex.HMM : AudioClipIndex.HMM2;
    }

    if (index == AudioClipIndex.NONE)
    {
        return;
    }

    instance.GetComponent<AudioSource>().PlayOneShot(instance.audioClips[(int)index]);
}
/// <summary>
/// Plays the given clip at the ViveSR attach point, unless that clip is
/// already flagged as playing. The per-clip flag is cleared after a
/// clip-specific delay so rapid retriggers are throttled.
/// </summary>
/// <param name="index">Which clip to play.</param>
public void PlayAtAttachPoint(AudioClipIndex index)
{
    // FIX: was `isOn & !IsPlaying[index]` — non-short-circuiting `&` always
    // evaluated the dictionary lookup; use logical `&&`.
    if (isOn && !IsPlaying[index])
    {
        AudioSource.PlayClipAtPoint(AudioClips[index], ViveSR_Experience.instance.AttachPoint.transform.position);
        IsPlaying[index] = true;

        // FIX: the Drag case was a standalone `if` followed by an independent
        // if/else, so Drag also hit the final else and had its flag cleared
        // after one frame — defeating the intended 0.1s throttle. A single
        // else-if chain gives each clip exactly one reset path.
        if (index == AudioClipIndex.Drag)
        {
            this.Delay(() => { IsPlaying[index] = false; }, 0.1f);
        }
        else if (index == AudioClipIndex.FairyWalk)
        {
            this.Delay(() => { IsPlaying[index] = false; }, 0.55f);
        }
        else
        {
            // Any other clip may retrigger on the very next frame.
            this.DelayOneFrame(() => { IsPlaying[index] = false; });
        }
    }
}
/// <summary>Forwards to the int-indexed <c>DoMyBestPlay</c> overload.</summary>
/// <param name="index">Clip to play, converted to its integer index.</param>
public void DoMyBestPlay(AudioClipIndex index) => DoMyBestPlay((int)index);
/// <summary>
/// Processes a sentence spoken to this AI by <paramref name="person"/>,
/// updates beliefs/known words, and produces the AI's reply lines with
/// matching voice clips.
/// </summary>
/// <param name="person">The speaker; our confidence in them weights the new belief.</param>
/// <param name="sentence">What was said.</param>
/// <param name="spokenResponse">One or more reply lines, in order.</param>
/// <param name="audioResponse">One voice clip per reply line (NONE = silent).</param>
public void Listen(PersonState person, Sentence sentence, out string[] spokenResponse, out AudioClipIndex[] audioResponse)
{
    // Unique words are not worth talking about, and the AI should also not
    // store any beliefs about them — acknowledge vaguely and bail out.
    if (sentence.Subject.Type() == NounType.Unique || sentence.DirectObject.Type() == NounType.Unique)
    {
        PickNoncommittalReaction(out spokenResponse, out audioResponse);
        return;
    }

    // Allow the player to use these words for sentences later.
    // Special case hax: SuspectedName is not worth talking about.
    if (sentence.Subject != Noun.SuspectedName)
    {
        KnownWords.Add(sentence.Subject);
    }
    if (sentence.DirectObject != Noun.SuspectedName)
    {
        KnownWords.Add(sentence.DirectObject);
    }

    // Preemptively reject sentences that contradict something we are sure of.
    Sentence opposite = new Sentence(sentence.Subject, sentence.Verb, sentence.DirectObject, sentence.Adverb == Adverb.True ? Adverb.False : Adverb.True);
    float confidenceInOpposite = VerifyBelief(opposite);
    if (confidenceInOpposite >= 1f)
    {
        Debug.Log(person.PersonId + " told a lie: " + sentence);
        ConfidenceLost(person.PersonId);
        spokenResponse = new string[] { "What? I know that's not true." };
        audioResponse = new AudioClipIndex[] { AudioClipIndex.SURPRISE_EH };
        return;
    }

    float confidence = VerifyBelief(sentence);
    if (confidence > 0)
    {
        if (confidence >= 1)
        {
            spokenResponse = new string[] { "Sure, I already knew that." };
            audioResponse = new AudioClipIndex[] { AudioClipIndex.AGREE };
            return;
        }
        if (confidence >= 0.5)
        {
            // Should this actually early return? Increase confidence instead?
            // Maybe this is a way to "hack the system" to gain AI trust:
            // tell them things they already believe.
            spokenResponse = new string[] { "I suspected as much." };
            audioResponse = new AudioClipIndex[] { AudioClipIndex.AGREE };
            return;
        }
    }

    // Add this to beliefs, weighted by how much we trust the speaker.
    Debug.Log(mPersonId + " hears " + person.PersonId + " say " + sentence);
    SentenceBelief belief = new SentenceBelief(sentence, person.PersonId, mPersonConfidence[person.PersonId]);
    List<SentenceBelief> beliefs = new List<SentenceBelief>();
    beliefs.Add(belief);
    AddBeliefs(beliefs);

    // React personally when the sentence is about us (we are identified by
    // our hair color attribute).
    Noun myHairColor = mPerson.AttributeMap[NounType.HairColor];
    if (sentence.Subject == myHairColor)
    {
        spokenResponse = new string[] { "So " + sentence.DirectObject.AsPersonal() + "?", sentence.DirectObject.PersonalReaction() };
        audioResponse = new AudioClipIndex[] { AudioClipIndex.SURPRISE_EH, AudioClipIndex.NONE };
        return;
    }
    else if (sentence.DirectObject == myHairColor)
    {
        spokenResponse = new string[] { "So I'm " + sentence.Subject.AsSubject() + "...?", sentence.Subject.PersonalReaction() };
        audioResponse = new AudioClipIndex[] { AudioClipIndex.SURPRISE_EH, AudioClipIndex.NONE };
        return;
    }

    PickNoncommittalReaction(out spokenResponse, out audioResponse);
}

/// <summary>
/// Picks one of three mild acknowledgements with its matching voice clip.
/// Extracted to remove the identical reaction tables the original method
/// duplicated in two places.
/// </summary>
private void PickNoncommittalReaction(out string[] spokenResponse, out AudioClipIndex[] audioResponse)
{
    int randomReaction = Random.Range(0, 3);
    string[] possibleSpokenReact = new string[] { "Interesting...", "Hmm...", "Oh." };
    AudioClipIndex[] possibleAudio = new AudioClipIndex[] { AudioClipIndex.HMM, AudioClipIndex.HMM, AudioClipIndex.OH };
    spokenResponse = new string[] { possibleSpokenReact[randomReaction] };
    audioResponse = new AudioClipIndex[] { possibleAudio[randomReaction] };
}
/// <summary>
/// Appends an info-exchange *request* line to the end of the dialog queue.
/// </summary>
/// <param name="speaker">Who is speaking.</param>
/// <param name="sprites">Portrait sprites for the line.</param>
/// <param name="msg">The text of the line.</param>
/// <param name="audio">Optional voice clip (defaults to NONE).</param>
public void QueueInfoExchangeRequest(PersonState speaker, Sprite[] sprites, string msg, AudioClipIndex audio = AudioClipIndex.NONE)
{
    // isInfo = false, isInfoRequest = true.
    DialogEntry entry = new DialogEntry(speaker, sprites, msg, false, true, audio);
    mDialogEntries.Add(entry);
}
/// <summary>
/// Inserts a plain (non-info-exchange) dialog line at the given queue position.
/// </summary>
/// <param name="index">Position in the queue to insert at.</param>
/// <param name="speaker">Who is speaking.</param>
/// <param name="sprites">Portrait sprites for the line.</param>
/// <param name="msg">The text of the line.</param>
/// <param name="audio">Optional voice clip (defaults to NONE).</param>
private void InsertDialogue(int index, PersonState speaker, Sprite[] sprites, string msg, AudioClipIndex audio = AudioClipIndex.NONE)
{
    // isInfo = false, isInfoRequest = false.
    DialogEntry entry = new DialogEntry(speaker, sprites, msg, false, false, audio);
    mDialogEntries.Insert(index, entry);
}
/// <summary>
/// Appends a plain dialog line to the end of the queue.
/// Speaker can be null or the player.
/// </summary>
/// <param name="speaker">Who is speaking (null or the player allowed).</param>
/// <param name="sprites">Portrait sprites for the line.</param>
/// <param name="msg">The text of the line.</param>
/// <param name="audio">Optional voice clip (defaults to NONE).</param>
public void QueueDialogue(PersonState speaker, Sprite[] sprites, string msg, AudioClipIndex audio = AudioClipIndex.NONE) =>
    InsertDialogue(mDialogEntries.Count, speaker, sprites, msg, audio);