// Fades out the transition overlay, shows a 3-2-1 countdown, then activates
// the game manager and (on mobile) starts native ASR capture.
IEnumerator StartGame()
{
    LeanTween.alpha(guoChang, 0, 0.5f);
    if (WindowsManager.instance != null)
    {
        WindowsManager.instance.initWindows();
    }

    // Pretend the game is over so gameplay stays on hold during the countdown.
    isOver = true;
    yield return new WaitForSeconds(1f);

    reverseCount.gameObject.SetActive(true);
    foreach (string tick in new[] { "3", "2", "1" })
    {
        reverseCount.text = tick;
        yield return new WaitForSeconds(1f);
    }
    reverseCount.gameObject.SetActive(false);

    gameManager.SetActive(true);
    isOver = false;

    if (Application.isMobilePlatform)
    {
        ASR.startPlay();
        ASRIsOn = true;
    }
}
// Caches the ASR component and, when a microphone is present, records the
// default device's frequency range and grabs the AudioSource used for recording.
void Awake()
{
    asr = GetComponent<ASR>();

    // No recording device available: warn and skip the capability query.
    if (Microphone.devices.Length <= 0)
    {
        Debug.LogWarning("Microphone not connected!");
        return;
    }

    micConnected = true;
    //Debug.Log("Habe ein Mic");

    // Query the default microphone's supported recording frequency range.
    Microphone.GetDeviceCaps(null, out minFreq, out maxFreq);

    // Per Unity docs, (0, 0) means the device supports any frequency,
    // so fall back to the standard 44100 Hz sampling rate.
    if (minFreq == 0 && maxFreq == 0)
    {
        maxFreq = 44100;
    }

    goAudioSource = GetComponent<AudioSource>();
}
// Stops ASR capture (on mobile) and shows the conclusion window with the
// final score.
IEnumerator EndGame()
{
    ASRIsOn = false;
    if (Application.isMobilePlatform)
    {
        str.Capacity = 1024;
        ASR.stopPlay(1, str);
    }
    // Guard against a missing singleton — StartGame() already null-checks
    // WindowsManager.instance, so this path must do the same.
    if (WindowsManager.instance != null)
    {
        WindowsManager.instance.showConclusionWindows(score);
    }
    yield return null;
}
// Wires up wake-word recognition: registers the wake words, creates the
// KeywordRecognizer, and hooks phrase-recognized and engine-error callbacks.
void Awake()
{
    asr = GetComponent<ASR>();

    AddWakeWords(new String[] { "computer", "Auto" });

    // Low confidence so quiet or accented utterances still trigger.
    keywordRecognizer = new KeywordRecognizer(WakeWords.ToArray(), ConfidenceLevel.Low);
    keywordRecognizer.OnPhraseRecognized += KeywordRecognizer_OnPhraseRecognized;

    // Log engine-level failures of the wake-word system.
    PhraseRecognitionSystem.OnError += (errorCode) =>
        Debug.LogError(string.Format("***************Es ist ein Fehler in der WakeWord Engine aufgetreten: {0}", errorCode.ToString()));
}
// Polls input and the native ASR engine once per physics tick.
// In the Windows editor, any pressed key is forwarded to the game manager so
// gameplay can be tested without a microphone. Otherwise, while ASR is on,
// the recognition buffer is sampled every `timePerCalling` seconds and the
// newest word is forwarded whenever the buffer has changed.
private void FixedUpdate()
{
    if (Application.platform == RuntimePlatform.WindowsEditor)
    {
        if (Input.anyKeyDown)
        {
            // Scan all key codes to find which key fired this frame.
            foreach (KeyCode keyCode in Enum.GetValues(typeof(KeyCode)))
            {
                if (Input.GetKeyDown(keyCode))
                {
                    //Debug.Log("Current Key is : " + keyCode.ToString());
                    gameManagerInterface.recieveAsrResult(keyCode.ToString());
                }
            }
        }
    }
    // Nothing to poll while the recognizer is not running.
    if (ASRIsOn == false)
    {
        return;
    }
    try
    {
        // Throttle native calls: only query the ASR engine once every
        // `timePerCalling` seconds of accumulated fixed time.
        timeForCalling += Time.fixedDeltaTime;
        if (timeForCalling >= timePerCalling)
        {
            str.Capacity = 1024;
            timeForCalling %= timePerCalling;
            // Snapshot the previous buffer before the native call overwrites it.
            str_before = str.ToString();
            ASR.catchPlay(1, str);
            //ASR.text.text = str.ToString()+"| |"+str_before;
            //ASR.text.text = "";
            #region recognizeRe
            // Forward only when the buffer changed since the last poll; the
            // last whitespace-separated token is treated as the newest word.
            if (!str_before.Equals(str.ToString()))
            {
                string [] strs = str.ToString().Trim().Split(' ');
                ASR.text.text = strs [strs.Length - 1];
                gameManagerInterface.recieveAsrResult(strs[strs.Length - 1]);
            }
            #endregion
        }
    }
    catch (System.Exception o)
    {
        // Surface the failure on screen for on-device debugging, then rethrow.
        ASR.text.text = o.GetType().ToString() + "| |" + o.Message;
        throw;
    }
}
// Scene bootstrap: hides the game manager until the countdown finishes,
// prepares the ASR result buffer, and kicks off the StartGame coroutine.
void Awake()
{
    gameManager.SetActive(false);
    gameManagerInterface = gameManager.GetComponent<IGameManager>();

    // Buffer that the native ASR engine fills with recognition text.
    str = new StringBuilder(1024);
    ASRIsOn = false;
    if (Application.isMobilePlatform)
    {
        ASR.recorderSetUp(0);
        ASR.text.text = "Asr has set up";
    }

    guoChang = GameObject.Find("GuoChang").GetComponent<RectTransform>();
    reverseCount.gameObject.SetActive(false);

    StartCoroutine(StartGame());
}
// Sets up the Windows DictationRecognizer: wires result, error, hypothesis,
// and completion handlers, mapping each completion cause to an EventManager
// event so the rest of the app can react.
void Awake()
{
    debugText = GameObject.Find("DebugText").GetComponent<Text>();
    asr = GetComponent<ASR>();
    dictationRecognizer = new DictationRecognizer();
    // Abort dictation after 20 s of initial silence.
    dictationRecognizer.InitialSilenceTimeoutSeconds = 20f;
    dictationRecognizer.DictationResult += DictationRecognizer_DictationResult;
    dictationRecognizer.DictationError += DictationRecognizer_DictationError;
    dictationRecognizer.DictationComplete += (completionCause) =>
    {
        if (completionCause.Equals(DictationCompletionCause.Complete))
        {
            // NOTE(review): a SUCCESSFUL completion currently raises
            // ttsUnhandledError — the original author was unsure whether a
            // dedicated "switch back" event is needed here, since the result
            // should already be handled by DictationResult. Confirm intent.
            Debug.LogErrorFormat("STT erfolgreich fertig wg: {0}... brauche ich hier noch ein Event das zurückwechselt? Soll bereits bei DictationResult passieren.", completionCause.ToString());
            EventManager.TriggerEvent(EventManager.ttsUnhandledError, new EventMessageObject(EventManager.ttsUnhandledError, completionCause.ToString())); // TODO: create a dedicated event for this case if needed
        }
        else if (completionCause.Equals(DictationCompletionCause.TimeoutExceeded) || completionCause.Equals(DictationCompletionCause.PauseLimitExceeded))
        {
            // Timeouts map to the ttsTimeout event.
            Debug.LogErrorFormat("Dictation completed unsuccessfully: {0}.", completionCause);
            EventManager.TriggerEvent(EventManager.ttsTimeout, new EventMessageObject(EventManager.ttsTimeout, completionCause.ToString()));
        }
        else if (completionCause.Equals(DictationCompletionCause.AudioQualityFailure) || completionCause.Equals(DictationCompletionCause.MicrophoneUnavailable) || completionCause.Equals(DictationCompletionCause.NetworkFailure) || completionCause.Equals(DictationCompletionCause.UnknownError))
        {
            // Hard failures (audio, microphone, network, unknown) map to ttsError.
            Debug.LogErrorFormat("Dictation completed unsuccessfully: {0}.", completionCause);
            EventManager.TriggerEvent(EventManager.ttsError, new EventMessageObject(EventManager.ttsError, completionCause.ToString()));
        }
        else
        {
            // Any cause not covered above falls through to ttsUnhandledError.
            Debug.LogErrorFormat("Dictation fertig mit unbehandeltem Zustand: {0}", completionCause);
            EventManager.TriggerEvent(EventManager.ttsUnhandledError, new EventMessageObject(EventManager.ttsUnhandledError, completionCause.ToString()));
        }
    };
    dictationRecognizer.DictationHypothesis += (text) =>
    {
        // Show intermediate (not yet final) recognition text on screen.
        Debug.LogFormat("Dictation hypothesis: {0}", text);
        if (text != "")
        {
            debugText.text = text;
        }
        else
        {
            Debug.LogError("STT Eingabe erkannt - wurde aber nicht verstanden.");
        }
    };
}
// Syntacts example: builds basic oscillators and envelopes, combines them
// into complex cues, and plays one on channel 0 of the default device.
static void Main(string[] args)
{
    Session session = new Session();
    session.Open();

    int freq = 440; //Hz, Choose signal frequency, 440 Hz is audible and ok for speakers. Many tactors are closer to 150-250 Hz

    // We will begin by creating some basic oscillators, these are default amplitude 1.0 and infinite length of time
    Signal sin = new Sine(freq);     // Sine wave
    Signal squ = new Square(freq);   // Square wave
    Signal saw = new Saw(freq);      // Saw wave
    Signal tri = new Triangle(freq); // Triangle wave

    // We can use pulse width modulation (PWM) to quickly create a repeating cue train with frequency (1Hz) and duty cycle (0.3)
    Signal pwm = new Pwm(1, 0.3);

    // Now we can pair those oscillators with an envelope to give them shape
    // This is a basic envelope that specifies amplitude (0.9), and duration (0.5 sec)
    Signal bas = new Envelope(0.9, 0.5);

    // This is an attack (1 sec), sustain (2 sec), release (1 sec) envelope. The sustain amplitude is 1.0.
    Signal asr = new ASR(1, 2, 1, 1.0);

    // This adds one more part to the above envelope. Attack (1 sec, to amplitude 1.0), decay (2 sec),
    // sustain (3 sec, amplitude 0.8), release (1 sec). Curves can be added here as well
    Signal adsr = new ADSR(1, 2, 3, 1, 1.0, 0.8);

    // Pairing these oscillators and envelopes can give us complex cues
    Signal sig1 = sin * bas;
    Signal sig2 = sin * pwm * adsr;

    // More information in sequencing these in time can be found in examples_sequences
    session.Play(0, sig2);
    Sleep(sig2.length);
    session.Stop(0);

    session.Dispose();
}
/// <summary>Creates the instruction bound to its owning ASR instance.</summary>
public AnonymousInstruction6(ASR parent) => this.parent = parent;
// Syntacts "hello world": opens the default audio device, synthesizes a few
// Signals, and plays them on individual channels and on all channels at once.
static void Main(string[] args)
{
    // A Session is the Syntacts audio context. Open() with no arguments picks
    // the system default device; pass an index to open a specific one.
    Session session = new Session();
    session.Open();

    //-------------------------------------------------------------------------
    // Vibrations are represented by Signals and combinations of Signals.

    // Oscillators have INFINITE duration.
    Signal carrier = new Sine(440); // a 440 Hz sine wave
    Console.WriteLine(carrier.length); // inf

    // Envelopes have FINITE duration.
    Signal envelope = new ASR(1, 3, 1); // 1 s attack, 3 s sustain, 1 s release
    Console.WriteLine(envelope.length); // 5

    // Signals combine with math operators.
    Signal shaped = carrier * envelope; // 5 s of enveloped 440 Hz sine
    Console.WriteLine(shaped.length); // 5

    // Composition can also be done inline: an 880 Hz square wave, amplitude
    // modulated by a 10 Hz sine, under a 4 s ADSR envelope.
    Signal modulated = new Square(880) * new Sine(10) * new ADSR(1, 1, 1, 1);
    Console.WriteLine(modulated.length); // 4

    // For more advanced Signal synthesis, see "example_signals.cpp"

    //-------------------------------------------------------------------------
    // Playback happens on the Session's audio thread, so sleep this thread to
    // let each cue be heard before the program continues.

    // carrier is infinite, so it must be stopped explicitly after 3 seconds.
    session.Play(0, carrier);
    Sleep(3);
    session.Stop(0);

    // Finite Signals end on their own — no Stop() required.
    session.Play(1, shaped);
    Sleep(shaped.length);

    // A Signal can also be played on every channel at once.
    session.PlayAll(modulated);
    Sleep(modulated.length);

    //-------------------------------------------------------------------------
    // Always dispose the Session at the end of the program!
    session.Dispose();
}
/// <summary>Stores the owning main form so its properties can be accessed later.</summary>
public ASRproperties(ASR.App.MainForm form) => Form = form;