/// <summary>
/// Handles input from the AerHandler
/// </summary>
/// <param name="input">The recognition result to process</param>
public void RecognizedInput(AerRecognitionResult input)
{
    if (input.Confidence < input.RequiredConfidence)
    {
        AerDebug.LogSpeech(input.Text, input.Confidence, false);
        return;
    }

    AerDebug.LogSpeech(input.Text, input.Confidence, true);

    if (input.Command != null)
    {
        if (_EventRegistry.ContainsKey(input.Command))
        {
            //If we haven't said 'stop listening'
            if (!_Squelched)
            {
                TimeSpan elapsed = DateTime.UtcNow.Subtract(_LastQueryTime);
                if (input.Command.Equals("AerEndQuery"))
                {
                    //Do nothing until Aer is addressed again...
                    //This makes (_LastQueryTime + elapsed time) > _StopListeningTime
                    _LastQueryTime = DateTime.UtcNow.Subtract(new TimeSpan(0, 0, _StopListeningTime));
                    _EventRegistry[input.Command](input);
                }
                else if (elapsed.TotalSeconds < _StopListeningTime)
                {
                    _LastQueryTime = DateTime.UtcNow;
                    _EventRegistry[input.Command](input);
                }
                else if (input.Command.Equals("AerQuery"))
                {
                    _LastQueryTime = DateTime.UtcNow;
                    _EventRegistry[input.Command](input);
                }
                //If require query is turned off...
                else if (!_ReqQuery)
                {
                    _EventRegistry[input.Command](input);
                }
            }
            else
            {
                //If we said 'start listening' to end the squelch state
                if (input.Command.Equals("StartListening"))
                {
                    _EventRegistry[input.Command](input);
                }
            }
        }
        else
        {
            AerDebug.LogError(@"Received command that didn't have a handler, command=" + input.Command);
        }
    }
    else
    {
        AerDebug.LogError(@"Received recognition result that didn't have a command semantic, '" + input.ToString() + "'");
    }
}
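//A minimal sketch of how commands might be registered with _EventRegistry.
//The Dictionary<string, Action<AerRecognitionResult>> type, the
//RegisterDefaultHandlers method, and the handler bodies are assumptions for
//illustration; only the command names used in RecognizedInput above appear
//in the source. (Requires 'using System;' and 'using System.Collections.Generic;'.)
private Dictionary<string, Action<AerRecognitionResult>> _EventRegistry =
    new Dictionary<string, Action<AerRecognitionResult>>();

public void RegisterDefaultHandlers()
{
    //"AerQuery" opens the query window that RecognizedInput tracks via _LastQueryTime
    _EventRegistry["AerQuery"] = input => { /* e.g. acknowledge being addressed */ };

    //"AerEndQuery" closes the window; RecognizedInput backdates _LastQueryTime first
    _EventRegistry["AerEndQuery"] = input => { /* e.g. acknowledge end of query */ };

    //"StartListening" is the only command dispatched while _Squelched is set;
    //clearing the flag here is an assumption about where that state lives
    _EventRegistry["StartListening"] = input => { _Squelched = false; };
}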
/// <summary>
/// Handles the SpeechRecognized event from the Recognition Engine
/// </summary>
/// <param name="sender">The recognition engine that raised the event</param>
/// <param name="e">Event data containing the recognition result</param>
public virtual void SpeechRecognized_Handler(object sender, SpeechRecognizedEventArgs e)
{
    string text = e.Result.Text;
    SemanticValue semantics = e.Result.Semantics;

    //Flag that new input has arrived and cache the result for consumers
    NewInput = true;
    LastResult = e.Result;

    AerDebug.LogSpeech(e.Result.Text, e.Result.Confidence);
}
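//A minimal sketch, assuming this handler is attached to a
//System.Speech.Recognition.SpeechRecognitionEngine (the event signature
//matches); StartRecognizer and the dictation grammar are illustrative,
//not the project's actual setup. (Requires 'using System.Speech.Recognition;'.)
public void StartRecognizer()
{
    SpeechRecognitionEngine engine = new SpeechRecognitionEngine();
    engine.SetInputToDefaultAudioDevice();

    //A real grammar would carry the command semantics RecognizedInput expects;
    //free dictation is used here only to keep the sketch self-contained
    engine.LoadGrammar(new DictationGrammar());

    engine.SpeechRecognized += SpeechRecognized_Handler;

    //RecognizeMode.Multiple keeps the engine listening after each result
    engine.RecognizeAsync(RecognizeMode.Multiple);
}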