/// <summary>
/// Writes a speech-detected event notification to the log box that matches the recognizer type.
/// </summary>
/// <param name="e">Recognition event data included in the log line.</param>
/// <param name="rt">Which recognizer model raised the event.</param>
/// <param name="eventType">Short label of the detected event (e.g. "start"/"end").</param>
private void SpeechDetectedEventHandler(RecognitionEventArgs e, RecoType rt, string eventType)
{
    var targetLog = rt == RecoType.Base
        ? this.baseModelLogText
        : this.customModelLogText;

    string message = String.Format(
        CultureInfo.InvariantCulture,
        "Speech recognition: Speech {0} detected event: {1}.",
        eventType,
        e.ToString());

    this.WriteLine(targetLog, message);
}
/// <summary>
/// Raises a speech-detected notification event carrying a formatted message.
/// </summary>
/// <param name="e">Recognition event data to include in the message.</param>
/// <param name="rt">Recognizer type (not used in the message; kept for signature compatibility).</param>
/// <param name="eventType">Short label of the detected event (e.g. "start"/"end").</param>
private void DetectedEventHandler(RecognitionEventArgs e, RecoType rt, string eventType)
{
    SpeechDetectedEvendEventArgs eventArgs = new SpeechDetectedEvendEventArgs();
    // BUG FIX: a space was missing between the event type and "detected",
    // producing messages like "Speech enddetected event".
    eventArgs.Message = "Speech recognition: Speech " + eventType + " detected event: " + e.ToString() + ".";
    OnSpeechDetectedEvent(eventArgs);
}
/// <summary>
/// Shows the session id of the ended speech segment in the message block.
/// </summary>
private void Recognizer_SpeechEndDetected(object sender, RecognitionEventArgs e)
{
    // Marshal the UI update onto the dispatcher thread.
    Dispatcher.Invoke(() => MessageBlock.Text = $"Speech End: {e.SessionId}");
}
/// <summary>
/// Handles a recognition event by appending the result's confidence and text
/// to the accumulated result list.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="args">Recognition event data carrying the result.</param>
private void RecognitionEventHandler(object sender, RecognitionEventArgs args)
{
    var recognized = args.Result;
    ResultList += $"[Recognition] {recognized.Confidence:f3} : {recognized.Text}\n";
}
/// <summary>
/// Raises the GestureRecognized event.
/// </summary>
/// <param name="e">Recognition data passed to subscribers.</param>
protected virtual void OnGestureRecognized(RecognitionEventArgs e)
{
    // BUG FIX: copy the delegate to a local before invoking. The original
    // check-then-invoke pattern could throw NullReferenceException if the last
    // subscriber unsubscribed between the null test and the call.
    var handler = GestureRecognized;
    if (handler != null)
    {
        handler(this, e);
    }
}
/// <summary>
/// Executes the action configured for a recognized gesture via its plugin.
/// Does nothing while teaching mode is active or when no action/plugin is configured.
/// </summary>
protected void GestureManager_GestureRecognized(object sender, RecognitionEventArgs e)
{
    // Nothing should execute while the user is teaching gestures.
    if (Properties.Settings.Default.Teaching)
    {
        return;
    }

    // Resolve the action bound to this gesture, if any.
    IAction action = Applications.ApplicationManager.Instance.GetAnyDefinedAction(e.GestureName);
    if (action == null)
    {
        return;
    }

    // Resolve the plugin that implements the action.
    IPluginInfo plugin = FindPluginByClassAndFilename(action.PluginClass, action.PluginFilename);
    if (plugin == null)
    {
        return;
    }

    // Push the stored settings into the plugin, then run it with the capture point.
    plugin.Plugin.Deserialize(action.ActionSettings);
    plugin.Plugin.Gestured(new PointInfo(e.CapturePoint));
}
/// <summary>
/// Logs a speech-detected event to the base-model log box.
/// </summary>
/// <param name="e">Recognition event data included in the log line.</param>
/// <param name="eventType">Short label of the detected event (e.g. "start"/"end").</param>
private void SpeechDetectedEventHandler(RecognitionEventArgs e, string eventType)
{
    string message = String.Format(
        CultureInfo.InvariantCulture,
        "Speech recognition: Speech {0} detected event: {1}.",
        eventType,
        e.ToString());
    this.WriteLine(this.baseModelLogText, message);
}
/// <summary>
/// Shows a "???" placeholder when an utterance is rejected by the recognizer.
/// No-op when there is no command target to update.
/// </summary>
private void recognizer_SpeechRecognitionRejected(object sender, RecognitionEventArgs e)
{
    if (target == null)
    {
        return;
    }

    LastCommandText = "???";
}
/// <summary>
/// Wraps a newly recognized shape in a graphic element, inserts it at the top
/// of the element list, and makes it the current selection.
/// </summary>
private void AutoRecognizeShapes_RecognitionOccured(object sender, RecognitionEventArgs e)
{
    var recognized = new GraphicElement(e.Element) { Description = e.Description };
    this.Elements.Insert(0, recognized);
    this.Selected = recognized;
}
/// <summary>
/// Builds a strongly-typed view over the semantic slots ("dish", "kind",
/// "sauce", "def", "confirmation") of a recognition result.
/// </summary>
/// <param name="recognitionResult">Event data whose result carries the semantic values.</param>
public RecognizedText(RecognitionEventArgs recognitionResult)
{
    //TODO map to enum ?
    var semantics = recognitionResult.Result.Semantics;
    Meal = semantics["dish"].Value.ToString();
    Kind = semantics["kind"].Value.ToString();
    Sauce = semantics["sauce"].Value.ToString();
    Default = semantics["def"].Value.ToString();
    Confirmation = semantics["confirmation"].Value.ToString();
    Confidence = recognitionResult.Result.Confidence;
}
// Fired when the recognizer detects the end of speech: stop recognition,
// publish the recognized text to the UI, then close the window.
private void SpeechRecognizerSpeechEndDetected(object sender, RecognitionEventArgs e)
{
    // NOTE(review): fire-and-forget — the returned Task is not awaited, so the
    // stop may still be in progress when the form closes; confirm this is intended.
    this.speech_recognizer.StopContinuousRecognitionAsync();
    // Marshal the text update onto the control's owning thread.
    Action change_text = () => this.text_result = record_result.Text;
    record_result.Invoke(change_text);
    // Close the form on its own thread as well.
    Action close_this = () => this.Close();
    this.Invoke(close_this);
}
} // end method Image_MouseDown

// when the listener recognizes a phrase from the grammar, set the
// display string and call DisplaySpeak
void myGrammar_SpeechRecognized(object sender, RecognitionEventArgs e)
{
    // Use the phrase-to-description dictionary to get the
    // appropriate description for the spoken phrase.
    // NOTE(review): throws KeyNotFoundException for phrases outside the
    // dictionary — presumably the grammar only admits known phrases; verify.
    displayString = phraseDescriptions[e.Result.Text];

    // Use the dispatcher to call DisplaySpeak on the UI thread.
    this.Dispatcher.BeginInvoke(new Action(DisplaySpeak));
} // end method myGrammar_SpeechRecognized
/// <summary>
/// Runs every action bound to the recognized gesture through the shared
/// execution pipeline. Does nothing when no actions are defined.
/// </summary>
protected void PointCapture_GestureRecognized(object sender, RecognitionEventArgs e)
{
    var capture = (IPointCapture)sender;

    // Resolve the actions bound to this gesture; bail out when none exist.
    var actions = ApplicationManager.Instance.GetRecognizedDefinedAction(e.GestureName)?.ToList();
    if (actions == null)
    {
        return;
    }

    ExecuteAction(actions, capture.Mode, capture.SourceDevice, e.ContactIdentifiers, e.FirstCapturedPoints, e.Points);
}
/// <summary>
/// Logs a compact one-line summary of a recognition event: event kind,
/// confidence, grammar name (when present), and recognized text.
/// </summary>
private void Recognizer_Info(object sender, RecognitionEventArgs e)
{
    // Derive a short tag such as "Recognized" from e.g. "SpeechRecognizedEventArgs".
    string tag = e.GetType().Name.Replace("Speech", "").Replace("EventArgs", "");
    float conf = e.Result.Confidence;
    string name = e.Result.Grammar != null ? $" {e.Result.Grammar.Name}:" : "";
    Log($"Speech {tag} {conf:00.0%}{name} {e.Result.Text}");
}
// Executes every sub-command encoded in the recognized grammar's name,
// speaks the combined result, and records an information message.
void speechRecognition(object sender, RecognitionEventArgs e)
{
    string grammarName = e.Result.Grammar.Name;
    string combined = "";
    foreach (string singleCommand in SeparateCommands(grammarName))
    {
        combined += Command(singleCommand) + ", ";
    }

    IntelliRoomSystem.voiceEngine.Speak(combined);
    Data.InfoMessages.InformationMessage(
        "Se ejecutó por comando de voz: " + grammarName
        + " .Con la frase: " + e.Result.Text
        + " .Devolviendo: " + combined);
}
// Prints the recognized text in green, or a fallback notice in red when
// no usable text came back with the event.
private static void SpeechRecognizedHandler(Object sender, RecognitionEventArgs e)
{
    bool hasText = e.Result != null && !String.IsNullOrEmpty(e.Result.Text);
    if (hasText)
    {
        using (new ConsoleForegroundColor(ConsoleColor.Green))
        {
            Console.WriteLine(e.Result.Text);
        }
    }
    else
    {
        using (new ConsoleForegroundColor(ConsoleColor.Red))
        {
            Console.WriteLine("Recognized text not available.");
        }
    }
}
// Entry point for a completed touch gesture: runs every eligible action bound
// to the recognized gesture through its plugin.
protected void TouchCapture_GestureRecognized(object sender, RecognitionEventArgs e)
{
    var touchCapture = (ITouchCapture)sender;
    // Exit if we're teaching — nothing should execute during training.
    if (touchCapture.Mode == CaptureMode.Training)
    {
        return;
    }
    // Get all actions bound to the recognized gesture.
    IEnumerable<IAction> executableActions = ApplicationManager.Instance.GetRecognizedDefinedAction(e.GestureName);
    foreach (IAction executableAction in executableActions)
    {
        // Skip actions that are missing or disabled, actions blocked while
        // gestures are user-disabled (the toggle plugin itself is exempt so
        // gestures can be re-enabled), and actions whose condition does not
        // match the captured points.
        if (executableAction == null || !executableAction.IsEnabled || (touchCapture.Mode == CaptureMode.UserDisabled && !"GestureSign.CorePlugins.ToggleDisableGestures".Equals(executableAction.PluginClass)) || !Compute(executableAction.Condition, e.Points, e.ContactIdentifiers))
        {
            continue;
        }
        // Locate the plugin associated with this action.
        IPluginInfo pluginInfo = FindPluginByClassAndFilename(executableAction.PluginClass, executableAction.PluginFilename);
        // Skip if there is no plugin available for the action.
        if (pluginInfo == null)
        {
            continue;
        }
        // Load the stored action settings into the plugin.
        pluginInfo.Plugin.Deserialize(executableAction.ActionSettings);
        // Execute the plugin with the captured point data.
        pluginInfo.Plugin.Gestured(new PointInfo(e.FirstCapturedPoints, e.Points));
    }
}
// Diagnostic hook for the end-of-speech event: writes a timestamped
// developer-level log line identifying this recognizer and its target language.
private void OnSpeechEndDetected(object sender, RecognitionEventArgs e)
{
    // NoJumpNext presumably keeps the log cursor in place for this entry — confirm.
    LOG.NoJumpNext();
    LOG.WriteLine(LLV.DEV, $"{DateTime.Now.ToString(TimeUtil.FormatYMDHMSms)} {GetType().Name}({GetTargetRecognizeLanguage()}).OnSpeechEndDetected : {e}");
}
/// <summary>
/// Responds to a recognized word: speaks "Hi" back when the phrase "Hi" is
/// heard with confidence of at least 0.7.
/// </summary>
private void wordList_SpeechRecognized(object sender, RecognitionEventArgs e)
{
    // CLEANUP: the original had an empty else branch (dead code) and a
    // redundant nested if; the combined guard is behaviorally identical.
    if (e.Result.Confidence >= 0.7 && e.Result.Text == "Hi")
    {
        speaker.Speak("Hi");
    }
}
/// <summary>
/// Logs a generic speech event to the log box that matches the recognizer type.
/// </summary>
/// <param name="e">Recognition event data included in the log line.</param>
/// <param name="rt">Which recognizer model raised the event.</param>
private void SpeechDetectedEventHandler(RecognitionEventArgs e, RecoType rt)
{
    var log = (rt == RecoType.Base) ? this.baseModelLogText : this.customModelLogText;
    // CONSISTENCY FIX: the sibling overloads format with
    // CultureInfo.InvariantCulture; this one was implicitly using the current
    // culture (CA1305), making log output locale-dependent.
    this.WriteLine(log, String.Format(CultureInfo.InvariantCulture, "Speech recognition: Speech event: {0}.", e.ToString()));
}
/// <summary>
/// Writes a speech-detected notification for the given recognizer type to the console.
/// </summary>
private static void SpeechDetectedEventHandler(RecognitionEventArgs e, RecognizerType rt, string eventType)
{
    var message = string.Format(
        CultureInfo.InvariantCulture,
        "Speech recognition: Speech {0} detected event: {1}.",
        eventType,
        e.ToString());
    Console.WriteLine(message);
}
// Trace hook: notes that the recognizer reported the end of speech.
private void SpeechEndDetectedHandler(object sender, RecognitionEventArgs e)
{
    Debug.Log("recognizer speech end detected");
}
// Trace hook for the end-of-speech event: logs the event offset, then a
// plain confirmation line.
private void SpeechEndDetectedHandler(object sender, RecognitionEventArgs e)
{
    Debug.Log($"SpeechEndDetected received: offset: {e.Offset}.");
    Debug.Log($"Speech end detected.");
}
// Dispatches a recognized voice command: phrases not starting with "select"
// are mapped to toolbar commands by name; "select" phrases are parsed as a
// board-square selection ("select <file-letter> <rank-word>").
private void recognizer_SpeechRecognized(object sender, RecognitionEventArgs e)
{
    if (target == null)
    {
        return;
    }
    if (!e.Result.Text.StartsWith("select"))
    {
        // this is a toolbar command
        switch (e.Result.Text)
        {
            case "new": ApplicationCommands.New.Execute(null, target); break;
            case "first": CustomCommands.First.Execute(null, target); break;
            case "previous": CustomCommands.Previous.Execute(null, target); break;
            case "next": CustomCommands.Next.Execute(null, target); break;
            case "last": CustomCommands.Last.Execute(null, target); break;
            case "move": CustomCommands.Move.Execute(null, target); break;
            case "rotate": CustomCommands.Rotate.Execute(null, target); break;
        }
        LastCommandText = e.Result.Text;
    }
    else
    {
        // this is a select square command — get the square position.
        // The phrase has the fixed shape "select X word": index 7 is the file
        // letter and the substring from index 9 is the spoken rank word.
        char f = e.Result.Text[7];
        char r = '\0';
        switch (e.Result.Text.Substring(9))
        {
            case "one": r = '1'; break;
            case "two": r = '2'; break;
            case "three": r = '3'; break;
            case "four": r = '4'; break;
            case "five": r = '5'; break;
            case "six": r = '6'; break;
            case "seven": r = '7'; break;
            case "eight": r = '8'; break;
        }
        // Convert file/rank into the linear position understood by the Select command.
        int position = Utils.GetPosition(f, r);
        LastCommandText = "select " + f + r;
        CustomCommands.Select.Execute(position, target);
    }
}
// Prepares the person recognizer when the video stream starts: loads the
// trained face data, then begins capture, forwarding each frame to the view.
public void OnVideoStreamStarting(object sender, RecognitionEventArgs e)
{
    PersonRecognizer.Instance.LoadTrainedFaces();
    // Each captured frame is pushed straight to the view for display;
    // e.FromVideo presumably selects file vs. camera input — confirm.
    PersonRecognizer.Instance.StartCapture((frame) => _view.CurrentFrame = frame, e.FromVideo);
}
// Called when speech end is detected: logs the event offset and a confirmation line.
private void SpeechEndDetectedHandler(object sender, RecognitionEventArgs e)
{
    // BUG FIX: the strings are already interpolated, so passing them to
    // LogFormat would re-parse them as composite format strings and throw
    // FormatException if the payload ever contained '{' or '}'.
    // Debug.Log writes the finished string as-is.
    UnityEngine.Debug.Log($"SpeechEndDetected received: offset: {e.Offset}.");
    UnityEngine.Debug.Log("Speech end detected.");
}
// Trace hook for recognition cancellation.
private void CancelHandler(object sender, RecognitionEventArgs e)
{
    // BUG FIX: the message previously read "SpeechEndDetectedHandler called",
    // a copy-paste leftover that attributed the log line to the wrong handler.
    Debug.Log("CancelHandler called");
}
// Trace hook: notes that the recognizer reported the start of speech.
private void SpeechStartDetected(object sender, RecognitionEventArgs e)
{
    Debug.Log("SpeechStart Handler called");
}
// Announces on the console that note-taking (dictation) has begun.
private void _speechRecognizer_SpeechStartDetected(object sender, RecognitionEventArgs e)
{
    Console.WriteLine("---- Taking notes ----");
}
// Announces on the console that note-taking (dictation) has ended.
private void _speechRecognizer_SpeechEndDetected(object sender, RecognitionEventArgs e)
{
    // CLEANUP: dropped the '$' prefix — the string contains no placeholders,
    // so the interpolation was a no-op.
    Console.WriteLine("---- End of taking notes. ----");
}
// Console trace for the end-of-speech event.
private static void OnSpeechEndDetected(object sender, RecognitionEventArgs e)
{
    // BUG FIX: the log line previously read "OnSpeechStartDetected", a
    // copy-paste error that mislabeled end-of-speech events as start events.
    Console.WriteLine($"OnSpeechEndDetected : {e}");
}
// Propagates the end-of-speech event to subscribers as a NotSpeaking state change.
private void IntentRecognizer_SpeechEndDetected(object sender, RecognitionEventArgs e)
{
    this.SpeechStateChanged?.Invoke(SpeechState.NotSpeaking);
}
// One-shot recognition handler: captures the recognized text, then detaches
// itself so it only fires once.
void rec_SpeechRecognized(object sender, RecognitionEventArgs e)
{
    temp = e.Result.Text;
    // Unsubscribe so subsequent recognitions are ignored (also avoids keeping
    // this object alive through the recognizer's event reference).
    sr.SpeechRecognized -= this.rec_SpeechRecognized;
}