/// <summary>Traces the hypothesis start time to the debugger and, in verbose mode, records the word in the log.</summary>
void engine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Debug.WriteLine("Hypothesized word at time: " + e.Result.Audio.StartTime.ToString());
    if (!verbose)
    {
        return;
    }
    HelperMethods.WriteToLog("Hypothesized the word \"" + e.Result.Text + "\"", parent);
}
/// <summary>
/// Raises <c>SpeechPartial</c> when a hypothesis clears the configured confidence
/// threshold. A lower ("update") threshold applies once speech is already in progress.
/// </summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="e">Hypothesis event data.</param>
private void OnSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    var threshold = _speaking ? _config.MinUpdateConfidence : _config.MinStartConfidence;
    if (e.Result.Confidence >= threshold)
    {
        _speaking = true;
        // Null-conditional invoke: the original invoked the delegate directly, which
        // throws NullReferenceException when no subscriber is attached.
        SpeechPartial?.Invoke(this, e.Result.Text);
    }
}
/// <summary>Logs the hypothesized text and forwards the event to the command handler.</summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="args">Hypothesis event data.</param>
private void AddLog(object sender, SpeechHypothesizedEventArgs args)
{
    // The original declared `bool turn = true;` and returned when it was false —
    // an always-true guard (dead code); removed.
    // Also fixed the "RecognationResult" typo in the log output.
    Console.WriteLine($"RecognitionResult => {args.Result.Text}");
    this.commandHandler.Invoke(sender, args);
}
/// <summary>Filters out low-confidence or alternate-less hypotheses, then forwards the alternates.</summary>
private void speechRecognizer_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    if (e?.Result == null)
    {
        return;
    }
    var result = e.Result;
    if (result.Confidence < 0.1 || result.Alternates == null || result.Alternates.Count < 1)
    {
        return;
    }
    OnSpeechHypothesized(GenerateRecognizedSpeechObject(result.Alternates));
}
/// <summary>
/// Called whenever a partial recognition result is available.
/// </summary>
/// <param name="sender">The source of the event.</param>
/// <param name="e">An object that contains the event data.</param>
private void OnSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // Interval convention: the originating time is the END of the audio interval,
    // i.e. the audio start position plus its duration.
    var audio = e.Result.Audio;
    DateTime endOfInterval = this.streamStartTime + audio.AudioPosition + audio.Duration;

    // Publish the raw engine event first, then the distilled partial result.
    this.PostWithOriginatingTimeConsistencyCheck(this.SpeechHypothesized, e, endOfInterval);
    this.PostWithOriginatingTimeConsistencyCheck(
        this.PartialRecognitionResults,
        this.BuildPartialSpeechRecognitionResult(e.Result),
        endOfInterval);
}
/// <summary>Traces the hypothesis, invokes the optional callback, and runs the bound console command.</summary>
void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Debug.WriteLine("Speech Hypothesized: " + e.Result.Text + " - Confidence: %" + e.Result.Confidence * 100f);
    SpeechHypothesizedFunc?.Invoke(e);
    if (Console != null && SpeechHypothesizedCommand != null)
    {
        Console.Run(SpeechHypothesizedCommand + ' ' + e.Result.Text);
    }
}
/// <summary>Debug-traces the grammar name and text of each hypothesis.</summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="e">Hypothesis event data.</param>
private static void SpeechHypothesizedHandler(object sender, SpeechHypothesizedEventArgs e)
{
    Debug.WriteLine(" SpeechHypothesized event raised.");
    if (e.Result != null)
    {
        // Fixed: the original read e.Result.Grammar.Name ?? "<none>", which throws
        // NullReferenceException when Grammar itself is null; the null-coalescing
        // only covered a null Name.
        Debug.WriteLine(" Grammar = {0}; Text = {1}", e.Result.Grammar?.Name ?? "<none>", e.Result.Text);
    }
    else
    {
        Debug.WriteLine(" No recognition result available.");
    }
}
/// <summary>Writes the hypothesis and its confidence to the console while TTS mode is active.</summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="e">Hypothesis event data.</param>
private static void SpeechHypothesizing(object sender, SpeechHypothesizedEventArgs e)
{
    // If our confidence is < the confidence limit or scroll lock is disabled, or TTS is disabled, return.
    if (e.Result.Confidence < confidenceLimit || !Control.IsKeyLocked(Keys.Scroll) || !ttsEnabled)
    {
        return;
    }
    // Clear the current line if it was already being written.
    clearLine();
    // Fixed: the original used Confidence.ToString().Substring(0, 4), which throws
    // ArgumentOutOfRangeException for short renderings such as "1" or "0.5".
    // "0.00" always yields a two-decimal string of at least four characters.
    Console.Write(e.Result.Text + " - Confidence: " + e.Result.Confidence.ToString("0.00"));
}
// NOTE(review): the unconditional `return;` below disables this handler entirely —
// everything after it is unreachable. Presumably hypothesis dispatching was turned
// off on purpose during debugging; confirm before deleting or re-enabling.
private void Engine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    return;
    // Unreachable: would dispatch only hypotheses with confidence >= 0.85.
    if (e.Result.Confidence < 0.85) { return; }
    Console.WriteLine("! Hypothesis {0} - {1}", e.Result.Confidence, e.Result.Text);
    DispatchResult(e.Result, RecognizedSpeechType.Hypothesis);
}
/// <summary>
/// Called whenever a partial recognition result is available.
/// </summary>
/// <param name="sender">The source of the event.</param>
/// <param name="e">An object that contains the event data.</param>
private void OnSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // Hypothesis events carry no audio position of their own, so derive the
    // originating time from the recognizer's current audio position instead.
    DateTime hypothesisTime = this.streamStartTime + this.speechRecognitionEngine.RecognizerAudioPosition;

    // Publish the raw engine event first, then the distilled partial result.
    this.PostWithOriginatingTimeConsistencyCheck(this.SpeechHypothesized, e, hypothesisTime);
    this.PostWithOriginatingTimeConsistencyCheck(
        this.PartialRecognitionResults,
        this.BuildPartialSpeechRecognitionResult(e.Result),
        hypothesisTime);
}
// Listens for the wake phrase while the app is armed for recording.
private void Determinant_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // Trigger only on the exact wake phrase, above the confidence floor, and
    // only while `recording` is armed.
    if (e.Result.Text == "Ася, найди в интернете" && e.Result.Confidence > confidence && recording)
    {
        // Marshal UI work onto the form's thread.
        this.Invoke(new Action(() => StartRecording1()));
        // NOTE(review): a new SpeechDetected subscription is added on every trigger;
        // if `recording` is ever re-armed, handlers will stack — verify.
        determinant.SpeechDetected += new EventHandler <SpeechDetectedEventArgs>(Determinant_SpeechDetected);
        recording = false;
        this.Invoke(new Action(() => timer1.Start()));
    }
}
/// <summary>Mirrors math-grammar hypotheses into the abstraction's current input string; other grammars clear it.</summary>
private void SpeechRecognition_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    if (!abstraction.IsSpeechEnabled)
    {
        return;
    }
    bool isMathGrammar = e.Result.Grammar.Name.Equals(GRAMMAR_MATH);
    abstraction.CurrentInputString = isMathGrammar ? e.Result.Text : string.Empty;
}
/// <summary>Records the text and confidence of the most recent hypothesized word.</summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="e">Hypothesis event data.</param>
private void Engine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // Guard against missing data — and against an EMPTY word list, since Last()
    // throws InvalidOperationException on an empty collection (the original only
    // checked for null).
    if (e == null || e.Result == null || e.Result.Words == null || e.Result.Words.Count == 0)
    {
        return;
    }
    var lastWord = e.Result.Words.Last();
    Result.Add(lastWord.Text, lastWord.Confidence);
}
/// <summary>Echoes the current hypothesis to the console, overwriting the line in place.</summary>
private void sre_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // (A commented-out WPF dispatcher update of VoiceCommandDisplay used to live here.)
    Console.Write("\rSpeech Hypothesized: \t{0}", e.Result.Text);
}
/// <summary>Wraps a hypothesis event into a Result and hands it to the registered listener, if any.</summary>
private void processSpeechHypothesizedEventArgs(SpeechHypothesizedEventArgs result)
{
    if (listener == null)
    {
        return;
    }
    Result hypothesisResult = new Result();
    hypothesisResult.cancelled = false;
    hypothesisResult.timeout = false;
    if (result.Result != null)
    {
        hypothesisResult.Add(createHypothesis(result.Result));
    }
    listener.recognizeHypothesis(hypothesisResult);
}
/// <summary>Captures the reminder subject from a hypothesis, stores the reminder, and confirms it aloud.</summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="e">Hypothesis event data.</param>
private void reminderTask_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string speech = e.Result.Text;
    if (speech == null)
    {
        // Nothing usable was heard: resume the main recognizer and abandon the prompt.
        _recognizer.RecognizeAsync(RecognizeMode.Multiple);
        reminderTask.RecognizeAsyncCancel();
        return;
    }
    currentReminder.What = "You have a " + speech;
    reminders.Add(currentReminder);
    // Stop the one-shot reminder recognizer before re-arming the main one.
    reminderTask.RecognizeAsyncCancel();
    _recognizer.RecognizeAsync(RecognizeMode.Multiple);
    // Fixed: the original said "Reminder set for" with no trailing space, producing
    // e.g. "Reminder set for3 pm" in the spoken confirmation.
    Jarvis.SpeakAsync("Reminder set for " + currentReminder.When);
}
/// <summary>
/// Populates <c>Result</c> from a hypothesized recognition event.
/// </summary>
/// <param name="e">Event returned by the recognizer.</param>
/// <param name="Grammars">Loaded grammar set; alternate/word details are copied only when at least one grammar is loaded.</param>
public void CreateHypothesized(SpeechHypothesizedEventArgs e, CsGrammars Grammars)
{
    if (Grammars.Length() > 0)
    {
        // Alternate- and word-level data is only populated when grammars are loaded.
        Result.Alternates = e.Result.Alternates;
        Result.Homophones = e.Result.Homophones;
        Result.ReplacementWordUnits = e.Result.ReplacementWordUnits;
        Result.Words = e.Result.Words;
    }
    Result.Audio = e.Result.Audio;
    Result.Confidence = e.Result.Confidence;
    Result.Grammar = e.Result.Grammar;
    Result.HomophoneGroupId = e.Result.HomophoneGroupId;
    // Semantics are flattened to a JSON representation before storage.
    Result.Semantics = ConstructSemanticsJSON(e.Result.Semantics);
    Result.Text = e.Result.Text;
}
// Handle the SpeechHypothesized event: trace grammar name and text, tolerating missing data.
private static void SpeechHypothesizedHandler(object sender, SpeechHypothesizedEventArgs e)
{
    Debug.WriteLine(" In SpeechHypothesizedHandler:");
    string grammarName = e.Result != null && e.Result.Grammar != null ? e.Result.Grammar.Name : "<not available>";
    string resultText = e.Result != null ? e.Result.Text : "<not available>";
    Debug.WriteLine(" - Grammar Name = {0}; Result Text = {1}", grammarName, resultText);
}
// Handle the SpeechHypothesized event: print grammar name and text, tolerating missing data.
static void SpeechHypothesizedHandler(object sender, SpeechHypothesizedEventArgs e)
{
    Console.WriteLine("Speech Processor: In SpeechHypothesizedHandler:");
    string grammarName = e.Result != null && e.Result.Grammar != null ? e.Result.Grammar.Name : "<not available>";
    string resultText = e.Result != null ? e.Result.Text : "<not available>";
    Console.WriteLine(" - Grammar Name = {0}; Result Text = {1}", grammarName, resultText);
}
// NOTE(review): despite its name, this handler receives SpeechHypothesizedEventArgs —
// it reacts to hypotheses, not final recognitions. Confirm the event wiring is intentional.
void sre_SpeechRecognized(object sender, SpeechHypothesizedEventArgs e)
{
    Console.WriteLine("Speech recognized: " + e.Result.Text + " - Precision: " + e.Result.Confidence);
    SetMainWindows( );
    // Search for a key mapped on the command
    switch (e.Result.Text)
    {
        // Case controllers:
        case "DIREITA":
            // Press RIGHT briefly and flash the right indicator green (on the UI thread).
            this.Invoke(new Action(async() =>
            {
                input.Keyboard.KeyDown(VirtualKeyCode.RIGHT);
                rightBox.BackColor = Color.Green;
                await Task.Delay(150);
                rightBox.BackColor = Color.White;
                input.Keyboard.KeyUp(VirtualKeyCode.RIGHT);
            }));
            break;
        case "ESQUERDA":
            // Press LEFT briefly. NOTE(review): this flashes rightBox, not a left-side
            // indicator — possibly a copy/paste slip; verify against the UI layout.
            this.Invoke(new Action(async() =>
            {
                input.Keyboard.KeyDown(VirtualKeyCode.LEFT);
                rightBox.BackColor = Color.Green;
                await Task.Delay(150);
                rightBox.BackColor = Color.White;
                input.Keyboard.KeyUp(VirtualKeyCode.LEFT);
            }));
            break;
        case "ESCOLHER":
            // Confirm the currently selected list item, stop further recognition, and close.
            listView1.Invoke(new Action(async() =>
            {
                chooseBox.BackColor = Color.Green;
                var choosed = listView1.SelectedItems;
                gameChoosed = choosed[0].Text;
                _canRecognize = false;
                await Task.Delay(150);
                chooseBox.BackColor = Color.White;
                this.Close();
            }));
            break;
    }
}
/// <summary>Unpacks the hypothesis details and forwards them to the listener.</summary>
void recEngine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    var result = e.Result;
    float confidence = result.Confidence;
    string textResult = result.Text;
    string grammarName = result.Grammar.Name;
    string ruleName = result.Grammar.RuleName;
    KeyValuePair<string, SemanticValue>[] kvp = result.Semantics.ToArray();
    // Audio can be null for hypothesis results (apparent engine limitation), in
    // which case the duration is reported as -1.
    double audioDuration = result.Audio?.Duration.TotalMilliseconds ?? -1;
    listener.handleSpeechHypothesizedResult(confidence, textResult, grammarName, ruleName, kvp, audioDuration);
}
/// <summary>Shows the image matching the hypothesized name, or clears everything on "clear".</summary>
void SpeechEngineSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    switch (e.Result.Text)
    {
        case "brian":
            brianImg.Visibility = Visibility.Visible;
            break;
        case "jimmy":
            jimmyImg.Visibility = Visibility.Visible;
            break;
        case "elaine":
            elaineImg.Visibility = Visibility.Visible;
            break;
        case "binila":
            binilaImg.Visibility = Visibility.Visible;
            break;
        case "clear":
            clearAll();
            break;
    }
}
// Streams partial hypotheses to the console and, after a 3-second quiet window,
// feeds sufficiently confident partial input to the chat bot and speaks the reply.
private static void Recognizer_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Console.ForegroundColor = ConsoleColor.DarkMagenta;
    writeMessage($"({e.Result.Text} ... {e.Result.Confidence})");
    Console.ResetColor();
    // Consult the bot only when partial-input mode is on, at least 3s have passed
    // since the last response, and the hypothesis clears a 0.25 confidence floor.
    if (partialInput && partialInputTimeout.Elapsed >= TimeSpan.FromSeconds(3) && e.Result.Confidence >= 0.25)
    {
        var response = bot.Chat(new Request("PartialInput " + e.Result.Text, user, bot), false);
        // Strip <oob>...</oob> control payloads before displaying/speaking.
        var text = Regex.Replace(response.ToString(), "<oob>([^<]*)</oob>", "");
        if (!string.IsNullOrWhiteSpace(text))
        {
            partialInputTimeout.Restart();
            Console.ForegroundColor = ConsoleColor.Blue;
            Console.WriteLine();
            Console.WriteLine(text);
            Console.ResetColor();
            Console.Write("> ");
            // Cancel any in-flight speech so replies don't overlap.
            synthesizer.SpeakAsyncCancelAll();
            synthesizer.SpeakAsync(text);
        }
    }
}
/// <summary>Tracks the highest-confidence hypothesized word seen so far and its numeric conversion.</summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="e">Hypothesis event data.</param>
private void Engine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // Guard against missing data — and against an EMPTY word list, since Last()
    // throws InvalidOperationException on an empty collection (the original only
    // checked for null). The unused `position` local was removed.
    if (e == null || e.Result == null || e.Result.Words == null || e.Result.Words.Count == 0)
    {
        return;
    }
    var lastWord = e.Result.Words.Last();
    double confidence = lastWord.Confidence;
    // Keep only the best hypothesis seen so far.
    if (confidence <= Confidence)
    {
        return;
    }
    RecognizedName = lastWord.Text;
    RecognizedNumber = Convert(RecognizedName);
    Confidence = confidence;
}
// Forwards the hypothesis details to the activity executor and writes a
// tab-separated trace line for diagnostics.
void recEngine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    float confidence = e.Result.Confidence;
    string textResult = e.Result.Text;
    string grammarName = e.Result.Grammar.Name;
    string ruleName = e.Result.Grammar.RuleName;
    double audioDuration = -1;
    if (e.Result.Audio != null)
    {
        //Looks like we can't get the audio duration for hypothesis result, BUG in Microsoft?
        audioDuration = e.Result.Audio.Duration.TotalMilliseconds;
    }
    KeyValuePair <string, SemanticValue>[] kvp = e.Result.Semantics.ToArray();
    //AbstractEBookEvent.raise(new RecognitionResultEvent(confidence,textResult,true,
    //    kvp,grammarName,ruleName, audioDuration,null));
    ActivityExecutor.add(new InternalSpeechRecognitionResultActivity(confidence, textResult, true, kvp, grammarName, ruleName, audioDuration, null));
    String timeStamp = GetTimestamp(DateTime.Now);
    // Trace format: confidence, text, semantic value, grammar name, rule name, timestamp.
    string text = "\n" + e.Result.Confidence + "\t" + e.Result.Text + "(Hypothesis)\t" + e.Result.Semantics.Value + "\tgrammarName=" + e.Result.Grammar.Name + "\truleName=" + e.Result.Grammar.RuleName + "\t" + timeStamp;
    Trace.WriteLine(text);
    //double timeNow = EBookUtil.GetUnixTimeMillis();
    //double mun = timeNow - speechTurnAroundTime;
    //speechTurnAroundTime = timeNow;
    //eachSpeechTimes.Add(mun);
}
// Surface a simple status message while a hypothesis is in flight.
void sre_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    this.labelSpeech.Content = "Speech hypothesized";
}
// Intentionally empty: in-progress (hypothesized) words are ignored by this recognizer.
// NOTE(review): presumably subscribed only to satisfy the event signature — confirm.
private void wordInProgress(object sender, SpeechHypothesizedEventArgs e) { }
/// <summary>Reports the current hypothesis and its confidence to the status display.</summary>
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string status = "Hypothesized: " + e.Result.Text + " " + e.Result.Confidence;
    this.ReportSpeechStatus(status);
}
// Trace the hypothesis start time; in verbose mode, also record the word in the application log.
void engine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    var startTime = e.Result.Audio.StartTime;
    Debug.WriteLine("Hypothesized word at time: " + startTime.ToString());
    if (verbose)
    {
        HelperMethods.WriteToLog("Hypothesized the word \"" + e.Result.Text + "\"", parent);
    }
}
// Hypothesis handler — all actions are currently commented out, so this is a no-op.
private void sre_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    //MessageBox.Show("Hypothesized: " + e.Result.Text + " with confidence " + e.Result.Confidence);
    //this.HypothesizedWord = e.Result.Text;
}
/// <summary>
/// Called when a speech is in the process; logs that Jarvis was addressed.
/// </summary>
/// <param name="sender">The recognition engine that raised the event.</param>
/// <param name="e">Hypothesis event data.</param>
public override void SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // French user-facing log text ("Jarvis call") — deliberately left untouched.
    PackageHost.WriteInfo("Appel de Jarvis.");
}
/// <summary>
/// Processes speech which the kinect is uncertain about.
/// Currently a no-op: the console echo is commented out.
/// </summary>
private static void SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    //Console.WriteLine("Hypothesized: " + e.Result.Text);
}
// Capture the phoneme (pronunciation) string of the hypothesized words.
public static void reco_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    var pronunciation = StringFromWordArray(e.Result.Words, WordType.Pronunciation);
    recoPhonemes = pronunciation;
}
// Echo each hypothesis to the console as it arrives.
void Engine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Console.WriteLine($"Speech hypothesized: {e.Result.Text}");
}
// SpeechHypothesized event is generated by recognition engines (both SpeechRecognizer and
// SpeechRecognitionEngine) when part of the audio input speech has been tentatively recognized.
// This method displays the last hypothesis in a different color temporarily in the result text box
// and starts a background color animation on the grammar which generated the hypothesis.
void SpeechEngine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    LogEvent(_brushHypothesized, "SpeechHypothesized", "Confidence={0:0.00} Grammar={1} Hypothesis=\"{2}\"", e.Result.Confidence, e.Result.Grammar.Name, e.Result.Text);
    if (_hypothesis != null)
    {
        // Remove the previous hypothesis from the result.
        _paragraphResult.Inlines.Remove(_hypothesis);
    }
    // Display the new hypothesis in a different color.
    _hypothesis = new Run(e.Result.Text);
    _hypothesis.Foreground = _brushHypothesized;
    _paragraphResult.Inlines.Add(_hypothesis);
    _richTextBoxResult.ScrollToEnd();
    // Get the background brush for the grammar which generated the hypothesis.
    SolidColorBrush brush = _dictionaryBrushes[e.Result.Grammar];
    // Start a two second color animation from the hypothesis color back to the default background.
    ColorAnimation animation = new ColorAnimation(_colorHypothesized, _colorDefaultBackground, new Duration(TimeSpan.FromSeconds(2)));
    brush.BeginAnimation(SolidColorBrush.ColorProperty, animation);
}
// Trace each hypothesis with its confidence for diagnostics.
protected void recognizer_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string message = "recognizer_SpeechHypothesized " + e.Result.Text + " => " + e.Result.Confidence;
    Trace(message);
}
// Hypothesis handler: echo the tentative text and its confidence to the console.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    var result = e.Result;
    Console.WriteLine("Speach hypothesized: " + result.Text + ", confidence:" + result.Confidence);
}
// Echo each hypothesis with its confidence to the console.
void mRecog_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Console.WriteLine($"Hypothesized: {e.Result.Text} = {e.Result.Confidence}");
}
// Show the current hypothesis in the main window's debug text box.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // (An earlier status-report variant with confidence is commented out in history.)
    string display = "hypothesize as: " + e.Result.Text;
    mainWindow.DebugSpeechTextBox.Text = display;
}
// Append a WhiteSmoke-tagged hypothesis entry to the text log.
private void SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string entry = string.Format("[Speech Hypothesized: {0} ({1})][WhiteSmoke]", e.Result.Text, e.Result.Confidence);
    TextLog = entry;
}
// Overwrite the current console line with the latest hypothesis.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Console.Write("\rSpeech Hypothesized: \t" + e.Result.Text);
}
// Hypothesis handler — logs the tentative recognition and its confidence.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string text = e.Result.Text;
    float conf = e.Result.Confidence;
    Console.WriteLine("Speach hypothesized: " + text + ", confidence:" + conf);
}
// Hypothesis handler — the message-box echo is commented out, so this is a no-op.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    //MessageBox.Show("Hypothesized: " + e.Result.Text + " " + e.Result.Confidence);
}
// Log each hypothesis, tagged with this engine's name, to the debug log.
protected void recognizer_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string source = "ENGINE - " + Name;
    string message = "recognizer_SpeechHypothesized " + e.Result.Text + " => " + e.Result.Confidence;
    cfg.logDebug(source, message);
}
// Hypothesized-result handler — the UI update is commented out, so this is a no-op.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    //TextBlock1.Text = "Hypothesized: " + e.Result.Text + " " + e.Result.Confidence;
}
// Handle the SpeechHypothesized event: report grammar name and text, tolerating missing data.
static void SpeechHypothesizedHandler(object sender, SpeechHypothesizedEventArgs e)
{
    Console.WriteLine(" In SpeechHypothesizedHandler:");
    string grammarName = e.Result?.Grammar != null ? e.Result.Grammar.Name : "<not available>";
    string resultText = e.Result != null ? e.Result.Text : "<not available>";
    Console.WriteLine(" - Grammar Name = {0}; Result Text = {1}", grammarName, resultText);
}
// Hypothesis handler — the view-model update is commented out, so this is a no-op.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    //ViewModel.HypothesizedText = e.Result.Text;
}
// Hypothesis handler — the message-box echo is commented out, so this is a no-op.
private static void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // MessageBox.Show(string.Format("\rSpeech Hypothesized: \t{0}", e.Result.Text));
}
/// \brief Handles Speech Hypothesis Events by overwriting the console line with text and confidence.
private void speechHypothesizedHandler(object sender, SpeechHypothesizedEventArgs e)
{
    var result = e.Result;
    Console.Write("\rCommand Hypothesized: \t{0}\t\tConfidence:\t{1}", result.Text, result.Confidence);
}
/// <summary>
/// Occurs when speech has been hypothesized.
/// </summary>
/// <param name="sender">Object that send the event</param>
/// <param name="e">Event args</param>
private void SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    var result = e.Result;
    Console.Write("\rSpeech Hypothesized: \t{0}\tConfidence:\t{1}", result.Text, result.Confidence);
}
// Show each hypothesis in a modal message box (earlier console/onMessage variants were retired).
public static void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string message = "Speech Hypothesized: " + e.Result.Text;
    System.Windows.MessageBox.Show(message);
}
/// <summary>Validates the hypothesis and forwards its alternates as a RecognizedSpeech object.</summary>
private void speechRecognizer_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    // Bail out on missing data, near-zero confidence, or an empty alternate list.
    if (e == null || e.Result == null)
        return;
    var result = e.Result;
    if (result.Confidence < 0.1)
        return;
    var alternates = result.Alternates;
    if (alternates == null || alternates.Count < 1)
        return;
    RecognizedSpeech recognizedSpeech = GenerateRecognizedSpeechObject(alternates);
    OnSpeechHypothesized(recognizedSpeech);
}
// Marshal the hypothesis text and confidence onto the UI thread and show it in the text box.
void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    recTextBox.BeginInvoke((MethodInvoker)delegate
    {
        recTextBox.Text = "? " + e.Result.Text + " (" + e.Result.Confidence + ")";
    });
}
// Hypothesis events are deliberately ignored; only final recognition results matter here.
private void SpeechEngineHypothesized(Object sender, SpeechHypothesizedEventArgs args)
{
    // We don't care.
}
// Count hypothesis events and mirror the running total in the UI label.
private void recognizer_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Hypothesized += 1;
    LabelHypothesized.Content = $"Hypothesized: {Hypothesized}";
}
/// <summary>Shows the live hypothesis in the IDE status bar unless a rejection message is pending.</summary>
private void OnSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    if (!string.IsNullOrEmpty(_rejected))
    {
        return;
    }
    var percent = Math.Round(e.Result.Confidence * 100);
    _dte.StatusBar.Text = "I'm listening... (" + e.Result.Text + " " + percent + "%)";
}
// Record that a hypothesis arrived for this engine (the text itself is not logged here).
protected void recognizer_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    string source = "ENGINE - " + Name;
    cfg.logDebug(source, "recognizer_SpeechHypothesized");
}
// Echo hypotheses to the console only when voice console output is enabled.
private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    if (!voiceConsoleText)
    {
        return;
    }
    Console.WriteLine("Hypothesized: " + e.Result.Text + " " + e.Result.Confidence);
}
//--------------------------------------------------------------------------------------------
// Hypothesis handler — the text-box echo and scroll are commented out, so this is a no-op.
void engine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    //txtSpoken.Text += "\r Hipotesis" + getKnownTextOrExecute(e.Result.Text);
    //scvText.ScrollToEnd();
}
// Print each hypothesis to the console as it arrives.
void sre_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
{
    Console.WriteLine("Speech Hypothesized: " + e.Result.Text);
}