Example no. 1
        /// <summary>
        /// Handles 'SpInProcRecoContext.FalseRecognition' event. Fires rarely,
        /// presumably when the engine's confidence is too low, or perhaps when a
        /// word or phrase is dropped due to interference - but the event is
        /// unfortunately not that consistent in my experience.
        /// </summary>
        /// <param name="StreamNumber"></param>
        /// <param name="StreamPosition"></param>
        /// <param name="Result"></param>
        void rc_FalseRecognition(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
        {
#if DEBUG
            logfile.Log();
            logfile.Log("rc_FalseRecognition() #" + StreamNumber + " StreamPosition= " + StreamPosition + " _generato= " + _generato);
#endif
            rc_Recognition(StreamNumber, StreamPosition, SpeechRecognitionType.SRTStandard, Result);             // force Recognition.

/*			logfile.Log(". " + Result.PhraseInfo.GetText()); // (0, -1, true)
 *
 *			logfile.Log(". Result.PhraseInfo.Rule.Name= "             + Result.PhraseInfo.Rule.Name); // <- blank.
 *			logfile.Log(". Result.PhraseInfo.Rule.Confidence= "       + Result.PhraseInfo.Rule.Confidence);
 *			logfile.Log(". Result.PhraseInfo.Rule.EngineConfidence= " + Result.PhraseInfo.Rule.EngineConfidence);
 *			logfile.Log(". Result.PhraseInfo.Rule.Id= "               + Result.PhraseInfo.Rule.Id);
 *
 *			logfile.Log(". wordcount= " + Result.PhraseInfo.Elements.Count);
 *			foreach (ISpeechPhraseElement word in Result.PhraseInfo.Elements)
 *			{
 *				logfile.Log(". . word= "             + word.DisplayText);
 *				logfile.Log(". . LexicalForm= "      + word.LexicalForm);
 *				logfile.Log(". . ActualConfidence= " + word.ActualConfidence);
 *				logfile.Log(". . EngineConfidence= " + word.EngineConfidence);
 *				var ids = (ushort[])word.Pronunciation;
 *				foreach (var id in ids) logfile.Log(". . . PhoneId= " + id + " - " + _phoneConverter.IdToPhone(id));
 *			} */
        }
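
For context: handlers like these are attached through the event delegate types that the SpeechLib COM interop generates. A minimal wiring sketch, assuming an SpInProcRecoContext created elsewhere (the handler names match those used in these snippets; the rest is an assumption):

        // Wiring sketch (assumption): hook the handlers to an SpInProcRecoContext
        // using the delegate types generated by the SpeechLib COM interop.
        void HookRecoContextEvents(SpInProcRecoContext rc)
        {
            rc.Recognition      += new _ISpeechRecoContextEvents_RecognitionEventHandler(rc_Recognition);
            rc.FalseRecognition += new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(rc_FalseRecognition);
            rc.Hypothesis       += new _ISpeechRecoContextEvents_HypothesisEventHandler(rc_Hypothesis);
        }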
    // Function for extracting SAPI phonemes from voice recognition results
    public void GetPhonemes(ISpeechRecoResult Result)
    {
        //VA.WriteToLog("Extracting phonemes from voice recognition result"); // Output info to event log

        try                                                                       // Attempt the following code
        {
            SpPhoneConverter MyPhoneConverter = new SpPhoneConverter();           // Create new SPPhoneConverter instance
            MyPhoneConverter.LanguageId = 1033;                                   // Set the phone converter's language (English = 1033)
            string SAPIPhonemesRaw = "";                                          // Initialize string for storing raw SAPI phoneme data (empty, not null, so Trim() below cannot throw)
            string SAPIPhonemes    = "";                                          // Initialize string for storing delimited SAPI phoneme data
            int    i               = 1;                                           // Initialize integer for tracking phoneme count
            string WordSeparator   = " ";                                         // Initialize string variable for storing the characters used to separate words within the phoneme result

            if (VA.GetBoolean("~~SeparatePhonemes") == true)                      // Check if user wants to have the "-" character separate the words within the phoneme result
            {
                WordSeparator = " - ";                                            // Redefine the WordSeparator
            }
            foreach (ISpeechPhraseElement MyPhrase in Result.PhraseInfo.Elements) // Loop through each element of the recognized text
            {
                if (MyPhrase.DisplayText != " ")
                {
                    SAPIPhonemesRaw += " " + MyPhoneConverter.IdToPhone(MyPhrase.Pronunciation);                             // Build string of SAPI phonemes extracted from the recognized text
                    SAPIPhonemes    += (i++ > 1 ? WordSeparator : " ") + MyPhoneConverter.IdToPhone(MyPhrase.Pronunciation); // Build string of SAPI phonemes extracted from the recognized text, delimited by " "
                }
            }
            MyPhoneConverter = null;                                             // Set to null in preparation for garbage collection

            VA.SetText("~~SAPIPhonemesRaw", SAPIPhonemesRaw.Trim());             // Send raw SAPI phoneme data back to VoiceAttack as text variable
            VA.SetText("~~SAPIPhonemes", SAPIPhonemes.Trim());                   // Send word-delimited SAPI phoneme data back to VoiceAttack as text variable
        }
        catch                                                                    // Handle exceptions in above code
        {
            VA.SetText("~~RecognitionError", "Error during phoneme extraction"); // Send error detail back to VoiceAttack as text variable
        }
    }
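
The SpPhoneConverter used above can also be exercised on its own to round-trip between SAPI phone strings and phone ids. A small sketch, assuming the SpeechLib interop is referenced (the phone string is purely illustrative):

    // Standalone SpPhoneConverter round-trip (sketch).
    public void PhoneRoundTripSketch()
    {
        SpPhoneConverter converter = new SpPhoneConverter();  // SpeechLib COM interop
        converter.LanguageId = 1033;                          // US English phone set
        object ids    = converter.PhoneToId("h eh l ow");     // phone string -> phone-id array
        string phones = converter.IdToPhone(ids);             // phone-id array -> "h eh l ow"
    }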
    // Function for processing voice recognition results
    public void RecognitionProcessing(ISpeechRecoResult Result)
    {
        //VA.WriteToLog("Processing recognition result"); // Output info to event log

        try                                                                                                                 // Attempt the following code
        {
            string  RecognizedText             = Result.PhraseInfo.GetText().Trim();                                        // Store recognized text
            float   confidence                 = Result.PhraseInfo.Elements.Item(0).EngineConfidence;                       // Get confidence of voice recognition result
            decimal RecognitionConfidenceScore = Decimal.Round(Convert.ToDecimal(confidence), (confidence > 0.01 ? 3 : 4)); // Convert the recognition confidence to decimal and round the result
            string  RecognitionConfidenceLevel = Result.PhraseInfo.Elements.Item(0).ActualConfidence.ToString().Replace("SEC", "").Replace("Confidence", "");
            VA.SetText("~~RecognizedText", RecognizedText);                                                                 // Send recognized text back to VoiceAttack as text variable
            //VA.SetText("~~RecognitionConfidenceLevel", RecognitionConfidenceLevel); // Send speech recognition confidence level back to VoiceAttack as text variable
            //VA.SetDecimal("~~RecognitionConfidence", RecognitionConfidenceScore); // Send recognized confidence back to VoiceAttack as decimal variable

            if (VA.GetBoolean("~~ShowConfidence") == true)
            {
                RecognitionConfidence = "(" + RecognitionConfidenceLevel + " @ " + RecognitionConfidenceScore.ToString() + ")" + RecognitionFlag;
            }
            //VA.SetText("~~RecognitionConfidence", RecognitionConfidenceLevel + " @ " + RecognitionConfidenceScore.ToString()); // Send speech recognition confidence data back to VoiceAttack as text variable
            VA.SetText("~~RecognitionConfidence", RecognitionConfidence); // Send formatted speech recognition confidence data back to VoiceAttack as text variable
            if (UseDictation == true)                                     // Check if pronunciation dictation grammar should be used with speech recognition
            {
                RecognizedText = RecognizedText.Replace("hh", "h");       // Replace any instances of "hh" in recognized phonemes with "h"
                VA.SetText("~~SAPIPhonemes", RecognizedText);             // Send word-delimited SAPI phoneme data back to VoiceAttack as text variable
            }
        }
        catch (Exception e)         // Handle exceptions in above code
        {
            VA.WriteToLog(e.ToString());
            VA.SetText("~~RecognitionError", "Error during processing of recognition result (SAPI)");             // Send error detail back to VoiceAttack as text variable
        }
    }
Example no. 4
 /* EVENT HANDLER INVOKED ON A RECOGNITION EVENT */
 public void RecoContext_Recognition(int StreamNumber, object StreamPosition,
     SpeechRecognitionType RecognitionType, ISpeechRecoResult e)
 {
     //get phrase
     string phrase = e.PhraseInfo.GetText(0, -1, true);
     Debug.WriteLine(phrase);
 }
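
The three GetText arguments are (StartElement, Elements, UseReplacements): -1 selects all elements, and the flag controls whether the engine's display-text replacements are applied. A quick sketch of the difference (the example strings are assumptions):

 // Sketch: both forms of the recognized text from one result.
 string GetBothForms(ISpeechRecoResult e)
 {
     string lexical  = e.PhraseInfo.GetText(0, -1, false); // raw words, e.g. "select item five"
     string replaced = e.PhraseInfo.GetText(0, -1, true);  // with replacements, e.g. "select item 5"
     return replaced + " | " + lexical;
 }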
Example no. 5
        //public Familiar_Grammar create_dictation_grammar( )
        //{
        //    Familiar_Grammar new_grammar = new Familiar_Grammar();
        //    new_grammar.create_dictation(this);
        //    return new_grammar;
        //}

        void context_FalseRecognition(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
        {
            Feedback.print("!\r\n", Feedback.Status.debug);
            Familiar_Result result = new Familiar_Result(context, Result);

            result.display("(!) ");
        }
Example no. 6
        /// <summary>
        ///     RecoContext_Recognition is the event handler function for the
        ///     SpSharedRecoContext object's Recognition event.
        /// </summary>
        /// <param name="StreamNumber"></param>
        /// <param name="StreamPosition"></param>
        /// <param name="RecognitionType"></param>
        /// <param name="Result"></param>
        /// <remarks>
        ///     See EnableSpeech() for how to hook up this function with the
        ///     event.
        /// </remarks>
        public void RecoContext_Recognition(int StreamNumber,
                                            object StreamPosition,
                                            SpeechRecognitionType RecognitionType,
                                            ISpeechRecoResult Result)
        {
            Debug.WriteLine("Recognition: " +
                            Result.PhraseInfo.GetText(0, -1, true) + ", " +
                            StreamNumber + ", " + StreamPosition);

            int index;
            ISpeechPhraseProperty oItem;

            // oItem will be the property of the second part in the recognized
            // phrase. For example, if the top-level rule matches
            // "select Seattle", then the ListItemsRule matches the "Seattle"
            // part. The following code will get the property of the "Seattle"
            // phrase, which is set when the word "Seattle" is added to
            // ruleListItems in RebuildGrammar.
            oItem = Result.PhraseInfo.Properties.Item(0).Children.Item(0);
            index = oItem.Id;

            if ((System.Decimal)Result.PhraseInfo.GrammarId == grammarId)
            {
                // Check to see if the item at the same position in the list
                // still has the same text.
                // This is to prevent the rare case that the user keeps
                // talking while the list is being added or removed. By the
                // time this event is fired and handled, the list box may have
                // already changed.
                if (oItem.Name.CompareTo(this.Items[index].ToString()) == 0)
                {
                    this.SelectedIndex = index;
                }
            }
        }
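
The per-item property read back above through Properties.Item(0).Children.Item(0) has to be attached when the list words are added to the rule. A rough sketch of the assumed RebuildGrammar side (the rule, grammar, and method names are assumptions):

        // Assumed shape of RebuildGrammar (sketch): each list entry becomes a word
        // transition whose property id is its index, read back above as oItem.Id.
        void RebuildGrammarSketch(ISpeechGrammarRule ruleListItems, ISpeechRecoGrammar grammar)
        {
            ruleListItems.Clear();
            for (int i = 0; i < this.Items.Count; i++)
            {
                object propValue = this.Items[i].ToString();
                ruleListItems.InitialState.AddWordTransition(null, this.Items[i].ToString(),
                    " ", SpeechGrammarWordType.SGLexical, this.Items[i].ToString(), i,
                    ref propValue, 1f);
            }
            grammar.Rules.Commit();
        }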
Example no. 7
//		/// <summary>
//		/// does not fire. SURPRISE!
//		///
//		/// Handles 'SpInProcRecoContext.AudioLevel' event.
//		/// </summary>
//		/// <param name="StreamNumber"></param>
//		/// <param name="StreamPosition"></param>
//		/// <param name="AudioLevel"></param>
//		void rc_AudioLevel(int StreamNumber, object StreamPosition, int AudioLevel)
//		{
//			logfile.Log("rc_AudioLevel() #" + StreamNumber + " StreamPosition= " + StreamPosition + " AudioLevel= " + AudioLevel);
//		}


        /// <summary>
        /// Handles 'SpInProcRecoContext.Hypothesis' event. Fires each time the
        /// engine produces a hypothesis.
        /// </summary>
        /// <param name="StreamNumber"></param>
        /// <param name="StreamPosition"></param>
        /// <param name="Result"></param>
        void rc_Hypothesis(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
        {
            logfile.Log("rc_Hypothesis() #" + StreamNumber + " StreamPosition= " + StreamPosition + " _generato= " + _generato);
            logfile.Log(". " + Result.PhraseInfo.GetText());             // (0, -1, true)

//			logfile.Log(". Result.PhraseInfo.Rule.Name= "             + Result.PhraseInfo.Rule.Name); // <- blank.
//			logfile.Log(". Result.PhraseInfo.Rule.Id= "               + Result.PhraseInfo.Rule.Id);
//			logfile.Log(". Result.PhraseInfo.Rule.EngineConfidence= " + Result.PhraseInfo.Rule.EngineConfidence);
//			logfile.Log(". Result.PhraseInfo.Rule.Confidence= "       + Result.PhraseInfo.Rule.Confidence);
//
//			logfile.Log(". wordcount= " + Result.PhraseInfo.Elements.Count);
//			foreach (ISpeechPhraseElement word in Result.PhraseInfo.Elements)
//			{
//				logfile.Log(". . word= "              + word.DisplayText);
//				logfile.Log(". . LexicalForm= "       + word.LexicalForm);
//				logfile.Log(". . DisplayAttributes= " + word.DisplayAttributes);
//				logfile.Log(". . EngineConfidence= "  + word.EngineConfidence);
//				logfile.Log(". . ActualConfidence= "  + word.ActualConfidence);
//				var ids = (ushort[])word.Pronunciation;
//				foreach (var id in ids) logfile.Log(". . . PhoneId= " + id + " - " + _phoneConverter.IdToPhone(id));
//			}

//			logfile.Log(". get Alternates");
//			ISpeechPhraseAlternates alts = Result.Alternates(3);	// DOES NOT WORK AS EXPECTED.
//			logfile.Log(". alts.Count= " + alts.Count);				// NOTE: for CC only - SpeechRecoContext.CmdMaxAlternates() def 0
//			logfile.Log(". alt[0]= " + alts.Item(0));				// This fails silently regardless of CmdMaxAlternates value and/or isCC.
//			foreach (ISpeechPhraseAlternate alt in alts)
//				logfile.Log(". . alt= " + alt.PhraseInfo.GetText());
//			logfile.Log(". got Alternates");
        }
        public Phrase(ISpeechRecoResult heardSpeechObj, string computerName)
        {
            try
            {
                //calculate accuracy
                _accuracy = (float)heardSpeechObj.PhraseInfo.Elements.Item(0).EngineConfidence;

                //change accuracyMax dynamically
                if (_accuracyMax < _accuracy)
                {
                    _accuracyMax = _accuracy;
                }

                if (_accuracy < 0)
                {
                    _accuracy = 0;
                }

                _accuracy = (int)((float)_accuracy / _accuracyMax * 100);

                //_computerName = Form1.computerNickName;
                _phrase = heardSpeechObj.PhraseInfo.GetText(0, -1, true);
                //_phrase = _phrase.Replace(_computerName, "").Trim();
                _phrase       = _phrase.Replace(computerName, "").Trim();
                _ruleName     = heardSpeechObj.PhraseInfo.Rule.Name;
                _speechObject = heardSpeechObj;
            }
            catch (Exception ex)
            {
                System.Diagnostics.EventLog.WriteEntry("Phrase", ex.Message);
            }
        }
 public void RecoContext_Hypothesis(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
 {
     Debug.WriteLine("Hypothesis: " + Result.PhraseInfo.GetText(0, -1, true) + ", " +
         StreamNumber + ", " + StreamPosition);
     if (voiceInfoAutomat.Status == State.init) {
         this.understandet(Result.PhraseInfo.GetText(0, -1, true));
     }
 }
    // Event handler for voice recognition hypotheses
    public void RecoContext_Hypothesis(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
    {
        //VA.WriteToLog("Recognition hypothesis"); // Output info to event log

        float confidence = Result.PhraseInfo.Elements.Item(0).EngineConfidence;

        VA.WriteToLog("Hypothesis = " + Result.PhraseInfo.GetText() + " (" + Decimal.Round(Convert.ToDecimal(confidence), (confidence > 0.01 ? 3 : 4)) + ")");         // Output info to event log
    }
Example no. 11
 /*Speech recog methods*/
 /// <summary>
 ///     RecoContext_Hypothesis is the event handler function for
 ///     SpInProcRecoContext object's Hypothesis event.
 /// </summary>
 /// <param name="StreamNumber"></param>
 /// <param name="StreamPosition"></param>
 /// <param name="Result"></param>
 /// <remarks>
 ///     See EnableSpeech() for how to hook up this function with the
 ///     event.
 /// </remarks>
 private void RecoContext_Hypothesis(int StreamNumber,
                                     object StreamPosition,
                                     ISpeechRecoResult Result)
 {
     Debug.WriteLine("Hypothesis: " +
                     Result.PhraseInfo.GetText(0, -1, true) + ", " +
                     StreamNumber + ", " + StreamPosition);
 }
Example no. 12
        private void Hypo_Event(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
        {
            String text = Result.PhraseInfo.GetText(0, -1, true);

            synth.Speak("Hypothesis: " + text); // DEBUG

            // TODO: Did you mean? If "yes", call DoActionFromVoiceCommand(text).
        }
Example no. 13
        float _accuracyMax = 0.1F;                                              //to avoid divide by zero

        public Phrase(ISpeechRecoResult heardSpeechObj)
        {
            //calculate accuracy
            _accuracy = (float)heardSpeechObj.PhraseInfo.Elements.Item(0).EngineConfidence;

            //change accuracyMax dynamically
            if (_accuracyMax < _accuracy)
            {
                _accuracyMax = _accuracy;
            }

            if (_accuracy < 0)
            {
                _accuracy = 0;
            }

            _accuracy = (int)((float)_accuracy / _accuracyMax * 100);

            _computerName = Form1.computerNickName;
            _phrase       = heardSpeechObj.PhraseInfo.GetText(0, -1, true);
            _phrase       = _phrase.Replace(_computerName, "").Trim();
            _ruleName     = heardSpeechObj.PhraseInfo.Rule.Name;
            _speechObject = heardSpeechObj;
        }
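
The normalization above rescales the engine's raw EngineConfidence against the best value seen so far, so _accuracy ends up as a rough 0-100 score relative to the session's best recognition:

        // Worked example of the normalization above:
        //   EngineConfidence = 0.05, running max 0.10 -> 0.05 / 0.10 * 100 = 50
        //   EngineConfidence = 0.30, running max 0.30 -> 0.30 / 0.30 * 100 = 100 (new max)
        //   EngineConfidence = 0.15, running max 0.30 -> 0.15 / 0.30 * 100 = 50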
Example no. 14
        private void OnRecognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            string word = Result.PhraseInfo.GetText(0, -1, true);

            //Notify plugin to output word on next time it runs evaluate
            this.FInvalidate = true;
            this.FData       = word;
        }
Example no. 15
 private void FailedRecognitionHandler(
     int StreamNumber,
     object StreamPosition,
     ISpeechRecoResult Result)
 {
     foreach (CommandRecognizerMode mode in ActiveModes)
     {
         mode.OnCommandNotRecognized(Result);
     }
 }
Example no. 16
        void context_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            if (context.AudioInputInterferenceStatus == SpeechInterference.SINoise)
            {
                return;
            }

            //      Feedback.print(string.Format("Result = {0} {1}\r\n", StreamNumber, context.AudioInputInterferenceStatus), Feedback.Status.debug);
            Feedback.print(string.Format("Result = {0}, {1}\r\n", Result.PhraseInfo.GetText(0, -1, true), Result.PhraseInfo.Elements.Item(0).EngineConfidence), Feedback.Status.debug);
            Profiler.initialize();
            Familiar_Result result = new Familiar_Result(context, Result);

            //            Familiar_Result result = get_result("> ", Result);
            //          result.display("> ");
            Profiler.trace("result created");

            Feedback.print(string.Format("Element Count = {0}\r\n", Result.PhraseInfo.Elements.Count), Feedback.Status.debug);
            foreach (Token word in result.chosen_phrase.words)
            {
                Feedback.print(string.Format("{0} ({1}, {2})\r\n", word.text, word.confidence, word.source.document.name), Feedback.Status.debug);
            }

            //if (Result.PhraseInfo.Elements.Count == 1 && result.chosen_phrase.words[0].confidence < 0.6)
            //    return;

            result.run();

            if (recognized != null)
            {
                recognized.Invoke(this, result);
            }
        }
Example no. 17
        private void SpeechRecognizer_Hypothesis(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
        {
            string result;
            float confidence;
            RecognizedSpeechAlternate[] alternates;
            RecognizedSpeech recognizedSpeech;

            result = Result.PhraseInfo.GetText(0, -1, true);
            confidence = Result.PhraseInfo.Rule.EngineConfidence;
            if (result.Length < 1)
                return;

            alternates = new RecognizedSpeechAlternate[1];
            alternates[0] = new RecognizedSpeechAlternate(result, confidence);
            recognizedSpeech = new RecognizedSpeech(alternates);

            OnSpeechHypothesized(recognizedSpeech);
        }
Example no. 18
 /// <summary>
 /// Fires when a voice command has been received
 /// </summary>
 /// <param name="StreamNumber"></param>
 /// <param name="StreamPosition"></param>
 /// <param name="RecognitionType"></param>
 /// <param name="Result"></param>
 private void recoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
 {
     // Fire the CommandReceived event with the received command text
     CommandReceived(Result.PhraseInfo.GetText(0, -1, false));
 }
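
CommandReceived itself is not shown in this snippet; a minimal assumed declaration for the event being fired:

 // Assumed declaration for the CommandReceived event used above (sketch):
 public delegate void CommandReceivedEventHandler(string commandText);
 public event CommandReceivedEventHandler CommandReceived;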
Example no. 20
        public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            //MessageBox.Show(charReq.Status.ToString());
            if (charReq.Status == 0)
            {
                grammar.DictationSetState(SpeechRuleState.SGDSInactive);

                inputBox.Text = Result.PhraseInfo.GetText(0, -1, true);

                doChatting();

                grammar.DictationSetState(SpeechLib.SpeechRuleState.SGDSActive);
            }
        }
        private void Reco_Event(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            String text = Result.PhraseInfo.GetText(0, -1, true);
            synth.Speak("Recognition: " + text); // DEBUG

            // TODO: For "Do you mean?" functionality, check yes/no NOT in the following function
            //      because they are not actions.

            DoActionFromVoiceCommand(text);
        }
    // Event handler for unsuccessful (low confidence) voice recognition
    public void RecoContext_FalseRecognition(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
    {
        //VA.WriteToLog("Low confidence recognition"); // Output info to event log

        //VA.WriteToLog(Result.PhraseInfo.GetText());
        //VA.SetText("~~FalseRecognitionFlag", "*"); // Send unsuccessful recognition flag (text character) back to VoiceAttack as text variable
        RecognitionFlag = "*";         // Set the RecognitionFlag as "*"
        RecognitionProcessing(Result); // Process the voice recognition result
        GetPhonemes(Result);           // Retrieve SAPI phonemes from recognition result
    }
    // Event handler for successful (higher confidence) voice recognition
    public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
    {
        //VA.WriteToLog("Recognition successful"); // Output info to event log

        //VA.SetText("~~FalseRecognitionFlag", ""); // Send blank recognition flag ("") back to VoiceAttack as text variable
        //RecognitionFlag = ""; // Set the RecognitionFlag as blank
        RecognitionProcessing(Result); // Process the voice recognition result
        //if (UseDictation == false) // Check if pronunciation dictation grammar should NOT be used with speech recognition
        GetPhonemes(Result);           // Retrieve SAPI phonemes from recognition result
    }
        private void OnReco(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            string recoResult = Result.PhraseInfo.GetText();    // Translates whatever was somewhat definitively recognized by Windows Speech Recognition into text.

            recoResult = recoResult.ToLower();          // This is the same as taking inquiry text and making it all lowercase in Minerva.

            submit(recoResult);
        }
Example no. 25
 private void Reco_Event(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
 {
     string lWord = Result.PhraseInfo.GetText(0, -1, true);
     if (VoiceCommandReceivedEvent != null)
         VoiceCommandReceivedEvent(this, new VoiceCommandEventArgs(lWord));
 }
Example no. 26
 private void Hypo_Event(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
 {
     string lWord = Result.PhraseInfo.GetText(0, -1, true);
 }
Example no. 27
//		ulong GetAudioStreamPositionSeconds(string pos)
//		{
//			ulong sec = UInt64.Parse(pos);
//
//			sec /= 2uL;		// bytes per sample (16-bit)
//			sec /= 44100;	// samples per second
//
//			return sec;
//		}

        /// <summary>
        /// Handles 'SpInProcRecoContext.Recognition' event. Fires as the final
        /// hypothesis for a phrase. Each word will be added to a list of
        /// 'OrthographicResult's for the phrase.
        /// WARNING: This can fire 2+ times on the same file-stream, causing the
        /// engine to drop/reset important values like 'PhraseInfo.StartTime',
        /// 'word.AudioStreamOffset', and 'word.AudioTimeOffset'.
        /// TODO: a fact that is exceedingly annoying to try to compensate for.
        /// </summary>
        /// <param name="StreamNumber"></param>
        /// <param name="StreamPosition"></param>
        /// <param name="RecognitionType"></param>
        /// <param name="Result"></param>
        void rc_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
#if DEBUG
            logfile.Log();
            logfile.Log("rc_Recognition() #" + StreamNumber + " StreamPosition= " + StreamPosition + " _generato= " + _generato);
            logfile.Log(". RecognitionType= " + RecognitionType);             // <- standard.

            logfile.Log(". _phoneConverter.LanguageId= " + _phoneConverter.LanguageId);

            logfile.Log(". " + Result.PhraseInfo.GetText());             // (0, -1, true)

            logfile.Log(". _offset                       = " + _offset);
            logfile.Log(". PhraseInfo.AudioStreamPosition= " + Result.PhraseInfo.AudioStreamPosition);
//			logfile.Log(". . sec= " + GetAudioStreamPositionSeconds(Result.PhraseInfo.AudioStreamPosition.ToString()));

            logfile.Log(". PhraseInfo.AudioSizeBytes     = " + Result.PhraseInfo.AudioSizeBytes);
            logfile.Log(". PhraseInfo.StartTime          = " + Result.PhraseInfo.StartTime);
            logfile.Log(". PhraseInfo.AudioSizeTime      = " + Result.PhraseInfo.AudioSizeTime);

            logfile.Log(". Result.PhraseInfo.Rule.Name= " + Result.PhraseInfo.Rule.Name);                         // <- blank.
            logfile.Log(". Result.PhraseInfo.Rule.Id= " + Result.PhraseInfo.Rule.Id);
            logfile.Log(". Result.PhraseInfo.Rule.EngineConfidence= " + Result.PhraseInfo.Rule.EngineConfidence);
            logfile.Log(". Result.PhraseInfo.Rule.Confidence= " + Result.PhraseInfo.Rule.Confidence);

            logfile.Log(". wordcount= " + Result.PhraseInfo.Elements.Count);
#endif

            List<OrthographicResult> ars = null;
            switch (_generato)
            {
            case Generator.Dictati: ars = _ars_def; break;

            case Generator.Dialogi: ars = _ars_enh; break;
            }

            foreach (ISpeechPhraseElement word in Result.PhraseInfo.Elements)
            {
#if DEBUG
                logfile.Log(". . word= " + word.DisplayText);
                logfile.Log(". . LexicalForm= " + word.LexicalForm);
                logfile.Log(". . DisplayAttributes= " + word.DisplayAttributes);
                logfile.Log(". . EngineConfidence= " + word.EngineConfidence);
                logfile.Log(". . ActualConfidence= " + word.ActualConfidence);
                var ids = (ushort[])word.Pronunciation;
                foreach (var id in ids)
                {
                    logfile.Log(". . . PhoneId= " + id + " - " + _phoneConverter.IdToPhone(id));
                }

                logfile.Log(". . word.AudioStreamOffset= " + word.AudioStreamOffset);
                logfile.Log(". . word.AudioSizeBytes   = " + word.AudioSizeBytes);
                logfile.Log(". . word.AudioTimeOffset  = " + word.AudioTimeOffset);
                logfile.Log(". . word.AudioSizeTime    = " + word.AudioSizeTime);
#endif

                var ar = new OrthographicResult();
                ar.Orthography = word.DisplayText;

                string phons = _phoneConverter.IdToPhone(word.Pronunciation);                 // NOTE: object is a ushort or ushort[]

                ar.Phons = new List<string>(phons.Split(' '));
                ar.Confi = word.EngineConfidence;
                ar.Level = word.ActualConfidence.ToString().Replace("SEC", String.Empty).Replace("Confidence", String.Empty);
                ar.Start = _offset + Utility.GarpstoSecs(word.AudioTimeOffset);
                ar.Stop  = _offset + Utility.GarpstoSecs(word.AudioTimeOffset + word.AudioSizeTime);

                ars.Add(ar);
            }

            // NOTE: Recognition could be fired before the entire audiofile has
            // completed, which means it's going to fire again but the AudioTimeOffsets
            // will be completely borked obviously. So add this time-offset to any
            // second or subsequent Recognition event that happens on this stream
            _offset += Utility.GarpstoSecs(Result.PhraseInfo.AudioSizeTime);             // TODO. is not accurate.

            if (_text == String.Empty)
            {
                ++Confidence_def_count;
                Confidence_def += Result.PhraseInfo.Rule.EngineConfidence;
            }
#if DEBUG
            logfile.Log();
#endif
        }
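
Utility.GarpstoSecs is not shown in the snippet. SAPI exposes AudioTimeOffset and AudioSizeTime in 100-nanosecond units, so a plausible implementation (an assumption; the real helper may differ) is:

        // Plausible sketch of Utility.GarpstoSecs (assumption): SAPI audio time
        // offsets and durations are expressed in 100-nanosecond units.
        internal static double GarpstoSecs(int garps)
        {
            return garps / 10000000.0; // 10,000,000 units of 100 ns = 1 second
        }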
Example no. 28
        internal void OnCommandRecognized(ISpeechRecoResult result)
        {
            int id = result.PhraseInfo.Rule.Id;
            if (m_handlers.ContainsKey(id))
            {
                CommandHandler handler = m_handlers[id];
                if (handler != null)
                {
                    // Pull any parameters from properties
                    Dictionary<string, string> parameters = new Dictionary<string, string>();
                    foreach (ISpeechPhraseProperty property in result.PhraseInfo.Properties)
                    {
                        if (property.Children != null)
                        {
                            foreach (ISpeechPhraseProperty childProperty in property.Children)
                            {
                                if (childProperty.Name.Length > 0 &&
                                    childProperty.Value != null)
                                {
                                    parameters[childProperty.Name] = childProperty.Value.ToString();
                                }
                            }
                        }
                    }

                    RaiseCommandExecuting(handler);
                    handler(parameters);
                    RaiseCommandExecuted(handler);
                }
            }
            else
            {
                throw new ArgumentOutOfRangeException("result", "Command Recognizer Mode does not contain the recognized command.");
            }
        }
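
The m_handlers table consulted above presumably maps grammar rule ids to delegates that receive the extracted parameters. An assumed sketch of the supporting declarations:

        // Assumed supporting declarations for the handler table above (sketch):
        internal delegate void CommandHandler(Dictionary<string, string> parameters);
        private readonly Dictionary<int, CommandHandler> m_handlers =
            new Dictionary<int, CommandHandler>();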
Example no. 30
        private void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            ////result_textBox.AppendText(Result.PhraseInfo.GetText(0, -1, true) + "\n");
            //string result;

            //ISpeechPhraseInfo info;
            //ISpeechPhraseAlternate alternate;
            //ISpeechPhraseAlternates alternates = Result.Alternates(20, 0, -1);
            //ISpeechPhraseReplacements replacements;

            //if (alternates != null)
            //    alternate = alternates.Item(0);
            //info = Result.PhraseInfo;
            //replacements = info.Replacements;
            //string rep;
            //if (replacements != null)
            //    rep = replacements.Item(0).Text;

            //result = Result.PhraseInfo.GetText(0, -1, true);
            //if (result.Length < 1) result = "???";
            //OnSpeechRecognized(null);

            ////result_textBox.AppendText(Result.PhraseInfo.GetText(0, -1, true) + "\n");
            //string result;

            string result;
            float  confidence;

            RecognizedSpeechAlternate[] alternates;
            RecognizedSpeech            recognizedSpeech;

            result     = Result.PhraseInfo.GetText(0, -1, true);
            confidence = Result.PhraseInfo.Rule.EngineConfidence;
            //confidence = Result.PhraseInfo.Rule.Confidence;
            if (result.Length < 1)
            {
                return;
            }

            alternates       = new RecognizedSpeechAlternate[1];
            alternates[0]    = new RecognizedSpeechAlternate(result, confidence);
            recognizedSpeech = new RecognizedSpeech(alternates);

            OnSpeechRecognized(recognizedSpeech);
        }
Example no. 31
 private void ContexRecognition(int iIndex, object obj, SpeechRecognitionType type, ISpeechRecoResult result)
 {
     SetMessage?.Invoke(result.PhraseInfo.GetText(0, -1, true));
 }
Example no. 32
 /**
  * Places a recognized command in the buffer.
  *
  * @param StreamNumber
  * @param StreamPosition
  * @param RecognitionType
  * @param Result
  */
 private void RecoEvent(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
 {
     _buffer.Enqueue(Result.PhraseInfo.GetText(0, -1, true));
 }
Example no. 34
 private void Reco_Event(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
 {
     txtReco.Text = Result.PhraseInfo.GetText(0, -1, true);
 }
Example no. 35
        private void SsrContex_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            if (this.SetMessage != null)
            {
                this.SetMessage(Result.PhraseInfo.GetText(0, -1, true));
            }
        }
Example no. 38
 private void writeHypothesis(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
 {
     texBHyp.Text = Result.PhraseInfo.GetText(0, -1, true);
 }
Example no. 39
 private void context_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
 {
     m_parent.onSpeech(Result.PhraseInfo.Rule.Name);
 }
Example no. 40
 private void RecognitionHandler(
     int StreamNumber,
     object StreamPosition,
     SpeechRecognitionType RecognitionType,
     ISpeechRecoResult Result)
 {
     int id = Result.PhraseInfo.Rule.Id;
     foreach (CommandRecognizerMode mode in Modes)
     {
         if (id >= mode.FirstRuleId &&
             id <= mode.LastRuleId)
         {
             mode.OnCommandRecognized(Result);
         }
     }
 }
Example no. 41
 private void writeCommand(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
 {
     commandBox.Text = Result.PhraseInfo.GetText(0, -1, true);
 }
Example no. 42
        /// <summary>
        /// main objRecoContext event;
        /// launched when the engine has recognized a phrase
        /// </summary>
        /// <param name="e">contains information on the phrase that has been recognized</param>
        public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult e)
        {
            //calculate accuracy
            float accuracy=(float)e.PhraseInfo.Elements.Item(0).EngineConfidence;

            //change accuracyMax dynamically
            if (accuracyMax<accuracy)
                accuracyMax=accuracy;

            if (accuracy<0)
                accuracy=0;

            accuracy=(int)((float)accuracy/accuracyMax*100);
            label2.Text="Accuracy "+accuracy.ToString()+ "%";

            //get phrase
            string phrase=e.PhraseInfo.GetText(0,-1,true);
            //make sure it's in lower case (for safer use only)
            phrase=phrase.ToLower();

            //if recognized any ...
            if (phrase!="" && accuracy>=accuracyLimit)
            {
                //Only if agent enabled
                if (menuItem14.Checked==true)
                {
                    agent1.StopAll("");
                    agent1.Speak(phrase,"");
                }

                switch (e.PhraseInfo.Rule.Name)		//rule name (not the phrase !)
                {
                    case "Activate":
                    {
                        //Only if agent enabled
                        if (menuItem14.Checked==true)
                        {
                            //show character
                            agent1.Show(false);
                        }

                        //load grammar
                        SAPIGrammarFromFile("XMLDeactivate.xml");

                        //notify user
                        label1.Text="Activate";

                        //Only if agent enabled
                        if (menuItem14.Checked==true)
                        {
                            //animate character
                            agent1.Play("StartListening");
                            agent1.Speak("I'm listening","");
                        }
                        break;
                    }
                    case "Deactivate":
                    {
                        //load grammar
                        SAPIGrammarFromFile("XMLActivate.xml");

                        //notify user
                        label1.Text="Deactivate";

                        //Only if agent enabled
                        if (menuItem14.Checked==true)
                        {
                            //animate character
                            agent1.Play("Wave");
                            agent1.Hide(false);
                        }
                        break;
                    }
                    case "Start":
                    {
                        keybd_event((byte)Keys.LWin,0,0,0);	//key down
                        keybd_event((byte)Keys.LWin,0,2,0);	//key up

                        //load grammar
                        SAPIGrammarFromFile("XMLStart.xml");

                        //notify user
                        label1.Text="Start";
                        break;
                    }
                    case "Right":
                    {
                        keybd_event((byte)Keys.Right,0,0,0);	//key down
                        keybd_event((byte)Keys.Right,0,2,0);	//key up
                        break;
                    }
                    case "Left":
                    {
                        keybd_event((byte)Keys.Left,0,0,0);	//key down
                        keybd_event((byte)Keys.Left,0,2,0);	//key up
                        break;
                    }
                    case "Up":
                    {
                        keybd_event((byte)Keys.Up,0,0,0);		//key down
                        keybd_event((byte)Keys.Up,0,2,0);		//key up
                        break;
                    }
                    case "Down":
                    {
                        keybd_event((byte)Keys.Down,0,0,0);	//key down
                        keybd_event((byte)Keys.Down,0,2,0);	//key up
                        break;
                    }
                    case "Enter":
                    {
                        keybd_event((byte)Keys.Enter,0,0,0);	//key down
                        keybd_event((byte)Keys.Enter,0,2,0);	//key up
                        break;
                    }
                    case "Escape":
                    {
                        keybd_event((byte)Keys.Escape,0,0,0);	//key down
                        keybd_event((byte)Keys.Escape,0,2,0);	//key up
                        keybd_event((byte)Keys.LMenu,0,2,0);	//key up

                        //load grammar (used to reset grammar in case it contains menu stuff ...)
                        SAPIGrammarFromFile("XMLDeactivate.xml");

                        //notify user
                        label1.Text="Activate";
                        break;
                    }
                    case "PureEscape":
                    {
                        keybd_event((byte)Keys.Escape,0,0,0);	//key down
                        keybd_event((byte)Keys.Escape,0,2,0);	//key up
                        break;
                    }
                    case "Alt":
                    {
                        keybd_event((byte)Keys.LMenu,0,0,0);	//key down
                        keybd_event((byte)Keys.LMenu,0,2,0);	//key up

                        //check if there is any menu and hook it
                        IntPtr hWnd=GetForegroundWindow();
                        IntPtr hMnu=GetMenu(hWnd);
                        int mnuCnt=GetMenuItemCount(hMnu);

                        if (mnuCnt!=0)
                        {
                            //Only if agent enabled
                            if (menuItem14.Checked==true)
                            {
                                //animate character
                                agent1.Play("DoMagic1");
                                agent1.Think("Hooking menu ...");
                            }

                            //add menu to grammar
                            hookMenu(hMnu);

                            //Only if agent enabled
                            if (menuItem14.Checked==true)
                            {
                                //animate character
                                agent1.Play("Idle1_1");
                            }
                        }
                        else
                        {
                            //load grammar
                            SAPIGrammarFromFile("XMLDeactivate.xml");

                            //notify user
                            label1.Text="Activate";
                        }
                        break;
                    }
                    case "Tab":
                    {
                        keybd_event((byte)Keys.Tab,0,0,0);	//key down
                        keybd_event((byte)Keys.Tab,0,2,0);	//key up
                        break;
                    }
                    case "ShiftTab":
                    {
                        keybd_event((byte)Keys.LShiftKey,0,0,0);	//key down
                        keybd_event((byte)Keys.Tab,0,0,0);		//key down
                        keybd_event((byte)Keys.Tab,0,2,0);		//key up
                        keybd_event((byte)Keys.LShiftKey,0,2,0);	//key up
                        break;
                    }
                    case "CloseProgram":
                    {
                        Close();
                        break;
                    }
                    case "ShowAbout":
                    {
                        if (frmAbout1==null)
                        {
                            //show frmAbout
                            frmAbout1=new frmAbout();
                            frmAbout1.Closed+=new EventHandler(frmAbout1_Closed);
                            //send user profile
                            frmAbout1.Tag=(string)objRecoContext.Recognizer.Profile.GetDescription(0);
                            frmAbout1.Show();
                        }

                        //load grammar
                        SAPIGrammarFromFile("XMLAbout.xml");

                        //notify user
                        label1.Text="About Speech Recognition";
                        break;
                    }
                    case "CloseAbout":
                    {
                        //close frmAbout
                        if (frmAbout1!=null)
                        {
                            frmAbout1.Close();
                            frmAbout1=null;
                        }
                        break;
                    }
                    case "ShowCommands":
                    {
                        if (frmCommands1==null)
                        {
                            //show frmAbout
                            frmCommands1=new frmCommands();
                            frmCommands1.Closed+=new EventHandler(frmCommands1_Closed);
                            //send grammar
                            frmCommands1.Tag=label1.Text;
                            frmCommands1.Show();
                        }

                        //load grammar
                        SAPIGrammarFromFile("XMLCommands.xml");
                        break;
                    }
                    case "CloseCommands":
                    {
                        //close frmCommands
                        if (frmCommands1!=null)
                        {
                            frmCommands1.Close();
                            frmCommands1=null;
                        }
                        break;
                    }
                    case "ShowFavorites":
                    {
                        if (frmFavorites1==null)
                        {
                            //show frmFavorites
                            frmFavorites1=new frmFavorites();
                            frmFavorites1.Closed+=new EventHandler(frmFavorites1_Closed);
                            //send file name
                            frmFavorites1.Tag=appPath+"XMLFavorites.xml";
                            frmFavorites1.Show();
                        }

                        //load grammar
                        SAPIGrammarFromFile("XMLFavorites.xml");

                        //notify user
                        label1.Text="Favorites";
                        break;
                    }
                    case "CloseFavorites":
                    {
                        //show frmAbout
                        if (frmFavorites1!=null)
                        {
                            frmFavorites1.Close();
                            frmFavorites1=null;
                        }
                        break;
                    }
                    case "CloseForm":
                    {
                        IntPtr hWnd=GetForegroundWindow();

                        //make sure we are not closing our program ...
                        if (hWnd!=this.Handle)
                        {
                            keybd_event((byte)Keys.LMenu,0,0,0);	//key down
                            keybd_event((byte)Keys.F4,0,0,0);		//key down
                            keybd_event((byte)Keys.LMenu,0,2,0);	//key up
                            keybd_event((byte)Keys.F4,0,2,0);		//key up
                        }
                        break;
                    }
                    case "Programs":
                    case "Documents":
                    case "Settings":
                    case "Search":
                    case "Help":
                    case "Run":
                    {
                        keybd_event((byte)(e.PhraseInfo.Rule.Name[0]),0,0,0);	//key down
                        keybd_event((byte)(e.PhraseInfo.Rule.Name[0]),0,2,0);	//key up

                        //load grammar
                        SAPIGrammarFromFile("XMLDeactivate.xml");

                        //notify user
                        label1.Text="Activate";
                        break;
                    }
                    case "RunProgram":
                    {
                        //show frmAbout
                        if (frmFavorites1!=null)
                        {
                            frmFavorites1.Close();
                            frmFavorites1=null;
                        }

                        try
                        {
                            System.Diagnostics.Process.Start(phrase);
                        }
                        catch
                        {
                            //Only if agent enabled
                            if (menuItem14.Checked==true)
                            {
                                agent1.Speak("Could not run : "+phrase,"");
                            }
                        }

                        //load grammar
                        SAPIGrammarFromFile("XMLDeactivate.xml");

                        //notify user
                        label1.Text="Activate";
                        break;
                    }
                    case "SwitchProgram":
                    {
                        keybd_event((byte)Keys.LMenu,0,0,0);	//key down
                        keybd_event((byte)Keys.Tab,0,0,0);	//key down
                        keybd_event((byte)Keys.Tab,0,2,0);	//key up

                        //load grammar
                        SAPIGrammarFromFile("XMLSwitchProgram.xml");

                        //notify user
                        label1.Text="Switch Program";
                        break;
                    }
                    case "SwitchEnter":
                    {
                        keybd_event((byte)Keys.LMenu,0,2,0);	//key up

                        //load grammar
                        SAPIGrammarFromFile("XMLDeactivate.xml");

                        //notify user
                        label1.Text="Activate";
                        break;
                    }

                    case "HoldKey":
                    {
                        //load grammar
                        SAPIGrammarFromFile("XMLStickyKeys.xml");

                        //notify user
                        label1.Text="Press key";
                        break;
                    }

                    case "ReleaseKey":
                    {
                        timer2.Enabled=false;

                        //load grammar
                        SAPIGrammarFromFile("XMLDeactivate.xml");

                        //notify user
                        label1.Text="Activate";
                        break;
                    }

                    case "HoldRight":
                    {
                        keyHolding=(byte)Keys.Right;
                        timer2.Enabled=true;
                        break;
                    }
                    case "HoldLeft":
                    {
                        keyHolding=(byte)Keys.Left;
                        timer2.Enabled=true;
                        break;
                    }
                    case "HoldUp":
                    {
                        keyHolding=(byte)Keys.Up;
                        timer2.Enabled=true;
                        break;
                    }
                    case "HoldDown":
                    {
                        keyHolding=(byte)Keys.Down;
                        timer2.Enabled=true;
                        break;
                    }
                    case "PageUp":
                    {
                        keybd_event((byte)Keys.PageUp,0,0,0);	//key down
                        keybd_event((byte)Keys.PageUp,0,2,0);	//key up
                        break;
                    }
                    case "Yes":
                    {
                        keybd_event((byte)Keys.Y,0,0,0);	//key down
                        keybd_event((byte)Keys.Y,0,2,0);	//key up
                        break;
                    }
                    case "No":
                    {
                        keybd_event((byte)Keys.N,0,0,0);	//key down
                        keybd_event((byte)Keys.N,0,2,0);	//key up
                        break;
                    }
                    case "BackSpace":
                    {
                        keybd_event((byte)Keys.Back,0,0,0);	//key down
                        keybd_event((byte)Keys.Back,0,2,0);	//key up
                        break;
                    }
                    case "ShutDown":
                    {
                        Shell32.ShellClass a=new Shell32.ShellClass();
                        a.ShutdownWindows();

                        //load grammar
                        SAPIGrammarFromFile("XMLShutDown.xml");

                        //notify user
                        label1.Text="Shut Down";
                        break;
                    }
                    case "ActivateWithoutAnimation":
                    {
                        //load grammar
                        SAPIGrammarFromFile("XMLDeactivate.xml");

                        //notify user
                        label1.Text="Activate";
                        break;
                    }
                    case "EnterNumericState":
                    {
                        //load grammar
                        SAPIGrammarFromFile("XMLNumericState.xml");

                        //notify user
                        label1.Text="Numeric State...";
                        break;
                    }
                    case "Zero":
                    case "One":
                    case "Two":
                    case "Three":
                    case "Four":
                    case "Five":
                    case "Six":
                    case "Seven":
                    case "Eight":
                    case "Nine":
                    {
                        //first character of the recognized text, expected "0".."9"
                        byte k=(byte)e.PhraseInfo.GetText(0,-1,false)[0];

                        //adding '0' (48) maps the ASCII digit to its numpad
                        //virtual-key code: '0'+48 = 96 = VK_NUMPAD0, and so on
                        keybd_event((byte)(k+'0'),0,0,0);	//key down
                        keybd_event((byte)(k+'0'),0,2,0);	//key up
                        break;
                    }
                    case "Plus":
                    {
                        keybd_event((byte)Keys.Add,0,0,0);	//key down
                        keybd_event((byte)Keys.Add,0,2,0);	//key up
                        break;
                    }
                    case "Minus":
                    {
                        keybd_event((byte)Keys.Subtract,0,0,0);	//key down
                        keybd_event((byte)Keys.Subtract,0,2,0);	//key up
                        break;
                    }
                    case "Div":
                    {
                        keybd_event((byte)Keys.Divide,0,0,0);	//key down
                        keybd_event((byte)Keys.Divide,0,2,0);	//key up
                        break;
                    }
                    case "Mul":
                    {
                        keybd_event((byte)Keys.Multiply,0,0,0);	//key down
                        keybd_event((byte)Keys.Multiply,0,2,0);	//key up
                        break;
                    }
                    case "Equal":
                    {
                        keybd_event(187,0,0,0);	//key down (187 = VK_OEM_PLUS, the '=' key)
                        keybd_event(187,0,2,0);	//key up
                        break;
                    }
                    case "EnterAlphabeticState":
                    {
                        //load grammar
                        SAPIGrammarFromFile("XMLAlphabeticState.xml");

                        //notify user
                        label1.Text="Alphabetic State...";
                        break;
                    }
                    case "abcA":case "abcB":case "abcC":case "abcD":case "abcE":case "abcF":case "abcG":
                    case "abcH":case "abcI":case "abcJ":case "abcK":case "abcL":case "abcM":case "abcN":
                    case "abcO":case "abcP":case "abcQ":case "abcR":case "abcS":case "abcT":case "abcU":
                    case "abcV":case "abcW":case "abcX":case "abcY":case "abcZ":
                    {
                        firstRecognition=phrase;
                        string str1=phrase;
                        str1=str1.ToUpper();
                        keybd_event((byte)(str1[0]),0,0,0);	//key down
                        keybd_event((byte)(str1[0]),0,2,0);	//key up
                        break;
                    }
                    case "At":
                    {
                        keybd_event((byte)Keys.LShiftKey,0,0,0);	//key down
                        keybd_event((byte)Keys.D2,0,0,0);			//key down
                        keybd_event((byte)Keys.D2,0,2,0);			//key up
                        keybd_event((byte)Keys.LShiftKey,0,2,0);	//key up
                        break;
                    }
                    case "UnderLine":
                    {
                        keybd_event((byte)Keys.LShiftKey,0,0,0);	//key down
                        keybd_event((byte)Keys.OemMinus,0,0,0);		//key down
                        keybd_event((byte)Keys.OemMinus,0,2,0);		//key up
                        keybd_event((byte)Keys.LShiftKey,0,2,0);	//key up
                        break;
                    }
                    case "Dash":
                    {
                        keybd_event((byte)Keys.Subtract,0,0,0);		//key down
                        keybd_event((byte)Keys.Subtract,0,2,0);		//key up
                        break;
                    }
                    case "Dot":
                    {
                        keybd_event(190,0,0,0);	//key down (190 = VK_OEM_PERIOD, the '.' key)
                        keybd_event(190,0,2,0);	//key up
                        break;
                    }
                    case "BackSlash":
                    {
                        keybd_event((byte)Keys.OemPipe,0,0,0);	//key down (VK_OEM_5, the '\' key on US layouts)
                        keybd_event((byte)Keys.OemPipe,0,2,0);	//key up
                        break;
                    }
                    case "AlphabeticStateNo":
                    {
                        //delete the first letter
                        keybd_event((byte)Keys.Back,0,0,0);	//key down
                        keybd_event((byte)Keys.Back,0,2,0);	//key up

                        //write the replacement letter
                        string str1=firstRecognition;

                        //fix common misrecognitions (substitute the frequently confused letter)
                        switch(firstRecognition)
                        {
                            case "a": str1="h"; break;
                            case "b": str1="d"; break;
                            case "c": str1="t"; break;
                            case "d": str1="p"; break;
                            case "f": str1="x"; break;
                            case "h": str1="f"; break;
                            case "m": str1="n"; break;
                            case "n": str1="l"; break;
                            case "l": str1="m"; break;
                            case "p": str1="v"; break;
                            case "u": str1="q"; break;
                            case "v": str1="t"; break;
                            case "e": str1="b"; break;
                            case "j": str1="k"; break;
                        }

                        firstRecognition=str1;
                        str1=str1.ToUpper();

                        keybd_event((byte)(str1[0]),0,0,0);	//key down
                        keybd_event((byte)(str1[0]),0,2,0);	//key up
                        break;
                    }

                    //otherwise press the first letter of the rule name (probably a menu accelerator)
                    default:
                    {
                        string str1=e.PhraseInfo.Rule.Name;
                        str1=str1.ToUpper();

                        keybd_event((byte)(str1[0]),0,0,0);	//key down
                        keybd_event((byte)(str1[0]),0,2,0);	//key up

                        //could be submenu (hook it)
                        hookSubmenu(e.PhraseInfo.Rule.Name[0].ToString());
                        break;
                    }
                }
            }

            //if not recognized ...
            else
            {
                //Only if agent enabled
                if (menuItem14.Checked==true)
                {
                    //animate character
                    agent1.Play("Decline");
                }
            }
        }
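Every case above funnels through the Win32 keybd_event function, and the Hold*/ReleaseKey pair depends on a timer that auto-repeats the held key. A minimal sketch of the supporting declarations inside the form class, assuming the keyHolding and timer2 fields used above; the tick-handler body is an assumption about how the repeat is implemented, not code from the source:

    // requires: using System.Runtime.InteropServices;
    [DllImport("user32.dll")]
    static extern void keybd_event(byte bVk, byte bScan, uint dwFlags, uint dwExtraInfo);

    const uint KEYEVENTF_KEYUP = 0x0002;    // the literal 2 passed as dwFlags above

    byte keyHolding;                        // virtual-key code chosen by the Hold* cases
    System.Windows.Forms.Timer timer2;      // started by the Hold* cases, stopped by "ReleaseKey"

    // Assumed tick handler: re-send the held key on every tick so it
    // auto-repeats until "ReleaseKey" disables the timer.
    void timer2_Tick(object sender, System.EventArgs e)
    {
        keybd_event(keyHolding, 0, 0, 0);               // key down
        keybd_event(keyHolding, 0, KEYEVENTF_KEYUP, 0); // key up
    }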
Example n. 43
 internal void OnCommandNotRecognized(ISpeechRecoResult result)
 {
     RaiseCommandNotRecognized();
 }
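RaiseCommandNotRecognized is presumably the usual thin wrapper that raises a public event; a plausible sketch (the event name and implementation are assumptions, not taken from the source):

    public event EventHandler CommandNotRecognized;   // assumed declaration

    void RaiseCommandNotRecognized()
    {
        // Null-safe raise; subscribers decide how to react, e.g. by
        // playing a "Decline" animation as in the earlier example.
        if (CommandNotRecognized != null)
            CommandNotRecognized(this, EventArgs.Empty);
    }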
Example n. 44
        /// <summary>
        ///     RecoContext_Recognition is the event handler function for
        ///     SpInProcRecoContext object's Recognition event.
        /// </summary>
        /// <param name="StreamNumber"></param>
        /// <param name="StreamPosition"></param>
        /// <param name="RecognitionType"></param>
        /// <param name="Result"></param>
        /// <remarks>
        ///     See EnableSpeech() for how to hook up this function with the
        ///     event.
        /// </remarks>
        private void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            Debug.WriteLine("Recognition: " + Result.PhraseInfo.GetText(0, -1, true) + ", " + StreamNumber + ", " + StreamPosition);

            int index;
            int index1;
            int index2;
            ISpeechPhraseProperty oCard;
            ISpeechPhraseProperty oNumber;
            ISpeechPhraseProperty oCommand;

            // oCommand, oNumber and oCard hold the properties of the parts
            // of the recognized phrase. For example, if the top-level rule
            // matches "select Seattle", then the ListItemsRule matches the
            // "Seattle" part. The following code gets the property of each
            // such sub-phrase, which is set when the corresponding word is
            // added to ruleListItems in RebuildGrammar.
            oCommand = Result.PhraseInfo.Properties.Item(0).Children.Item(0);
            index    = oCommand.Id;

            oNumber = Result.PhraseInfo.Properties.Item(1).Children.Item(0);
            index1  = oNumber.Id;

            oCard  = Result.PhraseInfo.Properties.Item(2).Children.Item(0);
            index2 = oCard.Id;

            if ((System.Decimal)Result.PhraseInfo.GrammarId == grammarId)
            {
                // Check to see if the item at the same position in the list
                // still has the same text.
                // This is to prevent the rare case that the user keeps
                // talking while the list is being added or removed. By the
                // time this event is fired and handled, the list box may have
                // already changed.
                if (oCard.Name.CompareTo(libcards[index2].ToString()) == 0 || oCard.Name.CompareTo(cryptcards[index2].ToString()) == 0)
                {
                    listView1.Items[index2].Selected = true;
                    listView1.Items[index2].Focused  = true;
                    listView1.TopItem       = listView1.Items[index2];
                    txtNumber.Text          = oNumber.Name;
                    comboBox2.SelectedIndex = index;
                }
            }
        }
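For Properties.Item(0..2).Children.Item(0) to resolve, each alternative must have been added to its list rule with a property name and id when the grammar was built. A sketch of what that step in RebuildGrammar might look like with SpeechLib's AddWordTransition; the rule and collection names are assumptions based on the comment above:

    object propValue = "";
    for (int i = 0; i < libcards.Count; i++)
    {
        // The property id passed here is what comes back as oCard.Id
        // (index2) in the recognition handler.
        ruleListItems.InitialState.AddWordTransition(
            null,                             // null destination = end of rule
            libcards[i].ToString(),           // word to recognize
            " ",                              // separator
            SpeechGrammarWordType.SGLexical,
            "Card",                           // property name
            i,                                // property id
            ref propValue,
            1f);
    }
    grammar.Rules.Commit();                   // recompile the grammar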
Example n. 45
        public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            //MessageBox.Show(charReq.Status.ToString());
            if(charReq.Status == 0)
            {
                grammar.DictationSetState(SpeechRuleState.SGDSInactive);

                inputBox.Text = Result.PhraseInfo.GetText(0, -1, true);

                doChatting();

                grammar.DictationSetState(SpeechLib.SpeechRuleState.SGDSActive);
            }
        }
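Dictation is suspended while doChatting() runs, presumably so the reply is not picked up as new input, then re-armed. A minimal sketch of how such a dictation grammar is typically set up with SpeechLib (variable names are assumptions):

    SpSharedRecoContext recoContext = new SpSharedRecoContext();
    ISpeechRecoGrammar grammar = recoContext.CreateGrammar(0);

    grammar.DictationLoad("", SpeechLoadOption.SLOStatic);   // default dictation topic
    grammar.DictationSetState(SpeechRuleState.SGDSActive);   // start free-form dictation

    recoContext.Recognition +=
        new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);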
Example n. 46
        private void ContexRecognition(int iIndex, object obj, SpeechRecognitionType type, ISpeechRecoResult result)
        {
            if (this.SetMessage != null)
            {
                this.SetMessage(result.PhraseInfo.GetText(0, -1, true));
            }
        }
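The SetMessage null check is the classic guard before invoking a caller-supplied delegate. A sketch of the assumed declaration and host-side wiring; every name other than SetMessage is hypothetical:

    public delegate void SetMessageHandler(string text);   // assumed delegate type
    public SetMessageHandler SetMessage;                    // assigned by the host

    // Host side: route recognized text into a UI label, for example.
    speech.SetMessage = delegate(string text) { label1.Text = text; };

Example n. 47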
 public void RecoContext_Recognition(int StreamNumber, object StreamPosition,
     SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
 {
     Debug.WriteLine("Recognition: " + Result.PhraseInfo.GetText(0, -1, true) + ", " +
         StreamNumber + ", " + StreamPosition);
     this.understandet(Result.PhraseInfo.GetText(0, -1, true));
     //ISpeechPhraseProperty oItem;
 }
Example n. 48
 private void Hypo_Event(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
 {
     string lWord = Result.PhraseInfo.GetText(0, -1, true);
 }
Example n. 49
 /// <summary>
 ///     RecoContext_Hypothesis is the event handler function for 
 ///     SpSharedRecoContext object's Hypothesis event.
 /// </summary>
 /// <param name="StreamNumber"></param>
 /// <param name="StreamPosition"></param>
 /// <param name="Result"></param>
 /// <remarks>
 ///     See EnableSpeech() for how to hook up this function with the 
 ///     event.
 /// </remarks>
 public void RecoContext_Hypothesis(int StreamNumber, 
     object StreamPosition, 
     ISpeechRecoResult Result)
 {
     Debug.WriteLine("Hypothesis: " + 
         Result.PhraseInfo.GetText(0, -1, true) + ", " +
         StreamNumber + ", " + StreamPosition);
 }
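Per the remarks, the hookup lives in EnableSpeech(); with the SpeechLib interop it typically looks like this (the context variable name is an assumption):

    objRecoContext.Hypothesis +=
        new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);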
Example n. 50
        private void Reco_Event(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            String text = Result.PhraseInfo.GetText(0, -1, true);

            synth.Speak("Recognition: " + text); // DEBUG

            // TODO: For "Do you mean?" functionality, handle yes/no before
            //       calling the function below, since they are confirmations,
            //       not actions.

            DoActionFromVoiceCommand(text);
        }
Example n. 51
        /// <summary>
        ///     RecoContext_Recognition is the event handler function for 
        ///     SpSharedRecoContext object's Recognition event.
        /// </summary>
        /// <param name="StreamNumber"></param>
        /// <param name="StreamPosition"></param>
        /// <param name="RecognitionType"></param>
        /// <param name="Result"></param>
        /// <remarks>
        ///     See EnableSpeech() for how to hook up this function with the 
        ///     event.
        /// </remarks>
        public void RecoContext_Recognition(int StreamNumber, 
            object StreamPosition, 
            SpeechRecognitionType RecognitionType,
            ISpeechRecoResult Result)
        {
            Debug.WriteLine("Recognition: " + 
                Result.PhraseInfo.GetText(0, -1, true) + ", " +
                StreamNumber + ", " + StreamPosition);

            int                     index;
            ISpeechPhraseProperty   oItem;
            
            // oItem will be the property of the second part in the recognized 
            // phrase. For example, if the top-level rule matches 
            // "select Seattle", then the ListItemsRule matches the "Seattle" part.
            // The following code will get the property of the "Seattle" 
            // phrase, which is set when the word "Seattle" is added to the 
            // ruleListItems in RebuildGrammar.
            oItem = Result.PhraseInfo.Properties.Item(0).Children.Item(0);
            index = oItem.Id;

            if ((System.Decimal)Result.PhraseInfo.GrammarId == grammarId)
            {
                // Check to see if the item at the same position in the list 
                // still has the same text.
                // This is to prevent the rare case that the user keeps 
                // talking while the list is being added or removed. By the 
                // time this event is fired and handled, the list box may have 
                // already changed.
                if( oItem.Name.CompareTo(this.Items[index].ToString())==0 )
                {
                    this.SelectedIndex = index;
                }
            }
        }
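The GrammarId test filters out results that belong to other grammars in the shared context; the id is whatever was passed to CreateGrammar when this control built its grammar. A sketch with an arbitrary id (both names are assumptions):

    private decimal grammarId = 7;   // any id unique to this control

    // In EnableSpeech(): create the grammar under that id so recognitions
    // can be matched back to it in RecoContext_Recognition.
    grammar = recoContext.CreateGrammar(grammarId);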
Example n. 52
        private void OnReco(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
        {
            string recoResult = Result.PhraseInfo.GetText(); // Convert the recognized phrase to plain text.

            recoResult = recoResult.ToLower();               // Normalize to lowercase, as Minerva does with inquiry text.

            submit(recoResult);
        }
Example n. 53
 private void Reco_Event(int StreamNumber, object StreamPosition,SpeechRecognitionType RecognitionType,ISpeechRecoResult Result)
 {
     txtReco.Text = Result.PhraseInfo.GetText(0, -1, true);
 }