void SetCommandMode()
        {
            // Grammar slot used for the command list.
            const int grammarId = 1;

            // Commands to be recognized.
            string[] commands =
            {
                "Hello",
                "Good",
                "Bad",
            };

            // Compile the command list into the grammar slot.
            var status = recognition.BuildGrammarFromStringList(grammarId, commands, null);
            if (status < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("コマンドの解析に失敗しました");
            }

            // Activate the compiled grammar.
            status = recognition.SetGrammar(grammarId);
            if (status < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("コマンドの設定に失敗しました");
            }
        }
Example #2
0
        /// <summary>
        /// Configures the speech recognition module: creates the audio source,
        /// selects the active device, instantiates the recognizer, applies the
        /// language profile, and activates either the command/control grammar
        /// or dictation mode depending on <c>SpeechModuleMode</c>.
        /// </summary>
        /// <returns>
        /// true on success; false if the recognizer implementation could not be created.
        /// </returns>
        private bool ConfigureSpeechRecognition()
        {
            /* Create the AudioSource instance */
            source = session.CreateAudioSource();

            /* Set audio volume to 0.2 */
            source.SetVolume(0.2f);

            /* Select the active capture device */
            source.SetDevice(sourceDeviceInfo[_activeSource]);

            /* NOTE(review): the original code built a PXCMSession.ImplDesc with
             * iuid = modulesIuID[_activeModule] here but never passed it to
             * CreateImpl, so the module selection had no effect. The dead
             * descriptor has been removed; if module selection is intended,
             * the CreateImpl(ImplDesc, out ...) overload should be used —
             * confirm against the SDK before wiring that in. */
            pxcmStatus sts = session.CreateImpl <PXCMSpeechRecognition>(out sr);

            if (sts >= pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                /* Configure the language profile */
                PXCMSpeechRecognition.ProfileInfo pinfo;
                sr.QueryProfile(_activeLanguage, out pinfo);
                sr.SetProfile(pinfo);

                /* Set Command/Control or Dictation */
                if (SpeechModuleMode == SpeechModuleModeType.CommandControl)
                {
                    string[] cmds = Commands;
                    if (cmds != null && cmds.Length != 0)
                    {
                        // Voice commands available: compile and activate grammar slot 1.
                        sr.BuildGrammarFromStringList(1, cmds, null);
                        sr.SetGrammar(1);
                    }
                    else
                    {
                        // Empty command list is reported as an error rather than
                        // silently switching modes.
                        Debug.Log("Speech Command List Empty!");
                        SetError(SpeechManagerErrorType.VoiceThreadError_CommandsListEmpty);

                        // Deliberately NOT falling back to dictation:
                        //SpeechModuleMode = SpeechModuleModeType.Dictation;
                        //sr.SetDictation();
                    }
                }
                else
                {
                    sr.SetDictation();
                }
            }
            else
            {
                Debug.Log("VoiceThreadError - InitFailed - CreateImpl!");
                SetError(SpeechManagerErrorType.VoiceThreadError_InitFailed_CreateImpl);
                return(false);
            }
            return(true);
        }
Example #3
0
        /// <summary>
        /// Compiles a grammar file into grammar slot 1 and makes it the
        /// active grammar on the recognizer.
        /// </summary>
        /// <param name="GrammarFilename">Path of the grammar file to compile.</param>
        /// <returns>true if compilation and activation both succeed; otherwise false.</returns>
        bool SetGrammarFromFile(String GrammarFilename)
        {
            const Int32 grammarId = 1;

            // Compile the file; GFT_NONE leaves the file-type selection to the SDK.
            pxcmStatus compileStatus = sr.BuildGrammarFromFile(grammarId, PXCMSpeechRecognition.GrammarFileType.GFT_NONE, GrammarFilename);
            if (compileStatus < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                // Surface the compiler diagnostics to the UI before bailing out.
                form.PrintStatus("Grammar Compile Errors:");
                form.PrintStatus(sr.GetGrammarCompileErrors(grammarId));
                return(false);
            }

            // Activation succeeds only for non-error statuses.
            return(sr.SetGrammar(grammarId) >= pxcmStatus.PXCM_STATUS_NO_ERROR);
        }
        /// <summary>
        /// Sets up RealSense speech recognition end-to-end: session, audio
        /// source (first device), US-English profile, JSGF grammar, and the
        /// recognition handler. Closes the window if the grammar file fails
        /// to build.
        /// </summary>
        private void ConfigureRealSenseSpeech()
        {
            // Instantiate session and audio source objects
            session     = PXCMSession.CreateInstance();
            audioSource = session.CreateAudioSource();

            // Select the first audio device
            PXCMAudioSource.DeviceInfo deviceInfo;
            deviceInfo = new PXCMAudioSource.DeviceInfo();
            audioSource.QueryDeviceInfo(0, out deviceInfo);
            audioSource.SetDevice(deviceInfo);

            // Set the audio recording volume
            audioSource.SetVolume(0.2f);

            // Create a speech recognition instance
            session.CreateImpl <PXCMSpeechRecognition>(out speechRecognition);

            // Initialize the speech recognition module
            PXCMSpeechRecognition.ProfileInfo profileInfo;
            speechRecognition.QueryProfile(0, out profileInfo);
            profileInfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_US_ENGLISH;
            speechRecognition.SetProfile(profileInfo);

            // Build and set the active grammar
            pxcmStatus status = speechRecognition.BuildGrammarFromFile(1, PXCMSpeechRecognition.GrammarFileType.GFT_JSGF, "grammarsvm.jsgf");

            // FIX: the original compared with `== PXCM_STATUS_NO_ERROR`, which
            // treated positive warning statuses as failure. Every other status
            // check in this file uses `< PXCM_STATUS_NO_ERROR` for failure, so
            // accept any non-error status here for consistency.
            if (status >= pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                speechRecognition.SetGrammar(1);
            }
            else
            {
                MessageBox.Show("Java Speech Grammar Format (JSGF) file not found!");
                this.Close();
            }

            // Display device information
            //lblDeviceInfo.Content = string.Format("[Device: {0}, Language Profile: {1}]", deviceInfo.name, profileInfo.language);

            // Set the speech recognition handler and start recognition.
            PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
            handler.onRecognition = OnRecognition;
            speechRecognition.StartRec(null, handler);
        }
Example #5
0
    /// <summary>
    /// Configures speech recognition in command/control mode with a fixed
    /// command set ("Create", "Save", "Load", "Run").
    /// </summary>
    /// <returns>
    /// true on success; false if the recognizer implementation could not be created.
    /// </returns>
    private bool ConfigureSpeechRecognition()
    {
        /* Create the AudioSource instance */
        source = session.CreateAudioSource();

        /* Set audio volume to 0.2 */
        source.SetVolume(0.2f);

        /* Select the active capture device */
        source.SetDevice(sourceDeviceInfo[_activeSource]);

        pxcmStatus sts = session.CreateImpl <PXCMSpeechRecognition>(out sr);

        // Guard clause: bail out early if the recognizer could not be created.
        if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            Debug.Log("VoiceThreadError - InitFailed - CreateImpl!");
            return(false);
        }

        /* Configure the language profile */
        PXCMSpeechRecognition.ProfileInfo pinfo;
        sr.QueryProfile(_activeLanguage, out pinfo);
        sr.SetProfile(pinfo);

        // FIX: the original guarded this array with `cmds != null &&
        // cmds.GetLength(0) != 0`, but a freshly allocated 4-element array
        // literal can never be null or empty, so the dead check was removed.
        string[] cmds = new String[4] {
            "Create", "Save", "Load", "Run"
        };

        // Compile and activate grammar slot 1 with the fixed command set.
        sr.BuildGrammarFromStringList(1, cmds, null);
        sr.SetGrammar(1);

        return(true);
    }
Example #6
0
        /// <summary>
        /// Sets up the speech recognizer: selects an audio device, creates the
        /// recognizer with the default profile, builds a grammar from the
        /// voice commands stored in the JONGOS database, and starts recognition.
        /// No-op when <paramref name="session"/> is null.
        /// </summary>
        static void SetupRecognizer(PXCMSession session)
        {
            if (session == null)
            {
                return;
            }

            #region Audio Source

            // session is a PXCMSession instance.
            source = session.CreateAudioSource();

            // Scan and enumerate audio devices.
            source.ScanDevices();

            // Pick the last enumerated device, if any (same selection the
            // original loop-with-break performed).
            PXCMAudioSource.DeviceInfo dinfo = null;
            int deviceCount = source.QueryDeviceNum();
            if (deviceCount > 0)
            {
                source.QueryDeviceInfo(deviceCount - 1, out dinfo);
            }
            if (dinfo != null)
            {
                // Set the active device.
                source.SetDevice(dinfo);
            }

            #endregion

            #region Recognizer Instance

            pxcmStatus createStatus = session.CreateImpl <PXCMSpeechRecognition>(out sr);
            if (createStatus < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                return;
            }

            // Apply the default (index 0) profile.
            PXCMSpeechRecognition.ProfileInfo pinfo;
            sr.QueryProfile(0, out pinfo);
            sr.SetProfile(pinfo);

            //sr.SetDictation();

            #endregion

            #region Grammar

            // Map each voice command string to its Module record from the DB.
            Perintah = new Dictionary <string, Module>();
            using (var data = new JONGOS_DBEntities())
            {
                var listCommand = from c in data.Modules
                                  orderby c.ID
                                  select c;
                foreach (var item in listCommand.Distinct())
                {
                    Perintah.Add(item.VoiceCommand, item);
                }
            }

            // The grammar is simply the set of known command phrases.
            List <string> cmds = new List <string>(Perintah.Keys);

            // Build the grammar.
            sr.BuildGrammarFromStringList(1, cmds.ToArray(), null);

            // Set the active grammar.
            sr.SetGrammar(1);
            #endregion

            #region recognition Event

            // Wire up the recognition callback and start listening.
            PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
            handler.onRecognition = OnRecognition;
            //handler.onAlert = OnAlert;
            // sr is a PXCMSpeechRecognition instance
            pxcmStatus stsrec = sr.StartRec(source, handler);
            if (stsrec < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                Console.WriteLine("Recognizer error!");
            }

            #endregion
        }