Example No. 1
    public void Start()
    {
        filePath = Application.dataPath + @"\LogData.txt";
        // Dispose the returned StreamWriter so the log file is not left locked
        File.CreateText(filePath).Dispose();

        // Create the session instance
        session = PXCMSession.CreateInstance();
        // Create the audio input source
        source = session.CreateAudioSource();

        PXCMAudioSource.DeviceInfo dinfo = null;

        // Detect the audio device and log its name
        source.QueryDeviceInfo(0, out dinfo);
        source.SetDevice(dinfo);
        Debug.Log(dinfo.name);

        // Create the speech recognition module
        session.CreateImpl <PXCMSpeechRecognition>(out sr);

        // Initial speech recognition settings: use the Japanese profile
        PXCMSpeechRecognition.ProfileInfo pinfo;
        sr.QueryProfile(out pinfo);
        pinfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_JP_JAPANESE;
        sr.SetProfile(pinfo);

        // Attach the callback method to the handler
        handler = new PXCMSpeechRecognition.Handler();
        handler.onRecognition = (x) => Dataoutput(x.scores[0].sentence, x.duration);
        sr.SetDictation();
    }
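The Dataoutput callback wired into the handler above is not part of this snippet, and the matching StartRec call presumably lives elsewhere in the class. A minimal sketch of what such a logger might look like, assuming it simply appends each recognized sentence and its duration to the LogData.txt file created in Start() (requires using System.IO;):

    // Hypothetical sketch, not part of the original example: append each
    // recognized sentence and its duration to the log file created in Start().
    private void Dataoutput(string sentence, int duration)
    {
        using (StreamWriter writer = File.AppendText(filePath))
        {
            writer.WriteLine("{0}\t{1}ms", sentence, duration);
        }
    }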
Example No. 2
    // Use this for initialization
    void Start()
    {
        // Reference to the script that receives the text
        textMesh   = GameObject.Find("Word");
        controller = textMesh.GetComponent <TextController>();

        // Initialize the speech recognition session
        session = PXCMSession.CreateInstance();
        source  = session.CreateAudioSource();

        PXCMAudioSource.DeviceInfo dinfo = null;

        source.QueryDeviceInfo(1, out dinfo);
        source.SetDevice(dinfo);
        Debug.Log(dinfo.name);

        session.CreateImpl <PXCMSpeechRecognition>(out sr);

        PXCMSpeechRecognition.ProfileInfo pinfo;
        sr.QueryProfile(out pinfo);
        pinfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_JP_JAPANESE;
        sr.SetProfile(pinfo);

        handler = new PXCMSpeechRecognition.Handler();
        handler.onRecognition = (x) => controller.SetText(x.scores[0].sentence);
        sr.SetDictation();
        sr.StartRec(source, handler);
    }
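The TextController component looked up above is not shown. A minimal sketch of what it might look like, assuming the "Word" GameObject also carries a TextMesh; only the SetText name comes from the call in the example, everything else is an assumption:

    using UnityEngine;

    // Hypothetical sketch of the TextController referenced above.
    // Recognition callbacks arrive on an SDK worker thread, so the
    // sentence is queued and applied on Unity's main thread in Update().
    public class TextController : MonoBehaviour
    {
        private TextMesh textMesh;
        private volatile string pending;

        void Awake()
        {
            textMesh = GetComponent<TextMesh>();
        }

        // Called from the recognition handler with the top-scoring sentence.
        public void SetText(string sentence)
        {
            pending = sentence;
        }

        void Update()
        {
            if (pending != null)
            {
                textMesh.text = pending;
                pending = null;
            }
        }
    }

Queuing instead of writing textMesh.text directly matters because Unity objects may only be touched from the main thread, while onRecognition fires on the SDK's own thread.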
Example No. 3
        private void Initialize()
        {
            this.GetSessionAndSenseManager();

            this.audioSource = this.session.CreateAudioSource();

            pxcmStatus sts = this.session.CreateImpl <PXCMSpeechRecognition>(this.descs[0], out this.recognition);

            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("Could not create audio source.");
            }

            // Set the audio input device
            //PXCMAudioSource.DeviceInfo dinfo = (PXCMAudioSource.DeviceInfo)deviceInfos[FInAudioDevice[0]];
            //FLogger.Log(LogType.Debug, dinfo.name);
            //this.audioSource.SetDevice(dinfo);
            for (int i = 0; i < deviceInfos.Count; i++)
            {
                PXCMAudioSource.DeviceInfo dinfo = deviceInfos[i];
                if (dinfo.name.Equals(FInAudioDevice[0]))
                {
                    this.audioSource.SetDevice(dinfo);
                }
            }

            // set language
            for (int i = 0; i < profileInfos.Count; i++)
            {
                PXCMSpeechRecognition.ProfileInfo pinfo = profileInfos[i];
                if (pinfo.language.ToString().Equals(FInLanguage[0]))
                {
                    this.recognition.SetProfile(pinfo);
                }
            }

            // set dictation mode
            sts = this.recognition.SetDictation();
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("Could not set dictation mode. " + sts.ToString());
            }

            PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
            handler.onRecognition = OnRecognition;

            sts = this.recognition.StartRec(this.audioSource, handler);
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("Could not start recording.");
            }

            this.initialized = true;
        }
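This plugin-style example (FInAudioDevice and FInLanguage look like vvvv input pins) assumes the deviceInfos and profileInfos lists were populated beforehand. A sketch of how they might be filled, assuming it runs once the audio source and recognition module exist; the method name and placement are assumptions:

    // Hypothetical sketch: fill the lookup lists consulted by Initialize(),
    // using the same enumeration pattern as the other examples on this page.
    private void EnumerateDevicesAndProfiles()
    {
        deviceInfos.Clear();
        this.audioSource.ScanDevices();
        for (int i = 0; ; ++i)
        {
            PXCMAudioSource.DeviceInfo dinfo;
            if (this.audioSource.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                break;
            }
            deviceInfos.Add(dinfo);
        }

        profileInfos.Clear();
        for (int i = 0; ; ++i)
        {
            PXCMSpeechRecognition.ProfileInfo pinfo;
            if (this.recognition.QueryProfile(i, out pinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                break;
            }
            profileInfos.Add(pinfo);
        }
    }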
Example No. 4
        private void ConfigureRealSenseSpeech()
        {
            // Instantiate session and audio source objects
            session     = PXCMSession.CreateInstance();
            audioSource = session.CreateAudioSource();

            // Select the first audio device
            PXCMAudioSource.DeviceInfo deviceInfo;
            deviceInfo = new PXCMAudioSource.DeviceInfo();
            audioSource.QueryDeviceInfo(0, out deviceInfo);
            audioSource.SetDevice(deviceInfo);

            // Set the audio recording volume
            audioSource.SetVolume(0.2f);

            // Create a speech recognition instance
            session.CreateImpl <PXCMSpeechRecognition>(out speechRecognition);

            // Initialize the speech recognition module
            PXCMSpeechRecognition.ProfileInfo profileInfo;
            speechRecognition.QueryProfile(0, out profileInfo);
            profileInfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_US_ENGLISH;
            speechRecognition.SetProfile(profileInfo);

            // Build and set the active grammar
            pxcmStatus status = speechRecognition.BuildGrammarFromFile(1, PXCMSpeechRecognition.GrammarFileType.GFT_JSGF, "grammarsvm.jsgf");

            if (status == pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                speechRecognition.SetGrammar(1);
            }
            else
            {
                MessageBox.Show("Java Speech Grammar Format (JSGF) file not found!");
                this.Close();
            }

            // Display device information
            //lblDeviceInfo.Content = string.Format("[Device: {0}, Language Profile: {1}]", deviceInfo.name, profileInfo.language);

            // Set the speech recognition handler
            PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
            handler.onRecognition = OnRecognition;
            speechRecognition.StartRec(audioSource, handler);
        }
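The contents of grammarsvm.jsgf are not shown. A hypothetical minimal JSGF grammar of the kind BuildGrammarFromFile expects might look like this; the command words are invented for illustration:

    #JSGF V1.0;
    grammar commands;
    public <command> = open | close | save | exit;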
Example No. 5
    public void Start()
    {
        // Create the session and the audio input source
        session = PXCMSession.CreateInstance();
        source  = session.CreateAudioSource();

        PXCMAudioSource.DeviceInfo dinfo = null;

        // Use the audio device at index 1
        source.QueryDeviceInfo(1, out dinfo);
        source.SetDevice(dinfo);

        session.CreateImpl <PXCMSpeechRecognition>(out sr);

        PXCMSpeechRecognition.ProfileInfo pinfo;
        sr.QueryProfile(out pinfo);
        pinfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_JP_JAPANESE;
        sr.SetProfile(pinfo);

        // Log each recognized sentence via the handler
        handler = new PXCMSpeechRecognition.Handler();
        handler.onRecognition = (x) => Debug.Log(x.scores[0].sentence);
        sr.SetDictation();
        sr.StartRec(source, handler);
    }
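Nothing in this example stops recognition again. A minimal teardown sketch for a Unity component, assuming the SDK objects should be released when the script is destroyed (StopRec and Dispose are standard PXCM calls; the placement here is an assumption):

    // Hypothetical sketch, not part of the original example: stop
    // recognition and release the SDK objects on teardown.
    public void OnDestroy()
    {
        if (sr != null)
        {
            sr.StopRec();
            sr.Dispose();
        }
        if (source != null)
        {
            source.Dispose();
        }
        if (session != null)
        {
            session.Dispose();
        }
    }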
Example No. 6
        // Initialize speech recognition
        private void InitializeSpeechRecognition()
        {
            pxcmStatus sts;
            var        session = senseManager.QuerySession();

            // Create the audio input device
            audioSource = session.CreateAudioSource();
            if (audioSource == null)
            {
                throw new Exception("音声入力デバイスの作成に失敗しました");
            }

            // Enumerate the audio input devices
            TextDesc.Text  = "";
            TextDesc.Text += "Audio input devices\n";

            PXCMAudioSource.DeviceInfo device = null;

            audioSource.ScanDevices();
            for (int i = 0;; ++i)
            {
                PXCMAudioSource.DeviceInfo dinfo;
                sts = audioSource.QueryDeviceInfo(i, out dinfo);
                if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
                {
                    break;
                }

                // Display the audio input device name
                TextDesc.Text += "\t" + dinfo.name + "\n";

                // Use the first device
                if (i == 0)
                {
                    device = dinfo;
                }
            }

            // Set the audio input device
            sts = audioSource.SetDevice(device);
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("音声入力デバイスの設定に失敗しました");
            }


            // Enumerate the speech recognition engines
            TextDesc.Text += "Speech recognition engines\n";

            PXCMSession.ImplDesc inDesc  = new PXCMSession.ImplDesc();
            PXCMSession.ImplDesc outDesc = null;
            PXCMSession.ImplDesc desc    = null;
            inDesc.cuids[0] = PXCMSpeechRecognition.CUID;

            for (int i = 0; ; ++i)
            {
                // Get the speech recognition engine
                sts = session.QueryImpl(inDesc, i, out outDesc);
                if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
                {
                    break;
                }

                // Display the engine's friendly name
                TextDesc.Text += "\t" + outDesc.friendlyName + "\n";

                // Use the first speech recognition engine
                if (i == 0)
                {
                    desc = outDesc;
                }
            }

            // Create the speech recognition engine object
            sts = session.CreateImpl <PXCMSpeechRecognition>(desc, out recognition);
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("音声認識エンジンオブジェクトの作成に失敗しました");
            }

            // Enumerate the supported languages
            PXCMSpeechRecognition.ProfileInfo profile = null;

            for (int j = 0;; ++j)
            {
                // Get a profile held by the speech recognition engine
                PXCMSpeechRecognition.ProfileInfo pinfo;
                sts = recognition.QueryProfile(j, out pinfo);
                if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
                {
                    break;
                }

                // Display the supported language
                TextDesc.Text += "\t\t" + LanguageToString(pinfo.language) + "\n";

                // Use the English profile (switch to Japanese once it is supported)
                if (pinfo.language == PXCMSpeechRecognition.LanguageType.LANGUAGE_US_ENGLISH)
                {
                    profile = pinfo;
                }
            }

            if (profile == null)
            {
                throw new Exception("選択した音声認識エンジンが見つかりませんでした");
            }

            // Set the language to use
            sts = recognition.SetProfile(profile);
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("音声認識エンジンオブジェクトの設定に失敗しました");
            }

            // Set command mode
            SetCommandMode();

            // Create the speech recognition notification handler
            PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
            handler.onRecognition = OnRecognition;

            // Start speech recognition
            sts = recognition.StartRec(audioSource, handler);
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("音声認識の開始に失敗しました");
            }
        }
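SetCommandMode() is called above but not shown. A sketch of what it might do, assuming it builds a small command grammar from a string list the way Example No. 7 does; the command words are invented:

        // Hypothetical sketch of SetCommandMode(): build a command grammar
        // from a string list and make it the active grammar.
        private void SetCommandMode()
        {
            string[] commands = { "start", "stop", "next", "previous" };

            var sts = recognition.BuildGrammarFromStringList(1, commands, null);
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("Failed to build the command grammar");
            }

            sts = recognition.SetGrammar(1);
            if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                throw new Exception("Failed to set the command grammar");
            }
        }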
Example No. 7
        static void SetupRecognizer(PXCMSession session)
        {
            PXCMAudioSource.DeviceInfo dinfo = null;
            if (session != null)
            {
                #region Audio Source

                // session is a PXCMSession instance.
                source = session.CreateAudioSource();

                // Scan and Enumerate audio devices
                source.ScanDevices();

                for (int d = source.QueryDeviceNum() - 1; d >= 0; d--)
                {
                    source.QueryDeviceInfo(d, out dinfo);

                    // Select the last enumerated device and break out of the loop
                    break;
                }
                if (dinfo != null)
                {
                    // Set the active device
                    source.SetDevice(dinfo);
                }

                #endregion

                #region Recognizer Instance

                pxcmStatus sts = session.CreateImpl <PXCMSpeechRecognition>(out sr);

                if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
                {
                    return;
                }

                PXCMSpeechRecognition.ProfileInfo pinfo;
                sr.QueryProfile(0, out pinfo);
                sr.SetProfile(pinfo);

                //sr.SetDictation();

                #endregion

                #region Grammar
                Perintah = new Dictionary <string, Module>();
                // sr is a PXCMSpeechRecognition instance.
                using (var data = new JONGOS_DBEntities())
                {
                    var listCommand = from c in data.Modules
                                      orderby c.ID
                                      select c;
                    foreach (var item in listCommand.Distinct())
                    {
                        Perintah.Add(item.VoiceCommand, item);
                    }
                }
                List <string> cmds = new List <string>();
                foreach (var cmd in Perintah.Keys)
                {
                    cmds.Add(cmd);
                }

                // Build the grammar.
                sr.BuildGrammarFromStringList(1, cmds.ToArray(), null);

                // Set the active grammar.
                sr.SetGrammar(1);
                #endregion

                #region recognition Event
                // Set handler
                PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
                handler.onRecognition = OnRecognition;
                //handler.onAlert = OnAlert;
                // sr is a PXCMSpeechRecognition instance
                pxcmStatus stsrec = sr.StartRec(source, handler);
                if (stsrec < pxcmStatus.PXCM_STATUS_NO_ERROR)
                {
                    Console.WriteLine("Recognizer error!");
                }


                #endregion
            }
        }
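The OnRecognition callback registered above is not shown. A sketch of how it might dispatch through the Perintah dictionary, assuming the top-scoring sentence is used as the lookup key; everything beyond the names used above is an assumption:

        // Hypothetical sketch of the OnRecognition callback: look up the
        // recognized sentence and act on the matching Module.
        static void OnRecognition(PXCMSpeechRecognition.RecognitionData data)
        {
            string sentence = data.scores[0].sentence;

            Module module;
            if (Perintah.TryGetValue(sentence, out module))
            {
                Console.WriteLine("Recognized command: " + sentence);
                // Launch or activate the matched module here.
            }
        }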