void Start()
{
    session = PXCMSession.CreateInstance();
    source = session.CreateAudioSource();
    if (source == null)
    {
        Debug.Log("Error Creating Audio Source");
    }

    // Count the available audio input devices.
    int numOfDevices = 0;
    for (int i = 0; ; i++)
    {
        PXCMAudioSource.DeviceInfo dinfo;
        if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }
        numOfDevices++;
    }
    if (numOfDevices == 0)
    {
        Debug.Log("No Audio Device Found!");
    }

    // Rescan and cache the device names for later selection.
    source.ScanDevices();
    AvailableSources = new string[numOfDevices];
    sourceDeviceInfo = new PXCMAudioSource.DeviceInfo[numOfDevices];
    for (int i = 0; i < numOfDevices; i++)
    {
        PXCMAudioSource.DeviceInfo dinfo;
        if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            AvailableSources[i] = "FailedToQueryDeviceInfo";
            sourceDeviceInfo[i] = null;
            Debug.Log("QueryDeviceInfo Failed for Index " + i.ToString());
        }
        else
        {
            sourceDeviceInfo[i] = dinfo;
            AvailableSources[i] = dinfo.name;
        }
    }
    source.Dispose();

    _isInitialized = true;
    stop = false;
    reset = false;
}
public void Start()
{
    filePath = Application.dataPath + @"\LogData.txt";
    // Create the log file and close the handle immediately so the file is not left locked.
    File.CreateText(filePath).Dispose();

    // Create the session instance
    session = PXCMSession.CreateInstance();

    // Audio input source
    source = session.CreateAudioSource();
    PXCMAudioSource.DeviceInfo dinfo = null;

    // Detect the first device and log its name
    source.QueryDeviceInfo(0, out dinfo);
    source.SetDevice(dinfo);
    Debug.Log(dinfo.name);

    // Speech recognition
    session.CreateImpl<PXCMSpeechRecognition>(out sr);

    // Initial speech recognition settings
    PXCMSpeechRecognition.ProfileInfo pinfo;
    sr.QueryProfile(out pinfo);
    pinfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_JP_JAPANESE;
    sr.SetProfile(pinfo);

    // Pass the callback method to the handler
    handler = new PXCMSpeechRecognition.Handler();
    handler.onRecognition = (x) => Dataoutput(x.scores[0].sentence, x.duration);
    sr.SetDictation();
}
private void PopulateSource()
{
    // "マイク" = "Microphone" (top-level menu label)
    ToolStripMenuItem sm = new ToolStripMenuItem("マイク");
    devices.Clear();

    PXCMAudioSource source = session.CreateAudioSource();
    if (source != null)
    {
        source.ScanDevices();
        for (int i = 0; ; i++)
        {
            PXCMAudioSource.DeviceInfo dinfo;
            if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                break;
            }
            ToolStripMenuItem sm1 = new ToolStripMenuItem(dinfo.name, null, new EventHandler(Source_Item_Click));
            devices[sm1] = dinfo;
            sm.DropDownItems.Add(sm1);
        }
        source.Dispose();
    }

    if (sm.DropDownItems.Count > 0)
    {
        (sm.DropDownItems[0] as ToolStripMenuItem).Checked = true;
    }
    MainMenu.Items.RemoveAt(0);
    MainMenu.Items.Insert(0, sm);
}
// Use this for initialization
void Start()
{
    // Reference to the script that the recognized text is passed to
    textMesh = GameObject.Find("Word");
    controller = textMesh.GetComponent<TextController>();

    // Initialize the speech recognition session
    session = PXCMSession.CreateInstance();
    source = session.CreateAudioSource();

    // Use the device at index 1 (the second audio device)
    PXCMAudioSource.DeviceInfo dinfo = null;
    source.QueryDeviceInfo(1, out dinfo);
    source.SetDevice(dinfo);
    Debug.Log(dinfo.name);

    session.CreateImpl<PXCMSpeechRecognition>(out sr);

    PXCMSpeechRecognition.ProfileInfo pinfo;
    sr.QueryProfile(out pinfo);
    pinfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_JP_JAPANESE;
    sr.SetProfile(pinfo);

    handler = new PXCMSpeechRecognition.Handler();
    handler.onRecognition = (x) => controller.SetText(x.scores[0].sentence);

    sr.SetDictation();
    sr.StartRec(source, handler);
}
public void OnImportsSatisfied()
{
    FLogger.Log(LogType.Debug, "OnImport");
    this.descs = new List<PXCMSession.ImplDesc>();
    this.GetSessionAndSenseManager();

    pxcmStatus sts = pxcmStatus.PXCM_STATUS_NO_ERROR;
    PXCMAudioSource audio = this.session.CreateAudioSource();
    if (audio == null)
    {
        throw new Exception("Could not create audio source.");
    }

    // Enumerate audio sources: scan available devices
    this.deviceInfos = new List<PXCMAudioSource.DeviceInfo>();
    audio.ScanDevices();
    int deviceNum = audio.QueryDeviceNum();
    string[] deviceNames = new string[deviceNum];
    for (int i = 0; i < deviceNum; ++i)
    {
        PXCMAudioSource.DeviceInfo tmpDeviceInfo;
        sts = audio.QueryDeviceInfo(i, out tmpDeviceInfo);
        if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            throw new Exception("Could not get audio device.");
        }
        FLogger.Log(LogType.Debug, "audio device info: " + tmpDeviceInfo.name);
        deviceNames[i] = tmpDeviceInfo.name;
        this.deviceInfos.Add(tmpDeviceInfo);
    }
    EnumManager.UpdateEnum("AudioDevice", deviceNames[0], deviceNames);
    audio.Dispose();

    // Enumerate the available speech recognition engines
    PXCMSession.ImplDesc inDesc = new PXCMSession.ImplDesc();
    inDesc.cuids[0] = PXCMSpeechRecognition.CUID;
    for (int i = 0; ; ++i)
    {
        // Get the next speech recognition engine
        PXCMSession.ImplDesc outDesc = null;
        sts = this.session.QueryImpl(inDesc, i, out outDesc);
        if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }
        FLogger.Log(LogType.Debug, "speech recognition engine: " + outDesc.friendlyName);
        this.descs.Add(outDesc);
    }
}
private PXCMAudioSource FindAudioSource()
{
    PXCMAudioSource audioSource = _session.CreateAudioSource();
    audioSource.ScanDevices();

    int devicesCount = audioSource.QueryDeviceNum();
    var deviceIndex = 0;
    PXCMAudioSource.DeviceInfo deviceInfo;

    // Prefer the camera's microphone array if one is present.
    for (int i = 0; i < devicesCount; i++)
    {
        audioSource.QueryDeviceInfo(i, out deviceInfo);
        if (deviceInfo.name.Contains("Array"))
        {
            deviceIndex = i;
            break;
        }
    }

    audioSource.QueryDeviceInfo(deviceIndex, out deviceInfo);
    audioSource.SetDevice(deviceInfo);
    audioSource.SetVolume(0.8f);
    return audioSource;
}
private void ConfigureRealSenseSpeech()
{
    // Instantiate session and audio source objects
    session = PXCMSession.CreateInstance();
    audioSource = session.CreateAudioSource();

    // Select the first audio device
    PXCMAudioSource.DeviceInfo deviceInfo;
    deviceInfo = new PXCMAudioSource.DeviceInfo();
    audioSource.QueryDeviceInfo(0, out deviceInfo);
    audioSource.SetDevice(deviceInfo);

    // Set the audio recording volume
    audioSource.SetVolume(0.2f);

    // Create a speech recognition instance
    session.CreateImpl<PXCMSpeechRecognition>(out speechRecognition);

    // Initialize the speech recognition module
    PXCMSpeechRecognition.ProfileInfo profileInfo;
    speechRecognition.QueryProfile(0, out profileInfo);
    profileInfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_US_ENGLISH;
    speechRecognition.SetProfile(profileInfo);

    // Build and set the active grammar
    pxcmStatus status = speechRecognition.BuildGrammarFromFile(1, PXCMSpeechRecognition.GrammarFileType.GFT_JSGF, "grammarsvm.jsgf");
    if (status == pxcmStatus.PXCM_STATUS_NO_ERROR)
    {
        speechRecognition.SetGrammar(1);
    }
    else
    {
        MessageBox.Show("Java Speech Grammar Format (JSGF) file not found!");
        this.Close();
    }

    // Display device information
    //lblDeviceInfo.Content = string.Format("[Device: {0}, Language Profile: {1}]", deviceInfo.name, profileInfo.language);

    // Set the speech recognition handler
    PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
    handler.onRecognition = OnRecognition;
    speechRecognition.StartRec(null, handler);
}
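// The samples on this page assign handler.onRecognition to a method named OnRecognition
// that is not shown here. The following is only a minimal sketch of what such a callback
// could look like; the confidence threshold and the label values in the switch are
// assumptions, not taken from the original code.
private void OnRecognition(PXCMSpeechRecognition.RecognitionData data)
{
    // scores[0] holds the best-scoring hypothesis for the utterance.
    var best = data.scores[0];
    if (best.confidence < 50)
    {
        return; // ignore low-confidence results (assumed threshold)
    }

    // Dispatch on the label assigned when the grammar was built; the cases are placeholders.
    switch (best.label)
    {
        case 1: /* e.g. handle the first command */ break;
        case 2: /* e.g. handle the second command */ break;
        default:
            System.Diagnostics.Debug.WriteLine(best.sentence); // log the recognized text
            break;
    }
}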
void AudioSourceCheck()
{
    /* Create the AudioSource instance */
    this.source = session.CreateAudioSource();
    if (this.source != null)
    {
        this.source.ScanDevices();
        for (int i = 0; ; i++)
        {
            PXCMAudioSource.DeviceInfo dinfo;
            if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                break;
            }
            devices.Add(dinfo);
            UnityEngine.Debug.Log("Device : " + dinfo.name);
        }
    }
}
public void Start()
{
    session = PXCMSession.CreateInstance();
    source = session.CreateAudioSource();

    PXCMAudioSource.DeviceInfo dinfo = null;
    source.QueryDeviceInfo(1, out dinfo);
    source.SetDevice(dinfo);

    session.CreateImpl<PXCMSpeechRecognition>(out sr);

    PXCMSpeechRecognition.ProfileInfo pinfo;
    sr.QueryProfile(out pinfo);
    pinfo.language = PXCMSpeechRecognition.LanguageType.LANGUAGE_JP_JAPANESE;
    sr.SetProfile(pinfo);

    handler = new PXCMSpeechRecognition.Handler();
    handler.onRecognition = (x) => Debug.Log(x.scores[0].sentence);

    sr.SetDictation();
    sr.StartRec(source, handler);
}
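// None of the Unity samples above show teardown. A hedged sketch of releasing the
// recognizer when the application quits, assuming sr, source and session are the fields
// initialized in Start(); the stop-before-dispose ordering is an assumption, not shown
// in the original snippets.
void OnApplicationQuit()
{
    if (sr != null)
    {
        sr.StopRec(); // stop delivering onRecognition callbacks
        sr.Dispose();
    }
    if (source != null)
    {
        source.Dispose();
    }
    if (session != null)
    {
        session.Dispose();
    }
}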
void audioSourceCheck()
{
    /* Create the AudioSource instance */
    source = session.CreateAudioSource();
    if (source != null)
    {
        source.ScanDevices();
        for (int i = 0; ; i++)
        {
            PXCMAudioSource.DeviceInfo dinfo;
            if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                break;
            }
            devices.Add(dinfo);
            UnityEngine.Debug.Log("Device : " + dinfo.name);
        }
    }
}
// Initialize speech recognition
private void InitializeSpeechRecognition()
{
    pxcmStatus sts;
    var session = senseManager.QuerySession();

    // Create the audio input device
    audioSource = session.CreateAudioSource();
    if (audioSource == null)
    {
        throw new Exception("音声入力デバイスの作成に失敗しました");
    }

    // Enumerate the audio input devices
    TextDesc.Text = "";
    TextDesc.Text += "音声入力デバイス\n";

    PXCMAudioSource.DeviceInfo device = null;
    audioSource.ScanDevices();
    for (int i = 0; ; ++i)
    {
        PXCMAudioSource.DeviceInfo dinfo;
        sts = audioSource.QueryDeviceInfo(i, out dinfo);
        if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }

        // Display the audio input device name
        TextDesc.Text += "\t" + dinfo.name + "\n";

        // Use the first device
        if (i == 0)
        {
            device = dinfo;
        }
    }

    // Set the audio input device
    sts = audioSource.SetDevice(device);
    if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
    {
        throw new Exception("音声入力デバイスの設定に失敗しました");
    }

    // Enumerate the speech recognition engines
    TextDesc.Text += "音声認識エンジン\n";

    PXCMSession.ImplDesc inDesc = new PXCMSession.ImplDesc();
    PXCMSession.ImplDesc outDesc = null;
    PXCMSession.ImplDesc desc = null;
    inDesc.cuids[0] = PXCMSpeechRecognition.CUID;
    for (int i = 0; ; ++i)
    {
        // Get the next speech recognition engine
        sts = session.QueryImpl(inDesc, i, out outDesc);
        if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }

        // Display the engine name
        TextDesc.Text += "\t" + outDesc.friendlyName + "\n";

        // Use the first engine
        if (i == 0)
        {
            desc = outDesc;
        }
    }

    // Create the speech recognition engine object
    sts = session.CreateImpl<PXCMSpeechRecognition>(desc, out recognition);
    if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
    {
        throw new Exception("音声認識エンジンオブジェクトの作成に失敗しました");
    }

    // Enumerate the supported languages
    PXCMSpeechRecognition.ProfileInfo profile = null;
    for (int j = 0; ; ++j)
    {
        // Get a profile held by the speech recognition engine
        PXCMSpeechRecognition.ProfileInfo pinfo;
        sts = recognition.QueryProfile(j, out pinfo);
        if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }

        // Display the supported language
        TextDesc.Text += "\t\t" + LanguageToString(pinfo.language) + "\n";

        // Use the English profile (switch to Japanese when Japanese support is needed)
        if (pinfo.language == PXCMSpeechRecognition.LanguageType.LANGUAGE_US_ENGLISH)
        {
            profile = pinfo;
        }
    }
    if (profile == null)
    {
        throw new Exception("選択した音声認識エンジンが見つかりませんでした");
    }

    // Set the language to use
    sts = recognition.SetProfile(profile);
    if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
    {
        throw new Exception("音声認識エンジンオブジェクトの設定に失敗しました");
    }

    // Set command mode
    SetCommandMode();

    // Create the recognition notification handler
    PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
    handler.onRecognition = OnRecognition;

    // Start speech recognition
    sts = recognition.StartRec(audioSource, handler);
    if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
    {
        throw new Exception("音声認識の開始に失敗しました");
    }
}
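// SetCommandMode(), called above, is not shown on this page. A minimal sketch of what a
// command-mode setup could look like, reusing the grammar API that other samples here
// use; the command list and grammar id 1 are assumptions.
private void SetCommandMode()
{
    // Register a small fixed vocabulary and make it the active grammar.
    string[] commands = { "start", "stop", "next", "back" }; // hypothetical commands
    recognition.BuildGrammarFromStringList(1, commands, null);
    recognition.SetGrammar(1);
}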
static void SetupRecognizer(PXCMSession session)
{
    PXCMAudioSource.DeviceInfo dinfo = null;

    if (session != null)
    {
        #region Audio Source
        // session is a PXCMSession instance.
        source = session.CreateAudioSource();

        // Scan and enumerate audio devices
        source.ScanDevices();
        for (int d = source.QueryDeviceNum() - 1; d >= 0; d--)
        {
            source.QueryDeviceInfo(d, out dinfo);
            // Select one and break out of the loop
            break;
        }
        if (dinfo != null)
        {
            // Set the active device
            source.SetDevice(dinfo);
        }
        #endregion

        #region Recognizer Instance
        pxcmStatus sts = session.CreateImpl<PXCMSpeechRecognition>(out sr);
        if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            return;
        }

        PXCMSpeechRecognition.ProfileInfo pinfo;
        sr.QueryProfile(0, out pinfo);
        sr.SetProfile(pinfo);
        //sr.SetDictation();
        #endregion

        #region Grammar
        Perintah = new Dictionary<string, Module>();
        // sr is a PXCMSpeechRecognition instance.
        using (var data = new JONGOS_DBEntities())
        {
            var listCommand = from c in data.Modules
                              orderby c.ID
                              select c;
            foreach (var item in listCommand.Distinct())
            {
                Perintah.Add(item.VoiceCommand, item);
            }
        }

        List<string> cmds = new List<string>();
        foreach (var cmd in Perintah.Keys)
        {
            cmds.Add(cmd);
        }

        // Build the grammar.
        sr.BuildGrammarFromStringList(1, cmds.ToArray(), null);
        // Set the active grammar.
        sr.SetGrammar(1);
        #endregion

        #region recognition Event
        // Set handler
        PXCMSpeechRecognition.Handler handler = new PXCMSpeechRecognition.Handler();
        handler.onRecognition = OnRecognition;
        //handler.onAlert = OnAlert;

        // sr is a PXCMSpeechRecognition instance
        pxcmStatus stsrec = sr.StartRec(source, handler);
        if (stsrec < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            Console.WriteLine("Recognizer error!");
        }
        #endregion
    }
}
/// <summary>
/// Initializes the speech module.
/// </summary>
public bool InitalizeSpeech()
{
    if (_isInitialized)
    {
        return true;
    }

    ErrorDetected = SpeechManagerErrorType.NoError;
    SpeechModuleMode = SpeechModuleModeType.CommandControl;
    _isInitialized = false;
    session = PXCMSession.CreateInstance();

    // Get sources
    source = session.CreateAudioSource();
    if (source == null)
    {
        SetError(SpeechManagerErrorType.CreateAudioSourceFailed);
        return false;
    }

    int numOfDevices = 0;
    for (int i = 0; ; i++)
    {
        PXCMAudioSource.DeviceInfo dinfo;
        if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }
        numOfDevices++;
    }
    if (numOfDevices == 0)
    {
        Debug.Log("No Audio Device Found!");
        SetError(SpeechManagerErrorType.NoAudioDeviceFound);
        return false;
    }

    source.ScanDevices();
    AvailableSources = new string[numOfDevices];
    sourceDeviceInfo = new PXCMAudioSource.DeviceInfo[numOfDevices];
    for (int i = 0; i < numOfDevices; i++)
    {
        PXCMAudioSource.DeviceInfo dinfo;
        if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            AvailableSources[i] = "FailedToQueryDeviceInfo";
            sourceDeviceInfo[i] = null;
            Debug.Log("QueryDeviceInfo Failed for Index " + i.ToString());
            SetError(SpeechManagerErrorType.QueryDeviceInfoFailed);
        }
        else
        {
            sourceDeviceInfo[i] = dinfo;
            AvailableSources[i] = dinfo.name;
        }
    }
    source.Dispose();

    // Get modules
    PXCMSession.ImplDesc desc = new PXCMSession.ImplDesc();
    int NumOfModules = 0;
    PXCMSession.ImplDesc desc1;
    desc.cuids[0] = PXCMSpeechRecognition.CUID;
    for (int i = 0; ; i++)
    {
        if (session.QueryImpl(desc, i, out desc1) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }
        NumOfModules++;
    }
    if (NumOfModules == 0)
    {
        Debug.Log("No Audio Modules Found!");
        SetError(SpeechManagerErrorType.NoAudioModulesFound);
        return false;
    }

    AvailableModules = new string[NumOfModules];
    modulesIuID = new int[NumOfModules];
    for (int i = 0; i < NumOfModules; i++)
    {
        if (session.QueryImpl(desc, i, out desc1) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            AvailableModules[i] = "FailedToQueryModuleInfo";
            Debug.Log("QueryImpl Failed for Index " + i.ToString());
            SetError(SpeechManagerErrorType.QueryImplFailed);
        }
        else
        {
            AvailableModules[i] = desc1.friendlyName;
            modulesIuID[i] = desc1.iuid;
        }
    }

    // Get languages
    //PXCMSession.ImplDesc desc = new PXCMSession.ImplDesc();
    //desc.cuids[0] = PXCMSpeechRecognition.CUID;
    desc.iuid = -1;
    for (int i = 0; i < NumOfModules; i++)
    {
        if (!AvailableModules[i].Equals("FailedToQueryModuleInfo"))
        {
            desc.iuid = modulesIuID[i];
            break;
        }
    }
    if (desc.iuid == -1)
    {
        Debug.Log("No Valid Module Found!");
        SetError(SpeechManagerErrorType.NoValidModuleFound);
        return false;
    }

    PXCMSpeechRecognition vrec;
    if (session.CreateImpl<PXCMSpeechRecognition>(desc, out vrec) < pxcmStatus.PXCM_STATUS_NO_ERROR)
    {
        Debug.Log("CreateImpl for Languages Failed!");
        SetError(SpeechManagerErrorType.CreateImplFailed);
        return false;
    }

    int NumOfLanguages = 0;
    for (int i = 0; ; i++)
    {
        PXCMSpeechRecognition.ProfileInfo pinfo;
        if (vrec.QueryProfile(i, out pinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            break;
        }
        NumOfLanguages++;
    }

    AvailableLanguages = new string[NumOfLanguages];
    for (int i = 0; i < NumOfLanguages; i++)
    {
        PXCMSpeechRecognition.ProfileInfo pinfo;
        if (vrec.QueryProfile(i, out pinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
        {
            AvailableLanguages[i] = "FailedToQueryProfile";
            Debug.Log("QueryProfile for Languages Failed!");
            SetError(SpeechManagerErrorType.QueryProfileFailed);
        }
        else
        {
            AvailableLanguages[i] = LanguageToString(pinfo.language);
        }
    }
    vrec.Dispose();

    _isInitialized = true;
    return true;
}
public MainWindow()
{
    InitializeComponent();

    #region Hand
    nodes = new PXCMHandData.JointData[][]
    {
        new PXCMHandData.JointData[0x20],
        new PXCMHandData.JointData[0x20]
    };
    xValues = new float[arraySize];
    yValues = new float[arraySize];
    zValues = new float[arraySize];
    #endregion Hand

    // Turn the test mode for drone control ON/OFF
    TestModeCheck.IsChecked = true;

    genericItems = new ObservableCollection<GenericItem>();

    se = PXCMSession.CreateInstance();
    if (se != null)
    {
        //processingThread = new Thread(new ThreadStart(ProcessingHandThread));
        //senseManager = PXCMSenseManager.CreateInstance();
        //senseManager.EnableHand();
        //senseManager.Init();
        //ConfigureHandModule();
        //processingThread.Start();

        // session is a PXCMSession instance.
        audiosource = se.CreateAudioSource();

        // Scan and enumerate audio devices; after the loop dinfo holds the device at index 0.
        audiosource.ScanDevices();
        PXCMAudioSource.DeviceInfo dinfo = null;
        for (int d = audiosource.QueryDeviceNum() - 1; d >= 0; d--)
        {
            audiosource.QueryDeviceInfo(d, out dinfo);
        }
        audiosource.SetDevice(dinfo);

        se.CreateImpl<PXCMSpeechRecognition>(out sr);
        PXCMSpeechRecognition.ProfileInfo pinfo;
        sr.QueryProfile(0, out pinfo);
        sr.SetProfile(pinfo);

        // sr is a PXCMSpeechRecognition instance.
        String[] cmds = new String[]
        {
            "Takeoff", "Land", "Rotate Left", "Rotate Right", "Advance", "Back",
            "Up", "Down", "Left", "Right", "Stop", "Dance"
        };
        int[] labels = new int[] { 1, 2, 4, 5, 8, 16, 32, 64, 128, 256, 512, 1024 };

        // Build the grammar.
        sr.BuildGrammarFromStringList(1, cmds, labels);
        // Set the active grammar.
        sr.SetGrammar(1);

        // Set handler
        RecognitionHandler = new PXCMSpeechRecognition.Handler();
        RecognitionHandler.onRecognition = OnRecognition;

        Legenda.Items.Add("------ Available Commands ------");
        foreach (var cmd in cmds)
        {
            Legenda.Items.Add(cmd);
        }
    }
}
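// The constructor above builds the grammar and the handler but never calls StartRec, so
// recognition is presumably started elsewhere (for example from a UI event). A hedged
// sketch of such a start/stop pair, reusing the audiosource, sr and RecognitionHandler
// fields initialized in the constructor; the method names are hypothetical.
private void StartSpeechRecognition()
{
    pxcmStatus sts = sr.StartRec(audiosource, RecognitionHandler);
    if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR)
    {
        MessageBox.Show("StartRec failed: " + sts);
    }
}

private void StopSpeechRecognition()
{
    sr.StopRec();
}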
private void PopulateSource()
{
    devices.Clear();
    Debug.Log("PopulateSource");

    source = session.CreateAudioSource();
    if (source != null)
    {
        source.ScanDevices();
        for (int i = 0; ; i++)
        {
            PXCMAudioSource.DeviceInfo dinfo;
            if (source.QueryDeviceInfo(i, out dinfo) < pxcmStatus.PXCM_STATUS_NO_ERROR)
            {
                break;
            }
            devices[i] = dinfo;
        }

        sorceList = new string[devices.Count];
        for (int i = 0; i < devices.Count; i++)
        {
            sorceList[i] = devices[i].name;
        }
        source.Dispose();
    }
}