public MainWindowViewModel(IRegionManager regionManager, SpeechRecognition speechRecognition)
{
    // Resolve the Files directory relative to the executable.
    string exeFile = new System.Uri(Assembly.GetEntryAssembly().CodeBase).AbsolutePath;
    string exeDir = Path.GetDirectoryName(exeFile);
    _filesPath = Path.Combine(exeDir, @"..\..\Files").Replace("%20", " ");

    // Read the ASCII-art banners using DOS code page 850.
    ASCII = File.ReadAllText(Path.Combine(_filesPath, "ASCII.txt"), Encoding.GetEncoding(850));
    ASCII_Minimize = File.ReadAllText(Path.Combine(_filesPath, "ASCII_Minimize.txt"), Encoding.GetEncoding(850));
    ASCII_Close = File.ReadAllText(Path.Combine(_filesPath, "ASCII_Close.txt"), Encoding.GetEncoding(850));

    regionManager.RegisterViewWithRegion(Names.contentRegion, typeof(StartScreenView));
    SpeechRecognition = speechRecognition;

    MinimizeCommand = new DelegateCommand(MinimizeApp);
    CloseCommand = new DelegateCommand(CloseApp);
    ClosingCommand = new DelegateCommand(Closing);
}
/// <summary>
/// Called when the Devices main screen first appears. Loads the header with the
/// Speech Recognition icon, refreshes the devices list and sets up the
/// edit/done and settings buttons.
/// </summary>
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    SpeechRecognition.RequestAuthorization();

    // Fix the bottom position of the view, so that icons appear at the same place when reloaded.
    bottomOfView = TableView.Bounds.Bottom;
    DevicesTable.TableFooterView = GetTableViewFooter();
    RefreshDeviceList();

    // Store the light gray color in Globals; it is used in the Edit device (change name) screen.
    Globals.DefaultLightGray = TableView.BackgroundColor;

    // Allow tapping a row in editing mode for changing the name.
    DevicesTable.AllowsSelectionDuringEditing = true;

    done = new UIBarButtonItem(UIBarButtonSystemItem.Done, (s, e) => CancelEditingState());
    edit = new UIBarButtonItem(UIBarButtonSystemItem.Edit, (s, e) => SetEditingState());

    // Pull to refresh.
    RefreshControl = new UIRefreshControl();
    RefreshControl.ValueChanged += RefreshTable;
    TableView.Add(RefreshControl);

    // Set the left button initially to edit and the right button to settings.
    NavigationItem.LeftBarButtonItem = edit;
    NavigationItem.RightBarButtonItem = SettingsButton;
}
private void Delete()
{
    if (SelectedItem != null)
    {
        int temp = SelectedIndex;
        if (temp >= 0)
        {
            SpeechRecognition.Commands.RemoveAt(temp);
        }

        // Keep a sensible selection after the removal.
        if (SpeechRecognition.Commands.Count > 0)
        {
            SelectedIndex = temp < SpeechRecognition.Commands.Count ? temp : temp - 1;
        }
        else
        {
            SelectedItem = null;
        }

        SpeechRecognition.SaveToJSON();
    }
}
public void MicPocTest()
{
    SpeechRecognition sp = new SpeechRecognition();

    // Reader for the XML grammar file; only needed by the commented-out LoadGrammar call below.
    StreamReader sr = new StreamReader(@"Z:\Projects\TOCI_PiastCode\Toci.Piastcode.Social.Entities.Interfaces\Toci.Piastcode.VoiceRecognition.Poc\data\grammar.xml");

    // Build a simple in-memory grammar from a fixed set of choices.
    GrammarBuilder builder = new GrammarBuilder();
    Choices ch = new Choices("Erley", "play", "create", "public", "interface");
    builder.Append(ch);

    //sp.LoadGrammar(sr.ReadToEnd().ToLower(), "exGrammar");
    sp.LoadGrammar(new Grammar(builder));

    //sp.SpeechRecognitionEngine = new SpeechRecognitionEngine(CultureInfo.CurrentCulture);
    sp.SpeechRecognitionEngine.SpeechDetected += SpeechRecognitionEngine_SpeechDetected;
    sp.SetInputToDefaultAudioDevice();
    sp.Recognized += Sp_Recognized;
    sp.NotRecognized += Sp_NotRecognized;
    sp.Start();

    AudioState state = sp.SpeechRecognitionEngine.AudioState;
    //sp.Stop();
}
private void DoneConfirm()
{
    if (_newCommand)
    {
        // Insert the new command right after the currently selected one.
        SelectedCommandIndex++;
        SpeechRecognition.Commands.Insert(SelectedCommandIndex, SelectedCommand);
    }
    else
    {
        SpeechRecognition.Commands[SelectedCommandIndex] = SelectedCommand;
    }

    SpeechRecognition.SaveToJSON();

    // Make the phrase and all action parameters recognizable.
    SpeechRecognition.AddVocabulary(SelectedCommand.Phrase);
    foreach (CommandAction action in SelectedCommand.CommandActions)
    {
        SpeechRecognition.AddVocabulary(action.Parameter);
    }

    NavigationParameters parameters = new NavigationParameters
    {
        { "SelectedIndex", SelectedCommandIndex }
    };
    _regionManager.RequestNavigate(Names.contentRegion, Names.manageCommandsView, parameters);
}
public static void Start()
{
    // |-------[ Folders & Files ]-------| //
    if (Reference.Folders.Count() > 0)
    {
        // Create folders
        foreach (String _Folder in Reference.Folders)
        {
            if (!String.IsNullOrEmpty(_Folder))
            {
                Directory.CreateDirectory(_Folder);
            }
        }
    }

    // IronPythonLib
    File.WriteAllBytes(Reference.IronModules, Resources.IronPythonLib);

    // |-------[ Settings ]-------| //
    Settings.Init();

    // |-------[ Languages ]-------| //
    Langs.Init();

    // |-------[ Visual ]-------| //
    SkinManager.ApplySkin();

    // |-------[ Updates ]-------| //
    // Delete temporary files
    Update.DeleteTemporaryFiles();

    // An update is available
    if (Reference.JsonSettings.Check_for_updates && Update.UpdateAvaible())
    {
        // Ask the user to install it, and then install
        if (Update.InstallUpdate(true))
        {
            // The user installed the update; stop execution
            return;
        }
    }

    // |-------[ Views ]-------| //
    Reference.MainWindow = new Main();

    // |-------[ Recognition ]-------| //
    Synthesizer.Init();
    SpeechRecognition.Init();

    // |-------[ Profiles ]-------| //
    Profiles.Init();

    // Hide SplashScreen
    Application.Current.MainWindow.Hide();

    // Show MainWindow
    Reference.MainWindow.Show();

    Utils.Log(Langs.Get("wolfy_loaded"));
}
/// <summary>
/// Initializes the speech recognition object.
/// </summary>
public async void Initialize()
{
    speechRecognition = new SpeechRecognition();
    if (await speechRecognition.Initialize())
    {
        ChangeState(EarsState.Initialized);
    }
}
private void Recognition()
{
    if (recorder.Starting)
    {
        Stop();
    }

    // Invoke the delegate to recognize the recorded speech.
    SpeechRecognition?.Invoke();
}
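Both this method and the button-click handler later in this listing invoke a member named SpeechRecognition as a delegate. A minimal sketch of the declaration they appear to assume (hypothetical, not taken from the original source):

// Hypothetical declaration on the containing class: a parameterless
// callback raised after recording stops to trigger recognition.
public event Action SpeechRecognition;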
// Start recording
private async Task OnClickStart()
{
    if (!IsListening)
    {
        IsListening = true;
        await SpeechRecognition.StartAsync();
    }
}
// Stop recording
private async Task OnClickStop()
{
    if (IsListening)
    {
        IsListening = false;
        await SpeechRecognition.StopAsync();
    }
}
public SpeechRecognitionView()
{
    this.mw = (MainWindow)Application.Current.MainWindow;
    this.sr = this.mw.SpeechRecogniton;
    this.sr.srPaused += pauseSRView;
    this.sr.srResumed += unpauseSRView;
    InitializeComponent();
}
public void OnChangeState(SpeechRecognition.State newState)
{
    if (newState == SpeechRecognition.State.NOT_INITIALIZED)
    {
        SpeechRecognition.StartListening();
    }
}
private void Start()
{
    activeSword = null;
    state = LabelState.Idle;
    audio = GetComponent<AudioSource>();
    speechRecognition = FindObjectOfType<SpeechRecognition>();
    speechRecognition.OnSpeechRecognized += NameSword;
}
// Use this for initialization
void Awake()
{
    // The next line is added to avoid the "not used" warning.
    touchToListenRect.Contains(new Vector3(1, 1, 1));

    // Enforce a single persistent instance.
    if (instance != null)
    {
        Destroy(this.gameObject);
        return;
    }
    DontDestroyOnLoad(this.gameObject);
    instance = this;

    speechDictionary = gameObject.GetComponent<SpeechDictionary>();
    speechDictionary.ReloadDictionary();

    // Android SpeechRecognizer error codes.
    errorMessages.Add(1, "Network operation timed out.");
    errorMessages.Add(2, "Other network related errors.");
    errorMessages.Add(3, "Audio recording error.");
    errorMessages.Add(4, "Server sends error status.");
    errorMessages.Add(5, "Other client side errors.");
    errorMessages.Add(6, "No speech input.");
    errorMessages.Add(7, "No recognition result matched.");
    errorMessages.Add(8, "RecognitionService busy.");
    errorMessages.Add(9, "Insufficient permissions.");

#if UNITY_ANDROID && !UNITY_EDITOR
    AndroidJavaClass jc = new AndroidJavaClass("com.unity3d.player.UnityPlayer");
    currentActivity = jc.GetStatic<AndroidJavaObject>("currentActivity");
    speechRecognition = new AndroidJavaClass("be.jannesplyson.unity3dspeechrecognition.Unity3DSpeechRecognition");
    isRecognitionAvailable = speechRecognition.CallStatic<bool>("isRecognitionAvailable", currentActivity);
    if (isRecognitionAvailable)
    {
        // Push the configuration to the Java side.
        speechRecognition.SetStatic<int>("maxResults", maxResults);
        speechRecognition.SetStatic<string>("preferredLanguage", preferredLanguage);
        speechRecognition.SetStatic<bool>("enableOnBeginningOfSpeech", enableOnBeginningOfSpeech);
        speechRecognition.SetStatic<bool>("enableOnBufferReceived", enableOnBufferReceived);
        speechRecognition.SetStatic<bool>("enableOnEndOfSpeech", enableOnEndOfSpeech);
        speechRecognition.SetStatic<bool>("enableOnEvent", enableOnEvent);
        speechRecognition.SetStatic<bool>("enableOnPartialResults", enableOnPartialResults);
        speechRecognition.SetStatic<bool>("enableOnReadyForSpeech", enableOnReadyForSpeech);
        speechRecognition.SetStatic<bool>("enableOnRmsChanged", enableOnRmsChanged);
        speechRecognition.SetStatic<bool>("autoRestart", autoRestart);
        speechRecognition.SetStatic<bool>("autoRestartOnResume", autoRestartOnResume);
        speechRecognition.SetStatic<float>("autoRestartAmpThreshold", autoRestartAmpThreshold);

        // Read the configured values back from the Java side.
        maxResultsJni = speechRecognition.GetStatic<int>("maxResults");
        preferredLanguageJni = speechRecognition.GetStatic<string>("preferredLanguage");
        enableOnBeginningOfSpeechJni = speechRecognition.GetStatic<bool>("enableOnBeginningOfSpeech");
        enableOnBufferReceivedJni = speechRecognition.GetStatic<bool>("enableOnBufferReceived");
        enableOnEndOfSpeechJni = speechRecognition.GetStatic<bool>("enableOnEndOfSpeech");
        enableOnEventJni = speechRecognition.GetStatic<bool>("enableOnEvent");
        enableOnPartialResultsJni = speechRecognition.GetStatic<bool>("enableOnPartialResults");
        enableOnReadyForSpeechJni = speechRecognition.GetStatic<bool>("enableOnReadyForSpeech");
        enableOnRmsChangedJni = speechRecognition.GetStatic<bool>("enableOnRmsChanged");
        autoRestartJni = speechRecognition.GetStatic<bool>("autoRestart");
        autoRestartOnResumeJni = speechRecognition.GetStatic<bool>("autoRestartOnResume");
        autoRestartAmpThresholdJni = speechRecognition.GetStatic<float>("autoRestartAmpThreshold");

        speechRecognition.CallStatic("initSpeechRecognition", currentActivity);
    }
#endif
}
void Awake()
{
    // Enforce a single instance.
    if (Instance != null)
    {
        Destroy(gameObject);
    }
    else
    {
        Instance = this;
    }

    m_DictationRecognizer = new DictationRecognizer();

    m_DictationRecognizer.DictationResult += (text, confidence) =>
    {
        waitingForFinalResult = false;
        Dbg.Log(LogMessageType.SPEECH_REC_FINAL, text);
        //AutoGazeBehavior.Instance.EventUserStartedSpeaking();
        if (m_speechBox != null)
        {
            m_speechBox.text = text;
        }
        Speaker.Speak(text, null, null, true, 2, 1, 1, "");
    };

    m_DictationRecognizer.DictationHypothesis += (text) =>
    {
        if (!waitingForFinalResult)
        {
            waitingForFinalResult = true;
            AutonomousGazeBehavior.Instance?.EventUserStartedSpeaking();
        }
        Dbg.Log(LogMessageType.SPEECH_REC_HYPOTHESIS, text);
        if (m_speechBox)
        {
            m_speechBox.text = text + "...";
        }
    };

    m_DictationRecognizer.DictationComplete += (completionCause) =>
    {
        if (completionCause != DictationCompletionCause.Complete)
        {
            Debug.LogErrorFormat("Dictation completed unsuccessfully: {0}.", completionCause);
        }
        // StartSpeechRecognition();
    };

    m_DictationRecognizer.DictationError += (error, hresult) =>
    {
        Debug.LogErrorFormat("Dictation error: {0}; HResult = {1}.", error, hresult);
    };

    m_DictationRecognizer.AutoSilenceTimeoutSeconds = 600;
    m_DictationRecognizer.InitialSilenceTimeoutSeconds = 600;
    //m_DictationRecognizer.Start();
}
public void setWiimotesObject(Wiimotes pWiimotes)
{
    mWiimotes = pWiimotes;
    mWiimotes.WiimoteDisconnectedEvent += new Wiimotes.OnWiimoteDisconnectedEvent(OnWiimoteDisconnectedEvent);

    mSpeechRecognition = SpeechRecognition.getSpeechRecognition();
    mSpeechRecognition.VoiceCommandReceivedEvent += new SpeechRecognition.OnVoiceCommandReceivedEvent(OnVoiceCommandReceivedEvent);
    mSpeechRecognition.startEngine();
}
private async Task Initialze()
{
    if (LightningProvider.IsLightningEnabled)
    {
        LowLevelDevicesController.DefaultProvider = LightningProvider.GetAggregateProvider();
    }
    else
    {
        throw new Exception("Lightning drivers not enabled. Please enable Lightning drivers.");
    }

    _camera = new Camera();
    await _camera.Initialize();

    SpeedSensor.Initialize();
    SpeedSensor.Start();

    SpeechSynthesis.Initialze();
    await AudioPlayerController.Initialize();

    _accelerometerSensor = new AccelerometerGyroscopeSensor();
    await _accelerometerSensor.Initialize();
    _accelerometerSensor.Start();

    _automaticSpeakController = new AutomaticSpeakController(_accelerometerSensor);

    _motorController = new MotorController();
    await _motorController.Initialize(_automaticSpeakController);

    _servoController = new ServoController();
    await _servoController.Initialize();

    _distanceMeasurementSensor = new DistanceMeasurementSensor();
    await _distanceMeasurementSensor.Initialize(I2C_ADDRESS_SERVO);

    _automaticDrive = new AutomaticDrive(_motorController, _servoController, _distanceMeasurementSensor);

    _speechRecognation = new SpeechRecognition();
    await _speechRecognation.Initialze(_motorController, _servoController, _automaticDrive);
    _speechRecognation.Start();

    _gamepadController = new GamepadController(_motorController, _servoController, _automaticDrive, _accelerometerSensor);

    _camera.Start();

    _httpServerController = new HttpServerController(_motorController, _servoController, _automaticDrive, _camera);

    SystemController.Initialize(_accelerometerSensor, _automaticSpeakController, _motorController, _servoController,
                                _automaticDrive, _camera, _httpServerController, _speechRecognation, _gamepadController);

    await SystemController.SetAudioRenderVolume(AUDIO_RENDER_VOLUME, true);
    await SystemController.SetAudioCaptureVolume(AUDIO_CAPTURE_VOLUME, true);

    await AudioPlayerController.PlayAndWaitAsync(AudioName.Welcome);

    _automaticSpeakController.Start();
}
public static void SpeechRecognitionFromFile(string filePath)
{
    Console.WriteLine($"Speech recognition on file {filePath}");

    // Detect spoken text.
    string textSpoken = SpeechRecognition.SpeechToText(filePath);

    Console.WriteLine("Text spoken is:");
    Console.WriteLine(textSpoken);
}
// Recognize the recording
private void button_Recognition_Click(object sender, EventArgs e)
{
    //if (recorder.RecordedTime == -1)
    //    return;
    if (recorder.Starting)
    {
        button2_Stop_Click(null, null);
    }

    // Invoke the delegate to recognize the recorded speech.
    SpeechRecognition?.Invoke();
}
public MainPage()
{
    this.InitializeComponent();

    var controller = new Controller();
    engine = new Engine();
    engine.GameDifficulty = Engine.Difficulty.Easy;
    engine.NewGame();

    var reco = new SpeechRecognition(engine, controller);
}
public StartScreenViewModel(IRegionManager regionManager, IDialogService dialogService, SpeechRecognition speechRecognition)
{
    _regionManager = regionManager;
    _speechRecognition = speechRecognition;

    StartCommand = new DelegateCommand(Start);
    StopCommand = new DelegateCommand(Stop);
    OpenManageCommandsViewCommand = new DelegateCommand(OpenManageCommandsView);
}
private void Duplicate()
{
    if (SelectedIndex > -1)
    {
        // Insert a copy right after the selected command and select it.
        SpeechRecognition.Commands.Insert(SelectedIndex + 1, new Command(SelectedItem));
        SelectedIndex++;
        SpeechRecognition.SaveToJSON();
    }
}
public void ReloadButtons(bool isEditing)
{
    RemoveButtons();

    // Voice control / add device button
    UIButton button = new UIButton(UIButtonType.System);
    button.Frame = new CGRect(TableView.Bounds.Width - IconDimension - Padding,
                              bottomOfView - TableViewFooterHeight - Padding,
                              IconDimension, IconDimension);
    if (isEditing)
    {
        button.SetBackgroundImage(UIImage.FromBundle(strings.addDeviceIcon), UIControlState.Normal);
    }
    else
    {
        button.SetBackgroundImage(UIImage.FromBundle(strings.voiceControlIcon), UIControlState.Normal);
    }

    button.TouchDown += (object sender, EventArgs e) =>
    {
        if (!isEditing)
        {
            speechRecognizer = new SpeechRecognition(this);
            speechRecognizer.StartRecording(out int warningStatus);
            if (warningStatus == (int)Warning.AccessDenied)
            {
                // Access to speech recognition denied
                WarningMessage.Display(strings.speechAccessDenied, strings.speechAllowAccess, this);
            }
            else if (warningStatus == (int)Warning.RecordProblem)
            {
                // Couldn't start speech recording
                WarningMessage.Display(strings.speechStartRecordProblem, strings.tryAgain, this);
            }
        }
    };

    button.TouchUpInside += (object sender, EventArgs e) =>
    {
        if (isEditing)
        {
            // Segue to add device
            ((TableSourceDevicesMain)DevicesTable.Source).InsertAction();
        }
        else
        {
            speechRecognizer.StopRecording();
        }
    };

    button.TouchDragExit += (object sender, EventArgs e) =>
    {
        if (!isEditing)
        {
            speechRecognizer.CancelRecording();
        }
    };

    ParentViewController.View.AddSubview(button);
}
private void DoneCancel()
{
    SpeechRecognition.SaveToJSON();

    NavigationParameters parameters = new NavigationParameters
    {
        { "SelectedIndex", SelectedCommandIndex }
    };
    _regionManager.RequestNavigate(Names.contentRegion, Names.manageCommandsView, parameters);
}
public void OnChangeState(SpeechRecognition.State newState)
{
    /*
    if (newState == SpeechRecognition.State.IDLE)
    {
        SpeechRecognition.StartListening();
    }
    */
}
private IEnumerator Start()
{
    speechRecognition = FindObjectOfType<SpeechRecognition>();
    speechRecognition.OnSpeechRecognized += UpdateUI;

    // Wait until the mic is initialized before we set up the UI.
    while (speechRecognition.mic == null)
    {
        yield return null;
    }
    SetupUI();
}
/// <summary>
/// Defines what happens after the application is launched. It initializes SpeechRecognition and retrieves user defaults from memory.
/// It checks the user defaults to perform the appropriate action: if the previous login was local and the default values are still valid,
/// it goes directly to the devices main screen, and likewise for global login.
/// </summary>
public override bool FinishedLaunching(UIApplication application, NSDictionary launchOptions)
{
    // Used for UITesting
    Xamarin.Calabash.Start();

    SpeechRecognition.RequestAuthorization();

    NSUserDefaults userDefaults = NSUserDefaults.StandardUserDefaults;
    string defaultLocal = userDefaults.StringForKey(strings.defaultsLocalHestia);
    defaultIP = userDefaults.StringForKey(strings.defaultsIpHestia);
    defaultPort = userDefaults.StringForKey(strings.defaultsPortHestia);
    defaultServername = userDefaults.StringForKey(strings.defaultsServerNameHestia);
    defaultAuth0AccessToken = userDefaults.StringForKey(strings.defaultsAccessTokenHestia);

    // The main window the app lives in
    Window = new UIWindow(UIScreen.MainScreen.Bounds);

    // Check if defaults for local/global are present
    if (defaultLocal == bool.TrueString)
    {
        Globals.LocalLogin = true;
        // If the server is valid, go directly to the Devices main screen
        if (IsServerValid(defaultIP))
        {
            UINavigationController navigationController =
                devices2Storyboard.InstantiateViewController(strings.navigationControllerDevicesMain) as UINavigationController;
            Window.RootViewController = navigationController;
            // Make key and visible to be able to present a possible alert window
            Window.MakeKeyAndVisible();
            SetGlobalsToDefaultsLocalLogin();
        }
        else
        {
            // Server is not valid. Go to the server connect screen.
            Window.RootViewController = devices2Storyboard.InstantiateInitialViewController();
        }
    }
    else if (defaultLocal == bool.FalseString && defaultAuth0AccessToken != null)
    {
        Globals.LocalLogin = false;
        Window.RootViewController = devices2Storyboard.InstantiateViewController(strings.navigationControllerServerSelectList);
        // Make key and visible to be able to present a possible alert window
        Window.MakeKeyAndVisible();
        SetGlobalsToDefaultsGlobalLogin();
    }
    else
    {
        // No previous login information available. Go to the local/global choose screen.
        Window.RootViewController = mainStoryboard.InstantiateInitialViewController() as UIViewControllerLocalGlobal;
    }

    Window.MakeKeyAndVisible();
    return true;
}
void Start()
{
    // Load the sentence list from StreamingAssets.
    var sr = new StreamReader(Path.Combine(Application.streamingAssetsPath, fileName));
    var fileContents = sr.ReadToEnd();
    sr.Close();
    lines = fileContents.Split('\n');

    getNewSentence(level + 3);

    srec = gameObject.GetComponent<SpeechRecognition>();
    srec.loadKeywords();
}
private void FrmMain_Load(object sender, EventArgs e)
{
    txtReceive.UseWinFormControl();

    txtReceive.SetDefaultStyle(12);
    txtSend.SetDefaultStyle(12);
    numMutilSend.SetDefaultStyle(12);

    gbReceive.Tag = gbReceive.Text;
    gbSend.Tag = gbSend.Text;

    _task.ContinueWith(t =>
    {
        var list = EnumHelper.GetDescriptions<WorkModes>().Select(kv => kv.Value).ToList();
        foreach (var item in t.Result)
        {
            list.Add(item.Name);
        }
        this.Invoke(() =>
        {
            cbMode.DataSource = list;
            //cbMode.SelectedIndex = 0;
        });
    });

    cbAddr.DropDownStyle = ComboBoxStyle.DropDownList;
    cbAddr.DataSource = GetIPs();

    var cfg = NetConfig.Current;
    if (cfg.Port > 0)
    {
        numPort.Value = cfg.Port;
    }

    // Load the saved colors
    UIConfig.Apply(txtReceive);

    LoadConfig();

    // Speech recognition: register the voice commands
    Task.Factory.StartNew(() =>
    {
        SpeechRecognition.Register("打开", () => this.Invoke(Connect));                          // "open"
        SpeechRecognition.Register("关闭", () => this.Invoke(Disconnect));                       // "close"
        SpeechRecognition.Register("退出", () => Application.Exit());                            // "exit"
        SpeechRecognition.Register("发送", () => this.Invoke(() => btnSend_Click(null, null)));  // "send"

        // Log the valid voice recognition commands.
        XTrace.WriteLine("有效的语音识别命令:{0}", SpeechRecognition.GetAllKeys().Join());
    });
}
public MainWindow()
{
    viewModel = new MainWindowViewModel();
    DataContext = viewModel;
    InitializeComponent();

    engine = new AppEngine();
    tts = new TTS.SpeechSynth();

    asr = new SpeechRecognition();
    asr.SpeechRecognized += SpeechRecognized;
    asr.SpeechNotRecognized += SpeechNotRecognized;

    Loaded += StartEngine;
}
public static void WordCountFromSpeechRecognition(string filePath)
{
    Console.WriteLine($"Word detection on file {filePath}");

    // Detect spoken text.
    string textSpoken = SpeechRecognition.SpeechToText(filePath);

    // Count the words by splitting on whitespace.
    int nWords = textSpoken.Split(null).Length;

    Console.WriteLine($"{nWords} words detected in speech");
    Console.WriteLine("Text detected:");
    Console.WriteLine(textSpoken);
}
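A minimal usage sketch for the two SpeechToText helpers above; the audio file name is a placeholder, not taken from the original code:

// Hypothetical call site; "sample.wav" is a placeholder path.
SpeechRecognitionFromFile("sample.wav");
WordCountFromSpeechRecognition("sample.wav");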
public void OnChangeState(SpeechRecognition.State newState)
{
    // Map each recognizer state to a background color for quick visual feedback.
    if (newState == SpeechRecognition.State.NOT_INITIALIZED)
    {
        Camera.main.backgroundColor = Color.red;
    }
    else if (newState == SpeechRecognition.State.IDLE)
    {
        Camera.main.backgroundColor = Color.blue;
    }
    else if (newState == SpeechRecognition.State.LISTENING_TO_SOUND)
    {
        Camera.main.backgroundColor = Color.yellow;
    }
    else if (newState == SpeechRecognition.State.LISTINING_TO_SPEECH_INIT)
    {
        Camera.main.backgroundColor = new Color(1f, 0.5f, 0); // orange
    }
    else if (newState == SpeechRecognition.State.LISTENING_TO_SPEECH)
    {
        Camera.main.backgroundColor = Color.green;
    }
}
public Recognition()
{
    InitializeComponent();

    #region Recognition language
    // List
    SpeechRecognition.GetInstalledRecognizers().ToList().ForEach(a =>
        RecognitionLangCombo.Items.Add(new ListBoxItem() { Content = a.Culture.NativeName, Tag = a.Id }));
    // Default
    RecognitionLangCombo.SelectedValue = Reference.JsonSettings.Speech_language;
    // Event
    RecognitionLangCombo.SelectionChanged += delegate
    {
        Reference.JsonSettings.Speech_language = RecognitionLangCombo.SelectedValue.ToString();
    };
    #endregion

    #region Recognition threshold
    // Default
    RecognitionThresholdSlider.Value = Reference.JsonSettings.Confidence;
    // Event
    RecognitionThresholdSlider.ValueChanged += delegate
    {
        Reference.JsonSettings.Confidence = (float)RecognitionThresholdSlider.Value;
    };
    #endregion

    #region Recognition at launch
    // Default
    RecognitionAtLaunchCb.IsChecked = Reference.JsonSettings.Recognition_at_launch;
    #endregion

    #region Synthesizer voice
    // List
    Synthesizer.GetInstalledVoices().ToList().ForEach(a =>
        SynthesizerVoiceCombo.Items.Add(new ListBoxItem() { Content = a.VoiceInfo.Culture.NativeName, Tag = a.VoiceInfo.Id }));
    // Default
    SynthesizerVoiceCombo.SelectedValue = Reference.JsonSettings.Synthesizer_voice;
    // Event
    SynthesizerVoiceCombo.SelectionChanged += delegate
    {
        Reference.JsonSettings.Synthesizer_voice = SynthesizerVoiceCombo.SelectedValue.ToString();
    };
    #endregion
}
// Use this for initialization
void Awake()
{
    // The next line is added to avoid the "not used" warning.
    touchToListenRect.Contains(new Vector3(1, 1, 1));

    // Enforce a single persistent instance.
    if (instance != null)
    {
        Destroy(this.gameObject);
        return;
    }
    DontDestroyOnLoad(this.gameObject);
    instance = this;

    speechDictionary = gameObject.GetComponent<SpeechAnimationDictionary>();
    string fileContent = loadFile(fileName);
    Debug.Log("Filecontent: " + fileContent);
    speechDictionary.loadDictionary(fileContent);
    speechDictionary.ReloadDictionary();

    // Android SpeechRecognizer error codes.
    errorMessages.Add(1, "Network operation timed out.");
    errorMessages.Add(2, "Other network related errors.");
    errorMessages.Add(3, "Audio recording error.");
    errorMessages.Add(4, "Server sends error status.");
    errorMessages.Add(5, "Other client side errors.");
    errorMessages.Add(6, "No speech input.");
    errorMessages.Add(7, "No recognition result matched.");
    errorMessages.Add(8, "RecognitionService busy.");
    errorMessages.Add(9, "Insufficient permissions.");

#if UNITY_ANDROID && !UNITY_EDITOR
    AndroidJavaClass jc = new AndroidJavaClass("com.unity3d.player.UnityPlayer");
    currentActivity = jc.GetStatic<AndroidJavaObject>("currentActivity");
    speechRecognition = new AndroidJavaClass("be.jannesplyson.unity3dspeechrecognition.Unity3DSpeechRecognition");
    isRecognitionAvailable = speechRecognition.CallStatic<bool>("isRecognitionAvailable", currentActivity);
    if (isRecognitionAvailable)
    {
        // Push the configuration to the Java side.
        speechRecognition.SetStatic<int>("maxResults", maxResults);
        speechRecognition.SetStatic<string>("preferredLanguage", preferredLanguage);
        speechRecognition.SetStatic<bool>("enableOnBeginningOfSpeech", enableOnBeginningOfSpeech);
        speechRecognition.SetStatic<bool>("enableOnBufferReceived", enableOnBufferReceived);
        speechRecognition.SetStatic<bool>("enableOnEndOfSpeech", enableOnEndOfSpeech);
        speechRecognition.SetStatic<bool>("enableOnEvent", enableOnEvent);
        speechRecognition.SetStatic<bool>("enableOnPartialResults", enableOnPartialResults);
        speechRecognition.SetStatic<bool>("enableOnReadyForSpeech", enableOnReadyForSpeech);
        speechRecognition.SetStatic<bool>("enableOnRmsChanged", enableOnRmsChanged);
        speechRecognition.SetStatic<bool>("autoRestart", autoRestart);
        speechRecognition.SetStatic<bool>("autoRestartOnResume", autoRestartOnResume);
        speechRecognition.SetStatic<float>("autoRestartAmpThreshold", autoRestartAmpThreshold);

        // Read the configured values back from the Java side.
        maxResultsJni = speechRecognition.GetStatic<int>("maxResults");
        preferredLanguageJni = speechRecognition.GetStatic<string>("preferredLanguage");
        enableOnBeginningOfSpeechJni = speechRecognition.GetStatic<bool>("enableOnBeginningOfSpeech");
        enableOnBufferReceivedJni = speechRecognition.GetStatic<bool>("enableOnBufferReceived");
        enableOnEndOfSpeechJni = speechRecognition.GetStatic<bool>("enableOnEndOfSpeech");
        enableOnEventJni = speechRecognition.GetStatic<bool>("enableOnEvent");
        enableOnPartialResultsJni = speechRecognition.GetStatic<bool>("enableOnPartialResults");
        enableOnReadyForSpeechJni = speechRecognition.GetStatic<bool>("enableOnReadyForSpeech");
        enableOnRmsChangedJni = speechRecognition.GetStatic<bool>("enableOnRmsChanged");
        autoRestartJni = speechRecognition.GetStatic<bool>("autoRestart");
        autoRestartOnResumeJni = speechRecognition.GetStatic<bool>("autoRestartOnResume");
        autoRestartAmpThresholdJni = speechRecognition.GetStatic<float>("autoRestartAmpThreshold");

        speechRecognition.CallStatic("initSpeechRecognition", currentActivity);
    }
#endif
}
public void OnChangeState(SpeechRecognition.State newState)
{
    // No-op: state changes are currently ignored.
    // lastResults = newState.ToString();
}
public MainWindow() : base(Gtk.WindowType.Toplevel)
{
    Build();
    notebook.CurrentPage = 1;
    volume.Percentage = 70;

    // Apply the blue/white color scheme to all widgets.
    Gdk.Color bk = new Gdk.Color(17, 118, 207);
    Gdk.Color fg = new Gdk.Color(255, 255, 255);
    this.ModifyBg(StateType.Normal, bk);
    this.ModifyFg(StateType.Normal, fg);
    ctlArtwork.ModifyBg(StateType.Normal, bk);
    ctlArtwork.ModifyFg(StateType.Normal, fg);
    ctlDrawingMeta.ModifyBg(StateType.Normal, bk);
    ctlDrawingMeta.ModifyFg(StateType.Normal, fg);
    drawWiFi.ModifyBg(StateType.Normal, bk);
    drawWiFi.ModifyFg(StateType.Normal, fg);
    eventLeft.ModifyBg(StateType.Normal, bk);
    eventLeft.ModifyFg(StateType.Normal, fg);
    eventRight.ModifyBg(StateType.Normal, bk);
    eventRight.ModifyFg(StateType.Normal, fg);
    analogclock.ModifyBg(StateType.Normal, bk);
    analogclock.ModifyFg(StateType.Normal, fg);
    notebook.ModifyBg(StateType.Normal, bk);
    notebook.ModifyFg(StateType.Normal, fg);
    notebook.ModifyBg(StateType.Active, bk);
    notebook.ModifyFg(StateType.Active, fg);
    volume.ModifyBg(StateType.Normal, bk);
    volume.ModifyFg(StateType.Normal, fg);
    labelClockInfo.ModifyBg(StateType.Normal, bk);
    labelClockInfo.ModifyFg(StateType.Normal, fg);
    labelClockInfo.ModifyFont(Pango.FontDescription.FromString("Serif 12"));
    weatherview.ChangeColors(fg, bk);
    loadImages();

    // Load previously missing TTS words, if any.
    wordList = new List<string>();
    if (File.Exists("missing_words.txt"))
    {
        using (StreamReader reader = new StreamReader("missing_words.txt"))
        {
            string line = reader.ReadLine();
            while (line != null)
            {
                wordList.Add(line);
                line = reader.ReadLine();
            }
        }
    }

    clockEvents = new EventList();
    clockEvents.Load();
    Console.WriteLine("Events ******************");
    foreach (EventItem item in clockEvents.Events)
    {
        Console.WriteLine("  " + item.Name);
    }
    Console.WriteLine("*************************");
    Console.WriteLine("Loaded " + clockEvents.Events.Count + " events");

    clockAlarms = new AlarmList();
    clockAlarms.Load();
    Console.WriteLine("Alarms ******************");
    foreach (AlarmItem item in clockAlarms.Alarms)
    {
        Console.WriteLine("  " + item.Name);
    }
    Console.WriteLine("*************************");
    Console.WriteLine("Loaded " + clockAlarms.Alarms.Count + " alarms");

    wireless = new WirelessWrapper();
    wireless.Status += Wireless_Status;
    network = new NetworkWorker();
    network.Start();

    findArtwork = new FindArtwork();
    volumeService = new VolumeService();

    stations = new List<StationInfo>();
    stations.Add(null);
    LoadStations();

    ttsRecordings = new TTSRecordings();
    ttsRecordings.MissingWords += TtsRecordings_MissingWords;
    ttsRecordings.Status += TtsRecordings_Status;

    player = new SoundPlayer();
    radio = new RadioPlayer();
    radio.MetadataReceived += Player_MetadataReceived;

    recognizer = new SpeechRecognition();
    recognizer.StatusChanged += Recognizer_StatusChanged;
    recognizer.Recognized += Recognizer_Recognized;
    //recognizer.LogMessage += Recognizer_LogMessage;
    Console.WriteLine("Starting recognizer");
    recognizer.Start();

    analogclock.TimeChanged += Analogclock_TimeChanged;
    intercomView.Start();
    timedimage.Start();

    timer = new Timer(new TimerCallback(timerTick));
    timer.Change(250, 250);
    background = new Timer(new TimerCallback(backgroundTick));
    background.Change(2000, 2000);
}