/// <summary>
/// Creates the search page model with its injected service dependencies.
/// </summary>
/// <param name="photoService">Photo retrieval service.</param>
/// <param name="autoSuggestService">Search auto-suggest service.</param>
/// <param name="speechRecognition">Speech recognition service.</param>
/// <param name="userDialogs">User dialog service.</param>
public SearchPageModel(IPhotoService photoService, IAutoSuggestService autoSuggestService, ISpeechRecognition speechRecognition, IUserDialogs userDialogs)
{
    _photoService = photoService;
    _autoSuggestService = autoSuggestService;
    _speechRecognition = speechRecognition;
    _userDialogs = userDialogs;
}
/// <summary>
/// Execute initialization tasks: pen color, Kinect sensor status display,
/// speech synthesis and speech recognition setup.
/// </summary>
protected void Init()
{
    ShowPenColor();

    // This requires that a Kinect is connected at the time of app startup.
    // To make the app robust against plug/unplug, Microsoft recommends using
    // KinectSensorChooser provided in Microsoft.Kinect.Toolkit (see components in Toolkit Browser).
    bool kinectReady = KinectV1Utils.StartKinectSensor() != null;
    statusBarText.Text = kinectReady ? Properties.Resources.KinectReady : Properties.Resources.NoKinectReady;
    imgKinect.Visibility = kinectReady ? Visibility.Visible : Visibility.Hidden;

    speechSynthesis = new SpeechSynthesis();

    // Will fall back to the same engine used by the SpeechRecognition class
    // automatically if it can't find a Kinect V1 sensor.
    speechRecognition = new SpeechRecognitionKinectV1();

    // Could use SpeechGrammar_en.Create() to generate the grammar programmatically
    // instead of loading it from an XML (resource) file.
    speechRecognition.LoadGrammar(Properties.Resources.SpeechGrammar_en, "Main");
    speechRecognition.LoadGrammar(SpeechRecognitionUtils.CreateGrammarFromNames(ColorUtils.GetKnownColorNames(), "en", "Colors"));

    // Set up recognition event handlers.
    speechRecognition.Recognized += SpeechRecognition_Recognized;
    speechRecognition.NotRecognized += SpeechRecognition_NotRecognized;

    // For long recognition sessions (a few hours or more), it may be beneficial to turn off
    // adaptation of the acoustic model to keep recognition accuracy from degrading over time:
    // speechRecognition.AcousticModelAdaptation = false;

    // Start speech recognition (set to keep on firing recognition events, not just once).
    speechRecognition.Start();
}
/// <summary>
/// Starts the speech recognition. Called by LoadSpeechRecognitionPlugin.
/// Loads UI and gesture-collection grammars, wires up the recognition events
/// and starts the engine; on failure the engine is dropped and the error shown.
/// </summary>
protected void StartSpeechRecognition()
{
    if (speechRecognition == null)
    {
        return; // no recognition engine loaded, nothing to start
    }

    try
    {
        LoadSpeechRecognitionGrammarsForUI();
        LoadSpeechRecognitionGrammarForGestureCollection();

        GestureCollectionLoaded += MainWindow_GestureCollectionLoaded;
        speechRecognition.Recognized += SpeechRecognition_Recognized;
        speechRecognition.NotRecognized += SpeechRecognition_NotRecognized;

        // For long recognition sessions (a few hours or more), it may be beneficial to turn off
        // adaptation of the acoustic model to keep accuracy from degrading over time:
        // speechRecognition.AcousticModelAdaptation = false;

        speechRecognition.Start();
    }
    catch (Exception ex)
    {
        // Engine is unusable — drop it and surface the error to the user.
        speechRecognition = null;
        MessageBox.Show(ex.Message);
    }
}
/// <summary>
/// Loads the TrackingCam SRGS grammar from the application's Grammars\SRGS folder,
/// wires up the recognition events and starts the speech recognition engine.
/// On failure both engine references are dropped and the error is shown to the user.
/// </summary>
private void StartSpeechRecognition()
{
    if (speechRecognition == null)
    {
        return; // no recognition engine loaded, nothing to start
    }

    try
    {
        string grammarsFolder = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "Grammars", "SRGS");

        // Fix: the original leaked the FileStream (never disposed). Open read-only and
        // dispose it once the grammar is loaded.
        // NOTE(review): assumes LoadGrammar consumes the stream synchronously — confirm
        // against the ISpeechRecognition implementation before merging.
        using (FileStream grammarStream = new FileStream(Path.Combine(grammarsFolder, "TrackingCam_en.xml"), FileMode.Open, FileAccess.Read))
        {
            speechRecognition.LoadGrammar(grammarStream, "TrackingCam");
        }

        speechRecognition.Recognized += SpeechRecognition_Recognized;
        speechRecognition.NotRecognized += SpeechRecognition_NotRecognized;

        // For long recognition sessions (a few hours or more), it may be beneficial to turn off
        // adaptation of the acoustic model to keep accuracy from degrading over time:
        // speechRecognition.AcousticModelAdaptation = false;

        speechRecognition.Start();
    }
    catch (Exception e)
    {
        // Engine is unusable — drop both references and surface the error to the user.
        speechRecognitionKinect = null;
        speechRecognition = null;
        MessageBox.Show(e.Message);
    }
}
/// <summary>
/// Creates the text handler, storing the speaker and speech recognition
/// dependencies and parsing the form document up front.
/// </summary>
/// <param name="speaker">Text-to-speech output service.</param>
/// <param name="speechRecognition">Speech recognition service.</param>
public TextHandler(ISpeaker speaker, ISpeechRecognition speechRecognition)
{
    _speaker = speaker;
    _speechRecognition = speechRecognition;

    // Parse the document once at construction so the form list is ready immediately.
    _formList = new Parser().ParseDocument();
}
/// <summary>
/// Creates the translator page and resolves its platform services
/// (recorder, speech recognition, translation, Azure Easy Table client)
/// via dependency injection from the Android module.
/// </summary>
public TranslatorPage()
{
    InitializeComponent();

    // Dependency injection — references to classes in the Android module.
    recorder = DependencyService.Get<IRecorder>();
    speechRecognition = DependencyService.Get<ISpeechRecognition>();
    // NOTE(review): field name "tranlation" looks like a typo of "translation";
    // it is declared elsewhere in the class, so it is left as-is here.
    tranlation = DependencyService.Get<ITranslation>();
    azureEasyTableClient = DependencyService.Get<IAzureEasyTableClient>();

    isRecording = false;
}
/// <summary>
/// Creates the main window, wires up the speech pipeline
/// (recognition → text handler → kebab manager) and starts the
/// background worker that drives it.
/// </summary>
public MainWindow()
{
    ISpeaker ttsSpeaker = new Speaker();
    _speechRecognitionEngine = new SpeechRecognition();
    _textHandler = new TextHandler(ttsSpeaker, _speechRecognitionEngine);
    _kebabManager = new KebabManager(_speechRecognitionEngine, _textHandler);

    InitializeComponent();

    // Run the main loop on a background worker so the UI thread stays responsive.
    _worker.DoWork += Run;
    _worker.RunWorkerAsync();
}
/// <summary>
/// Loads a speech recognition plugin via MEF, preferring the Kinect V1 one,
/// and starts recognition if a plugin was found.
/// </summary>
public void LoadSpeechRecognitionPlugin()
{
    // Prefer the Kinect V1 speech recognition plugin.
    speechRecognition = PluginsCatalog.mefContainer
        .GetExports<ISpeechRecognitionKinect>("SpeechLib.Recognition.KinectV1")
        .FirstOrDefault()?.Value;

    if (speechRecognition == null)
    {
        // SpeechRecognitionKinect plugin couldn't be loaded — fall back to the
        // SpeechRecognition one (which uses the default audio source as input).
        speechRecognition = PluginsCatalog.mefContainer
            .GetExports<ISpeechRecognition>("SpeechLib.Recognition")
            .FirstOrDefault()?.Value;
    }

    if (speechRecognition != null)
        StartSpeechRecognition();
}
/// <summary>
/// Loads a speech recognition plugin via MEF, preferring the Kinect V1 one
/// (also remembered separately as the Kinect-specific engine), and starts
/// recognition if a plugin was found.
/// </summary>
public void LoadSpeechRecognitionPlugin()
{
    // Prefer the Kinect V1 plugin; keep a Kinect-typed reference to it as well.
    speechRecognitionKinect = PluginsCatalog.mefContainer
        .GetExports<ISpeechRecognitionKinect>("SpeechLib.Recognition.KinectV1")
        .FirstOrDefault()?.Value;
    speechRecognition = speechRecognitionKinect;

    if (speechRecognition == null)
    {
        // SpeechRecognitionKinect plugin couldn't be loaded — fall back to the
        // SpeechRecognition one (which uses the default audio source as input).
        speechRecognition = PluginsCatalog.mefContainer
            .GetExports<ISpeechRecognition>("SpeechLib.Recognition")
            .FirstOrDefault()?.Value;
    }

    if (speechRecognition != null)
    {
        StartSpeechRecognition();
    }
}
/// <summary>
/// Creates the audio controller, validating and storing all injected dependencies.
/// </summary>
/// <param name="audioRepository">Repository for audio entities.</param>
/// <param name="speechRecognition">Speech recognition service.</param>
/// <param name="configuration">Application configuration.</param>
/// <param name="unitOfWork">Unit-of-work for persistence.</param>
/// <param name="mapper">Object mapper.</param>
public AudioController(IDataRepository<Audio> audioRepository, ISpeechRecognition speechRecognition, IConfiguration configuration, IUnitOfWork unitOfWork, IMapper mapper)
{
    // Fail fast on missing dependencies (order preserved so the first null argument
    // is the one reported).
    Guard.Against.Null(audioRepository, nameof(audioRepository));
    Guard.Against.Null(speechRecognition, nameof(speechRecognition));
    Guard.Against.Null(configuration, nameof(configuration));
    Guard.Against.Null(unitOfWork, nameof(unitOfWork));
    Guard.Against.Null(mapper, nameof(mapper));

    _audioRepository = audioRepository;
    _speechRecognition = speechRecognition;
    _configuration = configuration;
    _unitOfWork = unitOfWork;
    _mapper = mapper;

    _subDirectory = Resources.SubDirectory;
}
/// <summary>
/// Creates the kebab manager with its speech recognition and text analysis dependencies.
/// </summary>
/// <param name="speechRecognition">Speech recognition service.</param>
/// <param name="textAnalyzer">Text analysis service.</param>
public KebabManager(ISpeechRecognition speechRecognition, ITextAnalyzer textAnalyzer)
{
    _speechRecognition = speechRecognition;
    _textAnalyzer = textAnalyzer;
}
/// <summary>
/// Creates the conversation bot from the Telegram provider, log and speech service.
/// </summary>
/// <param name="b">Telegram provider supplying the bot client.</param>
/// <param name="l">Telegram log sink.</param>
/// <param name="speech">Speech recognition service (Yandex).</param>
public ConvBot(TelegramProvider b, TgLog l, ISpeechRecognition speech)
{
    bot = b.bot;
    tgLog = l;
    YaSpeech = speech;
}
/// <summary>
/// Creates the audio socket with its configuration, speech recognition and logger dependencies.
/// </summary>
/// <param name="config">Application configuration.</param>
/// <param name="speech">Speech recognition service.</param>
/// <param name="logger">Logger for this socket.</param>
public AudioSocket(IConfiguration config, ISpeechRecognition speech, ILogger<AudioSocket> logger)
{
    this.config = config;
    this.speech = speech;
    this.logger = logger;
}
/// <summary>
/// Starts the speech recognition (called by LoadSpeechRecognitionPlugin).
/// Loads the UI and gesture-collection grammars, subscribes the recognition
/// event handlers and starts the engine; any failure drops the engine and
/// shows the error message to the user.
/// </summary>
protected void StartSpeechRecognition()
{
    if (speechRecognition == null)
    {
        return;
    }

    try
    {
        LoadSpeechRecognitionGrammarsForUI();
        LoadSpeechRecognitionGrammarForGestureCollection();

        GestureCollectionLoaded += MainWindow_GestureCollectionLoaded;
        speechRecognition.Recognized += SpeechRecognition_Recognized;
        speechRecognition.NotRecognized += SpeechRecognition_NotRecognized;

        // For long recognition sessions (a few hours or more), it may be beneficial to turn off
        // adaptation of the acoustic model to keep accuracy from degrading over time:
        // speechRecognition.AcousticModelAdaptation = false;

        speechRecognition.Start();
    }
    catch (Exception ex)
    {
        // Recognition could not be started — discard the engine and report the problem.
        speechRecognition = null;
        MessageBox.Show(ex.Message);
    }
}