private KinectModel()
{
    kinectSensor = KinectSensor.GetDefault();

    timerFoundBodies.Interval = TimeSpan.FromSeconds(1.5);
    timerNoBodies.Interval = TimeSpan.FromSeconds(10);
    timerFoundBodies.Tick += TimerFoundBodies_Tick;
    timerNoBodies.Tick += TimerNoBodies_Tick;

    if (kinectSensor != null)
    {
        this.kinectSensor.Open();
        IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
        Stream audioStream = audioBeamList[0].OpenInputStream();
        this.convertStream = new KinectAudioStream(audioStream);
    }
    else
    {
        return;
    }

    ri = TryGetKinectRecognizer();

    // Setup Gestures
    InitGestures();

    ActiveViews = new List<IReceptionistView>();
}
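// TryGetKinectRecognizer (called GetKinectRecognizer in some snippets below)
// is referenced throughout but defined in none of them. A minimal sketch,
// modeled on the Kinect v2 SDK speech samples (an assumption, not necessarily
// what these projects ship): return the first installed recognizer whose
// AdditionalInfo marks it as Kinect-capable. Requires using System,
// System.Collections.Generic, System.Runtime.InteropServices, and
// Microsoft.Speech.Recognition.
private static RecognizerInfo TryGetKinectRecognizer()
{
    IEnumerable<RecognizerInfo> recognizers;

    // This can throw when no recognizers are registered on the machine.
    try
    {
        recognizers = SpeechRecognitionEngine.InstalledRecognizers();
    }
    catch (COMException)
    {
        return null;
    }

    foreach (RecognizerInfo recognizer in recognizers)
    {
        string value;
        recognizer.AdditionalInfo.TryGetValue("Kinect", out value);
        if ("True".Equals(value, StringComparison.OrdinalIgnoreCase)
            && "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
        {
            return recognizer;
        }
    }

    return null;
}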
void InitializeAudio()
{
    AudioSource audioSource = kinect.AudioSource;
    if (audioSource == null)
    {
        throw new Exception("no audio source");
    }

    IReadOnlyList<AudioBeam> audioBeamList = audioSource.AudioBeams;
    Stream inputStream = audioBeamList[0].OpenInputStream();
    convertStream = new KinectAudioStream(inputStream);
    convertStream.SpeechActive = true;
}
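// KinectAudioStream is used by every snippet here but defined in none of
// them. A minimal sketch, modeled on the Kinect v2 SDK speech samples (an
// assumption, not necessarily these projects' exact implementation): the
// sensor delivers 32-bit IEEE-float samples, while the recognizer is fed
// 16-bit PCM (see the SpeechAudioFormatInfo calls below), so Read() turns
// every 4 input bytes into 2 output bytes.
internal class KinectAudioStream : System.IO.Stream
{
    private readonly System.IO.Stream kinect32BitStream;

    public KinectAudioStream(System.IO.Stream input)
    {
        this.kinect32BitStream = input;
    }

    // Gate toggled by the callers; while false, Read() reports end-of-stream.
    public bool SpeechActive { get; set; }

    public override bool CanRead { get { return true; } }
    public override bool CanSeek { get { return false; } }
    public override bool CanWrite { get { return false; } }
    public override long Length { get { throw new System.NotSupportedException(); } }
    public override long Position
    {
        get { return 0; }
        set { throw new System.NotSupportedException(); }
    }

    public override void Flush() { }
    public override long Seek(long offset, System.IO.SeekOrigin origin) { throw new System.NotSupportedException(); }
    public override void SetLength(long value) { throw new System.NotSupportedException(); }
    public override void Write(byte[] buffer, int offset, int count) { throw new System.NotSupportedException(); }

    public override int Read(byte[] buffer, int offset, int count)
    {
        // While speech is inactive, report end-of-stream so the recognizer stops reading.
        if (!this.SpeechActive)
        {
            return 0;
        }

        // 4 bytes of float input produce 2 bytes of PCM output, so pull twice 'count'.
        byte[] floatBuffer = new byte[count * 2];
        int bytesRead = this.kinect32BitStream.Read(floatBuffer, 0, floatBuffer.Length);

        int samples = bytesRead / sizeof(float);
        for (int i = 0; i < samples; i++)
        {
            float sample = System.BitConverter.ToSingle(floatBuffer, i * sizeof(float));
            sample = System.Math.Max(-1.0f, System.Math.Min(1.0f, sample)); // clamp
            short pcm = (short)(sample * short.MaxValue);
            buffer[offset + (i * 2)] = (byte)(pcm & 0xff);            // low byte (little-endian)
            buffer[offset + (i * 2) + 1] = (byte)((pcm >> 8) & 0xff); // high byte
        }

        return samples * 2;
    }
}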
internal bool TryInitializeAgleVoice(KinectSensor kinectSensor)
{
    if (null == kinectSensor)
    {
        return false;
    }

    this.kinectSensor = kinectSensor;
    IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
    System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();
    this.convertStream = new KinectAudioStream(audioStream);

    RecognizerInfo ri = TryGetKinectRecognizer();
    if (null == ri)
    {
        return false;
    }

    this.speechEngine = new SpeechRecognitionEngine(ri.Id);

    this.agleVoiceDictionary = new Choices();
    this.CreateAgleVoiceDictionary();

    var gb = new GrammarBuilder { Culture = ri.Culture };
    gb.Append(this.agleVoiceDictionary);

    var g = new Grammar(gb);
    this.speechEngine.LoadGrammar(g);

    this.speechEngine.SpeechRecognized += this.SpeechRecognized;
    this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

    // let the convertStream know speech is going active
    this.convertStream.SpeechActive = true;

    // For long recognition sessions (a few hours or more), it may be beneficial
    // to turn off adaptation of the acoustic model. This will prevent
    // recognition accuracy from degrading over time.
    ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

    this.speechEngine.SetInputToAudioStream(
        this.convertStream,
        new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);

    return true;
}
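// CreateAgleVoiceDictionary() is not shown in any of these snippets. A
// hypothetical sketch of the shape it likely has -- the phrases and semantic
// tags below are invented for illustration, not the project's actual
// vocabulary (Choices.Add accepts SemanticResultValue via its implicit
// GrammarBuilder conversion, the same pattern the snippets below use):
private void CreateAgleVoiceDictionary()
{
    this.agleVoiceDictionary.Add(new SemanticResultValue("hello agle", "GREET"));
    this.agleVoiceDictionary.Add(new SemanticResultValue("goodbye", "BYE"));
    this.agleVoiceDictionary.Add(new SemanticResultValue("help", "HELP"));
}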
/// <summary>
/// Initializes a new instance of the MainWindow class
/// </summary>
public MainWindow()
{
    // only one sensor is currently supported
    this.kinectSensor = KinectSensor.GetDefault();

    // set IsAvailableChanged event notifier
    this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

    // open the sensor
    this.kinectSensor.Open();

    // set the status text
    this.StatusText = this.kinectSensor.IsAvailable ? Properties.Resources.RunningStatusText
                                                    : Properties.Resources.NoSensorStatusText;

    // open the reader for the body frames
    this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();

    // set the BodyFrameArrived event notifier
    this.bodyFrameReader.FrameArrived += this.Reader_BodyFrameArrived;

    // initialize the BodyViewer object for displaying tracked bodies in the UI
    this.kinectBodyView = new KinectBodyView(this.kinectSensor);

    // initialize the gesture detection objects for our gestures
    this.gestureDetectorList = new List<GestureDetector>();

    // initialize the MainWindow
    this.InitializeComponent();

    try
    {
        sp.PortName = "COM6";
        sp.BaudRate = 9600;
        sp.Open();
    }
    catch (Exception)
    {
        MessageBox.Show("Please give a valid port number or check your connection");
    }

    speaker.SelectVoiceByHints(System.Speech.Synthesis.VoiceGender.Female);

    // set our data context objects for display in UI
    this.DataContext = this;
    this.kinectBodyViewbox.DataContext = this.kinectBodyView;

    // create a gesture detector for each body (6 bodies => 6 detectors)
    // and create content controls to display results in the UI
    int maxBodies = this.kinectSensor.BodyFrameSource.BodyCount;
    for (int i = 0; i < maxBodies; ++i)
    {
        GestureResultView result = new GestureResultView(i, false, false, 0.0f);
        GestureDetector detector = new GestureDetector(this.kinectSensor, result);
        this.gestureDetectorList.Add(detector);

        ContentControl contentControl = new ContentControl();
        contentControl.Content = this.gestureDetectorList[i].GestureResultView;
        this.contentGrid.Children.Add(contentControl);
    }

    // grab the audio stream (the sensor is already open)
    IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
    System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

    // create the convert stream
    this.convertStream = new KinectAudioStream(audioStream);

    RecognizerInfo ri = TryGetKinectRecognizer();
    if (null != ri)
    {
        this.speechEngine = new SpeechRecognitionEngine(ri.Id);

        // Create a grammar from grammar definition XML file.
        using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
        {
            var g = new Grammar(memoryStream);
            this.speechEngine.LoadGrammar(g);
        }

        this.speechEngine.SpeechRecognized += this.SpeechRecognized;
        this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

        // let the convertStream know speech is going active
        this.convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be beneficial
        // to turn off adaptation of the acoustic model. This will prevent
        // recognition accuracy from degrading over time.
        ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        this.speechEngine.SetInputToAudioStream(
            this.convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
    else
    {
        //this.statusBarText.Text = Properties.Resources.NoSpeechRecognizer;
    }
}
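<!-- Several snippets here load a grammar from Properties.Resources.SpeechGrammar.
     That resource is an SRGS XML document whose exact contents are project-specific;
     this is a minimal sketch in the shape of the SDK sample's SpeechGrammar.xml
     (rule name and phrases are assumptions). With tag-format="semantics/1.0-literals",
     the <tag> literal becomes e.Result.Semantics.Value in the recognized event. -->
<grammar version="1.0" xml:lang="en-US" root="rootRule"
         tag-format="semantics/1.0-literals"
         xmlns="http://www.w3.org/2001/06/grammar">
  <rule id="rootRule">
    <one-of>
      <item>
        <tag>FORWARD</tag>
        <one-of>
          <item>forward</item>
          <item>forwards</item>
          <item>straight</item>
        </one-of>
      </item>
      <item>
        <tag>BACKWARD</tag>
        <one-of>
          <item>backward</item>
          <item>backwards</item>
          <item>back</item>
        </one-of>
      </item>
    </one-of>
  </rule>
</grammar>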
public void Start()
{
#if KINECT
    this.sensor = KinectSensor.GetDefault();
    if (null != this.sensor)
    {
        try
        {
            this.sensor.Open();
        }
        catch (IOException ex)
        {
            this.sensor = null;
            //Logger.Error(ex.Message);
        }
    }

    if (null == this.sensor)
    {
        return;
    }

    var ri = GetKinectRecognizer();
    if (null != ri)
    {
        this.speechEngine = new SpeechRecognitionEngine(ri.Id);

        var audioBeamList = this.sensor.AudioSource.AudioBeams;
        var audioStream = audioBeamList[0].OpenInputStream();
        convertStream = new KinectAudioStream(audioStream);

        // Create a grammar definition ...
        this._PluginsList = Plugin.loadPlugins(this.speechEngine);

        speechEngine.SpeechRecognized += SpeechRecognized;
        convertStream.SpeechActive = true;
        speechEngine.SetInputToAudioStream(convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        Console.WriteLine("ok");
#endif

#if MICRO
        this.speechEngine = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("fr-FR"));
        this._PluginsList = Plugin.loadPlugins(this.speechEngine);
        speechEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
        speechEngine.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", (int)(this.ConfidenceThreshold * 100));
        speechEngine.MaxAlternates = 10;
        speechEngine.InitialSilenceTimeout = TimeSpan.FromSeconds(0);
        speechEngine.BabbleTimeout = TimeSpan.FromSeconds(0);
        speechEngine.EndSilenceTimeout = TimeSpan.FromSeconds(0.150);
        speechEngine.EndSilenceTimeoutAmbiguous = TimeSpan.FromSeconds(0.500);
        speechEngine.SetInputToDefaultAudioDevice();
#endif

        speechEngine.RecognizeAsync(RecognizeMode.Multiple);
        speaker.Speak(ConfigurationManager.AppSettings["OperationalSystem"]);
#if KINECT
    }
    else
    {
        speaker.Speak(ConfigurationManager.AppSettings["ErroredSystem"]);
    }
#endif
}
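// KINECT and MICRO in the snippet above are conditional-compilation symbols,
// not runtime flags: exactly one input path (Kinect microphone array vs.
// default audio device) is compiled in. One way to select a mode -- an
// assumption about this project's setup -- is a #define at the very top of
// the source file:
//
//     #define KINECT
//
// or project-wide via <DefineConstants>KINECT</DefineConstants> in the
// .csproj (Project Properties > Build > Conditional compilation symbols).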
/// <summary>
/// Execute initialization tasks.
/// </summary>
private void WindowLoaded()
{
    // Only one sensor is supported
    this.kinectSensor = KinectSensor.GetDefault();

    if (this.kinectSensor != null)
    {
        // open the sensor
        this.kinectSensor.Open();

        // grab the audio stream
        IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
        System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

        // create the convert stream
        this.convertStream = new KinectAudioStream(audioStream);
    }
    else
    {
        return;
    }

    RecognizerInfo ri = TryGetKinectRecognizer();
    if (null != ri)
    {
        this.speechEngine = new SpeechRecognitionEngine(ri.Id);

        /****************************************************************
        *
        * Use this code to create grammar programmatically rather than from
        * a grammar file.
        *
        * var directions = new Choices();
        * directions.Add(new SemanticResultValue("forward", "FORWARD"));
        * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
        * directions.Add(new SemanticResultValue("straight", "FORWARD"));
        * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
        * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
        * directions.Add(new SemanticResultValue("back", "BACKWARD"));
        * directions.Add(new SemanticResultValue("turn left", "LEFT"));
        * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
        *
        * var gb = new GrammarBuilder { Culture = ri.Culture };
        * gb.Append(directions);
        *
        * var g = new Grammar(gb);
        *
        ****************************************************************/

        // Create a grammar from grammar definition XML file.
        using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
        {
            var g = new Grammar(memoryStream);
            this.speechEngine.LoadGrammar(g);
        }

        this.speechEngine.SpeechRecognized += this.SpeechRecognized;
        this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

        // let the convertStream know speech is going active
        this.convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be beneficial
        // to turn off adaptation of the acoustic model. This will prevent
        // recognition accuracy from degrading over time.
        ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        this.speechEngine.SetInputToAudioStream(
            this.convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
}
/// <summary>
/// Execute start up tasks
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void MainWindow_Loaded(object sender, RoutedEventArgs e)
{
    // Only one sensor is supported
    this.kinectSensor = KinectSensor.GetDefault();

    if (this.kinectSensor != null)
    {
        // open the sensor
        this.kinectSensor.Open();

        // grab the audio stream
        var audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
        var audioStream = audioBeamList[0].OpenInputStream();

        // create the convert stream
        this.convertStream = new KinectAudioStream(audioStream);
    }
    else
    {
        return;
    }

    RecognizerInfo ri = GetKinectRecognizer();
    if (null != ri)
    {
        this.speechEngine = new SpeechRecognitionEngine(ri.Id);

        /****************************************************************
        *
        * Use this code to create grammar programmatically rather than from
        * a grammar file.
        *
        * var directions = new Choices();
        * directions.Add(new SemanticResultValue("forward", "FORWARD"));
        * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
        * directions.Add(new SemanticResultValue("straight", "FORWARD"));
        * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
        * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
        * directions.Add(new SemanticResultValue("back", "BACKWARD"));
        * directions.Add(new SemanticResultValue("turn left", "LEFT"));
        * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
        *
        * var gb = new GrammarBuilder { Culture = ri.Culture };
        * gb.Append(directions);
        *
        * var g = new Grammar(gb);
        *
        ****************************************************************/

        // Create a grammar from grammar definition XML file.
        using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
        {
            var g = new Grammar(memoryStream);
            this.speechEngine.LoadGrammar(g);
        }

        this.speechEngine.SpeechRecognized += this.SpeechRecognized;

        // let the convertStream know speech is going active
        this.convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be beneficial
        // to turn off adaptation of the acoustic model. This will prevent
        // recognition accuracy from degrading over time.
        ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        this.speechEngine.SetInputToAudioStream(
            this.convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
    else
    {
        // NoSpeechRecognizer
    }

    // Face detection
    for (int i = 0; i < this.bodyCount; i++)
    {
        if (this.faceFrameReaders[i] != null)
        {
            // wire handler for face frame arrival
            this.faceFrameReaders[i].FrameArrived += this.Reader_FaceFrameArrived;
        }
    }

    if (this.bodyFrameReader != null)
    {
        // wire handler for body frame arrival
        this.bodyFrameReader.FrameArrived += this.Reader_BodyFrameArrived;
    }
}
static public SpeechRecognitionEngine init()
{
    SpeechRecognitionEngine.InstalledRecognizers();

    // Only one sensor is supported
    _sensor = KinectSensor.GetDefault();

    if (_sensor != null)
    {
        // open the sensor
        _sensor.Open();

        // grab the audio stream
        IReadOnlyList<AudioBeam> audioBeamList = _sensor.AudioSource.AudioBeams;
        System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

        // create the convert stream
        convertStream = new KinectAudioStream(audioStream);
    }
    else
    {
        return null;
    }

    RecognizerInfo ri = TryGetKinectRecognizer();
    if (null != ri)
    {
        speechEngine = new SpeechRecognitionEngine(ri.Id);

        // define the vocabulary of the commands
        var commands = new Choices();
        commands.Add(new SemanticResultValue("check", "Check"));
        commands.Add(new SemanticResultValue("check result", "Check"));
        commands.Add(new SemanticResultValue("checks result", "Check"));
        commands.Add(new SemanticResultValue("mechika", "Erase"));
        commands.Add(new SemanticResultValue("erase", "Erase"));
        commands.Add(new SemanticResultValue("delete", "Erase"));
        commands.Add(new SemanticResultValue("Erase Screen", "Erase"));
        commands.Add(new SemanticResultValue("start", "Start"));
        commands.Add(new SemanticResultValue("stop", "Stop"));

        var gb = new GrammarBuilder { Culture = ri.Culture };
        gb.Append(commands);

        var g = new Grammar(gb);
        speechEngine.LoadGrammar(g);

        // let the convertStream know speech is going active
        convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be beneficial
        // to turn off adaptation of the acoustic model. This will prevent
        // recognition accuracy from degrading over time.
        ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        speechEngine.SetInputToAudioStream(
            convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }

    return speechEngine;
}
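// init() wires no event handlers itself; presumably the caller subscribes to
// the returned engine, e.g.:
//
//     var engine = init();
//     if (engine != null) engine.SpeechRecognized += SpeechRecognized;
//
// A minimal sketch of handlers matching the semantic tags above -- the
// confidence threshold and the actions are assumptions, following the SDK
// sample pattern:
static void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    const double ConfidenceThreshold = 0.3; // assumed; tune for the room and microphone

    if (e.Result.Confidence >= ConfidenceThreshold)
    {
        // Semantics.Value holds the tag attached by the SemanticResultValue entries.
        switch (e.Result.Semantics.Value.ToString())
        {
            case "Check":
                // check the result on screen
                break;
            case "Erase":
                // clear the screen
                break;
            case "Start":
                // begin a session
                break;
            case "Stop":
                // end the session
                break;
        }
    }
}

static void SpeechRejected(object sender, SpeechRecognitionRejectedEventArgs e)
{
    // Optionally give feedback that the utterance was not understood.
}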
/// <summary>
/// Execute initialization tasks.
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void WindowLoaded(object sender, RoutedEventArgs e)
{
    LoadCrayons(sender, e);

    // Only one sensor is supported
    CurrentSensor = KinectSensor.GetDefault();

    if (CurrentSensor != null)
    {
        // open the sensor
        CurrentSensor.Open();

        // grab the audio stream
        IReadOnlyList<AudioBeam> audioBeamList = CurrentSensor.AudioSource.AudioBeams;
        System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

        // create the convert stream
        this._convertStream = new KinectAudioStream(audioStream);

        _bodyReader = CurrentSensor.BodyFrameSource.OpenReader();
        _bodyReader.FrameArrived += BodyReader_FrameArrived;
        _bodies = new Body[CurrentSensor.BodyFrameSource.BodyCount];
    }
    else
    {
        // on failure, set the status text
        this.StatusBarText.Text = Properties.Resources.NoKinectReady;
        return;
    }

    RecognizerInfo ri = TryGetKinectRecognizer();
    if (null != ri)
    {
        this._speechEngine = new SpeechRecognitionEngine(ri.Id);

        // Create a grammar from grammar definition XML file.
        using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
        {
            var g = new Grammar(memoryStream);
            this._speechEngine.LoadGrammar(g);
        }

        this._speechEngine.SpeechRecognized += SpeechRecognized;
        this._speechEngine.SpeechRecognitionRejected += SpeechRejected;

        // let the convertStream know speech is going active
        this._convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be beneficial
        // to turn off adaptation of the acoustic model. This will prevent
        // recognition accuracy from degrading over time.
        // _speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        this._speechEngine.SetInputToAudioStream(
            this._convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        this._speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
    else
    {
        this.StatusBarText.FontSize = 45;
        this.StatusBarText.Text = Properties.Resources.NoSpeechRecognizer;
    }
}
public MainWindow()
{
    this.Hide();
    StartPeriodic();
    StartDobot();

    viewer = new ImageViewer();
    viewer.SetBounds(0, 0, 1920, 1080);

    KinectSensor sensor = KinectSensor.GetDefault();
    if (sensor != null)
    {
        // open the sensor
        sensor.Open();

        // grab the audio stream
        IReadOnlyList<AudioBeam> audioBeamList = sensor.AudioSource.AudioBeams;
        System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

        // create the convert stream
        this.convertStream = new KinectAudioStream(audioStream);
    }
    else
    {
        // on failure, set the status text
        //this.statusBarText.Text = Properties.Resources.NoKinectReady;
        return;
    }

    RecognizerInfo ri = TryGetKinectRecognizer();
    if (null != ri)
    {
        this.speechEngine = new SpeechRecognitionEngine(ri.Id);

        var directions = new Choices();
        directions.Add(new SemanticResultValue("split", "SPLIT"));
        directions.Add(new SemanticResultValue("double down", "DOUBLE DOWN"));
        directions.Add(new SemanticResultValue("hit", "HIT"));
        directions.Add(new SemanticResultValue("stand", "STAND"));
        //directions.Add(new SemanticResultValue("blue", "BLUE"));
        //directions.Add(new SemanticResultValue("orange", "ORANGE"));

        var gb = new GrammarBuilder { Culture = ri.Culture };
        gb.Append(directions);

        var g = new Grammar(gb);
        this.speechEngine.LoadGrammar(g);

        this.speechEngine.SpeechRecognized += this.SpeechRecognized;
        this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

        // let the convertStream know speech is going active
        this.convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be beneficial
        // to turn off adaptation of the acoustic model. This will prevent
        // recognition accuracy from degrading over time.
        ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        this.speechEngine.SetInputToAudioStream(
            this.convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }

    ColorFrameReader frameReader = sensor.ColorFrameSource.OpenReader();
    frameReader.FrameArrived += Reader_ColorFrameArrived;

    FrameDescription colorFrameDescription = sensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);
    colorBitmap = new WriteableBitmap(colorFrameDescription.Width, colorFrameDescription.Height, 96.0, 96.0, PixelFormats.Bgr32, null);

    Bitmap img = BitmapFromWriteableBitmap(colorBitmap);
    Image<Bgr, Byte> myImage = new Image<Bgr, Byte>(img);

    loadLibrary(CvInvoke.Imread("hearts (2).png"), 1);
    loadLibrary(CvInvoke.Imread("diamonds (2).png"), 2);
    loadLibrary(CvInvoke.Imread("clubs (2).png"), 3);
    loadLibrary(CvInvoke.Imread("spades (2).png"), 4);

    dobotStuff();

    //Mat image = CvInvoke.Imread("testbild-cropped.png");
    //detectCards(myImage.Mat, 1);

    viewer.ShowDialog();
    //Environment.Exit(0);
}
static void Main(string[] args)
{
    Console.WriteLine("Welcome to Mathias");
    RUNNING = true;
    active = true;
    GlobalManager.InitMathias();

    Console.WriteLine("Initializing the Kinect");
    speaker = new SpeechSynthesizer();
    List<InstalledVoice> voices = speaker.GetInstalledVoices().ToList();
    Console.WriteLine(speaker.Voice.Name);
    speaker.Speak("Starting up");

    kinectSensor = KinectSensor.GetDefault();
    if (kinectSensor != null)
    {
        Console.WriteLine("Kinect acquired");
        kinectSensor.Open();
        Console.WriteLine("The Kinect is ready to receive data");

        Console.WriteLine("Grabbing the audio beam");
        IReadOnlyList<AudioBeam> audioBeamList = kinectSensor.AudioSource.AudioBeams;
        Stream audioStream = audioBeamList[0].OpenInputStream();
        Console.WriteLine("Stream and audio beam OK");

        Console.WriteLine("Converting the audioStream");
        convertStream = new KinectAudioStream(audioStream);
        Console.WriteLine("Conversion OK");
    }
    else
    {
        Console.WriteLine("Could not acquire the Kinect");
    }

    if (GlobalManager.RI != null)
    {
        Console.WriteLine(GlobalManager.RI.Name + " retrieved");
        Console.WriteLine("Building the grammar");
        speechEngine = new SpeechRecognitionEngine(GlobalManager.RI.Id);
        Console.WriteLine("Grammar construction finished");
        speechEngine.LoadGrammar((Grammar)GlobalManager.CONTEXT.GRAMMAR);
        speechEngine.SpeechRecognized += SpeechRecognized;
        speechEngine.SpeechRecognitionRejected += SpeechRejected;

        convertStream.SpeechActive = true;
        speechEngine.SetInputToAudioStream(convertStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        speechEngine.RecognizeAsync(RecognizeMode.Multiple);
        Console.WriteLine("Now just speak");
    }
    else
    {
        Console.WriteLine("Could not find speech recognizer");
    }

    while (GlobalManager.RUNNING)
    {
        // avoid a busy spin while waiting for shutdown
        System.Threading.Thread.Sleep(100);
    }

    speaker.Speak("Goodbye");
}
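// None of the snippets above shows teardown. A minimal sketch of a clean
// shutdown using this last snippet's fields (the ordering and helper name
// are assumptions; RecognizeAsyncStop and KinectSensor.Close are real APIs):
static void Shutdown()
{
    if (speechEngine != null)
    {
        speechEngine.SpeechRecognized -= SpeechRecognized;
        speechEngine.SpeechRecognitionRejected -= SpeechRejected;
        speechEngine.RecognizeAsyncStop(); // finish the current utterance, then stop
    }

    if (convertStream != null)
    {
        // makes the convert stream report end-of-stream to the recognizer
        convertStream.SpeechActive = false;
    }

    if (kinectSensor != null)
    {
        kinectSensor.Close();
        kinectSensor = null;
    }
}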