Code Example #1
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Only one sensor is supported
            this.kinectSensor = KinectSensor.GetDefault();

            if (this.kinectSensor != null)
            {
                // open the sensor
                this.kinectSensor.Open();

                // grab the audio stream
                IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
                System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);
            }
            else
            {
                // on failure, set the status text
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                var gb = new GrammarBuilder { Culture = ri.Culture };
                gb.Append(commands);
                var g = new Grammar(gb);
                this.speechEngine.LoadGrammar(g);

                this.speechEngine.SpeechRecognized          += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;
                this.speechEngine.SpeechHypothesized        += this.SpeechHypothesized;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                this.statusBarText.Text = Properties.Resources.NoSpeechRecognizer;
            }
        }
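Most of the snippets in this listing call `TryGetKinectRecognizer()` (example #5's `GetKinectRecognizer` is the same idea) without ever showing its body. In the Kinect v2 SDK speech samples it scans the installed Microsoft.Speech recognizers for the one that advertises Kinect acoustic-model support; a minimal sketch along those lines (the "en-US" culture filter and the `using` set are assumptions):

        // requires: using System; using System.Collections.Generic;
        //           using System.Runtime.InteropServices;
        //           using Microsoft.Speech.Recognition;
        private static RecognizerInfo TryGetKinectRecognizer()
        {
            IEnumerable<RecognizerInfo> recognizers;

            // InstalledRecognizers() can throw if no SAPI recognizers are registered
            try
            {
                recognizers = SpeechRecognitionEngine.InstalledRecognizers();
            }
            catch (COMException)
            {
                return null;
            }

            foreach (RecognizerInfo recognizer in recognizers)
            {
                string value;
                recognizer.AdditionalInfo.TryGetValue("Kinect", out value);

                // pick the recognizer that advertises Kinect support
                if ("True".Equals(value, StringComparison.OrdinalIgnoreCase)
                    && "en-US".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }

            return null;
        }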
Code Example #2
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            Server.StartListening();
            // Only one sensor is supported
            this.kinectSensor = KinectSensor.GetDefault();
            if (this.kinectSensor != null)
            {
                // open the sensor
                this.kinectSensor.Open();

                // grab the audio stream
                IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
                System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);
            }
            else
            {
                // on failure, report it and bail out (convertStream was never
                // created, so continuing would throw below)
                Console.WriteLine("Failure: no Kinect sensor available");
                return;
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    this.speechEngine.LoadGrammar(g);
                }

                this.speechEngine.SpeechRecognized          += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                Console.WriteLine("No speech Recognized");
            }
        }
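None of the examples show the matching teardown. Because `RecognizeAsync(RecognizeMode.Multiple)` keeps the engine running indefinitely, a window-closing handler in the style of the SDK samples stops recognition and releases the sensor; a sketch (the handler name and `CancelEventArgs` wiring are assumptions):

        // requires: using System.ComponentModel;
        private void WindowClosing(object sender, CancelEventArgs e)
        {
            if (this.convertStream != null)
            {
                // stop converting audio for the speech engine
                this.convertStream.SpeechActive = false;
            }

            if (this.speechEngine != null)
            {
                this.speechEngine.SpeechRecognized -= this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected -= this.SpeechRejected;
                this.speechEngine.RecognizeAsyncStop();
            }

            if (this.kinectSensor != null)
            {
                this.kinectSensor.Close();
                this.kinectSensor = null;
            }
        }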
Code Example #3
File: MainWindow.xaml.cs  Project: noa99kee/K4W2-Book
 void InitializeAudio()
 {
     AudioSource audioSource = kinect.AudioSource;
     if (audioSource == null)
     {
         throw new Exception("no audio source");
     }
     IReadOnlyList<AudioBeam> audioBeamList = audioSource.AudioBeams;
     Stream inputStream = audioBeamList[0].OpenInputStream();
     convertStream = new KinectAudioStream(inputStream);
     convertStream.SpeechActive = true;
 }
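`KinectAudioStream`, used by every example here, is a helper class from the SDK samples rather than part of the sensor API: the Kinect v2 audio beam delivers 32-bit IEEE-float samples, while the speech engine is fed 16-bit PCM, so the helper's `Read` converts on the fly. A simplified sketch of that conversion (the field name `kinect32BitStream` and the assumption that whole 4-byte samples are returned are illustrative):

     // inside a System.IO.Stream subclass wrapping the beam's input stream
     public override int Read(byte[] buffer, int offset, int count)
     {
         // 2 PCM bytes are produced per 4 float bytes consumed
         byte[] floatBuffer = new byte[count * 2];
         int bytesRead = this.kinect32BitStream.Read(floatBuffer, 0, floatBuffer.Length);

         for (int i = 0, j = offset; i < bytesRead; i += sizeof(float), j += sizeof(short))
         {
             float sample = BitConverter.ToSingle(floatBuffer, i);

             // clamp to [-1, 1], then scale to the 16-bit signed range
             if (sample > 1.0f) { sample = 1.0f; }
             if (sample < -1.0f) { sample = -1.0f; }

             short pcm = (short)(sample * short.MaxValue);
             buffer[j] = (byte)(pcm & 0xff);
             buffer[j + 1] = (byte)((pcm >> 8) & 0xff);
         }

         return bytesRead / 2;
     }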
Code Example #4
File: AgleVoice.cs  Project: jzzfreedom/Repo
        internal bool TryInitializeAgleVoice(KinectSensor kinectSensor)
        {
            if (null == kinectSensor)
            {
                return false;
            }
            this.kinectSensor = kinectSensor;
            IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
            System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();
            this.convertStream = new KinectAudioStream(audioStream);

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                this.agleVoiceDictionary = new Choices();
                this.CreateAgleVoiceDictionary();
                var gb = new GrammarBuilder { Culture = ri.Culture };
                gb.Append(this.agleVoiceDictionary);

                var g = new Grammar(gb);
                this.speechEngine.LoadGrammar(g);
                this.speechEngine.SpeechRecognized += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model. 
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                return false;
            }
            return true;
        }
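The `SpeechRecognized` handler that every example attaches is never shown in this listing. Following the SDK samples, it gates on `e.Result.Confidence` before acting; a minimal sketch, where the 0.3 threshold follows the samples and the phrases in the switch are hypothetical, since `CreateAgleVoiceDictionary()` is not shown:

        private void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // results below this confidence are treated as noise; 0.3 follows the
            // SDK samples and should be tuned for the actual microphone setup
            const double ConfidenceThreshold = 0.3;

            if (e.Result.Confidence >= ConfidenceThreshold)
            {
                // with a plain Choices grammar the spoken phrase is in e.Result.Text;
                // grammars built from SemanticResultValue would read
                // e.Result.Semantics.Value instead
                switch (e.Result.Text)
                {
                    case "start":   // hypothetical command
                        // handle the command
                        break;
                    case "stop":    // hypothetical command
                        break;
                }
            }
        }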
Code Example #5
        /// <summary>
        /// Execute start up tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void MainWindow_Loaded(object sender, RoutedEventArgs e)
        {
            // Only one sensor is supported
            this.kinectSensor = KinectSensor.GetDefault();

            if (this.kinectSensor != null)
            {
                // open the sensor
                this.kinectSensor.Open();

                // grab the audio stream
                var audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
                var audioStream = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);
            }
            else
            {
                return;
            }

            RecognizerInfo ri = GetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                /****************************************************************
                * 
                * Use this code to create grammar programmatically rather than from
                * a grammar file.
                * 
                * var directions = new Choices();
                * directions.Add(new SemanticResultValue("forward", "FORWARD"));
                * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
                * directions.Add(new SemanticResultValue("straight", "FORWARD"));
                * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
                * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
                * directions.Add(new SemanticResultValue("back", "BACKWARD"));
                * directions.Add(new SemanticResultValue("turn left", "LEFT"));
                * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
                *
                * var gb = new GrammarBuilder { Culture = ri.Culture };
                * gb.Append(directions);
                *
                * var g = new Grammar(gb);
                * 
                ****************************************************************/

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    this.speechEngine.LoadGrammar(g);
                }

                this.speechEngine.SpeechRecognized += this.SpeechRecognized;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model. 
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                // no speech recognizer is available; speech commands are disabled
            }

            // Face detection
            for (int i = 0; i < this.bodyCount; i++)
            {
                if (this.faceFrameReaders[i] != null)
                {
                    // wire handler for face frame arrival
                    this.faceFrameReaders[i].FrameArrived += this.Reader_FaceFrameArrived;
                }
            }

            if (this.bodyFrameReader != null)
            {
                // wire handler for body frame arrival
                this.bodyFrameReader.FrameArrived += this.Reader_BodyFrameArrived;
            }
        }
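The `Properties.Resources.SpeechGrammar` resource loaded above is a grammar definition XML (SRGS) file whose contents this listing never shows. As an illustration only, a grammar equivalent to the commented-out `directions` choices might look like:

        <grammar version="1.0" xml:lang="en-US" root="rootRule"
                 tag-format="semantics/1.0-literals"
                 xmlns="http://www.w3.org/2001/06/grammar">
          <rule id="rootRule" scope="public">
            <one-of>
              <item>
                <tag>FORWARD</tag>
                <one-of><item>forward</item><item>forwards</item><item>straight</item></one-of>
              </item>
              <item>
                <tag>BACKWARD</tag>
                <one-of><item>backward</item><item>backwards</item><item>back</item></one-of>
              </item>
              <item><tag>LEFT</tag>turn left</item>
              <item><tag>RIGHT</tag>turn right</item>
            </one-of>
          </rule>
        </grammar>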
Code Example #6
        /// <summary>
        /// Execute initialization tasks.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Only one sensor is supported
            this.kinectSensor = KinectSensor.GetDefault();

            if (this.kinectSensor != null)
            {
                // open the sensor
                this.kinectSensor.Open();

                // grab the audio stream
                IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
                System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);
            }
            else
            {
                // on failure, set the status text
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.recognitionSpans = new List<Span> { forwardSpan, backSpan, rightSpan, leftSpan };

                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                /****************************************************************
                *
                * Use this code to create grammar programmatically rather than from
                * a grammar file.
                *
                * var directions = new Choices();
                * directions.Add(new SemanticResultValue("forward", "FORWARD"));
                * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
                * directions.Add(new SemanticResultValue("straight", "FORWARD"));
                * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
                * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
                * directions.Add(new SemanticResultValue("back", "BACKWARD"));
                * directions.Add(new SemanticResultValue("turn left", "LEFT"));
                * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
                *
                * var gb = new GrammarBuilder { Culture = ri.Culture };
                * gb.Append(directions);
                *
                * var g = new Grammar(gb);
                *
                ****************************************************************/

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    this.speechEngine.LoadGrammar(g);
                }

                this.speechEngine.SpeechRecognized          += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                this.statusBarText.Text = Properties.Resources.NoSpeechRecognizer;
            }
        }
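The `recognitionSpans` list built above exists so the UI can highlight whichever command word was just heard. The helpers themselves are not shown; a sketch of the reset side, modeled on the SDK sample (the black/normal styling is a placeholder for the sample's brush resources):

        // requires: using System.Windows; using System.Windows.Documents;
        //           using System.Windows.Media;
        private void ClearRecognitionHighlights()
        {
            foreach (Span span in this.recognitionSpans)
            {
                span.Foreground = new SolidColorBrush(Colors.Black);
                span.FontWeight = FontWeights.Normal;
            }
        }

The `SpeechRecognized` handler would call this first, then set, say, `forwardSpan.Foreground` to a highlight brush when the FORWARD semantic is matched.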
Code Example #7
File: MainWindow.xaml.cs  Project: StrathHACK16/Team3
        /// <summary>
        /// Execute initialization tasks.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Only one sensor is supported
            this.kinectSensor = KinectSensor.GetDefault();

            if (this.kinectSensor != null)
            {
                // open the sensor
                this.kinectSensor.Open();

                // grab the audio stream
                IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
                System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

                // create the convert stream
                this.convertStream = new KinectAudioStream(audioStream);
            }
            else
            {
                // on failure, set the status text
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.recognitionSpans = new List<Span> { forwardSpan, backSpan, rightSpan, leftSpan };

                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                /****************************************************************
                *
                * Use this code to create grammar programmatically rather than from
                * a grammar file.
                *
                * var directions = new Choices();
                * directions.Add(new SemanticResultValue("forward", "FORWARD"));
                * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
                * directions.Add(new SemanticResultValue("straight", "FORWARD"));
                * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
                * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
                * directions.Add(new SemanticResultValue("back", "BACKWARD"));
                * directions.Add(new SemanticResultValue("turn left", "LEFT"));
                * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
                *
                * var gb = new GrammarBuilder { Culture = ri.Culture };
                * gb.Append(directions);
                *
                * var g = new Grammar(gb);
                *
                ****************************************************************/

                // Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    this.speechEngine.LoadGrammar(g);
                }

                this.speechEngine.SpeechRecognized += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                this.statusBarText.Text = Properties.Resources.NoSpeechRecognizer;
            }
        }
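A note on the line all of these examples share: `new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null)` describes 16 kHz, 16-bit, mono PCM with 32000 average bytes per second (16000 samples x 2 bytes), a block alignment of 2 bytes, and no codec-specific data, which is exactly the format `KinectAudioStream` produces. For completeness, the `SpeechRejected` handler wired above is tiny in the SDK sample; a sketch, reusing the `ClearRecognitionHighlights` helper sketched after example #6:

        private void SpeechRejected(object sender, SpeechRecognitionRejectedEventArgs e)
        {
            // nothing was confidently recognized: just clear any highlight
            this.ClearRecognitionHighlights();
        }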
Code Example #8
        /// <summary>
        /// Initializes a new instance of the MainWindow class.
        /// </summary>
        public MainWindow()
        {
            // one sensor is currently supported
            this.kinectSensor = KinectSensor.GetDefault();

            // get the coordinate mapper
            this.coordinateMapper = this.kinectSensor.CoordinateMapper;

            // get the depth (display) extents
            FrameDescription frameDescription = this.kinectSensor.DepthFrameSource.FrameDescription;

            // get size of joint space
            this.displayWidth = frameDescription.Width;
            this.displayHeight = frameDescription.Height;

            // open the reader for the body frames
            this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();

            // a bone defined as a line between two joints
            this.bones = new List<Tuple<JointType, JointType>>();

            // Torso
            this.bones.Add(new Tuple<JointType, JointType>(JointType.Head, JointType.Neck));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.Neck, JointType.SpineShoulder));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineShoulder, JointType.SpineMid));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineMid, JointType.SpineBase));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineShoulder, JointType.ShoulderRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineShoulder, JointType.ShoulderLeft));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineBase, JointType.HipRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineBase, JointType.HipLeft));

            // Right Arm
            this.bones.Add(new Tuple<JointType, JointType>(JointType.ShoulderRight, JointType.ElbowRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.ElbowRight, JointType.WristRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.WristRight, JointType.HandRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.HandRight, JointType.HandTipRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.WristRight, JointType.ThumbRight));

            // Left Arm
            this.bones.Add(new Tuple<JointType, JointType>(JointType.ShoulderLeft, JointType.ElbowLeft));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.ElbowLeft, JointType.WristLeft));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.WristLeft, JointType.HandLeft));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.HandLeft, JointType.HandTipLeft));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.WristLeft, JointType.ThumbLeft));

            // Right Leg
            this.bones.Add(new Tuple<JointType, JointType>(JointType.HipRight, JointType.KneeRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.KneeRight, JointType.AnkleRight));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.AnkleRight, JointType.FootRight));

            // Left Leg
            this.bones.Add(new Tuple<JointType, JointType>(JointType.HipLeft, JointType.KneeLeft));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.KneeLeft, JointType.AnkleLeft));
            this.bones.Add(new Tuple<JointType, JointType>(JointType.AnkleLeft, JointType.FootLeft));

            // populate body colors, one for each BodyIndex
            this.bodyColors = new List<Pen>();

            this.bodyColors.Add(new Pen(Brushes.Red, 6));
            this.bodyColors.Add(new Pen(Brushes.Orange, 6));
            this.bodyColors.Add(new Pen(Brushes.Green, 6));
            this.bodyColors.Add(new Pen(Brushes.Blue, 6));
            this.bodyColors.Add(new Pen(Brushes.Indigo, 6));
            this.bodyColors.Add(new Pen(Brushes.Violet, 6));

            // set IsAvailableChanged event notifier
            this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

            // open the sensor
            this.kinectSensor.Open();

            // grab the audio stream
            IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
            System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

            // create the convert stream
            this.convertStream = new KinectAudioStream(audioStream);

            // set the status text
            this.StatusText = this.kinectSensor.IsAvailable ? Properties.Resources.RunningStatusText
                                                            : Properties.Resources.NoSensorStatusText;

            RecognizerInfo ri = TryGetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                //var directions = new Choices();
                //directions.Add(new SemanticResultValue("hide", "hide"));
                //directions.Add(new SemanticResultValue("minimize", "minimize"));
                //directions.Add(new SemanticResultValue("maximize", "maximize"));
                //directions.Add(new SemanticResultValue("snap left", "snap left"));
                //directions.Add(new SemanticResultValue("snap right", "snap right"));
                //directions.Add(new SemanticResultValue("front", "front"));
                //directions.Add(new SemanticResultValue("drag", "drag"));
                //directions.Add(new SemanticResultValue("get windows", "get windows"));

                // Grammar for snapping
                GrammarBuilder snap = new GrammarBuilder { Culture = ri.Culture };
                snap.Append(new Choices("snap"));
                snap.Append(new Choices("Spotify", "Genie", "Chrome", "Media Player", "Visual Studio", "Github", "Eclipse", "Word", "Notepad"));
                snap.Append(new Choices("left", "right", "down", "up"));
                var g = new Grammar(snap);
                this.speechEngine.LoadGrammar(g);

                // Snap any window (wildcard) to a screen edge
                GrammarBuilder snap2 = new GrammarBuilder { Culture = ri.Culture };
                snap2.Append(new Choices("snap"));
                snap2.AppendWildcard();
                snap2.Append(new Choices("left", "right", "down", "up"));
                var g4 = new Grammar(snap2);
                this.speechEngine.LoadGrammar(g4);

                // Grammar for grabbing a named window
                GrammarBuilder grab1 = new GrammarBuilder { Culture = ri.Culture };
                grab1.Append(new Choices("grab"));
                grab1.Append(new Choices("Spotify", "Genie", "Chrome", "Media Player", "Visual Studio", "Github", "Eclipse", "Word", "Notepad"));
                var g1 = new Grammar(grab1);
                this.speechEngine.LoadGrammar(g1);

                // Grammar for dragging a named window
                GrammarBuilder drag1 = new GrammarBuilder { Culture = ri.Culture };
                drag1.Append(new Choices("drag"));
                drag1.Append(new Choices("Spotify", "Genie", "Chrome", "Media Player", "Visual Studio", "Github", "Eclipse", "Word", "Notepad"));
                var d1 = new Grammar(drag1);
                this.speechEngine.LoadGrammar(d1);

                GrammarBuilder grab2 = new GrammarBuilder { Culture = ri.Culture };
                // Any window
                grab2.Append(new Choices("grab"));
                var g2 = new Grammar(grab2);
                this.speechEngine.LoadGrammar(g2);

                GrammarBuilder dropit = new GrammarBuilder { Culture = ri.Culture };
                dropit.Append(new Choices("drop it"));
                var drop = new Grammar(dropit);
                this.speechEngine.LoadGrammar(drop);

                GrammarBuilder mouse = new GrammarBuilder { Culture = ri.Culture };
                mouse.Append(new Choices("mouse mode"));
                var mg = new Grammar(mouse);
                this.speechEngine.LoadGrammar(mg);

                GrammarBuilder click = new GrammarBuilder { Culture = ri.Culture };
                click.Append(new Choices("click", "double click", "right click"));
                var clickGram = new Grammar(click);
                this.speechEngine.LoadGrammar(clickGram);

                GrammarBuilder go = new GrammarBuilder { Culture = ri.Culture };
                go.Append(new Choices("lets hack", "shut it down"));
                var goGram = new Grammar(go);
                this.speechEngine.LoadGrammar(goGram);

                this.speechEngine.SpeechRecognized += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model. 
                // This will prevent recognition accuracy from degrading over time.
                speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                this.StatusText = "No recognizer";
            }

            // Create the drawing group we'll use for drawing
            this.drawingGroup = new DrawingGroup();

            // Create an image source that we can use in our image control
            this.imageSource = new DrawingImage(this.drawingGroup);

            // use the window object as the view model in this simple example
            this.DataContext = this;

            // initialize the components (controls) of the window
            this.InitializeComponent();

            KnockSegment1 knockSegment1 = new KnockSegment1();
            KnockSegment2 knockSegment2 = new KnockSegment2();
            KnockSegment3 knockSegment3 = new KnockSegment3();
            SlapSegment1 slapSegment1 = new SlapSegment1();
            SlapSegment2 slapSegment2 = new SlapSegment2();
            PokeSegment1 pokeSegment1 = new PokeSegment1();
            PokeSegment2 pokeSegment2 = new PokeSegment2();

            IGestureSegment[] knock = new IGestureSegment[]
            {
                knockSegment1,
                knockSegment2
            };
            IGestureSegment[] knockPull = new IGestureSegment[]
            {
                knockSegment1,
                knockSegment3
            };
            IGestureSegment[] slap = new IGestureSegment[]
            {
                slapSegment1,
                slapSegment2
            };
            IGestureSegment[] poke = new IGestureSegment[]
            {
                pokeSegment1,
                pokeSegment2
            };

            knockGesture = new GestureListener(knock);
            knockGesture.GestureRecognized += Gesture_KnockRecognized;
            knockPullGesture = new GestureListener(knockPull);
            knockPullGesture.GestureRecognized += Gesture_KnockPullRecognized;
            slapGesture = new GestureListener(slap);
            slapGesture.GestureRecognized += Gesture_SlapRecognized;
            pokeGesture = new GestureListener(poke);
            pokeGesture.GestureRecognized += Gesture_PokeRecognized;

            WindowDragStart dragSeg1 = new WindowDragStart();
            WindowDragMove dragSeg2 = new WindowDragMove();
            MouseMoveStart mouseSeg1 = new MouseMoveStart();
            ScrollUpStart scrollUpSeg1 = new ScrollUpStart();
            ScrollDownStart scrollUpSeg2 = new ScrollDownStart();
            VolumeUpStart volumeUpStart = new VolumeUpStart();
            VolumeDownStart volumeDownStart = new VolumeDownStart();
            PausePlaySegment1 pausePlaySeg1 = new PausePlaySegment1();
            PausePlaySegment2 pausePlaySeg2 = new PausePlaySegment2();
            ShowAllStart showSeg1 = new ShowAllStart();
            HideAllStart showSeg2 = new HideAllStart();
            MouseMove mouseSeg2 = new MouseMove();
            DragFinishedGesture dragFinished = new DragFinishedGesture();
            VolumeFinishGesture volumeFinished = new VolumeFinishGesture();
            ScrollFinishedGesture scrollFinished = new ScrollFinishedGesture();
            MouseFinishedGesture mouseFinished = new MouseFinishedGesture();
            
            IGestureSegment[] windowDrag = new IGestureSegment[]
            {
                dragSeg1,
                dragSeg2
            };
            IGestureSegment[] mouseMove = new IGestureSegment[]
            {
                mouseSeg1,
                mouseSeg2
            };
            IGestureSegment[] dragFinishedSequence = new IGestureSegment[]
            {
                dragFinished
            };
            IGestureSegment[] volumeFinishedSequence = new IGestureSegment[]
            {
                volumeFinished
            };
            IGestureSegment[] scrollFinishedSequence = new IGestureSegment[]
            {
                scrollFinished
            };
            IGestureSegment[] mouseFinishedSequence = new IGestureSegment[]
            {
                mouseFinished
            };

            IGestureSegment[] scrollUp = new IGestureSegment[]
            {
                scrollUpSeg1,
                scrollUpSeg2
            };
            IGestureSegment[] scrollDown = new IGestureSegment[]
            {
                scrollUpSeg2,
                scrollUpSeg1
            };

            IGestureSegment[] volumeUp = new IGestureSegment[]
            {
                volumeUpStart,
                volumeDownStart
            };
            IGestureSegment[] volumeDown = new IGestureSegment[]
            {
                volumeDownStart,
                volumeUpStart
            };
            IGestureSegment[] pausePlay = new IGestureSegment[]
            {
                pausePlaySeg1,
                pausePlaySeg2
            };

            IGestureSegment[] bringUp = new IGestureSegment[]
            {
                showSeg1,
                showSeg2
            };
            IGestureSegment[] bringDown = new IGestureSegment[]
            {
                showSeg2,
                showSeg1
            };

            windowDragGesture = new GestureListener(windowDrag);
            windowDragGesture.GestureRecognized += Gesture_DragMove;

            windowDragGestureFinish = new GestureListener(dragFinishedSequence);
            windowDragGestureFinish.GestureRecognized += Gesture_DragFinish;

            mouseMoveGesture = new GestureListener(mouseMove);
            mouseMoveGesture.GestureRecognized += Gesture_MouseMove;

            mouseMoveGestureFinish = new GestureListener(mouseFinishedSequence);
            mouseMoveGestureFinish.GestureRecognized += Gesture_MouseMoveFinish;

            scrollUpGesture = new GestureListener(scrollUp);
            scrollUpGesture.GestureRecognized += Gesture_ScrollUp;

            scrollDownGesture = new GestureListener(scrollDown);
            scrollDownGesture.GestureRecognized += Gesture_ScrollDown;

            scrollGestureFinish = new GestureListener(scrollFinishedSequence);
            scrollGestureFinish.GestureRecognized += Gesture_ScrollFinish;

            volumeUpGesture = new GestureListener(volumeUp);
            volumeUpGesture.GestureRecognized += Gesture_VolumeUp;

            volumeDownGesture = new GestureListener(volumeDown);
            volumeDownGesture.GestureRecognized += Gesture_VolumeDown;

            volumeGestureFinish = new GestureListener(volumeFinishedSequence);
            volumeGestureFinish.GestureRecognized += Gesture_VolumeFinish;

            pausePlayGesture = new GestureListener(pausePlay);
            pausePlayGesture.GestureRecognized += Gesture_PausePlay;
            showAllGesture = new GestureListener(bringUp);
            showAllGesture.GestureRecognized += Gesture_ShowAll;

            hideAllGesture = new GestureListener(bringDown);
            hideAllGesture.GestureRecognized += Gesture_HideAll;
        }
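`GestureListener` and the `IGestureSegment` implementations used throughout this constructor are project code, not part of the Kinect SDK, and their sources are not in this listing. A common shape for this segment/listener pattern in Kinect v2 gesture code (assumed here, not taken from this project) is:

        // each gesture is an ordered array of segments; the listener feeds every
        // body frame to the current segment and advances on success
        public enum GesturePartResult
        {
            Failed,     // this segment can no longer match; reset the gesture
            Pausing,    // not matched yet; keep watching
            Succeeded   // matched; move on to the next segment
        }

        public interface IGestureSegment
        {
            // inspect the tracked body for this frame and report progress
            GesturePartResult Update(Body body);
        }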