private void ThisAddIn_Startup(object sender, System.EventArgs e)
        {
            // only one sensor is currently supported
            kinectSensor = KinectSensor.GetDefault();

            // set IsAvailableChanged event notifier
            kinectSensor.IsAvailableChanged += Sensor_IsAvailableChanged;

            // open the sensor
            kinectSensor.Open();

            // open the reader for the body frames
            bodyFrameReader = kinectSensor.BodyFrameSource.OpenReader();

            // initialize the gesture detection objects for our gestures
            gestureDetectorList = new List<GestureDetector>();

            // create a gesture detector for each body (6 bodies => 6 detectors) and create content controls to display results in the UI
            int maxBodies = 6;

            for (int i = 0; i < maxBodies; ++i)
            {
                GestureResultView result   = new GestureResultView(i, false, false, 0.0f);
                GestureDetector   detector = new GestureDetector(kinectSensor, result);
                gestureDetectorList.Add(detector);
            }

            // set the BodyFrameArrived event notifier
            bodyFrameReader.FrameArrived += Reader_BodyFrameArrived;
        }
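        // NOTE: Reader_BodyFrameArrived is wired above but not shown in this example. The
        // following is a hypothetical sketch of what it usually looks like in the Kinect
        // gesture samples; the "bodies" field and the TrackingId/IsPaused properties on
        // GestureDetector are assumptions, not part of the original code.
        private Body[] bodies = null;

        void Reader_BodyFrameArrived(object sender, BodyFrameArrivedEventArgs e)
        {
            using (BodyFrame bodyFrame = e.FrameReference.AcquireFrame())
            {
                if (bodyFrame == null)
                {
                    return; // no frame available
                }

                if (this.bodies == null)
                {
                    this.bodies = new Body[bodyFrame.BodyCount];
                }

                // refresh the body data for this frame
                bodyFrame.GetAndRefreshBodyData(this.bodies);

                // hand each body's tracking ID to its detector and pause
                // detectors whose body is not currently tracked
                for (int i = 0; i < this.bodies.Length; ++i)
                {
                    ulong trackingId = this.bodies[i].TrackingId;

                    if (trackingId != this.gestureDetectorList[i].TrackingId)
                    {
                        this.gestureDetectorList[i].TrackingId = trackingId;
                        this.gestureDetectorList[i].IsPaused = (trackingId == 0);
                    }
                }
            }
        }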
        // called when the user clicks the button to start the Kinect manually
        void OnOpenSensor(object sender, RoutedEventArgs e)
        {
            // announce that the app has started, both audibly and visually
            speaker = new SpeechSynthesizer();         // text-to-speech output
            speaker.Speak("App is started");
            System.Windows.Forms.MessageBox.Show("Kinect started - Click on Map Now");

            // FOR THE VGB ONE (Visual Gesture Builder)

            // open the sensor
            this.kinectSensor.Open();

            // open the reader for the body frames
            this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();

            // set the BodyFrameArrived event notifier
            this.bodyFrameReader.FrameArrived += this.Reader_BodyFrameArrived;

            // initialize the BodyViewer object for displaying tracked bodies in the UI
            this.kinectBodyView = new KinectBodyView(this.kinectSensor);

            // initialize the gesture detection objects for our gestures
            this.gestureDetectorList = new List<GestureDetector>();

            // set our data context objects for display in the UI
            // (used to show the tracked skeleton in the UI)
            this.DataContext = this;
            this.kinectBodyViewbox.DataContext = this.kinectBodyView;

            // Kinect v2 can track up to six bodies and detect gestures from all of them,
            // so we create one gesture detector per body
            int maxBodies = this.kinectSensor.BodyFrameSource.BodyCount;
            
            for (int i = 0; i < maxBodies; ++i)
            {
                // i is the body index; webView1 is the HTML control passed in so the
                // detector can drive the map (the remaining parameters are defined later)
                GestureResultView result = new GestureResultView(i, false, false, 0.0f, webView1, gestureState.Text);
                GestureDetector detector = new GestureDetector(this.kinectSensor, result);
                this.gestureDetectorList.Add(detector);
            }


            // FOR THE JOINT ONE
            if (_sensor != null)
            {
                _sensor.Open();

                // one synchronized reader for the color, depth, infrared, and body streams
                _reader = _sensor.OpenMultiSourceFrameReader(FrameSourceTypes.Color | FrameSourceTypes.Depth | FrameSourceTypes.Infrared | FrameSourceTypes.Body);
                _reader.MultiSourceFrameArrived += Reader_MultiSourceFrameArrived;
            }
            
            
            // FOR THE SOUND
            this.sensor = KinectSensor.GetDefault();

            // open the sensor
            this.sensor.Open();

            // grab the audio stream (the sensor exposes a single beamformed audio beam)
            IReadOnlyList<AudioBeam> audioBeamList = this.sensor.AudioSource.AudioBeams;
            System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();

            // create the convert stream
            this.convertStream = new KinectAudioStream(audioStream);

            RecognizerInfo ri = TryGetKinectRecognizer();

            // the speech recognition engine analyses the audio stream; the Bing speech
            // recognizer would be an alternative, but it is still in beta and not yet
            // usable in Germany
            this.speechEngine = new SpeechRecognitionEngine(ri.Id);

            var directions = new Choices();   // the phrases that steer where the map should go
            // the first argument is the phrase to listen for; the second is the semantic
            // value we read back in the handler when the phrase is recognized
            directions.Add(new SemanticResultValue("ZoomIn", "ZOOMIN"));
            directions.Add(new SemanticResultValue("In", "ZOOMIN"));
            directions.Add(new SemanticResultValue("ZoomOut", "ZOOMOUT"));
            directions.Add(new SemanticResultValue("Out", "ZOOMOUT"));
            directions.Add(new SemanticResultValue("Left", "LEFT"));
            directions.Add(new SemanticResultValue("Right", "RIGHT"));
            directions.Add(new SemanticResultValue("Up", "UP"));
            directions.Add(new SemanticResultValue("Down", "DOWN"));

            // places
            directions.Add(new SemanticResultValue("Go India", "INDIA"));
            directions.Add(new SemanticResultValue("india", "INDIA"));
            directions.Add(new SemanticResultValue("Go AMERICA", "AMERICA"));
            directions.Add(new SemanticResultValue("america", "AMERICA"));
            directions.Add(new SemanticResultValue("Go SanDiego", "SANDIEGO"));
            directions.Add(new SemanticResultValue("SanDiego", "SANDIEGO"));
            directions.Add(new SemanticResultValue("MY PLACE", "MYPLACE"));              // this will not work through window app as cant activate gps through eo web browser 
            directions.Add(new SemanticResultValue("San Francisco Bay", "SANFRANCISCO"));
            directions.Add(new SemanticResultValue("Mount Everest", "MOUNTEVEREST"));
            directions.Add(new SemanticResultValue("Grand Canyon", "GRANDCANYON"));
            directions.Add(new SemanticResultValue("hannover", "HANOVER"));
            directions.Add(new SemanticResultValue("newyork", "NEWYORK"));
            directions.Add(new SemanticResultValue("Delhi", "DELHI"));
            directions.Add(new SemanticResultValue("Goa", "GOA"));
            directions.Add(new SemanticResultValue("Mumbai", "MUMBAI"));
            directions.Add(new SemanticResultValue("Banglore", "BANGLORE"));
            directions.Add(new SemanticResultValue("Europe", "EUROPE"));
            directions.Add(new SemanticResultValue("Germany", "GERMANY"));
            directions.Add(new SemanticResultValue("Switzerland", "SWITZERLAND"));
            directions.Add(new SemanticResultValue("Amsterdam", "AMSTERDAM"));
            directions.Add(new SemanticResultValue("Belgium", "BELGIUM"));
            directions.Add(new SemanticResultValue("Hildesheim", "HILDESHEIM"));
            directions.Add(new SemanticResultValue("Hamburg", "HAMBURG"));
            directions.Add(new SemanticResultValue("Berlin", "BERLIN"));
            directions.Add(new SemanticResultValue("Prague", "PRAGUE"));
            directions.Add(new SemanticResultValue("Sylt", "SYLT"));
            directions.Add(new SemanticResultValue("Paris", "PARIS"));
            directions.Add(new SemanticResultValue("Great Pyramid", "GREAT"));
            directions.Add(new SemanticResultValue("Effiel Tower", "Tower"));
            directions.Add(new SemanticResultValue("Taj Mahal", "TAj"));
            directions.Add(new SemanticResultValue("Pisa", "PISA"));
            directions.Add(new SemanticResultValue("Venice", "VENICE"));
            //tools
            directions.Add(new SemanticResultValue("Fly", "FLY"));
            directions.Add(new SemanticResultValue("walk", "WALK"));
            directions.Add(new SemanticResultValue("valk", "WALK"));
            directions.Add(new SemanticResultValue("Back", "BACK"));
            directions.Add(new SemanticResultValue("photo", "PHOTO"));
            directions.Add(new SemanticResultValue("PHOTO", "PHOTO"));
            var gb = new GrammarBuilder { Culture = ri.Culture };  // grammar for the recognizer
            gb.Append(directions);
            gb.AppendWildcard();  // allow trailing words after the command without hurting recognition confidence

            var grammar = new Grammar(gb);

            this.speechEngine.LoadGrammar(grammar); // load the grammar into the engine

            this.speechEngine.SpeechRecognized += this.SpeechRecognized;              // called if speech recognized
            this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;       // called if speech rejected

            // let the convertStream know speech is going active
            this.convertStream.SpeechActive = true;

            // feed the recognizer 16 kHz, 16-bit mono PCM (32000 bytes/sec average, 2-byte block align)
            this.speechEngine.SetInputToAudioStream(
                this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
      
        }
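        // NOTE: TryGetKinectRecognizer, SpeechRecognized, and SpeechRejected are referenced
        // above but not shown. Below is a hypothetical sketch of the SpeechRecognized handler:
        // it reads the semantic value registered as the second SemanticResultValue argument
        // and checks the recognition confidence before acting. The 0.3 threshold and the map
        // commands are assumptions for illustration, not part of the original code.
        void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // ignore low-confidence matches so stray words do not trigger commands
            const double ConfidenceThreshold = 0.3;

            if (e.Result.Confidence >= ConfidenceThreshold)
            {
                switch (e.Result.Semantics.Value.ToString())
                {
                    case "ZOOMIN":
                        // e.g. drive the map control to zoom in
                        break;

                    case "ZOOMOUT":
                        // e.g. drive the map control to zoom out
                        break;

                    case "INDIA":
                        // e.g. navigate webView1 to the stored location
                        break;

                    // ... one case per semantic key registered in the Choices above
                }
            }
        }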
Example #3
        /// <summary>
        /// Initializes a new instance of the MainWindow class
        /// </summary>
        public MainWindow()
        {
            try
            {
                InitializeComponent();
                this.kinectSensor = KinectSensor.GetDefault();

                // set IsAvailableChanged event notifier
                //this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

                // open the sensor
                this.kinectSensor.Open();


                // open the reader for the body frames
                this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();

                // set the BodyFrameArrived event notifier
                this.bodyFrameReader.FrameArrived += this.Reader_BodyFrameArrived;

                // initialize the BodyViewer object for displaying tracked bodies in the UI
                this.kinectBodyView = new KinectBodyView(this.kinectSensor);

                // initialize the gesture detection objects for our gestures
                this.gestureDetectorList = new List<GestureDetector>();

                // set our data context objects for display in the UI
                this.DataContext = this;
                this.kinectBodyViewbox.DataContext = this.kinectBodyView;

                // create a single gesture detector (this example tracks gestures for the first body only)
                GestureResultView result = new GestureResultView(0, false, false, 0.0f);
                GestureDetector detector = new GestureDetector(this.kinectSensor, result);
                this.gestureDetectorList.Add(detector);

                // (the original sample also split the gesture results across the first two
                // columns of a content grid, one ContentControl per detector; that layout
                // code is omitted here because this example creates a single detector)
            }
            catch (Exception e)
            {
                // surface the error instead of crashing the window
                System.Windows.MessageBox.Show(e.Message + Environment.NewLine + e.StackTrace);
            }
        }
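        // NOTE: the constructor above comments out the IsAvailableChanged wiring. For
        // reference, a minimal sketch of such a handler, following the Kinect SDK samples,
        // is shown below; the StatusText property it sets is an assumption (a bindable
        // string the UI would display), not part of the original code.
        private void Sensor_IsAvailableChanged(object sender, IsAvailableChangedEventArgs e)
        {
            // report whether the sensor is connected and running
            this.StatusText = this.kinectSensor.IsAvailable
                ? "Kinect is available"
                : "Kinect is not available";
        }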
        /// <summary>
        /// Initializes a new instance of the GestureDetector class along with the gesture frame source and reader
        /// </summary>
        /// <param name="kinectSensor">Active sensor to initialize the VisualGestureBuilderFrameSource object with</param>
        /// <param name="gestureResultView">GestureResultView object to store gesture results of a single body to</param>
        public GestureDetector(KinectSensor kinectSensor, GestureResultView gestureResultView)
        {
            if (kinectSensor == null)
            {
                throw new ArgumentNullException("kinectSensor");
            }

            if (gestureResultView == null)
            {
                throw new ArgumentNullException("gestureResultView");
            }

            this.GestureResultView = gestureResultView;

            // create the vgb source. The associated body tracking ID will be set when a valid body frame arrives from the sensor.
            this.vgbFrameSource = new VisualGestureBuilderFrameSource(kinectSensor, 0);
            this.vgbFrameSource.TrackingIdLost += this.Source_TrackingIdLost;

            // open the reader for the vgb frames
            this.vgbFrameReader = this.vgbFrameSource.OpenReader();
            if (this.vgbFrameReader != null)
            {
                this.vgbFrameReader.IsPaused = true;
                this.vgbFrameReader.FrameArrived += this.Reader_GestureFrameArrived;
            }

            // load the gestures from the gesture database
            using (VisualGestureBuilderDatabase database = new VisualGestureBuilderDatabase(this.gestureDatabase))
            {
                this.vgbFrameSource.AddGestures(database.AvailableGestures);
            }
        }
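        // NOTE: Reader_GestureFrameArrived is registered above but not shown. What follows
        // is a hypothetical sketch following the pattern of the Visual Gesture Builder
        // samples; "gestureName" (the name of the discrete gesture inside the .gbd database)
        // and the UpdateGestureResult method on GestureResultView are assumptions.
        private void Reader_GestureFrameArrived(object sender, VisualGestureBuilderFrameArrivedEventArgs e)
        {
            using (VisualGestureBuilderFrame frame = e.FrameReference.AcquireFrame())
            {
                if (frame == null)
                {
                    return; // no gesture frame available
                }

                // map of each discrete gesture to its latest detection result
                IReadOnlyDictionary<Gesture, DiscreteGestureResult> results = frame.DiscreteGestureResults;
                if (results == null)
                {
                    return;
                }

                foreach (Gesture gesture in this.vgbFrameSource.Gestures)
                {
                    if (gesture.Name.Equals(this.gestureName) && gesture.GestureType == GestureType.Discrete)
                    {
                        DiscreteGestureResult result;
                        if (results.TryGetValue(gesture, out result))
                        {
                            // push the detection state and confidence to the UI object
                            this.GestureResultView.UpdateGestureResult(true, result.Detected, result.Confidence);
                        }
                    }
                }
            }
        }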