public SkeletonFaceTracking(KinectSensor kinect)
        {
            // Sensor plus a conflating publisher bound to port 33406 for
            // streaming skeleton/face data (conflate keeps only the newest message).
            this.kinectSensor      = kinect;
            this.skeletonPublisher = new NetworkPublisher();
            this.skeletonPublisher.SetConflate();
            this.skeletonPublisher.Bind("33406");

            this.coordinateMapper = this.kinectSensor.CoordinateMapper;

            // Body reader starts paused so no frames arrive before we ask for them.
            this.bodyFrameReader          = this.kinectSensor.BodyFrameSource.OpenReader();
            this.bodyFrameReader.IsPaused = true;

            // Joint smoothing filter, configured with the shared smoothing parameter.
            this.filter = new KinectJointFilter(smoothingParam, smoothingParam, smoothingParam);
            this.filter.Init(smoothingParam, smoothingParam, smoothingParam);

            // Pre-sized lookup tables (25 = Kinect joint count — TODO confirm; 11 = face feature count — TODO confirm).
            this.dicoPos      = new Dictionary <JointType, object>(25);
            this.jointPoints  = new Dictionary <JointType, Point>(25);
            this.dicoBodies   = new Dictionary <ulong, Dictionary <JointType, object> >(25);
            this.dicoFaces    = new Dictionary <ulong, Dictionary <String, String> >(11);
            this.dicoFeatures = new Dictionary <string, string>(11);
            this.dicoOr       = new Dictionary <JointType, Vector4>(25);
            this.qChild       = new Quaternion();
            this.qParent      = new Quaternion();

            this.maxBodyCount = this.kinectSensor.BodyFrameSource.BodyCount;

            // Face frame results we want the face pipeline to report.
            FaceFrameFeatures requestedFeatures = FaceFrameFeatures.BoundingBoxInColorSpace
                                                  | FaceFrameFeatures.PointsInColorSpace
                                                  | FaceFrameFeatures.RotationOrientation
                                                  | FaceFrameFeatures.FaceEngagement
                                                  | FaceFrameFeatures.Glasses
                                                  | FaceFrameFeatures.Happy
                                                  | FaceFrameFeatures.LeftEyeClosed
                                                  | FaceFrameFeatures.RightEyeClosed
                                                  | FaceFrameFeatures.LookingAway
                                                  | FaceFrameFeatures.MouthMoved
                                                  | FaceFrameFeatures.MouthOpen;

            // One face source/reader pair per possible body in the field of view.
            this.faceFrameSources = new FaceFrameSource[this.maxBodyCount];
            this.faceFrameReaders = new FaceFrameReader[this.maxBodyCount];

            for (int faceIndex = 0; faceIndex < this.maxBodyCount; faceIndex++)
            {
                // Tracking id 0 means "nobody yet"; it is assigned once a body is tracked.
                FaceFrameSource source = new FaceFrameSource(this.kinectSensor, 0, requestedFeatures);
                this.faceFrameSources[faceIndex] = source;

                // Readers start paused, mirroring the body reader above.
                this.faceFrameReaders[faceIndex]          = source.OpenReader();
                this.faceFrameReaders[faceIndex].IsPaused = true;
            }

            // Per-face slot for the most recent face frame result.
            this.faceFrameResults = new FaceFrameResult[this.maxBodyCount];
        }
Example #2
0
        public SkeletonTracking(KinectSensor kinect, NetworkPublisher network)
        {
            // Keep references to the sensor and the externally-owned publisher.
            this.kinectSensor = kinect;
            this.network      = network;

            this.coordinateMapper = this.kinectSensor.CoordinateMapper;
            this.bodyFrameReader  = this.kinectSensor.BodyFrameSource.OpenReader();

            // Joint smoothing filter, configured with the shared smoothing parameter.
            this.filter = new KinectJointFilter(smoothingParam, smoothingParam, smoothingParam);
            this.filter.Init(smoothingParam, smoothingParam, smoothingParam);

            // Pre-sized lookup tables (25 = Kinect joint count — TODO confirm).
            this.dicoPos     = new Dictionary <JointType, object>(25);
            this.jointPoints = new Dictionary <JointType, Point>(25);
            this.dicoBodies  = new Dictionary <ulong, Dictionary <JointType, object> >(25);
            this.dicoOr      = new Dictionary <JointType, Vector4>(25);

            // Scratch quaternions reused while computing joint orientations.
            this.qChild  = new Quaternion();
            this.qParent = new Quaternion();
        }