public VideoShot(ColorImageProcesser processer, MainWindow window, int videoNum,
            KinectSensor kinectDevice,
            int dWidth, int dHeight,
            int cWidth, int cHeight,
            DepthImageFormat dImageFormat, ColorImageFormat cImageFormat)
        {
            parentProcesser = processer;
            videoName = PadLeft(videoNum);
            _windowUI = window;
            _kinectDevice = kinectDevice;

            depthFrameWidth = dWidth;
            depthFrameHeight = dHeight;

            colorFrameWidth = cWidth;
            colorFrameHeight = cHeight;

            depthFrameStride = depthFrameWidth * BytesPerPixel;
            colorFrameStride = colorFrameWidth * BytesPerPixel;

            depthImageFormat = dImageFormat;
            colorImageFormat = cImageFormat;

            screenHeight = SystemParameters.PrimaryScreenHeight;
            screenWidth = SystemParameters.PrimaryScreenWidth;

            Start();
        }
 public CoordinateConverter(IEnumerable<byte> kinectParams, ColorImageFormat cif, 
                     DepthImageFormat dif)
 {
     mapper = new CoordinateMapper(kinectParams);
     this.cif = cif;
     this.dif = dif;
 }
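A minimal usage sketch, assuming a connected sensor and SDK 1.x, where ColorToDepthRelationalParameters supplies the byte blob this constructor expects (the sensor variable and format choices are illustrative assumptions):

        // Usage sketch: build a converter from a sensor's serialized mapper parameters.
        KinectSensor sensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);
        var converter = new CoordinateConverter(
            sensor.CoordinateMapper.ColorToDepthRelationalParameters,
            ColorImageFormat.RgbResolution640x480Fps30,
            DepthImageFormat.Resolution640x480Fps30);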
        /// <summary>
        /// Initializes a new instance of the KinectChooser class.
        /// </summary>
        /// <param name="game">The related game object.</param>
        /// <param name="colorFormat">The desired color image format.</param>
        /// <param name="depthFormat">The desired depth image format.</param>
        public KinectChooser(Game game, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
            : base(game)
        {
            this.colorImageFormat = colorFormat;
            this.depthImageFormat = depthFormat;

            this.nearMode = false;
            this.seatedMode = false;
            this.SimulateMouse = false;

            if (!Game1.SIMULATE_NO_KINECT)
            {
                KinectSensor.KinectSensors.StatusChanged += this.KinectSensors_StatusChanged;
                this.DiscoverSensor();
            }

            this.statusMap.Add(KinectStatus.Undefined, "Not connected, or in use");
            this.statusMap.Add(KinectStatus.Connected, string.Empty);
            this.statusMap.Add(KinectStatus.DeviceNotGenuine, "Device Not Genuine");
            this.statusMap.Add(KinectStatus.DeviceNotSupported, "Device Not Supported");
            this.statusMap.Add(KinectStatus.Disconnected, "Required");
            this.statusMap.Add(KinectStatus.Error, "Error");
            this.statusMap.Add(KinectStatus.Initializing, "Initializing...");
            this.statusMap.Add(KinectStatus.InsufficientBandwidth, "Insufficient Bandwidth");
            this.statusMap.Add(KinectStatus.NotPowered, "Not Powered");
            this.statusMap.Add(KinectStatus.NotReady, "Not Ready");
        }
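Since KinectChooser derives from a game component (note the base(game) call), a typical registration sketch from the owning Game subclass looks like this; the field name and format choices are assumptions:

        // Usage sketch inside the Game subclass: registering the chooser as a
        // component lets XNA drive its Update/Draw automatically.
        this.kinectChooser = new KinectChooser(this,
            ColorImageFormat.RgbResolution640x480Fps30,
            DepthImageFormat.Resolution640x480Fps30);
        this.Components.Add(this.kinectChooser);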
Example #4
        /// <summary>
        /// Initializes a new instance of the FaceTracker class from a reference to the Kinect device.
        /// </summary>
        /// <param name="sensor">Reference to the Kinect sensor instance.</param>
        public FaceTracker(KinectSensor sensor)
        {
            if (sensor == null)
            {
                throw new ArgumentNullException("sensor");
            }

            if (!sensor.ColorStream.IsEnabled)
            {
                throw new InvalidOperationException("Color stream is not enabled yet.");
            }

            if (!sensor.DepthStream.IsEnabled)
            {
                throw new InvalidOperationException("Depth stream is not enabled yet.");
            }

            this.operationMode = OperationMode.Kinect;
            this.coordinateMapper = sensor.CoordinateMapper;
            this.initializationColorImageFormat = sensor.ColorStream.Format;
            this.initializationDepthImageFormat = sensor.DepthStream.Format;

            var newColorCameraConfig = new CameraConfig(
                (uint)sensor.ColorStream.FrameWidth,
                (uint)sensor.ColorStream.FrameHeight,
                sensor.ColorStream.NominalFocalLengthInPixels,
                FaceTrackingImageFormat.FTIMAGEFORMAT_UINT8_B8G8R8X8);
            var newDepthCameraConfig = new CameraConfig(
                (uint)sensor.DepthStream.FrameWidth,
                (uint)sensor.DepthStream.FrameHeight,
                sensor.DepthStream.NominalFocalLengthInPixels,
                FaceTrackingImageFormat.FTIMAGEFORMAT_UINT16_D13P3);

            this.Initialize(newColorCameraConfig, newDepthCameraConfig, IntPtr.Zero, IntPtr.Zero, this.DepthToColorCallback);
        }
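The guard clauses above mean both streams must already be enabled; a minimal setup sketch (the sensor variable and format choices are assumptions):

        // Setup sketch: enable both streams before constructing the FaceTracker,
        // otherwise the guard clauses above throw InvalidOperationException.
        sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
        sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
        sensor.Start();
        var tracker = new FaceTracker(sensor);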
        /////////////////////////////////////////
        // CONSTRUCTOR(S)/DESTRUCTOR(S)
        /////////////////////////////////////////
        public KinectManager(ColorImageFormat p_colour_format,
                             DepthImageFormat p_depth_format,
                             KinectGame_WindowsXNA p_game)
        {
            // Initialise the Kinect selector...
            this.colour_image_format = p_colour_format;
            this.depth_image_format = p_depth_format;
            this.root_game = p_game;

            this.colour_stream = null;
            this.depth_stream = null;
            this.skeleton_stream = null;

            this.debug_video_stream_dimensions = new Vector2(200, 150);

            status_map = new Dictionary<KinectStatus, string>();
            KinectSensor.KinectSensors.StatusChanged += this.KinectSensorsStatusChanged; // handler function for changes in the Kinect system
            this.DiscoverSensor();

            this.status_map.Add(KinectStatus.Undefined, "UNKNOWN STATUS MESSAGE");
            this.status_map.Add(KinectStatus.Connected, "Connected.");
            this.status_map.Add(KinectStatus.DeviceNotGenuine, "Detected device is not genuine!");
            this.status_map.Add(KinectStatus.DeviceNotSupported, "Detected device is not supported!");
            this.status_map.Add(KinectStatus.Disconnected, "Disconnected/Device required!");
            this.status_map.Add(KinectStatus.Error, "Error in Kinect sensor!");
            this.status_map.Add(KinectStatus.Initializing, "Initialising Kinect sensor...");
            this.status_map.Add(KinectStatus.InsufficientBandwidth, "Insufficient bandwidth for Kinect sensor!");
            this.status_map.Add(KinectStatus.NotPowered, "Detected device is not powered!");
            this.status_map.Add(KinectStatus.NotReady, "Detected device is not ready!");

            // Load the status message font:
            this.msg_font = this.root_game.Content.Load<SpriteFont>("Fonts/Segoe16");
            this.msg_label_pos = new Vector2(4.0f, 2.0f);
        }
        public DepthFrameClass(KinectSensor sensor, DepthImageFormat depthFormat, float depthScale)
        {
            switch (depthFormat)
            {
                case DepthImageFormat.Resolution640x480Fps30:
                    this.FrameWidth = 640;
                    this.FrameHeight = 480;
                    break;
                case DepthImageFormat.Resolution320x240Fps30:
                    this.FrameWidth = 320;
                    this.FrameHeight = 240;
                    break;
                case DepthImageFormat.Resolution80x60Fps30:
                    this.FrameWidth = 80;
                    this.FrameHeight = 60;
                    break;
                default:
                    throw new FormatException();
            }

            this.kinectSensor = sensor;
            this.DepthScale = depthScale;

            this.AllocateMemory();
        }
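A usage sketch; the depth-scale value here is a hypothetical choice (e.g. converting raw millimeters to meters):

        // Usage sketch: only the three Kinect 1.x depth resolutions are accepted;
        // any other DepthImageFormat value makes the constructor above throw.
        var depthFrame = new DepthFrameClass(sensor,
            DepthImageFormat.Resolution640x480Fps30, 0.001f);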
Example #7
        private KinectHelper(TransformSmoothParameters tsp, bool near = false, 
                             ColorImageFormat colorFormat = ColorImageFormat.RgbResolution1280x960Fps12, 
                             DepthImageFormat depthFormat = DepthImageFormat.Resolution640x480Fps30)
        {
            _kinectSensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);

            if (_kinectSensor == null)
            {
                throw new Exception("No Kinect-Sensor found.");
            }
            if (near)
            {
                _kinectSensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                _kinectSensor.DepthStream.Range = DepthRange.Near;
                _kinectSensor.SkeletonStream.EnableTrackingInNearRange = true;
            }

            DepthImageFormat = depthFormat;
            ColorImageFormat = colorFormat;

            _kinectSensor.SkeletonStream.Enable(tsp);
            _kinectSensor.ColorStream.Enable(colorFormat);
            _kinectSensor.DepthStream.Enable(depthFormat);
            _kinectSensor.AllFramesReady += AllFramesReady;

            _kinectSensor.Start();
            _faceTracker = new FaceTracker(_kinectSensor);
        }
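The private constructor suggests this helper is a singleton; a plausible accessor sketch (the Instance member and field name are assumptions, not shown in the source):

        // Hypothetical singleton accessor implied by the private constructor;
        // the field and property names here are assumptions.
        private static KinectHelper _instance;
        public static KinectHelper Instance
        {
            get { return _instance ?? (_instance = new KinectHelper(new TransformSmoothParameters())); }
        }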
        public void ProcessData(KinectSensor kinectSensor, ColorImageFormat colorImageFormat,
                                 byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage,
                                 Skeleton[] skeletons, int skeletonFrameNumber)
        {
            if (skeletons == null)
            {
                return;
            }

            // Update the list of trackers and the trackers with the current frame information
            foreach (Skeleton skeleton in skeletons)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of any skeleton, tracked or untracked.
                    if (!this._trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this._trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this._trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(kinectSensor, colorImageFormat, colorImage, depthImageFormat,
                                                         depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrameNumber;
                    }
                }
            }

            RemoveOldTrackers(skeletonFrameNumber);
        }
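RemoveOldTrackers is not shown in this listing; a plausible sketch (the threshold and the pruning rule are assumptions) drops trackers whose skeleton has not been seen recently, using the LastTrackedFrame value recorded above:

        // Hypothetical sketch of RemoveOldTrackers: drop face trackers whose skeleton
        // has been absent for more than MaxMissedFrames frames (requires System.Linq).
        private void RemoveOldTrackers(int currentFrameNumber)
        {
            const int MaxMissedFrames = 100; // assumed threshold
            var stale = this._trackedSkeletons
                .Where(kvp => currentFrameNumber - kvp.Value.LastTrackedFrame > MaxMissedFrames)
                .Select(kvp => kvp.Key)
                .ToList();
            foreach (int trackingId in stale)
            {
                this._trackedSkeletons.Remove(trackingId);
            }
        }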
        /// <summary>
        /// Map PointSkeleton3D to PointDepth3D
        /// </summary>
        /// <param name="pointSkleton3D"></param>
        /// <param name="depthImageFormat"></param>
        /// <returns></returns>
        public PointDepth3D MapSkeletonPointToDepthPoint(PointSkeleton3D pointSkeleton3D, DepthImageFormat depthImageFormat)
        {
            SkeletonPoint point = new SkeletonPoint();
            point.X = pointSkeleton3D.X;
            point.Y = pointSkeleton3D.Y;
            point.Z = pointSkeleton3D.Z;

            return new PointDepth3D(mapper.MapSkeletonPointToDepthPoint(point, depthImageFormat));
        }
        /// <summary>
        /// Map PointDepth3D to PointSkeleton3D
        /// </summary>
        /// <param name="depthImageFormat"></param>
        /// <param name="pointDepth3D"></param>
        /// <returns></returns>
        public PointSkeleton3D MapDepthPointToSkeletonPoint(DepthImageFormat depthImageFormat, PointDepth3D pointDepth3D)
        {
            DepthImagePoint point = new DepthImagePoint();
            point.X = pointDepth3D.X;
            point.Y = pointDepth3D.Y;
            point.Depth = pointDepth3D.Depth;

            return new PointSkeleton3D(mapper.MapDepthPointToSkeletonPoint(depthImageFormat, point));
        }
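A round-trip usage sketch for the two mapping methods above (the converter and head variables are assumed instances):

        // Round-trip sketch: skeleton space -> depth pixel coordinates -> skeleton space.
        PointDepth3D depthPoint = converter.MapSkeletonPointToDepthPoint(
            head, DepthImageFormat.Resolution640x480Fps30);
        PointSkeleton3D roundTrip = converter.MapDepthPointToSkeletonPoint(
            DepthImageFormat.Resolution640x480Fps30, depthPoint);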
Example #11
        public Kinect(Game game, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
            : base(game)
        {
            this.colorImageFormat = colorFormat;
            this.depthImageFormat = depthFormat;

            KinectSensor.KinectSensors.StatusChanged += this.KinectSensors_StatusChanged;
            this.DiscoverSensor();
        }
 /// <summary>
 /// Map PointDepth3D List to PointSkeleton3D List
 /// </summary>
 /// <param name="depthImageFormat"></param>
 /// <param name="pointDepth3D"></param>
 /// <returns></returns>
 public List<PointSkeleton3D> MapDepthPointsToSkeletonPoints(DepthImageFormat depthImageFormat, List<PointDepth3D> pointDepth3D)
 {
     List<PointSkeleton3D> ret = new List<PointSkeleton3D>();
     foreach (var element in pointDepth3D)
     {
         ret.Add(MapDepthPointToSkeletonPoint(depthImageFormat, element));
     }
     return ret;
 }
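The loop above is equivalent to a LINQ projection; an alternative sketch (the Linq-suffixed name is hypothetical, and a System.Linq using directive is required):

        // Equivalent LINQ sketch of the method above:
        public List<PointSkeleton3D> MapDepthPointsToSkeletonPointsLinq(
            DepthImageFormat depthImageFormat, List<PointDepth3D> pointDepth3D)
        {
            return pointDepth3D
                .Select(p => MapDepthPointToSkeletonPoint(depthImageFormat, p))
                .ToList();
        }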
        protected override void OnKinectChanged(KinectSensor oldKinectSensor, KinectSensor newKinectSensor)
        {
            if (oldKinectSensor != null) {
                oldKinectSensor.DepthFrameReady -= this.DepthImageReady;
                kinectDepthImage.Source = null;
                this.lastImageFormat = DepthImageFormat.Undefined;
            }

            if (newKinectSensor != null && newKinectSensor.Status == KinectStatus.Connected) {
                ResetFrameRateCounters();

                newKinectSensor.DepthFrameReady += this.DepthImageReady;
            }
        }
        /// <summary>
        /// Get the depth image size from the depth image format.
        /// </summary>
        /// <param name="imageFormat">The depth image format.</param>
        /// <returns>The width and height of the depth image format.</returns>
        public static Size GetDepthSize(DepthImageFormat imageFormat)
        {
            switch (imageFormat)
            {
                case DepthImageFormat.Resolution320x240Fps30:
                    return new Size(320, 240);

                case DepthImageFormat.Resolution640x480Fps30:
                    return new Size(640, 480);

                case DepthImageFormat.Resolution80x60Fps30:
                    return new Size(80, 60);
                case DepthImageFormat.Undefined:
                    return new Size(0, 0);
            }

            throw new ArgumentOutOfRangeException("imageFormat");
        }
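A usage sketch deriving a depth buffer length from the format, assuming this method lives on the FormatHelper class referenced elsewhere in these examples:

        // Usage sketch: size a depth pixel buffer from the format.
        Size size = FormatHelper.GetDepthSize(DepthImageFormat.Resolution320x240Fps30);
        int pixelCount = (int)(size.Width * size.Height); // 76,800 for 320x240
        short[] depthBuffer = new short[pixelCount];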
Example #15
        /// <summary>
        /// Initializes a new instance of the KinectChooser class.
        /// </summary>
        /// <param name="game">The related game object.</param>
        /// <param name="colorFormat">The desired color image format.</param>
        /// <param name="depthFormat">The desired depth image format.</param>
        public KinectChooser(Game game, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
            : base(game)
        {
            this.colorImageFormat = colorFormat;
            this.depthImageFormat = depthFormat;

            KinectSensor.KinectSensors.StatusChanged += this.KinectSensors_StatusChanged;
            this.DiscoverSensor();

            this.statusMap.Add(KinectStatus.Connected, string.Empty);
            this.statusMap.Add(KinectStatus.DeviceNotGenuine, "Device Not Genuine");
            this.statusMap.Add(KinectStatus.DeviceNotSupported, "Device Not Supported");
            this.statusMap.Add(KinectStatus.Disconnected, "Required");
            this.statusMap.Add(KinectStatus.Error, "Error");
            this.statusMap.Add(KinectStatus.Initializing, "Initializing...");
            this.statusMap.Add(KinectStatus.InsufficientBandwidth, "Insufficient Bandwidth");
            this.statusMap.Add(KinectStatus.NotPowered, "Not Powered");
            this.statusMap.Add(KinectStatus.NotReady, "Not Ready");
        }
Example #16
        public void Render(CompositePlayer[] players, KinectSensor sensor, DepthImageFormat format)
        {
            var colorizer = new BoneColorizer();
            var data = players.Where(p => p.PlayerId > 0)
                .Select(player =>
                {
                    var verts = player.DepthPoints.Select(dp => sensor.CoordinateMapper.MapDepthPointToSkeletonPoint(format, dp))
                        .Select(sp => new Vector3(sp.X, sp.Y, sp.Z)).ToArray();
                    Vector3[] normals = player.DepthPoints.Select(_ => new Vector3(0, 0, -1)).ToArray();
                    Color[] colors = player.DepthPoints.Select(_ => Color.Gray).ToArray();

                    if (player.Skeleton != null && player.Skeleton.TrackingState == Microsoft.Kinect.SkeletonTrackingState.Tracked)
                    {
                        var bones = Bone.Interpret(player.Skeleton);
                        var interp = verts.Select(v =>
                            {
                                Bone[] close;
                                double[] scaling;
                                bones.Interpolate(v, out close, out scaling);
                                return new { bones = close, scaling = scaling, v = v };
                            }).ToArray();
                        colors = interp.Select(item => colorizer.Colorize(item.bones, item.scaling)).ToArray();
                        normals = interp.Select(item => Bone.Normal(item.v, item.bones, item.scaling)).ToArray();
                        //normals = verts.Select(v => Normal(Closest(player.Skeleton.Joints, v), v)).ToArray();
                        //colors = verts.Select(v => Closest(player.Skeleton.Joints, v)).Select(j => Colorize(j)).ToArray();
                    }
                    return new { Vertices = verts, Normals = normals, Colors = colors };
                }).ToArray();
            if (data.Length > 0)
            {
                output.SetPositions(data.SelectMany(d => d.Vertices).ToArray(),
                    data.SelectMany(d => d.Normals).ToArray(),
                    data.SelectMany(d => d.Colors).ToArray());
            }
            else
            {
                output.SetPositions(new float[0][]);
            }
        }
Example #17
        private KinectHelper(TransformSmoothParameters tsp, bool near, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
        {
            _kinectSensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);

            if (_kinectSensor == null)
            {
                throw new Exception("No Kinect sensor found.");
            }
            if (near)
            {
                _kinectSensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                _kinectSensor.DepthStream.Range = DepthRange.Near;
                _kinectSensor.SkeletonStream.EnableTrackingInNearRange = true;
            }
            _kinectSensor.SkeletonStream.Enable(tsp);
            _kinectSensor.ColorStream.Enable(colorFormat);
            _kinectSensor.DepthStream.Enable(depthFormat);
            _kinectSensor.AllFramesReady += AllFramesReady;

            _kinectSensor.Start();

            DepthImageFormat = depthFormat;
            ColorImageFormat = colorFormat;
        }
        internal void ProcessFrame(CoordinateMapper mapper, Skeleton skeletonOfInterest, DepthImageFormat depthImageFormat)
        {
            _joints.Clear();
            if (skeletonOfInterest != null)
            {
                var size = FormatHelper.GetDepthSize(depthImageFormat);

                var depthWidth = (int)size.Width;

                var headJoint = skeletonOfInterest.Joints[JointType.Head];
                var neckJoint = skeletonOfInterest.Joints[JointType.ShoulderCenter];

                var _headPoint = mapper.MapSkeletonPointToDepthPoint(headJoint.Position, depthImageFormat);
                var _neckPoint = mapper.MapSkeletonPointToDepthPoint(neckJoint.Position, depthImageFormat);

                _headPoint.X = depthWidth - _headPoint.X;
                _neckPoint.X = depthWidth - _neckPoint.X;

                _joints.Add(_headPoint);
                _joints.Add(_neckPoint);

            }
            RaiseFrameUpdated();
        }
        public ColorImageProcesser(MainWindow window, KinectSensor kinectDevice,
            int depthDataLength, int colorDataLength,
            int dWidth, int dHeight,
            int cWidth, int cHeight,
            DepthImageFormat dImageFormat, ColorImageFormat cImageFormat)
        {
            _windowUI = window;
            _kinectDevice = kinectDevice;

            depthPixelData = new short[depthDataLength];
            colorPixelData = new byte[colorDataLength];

            depthFrameWidth = dWidth;
            depthFrameHeight = dHeight;

            colorFrameWidth = cWidth;
            colorFrameHeight = cHeight;

            depthFrameStride = depthFrameWidth * BytesPerPixel;
            colorFrameStride = colorFrameWidth * BytesPerPixel;

            depthImageFormat = dImageFormat;
            colorImageFormat = cImageFormat;
        }
        public void ProcessFrame(KinectSensor sensor, byte[] colorImage, ColorImageFormat colorImageFormat, DepthImageFrame depthFrame, short[] depthImage, DepthImageFormat depthImageFormat, Skeleton[] skeletonData, SkeletonFrame skeletonFrame)
        {
            //Console.WriteLine("N: ---------");
            coordinates.Clear();
            int detectedFace          = 0;
            int trackedSkeletonsCount = 0;

            int playerIndex = -1;

            for (int i = 0; i < skeletonData.Length; i++)
            //foreach (Skeleton skeleton in skeletonData)
            {
                Skeleton skeleton = skeletonData[i];
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of any skeleton, tracked or untracked.
                    if (!trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeleton.Joints[JointType.Head].Position);
                    ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, colorImageFormat);

                    Coordinates2D c = new Coordinates2D();

                    playerIndex = i + 1;

                    c.X           = colorPoint.X;
                    c.Y           = colorPoint.Y;
                    c.Width       = 0;
                    c.Height      = 0;
                    c.LeftEyeX    = 0;
                    c.LeftEyeY    = 0;
                    c.RightEyeX   = 0;
                    c.RightEyeY   = 0;
                    c.PlayerIndex = playerIndex;

                    trackedSkeletonsCount++;

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (!scannedIdentities.Contains(skeleton.TrackingId) &&
                        detectedFace < 1 &&
                        trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        detectedFace++;
                        scannedIdentities.Add(skeleton.TrackingId);


                        skeletonFaceTracker.OnFrameReady(sensor, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        Coordinates2D? realCoords = skeletonFaceTracker.GetFaceCoordinates();
                        if (realCoords.HasValue)
                        {
                            c             = realCoords.Value;
                            c.PlayerIndex = playerIndex;
                        }
                    }

                    c.TrackingId = skeleton.TrackingId;
                    coordinates.Add(c);
                }
            }

            if (scannedIdentities.Count > 0 && scannedIdentities.Count >= trackedSkeletonsCount)
            {
                scannedIdentities.Clear();
                //Console.WriteLine("Clearing");
            }

            RemoveOldTrackers(skeletonFrame.FrameNumber);

            //if (coordinates.Count > 0)
            {
                int[] identities = new int[coordinates.Count];


                //  stopwatch.Reset();
                //  stopwatch.Start();
                double[] distances = new double[coordinates.Count * 8];
                this.ProcessImage(colorImage, GetWidth(colorImageFormat), GetHeight(colorImageFormat), depthImage, 640, 480, coordinates.ToArray(), identities, distances);
                //  stopwatch.Stop();
                //       foreach (int i in identities)
                //       {
                //           Console.WriteLine("Recognized: {0} (in {1} millis - {2} ticks)", i, stopwatch.ElapsedMilliseconds, stopwatch.ElapsedTicks);
                //       }
            }
        }
Example #21
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);
                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            //    only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        this.facePoints3DRaw = frame.Get3DShape();
                        this.facePoints      = frame.GetProjected3DShape();
                        animationUnitsRaw    = frame.GetAnimationUnitCoefficients();
                    }
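                    // Note: the rotation and shape reads below run even when
                    // TrackSuccessful is false, so they may carry stale values
                    // from the last successful track.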
                    x              = frame.Rotation.X;
                    y              = frame.Rotation.Y;
                    z              = frame.Rotation.Z;
                    facePointS3D   = this.facePoints3DRaw;
                    animationUnits = animationUnitsRaw;
                    //Debug.WriteLine(animationUnits[AnimationUnit.JawLower]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.BrowLower]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.BrowRaiser]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.JawLower]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.LipCornerDepressor]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.LipRaiser]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.LipStretcher]);
                    //Debug.WriteLine(frame.Translation.ToString());
                    //Debug.WriteLine(frame.Rotation.ToString());
                    //this.facePoints[FeaturePoint.AboveChin].X+2;
                    //Debug.WriteLine(frame.Translation.X.ToString());
                    //Debug.WriteLine(frame.Translation.Y.ToString());
                    //Debug.WriteLine(frame.Translation.Z.ToString());
                }
            }
Example #22
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            public void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                //No Touchy
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;
                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    return;
                }
                if (faceTracker == null)
                {
                    faceTracker = new FaceTracker(kinectSensor);
                }
                frame = this.faceTracker.Track(
                    colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);
                this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                if (this.lastFaceTrackSucceeded)
                {
                    if (faceTriangles == null)
                    {
                        faceTriangles = frame.GetTriangles();
                    }
                    this.facePoints = frame.GetProjected3DShape();



                    //Touchy

                    //Assign Reference points
                    this.absfacePoints = frame.Get3DShape();
                    leftForehead       = this.absfacePoints[FeaturePoint.TopLeftForehead];
                    rightForehead      = this.absfacePoints[FeaturePoint.TopRightForehead];
                    jaw           = this.absfacePoints[FeaturePoint.BottomOfChin];
                    faceRotationX = frame.Rotation.X;
                    faceRotationY = frame.Rotation.Y;
                    faceRotationZ = frame.Rotation.Z;

                    //Calculate Reference Points
                    foreheadReferencePointX = ((rightForehead.X - leftForehead.X) / 2);
                    foreheadReferencePointY = ((rightForehead.Y - leftForehead.Y) / 2);
                    foreheadReferencePointZ = ((rightForehead.Z - leftForehead.Z) / 2);

                    //Set Animation Units
                    AUCoeff        = frame.GetAnimationUnitCoefficients();
                    jawLowererAU   = AUCoeff[AnimationUnit.JawLower];
                    lipStretcherAU = AUCoeff[AnimationUnit.LipStretcher];
                    browRaiserAU   = AUCoeff[AnimationUnit.BrowRaiser];
                    setJawData(jaw.Y, leftForehead.Y, rightForehead.Y, jawLowererAU, lipStretcherAU);

                    rotations = new float[5];
                    //set up matlab
                    matlab = new MLApp.MLApp();
                    matlab.Execute(@"cd C:\Users\Bala\Documents\MATLAB");
                    result = null;

                    //get rotation values
                    rotations[0] = faceRotationX;
                    rotations[1] = faceRotationY;
                    rotations[2] = faceRotationZ;
                    rotations[3] = jawLowererAU;
                    rotations[4] = lipStretcherAU;
                    //Set up GlovePie
                    OscPacket.LittleEndianByteOrder = false;
                    IPEndPoint myapp    = new IPEndPoint(IPAddress.Loopback, 1944);
                    IPEndPoint glovepie = new IPEndPoint(IPAddress.Loopback, 1945);
                    Console.WriteLine(browRaiserAU);

                    matlab.Feval("nnW", 1, out result, rotations[0]);
                    object[] resW = result as object[];
                    nnoutput = (int)((float)resW[0] + 0.5f);
                    if (nnoutput == 1)
                    {
                        commandtoSend = 1;
                    }
                    else
                    {
                        result = null;
                        matlab.Feval("nnA", 1, out result, rotations[1]);
                        object[] resA = result as object[];
                        nnoutput = (int)((float)resA[0] + 0.5f);
                        if (nnoutput == 1)
                        {
                            commandtoSend = 2;
                        }
                        else
                        {
                            result = null;
                            matlab.Feval("nnS", 1, out result, rotations[0]);
                            object[] resS = result as object[];
                            nnoutput = (int)((float)resS[0] + 0.5f);
                            if (nnoutput == 1)
                            {
                                commandtoSend = 3;
                            }
                            else
                            {
                                result = null;
                                matlab.Feval("nnd", 1, out result, rotations[1]);
                                object[] resD = result as object[];
                                nnoutput = (int)((float)resD[0] + 0.5f);
                                if (nnoutput == 1)
                                {
                                    commandtoSend = 4;
                                }
                                else
                                {
                                    result = null;
                                    matlab.Feval("nnLC", 1, out result, rotations[2]);
                                    object[] resLC = result as object[];
                                    nnoutput = (int)((float)resLC[0] + 0.5f);
                                    if (nnoutput == 1)
                                    {
                                        commandtoSend = 5;
                                    }
                                    else
                                    {
                                        result = null;
                                        matlab.Feval("nnRC", 1, out result, rotations[2]);
                                        object[] resRC = result as object[];
                                        nnoutput = (int)((float)resRC[0] + 0.5f);
                                        if (nnoutput == 1)
                                        {
                                            commandtoSend = 6;
                                        }
                                        else
                                        {
                                            result = null;
                                            if (jawLowererAU > 0.7)
                                            {
                                                commandtoSend = 7;
                                            }

                                            /*
                                             * matlab.Feval("nnSpace", 1, out result, rotations[3]);
                                             * object[] resSpace = result as object[];
                                             * nnoutput = (int)((float)resSpace[0] + 0.5f);
                                             * if (nnoutput == 1)
                                             * {
                                             *  commandtoSend = 7;
                                             * }*/
                                            else
                                            {
                                                result = null;
                                                if (browRaiserAU > 0.4)
                                                {
                                                    commandtoSend = 8;
                                                }
                                                else
                                                {
                                                    result        = null;
                                                    commandtoSend = 0;
                                                }

                                                /*result = null;
                                                 * matlab.Feval("nnMiddle", 1, out result, lipStretcherAU);
                                                 * object[] resMiddle = result as object[];
                                                 * nnoutput = (int)((float)resMiddle[0] + 0.5f);
                                                 * if (nnoutput == 1)
                                                 * {
                                                 *  commandtoSend = 8;
                                                 * }
                                                 * else
                                                 * {
                                                 *  result = null;
                                                 *  commandtoSend = 0;
                                                 * }*/
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                    //Console.WriteLine("Iteration Complete");
                    switch (commandtoSend)
                    {
                    case 0:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 1:
                        msg = new OscMessage(myapp, "/move/w", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 2:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 3:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 4:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 5:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 6:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 7:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 8:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 10.0f);
                        msg.Send(glovepie);
                        break;
                    }
                }
            }
        /// <summary>
        /// Starts face tracking from Kinect input data. Track() detects a face
        /// based on the passed parameters, then identifies characteristic
        /// points and begins tracking. The first call to this API is more
        /// expensive, but if the tracking succeeds then subsequent calls use
        /// the tracking information generated from first call and is faster,
        /// until a tracking failure happens.
        /// </summary>
        /// <param name="colorImageFormat">format of the colorImage array</param>
        /// <param name="colorImage">Input color image frame retrieved from Kinect sensor</param>
        /// <param name="depthImageFormat">format of the depthImage array</param>
        /// <param name="depthImage">Input depth image frame retrieved from Kinect sensor</param>
        /// <param name="skeletonOfInterest">Input skeleton to track. Head & shoulder joints in the skeleton are used to calculate the head vector</param>
        /// <param name="regionOfInterest">Region of interest in the passed video frame where the face tracker should search for a face to initiate tracking.
        /// Passing Rectangle.Empty (default) causes the entire frame to be searched.</param>
        /// <returns>Returns computed face tracking results for this image frame</returns>
        private FaceTrackFrame Track(
            ColorImageFormat colorImageFormat,
            byte[] colorImage,
            DepthImageFormat depthImageFormat,
            short[] depthImage,
            Skeleton skeletonOfInterest,
            Rect regionOfInterest)
        {
            this.totalTracks++;
            this.trackStopwatch.Start();

            if (this.operationMode != OperationMode.Kinect)
            {
                throw new InvalidOperationException(
                          "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
            }

            if (colorImage == null)
            {
                throw new ArgumentNullException("colorImage");
            }

            if (depthImage == null)
            {
                throw new ArgumentNullException("depthImage");
            }

            if (colorImageFormat != this.initializationColorImageFormat)
            {
                throw new InvalidOperationException("Color image frame format different from initialization");
            }

            if (depthImageFormat != this.initializationDepthImageFormat)
            {
                throw new InvalidOperationException("Depth image frame format different from initialization");
            }

            if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("colorImage", "Color image data size is needs to match initialization configuration.");
            }

            if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("depthImage", "Depth image data size is needs to match initialization configuration.");
            }

            int        hr;
            HeadPoints headPointsObj = null;

            Vector3DF[] headPoints = GetHeadPointsFromSkeleton(skeletonOfInterest);

            if (headPoints != null && headPoints.Length == 2)
            {
                headPointsObj = new HeadPoints {
                    Points = headPoints
                };
            }



            this.copyStopwatch.Start();
            this.colorFaceTrackingImage.CopyFrom(colorImage);
            this.depthFaceTrackingImage.CopyFrom(depthImage);
            this.copyStopwatch.Stop();

            var sensorData = new SensorData(this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
            FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

            this.startOrContinueTrackingStopwatch.Start();
            if (this.trackSucceeded)
            {
                hr = this.faceTrackerInteropPtr.ContinueTracking(ref faceTrackSensorData, headPointsObj, this.frame.ResultPtr);
            }
            else
            {
                hr = this.faceTrackerInteropPtr.StartTracking(
                    ref faceTrackSensorData, ref regionOfInterest, headPointsObj, this.frame.ResultPtr);
            }

            this.startOrContinueTrackingStopwatch.Stop();

            this.trackSucceeded = hr == (int)ErrorCode.Success && this.frame.Status == ErrorCode.Success;
            this.trackStopwatch.Stop();

            if (this.trackSucceeded)
            {
                ++this.totalSuccessTracks;
                this.totalSuccessTrackMs      += this.trackStopwatch.ElapsedMilliseconds - this.lastSuccessTrackElapsedMs;
                this.lastSuccessTrackElapsedMs = this.trackStopwatch.ElapsedMilliseconds;
            }

            return this.frame;
        }
 /// <summary>
 /// Starts face tracking from Kinect input data. Track() detects a face
 /// based on the passed parameters, then identifies characteristic
 /// points and begins tracking. The first call to this API is more
 /// expensive, but if the tracking succeeds then subsequent calls use
 /// the tracking information generated from first call and is faster,
 /// until a tracking failure happens.
 /// </summary>
 /// <param name="colorImageFormat">
 /// format of the colorImage array
 /// </param>
 /// <param name="colorImage">
 /// Input color image frame retrieved from Kinect sensor
 /// </param>
 /// <param name="depthImageFormat">
 /// format of the depthImage array
 /// </param>
 /// <param name="depthImage">
 /// Input depth image frame retrieved from Kinect sensor
 /// </param>
 /// <returns>
 /// Returns computed face tracking results for this image frame
 /// </returns>
 public FaceTrackFrame Track(
     ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage)
 {
      return this.Track(colorImageFormat, colorImage, depthImageFormat, depthImage, null, Rect.Empty);
 }
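A calling sketch for this overload (the tracker variable and the buffers are assumptions; the arrays must have been copied out of the corresponding Kinect frames, as in the handlers elsewhere in this listing):

        // Calling sketch: colorImage (byte[]) and depthImage (short[]) must match the
        // formats the tracker was initialized with, or Track throws as shown above.
        FaceTrackFrame result = tracker.Track(
            ColorImageFormat.RgbResolution640x480Fps30, colorImage,
            DepthImageFormat.Resolution640x480Fps30, depthImage);
        if (result.TrackSuccessful)
        {
            var shape = result.GetProjected3DShape();
        }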
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest, FaceRecognitionActivityWindow win)
            {
                if (CheckFace(kinectSensor, colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest))
                {
                    count++;
                }
                else count = 0;
                if (count == 1)
                {
                    count = 0;
                    currentState = (currentState + 1) % 3;
                    // highlight the next exercise


                    if (currentState == 0)
                    {
                        Color tileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush1 = new SolidColorBrush(tileFill);
                        win.SadTile.Fill = brush1;


                        Color focusTileFill = Color.FromRgb(96, 96, 96);
                        SolidColorBrush brush2 = new SolidColorBrush(focusTileFill);
                        win.HappyTile.Fill = brush2;

                        Color secTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush3 = new SolidColorBrush(secTileFill);
                        win.AngryTile.Fill = brush3;

                        // display the large icon
                        win.ActivityImage.Source = new BitmapImage(new Uri(@"happy_big.png"));
                        win.ActivityLabel.Content = "Happy";
                    }
                    else if (currentState == 1)
                    {
                        Color tileFill = Color.FromRgb(96, 96, 96);
                        SolidColorBrush brush1 = new SolidColorBrush(tileFill);
                        win.SadTile.Fill = brush1;

                        Color focusTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush2 = new SolidColorBrush(focusTileFill);
                        win.HappyTile.Fill = brush2;

                        Color secTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush3 = new SolidColorBrush(secTileFill);
                        win.AngryTile.Fill = brush3;

                        // display the large icon
                        win.ActivityImage.Source = new BitmapImage(new Uri(@"sad_big.png"));
                        win.ActivityLabel.Content = "Sad";
                    }
                    else if (currentState == 2)
                    {
                        Color tileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush1 = new SolidColorBrush(tileFill);
                        win.SadTile.Fill = brush1;

                        Color focusTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush2 = new SolidColorBrush(focusTileFill);
                        win.HappyTile.Fill = brush2;

                        Color secTileFill = Color.FromRgb(96, 96, 96);
                        SolidColorBrush brush3 = new SolidColorBrush(secTileFill);
                        win.AngryTile.Fill = brush3;

                        // display the large icon
                        win.ActivityImage.Source = new BitmapImage(new Uri(@"angry_big.png"));
                        win.ActivityLabel.Content = "Angry";
                    }


                    this.speech.SpeakAsync("Moving to next level");
                    // Notify to change face
                    Trace.WriteLine("Change state to: " + states[currentState]);
                }
            }
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest).Clone() as FaceTrackFrame;
                }
            }
        void KinectFaceNode_AllFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            colorImageFrame = e.OpenColorImageFrame();
            depthImageFrame = e.OpenDepthImageFrame();
            skeletonFrame   = e.OpenSkeletonFrame();

            if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
            {
                // Dispose whichever frames did arrive before bailing out,
                // otherwise they leak until the finalizer runs.
                if (colorImageFrame != null) { colorImageFrame.Dispose(); }
                if (depthImageFrame != null) { depthImageFrame.Dispose(); }
                if (skeletonFrame != null) { skeletonFrame.Dispose(); }
                return;
            }

            if (first)
            {
                first         = false;
                this.olddepth = depthImageFrame.Format;
            }
            else
            {
                if (this.olddepth != depthImageFrame.Format)
                {
                    // The depth format changed, so the tracking state needs a reset.
                    this.depthImage = null;

                    if (this.face != null)
                    {
                        this.face.Dispose();
                        this.face = null;
                    }

                    this.trackedSkeletons.Clear();
                    this.olddepth = depthImageFrame.Format;
                }
            }

            if (this.depthImage == null)
            {
                this.depthImage = new short[depthImageFrame.PixelDataLength];
            }

            if (this.colorImage == null)
            {
                this.colorImage = new byte[colorImageFrame.PixelDataLength];
            }

            if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
            {
                this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
            }

            if (face == null)
            {
                face = new FaceTracker(this.runtime.Runtime);
            }

            colorImageFrame.CopyPixelDataTo(this.colorImage);
            depthImageFrame.CopyPixelDataTo(this.depthImage);
            skeletonFrame.CopySkeletonDataTo(this.skeletonData);

            foreach (Skeleton skeleton in this.skeletonData)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // Keep a record of any skeleton we see, whether fully tracked or position-only.
                    if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(this.runtime.Runtime, colorImageFrame.Format, colorImage, depthImageFrame.Format, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }
            }

            this.RemoveOldTrackers(skeletonFrame.FrameNumber);

            colorImageFrame.Dispose();
            depthImageFrame.Dispose();
            skeletonFrame.Dispose();

            this.FInvalidate = true;
        }
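RemoveOldTrackers is called above but not shown. In the Face Tracking Basics sample it drops trackers whose skeleton has not been seen for a while; a minimal sketch along those lines (the MaxMissedFrames threshold is an assumption):

        // Hedged sketch of the clean-up used above: forget skeletons that have
        // not been updated recently so their face trackers can be disposed.
        private const uint MaxMissedFrames = 100; // assumed threshold

        private void RemoveOldTrackers(int currentFrameNumber)
        {
            var trackersToRemove = new List<int>();

            foreach (var tracker in this.trackedSkeletons)
            {
                uint missedFrames = (uint)currentFrameNumber - (uint)tracker.Value.LastTrackedFrame;
                if (missedFrames > MaxMissedFrames)
                {
                    // Too many frames have passed since we last saw this skeleton.
                    trackersToRemove.Add(tracker.Key);
                }
            }

            foreach (int trackingId in trackersToRemove)
            {
                this.trackedSkeletons[trackingId].Dispose(); // assumes SkeletonFaceTracker is IDisposable
                this.trackedSkeletons.Remove(trackingId);
            }
        }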
Exemple #28
        private void SaveCoordinatesAssistant(Skeleton skeleton, KinectSensor sensor, DepthImageFormat depthFormat)
        {
            if (inSquat)
            {
                // Determines whether this is the first frame being recorded.
                // If the file does not exist yet, a header row is written first;
                // otherwise new frames are appended without an extra header.
                string filePath = @"C:\Users\amkos\Documents\SquattingData\" + fileDescrip + ".csv";

                if (!File.Exists(filePath))
                {
                    using (StreamWriter headerWriter = new StreamWriter(filePath))
                    {
                        // Column order matches the WriteLine in SaveCoordinates below.
                        headerWriter.WriteLine("Frame,TimeStamp,time.ms," +
                                               "ShoulderHeight,ShoulderConfidence," +
                                               "lKnee.X,rKnee.X,lAnkle.X,rAnkle.X," +
                                               "KASR,lKneeAngle,rKneeAngle");
                    }
                }

                SaveCoordinates(skeleton, sensor, depthFormat);
            }
        }
Exemple #29
        private void SaveCoordinates(Skeleton skeleton, KinectSensor sensor, DepthImageFormat depthFormat)
        {
            StreamWriter coordinateStream = new StreamWriter(@"C:\Users\amkos\Documents\SquattingData\" + fileDescrip + ".csv", true);

            TotalFrames++;

            /*foreach (Joint joint in skeleton.Joints)
             * {
             *  //Console.WriteLine(joint.JointType);
             *  DepthImagePoint mappedPoint = sensor.MapSkeletonPointToDepth(joint.Position, depthFormat);
             *  coordinateStream.WriteLine(TotalFrames + "," + DateTime.Now.ToLongTimeString() + "," +
             *      DateTime.Now.Millisecond + "," + skeleton.TrackingId + "," + joint.JointType + "," + joint.TrackingState + "," +
             *      mappedPoint.X + "," + mappedPoint.Y);
             *  //Console.WriteLine(mappedPoint.X);
             * }
             */

            // Map each joint of interest into depth-image space. The depthFormat
            // parameter is used directly rather than re-reading sensor.DepthStream.Format.
            Func<JointType, DepthImagePoint> mapJoint = jointType =>
                sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(
                    skeleton.Joints[jointType].Position, depthFormat);

            DepthImagePoint leftKneeDepthPoint   = mapJoint(JointType.KneeLeft);
            DepthImagePoint rightKneeDepthPoint  = mapJoint(JointType.KneeRight);
            DepthImagePoint leftAnkleDepthPoint  = mapJoint(JointType.AnkleLeft);
            DepthImagePoint rightAnkleDepthPoint = mapJoint(JointType.AnkleRight);
            DepthImagePoint leftHipDepthPoint    = mapJoint(JointType.HipLeft);
            DepthImagePoint rightHipDepthPoint   = mapJoint(JointType.HipRight);

            // shoulder center (used to check for start, bottom, and end of squat)
            DepthImagePoint shoulderCenterDepthPoint = mapJoint(JointType.ShoulderCenter);


            double kasr = CalculateKASR(rightAnkleDepthPoint, leftAnkleDepthPoint, rightKneeDepthPoint,
                                        leftKneeDepthPoint, rightHipDepthPoint, leftHipDepthPoint);

            double leftKneeAngle  = AngleBetweenJoints(skeleton.Joints[JointType.HipLeft], skeleton.Joints[JointType.KneeLeft], skeleton.Joints[JointType.AnkleLeft]);
            double rightKneeAngle = AngleBetweenJoints(skeleton.Joints[JointType.HipRight], skeleton.Joints[JointType.KneeRight], skeleton.Joints[JointType.AnkleRight]);


            // shoulderCenterDepthPoint.Y is the vertical coordinate in depth space,
            // matching the ShoulderHeight column written in the header.
            coordinateStream.WriteLine(TotalFrames + "," + DateTime.Now.Second + "," +
                                       DateTime.Now.Millisecond + "," + shoulderCenterDepthPoint.Y + "," +
                                       skeleton.Joints[JointType.ShoulderCenter].TrackingState + "," +
                                       leftKneeDepthPoint.X + "," + rightKneeDepthPoint.X + "," +
                                       leftAnkleDepthPoint.X + "," + rightAnkleDepthPoint.X + "," +
                                       kasr + "," + leftKneeAngle + "," + rightKneeAngle);

            coordinateStream.Close();
        }
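CalculateKASR and AngleBetweenJoints are called above but defined elsewhere. Hedged sketches of both, assuming KASR is the knee-to-ankle separation ratio (knee separation divided by ankle separation in depth-space X) and the knee angle is measured at the middle joint in 3-D skeleton space:

        // Hedged sketch: knee-to-ankle separation ratio. The hip points are
        // accepted only to match the call site; this definition ignores them.
        private double CalculateKASR(DepthImagePoint rightAnkle, DepthImagePoint leftAnkle,
                                     DepthImagePoint rightKnee, DepthImagePoint leftKnee,
                                     DepthImagePoint rightHip, DepthImagePoint leftHip)
        {
            double kneeSeparation  = Math.Abs(rightKnee.X - leftKnee.X);
            double ankleSeparation = Math.Abs(rightAnkle.X - leftAnkle.X);
            return ankleSeparation > 0 ? kneeSeparation / ankleSeparation : 0.0;
        }

        // Hedged sketch: angle in degrees at the middle joint, from the dot
        // product of the vectors middle->first and middle->last.
        private double AngleBetweenJoints(Joint first, Joint middle, Joint last)
        {
            double ax = first.Position.X - middle.Position.X;
            double ay = first.Position.Y - middle.Position.Y;
            double az = first.Position.Z - middle.Position.Z;

            double bx = last.Position.X - middle.Position.X;
            double by = last.Position.Y - middle.Position.Y;
            double bz = last.Position.Z - middle.Position.Z;

            double magA = Math.Sqrt(ax * ax + ay * ay + az * az);
            double magB = Math.Sqrt(bx * bx + by * by + bz * bz);
            if (magA == 0 || magB == 0)
            {
                return 0.0;
            }

            double cosine = (ax * bx + ay * by + az * bz) / (magA * magB);
            cosine = Math.Max(-1.0, Math.Min(1.0, cosine)); // guard against rounding error
            return Math.Acos(cosine) * (180.0 / Math.PI);
        }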
 public DepthImagePoint MapSkeletonPointToDepthPoint(SkeletonPoint skelpoint, DepthImageFormat depthImageFormat)
 {
     return(sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(skelpoint, depthImageFormat));
 }
        private void DepthImageReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            int imageWidth = 0;
            int imageHeight = 0;
            bool haveNewFormat = false;

            using (DepthImageFrame imageFrame = e.OpenDepthImageFrame())
            {
                if (imageFrame != null)
                {
                    imageWidth = imageFrame.Width;
                    imageHeight = imageFrame.Height;

                    // We need to detect if the format has changed.
                    haveNewFormat = this.lastImageFormat != imageFrame.Format;

                    if (haveNewFormat)
                    {
                        this.pixelData = new short[imageFrame.PixelDataLength];
                        this.depthFrame32 = new byte[imageFrame.Width * imageFrame.Height * Bgr32BytesPerPixel];
                        this.lastImageFormat = imageFrame.Format;

                        // We also need to reallocate the outputBitmap, but WriteableBitmap has 
                        // thread affinity based on the allocating thread.  Since we want this to
                        // be displayed in the UI, we need to do this allocation on the UI thread (below).
                    }

                    imageFrame.CopyPixelDataTo(this.pixelData);
                }
            }

            // Did we get a depth frame?
            if (imageWidth != 0)
            {
                this.ConvertDepthFrame(this.pixelData, ((KinectSensor)sender).DepthStream);

                // The images are converted, update the UI on the UI thread.
                // We use Invoke here instead of BeginInvoke so that the processing frame is blocked from overwriting
                // this.pixelData and this.depthFrame32.
                this.Dispatcher.Invoke((Action)(() =>
                    {
                        if (haveNewFormat)
                        {
                            // A WriteableBitmap is a WPF construct that enables resetting the Bits of the image.
                            // This is more efficient than creating a new Bitmap every frame.
                            this.outputBitmap = new WriteableBitmap(
                                imageWidth,
                                imageHeight,
                                96, // DpiX
                                96, // DpiY
                                PixelFormats.Bgr32,
                                null);

                            this.kinectDepthImage.Source = this.outputBitmap;
                        }

                        this.outputBitmap.WritePixels(
                            new Int32Rect(0, 0, imageWidth, imageHeight),
                            this.depthFrame32,
                            imageWidth * Bgr32BytesPerPixel,
                            0);

                        UpdateFrameRate();
                    }));
            }
        }
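ConvertDepthFrame above fills depthFrame32 from the raw 16-bit samples before the bitmap is updated. One plausible implementation, mapping depth to a gray Bgr32 pixel (the intensity scaling is an assumption):

        // Hedged sketch: convert each 16-bit depth sample to a gray Bgr32 pixel.
        // The low bits carry the player index; shift them away to get millimeters.
        private void ConvertDepthFrame(short[] depthFrame, DepthImageStream depthStream)
        {
            int minDepth = depthStream.MinDepth;
            int maxDepth = depthStream.MaxDepth;

            for (int i16 = 0, i32 = 0; i16 < depthFrame.Length && i32 < this.depthFrame32.Length; i16++, i32 += 4)
            {
                int depth = depthFrame[i16] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                depth = Math.Max(minDepth, Math.Min(maxDepth, depth));

                // Nearer surfaces render brighter.
                byte intensity = (byte)(255 - (255 * (depth - minDepth) / (maxDepth - minDepth)));
                this.depthFrame32[i32 + 0] = intensity; // blue
                this.depthFrame32[i32 + 1] = intensity; // green
                this.depthFrame32[i32 + 2] = intensity; // red
            }
        }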
        private void OnAllFramesReady(object sender, Microsoft.Kinect.AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.SkeletonData == null || this.SkeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.SkeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.SkeletonData);
                Skeleton activeSkeleton = null;
                activeSkeleton = (from skel in this.SkeletonData where skel.TrackingState == SkeletonTrackingState.Tracked select skel).FirstOrDefault();


                //Idea: separate the eye regions of the color image,
                //use a learning algorithm for each eye, and detect
                //blinks on the separated parts of the color image.

                //colorImage is a one-dimensional array of 640 x 480 x 4 (BGRX) values


                if (activeSkeleton != null)
                {
                    FaceTrackFrame currentFaceFrame = faceTracker.Track(this.colorImageFormat, colorImage, depthImageFormat, depthImage, activeSkeleton);
                    float          browRaiserValue  = currentFaceFrame.GetAnimationUnitCoefficients()[AnimationUnit.BrowRaiser];
                    float          browLowererValue = currentFaceFrame.GetAnimationUnitCoefficients()[AnimationUnit.BrowLower];
                    tbBrowLowerer.Text = browLowererValue.ToString();
                    tbBrowRaiser.Text  = browRaiserValue.ToString();
                    //Get relevant Points for blink detection
                    //Left eye
                    int    minX    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveOneFourthLeftEyelid].X);
                    int    minY    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveOneFourthLeftEyelid].Y);
                    int    maxX    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.BelowThreeFourthLeftEyelid].X);
                    int    maxY    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.BelowThreeFourthLeftEyelid].Y);
                    Bitmap leftEye = EyeExtract(colorImageFrame, currentFaceFrame, minX, minY, maxX, maxY, false);
                    pbLeftEye.Image = leftEye;

                    //Right eye
                    minX = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveThreeFourthRightEyelid].X);
                    minY = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveThreeFourthRightEyelid].Y);
                    maxX = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.OneFourthBottomRightEyelid].X);
                    maxY = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.OneFourthBottomRightEyelid].Y);

                    Bitmap rightEye = EyeExtract(colorImageFrame, currentFaceFrame, minX, minY, maxX, maxY, true);
                    pbRightEye.Image = rightEye;

                    //Apply an edge filter to both eyes.
                    double dxRight;
                    double dyRight;
                    double dxLeft;
                    double dyLeft;
                    if (rightEye != null && leftEye != null)
                    {
                        Bitmap edgePicRight = Convolution(ConvertGrey(rightEye), true, out dxRight, out dyRight);
                        Bitmap edgePicLeft  = Convolution(ConvertGrey(leftEye), false, out dxLeft, out dyLeft);



                        //If Face is rotated, move Mouse
                        if (headRotationHistory.Count > filterLength && currentFaceFrame.TrackSuccessful)
                        {
                            int x = 0;
                            int y = 0;

                            //Method 1: no smoothing
                            //ScaleXY(currentFaceFrame.Rotation, out x, out y);
                            //MouseControl.Move(x, y);

                            ////Method 2: smoothing over the last x frames:
                            //int i = 0;
                            //Vector3DF rotationMedium = new Vector3DF();
                            //while (i < 10 && headRotationHistory.Count - 1 > i)
                            //{
                            //    i++;
                            //    rotationMedium.X += headRotationHistory[headRotationHistory.Count - 1 - i].X;
                            //    rotationMedium.Y += headRotationHistory[headRotationHistory.Count - 1 - i].Y;
                            //}
                            //rotationMedium.X = rotationMedium.X / i;
                            //rotationMedium.Y = rotationMedium.Y / i;
                            //ScaleXY(rotationMedium, out x, out y);
                            //MouseControl.Move(x, y);

                            //Method 3: Gaussian filter: weight the most recent frames more heavily.

                            Vector3DF rotationMedium = new Vector3DF();
                            rotationMedium.X = currentFaceFrame.Rotation.X * gaussFilter[0];
                            rotationMedium.Y = currentFaceFrame.Rotation.Y * gaussFilter[0];
                            int i = 0;
                            while (i < filterLength - 1)
                            {
                                i++;
                                rotationMedium.X += (headRotationHistory[headRotationHistory.Count - 1 - i].X * gaussFilter[i]);
                                rotationMedium.Y += (headRotationHistory[headRotationHistory.Count - 1 - i].Y * gaussFilter[i]);
                            }
                            rotationMedium.X = (float)(rotationMedium.X / gaussFactor);
                            rotationMedium.Y = (float)(rotationMedium.Y / gaussFactor);
                            ScaleXY(rotationMedium, out x, out y);

                            MouseControl.Move(x, y);
                            //Method 4: quadratic smoothing
                            //double deltaX = ((-currentFaceFrame.Rotation.Y) - (-headRotationHistory.Last().Y));
                            //double deltaY = ((-currentFaceFrame.Rotation.X) - (-headRotationHistory.Last().X));
                            //if (deltaX < 0)
                            //    deltaX = -Math.Pow(deltaX, 2) * 4;
                            //else
                            //    deltaX = Math.Pow(deltaX, 2) * 4;
                            //if (deltaY < 0)
                            //    deltaY = -Math.Pow(deltaY, 2) * 5;
                            //else
                            //    deltaY = Math.Pow(deltaY, 2) * 5;
                            //MouseControl.DeltaMove((int)Math.Round(deltaX, 0), (int)Math.Round(deltaY));
                        }

                        headRotationHistory.Add(currentFaceFrame.Rotation);
                        if (headRotationHistory.Count >= 100)
                        {
                            headRotationHistory.RemoveAt(0);
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                // Keep the frame loop alive, but at least record what went wrong.
                System.Diagnostics.Debug.WriteLine("OnAllFramesReady failed: " + ex.Message);
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
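Method 3 above relies on precomputed gaussFilter weights (index 0 weighing the newest frame) with sum gaussFactor, and on ScaleXY to map head rotation onto the screen. Hedged sketches of both; the sigma, gains, and sign conventions are assumptions:

        // Hedged sketch: weights for Method 3 above; index 0 is the newest frame.
        private double[] BuildGaussFilter(int filterLength, double sigma, out double gaussFactor)
        {
            double[] weights = new double[filterLength];
            gaussFactor = 0.0;
            for (int i = 0; i < filterLength; i++)
            {
                weights[i] = Math.Exp(-(i * i) / (2.0 * sigma * sigma));
                gaussFactor += weights[i];
            }
            return weights;
        }

        // Hedged sketch: map head yaw/pitch (degrees) to an absolute cursor
        // position; roughly +/-20 degrees spans the screen. Yaw (rotation.Y)
        // drives X and pitch (rotation.X) drives Y, as in the methods above.
        private void ScaleXY(Vector3DF rotation, out int x, out int y)
        {
            double screenWidth  = SystemParameters.PrimaryScreenWidth;
            double screenHeight = SystemParameters.PrimaryScreenHeight;

            x = (int)(screenWidth / 2 - rotation.Y * (screenWidth / 40.0));
            y = (int)(screenHeight / 2 - rotation.X * (screenHeight / 40.0));

            // Clamp to the visible screen.
            x = Math.Max(0, Math.Min((int)screenWidth - 1, x));
            y = Math.Max(0, Math.Min((int)screenHeight - 1, y));
        }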
Exemple #34
        private void Kinect_AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }


                // TODO look into using the Timestamp on each frame
                var time = pipeline.GetCurrentTime();

                var sharedColorImage = ImagePool.GetOrCreate(colorImageFrame.Width, colorImageFrame.Height, Imaging.PixelFormat.BGRX_32bpp);
                var sharedDepthImage = ImagePool.GetOrCreate(depthImageFrame.Width, depthImageFrame.Height, Imaging.PixelFormat.Gray_16bpp);

                colorImageFrame.CopyPixelDataTo(sharedColorImage.Resource.ImageData, (colorImageFrame.Width * colorImageFrame.Height * 4));
                this.ColorImage.Post(sharedColorImage, time);

                //depthImageFrame.CopyPixelDataTo(sharedDepthImage.Resource.ImageData, (depthImageFrame.Width * depthImageFrame.Height * 2));
                depthImageFrame.CopyPixelDataTo(sharedDepthImage.Resource.ImageData, depthImageFrame.PixelDataLength);
                this.DepthImage.Post(sharedDepthImage, time);


                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                this.Skeletons.Post(this.skeletonData.ToList(), time);
            }
            catch
            {
                // Drop the frame on error; TODO: log and surface frame-processing errors.
            }
        }
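The ColorImage, DepthImage, and Skeletons streams posted above would be Microsoft Psi emitters created once against the pipeline. A minimal sketch of how such emitters are typically declared in the component constructor (names match the Post calls above):

        // Hedged sketch: emitters backing the Post(...) calls above, created in
        // the component constructor against the owning pipeline.
        public Emitter<Shared<Image>> ColorImage { get; private set; }
        public Emitter<Shared<Image>> DepthImage { get; private set; }
        public Emitter<List<Skeleton>> Skeletons { get; private set; }

        private void CreateEmitters(Pipeline pipeline)
        {
            this.ColorImage = pipeline.CreateEmitter<Shared<Image>>(this, "ColorImage");
            this.DepthImage = pipeline.CreateEmitter<Shared<Image>>(this, "DepthImage");
            this.Skeletons  = pipeline.CreateEmitter<List<Skeleton>>(this, "Skeletons");
        }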
        private bool TrackIntegrate(DepthImagePixel[] depthPixels, DepthImageFormat depthFormat)
        {
            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            // Convert the depth image frame to depth float image frame
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels,
                (int)depthSize.Width,
                (int)depthSize.Height,
                this.depthFloatBuffer,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                false);

            bool trackingSucceeded = this.volume.AlignDepthFloatToReconstruction(
                depthFloatBuffer,
                FusionDepthProcessor.DefaultAlignIterationCount,
                residualFloatBuffer,
                out _alignmentEnergy,
                volume.GetCurrentWorldToCameraTransform());

            //if (trackingSucceeded && _alignmentEnergy == 0.0)
            //    trackingSucceeded = false;

            // ProcessFrame will first calculate the camera pose and then integrate
            // if tracking is successful
            //bool trackingSucceeded = this.volume.ProcessFrame(
            //    this.depthFloatBuffer,
            //    FusionDepthProcessor.DefaultAlignIterationCount,
            //    IntegrationWeight,
            //    this.volume.GetCurrentWorldToCameraTransform());

            // If camera tracking failed, no data integration or raycast for reference
            // point cloud will have taken place, and the internal camera pose
            // will be unchanged.
            if (!trackingSucceeded)
            {
                this.trackingErrorCount++;

                // Show tracking error on status bar
                FusionStatusMessage = Properties.Resources.CameraTrackingFailed;
                _audioManager.State = AudioState.Error;
            }
            else
            {
                ProcessResidualImage();

                this.worldToCameraTransform = volume.GetCurrentWorldToCameraTransform();

                if (!IsIntegrationPaused)
                {
                    this.volume.IntegrateFrame(depthFloatBuffer, IntegrationWeight, this.worldToCameraTransform);
                }

                this.trackingErrorCount = 0;
            }

            if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
            {
                // Auto Reset due to bad tracking
                FusionStatusMessage = Properties.Resources.ResetVolume;

                // Automatically Clear Volume and reset tracking if tracking fails
                this.ResetReconstruction(_currentVolumeCenter);
            }
            return(trackingSucceeded);
        }
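ResetReconstruction above clears the Kinect Fusion volume and restarts camera tracking around a given center. A minimal sketch using the Reconstruction API; the volume-translation handling and the type of the center parameter are assumptions:

        // Hedged sketch: clear the volume and restart tracking. The center
        // parameter type and translation semantics are assumptions.
        private void ResetReconstruction(Vector3 volumeCenter)
        {
            this.trackingErrorCount = 0;
            this.worldToCameraTransform = Matrix4.Identity;

            // Shift the world-to-volume transform so the volume stays centered
            // on the point of interest (translation lives in the fourth row).
            Matrix4 worldToVolume = this.volume.GetCurrentWorldToVolumeTransform();
            worldToVolume.M41 -= volumeCenter.X;
            worldToVolume.M42 -= volumeCenter.Y;
            worldToVolume.M43 -= volumeCenter.Z;

            this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolume);

            FusionStatusMessage = string.Empty;
        }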
Exemple #36
        private void AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for changes in any of the data this function is receiving
                // and reset things appropriately.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.colorImage               = null;
                    this.colorImageFormat         = colorImageFrame.Format;
                    this.colorImageWritableBitmap = null;
                    this.ColorImage.Source        = null;
                    this.theMaterial.Brush        = null;
                }

                if (this.skeletonData != null && this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = null;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                if (this.colorImageWritableBitmap == null)
                {
                    this.colorImageWritableBitmap = new WriteableBitmap(
                        colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                    this.ColorImage.Source = this.colorImageWritableBitmap;
                    this.theMaterial.Brush = new ImageBrush(this.colorImageWritableBitmap)
                    {
                        ViewportUnits = BrushMappingMode.Absolute
                    };
                }

                if (this.skeletonData == null)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                // Copy data received in this event to our buffers.
                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                this.colorImageWritableBitmap.WritePixels(
                    new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                    this.colorImage,
                    colorImageFrame.Width * Bgr32BytesPerPixel,
                    0);

                // Find a skeleton to track.
                // First see if our old one is good.
                // When a skeleton is in PositionOnly tracking state, don't pick a new one
                // as it may become fully tracked again.
                Skeleton skeletonOfInterest =
                    this.skeletonData.FirstOrDefault(
                        skeleton =>
                        skeleton.TrackingId == this.trackingId &&
                        skeleton.TrackingState != SkeletonTrackingState.NotTracked);

                if (skeletonOfInterest == null)
                {
                    // Old one wasn't around.  Find any skeleton that is being tracked and use it.
                    skeletonOfInterest =
                        this.skeletonData.FirstOrDefault(
                            skeleton => skeleton.TrackingState == SkeletonTrackingState.Tracked);

                    if (skeletonOfInterest != null)
                    {
                        // This may be a different person so reset the tracker which
                        // could have tuned itself to the previous person.
                        if (this.faceTracker != null)
                        {
                            this.faceTracker.ResetTracking();
                        }

                        this.trackingId = skeletonOfInterest.TrackingId;
                    }
                }

                if (skeletonOfInterest != null && skeletonOfInterest.TrackingState == SkeletonTrackingState.Tracked)
                {
                    if (this.faceTracker == null)
                    {
                        try
                        {
                            this.faceTracker = new FaceTracker(this.Kinect);
                        }
                        catch (InvalidOperationException)
                        {
                            // During some shutdown scenarios the FaceTracker
                            // is unable to be instantiated.  Catch that exception
                            // and don't track a face.
                            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                            this.faceTracker = null;
                        }
                    }

                    if (this.faceTracker != null)
                    {
                        FaceTrackFrame faceTrackFrame = this.faceTracker.Track(
                            this.colorImageFormat,
                            this.colorImage,
                            this.depthImageFormat,
                            this.depthImage,
                            skeletonOfInterest);

                        if (faceTrackFrame.TrackSuccessful)
                        {
                            if (!visited)
                            {
                                visited = true;
                                //counter.Text = "60 seconds";
                                aTimer.Interval = 1000;
                                aTimer.Tick    += new EventHandler(aTimer_Tick);
                                aTimer.Start();
                            }
                            if (saveModel)
                            {
                                saveDepthImagebmp(depthImageFrame);
                                saveColorImage(colorImageFrame.Width, colorImageFrame.Height, (colorImageFrame.Width * Bgr32BytesPerPixel));
                                saveFaceModel();
                            }
                        }
                    }
                }
                else
                {
                    this.trackingId = -1;
                }
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
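saveColorImage above receives the frame dimensions and stride. A hedged sketch that encodes the current colorImage buffer to a PNG (the output path is an assumption):

        // Hedged sketch: encode the current color buffer to a PNG file.
        private void saveColorImage(int width, int height, int stride)
        {
            BitmapSource bitmap = BitmapSource.Create(
                width, height, 96, 96, PixelFormats.Bgr32, null, this.colorImage, stride);

            var encoder = new PngBitmapEncoder();
            encoder.Frames.Add(BitmapFrame.Create(bitmap));

            using (var stream = new FileStream("colorCapture.png", FileMode.Create))
            {
                encoder.Save(stream);
            }
        }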
        public IEnumerable <WeightedRect> DetectFaces(ColorImageFormat colorImageFormat,
                                                      byte[] colorImage,
                                                      DepthImageFormat depthImageFormat,
                                                      short[] depthImage, Rect roi)
        {
            if (this.operationMode != OperationMode.Kinect)
            {
                throw new InvalidOperationException(
                          "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
            }

            if (colorImage == null)
            {
                throw new ArgumentNullException("colorImage");
            }

            if (depthImage == null)
            {
                throw new ArgumentNullException("depthImage");
            }

            if (colorImageFormat != this.initializationColorImageFormat)
            {
                throw new InvalidOperationException("Color image frame format different from initialization");
            }

            if (depthImageFormat != this.initializationDepthImageFormat)
            {
                throw new InvalidOperationException("Depth image frame format different from initialization");
            }

            if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("colorImage", "Color image data size is needs to match initialization configuration.");
            }

            if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("depthImage", "Depth image data size is needs to match initialization configuration.");
            }

            this.copyStopwatch.Start();
            this.colorFaceTrackingImage.CopyFrom(colorImage);
            this.depthFaceTrackingImage.CopyFrom(depthImage);
            this.copyStopwatch.Stop();

            var sensorData = new SensorData(this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
            FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

            int  hr;
            uint count = 4;

            WeightedRect[] rects = new WeightedRect[count];

            GCHandle handle = GCHandle.Alloc(rects, GCHandleType.Pinned);

            try
            {
                IntPtr rectsPtr = handle.AddrOfPinnedObject();
                hr = this.faceTrackerInteropPtr.DetectFaces(ref faceTrackSensorData, ref roi, rectsPtr, ref count);
            }
            finally
            {
                if (handle.IsAllocated)
                {
                    handle.Free();
                }
            }

            this.trackSucceeded = hr == (int)ErrorCode.Success;

            return(rects.Take((int)count));
        }
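A usage sketch for DetectFaces, assuming buffers already copied out of an AllFramesReady handler and a region of interest covering the whole color frame (the FaceTracking Rect takes left/top/right/bottom):

            // Hedged usage sketch: scan the full 640x480 frame for face candidates.
            Rect roi = new Rect(0, 0, 640, 480);

            IEnumerable<WeightedRect> candidates = faceTracker.DetectFaces(
                ColorImageFormat.RgbResolution640x480Fps30, colorImage,
                DepthImageFormat.Resolution320x240Fps30, depthImage,
                roi);

            foreach (WeightedRect candidate in candidates)
            {
                Console.WriteLine("Face candidate weight: " + candidate.Weight);
            }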
        private void GetPlayerSilhouette(DepthImageFrame depthFrame, int playerIndex)
        {
            if (depthFrame != null)
            {
                bool haveNewFormat = this.lastImageFormat != depthFrame.Format;

                if (haveNewFormat)
                {
                    this.pixelData          = new short[depthFrame.PixelDataLength];
                    this.depthFrame32       = new byte[depthFrame.Width * depthFrame.Height * Bgra32BytesPerPixel];
                    this.convertedDepthBits = new byte[this.depthFrame32.Length];
                }

                depthFrame.CopyPixelDataTo(this.pixelData);

                for (int i16 = 0, i32 = 0; i16 < pixelData.Length && i32 < this.depthFrame32.Length; i16++, i32 += 4)
                {
                    int player = pixelData[i16] & DepthImageFrame.PlayerIndexBitmask;
                    if (player == playerIndex)
                    {
                        convertedDepthBits[i32 + RedIndex]   = 0x44;
                        convertedDepthBits[i32 + GreenIndex] = 0x23;
                        convertedDepthBits[i32 + BlueIndex]  = 0x59;
                        convertedDepthBits[i32 + 3]          = 0x66;
                    }
                    else if (player > 0)
                    {
                        convertedDepthBits[i32 + RedIndex]   = 0xBC;
                        convertedDepthBits[i32 + GreenIndex] = 0xBE;
                        convertedDepthBits[i32 + BlueIndex]  = 0xC0;
                        convertedDepthBits[i32 + 3]          = 0x66;
                    }
                    else
                    {
                        convertedDepthBits[i32 + RedIndex]   = 0x0;
                        convertedDepthBits[i32 + GreenIndex] = 0x0;
                        convertedDepthBits[i32 + BlueIndex]  = 0x0;
                        convertedDepthBits[i32 + 3]          = 0x0;
                    }
                }

                if (this.Silhouette == null || haveNewFormat)
                {
                    this.Silhouette = new WriteableBitmap(
                        depthFrame.Width,
                        depthFrame.Height,
                        96,
                        96,
                        PixelFormats.Bgra32,
                        null);
                }

                this.Silhouette.WritePixels(
                    new Int32Rect(0, 0, depthFrame.Width, depthFrame.Height),
                    convertedDepthBits,
                    depthFrame.Width * Bgra32BytesPerPixel,
                    0);

                this.lastImageFormat = depthFrame.Format;
            }
        }
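A usage sketch for GetPlayerSilhouette, wired to the sensor's depth stream; the player index would normally come from the tracked skeleton (depth player indices are 1-based, 0 meaning no player):

        // Hedged usage sketch: feed each depth frame to the silhouette extractor.
        private void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (depthFrame != null)
                {
                    this.GetPlayerSilhouette(depthFrame, 1); // player index 1 assumed
                }
            }
        }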
            //public void DrawFaceModel(DrawingContext drawingContext)
            //{
            //    if (!this.lastFaceTrackSucceeded || this.skeletonTrackingState != SkeletonTrackingState.Tracked)
            //    {
            //        return;
            //    }

            //    var faceModelPts = new List<Point>();
            //    var faceModel = new List<FaceModelTriangle>();

            //    for (int i = 0; i < this.facePoints.Count; i++)
            //    {
            //        faceModelPts.Add(new Point(this.facePoints[i].X + 0.5f, this.facePoints[i].Y + 0.5f));
            //    }

            //    foreach (var t in faceTriangles)
            //    {
            //        var triangle = new FaceModelTriangle();
            //        triangle.P1 = faceModelPts[t.First];
            //        triangle.P2 = faceModelPts[t.Second];
            //        triangle.P3 = faceModelPts[t.Third];
            //        faceModel.Add(triangle);
            //    }

            //    var faceModelGroup = new GeometryGroup();
            //    for (int i = 0; i < faceModel.Count; i++)
            //    {
            //        var faceTriangle = new GeometryGroup();
            //        faceTriangle.Children.Add(new LineGeometry(faceModel[i].P1, faceModel[i].P2));
            //        faceTriangle.Children.Add(new LineGeometry(faceModel[i].P2, faceModel[i].P3));
            //        faceTriangle.Children.Add(new LineGeometry(faceModel[i].P3, faceModel[i].P1));
            //        faceModelGroup.Children.Add(faceTriangle);
            //    }

            //    drawingContext.DrawGeometry(Brushes.LightYellow, new Pen(Brushes.LightYellow, 1.0), faceModelGroup);
            //}

            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Console.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        this.facePoints = frame.GetProjected3DShape();
                    }
                }
            }
        /// <summary>
        /// Map PointSkeleton3D List to PointDepth3D List
        /// </summary>
        /// <param name="pointSkleton3D"></param>
        /// <param name="depthImageFormat"></param>
        /// <returns></returns>
        public List <PointDepth3D> MapSkeletonPointsToDepthPoints(List <PointSkeleton3D> pointSkeleton3D, DepthImageFormat depthImageFormat)
        {
            List <PointDepth3D> ret = new List <PointDepth3D>();

            foreach (var element in pointSkeleton3D)
            {
                ret.Add(MapSkeletonPointToDepthPoint(element, depthImageFormat));
            }
            return(ret);
        }
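The single-point MapSkeletonPointToDepthPoint used above is not shown. A minimal sketch, assuming PointSkeleton3D/PointDepth3D are thin X/Y/Z wrappers and a CoordinateMapper field named mapper:

        // Hedged sketch: map one custom skeleton-space point into depth space.
        // The PointSkeleton3D/PointDepth3D member names are assumptions.
        public PointDepth3D MapSkeletonPointToDepthPoint(PointSkeleton3D point, DepthImageFormat depthImageFormat)
        {
            SkeletonPoint skeletonPoint = new SkeletonPoint
            {
                X = point.X,
                Y = point.Y,
                Z = point.Z
            };

            DepthImagePoint depthPoint = mapper.MapSkeletonPointToDepthPoint(skeletonPoint, depthImageFormat);
            return new PointDepth3D(depthPoint.X, depthPoint.Y, depthPoint.Depth);
        }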
        private void DecodeSkeletonData(AllFramesReadyEventArgs e, KinectSensor sensor)
        {
            #region GetImageFormat

            ColorImageFormat colorFormat = ColorImageFormat.Undefined;
            int colorWidth  = 0;
            int colorHeight = 0;

            DepthImageFormat depthFormat = DepthImageFormat.Undefined;
            int depthWidth  = 0;
            int depthHeight = 0;

            switch (this._imageType)
            {
            case ImageType.Color:
                // Retrieve the current color format, from the frame if present, and from the sensor if not.
                using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
                {
                    if (null != colorImageFrame)
                    {
                        colorFormat = colorImageFrame.Format;
                        colorWidth  = colorImageFrame.Width;
                        colorHeight = colorImageFrame.Height;
                    }
                    else if (null != sensor.ColorStream)
                    {
                        colorFormat = sensor.ColorStream.Format;
                        colorWidth  = sensor.ColorStream.FrameWidth;
                        colorHeight = sensor.ColorStream.FrameHeight;
                    }
                }

                break;

            case ImageType.Depth:
                // Retrieve the current depth format, from the frame if present, and from the sensor if not.
                using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
                {
                    if (null != depthImageFrame)
                    {
                        depthFormat = depthImageFrame.Format;
                        depthWidth  = depthImageFrame.Width;
                        depthHeight = depthImageFrame.Height;
                    }
                    else if (null != sensor.DepthStream)
                    {
                        depthFormat = sensor.DepthStream.Format;
                        depthWidth  = sensor.DepthStream.FrameWidth;
                        depthHeight = sensor.DepthStream.FrameHeight;
                    }
                }

                break;
            }

            #endregion

            // Clear the play canvas
            this.playField.Children.Clear();

            // Check every skeleton
            for (int skeletonSlot = 0; skeletonSlot < this._skeletonBuffer.Length; skeletonSlot++)
            {
                var skeleton = this._skeletonBuffer[skeletonSlot];

                #region Skeleton Position

                // Map points between skeleton and color/depth
                var jointMapping = this._jointMappings[skeletonSlot];
                jointMapping.Clear();

                try
                {
                    // Transform the data into the correct space
                    // For each joint, we determine the exact X/Y coordinates for the target view
                    foreach (Joint joint in skeleton.Joints)
                    {
                        ColorImagePoint colorPoint = sensor.CoordinateMapper.MapSkeletonPointToColorPoint(joint.Position, colorFormat);

                        Point mappedPoint = new Point(
                            (int)(this._renderSize.Width * colorPoint.X / colorWidth),
                            (int)(this._renderSize.Height * colorPoint.Y / colorHeight));

                        jointMapping[joint.JointType] = new JointMapping
                        {
                            Joint       = joint,
                            MappedPoint = mappedPoint,
                            OriginPoint = colorPoint,
                        };
                    }
                }
                catch (UnauthorizedAccessException)
                {
                    // Kinect is no longer available.
                    return;
                }

                // Look up the center point
                Point centerPoint = PositionCalculator.Get2DPosition(
                    sensor,
                    this._imageType,
                    this._renderSize,
                    skeleton.Position,
                    colorFormat,
                    colorWidth,
                    colorHeight,
                    depthFormat,
                    depthWidth,
                    depthHeight);

                #endregion

                // Scale the skeleton thickness
                // 1.0 is the desired size at 640 width
                this._scaleFactor = this._renderSize.Width / colorWidth;

                // Displays a gradient near the edge of the display
                // where the skeleton is leaving the screen
                this.DrawClippedEdges(skeleton);

                switch (skeleton.TrackingState)
                {
                case SkeletonTrackingState.PositionOnly:
                {
                    // The skeleton is being tracked, but we only know the general position, and
                    // we do not know the specific joint locations.
                    this.DrawBodyCenter(centerPoint);
                }
                break;

                case SkeletonTrackingState.Tracked:
                {
                    // The skeleton is being tracked and the joint data is available for consumption.
                    this.DrawBody(skeleton, jointMapping);

                    // Track player
                    this.TrackPlayer(skeleton, jointMapping);
                }
                break;
                }
            }
        }
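PositionCalculator.Get2DPosition above chooses the color or depth mapping based on the rendered view. A hedged sketch consistent with the parameters passed at the call site:

        // Hedged sketch: map a skeleton-space position into render coordinates,
        // via the color mapper for the color view and the depth mapper otherwise.
        public static Point Get2DPosition(
            KinectSensor sensor, ImageType imageType, Size renderSize, SkeletonPoint position,
            ColorImageFormat colorFormat, int colorWidth, int colorHeight,
            DepthImageFormat depthFormat, int depthWidth, int depthHeight)
        {
            if (imageType == ImageType.Color && colorFormat != ColorImageFormat.Undefined)
            {
                ColorImagePoint p = sensor.CoordinateMapper.MapSkeletonPointToColorPoint(position, colorFormat);
                return new Point(renderSize.Width * p.X / colorWidth, renderSize.Height * p.Y / colorHeight);
            }

            if (imageType == ImageType.Depth && depthFormat != DepthImageFormat.Undefined)
            {
                DepthImagePoint p = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(position, depthFormat);
                return new Point(renderSize.Width * p.X / depthWidth, renderSize.Height * p.Y / depthHeight);
            }

            return new Point();
        }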
        void nui_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            ColorImageFrame cf = e.OpenColorImageFrame();
            DepthImageFrame df = e.OpenDepthImageFrame();
            SkeletonFrame   sf = e.OpenSkeletonFrame();

            //            Skeleton[] skeletonData = new Skeleton[sf.SkeletonArrayLength];

            if (cf == null || df == null || sf == null)
            {
                // Dispose whichever frames did arrive before bailing out.
                if (cf != null) { cf.Dispose(); }
                if (df != null) { df.Dispose(); }
                if (sf != null) { sf.Dispose(); }
                return;
            }


            byte[] ImageBits = new byte[cf.PixelDataLength];
            cf.CopyPixelDataTo(ImageBits);

            BitmapSource src = null;

            src = BitmapSource.Create(cf.Width, cf.Height,
                                      96, 96, PixelFormats.Bgr32, null,
                                      ImageBits,
                                      cf.Width * cf.BytesPerPixel);
            image2.Source = src;



            // Check for image format changes.  The FaceTracker doesn't
            // deal with that so we need to reset.
            if (this.depthImageFormat != df.Format)
            {
                this.ResetFaceTracking();
                this.depthImage       = null;
                this.depthImageFormat = df.Format;
            }

            if (this.colorImageFormat != cf.Format)
            {
                this.ResetFaceTracking();
                this.colorImage       = null;
                this.colorImageFormat = cf.Format;
            }

            // Create any buffers to store copies of the data we work with
            if (this.depthImage == null)
            {
                this.depthImage = new short[df.PixelDataLength];
            }

            if (this.colorImage == null)
            {
                this.colorImage = new byte[cf.PixelDataLength];
            }

            // Get the skeleton information
            if (this.skeletonData == null || this.skeletonData.Length != sf.SkeletonArrayLength)
            {
                this.skeletonData = new Skeleton[sf.SkeletonArrayLength];
            }

            cf.CopyPixelDataTo(this.colorImage);
            df.CopyPixelDataTo(this.depthImage);
            sf.CopySkeletonDataTo(this.skeletonData);

            foreach (Skeleton skeleton in this.skeletonData)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // Keep a record of any skeleton we see, whether fully tracked or position-only.
                    if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(nui2, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = sf.FrameNumber;
                    }
                }
            }



            using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
            {
                if (depthImageFrame != null)
                {
                    foreach (Skeleton sd in skeletonData)
                    {
                        if (sd.TrackingState == SkeletonTrackingState.Tracked)
                        {
                            Joint joint = sd.Joints[JointType.Head];

                            DepthImagePoint depthPoint;

                            //                            CoordinateMapper coordinateMapper = new CoordinateMapper(nui);
                            //                            depthPoint = coordinateMapper.MapSkeletonPointToDepthPoint(joint.Position, DepthImageFormat.Resolution320x240Fps30);

                            depthPoint = depthImageFrame.MapFromSkeletonPoint(joint.Position);

                            System.Windows.Point point = new System.Windows.Point((int)(image2.ActualWidth * depthPoint.X
                                                                                        / depthImageFrame.Width),
                                                                                  (int)(image2.ActualHeight * depthPoint.Y
                                                                                        / depthImageFrame.Height));


                            Canvas.SetLeft(ellipse1, (point.X) - ellipse1.Width);
                            Canvas.SetTop(ellipse1, (point.Y) - ellipse1.Height);

                            App thisApp = App.Current as App;

                            Canvas.SetLeft(rect2, thisApp.m_dbX - rect2.Width);
                            Canvas.SetTop(rect2, thisApp.m_dbY - rect2.Height);

                            double GapX, GapY;
                            GapX = point.X - (thisApp.m_dbX - 2);
                            GapY = point.Y - (thisApp.m_dbY - 2);

                            int siteX = 999, siteY = 999;

                            if (GapX < 30 && GapX > -30)
                            {
                                siteX = 1;
                            }
                            else if (GapX >= 30)
                            {
                                siteX = 0;
                            }
                            else if (GapX <= -30)
                            {
                                siteX = 2;
                            }

                            if (GapY >= -40)
                            {
                                siteY = 0;
                            }
                            else if (GapY < -40 && GapY > -60)
                            {
                                siteY = 1;
                            }
                            else if (GapY <= -60)
                            {
                                siteY = 2;
                            }

                            // Row-major 3x3 grid: 0 = upper-left ... 8 = lower-right.
                            string[] siteLabels =
                            {
                                "upper-left", "top",    "upper-right",
                                "left",       "center", "right",
                                "lower-left", "bottom", "lower-right"
                            };

                            int site = siteX + (siteY * 3);
                            if (site >= 0 && site < siteLabels.Length)
                            {
                                text2.Text = siteLabels[site];
                            }

                            thisApp.nowsite = site;

                            /*
                             *
                             * rect4.X = facePoints[i].X - 2;
                             * rect4.Y = facePoints[i].Y - 2;
                             * rect4.Width = 4;
                             * rect4.Height = 4;
                             */
                        }
                    }
                }
            }
        }
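        // A hedged sketch (not in the original) of the 3x3 bucketing above, factored
        // into a pure helper; the name ClassifyGridSite is hypothetical, and the
        // column/row semantics follow the label table in the handler. Thresholds are
        // the same ones used above: +/-30 px horizontally, -40/-60 px vertically.
        private static int ClassifyGridSite(double gapX, double gapY)
        {
            int siteX, siteY;

            if (gapX > -30 && gapX < 30) siteX = 1;      // middle column
            else if (gapX >= 30)         siteX = 0;      // first column
            else                         siteX = 2;      // last column (gapX <= -30)

            if (gapY >= -40)     siteY = 0;              // first row
            else if (gapY > -60) siteY = 1;              // middle row
            else                 siteY = 2;              // last row (gapY <= -60)

            return siteX + (siteY * 3);                  // row-major index 0..8
        }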
        /// <summary>
        /// Start depth stream at specific resolution
        /// </summary>
        /// <param name="format">The resolution of image in depth stream</param>
        /// <returns>Returns true if the sensor supports near mode.</returns>
        public bool StartDepthStream(DepthImageFormat format)
        {
            if (null == this.sensor)
            {
                this.StatusMessage = "No ready Kinect found!";
                return(true);
            }

            bool isSupportNearMode = true;

            try
            {
                // Enable depth stream, register event handler and start
                this.Sensor.DepthStream.Enable(format);
                this.Sensor.DepthFrameReady += this.OnDepthFrameReady;
                this.depthFormat             = format;

                if (!this.IsStarted())
                {
                    this.Sensor.Start();
                    this.started = true;
                }

                // Set Near Mode by default
                try
                {
                    this.Sensor.DepthStream.Range    = DepthRange.Near;
                    this.ReconSensorControl.NearMode = true;
                }
                catch (InvalidOperationException)
                {
                    isSupportNearMode = false;
                }

                // Create frustum and graphics camera
                Size imageSize = Helper.GetImageSize(this.DepthFormat);
                this.depthWidth  = (int)imageSize.Width;
                this.depthHeight = (int)imageSize.Height;

                // Create the graphics camera and set at origin initially - we will override by setting the transform explicitly below
                this.reconstructionSensorCamera = new GraphicsCamera(new Point3D(0, 0, 0), Quaternion.Identity, (float)this.depthWidth / (float)this.depthHeight);

                // Update view transform now, from ReconstructionSensorControl
                this.SetCameraTransformation((float)this.reconstructionSensorControl.AngleX, (float)this.reconstructionSensorControl.AngleY, (float)this.reconstructionSensorControl.AngleZ, (float)this.reconstructionSensorControl.AxisDistance);
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor        = null;
                this.StatusMessage = ex.Message;
                throw;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor        = null;
                this.StatusMessage = ex.Message;
                throw;
            }

            this.StatusMessage = string.Empty;

            return(isSupportNearMode);
        }
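        // Hedged usage sketch for StartDepthStream above; the method name
        // StartReconstructionSensor and the status text are illustrative, not from
        // the original sample.
        private void StartReconstructionSensor()
        {
            bool nearModeSupported = this.StartDepthStream(DepthImageFormat.Resolution640x480Fps30);
            if (!nearModeSupported)
            {
                // Near mode is unavailable (e.g. Kinect for Xbox hardware);
                // the stream keeps running in the default depth range.
                this.StatusMessage = "Near mode not supported; using default range.";
            }
        }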
        private void AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame skeletonFrame = null;


            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for changes in any of the data this function is receiving
                // and reset things appropriately.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.depthImage = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.colorImage = null;
                    this.colorImageFormat = colorImageFrame.Format;
                    this.colorImageWritableBitmap = null;
                    this.ColorImage.Source = null;
                    this.theMaterial.Brush = null;
                }

                if (this.skeletonData != null && this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = null;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                if (this.colorImageWritableBitmap == null)
                {
                    this.colorImageWritableBitmap = new WriteableBitmap(
                        colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                    this.ColorImage.Source = this.colorImageWritableBitmap;
                    this.theMaterial.Brush = new ImageBrush(this.colorImageWritableBitmap)
                        {
                            ViewportUnits = BrushMappingMode.Absolute
                        };
                }

                if (this.skeletonData == null)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                // Copy data received in this event to our buffers.
                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                this.colorImageWritableBitmap.WritePixels(
                    new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                    this.colorImage,
                    colorImageFrame.Width * Bgr32BytesPerPixel,
                    0);

                // Find a skeleton to track.
                // First see if our old one is good.
                // When a skeleton is in PositionOnly tracking state, don't pick a new one
                // as it may become fully tracked again.
                Skeleton skeletonOfInterest =
                    this.skeletonData.FirstOrDefault(
                        skeleton =>
                        skeleton.TrackingId == this.trackingId
                        && skeleton.TrackingState != SkeletonTrackingState.NotTracked);

                if (skeletonOfInterest == null)
                {
                    // Old one wasn't around.  Find any skeleton that is being tracked and use it.
                    skeletonOfInterest =
                        this.skeletonData.FirstOrDefault(
                            skeleton => skeleton.TrackingState == SkeletonTrackingState.Tracked);

                    if (skeletonOfInterest != null)
                    {
                        // This may be a different person so reset the tracker which
                        // could have tuned itself to the previous person.
                        if (this.faceTracker != null)
                        {
                            this.faceTracker.ResetTracking();
                        }

                        this.trackingId = skeletonOfInterest.TrackingId;
                    }
                }

                bool displayFaceMesh = false;

                if (skeletonOfInterest != null && skeletonOfInterest.TrackingState == SkeletonTrackingState.Tracked)
                {
                    if (this.faceTracker == null)
                    {
                        try
                        {
                            this.faceTracker = new FaceTracker(this.Kinect);
                        }
                        catch (InvalidOperationException)
                        {
                            // During some shutdown scenarios the FaceTracker
                            // is unable to be instantiated.  Catch that exception
                            // and don't track a face.
                            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                            this.faceTracker = null;
                        }
                    }

                    if (this.faceTracker != null)
                    {
                        FaceTrackFrame faceTrackFrame = this.faceTracker.Track(
                            this.colorImageFormat,
                            this.colorImage,
                            this.depthImageFormat,
                            this.depthImage,
                            skeletonOfInterest);

                        if (faceTrackFrame.TrackSuccessful && status.Text.ToString() == "STATUS:MONITORING")
                        {
                            this.UpdateMesh(faceTrackFrame);

                            // Only display the face mesh if there was a successful track.
                            displayFaceMesh = true;
                        }
                    }
                }
                else
                {
                    this.trackingId = -1;
                }

                this.viewport3d.Visibility = displayFaceMesh ? Visibility.Visible : Visibility.Hidden;
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }

            kinectRegion.KinectSensor = Kinect;
        }
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame skeletonFrame = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked
                        || skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        // We want to keep a record of any skeleton, tracked or untracked.
                        if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                        {
                            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                        }

                        // Give each tracker the updated frame.
                        SkeletonFaceTracker skeletonFaceTracker;
                        if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                        {
                            skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                            skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        }
                    }
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
        /// <summary>
        /// Maps a PointSkeleton3D to a PointDepth3D.
        /// </summary>
        /// <param name="pointSkleton3D">The skeleton-space point to map.</param>
        /// <param name="depthImageFormat">The depth image format that defines the target pixel space.</param>
        /// <returns>The mapped point in depth image coordinates.</returns>
        public PointDepth3D MapSkeletonPointToDepthPoint(PointSkeleton3D pointSkleton3D, DepthImageFormat depthImageFormat)
        {
            SkeletonPoint point = new SkeletonPoint();

            point.X = pointSkleton3D.X;
            point.Y = pointSkleton3D.Y;
            point.Z = pointSkleton3D.Z;

            return(new PointDepth3D(mapper.MapSkeletonPointToDepthPoint(point, depthImageFormat)));
        }
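        // Hedged usage sketch: the same mapping done directly against the SDK
        // CoordinateMapper that this class wraps; MapHeadToDepth is an illustrative
        // name, not part of the original code.
        private static DepthImagePoint MapHeadToDepth(CoordinateMapper mapper, Skeleton skeleton)
        {
            // Map the head joint from skeleton space into 640x480 depth pixel space.
            SkeletonPoint head = skeleton.Joints[JointType.Head].Position;
            return mapper.MapSkeletonPointToDepthPoint(head, DepthImageFormat.Resolution640x480Fps30);
        }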
        public void EnableDepthStream(DepthImageFormat format, int width, int height, bool filterRange = false, bool trackPoints = false)
        {
            this.filterRange = filterRange;
            this.trackPoints = trackPoints;
            this.depthWidth = width;
            this.depthHeight = height;

            if (trackPoints)
            {
                pointTracker = new BlobCounter();
                pointTracker.MinWidth = MIN_POINT_WIDTH;
                pointTracker.MinHeight = MIN_POINT_WIDTH;
                pointTracker.MaxWidth = MAX_POINT_WIDTH;
                pointTracker.MaxHeight = MAX_POINT_WIDTH;
                pointTracker.ObjectsOrder = OBJECTS_ORDER;
                pointTracker.FilterBlobs = true;

                Points = new List<System.Drawing.Point>();
            }

            KinectSensor.DepthStream.Enable(format);
            KinectSensor.DepthFrameReady += new EventHandler<DepthImageFrameReadyEventArgs>(kinectSensor_DepthFrameReady);
        }
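        // Hedged usage sketch for EnableDepthStream above; StartDepthTracking is an
        // illustrative caller, not part of the original code.
        public void StartDepthTracking()
        {
            // 320x240 keeps the blob counter cheap; enable range filtering and
            // blob-based point tracking.
            this.EnableDepthStream(DepthImageFormat.Resolution320x240Fps30,
                                   320, 240,
                                   filterRange: true,
                                   trackPoints: true);
        }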
            private bool CheckFace(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return(false);
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        //getting the Animation Unit Coefficients
                        this.AUs = frame.GetAnimationUnitCoefficients();
                        var jawLowerer   = AUs[AnimationUnit.JawLower];
                        var browLower    = AUs[AnimationUnit.BrowLower];
                        var browRaiser   = AUs[AnimationUnit.BrowRaiser];
                        var lipDepressor = AUs[AnimationUnit.LipCornerDepressor];
                        var lipRaiser    = AUs[AnimationUnit.LipRaiser];
                        var lipStretcher = AUs[AnimationUnit.LipStretcher];
                        //set up file for output (note: this recreates data.txt each time, discarding earlier output)
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                                                 (@"C:\Users\Public\data.txt"))
                        {
                            file.WriteLine("FaceTrack Data, started recording at " + DateTime.Now.ToString("HH:mm:ss tt"));
                        }

                        //here is the algorithm to test different facial features

                        //BrowLower is unreliable if the user wears glasses; it works without them

                        string state = "";

                        //surprised
                        if ((jawLowerer < -0.25 || jawLowerer > 0.25) && browLower < 0)
                        {
                            state = "surprised";
                        }
                        //smiling
                        if (lipStretcher > 0.4 || lipDepressor < 0)
                        {
                            state = "smiling";
                        }
                        //sad
                        if (browRaiser < 0 && lipDepressor > 0)
                        {
                            state = "sad";
                        }
                        //angry
                        if ((browLower > 0 && (jawLowerer > 0.25 || jawLowerer < -0.25)) ||
                            (browLower > 0 && lipDepressor > 0))
                        {
                            state = "angry";
                        }
                        //System.Diagnostics.Debug.WriteLine(browLower);

                        this.facePoints = frame.GetProjected3DShape();

                        if (states[currentState] == state)
                        {
                            Trace.WriteLine("Yo!");
                            return(true);
                        }
                    }
                }

                return(false);
            }
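            // Hedged helper sketch: the animation-unit thresholds from CheckFace in
            // one place. Face Tracking AU coefficients are normalized to roughly
            // [-1, +1]; these cut-offs mirror the ones above and are tuning guesses,
            // not SDK constants. Like the code above, a later match overrides an
            // earlier one. ClassifyExpression is an illustrative name.
            private static string ClassifyExpression(EnumIndexableCollection<AnimationUnit, float> aus)
            {
                float jaw   = aus[AnimationUnit.JawLower];
                float brow  = aus[AnimationUnit.BrowLower];
                float browR = aus[AnimationUnit.BrowRaiser];
                float lipD  = aus[AnimationUnit.LipCornerDepressor];
                float lipS  = aus[AnimationUnit.LipStretcher];

                string state = "";
                if ((jaw < -0.25f || jaw > 0.25f) && brow < 0) state = "surprised";
                if (lipS > 0.4f || lipD < 0)                   state = "smiling";
                if (browR < 0 && lipD > 0)                     state = "sad";
                if ((brow > 0 && (jaw > 0.25f || jaw < -0.25f)) ||
                    (brow > 0 && lipD > 0))                    state = "angry";
                return state;
            }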
		void FSensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
		{
			//if anyone is listening for updates, produce all data regardless of which pins are connected
			bool DoAnyway = Update != null;

			//cache this value once for the whole function
			var UseRegistration = FInRegistration[0] || DoAnyway;

			//depth
			if (FOutDepth.IsConnected || FOutWorld.IsConnected || DoAnyway)
			{
				var depth = e.OpenDepthImageFrame();
				if (depth != null)
				{
					lock (DepthLock)
					{
						if (DepthData == null || DepthData.Length != depth.PixelDataLength)
							DepthData = new short[depth.PixelDataLength];

						depth.CopyPixelDataTo(DepthData);
						FDepthFormat = depth.Format;
					}
					depth.Dispose();
					FDepthInvalidate = true;

					if (FOutWorld.IsConnected)
						FWorldInInvalidate = true;

					if (UseRegistration)
					{
						if (FRegistrationData == null || DepthData.Length != FRegistrationData.Length)
							FRegistrationData = new ColorImagePoint[DepthData.Length];

						var depthFormat = FSensor.DepthStream.Format;
						var colorFormat = FSensor.ColorStream.Format;
						FSensor.MapDepthFrameToColorFrame(depthFormat, DepthData, colorFormat, FRegistrationData);
					}
				}
			}

			//color
			if (FOutColor.IsConnected || DoAnyway)
			{
				var color = e.OpenColorImageFrame();
				if (color != null)
				{
					if (UseRegistration)
					{
						if (FUnregisteredColorData == null || FUnregisteredColorData.Length != color.PixelDataLength)
							FUnregisteredColorData = new byte[color.PixelDataLength];
						color.CopyPixelDataTo(FUnregisteredColorData);

						lock (ColorLock)
						{
							if (ColorData == null || ColorData.Length != color.PixelDataLength)
								ColorData = new byte[color.PixelDataLength];

							//clear output
							ClearColorData();

							//remap colors
							RemapColors();
						}
					}
					else
					{
						lock (ColorLock)
						{
							if (ColorData == null || ColorData.Length != color.PixelDataLength)
								ColorData = new byte[color.PixelDataLength];
							color.CopyPixelDataTo(ColorData);
						}
					}

					FTimestamp = color.Timestamp;
					FFrameNumber = color.FrameNumber;
					FTimestampInvalidate = true;

					color.Dispose();
					FColorInvalidate = true;
				}
			}

			//skeletons
			if (FOutSkeleton.PluginIO.IsConnected || DoAnyway)
			{
				var skeletons = e.OpenSkeletonFrame();
				if (skeletons != null)
				{
					if (FSkeletonData == null || FSkeletonData.Length != skeletons.SkeletonArrayLength)
						FSkeletonData = new Skeleton[skeletons.SkeletonArrayLength];
					skeletons.CopySkeletonDataTo(FSkeletonData);
					skeletons.Dispose();
					FSkeletonInvalidate = true;
				}
			}

			OnUpdate();
		}
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                        skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        // We want to keep a record of any skeleton, tracked or untracked.
                        if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                        {
                            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                        }

                        // Give each tracker the updated frame.
                        SkeletonFaceTracker skeletonFaceTracker;
                        if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                        {
                            skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                            skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        }
                    }
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
 private void ResetOutput()
 {
     this.Dispatcher.Invoke((Action)(() =>
     {
         if (!this.RetainImageOnSensorChange)
         {
             this.kinectDepthImage.Source = null;
             this.outputBitmap = null;
             this.lastImageFormat = DepthImageFormat.Undefined;
         }
         
         this.ResetFrameRateCounters();
     }));
 }
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit (See components in Toolkit Browser).
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (this.sensor != null)
            {
                // Turn on the skeleton, color, and depth streams to receive frames
                TransformSmoothParameters smoothingParam = new TransformSmoothParameters
                {
                    Smoothing          = 0.5f,
                    Correction         = 0.1f,
                    Prediction         = 0.5f,
                    JitterRadius       = 0.1f,
                    MaxDeviationRadius = 0.1f
                };
                this.sensor.SkeletonStream.Enable(smoothingParam);

                //this.sensor.SkeletonStream.Enable();

                this.sensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                this.checkBoxSeatedMode.SetCurrentValue(CheckBox.IsCheckedProperty, true);

                this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
                this.sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);

                this.RenderHeight     = 480;
                this.RenderWidth      = 640;
                this.depthImageFormat = this.sensor.DepthStream.Format;
                this.colorImageFormat = this.sensor.ColorStream.Format;

                // Create the drawing group we'll use for drawing
                this.drawingGroup = new DrawingGroup();
                this.drawingGroup.ClipGeometry = new RectangleGeometry(new Rect(0.0, 0.0, RenderWidth, RenderHeight));

                this.outputDrawingGroup = new DrawingGroup();
                this.outputDrawingGroup.ClipGeometry = new RectangleGeometry(new Rect(0.0, 0.0, RenderWidth, RenderHeight));

                // Display the drawing using our image control
                Image.Source = new DrawingImage(this.drawingGroup);
                // Allocate space to put the pixels we'll receive
                this.colorImage = new byte[this.sensor.ColorStream.FramePixelDataLength];
                // This is the bitmap we'll display on-screen
                this.colorBitmap      = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);
                this.blankColorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

                OutputImage.Source = new DrawingImage(this.outputDrawingGroup);

                RoomSetting.SetCameraMatrix();

                RoomSetting.SetPlates();

                // Add an event handler to be called whenever there is new all frame data
                this.sensor.AllFramesReady += this.OnAllFramesReady;

                // Start the sensor!
                try
                {
                    this.sensor.Start();
                }
                catch (IOException)
                {
                    this.sensor = null;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
            }
        }
        void KinectFaceNode_AllFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame skeletonFrame = null;

            colorImageFrame = e.OpenColorImageFrame();
            depthImageFrame = e.OpenDepthImageFrame();
            skeletonFrame = e.OpenSkeletonFrame();

            if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
            {
                return;
            }

            if (first)
            {
                first = false;
                this.olddepth = depthImageFrame.Format;
            }
            else
            {
                if (this.olddepth != depthImageFrame.Format)
                {
                    //Need a reset
                    if (this.depthImage != null) { this.depthImage = null; }
                    if (this.face != null) { this.face.Dispose(); this.face = null; }
                    this.trackedSkeletons.Clear();
                    this.olddepth = depthImageFrame.Format;
                }
            }

            if (this.depthImage == null)
            {
                this.depthImage = new short[depthImageFrame.PixelDataLength];
            }

            if (this.colorImage == null)
            {
                this.colorImage = new byte[colorImageFrame.PixelDataLength];
            }

            if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
            {
                this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
            }

            if (face == null)
            {
                face = new FaceTracker(this.runtime.Runtime);
            }

            colorImageFrame.CopyPixelDataTo(this.colorImage);
            depthImageFrame.CopyPixelDataTo(this.depthImage);
            skeletonFrame.CopySkeletonDataTo(this.skeletonData);

            foreach (Skeleton skeleton in this.skeletonData)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked
                    || skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of any skeleton, tracked or untracked.
                    if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(this.runtime.Runtime, colorImageFrame.Format, colorImage, depthImageFrame.Format, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }
            }

            this.RemoveOldTrackers(skeletonFrame.FrameNumber);

            colorImageFrame.Dispose();
            depthImageFrame.Dispose();
            skeletonFrame.Dispose();

            this.FInvalidate = true;
        }
 /// <summary>
 /// Maps every point in a depth frame to the corresponding location in a ColorImageFormat coordinate space.
 /// </summary>
 /// <param name="depthImageFormat">The depth format of the source.</param>
 /// <param name="depthPixelData">The depth frame pixel data, as retrieved from DepthImageFrame.CopyPixelDataTo.
 ///             Must be equal in length to Width*Height of the depth format specified by depthImageFormat.</param>
 /// <param name="colorImageFormat">The desired target image format.</param>
 /// <param name="colorCoordinates">The ColorImagePoint array to receive the data. Each element will be the result of mapping the
 ///             corresponding depthPixelDatum to the specified ColorImageFormat coordinate space.
 ///             Must be equal in length to depthPixelData.</param>
 public void MapDepthFrameToColorFrame(DepthImageFormat depthImageFormat, short[] depthPixelData,
                                       ColorImageFormat colorImageFormat, ColorImagePoint[] colorCoordinates)
 {
     _kinectSensor.MapDepthFrameToColorFrame(depthImageFormat, depthPixelData, colorImageFormat, colorCoordinates);
 }
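 // Hedged usage sketch for MapDepthFrameToColorFrame above: the output array must
 // hold exactly one ColorImagePoint per depth pixel. RegisterDepthToColor and the
 // fixed formats are illustrative, not from the original code.
 public ColorImagePoint[] RegisterDepthToColor(short[] depthPixels)
 {
     var colorPoints = new ColorImagePoint[depthPixels.Length];
     this.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30,
                                    depthPixels,
                                    ColorImageFormat.RgbResolution640x480Fps30,
                                    colorPoints);
     return colorPoints;
 }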
Exemple #55
        internal void ProcessFrame(CoordinateMapper mapper, Skeleton skeletonOfInterest, DepthImageFormat depthImageFormat)
        {
            _joints.Clear();
            if (skeletonOfInterest != null)
            {
                var size = FormatHelper.GetDepthSize(depthImageFormat);

                var depthWidth = (int)size.Width;

                var headJoint = skeletonOfInterest.Joints[JointType.Head];
                var neckJoint = skeletonOfInterest.Joints[JointType.ShoulderCenter];

                var _headPoint = mapper.MapSkeletonPointToDepthPoint(headJoint.Position, depthImageFormat);
                var _neckPoint = mapper.MapSkeletonPointToDepthPoint(neckJoint.Position, depthImageFormat);

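                // Mirror the X coordinates so the mapped points line up with a
                // horizontally flipped display (assumed: the rendered image is mirrored).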
                _headPoint.X = depthWidth - _headPoint.X;
                _neckPoint.X = depthWidth - _neckPoint.X;

                _joints.Add(_headPoint);
                _joints.Add(_neckPoint);
            }
            RaiseFrameUpdated();
        }
Exemple #56
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest).Clone() as FaceTrackFrame;

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        /*if (faceTriangles == null)
                         * {
                         *  // only need to get this once.  It doesn't change.
                         *  faceTriangles = frame.GetTriangles();
                         * }
                         *
                         * this.facePoints = frame.GetProjected3DShape();*/
                    }
                }
            }
        private void DepthImageReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame imageFrame = e.OpenDepthImageFrame())
            {
                if (imageFrame != null)
                {
                    // We need to detect if the format has changed.
                    bool haveNewFormat = this.lastImageFormat != imageFrame.Format;

                    if (haveNewFormat)
                    {
                        this.pixelData = new short[imageFrame.PixelDataLength];
                        this.depthFrame32 = new byte[imageFrame.Width * imageFrame.Height * Bgr32BytesPerPixel];
                    }

                    imageFrame.CopyPixelDataTo(this.pixelData);

                    byte[] convertedDepthBits = this.ConvertDepthFrame(this.pixelData, ((KinectSensor)sender).DepthStream);

                    // A WriteableBitmap is a WPF construct that enables resetting the Bits of the image.
                    // This is more efficient than creating a new Bitmap every frame.
                    if (haveNewFormat)
                    {
                        this.outputBitmap = new WriteableBitmap(
                            imageFrame.Width, 
                            imageFrame.Height, 
                            96,  // DpiX
                            96,  // DpiY
                            PixelFormats.Bgr32, 
                            null);

                        this.kinectDepthImage.Source = this.outputBitmap;
                    }

                    this.outputBitmap.WritePixels(
                        new Int32Rect(0, 0, imageFrame.Width, imageFrame.Height), 
                        convertedDepthBits,
                        imageFrame.Width * Bgr32BytesPerPixel,
                        0);

                    this.lastImageFormat = imageFrame.Format;

                    UpdateFrameRate();
                }
            }
        }
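        // Hedged sketch of what the ConvertDepthFrame helper used above might do
        // (its real implementation is not shown in this example): strip the player
        // index bits from each 16-bit sample and render depth as grey Bgr32 intensity.
        private static byte[] ConvertDepthFrameSketch(short[] depthFrame, DepthImageStream depthStream)
        {
            int minDepth = depthStream.MinDepth;
            int maxDepth = depthStream.MaxDepth;
            byte[] pixels = new byte[depthFrame.Length * 4];

            for (int i = 0, j = 0; i < depthFrame.Length; i++, j += 4)
            {
                // The low bits carry the player index; the real depth sits above them.
                int depth = depthFrame[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                // Clamp to the valid range, then map near = bright, far = dark.
                int clamped = Math.Min(Math.Max(depth, minDepth), maxDepth);
                byte intensity = (byte)(255 - (255 * (clamped - minDepth) / (maxDepth - minDepth)));

                pixels[j]     = intensity; // blue
                pixels[j + 1] = intensity; // green
                pixels[j + 2] = intensity; // red
                                           // pixels[j + 3] unused in Bgr32
            }

            return pixels;
        }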
        private void KinectAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            KinectSensor sensor = sender as KinectSensor;

            foreach (var skeletonCanvas in this.skeletonCanvases)
            {
                skeletonCanvas.Skeleton = null;
            }

            // Have we already been "shut down" by the user of this viewer,
            // or has the SkeletonStream been disabled since this event was posted?
            if ((null == this.KinectSensorManager) ||
                (null == sensor) ||
                (null == sensor.SkeletonStream) ||
                !sensor.SkeletonStream.IsEnabled)
            {
                return;
            }

            bool haveSkeletonData = false;
            long frameTimeStamp   = -1;

            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                if (skeletonFrame != null)
                {
                    if ((this.skeletonData == null) || (this.skeletonData.Length != skeletonFrame.SkeletonArrayLength))
                    {
                        this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                    }

                    skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                    frameTimeStamp = skeletonFrame.Timestamp;

                    haveSkeletonData = true;
                }
            }

            int trackedIndex = -1;

            // Find the first tracked skeleton (if any) and set trackedIndex accordingly
            for (int i = 0; skeletonData != null && i < skeletonData.Length; i++)
            {
                if (skeletonData[i].TrackingState.Equals(SkeletonTrackingState.Tracked))
                {
                    trackedIndex = i;
                    break;
                }
            }


            bool isFullyTracked = false;

            if (isMeasuring && trackedIndex > -1)
            {
                // check to see if the skeleton @ trackedIndex is fully tracked
                if (fullyTrackedMapping == null && IsFullyTracked(skeletonData[trackedIndex]))
                {
                    isFullyTracked = true;
                }

                SkeletonMeasurer measurer = new SkeletonMeasurer(skeletonData[trackedIndex]);
                measurer.determineMeasurements();
                AddMeasurementsToBuffer(measurer.TestMeasurements);

                skeletonBuffer.Add(ObjectCopier.Clone<Skeleton>(skeletonData[trackedIndex]));
                frameTimeStampBuffer.Add(frameTimeStamp);
            }

            if (haveSkeletonData)
            {
                ColorImageFormat colorFormat = ColorImageFormat.Undefined;
                int colorWidth  = 0;
                int colorHeight = 0;

                DepthImageFormat depthFormat = DepthImageFormat.Undefined;
                int depthWidth  = 0;
                int depthHeight = 0;

                switch (this.ImageType)
                {
                case ImageType.Color:
                    // Retrieve the current color format, from the frame if present, and from the sensor if not.
                    using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
                    {
                        if (null != colorImageFrame)
                        {
                            colorFormat = colorImageFrame.Format;
                            colorWidth  = colorImageFrame.Width;
                            colorHeight = colorImageFrame.Height;
                        }
                        else if (null != sensor.ColorStream)
                        {
                            colorFormat = sensor.ColorStream.Format;
                            colorWidth  = sensor.ColorStream.FrameWidth;
                            colorHeight = sensor.ColorStream.FrameHeight;
                        }
                    }

                    break;

                case ImageType.Depth:
                    // Retrieve the current depth format, from the frame if present, and from the sensor if not.
                    using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
                    {
                        if (null != depthImageFrame)
                        {
                            depthFormat = depthImageFrame.Format;
                            depthWidth  = depthImageFrame.Width;
                            depthHeight = depthImageFrame.Height;
                        }
                        else if (null != sensor.DepthStream)
                        {
                            depthFormat = sensor.DepthStream.Format;
                            depthWidth  = sensor.DepthStream.FrameWidth;
                            depthHeight = sensor.DepthStream.FrameHeight;
                        }
                    }

                    break;
                }

                for (int i = 0; i < this.skeletonData.Length && i < this.skeletonCanvases.Count; i++)
                {
                    var skeleton       = this.skeletonData[i];
                    var skeletonCanvas = this.skeletonCanvases[i];
                    var jointMapping   = this.jointMappings[i];

                    jointMapping.Clear();

                    try
                    {
                        // Transform the data into the correct space
                        // For each joint, we determine the exact X/Y coordinates for the target view
                        foreach (Joint joint in skeleton.Joints)
                        {
                            Point mappedPoint = Get2DPosition(
                                sensor,
                                this.ImageType,
                                this.RenderSize,
                                joint.Position,
                                colorFormat,
                                colorWidth,
                                colorHeight,
                                depthFormat,
                                depthWidth,
                                depthHeight);

                            jointMapping[joint.JointType] = new JointMapping
                            {
                                Joint       = joint,
                                MappedPoint = mappedPoint
                            };
                        }
                    }
                    catch (UnauthorizedAccessException)
                    {
                        // Kinect is no longer available.
                        return;
                    }

                    // Look up the center point
                    Point centerPoint = Get2DPosition(
                        sensor,
                        this.ImageType,
                        this.RenderSize,
                        skeleton.Position,
                        colorFormat,
                        colorWidth,
                        colorHeight,
                        depthFormat,
                        depthWidth,
                        depthHeight);

                    // Scale the skeleton thickness
                    // 1.0 is the desired size at 640 width
                    double scale = this.RenderSize.Width / 640;

                    skeletonCanvas.Skeleton      = skeleton;
                    skeletonCanvas.JointMappings = jointMapping;
                    skeletonCanvas.Center        = centerPoint;
                    skeletonCanvas.ScaleFactor   = scale;
                }

                if (isFullyTracked)
                {
                    fullyTrackedMapping = new Dictionary<JointType, JointMapping>();

                    foreach (JointType type in jointMappings[trackedIndex].Keys)
                    {
                        fullyTrackedMapping[type] = new JointMapping
                        {
                            Joint       = jointMappings[trackedIndex][type].Joint,
                            MappedPoint = jointMappings[trackedIndex][type].MappedPoint
                        };
                    }
                }
            }
        }
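        // Hedged sketch of what a Get2DPosition helper like the one used above might
        // do (the actual implementation is not shown here): map the skeleton point
        // into the chosen image space with the SDK CoordinateMapper, then scale into
        // the render size. Get2DPositionSketch is an illustrative name.
        private static Point Get2DPositionSketch(
            KinectSensor sensor, ImageType imageType, Size renderSize, SkeletonPoint position,
            ColorImageFormat colorFormat, int colorWidth, int colorHeight,
            DepthImageFormat depthFormat, int depthWidth, int depthHeight)
        {
            switch (imageType)
            {
            case ImageType.Color:
                if (colorFormat != ColorImageFormat.Undefined)
                {
                    ColorImagePoint cp = sensor.CoordinateMapper.MapSkeletonPointToColorPoint(position, colorFormat);
                    return new Point(cp.X * renderSize.Width / colorWidth,
                                     cp.Y * renderSize.Height / colorHeight);
                }

                break;

            case ImageType.Depth:
                if (depthFormat != DepthImageFormat.Undefined)
                {
                    DepthImagePoint dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(position, depthFormat);
                    return new Point(dp.X * renderSize.Width / depthWidth,
                                     dp.Y * renderSize.Height / depthHeight);
                }

                break;
            }

            return new Point();
        }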
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        //getting the Animation Unit Coefficients
                        this.AUs = frame.GetAnimationUnitCoefficients();
                        var jawLowerer = AUs[AnimationUnit.JawLower];
                        var browLower = AUs[AnimationUnit.BrowLower];
                        var browRaiser = AUs[AnimationUnit.BrowRaiser];
                        var lipDepressor = AUs[AnimationUnit.LipCornerDepressor];
                        var lipRaiser = AUs[AnimationUnit.LipRaiser];
                        var lipStretcher = AUs[AnimationUnit.LipStretcher];
                        //set up file for output (note: this recreates data.txt each time, discarding earlier output)
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter
                            (@"C:\Users\Public\data.txt"))
                        {
                            file.WriteLine("FaceTrack Data, started recording at " + DateTime.Now.ToString("HH:mm:ss tt"));
                        }

                        //here is the algorithm to test different facial features

                        //BrowLower is unreliable if the user wears glasses; it works without them

                        //surprised
                        if ((jawLowerer < -0.25 || jawLowerer > 0.25) && browLower < 0)
                        {
                            System.Diagnostics.Debug.WriteLine("surprised");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": surprised");
                                file.WriteLine("JawLowerer: " + jawLowerer);
                                file.WriteLine("BrowLowerer: " + browLower);
                            }
                        }
                        //smiling
                        if (lipStretcher > 0.4 || lipDepressor < 0)
                        {
                            System.Diagnostics.Debug.WriteLine("Smiling");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": smiling");
                                file.WriteLine("LipStretcher: " + lipStretcher);
                            }
                        }
                        //kissing face
                        if (lipStretcher < -0.75)
                        {
                            System.Diagnostics.Debug.WriteLine("kissing face");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": kissing face");
                                file.WriteLine("LipStretcher: " + lipStretcher);
                            }
                        }
                        //sad
                        if (browRaiser < 0 && lipDepressor > 0)
                        {
                            System.Diagnostics.Debug.WriteLine("sad");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": sad");
                                file.WriteLine("LipCornerDepressor: " + lipDepressor);
                                file.WriteLine("OuterBrowRaiser: " + browRaiser);
                            }
                        }
                        //angry
                        if ((browLower > 0 && (jawLowerer > 0.25 || jawLowerer < -0.25)) ||
                            (browLower > 0 && lipDepressor > 0))
                        {
                            System.Diagnostics.Debug.WriteLine("angry");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": angry");
                                file.WriteLine("LipCornerDepressor: " + lipDepressor);
                                file.WriteLine("BrowLowerer: " + browLower);
                                file.WriteLine("JawLowerer: " + jawLowerer);
                            }
                        }

                        this.facePoints = frame.GetProjected3DShape();
                    }
                }
            }
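
        // A minimal refactoring sketch of the expression heuristics above. The
        // Expression enum, ClassifyExpression and AppendExpressionLog names are
        // hypothetical, and the thresholds simply mirror the ad-hoc values used
        // above; they are not calibrated values from the Face Tracking SDK.
        private enum Expression { Neutral, Surprised, Smiling, Kissing, Sad, Angry }

        private static Expression ClassifyExpression(EnumIndexableCollection<AnimationUnit, float> aus)
        {
            float jaw       = aus[AnimationUnit.JawLower];
            float browLower = aus[AnimationUnit.BrowLower];
            float browRaise = aus[AnimationUnit.BrowRaiser];
            float lipDep    = aus[AnimationUnit.LipCornerDepressor];
            float lipStr    = aus[AnimationUnit.LipStretcher];

            // More specific combinations are tested first.
            if (browLower > 0 && (Math.Abs(jaw) > 0.25f || lipDep > 0)) return Expression.Angry;
            if (browRaise < 0 && lipDep > 0)                            return Expression.Sad;
            if (lipStr < -0.75f)                                        return Expression.Kissing;
            if (lipStr > 0.4f || lipDep < 0)                            return Expression.Smiling;
            if (Math.Abs(jaw) > 0.25f && browLower < 0)                 return Expression.Surprised;
            return Expression.Neutral;
        }

        // Companion sketch: always open the log in append mode so earlier entries
        // survive, e.g. AppendExpressionLog("smiling", "LipStretcher: " + lipStr);
        private static void AppendExpressionLog(string label, params string[] details)
        {
            using (var file = new System.IO.StreamWriter(@"C:\Users\Public\data.txt", true))
            {
                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": " + label);
                foreach (string detail in details)
                {
                    file.WriteLine(detail);
                }
            }
        }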
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes. The FaceTracker doesn't
                // handle them, so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.resetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.resetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                        skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        try
                        {
                            this.trackedSkeleton.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                            this.trackedSkeleton.LastTrackedFrame = skeletonFrame.FrameNumber;
                        }
                        catch (NullReferenceException)
                        {
                            // Avoid a crash if face tracking is lost (trackedSkeleton may be null).
                        }
                    }
                }

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
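
        // For reference: a sketch of wiring OnAllFramesReady up to a sensor with
        // the Kinect for Windows SDK 1.x. StartSensor is a hypothetical helper
        // (requires "using System.Linq;"); face tracking needs the color, depth
        // and skeleton streams all enabled on the same sensor.
        private void StartSensor()
        {
            KinectSensor sensor = KinectSensor.KinectSensors
                .FirstOrDefault(s => s.Status == KinectStatus.Connected);
            if (sensor == null)
            {
                return; // no connected sensor found
            }

            sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
            sensor.DepthStream.Enable(DepthImageFormat.Resolution320x240Fps30);
            sensor.SkeletonStream.Enable();
            sensor.AllFramesReady += this.OnAllFramesReady; // handler defined above
            sensor.Start();
        }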