/// <summary>
/// CorrectSensorTilt applies camera tilt correction to the skeleton data.
/// </summary>
/// <param name="skeleton">The skeleton to correct</param>
/// <param name="floorPlane">The floor plane (consisting of up normal and sensor height) detected by skeleton tracking (if any).</param>
/// <param name="sensorElevationAngle">The tilt of the sensor as detected by Kinect.</param>
public static void CorrectSensorTilt(Skeleton skeleton, Tuple<float, float, float, float> floorPlane, int sensorElevationAngle)
{
    if (skeleton == null)
    {
        return;
    }

    // Three candidate up vectors exist for de-tilting: a floor plane detected by Skeleton
    // Tracking, the gravity normal from the 3D accelerometer, and the tilt sensed by the
    // camera motor. The raw accelerometer value is not exposed by the Kinect for Windows SDK,
    // and the floor plane is typically only detected when the camera points down and can see
    // the floor, so the motor elevation angle is the default source.
    // Note: SensorElevationAngle varies around +/- 60 degrees.

    // Assume the camera base is level and rotate an up vector by the motor elevation
    // angle around the X axis. An untransformed Vector3.Up would mean "no tilt effect".
    Vector3 estimatedUp = Vector3.Transform(
        Vector3.Up,
        Quaternion.CreateFromAxisAngle(new Vector3(1, 0, 0), MathHelper.ToRadians(sensorElevationAngle)));

    if (floorPlane != null)
    {
        var planeVec = new Vector4(floorPlane.Item1, floorPlane.Item2, floorPlane.Item3, floorPlane.Item4);

        // Prefer the tracked floor plane when it is non-degenerate and the motor angle is
        // either unreported (0) or outside its reliable range (more than 50 degrees absolute).
        bool planeValid = planeVec.Length() > float.Epsilon;
        bool motorAngleUnreliable = sensorElevationAngle == 0 || Math.Abs(sensorElevationAngle) > 50;

        if (planeValid && motorAngleUnreliable)
        {
            estimatedUp = new Vector3(planeVec.X, planeVec.Y, planeVec.Z);
        }
    }

    Array jointTypeValues = Enum.GetValues(typeof(JointType));

    // Smooth the up vector with a running average (90% history, 10% new) to damp jitter.
    averagedFloorNormal = (averagedFloorNormal * 0.9f) + (estimatedUp * 0.1f);
    Quaternion rotationToRoomSpace = KinectHelper.GetShortestRotationBetweenVectors(Vector3.Up, averagedFloorNormal);

    Vector3 hipCenter = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.HipCenter].Position);

    // De-tilt every joint, rotating about the hip center so the skeleton stays in place.
    foreach (JointType jointType in jointTypeValues)
    {
        Joint joint = skeleton.Joints[jointType];

        // Translate to the origin, rotate into room space, then translate back.
        Vector3 centered = KinectHelper.SkeletonPointToVector3(joint.Position) - hipCenter;
        Vector3 corrected = Vector3.Transform(centered, rotationToRoomSpace) + hipCenter;

        joint.Position = KinectHelper.Vector3ToSkeletonPoint(corrected);
        skeleton.Joints[jointType] = joint;
    }
}
/// <summary>
/// CorrectSensorTilt applies camera tilt correction to the skeleton data.
/// </summary>
/// <param name="skeleton">The skeleton to correct</param>
/// <param name="floorPlane">The floor plane (consisting of up normal and sensor height) detected by skeleton tracking (if any).</param>
/// <param name="sensorElevationAngle">The tilt of the sensor as detected by Kinect.</param>
public static void CorrectSensorTilt(Skeleton skeleton, Tuple<float, float, float, float> floorPlane, int sensorElevationAngle)
{
    // Nothing to correct when no skeleton was supplied.
    if (null == skeleton)
    {
        return;
    }

    // To correct the tilt of the skeleton due to a tilted camera, we have three possible up vectors:
    // one from any floor plane detected in Skeleton Tracking, one from the gravity normal produced by
    // the 3D accelerometer, and one from the tilt value sensed by the camera motor.
    // The raw accelerometer value is not currently available in the Kinect for Windows SDK, so instead
    // we use the sensorElevationAngle, as the floor plane from skeletal tracking is typically only
    // detected when the camera is pointing down and sees the floor.
    // Note: SensorElevationAngle value varies around +/- 60 degrees.
    Vector3 floorNormal = Vector3.Up; // default value (has no tilt effect)

    // Assume the camera base is level, and use the tilt of the Kinect motor:
    // rotate the up vector by the elevation angle around the X axis.
    // NOTE(review): the angle is passed un-negated here, although an earlier revision's comment
    // said "negated" — confirm the sign convention against the motor tilt direction.
    floorNormal = Vector3.Transform(
        floorNormal,
        Quaternion.CreateFromAxisAngle(new Vector3(1, 0, 0), MathHelper.ToRadians(sensorElevationAngle)));

    if (floorPlane != null)
    {
        Vector4 floorPlaneVec = new Vector4(floorPlane.Item1, floorPlane.Item2, floorPlane.Item3, floorPlane.Item4);

        // Use the tracked floor plane instead when it is non-degenerate and the motor angle is
        // either unreported (0) or beyond 50 degrees absolute.
        if (floorPlaneVec.Length() > float.Epsilon && (sensorElevationAngle == 0 || Math.Abs(sensorElevationAngle) > 50))
        {
            // Use the floor plane for everything.
            floorNormal = new Vector3(floorPlaneVec.X, floorPlaneVec.Y, floorPlaneVec.Z);
        }
    }

    Array jointTypeValues = Enum.GetValues(typeof(JointType));

    // Running average of floor normal (90% history, 10% new sample) to damp jitter.
    averagedFloorNormal = (averagedFloorNormal * 0.9f) + (floorNormal * 0.1f);
    Quaternion rotationToRoomSpace = KinectHelper.GetShortestRotationBetweenVectors(Vector3.Up, averagedFloorNormal);

    Vector3 hipCenter = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.HipCenter].Position);

    // De-tilt: rotate every joint about the hip center so the skeleton stays in place.
    foreach (JointType j in jointTypeValues)
    {
        Joint joint = skeleton.Joints[j];
        SkeletonPoint pt = joint.Position;
        Vector3 pos = KinectHelper.SkeletonPointToVector3(pt);

        // Move it back to the origin to rotate
        pos -= hipCenter;

        Vector3 rotatedVec = Vector3.Transform(pos, rotationToRoomSpace);

        rotatedVec += hipCenter;

        joint.Position = KinectHelper.Vector3ToSkeletonPoint(rotatedVec);
        skeleton.Joints[j] = joint;
    }
}
/// <summary>
/// CorrectSkeletonOffsetFromFloor moves the skeleton to the floor.
/// If no floor found in Skeletal Tracking, we can try and use the foot position
/// but this can be very noisy, which causes the skeleton to bounce up and down.
/// Note: Using the foot positions will reduce the visual effect of jumping when
/// an avateer jumps, as we perform a running average.
/// </summary>
/// <param name="skeleton">The skeleton to correct.</param>
/// <param name="floorPlane">The floor plane (consisting of up normal and sensor height) detected by skeleton tracking (if any).</param>
/// <param name="avatarHipCenterHeight">The height of the avatar Hip Center joint.</param>
public void CorrectSkeletonOffsetFromFloor(Skeleton skeleton, Tuple<float, float, float, float> floorPlane, float avatarHipCenterHeight)
{
    if (skeleton == null || skeleton.TrackingState != SkeletonTrackingState.Tracked)
    {
        return;
    }

    // A floor plane counts as detected only when it is non-degenerate.
    Vector4 floorPlaneVec = Vector4.Zero;
    bool haveFloor = false;

    if (floorPlane != null)
    {
        floorPlaneVec = new Vector4(floorPlane.Item1, floorPlane.Item2, floorPlane.Item3, floorPlane.Item4);
        haveFloor = floorPlaneVec.Length() > float.Epsilon;
    }

    Vector3 hipCenterPosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.HipCenter].Position);

    // If there's no floor found, try to use the lower foot position, if visible.
    bool haveLeftFoot = KinectHelper.IsTrackedOrInferred(skeleton, JointType.FootLeft);
    bool haveLeftAnkle = KinectHelper.IsTracked(skeleton, JointType.AnkleLeft);
    bool haveRightFoot = KinectHelper.IsTrackedOrInferred(skeleton, JointType.FootRight);
    bool haveRightAnkle = KinectHelper.IsTracked(skeleton, JointType.AnkleRight);

    bool haveLeft = haveLeftFoot || haveLeftAnkle;
    bool haveRight = haveRightFoot || haveRightAnkle;

    if (haveLeft || haveRight)
    {
        // This runs after de-tilt of the skeleton, so the floor-camera offset is the
        // foot-to-camera height in meters, the foot resting on the floor plane.
        // Jumping still works to some extent thanks to the running average, but its
        // apparent height is reduced.
        Vector3 leftFootPosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.FootLeft].Position);
        Vector3 rightFootPosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.FootRight].Position);
        Vector3 leftAnklePosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.AnkleLeft].Position);
        Vector3 rightAnklePosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.AnkleRight].Position);

        // Average foot and ankle heights when both are available; otherwise take whichever exists.
        float leftHeight;
        if (haveLeftFoot && haveLeftAnkle)
        {
            leftHeight = (leftFootPosition.Y + leftAnklePosition.Y) * 0.5f;
        }
        else
        {
            leftHeight = haveLeftFoot ? leftFootPosition.Y : leftAnklePosition.Y;
        }

        float rightHeight;
        if (haveRightFoot && haveRightAnkle)
        {
            rightHeight = (rightFootPosition.Y + rightAnklePosition.Y) * 0.5f;
        }
        else
        {
            rightHeight = haveRightFoot ? rightFootPosition.Y : rightAnklePosition.Y;
        }

        // Assume the lower of the two (when both sides are visible) is placed on the floor.
        float lowestFootPosition;
        if (haveLeft && haveRight)
        {
            lowestFootPosition = Math.Min(leftHeight, rightHeight);
        }
        else
        {
            lowestFootPosition = haveLeft ? leftHeight : rightHeight;
        }

        // Running average of floor position (90% history, 10% new sample).
        this.averageFloorOffset = (this.averageFloorOffset * 0.9f) + (lowestFootPosition * 0.1f);
    }
    else if (haveFloor)
    {
        // Use the detected height of the camera above the floor, in meters.
        // The plane's W component is the sensor height, hence the negation.
        if (0.0f == this.averageFloorOffset)
        {
            // Initial frame of detection: seed the average with the plane value directly.
            this.averageFloorOffset = -floorPlaneVec.W;
        }
        else
        {
            // Running average of floor position.
            this.averageFloorOffset = (this.averageFloorOffset * 0.9f) + (-floorPlaneVec.W * 0.1f);
        }
    }
    else
    {
        // No floor and no feet: derive the offset from the hip center and the avatar's hip height.
        this.averageFloorOffset = hipCenterPosition.Y - avatarHipCenterHeight;
    }

    Array jointTypeValues = Enum.GetValues(typeof(JointType));

    // Move every joint down to the floor by the averaged offset.
    foreach (JointType jointType in jointTypeValues)
    {
        Joint joint = skeleton.Joints[jointType];
        SkeletonPoint position = joint.Position;

        position.Y = position.Y - this.averageFloorOffset;

        joint.Position = position;
        skeleton.Joints[jointType] = joint;
    }
}
/// <summary>
/// Verifies that Vector4.Length returns the Euclidean magnitude
/// sqrt(x^2 + y^2 + z^2 + w^2): for (1, 2, 3, 4) that is sqrt(30) ≈ 5.477226.
/// </summary>
public void Length()
{
    var vector1 = new Vector4(1, 2, 3, 4);

    // Use the delta overload: exact float equality on a computed square root is
    // brittle and can fail depending on platform/JIT rounding.
    Assert.AreEqual(5.477226f, vector1.Length(), 1e-5f);
}
/// <summary>
/// CorrectSkeletonOffsetFromFloor moves the skeleton to the floor.
/// If no floor found in Skeletal Tracking, we can try and use the foot position
/// but this can be very noisy, which causes the skeleton to bounce up and down.
/// Note: Using the foot positions will reduce the visual effect of jumping when
/// an avateer jumps, as we perform a running average.
/// </summary>
/// <param name="skeleton">The skeleton to correct.</param>
/// <param name="floorPlane">The floor plane (consisting of up normal and sensor height) detected by skeleton tracking (if any).</param>
/// <param name="avatarHipCenterHeight">The height of the avatar Hip Center joint.</param>
public void CorrectSkeletonOffsetFromFloor(Skeleton skeleton, Tuple<float, float, float, float> floorPlane, float avatarHipCenterHeight)
{
    // Only operate on a fully tracked skeleton.
    if (skeleton == null || skeleton.TrackingState != SkeletonTrackingState.Tracked)
    {
        return;
    }

    // A floor plane counts as detected only when it is non-degenerate (non-zero length).
    Vector4 floorPlaneVec = Vector4.Zero;
    bool haveFloor = false;

    if (null != floorPlane)
    {
        floorPlaneVec = new Vector4(floorPlane.Item1, floorPlane.Item2, floorPlane.Item3, floorPlane.Item4);
        haveFloor = floorPlaneVec.Length() > float.Epsilon;
    }

    // If there's no floor found, try to use the lower foot position, if visible.
    Vector3 hipCenterPosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.HipCenter].Position);

    bool haveLeftFoot = KinectHelper.IsTrackedOrInferred(skeleton, JointType.FootLeft);
    bool haveLeftAnkle = KinectHelper.IsTracked(skeleton, JointType.AnkleLeft);
    bool haveRightFoot = KinectHelper.IsTrackedOrInferred(skeleton, JointType.FootRight);
    bool haveRightAnkle = KinectHelper.IsTracked(skeleton, JointType.AnkleRight);

    if (haveLeftFoot || haveLeftAnkle || haveRightFoot || haveRightAnkle)
    {
        // As this runs after de-tilt of the skeleton, the floor-camera offset will
        // be the foot to camera 0 height in meters as the foot is at the floor plane.
        // Jumping is enabled to some extent due to the running average, but will appear reduced in height.
        Vector3 leftFootPosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.FootLeft].Position);
        Vector3 rightFootPosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.FootRight].Position);

        Vector3 leftAnklePosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.AnkleLeft].Position);
        Vector3 rightAnklePosition = KinectHelper.SkeletonPointToVector3(skeleton.Joints[JointType.AnkleRight].Position);

        // Average the foot and ankle heights when both are tracked; otherwise take whichever is available.
        float leftFootAverage = (haveLeftFoot && haveLeftAnkle) ? (leftFootPosition.Y + leftAnklePosition.Y) * 0.5f : haveLeftFoot ? leftFootPosition.Y : leftAnklePosition.Y;
        float rightFootAverage = (haveRightFoot && haveRightAnkle) ? (rightFootPosition.Y + rightAnklePosition.Y) * 0.5f : haveRightFoot ? rightFootPosition.Y : rightAnklePosition.Y;

        // We assume the lowest foot is placed on the floor.
        float lowestFootPosition = 0;

        if ((haveLeftFoot || haveLeftAnkle) && (haveRightFoot || haveRightAnkle))
        {
            // Both sides visible: take the lower of the two averaged heights.
            // NOTE(review): an earlier comment here said "Negate", but no negation occurs —
            // Math.Min simply selects the lower foot. Confirm the original intent.
            lowestFootPosition = Math.Min(leftFootAverage, rightFootAverage);
        }
        else if (haveLeftFoot || haveLeftAnkle)
        {
            lowestFootPosition = leftFootAverage;
        }
        else
        {
            lowestFootPosition = rightFootAverage;
        }

        // Running average of floor position (90% history, 10% new sample) to damp noise.
        this.averageFloorOffset = (this.averageFloorOffset * 0.9f) + (lowestFootPosition * 0.1f);
    }
    else if (haveFloor)
    {
        // Get the detected height of the camera off the floor in meters
        // (the plane's W component is the sensor height, hence the negation).
        if (0.0f == this.averageFloorOffset)
        {
            // If it's the initial frame of detection, just set the floor plane directly.
            this.averageFloorOffset = -floorPlaneVec.W;
        }
        else
        {
            // Running average of floor position.
            this.averageFloorOffset = (this.averageFloorOffset * 0.9f) + (-floorPlaneVec.W * 0.1f);
        }
    }
    else
    {
        // No floor and no feet visible: just set the avatar offset directly
        // from the hip center and the avatar's known hip height.
        this.averageFloorOffset = hipCenterPosition.Y - avatarHipCenterHeight;
    }

    Array jointTypeValues = Enum.GetValues(typeof(JointType));

    // Move every joint to the floor by subtracting the averaged offset from its height.
    foreach (JointType j in jointTypeValues)
    {
        Joint joint = skeleton.Joints[j];
        SkeletonPoint pt = joint.Position;

        pt.Y = pt.Y - this.averageFloorOffset;

        joint.Position = pt;
        skeleton.Joints[j] = joint;
    }
}