Example No. 1
        /// <summary>
        /// Test input camera frames against the camera pose finder database, adding frames to the
        /// database if dissimilar enough to existing frames. Both input depth and color frames
        /// must be identical sizes, a minimum size of 80x60, with valid camera parameters, and
        /// captured at the same time.
        /// Note that once the database reaches its maximum initialized size, it will overwrite old
        /// pose information. Check the <paramref name="trimmedHistory"/> flag or the number of
        /// poses in the database to determine whether the old poses are being overwritten.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <param name="worldToCameraTransform"> The current camera pose (usually the camera pose
        /// result from the last AlignPointClouds or AlignDepthFloatToReconstruction).</param>
        /// <param name="minimumDistanceThreshold">A float distance threshold between 0 and 1.0f which
        /// regulates how close together poses are stored in the database. Input frames
        /// which have a minimum distance equal to or above this threshold when compared against the
        /// database will be stored, as this indicates the input has become dissimilar to the existing
        /// stored poses. Set to 0.0f to ignore the test and always add a pose when this function is
        /// called; however, in that case, unless distance is tested externally, there is a risk of
        /// storing many duplicated poses.
        /// </param>
        /// <param name="addedPose">
        /// Set true when the input frame was added to the camera pose finder database.
        /// </param>
        /// <param name="trimmedHistory">
        /// Set true if the maxPoseHistoryCount was reached when the input frame was stored, so the
        /// oldest pose was overwritten in the camera pose finder database to make room for the latest pose.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
        /// parameter is an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths, or the
        /// <paramref name="minimumDistanceThreshold"/> parameter is less than 0 or greater
        /// than 1.0f.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The camera pose finder works by accumulating whether the values at each sample location pixel
        /// in a saved pose frame are less than or greater than a threshold which is randomly chosen
        /// between minimum and maximum boundaries (e.g. for color this is 0-255). Given enough samples
        /// this represents a unique key frame signature that we can match against, as different poses
        /// will have different values for surfaces which are closer or further away, or different
        /// colors.
        /// Note that, unlike depth, the robustness of finding a valid camera pose from the color image
        /// can be affected by changes in ambient illumination. For best matching results, both the Kinect
        /// camera and the environment should have exactly the same configuration as when the database
        /// key frame images were captured, i.e. if you used a fixed exposure and a custom white balance,
        /// these should be set again when testing against the database later; otherwise the matching
        /// accuracy will be reduced.
        /// To improve accuracy, it is possible to not just provide a red, green, blue input in the
        /// color image, but instead provide a different 3 channels of match data scaled 0-255. For
        /// example, to be more illumination independent, you could calculate hue and saturation, or
        /// convert RGB to LAB and use the AB channels. Other measures such as texture response
        /// or corner response could additionally be computed and used in one or more of the channels.
        /// </remarks>
        public void ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform,
            float minimumDistanceThreshold,
            out bool addedPose,
            out bool trimmedHistory)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            HRESULT hr = cameraPoseFinder.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform,
                minimumDistanceThreshold,
                out addedPose,
                out trimmedHistory);

            ExceptionHelper.ThrowIfFailed(hr);
        }
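
The snippet below is a minimal usage sketch of ProcessFrame and is not part of the original listing. It assumes a CameraPoseFinder instance named poseFinder, pre-allocated depthFloatFrame and colorFrame of matching resolution, and a worldToCameraTransform taken from the last successful alignment; the 0.3f threshold is purely illustrative.

// Hypothetical usage sketch; poseFinder, depthFloatFrame, colorFrame and
// worldToCameraTransform are assumed to exist in the calling code.
bool addedPose;
bool trimmedHistory;

// Only store the current frame pair when it is sufficiently dissimilar to the
// poses already in the database (0.3f is an illustrative threshold).
poseFinder.ProcessFrame(
    depthFloatFrame,
    colorFrame,
    worldToCameraTransform,
    0.3f,
    out addedPose,
    out trimmedHistory);

if (trimmedHistory)
{
    // The database hit its maximum size and overwrote its oldest pose.
}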
Example No. 2
        /// <summary>
        /// Converts Kinect depth frames in unsigned short format to depth frames in float format
        /// representing distance from the camera in meters (parallel to the optical center axis).
        /// Note: <paramref name="depthImageData"/> and <paramref name="depthFloatFrame"/> must
        /// be the same pixel resolution and equal to <paramref name="depthImageDataWidth"/> by
        /// <paramref name="depthImageDataHeight"/>.
        /// The min and max depth clip values enable clipping of the input data, for example, to help
        /// isolate particular objects or surfaces to be reconstructed. Note that the thresholds return
        /// different values when a depth pixel is outside the threshold - pixels closer than minDepthClip
        /// will be returned as 0 and ignored in processing, whereas pixels beyond maxDepthClip will be set
        /// to 1000 to signify a valid depth ray with depth beyond the set threshold. Setting this far-
        /// distance flag is important for reconstruction integration in situations where the camera is
        /// static or does not move significantly, as it enables any voxels closer to the camera
        /// along this ray to be culled instead of persisting (as would happen if the pixels were simply
        /// set to 0 and ignored in processing). Note that when reconstructing large real-world size volumes,
        /// be sure to set large maxDepthClip distances, as when the camera moves around, any voxels in view
        /// which go beyond this threshold distance from the camera will be removed.
        /// </summary>
        /// <param name="depthImageData">
        /// An array which stores the extended-depth texture of a depth image from the Kinect camera.
        /// </param>
        /// <param name="depthImageDataWidth">Width of the depth image data.</param>
        /// <param name="depthImageDataHeight">Height of the depth image data.</param>
        /// <param name="depthFloatFrame">
        /// A pre-allocated depth float type image frame, to be filled with the floating point depth values.
        /// </param>
        /// <param name="minDepthClip">
        /// Minimum depth distance threshold in meters. Depth pixels below this value will be
        /// returned as invalid (0). Min depth must be positive or 0.
        /// </param>
        /// <param name="maxDepthClip">
        /// Maximum depth distance threshold in meters. Depth pixels above this value will be
        /// returned as invalid (1000). Max depth must be greater than 0.
        /// </param>
        /// <param name="mirrorDepth">
        /// A boolean parameter specifying whether to horizontally mirror the input depth image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthImageData"/> or the <paramref name="depthFloatFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthImageDataWidth"/> parameter and depthFloatFrame's
        /// <c>width</c> is not equal, or the <paramref name="depthImageDataHeight"/> parameter and
        /// depthFloatFrame's <c>height</c> member is not equal.
        /// Thrown when the <paramref name="minDepthClip"/> parameter or the
        /// <paramref name="maxDepthClip"/> is less than zero.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
#pragma warning disable 3001
        public static void DepthToDepthFloatFrame(
            ushort[] depthImageData,
            int depthImageDataWidth,
            int depthImageDataHeight,
            FusionFloatImageFrame depthFloatFrame,
            float minDepthClip,
            float maxDepthClip,
            bool mirrorDepth)
        {
            if (null == depthImageData)
            {
                throw new ArgumentNullException("depthImageData");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthToDepthFloatFrame(
                                              depthImageData,
                                              (uint)depthImageDataWidth,
                                              (uint)depthImageDataHeight,
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              minDepthClip,
                                              maxDepthClip,
                                              mirrorDepth));
        }
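
As a minimal usage sketch (not part of the original listing), the call below assumes the static method lives on the FusionDepthProcessor class of the Kinect Fusion managed API, that rawDepthPixels is a ushort[] copied from a Kinect depth frame, and that the 512x424 resolution and 0.5m-3.5m clip band are illustrative values only.

// Hypothetical usage sketch; rawDepthPixels is assumed to hold one depth frame.
int width = 512;
int height = 424;
var depthFloatFrame = new FusionFloatImageFrame(width, height);

// Convert raw depth to float meters, clipping outside 0.5m-3.5m and mirroring
// the image horizontally.
FusionDepthProcessor.DepthToDepthFloatFrame(
    rawDepthPixels,
    width,
    height,
    depthFloatFrame,
    0.5f,    // minDepthClip in meters
    3.5f,    // maxDepthClip in meters
    true);   // mirrorDepth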
Example No. 3
        /// <summary>
        /// Create a visible color shaded image of a point cloud and its normals with simple
        /// grayscale L.N surface shading. All image frames must have the same width and height.
        /// </summary>
        /// <param name="pointCloudFrame">The point cloud frame to be shaded.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) where the raycast was performed from.
        /// Pass identity if the point cloud did not originate from a raycast and is in the
        /// camera local coordinate system.
        /// </param>
        /// <param name="shadedSurfaceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the grayscale L.N
        /// shaded surface image. Pass null to skip this image.
        /// </param>
        /// <param name="shadedSurfaceNormalsFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color shaded
        /// normals image with color indicating orientation. Pass null to skip this image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters have different camera parameters.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void ShadePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform,
            FusionColorImageFrame shadedSurfaceFrame,
            FusionColorImageFrame shadedSurfaceNormalsFrame)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionShadePointCloud2(
                                              FusionImageFrame.ToHandleRef(pointCloudFrame),
                                              ref worldToCameraTransform,
                                              IntPtr.Zero,
                                              FusionImageFrame.ToHandleRef(shadedSurfaceFrame),
                                              FusionImageFrame.ToHandleRef(shadedSurfaceNormalsFrame)));
        }
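
The sketch below is not part of the original listing. It assumes a FusionPointCloudImageFrame named pointCloudFrame produced by a raycast or by DepthFloatFrameToPointCloud, matching width/height values, and the worldToCameraTransform used for that raycast; the static class name FusionDepthProcessor is assumed from the Kinect Fusion managed API.

// Hypothetical usage sketch; pointCloudFrame and worldToCameraTransform are
// assumed to exist, and width/height match the point cloud frame.
var shadedSurfaceFrame = new FusionColorImageFrame(width, height);

// Request only the grayscale L.N shaded image; pass null to skip the
// color-coded normals image.
FusionDepthProcessor.ShadePointCloud(
    pointCloudFrame,
    worldToCameraTransform,
    shadedSurfaceFrame,
    null);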
Example No. 4
#pragma warning restore 3001

        /// <summary>
        /// Construct an oriented point cloud in the local camera frame of reference from a depth float
        /// image frame. Here we calculate the 3D position of each depth float pixel with the optical
        /// center of the camera as the origin. We use a right-hand coordinate system, and (in common
        /// with bitmap images with top left origin) +X is to the right, +Y down, and +Z is now forward
        /// from the Kinect camera into the scene, as though looking into the scene from behind the
        /// Kinect camera. Both images must be the same size and have the same camera parameters.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be converted.</param>
        /// <param name="pointCloudFrame">
        /// A pre-allocated point cloud frame, to be filled with 3D points and normals.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or the <paramref name="pointCloudFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="pointCloudFrame"/>
        /// parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthFloatFrameToPointCloud(
            FusionFloatImageFrame depthFloatFrame,
            FusionPointCloudImageFrame pointCloudFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthFloatFrameToPointCloud(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(pointCloudFrame)));
        }
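
A minimal usage sketch follows (not part of the original listing); it assumes a depthFloatFrame already filled by DepthToDepthFloatFrame, matching width/height values, and the FusionDepthProcessor static class of the Kinect Fusion managed API.

// Hypothetical usage sketch; depthFloatFrame holds converted float depth.
var pointCloudFrame = new FusionPointCloudImageFrame(width, height);

// Compute a 3D point and normal for every valid depth pixel, expressed in the
// camera-local right-handed coordinate system described in the summary above.
FusionDepthProcessor.DepthFloatFrameToPointCloud(depthFloatFrame, pointCloudFrame);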
Example No. 5
        /// <summary>
        /// Find the most similar camera poses to the current camera input by comparing against the
        /// camera pose finder database, and returning a set of similar camera poses. These poses
        /// and similarity measurements are ordered in terms of decreasing similarity (i.e. the most
        /// similar is first). Both input depth and color frames must be identical sizes, with valid
        /// camera parameters and captured at the same time.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <returns>Returns the matched frames object created by the camera pose finder.</returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and  <paramref name="colorFrame"/>
        /// parameter is an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed,
        /// or the call failed for an unknown reason.
        /// </exception>
        public MatchCandidates FindCameraPose(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            INuiFusionMatchCandidates matchCandidates = null;

            ExceptionHelper.ThrowIfFailed(cameraPoseFinder.FindCameraPose(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(colorFrame),
                                              out matchCandidates));

            return new MatchCandidates(matchCandidates);
        }
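
The relocalization sketch below is not part of the original listing. It assumes the same poseFinder, depthFloatFrame and colorFrame as in Example No. 1; the MatchCandidates members used (CalculateMinimumDistance, GetMatchPoses) follow the Kinect Fusion Explorer sample and should be verified against the MatchCandidates class you build against, and the 0.5f distance is an illustrative threshold.

// Hypothetical usage sketch; member names on MatchCandidates are assumptions.
MatchCandidates candidates = poseFinder.FindCameraPose(depthFloatFrame, colorFrame);

if (candidates != null && candidates.CalculateMinimumDistance() < 0.5f)
{
    // Poses are ordered by decreasing similarity, so the first entry is the
    // best relocalization candidate.
    Matrix4 bestPose = candidates.GetMatchPoses()[0];
}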
Example No. 6
        /// <summary>
        /// The AlignPointClouds function uses an iterative algorithm to align two sets of oriented
        /// point clouds and calculate the camera's relative pose. This is a generic function which
        /// can be used independently of a Reconstruction Volume with sets of overlapping point clouds.
        /// All images must be the same size and have the same camera parameters.
        /// To find the frame-to-frame relative transformation between two sets of point clouds in
        /// the camera local frame of reference (created by DepthFloatFrameToPointCloud),
        /// set the <paramref name="observedToReferenceTransform"/> to the identity.
        /// To calculate the frame-to-model pose transformation between point clouds calculated from
        /// new depth frames with DepthFloatFrameToPointCloud and point clouds calculated from an
        /// existing Reconstruction volume with CalculatePointCloud (e.g. from the previous frame),
        /// pass the CalculatePointCloud image as the reference frame, and the current depth frame
        /// point cloud from DepthFloatFrameToPointCloud as the observed frame. Set the
        /// <paramref name="observedToReferenceTransform"/> to the previous frames calculated camera
        /// pose that was used in the CalculatePointCloud call.
        /// Note that here the current frame point cloud will be in the camera local frame of
        /// reference, whereas the raycast points and normals will be in the global/world coordinate
        /// system. By passing the <paramref name="observedToReferenceTransform"/> you make the
        /// algorithm aware of the transformation between the two coordinate systems.
        /// The <paramref name="observedToReferenceTransform"/> pose supplied can also take into
        /// account information you may have from other sensors or sensing mechanisms to aid the
        /// tracking. To do this multiply the relative frame to frame delta transformation from
        /// the other sensing system with the previous frame's pose before passing to this function.
        /// Note that any delta transform used should be in the same coordinate system as that
        /// returned by the DepthFloatFrameToPointCloud calculation.
        /// </summary>
        /// <param name="referencePointCloudFrame">
        /// The point cloud frame of the reference camera, or the previous Kinect point cloud frame.
        /// </param>
        /// <param name="observedPointCloudFrame">
        /// The point cloud frame of the observed camera, or the current Kinect frame.
        /// </param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run. The minimum value is 1.
        /// Using only a small number of iterations will have a faster runtime, however, the
        /// algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. This may be used as input to additional vision algorithms such as
        /// object segmentation. Values vary depending on whether the pixel was a valid pixel used in
        /// tracking (inlier) or failed in different tests (outlier). 0xff000000 indicates an invalid
        /// input vertex (e.g. from 0 input depth), or one where no correspondences occur between point
        /// cloud images. Outlier vertices rejected due to too large a distance between vertices are
        /// coded as 0xff008000. Outlier vertices rejected due to too large a difference in normal angle
        /// between point clouds are coded as 0xff800000. Inliers are color shaded depending on the
        /// residual energy at that point, with more saturated colors indicating more discrepancy
        /// between vertices and less saturated colors (i.e. more white) representing less discrepancy,
        /// or less information at that pixel. Pass null if this image is not required.
        /// </param>
        /// <param name="observedToReferenceTransform">
        /// A pre-allocated transformation matrix. At entry to the function this should be filled
        /// with the best guess for the observed to reference transform (usually the last frame's
        /// calculated pose). At exit this is filled with the calculated pose or identity if the
        /// calculation failed.
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input point clouds and could not calculate a valid transformation, and
        /// the <paramref name="observedToReferenceTransform"/> parameter is set to identity.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or the
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters have different camera parameters.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            ref Matrix4 observedToReferenceTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = NativeMethods.NuiFusionAlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                ref observedToReferenceTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
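
The sketch below (not part of the original listing) shows frame-to-frame use of AlignPointClouds as described in the summary: both point clouds come from DepthFloatFrameToPointCloud, so the initial guess is identity. The FusionDepthProcessor class name, the frame variable names and the iteration count of 7 are assumptions.

// Hypothetical usage sketch; previousPointCloudFrame and currentPointCloudFrame
// are assumed camera-local point clouds from DepthFloatFrameToPointCloud.
Matrix4 relativeTransform = Matrix4.Identity;   // frame-to-frame initial guess

bool aligned = FusionDepthProcessor.AlignPointClouds(
    previousPointCloudFrame,    // reference frame
    currentPointCloudFrame,     // observed frame
    7,                          // illustrative iteration count
    null,                       // delta-from-reference image not needed here
    ref relativeTransform);

if (aligned)
{
    // relativeTransform now maps the observed cloud onto the reference cloud;
    // compose it with the previous camera pose using your own Matrix4 multiply
    // helper (none is provided by this listing).
}
else
{
    // Tracking failed; relativeTransform was reset to identity.
}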
Example No. 7
 /// <summary>
 /// Convert a FusionImageFrame to a HandleRef structure.
 /// </summary>
 /// <param name="imageFrame">The FusionImageFrame to be converted.</param>
 /// <returns>
 /// Returns a HandleRef wrapping the native frame handle, or an empty HandleRef if the input
 /// <paramref name="imageFrame"/> is null.
 /// </returns>
 public static HandleRef ToHandleRef(FusionImageFrame imageFrame)
 {
     return null != imageFrame ?
            new HandleRef(imageFrame, NativeFrameHandle.ToIntPtr(imageFrame.Handle)) :
            new HandleRef();
 }