Example #1
        /// <summary>
        /// A high-level function to process a depth frame through the Kinect Fusion pipeline.
        /// Specifically, this performs on-GPU processing equivalent to the following functions
        /// for each frame:
        /// <para>
        /// 1) AlignDepthFloatToReconstruction
        /// 2) IntegrateFrame
        /// </para>
        /// Users may also optionally call the low-level functions individually, instead of calling this
        /// function, for more control. However, this function call will be faster due to the integrated
        /// nature of the calls. After this call completes, if a visible output image of the reconstruction
        /// is required, the user can call CalculatePointCloud and then ShadePointCloud.
        /// The maximum image resolution supported in this function is 640x480.
        /// <para/>
        /// If there is a tracking error in the AlignDepthFloatToReconstruction stage, no depth data
        /// integration will be performed, and the camera pose will remain unchanged.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the align camera tracking algorithm to run.
        /// The minimum value is 1. Using only a small number of iterations will have a faster
        /// runtime, however, the algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Lower values produce
        /// noisier representations, but objects that move appear and disappear faster, so they are
        /// suitable for more dynamic environments. Higher values integrate objects more slowly,
        /// but provide finer detail with less noise.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The best guess of the latest camera pose (usually the camera pose result from the last
        /// process call).
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        public bool ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            int maxAlignIterationCount,
            int maxIntegrationWeight,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);
            ushort maxWeight     = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            HRESULT hr = volume.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                maxIterations,
                maxWeight,
                ref worldToCameraTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
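
The sketch below (not part of the original source) shows how this high-level call is typically used once per frame. It assumes hypothetical fields this.volume (the Reconstruction), this.depthFloatFrame (a FusionFloatImageFrame filled from the sensor) and this.worldToCameraTransform (a Matrix4 carried between frames); the Default* constants are the ones exposed by FusionDepthProcessor.

        // Usage sketch: track and integrate one frame with the high-level pipeline.
        private void TrackAndIntegrate()
        {
            bool tracked = this.volume.ProcessFrame(
                this.depthFloatFrame,
                FusionDepthProcessor.DefaultAlignIterationCount,
                FusionDepthProcessor.DefaultIntegrationWeight,
                this.worldToCameraTransform);

            if (tracked)
            {
                // On success the volume holds the refined pose; read it back for the next frame.
                this.worldToCameraTransform = this.volume.GetCurrentWorldToCameraTransform();
            }

            // On failure the pose is unchanged and no depth was integrated; a real application
            // would count consecutive failures and eventually reset the reconstruction.
        }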
Example #2
        /// <summary>
        /// Converts Kinect depth frames in unsigned short format to depth frames in float format
        /// representing distance from the camera in meters (parallel to the optical center axis).
        /// </summary>
        /// <param name="depthImageData">
        /// An array which stores the extended-depth texture of a depth image from the Kinect camera.
        /// </param>
        /// <param name="depthImageDataWidth">Width of the depth image data.</param>
        /// <param name="depthImageDataHeight">Height of the depth image data.</param>
        /// <param name="depthFloatFrame">
        /// A pre-allocated depth float type image frame, to be filled with the floating point depth values.
        /// </param>
        /// <param name="minDepthClip">
        /// Minimum depth distance threshold in meters. Depth pixels below this value will be
        /// returned as invalid (0). Min depth must be positive or 0.
        /// </param>
        /// <param name="maxDepthClip">
        /// Maximum depth distance threshold in meters. Depth pixels above this value will be
        /// returned as invalid (0). Max depth must be greater than 0.
        /// </param>
        /// <param name="mirrorDepth">
        /// A boolean parameter specifying whether to horizontally mirror the input depth image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthImageData"/> or the <paramref name="depthFloatFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthImageDataWidth"/> parameter and depthFloatFrame's
        /// <c>width</c> is not equal, or the <paramref name="depthImageDataHeight"/> parameter and
        /// depthFloatFrame's <c>height</c> member is not equal.
        /// Thrown when the <paramref name="minDepthClip"/> parameter or the
        /// <paramref name="maxDepthClip"/> is less than zero.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthToDepthFloatFrame(
            DepthImagePixel[] depthImageData,
            int depthImageDataWidth,
            int depthImageDataHeight,
            FusionFloatImageFrame depthFloatFrame,
            float minDepthClip,
            float maxDepthClip,
            bool mirrorDepth)
        {
            if (null == depthImageData)
            {
                throw new ArgumentNullException("depthImageData");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthToDepthFloatFrame(
                                              depthImageData,
                                              (uint)depthImageDataWidth,
                                              (uint)depthImageDataHeight,
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              minDepthClip,
                                              maxDepthClip,
                                              mirrorDepth));
        }
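
A minimal conversion sketch (not part of the original source), assuming a Kinect SDK 1.x DepthImageFrame as input and a pre-allocated FusionFloatImageFrame of the same resolution; the DefaultMinimumDepth/DefaultMaximumDepth constants are assumed to come from FusionDepthProcessor.

        // Usage sketch: convert a raw Kinect depth frame to a depth float frame in meters.
        private static void ConvertDepthFrame(DepthImageFrame rawDepthFrame, FusionFloatImageFrame depthFloatFrame)
        {
            var depthPixels = new DepthImagePixel[rawDepthFrame.PixelDataLength];
            rawDepthFrame.CopyDepthImagePixelDataTo(depthPixels);

            // Convert to meters, clipping to the default working range and mirroring horizontally.
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels,
                rawDepthFrame.Width,
                rawDepthFrame.Height,
                depthFloatFrame,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                true);   // mirrorDepth
        }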
Example #3
        /// <summary>
        /// Aligns a depth float image to the Reconstruction volume to calculate the new camera pose.
        /// This camera tracking method requires a Reconstruction volume, and updates the internal
        /// camera pose if successful. The maximum image resolution supported in this function is 640x480.
        /// Note that this function is designed primarily for tracking either with static scenes when
        /// performing environment reconstruction, or objects which move rigidly when performing object
        /// reconstruction from a static camera. Consider using the standalone function
        /// NuiFusionAlignPointClouds instead if tracking failures occur due to parts of a scene which
        /// move non-rigidly or should be considered as outliers, although in practice, such issues are
        /// best avoided by carefully designing or constraining usage scenarios wherever possible.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run.
        /// The minimum value is 1. Using only a small number of iterations will have a faster runtime,
        /// however, the algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated float image frame, to be filled with information about how
        /// well each observed pixel aligns with the passed in reference frame. This may be processed
        /// to create a color rendering, or may be used as input to additional vision algorithms such
        /// as object segmentation. Pass null if not required.
        /// </param>
        /// <param name="alignmentEnergy">
        /// A float to receive a value describing how well the observed frame aligns to the model with
        /// the calculated pose. A larger magnitude value represents more discrepancy, and a lower value
        /// represents less discrepancy. Note that it is unlikely an exact 0 (perfect alignment) value will
        /// ever be returned as every frame from the sensor will contain some sensor noise.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The best guess of the camera pose (usually the camera pose result from the last
        /// AlignPointClouds or AlignDepthFloatToReconstruction).
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public bool AlignDepthFloatToReconstruction(
            FusionFloatImageFrame depthFloatFrame,
            int maxAlignIterationCount,
            FusionFloatImageFrame deltaFromReferenceFrame,
            out float alignmentEnergy,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = volume.AlignDepthFloatToReconstruction(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                out alignmentEnergy,
                ref worldToCameraTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
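
A sketch (not part of the original source) of the low-level alternative to ProcessFrame mentioned in Example #1: align first, then integrate only when tracking succeeded. It reuses the same assumed fields (this.volume, this.depthFloatFrame, this.worldToCameraTransform) as the earlier sketch.

        // Usage sketch: explicit align + integrate instead of the combined ProcessFrame call.
        private void TrackThenIntegrateSeparately()
        {
            float alignmentEnergy;

            bool tracked = this.volume.AlignDepthFloatToReconstruction(
                this.depthFloatFrame,
                FusionDepthProcessor.DefaultAlignIterationCount,
                null,                         // deltaFromReferenceFrame not required here
                out alignmentEnergy,
                this.worldToCameraTransform);

            if (tracked)
            {
                // Only integrate depth once tracking succeeded, using the refined pose.
                this.worldToCameraTransform = this.volume.GetCurrentWorldToCameraTransform();
                this.volume.IntegrateFrame(
                    this.depthFloatFrame,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    this.worldToCameraTransform);
            }
        }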
Example #4
        /// <summary>
        /// Test input camera frames against the camera pose finder database, adding frames to the
        /// database if dis-similar enough to existing frames. Both input depth and color frames
        /// must be identical sizes, a minimum size of 80x60, with valid camera parameters, and
        /// captured at the same time.
        /// Note that once the database reaches its maximum initialized size, it will overwrite old
        /// pose information. Check the <paramref name="trimmedHistory"/> flag or the number of
        /// poses in the database to determine whether old poses are being overwritten.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <param name="worldToCameraTransform"> The current camera pose (usually the camera pose
        /// result from the last AlignPointClouds or AlignDepthFloatToReconstruction).</param>
        /// <param name="minimumDistanceThreshold">A float distance threshold between 0 and 1.0f which
        /// regulates how close together poses are stored in the database. Input frames
        /// which have a minimum distance equal to or above this threshold when compared against the
        /// database will be stored, as it indicates the input has become dis-similar to the existing
        /// stored poses. Set to 0.0f to ignore the check and always add a pose when this function is
        /// called; however, in this case, unless there is an external test of distance, there is a risk
        /// of storing many duplicated poses.
        /// </param>
        /// <param name="addedPose">
        /// Set true when the input frame was added to the camera pose finder database.
        /// </param>
        /// <param name="trimmedHistory">
        /// Set true if the maxPoseHistoryCount was reached when the input frame is stored, so the
        /// oldest pose was overwritten in the camera pose finder database to store the latest pose.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths, or the
        /// <paramref name="minimumDistanceThreshold"/> parameter is less than 0 or greater
        /// than 1.0f.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The camera pose finder works by accumulating whether the values at each sample location pixel
        /// in a saved pose frame are less than or greater than a threshold which is randomly chosen
        /// between minimum and maximum boundaries (e.g. for color this is 0-255). Given enough samples
        /// this represents a unique key frame signature that we can match against, as different poses
        /// will have different values for surfaces which are closer or further away, or different
        /// colors.
        /// Note that, unlike depth, matching against the color image can be affected by ambient
        /// illumination levels. For best matching results, both the Kinect
        /// camera and also the environment should have exactly the same configuration as when the
        /// database key frame images were captured i.e. if you had a fixed exposure and custom white
        /// balance, this should again be set when testing the database later, otherwise the matching
        /// accuracy will be reduced.
        /// To improve accuracy, it is possible to not just provide a red, green, blue input in the
        /// color image, but instead provide a different 3 channels of match data scaled 0-255. For
        /// example, to be more illumination independent, you could calculate hue and saturation, or
        /// convert RGB to LAB and use the AB channels. Other measures such as texture response
        /// or corner response could additionally be computed and used in one or more of the channels.
        /// </remarks>
        public void ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform,
            float minimumDistanceThreshold,
            out bool addedPose,
            out bool trimmedHistory)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            HRESULT hr = cameraPoseFinder.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform,
                minimumDistanceThreshold,
                out addedPose,
                out trimmedHistory);

            ExceptionHelper.ThrowIfFailed(hr);
        }
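
A sketch (not part of the original source) of feeding successfully tracked frames into the pose finder database. The this.cameraPoseFinder field and its creation via CameraPoseFinder.FusionCreateCameraPoseFinder(CameraPoseFinderParameters.Defaults) are assumptions for illustration, as is the 0.3f distance threshold.

        // Usage sketch: update the camera pose finder database after a successfully tracked frame.
        private void UpdatePoseFinderDatabase(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 trackedPose)
        {
            bool addedPose;
            bool trimmedHistory;

            // 0.3f is an illustrative threshold: only store frames that are reasonably
            // dissimilar to poses already in the database.
            this.cameraPoseFinder.ProcessFrame(
                depthFloatFrame,
                colorFrame,
                trackedPose,
                0.3f,
                out addedPose,
                out trimmedHistory);

            // trimmedHistory == true means the database was full and its oldest pose was overwritten.
        }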
Example #5
        /// <summary>
        /// Calculate a point cloud by raycasting into the reconstruction volume, returning the point
        /// cloud containing 3D points and normals of the zero-crossing dense surface at every visible
        /// pixel in the image from the given camera pose.
        /// This point cloud can be used as a reference frame in the next call to
        /// FusionDepthProcessor.AlignPointClouds, or passed to FusionDepthProcessor.ShadePointCloud
        /// to produce a visible image output.
        /// The <paramref name="pointCloudFrame"/> can be an arbitrary image size, for example, enabling
        /// you to calculate point clouds at the size of your window and then create a visible image by
        /// calling FusionDepthProcessor.ShadePointCloud and render this image, however, be aware that
        /// large images will be expensive to calculate.
        /// </summary>
        /// <param name="pointCloudFrame">
        /// The pre-allocated point cloud frame, to be filled by raycasting into the reconstruction volume.
        /// Typically used as the reference frame with the FusionDepthProcessor.AlignPointClouds function
        /// or for visualization by calling FusionDepthProcessor.ShadePointCloud.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) to raycast from.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the call failed for an unknown reason.
        /// </exception>
        public void CalculatePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.CalculatePointCloud(
                                              FusionImageFrame.ToHandleRef(pointCloudFrame),
                                              ref worldToCameraTransform));
        }
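
A sketch (not part of the original source) combining CalculatePointCloud with FusionDepthProcessor.ShadePointCloud to produce a visible image, as the summary suggests; the frames and the this.volume / this.worldToCameraTransform fields are assumed to be allocated and maintained elsewhere.

        // Usage sketch: raycast the volume from the current pose and shade the result for display.
        private void RenderReconstruction(
            FusionPointCloudImageFrame pointCloudFrame,
            FusionColorImageFrame shadedSurfaceFrame)
        {
            // Fill the point cloud by raycasting into the reconstruction volume.
            this.volume.CalculatePointCloud(pointCloudFrame, this.worldToCameraTransform);

            // Shade the point cloud into a visible color image; the normals image is skipped.
            FusionDepthProcessor.ShadePointCloud(
                pointCloudFrame,
                this.worldToCameraTransform,
                shadedSurfaceFrame,
                null);
        }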
Example #6
        /// <summary>
        /// Integrates depth float data into the reconstruction volume from the
        /// worldToCameraTransform camera pose.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be integrated.</param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Minimum value is 1.
        /// Lower values have more noisy representations, but objects that move integrate and
        /// disintegrate faster, so are suitable for more dynamic environments. Higher values
        /// integrate objects more slowly, but provide finer detail with less noise.</param>
        /// <param name="worldToCameraTransform">
        /// The camera pose (usually the camera pose result from the last AlignPointClouds or
        /// AlignDepthFloatToReconstruction).
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxIntegrationWeight"/> parameter is less than 1
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public void IntegrateFrame(
            FusionFloatImageFrame depthFloatFrame,
            int maxIntegrationWeight,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort integrationWeight = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            ExceptionHelper.ThrowIfFailed(volume.IntegrateFrame(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              integrationWeight,
                                              ref worldToCameraTransform));
        }
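
A small sketch (not part of the original source) of the integration-weight trade-off described above: a lower-than-default weight (the value 50 here is purely illustrative) adapts faster to moving objects at the cost of noisier surfaces. The fields are the same assumptions as in the earlier sketches.

        // Usage sketch: integrate with a lower weight so moving objects appear and disappear faster.
        private void IntegrateForDynamicScene()
        {
            const int dynamicSceneWeight = 50;   // illustrative value, lower than the default weight

            this.volume.IntegrateFrame(
                this.depthFloatFrame,
                dynamicSceneWeight,
                this.worldToCameraTransform);
        }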
Example #7
        /// <summary>
        /// Create a visible color shaded image of a point cloud and its normals. All image
        /// frames must have the same width and height.
        /// </summary>
        /// <param name="pointCloudFrame">The point cloud frame to be shaded.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) where the raycast was performed from.
        /// Pass identity if the point cloud did not originate from a raycast and is in the
        /// camera local coordinate system.
        /// </param>
        /// <param name="shadedSurfaceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the Lambertian (L·N) shaded
        /// surface image. Pass null to skip this image.
        /// </param>
        /// <param name="shadedSurfaceNormalsFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color shaded
        /// normals image with color indicating orientation. Pass null to skip this image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void ShadePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform,
            FusionColorImageFrame shadedSurfaceFrame,
            FusionColorImageFrame shadedSurfaceNormalsFrame)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionShadePointCloud2(
                                              FusionImageFrame.ToHandleRef(pointCloudFrame),
                                              ref worldToCameraTransform,
                                              IntPtr.Zero,
                                              FusionImageFrame.ToHandleRef(shadedSurfaceFrame),
                                              FusionImageFrame.ToHandleRef(shadedSurfaceNormalsFrame)));
        }
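
A sketch (not part of the original source) of displaying the shaded output in WPF. The this.shadedSurfaceFrame and this.shadedBitmap fields are assumptions, and the pixel copy relies on the toolkit's FusionColorImageFrame.CopyPixelDataTo helper.

        // Usage sketch: copy the shaded color frame into a WriteableBitmap (PixelFormats.Bgr32).
        private void ShowShadedSurface()
        {
            var pixels = new int[this.shadedSurfaceFrame.Width * this.shadedSurfaceFrame.Height];
            this.shadedSurfaceFrame.CopyPixelDataTo(pixels);

            this.shadedBitmap.WritePixels(
                new Int32Rect(0, 0, this.shadedSurfaceFrame.Width, this.shadedSurfaceFrame.Height),
                pixels,
                this.shadedSurfaceFrame.Width * sizeof(int),   // stride in bytes
                0);
        }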
Example #8
        /// <summary>
        /// Construct an oriented point cloud in the local camera frame of reference from a depth float
        /// image frame. Here we calculate the 3D position of each depth float pixel with the optical
        /// center of the camera as the origin. Both images must be the same size and have the same camera
        /// parameters. We use a right-handed coordinate system, and (in common with bitmap images with a
        /// top-left origin) +X is to the right, +Y is down, and +Z is forward from the Kinect camera into
        /// the scene, as though looking into the scene from behind the Kinect.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be converted.</param>
        /// <param name="pointCloudFrame">
        /// A pre-allocated point cloud frame, to be filled with 3D points and normals.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or the <paramref name="pointCloudFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="pointCloudFrame"/>
        /// parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthFloatFrameToPointCloud(
            FusionFloatImageFrame depthFloatFrame,
            FusionPointCloudImageFrame pointCloudFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthFloatFrameToPointCloud(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(pointCloudFrame)));
        }
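
A sketch (not part of the original source) that shades a raw (non-raycast) point cloud with an identity pose, as the ShadePointCloud documentation in Example #7 recommends for clouds that stay in the camera-local coordinate system.

        // Usage sketch: visualize raw sensor data as a shaded camera-local point cloud.
        private static void ShadeRawDepth(
            FusionFloatImageFrame depthFloatFrame,
            FusionPointCloudImageFrame pointCloudFrame,
            FusionColorImageFrame shadedFrame)
        {
            // Points and normals are produced in the camera-local coordinate system.
            FusionDepthProcessor.DepthFloatFrameToPointCloud(depthFloatFrame, pointCloudFrame);

            // Identity pose because this cloud did not come from a volume raycast.
            FusionDepthProcessor.ShadePointCloud(
                pointCloudFrame,
                Matrix4.Identity,
                shadedFrame,
                null);
        }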
Example #9
        /// <summary>
        /// Find the most similar camera poses to the current camera input by comparing against the
        /// camera pose finder database, and returning a set of similar camera poses. These poses
        /// and similarity measurements are ordered in terms of decreasing similarity (i.e. the most
        /// similar is first). Both input depth and color frames must be identical sizes, with valid
        /// camera parameters and captured at the same time.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <returns>Returns the set of matched frames/poses created by the camera pose finder,
        /// ordered by decreasing similarity.</returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and  <paramref name="colorFrame"/>
        /// parameter is an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed,
        /// or the call failed for an unknown reason.
        /// </exception>
        public MatchCandidates FindCameraPose(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            INuiFusionMatchCandidates matchCandidates = null;

            ExceptionHelper.ThrowIfFailed(cameraPoseFinder.FindCameraPose(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(colorFrame),
                                              out matchCandidates));

            return new MatchCandidates(matchCandidates);
        }
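
A relocalization sketch (not part of the original source) for use after tracking failure. The MatchCandidates members called here (GetPoseCount, CalculateMinimumDistance, GetMatchPoses) follow the names used in the Kinect Fusion toolkit samples and should be treated as assumptions; the 0.9f cut-off is illustrative.

        // Usage sketch: query the pose finder database and adopt the most similar stored pose.
        private bool TryRelocalize(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame)
        {
            MatchCandidates candidates = this.cameraPoseFinder.FindCameraPose(depthFloatFrame, colorFrame);

            if (null == candidates || 0 == candidates.GetPoseCount())
            {
                return false;
            }

            // Only retry tracking when the best match is reasonably similar to the current input.
            if (candidates.CalculateMinimumDistance() > 0.9f)
            {
                return false;
            }

            foreach (Matrix4 candidatePose in candidates.GetMatchPoses())
            {
                // Candidates arrive in decreasing order of similarity. A real application would
                // re-run AlignDepthFloatToReconstruction from each candidate and keep the best
                // result; here we simply adopt the most similar pose.
                this.worldToCameraTransform = candidatePose;
                break;
            }

            return true;
        }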
Example #10
        /// <summary>
        /// The AlignPointClouds function uses an iterative algorithm to align two sets of oriented
        /// point clouds and calculate the camera's relative pose. This is a generic function which
        /// can be used independently of a Reconstruction Volume with sets of overlapping point clouds.
        /// To find the frame to frame relative transformation between two sets of point clouds in
        /// the camera local frame of reference (created by DepthFloatFrameToPointCloud),
        /// set the <paramref name="observedToReferenceTransform"/> to the identity.
        /// To calculate the pose transformation between new depth frames and an existing
        /// Reconstruction volume, pass in the previous frame's point cloud from RenderReconstruction as
        /// the reference frame, and the current frame's point cloud (from DepthFloatFrameToPointCloud)
        /// as the observed frame. Set the <paramref name="observedToReferenceTransform"/> to the
        /// previous frame's calculated camera pose.
        /// Note that here the current frame point cloud will be in the camera local frame of
        /// reference, whereas the synthetic points and normals will be in the global/world volume
        /// coordinate system. By passing the <paramref name="observedToReferenceTransform"/> you
        /// make the algorithm aware of the transformation between them.
        /// The <paramref name="observedToReferenceTransform"/> pose supplied can also take into
        /// account information you may have from other sensors or sensing mechanisms to aid the
        /// tracking. To do this multiply the relative frame to frame delta transformation from
        /// the other sensing system with the previous frame's pose before passing to this function.
        /// Note that any delta transform used should be in the same coordinate system as that
        /// returned by the DepthFloatFrameToPointCloud calculation.
        /// </summary>
        /// <param name="referencePointCloudFrame">
        /// The point cloud frame of the reference camera, or the previous Kinect point cloud frame.
        /// </param>
        /// <param name="observedPointCloudFrame">
        /// The point cloud frame of the observed camera, or the current Kinect frame.
        /// </param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run. The minimum value is 1.
        /// Using only a small number of iterations will have a faster runtime, however, the
        /// algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. Values vary depending on whether the pixel was a valid pixel
        /// used in tracking (green) or failed in different tests. Pass null if not required.
        /// </param>
        /// <param name="observedToReferenceTransform">
        /// A pre-allocated transformation matrix. At entry to the function this should be filled
        /// with the best guess for the observed to reference transform (usually the last frame's
        /// calculated pose). At exit this is filled with the calculated pose, or identity if the
        /// calculation failed.
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input point clouds and could not calculate a valid transformation, and
        /// the <paramref name="observedToReferenceTransform"/> parameter is set to identity.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or the
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            ref Matrix4 observedToReferenceTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = NativeMethods.NuiFusionAlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                ref observedToReferenceTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
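
A frame-to-frame tracking sketch (not part of the original source) that uses AlignPointClouds without a reconstruction volume, as described in the summary. The this.frameToFrameTransform field (initialized to Matrix4.Identity) is an assumption for illustration.

        // Usage sketch: align the current frame's point cloud against the previous frame's.
        private bool TrackFrameToFrame(
            FusionPointCloudImageFrame referenceCloud,
            FusionPointCloudImageFrame observedCloud)
        {
            // On entry the transform is the best guess (the previous frame's pose); on exit it is
            // the calculated pose, or identity if alignment failed.
            return FusionDepthProcessor.AlignPointClouds(
                referenceCloud,
                observedCloud,
                FusionDepthProcessor.DefaultAlignIterationCount,
                null,                                // the color delta image is not required here
                ref this.frameToFrameTransform);
        }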
Example #11
 /// <summary>
 /// Convert a FusionImageFrame to HandleRef structure.
 /// </summary>
 /// <param name="imageFrame">The FusionImageFrame to be converted.</param>
 /// <returns>
 /// Returns a HandleRef wrapping the frame's native handle, or an empty HandleRef if the
 /// input <paramref name="imageFrame"/> is null.
 /// </returns>
 public static HandleRef ToHandleRef(FusionImageFrame imageFrame)
 {
      return null != imageFrame ?
             new HandleRef(imageFrame, NativeFrameHandle.ToIntPtr(imageFrame.Handle)) : new HandleRef();
 }