/// <summary>
/// Create a visible color shaded image of a point cloud and its normals with simple
/// grayscale L.N surface shading. All image frames must have the same width and height.
/// </summary>
/// <param name="pointCloudFrame">The point cloud frame to be shaded.</param>
/// <param name="worldToCameraTransform">
/// The world to camera transform (camera pose) where the raycast was performed from.
/// Pass identity if the point cloud did not originate from a raycast and is in the
/// camera local coordinate system.
/// </param>
/// <param name="shadedSurfaceFrame">
/// Optionally, a pre-allocated color image frame, to be filled with the grayscale L.N
/// shaded surface image. Pass null to skip this image.
/// </param>
/// <param name="shadedSurfaceNormalsFrame">
/// Optionally, a pre-allocated color image frame, to be filled with the color shaded
/// normals image, with color indicating orientation. Pass null to skip this image.
/// </param>
/// <exception cref="ArgumentNullException">
/// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
/// </exception>
/// <exception cref="ArgumentException">
/// Thrown when the <paramref name="pointCloudFrame"/>, <paramref name="shadedSurfaceFrame"/>,
/// or <paramref name="shadedSurfaceNormalsFrame"/> parameters have different image sizes.
/// Thrown when the <paramref name="pointCloudFrame"/>, <paramref name="shadedSurfaceFrame"/>,
/// or <paramref name="shadedSurfaceNormalsFrame"/> parameters have different camera parameters.
/// </exception>
/// <exception cref="OutOfMemoryException">
/// Thrown if a CPU memory allocation failed.
/// </exception>
/// <exception cref="InvalidOperationException">
/// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
/// a GPU memory allocation failed, or the call failed for an unknown reason.
/// </exception>
public static void ShadePointCloud(
    FusionPointCloudImageFrame pointCloudFrame,
    Matrix4 worldToCameraTransform,
    FusionColorImageFrame shadedSurfaceFrame,
    FusionColorImageFrame shadedSurfaceNormalsFrame)
{
    if (null == pointCloudFrame)
    {
        throw new ArgumentNullException("pointCloudFrame");
    }

    ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionShadePointCloud2(
        FusionImageFrame.ToHandleRef(pointCloudFrame),
        ref worldToCameraTransform,
        IntPtr.Zero,
        FusionImageFrame.ToHandleRef(shadedSurfaceFrame),
        FusionImageFrame.ToHandleRef(shadedSurfaceNormalsFrame)));
}
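// Illustrative usage sketch (not part of the original source): shade a point cloud into
// two freshly allocated output frames. The helper name, the FusionColorImageFrame
// (width, height) constructor, and the frame Width/Height properties are assumptions
// based on the Kinect Fusion managed API; adapt to the frames actually in use.
internal static void ShadePointCloudExample(
    FusionPointCloudImageFrame pointCloudFrame,
    Matrix4 worldToCameraTransform)
{
    // ShadePointCloud requires all frames to share the same width and height,
    // so size the outputs from the input point cloud frame.
    using (var shadedSurface = new FusionColorImageFrame(pointCloudFrame.Width, pointCloudFrame.Height))
    using (var shadedNormals = new FusionColorImageFrame(pointCloudFrame.Width, pointCloudFrame.Height))
    {
        // Pass Matrix4.Identity as the pose when the point cloud is in the camera
        // local coordinate system (i.e. it did not originate from a raycast).
        ShadePointCloud(pointCloudFrame, worldToCameraTransform, shadedSurface, shadedNormals);

        // The shaded frames can now be copied out for display,
        // e.g. via CopyPixelDataTo on each output frame.
    }
}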
/// <summary>
/// Find the most similar camera poses to the current camera input by comparing against the
/// camera pose finder database, and return a set of similar camera poses. These poses
/// and similarity measurements are ordered in terms of decreasing similarity (i.e. the most
/// similar is first). Both input depth and color frames must be identical sizes, with valid
/// camera parameters, and captured at the same time.
/// </summary>
/// <param name="depthFloatFrame">The depth float frame to be processed.</param>
/// <param name="colorFrame">The color frame to be processed.</param>
/// <returns>
/// Returns a set of matched frames/poses created by the camera pose finder.
/// </returns>
/// <exception cref="ArgumentNullException">
/// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
/// parameter is null.
/// </exception>
/// <exception cref="ArgumentException">
/// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
/// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
/// member is null or has incorrectly sized focal lengths.
/// </exception>
/// <exception cref="InvalidOperationException">
/// Thrown when the Kinect Runtime could not be accessed,
/// or the call failed for an unknown reason.
/// </exception>
public MatchCandidates FindCameraPose(
    FusionFloatImageFrame depthFloatFrame,
    FusionColorImageFrame colorFrame)
{
    if (null == depthFloatFrame)
    {
        throw new ArgumentNullException("depthFloatFrame");
    }

    if (null == colorFrame)
    {
        throw new ArgumentNullException("colorFrame");
    }

    INuiFusionMatchCandidates matchCandidates = null;

    ExceptionHelper.ThrowIfFailed(cameraPoseFinder.FindCameraPose(
        FusionImageFrame.ToHandleRef(depthFloatFrame),
        FusionImageFrame.ToHandleRef(colorFrame),
        out matchCandidates));

    return new MatchCandidates(matchCandidates);
}
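// Illustrative usage sketch (not part of the original source): query the pose finder and
// return the best matching pose, or null when no stored pose is similar enough. The
// MatchCandidates members used below (GetPoseCount, CalculateMinimumDistance,
// GetMatchPoses) are assumed from the Kinect Fusion toolkit samples; verify them against
// the actual wrapper. Note that a smaller minimum distance means a closer match.
internal Matrix4? FindBestCameraPoseOrNull(
    FusionFloatImageFrame depthFloatFrame,
    FusionColorImageFrame colorFrame,
    float maxDistanceThreshold)
{
    MatchCandidates candidates = this.FindCameraPose(depthFloatFrame, colorFrame);

    // Reject the result set outright if it is empty or the closest match is too distant.
    if (candidates.GetPoseCount() == 0
        || candidates.CalculateMinimumDistance() > maxDistanceThreshold)
    {
        return null;
    }

    // Matches are ordered by decreasing similarity, so the first pose is the best.
    foreach (Matrix4 pose in candidates.GetMatchPoses())
    {
        return pose;
    }

    return null;
}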
/// <summary>
/// The AlignPointClouds function uses an iterative algorithm to align two sets of oriented
/// point clouds and calculate the camera's relative pose. This is a generic function which
/// can be used independently of a Reconstruction Volume with sets of overlapping point clouds.
/// All images must be the same size and have the same camera parameters.
/// To find the frame-to-frame relative transformation between two sets of point clouds in
/// the camera local frame of reference (created by DepthFloatFrameToPointCloud),
/// set the <paramref name="observedToReferenceTransform"/> to the identity.
/// To calculate the frame-to-model pose transformation between point clouds calculated from
/// new depth frames with DepthFloatFrameToPointCloud and point clouds calculated from an
/// existing Reconstruction volume with CalculatePointCloud (e.g. from the previous frame),
/// pass the CalculatePointCloud image as the reference frame, and the current depth frame
/// point cloud from DepthFloatFrameToPointCloud as the observed frame. Set the
/// <paramref name="observedToReferenceTransform"/> to the previous frame's calculated camera
/// pose that was used in the CalculatePointCloud call.
/// Note that here the current frame point cloud will be in the camera local frame of
/// reference, whereas the raycast points and normals will be in the global/world coordinate
/// system. By passing the <paramref name="observedToReferenceTransform"/> you make the
/// algorithm aware of the transformation between the two coordinate systems.
/// The <paramref name="observedToReferenceTransform"/> pose supplied can also take into
/// account information you may have from other sensors or sensing mechanisms to aid the
/// tracking. To do this, multiply the relative frame-to-frame delta transformation from
/// the other sensing system with the previous frame's pose before passing it to this function.
/// Note that any delta transform used should be in the same coordinate system as that
/// returned by the DepthFloatFrameToPointCloud calculation.
/// </summary>
/// <param name="referencePointCloudFrame">
/// The point cloud frame of the reference camera, or the previous Kinect point cloud frame.
/// </param>
/// <param name="observedPointCloudFrame">
/// The point cloud frame of the observed camera, or the current Kinect frame.
/// </param>
/// <param name="maxAlignIterationCount">
/// The maximum number of iterations of the algorithm to run. The minimum value is 1.
/// Using only a small number of iterations will have a faster runtime; however, the
/// algorithm may not converge to the correct transformation.
/// </param>
/// <param name="deltaFromReferenceFrame">
/// Optionally, a pre-allocated color image frame, to be filled with color-coded data
/// from the camera tracking. This may be used as input to additional vision algorithms such as
/// object segmentation. Values vary depending on whether the pixel was a valid pixel used in
/// tracking (inlier) or failed in different tests (outlier). 0xff000000 indicates an invalid
/// input vertex (e.g. from 0 input depth), or one where no correspondences occur between point
/// cloud images. Outlier vertices rejected due to too large a distance between vertices are
/// coded as 0xff008000. Outlier vertices rejected due to too large a difference in normal angle
/// between point clouds are coded as 0xff800000. Inliers are color shaded depending on the
/// residual energy at that point, with more saturated colors indicating more discrepancy
/// between vertices and less saturated colors (i.e. more white) representing less discrepancy,
/// or less information at that pixel. Pass null if this image is not required.
/// </param>
/// <param name="observedToReferenceTransform">
/// A pre-allocated transformation matrix. At entry to the function this should be filled
/// with the best guess for the observed to reference transform (usually the last frame's
/// calculated pose). At exit this is filled with the calculated pose, or identity if the
/// calculation failed.
/// </param>
/// <returns>
/// Returns true if successful; returns false if the algorithm encountered a problem aligning
/// the input point clouds and could not calculate a valid transformation, in which case
/// the <paramref name="observedToReferenceTransform"/> parameter is set to identity.
/// </returns>
/// <exception cref="ArgumentNullException">
/// Thrown when the <paramref name="referencePointCloudFrame"/> or the
/// <paramref name="observedPointCloudFrame"/> parameter is null.
/// </exception>
/// <exception cref="ArgumentException">
/// Thrown when the <paramref name="referencePointCloudFrame"/>, <paramref name="observedPointCloudFrame"/>,
/// or <paramref name="deltaFromReferenceFrame"/> parameters have different image sizes.
/// Thrown when the <paramref name="referencePointCloudFrame"/>, <paramref name="observedPointCloudFrame"/>,
/// or <paramref name="deltaFromReferenceFrame"/> parameters have different camera parameters.
/// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
/// </exception>
/// <exception cref="OutOfMemoryException">
/// Thrown if a CPU memory allocation failed.
/// </exception>
/// <exception cref="InvalidOperationException">
/// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
/// a GPU memory allocation failed, or the call failed for an unknown reason.
/// </exception>
public static bool AlignPointClouds(
    FusionPointCloudImageFrame referencePointCloudFrame,
    FusionPointCloudImageFrame observedPointCloudFrame,
    int maxAlignIterationCount,
    FusionColorImageFrame deltaFromReferenceFrame,
    ref Matrix4 observedToReferenceTransform)
{
    if (null == referencePointCloudFrame)
    {
        throw new ArgumentNullException("referencePointCloudFrame");
    }

    if (null == observedPointCloudFrame)
    {
        throw new ArgumentNullException("observedPointCloudFrame");
    }

    ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

    HRESULT hr = NativeMethods.NuiFusionAlignPointClouds(
        FusionImageFrame.ToHandleRef(referencePointCloudFrame),
        FusionImageFrame.ToHandleRef(observedPointCloudFrame),
        maxIterations,
        FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
        ref observedToReferenceTransform);

    if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
    {
        return false;
    }

    ExceptionHelper.ThrowIfFailed(hr);

    return true;
}
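// Illustrative usage sketch (not part of the original source): one frame-to-frame alignment
// step between consecutive camera-local point clouds (both produced by
// DepthFloatFrameToPointCloud). FusionDepthProcessor.DefaultAlignIterationCount is assumed
// from the Kinect Fusion SDK; the helper and variable names are hypothetical.
internal static bool TryAlignFrameToFrame(
    FusionPointCloudImageFrame previousFramePointCloud,
    FusionPointCloudImageFrame currentFramePointCloud,
    FusionColorImageFrame deltaFromReference,
    out Matrix4 frameToFrameTransform)
{
    // Both clouds are in the camera local frame of reference, so the initial guess
    // for the observed-to-reference transform is the identity.
    frameToFrameTransform = Matrix4.Identity;

    // On failure the transform is reset to identity by AlignPointClouds; the optional
    // deltaFromReference frame (which may be null) receives the color-coded residuals.
    return AlignPointClouds(
        previousFramePointCloud,
        currentFramePointCloud,
        FusionDepthProcessor.DefaultAlignIterationCount,
        deltaFromReference,
        ref frameToFrameTransform);
}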