Example #1
        /// <summary>
        /// Test input camera frames against the camera pose finder database, adding frames to the
        /// database if dis-similar enough to existing frames. Both input depth and color frames
        /// must be identical sizes, a minimum size of 80x60, with valid camera parameters, and
        /// captured at the same time.
        /// Note that once the database reaches its maximum initialized size, it will overwrite old
        /// pose information. Check the <paramref name="trimmedHistory"/> flag or the number of
        /// poses in the database to determine whether the old poses are being overwritten.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <param name="worldToCameraTransform"> The current camera pose (usually the camera pose
        /// result from the last AlignPointClouds or AlignDepthFloatToReconstruction).</param>
        /// <param name="minimumDistanceThreshold">A float distance threshold between 0 and 1.0f which
        /// regulates how close together poses are stored in the database. Input frames
        /// which have a minimum distance equal to or above this threshold when compared against the
        /// database will be stored, as it indicates the input has become dis-similar to the existing
        /// stored poses. Set to 0.0f to ignore the check and always add a pose when this function is
        /// called; however, in this case, unless there is an external test of distance, there is a risk
        /// of adding many duplicated poses.
        /// </param>
        /// <param name="addedPose">
        /// Set true when the input frame was added to the camera pose finder database.
        /// </param>
        /// <param name="trimmedHistory">
        /// Set true if the maxPoseHistoryCount was reached when the input frame is stored, so the
        /// oldest pose was overwritten in the camera pose finder database to store the latest pose.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths, or the
        /// <paramref name="minimumDistanceThreshold"/> parameter is less than 0 or greater
        /// than 1.0f.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The camera pose finder works by accumulating whether the values at each sample location pixel
        /// in a saved pose frame are less than or greater than a threshold which is randomly chosen
        /// between minimum and maximum boundaries (e.g. for color this is 0-255). Given enough samples
        /// this represents a unique key frame signature that we can match against, as different poses
        /// will have different values for surfaces which are closer or further away, or different
        /// colors.
        /// Note that unlike depth, the robustness of finding a valid camera pose can have issues with
        /// ambient illumination levels in the color image. For best matching results, both the Kinect
        /// camera and also the environment should have exactly the same configuration as when the
        /// database key frame images were captured i.e. if you had a fixed exposure and custom white
        /// balance, this should again be set when testing the database later, otherwise the matching
        /// accuracy will be reduced.
        /// To improve accuracy, it is possible to provide not just a red, green, blue input in the
        /// color image, but instead a different 3 channels of match data scaled 0-255. For
        /// example, to be more illumination independent, you could calculate hue and saturation, or
        /// convert RGB to LAB and use the AB channels. Other measures such as texture response
        /// or corner response could additionally be computed and used in one or more of the channels.
        /// </remarks>
        public void ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform,
            float minimumDistanceThreshold,
            out bool addedPose,
            out bool trimmedHistory)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            HRESULT hr = cameraPoseFinder.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform,
                minimumDistanceThreshold,
                out addedPose,
                out trimmedHistory);

            ExceptionHelper.ThrowIfFailed(hr);
        }
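A minimal call-site sketch for the wrapper above, assuming an already constructed CameraPoseFinder named poseFinder and depth/color frames of matching size; the variable names and the 0.3f threshold are illustrative, not prescribed by the API.

bool addedPose;
bool trimmedHistory;

// Store the current view in the pose finder database only if it is dissimilar
// enough (distance >= 0.3f) from the frames already stored.
poseFinder.ProcessFrame(
    depthFloatFrame,
    colorFrame,
    currentWorldToCameraTransform,
    0.3f,
    out addedPose,
    out trimmedHistory);

if (trimmedHistory)
{
    // The database reached its maximum size and the oldest stored pose was overwritten.
}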
Example #2
        /// <summary>
        /// Create a visible color shaded image of a point cloud and its normals. All image
        /// frames must have the same width and height.
        /// </summary>
        /// <param name="pointCloudFrame">The point cloud frame to be shaded.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) where the raycast was performed from.
        /// Pass identity if the point cloud did not originate from a raycast and is in the
        /// camera local coordinate system.
        /// </param>
        /// <param name="shadedSurfaceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color L.N shaded
        /// surface image. Pass null to skip this image.
        /// </param>
        /// <param name="shadedSurfaceNormalsFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color shaded
        /// normals image with color indicating orientation. Pass null to skip this image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void ShadePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform,
            FusionColorImageFrame shadedSurfaceFrame,
            FusionColorImageFrame shadedSurfaceNormalsFrame)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionShadePointCloud2(
                                              FusionImageFrame.ToHandleRef(pointCloudFrame),
                                              ref worldToCameraTransform,
                                              IntPtr.Zero,
                                              FusionImageFrame.ToHandleRef(shadedSurfaceFrame),
                                              FusionImageFrame.ToHandleRef(shadedSurfaceNormalsFrame)));
        }
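A hedged usage sketch: shading a raycast point cloud into a displayable image. The frame variables are assumed to be pre-allocated with identical dimensions, and worldToCameraTransform is the pose the raycast was performed from.

// Either output frame may be null if that image is not needed.
FusionDepthProcessor.ShadePointCloud(
    pointCloudFrame,              // e.g. from CalculatePointCloud
    worldToCameraTransform,       // or Matrix4.Identity for a camera-local cloud
    shadedSurfaceFrame,           // receives the color L.N shaded surface image
    shadedSurfaceNormalsFrame);   // receives the color-coded normals image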
Example #3
        /// <summary>
        /// Find the most similar camera poses to the current camera input by comparing against the
        /// camera pose finder database, and returning a set of similar camera poses. These poses
        /// and similarity measurements are ordered in terms of decreasing similarity (i.e. the most
        /// similar is first). Both input depth and color frames must be identical sizes, with valid
        /// camera parameters and captured at the same time.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <returns>Returns the matched frames object created by the camera pose finder.</returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed,
        /// or the call failed for an unknown reason.
        /// </exception>
        public MatchCandidates FindCameraPose(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            INuiFusionMatchCandidates matchCandidates = null;

            ExceptionHelper.ThrowIfFailed(cameraPoseFinder.FindCameraPose(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(colorFrame),
                                              out matchCandidates));

            return new MatchCandidates(matchCandidates);
        }
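A sketch of a relocalization query against the database, assuming the same poseFinder instance and input frames as above; the thresholds are illustrative. The candidate methods used here (GetPoseCount, CalculateMinimumDistance, GetMatchPoses) also appear in Example #11 below.

MatchCandidates candidates = poseFinder.FindCameraPose(depthFloatFrame, colorFrame);

// Candidates are ordered most similar first; a minimum distance close to 1 means a poor match.
if (candidates.GetPoseCount() > 0 && candidates.CalculateMinimumDistance() < 1.0f)
{
    Matrix4 bestGuess = candidates.GetMatchPoses()[0];
    // Verify and refine bestGuess with AlignPointClouds before trusting it.
}

candidates.Dispose();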
Example #4
        /// <summary>
        /// The AlignPointClouds function uses an iterative algorithm to align two sets of oriented
        /// point clouds and calculate the camera's relative pose. This is a generic function which
        /// can be used independently of a Reconstruction Volume with sets of overlapping point clouds.
        /// To find the frame to frame relative transformation between two sets of point clouds in
        /// the camera local frame of reference (created by DepthFloatFrameToPointCloud),
        /// set the <paramref name="observedToReferenceTransform"/> to the identity.
        /// To calculate the pose transformation between new depth frames and an existing
        /// Reconstruction volume, pass in previous frames point cloud from RenderReconstruction as
        /// the reference frame, and the current frame point cloud (from DepthFloatFrameToPointCloud)
        /// as the observed frame. Set the <paramref name="observedToReferenceTransform"/> to the
        /// previous frame's calculated camera pose.
        /// Note that here the current frame point cloud will be in the camera local frame of
        /// reference, whereas the synthetic points and normals will be in the global/world volume
        /// coordinate system. By passing the <paramref name="observedToReferenceTransform"/> you
        /// make the algorithm aware of the transformation between them.
        /// The <paramref name="observedToReferenceTransform"/> pose supplied can also take into
        /// account information you may have from other sensors or sensing mechanisms to aid the
        /// tracking. To do this multiply the relative frame to frame delta transformation from
        /// the other sensing system with the previous frame's pose before passing to this function.
        /// Note that any delta transform used should be in the same coordinate system as that
        /// returned by the DepthFloatFrameToPointCloud calculation.
        /// </summary>
        /// <param name="referencePointCloudFrame">
        /// The point cloud frame of the reference camera, or the previous Kinect point cloud frame.
        /// </param>
        /// <param name="observedPointCloudFrame">
        /// The point cloud frame of the observed camera, or the current Kinect frame.
        /// </param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run. The minimum value is 1.
        /// Using only a small number of iterations will have a faster runtime, however, the
        /// algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. Values vary depending on whether the pixel was a valid pixel
        /// used in tracking (green) or failed in different tests. Pass null if not required.
        /// </param>
        /// <param name="observedToReferenceTransform">
        /// A pre-allocated transformation matrix. At entry to the function this should be filled
        /// with the best guess for the observed to reference transform (usually the last frame's
        /// calculated pose). At exit this is filled with the calculated pose or identity if the
        /// calculation failed.
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input point clouds and could not calculate a valid transformation, and
        /// the <paramref name="observedToReferenceTransform"/> parameter is set to identity.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or the
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            ref Matrix4 observedToReferenceTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = NativeMethods.NuiFusionAlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                ref observedToReferenceTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
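A frame-to-frame alignment sketch using the wrapper above; previousPointCloud and currentPointCloud are assumed to be camera-local clouds created with DepthFloatFrameToPointCloud, so identity is the correct initial transform.

Matrix4 observedToReference = Matrix4.Identity;

bool aligned = FusionDepthProcessor.AlignPointClouds(
    previousPointCloud,                               // reference frame
    currentPointCloud,                                // observed frame
    FusionDepthProcessor.DefaultAlignIterationCount,
    null,                                             // no delta image required here
    ref observedToReference);

if (!aligned)
{
    // Tracking failed; observedToReference has been reset to identity.
}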
Example #5
        /// <summary>
        /// A high-level function to process a depth frame through the Kinect Fusion pipeline.
        /// Also integrates color, further using a parameter to constrain the integration to
        /// integrate color over a given angle relative to the surface normal (recommended use 
        /// is for thin structure scanning).
        /// Specifically, this performs processing equivalent to the following functions for each frame:
        /// <para>
        /// 1) AlignDepthFloatToReconstruction
        /// 2) IntegrateFrame
        /// </para>
        /// If there is a tracking error in the AlignDepthFloatToReconstruction stage, no depth data 
        /// integration will be performed, and the camera pose will remain unchanged.
        /// The maximum image resolution supported in this function is 640x480.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the align camera tracking algorithm to run.
        /// The minimum value is 1. Using only a small number of iterations will have a faster
        /// runtime, however, the algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Lower values have
        /// more noisy representations, but objects that move appear and disappear faster, so are
        /// suitable for more dynamic environments. Higher values integrate objects more slowly,
        /// but provide finer detail with less noise.
        /// </param>
        /// <param name="maxColorIntegrationAngle">An angle parameter in degrees to specify the angle
        /// with respect to the surface normal over which color will be integrated. This can be used so
        /// that color is integrated only when the camera sensor is near parallel with the surface
        /// (i.e. the camera direction of view is perpendicular to the surface), or within +/- an angle
        /// from the surface normal direction.
        /// Pass FusionDepthProcessor.DefaultColorIntegrationOfAllAngles to ignore this and accept color
        /// from all angles (default, fastest processing).
        /// This angle relative to the surface normal direction vector describes the acceptance half
        /// angle; for example, a +/- 90 degree acceptance angle in all directions (i.e. a 180 degree
        /// hemisphere) relative to the normal would integrate color in any orientation of the sensor
        /// towards the front of the surface, even when parallel to the surface, whereas a 0 acceptance
        /// angle would only integrate color directly along a single ray exactly perpendicular to the
        /// surface. In practice, the useful range of values is between 0 and 90 exclusive
        /// (e.g. setting +/- 60 degrees = 120 degrees total acceptance angle).
        /// Note that there is a trade-off here, as setting this has a runtime cost; conversely,
        /// ignoring it will integrate color from any angle over all voxels along camera rays around the
        /// zero crossing surface region in the volume, which can cause thin structures to have the same
        /// color on both sides.</param>
        /// <param name="worldToCameraTransform">
        /// The best guess of the latest camera pose (usually the camera pose result from the last
        /// process call).
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>  
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>  
        /// parameter is an incorrect image size.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1 or
        /// greater than the maximum unsigned short value.
        /// Thrown when the <paramref name="maxIntegrationWeight"/> parameter is less than 1 or 
        /// greater than the maximum unsigned short value.
        /// Thrown when the <paramref name="maxColorIntegrationAngle"/> parameter value is not
        /// FusionDepthProcessor.DefaultColorIntegrationOfAllAngles or between 0 and 90 degrees, 
        /// exclusively.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// Users may also optionally call the low-level functions individually, instead of calling this
        /// function, for more control. However, this function call will be faster due to the integrated 
        /// nature of the calls. After this call completes, if a visible output image of the reconstruction
        /// is required, the user can call CalculatePointCloud and then FusionDepthProcessor.ShadePointCloud.
        /// </remarks>
        public bool ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            int maxAlignIterationCount,
            int maxIntegrationWeight,
            float maxColorIntegrationAngle,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);
            ushort maxWeight = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            HRESULT hr = volume.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                maxIterations,
                maxWeight,
                maxColorIntegrationAngle,
                ref worldToCameraTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
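An illustrative per-frame call on a ColorReconstruction volume (assumed here to be named volume), using the processor defaults referenced elsewhere in these examples.

bool tracked = volume.ProcessFrame(
    depthFloatFrame,
    colorFrame,
    FusionDepthProcessor.DefaultAlignIterationCount,
    FusionDepthProcessor.DefaultIntegrationWeight,
    FusionDepthProcessor.DefaultColorIntegrationOfAllAngles,  // accept color from all angles
    volume.GetCurrentWorldToCameraTransform());

if (!tracked)
{
    // Alignment failed: no data was integrated and the camera pose is unchanged.
}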
Example #6
        /// <summary>
        /// Integrates depth float data and color data into the reconstruction volume from the
        /// passed camera pose. Here the angle parameter constrains the integration to
        /// integrate color over a given angle relative to the surface normal (recommended use 
        /// is for thin structure scanning).
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be integrated.</param>
        /// <param name="colorFrame">The color frame to be integrated.</param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Minimum value is 1.
        /// Lower values have more noisy representations, but objects that move integrate and 
        /// disintegrate faster, so are suitable for more dynamic environments. Higher values
        /// integrate objects more slowly, but provide finer detail with less noise.</param>
        /// <param name="maxColorIntegrationAngle">An angle parameter in degrees to specify the angle
        /// with respect to the surface normal over which color will be integrated. This can be used so
        /// that color is integrated only when the camera sensor is near parallel with the surface
        /// (i.e. the camera direction of view is perpendicular to the surface), or within +/- an angle
        /// from the surface normal direction.
        /// Pass FusionDepthProcessor.DefaultColorIntegrationOfAllAngles to ignore this and accept color
        /// from all angles (default, fastest processing).
        /// This angle relative to the surface normal direction vector describes the acceptance half
        /// angle; for example, a +/- 90 degree acceptance angle in all directions (i.e. a 180 degree
        /// hemisphere) relative to the normal would integrate color in any orientation of the sensor
        /// towards the front of the surface, even when parallel to the surface, whereas a 0 acceptance
        /// angle would only integrate color directly along a single ray exactly perpendicular to the
        /// surface. In practice, the useful range of values is between 0 and 90 exclusive
        /// (e.g. setting +/- 60 degrees = 120 degrees total acceptance angle).
        /// Note that there is a trade-off here, as setting this has a runtime cost; conversely,
        /// ignoring it will integrate color from any angle over all voxels along camera rays around the
        /// zero crossing surface region in the volume, which can cause thin structures to have the same
        /// color on both sides.</param>
        /// <param name="worldToCameraTransform">
        /// The camera pose (usually the camera pose result from the last AlignPointClouds or 
        /// AlignDepthFloatToReconstruction).
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/> 
        /// parameter is null.</exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxIntegrationWeight"/> parameter is less than 1 or
        /// greater than the maximum unsigned short value, or when the
        /// <paramref name="maxColorIntegrationAngle"/> parameter value is not
        /// FusionDepthProcessor.DefaultColorIntegrationOfAllAngles or between 0 and 90 degrees,
        /// exclusively.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public void IntegrateFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            int maxIntegrationWeight,
            float maxColorIntegrationAngle,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            ushort integrationWeight = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            ExceptionHelper.ThrowIfFailed(volume.IntegrateFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                integrationWeight,
                maxColorIntegrationAngle,
                ref worldToCameraTransform));
        }
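A short sketch of constraining color integration to near camera-facing surfaces, which the summary recommends for thin structures; the 30-degree half angle and variable names are illustrative.

// Integrate depth everywhere, but only accept color within +/- 30 degrees of the surface normal.
volume.IntegrateFrame(
    depthFloatFrame,
    colorFrame,
    FusionDepthProcessor.DefaultIntegrationWeight,
    30.0f,
    worldToCameraTransform);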
Example #7
        /// <summary>
        /// Find the most similar camera poses to the current camera input by comparing against the
        /// camera pose finder database, and returning a set of similar camera poses. These poses 
        /// and similarity measurements are ordered in terms of decreasing similarity (i.e. the most 
        /// similar is first). Both input depth and color frames must be identical sizes, with valid 
        /// camera parameters and captured at the same time.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <returns>Returns the matched frames object created by the camera pose finder.</returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/> 
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, 
        /// or the call failed for an unknown reason.
        /// </exception>
        public MatchCandidates FindCameraPose(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            INuiFusionMatchCandidates matchCandidates = null;

            ExceptionHelper.ThrowIfFailed(cameraPoseFinder.FindCameraPose(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                out matchCandidates));

            return new MatchCandidates(matchCandidates);
        }
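The returned candidates are usually verified rather than trusted directly. Below is a hedged sketch of that verification (Example #11 shows a fuller version): raycast the volume at each proposed pose, re-align against the current point cloud, and keep the proposal with the lowest residual energy. The candidates object, volume, raycastPointCloud and depthPointCloud are assumed to be created elsewhere, and passing null for the delta image is an assumption here.

var poses = candidates.GetMatchPoses();
int testCount = Math.Min(5, candidates.GetPoseCount());

float alignmentEnergy;
float bestEnergy = float.MaxValue;
Matrix4 bestPose = Matrix4.Identity;

for (int i = 0; i < testCount; i++)
{
    Matrix4 proposal = poses[i];

    // Render the stored view from the volume, then align it against the live point cloud.
    volume.CalculatePointCloud(raycastPointCloud, proposal);

    bool ok = volume.AlignPointClouds(
        raycastPointCloud,
        depthPointCloud,
        FusionDepthProcessor.DefaultAlignIterationCount,
        null,
        out alignmentEnergy,
        ref proposal);

    if (ok && alignmentEnergy > 0 && alignmentEnergy < bestEnergy)
    {
        bestEnergy = alignmentEnergy;
        bestPose = proposal;
    }
}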
Example #8
        /// <summary>
        /// Test input camera frames against the camera pose finder database, adding frames to the 
        /// database if dis-similar enough to existing frames. Both input depth and color frames 
        /// must be identical sizes, a minimum size of 80x60, with valid camera parameters, and 
        /// captured at the same time.
        /// Note that once the database reaches its maximum initialized size, it will overwrite old
        /// pose information. Check the <paramref name="trimmedHistory"/> flag or the number of
        /// poses in the database to determine whether the old poses are being overwritten.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <param name="worldToCameraTransform"> The current camera pose (usually the camera pose 
        /// result from the last AlignPointClouds or AlignDepthFloatToReconstruction).</param>
        /// <param name="minimumDistanceThreshold">A float distance threshold between 0 and 1.0f which
        /// regulates how close together poses are stored in the database. Input frames
        /// which have a minimum distance equal to or above this threshold when compared against the 
        /// database will be stored, as it indicates the input has become dis-similar to the existing 
        /// stored poses. Set to 0.0f to ignore the check and always add a pose when this function is
        /// called; however, in this case, unless there is an external test of distance, there is a risk
        /// of adding many duplicated poses.
        /// </param>
        /// <param name="addedPose">
        /// Set true when the input frame was added to the camera pose finder database.
        /// </param>
        /// <param name="trimmedHistory">
        /// Set true if the maxPoseHistoryCount was reached when the input frame is stored, so the
        /// oldest pose was overwritten in the camera pose finder database to store the latest pose.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/> 
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/> 
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths, or the 
        /// <paramref name="minimumDistanceThreshold"/> parameter is less than 0 or greater 
        /// than 1.0f.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The camera pose finder works by accumulating whether the values at each sample location pixel
        /// in a saved pose frame are less than or greater than a threshold which is randomly chosen 
        /// between minimum and maximum boundaries (e.g. for color this is 0-255). Given enough samples
        /// this represents a unique key frame signature that we can match against, as different poses
        /// will have different values for surfaces which are closer or further away, or different
        /// colors.
        /// Note that unlike depth, the robustness of finding a valid camera pose can have issues with
        /// ambient illumination levels in the color image. For best matching results, both the Kinect 
        /// camera and also the environment should have exactly the same configuration as when the 
        /// database key frame images were captured i.e. if you had a fixed exposure and custom white
        /// balance, this should again be set when testing the database later, otherwise the matching
        /// accuracy will be reduced. 
        /// To improve accuracy, it is possible to provide not just a red, green, blue input in the
        /// color image, but instead a different 3 channels of match data scaled 0-255. For
        /// example, to be more illumination independent, you could calculate hue and saturation, or
        /// convert RGB to LAB and use the AB channels. Other measures such as texture response
        /// or corner response could additionally be computed and used in one or more of the channels.
        /// </remarks>
        public void ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform, 
            float minimumDistanceThreshold,
            out bool addedPose,
            out bool trimmedHistory)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            HRESULT hr = cameraPoseFinder.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform,
                minimumDistanceThreshold,
                out addedPose,
                out trimmedHistory);

            ExceptionHelper.ThrowIfFailed(hr);
        }
Example #9
        private bool TrackIntegrate(DepthImagePixel[] depthPixels, byte[] colorPixels, KinectFormat workFormat)
        {
            var depthSize = FormatHelper.GetDepthSize(workFormat.DepthImageFormat);
            var colorSize = FormatHelper.GetColorSize(workFormat.ColorImageFormat);

            // Convert the depth image frame to depth float image frame
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels,
                (int)depthSize.Width,
                (int)depthSize.Height,
                this.depthFloatBuffer,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                false);

            bool trackingSucceeded = this.volume.AlignDepthFloatToReconstruction(
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    residualFloatBuffer,
                    out _alignmentEnergy,
                    volume.GetCurrentWorldToCameraTransform());

            //if (trackingSucceeded && _alignmentEnergy == 0.0)
            //    trackingSucceeded = false;

            // ProcessFrame will first calculate the camera pose and then integrate
            // if tracking is successful
            //bool trackingSucceeded = this.volume.ProcessFrame(
            //    this.depthFloatBuffer,
            //    FusionDepthProcessor.DefaultAlignIterationCount,
            //    IntegrationWeight,
            //    this.volume.GetCurrentWorldToCameraTransform());

            // If camera tracking failed, no data integration or raycast for reference
            // point cloud will have taken place, and the internal camera pose
            // will be unchanged.
            if (!trackingSucceeded)
            {
                this.trackingErrorCount++;

                // Show tracking error on status bar
                FusionStatusMessage = Properties.Resources.CameraTrackingFailed;
                _audioManager.State = AudioState.Error;
            }
            else
            {
                ProcessResidualImage();

                this.worldToCameraTransform = volume.GetCurrentWorldToCameraTransform();

                if (!IsIntegrationPaused)
                {
                    if (IntegratingColor)
                    {
                        FusionColorImageFrame frame = new FusionColorImageFrame((int)colorSize.Width, (int)colorSize.Height);
                        Single colorIntegrationAngle = 10.0f;

                        // Pack the 4-byte color pixels into one int per pixel, as required by CopyPixelDataFrom
                        int[] intColorPixels = new int[colorPixels.Length / 4];
                        Buffer.BlockCopy(colorPixels, 0, intColorPixels, 0, colorPixels.Length);
                        frame.CopyPixelDataFrom(intColorPixels);
                        this.volume.IntegrateFrame(depthFloatBuffer, frame, FusionDepthProcessor.DefaultIntegrationWeight, colorIntegrationAngle, this.worldToCameraTransform);
                    }
                    else
                    {
                        this.volume.IntegrateFrame(depthFloatBuffer, IntegrationWeight, this.worldToCameraTransform);
                    }
                }

                this.trackingErrorCount = 0;
            }

            if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
            {
                // Auto Reset due to bad tracking
                FusionStatusMessage = Properties.Resources.ResetVolume;

                // Automatically Clear Volume and reset tracking if tracking fails
                this.ResetReconstruction(_currentVolumeCenter);
            }
            return trackingSucceeded;
        }
Example #10
        /// <summary>
        /// The AlignPointClouds function uses an iterative algorithm to align two sets of oriented
        /// point clouds and calculate the camera's relative pose. This is a generic function which
        /// can be used independently of a Reconstruction Volume with sets of overlapping point clouds.
        /// All images must be the same size and have the same camera parameters.
        /// To find the frame-to-frame relative transformation between two sets of point clouds in
        /// the camera local frame of reference (created by DepthFloatFrameToPointCloud),
        /// set the <paramref name="observedToReferenceTransform"/> to the identity.
        /// To calculate the frame-to-model pose transformation between point clouds calculated from 
        /// new depth frames with DepthFloatFrameToPointCloud and point clouds calculated from an 
        /// existing Reconstruction volume with CalculatePointCloud (e.g. from the previous frame),
        /// pass the CalculatePointCloud image as the reference frame, and the current depth frame 
        /// point cloud from DepthFloatFrameToPointCloud as the observed frame. Set the 
        /// <paramref name="observedToReferenceTransform"/> to the previous frame's calculated camera
        /// pose that was used in the CalculatePointCloud call.
        /// Note that here the current frame point cloud will be in the camera local frame of
        /// reference, whereas the raycast points and normals will be in the global/world coordinate
        /// system. By passing the <paramref name="observedToReferenceTransform"/> you make the 
        /// algorithm aware of the transformation between the two coordinate systems.
        /// The <paramref name="observedToReferenceTransform"/> pose supplied can also take into
        /// account information you may have from other sensors or sensing mechanisms to aid the
        /// tracking. To do this multiply the relative frame to frame delta transformation from
        /// the other sensing system with the previous frame's pose before passing to this function.
        /// Note that any delta transform used should be in the same coordinate system as that
        /// returned by the DepthFloatFrameToPointCloud calculation.
        /// </summary>
        /// <param name="referencePointCloudFrame">
        /// The point cloud frame of the reference camera, or the previous Kinect point cloud frame.
        /// </param>
        /// <param name="observedPointCloudFrame">
        /// The point cloud frame of the observed camera, or the current Kinect frame.
        /// </param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run. The minimum value is 1.
        /// Using only a small number of iterations will have a faster runtime, however, the
        /// algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. This may be used as input to additional vision algorithms such as
        /// object segmentation. Values vary depending on whether the pixel was a valid pixel used in
        /// tracking (inlier) or failed in different tests (outlier). 0xff000000 indicates an invalid 
        /// input vertex (e.g. from 0 input depth), or one where no correspondences occur between point
        /// cloud images. Outlier vertices rejected due to too large a distance between vertices are 
        /// coded as 0xff008000. Outlier vertices rejected due to too large a difference in normal angle
        /// between point clouds are coded as 0xff800000. Inliers are color shaded depending on the 
        /// residual energy at that point, with more saturated colors indicating more discrepancy
        /// between vertices and less saturated colors (i.e. more white) representing less discrepancy,
        /// or less information at that pixel. Pass null if this image is not required.
        /// </param>
        /// <param name="observedToReferenceTransform">
        /// A pre-allocated transformation matrix. At entry to the function this should be filled
        /// with the best guess for the observed to reference transform (usually the last frame's
        /// calculated pose). At exit this is filled with the calculated pose or identity if the
        /// calculation failed.
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input point clouds and could not calculate a valid transformation, and
        /// the <paramref name="observedToReferenceTransform"/> parameter is set to identity.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or the
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters have different camera parameters.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            ref Matrix4 observedToReferenceTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = NativeMethods.NuiFusionAlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                ref observedToReferenceTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
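A hedged sketch of interpreting the optional deltaFromReferenceFrame output described above. The pixel codes follow the documentation comment (0xff000000 invalid input or no correspondence, 0xff008000 distance outlier, 0xff800000 normal-angle outlier); the buffer and counter names are illustrative.

int[] deltaPixels = new int[deltaFromReferenceFrame.PixelDataLength];
deltaFromReferenceFrame.CopyPixelDataTo(deltaPixels);

int invalidCount = 0, distanceOutliers = 0, normalOutliers = 0;

foreach (int pixel in deltaPixels)
{
    uint argb = unchecked((uint)pixel);

    if (argb == 0xff000000) { invalidCount++; }          // invalid input vertex or no correspondence
    else if (argb == 0xff008000) { distanceOutliers++; } // rejected: vertex distance too large
    else if (argb == 0xff800000) { normalOutliers++; }   // rejected: normal angle too large
    // Remaining pixels are inliers, shaded by residual energy.
}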
Example #11
        // Not used
        /// <summary>
        /// Perform camera pose finding when tracking is lost using AlignPointClouds.
        /// This is typically more successful than FindCameraPoseAlignDepthFloatToReconstruction.
        /// </summary>
        /// <returns>Returns true if a valid camera pose was found, otherwise false.</returns>
        private bool FindCameraPoseAlignPointClouds(FusionFloatImageFrame floatFrame, FusionColorImageFrame imageFrame)
        {
            MatchCandidates matchCandidates = poseFinder.FindCameraPose(floatFrame, imageFrame);

            if (null == matchCandidates)
            {
                return false;
            }

            int poseCount = matchCandidates.GetPoseCount();
            float minDistance = matchCandidates.CalculateMinimumDistance();

            if (0 == poseCount || minDistance >= 1)
            {
                return false;
            }

            // Smooth the depth frame
            this.volume.SmoothDepthFloatFrame(floatFrame, smoothDepthFloatFrameCamera, 1, .04f);

            // Calculate point cloud from the smoothed frame
            FusionDepthProcessor.DepthFloatFrameToPointCloud(smoothDepthFloatFrameCamera, depthPointCloudFrame);

            double smallestEnergy = double.MaxValue;
            int smallestEnergyNeighborIndex = -1;

            int bestNeighborIndex = -1;
            Matrix4 bestNeighborCameraPose = Matrix4.Identity;

            double bestNeighborAlignmentEnergy = 0.006f;

            // Run alignment with best matched poseCount (i.e. k nearest neighbors (kNN))
            int maxTests = Math.Min(5, poseCount);

            var neighbors = matchCandidates.GetMatchPoses();

            for (int n = 0; n < maxTests; n++)
            {
                // Run the camera tracking algorithm with the volume. This uses the raycast frame and
                // pose to find a valid camera pose by matching the raycast against the input point cloud.
                Matrix4 poseProposal = neighbors[n];

                // Get the saved pose view by raycasting the volume
                this.volume.CalculatePointCloud(raycastPointCloudFrame, poseProposal);

                bool success = this.volume.AlignPointClouds(
                    raycastPointCloudFrame,
                    depthPointCloudFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    imageFrame,
                    out alignmentEnergy,
                    ref poseProposal);

                bool relocSuccess = success && alignmentEnergy < bestNeighborAlignmentEnergy && alignmentEnergy > 0;

                if (relocSuccess)
                {
                    bestNeighborAlignmentEnergy = alignmentEnergy;
                    bestNeighborIndex = n;

                    // This is after tracking succeeds, so should be a more accurate pose to store...
                    bestNeighborCameraPose = poseProposal;

                    // Update the delta image
                    imageFrame.CopyPixelDataTo(this.deltaFromReferenceFramePixelsArgb);
                }

                // Find smallest energy neighbor independent of tracking success
                if (alignmentEnergy < smallestEnergy)
                {
                    smallestEnergy = alignmentEnergy;
                    smallestEnergyNeighborIndex = n;
                }
            }

            matchCandidates.Dispose();

            // Use the neighbor with the smallest residual alignment energy.
            // At the cost of additional processing we could also use kNN+Mean camera pose finding here
            // by calculating the mean pose of the best n matched poses and also testing this to see if the
            // residual alignment energy is less than with kNN.
            if (bestNeighborIndex > -1)
            {
                this.worldToCameraTransform = bestNeighborCameraPose;

                return true;
            }
            else
            {
                this.worldToCameraTransform = neighbors[smallestEnergyNeighborIndex];
                return false;
            }
        }
Example #12
        /// <summary>
        /// Allocate the frame buffers used for rendering virtualCamera
        /// </summary>
        private void AllocateFrames()
        {
            // Allocate point cloud frame
            if (null == this.PointCloudFrame || this.depthWidth != this.PointCloudFrame.Width
                || this.depthHeight != this.PointCloudFrame.Height)
            {
                this.PointCloudFrame = new FusionPointCloudImageFrame(this.depthWidth, this.depthHeight);
            }

            // Allocate shaded surface frame
            if (null == this.ShadedSurfaceFrame || this.depthWidth != this.ShadedSurfaceFrame.Width
                || this.depthHeight != this.ShadedSurfaceFrame.Height)
            {
                this.ShadedSurfaceFrame = new FusionColorImageFrame(this.depthWidth, this.depthHeight);
            }

            // Allocate shaded surface normals frame
            if (null == this.ShadedSurfaceNormalsFrame || this.depthWidth != this.ShadedSurfaceNormalsFrame.Width
                || this.depthHeight != this.ShadedSurfaceNormalsFrame.Height)
            {
                this.ShadedSurfaceNormalsFrame = new FusionColorImageFrame(this.depthWidth, this.depthHeight);
            }
        }
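These buffers feed the rendering path mentioned in the ProcessFrame remarks earlier (CalculatePointCloud followed by FusionDepthProcessor.ShadePointCloud). A sketch, assuming the containing class also holds the reconstruction as this.volume and the current pose as this.worldToCameraTransform:

// Raycast the volume at the current pose, then shade the resulting point cloud for display.
this.volume.CalculatePointCloud(this.PointCloudFrame, this.worldToCameraTransform);

FusionDepthProcessor.ShadePointCloud(
    this.PointCloudFrame,
    this.worldToCameraTransform,
    this.ShadedSurfaceFrame,
    this.ShadedSurfaceNormalsFrame);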
Example #13
        /// <summary>
        /// Render Fusion color frame to UI
        /// </summary>
        /// <param name="colorFrame">Fusion color frame</param>
        /// <param name="colorPixels">Pixel buffer for fusion color frame</param>
        /// <param name="bitmap">Bitmap containing the color frame data for rendering</param>
        /// <param name="image">UI image component to render the color frame</param>
        private static void RenderColorImage(
            FusionColorImageFrame colorFrame, ref int[] colorPixels, ref WriteableBitmap bitmap, System.Windows.Controls.Image image)
        {
            if (null == colorFrame)
            {
                return;
            }

            if (null == colorPixels || colorFrame.PixelDataLength != colorPixels.Length)
            {
                // Create pixel array of correct format
                colorPixels = new int[colorFrame.PixelDataLength];
            }

            if (null == bitmap || colorFrame.Width != bitmap.Width || colorFrame.Height != bitmap.Height)
            {
                // Create bitmap of correct format
                bitmap = new WriteableBitmap(colorFrame.Width, colorFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);

                // Set bitmap as source to UI image object
                image.Source = bitmap;
            }

            // Copy pixel data to pixel buffer
            colorFrame.CopyPixelDataTo(colorPixels);

            // Write pixels to bitmap
            bitmap.WritePixels(new Int32Rect(0, 0, colorFrame.Width, colorFrame.Height), colorPixels, bitmap.PixelWidth * sizeof(int), 0);
        }
Example #14
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter = this.FInVPM[0];

            if (this.FTextureOutput[0] == null) { this.FTextureOutput[0] = new DX11Resource<DX11DynamicTexture2D>(); }
            if (this.FPCOut[0] == null) { this.FPCOut[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }
            if (this.FGeomOut[0] == null) { this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FOutVoxels[0] == null) { this.FOutVoxels[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }

            if (this.FInExport[0]) { this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

                    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                    this.worldToCameraTransform = Matrix4.Identity;

                    //this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);
                    this.colorVolume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);

                    //this.volume.
                    /*FusionPointCloudImageFrame pc;
                    pc.*/

                    this.defaultWorldToVolumeTransform = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);

                    this.ResetReconstruction();
                }
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset) { this.ResetReconstruction(); }
            }
        }
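ResetReconstruction is called above but not shown in this snippet. A minimal sketch of what it typically does in the Kinect Fusion samples, using the colorVolume, worldToCameraTransform and defaultWorldToVolumeTransform fields set up above (an assumption about this node's implementation, not taken from its source):

        private void ResetReconstruction()
        {
            // Start tracking again from the origin.
            this.worldToCameraTransform = Matrix4.Identity;

            if (this.colorVolume != null)
            {
                // Restore the volume transform captured right after creation so the
                // volume cube returns to its default position in front of the camera.
                this.colorVolume.ResetReconstruction(
                    this.worldToCameraTransform,
                    this.defaultWorldToVolumeTransform);
            }
        }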
Example #15
        private void InitFusion()
        {
            if (_isFusionInitialized)
                return;

            _currentFormat = new KinectFormat();

            _currentFormat.DepthImageFormat = DepthImageFormat.Undefined;
            _currentFormat.ColorImageFormat = ColorImageFormat.Undefined;

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var colorFormat = KinectSensor.ColorStream.Format;
            var kinectFormat = new KinectFormat();
            kinectFormat.DepthImageFormat = depthFormat;
            kinectFormat.ColorImageFormat = colorFormat;

            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool<FusionWorkItem, KinectFormat>(5, kinectFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue<FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            IntegratingColor = false;
            _audioManager.Start();
        }
Example #16
        /// <summary>
        /// Calculate a point cloud by raycasting into the reconstruction volume, returning the point
        /// cloud containing 3D points and normals of the zero-crossing dense surface at every visible
        /// pixel in the image from the given camera pose, and optionally the color visualization image.
        /// This point cloud can be used as a reference frame in the next call to
        /// FusionDepthProcessor.AlignPointClouds, or passed to FusionDepthProcessor.ShadePointCloud
        /// to produce a visible image output.
        /// The <paramref name="pointCloudFrame"/> can be an arbitrary image size, for example, enabling
        /// you to calculate point clouds at the size of your window and then create a visible image by
        /// calling FusionDepthProcessor.ShadePointCloud and rendering this image; however, be aware that
        /// large images will be expensive to calculate.
        /// </summary>
        /// <param name="pointCloudFrame">
        /// The pre-allocated point cloud frame, to be filled by raycasting into the reconstruction volume.
        /// Typically used as the reference frame with the FusionDepthProcessor.AlignPointClouds function
        /// or for visualization by calling FusionDepthProcessor.ShadePointCloud.
        /// </param>
        /// <param name="colorFrame">Optionally, the color frame to fill. Pass null to ignore.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) to raycast from.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null. </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the call failed for an unknown reason.
        /// </exception>
        public void CalculatePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.CalculatePointCloud(
                FusionImageFrame.ToHandleRef(pointCloudFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform));
        }
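A minimal usage sketch of this overload from the caller's side, raycasting both geometry and color from the current pose. The buffer names, sizes and the colorVolume instance are illustrative assumptions:

            // Hypothetical caller: colorVolume is a ColorReconstruction instance.
            var pointCloud = new FusionPointCloudImageFrame(640, 480);
            var raycastColor = new FusionColorImageFrame(640, 480);

            colorVolume.CalculatePointCloud(
                pointCloud,
                raycastColor,
                colorVolume.GetCurrentWorldToCameraTransform());

            // pointCloud can now serve as the reference frame for
            // FusionDepthProcessor.AlignPointClouds, and raycastColor holds the
            // color visualization for display.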
Example #17
        /// <summary>
        /// Create a visible color shaded image of a point cloud and its normals with simple
        /// grayscale L.N surface shading. All image frames must have the same width and height.
        /// </summary>
        /// <param name="pointCloudFrame">The point cloud frame to be shaded.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) where the raycast was performed from.
        /// Pass identity if the point cloud did not originate from a raycast and is in the
        /// camera local coordinate system.
        /// </param>
        /// <param name="shadedSurfaceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the grayscale L.N 
        /// shaded surface image. Pass null to skip this image.
        /// </param>
        /// <param name="shadedSurfaceNormalsFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color shaded
        /// normals image with color indicating orientation. Pass null to skip this image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters have different camera parameters.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void ShadePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform,
            FusionColorImageFrame shadedSurfaceFrame,
            FusionColorImageFrame shadedSurfaceNormalsFrame)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionShadePointCloud2(
                FusionImageFrame.ToHandleRef(pointCloudFrame),
                ref worldToCameraTransform,
                IntPtr.Zero,
                FusionImageFrame.ToHandleRef(shadedSurfaceFrame),
                FusionImageFrame.ToHandleRef(shadedSurfaceNormalsFrame)));
        }
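A minimal usage sketch of the wrapper above, producing both the shaded surface and the shaded normals images. The frame names and the 640x480 size are illustrative; all frames must share one size:

            var shadedSurface = new FusionColorImageFrame(640, 480);
            var shadedNormals = new FusionColorImageFrame(640, 480);

            FusionDepthProcessor.ShadePointCloud(
                pointCloudFrame,
                worldToCameraTransform,
                shadedSurface,
                shadedNormals);

            // Either output can then be copied into a WriteableBitmap for display,
            // for example with the color-frame-to-bitmap helper shown earlier.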
Example #18
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            this.sensor.ColorFrameReady += this.kinect_colorframe_ready;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range = DepthRange.Near;
                checkBoxNearMode.IsChecked = true;
            }
            catch (InvalidOperationException)
            {
                // device not near mode capable
            }

            // Initialize and start the FPS timer
            this.fpsTimer = new DispatcherTimer();
            this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            // Reset the reconstruction
            this.ResetReconstruction();
        }
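The SensorDepthFrameReady handler registered above is not included in this snippet. A condensed sketch of the processing path it usually follows in the KinectFusionBasics sample, with the field names assumed from the setup above (a sketch of the standard pipeline, not this sample's actual handler):

        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            // Convert raw depth to a metric depth float frame, clipping to the default range.
            this.volume.DepthToDepthFloatFrame(
                depthPixels,
                this.depthFloatBuffer,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                false);

            // Track the camera and integrate the new frame into the reconstruction volume.
            bool trackingSucceeded = this.volume.ProcessFrame(
                this.depthFloatBuffer,
                FusionDepthProcessor.DefaultAlignIterationCount,
                FusionDepthProcessor.DefaultIntegrationWeight,
                this.volume.GetCurrentWorldToCameraTransform());

            if (trackingSucceeded)
            {
                this.worldToCameraTransform = this.volume.GetCurrentWorldToCameraTransform();
            }

            // Raycast the volume from the current pose and shade it for display.
            this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);

            FusionDepthProcessor.ShadePointCloud(
                this.pointCloudBuffer,
                this.worldToCameraTransform,
                this.shadedSurfaceColorFrame,
                null);
        }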
        protected virtual void Dispose( bool disposing )
        {
            if ( !disposed ) {
                if ( depthFloatBuffer != null ) {
                    depthFloatBuffer.Dispose();
                    depthFloatBuffer = null;
                }

                if ( pointCloudBuffer != null ) {
                    pointCloudBuffer.Dispose();
                    pointCloudBuffer = null;
                }

                if ( shadedSurfaceColorFrame != null ) {
                    shadedSurfaceColorFrame.Dispose();
                    shadedSurfaceColorFrame = null;
                }

                if ( volume != null ) {
                    volume.Dispose();
                    volume = null;
                }

                disposed = true;
            }
        }
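A standard IDisposable sketch of the public entry point that usually pairs with the protected overload above; it is not present in the original snippet and is added here only for completeness of the pattern:

        public void Dispose()
        {
            Dispose( true );
            GC.SuppressFinalize( this );
        }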
        private void InitializeKinectFusion()
        {
            // Initialize Kinect Fusion
            var volParam = new ReconstructionParameters( VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ );
            volume = Reconstruction.FusionCreateReconstruction( volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity );

            // Create the conversion image buffers
            depthFloatBuffer = new FusionFloatImageFrame( DepthWidth, DepthHeight );
            pointCloudBuffer = new FusionPointCloudImageFrame( DepthWidth, DepthHeight );
            shadedSurfaceColorFrame = new FusionColorImageFrame( DepthWidth, DepthHeight );

            // Reset the reconstruction
            volume.ResetReconstruction( Matrix4.Identity );
        }
Example #21
        /// <summary>
        /// The AlignPointClouds function uses a GPU-based iterative algorithm to align two sets of
        /// overlapping oriented point clouds and calculate the camera's relative pose.
        /// All images must be the same size and have the same camera parameters. 
        /// </summary>
        /// <param name="referencePointCloudFrame">A reference point cloud frame.</param>
        /// <param name="observedPointCloudFrame">An observerd point cloud frame.</param>
        /// <param name="maxAlignIterationCount">The number of iterations to run.</param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. This may be used as input to additional vision algorithms such as
        /// object segmentation. Values vary depending on whether the pixel was a valid pixel used in
        /// tracking (inlier) or failed in different tests (outlier). 0xff000000 indicates an invalid 
        /// input vertex (e.g. from 0 input depth), or one where no correspondences occur between point
        /// cloud images. Outlier vertices rejected due to too large a distance between vertices are 
        /// coded as 0xff008000. Outlier vertices rejected due to too large a difference in normal angle
        /// between point clouds are coded as 0xff800000. Inliers are color shaded depending on the 
        /// residual energy at that point, with more saturated colors indicating more discrepancy
        /// between vertices and less saturated colors (i.e. more white) representing less discrepancy,
        /// or less information at that pixel. Pass null if this image is not required.
        /// </param>
        /// <param name="alignmentEnergy">A value describing
        /// how well the observed frame aligns to the model with the calculated pose (mean distance between
        /// matching points in the point clouds). A larger magnitude value represents more discrepancy, and
        /// a lower value represents less discrepancy. Note that it is unlikely an exact 0 (perfect alignment)
        /// value will ever be returned, as every frame from the sensor will contain some sensor noise.</param>
        /// <param name="referenceToObservedTransform">The initial guess at the transform. This is 
        /// updated on tracking success, or returned as identity on failure.</param>
        /// <returns>
        /// Returns true if successful; return false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or 
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or
        /// <paramref name="observedPointCloudFrame"/> or <paramref name="deltaFromReferenceFrame"/>
        /// parameter is an incorrect image size, or the iterations parameter is not greater than 0.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            out float alignmentEnergy,
            ref Matrix4 referenceToObservedTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort iterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = volume.AlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                iterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                out alignmentEnergy,
                ref referenceToObservedTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
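A minimal tracking-step sketch built on the wrapper above: raycast a reference point cloud from the previous pose, convert the new depth float frame into an observed point cloud, and update the camera pose only on success. Here reconstruction denotes an instance of the class containing the wrapper, and the frame and transform names are illustrative assumptions:

            Matrix4 calculatedCameraPose = worldToCameraTransform;

            // Reference: raycast the reconstruction volume from the previous camera pose.
            reconstruction.CalculatePointCloud(referencePointCloud, calculatedCameraPose);

            // Observed: convert the current depth float frame into an oriented point cloud.
            FusionDepthProcessor.DepthFloatFrameToPointCloud(depthFloatFrame, observedPointCloud);

            float alignmentEnergy;
            bool tracked = reconstruction.AlignPointClouds(
                referencePointCloud,
                observedPointCloud,
                FusionDepthProcessor.DefaultAlignIterationCount,
                null,
                out alignmentEnergy,
                ref calculatedCameraPose);

            if (tracked)
            {
                // Only adopt the new pose when tracking succeeded; on failure the
                // transform is returned as identity and should be discarded.
                worldToCameraTransform = calculatedCameraPose;
            }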
Example #22
        /// <summary>
        /// Allocate the frame buffers used in the process
        /// </summary>
        private void AllocateFrames()
        {
            // Allocate depth float frame
            if (null == this.depthFloatFrame || this.width != this.depthFloatFrame.Width || this.height != this.depthFloatFrame.Height)
            {
                this.depthFloatFrame = new FusionFloatImageFrame(this.width, this.height);
            }

            // Allocate delta from reference frame
            if (null == this.deltaFromReferenceFrame || this.width != this.deltaFromReferenceFrame.Width || this.height != this.deltaFromReferenceFrame.Height)
            {
                this.deltaFromReferenceFrame = new FusionFloatImageFrame(this.width, this.height);
            }

            // Allocate point cloud frame
            if (null == this.pointCloudFrame || this.width != this.pointCloudFrame.Width || this.height != this.pointCloudFrame.Height)
            {
                this.pointCloudFrame = new FusionPointCloudImageFrame(this.width, this.height);
            }

            // Allocate shaded surface frame
            if (null == this.shadedSurfaceFrame || this.width != this.shadedSurfaceFrame.Width || this.height != this.shadedSurfaceFrame.Height)
            {
                this.shadedSurfaceFrame = new FusionColorImageFrame(this.width, this.height);
            }

            // Allocate shaded surface normals frame
            if (null == this.shadedSurfaceNormalsFrame || this.width != this.shadedSurfaceNormalsFrame.Width || this.height != this.shadedSurfaceNormalsFrame.Height)
            {
                this.shadedSurfaceNormalsFrame = new FusionColorImageFrame(this.width, this.height);
            }
        }
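AllocateFrames only reallocates a buffer when its size no longer matches this.width and this.height, so it is cheap to call whenever the depth resolution changes. A hypothetical driver for it (the handler name and the width/height fields are assumptions, not part of the original sample):

        private void OnDepthSizeChanged(int newWidth, int newHeight)
        {
            this.width = newWidth;
            this.height = newHeight;

            // Buffers with a stale size are replaced; buffers that already match are kept.
            this.AllocateFrames();
        }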