Example No. 1
        /// <summary>
        /// Aligns a depth float image to the Reconstruction volume to calculate the new camera pose.
        /// This camera tracking method requires a Reconstruction volume, and updates the internal
        /// camera pose if successful. The maximum image resolution supported in this function is 640x480.
        /// Note that this function is designed primarily for tracking either with static scenes when
        /// performing environment reconstruction, or objects which move rigidly when performing object
        /// reconstruction from a static camera. Consider using the standalone function
        /// NuiFusionAlignPointClouds instead if tracking failures occur due to parts of a scene which
        /// move non-rigidly or should be considered as outliers, although in practice, such issues are
        /// best avoided by carefully designing or constraining usage scenarios wherever possible.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run.
        /// The minimum value is 1. Using only a small number of iterations results in a faster
        /// runtime; however, the algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated float image frame, to be filled with information about how
        /// well each observed pixel aligns with the passed-in reference frame. This may be processed
        /// to create a color rendering, or may be used as input to additional vision algorithms such
        /// as object segmentation. Pass null if not required.
        /// </param>
        /// <param name="alignmentEnergy">
        /// A float to receive a value describing how well the observed frame aligns to the model with
        /// the calculated pose. A larger magnitude value represents more discrepancy, and a lower value
        /// represents less discrepancy. Note that it is unlikely an exact 0 (perfect alignment) value will
        /// ever be returned as every frame from the sensor will contain some sensor noise.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The best guess of the camera pose (usually the camera pose result from the last
        /// AlignPointClouds or AlignDepthFloatToReconstruction).
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public bool AlignDepthFloatToReconstruction(
            FusionFloatImageFrame depthFloatFrame,
            int maxAlignIterationCount,
            FusionFloatImageFrame deltaFromReferenceFrame,
            out float alignmentEnergy,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = volume.AlignDepthFloatToReconstruction(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                out alignmentEnergy,
                ref worldToCameraTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
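
A minimal usage sketch for the method above (not part of the original sample; the reconstruction, depthFloatFrame, and deltaFromReferenceFrame objects and the 0.003f energy threshold are assumptions): feed the previous pose as the best guess, then read back the updated internal pose only when alignment succeeds and the residual energy is low.

        // Hypothetical caller of AlignDepthFloatToReconstruction for per-frame camera tracking.
        float alignmentEnergy;
        bool tracked = reconstruction.AlignDepthFloatToReconstruction(
            depthFloatFrame,
            FusionDepthProcessor.DefaultAlignIterationCount,  // SDK default iteration count
            deltaFromReferenceFrame,                          // optional residual image; pass null if unused
            out alignmentEnergy,
            worldToCameraTransform);                          // best guess: the pose from the previous frame

        if (tracked && alignmentEnergy < 0.003f)              // assumed threshold; tune per scene
        {
            // A successful call also updates the volume's internal pose; read it back for the next frame.
            worldToCameraTransform = reconstruction.GetCurrentWorldToCameraTransform();
        }
        // Otherwise keep the previous pose and treat this frame as a tracking failure.
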
        /// <summary>
        /// Test input camera frames against the camera pose finder database, adding frames to the
        /// database if dis-similar enough to existing frames. Both input depth and color frames
        /// must be identical sizes, a minimum size of 80x60, with valid camera parameters, and
        /// captured at the same time.
        /// Note that once the database reaches its maximum initialized size, it will overwrite old
        /// pose information. Check the <paramref name="trimmedHistory"/> flag or the number of
        /// poses in the database to determine whether the old poses are being overwritten.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <param name="worldToCameraTransform"> The current camera pose (usually the camera pose
        /// result from the last AlignPointClouds or AlignDepthFloatToReconstruction).</param>
        /// <param name="minimumDistanceThreshold">A float distance threshold between 0 and 1.0f which
        /// regulates how close together poses are stored in the database. Input frames
        /// which have a minimum distance equal to or above this threshold when compared against the
        /// database will be stored, as it indicates the input has become dis-similar to the existing
        /// stored poses. Set to 0.0f to ignore the threshold and always add a pose when this function
        /// is called; however, in this case, unless there is an external test of distance, there is a
        /// risk of storing many duplicated poses.
        /// </param>
        /// <param name="addedPose">
        /// Set true when the input frame was added to the camera pose finder database.
        /// </param>
        /// <param name="trimmedHistory">
        /// Set true if the maxPoseHistoryCount was reached when the input frame is stored, so the
        /// oldest pose was overwritten in the camera pose finder database to store the latest pose.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths, or the
        /// <paramref name="minimumDistanceThreshold"/> parameter is less than 0 or greater
        /// than 1.0f.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The camera pose finder works by accumulating whether the values at each sample location pixel
        /// in a saved pose frame are less than or greater than a threshold which is randomly chosen
        /// between minimum and maximum boundaries (e.g. for color this is 0-255). Given enough samples
        /// this represents a unique key frame signature that we can match against, as different poses
        /// will have different values for surfaces which are closer or further away, or different
        /// colors.
        /// Note that, unlike depth, the robustness of finding a valid camera pose can be affected by
        /// ambient illumination levels in the color image. For best matching results, both the Kinect
        /// camera and the environment should have exactly the same configuration as when the
        /// database key frame images were captured (i.e. if you had a fixed exposure and custom white
        /// balance, these should be set again when testing the database later), otherwise the matching
        /// accuracy will be reduced.
        /// To improve accuracy, it is possible to provide not just a red, green, blue input in the
        /// color image, but instead three different channels of match data scaled 0-255. For
        /// example, to be more illumination independent, you could calculate hue and saturation, or
        /// convert RGB to LAB and use the AB channels. Other measures such as texture response
        /// or corner response could additionally be computed and used in one or more of the channels.
        /// </remarks>
        public void ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform,
            float minimumDistanceThreshold,
            out bool addedPose,
            out bool trimmedHistory)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            HRESULT hr = cameraPoseFinder.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform,
                minimumDistanceThreshold,
                out addedPose,
                out trimmedHistory);

            ExceptionHelper.ThrowIfFailed(hr);
        }
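
For context, a hedged sketch of how a caller might feed the camera pose finder (the poseFinder and frame objects, the trackingSucceeded flag, and the 0.3f threshold are assumptions, not part of the original sample): key frames are only added while tracking is good, and the distance threshold keeps near-duplicate poses out of the database.

        // Hypothetical caller: store a key frame only when camera tracking succeeded this frame.
        if (trackingSucceeded)
        {
            bool addedPose;
            bool trimmedHistory;

            poseFinder.ProcessFrame(
                depthFloatFrame,
                colorFrame,
                worldToCameraTransform,
                0.3f,               // minimumDistanceThreshold: only store sufficiently dissimilar frames
                out addedPose,
                out trimmedHistory);

            // addedPose reports whether this frame was stored; trimmedHistory reports that the
            // database was full and the oldest stored pose was overwritten.
        }
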
Example No. 3
        /// <summary>
        /// Converts Kinect depth frames in unsigned short format to depth frames in float format
        /// representing distance from the camera in meters (parallel to the optical center axis).
        /// </summary>
        /// <param name="depthImageData">
        /// An array which stores the extended-depth texture of a depth image from the Kinect camera.
        /// </param>
        /// <param name="depthImageDataWidth">Width of the depth image data.</param>
        /// <param name="depthImageDataHeight">Height of the depth image data.</param>
        /// <param name="depthFloatFrame">
        /// A pre-allocated depth float type image frame, to be filled with the floating point depth values.
        /// </param>
        /// <param name="minDepthClip">
        /// Minimum depth distance threshold in meters. Depth pixels below this value will be
        /// returned as invalid (0). Min depth must be positive or 0.
        /// </param>
        /// <param name="maxDepthClip">
        /// Maximum depth distance threshold in meters. Depth pixels above this value will be
        /// returned as invalid (0). Max depth must be greater than 0.
        /// </param>
        /// <param name="mirrorDepth">
        /// A boolean parameter specifying whether to horizontally mirror the input depth image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthImageData"/> or the <paramref name="depthFloatFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthImageDataWidth"/> parameter and depthFloatFrame's
        /// <c>width</c> is not equal, or the <paramref name="depthImageDataHeight"/> parameter and
        /// depthFloatFrame's <c>height</c> member is not equal.
        /// Thrown when the <paramref name="minDepthClip"/> parameter or the
        /// <paramref name="maxDepthClip"/> is less than zero.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthToDepthFloatFrame(
            DepthImagePixel[] depthImageData,
            int depthImageDataWidth,
            int depthImageDataHeight,
            FusionFloatImageFrame depthFloatFrame,
            float minDepthClip,
            float maxDepthClip,
            bool mirrorDepth)
        {
            if (null == depthImageData)
            {
                throw new ArgumentNullException("depthImageData");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthToDepthFloatFrame(
                                              depthImageData,
                                              (uint)depthImageDataWidth,
                                              (uint)depthImageDataHeight,
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              minDepthClip,
                                              maxDepthClip,
                                              mirrorDepth));
        }
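
A short usage sketch, assuming the static method above is FusionDepthProcessor.DepthToDepthFloatFrame and that depthImagePixels and a matching 640x480 depthFloatFrame already exist; the 0.35-4.0 m clip range is illustrative only.

        // Hypothetical caller: convert one raw Kinect depth frame into floating point meters.
        FusionDepthProcessor.DepthToDepthFloatFrame(
            depthImagePixels,    // e.g. filled earlier via DepthImageFrame.CopyDepthImagePixelDataTo(...)
            640,
            480,
            depthFloatFrame,     // pre-allocated FusionFloatImageFrame of the same 640x480 resolution
            0.35f,               // minDepthClip in meters
            4.0f,                // maxDepthClip in meters
            false);              // do not mirror the depth image
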
Example No. 4
        /// <summary>
        /// A high-level function to process a depth frame through the Kinect Fusion pipeline.
        /// Specifically, this performs on-GPU processing equivalent to the following functions
        /// for each frame:
        /// <para>
        /// 1) AlignDepthFloatToReconstruction
        /// 2) IntegrateFrame
        /// </para>
        /// Users may also optionally call the low-level functions individually, instead of calling this
        /// function, for more control. However, this function call will be faster due to the integrated
        /// nature of the calls. After this call completes, if a visible output image of the reconstruction
        /// is required, the user can call CalculatePointCloud and then ShadePointCloud.
        /// The maximum image resolution supported in this function is 640x480.
        /// <para/>
        /// If there is a tracking error in the AlignDepthFloatToReconstruction stage, no depth data
        /// integration will be performed, and the camera pose will remain unchanged.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the align camera tracking algorithm to run.
        /// The minimum value is 1. Using only a small number of iterations results in a faster
        /// runtime; however, the algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Lower values have
        /// noisier representations, but objects that move appear and disappear faster, so they are
        /// suitable for more dynamic environments. Higher values integrate objects more slowly,
        /// but provide finer detail with less noise.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The best guess of the latest camera pose (usually the camera pose result from the last
        /// process call).
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        public bool ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            int maxAlignIterationCount,
            int maxIntegrationWeight,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);
            ushort maxWeight     = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            HRESULT hr = volume.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                maxIterations,
                maxWeight,
                ref worldToCameraTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
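
A hedged sketch of the per-frame pipeline the summary describes (the reconstruction and image frame objects, and the use of the FusionDepthProcessor default constants, are assumptions): track and integrate with ProcessFrame, then raycast and shade the volume for display.

        // Hypothetical per-frame pipeline: track and integrate, then raycast and shade for the UI.
        bool tracked = reconstruction.ProcessFrame(
            depthFloatFrame,
            FusionDepthProcessor.DefaultAlignIterationCount,
            FusionDepthProcessor.DefaultIntegrationWeight,
            worldToCameraTransform);

        if (tracked)
        {
            // The align stage updated the internal pose; use it for raycasting.
            worldToCameraTransform = reconstruction.GetCurrentWorldToCameraTransform();
        }

        // Raycast the reconstruction from the current pose and shade it for rendering.
        reconstruction.CalculatePointCloud(pointCloudFrame, worldToCameraTransform);
        FusionDepthProcessor.ShadePointCloud(
            pointCloudFrame,
            worldToCameraTransform,
            shadedSurfaceFrame,
            shadedSurfaceNormalsFrame);
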
Example No. 5
        /// <summary>
        /// Integrates depth float data into the reconstruction volume from the
        /// worldToCameraTransform camera pose.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be integrated.</param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Minimum value is 1.
        /// Lower values have noisier representations, but objects that move integrate and
        /// disintegrate faster, so they are suitable for more dynamic environments. Higher values
        /// integrate objects more slowly, but provide finer detail with less noise.</param>
        /// <param name="worldToCameraTransform">
        /// The camera pose (usually the camera pose result from the last AlignPointClouds or
        /// AlignDepthFloatToReconstruction).
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxIntegrationWeight"/> parameter is less than 1
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public void IntegrateFrame(
            FusionFloatImageFrame depthFloatFrame,
            int maxIntegrationWeight,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort integrationWeight = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            ExceptionHelper.ThrowIfFailed(volume.IntegrateFrame(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              integrationWeight,
                                              ref worldToCameraTransform));
        }
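
As a small illustration of the weight trade-off described above (the reconstruction object, trackingSucceeded flag, and the value 200 are assumptions): integrate only after a successful alignment, with a weight chosen for the scene.

        // Hypothetical caller: small weights (e.g. tens) adapt quickly in dynamic scenes, while larger
        // weights (e.g. a few hundred) average more frames for finer, less noisy detail in static scenes.
        if (trackingSucceeded)
        {
            reconstruction.IntegrateFrame(depthFloatFrame, 200, worldToCameraTransform);
        }
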
Example No. 6
        /// <summary>
        /// Construct an oriented point cloud in the local camera frame of reference from a depth float
        /// image frame. Here we calculate the 3D position of each depth float pixel with the optical
        /// center of the camera as the origin. Both images must be the same size and have the same camera
        /// parameters. We use a right-hand coordinate system, and (in common with bitmap images with top left
        /// origin) +X is to the right, +Y down, and +Z is now forward from the Kinect camera into the scene,
        /// as though looking into the scene from behind the Kinect camera.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be converted.</param>
        /// <param name="pointCloudFrame">
        /// A pre-allocated point cloud frame, to be filled with 3D points and normals.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or the <paramref name="pointCloudFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="pointCloudFrame"/>
        /// parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthFloatFrameToPointCloud(
            FusionFloatImageFrame depthFloatFrame,
            FusionPointCloudImageFrame pointCloudFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthFloatFrameToPointCloud(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(pointCloudFrame)));
        }
        /// <summary>
        /// Find the most similar camera poses to the current camera input by comparing against the
        /// camera pose finder database, and returning a set of similar camera poses. These poses
        /// and similarity measurements are ordered in terms of decreasing similarity (i.e. the most
        /// similar is first). Both input depth and color frames must be identical sizes, with valid
        /// camera parameters and captured at the same time.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <returns>Returns the matched frames object created by the camera pose finder.</returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/>
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and  <paramref name="colorFrame"/>
        /// parameter is an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed,
        /// or the call failed for an unknown reason.
        /// </exception>
        public MatchCandidates FindCameraPose(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            INuiFusionMatchCandidates matchCandidates = null;

            ExceptionHelper.ThrowIfFailed(cameraPoseFinder.FindCameraPose(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(colorFrame),
                                              out matchCandidates));

            return new MatchCandidates(matchCandidates);
        }
Example No. 8
        /// <summary>
        /// Allocate the frame buffers used in the process
        /// </summary>
        private void AllocateFrames()
        {
            // Allocate depth float frame
            if (null == this.depthFloatFrame || this.width != this.depthFloatFrame.Width || this.height != this.depthFloatFrame.Height)
            {
                this.depthFloatFrame = new FusionFloatImageFrame(this.width, this.height);
            }

            // Allocate delta from reference frame
            if (null == this.deltaFromReferenceFrame || this.width != this.deltaFromReferenceFrame.Width || this.height != this.deltaFromReferenceFrame.Height)
            {
                this.deltaFromReferenceFrame = new FusionFloatImageFrame(this.width, this.height);
            }

            // Allocate point cloud frame
            if (null == this.pointCloudFrame || this.width != this.pointCloudFrame.Width || this.height != this.pointCloudFrame.Height)
            {
                this.pointCloudFrame = new FusionPointCloudImageFrame(this.width, this.height);
            }

            // Allocate shaded surface frame
            if (null == this.shadedSurfaceFrame || this.width != this.shadedSurfaceFrame.Width || this.height != this.shadedSurfaceFrame.Height)
            {
                this.shadedSurfaceFrame = new FusionColorImageFrame(this.width, this.height);
            }

            // Allocate shaded surface normals frame
            if (null == this.shadedSurfaceNormalsFrame || this.width != this.shadedSurfaceNormalsFrame.Width || this.height != this.shadedSurfaceNormalsFrame.Height)
            {
                this.shadedSurfaceNormalsFrame = new FusionColorImageFrame(this.width, this.height);
            }
        }
Example No. 9
        /// <summary>
        /// Construct an oriented point cloud in the local camera frame of reference from a depth float
        /// image frame. Here we calculate the 3D position of each depth float pixel with the optical
        /// center of the camera as the origin. We use a right-hand coordinate system, and (in common 
        /// with bitmap images with top left origin) +X is to the right, +Y down, and +Z is now forward
        /// from the Kinect camera into the scene, as though looking into the scene from behind the 
        /// Kinect camera. Both images must be the same size and have the same camera parameters.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be converted.</param>
        /// <param name="pointCloudFrame">
        /// A pre-allocated point cloud frame, to be filled with 3D points and normals.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or the <paramref name="pointCloudFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="pointCloudFrame"/>
        /// parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthFloatFrameToPointCloud(
            FusionFloatImageFrame depthFloatFrame,
            FusionPointCloudImageFrame pointCloudFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthFloatFrameToPointCloud(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(pointCloudFrame)));
        }
Example No. 10
        /// <summary>
        /// Converts Kinect depth frames in unsigned short format to depth frames in float format
        /// representing distance from the camera in meters (parallel to the optical center axis).
        /// Note: <paramref name="depthImageData"/> and <paramref name="depthFloatFrame"/> must
        /// be the same pixel resolution and equal to <paramref name="depthImageDataWidth"/> by
        /// <paramref name="depthImageDataHeight"/>.
        /// The min and max depth clip values enable clipping of the input data, for example, to help
        /// isolate particular objects or surfaces to be reconstructed. Note that the thresholds return 
        /// different values when a depth pixel is outside the threshold - pixels inside minDepthClip
        /// will be returned as 0 and ignored in processing, whereas pixels beyond maxDepthClip will be
        /// set to 1000 to signify a valid depth ray with depth beyond the set threshold. Setting this
        /// far-distance flag is important for reconstruction integration in situations where the camera is
        /// static or does not move significantly, as it enables any voxels closer to the camera
        /// along this ray to be culled instead of persisting (as would happen if the pixels were simply 
        /// set to 0 and ignored in processing). Note that when reconstructing large real-world size volumes,
        /// be sure to set large maxDepthClip distances, as when the camera moves around, any voxels in view
        /// which go beyond this threshold distance from the camera will be removed.
        /// </summary>
        /// <param name="depthImageData">
        /// An array which stores the extended-depth texture of a depth image from the Kinect camera.
        /// </param>
        /// <param name="depthImageDataWidth">Width of the depth image data.</param>
        /// <param name="depthImageDataHeight">Height of the depth image data.</param>
        /// <param name="depthFloatFrame">
        /// A pre-allocated depth float type image frame, to be filled with the floating point depth values.
        /// </param>
        /// <param name="minDepthClip">
        /// Minimum depth distance threshold in meters. Depth pixels below this value will be
        /// returned as invalid (0). Min depth must be positive or 0.
        /// </param>
        /// <param name="maxDepthClip">
        /// Maximum depth distance threshold in meters. Depth pixels above this value will be
        /// returned as invalid (1000). Max depth must be greater than 0.
        /// </param>
        /// <param name="mirrorDepth">
        /// A boolean parameter specifying whether to horizontally mirror the input depth image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthImageData"/> or the <paramref name="depthFloatFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthImageDataWidth"/> parameter and the depthFloatFrame's
        /// <c>Width</c> are not equal, or the <paramref name="depthImageDataHeight"/> parameter and
        /// the depthFloatFrame's <c>Height</c> member are not equal.
        /// Thrown when the <paramref name="minDepthClip"/> parameter or the
        /// <paramref name="maxDepthClip"/> is less than zero.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthToDepthFloatFrame(
            DepthImagePixel[] depthImageData,
            int depthImageDataWidth,
            int depthImageDataHeight,
            FusionFloatImageFrame depthFloatFrame,
            float minDepthClip,
            float maxDepthClip,
            bool mirrorDepth)
        {
            if (null == depthImageData)
            {
                throw new ArgumentNullException("depthImageData");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthToDepthFloatFrame(
                depthImageData,
                (uint)depthImageDataWidth,
                (uint)depthImageDataHeight,
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                minDepthClip,
                maxDepthClip,
                mirrorDepth));
        }
Example No. 11
        /// <summary>
        /// Find the most similar camera poses to the current camera input by comparing against the
        /// camera pose finder database, and returning a set of similar camera poses. These poses 
        /// and similarity measurements are ordered in terms of decreasing similarity (i.e. the most 
        /// similar is first). Both input depth and color frames must be identical sizes, with valid 
        /// camera parameters and captured at the same time.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <returns>Returns the matched frames object created by the camera pose finder.</returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/> 
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/>
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, 
        /// or the call failed for an unknown reason.
        /// </exception>
        public MatchCandidates FindCameraPose(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            INuiFusionMatchCandidates matchCandidates = null;

            ExceptionHelper.ThrowIfFailed(cameraPoseFinder.FindCameraPose(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                out matchCandidates));

            return new MatchCandidates(matchCandidates);
        }
Example No. 12
        /// <summary>
        /// Test input camera frames against the camera pose finder database, adding frames to the 
        /// database if dis-similar enough to existing frames. Both input depth and color frames 
        /// must be identical sizes, a minimum size of 80x60, with valid camera parameters, and 
        /// captured at the same time.
        /// Note that once the database reaches its maximum initialized size, it will overwrite old
        /// pose information. Check the <paramref name="trimmedHistory"/> flag or the number of
        /// poses in the database to determine whether the old poses are being overwritten.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="colorFrame">The color frame to be processed.</param>
        /// <param name="worldToCameraTransform"> The current camera pose (usually the camera pose 
        /// result from the last AlignPointClouds or AlignDepthFloatToReconstruction).</param>
        /// <param name="minimumDistanceThreshold">A float distance threshold between 0 and 1.0f which
        /// regulates how close together poses are stored in the database. Input frames
        /// which have a minimum distance equal to or above this threshold when compared against the 
        /// database will be stored, as it indicates the input has become dis-similar to the existing 
        /// stored poses. Set to 0.0f to ignore the threshold and always add a pose when this function
        /// is called; however, in this case, unless there is an external test of distance, there is a
        /// risk of storing many duplicated poses.
        /// </param>
        /// <param name="addedPose">
        /// Set true when the input frame was added to the camera pose finder database.
        /// </param>
        /// <param name="trimmedHistory">
        /// Set true if the maxPoseHistoryCount was reached when the input frame is stored, so the
        /// oldest pose was overwritten in the camera pose finder database to store the latest pose.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="colorFrame"/> 
        /// parameter is null. </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> and <paramref name="colorFrame"/> 
        /// parameters are an incorrect or different image size, or their <c>CameraParameters</c>
        /// member is null or has incorrectly sized focal lengths, or the 
        /// <paramref name="minimumDistanceThreshold"/> parameter is less than 0 or greater 
        /// than 1.0f.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The camera pose finder works by accumulating whether the values at each sample location pixel
        /// in a saved pose frame are less than or greater than a threshold which is randomly chosen 
        /// between minimum and maximum boundaries (e.g. for color this is 0-255). Given enough samples
        /// this represents a unique key frame signature that we can match against, as different poses
        /// will have different values for surfaces which are closer or further away, or different
        /// colors.
        /// Note that, unlike depth, the robustness of finding a valid camera pose can be affected by
        /// ambient illumination levels in the color image. For best matching results, both the Kinect
        /// camera and the environment should have exactly the same configuration as when the
        /// database key frame images were captured (i.e. if you had a fixed exposure and custom white
        /// balance, these should be set again when testing the database later), otherwise the matching
        /// accuracy will be reduced.
        /// To improve accuracy, it is possible to provide not just a red, green, blue input in the
        /// color image, but instead three different channels of match data scaled 0-255. For
        /// example, to be more illumination independent, you could calculate hue and saturation, or
        /// convert RGB to LAB and use the AB channels. Other measures such as texture response
        /// or corner response could additionally be computed and used in one or more of the channels.
        /// </remarks>
        public void ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform, 
            float minimumDistanceThreshold,
            out bool addedPose,
            out bool trimmedHistory)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            HRESULT hr = cameraPoseFinder.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform,
                minimumDistanceThreshold,
                out addedPose,
                out trimmedHistory);

            ExceptionHelper.ThrowIfFailed(hr);
        }
        private void InitializeKinectFusion()
        {
            // Initialize Kinect Fusion
            var volParam = new ReconstructionParameters( VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ );
            volume = Reconstruction.FusionCreateReconstruction( volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity );

            // Create the conversion buffers
            depthFloatBuffer = new FusionFloatImageFrame( DepthWidth, DepthHeight );
            pointCloudBuffer = new FusionPointCloudImageFrame( DepthWidth, DepthHeight );
            shadedSurfaceColorFrame = new FusionColorImageFrame( DepthWidth, DepthHeight );

            // Reset the reconstruction
            volume.ResetReconstruction( Matrix4.Identity );
        }
Example No. 14
        /// <summary>
        /// Render Fusion depth float frame to UI
        /// </summary>
        /// <param name="depthFloatFrame">Fusion depth float frame</param>
        /// <param name="bitmap">Bitmap contains depth float frame data for rendering</param>
        /// <param name="image">UI image component to render depth float frame to</param>
        private void RenderDepthFloatImage(
            FusionFloatImageFrame depthFloatFrame, ref WriteableBitmap bitmap, System.Windows.Controls.Image image)
        {
            if (null == depthFloatFrame)
            {
                return;
            }

            // PixelDataLength is the number of pixels, not bytes
            if (null == this.depthFloatFramePixelsArgb || depthFloatFrame.PixelDataLength != this.depthFloatFramePixelsArgb.Length)
            {
                // Create colored pixel array of correct format
                this.depthFloatFramePixelsArgb = new int[depthFloatFrame.PixelDataLength];
            }

            if (null == this.depthFloatFrameDepthPixels || depthFloatFrame.PixelDataLength != this.depthFloatFrameDepthPixels.Length)
            {
                // Create depth pixel array of correct format
                this.depthFloatFrameDepthPixels = new float[depthFloatFrame.PixelDataLength];
            }

            if (null == bitmap || depthFloatFrame.Width != bitmap.Width || depthFloatFrame.Height != bitmap.Height)
            {
                // Create bitmap of correct format
                bitmap = new WriteableBitmap(depthFloatFrame.Width, depthFloatFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);

                // Set bitmap as source to UI image object
                image.Source = bitmap;
            }

            depthFloatFrame.CopyPixelDataTo(this.depthFloatFrameDepthPixels);

            // Calculate color of pixels based on depth of each pixel
            float range = 4.0f;
            float oneOverRange = 1.0f / range;
            float minRange = 0.0f;

            Parallel.For(
                0,
                depthFloatFrame.Height,
                y =>
                    {
                        int index = y * depthFloatFrame.Width;
                        for (int x = 0; x < depthFloatFrame.Width; ++x, ++index)
                        {
                            float depth = this.depthFloatFrameDepthPixels[index];
                            int intensity = (depth >= minRange) ? ((int)(((depth - minRange) * oneOverRange) * 256.0f) % 256) : 0;

                            this.depthFloatFramePixelsArgb[index] = (intensity << 16) | (intensity << 8) | intensity; // argb
                        }
                    });

            // Copy colored pixels to bitmap
            bitmap.WritePixels(
                new Int32Rect(0, 0, depthFloatFrame.Width, depthFloatFrame.Height),
                this.depthFloatFramePixelsArgb,
                bitmap.PixelWidth * sizeof(int),
                0);
        }
Example No. 15
        /// <summary>
        /// Set a reference depth frame to be used internally to help with tracking when calling 
        /// AlignDepthFloatToReconstruction to calculate a new camera pose. This function should
        /// only be called when not using the default tracking behavior of Kinect Fusion.
        /// </summary>
        /// <param name="referenceDepthFloatFrame">A previous depth float frame where align was
        /// successful (and hence same functionality as AlignDepthFloatToReconstruction), 
        /// or a ray-casted model depth.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referenceDepthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referenceDepthFloatFrame"/> parameter is an incorrect 
        /// image size.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// AlignDepthFloatToReconstruction internally saves the last depth frame it was passed and
        /// uses this image to help it track when called the next time. For example, this can be used
        /// if you are reconstructing and lose track, then want to re-start tracking from a different
        /// (known) location without resetting the volume. To enable the tracking to succeed you
        /// could perform a raycast from the new location to get a depth image (by calling 
        /// CalculatePointCloudAndDepth) then call this set function with the depth image, before 
        /// calling AlignDepthFloatToReconstruction.
        /// </remarks>
        public void SetAlignDepthFloatToReconstructionReferenceFrame(
            FusionFloatImageFrame referenceDepthFloatFrame)
        {
            if (null == referenceDepthFloatFrame)
            {
                throw new ArgumentNullException("referenceDepthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.SetAlignDepthFloatToReconstructionReferenceFrame(
                FusionImageFrame.ToHandleRef(referenceDepthFloatFrame)));
        }
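
A hedged sketch of the relocalization workflow the remarks describe (the frame objects, the known pose, and the exact CalculatePointCloudAndDepth overload are assumptions): raycast the volume from a known pose to obtain a model depth image, set it as the reference frame, then realign.

        // Hypothetical re-start of tracking from a known pose without resetting the volume.
        Matrix4 knownPose = lastGoodWorldToCameraTransform;

        // Raycast the volume from the known pose to get a synthetic point cloud and depth image.
        reconstruction.CalculatePointCloudAndDepth(raycastPointCloudFrame, raycastDepthFloatFrame, knownPose);

        // Use the ray-casted model depth as the internal reference for the next alignment.
        reconstruction.SetAlignDepthFloatToReconstructionReferenceFrame(raycastDepthFloatFrame);

        // Align the next live depth frame starting from the known pose.
        float alignmentEnergy;
        bool tracked = reconstruction.AlignDepthFloatToReconstruction(
            depthFloatFrame,
            FusionDepthProcessor.DefaultAlignIterationCount,
            null,                 // residual image not needed here
            out alignmentEnergy,
            knownPose);
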
Example No. 16
        /// <summary>
        /// Converts Kinect depth frames in unsigned short format to depth frames in float format 
        /// representing distance from the camera in meters (parallel to the optical center axis).
        /// Note: <paramref name="depthImageData"/> and <paramref name="depthFloatFrame"/> must
        /// be the same pixel resolution. This version of the function runs on the GPU.
        /// </summary>
        /// <param name="depthImageData">The source depth data.</param>
        /// <param name="depthFloatFrame">A depth float frame, to be filled with depth.</param>
        /// <param name="minDepthClip">The minimum depth threshold. Values below this will be set to 0.</param>
        /// <param name="maxDepthClip">The maximum depth threshold. Values above this will be set to 1000.</param>
        /// <param name="mirrorDepth">Set true to mirror depth, false so the image appears correct if viewing
        /// the Kinect camera from behind.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthImageData"/> or 
        /// <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthImageData"/> or
        /// <paramref name="depthFloatFrame"/> parameter is an incorrect image size, or the 
        /// kernelWidth is an incorrect size.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The min and max depth clip values enable clipping of the input data, for example, to help
        /// isolate particular objects or surfaces to be reconstructed. Note that the thresholds return 
        /// different values when a depth pixel is outside the threshold - pixels inside minDepthClip
        /// will be returned as 0 and ignored in processing, whereas pixels beyond maxDepthClip will be
        /// set to 1000 to signify a valid depth ray with depth beyond the set threshold. Setting this
        /// far-distance flag is important for reconstruction integration in situations where the camera is
        /// static or does not move significantly, as it enables any voxels closer to the camera
        /// along this ray to be culled instead of persisting (as would happen if the pixels were simply 
        /// set to 0 and ignored in processing). Note that when reconstructing large real-world size volumes,
        /// be sure to set large maxDepthClip distances, as when the camera moves around, any voxels in view
        /// which go beyond this threshold distance from the camera will be removed.
        /// </remarks>
        public void DepthToDepthFloatFrame(
            DepthImagePixel[] depthImageData,
            FusionFloatImageFrame depthFloatFrame,
            float minDepthClip,
            float maxDepthClip,
            bool mirrorDepth)
        {
            if (null == depthImageData)
            {
                throw new ArgumentNullException("depthImageData");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.DepthToDepthFloatFrame(
                depthImageData,
                (uint)depthImageData.Length * sizeof(int),  // DepthImagePixel is 2 ushort per pixel
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                minDepthClip,
                maxDepthClip,
                mirrorDepth));
        }
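
A tiny sketch tied to the clipping behavior described in the remarks above (the reconstruction object, depthImagePixels, depthFloatFrame, and the 0.5-1.5 m range are assumptions): clip the working range to isolate a nearby object being scanned.

        // Hypothetical: only depth between 0.5 m and 1.5 m is kept; pixels beyond 1.5 m are set to the
        // far-distance value (1000) rather than being integrated, so background surfaces are excluded.
        reconstruction.DepthToDepthFloatFrame(depthImagePixels, depthFloatFrame, 0.5f, 1.5f, false);
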
Example No. 17
        /// <summary>
        /// Integrates depth float data into the reconstruction volume from the passed
        /// camera pose.
        /// Note: this function will also set the internal camera pose.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be integrated.</param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Minimum value is 1.
        /// Lower values have noisier representations, but objects that move integrate and
        /// disintegrate faster, so they are suitable for more dynamic environments. Higher values
        /// integrate objects more slowly, but provide finer detail with less noise.</param>
        /// <param name="worldToCameraTransform">
        /// The camera pose (usually the camera pose result from the last 
        /// FusionDepthProcessor.AlignPointClouds or AlignDepthFloatToReconstruction).
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="maxIntegrationWeight"/> parameter is less than 1 or
        /// greater than the maximum unsigned short value.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public void IntegrateFrame(
            FusionFloatImageFrame depthFloatFrame,
            int maxIntegrationWeight,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort integrationWeight = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            ExceptionHelper.ThrowIfFailed(volume.IntegrateFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                integrationWeight,
                ref worldToCameraTransform));
        }
Example No. 18
        private void InitFusion()
        {
            if (_isFusionInitialized)
                return;

            _currentFormat = new KinectFormat();

            _currentFormat.DepthImageFormat = DepthImageFormat.Undefined;
            _currentFormat.ColorImageFormat = ColorImageFormat.Undefined;

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var colorFormat = KinectSensor.ColorStream.Format;
            var kinectFormat = new KinectFormat();
            kinectFormat.DepthImageFormat = depthFormat;
            kinectFormat.ColorImageFormat = colorFormat;

            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool<FusionWorkItem, KinectFormat>(5, kinectFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue<FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            IntegratingColor = false;
            _audioManager.Start();
        }
Example n. 19
0
        // Not used
        /// <summary>
        /// Perform camera pose finding when tracking is lost using AlignPointClouds.
        /// This is typically more successful than FindCameraPoseAlignDepthFloatToReconstruction.
        /// </summary>
        /// <returns>Returns true if a valid camera pose was found, otherwise false.</returns>
        private bool FindCameraPoseAlignPointClouds(FusionFloatImageFrame floatFrame, FusionColorImageFrame imageFrame)
        {
            MatchCandidates matchCandidates = poseFinder.FindCameraPose(floatFrame, imageFrame);

            if (null == matchCandidates) {
                return false;
            }

            int poseCount = matchCandidates.GetPoseCount();
            float minDistance = matchCandidates.CalculateMinimumDistance();

            if (0 == poseCount || minDistance >= 1) {
                return false;
            }

            // Smooth the depth frame
            this.volume.SmoothDepthFloatFrame(floatFrame, smoothDepthFloatFrameCamera, 1, .04f);

            // Calculate point cloud from the smoothed frame
            FusionDepthProcessor.DepthFloatFrameToPointCloud(smoothDepthFloatFrameCamera, depthPointCloudFrame);

            double smallestEnergy = double.MaxValue;
            int smallestEnergyNeighborIndex = -1;

            int bestNeighborIndex = -1;
            Matrix4 bestNeighborCameraPose = Matrix4.Identity;

            double bestNeighborAlignmentEnergy = 0.006f;

            // Run alignment with best matched poseCount (i.e. k nearest neighbors (kNN))
            int maxTests = Math.Min(5, poseCount);

            var neighbors = matchCandidates.GetMatchPoses();

            for (int n = 0; n < maxTests; n++) {
                // Run the camera tracking algorithm with the volume:
                // this uses the raycast frame and pose to find a valid camera pose by matching the raycast against the input point cloud
                Matrix4 poseProposal = neighbors[n];

                // Get the saved pose view by raycasting the volume
                this.volume.CalculatePointCloud(raycastPointCloudFrame, poseProposal);

                bool success = this.volume.AlignPointClouds(
                    raycastPointCloudFrame,
                    depthPointCloudFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    imageFrame,
                    out alignmentEnergy,
                    ref poseProposal);

                bool relocSuccess = success && alignmentEnergy < bestNeighborAlignmentEnergy && alignmentEnergy > 0;

                if (relocSuccess) {
                    bestNeighborAlignmentEnergy = alignmentEnergy;
                    bestNeighborIndex = n;

                    // This is after tracking succeeds, so should be a more accurate pose to store...
                    bestNeighborCameraPose = poseProposal;

                    // Update the delta image
                    imageFrame.CopyPixelDataTo(this.deltaFromReferenceFramePixelsArgb);
                }

                // Find smallest energy neighbor independent of tracking success
                if (alignmentEnergy < smallestEnergy) {
                    smallestEnergy = alignmentEnergy;
                    smallestEnergyNeighborIndex = n;
                }
            }

            matchCandidates.Dispose();

            // Use the neighbor with the smallest residual alignment energy.
            // At the cost of additional processing we could also use kNN+Mean camera pose finding here
            // by calculating the mean pose of the best n matched poses and also testing this to see if the
            // residual alignment energy is less than with kNN.
            if (bestNeighborIndex > -1) {
                this.worldToCameraTransform = bestNeighborCameraPose;

                return true;
            } else {
                this.worldToCameraTransform = neighbors[smallestEnergyNeighborIndex];
                return false;
            }
        }
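A hedged sketch of how a relocalization helper like this is usually driven from the tracking loop: when frame-to-model tracking reports failure, the current depth float frame and matching color frame are handed to the pose finder, and integration resumes only after a pose has been recovered. The names trackingSucceeded, depthFloatFrame and colorImageFrame below are illustrative and not taken from the snippet above.

            // Illustrative fallback path after a tracking failure (names are hypothetical):
            if (!trackingSucceeded)
            {
                if (this.FindCameraPoseAlignPointClouds(depthFloatFrame, colorImageFrame))
                {
                    // The helper has already stored the recovered pose in this.worldToCameraTransform,
                    // so alignment and integration can resume on the next frame.
                }
                else
                {
                    // No acceptable pose was found; keep the last good pose and skip integration.
                }
            }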
Example n. 20
0
        // When both color and depth frames are ready
        private void onFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            // Only get the frames when we're done processing the previous one, to prevent
            // frame callbacks from piling up
            if (frameReady) {

                Monitor.Enter(frameLock);
                // Enter a context with both the depth and color frames
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                using (ColorImageFrame colorFrame = e.OpenColorImageFrame()) {
                    // Frames may be null if they were dropped or arrived out of sync;
                    // release the lock and wait for the next callback.
                    if (depthFrame == null || colorFrame == null) {
                        Monitor.Exit(frameLock);
                        return;
                    }

                    // Lock resources and prevent further frame callbacks
                    frameReady = false;

                    // Init
                    DepthImagePixel[] depth = new DepthImagePixel[depthFrame.PixelDataLength];

                    // Obtain raw data from frames
                    depthFrame.CopyDepthImagePixelDataTo(depth);
                    colorFrame.CopyPixelDataTo(color);

                    // Construct into a FusionFloatImageFrame
                    FusionFloatImageFrame frame = new FusionFloatImageFrame(640, 480);
                    FusionFloatImageFrame smoothDepthFloatFrame = new FusionFloatImageFrame(640, 480);
                    FusionDepthProcessor.DepthToDepthFloatFrame(depth, 640, 480, frame, FusionDepthProcessor.DefaultMinimumDepth, 2.5f, false);

                    this.volume.SmoothDepthFloatFrame(frame, smoothDepthFloatFrame, 1, .04f);

                    /*
                    byte[] pixelData = new byte[4 * 640 * 480];

                    colorFrame.CopyPixelDataTo(pixelData);

                    int[] intPixelData = new int[640 * 480];
                    Buffer.BlockCopy(pixelData, 0, intPixelData, 0, 640 * 480 * sizeof(int));
                    FusionColorImageFrame imageFrame = new FusionColorImageFrame(640, 480);
                    imageFrame.CopyPixelDataFrom(intPixelData);

                    bool success = FindCameraPoseAlignPointClouds(frame, imageFrame);

                    Matrix4 t = worldToCameraTransform;
                    Console.WriteLine("{0} {1} {2} {3}", t.M11, t.M12, t.M13, t.M14);
                    Console.WriteLine("{0} {1} {2} {3}", t.M21, t.M22, t.M23, t.M24);
                    Console.WriteLine("{0} {1} {2} {3}", t.M31, t.M32, t.M33, t.M34);
                    Console.WriteLine("{0} {1} {2} {3}", t.M41, t.M42, t.M43, t.M44);
                    if (success) {
                        currentMatrix = t;
                    }*/

                    // Calculate point cloud
                    FusionPointCloudImageFrame observedPointCloud = new FusionPointCloudImageFrame(640, 480);
                    FusionDepthProcessor.DepthFloatFrameToPointCloud(smoothDepthFloatFrame, observedPointCloud);

                    FusionPointCloudImageFrame imgFrame = new FusionPointCloudImageFrame(640, 480);
                    volume.CalculatePointCloud(imgFrame, currentMatrix);
                    Matrix4 t = currentMatrix;
                    float alignmentEnergy = 0;
                    bool success = volume.AlignPointClouds(imgFrame, observedPointCloud, 100, null, out alignmentEnergy, ref t);

                    Console.WriteLine(success);
                    Console.WriteLine("{0}", alignmentEnergy);
                    Console.WriteLine("{0} {1} {2} {3}", t.M11, t.M12, t.M13, t.M14);
                    Console.WriteLine("{0} {1} {2} {3}", t.M21, t.M22, t.M23, t.M24);
                    Console.WriteLine("{0} {1} {2} {3}", t.M31, t.M32, t.M33, t.M34);
                    Console.WriteLine("{0} {1} {2} {3}", t.M41, t.M42, t.M43, t.M44);
                    if (success) {
                        currentMatrix = t;
                    }

                    frame.Dispose();
                    smoothDepthFloatFrame.Dispose();
                    observedPointCloud.Dispose();
                    imgFrame.Dispose();

                    // Release resources, now ready for next callback
                    frameReady = true;
                }
                Monitor.Exit(frameLock);
            }
        }
Example n. 21
0
        /// <summary>
        /// Render Fusion depth float frame to UI
        /// </summary>
        /// <param name="depthFloatFrame">Fusion depth float frame</param>
        /// <param name="depthPixels">Pixel buffer for depth float frame with pixel in depth</param>
        /// <param name="colorPixels">Pixel buffer for depth float frame with pixel in colors</param>
        /// <param name="bitmap">Bitmap contains depth float frame data for rendering</param>
        /// <param name="image">UI image component to render depth float frame to</param>
        private static void RenderDepthFloatImage(FusionFloatImageFrame depthFloatFrame, ref float[] depthPixels, ref int[] colorPixels, ref WriteableBitmap bitmap, System.Windows.Controls.Image image)
        {
            if (null == depthFloatFrame)
            {
                return;
            }

            if (null == depthPixels || depthFloatFrame.PixelDataLength != depthPixels.Length)
            {
                // Create depth pixel array of correct format
                depthPixels = new float[depthFloatFrame.PixelDataLength];
            }

            if (null == colorPixels || depthFloatFrame.PixelDataLength != colorPixels.Length)
            {
                // Create colored pixel array of correct format
                colorPixels = new int[depthFloatFrame.PixelDataLength];
            }

            if (null == bitmap || depthFloatFrame.Width != bitmap.Width || depthFloatFrame.Height != bitmap.Height)
            {
                // Create bitmap of correct format
                bitmap = new WriteableBitmap(depthFloatFrame.Width, depthFloatFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);

                // Set bitmap as source to UI image object
                image.Source = bitmap;
            }

            depthFloatFrame.CopyPixelDataTo(depthPixels);

            // Calculate color pixels based on depth of each pixel
            float range = 4.0f;
            float minRange = 0.0f;

            for (int i = 0; i < depthPixels.Length; i++)
            {
                float depth = depthPixels[i];
                int intensity = (depth >= minRange) ? ((int)(((depth - minRange) / range) * 256.0f) % 256) : 0;

                colorPixels[i] = 0;
                colorPixels[i] += intensity; // blue

                intensity *= 256;
                colorPixels[i] += intensity; // green

                intensity *= 256;
                colorPixels[i] += intensity; // red
            }

            // Copy colored pixels to bitmap
            bitmap.WritePixels(
                        new Int32Rect(0, 0, depthFloatFrame.Width, depthFloatFrame.Height),
                        colorPixels,
                        bitmap.PixelWidth * sizeof(int),
                        0);
        }
Example n. 22
0
        /// <summary>
        /// Aligns a depth float image to the Reconstruction volume to calculate the new camera pose.
        /// This camera tracking method requires a Reconstruction volume, and updates the internal 
        /// camera pose if successful. The maximum image resolution supported in this function is 640x480.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run. 
        /// The minimum value is 1. Using only a small number of iterations will have a faster runtime,
        /// however, the algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated float image frame, to be filled with information about how
        /// well each observed pixel aligns with the passed in reference frame. This may be processed
        /// to create a color rendering, or may be used as input to additional vision algorithms such 
        /// as object segmentation. These residual values are normalized -1 to 1 and represent the 
        /// alignment cost/energy for each pixel. Larger magnitude values (either positive or negative)
        /// represent more discrepancy, and lower values represent less discrepancy or less information
        /// at that pixel.
        /// Note that if valid depth exists, but no reconstruction model exists behind the depth   
        /// pixels, 0 values indicating perfect alignment will be returned for that area. In contrast,
        /// where no valid depth occurs 1 values will always be returned. Pass null if not required.
        /// </param>
        /// <param name="alignmentEnergy">
        /// A float to receive a value describing how well the observed frame aligns to the model with
        /// the calculated pose. A larger magnitude value represents more discrepancy, and a lower value
        /// represents less discrepancy. Note that it is unlikely an exact 0 (perfect alignment) value 
        /// will ever be returned as every frame from the sensor will contain some sensor noise.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The best guess of the camera pose (usually the camera pose result from the last
        /// AlignPointClouds or AlignDepthFloatToReconstruction).
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is an incorrect image size.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1 or
        /// an incorrect value.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// Note that this function is designed primarily for tracking either with static scenes when
        /// performing environment reconstruction, or objects which move rigidly when performing object
        /// reconstruction from a static camera. Consider using the function AlignPointClouds instead 
        /// if tracking failures occur due to parts of a scene which move non-rigidly or should be 
        /// considered as outliers, although in practice, such issues are best avoided by carefully 
        /// designing or constraining usage scenarios wherever possible.
        /// </remarks> 
        public bool AlignDepthFloatToReconstruction(
            FusionFloatImageFrame depthFloatFrame,
            int maxAlignIterationCount,
            FusionFloatImageFrame deltaFromReferenceFrame,
            out float alignmentEnergy,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = volume.AlignDepthFloatToReconstruction(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                out alignmentEnergy,
                ref worldToCameraTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
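A minimal usage sketch of the wrapper above, assuming a Reconstruction instance named reconstruction and pre-allocated frames; the 0.5f energy threshold is purely illustrative, and GetCurrentWorldToCameraTransform is used here only to read back the pose that a successful call updates internally.

            // Illustrative call site (names and the threshold are assumptions, not part of the snippet above):
            float alignmentEnergy;
            bool aligned = reconstruction.AlignDepthFloatToReconstruction(
                depthFloatFrame,
                FusionDepthProcessor.DefaultAlignIterationCount,
                deltaFromReferenceFrame,        // pass null if the residual image is not needed
                out alignmentEnergy,
                worldToCameraTransform);

            // A failed alignment, or an unusually large residual energy, is commonly treated as a
            // tracking failure, in which case the previous camera pose is kept and integration is skipped.
            if (aligned && alignmentEnergy < 0.5f)
            {
                worldToCameraTransform = reconstruction.GetCurrentWorldToCameraTransform();
            }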
Example n. 23
0
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter = this.FInVPM[0];

            if (this.FTextureOutput[0] == null) { this.FTextureOutput[0] = new DX11Resource<DX11DynamicTexture2D>(); }
            if (this.FPCOut[0] == null) { this.FPCOut[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }
            if (this.FGeomOut[0] == null) { this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FOutVoxels[0] == null) { this.FOutVoxels[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }

            if (this.FInExport[0]) { this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

                    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                    this.worldToCameraTransform = Matrix4.Identity;

                    //this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);
                    this.colorVolume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);

                    //this.volume.
                    /*FusionPointCloudImageFrame pc;
                    pc.*/

                    this.defaultWorldToVolumeTransform = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);

                    this.ResetReconstruction();
                }
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset) { this.ResetReconstruction(); }
            }
        }
Example n. 24
0
        /// <summary>
        /// A high-level function to process a depth frame through the Kinect Fusion pipeline.
        /// Specifically, this performs processing equivalent to the following functions for each frame:
        /// <para>
        /// 1) AlignDepthFloatToReconstruction
        /// 2) IntegrateFrame
        /// </para>
        /// If there is a tracking error in the AlignDepthFloatToReconstruction stage, no depth data 
        /// integration will be performed, and the camera pose will remain unchanged.
        /// The maximum image resolution supported in this function is 640x480.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be processed.</param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the align camera tracking algorithm to run.
        /// The minimum value is 1. Using only a small number of iterations will have a faster
        /// runtime, however, the algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="maxIntegrationWeight">
        /// A parameter to control the temporal smoothing of depth integration. Lower values have
        /// more noisy representations, but objects that move appear and disappear faster, so are
        /// suitable for more dynamic environments. Higher values integrate objects more slowly,
        /// but provide finer detail with less noise.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The best guess of the latest camera pose (usually the camera pose result from the last
        /// process call).
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input depth image and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> parameter is an incorrect image size.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1 or
        /// greater than the maximum unsigned short value.
        /// Thrown when the <paramref name="maxIntegrationWeight"/> parameter is less than 1 or 
        /// greater than the maximum unsigned short value.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// Users may also optionally call the low-level functions individually, instead of calling this
        /// function, for more control. However, this function call will be faster due to the integrated 
        /// nature of the calls. After this call completes, if a visible output image of the reconstruction
        /// is required, the user can call CalculatePointCloud and then FusionDepthProcessor.ShadePointCloud.
        /// </remarks>
        public bool ProcessFrame(
            FusionFloatImageFrame depthFloatFrame,
            int maxAlignIterationCount,
            int maxIntegrationWeight,
            Matrix4 worldToCameraTransform)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);
            ushort maxWeight = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxIntegrationWeight);

            HRESULT hr = volume.ProcessFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                maxIterations,
                maxWeight,
                ref worldToCameraTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
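Sketch of the per-frame pipeline the remarks describe, under the assumption of a Reconstruction named reconstruction and a raw depthPixels array from the depth stream; the DepthToDepthFloatFrame call mirrors the signature used elsewhere in these examples.

            // Illustrative per-frame processing (names are assumptions):
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels, width, height, depthFloatBuffer,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                false);

            bool tracked = reconstruction.ProcessFrame(
                depthFloatBuffer,
                FusionDepthProcessor.DefaultAlignIterationCount,
                FusionDepthProcessor.DefaultIntegrationWeight,
                worldToCameraTransform);

            if (tracked)
            {
                // On success the internal pose has been updated; read it back for the next frame.
                worldToCameraTransform = reconstruction.GetCurrentWorldToCameraTransform();
            }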
Example n. 25
0
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            this.sensor.ColorFrameReady += this.kinect_colorframe_ready;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range = DepthRange.Near;
                checkBoxNearMode.IsChecked = true;
            }
            catch
            {
                // device not near mode capable
            }

            // Initialize and start the FPS timer
            this.fpsTimer = new DispatcherTimer();
            this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            // Reset the reconstruction
            this.ResetReconstruction();
        }
Example n. 26
0
        /// <summary>
        /// Spatially smooth a depth float image frame using edge-preserving filtering on GPU. 
        /// </summary>
        /// <param name="depthFloatFrame">A source depth float frame.</param>
        /// <param name="smoothDepthFloatFrame">A depth float frame, to be filled with smoothed 
        /// depth.</param>
        /// <param name="kernelWidth">Smoothing Kernel Width. Valid values are  1,2,3 
        /// (for 3x3,5x5,7x7 smoothing kernel block size respectively).</param>
        /// <param name="distanceThreshold">A distance difference range that smoothing occurs in.
        /// Pixels with neighboring pixels outside this distance range will not be smoothed 
        /// (larger values indicate discontinuity/edge). Must be greater than 0.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="smoothDepthFloatFrame"/> or 
        /// <paramref name="depthFloatFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="smoothDepthFloatFrame"/> or
        /// <paramref name="depthFloatFrame"/> parameter is an incorrect image size, or the 
        /// kernelWidth is an incorrect size.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public void SmoothDepthFloatFrame(
            FusionFloatImageFrame depthFloatFrame,
            FusionFloatImageFrame smoothDepthFloatFrame,
            int kernelWidth,
            float distanceThreshold)
        {
            if (null == smoothDepthFloatFrame)
            {
                throw new ArgumentNullException("smoothDepthFloatFrame");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            uint kW = ExceptionHelper.CastAndThrowIfOutOfUintRange(kernelWidth);

            ExceptionHelper.ThrowIfFailed(volume.SmoothDepthFloatFrame(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(smoothDepthFloatFrame),
                kW,
                distanceThreshold));
        }
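For reference, the calls elsewhere in this listing use a 3x3 kernel (kernelWidth = 1) and a 4 cm distance threshold; a typical pairing feeds the smoothed frame straight into point cloud generation, as sketched below (frame names are assumed).

            // Usage sketch mirroring the calls seen in the earlier examples:
            volume.SmoothDepthFloatFrame(depthFloatFrame, smoothDepthFloatFrame, 1, 0.04f);
            FusionDepthProcessor.DepthFloatFrameToPointCloud(smoothDepthFloatFrame, pointCloudFrame);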
        protected virtual void Dispose( bool disposing )
        {
            if ( !disposed ) {
                if ( depthFloatBuffer != null ) {
                    depthFloatBuffer.Dispose();
                    depthFloatBuffer = null;
                }

                if ( pointCloudBuffer != null ) {
                    pointCloudBuffer.Dispose();
                    pointCloudBuffer = null;
                }

                if ( shadedSurfaceColorFrame != null ) {
                    shadedSurfaceColorFrame.Dispose();
                    shadedSurfaceColorFrame = null;
                }

                if ( volume != null ) {
                    volume.Dispose();
                    volume = null;
                }

                disposed = true;
            }
        }
Example n. 28
0
        /// <summary>
        /// Calculate a point cloud and depth image by raycasting into the reconstruction volume. 
        /// This returns the point cloud containing 3D points and normals of the zero-crossing dense 
        /// surface at every visible pixel in the image from the given camera pose, as well as the
        /// depth to that surface.
        /// </summary>
        /// <param name="pointCloudFrame">A  point cloud frame, to be filled by raycasting into the 
        /// reconstruction volume. Typically used as the reference frame with the 
        /// FusionDepthProcessor.AlignPointClouds function or for visualization by calling 
        /// FusionDepthProcessor.ShadePointCloud.</param>
        /// <param name="depthFloatFrame">A floating point depth frame, to be filled with floating point
        /// depth in meters to the raycast surface. This image must be identical in size and 
        /// camera parameters to the <paramref name="pointCloudFrame"/> parameter.</param>
        /// <param name="worldToCameraTransform">The world-to-camera transform (camera pose) to raycast
        /// from.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="depthFloatFrame"/> 
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="depthFloatFrame"/>
        /// parameter is an incorrect image size.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// This point cloud can then be used as a reference frame in the next call to 
        /// FusionDepthProcessor.AlignPointClouds, or passed to FusionDepthProcessor.ShadePointCloud
        /// to produce a visible image output. The depth image can be used as a reference frame for
        /// AlignDepthFloatToReconstruction by calling SetAlignDepthFloatToReconstructionReferenceFrame 
        /// to enable a greater range of tracking. The <paramref name="pointCloudFrame"/> and 
        /// <paramref name="depthFloatFrame"/> parameters can be an arbitrary image size, for example, 
        /// enabling you to calculate point clouds at the size of your UI window and then create a visible
        /// image by calling FusionDepthProcessor.ShadePointCloud and render this image, however, be aware
        /// that the calculation of high resolution images will be expensive in terms of runtime.
        /// </remarks>
        public void CalculatePointCloudAndDepth(
            FusionPointCloudImageFrame pointCloudFrame,
            FusionFloatImageFrame depthFloatFrame,
            Matrix4 worldToCameraTransform)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.CalculatePointCloudAndDepth(
                FusionImageFrame.ToHandleRef(pointCloudFrame),
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                ref worldToCameraTransform));
        }
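Sketch of the visualization and reference-frame path described in the remarks above. The frame names are assumptions; ShadePointCloud and SetAlignDepthFloatToReconstructionReferenceFrame are the calls the remarks refer to, and passing null for the shaded normals image is assumed to be acceptable when that output is not needed.

            // Illustrative follow-up to CalculatePointCloudAndDepth (names are assumptions):
            reconstruction.CalculatePointCloudAndDepth(pointCloudFrame, raycastDepthFloatFrame, worldToCameraTransform);

            // Shade the raycast point cloud into a displayable color frame.
            FusionDepthProcessor.ShadePointCloud(pointCloudFrame, worldToCameraTransform, shadedSurfaceFrame, null);

            // Store the raycast depth as the reference frame to extend AlignDepthFloatToReconstruction tracking range.
            reconstruction.SetAlignDepthFloatToReconstructionReferenceFrame(raycastDepthFloatFrame);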
Example n. 29
0
        /// <summary>
        /// Render Fusion AlignDepthFloatToReconstruction float deltas frame to UI
        /// </summary>
        /// <param name="alignDeltasFloatFrame">Fusion depth float frame</param>
        /// <param name="bitmap">Bitmap contains float frame data for rendering</param>
        /// <param name="image">UI image component to render float frame to</param>
        private void RenderAlignDeltasFloatImage(FusionFloatImageFrame alignDeltasFloatFrame, ref WriteableBitmap bitmap, System.Windows.Controls.Image image)
        {
            if (null == alignDeltasFloatFrame)
            {
                return;
            }

            if (null == this.deltaFromReferenceFrameFloatPixels || alignDeltasFloatFrame.PixelDataLength != this.deltaFromReferenceFrameFloatPixels.Length)
            {
                // Create depth pixel array of correct format
                this.deltaFromReferenceFrameFloatPixels = new float[alignDeltasFloatFrame.PixelDataLength];
            }

            if (null == this.deltaFromReferenceFramePixels || alignDeltasFloatFrame.PixelDataLength != this.deltaFromReferenceFramePixels.Length)
            {
                // Create colored pixel array of correct format
                this.deltaFromReferenceFramePixels = new int[alignDeltasFloatFrame.PixelDataLength];
            }

            if (null == bitmap || alignDeltasFloatFrame.Width != bitmap.Width || alignDeltasFloatFrame.Height != bitmap.Height)
            {
                // Create bitmap of correct format
                bitmap = new WriteableBitmap(alignDeltasFloatFrame.Width, alignDeltasFloatFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);

                // Set bitmap as source to UI image object
                image.Source = bitmap;
            }

            alignDeltasFloatFrame.CopyPixelDataTo(this.deltaFromReferenceFrameFloatPixels);

            Parallel.For(
            0,
            alignDeltasFloatFrame.Height,
            y =>
            {
                int index = y * alignDeltasFloatFrame.Width;
                for (int x = 0; x < alignDeltasFloatFrame.Width; ++x, ++index)
                {
                    float residue = this.deltaFromReferenceFrameFloatPixels[index];

                    if (residue < 1.0f)
                    {
                        this.deltaFromReferenceFramePixels[index] = (byte)(255.0f * ClampFloat(1.0f - residue, 0.0f, 1.0f)); // blue
                        this.deltaFromReferenceFramePixels[index] |= ((byte)(255.0f * ClampFloat(1.0f - Math.Abs(residue), 0.0f, 1.0f))) << 8; // green
                        this.deltaFromReferenceFramePixels[index] |= ((byte)(255.0f * ClampFloat(1.0f + residue, 0.0f, 1.0f))) << 16; // red
                    }
                    else
                    {
                        this.deltaFromReferenceFramePixels[index] = 0;
                    }
                }
            });

            // Copy colored pixels to bitmap
            bitmap.WritePixels(
                        new Int32Rect(0, 0, alignDeltasFloatFrame.Width, alignDeltasFloatFrame.Height),
                        this.deltaFromReferenceFramePixels,
                        bitmap.PixelWidth * sizeof(int),
                        0);
        }
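The ClampFloat helper called above is not included in this snippet; a minimal implementation consistent with how it is used might look like the following.

        /// <summary>
        /// Clamp a float value to the [min, max] range (assumed helper, not part of the original snippet).
        /// </summary>
        private static float ClampFloat(float value, float min, float max)
        {
            return value < min ? min : (value > max ? max : value);
        }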