Example #1
        /// <summary>
        /// Calculate a point cloud by raycasting into the reconstruction volume, returning the point
        /// cloud containing 3D points and normals of the zero-crossing dense surface at every visible
        /// pixel in the image from the given camera pose.
        /// This point cloud can be used as a reference frame in the next call to
        /// FusionDepthProcessor.AlignPointClouds, or passed to FusionDepthProcessor.ShadePointCloud
        /// to produce a visible image output.
        /// The <paramref name="pointCloudFrame"/> can be an arbitrary image size, for example, enabling
        /// you to calculate point clouds at the size of your window and then create a visible image by
        /// calling FusionDepthProcessor.ShadePointCloud and render this image, however, be aware that
        /// large images will be expensive to calculate.
        /// </summary>
        /// <param name="pointCloudFrame">
        /// The pre-allocated point cloud frame, to be filled by raycasting into the reconstruction volume.
        /// Typically used as the reference frame with the FusionDepthProcessor.AlignPointClouds function
        /// or for visualization by calling FusionDepthProcessor.ShadePointCloud.
        /// </param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) to raycast from.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the call failed for an unknown reason.
        /// </exception>
        public void CalculatePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.CalculatePointCloud(
                                              FusionImageFrame.ToHandleRef(pointCloudFrame),
                                              ref worldToCameraTransform));
        }
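
A minimal per-frame usage sketch for the method above. The field names (volume, pointCloudBuffer, shadedSurfaceColorFrame, worldToCameraTransform) are assumptions borrowed from the initialization examples later in this section:

        private void RaycastForDisplay()
        {
            // Raycast from the current camera pose to get surface points and normals
            this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);

            // Shade the raycast cloud into a color image for display; the same buffer
            // can also serve as the reference frame for the next AlignPointClouds call.
            FusionDepthProcessor.ShadePointCloud(
                this.pointCloudBuffer,
                this.worldToCameraTransform,
                this.shadedSurfaceColorFrame,
                null); // pass null to skip the normals image
        }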
Example #2
        /// <summary>
        /// Create a visible color shaded image of a point cloud and its normals. All image
        /// frames must have the same width and height.
        /// </summary>
        /// <param name="pointCloudFrame">The point cloud frame to be shaded.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) where the raycast was performed from.
        /// Pass identity if the point cloud did not originate from a raycast and is in the
        /// camera local coordinate system.
        /// </param>
        /// <param name="shadedSurfaceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color L.N shaded
        /// surface image. Pass null to skip this image.
        /// </param>
        /// <param name="shadedSurfaceNormalsFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color shaded
        /// normals image with color indicating orientation. Pass null to skip this image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void ShadePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform,
            FusionColorImageFrame shadedSurfaceFrame,
            FusionColorImageFrame shadedSurfaceNormalsFrame)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionShadePointCloud2(
                                              FusionImageFrame.ToHandleRef(pointCloudFrame),
                                              ref worldToCameraTransform,
                                              IntPtr.Zero,
                                              FusionImageFrame.ToHandleRef(shadedSurfaceFrame),
                                              FusionImageFrame.ToHandleRef(shadedSurfaceNormalsFrame)));
        }
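
To get the shaded output on screen in WPF, the color frame can be copied into a plain int[] and written to a WriteableBitmap. A sketch, assuming colorPixels and colorBitmap were allocated at the same size as shadedSurfaceFrame (as in the WindowLoaded example later in this section):

            // Copy the shaded surface into the pixel buffer and push it to the bitmap
            shadedSurfaceFrame.CopyPixelDataTo(colorPixels);
            colorBitmap.WritePixels(
                new Int32Rect(0, 0, colorBitmap.PixelWidth, colorBitmap.PixelHeight),
                colorPixels,
                colorBitmap.PixelWidth * sizeof(int),
                0);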
Example #3
        /// <summary>
        /// Construct an oriented point cloud in the local camera frame of reference from a depth float
        /// image frame. Here we calculate the 3D position of each depth float pixel with the optical
        /// center of the camera as the origin. Both images must be the same size and have the same camera
        /// parameters. We use a right-hand coordinate system, and (in common with bitmap images with a
        /// top-left origin) +X is to the right, +Y is down, and +Z is forward from the Kinect camera into
        /// the scene, as though looking into the scene from behind the Kinect camera.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be converted.</param>
        /// <param name="pointCloudFrame">
        /// A pre-allocated point cloud frame, to be filled with 3D points and normals.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or the <paramref name="pointCloudFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="pointCloudFrame"/>
        /// parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthFloatFrameToPointCloud(
            FusionFloatImageFrame depthFloatFrame,
            FusionPointCloudImageFrame pointCloudFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthFloatFrameToPointCloud(
                                              FusionImageFrame.ToHandleRef(depthFloatFrame),
                                              FusionImageFrame.ToHandleRef(pointCloudFrame)));
        }
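
A typical conversion chain feeding this method, assuming a DepthImagePixel[] depthPixels buffer copied from the sensor at 640x480; the clip distances mirror the frame-handler example later in this section:

            var depthFloatFrame = new FusionFloatImageFrame(640, 480);
            var pointCloudFrame = new FusionPointCloudImageFrame(640, 480);

            // Convert raw depth to a float frame, clipping to [DefaultMinimumDepth, 2.5m]
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels, 640, 480, depthFloatFrame,
                FusionDepthProcessor.DefaultMinimumDepth, 2.5f, false);

            // Back-project each depth pixel into an oriented camera-space point cloud
            FusionDepthProcessor.DepthFloatFrameToPointCloud(depthFloatFrame, pointCloudFrame);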
Example #4
        /// <summary>
        /// The AlignPointClouds function uses an iterative algorithm to align two sets of oriented
        /// point clouds and calculate the camera's relative pose. This is a generic function which
        /// can be used independently of a Reconstruction Volume with sets of overlapping point clouds.
        /// To find the frame-to-frame relative transformation between two sets of point clouds in
        /// the camera local frame of reference (created by DepthFloatFrameToPointCloud),
        /// set the <paramref name="observedToReferenceTransform"/> to the identity.
        /// To calculate the pose transformation between new depth frames and an existing
        /// Reconstruction volume, pass in the previous frame's point cloud from RenderReconstruction as
        /// the reference frame, and the current frame's point cloud (from DepthFloatFrameToPointCloud)
        /// as the observed frame. Set the <paramref name="observedToReferenceTransform"/> to the
        /// previous frame's calculated camera pose.
        /// Note that here the current frame point cloud will be in the camera local frame of
        /// reference, whereas the synthetic points and normals will be in the global/world volume
        /// coordinate system. By passing the <paramref name="observedToReferenceTransform"/> you
        /// make the algorithm aware of the transformation between them.
        /// The <paramref name="observedToReferenceTransform"/> pose supplied can also take into
        /// account information you may have from other sensors or sensing mechanisms to aid the
        /// tracking. To do this, multiply the relative frame-to-frame delta transformation from
        /// the other sensing system with the previous frame's pose before passing it to this function.
        /// Note that any delta transform used should be in the same coordinate system as that
        /// returned by the DepthFloatFrameToPointCloud calculation.
        /// </summary>
        /// <param name="referencePointCloudFrame">
        /// The point cloud frame of the reference camera, or the previous Kinect point cloud frame.
        /// </param>
        /// <param name="observedPointCloudFrame">
        /// The point cloud frame of the observed camera, or the current Kinect frame.
        /// </param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run. The minimum value is 1.
        /// Using only a small number of iterations will have a faster runtime; however, the
        /// algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. Values vary depending on whether the pixel was a valid pixel
        /// used in tracking (green) or failed in different tests. Pass null if not required.
        /// </param>
        /// <param name="observedToReferenceTransform">
        /// A pre-allocated transformation matrix. At entry to the function this should be filled
        /// with the best guess for the observed to reference transform (usually the last frame's
        /// calculated pose). At exit this is filled with the calculated pose, or identity if the
        /// calculation failed.
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input point clouds and could not calculate a valid transformation, and
        /// the <paramref name="observedToReferenceTransform"/> parameter is set to identity.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or the
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            ref Matrix4 observedToReferenceTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = NativeMethods.NuiFusionAlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                ref observedToReferenceTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
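
A frame-to-frame tracking sketch for the first usage pattern described above: both clouds come from DepthFloatFrameToPointCloud, so the initial guess is identity. The point cloud fields are assumptions, and composing the resulting delta into a running pose needs your own Matrix4 multiply helper (the SDK's Matrix4 struct has no built-in multiplication):

            Matrix4 frameToFrameDelta = Matrix4.Identity;

            bool tracked = FusionDepthProcessor.AlignPointClouds(
                previousPointCloudFrame,  // assumed field: last frame's camera-space cloud
                currentPointCloudFrame,   // assumed field: this frame's camera-space cloud
                FusionDepthProcessor.DefaultAlignIterationCount,
                null,                     // delta image not needed here
                ref frameToFrameDelta);

            if (tracked)
            {
                // frameToFrameDelta now holds the observed-to-reference transform;
                // compose it with the previous pose to update the running camera pose.
            }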
Example #5
        /// <summary>
        /// The AlignPointClouds function uses an iterative algorithm to align two sets of oriented
        /// point clouds and calculate the camera's relative pose. This is a generic function which
        /// can be used independently of a Reconstruction Volume with sets of overlapping point clouds.
        /// All images must be the same size and have the same camera parameters.
        /// To find the frame-to-frame relative transformation between two sets of point clouds in
        /// the camera local frame of reference (created by DepthFloatFrameToPointCloud),
        /// set the <paramref name="observedToReferenceTransform"/> to the identity.
        /// To calculate the frame-to-model pose transformation between point clouds calculated from 
        /// new depth frames with DepthFloatFrameToPointCloud and point clouds calculated from an 
        /// existing Reconstruction volume with CalculatePointCloud (e.g. from the previous frame),
        /// pass the CalculatePointCloud image as the reference frame, and the current depth frame 
        /// point cloud from DepthFloatFrameToPointCloud as the observed frame. Set the 
        /// <paramref name="observedToReferenceTransform"/> to the previous frames calculated camera
        /// pose that was used in the CalculatePointCloud call.
        /// Note that here the current frame point cloud will be in the camera local frame of
        /// reference, whereas the raycast points and normals will be in the global/world coordinate
        /// system. By passing the <paramref name="observedToReferenceTransform"/> you make the 
        /// algorithm aware of the transformation between the two coordinate systems.
        /// The <paramref name="observedToReferenceTransform"/> pose supplied can also take into
        /// account information you may have from other sensors or sensing mechanisms to aid the
        /// tracking. To do this, multiply the relative frame-to-frame delta transformation from
        /// the other sensing system with the previous frame's pose before passing it to this function.
        /// Note that any delta transform used should be in the same coordinate system as that
        /// returned by the DepthFloatFrameToPointCloud calculation.
        /// </summary>
        /// <param name="referencePointCloudFrame">
        /// The point cloud frame of the reference camera, or the previous Kinect point cloud frame.
        /// </param>
        /// <param name="observedPointCloudFrame">
        /// The point cloud frame of the observed camera, or the current Kinect frame.
        /// </param>
        /// <param name="maxAlignIterationCount">
        /// The maximum number of iterations of the algorithm to run. The minimum value is 1.
        /// Using only a small number of iterations will have a faster runtime; however, the
        /// algorithm may not converge to the correct transformation.
        /// </param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. This may be used as input to additional vision algorithms such as
        /// object segmentation. Values vary depending on whether the pixel was a valid pixel used in
        /// tracking (inlier) or failed in different tests (outlier). 0xff000000 indicates an invalid 
        /// input vertex (e.g. from 0 input depth), or one where no correspondences occur between point
        /// cloud images. Outlier vertices rejected due to too large a distance between vertices are 
        /// coded as 0xff008000. Outlier vertices rejected due to too large a difference in normal angle
        /// between point clouds are coded as 0xff800000. Inliers are color shaded depending on the 
        /// residual energy at that point, with more saturated colors indicating more discrepancy
        /// between vertices and less saturated colors (i.e. more white) representing less discrepancy,
        /// or less information at that pixel. Pass null if this image is not required.
        /// </param>
        /// <param name="observedToReferenceTransform">
        /// A pre-allocated transformation matrix. At entry to the function this should be filled
        /// with the best guess for the observed to reference transform (usually the last frame's
        /// calculated pose). At exit this is filled with the calculated pose, or identity if the
        /// calculation failed.
        /// </param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input point clouds and could not calculate a valid transformation, and
        /// the <paramref name="observedToReferenceTransform"/> parameter is set to identity.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or the
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or <paramref name="observedPointCloudFrame"/>
        /// or <paramref name="deltaFromReferenceFrame"/> parameters have different camera parameters.
        /// Thrown when the <paramref name="maxAlignIterationCount"/> parameter is less than 1.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            ref Matrix4 observedToReferenceTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort maxIterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = NativeMethods.NuiFusionAlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                maxIterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                ref observedToReferenceTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
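
The outlier codes documented above can be decoded directly from the delta image. A sketch, assuming deltaPixels is an int[] sized to deltaFromReferenceFrame:

            deltaFromReferenceFrame.CopyPixelDataTo(deltaPixels);

            int invalid = 0, distanceOutliers = 0, normalOutliers = 0;
            foreach (int pixel in deltaPixels)
            {
                switch (unchecked((uint)pixel))
                {
                    case 0xff000000: invalid++; break;          // no input vertex or no correspondence
                    case 0xff008000: distanceOutliers++; break; // rejected: vertex distance too large
                    case 0xff800000: normalOutliers++; break;   // rejected: normal angle too large
                    default: break;                             // inlier, shaded by residual energy
                }
            }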
Example #6
        /// <summary>
        /// Create a visible color shaded image of a point cloud and its normals with simple
        /// grayscale L.N surface shading. All image frames must have the same width and height.
        /// </summary>
        /// <param name="pointCloudFrame">The point cloud frame to be shaded.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) where the raycast was performed from.
        /// Pass identity if the point cloud did not originate from a raycast and is in the
        /// camera local coordinate system.
        /// </param>
        /// <param name="shadedSurfaceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the grayscale L.N 
        /// shaded surface image. Pass null to skip this image.
        /// </param>
        /// <param name="shadedSurfaceNormalsFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with the color shaded
        /// normals image with color indicating orientation. Pass null to skip this image.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters are different image sizes.
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="shadedSurfaceFrame"/>
        /// or <paramref name="shadedSurfaceNormalsFrame"/> parameters have different camera parameters.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void ShadePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            Matrix4 worldToCameraTransform,
            FusionColorImageFrame shadedSurfaceFrame,
            FusionColorImageFrame shadedSurfaceNormalsFrame)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionShadePointCloud2(
                FusionImageFrame.ToHandleRef(pointCloudFrame),
                ref worldToCameraTransform,
                IntPtr.Zero,
                FusionImageFrame.ToHandleRef(shadedSurfaceFrame),
                FusionImageFrame.ToHandleRef(shadedSurfaceNormalsFrame)));
        }
Example #7
        /// <summary>
        /// Construct an oriented point cloud in the local camera frame of reference from a depth float
        /// image frame. Here we calculate the 3D position of each depth float pixel with the optical
        /// center of the camera as the origin. We use a right-hand coordinate system, and (in common 
        /// with bitmap images with top left origin) +X is to the right, +Y down, and +Z is now forward
        /// from the Kinect camera into the scene, as though looking into the scene from behind the 
        /// Kinect camera. Both images must be the same size and have the same camera parameters.
        /// </summary>
        /// <param name="depthFloatFrame">The depth float frame to be converted.</param>
        /// <param name="pointCloudFrame">
        /// A pre-allocated point cloud frame, to be filled with 3D points and normals.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or the <paramref name="pointCloudFrame"/>
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="depthFloatFrame"/> or <paramref name="pointCloudFrame"/>
        /// parameters are different image sizes.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown if a CPU memory allocation failed.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected,
        /// a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthFloatFrameToPointCloud(
            FusionFloatImageFrame depthFloatFrame,
            FusionPointCloudImageFrame pointCloudFrame)
        {
            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthFloatFrameToPointCloud(
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                FusionImageFrame.ToHandleRef(pointCloudFrame)));
        }
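
For illustration only, the per-pixel back-projection this method performs looks like the following under the coordinate convention above. The intrinsics here are hypothetical nominal values; the SDK uses the actual camera parameters internally:

        // Illustrative math, not the SDK implementation; CameraPoint3 is a hypothetical type.
        struct CameraPoint3 { public float X, Y, Z; }

        static CameraPoint3 PixelToCameraSpace(
            int u, int v, float depthMeters,
            float fx = 571.26f, float fy = 571.26f, // hypothetical focal lengths (pixels)
            float cx = 320f, float cy = 240f)       // hypothetical principal point (640x480)
        {
            return new CameraPoint3
            {
                X = (u - cx) * depthMeters / fx, // +X to the right
                Y = (v - cy) * depthMeters / fy, // +Y down (top-left image origin)
                Z = depthMeters                  // +Z forward into the scene
            };
        }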
Example #8
        private void InitFusion()
        {
            if (_isFusionInitialized)
                return;

            _currentFormat = new KinectFormat();

            _currentFormat.DepthImageFormat = DepthImageFormat.Undefined;
            _currentFormat.ColorImageFormat = ColorImageFormat.Undefined;

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var colorFormat = KinectSensor.ColorStream.Format;
            var kinectFormat = new KinectFormat();
            kinectFormat.DepthImageFormat = depthFormat;
            kinectFormat.ColorImageFormat = colorFormat;

            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool<FusionWorkItem, KinectFormat>(5, kinectFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue<FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            IntegratingColor = false;
            _audioManager.Start();
        }
Example #9
        private void InitializeKinectFusion()
        {
            // Initialize Kinect Fusion
            var volParam = new ReconstructionParameters( VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ );
            volume = Reconstruction.FusionCreateReconstruction( volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity );

            // Create the conversion buffers
            depthFloatBuffer = new FusionFloatImageFrame( DepthWidth, DepthHeight );
            pointCloudBuffer = new FusionPointCloudImageFrame( DepthWidth, DepthHeight );
            shadedSurfaceColorFrame = new FusionColorImageFrame( DepthWidth, DepthHeight );

            // Reset the reconstruction
            volume.ResetReconstruction( Matrix4.Identity );
        }
Example #10
        protected virtual void Dispose( bool disposing )
        {
            if ( !disposed ) {
                if ( depthFloatBuffer != null ) {
                    depthFloatBuffer.Dispose();
                    depthFloatBuffer = null;
                }

                if ( pointCloudBuffer != null ) {
                    pointCloudBuffer.Dispose();
                    pointCloudBuffer = null;
                }

                if ( shadedSurfaceColorFrame != null ) {
                    shadedSurfaceColorFrame.Dispose();
                    shadedSurfaceColorFrame = null;
                }

                if ( volume != null ) {
                    volume.Dispose();
                    volume = null;
                }

                disposed = true;
            }
        }
Example #11
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            this.sensor.ColorFrameReady += this.kinect_colorframe_ready;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range = DepthRange.Near;
                checkBoxNearMode.IsChecked = true;
            }
            catch
            {
                // device not near mode capable
            }

            // Initialize and start the FPS timer
            this.fpsTimer = new DispatcherTimer();
            this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            // Reset the reconstruction
            this.ResetReconstruction();
        }
Example #12
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter = this.FInVPM[0];

            if (this.FTextureOutput[0] == null) { this.FTextureOutput[0] = new DX11Resource<DX11DynamicTexture2D>(); }
            if (this.FPCOut[0] == null) { this.FPCOut[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }
            if (this.FGeomOut[0] == null) { this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FOutVoxels[0] == null) { this.FOutVoxels[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }

            if (this.FInExport[0]) { this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

                    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                    this.worldToCameraTransform = Matrix4.Identity;

                    //this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);
                    this.colorVolume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);

                    //this.volume.
                    /*FusionPointCloudImageFrame pc;
                    pc.*/

                    this.defaultWorldToVolumeTransform = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);

                    this.ResetReconstruction();
                }
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset) { this.ResetReconstruction(); }
            }
        }
Example #13
        // When both color and depth frames are ready
        private void onFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            // Only get the frames when we're done processing the previous one, to prevent
            // frame callbacks from piling up
            if (frameReady)
            {
                Monitor.Enter(frameLock);

                // Enter a context with both the depth and color frames
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
                {
                    // Frames can be null if they arrived out of sync; skip this callback
                    if (depthFrame == null || colorFrame == null)
                    {
                        Monitor.Exit(frameLock);
                        return;
                    }

                    // Lock resources and prevent further frame callbacks
                    frameReady = false;

                    // Init
                    DepthImagePixel[] depth = new DepthImagePixel[depthFrame.PixelDataLength];

                    // Obtain raw data from frames (color is a pre-allocated byte[] field)
                    depthFrame.CopyDepthImagePixelDataTo(depth);
                    colorFrame.CopyPixelDataTo(color);

                    // Construct into a FusionFloatImageFrame
                    FusionFloatImageFrame frame = new FusionFloatImageFrame(640, 480);
                    FusionFloatImageFrame smoothDepthFloatFrame = new FusionFloatImageFrame(640, 480);
                    FusionDepthProcessor.DepthToDepthFloatFrame(depth, 640, 480, frame, FusionDepthProcessor.DefaultMinimumDepth, 2.5f, false);

                    this.volume.SmoothDepthFloatFrame(frame, smoothDepthFloatFrame, 1, .04f);

                    /*
                    byte[] pixelData = new byte[4 * 640 * 480];

                    colorFrame.CopyPixelDataTo(pixelData);

                    int[] intPixelData = new int[640 * 480];
                    Buffer.BlockCopy(pixelData, 0, intPixelData, 0, 640 * 480 * sizeof(int));
                    FusionColorImageFrame imageFrame = new FusionColorImageFrame(640, 480);
                    imageFrame.CopyPixelDataFrom(intPixelData);

                    bool success = FindCameraPoseAlignPointClouds(frame, imageFrame);

                    Matrix4 t = worldToCameraTransform;
                    Console.WriteLine("{0} {1} {2} {3}", t.M11, t.M12, t.M13, t.M14);
                    Console.WriteLine("{0} {1} {2} {3}", t.M21, t.M22, t.M23, t.M24);
                    Console.WriteLine("{0} {1} {2} {3}", t.M31, t.M32, t.M33, t.M34);
                    Console.WriteLine("{0} {1} {2} {3}", t.M41, t.M42, t.M43, t.M44);
                    if (success)
                    {
                        currentMatrix = t;
                    }*/

                    // Calculate point cloud
                    FusionPointCloudImageFrame observedPointCloud = new FusionPointCloudImageFrame(640, 480);
                    FusionDepthProcessor.DepthFloatFrameToPointCloud(smoothDepthFloatFrame, observedPointCloud);

                    FusionPointCloudImageFrame imgFrame = new FusionPointCloudImageFrame(640, 480);
                    volume.CalculatePointCloud(imgFrame, currentMatrix);
                    Matrix4 t = currentMatrix;
                    float alignmentEnergy = 0;
                    bool success = volume.AlignPointClouds(imgFrame, observedPointCloud, 100, null, out alignmentEnergy, ref t);

                    Console.WriteLine(success);
                    Console.WriteLine("{0}", alignmentEnergy);
                    Console.WriteLine("{0} {1} {2} {3}", t.M11, t.M12, t.M13, t.M14);
                    Console.WriteLine("{0} {1} {2} {3}", t.M21, t.M22, t.M23, t.M24);
                    Console.WriteLine("{0} {1} {2} {3}", t.M31, t.M32, t.M33, t.M34);
                    Console.WriteLine("{0} {1} {2} {3}", t.M41, t.M42, t.M43, t.M44);
                    if (success)
                    {
                        currentMatrix = t;
                    }

                    frame.Dispose();
                    smoothDepthFloatFrame.Dispose();
                    observedPointCloud.Dispose();
                    imgFrame.Dispose();

                    // Release resources, now ready for next callback
                    frameReady = true;
                }

                Monitor.Exit(frameLock);
            }
        }
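
The per-callback allocations and manual Dispose calls above leak the frames if an exception is thrown mid-callback. A hedged alternative wraps them in using blocks; better still, allocate the frames once as fields, as the AllocateFrames example at the end of this section does:

            using (var frame = new FusionFloatImageFrame(640, 480))
            using (var smoothed = new FusionFloatImageFrame(640, 480))
            using (var observed = new FusionPointCloudImageFrame(640, 480))
            using (var raycast = new FusionPointCloudImageFrame(640, 480))
            {
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depth, 640, 480, frame, FusionDepthProcessor.DefaultMinimumDepth, 2.5f, false);
                volume.SmoothDepthFloatFrame(frame, smoothed, 1, .04f);
                FusionDepthProcessor.DepthFloatFrameToPointCloud(smoothed, observed);
                volume.CalculatePointCloud(raycast, currentMatrix);
                // ... align and update currentMatrix as above ...
            } // frames are disposed even if an exception is thrown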
Example #14
        /// <summary>
        /// The AlignPointClouds function uses an iterative algorithm running on the GPU to align two
        /// sets of overlapping oriented point clouds and calculate the camera's relative pose.
        /// All images must be the same size and have the same camera parameters. 
        /// </summary>
        /// <param name="referencePointCloudFrame">A reference point cloud frame.</param>
        /// <param name="observedPointCloudFrame">An observed point cloud frame.</param>
        /// <param name="maxAlignIterationCount">The maximum number of iterations of the algorithm to run.</param>
        /// <param name="deltaFromReferenceFrame">
        /// Optionally, a pre-allocated color image frame, to be filled with color-coded data
        /// from the camera tracking. This may be used as input to additional vision algorithms such as
        /// object segmentation. Values vary depending on whether the pixel was a valid pixel used in
        /// tracking (inlier) or failed in different tests (outlier). 0xff000000 indicates an invalid 
        /// input vertex (e.g. from 0 input depth), or one where no correspondences occur between point
        /// cloud images. Outlier vertices rejected due to too large a distance between vertices are 
        /// coded as 0xff008000. Outlier vertices rejected due to too large a difference in normal angle
        /// between point clouds are coded as 0xff800000. Inliers are color shaded depending on the 
        /// residual energy at that point, with more saturated colors indicating more discrepancy
        /// between vertices and less saturated colors (i.e. more white) representing less discrepancy,
        /// or less information at that pixel. Pass null if this image is not required.
        /// </param>
        /// <param name="alignmentEnergy">A value describing
        /// how well the observed frame aligns to the model with the calculated pose (mean distance between
        /// matching points in the point clouds). A larger magnitude value represent more discrepancy, and 
        /// a lower value represent less discrepancy. Note that it is unlikely an exact 0 (perfect alignment) 
        /// value will ever/ be returned as every frame from the sensor will contain some sensor noise. 
        /// Pass NULL to ignore this parameter.</param>
        /// <param name="referenceToObservedTransform">The initial guess at the transform. This is 
        /// updated on tracking success, or returned as identity on failure.</param>
        /// <returns>
        /// Returns true if successful; returns false if the algorithm encountered a problem aligning
        /// the input point clouds and could not calculate a valid transformation.
        /// </returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or 
        /// <paramref name="observedPointCloudFrame"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="referencePointCloudFrame"/> or
        /// <paramref name="observedPointCloudFrame"/> or <paramref name="deltaFromReferenceFrame"/>
        /// parameter is an incorrect image size, or the iterations parameter is not greater than 0.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        public bool AlignPointClouds(
            FusionPointCloudImageFrame referencePointCloudFrame,
            FusionPointCloudImageFrame observedPointCloudFrame,
            int maxAlignIterationCount,
            FusionColorImageFrame deltaFromReferenceFrame,
            out float alignmentEnergy,
            ref Matrix4 referenceToObservedTransform)
        {
            if (null == referencePointCloudFrame)
            {
                throw new ArgumentNullException("referencePointCloudFrame");
            }

            if (null == observedPointCloudFrame)
            {
                throw new ArgumentNullException("observedPointCloudFrame");
            }

            ushort iterations = ExceptionHelper.CastAndThrowIfOutOfUshortRange(maxAlignIterationCount);

            HRESULT hr = volume.AlignPointClouds(
                FusionImageFrame.ToHandleRef(referencePointCloudFrame),
                FusionImageFrame.ToHandleRef(observedPointCloudFrame),
                iterations,
                FusionImageFrame.ToHandleRef(deltaFromReferenceFrame),
                out alignmentEnergy,
                ref referenceToObservedTransform);

            if (hr == HRESULT.E_NUI_FUSION_TRACKING_ERROR)
            {
                return false;
            }
            else
            {
                ExceptionHelper.ThrowIfFailed(hr);
            }

            return true;
        }
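
Because alignmentEnergy is the mean distance between matched points, it can back a tracking-quality gate. A sketch; the threshold is illustrative, not an SDK constant, and should be tuned per scene:

            const float MaxAlignmentEnergy = 0.005f; // hypothetical cutoff, tune per scene

            float alignmentEnergy;
            Matrix4 referenceToObservedTransform = worldToCameraTransform; // seed with last pose (assumed field)
            bool tracked = volume.AlignPointClouds(
                referencePointCloudFrame, observedPointCloudFrame,
                FusionDepthProcessor.DefaultAlignIterationCount,
                null, out alignmentEnergy, ref referenceToObservedTransform);

            if (!tracked || alignmentEnergy > MaxAlignmentEnergy)
            {
                // Treat as tracking failure: keep the previous pose, and consider
                // re-localizing or resetting the reconstruction after repeated failures.
            }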
Example #15
        /// <summary>
        /// Calculate a point cloud by raycasting into the reconstruction volume, returning the point
        /// cloud containing 3D points and normals of the zero-crossing dense surface at every visible
        /// pixel in the image from the given camera pose, and optionally the color visualization image.
        /// This point cloud can be used as a reference frame in the next call to
        /// FusionDepthProcessor.AlignPointClouds, or passed to FusionDepthProcessor.ShadePointCloud
        /// to produce a visible image output.
        /// The <paramref name="pointCloudFrame"/> can be an arbitrary image size, for example, enabling
        /// you to calculate point clouds at the size of your window and then create a visible image by
        /// calling FusionDepthProcessor.ShadePointCloud and render this image, however, be aware that 
        /// large images will be expensive to calculate.
        /// </summary>
        /// <param name="pointCloudFrame">
        /// The pre-allocated point cloud frame, to be filled by raycasting into the reconstruction volume.
        /// Typically used as the reference frame with the FusionDepthProcessor.AlignPointClouds function
        /// or for visualization by calling FusionDepthProcessor.ShadePointCloud.
        /// </param>
        /// <param name="colorFrame">Optionally, the color frame to fill. Pass null to ignore.</param>
        /// <param name="worldToCameraTransform">
        /// The world to camera transform (camera pose) to raycast from.
        /// </param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> parameter is null. </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the call failed for an unknown reason.
        /// </exception>
        public void CalculatePointCloud(
            FusionPointCloudImageFrame pointCloudFrame,
            FusionColorImageFrame colorFrame,
            Matrix4 worldToCameraTransform)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            if (null == colorFrame)
            {
                throw new ArgumentNullException("colorFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.CalculatePointCloud(
                FusionImageFrame.ToHandleRef(pointCloudFrame),
                FusionImageFrame.ToHandleRef(colorFrame),
                ref worldToCameraTransform));
        }
Example #16
        /// <summary>
        /// Calculate a point cloud and depth image by raycasting into the reconstruction volume. 
        /// This returns the point cloud containing 3D points and normals of the zero-crossing dense 
        /// surface at every visible pixel in the image from the given camera pose, along with the
        /// depth to the surface.
        /// </summary>
        /// <param name="pointCloudFrame">A  point cloud frame, to be filled by raycasting into the 
        /// reconstruction volume. Typically used as the reference frame with the 
        /// FusionDepthProcessor.AlignPointClouds function or for visualization by calling 
        /// FusionDepthProcessor.ShadePointCloud.</param>
        /// <param name="depthFloatFrame">A floating point depth frame, to be filled with floating point
        /// depth in meters to the raycast surface. This image must be identical
        /// in size, and camera parameters to the <paramref name="pointCloudFrame"/> parameter.</param>
        /// <param name="worldToCameraTransform">The world-to-camera transform (camera pose) to raycast
        /// from.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or <paramref name="depthFloatFrame"/> 
        /// parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="pointCloudFrame"/> or<paramref name="depthFloatFrame"/>
        /// parameter is an incorrect image size.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not connected
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// This point cloud can then be used as a reference frame in the next call to 
        /// FusionDepthProcessor.AlignPointClouds, or passed to FusionDepthProcessor.ShadePointCloud
        /// to produce a visible image output. The depth image can be used as a reference frame for
        /// AlignDepthFloatToReconstruction by calling SetAlignDepthFloatToReconstructionReferenceFrame 
        /// to enable a greater range of tracking. The <paramref name="pointCloudFrame"/> and 
        /// <paramref name="depthFloatFrame"/> parameters can be an arbitrary image size, for example, 
        /// enabling you to calculate point clouds at the size of your UI window, create a visible image
        /// by calling FusionDepthProcessor.ShadePointCloud, and render that image. However, be aware
        /// that the calculation of high resolution images will be expensive in terms of runtime.
        /// </remarks>
        public void CalculatePointCloudAndDepth(
            FusionPointCloudImageFrame pointCloudFrame,
            FusionFloatImageFrame depthFloatFrame,
            Matrix4 worldToCameraTransform)
        {
            if (null == pointCloudFrame)
            {
                throw new ArgumentNullException("pointCloudFrame");
            }

            if (null == depthFloatFrame)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            ExceptionHelper.ThrowIfFailed(volume.CalculatePointCloudAndDepth(
                FusionImageFrame.ToHandleRef(pointCloudFrame),
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                ref worldToCameraTransform));
        }
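
A sketch of the usage described in the remarks above: raycast both outputs, then register the raycast depth as the reference frame for AlignDepthFloatToReconstruction. The frame fields are assumptions, and the single-argument SetAlignDepthFloatToReconstructionReferenceFrame call is inferred from the remarks:

            volume.CalculatePointCloudAndDepth(
                raycastPointCloudFrame, raycastDepthFloatFrame, worldToCameraTransform);

            // Use the raycast depth as the tracking reference to extend the tracking range
            volume.SetAlignDepthFloatToReconstructionReferenceFrame(raycastDepthFloatFrame);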
Example #17
        /// <summary>
        /// Allocate the frame buffers used in the process
        /// </summary>
        private void AllocateFrames()
        {
            // Allocate depth float frame
            if (null == this.depthFloatFrame || this.width != this.depthFloatFrame.Width || this.height != this.depthFloatFrame.Height)
            {
                this.depthFloatFrame = new FusionFloatImageFrame(this.width, this.height);
            }

            // Allocate delta from reference frame
            if (null == this.deltaFromReferenceFrame || this.width != this.deltaFromReferenceFrame.Width || this.height != this.deltaFromReferenceFrame.Height)
            {
                this.deltaFromReferenceFrame = new FusionFloatImageFrame(this.width, this.height);
            }

            // Allocate point cloud frame
            if (null == this.pointCloudFrame || this.width != this.pointCloudFrame.Width || this.height != this.pointCloudFrame.Height)
            {
                this.pointCloudFrame = new FusionPointCloudImageFrame(this.width, this.height);
            }

            // Allocate shaded surface frame
            if (null == this.shadedSurfaceFrame || this.width != this.shadedSurfaceFrame.Width || this.height != this.shadedSurfaceFrame.Height)
            {
                this.shadedSurfaceFrame = new FusionColorImageFrame(this.width, this.height);
            }

            // Allocate shaded surface normals frame
            if (null == this.shadedSurfaceNormalsFrame || this.width != this.shadedSurfaceNormalsFrame.Width || this.height != this.shadedSurfaceNormalsFrame.Height)
            {
                this.shadedSurfaceNormalsFrame = new FusionColorImageFrame(this.width, this.height);
            }
        }