Code example #1
 public void CreateSmoothDepthCloud(FusionFloatImageFrame downsampledDepthFloatFrame)
 {
     // Smooth the depth frame
     this.volume.SmoothDepthFloatFrame(downsampledDepthFloatFrame, this.DownsampledSmoothDepthFloatFrame, SmoothingKernelWidth, SmoothingDistanceThreshold);
     // Calculate point cloud from the smoothed frame
     FusionDepthProcessor.DepthFloatFrameToPointCloud(this.DownsampledSmoothDepthFloatFrame, DownsampledDepthPointCloudFrame);
 }
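The example above assumes the downsampled frames were allocated elsewhere. A minimal allocation sketch follows; the DownsampleFactor constant, the helper name, and the parameter names are assumptions, not taken from the example.

 // Allocation sketch for the frames used above; names and sizes are assumptions.
 private const int DownsampleFactor = 2;

 public void AllocateDownsampledFrames(int depthWidth, int depthHeight)
 {
     int width  = depthWidth / DownsampleFactor;
     int height = depthHeight / DownsampleFactor;

     // Both frame types take (width, height) constructors in Microsoft.Kinect.Fusion
     this.DownsampledSmoothDepthFloatFrame = new FusionFloatImageFrame(width, height);
     this.DownsampledDepthPointCloudFrame  = new FusionPointCloudImageFrame(width, height);
 }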
Code example #2
        /// <summary>
        /// Check to ensure suitable DirectX11 compatible hardware exists before initializing Kinect Fusion
        /// </summary>
        /// <returns>True if a DirectX11 compatible device was found; otherwise false.</returns>
        private static bool IsHardwareCompatible()
        {
            try
            {
                string deviceDescription;
                string deviceInstancePath;
                int    deviceMemoryKB;
                FusionDepthProcessor.GetDeviceInfo(ReconstructionProcessor.Amp, -1, out deviceDescription, out deviceInstancePath, out deviceMemoryKB);

                return(true);
            }
            catch (IndexOutOfRangeException ex)
            {
                // Thrown when index is out of range for processor type or there is no DirectX11 capable device installed.
                // As we set -1 (auto-select default) for the DeviceToUse above, this indicates that there is no DirectX11
                // capable device. The options for users in this case are to either install a DirectX11 capable device
                // (see documentation for recommended GPUs) or to switch to non-real-time CPU based reconstruction by
                // changing ProcessorType to ReconstructionProcessor.Cpu

                logger.Error("No DirectX11 device detected, or invalid device index", ex);
                return(false);
            }
            catch (DllNotFoundException ex)
            {
                logger.Error("A prerequisite component for Kinect Fusion is missing", ex);
                return(false);
            }
            catch (InvalidOperationException ex)
            {
                logger.Error("Unknown exception", ex);
                return(false);
            }
        }
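The IndexOutOfRangeException comment above points at the CPU fallback option. A minimal sketch of that fallback; the SelectProcessor helper is an assumption, not part of the original sample.

        // Hedged sketch: probe for a DirectX11 device and fall back to CPU
        // reconstruction when none is found. SelectProcessor is an assumed helper.
        private static ReconstructionProcessor SelectProcessor()
        {
            try
            {
                string description, instancePath;
                int memoryKB;
                FusionDepthProcessor.GetDeviceInfo(
                    ReconstructionProcessor.Amp, -1, out description, out instancePath, out memoryKB);
                return ReconstructionProcessor.Amp;
            }
            catch (IndexOutOfRangeException)
            {
                // No DirectX11 capable device; use non-real-time CPU reconstruction instead
                return ReconstructionProcessor.Cpu;
            }
        }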
Code example #3
File: VolumeBuilder.cs Project: git-thinh/Arcinect
        /// <summary>
        /// Track camera pose using AlignPointClouds
        /// </summary>
        /// <param name="calculateDeltaFrame">A flag to indicate it is time to calculate the delta frame.</param>
        /// <param name="calculatedCameraPose">The calculated camera pose.</param>
        /// <returns>Returns true if tracking succeeded, false otherwise.</returns>
        private bool TrackCameraAlignPointClouds(ref bool calculateDeltaFrame, ref Matrix4 calculatedCameraPose)
        {
            var trackingSucceeded = false;

            DownsampleDepthFrameNearestNeighbor();

            // Smooth the depth frame
            this.volume.SmoothDepthFloatFrame(this.downsampledDepthFloatFrame, this.downsampledSmoothDepthFloatFrame, settings.SmoothingKernelWidth, settings.SmoothingDistanceThreshold);

            // Calculate point cloud from the smoothed frame
            FusionDepthProcessor.DepthFloatFrameToPointCloud(this.downsampledSmoothDepthFloatFrame, this.downsampledDepthPointCloudFrame);

            // Get the saved pose view by raycasting the volume from the current camera pose
            this.volume.CalculatePointCloud(this.downsampledRaycastPointCloudFrame, calculatedCameraPose);

            var initialPose = calculatedCameraPose;

            // Note that here we only calculate the deltaFromReferenceFrame every
            // DeltaFrameCalculationInterval frames to reduce computation time
            if (calculateDeltaFrame)
            {
                trackingSucceeded = FusionDepthProcessor.AlignPointClouds(
                    this.downsampledRaycastPointCloudFrame,
                    this.downsampledDepthPointCloudFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    this.downsampledDeltaFromReferenceFrameColorFrame,
                    ref calculatedCameraPose);

                UpsampleColorDeltasFrameNearestNeighbor();

                // Set calculateDeltaFrame to false as we are rendering it here
                calculateDeltaFrame = false;
            }
            else
            {
                // Don't bother getting the residual delta from reference frame to cut computation time
                trackingSucceeded = FusionDepthProcessor.AlignPointClouds(
                    this.downsampledRaycastPointCloudFrame,
                    this.downsampledDepthPointCloudFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    null,
                    ref calculatedCameraPose);
            }

            if (trackingSucceeded)
            {
                trackingSucceeded = MathUtils.CheckTransformChange(
                    initialPose,
                    calculatedCameraPose,
                    settings.MaxTranslationDeltaAlignPointClouds,
                    settings.MaxRotationDeltaAlignPointClouds);
            }

            return(trackingSucceeded);
        }
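MathUtils.CheckTransformChange is not shown in this listing. A hedged sketch of what such a pose-sanity check might look like; the exact math and the use of the Z basis vectors are assumptions (compare KinectFusionHelper.CameraTransformFailed in example #5, which returns the opposite polarity).

        // Hedged sketch: reject a pose whose translation or rotation jumped
        // implausibly far in a single frame. Matrix4 stores translation in M41..M43.
        public static bool CheckTransformChange(
            Matrix4 initial, Matrix4 final, float maxTranslation, float maxRotationDegrees)
        {
            float dx = final.M41 - initial.M41;
            float dy = final.M42 - initial.M42;
            float dz = final.M43 - initial.M43;

            if (Math.Sqrt((dx * dx) + (dy * dy) + (dz * dz)) > maxTranslation)
            {
                return false;
            }

            // Angle between the two rotated Z basis vectors as a rotation-delta proxy
            double dot = (initial.M31 * final.M31) + (initial.M32 * final.M32) + (initial.M33 * final.M33);
            double angleDegrees = Math.Acos(Math.Max(-1.0, Math.Min(1.0, dot))) * (180.0 / Math.PI);

            return angleDegrees <= maxRotationDegrees;
        }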
Code example #4
        private void TrackCamera_AlignPointClouds(ref Matrix4 calculatedCameraPose)
        {
            DownsampleDepthFrameNearestNeighbor(downsampledDepthFloatFrame, DownsampleFactor);
            volume.SmoothDepthFloatFrame(downsampledDepthFloatFrame, downsampledSmoothDepthFloatFrame, 1, 0.04f); //TODO: Magic numbers
            FusionDepthProcessor.DepthFloatFrameToPointCloud(downsampledSmoothDepthFloatFrame, downsampledDepthPointCloudFrame);
            volume.CalculatePointCloud(downsampledRaycastPointCloudFrame, calculatedCameraPose);
            Matrix4 initialPose = calculatedCameraPose;

            FusionDepthProcessor.AlignPointClouds(downsampledRaycastPointCloudFrame, downsampledDepthPointCloudFrame, FusionDepthProcessor.DefaultAlignIterationCount, downsampledDeltaFromReferenceFrameColorFrame, ref calculatedCameraPose);
            UpsampleColorDeltasFrameNearestNeighbor(DownsampleFactor);
            //UpdateAlignDeltas(); //TODO: Necessary?
        }
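DownsampleDepthFrameNearestNeighbor is called in several examples but never shown. A hedged sketch, assuming raw depth in millimeters and an intermediate float buffer; all field names here are assumptions.

        // Hedged sketch: pick one source depth pixel per factor-by-factor block,
        // convert millimeters to meters, and copy the result into the float frame.
        private void DownsampleDepthFrameNearestNeighbor(FusionFloatImageFrame dest, int factor)
        {
            int downWidth  = this.depthWidth / factor;
            int downHeight = this.depthHeight / factor;

            for (int y = 0; y < downHeight; y++)
            {
                int srcRow  = y * factor * this.depthWidth;
                int destRow = y * downWidth;

                for (int x = 0; x < downWidth; x++)
                {
                    ushort depthMm = this.rawDepthPixels[srcRow + (x * factor)];
                    this.downsampledDepthData[destRow + x] = depthMm * 0.001f;
                }
            }

            dest.CopyPixelDataFrom(this.downsampledDepthData);
        }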
Code example #5
File: CameraTracker.cs Project: rexcardan/KinectX
        /// <summary>
        /// Track camera pose using AlignPointClouds
        /// </summary>
        /// <param name="calculateDeltaFrame">A flag to indicate it is time to calculate the delta frame.</param>
        /// <param name="calculatedCameraPose">The calculated camera pose.</param>
        /// <returns>Returns true if tracking succeeded, false otherwise.</returns>
        private bool TrackCameraAlignPointClouds(ushort[] depthPixels, ref bool calculateDeltaFrame, ref Matrix4 calculatedCameraPose)
        {
            bool trackingSucceeded = false;

            //Resample at lower resolution
            var downsampledDepthFloatFrame = engine.Resampler.GetDefaultFloatFrame();

            engine.Resampler.DownsampleDepthFrameNearestNeighbor(downsampledDepthFloatFrame, depthPixels, engine.FusionVolume.MirrorDepth);

            //Smooth
            engine.PointCloudCalculator.CreateSmoothDepthCloud(downsampledDepthFloatFrame);
            //get point cloud for tracking
            engine.PointCloudCalculator.RaycastPointCloud(calculatedCameraPose);

            Matrix4 initialPose = calculatedCameraPose;

            // Note that here we only calculate the deltaFromReferenceFrame every
            // DeltaFrameCalculationInterval frames to reduce computation time
            if (calculateDeltaFrame)
            {
                trackingSucceeded = engine.DeltaCalculator.CalculateDeltaFrame(calculatedCameraPose);
                // Set calculateDeltaFrame to false as we are rendering it here
                calculateDeltaFrame = false;
            }
            else
            {
                // Don't bother getting the residual delta from reference frame to cut computation time
                trackingSucceeded = FusionDepthProcessor.AlignPointClouds(
                    engine.PointCloudCalculator.DownsampledRaycastPointCloudFrame,
                    engine.PointCloudCalculator.DownsampledDepthPointCloudFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    null,
                    ref calculatedCameraPose);
            }

            if (trackingSucceeded)
            {
                bool failed = KinectFusionHelper.CameraTransformFailed(
                    initialPose,
                    calculatedCameraPose,
                    Constants.MaxTranslationDeltaAlignPointClouds,
                    Constants.MaxRotationDeltaAlignPointClouds);

                if (failed)
                {
                    trackingSucceeded = false;
                }
            }

            return(trackingSucceeded);
        }
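In examples #3 and #5 the calculateDeltaFrame flag is expected to be set by the caller. A minimal usage sketch, assuming a frame counter and a DeltaFrameCalculationInterval constant on the caller's side:

            // Hedged usage sketch: only request the (expensive) delta image every
            // DeltaFrameCalculationInterval frames; the callee resets the flag.
            bool calculateDeltaFrame = (this.processedFrameCount % DeltaFrameCalculationInterval) == 0;
            bool tracked = TrackCameraAlignPointClouds(depthPixels, ref calculateDeltaFrame, ref calculatedCameraPose);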
Code example #6
        /// <summary>
        /// Calculates the delta frame and reports whether tracking succeeded
        /// </summary>
        /// <returns>True if tracking succeeded; otherwise false.</returns>
        public bool CalculateDeltaFrame(Matrix4 calculatedCameraPose)
        {
            var pc = engine.PointCloudCalculator;
            var trackingSucceeded = FusionDepthProcessor.AlignPointClouds(
                engine.PointCloudCalculator.DownsampledRaycastPointCloudFrame,
                engine.PointCloudCalculator.DownsampledDepthPointCloudFrame,
                FusionDepthProcessor.DefaultAlignIterationCount,
                DownsampledDeltaFromReferenceFrameColorFrame,
                ref calculatedCameraPose);

            UpsampleColorDeltasFrameNearestNeighbor();
            OnColorDeltaPixelReady(this.DeltaFromReferenceFramePixelsArgb);

            return(trackingSucceeded);
        }
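UpsampleColorDeltasFrameNearestNeighbor, called above and in examples #3 and #4, is also not shown. A hedged sketch that replicates each downsampled ARGB delta pixel into a factor-by-factor block at full resolution; the buffer names are assumptions.

        // Hedged sketch: nearest-neighbor upsample of the delta-from-reference image.
        private void UpsampleColorDeltasFrameNearestNeighbor(int factor)
        {
            int downWidth  = this.depthWidth / factor;
            int downHeight = this.depthHeight / factor;

            // Read back the downsampled delta image once
            this.DownsampledDeltaFromReferenceFrameColorFrame.CopyPixelDataTo(this.downsampledDeltaPixelsArgb);

            for (int y = 0; y < downHeight; y++)
            {
                for (int x = 0; x < downWidth; x++)
                {
                    int color = this.downsampledDeltaPixelsArgb[(y * downWidth) + x];

                    // Write the same color into the corresponding full-resolution block
                    for (int blockY = 0; blockY < factor; blockY++)
                    {
                        int destRow = (((y * factor) + blockY) * this.depthWidth) + (x * factor);

                        for (int blockX = 0; blockX < factor; blockX++)
                        {
                            this.DeltaFromReferenceFramePixelsArgb[destRow + blockX] = color;
                        }
                    }
                }
            }
        }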
Code example #7
File: VolumeBuilder.cs Project: git-thinh/Arcinect
        /// <summary>
        /// Update the volume data.
        /// </summary>
        private void UpdateVolumeData()
        {
            // For capture color
            //this.volume.CalculatePointCloud(this.raycastPointCloudFrame, this.shadedSurfaceFrame, this.worldToCameraTransform);

            this.volume.CalculatePointCloud(this.raycastPointCloudFrame, this.worldToCameraTransform);

            // Shade point cloud frame for rendering
            FusionDepthProcessor.ShadePointCloud(
                this.raycastPointCloudFrame,
                this.worldToCameraTransform,
                this.worldToBGRTransform,
                this.shadedSurfaceFrame, null);

            // Copy pixel data to pixel buffer
            this.shadedSurfaceFrame.CopyPixelDataTo(this.shadedSurfaceFramePixelsArgb);

            this.dispatcher.BeginInvoke((Action)RenderVolumeBitmap);
        }
Code example #8
        private void ProcessFrame(byte bodyIndex)
        {
            try
            {
                RemoveNonBodyPixels(bodyIndex);

                reconstruction.DepthToDepthFloatFrame(rawDepthData, floatDepthFrame,
                                                      MIN_DEPTH, MAX_DEPTH,
                                                      false);

                var aligned = reconstruction.ProcessFrame(
                    floatDepthFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    out alignmentEnergy,
                    worldToCameraTransform);
                if (aligned)
                {
                    syncContext.Post(_ => FrameAligned?.Invoke(this, EventArgs.Empty), null);
                    worldToCameraTransform = reconstruction.GetCurrentWorldToCameraTransform();
                }
            }
            catch (InvalidOperationException)
            {
            }

            try
            {
                reconstruction.CalculatePointCloud(pointCloudFrame, worldToCameraTransform);

                FusionDepthProcessor.ShadePointCloud(pointCloudFrame, worldToCameraTransform, surfaceFrame, null);
                SurfaceBitmap.Access(data => surfaceFrame.CopyPixelDataTo(data));

                syncContext.Post(_ => SurfaceBitmapUpdated?.Invoke(this, EventArgs.Empty), null);
            }
            catch (InvalidOperationException)
            {
            }
        }
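RemoveNonBodyPixels presumably masks the raw depth against body-index data before integration, so only the tracked body enters the volume. A hedged sketch; the field names are assumptions, and 255 is the Kinect v2 "no body" marker.

        // Hedged sketch: zero out depth samples that do not belong to the tracked
        // body so that only that body is integrated into the reconstruction volume.
        private void RemoveNonBodyPixels(byte bodyIndex)
        {
            for (int i = 0; i < this.rawDepthData.Length; i++)
            {
                if (this.bodyIndexData[i] != bodyIndex)
                {
                    this.rawDepthData[i] = 0;
                }
            }
        }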
Code example #9
File: VolumeRenderer.cs Project: rexcardan/KinectX
        /// <summary>
        /// Render the reconstruction
        /// </summary>
        public void RenderReconstruction()
        {
            var fusionVolume = engine.FusionVolume;

            if (null == fusionVolume.Reconstruction || engine.MeshExporter.IsSavingMesh || null == engine.PointCloudCalculator.RaycastPointCloudFrame ||
                null == ShadedSurfaceFrame || null == ShadedSurfaceNormalsFrame)
            {
                return;
            }

            var pc = engine.PointCloudCalculator;

            // If KinectView option has been set, use the worldToCameraTransform, else use the virtualCamera transform
            Matrix4 cameraView = this.KinectView ? fusionVolume.WorldToCameraTransform : RenderWorldToCameraMatrix;

            if (engine.DataIntegrator.CaptureColor)
            {
                fusionVolume.Reconstruction.CalculatePointCloud(pc.RaycastPointCloudFrame, ShadedSurfaceFrame, cameraView);
            }
            else
            {
                fusionVolume.Reconstruction.CalculatePointCloud(pc.RaycastPointCloudFrame, cameraView);

                // Shade point cloud frame for rendering
                FusionDepthProcessor.ShadePointCloud(
                    pc.RaycastPointCloudFrame,
                    cameraView,
                    WorldToBGRTransform,
                    DisplayNormals ? null : ShadedSurfaceFrame,
                    DisplayNormals ? ShadedSurfaceNormalsFrame : null);
            }

            var renderFrame = engine.DataIntegrator.CaptureColor ?
                              ShadedSurfaceFrame : (DisplayNormals ? ShadedSurfaceNormalsFrame : ShadedSurfaceFrame);

            engine.RenderController.RenderReconstruction(renderFrame);
        }
Code example #10
File: KinectFusionNode.cs Project: kratter/dx11-vvvv
        private void ProcessDepthData(ushort[] depthPixels)
        {
            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    this.width,
                    this.height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                float energy;
                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    out energy,
                    this.volume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.FOutSuccess[0] = false;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();
                    Matrix4 sdfPose = this.volume.GetCurrentWorldToVolumeTransform();

                    this.FOutWorldCam[0]   = this.getmat(calculatedCameraPose);
                    this.FOutWorldVoxel[0] = this.getmat(sdfPose);


                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.FOutSuccess[0]         = true;
                }

                // Calculate the point cloud
                this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);
                //this.volume.AlignDepthFloatToReconstruction

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                lock (m_lock)
                {
                    this.shadedSurfaceColorFrame.CopyPixelDataTo(this.pic);

                    this.pointCloudBuffer.CopyPixelDataTo(this.piccloud);

                    this.FInvalidate = true;
                }
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }

            this.processing = false;
        }
Code example #11
File: VolumeBuilder.cs Project: git-thinh/Arcinect
        /// <summary>
        /// Perform camera pose finding when tracking is lost using AlignPointClouds.
        /// This is typically more successful than FindCameraPoseAlignDepthFloatToReconstruction.
        /// </summary>
        /// <returns>Returns true if a valid camera pose was found, otherwise false.</returns>
        private bool FindCameraPoseAlignPointClouds()
        {
            if (!this.cameraPoseFinderAvailable)
            {
                return(false);
            }

            ProcessColorForCameraPoseFinder();

            var matchCandidates = this.cameraPoseFinder.FindCameraPose(
                this.depthFloatFrame,
                this.resampledColorFrame);

            if (matchCandidates == null)
            {
                return(false);
            }

            var poseCount   = matchCandidates.GetPoseCount();
            var minDistance = matchCandidates.CalculateMinimumDistance();

            if (poseCount == 0 || minDistance >= settings.CameraPoseFinderDistanceThresholdReject)
            {
                return(false);
            }

            // Smooth the depth frame
            this.volume.SmoothDepthFloatFrame(this.depthFloatFrame, this.smoothDepthFloatFrame, settings.SmoothingKernelWidth, settings.SmoothingDistanceThreshold);

            // Calculate point cloud from the smoothed frame
            FusionDepthProcessor.DepthFloatFrameToPointCloud(this.smoothDepthFloatFrame, this.depthPointCloudFrame);

            var smallestEnergy = double.MaxValue;
            var smallestEnergyNeighborIndex = -1;

            var bestNeighborIndex      = -1;
            var bestNeighborCameraPose = Matrix4.Identity;

            var bestNeighborAlignmentEnergy = settings.MaxAlignPointCloudsEnergyForSuccess;

            // Run alignment with best matched poseCount (i.e. k nearest neighbors (kNN))
            var maxTests = Math.Min(settings.MaxCameraPoseFinderPoseTests, poseCount);

            var neighbors = matchCandidates.GetMatchPoses();

            for (var n = 0; n < maxTests; n++)
            {
                // Run the camera tracking algorithm with the volume
                // this uses the raycast frame and pose to find a valid camera pose by matching the raycast against the input point cloud
                var poseProposal = neighbors[n];

                // Get the saved pose view by raycasting the volume
                this.volume.CalculatePointCloud(this.raycastPointCloudFrame, poseProposal);

                var success = this.volume.AlignPointClouds(
                    this.raycastPointCloudFrame,
                    this.depthPointCloudFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    this.resampledColorFrame,
                    out this.alignmentEnergy,
                    ref poseProposal);

                var relocSuccess = success && this.alignmentEnergy < bestNeighborAlignmentEnergy && this.alignmentEnergy > settings.MinAlignPointCloudsEnergyForSuccess;

                if (relocSuccess)
                {
                    bestNeighborAlignmentEnergy = this.alignmentEnergy;
                    bestNeighborIndex           = n;

                    // This is after tracking succeeds, so should be a more accurate pose to store...
                    bestNeighborCameraPose = poseProposal;

                    // Update the delta image
                    this.resampledColorFrame.CopyPixelDataTo(this.deltaFromReferenceFramePixelsArgb);
                }

                // Find smallest energy neighbor independent of tracking success
                if (this.alignmentEnergy < smallestEnergy)
                {
                    smallestEnergy = this.alignmentEnergy;
                    smallestEnergyNeighborIndex = n;
                }
            }

            matchCandidates.Dispose();

            // Use the neighbor with the smallest residual alignment energy
            // At the cost of additional processing we could also use kNN+Mean camera pose finding here
            // by calculating the mean pose of the best n matched poses and also testing this to see if the
            // residual alignment energy is less than with kNN.
            if (bestNeighborIndex > -1)
            {
                this.worldToCameraTransform = bestNeighborCameraPose;
                this.SetReferenceFrame(this.worldToCameraTransform);

                // Tracking succeeded!
                this.SetTrackingSucceeded();

                return(true);
            }
            else
            {
                this.worldToCameraTransform = neighbors[smallestEnergyNeighborIndex];
                this.SetReferenceFrame(this.worldToCameraTransform);

                // Camera pose finding failed - return the tracking failed error code
                this.SetTrackingFailed();

                return(false);
            }
        }
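SetReferenceFrame, used in both branches above, appears to follow the pattern from the official Kinect Fusion sample: raycast the volume at the chosen pose and hand the result to AlignDepthFloatToReconstruction as its reference. A hedged sketch; the frame field names are assumptions.

        // Hedged sketch: re-prime the AlignDepthFloatToReconstruction reference
        // frame by raycasting the volume from the given camera pose.
        private void SetReferenceFrame(Matrix4 pose)
        {
            // Raycast the volume to get both a point cloud and a depth image at this pose
            this.volume.CalculatePointCloudAndDepth(this.raycastPointCloudFrame, this.smoothDepthFloatFrame, null, pose);

            // Use the raycast depth as the reference for subsequent alignment calls
            this.volume.SetAlignDepthFloatToReconstructionReferenceFrame(this.smoothDepthFloatFrame);
        }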
Code example #12
        private void ProcessDepthData(ushort[] depthPixels)
        {
            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    this.width,
                    this.height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.colorVolume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    this.colorVolume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.FOutSuccess[0] = false;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.colorVolume.GetCurrentWorldToCameraTransform();
                    Matrix4 sdfPose = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    this.FOutWorldCam[0]   = this.getmat(calculatedCameraPose);
                    this.FOutWorldVoxel[0] = this.getmat(sdfPose);


                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.FOutSuccess[0]         = true;
                }

                // Calculate the point cloud
                this.colorVolume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);
                //this.volume.AlignDepthFloatToReconstruction

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                lock (m_lock)
                {
                    this.shadedSurfaceColorFrame.CopyPixelDataTo(this.pic);

                    this.pointCloudBuffer.CopyPixelDataTo(this.piccloud);

                    //this.LockFrameAndExecute((Action<IntPtr>)(src => Marshal.Copy(src, destinationPixelData, 0, this.PixelDataLength)));

                    /*var v = (Action<IntPtr>) (src => Marshal.Copy(src, this.piccloud, 0, 640*480*6));
                     *
                     * Type t = this.pointCloudBuffer.GetType();
                     * MethodInfo m =t.GetMethod("LockFrameAndExecute",BindingFlags.NonPublic | BindingFlags.Instance);
                     * m.Invoke(this.pointCloudBuffer, new object[] { v });*/
                    //MethodInfo m =

                    //this.pointCloudBuffer.CopyPixelDataTo(this.piccloud);


                    this.FInvalidate = true;
                }
            }
            catch (Exception ex)
            {
                Console.Write("Test");
            }

            this.processing = false;
        }
Code example #13
        /// <summary>
        /// Process the depth input
        /// </summary>
        /// <param name="depthPixels">The depth data array to be processed</param>
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            Debug.Assert(null != this.volume, "volume should be initialized");
            Debug.Assert(null != this.shadedSurfaceColorFrame, "shaded surface should be initialized");
            Debug.Assert(null != this.colorBitmap, "color bitmap should be initialized");

            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    (int)ImageSize.Width,
                    (int)ImageSize.Height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    this.volume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.trackingErrorCount++;

                    // Show tracking error on status bar
                    this.statusBarText.Text = Properties.Resources.CameraTrackingFailed;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();

                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.trackingErrorCount     = 0;
                }

                if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
                {
                    // Auto Reset due to bad tracking
                    this.statusBarText.Text = Properties.Resources.ResetVolume;

                    // Automatically Clear Volume and reset tracking if tracking fails
                    this.ResetReconstruction();
                }

                // Calculate the point cloud
                this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);

                // Write the pixel data into our bitmap
                this.colorBitmap.WritePixels(
                    new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                    this.colorPixels,
                    this.colorBitmap.PixelWidth * sizeof(int),
                    0);

                // The input frame was processed successfully, increase the processed frame count
                ++this.processedFrameCount;
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
            }
            finally
            {
                this.processingFrame = false;
            }
        }
Code example #14
        private void RenderFusion()
        {
            Matrix3D m = Matrix3D.Identity;

            m = worldToCameraTransform.ToMatrix3D();

            CurrentRotationDegrees += RotationRateInDegrees;

            double zSize = VoxelResolutionZ / (double)VoxelsPerMeter;

            m.Translate(new Vector3D(_currentVolumeCenter.X,
                                     _currentVolumeCenter.Y,
                                     -_currentVolumeCenter.Z));
            m.Rotate(new Quaternion(new Vector3D(0, 1, 0), CurrentRotationDegrees));

            double zDelta = _volumeCenter.Z - _currentVolumeCenter.Z;

            m.Translate(new Vector3D(0,
                                     0,
                                     1.75 * zSize));


            //m.Translate(new Vector3D(0 * VoxelsPerMeter,
            //                        0,
            //                        -1.0 * (HeadNeckOffset + 0.5 * zSize)));
            //m.Translate(new Vector3D(_currentVolumeCenter.X, _currentVolumeCenter.Y, _currentVolumeCenter.Z + zSize));

            var cameraTransform = m.ToMatrix4();

            var viewCam = cameraTransform;

            if (!IsTrackingModel)
            {
                viewCam = worldToCameraTransform;
            }

            // Calculate the point cloud
            this.volume.CalculatePointCloud(this.pointCloudBuffer, viewCam);

            float volSizeX = VoxelResolutionX / (float)VoxelsPerMeter;
            float volSizeY = VoxelResolutionY / (float)VoxelsPerMeter;
            float volSizeZ = VoxelResolutionZ / (float)VoxelsPerMeter;

            Matrix4 worldToBGRTransform = Matrix4.Identity;

            worldToBGRTransform.M11 = VoxelsPerMeter / (float)VoxelResolutionX;
            worldToBGRTransform.M22 = VoxelsPerMeter / (float)VoxelResolutionY;
            worldToBGRTransform.M33 = VoxelsPerMeter / (float)VoxelResolutionZ;
            worldToBGRTransform.M41 = -_currentVolumeCenter.X - 0.5f * volSizeX;
            worldToBGRTransform.M42 = _currentVolumeCenter.Y - 0.5f * volSizeY;
            worldToBGRTransform.M43 = _currentVolumeCenter.Z - 0.5f * volSizeZ;
            worldToBGRTransform.M44 = 1.0f;

            // Shade point cloud and render
            FusionDepthProcessor.ShadePointCloud(
                this.pointCloudBuffer,
                viewCam,
                worldToBGRTransform,
                null,
                this.shadedSurfaceColorFrame);

            this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);
        }
Code example #15
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Check to ensure suitable DirectX11 compatible hardware exists before initializing Kinect Fusion
            try
            {
                string deviceDescription  = string.Empty;
                string deviceInstancePath = string.Empty;
                int    deviceMemory       = 0;

                FusionDepthProcessor.GetDeviceInfo(
                    ProcessorType, DeviceToUse, out deviceDescription, out deviceInstancePath, out deviceMemory);
            }
            catch (IndexOutOfRangeException)
            {
                // Thrown when index is out of range for processor type or there is no DirectX11 capable device installed.
                // As we set -1 (auto-select default) for the DeviceToUse above, this indicates that there is no DirectX11
                // capable device. The options for users in this case are to either install a DirectX11 capable device
                // (see documentation for recommended GPUs) or to switch to non-real-time CPU based reconstruction by
                // changing ProcessorType to ReconstructionProcessor.Cpu
                this.statusBarText.Text = Properties.Resources.NoDirectX11CompatibleDeviceOrInvalidDeviceIndex;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }

            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Create local depth pixels buffer
            this.depthImagePixels = new DepthImagePixel[this.frameDataLength];

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    // Reset the reconstruction if we need to add a custom world-volume transformation
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range   = DepthRange.Near;
                this.checkBoxNearMode.IsChecked = true;
            }
            catch (InvalidOperationException)
            {
                // Near mode not supported on device, silently fail during initialization
                this.checkBoxNearMode.IsEnabled = false;
            }

            // Initialize and start the FPS timer
            this.fpsTimer          = new DispatcherTimer();
            this.fpsTimer.Tick    += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            this.lastFPSTimestamp = DateTime.UtcNow;
        }
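The ResetReconstruction call and translateResetPoseByMinDepthThreshold flag above come from the official sample, where the reset optionally shifts the world-volume transform so the reconstruction starts at the minimum depth in front of the sensor. A hedged sketch of that reset, reusing the fields referenced above:

        // Hedged sketch of ResetReconstruction: optionally translate the volume
        // along +Z by the minimum depth threshold (converted to voxel units) so
        // the volume sits in front of the camera rather than centered on it.
        private void ResetReconstruction()
        {
            this.worldToCameraTransform = Matrix4.Identity;

            if (null == this.volume)
            {
                return;
            }

            if (this.translateResetPoseByMinDepthThreshold)
            {
                Matrix4 worldToVolumeTransform = this.defaultWorldToVolumeTransform;
                worldToVolumeTransform.M43 -= FusionDepthProcessor.DefaultMinimumDepth * VoxelsPerMeter;

                this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolumeTransform);
            }
            else
            {
                this.volume.ResetReconstruction(this.worldToCameraTransform);
            }
        }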
Code example #16
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            try {
                if (processingSaveFile)
                {
                    return;
                }

                // Convert the DepthImagePixel data to a DepthFloatFrame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    DepthWidth,
                    DepthHeight,
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // Process the frame
                bool trackingSucceeded = volume.ProcessFrame(
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    volume.GetCurrentWorldToCameraTransform());
                if (!trackingSucceeded)
                {
                    // Reset once the error count reaches a threshold
                    // (e.g. when the Kinect or the target was moved too quickly)
                    trackingErrorCount++;
                    if (trackingErrorCount >= 100)
                    {
                        Trace.WriteLine(@"tracking error.");

                        trackingErrorCount = 0;
                        volume.ResetReconstruction(Matrix4.Identity);
                    }

                    return;
                }

                // Retrieve the point cloud
                volume.CalculatePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform());

                // Render the point cloud into a 2D image
                FusionDepthProcessor.ShadePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform(),
                    shadedSurfaceColorFrame,
                    null);

                // Write the 2D data out to a bitmap
                var colorPixels = new int[depthPixels.Length];
                shadedSurfaceColorFrame.CopyPixelDataTo(colorPixels);

                ImageKinectFusion.Source = BitmapSource.Create(DepthWidth, DepthHeight, 96, 96,
                                                               PixelFormats.Bgr32, null, colorPixels, DepthWidth * 4);
            }
            catch (Exception ex) {
                Trace.WriteLine(ex.Message);
            }
            finally {
                processingFrame = false;
            }
        }
Code example #17
        /// <summary>
        /// processes the depth data package into the kinect fusion volume
        /// </summary>
        /// <param name="pKdp">the data package</param>
        void processDepthData(KinectDataPackage pKdp, System.Threading.CancellationToken pCancelToken)
        {
            lock (canWorkLock)
            {
                Log.LogManager.updateAlgorithmStatus("Kinect Fusion integration");
                this.volume.ResetReconstruction(Matrix4.Identity);

                int picturesIntegrated = 0;
                foreach (ushort[] pDepth in pKdp.rawDepthData)
                {
                    pCancelToken.ThrowIfCancellationRequested();
                    WriteableBitmap            bitmap                  = new WriteableBitmap(this.depthFloatFrame.Width, this.depthFloatFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);
                    FusionFloatImageFrame      depthFloatBuffer        = new FusionFloatImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    FusionPointCloudImageFrame pointCloudBuffer        = new FusionPointCloudImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    FusionColorImageFrame      shadedSurfaceColorFrame = new FusionColorImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    int[] voxelPixels = new int[this.depthFloatFrame.Width * this.depthFloatFrame.Height];

                    this.volume.DepthToDepthFloatFrame(
                        pDepth,
                        depthFloatBuffer,
                        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.minDepthClip,
                        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.maxDepthClip,
                        false);
                    float alignmentValue;
                    bool  trackingSucceeded = this.volume.ProcessFrame(depthFloatBuffer,
                                                                       Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.iterationCount,
                                                                       Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.integrationWeight,
                                                                       out alignmentValue,
                                                                       volume.GetCurrentWorldToCameraTransform());

                    // If camera tracking failed, no data integration or raycast for reference
                    // point cloud will have taken place, and the internal camera pose
                    // will be unchanged.
                    if (!trackingSucceeded)
                    {
                        trackingErrorCount++;
                    }
                    else
                    {
                        Matrix4 calculatedCameraPose = volume.GetCurrentWorldToCameraTransform();

                        // Set the camera pose and reset tracking errors
                        worldToCameraTransform = calculatedCameraPose;
                        trackingErrorCount     = 0;
                    }

                    // Calculate the point cloud
                    volume.CalculatePointCloud(pointCloudBuffer, worldToCameraTransform);

                    // Shade point cloud and render
                    FusionDepthProcessor.ShadePointCloud(
                        pointCloudBuffer,
                        worldToCameraTransform,
                        null,
                        shadedSurfaceColorFrame
                        );

                    shadedSurfaceColorFrame.CopyPixelDataTo(voxelPixels);

                    bitmap.WritePixels(
                        new Int32Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight),
                        voxelPixels,
                        bitmap.PixelWidth * sizeof(int),
                        0);

                    bitmap.Freeze();
                    OnNewFusionPictureEvent.BeginInvoke(pKdp.usedConfig.ID, bitmap, null, null);

                    picturesIntegrated++;
                    Log.LogManager.writeLogDebug("[DataIntegration:Reconstruction] " + picturesIntegrated + " of " + pKdp.rawDepthData.Count + " Pictures integrated");
                }

                //if request was calibration request, export meshes
                if (pKdp.usedConfig.clientRequestObject.requestType == ClientConfigObject.RequestType.calibration)
                {
                    exportMesh(volume, pKdp, false);
                    Log.LogManager.writeLog("[DataIntegration:Reconstruction] Mesh of " + pKdp.usedConfig.name + " exported.");
                    return;
                }

                //broadcast new point cloud
                PointCloud p = new PointCloud(volume);
                p.ConfigObject = pKdp.usedConfig;
                OnNewPointCloudEvent.BeginInvoke(p, null, null);
                Log.LogManager.writeLog("[DataIntegration:Reconstruction] All pictures of " + pKdp.usedConfig.name + " integrated");
                Log.LogManager.updateAlgorithmStatus("Done");
            }
        }
Code example #18
 public void UpdateDepthPointCloud(FusionFloatImageFrame smooth)
 {
     FusionDepthProcessor.DepthFloatFrameToPointCloud(smooth, DepthPointCloudFrame);
 }
Code example #19
        private bool TrackIntegrate(DepthImagePixel[] depthPixels, DepthImageFormat depthFormat)
        {
            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            // Convert the depth image frame to depth float image frame
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels,
                (int)depthSize.Width,
                (int)depthSize.Height,
                this.depthFloatBuffer,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                false);

            bool trackingSucceeded = this.volume.AlignDepthFloatToReconstruction(
                depthFloatBuffer,
                FusionDepthProcessor.DefaultAlignIterationCount,
                residualFloatBuffer,
                out _alignmentEnergy,
                volume.GetCurrentWorldToCameraTransform());

            //if (trackingSucceeded && _alignmentEnergy == 0.0)
            //    trackingSucceeded = false;

            // ProcessFrame will first calculate the camera pose and then integrate
            // if tracking is successful
            //bool trackingSucceeded = this.volume.ProcessFrame(
            //    this.depthFloatBuffer,
            //    FusionDepthProcessor.DefaultAlignIterationCount,
            //    IntegrationWeight,
            //    this.volume.GetCurrentWorldToCameraTransform());

            // If camera tracking failed, no data integration or raycast for reference
            // point cloud will have taken place, and the internal camera pose
            // will be unchanged.
            if (!trackingSucceeded)
            {
                this.trackingErrorCount++;

                // Show tracking error on status bar
                FusionStatusMessage = Properties.Resources.CameraTrackingFailed;
                _audioManager.State = AudioState.Error;
            }
            else
            {
                ProcessResidualImage();

                this.worldToCameraTransform = volume.GetCurrentWorldToCameraTransform();

                if (!IsIntegrationPaused)
                {
                    this.volume.IntegrateFrame(depthFloatBuffer, IntegrationWeight, this.worldToCameraTransform);
                }

                this.trackingErrorCount = 0;
            }

            if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
            {
                // Auto Reset due to bad tracking
                FusionStatusMessage = Properties.Resources.ResetVolume;

                // Automatically Clear Volume and reset tracking if tracking fails
                this.ResetReconstruction(_currentVolumeCenter);
            }
            return(trackingSucceeded);
        }
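ProcessResidualImage above presumably visualizes the residual frame written by AlignDepthFloatToReconstruction. A hedged sketch, assuming residual values near 0 mean a good per-pixel match and values outside [-1, 1] mark invalid pixels; the buffer names and the grayscale mapping are assumptions.

        // Hedged sketch: map per-pixel alignment residuals to grayscale ARGB for display.
        private void ProcessResidualImage()
        {
            this.residualFloatBuffer.CopyPixelDataTo(this.residualData);

            for (int i = 0; i < this.residualData.Length; i++)
            {
                float residual = this.residualData[i];

                if (float.IsNaN(residual) || residual < -1.0f || residual > 1.0f)
                {
                    // Invalid pixel: render black
                    this.residualPixelsArgb[i] = unchecked((int)0xFF000000);
                }
                else
                {
                    // 0 residual renders white, |1| residual renders black
                    int intensity = (int)(255.0f * (1.0f - Math.Abs(residual)));
                    this.residualPixelsArgb[i] = unchecked((int)0xFF000000) | (intensity << 16) | (intensity << 8) | intensity;
                }
            }
        }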