Example #1
        /// <summary>
        /// Update the volume data.
        /// </summary>
        private void UpdateVolumeData()
        {
            // When capturing color, use the overload that also raycasts color into the surface frame:
            //this.volume.CalculatePointCloud(this.raycastPointCloudFrame, this.shadedSurfaceFrame, this.worldToCameraTransform);

            this.volume.CalculatePointCloud(this.raycastPointCloudFrame, this.worldToCameraTransform);

            // Shade point cloud frame for rendering
            FusionDepthProcessor.ShadePointCloud(
                this.raycastPointCloudFrame,
                this.worldToCameraTransform,
                this.worldToBGRTransform,
                this.shadedSurfaceFrame, null);

            // Copy pixel data to pixel buffer
            this.shadedSurfaceFrame.CopyPixelDataTo(this.shadedSurfaceFramePixelsArgb);

            this.dispatcher.BeginInvoke((Action)RenderVolumeBitmap);
        }
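The worldToBGRTransform consumed by ShadePointCloud above is not constructed in this snippet (Example #9 below builds a variant inline). A minimal sketch of the conventional construction, assuming voxelsPerMeter and voxelResolutionX/Y/Z fields that match the reconstruction volume; it scales world coordinates into the volume so that surface position shades to color:

        /// <summary>
        /// A minimal sketch: map world coordinates to BGR color space so that
        /// surface position renders as color. The field names are assumptions.
        /// </summary>
        private Matrix4 CreateWorldToBGRTransform()
        {
            Matrix4 worldToBGRTransform = Matrix4.Identity;

            // Scale world-space meters into normalized volume coordinates
            worldToBGRTransform.M11 = this.voxelsPerMeter / this.voxelResolutionX;
            worldToBGRTransform.M22 = this.voxelsPerMeter / this.voxelResolutionY;
            worldToBGRTransform.M33 = this.voxelsPerMeter / this.voxelResolutionZ;

            // Translate so that the volume center shades to mid-range color
            worldToBGRTransform.M41 = 0.5f;
            worldToBGRTransform.M42 = 0.5f;

            return worldToBGRTransform;
        }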
Example #2
        private void ProcessFrame(byte bodyIndex)
        {
            try
            {
                RemoveNonBodyPixels(bodyIndex);

                reconstruction.DepthToDepthFloatFrame(rawDepthData, floatDepthFrame,
                                                      MIN_DEPTH, MAX_DEPTH,
                                                      false);

                var aligned = reconstruction.ProcessFrame(
                    floatDepthFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    out alignmentEnergy,
                    worldToCameraTransform);
                if (aligned)
                {
                    syncContext.Post(_ => FrameAligned?.Invoke(this, EventArgs.Empty), null);
                    worldToCameraTransform = reconstruction.GetCurrentWorldToCameraTransform();
                }
            }
            catch (InvalidOperationException)
            {
                // Alignment or integration failed for this frame; skip it and retry on the next one
            }

            try
            {
                reconstruction.CalculatePointCloud(pointCloudFrame, worldToCameraTransform);

                FusionDepthProcessor.ShadePointCloud(pointCloudFrame, worldToCameraTransform, surfaceFrame, null);
                SurfaceBitmap.Access(data => surfaceFrame.CopyPixelDataTo(data));

                syncContext.Post(_ => SurfaceBitmapUpdated?.Invoke(this, EventArgs.Empty), null);
            }
            catch (InvalidOperationException)
            {
                // Raycast or shading failed; skip rendering this frame
            }
        }
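The reconstruction, floatDepthFrame, pointCloudFrame, and surfaceFrame fields used above must be created before the first frame is processed. A minimal setup sketch, assuming the Kinect v2 Fusion API; the volume size (384³ voxels at 256 voxels per meter) and depth resolution are illustrative:

        // A minimal sketch: create the volume and the per-frame image buffers once
        private void InitializeFusion(int width, int height)  // 512x424 on Kinect v2
        {
            var parameters = new ReconstructionParameters(256, 384, 384, 384);

            reconstruction = Reconstruction.FusionCreateReconstruction(
                parameters,
                ReconstructionProcessor.Amp,  // run reconstruction on the GPU
                -1,                           // -1 selects the default device
                Matrix4.Identity);            // initial world-to-camera transform

            floatDepthFrame = new FusionFloatImageFrame(width, height);
            pointCloudFrame = new FusionPointCloudImageFrame(width, height);
            surfaceFrame    = new FusionColorImageFrame(width, height);

            worldToCameraTransform = Matrix4.Identity;
        }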
Example #3
        /// <summary>
        /// Render the reconstruction
        /// </summary>
        public void RenderReconstruction()
        {
            var fusionVolume = engine.FusionVolume;

            if (null == fusionVolume.Reconstruction || engine.MeshExporter.IsSavingMesh || null == engine.PointCloudCalculator.RaycastPointCloudFrame ||
                null == ShadedSurfaceFrame || null == ShadedSurfaceNormalsFrame)
            {
                return;
            }

            var pc = engine.PointCloudCalculator;

            // If KinectView option has been set, use the worldToCameraTransform, else use the virtualCamera transform
            Matrix4 cameraView = this.KinectView ? fusionVolume.WorldToCameraTransform : RenderWorldToCameraMatrix;

            if (engine.DataIntegrator.CaptureColor)
            {
                fusionVolume.Reconstruction.CalculatePointCloud(pc.RaycastPointCloudFrame, ShadedSurfaceFrame, cameraView);
            }
            else
            {
                fusionVolume.Reconstruction.CalculatePointCloud(pc.RaycastPointCloudFrame, cameraView);

                // Shade point cloud frame for rendering
                FusionDepthProcessor.ShadePointCloud(
                    pc.RaycastPointCloudFrame,
                    cameraView,
                    WorldToBGRTransform,
                    DisplayNormals ? null : ShadedSurfaceFrame,
                    DisplayNormals ? ShadedSurfaceNormalsFrame : null);
            }

            var renderFrame = engine.DataIntegrator.CaptureColor ?
                              ShadedSurfaceFrame : (DisplayNormals ? ShadedSurfaceNormalsFrame : ShadedSurfaceFrame);

            engine.RenderController.RenderReconstruction(renderFrame);
        }
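Example #3 checks engine.MeshExporter.IsSavingMesh, but the export itself is out of frame. A minimal sketch of pulling a mesh out of the reconstruction and writing it as a Wavefront OBJ file, using the Fusion CalculateMesh API; the output path is hypothetical and System.IO is assumed:

        // A minimal sketch: voxelStep 1 meshes the volume at full resolution;
        // larger steps decimate
        using (var mesh = fusionVolume.Reconstruction.CalculateMesh(1))
        using (var writer = new StreamWriter("reconstruction.obj"))  // hypothetical path
        {
            foreach (var v in mesh.GetVertices())
            {
                // Flip Y and Z to go from the Kinect camera basis to the
                // orientation OBJ viewers usually expect
                writer.WriteLine("v {0} {1} {2}", v.X, -v.Y, -v.Z);
            }

            var indices = mesh.GetTriangleIndexes();

            // OBJ indices are 1-based; every three indices form one triangle
            for (int i = 0; i < indices.Count; i += 3)
            {
                writer.WriteLine("f {0} {1} {2}", indices[i] + 1, indices[i + 1] + 1, indices[i + 2] + 1);
            }
        }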
Example #4
        private void ProcessDepthData(ushort[] depthPixels)
        {
            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    this.width,
                    this.height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                float energy;
                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    out energy,
                    this.volume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.FOutSuccess[0] = false;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();
                    Matrix4 sdfPose = this.volume.GetCurrentWorldToVolumeTransform();

                    this.FOutWorldCam[0]   = this.getmat(calculatedCameraPose);
                    this.FOutWorldVoxel[0] = this.getmat(sdfPose);


                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.FOutSuccess[0]         = true;
                }

                // Calculate the point cloud
                this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);
                //this.volume.AlignDepthFloatToReconstruction

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                lock (m_lock)
                {
                    this.shadedSurfaceColorFrame.CopyPixelDataTo(this.pic);

                    this.pointCloudBuffer.CopyPixelDataTo(this.piccloud);

                    this.FInvalidate = true;
                }
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }

            this.processing = false;
        }
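The FOut* pins suggest this snippet is a vvvv plugin, and the getmat helper it calls is not shown. A plausible sketch, assuming the plugin outputs SlimDX transforms (the target type is an assumption; Fusion's Matrix4 is a row-major struct of float fields):

        // Hypothetical sketch of the getmat helper assumed above:
        // element-wise copy from Microsoft.Kinect.Fusion's Matrix4 to SlimDX.Matrix
        private SlimDX.Matrix getmat(Matrix4 m)
        {
            return new SlimDX.Matrix
            {
                M11 = m.M11, M12 = m.M12, M13 = m.M13, M14 = m.M14,
                M21 = m.M21, M22 = m.M22, M23 = m.M23, M24 = m.M24,
                M31 = m.M31, M32 = m.M32, M33 = m.M33, M34 = m.M34,
                M41 = m.M41, M42 = m.M42, M43 = m.M43, M44 = m.M44,
            };
        }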
Example #5
        private void ProcessDepthData(ushort[] depthPixels)
        {
            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    this.width,
                    this.height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.colorVolume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    this.colorVolume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.FOutSuccess[0] = false;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.colorVolume.GetCurrentWorldToCameraTransform();
                    Matrix4 sdfPose = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    this.FOutWorldCam[0]   = this.getmat(calculatedCameraPose);
                    this.FOutWorldVoxel[0] = this.getmat(sdfPose);


                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.FOutSuccess[0]         = true;
                }

                // Calculate the point cloud
                this.colorVolume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);
                //this.volume.AlignDepthFloatToReconstruction

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                lock (m_lock)
                {
                    this.shadedSurfaceColorFrame.CopyPixelDataTo(this.pic);

                    this.pointCloudBuffer.CopyPixelDataTo(this.piccloud);

                    this.FInvalidate = true;
                }
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }

            this.processing = false;
        }
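The commented-out AlignDepthFloatToReconstruction line above points at the tracking-only variant of ProcessFrame: it aligns the incoming depth frame against the volume without integrating it. A minimal sketch using the same buffers, with deltaFromReferenceFrame as an assumed FusionFloatImageFrame field for per-pixel alignment residuals (it may also be null):

        // A minimal sketch: align without integrating, e.g. to re-localize
        // the camera after tracking has been lost
        float alignmentEnergy;
        bool aligned = this.colorVolume.AlignDepthFloatToReconstruction(
            this.depthFloatBuffer,
            FusionDepthProcessor.DefaultAlignIterationCount,
            this.deltaFromReferenceFrame,   // assumed field; residual image, may be null
            out alignmentEnergy,
            this.worldToCameraTransform);

        if (aligned)
        {
            this.worldToCameraTransform = this.colorVolume.GetCurrentWorldToCameraTransform();
        }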
Example #6
        /// <summary>
        /// Process the depth input
        /// </summary>
        /// <param name="depthPixels">The depth data array to be processed</param>
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            Debug.Assert(null != this.volume, "volume should be initialized");
            Debug.Assert(null != this.shadedSurfaceColorFrame, "shaded surface should be initialized");
            Debug.Assert(null != this.colorBitmap, "color bitmap should be initialized");

            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    (int)ImageSize.Width,
                    (int)ImageSize.Height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    this.volume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.trackingErrorCount++;

                    // Show tracking error on status bar
                    this.statusBarText.Text = Properties.Resources.CameraTrackingFailed;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();

                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.trackingErrorCount     = 0;
                }

                if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
                {
                    // Auto Reset due to bad tracking
                    this.statusBarText.Text = Properties.Resources.ResetVolume;

                    // Automatically Clear Volume and reset tracking if tracking fails
                    this.ResetReconstruction();
                }

                // Calculate the point cloud
                this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);

                // Write the pixel data into our bitmap
                this.colorBitmap.WritePixels(
                    new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                    this.colorPixels,
                    this.colorBitmap.PixelWidth * sizeof(int),
                    0);

                // The input frame was processed successfully, increase the processed frame count
                ++this.processedFrameCount;
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
            }
            finally
            {
                this.processingFrame = false;
            }
        }
Example #7
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            try {
                if (processingSaveFile)
                {
                    return;
                }

                // Convert the DepthImagePixel array to a DepthFloatFrame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    DepthWidth,
                    DepthHeight,
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // Process the frame
                bool trackingSucceeded = volume.ProcessFrame(
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    volume.GetCurrentWorldToCameraTransform());
                if (!trackingSucceeded)
                {
                    // Reset after a run of tracking errors
                    // (e.g. when the Kinect or the target was moved too quickly)
                    trackingErrorCount++;
                    if (trackingErrorCount >= 100)
                    {
                        Trace.WriteLine(@"tracking error.");

                        trackingErrorCount = 0;
                        volume.ResetReconstruction(Matrix4.Identity);
                    }

                    return;
                }

                // Retrieve the point cloud
                volume.CalculatePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform());

                // Render the point cloud into a 2D image
                FusionDepthProcessor.ShadePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform(),
                    shadedSurfaceColorFrame,
                    null);

                // Write the 2D data out to a bitmap
                var colorPixels = new int[depthPixels.Length];
                shadedSurfaceColorFrame.CopyPixelDataTo(colorPixels);

                ImageKinectFusion.Source = BitmapSource.Create(DepthWidth, DepthHeight, 96, 96,
                                                               PixelFormats.Bgr32, null, colorPixels, DepthWidth * 4);
            }
            catch (Exception ex) {
                Trace.WriteLine(ex.Message);
            }
            finally {
                processingFrame = false;
            }
        }
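Allocating a fresh int[] and BitmapSource on every frame works, but it churns the large-object heap at frame rate; Example #6 shows the reusable alternative. A small sketch of that variant, assuming colorPixels and colorBitmap are created once at startup:

        // A minimal sketch: allocate once...
        colorPixels = new int[DepthWidth * DepthHeight];
        colorBitmap = new WriteableBitmap(DepthWidth, DepthHeight, 96.0, 96.0, PixelFormats.Bgr32, null);
        ImageKinectFusion.Source = colorBitmap;

        // ...then per frame, write into the existing bitmap instead of allocating
        shadedSurfaceColorFrame.CopyPixelDataTo(colorPixels);
        colorBitmap.WritePixels(
            new Int32Rect(0, 0, DepthWidth, DepthHeight),
            colorPixels,
            DepthWidth * sizeof(int),
            0);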
Example #8
        /// <summary>
        /// processes the depth data package into the kinect fusion volume
        /// </summary>
        /// <param name="pKdp">the data package</param>
        void processDepthData(KinectDataPackage pKdp, System.Threading.CancellationToken pCancelToken)
        {
            lock (canWorkLock)
            {
                Log.LogManager.updateAlgorithmStatus("Kinect Fusion integration");
                this.volume.ResetReconstruction(Matrix4.Identity);

                int picturesIntegrated = 0;
                foreach (ushort[] pDepth in pKdp.rawDepthData)
                {
                    pCancelToken.ThrowIfCancellationRequested();
                    WriteableBitmap            bitmap                  = new WriteableBitmap(this.depthFloatFrame.Width, this.depthFloatFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);
                    FusionFloatImageFrame      depthFloatBuffer        = new FusionFloatImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    FusionPointCloudImageFrame pointCloudBuffer        = new FusionPointCloudImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    FusionColorImageFrame      shadedSurfaceColorFrame = new FusionColorImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    int[] voxelPixels = new int[this.depthFloatFrame.Width * this.depthFloatFrame.Height];

                    this.volume.DepthToDepthFloatFrame(
                        pDepth,
                        depthFloatBuffer,
                        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.minDepthClip,
                        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.maxDepthClip,
                        false);
                    float alignmentValue;
                    bool  trackingSucceeded = this.volume.ProcessFrame(depthFloatBuffer,
                                                                       Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.iterationCount,
                                                                       Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.integrationWeight,
                                                                       out alignmentValue,
                                                                       volume.GetCurrentWorldToCameraTransform());

                    // If camera tracking failed, no data integration or raycast for reference
                    // point cloud will have taken place, and the internal camera pose
                    // will be unchanged.
                    if (!trackingSucceeded)
                    {
                        trackingErrorCount++;
                    }
                    else
                    {
                        Matrix4 calculatedCameraPose = volume.GetCurrentWorldToCameraTransform();

                        // Set the camera pose and reset tracking errors
                        worldToCameraTransform = calculatedCameraPose;
                        trackingErrorCount     = 0;
                    }

                    // Calculate the point cloud
                    volume.CalculatePointCloud(pointCloudBuffer, worldToCameraTransform);

                    // Shade the point cloud; with null for the surface frame, the
                    // shaded surface normals are rendered into shadedSurfaceColorFrame
                    FusionDepthProcessor.ShadePointCloud(
                        pointCloudBuffer,
                        worldToCameraTransform,
                        null,
                        shadedSurfaceColorFrame
                        );

                    shadedSurfaceColorFrame.CopyPixelDataTo(voxelPixels);

                    bitmap.WritePixels(
                        new Int32Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight),
                        voxelPixels,
                        bitmap.PixelWidth * sizeof(int),
                        0);

                    bitmap.Freeze();
                    OnNewFusionPictureEvent.BeginInvoke(pKdp.usedConfig.ID, bitmap, null, null);

                    picturesIntegrated++;
                    Log.LogManager.writeLogDebug("[DataIntegration:Reconstruction] " + picturesIntegrated + " of " + pKdp.rawDepthData.Count + " Pictures integrated");
                }

                // If the request was a calibration request, export the meshes
                if (pKdp.usedConfig.clientRequestObject.requestType == ClientConfigObject.RequestType.calibration)
                {
                    exportMesh(volume, pKdp, false);
                    Log.LogManager.writeLog("[DataIntegration:Reconstruction] Mesh of " + pKdp.usedConfig.name + " exported.");
                    return;
                }

                // Broadcast the new point cloud
                PointCloud p = new PointCloud(volume);
                p.ConfigObject = pKdp.usedConfig;
                OnNewPointCloudEvent.BeginInvoke(p, null, null);
                Log.LogManager.writeLog("[DataIntegration:Reconstruction] All pictures of " + pKdp.usedConfig.name + " integrated");
                Log.LogManager.updateAlgorithmStatus("Done");
            }
        }
Example #9
        private void RenderFusion()
        {
            Matrix3D m = worldToCameraTransform.ToMatrix3D();

            CurrentRotationDegrees += RotationRateInDegrees;

            double zSize = VoxelResolutionZ / (double)VoxelsPerMeter;

            m.Translate(new Vector3D(_currentVolumeCenter.X,
                                     _currentVolumeCenter.Y,
                                     -_currentVolumeCenter.Z));
            m.Rotate(new Quaternion(new Vector3D(0, 1, 0), CurrentRotationDegrees));

            m.Translate(new Vector3D(0,
                                     0,
                                     1.75 * zSize));


            var cameraTransform = m.ToMatrix4();

            var viewCam = cameraTransform;

            if (!IsTrackingModel)
            {
                viewCam = worldToCameraTransform;
            }

            // Calculate the point cloud
            this.volume.CalculatePointCloud(this.pointCloudBuffer, viewCam);

            float volSizeX = VoxelResolutionX / (float)VoxelsPerMeter;
            float volSizeY = VoxelResolutionY / (float)VoxelsPerMeter;
            float volSizeZ = VoxelResolutionZ / (float)VoxelsPerMeter;

            Matrix4 worldToBGRTransform = Matrix4.Identity;

            worldToBGRTransform.M11 = VoxelsPerMeter / (float)VoxelResolutionX;
            worldToBGRTransform.M22 = VoxelsPerMeter / (float)VoxelResolutionY;
            worldToBGRTransform.M33 = VoxelsPerMeter / (float)VoxelResolutionZ;
            worldToBGRTransform.M41 = -_currentVolumeCenter.X - 0.5f * volSizeX;
            worldToBGRTransform.M42 = _currentVolumeCenter.Y - 0.5f * volSizeY;
            worldToBGRTransform.M43 = _currentVolumeCenter.Z - 0.5f * volSizeZ;
            worldToBGRTransform.M44 = 1.0f;

            // Shade point cloud and render
            FusionDepthProcessor.ShadePointCloud(
                this.pointCloudBuffer,
                viewCam,
                worldToBGRTransform,
                null,
                this.shadedSurfaceColorFrame);

            this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);
        }
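The ToMatrix3D and ToMatrix4 calls above are not Fusion SDK methods; they are presumably extension methods defined elsewhere in this codebase. A plausible sketch of the pair, assuming a straight element-wise copy between Fusion's row-major Matrix4 (float) and WPF's Matrix3D (double):

        // Hypothetical sketch of the conversion helpers assumed by RenderFusion
        public static class MatrixExtensions
        {
            public static Matrix3D ToMatrix3D(this Matrix4 m)
            {
                return new Matrix3D(
                    m.M11, m.M12, m.M13, m.M14,
                    m.M21, m.M22, m.M23, m.M24,
                    m.M31, m.M32, m.M33, m.M34,
                    m.M41, m.M42, m.M43, m.M44);
            }

            public static Matrix4 ToMatrix4(this Matrix3D m)
            {
                return new Matrix4
                {
                    M11 = (float)m.M11, M12 = (float)m.M12, M13 = (float)m.M13, M14 = (float)m.M14,
                    M21 = (float)m.M21, M22 = (float)m.M22, M23 = (float)m.M23, M24 = (float)m.M24,
                    M31 = (float)m.M31, M32 = (float)m.M32, M33 = (float)m.M33, M34 = (float)m.M34,
                    M41 = (float)m.OffsetX, M42 = (float)m.OffsetY, M43 = (float)m.OffsetZ, M44 = (float)m.M44,
                };
            }
        }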