Example #1
        private void ProcessDepthData(ushort[] depthPixels)
        {
            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    this.width,
                    this.height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                float energy;
                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    out energy,
                    this.volume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.FOutSuccess[0] = false;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();
                    Matrix4 sdfPose = this.volume.GetCurrentWorldToVolumeTransform();

                    this.FOutWorldCam[0]   = this.getmat(calculatedCameraPose);
                    this.FOutWorldVoxel[0] = this.getmat(sdfPose);


                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.FOutSuccess[0]         = true;
                }

                // Calculate the point cloud
                this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);
                // Alternative: this.volume.AlignDepthFloatToReconstruction (tracking only, no integration)

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                lock (m_lock)
                {
                    this.shadedSurfaceColorFrame.CopyPixelDataTo(this.pic);

                    this.pointCloudBuffer.CopyPixelDataTo(this.piccloud);

                    this.FInvalidate = true;
                }
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }

            this.processing = false;
        }
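
The handlers above assume that the reconstruction volume and the Fusion image-frame buffers already exist. A minimal initialization sketch, assuming the Microsoft.Kinect.Toolkit.Fusion types from Kinect SDK 1.8; the field names mirror the example, and the resolution and voxel settings are illustrative assumptions:

        private void InitFusion()
        {
            // Assumed resolution fields; they must match the enabled depth stream.
            this.width  = 640;
            this.height = 480;

            // 384 voxels per axis at 256 voxels per meter spans a 1.5 m cube.
            var parameters = new ReconstructionParameters(256, 384, 384, 384);

            this.worldToCameraTransform = Matrix4.Identity;
            this.volume = Reconstruction.FusionCreateReconstruction(
                parameters,
                ReconstructionProcessor.Amp, // GPU; ReconstructionProcessor.Cpu as fallback
                -1,                          // -1 selects the default device
                this.worldToCameraTransform);

            // Frame buffers consumed by ProcessDepthData
            this.depthFloatBuffer        = new FusionFloatImageFrame(this.width, this.height);
            this.pointCloudBuffer        = new FusionPointCloudImageFrame(this.width, this.height);
            this.shadedSurfaceColorFrame = new FusionColorImageFrame(this.width, this.height);
        }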
Example #2
        private void ProcessDepthData(ushort[] depthPixels)
        {
            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    this.width,
                    this.height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.colorVolume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    this.colorVolume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.FOutSuccess[0] = false;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.colorVolume.GetCurrentWorldToCameraTransform();
                    Matrix4 sdfPose = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    this.FOutWorldCam[0]   = this.getmat(calculatedCameraPose);
                    this.FOutWorldVoxel[0] = this.getmat(sdfPose);


                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.FOutSuccess[0]         = true;
                }

                // Calculate the point cloud
                this.colorVolume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);
                // Alternative: this.colorVolume.AlignDepthFloatToReconstruction (tracking only, no integration)

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                lock (m_lock)
                {
                    this.shadedSurfaceColorFrame.CopyPixelDataTo(this.pic);

                    this.pointCloudBuffer.CopyPixelDataTo(this.piccloud);

                    // Alternative copy path (not used): invoke the non-public
                    // LockFrameAndExecute method on pointCloudBuffer via reflection
                    // and Marshal.Copy the raw point cloud data directly.

                    this.FInvalidate = true;
                }
            }
            catch (Exception ex)
            {
                Console.Write("Test");
            }

            this.processing = false;
        }
Example #3
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            try {
                if (processingSaveFile)
                {
                    return;
                }

                // Convert the DepthImagePixel array to a depth float frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    DepthWidth,
                    DepthHeight,
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // Process the frame
                bool trackingSucceeded = volume.ProcessFrame(
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    volume.GetCurrentWorldToCameraTransform());
                if (!trackingSucceeded)
                {
                    // Reset after a fixed number of consecutive tracking errors,
                    // e.g. when the Kinect or the target was moved too quickly
                    trackingErrorCount++;
                    if (trackingErrorCount >= 100)
                    {
                        Trace.WriteLine(@"tracking error.");

                        trackingErrorCount = 0;
                        volume.ResetReconstruction(Matrix4.Identity);
                    }

                    return;
                }

                // Retrieve the point cloud
                volume.CalculatePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform());

                // Render the point cloud into 2D data
                FusionDepthProcessor.ShadePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform(),
                    shadedSurfaceColorFrame,
                    null);

                // Write the 2D data out to a bitmap
                var colorPixels = new int[depthPixels.Length];
                shadedSurfaceColorFrame.CopyPixelDataTo(colorPixels);

                ImageKinectFusion.Source = BitmapSource.Create(DepthWidth, DepthHeight, 96, 96,
                                                               PixelFormats.Bgr32, null, colorPixels, DepthWidth * 4);
            }
            catch (Exception ex) {
                Trace.WriteLine(ex.Message);
            }
            finally {
                processingFrame = false;
            }
        }
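
Handlers like Example #3 are typically driven by the sensor's DepthFrameReady event, with a guard flag so frames are dropped while one is still being processed. A sketch of that wiring, assuming a WPF host and Kinect SDK 1.x; the processingFrame field matches the one cleared in the finally block above:

        private void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame frame = e.OpenDepthImageFrame())
            {
                if (frame == null || processingFrame)
                {
                    return;
                }

                var depthPixels = new DepthImagePixel[frame.PixelDataLength];
                frame.CopyDepthImagePixelDataTo(depthPixels);

                // Drop further frames until this one has been processed
                processingFrame = true;
                Dispatcher.BeginInvoke((Action)(() => ProcessDepthData(depthPixels)));
            }
        }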
Example #4
        /// <summary>
        /// Process the depth input
        /// </summary>
        /// <param name="depthPixels">The depth data array to be processed</param>
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            Debug.Assert(null != this.volume, "volume should be initialized");
            Debug.Assert(null != this.shadedSurfaceColorFrame, "shaded surface should be initialized");
            Debug.Assert(null != this.colorBitmap, "color bitmap should be initialized");

            try
            {
                // Convert the depth image frame to depth float image frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    (int)ImageSize.Width,
                    (int)ImageSize.Height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful
                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    this.volume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose
                // will be unchanged.
                if (!trackingSucceeded)
                {
                    this.trackingErrorCount++;

                    // Show tracking error on status bar
                    this.statusBarText.Text = Properties.Resources.CameraTrackingFailed;
                }
                else
                {
                    Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();

                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.trackingErrorCount     = 0;
                }

                if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
                {
                    // Auto Reset due to bad tracking
                    this.statusBarText.Text = Properties.Resources.ResetVolume;

                    // Automatically Clear Volume and reset tracking if tracking fails
                    this.ResetReconstruction();
                }

                // Calculate the point cloud
                this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame,
                    null);

                this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);

                // Write the pixel data into our bitmap
                this.colorBitmap.WritePixels(
                    new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                    this.colorPixels,
                    this.colorBitmap.PixelWidth * sizeof(int),
                    0);

                // The input frame was processed successfully, increase the processed frame count
                ++this.processedFrameCount;
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
            }
            finally
            {
                this.processingFrame = false;
            }
        }
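
Example #4 calls a ResetReconstruction helper that is not shown. A plausible minimal version following the same pattern as the SDK samples, using only fields that appear above:

        private void ResetReconstruction()
        {
            // Reset the camera pose, then clear the volume
            this.worldToCameraTransform = Matrix4.Identity;

            if (this.volume != null)
            {
                this.volume.ResetReconstruction(this.worldToCameraTransform);
            }

            this.trackingErrorCount  = 0;
            this.processedFrameCount = 0;
        }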
Example #5
        private bool TrackIntegrate(DepthImagePixel[] depthPixels, DepthImageFormat depthFormat)
        {
            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            // Convert the depth image frame to depth float image frame
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels,
                (int)depthSize.Width,
                (int)depthSize.Height,
                this.depthFloatBuffer,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                false);

            bool trackingSucceeded = this.volume.AlignDepthFloatToReconstruction(
                depthFloatBuffer,
                FusionDepthProcessor.DefaultAlignIterationCount,
                residualFloatBuffer,
                out _alignmentEnergy,
                volume.GetCurrentWorldToCameraTransform());

            // An alignment energy of exactly 0.0 could additionally be treated
            // as a tracking failure.

            // Alternative (not used here): a single volume.ProcessFrame call would
            // both align the camera pose and integrate the frame; this method splits
            // alignment and integration so integration can be paused independently.

            // If camera tracking failed, no data integration or raycast for reference
            // point cloud will have taken place, and the internal camera pose
            // will be unchanged.
            if (!trackingSucceeded)
            {
                this.trackingErrorCount++;

                // Show tracking error on status bar
                FusionStatusMessage = Properties.Resources.CameraTrackingFailed;
                _audioManager.State = AudioState.Error;
            }
            else
            {
                ProcessResidualImage();

                this.worldToCameraTransform = volume.GetCurrentWorldToCameraTransform();

                if (!IsIntegrationPaused)
                {
                    this.volume.IntegrateFrame(depthFloatBuffer, IntegrationWeight, this.worldToCameraTransform);
                }

                this.trackingErrorCount = 0;
            }

            if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
            {
                // Auto Reset due to bad tracking
                FusionStatusMessage = Properties.Resources.ResetVolume;

                // Automatically Clear Volume and reset tracking if tracking fails
                this.ResetReconstruction(_currentVolumeCenter);
            }
            return trackingSucceeded;
        }
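
After enough frames have been tracked and integrated, the volume can be exported as geometry. A minimal sketch, assuming the same volume field; Reconstruction.CalculateMesh takes the voxel sampling step, where 1 means full resolution:

        private void ExportMesh()
        {
            // Larger voxel steps trade mesh detail for speed
            using (Mesh mesh = this.volume.CalculateMesh(1))
            {
                var vertices  = mesh.GetVertices();        // one Vector3 per vertex
                var normals   = mesh.GetNormals();         // one normal per vertex
                var triangles = mesh.GetTriangleIndexes(); // three indices per face

                // Write the data out as needed, e.g. to an .obj or .stl file.
            }
        }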