Example #1
        public ReconstructionController(KinectSensor sensor)
        {
            Contract.Requires(sensor != null);

            this.syncContext = SynchronizationContext.Current;
            this.sensor      = sensor;

            // Reconstruction volume: 128 voxels per meter with a 256^3 voxel grid, i.e. a 2 m cube.
            var rparams = new ReconstructionParameters(128, 256, 256, 256);

            // Create the reconstruction volume on the GPU (Amp processor, default device).
            reconstruction              = Reconstruction.FusionCreateReconstruction(rparams, ReconstructionProcessor.Amp, -1, worldToCameraTransform);

            // Shift the volume along Z so that data closer than MIN_DEPTH falls outside it,
            // then reset the reconstruction with the adjusted transform.
            worldToVolumeTransform      = reconstruction.GetCurrentWorldToVolumeTransform();
            worldToVolumeTransform.M43 -= MIN_DEPTH * rparams.VoxelsPerMeter;
            reconstruction.ResetReconstruction(worldToCameraTransform, worldToVolumeTransform);

            var depthFrameDesc = sensor.DepthFrameSource.FrameDescription;

            var totalPixels = depthFrameDesc.Width * depthFrameDesc.Height;

            rawDepthData  = new ushort[totalPixels];
            bodyIndexData = new byte[totalPixels];
            SurfaceBitmap = new ThreadSafeBitmap(depthFrameDesc.Width, depthFrameDesc.Height);

            // Fusion expects camera parameters normalized to image-relative units,
            // so divide the intrinsics by the frame dimensions.
            var intrinsics = sensor.CoordinateMapper.GetDepthCameraIntrinsics();
            var cparams    = new CameraParameters(
                intrinsics.FocalLengthX / depthFrameDesc.Width,
                intrinsics.FocalLengthY / depthFrameDesc.Height,
                intrinsics.PrincipalPointX / depthFrameDesc.Width,
                intrinsics.PrincipalPointY / depthFrameDesc.Height);

            // Frame buffers for the depth-float, point-cloud, and shaded-surface stages.
            floatDepthFrame = new FusionFloatImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
            pointCloudFrame = new FusionPointCloudImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
            surfaceFrame    = new FusionColorImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
        }
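
This first example targets the Kinect v2 SDK (Microsoft.Kinect with the Microsoft.Kinect.Fusion assembly). A minimal wiring sketch follows, assuming the standard v2 startup sequence; ReconstructionController.ProcessFrame is a hypothetical entry point introduced here for illustration only:

        // Hypothetical wiring for the controller above. The sensor and reader
        // calls are the standard Kinect v2 API; controller.ProcessFrame is an
        // assumed method, not shown in the example.
        var sensor = KinectSensor.GetDefault();
        sensor.Open();

        var controller = new ReconstructionController(sensor);
        var reader     = sensor.DepthFrameSource.OpenReader();

        reader.FrameArrived += (s, e) =>
        {
            using (var frame = e.FrameReference.AcquireDepthFrame())
            {
                if (frame != null)
                {
                    controller.ProcessFrame(frame); // assumed entry point
                }
            }
        };

The second example below is independent of the first; it uses the Kinect v1 SDK (Microsoft.Kinect 1.x with Microsoft.Kinect.Toolkit.Fusion).
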
        private void InitializeKinectFusion()
        {
            // Initialize Kinect Fusion
            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            volume = Reconstruction.FusionCreateReconstruction(volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity);

            // Create the conversion buffers
            depthFloatBuffer        = new FusionFloatImageFrame(DepthWidth, DepthHeight);
            pointCloudBuffer        = new FusionPointCloudImageFrame(DepthWidth, DepthHeight);
            shadedSurfaceColorFrame = new FusionColorImageFrame(DepthWidth, DepthHeight);

            // Reset the reconstruction
            volume.ResetReconstruction(Matrix4.Identity);
        }
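
The constants referenced above (VoxelsPerMeter, VoxelResolutionX/Y/Z, DepthWidth, DepthHeight) are not shown in the snippet. Plausible definitions, assuming the defaults from Microsoft's Kinect Fusion samples and the v1 sensor's 640x480 depth stream:

        // Assumed values; VoxelsPerMeter = 256 with a 512x384x512 grid gives a
        // reconstruction volume of 2.0 x 1.5 x 2.0 meters.
        private const int VoxelsPerMeter   = 256;
        private const int VoxelResolutionX = 512;
        private const int VoxelResolutionY = 384;
        private const int VoxelResolutionZ = 512;
        private const int DepthWidth       = 640;
        private const int DepthHeight      = 480;
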
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            try
            {
                if (processingSaveFile)
                {
                    return;
                }

                // Convert the DepthImagePixel data to a depth-float frame
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    DepthWidth,
                    DepthHeight,
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // Process the frame (align the camera pose and integrate the depth data)
                bool trackingSucceeded = volume.ProcessFrame(
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    volume.GetCurrentWorldToCameraTransform());
                if (!trackingSucceeded)
                {
                    // Reset after a run of tracking errors, e.g. when the Kinect
                    // or the target was moved too quickly.
                    trackingErrorCount++;
                    if (trackingErrorCount >= 100)
                    {
                        Trace.WriteLine(@"tracking error.");

                        trackingErrorCount = 0;
                        volume.ResetReconstruction(Matrix4.Identity);
                    }

                    return;
                }

                // Retrieve the point cloud (raycast the volume from the current camera pose)
                volume.CalculatePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform());

                // Shade the point cloud into a 2D image
                FusionDepthProcessor.ShadePointCloud(
                    pointCloudBuffer,
                    volume.GetCurrentWorldToCameraTransform(),
                    shadedSurfaceColorFrame,
                    null);

                // Write the 2D image out to a bitmap
                var colorPixels = new int[depthPixels.Length];
                shadedSurfaceColorFrame.CopyPixelDataTo(colorPixels);

                ImageKinectFusion.Source = BitmapSource.Create(DepthWidth, DepthHeight, 96, 96,
                                                               PixelFormats.Bgr32, null, colorPixels, DepthWidth * 4);
            }
            catch (Exception ex)
            {
                Trace.WriteLine(ex.Message);
            }
            finally
            {
                processingFrame = false;
            }
        }
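
ProcessDepthData clears processingFrame in its finally block, but nothing shown here sets the flag. A typical caller, sketched under the Kinect v1 event model (the handler name and the dispatch to the UI thread are assumptions):

        // Hypothetical caller. The flag prevents frames from piling up while a
        // previous frame is still being fused; it is cleared in the finally
        // block of ProcessDepthData.
        private void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            if (processingFrame)
            {
                return;
            }

            using (var frame = e.OpenDepthImageFrame())
            {
                if (frame == null)
                {
                    return;
                }

                processingFrame = true;
                var depthPixels = frame.GetRawPixelData();
                Dispatcher.BeginInvoke((Action)(() => ProcessDepthData(depthPixels)));
            }
        }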