Code Example #1
        private void SetupVariables()
        {
            mapper = sensor.CoordinateMapper;

            //Calculate integers used for allocation
            int depthImageSize = depthWidth * depthHeight;

            downsampledWidth  = depthWidth / DownsampleFactor;
            downsampledHeight = depthHeight / DownsampleFactor;
            int colorImageByteSize        = colorWidth * colorHeight * sizeof(int);
            int downsampledDepthImageSize = downsampledWidth * downsampledHeight;

            //Allocate frames
            resampledColorFrameDepthAligned = new FusionColorImageFrame(depthWidth, depthHeight);
            depthFloatFrame                              = new FusionFloatImageFrame(depthWidth, depthHeight);
            downsampledDepthFloatFrame                   = new FusionFloatImageFrame(downsampledWidth, downsampledHeight);
            downsampledSmoothDepthFloatFrame             = new FusionFloatImageFrame(downsampledWidth, downsampledHeight);
            downsampledDepthPointCloudFrame              = new FusionPointCloudImageFrame(downsampledWidth, downsampledHeight);
            downsampledRaycastPointCloudFrame            = new FusionPointCloudImageFrame(downsampledWidth, downsampledHeight);
            downsampledDeltaFromReferenceFrameColorFrame = new FusionColorImageFrame(downsampledWidth, downsampledHeight);

            //Allocate arrays
            depthImagePixels            = new ushort[depthImageSize];
            downsampledDepthImagePixels = new float[downsampledDepthImageSize];
            downsampledDeltaFromReferenceColorPixels = new int[downsampledDepthImageSize];
            deltaFromReferenceFramePixelsArgb        = new int[depthImageSize];
            colorCoordinates = new ColorSpacePoint[depthImageSize];
            resampledColorImagePixelsAlignedToDepth = new int[depthImageSize];
            depthVisibilityTestMapWidth             = colorWidth / ColorDownsampleFactor;
            depthVisibilityTestMapHeight            = colorHeight / ColorDownsampleFactor;
            depthVisibilityTestMap = new ushort[depthVisibilityTestMapWidth * depthVisibilityTestMapHeight];
            colorImagePixels       = new byte[colorImageByteSize];
        }
Code Example #2
        protected virtual void Dispose(bool disposing)
        {
            if (!disposed)
            {
                if (depthFloatBuffer != null)
                {
                    depthFloatBuffer.Dispose();
                    depthFloatBuffer = null;
                }

                if (pointCloudBuffer != null)
                {
                    pointCloudBuffer.Dispose();
                    pointCloudBuffer = null;
                }

                if (shadedSurfaceColorFrame != null)
                {
                    shadedSurfaceColorFrame.Dispose();
                    shadedSurfaceColorFrame = null;
                }

                if (volume != null)
                {
                    volume.Dispose();
                    volume = null;
                }

                disposed = true;
            }
        }
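Code Example #2 shows only the protected Dispose(bool) core of the standard dispose pattern. The fragment below is a minimal sketch, not taken from the original source, of the public Dispose() entry point that conventionally forwards to it and suppresses finalization.

        // Minimal sketch (assumption, not from the original class): the public IDisposable
        // entry point that forwards to the Dispose(bool) shown above.
        public void Dispose()
        {
            Dispose(true);

            // The frame buffers above wrap native Kinect Fusion resources, so once they are
            // released there is no need for the garbage collector to run a finalizer.
            GC.SuppressFinalize(this);
        }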
Code Example #3
        public ReconstructionController(KinectSensor sensor)
        {
            Contract.Requires(sensor != null);

            this.syncContext = SynchronizationContext.Current;
            this.sensor      = sensor;

            var rparams = new ReconstructionParameters(128, 256, 256, 256);

            reconstruction              = Reconstruction.FusionCreateReconstruction(rparams, ReconstructionProcessor.Amp, -1, worldToCameraTransform);
            worldToVolumeTransform      = reconstruction.GetCurrentWorldToVolumeTransform();
            worldToVolumeTransform.M43 -= MIN_DEPTH * rparams.VoxelsPerMeter;
            reconstruction.ResetReconstruction(worldToCameraTransform, worldToVolumeTransform);

            var depthFrameDesc = sensor.DepthFrameSource.FrameDescription;

            var totalPixels = depthFrameDesc.Width * depthFrameDesc.Height;

            rawDepthData  = new ushort[totalPixels];
            bodyIndexData = new byte[totalPixels];
            SurfaceBitmap = new ThreadSafeBitmap(depthFrameDesc.Width, depthFrameDesc.Height);

            var intrinsics = sensor.CoordinateMapper.GetDepthCameraIntrinsics();
            var cparams    = new CameraParameters(
                intrinsics.FocalLengthX / depthFrameDesc.Width,
                intrinsics.FocalLengthY / depthFrameDesc.Height,
                intrinsics.PrincipalPointX / depthFrameDesc.Width,
                intrinsics.PrincipalPointY / depthFrameDesc.Height);

            floatDepthFrame = new FusionFloatImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
            pointCloudFrame = new FusionPointCloudImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
            surfaceFrame    = new FusionColorImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
        }
Code Example #4
        public PointCloudProcessor(Engine e)
        {
            this.engine = e;
            var resampler = e.Resampler;

            this.volume = e.FusionVolume.Reconstruction;
            DownsampledSmoothDepthFloatFrame  = new FusionFloatImageFrame(resampler.DownsampledWidth, resampler.DownsampledHeight);
            DownsampledRaycastPointCloudFrame = new FusionPointCloudImageFrame(resampler.DownsampledWidth, resampler.DownsampledHeight);
            DownsampledDepthPointCloudFrame   = new FusionPointCloudImageFrame(resampler.DownsampledWidth, resampler.DownsampledHeight);
            DepthPointCloudFrame   = new FusionPointCloudImageFrame(KinectSettings.DEPTH_WIDTH, KinectSettings.DEPTH_HEIGHT);
            RaycastPointCloudFrame = new FusionPointCloudImageFrame(KinectSettings.DEPTH_WIDTH, KinectSettings.DEPTH_HEIGHT);
        }
Code Example #5
        private void InitializeKinectFusion()
        {
            // Initialize Kinect Fusion
            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            volume = Reconstruction.FusionCreateReconstruction(volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity);

            // Create the conversion buffers
            depthFloatBuffer        = new FusionFloatImageFrame(DepthWidth, DepthHeight);
            pointCloudBuffer        = new FusionPointCloudImageFrame(DepthWidth, DepthHeight);
            shadedSurfaceColorFrame = new FusionColorImageFrame(DepthWidth, DepthHeight);

            // Reset the reconstruction
            volume.ResetReconstruction(Matrix4.Identity);
        }
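The buffers allocated in InitializeKinectFusion() are consumed once per depth frame. The sketch below outlines that per-frame loop, modeled on the calls used in Code Example #12 (DepthToDepthFloatFrame, ProcessFrame, CalculatePointCloud, ShadePointCloud); the method name, the depth clip distances, and the use of the SDK's default iteration and integration constants are assumptions rather than values from the original project.

        // Hedged sketch of the per-frame update that would follow InitializeKinectFusion().
        // The ushort[] overload of DepthToDepthFloatFrame matches Code Example #12; the clip
        // distances (0.5 m - 4.0 m) are placeholder values.
        private void ProcessDepthFrame(ushort[] depthPixels)
        {
            // Convert the raw depth data into a float depth frame usable by Fusion
            volume.DepthToDepthFloatFrame(depthPixels, depthFloatBuffer, 0.5f, 4.0f, false);

            // Track the camera pose and integrate the frame into the volume
            float alignmentEnergy;
            bool tracked = volume.ProcessFrame(
                depthFloatBuffer,
                FusionDepthProcessor.DefaultAlignIterationCount,
                FusionDepthProcessor.DefaultIntegrationWeight,
                out alignmentEnergy,
                volume.GetCurrentWorldToCameraTransform());

            if (!tracked)
            {
                // Tracking lost; a real implementation would count errors or reset the volume
                return;
            }

            // Raycast the volume from the current pose and shade it for display
            volume.CalculatePointCloud(pointCloudBuffer, volume.GetCurrentWorldToCameraTransform());
            FusionDepthProcessor.ShadePointCloud(
                pointCloudBuffer, volume.GetCurrentWorldToCameraTransform(), shadedSurfaceColorFrame, null);
        }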
Code Example #6
        void InitializeFusion()
        {
            // Reconstruction Parameters
            float voxelPerMeter = 256;
            int   voxelsX       = 512;
            int   voxelsY       = 384;
            int   voxelsZ       = 512;

            reconstructionParameters = new ReconstructionParameters(voxelPerMeter, voxelsX, voxelsY, voxelsZ);

            // Set the initial camera pose to the world coordinate origin
            worldToCameraTransform = Matrix4.Identity;

            // Create the Fusion Reconstruction object
            reconstruction = ColorReconstruction.FusionCreateReconstruction(reconstructionParameters, ReconstructionProcessor.Amp, -1, worldToCameraTransform);

            // Create the Fusion image frames
            cameraParameters      = CameraParameters.Defaults;
            depthImageFrame       = new FusionFloatImageFrame(depthWidth, depthHeight, cameraParameters);
            smoothDepthImageFrame = new FusionFloatImageFrame(depthWidth, depthHeight, cameraParameters);
            colorImageFrame       = new FusionColorImageFrame(depthWidth, depthHeight, cameraParameters);
            pointCloudImageFrame  = new FusionPointCloudImageFrame(depthWidth, depthHeight, cameraParameters);
            surfaceImageFrame     = new FusionColorImageFrame(depthWidth, depthHeight, cameraParameters);
        }
Code Example #7
File: KinectFusionNode.cs Project: kratter/dx11-vvvv
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter   = this.FInVPM[0];

            if (this.FTextureOutput[0] == null)
            {
                this.FTextureOutput[0] = new DX11Resource <DX11DynamicTexture2D>();
            }
            if (this.FPCOut[0] == null)
            {
                this.FPCOut[0] = new DX11Resource <IDX11ReadableStructureBuffer>();
            }
            if (this.FGeomOut[0] == null)
            {
                this.FGeomOut[0] = new DX11Resource <DX11IndexedGeometry>();
            }

            if (this.FOutVoxels[0] == null)
            {
                this.FOutVoxels[0] = new DX11Resource <IDX11ReadableStructureBuffer>();
            }

            if (this.FInExport[0])
            {
                this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource <DX11IndexedGeometry>();
            }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);
                }
            }

            if (this.FInVPM.IsChanged || this.FInVX.IsChanged || this.FInVY.IsChanged || this.FInVZ.IsChanged)
            {
                if (this.volume != null)
                {
                    this.volume.Dispose();
                }

                var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                this.worldToCameraTransform = Matrix4.Identity;

                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                this.ResetReconstruction();
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset)
                {
                    this.ResetReconstruction();
                }
            }
        }
Code Example #8
File: VolumeBuilder.cs Project: git-thinh/Arcinect
        public VolumeBuilder(Scanner source, Dispatcher dispatcher)
        {
            if (source == null)
            {
                throw new ArgumentNullException("source");
            }

            this.source     = source;
            this.dispatcher = dispatcher;

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            // Map the world X axis to the blue channel, Y axis to the green channel and Z axis to the red channel,
            // normalizing each to the range [0, 1]. We also add a shift of 0.5 to both the X and Y channels,
            // because the world origin is located at the center of the front face of the volume,
            // so negative x,y world vertex locations must be mapped to positive color values.
            this.worldToBGRTransform     = Matrix4.Identity;
            this.worldToBGRTransform.M11 = settings.VoxelsPerMeter / settings.VoxelsX;
            this.worldToBGRTransform.M22 = settings.VoxelsPerMeter / settings.VoxelsY;
            this.worldToBGRTransform.M33 = settings.VoxelsPerMeter / settings.VoxelsZ;
            this.worldToBGRTransform.M41 = 0.5f;
            this.worldToBGRTransform.M42 = 0.5f;
            this.worldToBGRTransform.M44 = 1.0f;

            var volumeParameters = new ReconstructionParameters(settings.VoxelsPerMeter, settings.VoxelsX, settings.VoxelsY, settings.VoxelsZ);

            this.volume = ColorReconstruction.FusionCreateReconstruction(volumeParameters, ReconstructionProcessor.Amp, -1, this.worldToCameraTransform);

            var depthWidth  = this.source.Frame.DepthWidth;
            var depthHeight = this.source.Frame.DepthHeight;
            var depthSize   = depthWidth * depthHeight;

            this.depthFloatFrame                 = new FusionFloatImageFrame(depthWidth, depthHeight);
            this.smoothDepthFloatFrame           = new FusionFloatImageFrame(depthWidth, depthHeight);
            this.resampledColorFrame             = new FusionColorImageFrame(depthWidth, depthHeight);
            this.resampledColorFrameDepthAligned = new FusionColorImageFrame(depthWidth, depthHeight);
            this.deltaFromReferenceFrame         = new FusionFloatImageFrame(depthWidth, depthHeight);
            this.shadedSurfaceFrame              = new FusionColorImageFrame(depthWidth, depthHeight);
            this.raycastPointCloudFrame          = new FusionPointCloudImageFrame(depthWidth, depthHeight);
            this.depthPointCloudFrame            = new FusionPointCloudImageFrame(depthWidth, depthHeight);

            var downsampledDepthWidth  = depthWidth / settings.DownsampleFactor;
            var downsampledDepthHeight = depthHeight / settings.DownsampleFactor;
            var downsampledDepthSize   = downsampledDepthWidth * downsampledDepthHeight;

            this.downsampledDepthFloatFrame                   = new FusionFloatImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledSmoothDepthFloatFrame             = new FusionFloatImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledRaycastPointCloudFrame            = new FusionPointCloudImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledDepthPointCloudFrame              = new FusionPointCloudImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledDeltaFromReferenceFrameColorFrame = new FusionColorImageFrame(downsampledDepthWidth, downsampledDepthHeight);

            this.resampledColorData   = new int[depthSize];
            this.downsampledDepthData = new float[downsampledDepthSize];
            this.downsampledDeltaFromReferenceColorPixels = new int[downsampledDepthSize];
            this.deltaFromReferenceFramePixelsArgb        = new int[depthSize];
            this.shadedSurfaceFramePixelsArgb             = new int[this.shadedSurfaceFrame.PixelDataLength];

            this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

            this.volumeBitmap = new WriteableBitmap(depthWidth, depthHeight, settings.DefaultSystemDPI, settings.DefaultSystemDPI, PixelFormats.Bgr32, null);

            // Create a camera pose finder with default parameters
            this.cameraPoseFinder = CameraPoseFinder.FusionCreateCameraPoseFinder(CameraPoseFinderParameters.Defaults);

            this.workerThread = new Thread(WorkerThreadProc);
            this.workerThread.Start();
            this.source.Frame.OnDataUpdate += OnFrameDataUpdate;
        }
Code Example #9
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range = DepthRange.Near;
                checkBoxNearMode.IsChecked    = true;
            }
            catch
            {
                // device not near mode capable
            }

            // Initialize and start the FPS timer
            this.fpsTimer          = new DispatcherTimer();
            this.fpsTimer.Tick    += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            // Reset the reconstruction
            this.ResetReconstruction();
        }
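Several of the samples above call a ResetReconstruction() helper that is never shown. The sketch below reconstructs a plausible version from what these examples do expose: the identity camera pose, the defaultWorldToVolumeTransform captured after FusionCreateReconstruction, and the min-depth translation of M43 applied in Code Example #3. The minDepthClip field name is an assumption.

        // Hedged sketch of ResetReconstruction(); modeled on the world-to-volume translation
        // in Code Example #3, not taken verbatim from this project.
        private void ResetReconstruction()
        {
            this.worldToCameraTransform = Matrix4.Identity;

            // Start from the default transform, then push the volume minDepthClip metres
            // in front of the camera so reconstruction does not begin at the sensor itself.
            Matrix4 worldToVolumeTransform = this.defaultWorldToVolumeTransform;
            if (this.translateResetPoseByMinDepthThreshold)
            {
                worldToVolumeTransform.M43 -= this.minDepthClip * VoxelsPerMeter;
            }

            this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolumeTransform);
        }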
Code Example #10
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Check to ensure suitable DirectX11 compatible hardware exists before initializing Kinect Fusion
            try
            {
                string deviceDescription  = string.Empty;
                string deviceInstancePath = string.Empty;
                int    deviceMemory       = 0;

                FusionDepthProcessor.GetDeviceInfo(
                    ProcessorType, DeviceToUse, out deviceDescription, out deviceInstancePath, out deviceMemory);
            }
            catch (IndexOutOfRangeException)
            {
                // Thrown when index is out of range for processor type or there is no DirectX11 capable device installed.
                // As we set -1 (auto-select default) for the DeviceToUse above, this indicates that there is no DirectX11
                // capable device. The options for users in this case are to either install a DirectX11 capable device
                // (see documentation for recommended GPUs) or to switch to non-real-time CPU based reconstruction by
                // changing ProcessorType to ReconstructionProcessor.Cpu
                this.statusBarText.Text = Properties.Resources.NoDirectX11CompatibleDeviceOrInvalidDeviceIndex;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }

            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Create local depth pixels buffer
            this.depthImagePixels = new DepthImagePixel[this.frameDataLength];

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    // Reset the reconstruction if we need to add a custom world-volume transformation
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range   = DepthRange.Near;
                this.checkBoxNearMode.IsChecked = true;
            }
            catch (InvalidOperationException)
            {
                // Near mode not supported on device, silently fail during initialization
                this.checkBoxNearMode.IsEnabled = false;
            }

            // Initialize and start the FPS timer
            this.fpsTimer          = new DispatcherTimer();
            this.fpsTimer.Tick    += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            this.lastFPSTimestamp = DateTime.UtcNow;
        }
Code Example #11
File: MainWindow.xaml.cs Project: noa99kee/K4W2-Book
        void InitializeFusion()
        {
            // Reconstruction Parameters
            float voxelPerMeter = 256;
            int voxelsX = 512;
            int voxelsY = 384;
            int voxelsZ = 512;
            reconstructionParameters = new ReconstructionParameters( voxelPerMeter, voxelsX, voxelsY, voxelsZ );

            // Set the initial camera pose to the world coordinate origin
            worldToCameraTransform = Matrix4.Identity;

            // Create the Fusion Reconstruction object
            reconstruction = ColorReconstruction.FusionCreateReconstruction( reconstructionParameters, ReconstructionProcessor.Amp, -1, worldToCameraTransform );

            // Create the Fusion image frames
            cameraParameters = CameraParameters.Defaults;
            depthImageFrame = new FusionFloatImageFrame( depthWidth, depthHeight, cameraParameters );
            smoothDepthImageFrame = new FusionFloatImageFrame( depthWidth, depthHeight, cameraParameters );
            colorImageFrame = new FusionColorImageFrame( depthWidth, depthHeight, cameraParameters );
            pointCloudImageFrame = new FusionPointCloudImageFrame( depthWidth, depthHeight, cameraParameters );
            surfaceImageFrame = new FusionColorImageFrame( depthWidth, depthHeight, cameraParameters );
        }
Code Example #12
        /// <summary>
        /// processes the depth data package into the kinect fusion volume
        /// </summary>
        /// <param name="pKdp">the data package</param>
        void processDepthData(KinectDataPackage pKdp, System.Threading.CancellationToken pCancelToken)
        {
            lock (canWorkLock)
            {
                Log.LogManager.updateAlgorithmStatus("Kinect Fusion integration");
                this.volume.ResetReconstruction(Matrix4.Identity);

                int picturesIntegrated = 0;
                foreach (ushort[] pDepth in pKdp.rawDepthData)
                {
                    pCancelToken.ThrowIfCancellationRequested();
                    WriteableBitmap            bitmap                  = new WriteableBitmap(this.depthFloatFrame.Width, this.depthFloatFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);
                    FusionFloatImageFrame      depthFloatBuffer        = new FusionFloatImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    FusionPointCloudImageFrame pointCloudBuffer        = new FusionPointCloudImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    FusionColorImageFrame      shadedSurfaceColorFrame = new FusionColorImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
                    int[] voxelPixels = new int[this.depthFloatFrame.Width * this.depthFloatFrame.Height];

                    this.volume.DepthToDepthFloatFrame(
                        pDepth,
                        depthFloatBuffer,
                        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.minDepthClip,
                        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.maxDepthClip,
                        false);
                    float alignmentValue;
                    bool  trackingSucceeded = this.volume.ProcessFrame(depthFloatBuffer,
                                                                       Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.iterationCount,
                                                                       Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.integrationWeight,
                                                                       out alignmentValue,
                                                                       volume.GetCurrentWorldToCameraTransform());

                    // If camera tracking failed, no data integration or raycast for reference
                    // point cloud will have taken place, and the internal camera pose
                    // will be unchanged.
                    if (!trackingSucceeded)
                    {
                        trackingErrorCount++;
                    }
                    else
                    {
                        Matrix4 calculatedCameraPose = volume.GetCurrentWorldToCameraTransform();

                        // Set the camera pose and reset tracking errors
                        worldToCameraTransform = calculatedCameraPose;
                        trackingErrorCount     = 0;
                    }

                    // Calculate the point cloud
                    volume.CalculatePointCloud(pointCloudBuffer, worldToCameraTransform);

                    // Shade point cloud and render
                    FusionDepthProcessor.ShadePointCloud(
                        pointCloudBuffer,
                        worldToCameraTransform,
                        null,
                        shadedSurfaceColorFrame
                        );

                    shadedSurfaceColorFrame.CopyPixelDataTo(voxelPixels);

                    bitmap.WritePixels(
                        new Int32Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight),
                        voxelPixels,
                        bitmap.PixelWidth * sizeof(int),
                        0);

                    bitmap.Freeze();
                    OnNewFusionPictureEvent.BeginInvoke(pKdp.usedConfig.ID, bitmap, null, null);

                    picturesIntegrated++;
                    Log.LogManager.writeLogDebug("[DataIntegration:Reconstruction] " + picturesIntegrated + " of " + pKdp.rawDepthData.Count + " Pictures integrated");
                }

                // If the request was a calibration request, export the meshes
                if (pKdp.usedConfig.clientRequestObject.requestType == ClientConfigObject.RequestType.calibration)
                {
                    exportMesh(volume, pKdp, false);
                    Log.LogManager.writeLog("[DataIntegration:Reconstruction] Mesh of " + pKdp.usedConfig.name + " exported.");
                    return;
                }

                // Broadcast the new point cloud
                PointCloud p = new PointCloud(volume);
                p.ConfigObject = pKdp.usedConfig;
                OnNewPointCloudEvent.BeginInvoke(p, null, null);
                Log.LogManager.writeLog("[DataIntegration:Reconstruction] All pictures of " + pKdp.usedConfig.name + " integrated");
                Log.LogManager.updateAlgorithmStatus("Done");
            }
        }
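The exportMesh() helper called for calibration requests is not included in this example. The following is a hedged sketch of a typical mesh export with the Kinect Fusion API: CalculateMesh, GetVertices and GetTriangleIndexes are standard SDK members, while the Wavefront .obj output, the axis flip, and the method and parameter names are illustrative assumptions.

        // Hedged sketch (not the project's exportMesh): extract a triangle mesh from the
        // reconstruction volume and write it as a simple Wavefront .obj file.
        private void ExportMeshSketch(Reconstruction volume, string path)
        {
            // voxelStep = 1 extracts the mesh at full volume resolution
            using (var mesh = volume.CalculateMesh(1))
            {
                var vertices = mesh.GetVertices();        // ReadOnlyCollection<Vector3>
                var indices  = mesh.GetTriangleIndexes(); // ReadOnlyCollection<int>

                using (var writer = new System.IO.StreamWriter(path))
                {
                    foreach (var v in vertices)
                    {
                        // Flip Y and Z so the mesh appears upright in typical .obj viewers (assumption)
                        writer.WriteLine("v {0} {1} {2}", v.X, -v.Y, -v.Z);
                    }

                    for (int i = 0; i < indices.Count; i += 3)
                    {
                        // .obj face indices are 1-based
                        writer.WriteLine("f {0} {1} {2}", indices[i] + 1, indices[i + 1] + 1, indices[i + 2] + 1);
                    }
                }
            }
        }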
Code Example #13
File: KinectFusionNode.cs Project: sunep/dx11-vvvv
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter = this.FInVPM[0];

            if (this.FTextureOutput[0] == null) { this.FTextureOutput[0] = new DX11Resource<DX11DynamicTexture2D>(); }
            if (this.FPCOut[0] == null) { this.FPCOut[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }
            if (this.FGeomOut[0] == null) { this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FOutVoxels[0] == null) { this.FOutVoxels[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }

            if (this.FInExport[0]) { this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);

                }
            }

            if (this.FInVPM.IsChanged || this.FInVX.IsChanged || this.FInVY.IsChanged || this.FInVZ.IsChanged)
            {
                if (this.volume != null)
                {
                    this.volume.Dispose();
                }

                var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                this.worldToCameraTransform = Matrix4.Identity;

                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                this.ResetReconstruction();
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset) { this.ResetReconstruction(); }
            }
        }
Code Example #14
        private void InitFusion()
        {
            if (_isFusionInitialized)
            {
                return;
            }

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var depthSize   = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool <FusionWorkItem, DepthImageFormat>(5, depthFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue <FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength   = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;


            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer    = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData            = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            _audioManager.Start();
        }