Example #1
        public ReconstructionController(KinectSensor sensor)
        {
            Contract.Requires(sensor != null);

            this.syncContext = SynchronizationContext.Current;
            this.sensor      = sensor;

            var rparams = new ReconstructionParameters(128, 256, 256, 256);

            reconstruction              = Reconstruction.FusionCreateReconstruction(rparams, ReconstructionProcessor.Amp, -1, worldToCameraTransform);
            worldToVolumeTransform      = reconstruction.GetCurrentWorldToVolumeTransform();
            worldToVolumeTransform.M43 -= MIN_DEPTH * rparams.VoxelsPerMeter;
            reconstruction.ResetReconstruction(worldToCameraTransform, worldToVolumeTransform);

            var depthFrameDesc = sensor.DepthFrameSource.FrameDescription;

            var totalPixels = depthFrameDesc.Width * depthFrameDesc.Height;

            rawDepthData  = new ushort[totalPixels];
            bodyIndexData = new byte[totalPixels];
            SurfaceBitmap = new ThreadSafeBitmap(depthFrameDesc.Width, depthFrameDesc.Height);

            var intrinsics = sensor.CoordinateMapper.GetDepthCameraIntrinsics();
            var cparams    = new CameraParameters(
                intrinsics.FocalLengthX / depthFrameDesc.Width,
                intrinsics.FocalLengthY / depthFrameDesc.Height,
                intrinsics.PrincipalPointX / depthFrameDesc.Width,
                intrinsics.PrincipalPointY / depthFrameDesc.Height);

            floatDepthFrame = new FusionFloatImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
            pointCloudFrame = new FusionPointCloudImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
            surfaceFrame    = new FusionColorImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
        }
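A minimal sketch of the intrinsics normalization used in the constructor above, assuming hypothetical Kinect v2 depth values (the constructor divides each intrinsic by the frame width or height, so CameraParameters receives focal lengths and the principal point as fractions of the image size):

            // Hypothetical depth intrinsics in pixels; real values come from
            // sensor.CoordinateMapper.GetDepthCameraIntrinsics() as in the constructor above.
            const int depthWidth = 512, depthHeight = 424;
            const float focalLengthXPixels = 366.0f;
            const float focalLengthYPixels = 366.0f;
            const float principalPointXPixels = 256.0f;
            const float principalPointYPixels = 212.0f;

            // Normalize to fractions of the frame dimensions before constructing CameraParameters.
            var cparams = new CameraParameters(
                focalLengthXPixels / depthWidth,      // ~0.715
                focalLengthYPixels / depthHeight,     // ~0.863
                principalPointXPixels / depthWidth,   // 0.5
                principalPointYPixels / depthHeight); // 0.5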
Example #2
        public KinectManager(string modelData)
        {
            poseFinder.LoadCameraPoseFinderDatabase("poseFinder.txt");
            FileStream stream = System.IO.File.OpenRead(modelData);
            // Open bracket
            char ch = (char)stream.ReadByte();

            short[] modelVolumeData = new short[X_VOXELS * Y_VOXELS * Z_VOXELS];

            StringBuilder b = new StringBuilder();
            for (int i = 0; i < modelVolumeData.Length; i++) {
                ch = (char)stream.ReadByte();
                while (ch != ']' && ch != ',') {
                    b.Append(ch);
                    ch = (char)stream.ReadByte();
                }
                modelVolumeData[i] = short.Parse(b.ToString());
                b.Clear();
                if (i % 100000 == 0) {
                    Console.WriteLine(i);
                }
            }

            /*
            string str = System.IO.File.ReadAllText(modelData).Trim();
            str = str.Substring(1, str.Length - 2);
            string[] parts = str.Split(',');
            short[] modelVolumeData = new short[parts.Length];
            for (int i = 0; i < parts.Length; i++) {
                modelVolumeData[i] = short.Parse(parts[i]);
            }*/

            ReconstructionParameters rParams = new ReconstructionParameters(VOXEL_RESOLUTION, X_VOXELS, Y_VOXELS, Z_VOXELS);
            volume = ColorReconstruction.FusionCreateReconstruction(rParams, ReconstructionProcessor.Amp, -1, Matrix4.Identity);
            volume.ImportVolumeBlock(modelVolumeData);

            foreach (KinectSensor potentialSensor in KinectSensor.KinectSensors) {
                if (potentialSensor.Status == KinectStatus.Connected) {
                    sensor = potentialSensor;
                    break;
                }
            }

            if (sensor == null) {
                Console.WriteLine("Can't find Kinect Sensor");
                return;
            }

            sensor.DepthStream.Enable(DEPTH_FORMAT);
            sensor.ColorStream.Enable(COLOR_FORMAT);
            sensor.AllFramesReady += onFrameReady;
            sensor.Start();
        }
Example #3
        public FusionVolume(Engine e, Matrix4 startingWorldToCameraTx)
        {
            this.Engine = e;
            ReconstructionParameters volParam = new ReconstructionParameters(FusionVolume.VoxelsPerMeter, FusionVolume.VoxelsX, FusionVolume.VoxelsY, FusionVolume.VoxelsZ);

            WorldToCameraTransform             = startingWorldToCameraTx;
            this.Reconstruction                = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, WorldToCameraTransform);
            this.DefaultWorldToVolumeTransform = this.Reconstruction.GetCurrentWorldToVolumeTransform();
            Renderer = new VolumeRenderer(e);
            resetter = new VolumeResetter();
            ResetReconstruction(0.4f, 0.10f);
            // this.resetter.ResetReconstruction(this, startingWorldToCameraTx);
        }
        private void InitializeKinectFusion()
        {
            // Initialize Kinect Fusion
            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            volume = Reconstruction.FusionCreateReconstruction(volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity);

            // Create the conversion buffers
            depthFloatBuffer        = new FusionFloatImageFrame(DepthWidth, DepthHeight);
            pointCloudBuffer        = new FusionPointCloudImageFrame(DepthWidth, DepthHeight);
            shadedSurfaceColorFrame = new FusionColorImageFrame(DepthWidth, DepthHeight);

            // Reset the reconstruction
            volume.ResetReconstruction(Matrix4.Identity);
        }
        /// <summary>
        /// Constructs and prepares the ColorReconstruction for data input
        /// </summary>
        private void RecreateReconstruction()
        {
            ReconstructionParameters volParam = new ReconstructionParameters(voxelsPerMeter, voxelsX, voxelsY, voxelsZ);

            worldToCameraTransform = Matrix4.Identity;
            ReconstructionProcessor ProcessorType = ReconstructionProcessor.Amp;

            volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, -1, worldToCameraTransform);
            defaultWorldToVolumeTransform = volume.GetCurrentWorldToVolumeTransform();
            ResetReconstruction();

            worldToBGRTransform     = Matrix4.Identity;
            worldToBGRTransform.M11 = voxelsPerMeter / voxelsX;
            worldToBGRTransform.M22 = voxelsPerMeter / voxelsY;
            worldToBGRTransform.M33 = voxelsPerMeter / voxelsZ;
            worldToBGRTransform.M41 = 0.5f;
            worldToBGRTransform.M42 = 0.5f;
            worldToBGRTransform.M44 = 1.0f;
        }
Example #6
        void InitializeFusion()
        {
            // Reconstruction Parameters
            float voxelPerMeter = 256;
            int voxelsX = 512;
            int voxelsY = 384;
            int voxelsZ = 512;
            reconstructionParameters = new ReconstructionParameters( voxelPerMeter, voxelsX, voxelsY, voxelsZ );

            // Set the initial camera pose to the world origin
            worldToCameraTransform = Matrix4.Identity;

            // Create the Fusion Reconstruction object
            reconstruction = ColorReconstruction.FusionCreateReconstruction( reconstructionParameters, ReconstructionProcessor.Amp, -1, worldToCameraTransform );

            // Create the Fusion image frames
            cameraParameters = CameraParameters.Defaults;
            depthImageFrame = new FusionFloatImageFrame( depthWidth, depthHeight, cameraParameters );
            smoothDepthImageFrame = new FusionFloatImageFrame( depthWidth, depthHeight, cameraParameters );
            colorImageFrame = new FusionColorImageFrame( depthWidth, depthHeight, cameraParameters );
            pointCloudImageFrame = new FusionPointCloudImageFrame( depthWidth, depthHeight, cameraParameters );
            surfaceImageFrame = new FusionColorImageFrame( depthWidth, depthHeight, cameraParameters );
        }
Example #7
        void InitializeFusion()
        {
            // Reconstruction Parameters
            float voxelPerMeter = 256;
            int   voxelsX       = 512;
            int   voxelsY       = 384;
            int   voxelsZ       = 512;

            reconstructionParameters = new ReconstructionParameters(voxelPerMeter, voxelsX, voxelsY, voxelsZ);

            // Set the initial camera pose to the world origin
            worldToCameraTransform = Matrix4.Identity;

            // Create the Fusion Reconstruction object
            reconstruction = ColorReconstruction.FusionCreateReconstruction(reconstructionParameters, ReconstructionProcessor.Amp, -1, worldToCameraTransform);

            // Create the Fusion image frames
            cameraParameters      = CameraParameters.Defaults;
            depthImageFrame       = new FusionFloatImageFrame(depthWidth, depthHeight, cameraParameters);
            smoothDepthImageFrame = new FusionFloatImageFrame(depthWidth, depthHeight, cameraParameters);
            colorImageFrame       = new FusionColorImageFrame(depthWidth, depthHeight, cameraParameters);
            pointCloudImageFrame  = new FusionPointCloudImageFrame(depthWidth, depthHeight, cameraParameters);
            surfaceImageFrame     = new FusionColorImageFrame(depthWidth, depthHeight, cameraParameters);
        }
        private void InitializeKinectFusion()
        {
            // Initialize Kinect Fusion
            var volParam = new ReconstructionParameters( VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ );
            volume = Reconstruction.FusionCreateReconstruction( volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity );

            // Create the conversion buffers
            depthFloatBuffer = new FusionFloatImageFrame( DepthWidth, DepthHeight );
            pointCloudBuffer = new FusionPointCloudImageFrame( DepthWidth, DepthHeight );
            shadedSurfaceColorFrame = new FusionColorImageFrame( DepthWidth, DepthHeight );

            // Reset the reconstruction
            volume.ResetReconstruction( Matrix4.Identity );
        }
Example #9
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range = DepthRange.Near;
                checkBoxNearMode.IsChecked    = true;
            }
            catch
            {
                // device not near mode capable
            }

            // Initialize and start the FPS timer
            this.fpsTimer          = new DispatcherTimer();
            this.fpsTimer.Tick    += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            // Reset the reconstruction
            this.ResetReconstruction();
        }
Example #10
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Check to ensure suitable DirectX11 compatible hardware exists before initializing Kinect Fusion
            try
            {
                string deviceDescription  = string.Empty;
                string deviceInstancePath = string.Empty;
                int    deviceMemory       = 0;

                FusionDepthProcessor.GetDeviceInfo(
                    ProcessorType, DeviceToUse, out deviceDescription, out deviceInstancePath, out deviceMemory);
            }
            catch (IndexOutOfRangeException)
            {
                // Thrown when index is out of range for processor type or there is no DirectX11 capable device installed.
                // As we set -1 (auto-select default) for the DeviceToUse above, this indicates that there is no DirectX11
                // capable device. The options for users in this case are to either install a DirectX11 capable device
                // (see documentation for recommended GPUs) or to switch to non-real-time CPU based reconstruction by
                // changing ProcessorType to ReconstructionProcessor.Cpu
                this.statusBarText.Text = Properties.Resources.NoDirectX11CompatibleDeviceOrInvalidDeviceIndex;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }

            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Create local depth pixels buffer
            this.depthImagePixels = new DepthImagePixel[this.frameDataLength];

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    // Reset the reconstruction if we need to add a custom world-volume transformation
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor             = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range   = DepthRange.Near;
                this.checkBoxNearMode.IsChecked = true;
            }
            catch (InvalidOperationException)
            {
                // Near mode not supported on device, silently fail during initialization
                this.checkBoxNearMode.IsEnabled = false;
            }

            // Initialize and start the FPS timer
            this.fpsTimer          = new DispatcherTimer();
            this.fpsTimer.Tick    += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            this.lastFPSTimestamp = DateTime.UtcNow;
        }
Example #11
        private void InitFusion()
        {
            if (_isFusionInitialized)
                return;

            _currentFormat = new KinectFormat();

            _currentFormat.DepthImageFormat = DepthImageFormat.Undefined;
            _currentFormat.ColorImageFormat = ColorImageFormat.Undefined;

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var colorFormat = KinectSensor.ColorStream.Format;
            var kinectFormat = new KinectFormat();
            kinectFormat.DepthImageFormat = depthFormat;
            kinectFormat.ColorImageFormat = colorFormat;

            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool<FusionWorkItem, KinectFormat>(5, kinectFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue<FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            IntegratingColor = false;
            _audioManager.Start();
        }
Example #12
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames
            this.sensor.DepthStream.Enable(DepthImageResolution);

            this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            this.sensor.ColorFrameReady += this.kinect_colorframe_ready;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor = null;
                this.statusBarText.Text = ex.Message;

                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range = DepthRange.Near;
                checkBoxNearMode.IsChecked = true;
            }
            catch
            {
                // device not near mode capable
            }

            // Initialize and start the FPS timer
            this.fpsTimer = new DispatcherTimer();
            this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);

            this.fpsTimer.Start();

            // Reset the reconstruction
            this.ResetReconstruction();
        }
Example #13
        /// <summary>
        /// Re-create the reconstruction object
        /// </summary>
        /// <returns>Indicate success or failure</returns>
        public bool RecreateReconstruction(FusionVolume vol, Matrix4 startingWorldToCameraTx)
        {
            lock (this.volumeLock)
            {
                if (null != vol.Reconstruction)
                {
                    vol.Reconstruction.Dispose();
                    vol.Reconstruction = null;
                }

                try
                {
                    ReconstructionParameters volParam = new ReconstructionParameters(FusionVolume.VoxelsPerMeter, FusionVolume.VoxelsX, FusionVolume.VoxelsY, FusionVolume.VoxelsZ);

                    // Set the world-view transform to identity, so the world origin is the initial camera location.
                    vol.WorldToCameraTransform = startingWorldToCameraTx;

                    vol.Reconstruction = ColorReconstruction.FusionCreateReconstruction(volParam, FusionVolume.ProcessorType, FusionVolume.DeviceToUse, vol.WorldToCameraTransform);

                    vol.DefaultWorldToVolumeTransform = vol.Reconstruction.GetCurrentWorldToVolumeTransform();

                    if (VolumeResetter.TranslateResetPoseByMinDepthThreshold)
                    {
                        ResetReconstruction(vol, startingWorldToCameraTx);
                    }
                    else
                    {
                        vol.Engine.CameraTracker.ResetTracking();
                        vol.Engine.ColorProcessor.ResetColorImage();
                    }

                    vol.Renderer.ResetWorldToBGR();

                    if (vol.Engine.CubeDrawer != null)
                    {
                        vol.Engine.CubeDrawer.UpdateVolumeCube();
                    }

                    vol.Renderer.ViewChanged = true;

                    return true;
                }
                catch (ArgumentException)
                {
                    vol.Reconstruction = null;
                    logger.Log(LogLevel.Error, "Volume resolution not appropriatate");
                }
                catch (InvalidOperationException ex)
                {
                    vol.Reconstruction = null;
                    logger.Log(LogLevel.Error, ex);
                }
                catch (DllNotFoundException)
                {
                    vol.Reconstruction = null;
                    logger.Log(LogLevel.Error, "Missing Dll prerequisite for volume reconstruction");
                }
                catch (OutOfMemoryException)
                {
                    vol.Reconstruction = null;
                    logger.Log(LogLevel.Error, "Out of memory when recreating volume");
                }

                return false;
            }
        }
Example #14
        public VolumeBuilder(Scanner source, Dispatcher dispatcher)
        {
            if (source == null)
            {
                throw new ArgumentNullException("source");
            }

            this.source     = source;
            this.dispatcher = dispatcher;

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            // Map world X axis to blue channel, Y axis to green channel and Z axis to red channel,
            // normalizing each to the range [0, 1]. We also add a shift of 0.5 to both X,Y channels
            // as the world origin starts located at the center of the front face of the volume,
            // hence we need to map negative x,y world vertex locations to positive color values.
            this.worldToBGRTransform     = Matrix4.Identity;
            this.worldToBGRTransform.M11 = settings.VoxelsPerMeter / settings.VoxelsX;
            this.worldToBGRTransform.M22 = settings.VoxelsPerMeter / settings.VoxelsY;
            this.worldToBGRTransform.M33 = settings.VoxelsPerMeter / settings.VoxelsZ;
            this.worldToBGRTransform.M41 = 0.5f;
            this.worldToBGRTransform.M42 = 0.5f;
            this.worldToBGRTransform.M44 = 1.0f;

            var volumeParameters = new ReconstructionParameters(settings.VoxelsPerMeter, settings.VoxelsX, settings.VoxelsY, settings.VoxelsZ);

            this.volume = ColorReconstruction.FusionCreateReconstruction(volumeParameters, ReconstructionProcessor.Amp, -1, this.worldToCameraTransform);

            var depthWidth  = this.source.Frame.DepthWidth;
            var depthHeight = this.source.Frame.DepthHeight;
            var depthSize   = depthWidth * depthHeight;

            this.depthFloatFrame                 = new FusionFloatImageFrame(depthWidth, depthHeight);
            this.smoothDepthFloatFrame           = new FusionFloatImageFrame(depthWidth, depthHeight);
            this.resampledColorFrame             = new FusionColorImageFrame(depthWidth, depthHeight);
            this.resampledColorFrameDepthAligned = new FusionColorImageFrame(depthWidth, depthHeight);
            this.deltaFromReferenceFrame         = new FusionFloatImageFrame(depthWidth, depthHeight);
            this.shadedSurfaceFrame              = new FusionColorImageFrame(depthWidth, depthHeight);
            this.raycastPointCloudFrame          = new FusionPointCloudImageFrame(depthWidth, depthHeight);
            this.depthPointCloudFrame            = new FusionPointCloudImageFrame(depthWidth, depthHeight);

            var downsampledDepthWidth  = depthWidth / settings.DownsampleFactor;
            var downsampledDepthHeight = depthHeight / settings.DownsampleFactor;
            var downsampledDepthSize   = downsampledDepthWidth * downsampledDepthHeight;

            this.downsampledDepthFloatFrame                   = new FusionFloatImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledSmoothDepthFloatFrame             = new FusionFloatImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledRaycastPointCloudFrame            = new FusionPointCloudImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledDepthPointCloudFrame              = new FusionPointCloudImageFrame(downsampledDepthWidth, downsampledDepthHeight);
            this.downsampledDeltaFromReferenceFrameColorFrame = new FusionColorImageFrame(downsampledDepthWidth, downsampledDepthHeight);

            this.resampledColorData   = new int[depthSize];
            this.downsampledDepthData = new float[downsampledDepthSize];
            this.downsampledDeltaFromReferenceColorPixels = new int[downsampledDepthSize];
            this.deltaFromReferenceFramePixelsArgb        = new int[depthSize];
            this.shadedSurfaceFramePixelsArgb             = new int[this.shadedSurfaceFrame.PixelDataLength];

            this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

            this.volumeBitmap = new WriteableBitmap(depthWidth, depthHeight, settings.DefaultSystemDPI, settings.DefaultSystemDPI, PixelFormats.Bgr32, null);

            // Create a camera pose finder with default parameters
            this.cameraPoseFinder = CameraPoseFinder.FusionCreateCameraPoseFinder(CameraPoseFinderParameters.Defaults);

            this.workerThread = new Thread(WorkerThreadProc);
            this.workerThread.Start();
            this.source.Frame.OnDataUpdate += OnFrameDataUpdate;
        }
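The comment in the constructor above explains how worldToBGRTransform maps the world X/Y/Z axes to the blue/green/red channels. A minimal sketch of that mapping for a single vertex, assuming Kinect Fusion's row-vector Matrix4 convention (M41/M42 act as the X/Y shift) and hypothetical volume settings:

            // Hypothetical volume settings; real values come from the settings object above.
            float voxelsPerMeter = 256f, voxelsX = 512f, voxelsY = 384f, voxelsZ = 512f;

            // A world-space vertex in meters (hypothetical).
            float wx = -0.5f, wy = 0.25f, wz = 1.0f;

            // Scale each axis into the volume range and shift X/Y by 0.5, as encoded in worldToBGRTransform.
            float blue  = wx * (voxelsPerMeter / voxelsX) + 0.5f; // 0.25
            float green = wy * (voxelsPerMeter / voxelsY) + 0.5f; // ~0.667
            float red   = wz * (voxelsPerMeter / voxelsZ);        // 0.5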
Example #15
        /// <summary>
        /// Re-create the reconstruction object
        /// </summary>
        /// <returns>Indicate success or failure</returns>
        private bool RecreateReconstruction()
        {
            // Check if sensors has been initialized
            if (null == this.sensors)
            {
                return false;
            }

            if (null != this.volume)
            {
                lock (this.reconstructionLock)
                {
                    this.volume.Dispose();
                }
            }

            try
            {
                // The zero-based GPU index to choose for reconstruction processing if the 
                // ReconstructionProcessor AMP options are selected.
                // Here we automatically choose a device to use for processing by passing -1, 
                int deviceIndex = -1;

                ReconstructionParameters volParam = new ReconstructionParameters(
                    this.voxelsPerMeter, this.voxelsX, this.voxelsY, this.voxelsZ);

                // Here we set the internal camera pose to identity, as we manage each separately in the ReconstructionSensor class
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, deviceIndex, Matrix4.Identity);

                // We need to call reset here to set the correct world-to-volume transform
                this.ResetReconstruction(this, null);

                // Reset "Pause Integration"
                if (this.PauseIntegration)
                {
                    this.PauseIntegration = false;
                }

                this.firstFrame = true;
                this.DoneReconstructing();

                // Create volume cube 3D graphics in WPF3D. The front top left corner is the actual origin of the volume
                // voxel coordinate system, and shown with an overlaid coordinate cross.
                // Red is the +X axis, Green is the +Y axis, Blue is the +Z axis in the voxel coordinate system
                this.DisposeVolumeCube3DGraphics(); // Auto-removes from the visual tree
                this.CreateCube3DGraphics(volumeCubeLineColor, LineThickness, new Vector3D(0, 0, this.worldToVolumeTransform.M43 / this.voxelsPerMeter)); // Auto-adds to the visual tree
                this.AddVolumeCube3DGraphics();

                return true;
            }
            catch (ArgumentException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.VolumeResolution);
            }
            catch (InvalidOperationException ex)
            {
                this.volume = null;
                this.ShowStatusMessage(ex.Message);
            }
            catch (DllNotFoundException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.MissingPrerequisite);
            }
            catch (OutOfMemoryException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.OutOfMemory);
            }

            return false;
        }
Example #16
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter = this.FInVPM[0];

            if (this.FTextureOutput[0] == null) { this.FTextureOutput[0] = new DX11Resource<DX11DynamicTexture2D>(); }
            if (this.FPCOut[0] == null) { this.FPCOut[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }
            if (this.FGeomOut[0] == null) { this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FOutVoxels[0] == null) { this.FOutVoxels[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }

            if (this.FInExport[0]) { this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

                    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                    this.worldToCameraTransform = Matrix4.Identity;

                    //this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);
                    this.colorVolume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);

                    //this.volume.
                    /*FusionPointCloudImageFrame pc;
                    pc.*/

                    this.defaultWorldToVolumeTransform = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);

                    this.ResetReconstruction();
                }
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset) { this.ResetReconstruction(); }
            }
        }
Example #17
        private void InitFusion()
        {
            if (_isFusionInitialized)
            {
                return;
            }

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var depthSize   = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool <FusionWorkItem, DepthImageFormat>(5, depthFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue <FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength   = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;


            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer    = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData            = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            _audioManager.Start();
        }
Example #18
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter   = this.FInVPM[0];

            if (this.FTextureOutput[0] == null)
            {
                this.FTextureOutput[0] = new DX11Resource <DX11DynamicTexture2D>();
            }
            if (this.FPCOut[0] == null)
            {
                this.FPCOut[0] = new DX11Resource <IDX11ReadableStructureBuffer>();
            }
            if (this.FGeomOut[0] == null)
            {
                this.FGeomOut[0] = new DX11Resource <DX11IndexedGeometry>();
            }

            if (this.FOutVoxels[0] == null)
            {
                this.FOutVoxels[0] = new DX11Resource <IDX11ReadableStructureBuffer>();
            }

            if (this.FInExport[0])
            {
                this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource <DX11IndexedGeometry>();
            }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;



                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);
                }
            }

            if (this.FInVPM.IsChanged || this.FInVX.IsChanged || this.FInVY.IsChanged || this.FInVZ.IsChanged)
            {
                if (this.volume != null)
                {
                    this.volume.Dispose();
                }

                var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                this.worldToCameraTransform = Matrix4.Identity;

                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                this.ResetReconstruction();
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset)
                {
                    this.ResetReconstruction();
                }
            }
        }
Example #19
        private void loadAndStartDepth()
        {
            // Read in the model reconstruction (prescanned model)
            FileStream stream = System.IO.File.OpenRead(modelData);
            // Open bracket
            char ch = (char)stream.ReadByte();
            // Copy all the model data into a short array of the same size
            short[] modelVolumeData = new short[X_VOXELS * Y_VOXELS * Z_VOXELS];
            max = modelVolumeData.Length;
            // Parse what is essentially a really big json array
            StringBuilder b = new StringBuilder();
            for (int i = 0; i < modelVolumeData.Length; i++) {
                ch = (char)stream.ReadByte();
                while (ch != ']' && ch != ',') {
                    b.Append(ch);
                    ch = (char)stream.ReadByte();
                }
                modelVolumeData[i] = short.Parse(b.ToString());
                b.Clear();
                progress = i;
            }

            // Build the reconstruction volume from the prescanned model
            // Now we have access to our prescanned model
            ReconstructionParameters rParams = new ReconstructionParameters(VOXEL_RESOLUTION, X_VOXELS, Y_VOXELS, Z_VOXELS);
            volume = ColorReconstruction.FusionCreateReconstruction(rParams, ReconstructionProcessor.Amp, -1, Matrix4.Identity);
            volume.ImportVolumeBlock(modelVolumeData);

            if (continuousTrack) {
                continueVolume = ColorReconstruction.FusionCreateReconstruction(rParams, ReconstructionProcessor.Amp, -1, Matrix4.Identity);
            }

            sensor.DepthStream.Enable(DEPTH_FORMAT);
            sensor.DepthFrameReady += depthFrameReady;
            isLoading = false;
            new Thread(new ThreadStart(runDepth)).Start();
        }
Example #20
        /// <summary>
        /// Initialize a Kinect Fusion 3D Reconstruction Volume.
        /// Voxel volume axis sizes must be greater than 0 and a multiple of 32. A Kinect camera 
        /// is also required to be connected.
        /// </summary>
        /// <param name="reconstructionParameters">
        /// The Reconstruction parameters to define the size and shape of the reconstruction volume.
        /// </param>
        /// <param name="reconstructionProcessorType">
        /// The processor type to be used for all calls to the reconstruction volume object returned
        /// from this function.
        /// </param>
        /// <param name="deviceIndex">Set this variable to an explicit zero-based device index to use
        /// a specific GPU as enumerated by FusionDepthProcessor.GetDeviceInfo, or set to -1 to 
        /// automatically select the default device for a given processor type.
        /// </param>
        /// <param name="initialWorldToCameraTransform">
        /// The initial camera pose of the reconstruction volume with respect to the world origin. 
        /// Pass identity as the default camera pose. 
        /// </param>
        /// <returns>The Reconstruction instance.</returns>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the <paramref name="reconstructionParameters"/> parameter is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the <paramref name="reconstructionParameters"/> parameter's <c>VoxelX</c>,
        /// <c>VoxelY</c>, or <c>VoxelZ</c> member is not greater than 0 and a multiple of 32.
        /// Thrown when the <paramref name="deviceIndex"/> parameter is less than -1 or greater 
        /// than the number of available devices for the respective processor type.
        /// </exception>
        /// <exception cref="OutOfMemoryException">
        /// Thrown when the memory required for the Reconstruction volume processing could not be
        /// allocated.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the Kinect device is not
        /// connected, or the Reconstruction volume is too big so a GPU memory allocation failed,
        /// or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// Users can select which device the processing is performed on with
        /// the <paramref name="reconstructionProcessorType"/> parameter. For those with multiple GPUs
        /// the <paramref name="deviceIndex"/> parameter also enables users to explicitly configure
        /// on which device the reconstruction volume is created.
        /// Note that this function creates a default world-volume transform. To set a non-default
        /// transform call ResetReconstruction with an appropriate Matrix4. This default transformation
        /// is a combination of translation in X,Y to locate the world origin at the center of the front
        /// face of the reconstruction volume cube, and scaling by the voxelsPerMeter reconstruction
        /// parameter to convert from the world coordinate system to volume voxel indices.
        /// </remarks>
        public static Reconstruction FusionCreateReconstruction(
            ReconstructionParameters reconstructionParameters,
            ReconstructionProcessor reconstructionProcessorType,
            int deviceIndex,
            Matrix4 initialWorldToCameraTransform)
        {
            if (null == reconstructionParameters)
            {
                throw new ArgumentNullException("reconstructionParameters");
            }

            INuiFusionReconstruction reconstruction = null;

            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionCreateReconstruction(
                reconstructionParameters,
                reconstructionProcessorType,
                deviceIndex,
                ref initialWorldToCameraTransform,
                out reconstruction));

            return new Reconstruction(reconstruction);
        }
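A hedged usage sketch for the factory method documented above; the parameter values are illustrative only and follow the constraints called out in the XML comments (each voxel axis size a positive multiple of 32, -1 to auto-select the processing device):

            // Illustrative reconstruction parameters: voxelsPerMeter, voxelsX, voxelsY, voxelsZ.
            var parameters = new ReconstructionParameters(256, 512, 384, 512);

            // Create the volume on the default AMP (DirectX 11) device with an identity camera pose.
            Reconstruction reconstruction = Reconstruction.FusionCreateReconstruction(
                parameters,
                ReconstructionProcessor.Amp,
                -1,
                Matrix4.Identity);

            // The default world-volume transform puts the world origin at the center of the
            // volume's front face (see the remarks above); call ResetReconstruction to change it.
            reconstruction.ResetReconstruction(Matrix4.Identity);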
Example #21
        /// <summary>
        /// Re-create the reconstruction object
        /// </summary>
        /// <returns>Indicate success or failure</returns>
        private bool RecreateReconstruction()
        {
            // Check if sensor has been initialized
            if (null == this.sensor)
            {
                return false;
            }

            if (null != this.volume)
            {
                this.volume.Dispose();
            }

            try
            {
                // The zero-based GPU index to choose for reconstruction processing if the
                // ReconstructionProcessor AMP options are selected.
                // Here we automatically choose a device to use for processing by passing -1,
                int deviceIndex = -1;

                ReconstructionParameters volParam = new ReconstructionParameters(this.voxelsPerMeter, this.voxelsX, this.voxelsY, this.voxelsZ);

                // Set the world-view transform to identity, so the world origin is the initial camera location.
                this.worldToCameraTransform = Matrix4.Identity;

                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, deviceIndex, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }

                // Reset "Pause Integration"
                if (this.PauseIntegration)
                {
                    this.PauseIntegration = false;
                }

                return true;
            }
            catch (ArgumentException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.VolumeResolution);
            }
            catch (InvalidOperationException ex)
            {
                this.volume = null;
                this.ShowStatusMessage(ex.Message);
            }
            catch (DllNotFoundException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.MissingPrerequisite);
            }
            catch (OutOfMemoryException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.OutOfMemory);
            }

            return false;
        }