Example #1
        public KinectManager(string modelData)
        {
            // Load the camera pose finder database saved from a previous scan.
            poseFinder.LoadCameraPoseFinderDatabase("poseFinder.txt");

            // The model file is one big JSON-style array: "[v0,v1,...,vN]".
            // Stream it byte-by-byte rather than reading the whole file into memory.
            short[] modelVolumeData = new short[X_VOXELS * Y_VOXELS * Z_VOXELS];
            using (FileStream stream = System.IO.File.OpenRead(modelData))
            {
                // Consume the opening bracket.
                char ch = (char)stream.ReadByte();

                StringBuilder b = new StringBuilder();
                for (int i = 0; i < modelVolumeData.Length; i++)
                {
                    // Accumulate digits until the next separator or the closing bracket.
                    ch = (char)stream.ReadByte();
                    while (ch != ']' && ch != ',')
                    {
                        b.Append(ch);
                        ch = (char)stream.ReadByte();
                    }
                    modelVolumeData[i] = short.Parse(b.ToString());
                    b.Clear();

                    // Log progress every 100,000 voxels.
                    if (i % 100000 == 0)
                    {
                        Console.WriteLine(i);
                    }
                }
            }

            /* Simpler but more memory-hungry alternative: read the whole file at once.
            string str = System.IO.File.ReadAllText(modelData).Trim();
            str = str.Substring(1, str.Length - 2);
            string[] parts = str.Split(',');
            short[] modelVolumeData = new short[parts.Length];
            for (int i = 0; i < parts.Length; i++) {
                modelVolumeData[i] = short.Parse(parts[i]);
            } */

            // Rebuild the reconstruction volume from the prescanned model data.
            ReconstructionParameters rParams = new ReconstructionParameters(VOXEL_RESOLUTION, X_VOXELS, Y_VOXELS, Z_VOXELS);
            volume = ColorReconstruction.FusionCreateReconstruction(rParams, ReconstructionProcessor.Amp, -1, Matrix4.Identity);
            volume.ImportVolumeBlock(modelVolumeData);

            // Use the first connected Kinect sensor.
            foreach (KinectSensor potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    sensor = potentialSensor;
                    break;
                }
            }

            if (sensor == null)
            {
                Console.WriteLine("Can't find Kinect Sensor");
                return;
            }

            sensor.DepthStream.Enable(DEPTH_FORMAT);
            sensor.ColorStream.Enable(COLOR_FORMAT);
            sensor.AllFramesReady += onFrameReady;
            sensor.Start();
        }
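The character-by-character parse above is robust but slow for the tens of millions of voxels in a typical volume. As a rough alternative sketch (not part of the original sample; the SaveVolume/LoadVolume helper names are hypothetical, and it only needs System.IO), the same short[] block could be stored as raw binary and loaded in a single pass:

        // Sketch: store the voxel block as a length prefix plus raw little-endian
        // shorts instead of a JSON-style text array. SaveVolume/LoadVolume are
        // hypothetical helper names, not part of the sample above.
        public static void SaveVolume(string path, short[] volumeData)
        {
            using (var writer = new BinaryWriter(File.Create(path)))
            {
                writer.Write(volumeData.Length);
                foreach (short v in volumeData)
                {
                    writer.Write(v);
                }
            }
        }

        public static short[] LoadVolume(string path)
        {
            using (var reader = new BinaryReader(File.OpenRead(path)))
            {
                int length = reader.ReadInt32();
                var volumeData = new short[length];
                for (int i = 0; i < length; i++)
                {
                    volumeData[i] = reader.ReadInt16();
                }
                return volumeData;
            }
        }

The resulting array can be handed to volume.ImportVolumeBlock exactly as in the constructor above.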
Example #2
        private void loadAndStartDepth()
        {
            // Read in the model reconstruction (prescanned model) and copy it
            // into a short array of the same size as the volume.
            short[] modelVolumeData = new short[X_VOXELS * Y_VOXELS * Z_VOXELS];
            max = modelVolumeData.Length;
            using (FileStream stream = System.IO.File.OpenRead(modelData))
            {
                // Consume the opening bracket.
                char ch = (char)stream.ReadByte();

                // Parse what is essentially a really big JSON array.
                StringBuilder b = new StringBuilder();
                for (int i = 0; i < modelVolumeData.Length; i++)
                {
                    ch = (char)stream.ReadByte();
                    while (ch != ']' && ch != ',')
                    {
                        b.Append(ch);
                        ch = (char)stream.ReadByte();
                    }
                    modelVolumeData[i] = short.Parse(b.ToString());
                    b.Clear();
                    progress = i; // expose parse progress to the loading UI
                }
            }

            // Build the reconstruction volume from the prescanned model.
            // Now we have access to our prescanned model.
            ReconstructionParameters rParams = new ReconstructionParameters(VOXEL_RESOLUTION, X_VOXELS, Y_VOXELS, Z_VOXELS);
            volume = ColorReconstruction.FusionCreateReconstruction(rParams, ReconstructionProcessor.Amp, -1, Matrix4.Identity);
            volume.ImportVolumeBlock(modelVolumeData);

            // A second, empty volume for continuous tracking, if enabled.
            if (continuousTrack)
            {
                continueVolume = ColorReconstruction.FusionCreateReconstruction(rParams, ReconstructionProcessor.Amp, -1, Matrix4.Identity);
            }

            sensor.DepthStream.Enable(DEPTH_FORMAT);
            sensor.DepthFrameReady += depthFrameReady;
            isLoading = false;
            new Thread(new ThreadStart(runDepth)).Start();
        }
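The depthFrameReady handler and the runDepth worker are not shown in this example. As a minimal sketch of the per-frame tracking step they would typically perform against the imported volume (the depthFloatFrame and worldToCameraTransform fields and the handler body are assumptions, not code from this example):

        // Sketch: one Kinect Fusion tracking step per depth frame. Assumes a
        // FusionFloatImageFrame depthFloatFrame sized to DEPTH_FORMAT and a
        // Matrix4 worldToCameraTransform field; both are assumptions here.
        private void depthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame frame = e.OpenDepthImageFrame())
            {
                if (frame == null) return;

                DepthImagePixel[] pixels = frame.GetRawPixelData();

                // Convert raw depth to a Fusion float frame, clipping to the default range.
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    pixels, frame.Width, frame.Height, depthFloatFrame,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // Align the new frame against the prescanned volume; on success
                // ProcessFrame integrates it and we read back the refined pose.
                bool tracked = volume.ProcessFrame(
                    depthFloatFrame,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    FusionDepthProcessor.DefaultIntegrationWeight,
                    worldToCameraTransform);

                if (tracked)
                {
                    worldToCameraTransform = volume.GetCurrentWorldToCameraTransform();
                }
            }
        }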
Example #3
        /// <summary>
        /// Re-create the reconstruction object
        /// </summary>
        /// <returns>Indicate success or failure</returns>
        private bool RecreateReconstruction()
        {
            // Check if the sensors collection has been initialized
            if (null == this.sensors)
            {
                return false;
            }

            if (null != this.volume)
            {
                lock (this.reconstructionLock)
                {
                    this.volume.Dispose();
                }
            }

            try
            {
                // The zero-based GPU index to choose for reconstruction processing if the
                // ReconstructionProcessor AMP option is selected.
                // Here we automatically choose a device to use for processing by passing -1.
                int deviceIndex = -1;

                ReconstructionParameters volParam = new ReconstructionParameters(
                    this.voxelsPerMeter, this.voxelsX, this.voxelsY, this.voxelsZ);

                // Here we set the internal camera pose to identity, as we manage each camera pose separately in the ReconstructionSensor class
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, deviceIndex, Matrix4.Identity);

                // We need to call reset here to set the correct world-to-volume transform
                this.ResetReconstruction(this, null);

                // Reset "Pause Integration"
                if (this.PauseIntegration)
                {
                    this.PauseIntegration = false;
                }

                this.firstFrame = true;
                this.DoneReconstructing();

                // Create volume cube 3D graphics in WPF3D. The front top left corner is the actual origin of the volume
                // voxel coordinate system, and shown with an overlaid coordinate cross.
                // Red is the +X axis, Green is the +Y axis, Blue is the +Z axis in the voxel coordinate system
                this.DisposeVolumeCube3DGraphics(); // Auto-removes from the visual tree
                this.CreateCube3DGraphics(volumeCubeLineColor, LineThickness, new Vector3D(0, 0, this.worldToVolumeTransform.M43 / this.voxelsPerMeter)); // Auto-adds to the visual tree
                this.AddVolumeCube3DGraphics();

                return true;
            }
            catch (ArgumentException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.VolumeResolution);
            }
            catch (InvalidOperationException ex)
            {
                this.volume = null;
                this.ShowStatusMessage(ex.Message);
            }
            catch (DllNotFoundException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.MissingPrerequisite);
            }
            catch (OutOfMemoryException)
            {
                this.volume = null;
                this.ShowStatusMessage(Properties.Resources.OutOfMemory);
            }

            return false;
        }
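The ResetReconstruction handler called above is not shown; a minimal sketch of what it would do to set the correct world-to-volume transform, modeled on the SDK's Fusion samples (the minDepthClip field is an assumption; the M43 adjustment is why CreateCube3DGraphics above divides worldToVolumeTransform.M43 by voxelsPerMeter):

        // Sketch of the ResetReconstruction referenced above; the real handler
        // is not part of this example. minDepthClip is a hypothetical field.
        private void ResetReconstruction(object sender, EventArgs e)
        {
            if (null == this.volume)
            {
                return;
            }

            // Start tracking from the identity camera pose.
            Matrix4 worldToCamera = Matrix4.Identity;

            // Shift the volume along +Z so the camera sits at the front face of
            // the cube rather than at its center. M43 holds the Z translation
            // in voxel units.
            this.worldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();
            this.worldToVolumeTransform.M43 -= this.minDepthClip * this.voxelsPerMeter;

            this.volume.ResetReconstruction(worldToCamera, this.worldToVolumeTransform);
        }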
Example #4
        public void Evaluate(int SpreadMax)
        {
            this.VoxelResolutionX = this.FInVX[0];
            this.VoxelResolutionY = this.FInVY[0];
            this.VoxelResolutionZ = this.FInVZ[0];
            this.VoxelsPerMeter = this.FInVPM[0];

            if (this.FTextureOutput[0] == null) { this.FTextureOutput[0] = new DX11Resource<DX11DynamicTexture2D>(); }
            if (this.FPCOut[0] == null) { this.FPCOut[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }
            if (this.FGeomOut[0] == null) { this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FOutVoxels[0] == null) { this.FOutVoxels[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }

            if (this.FInExport[0]) { this.FGeomOut[0].Dispose(); this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }

            if (this.FInvalidateConnect)
            {
                this.FInvalidateConnect = false;

                if (this.FInRuntime.PluginIO.IsConnected)
                {
                    this.runtime = this.FInRuntime[0];
                    this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

                    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
                    this.worldToCameraTransform = Matrix4.Identity;

                    //this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);
                    this.colorVolume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);


                    this.defaultWorldToVolumeTransform = this.colorVolume.GetCurrentWorldToVolumeTransform();

                    // Depth frames generated from the depth input
                    this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

                    // Point cloud frames generated from the depth float input
                    this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

                    // Create images to raycast the Reconstruction Volume
                    this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);

                    this.ResetReconstruction();
                }
            }

            if (this.runtime != null)
            {
                bool needreset = this.FInReset[0];

                if (needreset) { this.ResetReconstruction(); }
            }
        }
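ResetReconstruction is invoked both on connect and when the reset pin goes high, but its body is not shown in this example. A plausible minimal version, reusing the defaultWorldToVolumeTransform captured above (this is a sketch, not the plugin's actual method):

        // Sketch: restore the identity camera pose and the volume transform
        // captured when the runtime was connected. Not shown in the example above.
        private void ResetReconstruction()
        {
            if (this.colorVolume == null)
            {
                return;
            }

            this.worldToCameraTransform = Matrix4.Identity;
            this.colorVolume.ResetReconstruction(this.worldToCameraTransform, this.defaultWorldToVolumeTransform);
        }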
Example #5
        private void InitFusion()
        {
            if (_isFusionInitialized)
                return;

            _currentFormat = new KinectFormat();

            _currentFormat.DepthImageFormat = DepthImageFormat.Undefined;
            _currentFormat.ColorImageFormat = ColorImageFormat.Undefined;

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var colorFormat = KinectSensor.ColorStream.Format;
            var kinectFormat = new KinectFormat();
            kinectFormat.DepthImageFormat = depthFormat;
            kinectFormat.ColorImageFormat = colorFormat;

            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool<FusionWorkItem, KinectFormat>(5, kinectFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue<FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            IntegratingColor = false;
            _audioManager.Start();
        }