        private void Init()
        {
            _currentKinectFormat = new KinectFormat()
            {
                ColorImageFormat = ColorImageFormat.Undefined,
                DepthImageFormat = DepthImageFormat.Undefined,
                NumSkeletons     = 0
            };

            _kinectFrameWorkItemPool = new Pool<KinectFrameWorkItem, KinectFormat>(5, _currentKinectFormat, KinectFrameWorkItem.Create);

            _kinectWorkQueue = new WorkQueue<KinectFrameWorkItem>(ProcessKinectFrame)
            {
                CanceledCallback = ReturnKinectFrameWorkItem,
                MaxQueueLength   = 1
            };

            _elevationTimer          = new DispatcherTimer();
            _elevationTimer.Interval = TimeSpan.FromMilliseconds(500);
            _elevationTimer.Tick    += new EventHandler(elevationTimer_Tick);

            InitRelayCommands();

            KinectSensorChooser.KinectChanged += SensorChooserOnKinectChanged;

            KinectSensorChooser.Start();

            _voiceHeardResetTimer          = new DispatcherTimer();
            _voiceHeardResetTimer.Tick    += new EventHandler(_voiceHeadResetTimer_Tick);
            _voiceHeardResetTimer.Interval = TimeSpan.FromSeconds(2);
        }
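
Pool and WorkQueue are FaceFusion's own types, not part of the Kinect SDK. A minimal sketch of the pool contract these call sites imply (the real implementation is not shown on this page and is likely thread-safe, since items cross between the capture and worker threads):

using System;
using System.Collections.Generic;

// Sketch only: the shape is inferred from the call sites above.
public class Pool<TItem, TFormat>
{
    private readonly int _count;
    private readonly Func<TFormat, TItem> _factory;
    private readonly Stack<TItem> _items = new Stack<TItem>();
    private TFormat _format;

    public Pool(int count, TFormat format, Func<TFormat, TItem> factory)
    {
        _count = count;
        _factory = factory;
        _format = format;
        Rebuild();
    }

    // Assigning a new format rebuilds the pooled items so their buffers
    // match the new frame sizes (items still checked out are dropped).
    public TFormat Format
    {
        get { return _format; }
        set { _format = value; Rebuild(); }
    }

    // Returns null when every item is checked out, as ProcessFusionFrame
    // below expects.
    public TItem Pop()
    {
        return _items.Count > 0 ? _items.Pop() : default(TItem);
    }

    public void Push(TItem item)
    {
        _items.Push(item);
    }

    private void Rebuild()
    {
        _items.Clear();
        for (int i = 0; i < _count; i++)
        {
            _items.Push(_factory(_format));
        }
    }
}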
Example #2
        public void ProcessFusionFrame(KinectFrameWorkItem workItem)
        {
            DepthImagePixel[] depthPixels = workItem.DepthImagePixels;
            byte[] colorPixels = workItem.MappedColorImageData;

            var kinectFormat = new FaceFusion.Services.KinectFormat
            {
                DepthImageFormat = workItem.Format.DepthImageFormat,
                ColorImageFormat = workItem.Format.ColorImageFormat
            };

            // On a format change, retarget the pool and reallocate the
            // residual visualization buffer (4 bytes per depth pixel, BGRA).
            if (kinectFormat.DepthImageFormat != _currentFormat.DepthImageFormat ||
                kinectFormat.ColorImageFormat != _currentFormat.ColorImageFormat)
            {
                _currentFormat = kinectFormat;
                _fusionWorkItemPool.Format = kinectFormat;
                _residualImageData = new byte[depthPixels.Length * 4];
            }

            var fusionWorkItem = _fusionWorkItemPool.Pop();

            if (fusionWorkItem == null)
            {
                // Pool exhausted: every buffer is still in flight, so drop this frame.
                Trace.WriteLine("Fusion Depth Pool empty");
                return;
            }

            Array.Copy(depthPixels, fusionWorkItem.data, depthPixels.Length);
            Array.Copy(colorPixels, fusionWorkItem.colordata, colorPixels.Length);

            if (_fusionWorkQueue != null)
            {
                _fusionWorkQueue.AddWork(fusionWorkItem);
            }
        }
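
ReturnFusionWorkItem and ProcessFusionFrameBackground are registered on the queue in InitFusion (next example), but their bodies are not listed on this page. Assuming the Push() method from the pool sketch above, the return path is plausibly just:

// Hypothetical: hand a canceled or completed work item back to the pool
// so the fixed budget of five buffers is never exhausted for good.
private void ReturnFusionWorkItem(FusionWorkItem workItem)
{
    _fusionWorkItemPool.Push(workItem);
}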
Example #3
        private void InitFusion()
        {
            if (_isFusionInitialized)
                return;

            _currentFormat = new KinectFormat
            {
                DepthImageFormat = DepthImageFormat.Undefined,
                ColorImageFormat = ColorImageFormat.Undefined
            };

            _isFusionInitialized = true;

            var depthFormat = KinectSensor.DepthStream.Format;
            var colorFormat = KinectSensor.ColorStream.Format;
            var kinectFormat = new KinectFormat
            {
                DepthImageFormat = depthFormat,
                ColorImageFormat = colorFormat
            };

            var depthSize = FormatHelper.GetDepthSize(depthFormat);

            _fusionWorkItemPool = new Pool<FusionWorkItem, KinectFormat>(5, kinectFormat, FusionWorkItem.Create);

            _fusionWorkQueue = new WorkQueue<FusionWorkItem>(ProcessFusionFrameBackground)
            {
                CanceledCallback = ReturnFusionWorkItem,
                MaxQueueLength = 2
            };

            this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

            // Allocate the raycast output pixels (double the depth resolution in
            // each dimension, matching the shaded surface frame created below)
            this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

            // This is the bitmap we'll display on-screen
            this.colorFusionBitmap = new WriteableBitmap(
                (int)depthSize.Width * 2,
                (int)depthSize.Height * 2,
                96.0,
                96.0,
                PixelFormats.Bgr32,
                null);
            FusionOutputImage = colorFusionBitmap;

            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at center of near plane, and volume directly
                // in front of Kinect.
                this.volume = ColorReconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                if (this.translateResetPose)
                {
                    this.ResetReconstruction(_currentVolumeCenter);
                }
            }
            catch (ArgumentException)
            {
                FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
                return;
            }
            catch (InvalidOperationException ex)
            {
                FusionStatusMessage = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                FusionStatusMessage = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
            _residualData = new float[(int)(depthSize.Width * depthSize.Height)];

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

            // Reset the reconstruction
            this.ResetReconstruction(_currentVolumeCenter);

            IntegratingColor = false;
            _audioManager.Start();
        }
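
ResetReconstruction(_currentVolumeCenter) is defined elsewhere in the project. Patterned on the stock Kinect Fusion samples, it plausibly recenters the volume cube by translating the default world-to-volume transform; the Vector3D parameter type, field names, and sign conventions here are assumptions:

// Sketch after the standard Kinect Fusion samples, not FaceFusion's actual
// code. Translating the world-to-volume transform moves the reconstruction
// cube so it is centered on 'center' (in meters).
private void ResetReconstruction(System.Windows.Media.Media3D.Vector3D center)
{
    // Start tracking from the origin again.
    this.worldToCameraTransform = Matrix4.Identity;

    var worldToVolume = this.defaultWorldToVolumeTransform;
    worldToVolume.M41 += (float)center.X * VoxelsPerMeter;
    worldToVolume.M42 += (float)center.Y * VoxelsPerMeter;
    worldToVolume.M43 -= (float)center.Z * VoxelsPerMeter;

    this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolume);
}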
Example #4
        private bool TrackIntegrate(DepthImagePixel[] depthPixels, byte[] colorPixels, KinectFormat workFormat)
        {
            var depthSize = FormatHelper.GetDepthSize(workFormat.DepthImageFormat);
            var colorSize = FormatHelper.GetColorSize(workFormat.ColorImageFormat);

            // Convert the depth image frame to depth float image frame
            FusionDepthProcessor.DepthToDepthFloatFrame(
                depthPixels,
                (int)depthSize.Width,
                (int)depthSize.Height,
                this.depthFloatBuffer,
                FusionDepthProcessor.DefaultMinimumDepth,
                FusionDepthProcessor.DefaultMaximumDepth,
                false);

            bool trackingSucceeded = this.volume.AlignDepthFloatToReconstruction(
                    depthFloatBuffer,
                    FusionDepthProcessor.DefaultAlignIterationCount,
                    residualFloatBuffer,
                    out _alignmentEnergy,
                    volume.GetCurrentWorldToCameraTransform());

            // A disabled sanity check: treat a reported success with an
            // alignment energy of exactly 0.0 as a tracking failure.
            //if (trackingSucceeded && _alignmentEnergy == 0.0)
            //    trackingSucceeded = false;

            // Alternative API: ProcessFrame calculates the camera pose and then
            // integrates in a single call if tracking succeeds. Aligning and
            // integrating separately, as below, exposes the residual image and
            // lets integration be paused independently.
            //bool trackingSucceeded = this.volume.ProcessFrame(
            //    this.depthFloatBuffer,
            //    FusionDepthProcessor.DefaultAlignIterationCount,
            //    IntegrationWeight,
            //    this.volume.GetCurrentWorldToCameraTransform());

            // If camera tracking failed, no data integration or raycast for reference
            // point cloud will have taken place, and the internal camera pose
            // will be unchanged.
            if (!trackingSucceeded)
            {
                this.trackingErrorCount++;

                // Show tracking error on status bar
                FusionStatusMessage = Properties.Resources.CameraTrackingFailed;
                _audioManager.State = AudioState.Error;
            }
            else
            {
                ProcessResidualImage();

                this.worldToCameraTransform = volume.GetCurrentWorldToCameraTransform();

                if (!IsIntegrationPaused)
                {
                    if (IntegratingColor)
                    {
                        // Pack the BGRA bytes into ints for the color frame. The
                        // frame wraps native memory, so dispose it after use.
                        using (var frame = new FusionColorImageFrame((int)colorSize.Width, (int)colorSize.Height))
                        {
                            const float colorIntegrationAngle = 10.0f;

                            int[] intColorPixels = new int[colorPixels.Length / 4];
                            Buffer.BlockCopy(colorPixels, 0, intColorPixels, 0, colorPixels.Length);
                            frame.CopyPixelDataFrom(intColorPixels);
                            this.volume.IntegrateFrame(depthFloatBuffer, frame, FusionDepthProcessor.DefaultIntegrationWeight, colorIntegrationAngle, this.worldToCameraTransform);
                        }
                    }
                    else
                    {
                        this.volume.IntegrateFrame(depthFloatBuffer, IntegrationWeight, this.worldToCameraTransform);
                    }
                }

                this.trackingErrorCount = 0;
            }

            if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
            {
                // Auto Reset due to bad tracking
                FusionStatusMessage = Properties.Resources.ResetVolume;

                // Automatically Clear Volume and reset tracking if tracking fails
                this.ResetReconstruction(_currentVolumeCenter);
            }
            return trackingSucceeded;
        }
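
ProcessResidualImage, called on every successful track above, is not shown on this page either. Given the buffers allocated in the earlier examples (_residualData in InitFusion, _residualImageData in ProcessFusionFrame), a plausible sketch is a straight grayscale visualization of the per-pixel alignment residuals; the exact mapping is an assumption:

// Hypothetical sketch: copy the residuals produced by
// AlignDepthFloatToReconstruction and render them into the BGRA buffer.
private void ProcessResidualImage()
{
    this.residualFloatBuffer.CopyPixelDataTo(_residualData);

    for (int i = 0; i < _residualData.Length; i++)
    {
        // Valid residuals lie in [0, 1] after taking the magnitude; anything
        // else (including NaN) is treated as "no data" and rendered black.
        float r = Math.Abs(_residualData[i]);
        byte v = (r >= 0.0f && r <= 1.0f) ? (byte)(r * 255) : (byte)0;

        _residualImageData[i * 4 + 0] = v;    // B
        _residualImageData[i * 4 + 1] = v;    // G
        _residualImageData[i * 4 + 2] = v;    // R
        _residualImageData[i * 4 + 3] = 255;  // A
    }
}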