Example #1
0
        /// <summary>
        /// Maps the depth pixels in <paramref name="data"/> into color space,
        /// storing the result in <c>data.MappedDepthToColorPixels</c>.
        /// Does nothing when no depth pixels are available.
        /// </summary>
        /// <param name="mapper">Coordinate mapper used for the conversion.</param>
        /// <param name="data">Kinect data holding the depth input and receiving the mapped output.</param>
        public static void MapDepthFrameToColorSpace(CoordinateMapper mapper, KinectData data)
        {
            if (data.DepthPixels == null)
            {
                return;
            }

            // Allocate the output buffer before pinning anything so an allocation
            // failure cannot occur while a GCHandle is outstanding.
            data.MappedDepthToColorPixels = new ColorSpacePoint[Kinect2Metrics.DepthFrameWidth * Kinect2Metrics.DepthFrameHeight];

            var pinnedDepthArray  = default(System.Runtime.InteropServices.GCHandle);
            var pinnedMappedArray = default(System.Runtime.InteropServices.GCHandle);

            try
            {
                // Pin inside the try: if the second Alloc throws, the finally
                // still frees the first pin (the original leaked it).
                pinnedDepthArray  = System.Runtime.InteropServices.GCHandle.Alloc(data.DepthPixels, System.Runtime.InteropServices.GCHandleType.Pinned);
                pinnedMappedArray = System.Runtime.InteropServices.GCHandle.Alloc(data.MappedDepthToColorPixels, System.Runtime.InteropServices.GCHandleType.Pinned);

                IntPtr depthPointer  = pinnedDepthArray.AddrOfPinnedObject();
                IntPtr mappedPointer = pinnedMappedArray.AddrOfPinnedObject();
                mapper.MapDepthFrameToColorSpaceUsingIntPtr(depthPointer, (uint)Kinect2Metrics.DepthBufferLength, mappedPointer, (uint)Kinect2Metrics.MappedDepthToColorBufferLength);
            }
            finally
            {
                // Unpin unconditionally so the arrays never stay pinned.
                if (pinnedDepthArray.IsAllocated)
                {
                    pinnedDepthArray.Free();
                }

                if (pinnedMappedArray.IsAllocated)
                {
                    pinnedMappedArray.Free();
                }
            }
        }
        /// <summary>
        /// Refreshes this map from the supplied depth frame.
        /// </summary>
        /// <param name="coordinateMapper">Mapper performing the depth-to-color conversion.</param>
        /// <param name="depthFrame">Depth frame supplying the source data.</param>
        /// <exception cref="ObjectDisposedException">Thrown when the backing buffer has been released.</exception>
        public void Update(CoordinateMapper coordinateMapper, DepthFrameData depthFrame)
        {
            // A zeroed pointer means Dispose has already released the buffer.
            if (IntPtr.Zero == this.dataPointer)
            {
                throw new ObjectDisposedException("ColorToDepthFrameData");
            }

            coordinateMapper.MapDepthFrameToColorSpaceUsingIntPtr(
                depthFrame.DataPointer,
                (uint)depthFrame.SizeInBytes,
                this.dataPointer,
                (uint)this.sizeInBytes);
        }
        /// <summary>
        /// Handles arrival of a multi-source frame: copies the color image (as BGRA)
        /// and the depth image, then maps the depth frame into both color space and
        /// camera space using the locked depth buffer.
        /// </summary>
        /// <param name="sender">Object sending the event.</param>
        /// <param name="e">Event arguments carrying the frame reference.</param>
        private static void Reader_FrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
        {
            if (e.FrameReference == null)
            {
                return;
            }

            MultiSourceFrame multiFrame = e.FrameReference.AcquireFrame();

            // AcquireFrame returns null when the frame has expired by the time this
            // event is processed; bail out instead of dereferencing it (fixes an NRE).
            if (multiFrame == null || multiFrame.ColorFrameReference == null || multiFrame.DepthFrameReference == null)
            {
                return;
            }

            try
            {
                using (DepthFrame depthFrame = multiFrame.DepthFrameReference.AcquireFrame())
                using (ColorFrame colorFrame = multiFrame.ColorFrameReference.AcquireFrame())
                {
                    // Either sub-frame may also have expired.
                    if (depthFrame == null || colorFrame == null)
                    {
                        return;
                    }

                    colorFrameDescription = colorFrame.FrameDescription;
                    colorWidth            = colorFrameDescription.Width;
                    colorHeight           = colorFrameDescription.Height;
                    colorFrameData        = new byte[colorWidth * colorHeight * bytesPerPixel];

                    // Raw copy is cheaper, but only valid when the sensor already
                    // delivers BGRA; otherwise convert during the copy.
                    if (colorFrame.RawColorImageFormat == ColorImageFormat.Bgra)
                    {
                        colorFrame.CopyRawFrameDataToArray(colorFrameData);
                    }
                    else
                    {
                        colorFrame.CopyConvertedFrameDataToArray(colorFrameData, ColorImageFormat.Bgra);
                    }

                    using (KinectBuffer buffer = depthFrame.LockImageBuffer())
                    {
                        depthFrameDescription = depthFrame.FrameDescription;
                        depthWidth            = depthFrame.FrameDescription.Width;
                        depthHeight           = depthFrame.FrameDescription.Height;
                        depthFrameData        = new ushort[depthWidth * depthHeight];

                        cameraSpacePoints = new CameraSpacePoint[depthWidth * depthHeight];
                        colorSpacePoints  = new ColorSpacePoint[depthWidth * depthHeight];

                        depthFrame.CopyFrameDataToArray(depthFrameData);

                        // Map straight from the locked native buffer instead of the
                        // managed copy to avoid re-marshalling the depth data.
                        coordinateMapper.MapDepthFrameToColorSpaceUsingIntPtr(buffer.UnderlyingBuffer, buffer.Size, colorSpacePoints);
                        coordinateMapper.MapDepthFrameToCameraSpaceUsingIntPtr(buffer.UnderlyingBuffer, buffer.Size, cameraSpacePoints);
                    }
                }
            }
            catch (Exception)
            {
                // Deliberate best-effort: a frame occasionally drops or expires
                // mid-processing; skip it and wait for the next event.
                return;
            }
        }
Example #4
0
    /// <summary>
    /// Maps the current depth image into color-space coordinates.
    /// </summary>
    /// <param name="sensorData">Sensor data holding the depth and color images.</param>
    /// <param name="vColorCoords">Destination buffer receiving the mapped color coordinates.</param>
    /// <returns>True when the mapping was performed; false when the mapper or images are missing.</returns>
    public bool MapDepthFrameToColorCoords(KinectInterop.SensorData sensorData, ref Vector2[] vColorCoords)
    {
        // Early-out when any required input is missing.
        if (coordMapper == null || sensorData.colorImage == null || sensorData.depthImage == null)
        {
            return false;
        }

        var pDepthData            = GCHandle.Alloc(sensorData.depthImage, GCHandleType.Pinned);
        var pColorCoordinatesData = GCHandle.Alloc(vColorCoords, GCHandleType.Pinned);

        try
        {
            coordMapper.MapDepthFrameToColorSpaceUsingIntPtr(
                pDepthData.AddrOfPinnedObject(),
                sensorData.depthImage.Length * sizeof(ushort),
                pColorCoordinatesData.AddrOfPinnedObject(),
                (uint)vColorCoords.Length);
        }
        finally
        {
            // Free the pins even if the native mapping call throws; the original
            // code leaked both pinned arrays on exception.
            pColorCoordinatesData.Free();
            pDepthData.Free();
        }

        return true;
    }
Example #5
0
    /// <summary>
    /// Maps the latest depth buffer into depth-space and color-space coordinates,
    /// then uploads the raw depth and color data to their textures.
    /// </summary>
    void ProcessFrame()
    {
        GCHandle pDepthData            = GCHandle.Alloc(pDepthBuffer, GCHandleType.Pinned);
        GCHandle pDepthCoordinatesData = GCHandle.Alloc(m_pDepthCoordinates, GCHandleType.Pinned);
        GCHandle pColorData            = GCHandle.Alloc(m_pColorSpacePoints, GCHandleType.Pinned);

        try
        {
            m_pCoordinateMapper.MapColorFrameToDepthSpaceUsingIntPtr(
                pDepthData.AddrOfPinnedObject(), (uint)pDepthBuffer.Length * sizeof(ushort),
                pDepthCoordinatesData.AddrOfPinnedObject(), (uint)m_pDepthCoordinates.Length);

            m_pCoordinateMapper.MapDepthFrameToColorSpaceUsingIntPtr(
                pDepthData.AddrOfPinnedObject(), pDepthBuffer.Length * sizeof(ushort),
                pColorData.AddrOfPinnedObject(), (uint)m_pColorSpacePoints.Length);

            // Upload the raw depth while the buffer is still pinned.
            m_pDepth.LoadRawTextureData(pDepthData.AddrOfPinnedObject(), pDepthBuffer.Length * sizeof(ushort));
            m_pDepth.Apply();
        }
        finally
        {
            // Always unpin, even when a mapping or texture call throws; the
            // original code leaked all three pins on exception.
            pColorData.Free();
            pDepthCoordinatesData.Free();
            pDepthData.Free();
        }

        m_pColorRGBX.LoadRawTextureData(pColorBuffer);
        m_pColorRGBX.Apply();
    }
Example #6
0
        /// <summary>
        /// Procedure invoked by Kinect when new data are available.
        /// Copies color/depth/infrared frames into fresh arrays, computes the three
        /// coordinate mappings, then (in the finally) disposes the frames and stores
        /// the result in the buffer for the processing thread.
        /// </summary>
        /// <param name="sender">Object sending the event.</param>
        /// <param name="e">Event arguments carrying the multi-source frame reference.</param>
        private void MultisourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
        {
            if (KinectSensor == null || Reader == null)
            {
                return;
            }

            // acquire frame data
            MultiSourceFrame multiSourceFrame = e.FrameReference.AcquireFrame();

            // if the Frame has expired by the time we process this event, return.
            if (multiSourceFrame == null)
            {
                return;
            }

            // Continue only if buffer is empty (the previous capture has been consumed);
            // otherwise skip this frame rather than overwrite pending data.
            if (!Buffer.IsEmpty())
            {
                return;
            }

            // declare variables for data from sensor
            ColorFrame    colorFrame    = null;
            DepthFrame    depthFrame    = null;
            InfraredFrame infraredFrame = null;

            byte[]             colorFrameData                 = null;
            ushort[]           depthData                      = null;
            ushort[]           infraredData                   = null;
            DepthSpacePoint[]  pointsFromColorToDepth         = null;
            ColorSpacePoint[]  pointsFromDepthToColor         = null;
            CameraSpacePoint[] cameraSpacePointsFromDepthData = null;

            try
            {
                // get frames from sensor
                colorFrame    = multiSourceFrame.ColorFrameReference.AcquireFrame();
                depthFrame    = multiSourceFrame.DepthFrameReference.AcquireFrame();
                infraredFrame = multiSourceFrame.InfraredFrameReference.AcquireFrame();

                // If any frame has expired by the time we process this event, return.
                if (colorFrame == null || depthFrame == null || infraredFrame == null)
                {
                    return;
                }

                // use frame data to fill arrays
                // (color is 4 bytes per pixel: BGRA)
                colorFrameData = new byte[ColorFrameDescription.LengthInPixels * 4];
                depthData      = new ushort[DepthFrameDescription.LengthInPixels];
                infraredData   = new ushort[InfraredFrameDescription.LengthInPixels];

                colorFrame.CopyConvertedFrameDataToArray(colorFrameData, ColorImageFormat.Bgra);
                depthFrame.CopyFrameDataToArray(depthData);
                infraredFrame.CopyFrameDataToArray(infraredData);

                // one mapped point per color pixel (color->depth), and one per depth
                // pixel for the other two mappings
                pointsFromColorToDepth         = new DepthSpacePoint[ColorFrameDescription.LengthInPixels];
                pointsFromDepthToColor         = new ColorSpacePoint[DepthFrameDescription.LengthInPixels];
                cameraSpacePointsFromDepthData = new CameraSpacePoint[DepthFrameDescription.LengthInPixels];

                // Map from the locked native depth buffer directly, avoiding a
                // second managed copy of the depth data.
                using (KinectBuffer depthFrameData = depthFrame.LockImageBuffer())
                {
                    CoordinateMapper.MapColorFrameToDepthSpaceUsingIntPtr(
                        depthFrameData.UnderlyingBuffer,
                        depthFrameData.Size,
                        pointsFromColorToDepth);

                    CoordinateMapper.MapDepthFrameToColorSpaceUsingIntPtr(
                        depthFrameData.UnderlyingBuffer,
                        depthFrameData.Size,
                        pointsFromDepthToColor);

                    CoordinateMapper.MapDepthFrameToCameraSpaceUsingIntPtr(
                        depthFrameData.UnderlyingBuffer,
                        depthFrameData.Size,
                        cameraSpacePointsFromDepthData);
                }
            }
            finally
            {
                // dispose frames so that Kinect can continue processing
                colorFrame?.Dispose();
                depthFrame?.Dispose();
                infraredFrame?.Dispose();

                // send data futher
                // NOTE(review): this runs even when the try block threw; the null
                // checks below gate on the last array assigned before the mapping
                // calls, so a failure inside the mapping block could still publish
                // partially-mapped data — confirm whether that is intended.
                if (
                    colorFrameData != null &&
                    depthData != null &&
                    infraredData != null &&
                    cameraSpacePointsFromDepthData != null
                    )
                {
                    // store data to buffer and notify processing thread
                    Buffer.Store(
                        new KinectData(
                            colorFrameData,
                            depthData,
                            infraredData,
                            cameraSpacePointsFromDepthData,
                            pointsFromColorToDepth,
                            pointsFromDepthToColor
                            )
                        );

                    TrackingManager.SendKinectUpdate();
                }
            }
        }
        /// <summary>
        /// Re-maps the supplied depth frame into this color-to-depth buffer.
        /// </summary>
        /// <param name="coordinateMapper">Coordinate mapper performing the conversion.</param>
        /// <param name="depthFrame">Depth frame providing the source data.</param>
        /// <exception cref="ObjectDisposedException">Thrown when the internal buffer has been released.</exception>
        public void Update(CoordinateMapper coordinateMapper, DepthFrameData depthFrame)
        {
            // Guard against use after the native buffer has been released.
            if (this.dataPointer == IntPtr.Zero)
            {
                throw new ObjectDisposedException("ColorToDepthFrameData");
            }

            coordinateMapper.MapDepthFrameToColorSpaceUsingIntPtr(
                depthFrame.DataPointer, (uint)depthFrame.SizeInBytes,
                this.dataPointer, (uint)this.sizeInBytes);
        }
        /// <summary>
        /// Handles the depth/color/body index frame data arriving from the sensor.
        /// Depending on which checkboxes are enabled, publishes the color frame,
        /// a serialized body-skeleton frame, and/or a point cloud over the
        /// corresponding publishers.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void OnMultiSourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
        {
            DepthFrame depthFrame = null;
            ColorFrame colorFrame = null;
            BodyFrame  bodyFrame  = null;

            MultiSourceFrame multiSourceFrame = e.FrameReference.AcquireFrame();

            // If the Frame has expired by the time we process this event, return.
            if (multiSourceFrame == null)
            {
                return;
            }

            // We use a try/finally to ensure that we clean up before we exit the function.
            // This includes calling Dispose on any Frame objects that we may have and unlocking the bitmap back buffer.
            try
            {
                depthFrame = multiSourceFrame.DepthFrameReference.AcquireFrame();
                colorFrame = multiSourceFrame.ColorFrameReference.AcquireFrame();
                bodyFrame  = multiSourceFrame.BodyFrameReference.AcquireFrame();

                // If any frame has expired by the time we process this event, return.
                // The "finally" statement will Dispose any that are not null.
                if (depthFrame == null || colorFrame == null || bodyFrame == null)
                {
                    return;
                }

                // Copy color data (using Bgra format)
                colorFrame.CopyConvertedFrameDataToIntPtr(colorPixels, COLOR_PIXEL_BYTES, ColorImageFormat.Bgra);

                if (ColorDataCheckBox.Checked)
                {
                    // Copy from the unmanaged color buffer into the managed array,
                    // starting at offset 8 in colorFrameData.
                    // NOTE(review): the 8-byte offset presumably reserves a header
                    // at the front of colorFrameData — confirm against the consumer.
                    Marshal.Copy(colorPixels, colorFrameData, 8, (int)COLOR_PIXEL_BYTES);
                    colorFramePublisher.Send(new ZFrame(colorFrameData));
                }

                if (BodyDataCheckBox.Checked)
                {
                    // Copy data for Body tracking
                    bodyArray = new Body[bodyFrame.BodyCount];
                    bodyFrame.GetAndRefreshBodyData(bodyArray);

                    // Remove old bodies
                    bodyFrameData.Clear();

                    //At this point, we are just reserving 4 bytes for storing 'bodyCount' and we are going to modify it later
                    AddArrayToList(ref bodyFrameData, new byte[4] {
                        0, 0, 0, 0
                    });

                    // Serialize each tracked body as:
                    //   8 bytes TrackingId, 4 bytes joint count, then per joint:
                    //   4 bytes TrackingState, 4 bytes JointType, 3 x 4 bytes XYZ.
                    int bodyCount = 0;
                    foreach (Body body in bodyArray)
                    {
                        if (!body.IsTracked)
                        {
                            continue;
                        }

                        AddArrayToList(ref bodyFrameData, BitConverter.GetBytes(body.TrackingId));   //add 8 bytes for ulong TrackingId
                        AddArrayToList(ref bodyFrameData, BitConverter.GetBytes(ALL_JOINTS.Length)); //add 4 bytes for int joint count

                        foreach (JointType jointType in ALL_JOINTS)
                        {
                            var joint = body.Joints[jointType];
                            AddArrayToList(ref bodyFrameData, BitConverter.GetBytes((int)joint.TrackingState)); //add 4 bytes for int TrackingState
                            AddArrayToList(ref bodyFrameData, BitConverter.GetBytes((int)joint.JointType));     //add 4 bytes for int JointType
                            AddArrayToList(ref bodyFrameData, BitConverter.GetBytes(joint.Position.X));         //add 4 bytes for float X
                            AddArrayToList(ref bodyFrameData, BitConverter.GetBytes(joint.Position.Y));         //add 4 bytes for float Y
                            AddArrayToList(ref bodyFrameData, BitConverter.GetBytes(joint.Position.Z));         //add 4 bytes for float Z
                        }
                        bodyCount++;
                    }

                    // Patch the reserved 4-byte slot with the actual body count.
                    var bodyCountBytes = BitConverter.GetBytes(bodyCount);//4 bytes
                    UpdateList(bodyCountBytes, ref bodyFrameData);

                    bodyFramePublisher.Send(new ZFrame(bodyFrameData.ToArray()));
                }

                if (PointCloudDataCheckBox.Checked)
                {
                    // Copy depth data and derive camera-space and color-space maps
                    // directly from the unmanaged depth buffer.
                    depthFrame.CopyFrameDataToIntPtr(depthFrameData, DEPTH_FRAME_BYTES);
                    coordinateMapper.MapDepthFrameToCameraSpaceUsingIntPtr(depthFrameData, DEPTH_FRAME_BYTES, camerSpacePoints, CAMERA_SPACE_BYTES);
                    coordinateMapper.MapDepthFrameToColorSpaceUsingIntPtr(depthFrameData, DEPTH_FRAME_BYTES, colorSpacePoints, COLOR_SPACE_BYTES);

                    // Remove old points
                    ClearPointCloud();

                    //At this point, we are just reserving 4 bytes for storing 'pointCloudSize' and we are going to modify it later
                    AddPointsToCloud(new byte[4] {
                        0, 0, 0, 0
                    });

                    ComposePointCloud();

                    GetNonEmptyPointCloud(out byte[] pointCloud);
                    pointCloudPublisher.Send(new ZFrame(pointCloud));
                }
            }
            finally
            {
                // dispose frames so that Kinect can continue processing
                if (depthFrame != null)
                {
                    depthFrame.Dispose();
                }

                if (colorFrame != null)
                {
                    colorFrame.Dispose();
                }

                if (bodyFrame != null)
                {
                    bodyFrame.Dispose();
                }
            }
        }