Code Example #1
File: Tools.cs  Project: KinectGod/KinectFYP
        public static Vector2 Convert(KinectSensor sensor, SkeletonPoint position)
        {
            float width = 0;
            float height = 0;
            float x = 0;
            float y = 0;

            if (sensor.ColorStream.IsEnabled)
            {
                var colorPoint = sensor.MapSkeletonPointToColor(position, sensor.ColorStream.Format);
                x = colorPoint.X;
                y = colorPoint.Y;

                switch (sensor.ColorStream.Format)
                {
                    case ColorImageFormat.RawYuvResolution640x480Fps15:
                    case ColorImageFormat.RgbResolution640x480Fps30:
                    case ColorImageFormat.YuvResolution640x480Fps15:
                        width = 640;
                        height = 480;
                        break;
                    case ColorImageFormat.RgbResolution1280x960Fps12:
                        width = 1280;
                        height = 960;
                        break;
                }
            }
            else if (sensor.DepthStream.IsEnabled)
            {
                var depthPoint = sensor.MapSkeletonPointToDepth(position, sensor.DepthStream.Format);
                x = depthPoint.X;
                y = depthPoint.Y;

                switch (sensor.DepthStream.Format)
                {
                    case DepthImageFormat.Resolution80x60Fps30:
                        width = 80;
                        height = 60;
                        break;
                    case DepthImageFormat.Resolution320x240Fps30:
                        width = 320;
                        height = 240;
                        break;
                    case DepthImageFormat.Resolution640x480Fps30:
                        width = 640;
                        height = 480;
                        break;
                }
            }
            else
            {
                width = 1;
                height = 1;
            }

            return new Vector2(x / width, y / height);
        }
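The normalized result can then be scaled to any render surface. A minimal usage sketch (assuming the containing class is named Tools after the file name, and that sensor and skeleton are already available in the caller):

            // Hypothetical caller: scale the normalized position to an 800x600 viewport.
            Vector2 normalized = Tools.Convert(sensor, skeleton.Joints[JointType.HandRight].Position);
            Vector2 screenPos = new Vector2(normalized.X * 800f, normalized.Y * 600f);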
Code Example #2
        public SkeletonRenderer()
        {
            sensor = KinectSensor.KinectSensors.FirstOrDefault();

            LeftPos.X = 100;
            LeftPos.Y = 200;

            RightPos.X = 300;
            RightPos.Y = 200;

            if (sensor != null)
            {
                sensor.SkeletonStream.Enable();
                sensor.Start();

                var stream = Observable.FromEventPattern<SkeletonFrameReadyEventArgs>(eh => sensor.SkeletonFrameReady += eh, eh => sensor.SkeletonFrameReady -= eh)
                        .Select(frameReady =>
                        {
                            using (var frame = frameReady.EventArgs.OpenSkeletonFrame())
                            {
                                var res = new Skeleton[frame.SkeletonArrayLength];
                                frame.CopySkeletonDataTo(res);
                                return res[0];
                            }
                        });

                skeletonStream = stream.Subscribe(s =>
                {
                    var left = sensor.MapSkeletonPointToDepth(s.Joints[JointType.HandLeft].Position, DepthImageFormat.Resolution640x480Fps30);
                    var right = sensor.MapSkeletonPointToDepth(s.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);
                });
            }
            else
            {
                Observable.Generate(new Skeleton(), s => true, s =>
                {
                    //s.Joints[JointType.HandLeft] = new Joint() { Position = new SkeletonPoint { }, JointType= JointType.HandLeft };
                    return s;
                }, s => s, s => TimeSpan.FromMilliseconds(33));
            }
        }
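Note that returning res[0] hands back whatever occupies the first array slot, which is frequently an untracked placeholder skeleton. A sketch of a safer selection under the same SDK (not part of the original project) would replace that line with:

                                // Hypothetical replacement for "return res[0]":
                                // prefer the first actively tracked skeleton, or null if none.
                                return res.FirstOrDefault(sk => sk.TrackingState == SkeletonTrackingState.Tracked);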
Code Example #3
File: JointTracker.cs  Project: pvishal/kinect-hands
        public DepthImagePoint GetJointPosition(KinectSensor sensor, AllFramesReadyEventArgs e, JointType jointType)
        {
            Skeleton skeleton = GetTrackedSkeleton(e);
            DepthImagePoint depthPoint = new DepthImagePoint();

            JointDetected = false;

            if (SkeletonDetected)
            {
                Joint joint = skeleton.Joints[jointType];

                JointDetected = true;
                SkeletonPoint jointPoint = joint.Position;
                depthPoint = sensor.MapSkeletonPointToDepth(jointPoint, DepthImageFormat.Resolution320x240Fps30);
            }

            return depthPoint;
        }
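A hypothetical call site (the tracker and sensor fields, and the publicly readable JointDetected flag, are assumptions for illustration) wires this into an AllFramesReady handler:

        // Hypothetical handler: query the right hand once per frame.
        private void Sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            DepthImagePoint hand = tracker.GetJointPosition(sensor, e, JointType.HandRight);
            if (tracker.JointDetected)
            {
                // hand.X and hand.Y are in 320x240 depth-image coordinates.
            }
        }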
        /// <summary>
        /// Returns the 2D position of the provided 3D SkeletonPoint.
        /// The result will be in either Color coordinate space or Depth coordinate space, depending on 
        /// the current value of this.ImageType.
        /// Only those parameters associated with the current ImageType will be used.
        /// </summary>
        /// <param name="sensor">The KinectSensor for which this mapping is being performed.</param>
        /// <param name="imageType">The target image type</param>
        /// <param name="renderSize">The target dimensions of the visualization</param>
        /// <param name="skeletonPoint">The source point to map</param>
        /// <param name="colorFormat">The format of the target color image, if imageType is Color</param>
        /// <param name="colorWidth">The width of the target color image, if the imageType is Color</param>
        /// <param name="colorHeight">The height of the target color image, if the imageType is Color</param>
        /// <param name="depthFormat">The format of the target depth image, if the imageType is Depth</param>
        /// <param name="depthWidth">The width of the target depth image, if the imageType is Depth</param>
        /// <param name="depthHeight">The height of the target depth image, if the imageType is Depth</param>
        /// <returns>Returns the 2D position of the provided 3D SkeletonPoint.</returns>
        private static Point Get2DPosition(
            KinectSensor sensor,
            ImageType imageType,
            Size renderSize,
            SkeletonPoint skeletonPoint,
            ColorImageFormat colorFormat,
            int colorWidth,
            int colorHeight,
            DepthImageFormat depthFormat,
            int depthWidth,
            int depthHeight)
        {
            try
            {
                switch (imageType)
                {
                    case ImageType.Color:
                        if (ColorImageFormat.Undefined != colorFormat)
                        {
                            var colorPoint = sensor.MapSkeletonPointToColor(skeletonPoint, colorFormat);

                            // map back to skeleton.Width & skeleton.Height
                            return new Point(
                                (int)(renderSize.Width * colorPoint.X / colorWidth),
                                (int)(renderSize.Height * colorPoint.Y / colorHeight));
                        }

                        break;
                    case ImageType.Depth:
                        if (DepthImageFormat.Undefined != depthFormat)
                        {
                            var depthPoint = sensor.MapSkeletonPointToDepth(skeletonPoint, depthFormat);

                            return new Point(
                                (int)(renderSize.Width * depthPoint.X / depthWidth),
                                (int)(renderSize.Height * depthPoint.Y / depthHeight));
                        }

                        break;
                }
            }
            catch (InvalidOperationException)
            {
                // The stream must have stopped abruptly
                // Handle this gracefully
            }

            return new Point();
        }
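Because Get2DPosition is private, calls come from inside the same class. The sketch below (sensor and skeleton are assumed to be in scope) maps a hand joint onto a 640x480 color-space render area:

            // Hypothetical call: map the right hand into a 640x480 color visualization.
            Point handPoint = Get2DPosition(
                sensor,
                ImageType.Color,
                new Size(640, 480),
                skeleton.Joints[JointType.HandRight].Position,
                ColorImageFormat.RgbResolution640x480Fps30,
                640,
                480,
                DepthImageFormat.Undefined,
                0,
                0);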
Code Example #5
        private static Point GetJointPointHand(KinectSensor kinectDevice, Joint joint, Size containerSize, Point offset)
        {
            DepthImagePoint point = kinectDevice.MapSkeletonPointToDepth(joint.Position, kinectDevice.DepthStream.Format);
            point.X = (int)((point.X * containerSize.Width / kinectDevice.DepthStream.FrameWidth) - offset.X);
            point.Y = (int)((point.Y * containerSize.Height / kinectDevice.DepthStream.FrameHeight) - offset.Y);

            return new Point(point.X, point.Y);
        }
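A hypothetical call from within the same class (kinectDevice, skeleton, and the container dimensions are assumptions) that positions a cursor over the right hand might look like:

            // Hypothetical usage: map the right hand into an 800x600 container with no offset.
            Point cursor = GetJointPointHand(
                kinectDevice,
                skeleton.Joints[JointType.HandRight],
                new Size(800, 600),
                new Point(0, 0));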
Code Example #6
        public static Vector2 GetScreenPosition(this Joint joint, KinectSensor kinectRuntime, int screenWidth, int screenHeight)
        {
            // Map the joint into 320x240 depth-image space.
            DepthImagePoint depthPoint = kinectRuntime.MapSkeletonPointToDepth(joint.Position, DepthImageFormat.Resolution320x240Fps30);

            // Scale from the 320x240 depth resolution to the requested screen size,
            // which is what the screenWidth/screenHeight parameters are for.
            return new Vector2(
                depthPoint.X * screenWidth / 320f,
                depthPoint.Y * screenHeight / 240f);
        }
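As an extension method, GetScreenPosition can be called directly on a Joint. A hypothetical usage (the sensor and skeleton variables are assumptions) is:

            // Hypothetical usage: place a cursor at the right hand on a 1280x720 screen.
            Vector2 cursorPos = skeleton.Joints[JointType.HandRight].GetScreenPosition(sensor, 1280, 720);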
Code Example #7
File: MainController.cs  Project: hawkingrei/PPKinecT
        /// <summary>
        /// Checks the queue to determine whether the right hand and elbow positions are stable,
        /// provided that Status is DepthDetecting.
        /// If they are stable, depth calibration is performed and Status advances to the next state;
        /// otherwise nothing is changed.
        /// </summary>
        public void DoDepthDetecting(KinectSensor sensor, DepthImageFrame depthFrame)
        {
            if (Status == MainStatus.DepthDetecting)
            {
                if (IsStable(rightHandQueue, DEPTH_HAND_TOLERANCE, DEPTH_HAND_COUNT))
                {
                    // if is stable, calibrate depth according to avg of position near hand
                    SkeletonPoint handPoint = rightHandQueue.Last().Position;
                    DepthImagePoint centerDepthPoint = sensor.MapSkeletonPointToDepth(
                        handPoint, sensor.DepthStream.Format);
                    int[] depthArr = new int[DEPTH_NEAR_COUNT * 4];
                    int index = 0;
                    for (int i = 0; i < DEPTH_NEAR_COUNT; ++i)
                    {
                        // top
                        SkeletonPoint topSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i),
                            centerDepthPoint.Y - DEPTH_NEAR_RADIUS);
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(topSke).Depth;
                        // bottom
                        SkeletonPoint bottomSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i),
                            centerDepthPoint.Y + DEPTH_NEAR_RADIUS);
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(bottomSke).Depth;
                        // left
                        SkeletonPoint leftSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X - DEPTH_NEAR_RADIUS,
                            centerDepthPoint.Y - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i));
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(leftSke).Depth;
                        // right
                        SkeletonPoint rightSke = depthFrame.MapToSkeletonPoint(
                            centerDepthPoint.X + DEPTH_NEAR_RADIUS,
                            centerDepthPoint.Y - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i));
                        depthArr[index++] = depthFrame.MapFromSkeletonPoint(rightSke).Depth;
                    }
                    // Use the median (rather than the mean) of the sampled depths
                    Array.Sort(depthArr);
                    kCalibrator.SetDepth(depthArr[depthArr.Length / 2]);

                    ClearQueue();
                    Status = MainStatus.EdgeDetecting;
                    mainWindow.textBlock.Text = "Detecting edges now. Point your arm at the four corners of the screen.";
                    // cooling time timer
                    edgeTimer.Start();
                    edgeCooling = true;
                }
            }
        }
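The IsStable helper is not shown in this example. One plausible sketch (the signature and tolerance semantics are assumptions, not the project's actual code; requires System.Linq) checks that every queued sample stays within the given tolerance of the most recent one:

        // Hypothetical: stable when at least `count` samples exist and each stays
        // within `tolerance` of the latest sample on every axis.
        private bool IsStable(Queue<Joint> queue, float tolerance, int count)
        {
            if (queue.Count < count)
            {
                return false;
            }

            SkeletonPoint last = queue.Last().Position;
            return queue.All(j =>
                Math.Abs(j.Position.X - last.X) < tolerance &&
                Math.Abs(j.Position.Y - last.Y) < tolerance &&
                Math.Abs(j.Position.Z - last.Z) < tolerance);
        }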
Code Example #8
        // Project the 3D skeleton coordinate onto the 2D plane
        private static Point GetJointPoint(KinectSensor kinectDevice, Joint joint, Size containerSize, Point offset)
        {
            DepthImagePoint point = kinectDevice.MapSkeletonPointToDepth(joint.Position, kinectDevice.DepthStream.Format);

            // Hard-coded window dimensions; containerSize is not used in this example
            double windowWidth = 770;
            double windowHeight = 1020;

            point.X = (int)((point.X * windowWidth / kinectDevice.DepthStream.FrameWidth) - offset.X);
            point.Y = (int)((point.Y * windowHeight / kinectDevice.DepthStream.FrameHeight) - offset.Y);

            return new Point(point.X, point.Y);
        }