/// <summary>
/// Measure the player's height from the skeleton and depth data.
/// </summary>
/// <param name="kinect"></param>
/// <param name="depthFrame"></param>
/// <param name="skeletonFrame"></param>
private void HeightMeasure( KinectSensor kinect, DepthImageFrame depthFrame, SkeletonFrame skeletonFrame )
{
    ColorImageStream colorStream = kinect.ColorStream;
    DepthImageStream depthStream = kinect.DepthStream;

    // Get the first tracked skeleton.
    // Keep the index, since it corresponds to the player ID.
    Skeleton[] skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
    skeletonFrame.CopySkeletonDataTo( skeletons );

    int playerIndex = 0;
    for ( playerIndex = 0; playerIndex < skeletons.Length; playerIndex++ ) {
        if ( skeletons[playerIndex].TrackingState == SkeletonTrackingState.Tracked ) {
            break;
        }
    }
    if ( playerIndex == skeletons.Length ) {
        return;
    }

    // The first tracked skeleton
    Skeleton skeleton = skeletons[playerIndex];

    // The actual player ID is the skeleton index + 1
    playerIndex++;

    // If the head or feet are not tracked, return and leave the RGB camera data as-is
    Joint head = skeleton.Joints[JointType.Head];
    Joint leftFoot = skeleton.Joints[JointType.FootLeft];
    Joint rightFoot = skeleton.Joints[JointType.FootRight];
    if ( (head.TrackingState != JointTrackingState.Tracked) ||
         (leftFoot.TrackingState != JointTrackingState.Tracked) ||
         (rightFoot.TrackingState != JointTrackingState.Tracked) ) {
        return;
    }

    // Get the per-pixel data from the depth camera
    short[] depthPixel = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo( depthPixel );

    // Get the RGB camera coordinates corresponding to the depth camera coordinates (coordinate alignment)
    ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
    kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel, colorStream.Format, colorPoint );

    // Find the top of the head
    DepthImagePoint headDepth = depthFrame.MapFromSkeletonPoint( head.Position );
    int top = 0;
    for ( int i = 0; (headDepth.Y - i) > 0; i++ ) {
        // Walk up the column one pixel at a time; the last pixel that still belongs
        // to the player marks the top of the head
        int index = ((headDepth.Y - i) * depthFrame.Width) + headDepth.X;
        int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
        if ( player == playerIndex ) {
            top = i;
        }
    }

    // Map the top of the head back to a 3D skeleton point and take the lower of the two feet;
    // the difference between them gives the height
    head.Position = depthFrame.MapToSkeletonPoint( headDepth.X, headDepth.Y - top );
    Joint foot = (leftFoot.Position.Y < rightFoot.Position.Y) ? leftFoot : rightFoot;

    // Display the measured height
    DrawMeasure( kinect, colorStream, head, foot );
}
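DrawMeasure itself is not shown in this snippet. A minimal sketch of the height calculation it would need is below; the helper name and signature are hypothetical, not the original author's. Skeleton-space coordinates are expressed in meters, so once the adjusted head joint and the lower foot joint are known, the height is simply the difference of their Y components.

// Hypothetical helper (not the original DrawMeasure): estimates the player's
// height in meters from the adjusted head joint and the lower foot joint.
private static double EstimateHeightMeters(Joint head, Joint foot)
{
    // Skeleton space is metric, so the vertical extent is the Y difference.
    return head.Position.Y - foot.Position.Y;
}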
private Point GetPosition2DLocation(DepthImageFrame depthFrame, SkeletonPoint skeletonPoint)
{
    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeletonPoint);
    ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(
        depthPoint.X, depthPoint.Y, this._sensor.ColorStream.Format);

    // map back to skeleton.Width & skeleton.Height
    //return new Point(
    //    (int)(this.RenderSize.Width * colorPoint.X / this._sensor.ColorStream.FrameWidth),
    //    (int)(this.RenderSize.Height * colorPoint.Y / this._sensor.ColorStream.FrameHeight));
    return new Point(
        (int)(canvas1.Width * colorPoint.X / this._sensor.ColorStream.FrameWidth),
        (int)(canvas1.Height * colorPoint.Y / this._sensor.ColorStream.FrameHeight));
}
private double GetRealLength(DepthImageFrame depthFrame, SkeletonPoint skeletonPoint1, SkeletonPoint skeletonPoint2)
{
    DepthImagePoint depthPoint1 = depthFrame.MapFromSkeletonPoint(skeletonPoint1);
    DepthImagePoint depthPoint2 = depthFrame.MapFromSkeletonPoint(skeletonPoint2);
    double depth = (depthPoint1.Depth + depthPoint2.Depth) / 2.0;

    //double lengthPixel = Math.Sqrt(Math.Pow(depthPoint1.X - depthPoint2.X, 2) + Math.Pow(depthPoint1.Y - depthPoint2.Y, 2));
    double lengthPixel = Math.Abs(depthPoint1.Y - depthPoint2.Y);

    // Real-world extent of the view frustum at this depth; 57° is the Kinect depth camera's
    // horizontal field of view. Use 57.0 so the division is not truncated to 28 by integer math.
    double frameLengthReal = 2 * depth * Math.Tan(57.0 / 2 * Math.PI / 180);

    // lengthReal / frameLengthReal = lengthPixel / frameLengthPixel
    double lengthReal = lengthPixel * frameLengthReal / depthFrame.Height;
    return lengthReal;
}
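As a sanity check on the proportion used above (the numbers here are illustrative, not taken from the original project): at an average depth of 2,000 mm the frustum spans 2 × 2000 × tan(28.5°) ≈ 2,172 mm, so a 100-pixel vertical separation on a 480-pixel-high depth frame corresponds to roughly 100 × 2172 / 480 ≈ 452 mm.

// Illustrative check of the proportion used in GetRealLength (not project code).
double depthMm = 2000;                                                        // assumed average depth in millimeters
double frameLengthReal = 2 * depthMm * Math.Tan(57.0 / 2 * Math.PI / 180);    // ≈ 2172 mm
double lengthPixel = 100;                                                     // assumed vertical separation in pixels
double lengthReal = lengthPixel * frameLengthReal / 480;                      // ≈ 452 mm on a 480-pixel-high frame
Console.WriteLine(lengthReal);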
/// <summary>
/// Check the queue to see whether the right hand and elbow positions are stable
/// while Status is DepthDetecting.
/// If they are stable, depth calibration is performed and Status advances to the next state.
/// Otherwise nothing is changed.
/// </summary>
public void DoDepthDetecting(KinectSensor sensor, DepthImageFrame depthFrame)
{
    if (Status == MainStatus.DepthDetecting)
    {
        if (IsStable(rightHandQueue, DEPTH_HAND_TOLERANCE, DEPTH_HAND_COUNT))
        {
            // If stable, calibrate depth from the pixels surrounding the hand.
            SkeletonPoint handPoint = rightHandQueue.Last().Position;
            DepthImagePoint centerDepthPoint = sensor.MapSkeletonPointToDepth(
                handPoint, sensor.DepthStream.Format);

            int[] depthArr = new int[DEPTH_NEAR_COUNT * 4];
            int index = 0;
            for (int i = 0; i < DEPTH_NEAR_COUNT; ++i)
            {
                // top
                SkeletonPoint topSke = depthFrame.MapToSkeletonPoint(
                    centerDepthPoint.X - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i),
                    centerDepthPoint.Y - DEPTH_NEAR_RADIUS);
                depthArr[index++] = depthFrame.MapFromSkeletonPoint(topSke).Depth;

                // bottom
                SkeletonPoint bottomSke = depthFrame.MapToSkeletonPoint(
                    centerDepthPoint.X - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i),
                    centerDepthPoint.Y + DEPTH_NEAR_RADIUS);
                depthArr[index++] = depthFrame.MapFromSkeletonPoint(bottomSke).Depth;

                // left
                SkeletonPoint leftSke = depthFrame.MapToSkeletonPoint(
                    centerDepthPoint.X - DEPTH_NEAR_RADIUS,
                    centerDepthPoint.Y - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i));
                depthArr[index++] = depthFrame.MapFromSkeletonPoint(leftSke).Depth;

                // right
                SkeletonPoint rightSke = depthFrame.MapToSkeletonPoint(
                    centerDepthPoint.X + DEPTH_NEAR_RADIUS,
                    centerDepthPoint.Y - DEPTH_NEAR_RADIUS + (int)(DEPTH_SPAN * i));
                depthArr[index++] = depthFrame.MapFromSkeletonPoint(rightSke).Depth;
            }

            // Use the median (rather than the mean) of the sampled depths.
            // The array holds DEPTH_NEAR_COUNT * 4 samples, so the middle element
            // is at depthArr.Length / 2.
            Array.Sort(depthArr);
            kCalibrator.SetDepth(depthArr[depthArr.Length / 2]);

            ClearQueue();
            Status = MainStatus.EdgeDetecting;
            mainWindow.textBlock.Text = "Detecting edge now. Arm at four corners of the screen.";

            // cooling-time timer
            edgeTimer.Start();
            edgeCooling = true;
        }
    }
}
public void ProcessFrame(KinectSensor sensor, byte[] colorImage, ColorImageFormat colorImageFormat,
    DepthImageFrame depthFrame, short[] depthImage, DepthImageFormat depthImageFormat,
    Skeleton[] skeletonData, SkeletonFrame skeletonFrame)
{
    coordinates.Clear();
    int detectedFace = 0;
    int trackedSkeletonsCount = 0;
    int playerIndex = -1;

    for (int i = 0; i < skeletonData.Length; i++)
    {
        Skeleton skeleton = skeletonData[i];
        if (skeleton.TrackingState == SkeletonTrackingState.Tracked
            || skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
        {
            // We want to keep a record of any skeleton, tracked or untracked.
            if (!trackedSkeletons.ContainsKey(skeleton.TrackingId))
            {
                trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
            }

            DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeleton.Joints[JointType.Head].Position);
            ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, colorImageFormat);

            Coordinates2D c = new Coordinates2D();
            playerIndex = i + 1;

            c.X = colorPoint.X;
            c.Y = colorPoint.Y;
            c.Width = 0;
            c.Height = 0;
            c.LeftEyeX = 0;
            c.LeftEyeY = 0;
            c.RightEyeX = 0;
            c.RightEyeY = 0;
            c.PlayerIndex = playerIndex;

            trackedSkeletonsCount++;

            // Give each tracker the updated frame.
            SkeletonFaceTracker skeletonFaceTracker;
            if (!scannedIdentities.Contains(skeleton.TrackingId)
                && detectedFace < 1
                && trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
            {
                detectedFace++;
                scannedIdentities.Add(skeleton.TrackingId);

                skeletonFaceTracker.OnFrameReady(sensor, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;

                Coordinates2D? realCoords = skeletonFaceTracker.GetFaceCoordinates();
                if (realCoords.HasValue)
                {
                    c = realCoords.Value;
                    c.PlayerIndex = playerIndex;
                }
            }

            c.TrackingId = skeleton.TrackingId;
            coordinates.Add(c);
        }
    }

    if (scannedIdentities.Count > 0 && scannedIdentities.Count >= trackedSkeletonsCount)
    {
        scannedIdentities.Clear();
    }

    RemoveOldTrackers(skeletonFrame.FrameNumber);

    int[] identities = new int[coordinates.Count];
    double[] distances = new double[coordinates.Count * 8];
    this.ProcessImage(colorImage, GetWidth(colorImageFormat), GetHeight(colorImageFormat),
        depthImage, 640, 480, coordinates.ToArray(), identities, distances);
}
/// <summary>
/// Convert a real-world skeleton position to 2D image coordinates.
/// </summary>
/// <param name="depthFrame">Depth frame used for the conversion.</param>
/// <param name="skeletonPosition">Skeleton point whose position should be converted.</param>
/// <returns>Point in the 2D image.</returns>
private Point GetPosition2DLocation(DepthImageFrame depthFrame, SkeletonPoint skeletonPosition)
{
    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeletonPosition);
    ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(
        depthPoint.X, depthPoint.Y, kinect.ColorStream.Format);

    // map back to skeleton.Width & skeleton.Height
    return new Point(colorPoint.X, colorPoint.Y);
}
/// <summary>
/// Get a joint's display position on the canvas.
/// </summary>
/// <param name="depthFrame">Depth frame used for the mapping.</param>
/// <param name="joint">Joint to convert.</param>
/// <returns>Position scaled to the canvas size.</returns>
private Point getDisplayPosition(DepthImageFrame depthFrame, Joint joint)
{
    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(joint.Position);
    return new Point(
        (int)(userCanvas.Width * depthPoint.X / depthFrame.Width),
        (int)(userCanvas.Height * depthPoint.Y / depthFrame.Height));
}
private Point GetPosition2DLocation(DepthImageFrame depthFrame, SkeletonPoint skeletonPoint)
{
    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeletonPoint);
    return new Point(
        (int)(this.RenderSize.Width * depthPoint.X / depthFrame.Width),
        (int)(this.RenderSize.Height * depthPoint.Y / depthFrame.Height));

    /*
    switch (ImageType)
    {
        case ImageType.Color:
            ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(
                depthPoint.X, depthPoint.Y, kinectSensor.ColorStream.Format);

            // map back to skeleton.Width & skeleton.Height
            return new Point(
                (int)(this.RenderSize.Width * colorPoint.X / kinectSensor.ColorStream.FrameWidth),
                (int)(this.RenderSize.Height * colorPoint.Y / kinectSensor.ColorStream.FrameHeight));

        case ImageType.Depth:
            return new Point(
                (int)(this.RenderSize.Width * depthPoint.X / depthFrame.Width),
                (int)(this.RenderSize.Height * depthPoint.Y / depthFrame.Height));

        default:
            throw new ArgumentOutOfRangeException("ImageType was not an expected value: " + ImageType.ToString());
    }
    */
}
public Point MapToPoint(SkeletonPoint s, DepthImageFrame depth)
{
    DepthImagePoint dp = depth.MapFromSkeletonPoint(s);
    ColorImagePoint cp = depth.MapToColorImagePoint(dp.X, dp.Y, ColorImageFormat.RgbResolution640x480Fps30);
    return new Point(cp.X, cp.Y);
}
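Mapping helpers like the ones above are typically called from a frame-ready handler. Below is a minimal sketch of such a call site; the handler name, the event subscription, and the per-skeleton loop are assumptions for illustration, not part of any of the original projects. OpenDepthImageFrame and OpenSkeletonFrame can return null when a frame is dropped, so the frames are null-checked and disposed via using.

// Minimal sketch of a call site (assumed handler and field names, not from the original sources).
private void Sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        if (depthFrame == null || skeletonFrame == null)
        {
            return; // frames can be dropped; nothing to map this tick
        }

        Skeleton[] skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
        skeletonFrame.CopySkeletonDataTo(skeletons);

        foreach (Skeleton skeleton in skeletons)
        {
            if (skeleton.TrackingState != SkeletonTrackingState.Tracked)
            {
                continue;
            }

            // Map the head joint into color-image coordinates using the helper above.
            Point headPoint = MapToPoint(skeleton.Joints[JointType.Head].Position, depthFrame);
        }
    }
}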
private int[] CalculatePlayerPositions(SkeletonFrame skeletonFrame, DepthImageFrame depthFrame)
{
    Skeleton[] allskeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
    int[] pointdata = new int[skeletonFrame.SkeletonArrayLength * 3];
    skeletonFrame.CopySkeletonDataTo(allskeletons);

    int k = 0;
    foreach (Skeleton skeleton in allskeletons)
    {
        if (skeleton.TrackingState == SkeletonTrackingState.Tracked)
        {
            Joint jointpoint = this.BestTrackedJoint(skeleton);
            if (jointpoint.TrackingState == JointTrackingState.Tracked)
            {
                var depthImagePoint = depthFrame.MapFromSkeletonPoint(jointpoint.Position);
                pointdata[k] = depthImagePoint.Depth;
                pointdata[k + 1] = depthImagePoint.X;
                pointdata[k + 2] = depthImagePoint.Y;
            }
            else
            {
                pointdata[k] = 0;
                pointdata[k + 1] = 0;
                pointdata[k + 2] = 0;
            }
        }
        else
        {
            pointdata[k] = 0;
            pointdata[k + 1] = 0;
            pointdata[k + 2] = 0;
        }

        k += 3;
    }

    return pointdata;
}
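A short consumer sketch for the returned array is below; the loop is illustrative only, but the skip-zero convention mirrors the zero-filled entries written above for untracked skeletons.

// Illustrative consumer of the (depth, x, y) triplets returned above; not original project code.
int[] positions = CalculatePlayerPositions(skeletonFrame, depthFrame);
for (int i = 0; i < positions.Length; i += 3)
{
    int depthMm = positions[i];
    if (depthMm == 0)
    {
        continue; // untracked slot, filled with zeros above
    }
    int x = positions[i + 1];
    int y = positions[i + 2];
    Console.WriteLine("Player at depth pixel ({0}, {1}), {2} mm away", x, y, depthMm);
}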