/// <summary>
/// Skeleton-frame handler that moves the on-screen cursor to follow the
/// tracked user's left hand, mapped into 1280x960 color-image coordinates.
/// </summary>
void miKinect_Cursor(object sender, SkeletonFrameReadyEventArgs e)
{
    canvasEsqueleto.Children.Clear();

    Skeleton[] esqueletos = null;
    using (SkeletonFrame frameEsqueleto = e.OpenSkeletonFrame())
    {
        if (frameEsqueleto != null)
        {
            esqueletos = new Skeleton[frameEsqueleto.SkeletonArrayLength];
            frameEsqueleto.CopySkeletonDataTo(esqueletos);
        }
    }

    if (esqueletos == null)
    {
        return;
    }

    foreach (Skeleton esqueleto in esqueletos)
    {
        if (esqueleto.TrackingState != SkeletonTrackingState.Tracked)
        {
            continue;
        }

        // Map the left-hand joint into color space and park the cursor
        // element there via its margin.
        Joint HandLeft = esqueleto.Joints[JointType.HandLeft];
        ColorImagePoint j1P = miKinect.CoordinateMapper.MapSkeletonPointToColorPoint(
            HandLeft.Position, ColorImageFormat.RgbResolution1280x960Fps12);
        this.Cursor.Margin = new Thickness(j1P.X, j1P.Y, 0.0, 0.0);
    }
}
/// <summary>
/// Returns the piece (Pieza) from the item collection that the hand point is
/// hovering over, once the hand has dwelt on a piece for 30 consecutive calls.
/// Returns null while no piece has been grabbed.
/// </summary>
/// <param name="it">Collection of Pieza items to hit-test.</param>
/// <param name="j1P">Hand position in color-image coordinates.</param>
private Pieza isTaken(ItemCollection it, ColorImagePoint j1P)
{
    Pieza c = null;
    Pieza pieceF = null;
    int i = 0;
    // Scan every piece until one is taken (take is an instance flag).
    while (i < it.Count && !take)
    {
        c = (Pieza)it.GetItemAt(i);
        // Hit-test the hand point against the piece's bounding box
        // (margin-based position).
        if (j1P.X >= c.Margin.Left && j1P.X <= (c.Margin.Left + c.Width) &&
            j1P.Y >= c.Margin.Top && j1P.Y <= (c.Margin.Top + c.Height))
        {
            contTake++;
            // NOTE(review): contTake is only reset when it reaches 30 — it is
            // never cleared when the hand leaves a piece, so the dwell count
            // may accumulate across different pieces. Confirm this is intended.
            if (contTake == 30)
            {
                // Remember the piece's starting position.
                mar = new Thickness(c.Margin.Left, c.Margin.Top, 0, 0);
                // Mark that a piece has been grabbed.
                take = true;
                contTake = 0;
                pieceF = c;
            }
        }
        i++;
    }
    return(pieceF);
}
/// <summary>
/// Sizes a UI element to half the shoulder width and centers it on the given
/// hand position (color-image coordinates).
/// </summary>
private void adjustHandPosition(FrameworkElement element, ColorImagePoint hand, Double half_shoulder_width)
{
    element.Width = half_shoulder_width;
    element.Height = half_shoulder_width;

    // Offset by half the element's size so the point sits in its center.
    double left = hand.X - element.Width / 2;
    double top = hand.Y - element.Height / 2;
    Canvas.SetLeft(element, left);
    Canvas.SetTop(element, top);
}
/// <summary>
/// Map a <see cref="SkeletonPoint"/> to a <see cref="Point"/> consistent
/// with the skeleton's position in the color video.
/// </summary>
/// <param name="jointPosition">Position of the joint in skeleton space.</param>
/// <returns>Position of the joint in 2D color-image space.</returns>
private Point SkeletonPointToScreen(SkeletonPoint jointPosition)
{
    var mapped = this.sensor.CoordinateMapper.MapSkeletonPointToColorPoint(jointPosition, COLOR_IMAGE_FORMAT);
    return new Point(mapped.X, mapped.Y);
}
/// <summary>
/// Returns the 640x480 color-image position of the given joint type on the
/// given skeleton.
/// </summary>
private Point GetJoint(JointType jointType, Skeleton skeleton)
{
    ColorImagePoint mapped = sensor.CoordinateMapper.MapSkeletonPointToColorPoint(
        skeleton.Joints[jointType].Position,
        ColorImageFormat.RgbResolution640x480Fps30);
    return new Point(mapped.X, mapped.Y);
}
/// <summary>
/// Converts depth-frame data into a BGR32 color visualization: player pixels
/// are white; other pixels are color-coded by depth validity.
/// </summary>
/// <param name="kinect">Sensor whose streams define the formats and sizes.</param>
/// <param name="depthFrame">Depth frame to convert.</param>
/// <returns>BGR32 pixel buffer sized (depth pixels * 4).</returns>
private byte[] ConvertDepthColor( KinectSensor kinect, DepthImageFrame depthFrame )
{
    ColorImageStream colorStream = kinect.ColorStream;
    DepthImageStream depthStream = kinect.DepthStream;

    // Get the raw per-pixel depth data.
    short[] depthPixel = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo( depthPixel );

    // Get the RGB-camera coordinate corresponding to each depth pixel
    // (registration between the two cameras).
    ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
    kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
        colorStream.Format, colorPoint );

    byte[] depthColor = new byte[depthFrame.PixelDataLength * Bgr32BytesPerPixel];
    for ( int index = 0; index < depthPixel.Length; index++ ) {
        // Split the packed depth value into player ID and distance.
        int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
        int distance = depthPixel[index] >> DepthImageFrame.PlayerIndexBitmaskWidth;

        // The mapped coordinate can fall outside the frame, so clamp to it.
        int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
        int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );

        // NOTE(review): the index math mixes depthFrame.Width with color-space
        // x/y — valid only while both streams share the same resolution; confirm.
        int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

        if ( player != 0 ) {
            // Player pixels: white.
            depthColor[colorIndex] = 255;
            depthColor[colorIndex + 1] = 255;
            depthColor[colorIndex + 2] = 255;
        }
        else {
            // Unsupported range (0-40cm): red.
            if ( distance == depthStream.UnknownDepth ) {
                depthColor[colorIndex] = 0;
                depthColor[colorIndex + 1] = 0;
                depthColor[colorIndex + 2] = 255;
            }
            // Too near, 40cm-80cm (default mode): green.
            else if ( distance == depthStream.TooNearDepth ) {
                depthColor[colorIndex] = 0;
                depthColor[colorIndex + 1] = 255;
                depthColor[colorIndex + 2] = 0;
            }
            // Too far, 3m(Near)/4m(Default)-8m: blue.
            else if ( distance == depthStream.TooFarDepth ) {
                depthColor[colorIndex] = 255;
                depthColor[colorIndex + 1] = 0;
                depthColor[colorIndex + 2] = 0;
            }
            // Valid depth reading: yellow.
            else {
                depthColor[colorIndex] = 0;
                depthColor[colorIndex + 1] = 255;
                depthColor[colorIndex + 2] = 255;
            }
        }
    }

    return depthColor;
}
/// <summary>
/// Maps both hand joints through the depth frame into 640x480 color-image
/// space and positions the hand ellipses there.
/// </summary>
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depth = e.OpenDepthImageFrame())
    {
        if (depth == null || kinectSensorChooser1.Kinect == null)
        {
            return;
        }

        // Skeleton space -> depth map.
        DepthImagePoint lDepth = depth.MapFromSkeletonPoint(first.Joints[JointType.HandLeft].Position);
        DepthImagePoint rDepth = depth.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position);

        // Depth map -> color image.
        ColorImagePoint lColor = depth.MapToColorImagePoint(
            lDepth.X, lDepth.Y, ColorImageFormat.RgbResolution640x480Fps30);
        ColorImagePoint rColor = depth.MapToColorImagePoint(
            rDepth.X, rDepth.Y, ColorImageFormat.RgbResolution640x480Fps30);

        // Move the on-screen markers.
        CameraPosition(leftEllipse, lColor);
        CameraPosition(rightEllipse, rColor);
    }
}
/// <summary>
/// Re-colors every pixel whose depth lies in (lower, upper] with its nearest
/// palette color, writing into the display's bitmap buffer.
/// (FIX: removed three blocks of commented-out alternative implementations —
/// dead code.)
/// </summary>
/// <param name="display">Index of the target buffer in _bitmapBits.</param>
void color_matched(int display)
{
    // Register every depth pixel with its color-image coordinate.
    this._sensor.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, _depthPixels,
        ColorImageFormat.YuvResolution640x480Fps15, _mappedDepthLocations);

    for (int i = 0; i < _depthPixels.Length; i++)
    {
        // Strip the player-index bits to get the raw distance.
        int depthVal = _depthPixels[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
        if ((depthVal <= upper) && (depthVal > lower))
        {
            ColorImagePoint point = _mappedDepthLocations[i];
            // Mapped coordinates can fall outside the 640x480 frame.
            if ((point.X >= 0 && point.X < 640) && (point.Y >= 0 && point.Y < 480))
            {
                int baseIndex = (point.Y * 640 + point.X) * 4;
                // nearest_color takes RGB order; the pixel buffers are BGR(A).
                byte[] colorMatch = nearest_color(new byte[] {
                    _colorPixels[baseIndex + 2],
                    _colorPixels[baseIndex + 1],
                    _colorPixels[baseIndex] });
                _bitmapBits[display][baseIndex] = colorMatch[2];
                _bitmapBits[display][baseIndex + 1] = colorMatch[1];
                _bitmapBits[display][baseIndex + 2] = colorMatch[0];
            }
        }
    }
}
/// <summary>
/// Projects a skeleton-space position onto the canvas, scaling through either
/// the color image or the depth image depending on the current ImageType.
/// </summary>
private Point getPosition2DLocation(SkeletonPoint position, Canvas skeletonCanvas, DepthImageFrame depthFrame)
{
    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(position);

    switch (ImageType)
    {
        case ImageType.Color:
        {
            // Depth -> color image, then scale color coordinates to the canvas.
            ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, Kinect.ColorStream.Format);
            double px = (int)(skeletonCanvas.ActualWidth * colorPoint.X / Kinect.ColorStream.FrameWidth);
            double py = (int)(skeletonCanvas.ActualHeight * colorPoint.Y / Kinect.ColorStream.FrameHeight);
            return new Point(px, py);
        }

        case ImageType.Depth:
        {
            // Scale depth coordinates directly to the canvas.
            double px = (int)(skeletonCanvas.ActualWidth * depthPoint.X / depthFrame.Width);
            double py = (int)(skeletonCanvas.ActualHeight * depthPoint.Y / depthFrame.Height);
            return new Point(px, py);
        }

        default:
            throw new ArgumentOutOfRangeException("ImageType was a not expected value: " + ImageType.ToString());
    }
}
/// <summary>
/// Extension that marks a tracked hand on the canvas with a light blue
/// ellipse. The joint is first mapped into 1280x960 color-image coordinates
/// because the skeleton and color streams use different resolutions.
/// </summary>
public static void DrawTrackedHands(this Canvas canvas, Joint hand, CoordinateMapper mapper)
{
    if (hand.TrackingState == JointTrackingState.NotTracked)
    {
        return;
    }

    ColorImagePoint colorPoint = mapper.MapSkeletonPointToColorPoint(
        hand.Position, ColorImageFormat.RgbResolution1280x960Fps12);

    var marker = new Ellipse
    {
        Width = 50,
        Height = 50,
        Stroke = new SolidColorBrush(Colors.LightBlue),
        StrokeThickness = 4
    };

    // Center the ellipse on the mapped point.
    Canvas.SetLeft(marker, colorPoint.X - marker.Width / 2);
    Canvas.SetTop(marker, colorPoint.Y - marker.Height / 2);
    canvas.Children.Add(marker);
}
/// <summary>
/// Moves this cursor element on the video feed to follow the given joint.
/// </summary>
/// <param name="kinect">Sensor used for coordinate mapping.</param>
/// <param name="joint">Joint (skeleton space) to follow.</param>
public void SetPosition(KinectSensor kinect, Joint joint)
{
    ColorImagePoint colorImagePoint = kinect.CoordinateMapper.MapSkeletonPointToColorPoint(joint.Position, ColorImageFormat.RgbResolution640x480Fps30);
    // NOTE(review): 3 and 2.25 look like fixed scale factors from the 640x480
    // color frame up to a 1920x1080 surface — confirm against the actual
    // display size before changing.
    Canvas.SetLeft(this, colorImagePoint.X * 3);
    Canvas.SetTop(this, colorImagePoint.Y * 2.25);
}
/// <summary>
/// Places a joint-marker element (e.g. an ellipse on the canvas over the
/// color image) at the joint's mapped 640x480 color-image position.
/// </summary>
/// <param name="ellipse">Marker element hosted on the Canvas.</param>
/// <param name="joint">Skeleton joint to display.</param>
private void setPointPosition(FrameworkElement ellipse, Joint joint)
{
    ColorImagePoint mapped = myKinect.CoordinateMapper.MapSkeletonPointToColorPoint(
        joint.Position,
        ColorImageFormat.RgbResolution640x480Fps30);

    Canvas.SetLeft(ellipse, mapped.X);
    Canvas.SetTop(ellipse, mapped.Y);
}
/// <summary>
/// Command handler: maps a depth pixel to a color-image point.
/// args = [sensorIndex, depthFormat, depthX, depthY, depthValue, colorFormat];
/// the result "X||Y" is written to rData.
/// </summary>
/// <returns>KSuccess.QueryOk, or the failing exception's number.</returns>
public byte MapDepthToColorImagePoint(string[] args)
{
    try
    {
        verifArgs(6, args);
        getKinectSensor(int.Parse(args[0]));

        var depthImageFormat = (DepthImageFormat)int.Parse(args[1]);
        int depthX = int.Parse(args[2]);
        int depthY = int.Parse(args[3]);
        short depthPixelValue = short.Parse(args[4]);
        var colorImageFormat = (ColorImageFormat)int.Parse(args[5]);

        ColorImagePoint point = sensor.MapDepthToColorImagePoint(
            depthImageFormat, depthX, depthY, depthPixelValue, colorImageFormat);

        rData = point.X + "||" + point.Y;
        return KSuccess.QueryOk;
    }
    catch (KActionException e)
    {
        rData = e.Message;
        return e.exceptionNumber;
    }
}
/// <summary>
/// Centers the element on the given color-image point (offset by half the
/// element's size so the point lands in the middle, not the top-left corner).
/// </summary>
private void MoveToCameraPosition(FrameworkElement element, ColorImagePoint point)
{
    double halfWidth = element.Width / 2;
    double halfHeight = element.Height / 2;
    Canvas.SetLeft(element, point.X - halfWidth);
    Canvas.SetTop(element, point.Y - halfHeight);
}
/// <summary>
/// Converts a skeleton-space point to its 640x480 color-image screen point.
/// </summary>
private Point SkeletonPointToScreenPoint(SkeletonPoint sp)
{
    ColorImagePoint mapped = sensor.CoordinateMapper.MapSkeletonPointToColorPoint(
        sp, ColorImageFormat.RgbResolution640x480Fps30);
    return new Point(mapped.X, mapped.Y);
}
/// <summary>
/// AllFramesReady handler: copies the newest 640x480 color frame into the
/// display bitmap and registers the depth frame against the color frame.
/// FIX: the original computed gotColor/gotDepth but never checked them, so a
/// tick where either frame was missing (common at startup) would dereference
/// _bitmapBits/_bitmap before the color branch had allocated them
/// (NullReferenceException). A dead per-pixel loop that only copied
/// _mappedDepthLocations entries into unused locals was also removed.
/// </summary>
void _sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    bool gotColor = false;
    bool gotDepth = false;

    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (colorFrame != null)
        {
            Debug.Assert(colorFrame.Width == 640 && colorFrame.Height == 480, "This app only uses 640x480.");
            // (Re)allocate buffers on first frame or if the frame size changed.
            if (_colorPixels.Length != colorFrame.PixelDataLength)
            {
                _colorPixels = new byte[colorFrame.PixelDataLength];
                _bitmap = new WriteableBitmap(640, 480, 96.0, 96.0, PixelFormats.Bgr32, null);
                _bitmapBits = new byte[640 * 480 * 4];
                this.Image.Source = _bitmap;
            }
            colorFrame.CopyPixelDataTo(_colorPixels);
            gotColor = true;
        }
    }

    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame != null)
        {
            Debug.Assert(depthFrame.Width == 640 && depthFrame.Height == 480, "This app only uses 640x480.");
            if (_depthPixels.Length != depthFrame.PixelDataLength)
            {
                _depthPixels = new short[depthFrame.PixelDataLength];
                _mappedDepthLocations = new ColorImagePoint[depthFrame.PixelDataLength];
            }
            depthFrame.CopyPixelDataTo(_depthPixels);
            gotDepth = true;
        }
    }

    // Only continue with a complete pair of frames (see FIX note above).
    if (!gotColor || !gotDepth)
    {
        return;
    }

    // Put the color image into _bitmapBits (Bgr32, opaque alpha).
    for (int i = 0; i < _colorPixels.Length; i += 4)
    {
        _bitmapBits[i + 3] = 255;
        _bitmapBits[i + 2] = _colorPixels[i + 2];
        _bitmapBits[i + 1] = _colorPixels[i + 1];
        _bitmapBits[i] = _colorPixels[i];
    }

    // Register the depth frame to color space; other methods consume
    // _mappedDepthLocations.
    this._sensor.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, _depthPixels,
        ColorImageFormat.RgbResolution640x480Fps30, _mappedDepthLocations);

    _bitmap.WritePixels(new Int32Rect(0, 0, _bitmap.PixelWidth, _bitmap.PixelHeight), _bitmapBits,
        _bitmap.PixelWidth * sizeof(int), 0);
}
/// <summary>
/// Checks whether the tracked point has been steady (stationary) long enough.
/// </summary>
/// <param name="skeletonFrame">Frame supplying the timestamp.</param>
/// <param name="point">Current position in color-image coordinates.</param>
/// <returns>True once `milliseconds` have elapsed since the base point was set.</returns>
bool IsSteady(SkeletonFrame skeletonFrame, ColorImagePoint point)
{
    var currentPoint = new FramePoint()
    {
        Point = point,
        TimeStamp = skeletonFrame.Timestamp,
    };

    // Steady once `milliseconds` have elapsed since the base point was set.
    if ((currentPoint.TimeStamp - basePoint.TimeStamp) > milliseconds)
    {
        basePoint = currentPoint;
        return(true);
    }

    // If the point moved by more than `threshold` on either axis, restart the
    // measurement from the new position.
    if (Math.Abs(currentPoint.Point.X - basePoint.Point.X) > threshold ||
        Math.Abs(currentPoint.Point.Y - basePoint.Point.Y) > threshold)
    {
        // The point moved: shift the base point and measure from scratch.
        basePoint = currentPoint;
    }

    return(false);
}
/// <summary>
/// Skeleton-frame handler: finds the first tracked skeleton and draws the
/// hand marker at the left hand's color-image position.
/// </summary>
void mykinect_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    using (SkeletonFrame skframe = e.OpenSkeletonFrame())
    {
        if (skframe == null)
        {
            return;
        }

        FrameSkeletons = new Skeleton[skframe.SkeletonArrayLength];
        skframe.CopySkeletonDataTo(FrameSkeletons);

        // First tracked skeleton, if any.
        Skeleton sk = FrameSkeletons.FirstOrDefault(s => s.TrackingState == SkeletonTrackingState.Tracked);
        if (sk == null)
        {
            return;
        }

        ColorImagePoint cpl = MapToColorImage(sk.Joints[JointType.HandLeft]);
        DrawHand(cpl);
    }
}
/// <summary>
/// Draws a spinning overlay image centered on the joint's mapped position on
/// the canvas; the rotation angle advances 10 degrees per call.
/// </summary>
/// <param name="joint">Joint the image follows.</param>
/// <param name="file">Relative URI of the image file.</param>
private void drawImage(Joint joint, string file)
{
    Image img = new Image();
    img.Source = new BitmapImage(new Uri(file, UriKind.Relative));
    img.Opacity = 0.8;
    img.Width = 100;
    img.Height = 100;

    // Rotate around the image center by the current accumulated angle.
    Matrix imgMatrix = ((MatrixTransform)img.RenderTransform).Matrix;
    imgMatrix.RotateAt(angle, img.Width / 2, img.Height / 2);
    img.RenderTransform = new MatrixTransform(imgMatrix);

    // Advance the animation angle, wrapping after a full turn.
    if (angle < 360)
    {
        angle += 10;
    }
    else
    {
        angle = 0;
    }

    ColorImagePoint point = kinect.MapSkeletonPointToColor(joint.Position, kinect.ColorStream.Format);

    // NOTE(review): getMargin() appears to supply [scaleX, scaleY, offsetX,
    // offsetY] used to map the color point onto the canvas — confirm the
    // ordering against its definition.
    double[] multiMargin1 = getMargin();
    img.Margin = new Thickness((multiMargin1[2] + multiMargin1[0] * point.X) - img.Width / 2,
        (multiMargin1[3] + multiMargin1[1] * point.Y) - img.Height / 2, 0, 0);

    canvas1.Children.Add(img);
}
/// <summary>
/// Positions an ellipse on the canvas at a joint's location: the joint is
/// mapped into 640x480 color space, then scaled to the canvas's actual size.
/// Vector-scaling approach originally adapted from the Coding4Fun Kinect
/// Toolkit (http://c4fkinect.codeplex.com/).
/// FIX: removed a large block of commented-out alternative scaling code —
/// dead code.
/// </summary>
private void SetEllipsePosition(Ellipse ellipse, Joint joint)
{
    ColorImagePoint point = sensor.MapSkeletonPointToColor(joint.Position, ColorImageFormat.RgbResolution640x480Fps30);

    // Scale from the 640x480 color frame to the actual canvas dimensions.
    double sizeX = canvas.ActualWidth;
    double sizeY = canvas.ActualHeight;
    point.X = (int)((point.X / 640.0) * sizeX);
    point.Y = (int)((point.Y / 480.0) * sizeY);

    Canvas.SetLeft(ellipse, point.X);
    Canvas.SetTop(ellipse, point.Y);
}
/// <summary>
/// Command handler: maps a skeleton-space point to a color-image point.
/// args = [sensorIndex, x, y, z, colorFormat]; the result "X||Y" is written
/// to rData.
/// </summary>
/// <returns>KSuccess.QueryOk, or the failing exception's number.</returns>
public byte MapSkeletonPointToColor(string[] args)
{
    try
    {
        verifArgs(5, args);
        getKinectSensor(int.Parse(args[0]));

        // FIX: the original did Replace('.', ',') and parsed with the current
        // culture, which only works in comma-decimal locales. Normalize to '.'
        // and parse with the invariant culture so both "1.5" and "1,5" work on
        // any machine.
        SkeletonPoint skeletonPoint = new SkeletonPoint();
        skeletonPoint.X = float.Parse(args[1].Replace(',', '.'), System.Globalization.CultureInfo.InvariantCulture);
        skeletonPoint.Y = float.Parse(args[2].Replace(',', '.'), System.Globalization.CultureInfo.InvariantCulture);
        skeletonPoint.Z = float.Parse(args[3].Replace(',', '.'), System.Globalization.CultureInfo.InvariantCulture);

        ColorImageFormat colorImageFormat = (ColorImageFormat)int.Parse(args[4]);

        ColorImagePoint point = sensor.MapSkeletonPointToColor(skeletonPoint, colorImageFormat);
        rData = point.X + "||" + point.Y;
        Console.WriteLine(rData);
        return KSuccess.QueryOk;
    }
    catch (KActionException e)
    {
        rData = e.Message;
        return e.exceptionNumber;
    }
}
/// <summary>
/// Expands the tracked-player bounding box (min_x/max_x/min_y/max_y) with the
/// color-space point of depth pixel i, if that pixel belongs to a player.
/// </summary>
/// <param name="i">Index into depthPixel / colorpoints.</param>
void PixelInRange(int i)
{
    // New-style player index read straight from the DepthImagePixel
    // (equivalent to depthpixelData[i] & DepthImageFrame.PlayerIndexBitmask).
    int playerIndex = depthPixel[i].PlayerIndex;
    if (playerIndex > 0)
    {
        ColorImagePoint p = colorpoints[i];
        // FIX: the original used else-if between the min and max tests, so the
        // very first point processed — which is both the new min AND the new
        // max — could only update one of the two bounds. Test independently.
        if (p.X < min_x)
        {
            min_x = p.X;
        }
        if (p.X > max_x)
        {
            max_x = p.X;
        }
        if (p.Y < min_y)
        {
            min_y = p.Y;
        }
        if (p.Y > max_y)
        {
            max_y = p.Y;
        }
    }
}
/// <summary>
/// Draws the "sword" line: anchored at the hand point and extending 100
/// pixels straight up from it.
/// </summary>
void DrawSword(ColorImagePoint cp)
{
    int x = cp.X;
    int y = cp.Y;
    RightHand.X1 = x;
    RightHand.Y1 = y;
    RightHand.X2 = x;
    RightHand.Y2 = y - 100;
}
/// <summary>
/// Crops the source image around a center point.
/// </summary>
/// <param name="src">Image to crop.</param>
/// <param name="centerPosition">Center of the crop region.</param>
/// <param name="snipWidth">Width of the crop region.</param>
/// <param name="snipHeight">Height of the crop region.</param>
/// <returns>The cropped image, or null when the region leaves the frame.</returns>
private IplImage SnipFaceImage(IplImage src, ColorImagePoint centerPosition, int snipWidth, int snipHeight)
{
    int faceX = centerPosition.X - snipWidth / 2;
    int faceY = centerPosition.Y - snipHeight / 2;

    // Skip cropping when the region sticks out of the frame on any side.
    // FIX: the original only rejected regions past the left/top edges; a
    // region past the right/bottom edge would make SetROI fail.
    if (faceX < 0 || faceY < 0 ||
        faceX + snipWidth > src.Width || faceY + snipHeight > src.Height)
    {
        return null;
    }

    // Copy out the requested region via a temporary ROI.
    var faceRect = new CvRect(faceX, faceY, snipWidth, snipHeight);
    var part = new IplImage(faceRect.Size, BitDepth.U8, 1);

    src.SetROI(faceRect);   // restrict to the crop region
    Cv.Copy(src, part);     // copy the pixels
    src.ResetROI();         // restore the full image

    return part;
}
/// <summary>
/// Skeleton-frame handler: for the first tracked user, draws the shield at
/// the left hand and the sword at the right hand.
/// </summary>
void mykinect_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    using (SkeletonFrame skframe = e.OpenSkeletonFrame())
    {
        if (skframe == null)
        {
            return;
        }

        // Pick the first tracked skeleton, if any.
        Skeleton[] FrameSkeletons = new Skeleton[skframe.SkeletonArrayLength];
        skframe.CopySkeletonDataTo(FrameSkeletons);
        Skeleton user = FrameSkeletons.FirstOrDefault(sk => sk.TrackingState == SkeletonTrackingState.Tracked);

        if (user == null)
        {
            return;
        }

        ColorImagePoint cpl = MapToColorImagePoint(user.Joints[JointType.HandLeft]);
        DrawShield(cpl);

        ColorImagePoint cpr = MapToColorImagePoint(user.Joints[JointType.HandRight]);
        DrawSword(cpr);
    }
}
/// <summary>
/// Maps the right hand into 640x480 color-image space, moves the hand cursor
/// there, and records the current cursor coordinates in currentX/currentY.
/// </summary>
void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depth = e.OpenDepthImageFrame())
    {
        if (depth == null)
        {
            return;
        }
        try
        {
            // Skeleton -> depth -> color (both streams at 640x480).
            DepthImagePoint rightDepthPoint = kinectSensorChooser.Kinect.CoordinateMapper.MapSkeletonPointToDepthPoint(first.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);
            ColorImagePoint rightColorPoint = kinectSensorChooser.Kinect.CoordinateMapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, rightDepthPoint, ColorImageFormat.RgbResolution640x480Fps30);
            CameraPosition(handCursor, rightColorPoint);
            currentX = rightColorPoint.X;
            currentY = rightColorPoint.Y;
        }
        catch (Exception)
        {
            // NOTE(review): deliberately swallows mapping failures so a bad
            // frame doesn't crash the UI loop — consider logging, and confirm
            // whether a narrower exception type would suffice.
        }
    }
}
/// <summary>
/// Maps a joint's skeleton-space position into the sensor's current
/// color-stream coordinate space.
/// </summary>
ColorImagePoint MapToColorImagePoint(Joint jp)
{
    return kinect.CoordinateMapper.MapSkeletonPointToColorPoint(jp.Position, kinect.ColorStream.Format);
}
/// <summary>
/// Draws an accessory item (hat, beard, ...) anchored to the person's head.
/// The on-screen size is derived from the item's physical width and the
/// head's distance from the sensor.
/// </summary>
private void RenderAccessoryItem(DrawingContext drawingContext, Skeleton person, AccessoryItem item)
{
    SkeletonPoint headPos = person.Joints[JointType.Head].Position;
    ColorImagePoint colorImagePoint = _sensor.CoordinateMapper.MapSkeletonPointToColorPoint(headPos, _sensor.ColorStream.Format);

    double g = item.Width; // object size in meters
    double r = headPos.Z;  // distance in meters

    // Apparent on-screen width from the viewing angle subtended by the object.
    double imgWidth = 2 * Math.Atan(g / (2 * r)) * ActualWidth;
    double aspectRatio = item.Image.Width / item.Image.Height;
    double imgHeight = imgWidth / aspectRatio;

    // Vertical offset depends on where the accessory sits relative to the head.
    double offsetX = 0, offsetY = 0;
    switch (item.Position)
    {
        case AccessoryPositon.Hat:
            offsetY = -1.1 * imgHeight;
            break;

        case AccessoryPositon.Beard:
            offsetY = imgHeight / 4;
            break;
    }

    // Scale the head position from color-frame space to control space.
    double headX = colorImagePoint.X * (ActualWidth / _sensor.ColorStream.FrameWidth) + offsetX;
    double headY = colorImagePoint.Y * (ActualHeight / _sensor.ColorStream.FrameHeight) + offsetY;

    AccessoryRect = new Rect(headX - imgWidth / 2, headY, imgWidth, imgHeight);
    drawingContext.DrawImage(item.Image, AccessoryRect);
}
/// <summary>
/// Draws one skeleton bone as a yellow line between two joints (mapped into
/// 640x480 color space) and records joint2's sample in the motion list.
/// FIX: removed two commented-out depth-point mapping lines — dead code.
/// </summary>
private void DrawBone(Skeleton skeleton, JointType joint1, JointType joint2)
{
    ColorImagePoint joint1Point = kSensor.CoordinateMapper.MapSkeletonPointToColorPoint(skeleton.Joints[joint1].Position, ColorImageFormat.RgbResolution640x480Fps30);
    ColorImagePoint joint2Point = kSensor.CoordinateMapper.MapSkeletonPointToColorPoint(skeleton.Joints[joint2].Position, ColorImageFormat.RgbResolution640x480Fps30);

    // Record joint2 (skeleton-space position plus mapped screen position).
    motion.Add(new Position(
        skeleton.Joints[joint2].Position.X,
        skeleton.Joints[joint2].Position.Y,
        skeleton.Joints[joint2].Position.Z,
        joint2Point.X,
        joint2Point.Y,
        (int)joint2,
        count
    ));

    Line backBone = new Line();
    backBone.Stroke = new SolidColorBrush(Colors.Yellow);
    backBone.StrokeThickness = 5;
    backBone.X1 = joint1Point.X;
    backBone.Y1 = joint1Point.Y;
    backBone.X2 = joint2Point.X;
    backBone.Y2 = joint2Point.Y;
    skelCanvas.Children.Add(backBone);
}
/// <summary>
/// AllFramesReady handler: shows the color frame in image1 and overlays
/// image2 centered on each tracked head.
/// FIX: the original never disposed either frame (Kinect frames are
/// IDisposable — leaking them stalls the pipeline) and would throw a
/// NullReferenceException when no color frame was available this tick.
/// </summary>
void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (colorFrame != null)
        {
            image1.Source = colorFrame.ToBitmapSource();
        }
    }

    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        if (skeletonFrame == null)
        {
            return;
        }

        // Fetch the skeleton data.
        Skeleton[] skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
        skeletonFrame.CopySkeletonDataTo(skeletonData);

        // Pin image2 over each tracked player's head.
        foreach (var skeleton in skeletonData)
        {
            var head = skeleton.Joints[JointType.Head];
            if (head.TrackingState == JointTrackingState.Tracked)
            {
                ColorImagePoint point = kinect.MapSkeletonPointToColor(head.Position, kinect.ColorStream.Format);
                var x = image2.Width / 2;
                var y = image2.Height / 2;
                image2.Margin = new Thickness(point.X - x, point.Y - y, 0, 0);
                image2.Visibility = System.Windows.Visibility.Visible;
            }
        }
    }
}
/// <summary>
/// Centers the element on the given color-image point rather than anchoring
/// its top-left corner there.
/// </summary>
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
    double left = point.X - element.Width / 2;
    double top = point.Y - element.Height / 2;
    Canvas.SetLeft(element, left);
    Canvas.SetTop(element, top);
}
/// <summary>
/// Marks the given color-image point on the bitmap with a red 40x40 circle
/// centered on the point.
/// </summary>
void markAtPoint(ColorImagePoint p, Bitmap bmp)
{
    // FIX: the Graphics object was never disposed — it holds a GDI handle,
    // and this runs per frame.
    using (Graphics g = Graphics.FromImage(bmp))
    {
        g.DrawEllipse(Pens.Red, p.X - 20, p.Y - 20, 40, 40);
    }
}
/// <summary>
/// Returns the 640x480 color-image position of joint j on skeleton S.
/// </summary>
Point GetJoint(JointType j, Skeleton S)
{
    ColorImagePoint mapped = sensor.MapSkeletonPointToColor(
        S.Joints[j].Position, ColorImageFormat.RgbResolution640x480Fps30);
    return new Point(mapped.X, mapped.Y);
}
/// <summary>
/// Adds a thin black line between two color-image points to the canvas.
/// </summary>
private void AddLine(ColorImagePoint p1, ColorImagePoint p2)
{
    var segment = new Line
    {
        Stroke = System.Windows.Media.Brushes.Black,
        StrokeThickness = 1,
        X1 = p1.X,
        Y1 = p1.Y,
        X2 = p2.X,
        Y2 = p2.Y
    };
    cvs.Children.Add(segment);
}
/// <summary>
/// Centers a hand element on the mapped point, then interprets its position:
/// the right hand past clickLeftBorder "clicks" the currently highlighted
/// box; the left hand past rollRightBorder starts/stops the roll timer.
/// </summary>
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
    // Offset by half the element's size so the point lands in its center.
    Canvas.SetLeft(element, point.X - element.Width / 2);
    Canvas.SetTop(element, point.Y - element.Height / 2);

    // Check if you're choosing any of the choices
    if (element.Name.Equals("rightEllipse"))
    {
        if(Canvas.GetLeft(element) > clickLeftBorder)
        {
            Console.WriteLine("You clicked");
            // greenIndex selects which box is currently highlighted.
            switch (greenIndex)
            {
                case 0:
                    clickLabel.Content = "Bottom box clicked";
                    break;

                case 1:
                    clickLabel.Content = "Top box clicked";
                    break;

                case 2:
                    clickLabel.Content = "Middle box clicked";
                    break;
            }
        }
    }
    else if (element.Name.Equals("leftEllipse"))
    {
        if (Canvas.GetLeft(element) < rollRightBorder)
        {
            Console.WriteLine("You be rollin");
            // Start the roll timer once when entering the roll zone.
            if (rolling == false)
            {
                rollTimer.Start();
                rolling = true;
            }
        }
        else
        {
            // Leaving the roll zone stops the timer.
            if(rolling == true)
            {
                rollTimer.Stop();
                rolling = false;
            }
        }
    }
}
/// <summary>
/// Shows only the player pixels: everywhere else is rendered white.
/// </summary>
/// <param name="kinect">Sensor providing stream formats and sizes.</param>
/// <param name="colorFrame">Current RGB frame.</param>
/// <param name="depthFrame">Current depth frame (carries player-index bits).</param>
/// <returns>BGR32 buffer the size of the color frame.</returns>
private byte[] BackgroundMask( KinectSensor kinect, ColorImageFrame colorFrame, DepthImageFrame depthFrame )
{
    ColorImageStream colorStream = kinect.ColorStream;
    DepthImageStream depthStream = kinect.DepthStream;

    // Per-pixel RGB data.
    byte[] colorPixel = new byte[colorFrame.PixelDataLength];
    colorFrame.CopyPixelDataTo( colorPixel );

    // Per-pixel depth data.
    short[] depthPixel = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo( depthPixel );

    // RGB-camera coordinate for each depth pixel (camera registration).
    ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
    kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
        colorStream.Format, colorPoint );

    // Output buffer, initialized to white (255,255,255).
    byte[] outputColor = new byte[colorPixel.Length];
    for ( int i = 0; i < outputColor.Length; i += Bgr32BytesPerPixel ) {
        outputColor[i] = 255;
        outputColor[i + 1] = 255;
        outputColor[i + 2] = 255;
    }

    for ( int index = 0; index < depthPixel.Length; index++ ) {
        // Player index for this depth pixel (0 = no player).
        int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;

        // The mapped coordinate can fall outside the frame, so clamp to it.
        int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
        int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );

        int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

        // Use the RGB camera image only where a player was detected.
        if ( player != 0 ) {
            outputColor[colorIndex] = colorPixel[colorIndex];
            outputColor[colorIndex + 1] = colorPixel[colorIndex + 1];
            outputColor[colorIndex + 2] = colorPixel[colorIndex + 2];
        }
    }

    return outputColor;
}
/// <summary>
/// Locates the face near the skeleton's head position by running OpenCV Haar
/// detection on a region cropped around the head.
/// </summary>
/// <param name="headPosition">Head position in color-image coordinates.</param>
/// <returns>The detected face rectangle, or a fallback rect at the head
/// position with image2's size when no face is found.</returns>
private Rect CheckFacePosition( ColorImagePoint headPosition )
{
    // Size of the region cropped around the head.
    int snipWidth = 200;
    int snipHeight = 200;

    // Fallback result: the head position with image2's dimensions.
    Rect reRect = new Rect(headPosition.X, headPosition.Y, image2.Width, image2.Height);

    storage.Clear();
    openCVGrayImage.ResetROI(); // ROI is occasionally still set from a previous call — reset it
    openCVImage.CopyFrom( outputImage ); // WriteableBitmap -> IplImage
    Cv.CvtColor( openCVImage, openCVGrayImage, ColorConversion.BgrToGray ); // grayscale for the detector
    Cv.EqualizeHist( openCVGrayImage, openCVGrayImage ); // contrast normalization

    // Face detection.
    try {
        // Crop to the area around the head.
        var snipImage = SnipFaceImage( openCVGrayImage, headPosition, snipWidth, snipHeight );
        if ( snipImage != null ) {
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects( snipImage, cascade, storage );

            // Translate the first detection from crop-local coordinates back
            // to full-image coordinates.
            if ( faces.Total > 0 ) {
                reRect.X = faces[0].Value.Rect.X + (headPosition.X - snipWidth / 2);
                reRect.Y = faces[0].Value.Rect.Y + (headPosition.Y - snipHeight / 2);
                reRect.Width = faces[0].Value.Rect.Width;
                reRect.Height = faces[0].Value.Rect.Height;
            }
        }
    }
    catch ( Exception ) {
        // NOTE(review): detection failures are deliberately ignored so the
        // fallback rect is returned — consider logging.
    }

    return reRect;
}
/// <summary>
/// Checks whether the tracked point has been steady (stationary) long enough.
/// </summary>
/// <param name="skeletonFrame">Frame supplying the timestamp.</param>
/// <param name="point">Current position in color-image coordinates.</param>
/// <returns>True once `milliseconds` have elapsed since the base point was set.</returns>
bool IsSteady( SkeletonFrame skeletonFrame, ColorImagePoint point )
{
    var currentPoint = new FramePoint()
    {
        Point = point,
        TimeStamp = skeletonFrame.Timestamp,
    };

    // Steady once `milliseconds` have elapsed since the base point was set.
    if ( (currentPoint.TimeStamp - basePoint.TimeStamp) > milliseconds ) {
        basePoint = currentPoint;
        return true;
    }

    // If the point moved by more than `threshold` on either axis, restart the
    // measurement from the new position.
    if ( Math.Abs( currentPoint.Point.X - basePoint.Point.X ) > threshold ||
         Math.Abs( currentPoint.Point.Y - basePoint.Point.Y ) > threshold ) {
        // The point moved: shift the base point and measure from scratch.
        basePoint = currentPoint;
    }

    return false;
}
/// <summary>
/// Converts depth-frame data to a BGR32 color image: pixels belonging to an
/// enabled player are painted with that player's color; everything else
/// stays at the zero-initialized default.
/// </summary>
/// <param name="kinect">Sensor providing stream formats and sizes.</param>
/// <param name="depthFrame">Depth frame to convert.</param>
/// <returns>BGR32 pixel buffer sized (depth pixels * 4).</returns>
private byte[] ConvertDepthColor( KinectSensor kinect, DepthImageFrame depthFrame )
{
    ColorImageStream colorStream = kinect.ColorStream;
    DepthImageStream depthStream = kinect.DepthStream;

    // Per-pixel depth data.
    short[] depthPixel = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo( depthPixel );

    // RGB-camera coordinate for each depth pixel (camera registration).
    ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
    kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
        colorStream.Format, colorPoint );

    byte[] depthColor = new byte[depthFrame.PixelDataLength * Bgr32BytesPerPixel];
    for ( int index = 0; index < depthPixel.Length; index++ ) {
        // Split the packed value into player ID and distance.
        int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
        int distance = depthPixel[index] >> DepthImageFrame.PlayerIndexBitmaskWidth;

        // The mapped coordinate can fall outside the frame, so clamp to it.
        int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
        int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );

        int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

        // Pixel belongs to a player.
        if ( player != 0 ) {
            // Paint only players that are enabled.
            if ( enablePlayer[player] ) {
                depthColor[colorIndex] = playerColor[player].B;
                depthColor[colorIndex + 1] = playerColor[player].G;
                depthColor[colorIndex + 2] = playerColor[player].R;
            }
        }
    }

    return depthColor;
}
/// <summary>
/// "Optical camouflage": replaces player pixels with the stored background
/// image so the player appears transparent.
/// </summary>
/// <param name="kinect">Sensor providing stream formats and sizes.</param>
/// <param name="colorFrame">Current RGB frame.</param>
/// <param name="depthFrame">Current depth frame (carries player-index bits).</param>
/// <returns>BGR32 buffer the size of the color frame.</returns>
private byte[] OpticalCamouflage( KinectSensor kinect, ColorImageFrame colorFrame, DepthImageFrame depthFrame )
{
    ColorImageStream colorStream = kinect.ColorStream;
    DepthImageStream depthStream = kinect.DepthStream;

    // Per-pixel RGB data.
    byte[] colorPixel = new byte[colorFrame.PixelDataLength];
    colorFrame.CopyPixelDataTo( colorPixel );

    // No background captured yet: store the current frame as the background.
    if ( backPixel == null ) {
        backPixel = new byte[colorFrame.PixelDataLength];
        Array.Copy( colorPixel, backPixel, backPixel.Length );
    }

    // Per-pixel depth data.
    short[] depthPixel = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo( depthPixel );

    // RGB-camera coordinate for each depth pixel (camera registration).
    ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
    kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
        colorStream.Format, colorPoint );

    // Output buffer, initialized to the live RGB image.
    byte[] outputColor = new byte[colorPixel.Length];
    Array.Copy( colorPixel, outputColor, outputColor.Length );

    for ( int index = 0; index < depthPixel.Length; index++ ) {
        // Player index for this depth pixel (0 = no player).
        int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;

        // The mapped coordinate can fall outside the frame, so clamp to it.
        int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
        int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );

        int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

        // Where a player was detected, substitute the background image.
        if ( player != 0 ) {
            outputColor[colorIndex] = backPixel[colorIndex];
            outputColor[colorIndex + 1] = backPixel[colorIndex + 1];
            outputColor[colorIndex + 2] = backPixel[colorIndex + 2];
        }
    }

    return outputColor;
}
/// <summary>
/// Handles a combined depth+color frame event: copies both frames, maps depth pixels into
/// color space, and builds a player-opacity mask ("green screen") alongside the raw color
/// pixels. Returns a two-element array: [0] = opacity mask bytes, [1] = BGR32 color pixels
/// (slots stay null when the color frame was missing).
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event args giving access to the depth and color frames.</param>
private byte[][] SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    bool depthReceived = false;
    bool colorReceived = false;
    DepthImagePixel[] depthPixels;
    byte[] colorPixels;
    ColorImagePoint[] colorCoordinates;
    int colorToDepthDivisor;
    byte[] greenScreenPixelData;

    // Allocate space to put the color pixels we'll create
    depthPixels = new DepthImagePixel[this.kinectSensor.DepthStream.FramePixelDataLength];
    colorPixels = new byte[this.kinectSensor.ColorStream.FramePixelDataLength];
    greenScreenPixelData = new byte[this.kinectSensor.DepthStream.FramePixelDataLength];
    colorCoordinates = new ColorImagePoint[this.kinectSensor.DepthStream.FramePixelDataLength];

    int colorWidth = this.kinectSensor.ColorStream.FrameWidth;
    int colorHeight = this.kinectSensor.ColorStream.FrameHeight;

    // Ratio between color and depth horizontal resolution (1 when color is 640 wide).
    colorToDepthDivisor = colorWidth / 640;

    byte[][] results = new byte[2][]; // kinectSensor.DepthStream.FramePixelDataLength];

    DepthImageFormat DepthFormat = DepthImageFormat.Resolution640x480Fps30;
    ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;

    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (null != depthFrame)
        {
            // Copy the pixel data from the image to a temporary array
            depthFrame.CopyDepthImagePixelDataTo(depthPixels);
            depthReceived = true;
        }
    }

    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (null != colorFrame)
        {
            // Copy the pixel data from the image to a temporary array
            // NOTE(review): a new WriteableBitmap is allocated on every frame but never
            // written to in this method (the WritePixels code below is commented out) —
            // looks like a leftover; consider allocating once or removing. Confirm no
            // other code relies on outputColorBitmap being refreshed here.
            this.outputColorBitmap = new WriteableBitmap(640, 480, 96, 96, PixelFormats.Bgr32, null);
            colorFrame.CopyPixelDataTo(colorPixels);
            colorReceived = true;
        }
    }

    if (true == depthReceived)
    {
        // Register each depth pixel against its color-image coordinate.
        this.kinectSensor.CoordinateMapper.MapDepthFrameToColorFrame(
            DepthFormat,
            depthPixels,
            ColorFormat,
            colorCoordinates);

        Array.Clear(greenScreenPixelData, 0, greenScreenPixelData.Length);

        // loop over each row and column of the depth
        for (int y = 0; y < 480; ++y)
        {
            for (int x = 0; x < 640; ++x)
            {
                // calculate index into depth array
                int depthIndex = x + (y * 640);
                DepthImagePixel depthPixel = depthPixels[depthIndex];
                int player = depthPixel.PlayerIndex;

                // if we're tracking a player for the current pixel, do green screen
                if (player > 0)
                {
                    // retrieve the depth to color mapping for the current depth pixel
                    ColorImagePoint colorImagePoint = colorCoordinates[depthIndex];

                    // scale color coordinates to depth resolution
                    int colorInDepthX = colorImagePoint.X / colorToDepthDivisor;
                    int colorInDepthY = colorImagePoint.Y / colorToDepthDivisor;

                    // make sure the depth pixel maps to a valid point in color space
                    // (x must be strictly > 0 so the "- 1" write below stays in bounds)
                    if (colorInDepthX > 0 && colorInDepthX < 640 && colorInDepthY >= 0 && colorInDepthY < 480)
                    {
                        // calculate index into the green screen pixel array
                        int greenScreenIndex = colorInDepthX + (colorInDepthY * 640);

                        // set opaque (33 is the mask's "player present" marker value)
                        greenScreenPixelData[greenScreenIndex] = 33;

                        // compensate for depth/color not corresponding exactly by setting
                        // the pixel to the left to opaque as well
                        greenScreenPixelData[greenScreenIndex - 1] = 33;
                    }
                }
            }
        }
    }

    if (true == colorReceived)
    {
        // Write the pixel data into our bitmap
        /* this.outputColorBitmap.WritePixels( new Int32Rect(0, 0, this.outputColorBitmap.PixelWidth, this.outputColorBitmap.PixelHeight), colorPixels, this.outputColorBitmap.PixelWidth * sizeof(int), 0); if (playerOpacityMaskImage == null) { playerOpacityMaskImage = new WriteableBitmap( 640, 480, 96, 96, PixelFormats.Bgra32, null); results[0] = playerOpacityMaskImage; } playerOpacityMaskImage.WritePixels( new Int32Rect(0, 0, 640, 480), greenScreenPixelData, 640 * ((playerOpacityMaskImage.Format.BitsPerPixel + 7) / 8), 0); */
        results[0] = greenScreenPixelData; // playerOpacityMaskImage
        results[1] = colorPixels;
        return results;
    }

    return results;
}
//Skeleton[] fixSkleton = new Skeleton[1];
/// <summary>
/// Serializes one skeleton frame to the recording stream, in this exact order: frame-type
/// tag, elapsed milliseconds since the previous frame, tracking mode, the four floor clip
/// plane components, frame number, then the color-mapped right-hand / left-hand / spine
/// coordinates of the first tracked skeleton (or six zero ints) followed by a bool flag
/// saying whether a skeleton with a tracked spine was found.
/// </summary>
/// <param name="frame">Skeleton frame to record.</param>
/// <param name="psensor">Sensor whose CoordinateMapper converts joints to color space.</param>
public void Record(SkeletonFrame frame,KinectSensor psensor)
{
    writer.Write((int) FrameType.Skeletons);

    // Delta time since the last recorded frame, then advance the reference.
    var timeSpan = DateTime.Now.Subtract(referenceTime);
    referenceTime = DateTime.Now;
    writer.Write((long) timeSpan.TotalMilliseconds);

    writer.Write((int) frame.TrackingMode);
    writer.Write(frame.FloorClipPlane.Item1);
    writer.Write(frame.FloorClipPlane.Item2);
    writer.Write(frame.FloorClipPlane.Item3);
    writer.Write(frame.FloorClipPlane.Item4);
    writer.Write(frame.FrameNumber);

    frame.CopySkeletonDataTo(totalSkeleton);

    // First fully tracked skeleton, if any.
    firstSkeleton = (from trackskeleton in totalSkeleton
        where trackskeleton.TrackingState == SkeletonTrackingState.Tracked
        select trackskeleton).FirstOrDefault();

    if ( firstSkeleton !=null )
    {
        if (firstSkeleton.Joints[JointType.Spine].TrackingState == JointTrackingState.Tracked)
        {
            // Map the three joints of interest into 640x480 color-image coordinates.
            tmpHandRight = psensor.CoordinateMapper.
                MapSkeletonPointToColorPoint(
                firstSkeleton.Joints[JointType.HandRight].Position,
                ColorImageFormat.RgbResolution640x480Fps30);
            tmpHandLeft = psensor.CoordinateMapper.
                MapSkeletonPointToColorPoint(
                firstSkeleton.Joints[JointType.HandLeft].Position,
                ColorImageFormat.RgbResolution640x480Fps30);
            tmpSpine = psensor.CoordinateMapper.
                MapSkeletonPointToColorPoint(
                firstSkeleton.Joints[JointType.Spine].Position,
                ColorImageFormat.RgbResolution640x480Fps30);

            writer.Write(tmpHandRight.X);
            writer.Write(tmpHandRight.Y);
            writer.Write(tmpHandLeft.X);
            writer.Write(tmpHandLeft.Y);
            writer.Write(tmpSpine.X);
            writer.Write(tmpSpine.Y);
            writer.Write(true); // is skeleton detected
        }
        else
        {
            // Spine joint not tracked: record six placeholder coordinates.
            writer.Write(0);
            writer.Write(0);
            writer.Write(0);
            writer.Write(0);
            writer.Write(0);
            writer.Write(0);
            writer.Write(false); // is skeleton detected
        }
    }
    else
    {
        // No tracked skeleton at all: record six placeholder coordinates.
        writer.Write(0);
        writer.Write(0);
        writer.Write(0);
        writer.Write(0);
        writer.Write(0);
        writer.Write(0);
        writer.Write(false); // is skeleton detected
    }
}
/// <summary>
/// Maps each pixel of a depth frame to its corresponding coordinate in the color image.
/// </summary>
/// <param name="depthImageFormat">Format of the data in <paramref name="depthPixelData"/>.</param>
/// <param name="depthPixelData">Raw depth pixels to map.</param>
/// <param name="colorImageFormat">Color format the output coordinates are expressed in.</param>
/// <param name="colorCoordinates">Output array receiving one color-space point per depth pixel.</param>
// NOTE(review): body-less signature (decompiled-metadata style, mirrors Microsoft.Kinect's
// KinectSensor.MapDepthFrameToColorFrame) — valid only in an interface/partial/extern
// context; confirm against the enclosing type.
public void MapDepthFrameToColorFrame(DepthImageFormat depthImageFormat, short[] depthPixelData, ColorImageFormat colorImageFormat, ColorImagePoint[] colorCoordinates);
/// <summary>
/// Extracts a square region of interest around the right hand: builds a depth-aligned
/// color buffer in which only pixels closer than the hand/wrist depth threshold survive,
/// positions the on-screen markers and bounding rectangle, and hands the ROI to
/// aligned_colorPixelsToBitmap for display.
/// </summary>
/// <param name="user">Tracked skeleton whose right hand/wrist define the ROI.</param>
/// <param name="depthFrame">Depth frame used for alignment and segmentation.</param>
/// <param name="color_frame">Color frame to sample pixels from. NOTE(review): defaulted to
/// null but dereferenced unconditionally below — callers must always pass it.</param>
void GetROI(Skeleton user, DepthImageFrame depthFrame , ColorImageFrame color_frame = null)
{
    // Map the hand and wrist joints into depth-image coordinates.
    DepthImagePoint rightHandPoint = _sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);
    DepthImagePoint rightWristPoint = _sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.WristRight].Position, DepthImageFormat.Resolution640x480Fps30);

    // Depth threshold used for segmenting out the hand.
    // NOTE(review): because ?: spans the whole expression, the +10 margin applies only in
    // the wrist branch. If the intent was max(hand, wrist) + 10, this should read
    // Math.Max(rightHandPoint.Depth, rightWristPoint.Depth) + 10 — confirm before changing.
    int hand_depth = (rightHandPoint.Depth>rightWristPoint.Depth)?rightHandPoint.Depth:rightWristPoint.Depth+10;

    //*********************************** Map The depth Image to color Image to align the color image************************************************************************
    DepthImagePixel[] depthImagePixels = new DepthImagePixel[depthFrame.PixelDataLength];
    depthFrame.CopyDepthImagePixelDataTo(depthImagePixels);
    short[] rawDepthData = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo(rawDepthData);
    ColorImagePoint[] mapped_depth_locations = new ColorImagePoint[depthFrame.PixelDataLength];
    _sensor.CoordinateMapper.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, depthImagePixels, ColorImageFormat.RgbResolution640x480Fps30, mapped_depth_locations);

    byte[] aligned_colorPixels = new byte[color_frame.PixelDataLength]; // creating a byte array for storing the aligned pixel values
    byte[] original_colorPixels = new byte[color_frame.PixelDataLength];
    color_frame.CopyPixelDataTo(original_colorPixels);

    int aligned_image_index = 0;
    for (int i = 0; i < mapped_depth_locations.Length; i++)
    {
        // Distance sits above the player-index bits in the raw value.
        int depth = rawDepthData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
        ColorImagePoint point = mapped_depth_locations[i];
        if ((point.X >= 0 && point.X < 640) && (point.Y >= 0 && point.Y < 480))
        {
            int baseIndex = (point.Y * 640 + point.X) * 4;
            // Keep only pixels in front of the hand-depth threshold; everything else black.
            // NOTE(review): the 4th byte is written as 0 in both branches — presumably the
            // consumer ignores that channel; confirm.
            if (depth < hand_depth && depth != -1)
            {
                aligned_colorPixels[aligned_image_index] = original_colorPixels[baseIndex];
                aligned_colorPixels[aligned_image_index + 1] = original_colorPixels[baseIndex + 1];
                aligned_colorPixels[aligned_image_index + 2] = original_colorPixels[baseIndex + 2];
                aligned_colorPixels[aligned_image_index + 3] = 0;
            }
            else
            {
                aligned_colorPixels[aligned_image_index] = 0;
                aligned_colorPixels[aligned_image_index + 1] = 0;
                aligned_colorPixels[aligned_image_index + 2] = 0;
                aligned_colorPixels[aligned_image_index + 3] = 0;
            }
        }
        aligned_image_index = aligned_image_index + 4;
        // *************************** Now modify the contents of this aligned_colorBitmap using the depth information ***************************************************
    }
    //***********************************************************************************************************************************************************************

    // Size the ROI from the hand-to-wrist span (Chebyshev distance x 3).
    int threshold = 20;
    int hand_length = 3 * Math.Max(Math.Abs(rightHandPoint.X - rightWristPoint.X), Math.Abs(rightHandPoint.Y - rightWristPoint.Y));
    int hand_length_old = hand_length;
    //****************************Low pass filter for hand_length*********************************
    // NOTE(review): dead code — hand_length_old is assigned from hand_length on the line
    // above, so the difference is always 0 and the filter can never fire. For this to work,
    // the previous frame's value must live in a persistent field.
    if (Math.Abs(hand_length - hand_length_old) > threshold)
        hand_length = hand_length_old;
    //************************************************************************************************

    // Top-left corner of the ROI, clamped to the frame origin.
    int top_left_X_depth = rightHandPoint.X - hand_length;
    int top_left_Y_depth = rightHandPoint.Y - hand_length;
    int top_left_Z_depth = rightHandPoint.Depth;
    top_left_X_depth = (top_left_X_depth<0)? 0 : top_left_X_depth;
    top_left_Y_depth = (top_left_Y_depth<0)? 0 : top_left_Y_depth;
    DepthImagePoint top_left = new DepthImagePoint();
    top_left.X = top_left_X_depth;
    top_left.Y = top_left_Y_depth;
    top_left.Depth = rightHandPoint.Depth;

    // Bottom-right corner, clamped.
    // NOTE(review): clamps to 600/400 rather than 639/479 — looks like ad-hoc safety
    // margins; confirm they are intentional.
    int bottom_right_X_depth = rightHandPoint.X + hand_length;
    int bottom_right_Y_depth = rightHandPoint.Y + hand_length;
    int bottom_right_Z_depth = rightHandPoint.Depth ;
    bottom_right_X_depth = (bottom_right_X_depth>640)? 600 : bottom_right_X_depth;
    bottom_right_Y_depth = (bottom_right_Y_depth>480)? 400 : bottom_right_Y_depth;
    DepthImagePoint bottom_right = new DepthImagePoint();
    bottom_right.X = bottom_right_X_depth;
    bottom_right.Y = bottom_right_Y_depth;
    bottom_right.Depth = bottom_right_Z_depth;

    // Place the corner markers and size/position the ROI border rectangle.
    Canvas.SetLeft(right_hand_pointer, top_left.X - right_hand_pointer.Width / 2);
    Canvas.SetTop(right_hand_pointer, top_left.Y - right_hand_pointer.Height / 2);
    Canvas.SetLeft(left_hand_pointer, bottom_right.X - left_hand_pointer.Width / 2);
    Canvas.SetTop(left_hand_pointer, bottom_right.Y - left_hand_pointer.Height / 2);
    border_rect.Width = 2*hand_length;
    border_rect.Height = 2*hand_length;
    Canvas.SetLeft(border_rect, top_left.X);
    Canvas.SetTop(border_rect, top_left.Y);

    // Convert the aligned pixels inside the ROI to a bitmap for display.
    aligned_colorPixelsToBitmap(aligned_colorPixels, color_frame, (int)top_left.X, (int)top_left.Y, (int)border_rect.Width, (int)border_rect.Height);
}
/// <summary>
/// Places a canvas child so its top-left corner sits at the given color-space point.
/// </summary>
/// <param name="element">Canvas child to move.</param>
/// <param name="point">Target position in color-image pixel coordinates.</param>
private void MoveToCameraPosition(FrameworkElement element, ColorImagePoint point)
{
    double left = point.X;
    double top = point.Y;
    Canvas.SetLeft(element, left);
    Canvas.SetTop(element, top);
}
/// <summary>
/// Builds the player overlay: copies the color frame, sets the alpha byte to 255 for every
/// color pixel that the depth frame labels as belonging to a player, then writes the result
/// into playerBitmap and shows it via playerImage.
/// </summary>
/// <param name="colorFrame">Current RGB frame (null tolerated — method returns early).</param>
/// <param name="depthFrame">Current depth frame (null tolerated — method returns early).</param>
private void kinectPlayerImage(ColorImageFrame colorFrame, DepthImageFrame depthFrame)
{
    if (colorFrame == null || depthFrame == null)
    {
        return;
    }

    // Byte offsets within one 4-byte pixel.
    // (BlueIndex/GreenIndex/RedIndex are currently unused; only AlphaIndex is read.)
    const int BlueIndex = 0;
    const int GreenIndex = 1;
    const int RedIndex = 2;
    const int AlphaIndex = 3;

    // Get color image
    byte[] colorPixels = new Byte[colorFrame.PixelDataLength];
    colorFrame.CopyPixelDataTo(colorPixels);

    // Get depth image
    short[] rawDepthData = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo(rawDepthData);

    // Create array to hold depth mapping data.
    ColorImagePoint[] _mappedDepthLocations = new ColorImagePoint[depthFrame.PixelDataLength];
    kinectSensorChooser.Kinect.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, rawDepthData, ColorImageFormat.RgbResolution640x480Fps30, _mappedDepthLocations);

    // Each index in depth array is equal to 4 pixels in color array (B, G, R, A)
    for (int depthIndex = 0, colorIndex = 0; depthIndex < rawDepthData.Length && colorIndex < colorPixels.Length; depthIndex++, colorIndex += 4)
    {
        // Get the player (requires skeleton tracking enabled for values)
        int player = rawDepthData[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
        if (player > 0)
        {
            // This depth pixel DOES belong to a player (the original comment here said
            // the opposite): mark the mapped color pixel fully opaque.
            ColorImagePoint point = _mappedDepthLocations[depthIndex];
            if ((point.X >= 0 && point.X < colorFrame.Width) && (point.Y >= 0 && point.Y < colorFrame.Height))
            {
                int baseIndex = (point.Y * colorFrame.Width + point.X) * 4;
                colorPixels[baseIndex + AlphaIndex] = 255;
            }
        }
    }

    // Update the image
    int stride = colorFrame.Width * 4; // bytes per row (B,G,R,A)
    playerBitmap.WritePixels(new Int32Rect(0, 0, playerBitmap.PixelWidth, playerBitmap.PixelHeight), colorPixels, stride, 0);
    playerImage.Source = playerBitmap;
}
/// <summary>
/// Draws a speech balloon next to the head whose direction matches the current sound
/// source, and renders the recognized text inside it.
/// </summary>
/// <param name="drawContecxt">Target drawing context.</param>
/// <param name="head">Head position in skeleton space (used only for the direction test).</param>
/// <param name="headPoint">Head position mapped into color-image pixels (used for layout).</param>
private void DrawFukidasi( DrawingContext drawContecxt, SkeletonPoint head, ColorImagePoint headPoint )
{
    // Horizontal angle of the head as seen from the sensor, in degrees.
    double angle = Math.Atan2( head.X, head.Z ) * 180 / Math.PI;

    // Only draw when the sound direction roughly matches this head (within 10 degrees).
    if ( Math.Abs( soundDir - angle ) < 10 ) {
        // Balloon image just above and to the right of the head.
        Rect rect = new Rect( headPoint.X + 32, headPoint.Y - 64, 96, 64 );
        drawContecxt.DrawImage( fukidasiImage, rect );

        if ( recognizedText != null ) {
            var text = new FormattedText( recognizedText,
                CultureInfo.GetCultureInfo( "ja-JP" ), FlowDirection.LeftToRight,
                new Typeface( "Verdana" ), 24, Brushes.Black );

            // BUG FIX: the text was previously positioned with the skeleton-space (meter)
            // coordinates from `head`, which never line up with the balloon drawn at
            // `headPoint` pixels; use the pixel coordinates so the text lands inside it.
            drawContecxt.DrawText( text, new Point( headPoint.X + 56, headPoint.Y - 48 ) );
        }
    }
}
/// <summary>
/// Maps the cached 640x480 depth pixels into color-image coordinates.
/// </summary>
/// <returns>One color-space point per depth pixel (640 * 480 entries).</returns>
public ColorImagePoint[] C()
{
    var mapped = new ColorImagePoint[640 * 480];
    this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
        DepthImageFormat.Resolution640x480Fps30,
        this.depthPixels,
        ColorImageFormat.RgbResolution640x480Fps30,
        mapped);
    return mapped;
}
/// <summary>
/// Centers a canvas child on the given color-space point (instead of anchoring its
/// top-left corner there).
/// </summary>
/// <param name="element">Canvas child to move; its Width/Height must be set.</param>
/// <param name="point">Target center position in color-image pixel coordinates.</param>
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
    // Debug console output removed — it ran on every frame and leaked into stdout.
    Canvas.SetLeft(element, point.X - element.Width / 2);
    Canvas.SetTop(element, point.Y - element.Height / 2);
}
/// <summary>
/// Depth-to-color mapping callback: converts a single depth pixel (x, y, z) into the
/// matching color-image coordinate via the sensor's CoordinateMapper.
/// </summary>
/// <returns>0 on success; -1 when no sensor is available or the mapping throws.
/// On failure the out coordinates are (0, 0).</returns>
private int DepthToColorCallback(
    uint depthFrameWidth,
    uint depthFrameHeight,
    uint colorFrameWidth,
    uint colorFrameHeight,
    float zoomFactor,
    Point viewOffset,
    int depthX,
    int depthY,
    ushort depthZ,
    out int colorX,
    out int colorY)
{
    colorX = 0;
    colorY = 0;

    // No sensor attached: report failure, leaving the out coordinates at (0, 0).
    if (this.sensor == null)
    {
        return -1;
    }

    int retCode = 0;
    var colorPoint = new ColorImagePoint();
    try
    {
        var depthImagePoint = new DepthImagePoint()
        {
            X = depthX,
            Y = depthY,
            Depth = depthZ,
        };
        colorPoint = this.sensor.CoordinateMapper.MapDepthPointToColorPoint(
            this.sensor.DepthStream.Format, depthImagePoint, this.sensor.ColorStream.Format);
    }
    catch (InvalidOperationException e)
    {
        // Mapping can throw for out-of-range input; trace it and report failure,
        // but still publish the default (0, 0) point below.
        string traceStr = string.Format(
            CultureInfo.CurrentCulture,
            "Exception on MapDepthToColorImagePoint while translating depth point({0},{1},{2}). Exception={3}",
            depthX,
            depthY,
            depthZ,
            e.Message);
        Trace.WriteLineIf(this.traceLevel >= TraceLevel.Error, traceStr, TraceCategory);
        retCode = -1;
    }

    colorX = colorPoint.X;
    colorY = colorPoint.Y;
    return retCode;
}
/// <summary>
/// Tests whether the ColorImagePoint has a known value.
/// </summary>
/// <param name="colorImagePoint">The ColorImagePoint to test.</param>
/// <returns>Returns true if the ColorImagePoint has a known value, false otherwise.</returns>
// NOTE(review): body-less signature (decompiled-metadata style) — valid only in an
// interface/partial/extern context; confirm against the enclosing type.
public static bool IsKnownPoint(ColorImagePoint colorImagePoint);
/// <summary>
/// Renders <paramref name="control"/> to a 640x480 bitmap, crops a vertical strip of width
/// croppedImageWidth centered on the tracked X position, and saves it to the file named by
/// GetFilename. Shows a message box on failure instead of throwing.
/// </summary>
/// <param name="position">Color-space point the crop is centered on (only X is used).</param>
/// <param name="trackingTime">Used to build the destination filename.</param>
/// <param name="control">Visual to snapshot.</param>
private void SaveImage(ColorImagePoint position,TrackingTime trackingTime,FrameworkElement control)
{
    _fn = GetFilename(trackingTime);
    try
    {
        RenderTargetBitmap renderTargetBitmap = new RenderTargetBitmap(640, 480, 96, 96, PixelFormats.Pbgra32);
        renderTargetBitmap.Render(control);
        renderTargetBitmap.Freeze();

        // Center the crop on the tracked position, clamped to the bitmap bounds.
        int x = position.X - croppedImageWidth / 2;
        if (x < 0)
        {
            x = 0;
        }
        int width = croppedImageWidth;
        if (x + width > renderTargetBitmap.Width)
        {
            width = (int)renderTargetBitmap.Width - x;
        }
        CroppedBitmap croppedBitmap = new CroppedBitmap(renderTargetBitmap, new Int32Rect(x, 0, width, (int)renderTargetBitmap.Height));

        // Pick the encoder from the file extension (the extension was previously computed
        // but ignored, so .jpg files were written with PNG content). Default stays PNG.
        string ext = System.IO.Path.GetExtension(_fn).ToLowerInvariant();
        BitmapEncoder encoder;
        if (ext == ".jpg" || ext == ".jpeg")
        {
            encoder = new JpegBitmapEncoder();
        }
        else
        {
            encoder = new PngBitmapEncoder();
        }
        // (Dead "encoder == null" check removed — the constructor can never return null.)
        encoder.Frames.Add(BitmapFrame.Create(croppedBitmap));
        using (Stream stm = File.Create(_fn))
        {
            encoder.Save(stm);
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show("Sorry, I had trouble saving that photo.\n\nError: " + ex.Message);
    }
}
/// <summary>
/// Translates hand positions (color-image coordinates, Y grows downward) into arrow-key
/// presses: a hand above the head accelerates, a hand held between head and hip steers
/// left/right, a hand below the hip brakes. When both hands rest inside the shoulder/hip
/// "passive box", every key is released.
/// </summary>
private void DetectGestures(ColorImagePoint headCoord, ColorImagePoint rightHandCoord, ColorImagePoint leftHandCoord, ColorImagePoint armsCenterCoord, ColorImagePoint shoulderLeftCoord, ColorImagePoint shoulderRightCoord, ColorImagePoint hipCenterCoord)
{
    // A hand is "resting" while it stays inside the box bounded by the shoulders and hip.
    bool rightHandResting = rightHandCoord.Y > shoulderRightCoord.Y
        && rightHandCoord.Y < hipCenterCoord.Y
        && rightHandCoord.X > shoulderRightCoord.X
        && rightHandCoord.X < shoulderLeftCoord.X;
    bool leftHandResting = leftHandCoord.Y > shoulderLeftCoord.Y
        && leftHandCoord.Y < hipCenterCoord.Y
        && leftHandCoord.X < shoulderLeftCoord.X
        && leftHandCoord.X > shoulderRightCoord.X;

    if (rightHandResting && leftHandResting)
    {
        // Both hands at rest: release every key.
        UpKey(Keys.Up);
        UpKey(Keys.Down);
        UpKey(Keys.Left);
        UpKey(Keys.Right);
        return;
    }

    // Accelerate: at least one hand raised above the head.
    if (rightHandCoord.Y < headCoord.Y || leftHandCoord.Y < headCoord.Y)
    {
        PressKey(Keys.Up);
        this.Status.Content = "Up";
    }
    else
    {
        UpKey(Keys.Up);
    }

    // Steer left: left hand held between head and hip height.
    if (leftHandCoord.Y > headCoord.Y && leftHandCoord.Y < hipCenterCoord.Y)
    {
        PressKey(Keys.Left);
        this.Status.Content = "Left";
    }
    else
    {
        UpKey(Keys.Left);
    }

    // Steer right: right hand held between head and hip height.
    if (rightHandCoord.Y > headCoord.Y && rightHandCoord.Y < hipCenterCoord.Y)
    {
        PressKey(Keys.Right);
        this.Status.Content = "Right";
    }
    else
    {
        UpKey(Keys.Right);
    }

    // Brake: either hand dropped below the hip.
    if (leftHandCoord.Y > hipCenterCoord.Y || rightHandCoord.Y > hipCenterCoord.Y)
    {
        PressKey(Keys.Down);
        this.Status.Content = "Down";
    }
    else
    {
        UpKey(Keys.Down);
    }
}
/// <summary>
/// Forwards a color-space point to the coordinate overload, optionally scaled per axis.
/// </summary>
/// <param name="point">Mapped point in color-image pixels.</param>
/// <param name="ratioX">Horizontal scale factor (default 1.0).</param>
/// <param name="ratioY">Vertical scale factor (default 1.0).</param>
public void Update(ColorImagePoint point, double ratioX = 1.0, double ratioY = 1.0)
{
    double scaledX = point.X * ratioX;
    double scaledY = point.Y * ratioY;
    Update(scaledX, scaledY);
}
/// <summary>
/// Command handler: parses a serialized depth frame from <paramref name="args"/>, maps it
/// to color-image coordinates, and stores the result in rData as "x||y||x||y...".
/// Expected layout: args[0] = sensor index, args[1] = size, args[2] = depth format,
/// args[3 .. 3+n-1] = depth pixels, args[3+n] = color format, where n = size - 3.
/// </summary>
/// <param name="args">Serialized command arguments (see layout above).</param>
/// <returns>KSuccess.QueryOk on success; the exception's code on a KActionException,
/// with the message stored in rData.</returns>
public byte MapDepthFrameToColorFrame(string[] args)
{
    try
    {
        int size = int.Parse(args[1]);
        verifArgs(size + 1, args);
        getKinectSensor(int.Parse(args[0]));
        int tabSize = size - 3;
        DepthImageFormat depthImageFormat = (DepthImageFormat)int.Parse(args[2]);

        short[] depthPixelData = new short[tabSize];
        // BUG FIX: the depth pixels start at args[3]; the old code read args[i + 2], which
        // re-parsed the depth-format argument as the first pixel and dropped the last one.
        for (int i = 0; i < tabSize; i++)
            depthPixelData[i] = short.Parse(args[i + 3]);

        ColorImageFormat colorImageFormat = (ColorImageFormat)int.Parse(args[3 + tabSize]);
        ColorImagePoint[] colorCoordinates = new ColorImagePoint[tabSize];
        sensor.MapDepthFrameToColorFrame(
            depthImageFormat, depthPixelData, colorImageFormat, colorCoordinates);

        // Serialize the mapped points as "x||y" pairs joined by "||".
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < tabSize; i++)
        {
            if (i > 0)
                sb.Append("||");
            ColorImagePoint p = colorCoordinates[i];
            sb.Append(p.X);
            sb.Append("||");
            sb.Append(p.Y);
        }
        rData = sb.ToString();
        return KSuccess.QueryOk;
    }
    catch (KActionException e)
    {
        rData = e.Message;
        return e.exceptionNumber;
    }
}
/// <summary>
/// Splits the current Kinect frame into player and room (background) pixel buffers using
/// the depth frame's player index, and stores the player data in the ring buffer before
/// triggering a redraw.
/// </summary>
/// <param name="colorFrame">Current RGB frame.</param>
/// <param name="depthFrame">Current depth frame (player index packed into the low bits).</param>
/// <param name="skeletonFrame">Current skeleton frame (used only as a readiness guard).</param>
private void SaveBuffer(ColorImageFrame colorFrame, DepthImageFrame depthFrame, SkeletonFrame skeletonFrame)
{
    if (kinectDevice == null || depthFrame == null || colorFrame == null || skeletonFrame == null)
        return;

    ColorImageStream colorStream = kinectDevice.ColorStream;
    screenImageStride = kinectDevice.DepthStream.FrameWidth * colorFrame.BytesPerPixel;
    int colorStride = colorFrame.BytesPerPixel * colorFrame.Width; // bytes per row (4 x width)
    int ImageIndex = 0;

    depthFrame.CopyPixelDataTo(_depthPixelData);
    colorFrame.CopyPixelDataTo(_colorPixelData);

    // Registration: for each depth pixel, the matching RGB pixel coordinate.
    // BUG FIX: a freshly allocated all-zero short[] was previously passed here, so the
    // mapping was computed from empty depth data; use the real depth pixels just copied.
    ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
    kinectDevice.MapDepthFrameToColorFrame(depthFrame.Format, _depthPixelData, colorFrame.Format, colorPoint);

    byte[] byteRoom = new byte[depthFrame.Height * screenImageStride];
    byte[] bytePlayer = new byte[depthFrame.Height * screenImageStride];
    double[] depth = new double[depthFrame.Height * screenImageStride];
    int[] playerIndexArray = new int[depthFrame.Height * screenImageStride];

    for (int depthY = 0; depthY < depthFrame.Height; depthY++)
    {
        for (int depthX = 0; depthX < depthFrame.Width; depthX++, ImageIndex += colorFrame.BytesPerPixel)
        {
            int depthPixelIndex = depthX + (depthY * depthFrame.Width);

            // Player ID for this depth pixel (0 means "no player").
            int playerIndex = _depthPixelData[depthPixelIndex] & DepthImageFrame.PlayerIndexBitmask;

            // Clamp the mapped coordinate to the color frame.
            int x = Math.Min(colorPoint[depthPixelIndex].X, colorStream.FrameWidth - 1);
            int y = Math.Min(colorPoint[depthPixelIndex].Y, colorStream.FrameHeight - 1);
            int colorPixelIndex = (x * colorFrame.BytesPerPixel) + (y * colorStride);

            if (playerIndex != 0)
            {
                // Player pixel: copy BGR + opaque alpha into the player buffer.
                bytePlayer[ImageIndex] = _colorPixelData[colorPixelIndex];          //Blue
                bytePlayer[ImageIndex + 1] = _colorPixelData[colorPixelIndex + 1];  //Green
                bytePlayer[ImageIndex + 2] = _colorPixelData[colorPixelIndex + 2];  //Red
                bytePlayer[ImageIndex + 3] = 0xFF;                                  //Alpha

                // Record the pixel's distance and which player it belongs to.
                depth[ImageIndex] = _depthPixelData[depthPixelIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                playerIndexArray[ImageIndex] = playerIndex;
            }
            else
            {
                // Background pixel: copy BGR + opaque alpha into the room buffer.
                byteRoom[ImageIndex] = _colorPixelData[colorPixelIndex];            //Blue
                byteRoom[ImageIndex + 1] = _colorPixelData[colorPixelIndex + 1];    //Green
                byteRoom[ImageIndex + 2] = _colorPixelData[colorPixelIndex + 2];    //Red
                byteRoom[ImageIndex + 3] = 0xFF;                                    //Alpha
            }
        }
    }

    // Store the player frame in the ring buffer and advance it.
    ringbuf.save_framedata(ref bytePlayer);
    ringbuf.save_playerIndexdata(playerIndexArray);
    ringbuf.set_nextframe();

    RenderScreen2();
}
/// <summary>
/// Returns the index of the first mapped color point that lands on the given (rounded)
/// pixel position, or -1 when no depth pixel maps there.
/// </summary>
/// <param name="colorPoints">Depth-to-color mapping, one entry per depth pixel.</param>
/// <param name="point">Target position; X and Y are rounded to the nearest integer.</param>
private static int FindDepthIndex(ColorImagePoint[] colorPoints, Point point)
{
    // Round once up front instead of per iteration.
    int targetX = (int)Math.Round(point.X);
    int targetY = (int)Math.Round(point.Y);

    for (int i = 0; i < colorPoints.Length; i++)
    {
        if (colorPoints[i].X == targetX && colorPoints[i].Y == targetY)
        {
            return i;
        }
    }
    return -1;
}
/// <summary>
/// Measures the tracked player's height: finds the topmost depth pixel belonging to the
/// player above the head joint, converts it back to skeleton space, and draws the
/// head-to-foot measurement via DrawMeasure.
/// (The original summary, copied from the depth-colorizing routine, was incorrect.)
/// </summary>
/// <param name="kinect">Sensor providing streams and coordinate mapping.</param>
/// <param name="depthFrame">Depth frame (player index packed into the low bits).</param>
/// <param name="skeletonFrame">Skeleton frame used to locate the head and feet joints.</param>
private void HeightMeasure( KinectSensor kinect, DepthImageFrame depthFrame, SkeletonFrame skeletonFrame )
{
    ColorImageStream colorStream = kinect.ColorStream;
    DepthImageStream depthStream = kinect.DepthStream;

    // Grab the first tracked skeleton; its array index corresponds to the player ID.
    Skeleton[] skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
    skeletonFrame.CopySkeletonDataTo( skeletons );

    int playerIndex = 0;
    for ( playerIndex = 0; playerIndex < skeletons.Length; playerIndex++ ) {
        if ( skeletons[playerIndex].TrackingState == SkeletonTrackingState.Tracked ) {
            break;
        }
    }
    // No tracked skeleton at all: nothing to measure.
    if ( playerIndex == skeletons.Length ) {
        return;
    }

    // First tracked skeleton.
    Skeleton skeleton = skeletons[playerIndex];

    // The depth stream's player ID is the skeleton index + 1 (0 means "no player").
    playerIndex++;

    // Bail out unless head and both feet are fully tracked.
    Joint head = skeleton.Joints[JointType.Head];
    Joint leftFoot = skeleton.Joints[JointType.FootLeft];
    Joint rightFoot = skeleton.Joints[JointType.FootRight];
    if ( (head.TrackingState != JointTrackingState.Tracked) ||
        (leftFoot.TrackingState != JointTrackingState.Tracked) ||
        (rightFoot.TrackingState != JointTrackingState.Tracked) ) {
        return;
    }

    // Per-pixel depth data.
    short[] depthPixel = new short[depthFrame.PixelDataLength];
    depthFrame.CopyPixelDataTo( depthPixel );

    // Depth-to-color registration.
    // NOTE(review): colorPoint is computed but never used below — possibly leftover.
    ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
    kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
        colorStream.Format, colorPoint );

    // Search upward from the head joint for the top of the head: scan the head's depth
    // column toward y = 0 and remember the highest row still labeled with this player.
    DepthImagePoint headDepth = depthFrame.MapFromSkeletonPoint( head.Position );
    int top = 0;
    for ( int i = 0; (headDepth.Y - i) > 0; i++ ) {
        // NOTE(review): the loop never breaks, so `top` ends at the highest matching row
        // in the entire column — gaps above the head are skipped over, not stopped at.
        int index = ((headDepth.Y - i) * depthFrame.Width) + headDepth.X;
        int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
        if ( player == playerIndex ) {
            top = i;
        }
    }

    // Convert the head-top pixel back to skeleton space and take the lower of the two
    // feet; the difference between them is used as the player's height.
    head.Position = depthFrame.MapToSkeletonPoint( headDepth.X, headDepth.Y - top );
    Joint foot = (leftFoot.Position.Y < rightFoot.Position.Y) ? leftFoot : rightFoot;

    // Display the measured height.
    DrawMeasure( kinect, colorStream, head, foot );
}
/// <summary>
/// Determines whether a click gesture occurred at the given point.
/// A click is recognized when the hand has held steady there (delegates to IsSteady).
/// </summary>
/// <param name="skeletonFrame">Skeleton frame to evaluate.</param>
/// <param name="point">Cursor position in color-image coordinates.</param>
/// <returns>True when the steadiness test passes.</returns>
private bool IsClicked( SkeletonFrame skeletonFrame, ColorImagePoint point )
{
    bool steady = IsSteady( skeletonFrame, point );
    return steady;
}
/// <summary>
/// Centers a canvas child on the given color-space point.
/// </summary>
/// <param name="element">Canvas child to move; its Width/Height must be set.</param>
/// <param name="point">Target center position in color-image pixel coordinates.</param>
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
    double centeredLeft = point.X - (element.Width / 2);
    double centeredTop = point.Y - (element.Height / 2);
    Canvas.SetLeft(element, centeredLeft);
    Canvas.SetTop(element, centeredTop);
}