/// <summary>
/// Depth-frame handler: scans player-labelled depth pixels for the leftmost,
/// rightmost, and topmost points, maps them to skeleton space, and updates
/// <c>heightFingerTip</c> with the fingertip-to-fingertip span in centimeters.
/// </summary>
/// <param name="sender">Event source (the Kinect sensor).</param>
/// <param name="e">Frame-ready event data.</param>
private void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    try
    {
        DepthImagePixel[] depthImagePixel;
        using (var depthFrame = e.OpenDepthImageFrame())
        {
            if (depthFrame == null)
            {
                return;
            }
            depthImagePixel = depthFrame.GetRawPixelData();
        }

        int frameWidth = sensor.DepthStream.FrameWidth;

        // BUGFIX: the original stored sentinel values in the skeleton-space fields
        // (fingerTipLeft.X = int.MaxValue, etc.) and then compared the pixel-space
        // point.X / point.Y against them. After the first hit those fields held
        // skeleton coordinates in METERS, so comparing them to pixel indices mixed
        // two coordinate systems and the extremes were wrong. Track the extremes
        // in pixel space with locals instead.
        int leftmostX = int.MaxValue;
        int rightmostX = int.MinValue;
        int topmostY = int.MaxValue;

        for (int i = 0; i < depthImagePixel.Length; i++)
        {
            DepthImagePixel pixel = depthImagePixel[i];
            // PlayerIndex > 0 means the pixel belongs to a tracked player.
            if (pixel.IsKnownDepth && pixel.PlayerIndex > 0)
            {
                var point = new DepthImagePoint()
                {
                    X = i % frameWidth,
                    Y = i / frameWidth,
                    Depth = pixel.Depth,
                };
                if (point.X < leftmostX)
                {
                    leftmostX = point.X;
                    fingerTipLeft = mapper.MapDepthPointToSkeletonPoint(sensor.DepthStream.Format, point);
                }
                if (point.X > rightmostX)
                {
                    rightmostX = point.X;
                    fingerTipRight = mapper.MapDepthPointToSkeletonPoint(sensor.DepthStream.Format, point);
                }
                if (point.Y < topmostY)
                {
                    topmostY = point.Y;
                    hair = mapper.MapDepthPointToSkeletonPoint(sensor.DepthStream.Format, point);
                }
            }
        }

        // Skeleton-space distance is in meters; * 100 converts to centimeters.
        heightFingerTip = Distance(fingerTipLeft, fingerTipRight) * 100;
    }
    catch
    {
        // Deliberate best-effort: a bad frame must not crash the sensor event pump.
        // NOTE(review): consider logging the exception instead of a silent swallow.
    }
}
/// <summary>
/// Maps a single default-constructed depth point into skeleton space.
/// NOTE(review): the <paramref name="data"/> buffer and the allocated point array
/// are never used, and the mapped result is discarded — this reads as stub/demo
/// code. It is also duplicated elsewhere in this source; consider consolidating.
/// </summary>
/// <param name="data">Raw depth buffer (currently unused).</param>
public void TransformDepthTo3D(Byte[] data)
{
    var coordinateMapper = new CoordinateMapper(device);
    var depthPoint = new DepthImagePoint();
    var skeletonBuffer = new SkeletonPoint[320 * 120];
    var mapped = coordinateMapper.MapDepthPointToSkeletonPoint(DepthImageFormat.Resolution640x480Fps30, depthPoint);
}
/// <summary>
/// Maps a <see cref="PointDepth3D"/> (depth-image space) to a
/// <see cref="PointSkeleton3D"/> (skeleton space) using the SDK mapper.
/// NOTE(review): method name contains a typo ("Sketelon"); it is kept
/// unchanged because callers bind to it.
/// </summary>
/// <param name="depthImageFormat">Format of the depth stream the point came from.</param>
/// <param name="pointDepth3D">Depth-space point (X, Y in pixels; Depth in mm).</param>
/// <returns>The point converted into skeleton space.</returns>
public PointSkeleton3D MapDepthPointToSketelonPoint(DepthImageFormat depthImageFormat, PointDepth3D pointDepth3D)
{
    var depthPoint = new DepthImagePoint
    {
        X = pointDepth3D.X,
        Y = pointDepth3D.Y,
        Depth = pointDepth3D.Depth
    };
    SkeletonPoint skeletonPoint = mapper.MapDepthPointToSkeletonPoint(depthImageFormat, depthPoint);
    return new PointSkeleton3D(skeletonPoint);
}
/// <summary>
/// Maps a depth-space point into skeleton space, delegating to the SDK mapper
/// with the depth format stored in <c>dif</c>.
/// </summary>
/// <param name="depthImagePoint">Depth-space point to convert.</param>
/// <returns>The skeleton-space equivalent of the point.</returns>
public SkeletonPoint MapDepthPointToSkeletonPoint(DepthImagePoint depthImagePoint)
{
    return mapper.MapDepthPointToSkeletonPoint(dif, depthImagePoint);
}
/// <summary>
/// Maps a single default-constructed depth point into skeleton space.
/// NOTE(review): this method is a whitespace-only duplicate of another
/// <c>TransformDepthTo3D</c> in this source; the input buffer and allocated
/// arrays are unused and the result is discarded — stub/demo code.
/// </summary>
/// <param name="data">Raw depth buffer (currently unused).</param>
public void TransformDepthTo3D(Byte[] data)
{
    var coordinateMapper = new CoordinateMapper(device);
    var depthPoint = new DepthImagePoint();
    var skeletonBuffer = new SkeletonPoint[320 * 120];
    var mapped = coordinateMapper.MapDepthPointToSkeletonPoint(DepthImageFormat.Resolution640x480Fps30, depthPoint);
}
/// <summary>
/// Projects a batch of depth-image points into 3D color-camera space:
/// depth -> skeleton space -> color pixel coordinates (X mirrored as 1280 - X),
/// then undistorted via the stereo calibration and back-projected by scaling
/// with each point's depth (Z) and the <paramref name="zTune"/> factor.
/// </summary>
/// <param name="mapper">SDK coordinate mapper for the active sensor.</param>
/// <param name="points">Depth-space points to convert.</param>
/// <param name="format">Depth stream format the points came from.</param>
/// <param name="zTune">Multiplier applied to the depth value (default 1).</param>
/// <returns>One vector per input point, in calibrated color-camera space.</returns>
public OpenTK.Vector3[] ToColorSpace(CoordinateMapper mapper, IEnumerable<DepthImagePoint> points, DepthImageFormat format, float zTune = 1)
{
    // Depth space -> skeleton space (meters).
    var skeletonPoints = points.Select(dp => mapper.MapDepthPointToSkeletonPoint(format, dp)).ToArray();

    // Skeleton space -> distorted color pixel coordinates. The X flip
    // (1280 - X) mirrors horizontally — presumably to match the calibration
    // image orientation; confirm against StereoCalibration's convention.
    var distorted = new PointF[skeletonPoints.Length];
    for (int i = 0; i < skeletonPoints.Length; i++)
    {
        var colorPoint = mapper.MapSkeletonPointToColorPoint(skeletonPoints[i], ColorImageFormat.RgbResolution1280x960Fps12);
        distorted[i] = new PointF(1280 - colorPoint.X, colorPoint.Y);
    }

    // Undistort, then scale the normalized coordinates by tuned depth.
    return StereoCalibration.Undistort(calib, distorted)
        .Zip(skeletonPoints, (u, s) => new OpenTK.Vector3(u.X * s.Z * zTune, u.Y * s.Z * zTune, s.Z * zTune))
        .ToArray();
}
/// <summary>
/// Projects a single depth-image point into color space by first mapping it
/// to skeleton space and delegating to the skeleton-point overload.
/// NOTE(review): the default <paramref name="zTune"/> is 0 here while the
/// batch overload defaults to 1 — confirm the asymmetry is intentional.
/// </summary>
/// <param name="mapper">SDK coordinate mapper for the active sensor.</param>
/// <param name="point">Depth-space point to convert.</param>
/// <param name="format">Depth stream format the point came from.</param>
/// <param name="zTune">Depth multiplier forwarded to the overload (default 0).</param>
/// <returns>Color-space coordinates as produced by the skeleton-point overload.</returns>
public float[] ToColorSpace(CoordinateMapper mapper, DepthImagePoint point, DepthImageFormat format, float zTune = 0)
{
    SkeletonPoint skeletonPoint = mapper.MapDepthPointToSkeletonPoint(format, point);
    return ToColorSpace(mapper, skeletonPoint, zTune);
}
/// <summary>
/// Computes the centroid of a contour's vertices (in depth-image pixels) and
/// maps it into skeleton space.
/// </summary>
/// <param name="contour">Contour to average; must contain at least one point.</param>
/// <returns>The contour centroid converted to skeleton space.</returns>
/// <exception cref="ArgumentException">Thrown when the contour is null or empty.</exception>
public SkeletonPoint DetermineCoordinate(Contour<Drawing.Point> contour)
{
    // BUGFIX: the original divided by contour.Total unconditionally, so an
    // empty contour threw a raw DivideByZeroException. Fail with a clear
    // argument error instead.
    if (contour == null || contour.Total == 0)
    {
        throw new ArgumentException("Contour must contain at least one point.", "contour");
    }

    // Sum all vertex coordinates to average them below.
    Drawing.Point middlePoint = new Drawing.Point(0, 0);
    for (int i = 0; i < contour.Total; i++)
    {
        middlePoint.X += contour[i].X;
        middlePoint.Y += contour[i].Y;
    }

    DepthImagePoint featurePoint = new DepthImagePoint();
    featurePoint.X = middlePoint.X / contour.Total;
    featurePoint.Y = middlePoint.Y / contour.Total;
    // NOTE(review): featurePoint.Depth is never assigned and stays 0, so the
    // mapping is performed with zero depth — confirm whether a depth lookup
    // at the centroid pixel is missing here.

    // NOTE(review): a CoordinateMapper is allocated per call; caching one per
    // sensor would avoid repeated construction.
    CoordinateMapper changeCoordinate = new CoordinateMapper(sensorChooser.Kinect);
    SkeletonPoint kinectCoordinate = changeCoordinate.MapDepthPointToSkeletonPoint(DepthImageFormat.Resolution640x480Fps30, featurePoint);
    return kinectCoordinate;
}
/// <summary>
/// All-frames handler: slices the depth frame to a depth window, finds blob
/// contours in the sliced image, maps each blob center into skeleton space and
/// records its 3D coordinates to the worksheet; also blits the color frame to
/// the display bitmap.
/// </summary>
/// <param name="sender">Event source (the Kinect sensor).</param>
/// <param name="e">Frame-ready event data holding color and depth frames.</param>
private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    //TODO Keep the previous frame image as well,
    //Compare both on a background process and save it to the worksheet
    //Convert x&y differences to millimeters according to depth data (distance)
    //and some trigonometry
    BitmapSource depthBmp = null;
    blobCount = 0;

    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
        {
            if (depthFrame != null)
            {
                // Keep only pixels inside the [sliderMin, sliderMax] depth window.
                depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value);

                Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                if (running)
                {
                    // Write the frame header cell, then advance the frame counter.
                    wsheet.Cells[1, frameCount + 1].Value = "Frame " + frameCount;
                    frameCount++;

                    using (MemStorage stor = new MemStorage())
                    {
                        //Find contours with no holes try CV_RETR_EXTERNAL to find holes
                        Contour<System.Drawing.Point> contours = gray_image.FindContours(
                            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                            stor);

                        //Conversion of depthPixels to skeletonPoints which contain all three dimensions in meters.
                        //The conversion and copying is assumed to be costly but there are no single pixel to single point conversion I could find.
                        depthFrame.CopyDepthImagePixelDataTo(depthPixels);
                        //mapper.MapDepthFrameToSkeletonFrame(depthFormat, depthPixels, skeletonPoints);

                        // Walk the sibling chain of top-level contours.
                        for (int i = 0; contours != null; contours = contours.HNext)
                        {
                            i++;
                            // Area filter: sliders give the min/max blob side length.
                            if ((contours.Area > Math.Pow(sliderMinSize.Value, 2)) && (contours.Area < Math.Pow(sliderMaxSize.Value, 2)))
                            {
                                MCvBox2D box = contours.GetMinAreaRect();
                                //DrQ RED BOX AROUND BLOB
                                openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                                blobCount++;

                                int x = (int)box.center.X;
                                int y = (int)box.center.Y;
                                DepthImagePoint p = new DepthImagePoint();
                                p.X = x;
                                p.Y = y;
                                // BUGFIX: row stride was hard-coded as 640; use the
                                // frame's own width so other depth formats index correctly.
                                p.Depth = depthPixels[x + depthFrame.Width * y].Depth;
                                SkeletonPoint s = mapper.MapDepthPointToSkeletonPoint(depthFormat, p);

                                //TODO Conversion from absolute coordinates to relative coordinates
                                addCoordData(3 * blobCount - 1, frameCount, s.X, s.Y, s.Z);
                                /*if (KinectSensor.IsKnownPoint(s))
                                 * {
                                 *  addCoordData(3 * blobCount - 1, frameCount, s.X, s.Y, s.Z);
                                 * }*/
                            }
                        }
                    }
                }

                this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);
                txtBlobCount.Text = blobCount.ToString();
                getNext().RunWorkerAsync(openCVImg);
            }
        }

        if (colorFrame != null)
        {
            // Copy the raw color pixels into the writable display bitmap.
            colorFrame.CopyPixelDataTo(this.colorPixels);
            this.colorBitmap.WritePixels(
                new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                this.colorPixels,
                this.colorBitmap.PixelWidth * sizeof(int),
                0);
        }
    }
}
/// <summary>
/// Calculates the skeleton-space position of a point on the depth image.
/// </summary>
/// <param name="p">Point on screen (depth image).</param>
/// <param name="DepthValue">Depth value of the point.</param>
/// <param name="nui">Active Kinect sensor.</param>
/// <param name="panel">Panel on which the screen point lies (currently unused; kept for signature compatibility).</param>
/// <returns>The corresponding position in skeleton space.</returns>
public static NUIVector GetSkeletonSpacePosition(this Point p, short DepthValue, KinectSensor nui, Canvas panel)
{
    DepthImagePoint depthPoint = new DepthImagePoint
    {
        X = (int)p.X,
        Y = (int)p.Y,
        Depth = DepthValue
    };
    // NOTE(review): a CoordinateMapper is allocated on every call; consider
    // caching one per sensor if this is invoked per-pixel or per-frame.
    CoordinateMapper coordinateMapper = new CoordinateMapper(nui);
    return coordinateMapper.MapDepthPointToSkeletonPoint(nui.DepthStream.Format, depthPoint);
}