/// <summary>
/// Retrieve all the points (x, y, z position in meters) from Kinect, row by row.
/// </summary>
/// <returns>All the points (x, y, z position in meters) from Kinect, row by row; null if no frame is available.</returns>
public MCvPoint3D32f[] RetrievePointCloudMap()
{
   IntPtr img = CvInvoke.cvRetrieveFrame(Ptr, (int)OpenNIDataType.PointCloudMap);
   if (img == IntPtr.Zero)
   {
      return null;
   }

   // Flip the native frame buffer in place before reading it out.
   if (FlipType != Emgu.CV.CvEnum.FLIP.NONE)
   {
      CvInvoke.cvFlip(img, img, FlipType);
   }

   MIplImage iplImage = (MIplImage)Marshal.PtrToStructure(img, typeof(MIplImage));

   MCvPoint3D32f[] points = new MCvPoint3D32f[iplImage.width * iplImage.height];
   GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
   try
   {
      // Wrap the pinned managed array as a matrix header and copy the native
      // frame data into it.
      using (Matrix<float> m = new Matrix<float>(iplImage.height, iplImage.width, handle.AddrOfPinnedObject()))
      {
         CvInvoke.cvCopy(img, m, IntPtr.Zero);
      }
   }
   finally
   {
      // BUGFIX: always unpin the array, even when the native copy throws.
      // Previously an exception between Alloc and Free would leak the GCHandle
      // and leave the array pinned for the lifetime of the process.
      handle.Free();
   }
   return points;
}
/// <summary>
/// Retrieve a Bgr image frame after Grab()
/// </summary>
/// <param name="streamIndex">Stream index</param>
/// <returns>A Bgr image frame; null if no frame is available</returns>
public virtual Image<Bgr, Byte> RetrieveBgrFrame(int streamIndex)
{
   IntPtr framePtr = CvInvoke.cvRetrieveFrame(Ptr, streamIndex);
   if (framePtr == IntPtr.Zero)
   {
      return null;
   }

   MIplImage header = (MIplImage)Marshal.PtrToStructure(framePtr, typeof(MIplImage));

   Image<Bgr, Byte> result;
   if (header.nChannels == 1)
   {
      // The captured frame is grayscale; expand it into a 3-channel BGR image.
      result = new Image<Bgr, Byte>(header.width, header.height);
      CvInvoke.cvCvtColor(framePtr, result.Ptr, Emgu.CV.CvEnum.COLOR_CONVERSION.GRAY2BGR);
   }
   else
   {
      // Already multi-channel: construct the image over the native frame buffer.
      result = new Image<Bgr, Byte>(header.width, header.height, header.widthStep, header.imageData);
   }

   // In-place flip, if the capture is configured to flip frames.
   result._Flip(FlipType);
   return result;
}
/// <summary>
/// Retrieve the depth map from Kinect (in mm)
/// </summary>
/// <returns>The depth map from Kinect (in mm); null if no frame is available</returns>
public Image<Gray, int> RetrieveDepthMap()
{
   IntPtr framePtr = CvInvoke.cvRetrieveFrame(Ptr, (int)OpenNIDataType.DepthMap);
   if (framePtr == IntPtr.Zero)
   {
      return null;
   }

   MIplImage header = (MIplImage)Marshal.PtrToStructure(framePtr, typeof(MIplImage));

   // Construct a single-channel 32-bit integer image over the native depth buffer.
   Image<Gray, int> depthMap = new Image<Gray, int>(header.width, header.height, header.widthStep, header.imageData);

   // In-place flip, if the capture is configured to flip frames.
   depthMap._Flip(FlipType);
   return depthMap;
}
/// <summary>
/// Retrieve a Gray image frame after Grab()
/// </summary>
/// <param name="streamIdx">Stream index. Use 0 for default.</param>
/// <returns>A Gray image frame; null if no frame is available</returns>
public virtual Image<Gray, Byte> RetrieveGrayFrame(int streamIdx)
{
   // Removed dead commented-out FFMPEG-specific retrieval path; all capture
   // retrieval goes through cvRetrieveFrame here.
   IntPtr img = CvInvoke.cvRetrieveFrame(Ptr, streamIdx);
   if (img == IntPtr.Zero)
   {
      return null;
   }

   MIplImage iplImage = (MIplImage)Marshal.PtrToStructure(img, typeof(MIplImage));

   Image<Gray, Byte> res;
   if (iplImage.nChannels == 3)
   {
      // The captured frame is BGR; collapse it to a single-channel grayscale image.
      res = new Image<Gray, Byte>(iplImage.width, iplImage.height);
      CvInvoke.cvCvtColor(img, res.Ptr, Emgu.CV.CvEnum.COLOR_CONVERSION.CV_BGR2GRAY);
   }
   else
   {
      // Already single channel: construct the image over the native frame buffer.
      res = new Image<Gray, Byte>(iplImage.width, iplImage.height, iplImage.widthStep, iplImage.imageData);
   }

   // In-place flip, if the capture is configured to flip frames.
   res._Flip(FlipType);
   return res;
}