private void projectionYToolStripMenuItem_Click(object sender, EventArgs e)
{
    // Nothing to do until an image has been loaded.
    if (img == null)
    {
        return;
    }

    Cursor = Cursors.WaitCursor; // busy cursor while the projection is computed

    // Work on a copy so the on-screen image stays untouched.
    Image snapshot = img.Copy();
    MIplImage header = snapshot.MIplImage;
    int imgHeight = header.height;

    // Show the Y-projection histogram in a modal dialog.
    Project_Y dialog = new Project_Y(ImageClass.Projection_Y(snapshot), imgHeight);
    dialog.ShowDialog();

    // Redisplay the (unchanged) image and restore the cursor.
    ImageViewer.Image = img.Bitmap;
    ImageViewer.Refresh();
    Cursor = Cursors.Default;
}
/// <summary>
/// Converts an MIplImage structure to a native IplImage pointer.
/// NOTE: the returned pointer MUST be released with Marshal.FreeHGlobal
/// after use, otherwise the unmanaged block leaks.
/// </summary>
/// <param name="mi">The MIplImage value to marshal.</param>
/// <returns>Pointer to a newly allocated unmanaged IplImage header.</returns>
public static IntPtr MIplImageToIplImagePointer(MIplImage mi)
{
    // Allocate an unmanaged block of the structure's size and copy the header into it.
    IntPtr ptr = Marshal.AllocHGlobal(mi.NSize);
    Marshal.StructureToPtr(mi, ptr, false);
    return (ptr);
}
//Function that finds the digits
private void digitosToolStripMenuItem_Click(object sender, EventArgs e)
{
    if (img == null) // verify if the image is already opened
    {
        return;
    }
    Cursor = Cursors.WaitCursor; // clock cursor

    // NOTE(review): the original copied the image into a *local* named imgUndo
    // and read its MIplImage header without using either value. If an undo
    // snapshot is intended it should go into a form-level field — confirm
    // against the rest of the form.
    Image imgUndo = img.Copy();

    // Locate the digits in place, then evaluate the component count
    // (called for its side effects; the returned string was never used).
    ImageClass.Digits(img);
    ImageClass.EvaluateCompNumber();

    ImageViewer.Image = img.Bitmap;
    ImageViewer.Refresh(); // refresh image on the screen
    Cursor = Cursors.Default; // normal cursor
}
//Are all the methods below wrong? Must every pixel be copied in a for loop?
//Bitmap -> IntPtr: bmp.GetHbitmap()
/// <summary>
/// Converts a Bitmap to an MIplImage header.
/// NOTE(review): the Emgu Image created here goes out of scope when the method
/// returns, so the returned header's imageData pointer may refer to memory
/// owned by a collectable object — confirm the caller keeps the pixel data alive.
/// </summary>
/// <param name="bmp">Source bitmap.</param>
/// <returns>The MIplImage header describing the converted image.</returns>
public static MIplImage BitmapToMIplImage(Bitmap bmp)
{
    Emgu.CV.Image <Bgr, Byte> img = new Image <Bgr, byte>(bmp);
    MIplImage mi = img.MIplImage;
    return (mi);
}
/// <summary>
/// Compute the panoramic images given the images
/// </summary>
/// <param name="images">The input images</param>
/// <returns>The panoramic image</returns>
/// <exception cref="ArgumentException">Thrown when the native stitcher returns no image (too few / non-overlapping images).</exception>
public Image <Bgr, Byte> Stitch(Image <Bgr, Byte>[] images)
{
    // Collect the native IplImage pointers of all inputs.
    IntPtr[] ptrs = new IntPtr[images.Length];
    for (int i = 0; i < images.Length; ++i)
    {
        ptrs[i] = images[i].Ptr;
    }
    // Pin the pointer array so the native stitcher can read it safely.
    GCHandle handle = GCHandle.Alloc(ptrs, GCHandleType.Pinned);
    IntPtr resultIplImage = StitchingInvoke.CvStitcherStitch(_ptr, handle.AddrOfPinnedObject(), images.Length);
    handle.Free();
    if (resultIplImage == IntPtr.Zero)
    {
        throw new ArgumentException("Requires more images");
    }
    // Copy the native result into a managed image, then release the native one
    // so the unmanaged buffer does not leak.
    MIplImage tmp = (MIplImage)Marshal.PtrToStructure(resultIplImage, typeof(MIplImage));
    Image <Bgr, Byte> result = new Image <Bgr, byte>(tmp.width, tmp.height);
    CvInvoke.cvCopy(resultIplImage, result, IntPtr.Zero);
    CvInvoke.cvReleaseImage(ref resultIplImage);
    return (result);
}
/// <summary>
/// Paints the watershed result into <paramref name="wtImg"/>: watershed
/// (ridge) pixels are written as -1, every other pixel gets its region label
/// from _pixelMap. No-op when no watershed pixels were found.
/// </summary>
/// <param name="wtImg">Destination 32-bit single-channel label image, same size as the processed image.</param>
public void DrawWatershedLines(Image <Gray, int> wtImg)
{
    MIplImage data = wtImg.MIplImage;
    if (_watershedPixelCount == 0)
    {
        return;
    }
    int watershedColor = -1;
    unsafe
    {
        // Row padding in BYTES: widthStep is a byte stride, each pixel is sizeof(int) bytes.
        int offset = data.widthStep - data.width * sizeof(int);
        int *ptr = (int *)(data.imageData);
        for (int y = 0; y < data.height; y++)
        {
            for (int x = 0; x < data.width; x++, ptr++)
            {
                // if the pixel in our map is watershed pixel then draw it
                if (_pixelMap[x.ToString() + "," + y.ToString()].Label == WatershedCommon.WSHED)
                {
                    *ptr = watershedColor;
                }
                else
                {
                    *ptr = _pixelMap[x.ToString() + "," + y.ToString()].Label;
                }
            }
            // BUG FIX: "ptr += offset" on an int* advanced offset * sizeof(int)
            // bytes, but offset is already a byte count. Advance via a byte
            // pointer instead. (Latent bug: harmless only when widthStep has no padding.)
            ptr = (int *)((byte *)ptr + offset);
        }
    }
}
/// <summary>
/// Compares imgOrig and imgCmp pixel by pixel (blue channel only) and stores
/// in <c>prob</c> the fraction of pixels whose blue component matches; sets
/// <c>isFinish</c> when done. Only runs for 3-channel images.
/// NOTE(review): one padding value (taken from imgOrig) is applied to both
/// pointers — this assumes both images share the same widthStep; confirm.
/// </summary>
public unsafe void DoWork()
{
    MIplImage m = imgOrig.MIplImage;
    MIplImage n = imgCmp.MIplImage;
    byte * dataRefPtr = (byte *)m.imageData.ToPointer(); // pointer to the start of the reference image
    byte * dataPtr = (byte *)n.imageData.ToPointer(); // pointer to the comparison image
    int width = imgOrig.Width;
    int height = imgOrig.Height;
    int nChan = m.nChannels; // number of channels, 3
    int padding = m.widthStep - m.nChannels * m.width; // row alignment (padding)
    int x, y, count = 0;
    if (nChan == 3) // RGB image
    {
        for (y = 0; y < height; y++)
        {
            for (x = 0; x < width; x++)
            {
                // count pixels whose blue component equals the reference's
                if (dataPtr[0] == dataRefPtr[0])
                {
                    count++;
                }
                // advance both pointers to the next pixel
                dataPtr += nChan;
                dataRefPtr += nChan;
            }
            // at the end of the line skip the alignment bytes (padding)
            dataPtr += padding;
            dataRefPtr += padding;
        }
        prob = count / ((float)height * width);
        isFinish = true;
    }
}
/// <summary>
/// Builds the pixel map (one WatershedPixel per image pixel, keyed "x,y") and
/// buckets every pixel into _heightSortedList by its grey value ("height").
/// Also resets _currentLabel to 0 for a fresh flooding pass.
/// </summary>
/// <param name="wtImg">8-bit single-channel source image.</param>
private void CreatePixelMapAndHeightSortedArray(Image <Gray, byte> wtImg)
{
    MIplImage data = wtImg.MIplImage;
    _pictureWidth = data.width;
    _pictureHeight = data.height;
    // pixel map holds every pixel thus size of (_pictureWidth * _pictureHeight)
    _pixelMap = new Dictionary <string, WatershedPixel>(_pictureWidth * _pictureHeight);
    unsafe
    {
        // row padding in bytes (1 byte per pixel, so byte-pointer arithmetic is correct here)
        int offset = data.widthStep - data.width;
        byte *ptr = (byte *)(data.imageData);
        // get histogram of all values in grey = height
        for (int y = 0; y < data.height; y++)
        {
            for (int x = 0; x < data.width; x++, ptr++)
            {
                WatershedPixel p = new WatershedPixel(x, y, *ptr);
                // add every pixel to the pixel map
                _pixelMap.Add(p.X.ToString() + "," + p.Y.ToString(), p);
                _heightSortedList[*ptr].Add(p);
            }
            // skip the row padding
            ptr += offset;
        }
    }
    this._currentLabel = 0;
}
/// <summary>
/// Retrieve a Gray image frame after Grab()
/// </summary>
/// <param name="streamIdx">Stream index. Use 0 for default.</param>
/// <returns>A Gray image frame, or null if no frame is available.</returns>
public virtual Image <Gray, Byte> RetrieveGrayFrame(int streamIdx)
{
    IntPtr img =
        /*(_captureModuleType == CaptureModuleType.FFMPEG) ?
         * CvInvoke.cvRetrieveFrame_FFMPEG(Ptr, streamIdx) :*/
        CvInvoke.cvRetrieveFrame(Ptr, streamIdx);
    if (img == IntPtr.Zero)
    {
        return (null);
    }
    MIplImage iplImage = (MIplImage)Marshal.PtrToStructure(img, typeof(MIplImage));

    Image <Gray, Byte> res;
    if (iplImage.nChannels == 3)
    {
        //if the image captured is Bgr, convert it to Grayscale
        res = new Image <Gray, Byte>(iplImage.width, iplImage.height);
        CvInvoke.cvCvtColor(img, res.Ptr, Emgu.CV.CvEnum.COLOR_CONVERSION.CV_BGR2GRAY);
    }
    else
    {
        // Wrap the capture buffer directly (no copy) — the data is owned by
        // the capture and is only valid until the next Grab().
        res = new Image <Gray, byte>(iplImage.width, iplImage.height, iplImage.widthStep, iplImage.imageData);
    }

    //inplace flip the image if necessary
    res._Flip(FlipType);
    return (res);
}
/// <summary>
/// Converts a native IplImage pointer to an MIplImage header structure.
/// </summary>
/// <param name="intptr">Pointer to a native IplImage.</param>
/// <returns>The marshalled MIplImage structure.</returns>
public static MIplImage IntPtrToMIplImage(IntPtr intptr)
{
    // Marshal directly; the original constructed a default instance that was
    // immediately overwritten.
    return (MIplImage)Marshal.PtrToStructure(intptr, typeof(MIplImage));
}
/// <summary>
/// Retrieve a Bgr image frame after Grab()
/// </summary>
/// <param name="streamIndex">Stream index</param>
/// <returns>A Bgr image frame, or null if no frame is available.</returns>
public virtual Image <Bgr, Byte> RetrieveBgrFrame(int streamIndex)
{
    IntPtr img = CvInvoke.cvRetrieveFrame(Ptr, streamIndex);
    if (img == IntPtr.Zero)
    {
        return (null);
    }
    MIplImage iplImage = (MIplImage)Marshal.PtrToStructure(img, typeof(MIplImage));

    Image <Bgr, Byte> res;
    if (iplImage.nChannels == 1)
    {
        //if the image captured is Grayscale, convert it to BGR
        // NOTE(review): the sibling RetrieveGrayFrame uses the CV_-prefixed
        // enum member (CV_BGR2GRAY); confirm GRAY2BGR is the intended member here.
        res = new Image <Bgr, Byte>(iplImage.width, iplImage.height);
        CvInvoke.cvCvtColor(img, res.Ptr, Emgu.CV.CvEnum.COLOR_CONVERSION.GRAY2BGR);
    }
    else
    {
        // Wrap the capture buffer directly (no copy) — valid only until the next Grab().
        res = new Image <Bgr, byte>(iplImage.width, iplImage.height, iplImage.widthStep, iplImage.imageData);
    }

    //inplace flip the image if necessary
    res._Flip(FlipType);
    return (res);
}
/// <summary>
/// Retrieve all the points (x, y, z position in meters) from Kinect, row by row.
/// </summary>
/// <returns>All the points (x, y, z position in meters) from Kinect, row by row; null if no frame is available.</returns>
public MCvPoint3D32f[] RetrievePointCloudMap()
{
    IntPtr img = CvInvoke.cvRetrieveFrame(Ptr, (int)OpenNIDataType.PointCloudMap);
    if (img == IntPtr.Zero)
    {
        return (null);
    }
    // Flip the native frame in place if requested.
    if (FlipType != Emgu.CV.CvEnum.FLIP.NONE)
    {
        CvInvoke.cvFlip(img, img, FlipType);
    }
    MIplImage iplImage = (MIplImage)Marshal.PtrToStructure(img, typeof(MIplImage));

    // One MCvPoint3D32f (3 floats) per pixel, pinned so cvCopy can write into it.
    // NOTE(review): the Matrix is declared height x width of float while the
    // pinned buffer holds 3 floats per pixel — confirm cvCopy transfers the
    // full 3-channel data here.
    MCvPoint3D32f[] points = new MCvPoint3D32f[iplImage.width * iplImage.height];
    GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
    using (Matrix <float> m = new Matrix <float>(iplImage.height, iplImage.width, handle.AddrOfPinnedObject()))
    {
        CvInvoke.cvCopy(img, m, IntPtr.Zero);
    }
    handle.Free();
    return (points);
}
/// <summary>
/// Builds a binary mask image containing only the blobs listed in
/// <paramref name="blobs"/>.
/// </summary>
/// <param name="blobs">The blobs to keep in the mask.</param>
/// <returns>The binary mask for the given blobs.</returns>
public Image <Gray, Byte> DrawBlobsMask(CvBlobs blobs)
{
    // Read the IplImage header to learn the label image dimensions.
    MIplImage header = (MIplImage)Marshal.PtrToStructure(Ptr, typeof(MIplImage));
    Image <Gray, Byte> mask = new Image <Gray, byte>(header.width, header.height);
    // Render only the listed blobs into the mask.
    CvInvoke.cvbCvFilterLabels(Ptr, mask, blobs);
    return mask;
}
/// <summary>
/// Converts an IplImage pointer into an Emgu CV Image object.
/// NOTE: the caller must choose TColor/TDepth to match the IplImage's actual
/// depth and nChannels; the returned image wraps the native buffer (no copy).
/// </summary>
/// <typeparam name = "TColor" >Color type of this image (either Gray, Bgr, Bgra, Hsv, Hls, Lab, Luv, Xyz or Ycc)</typeparam>
/// <typeparam name = "TDepth" >Depth of this image (either Byte, SByte, Single, double, UInt16, Int16 or Int32)</typeparam>
/// <param name = "ptr" >IplImage pointer</param>
/// <returns >The wrapping Image object</returns>
public static Image <TColor, TDepth> IplImagePointerToEmgucvImage <TColor, TDepth>(IntPtr ptr)
    where TColor : struct, IColor
    where TDepth : new()
{
    MIplImage mi = IplImagePointerToMIplImage(ptr);
    return (new Image <TColor, TDepth>(mi.width, mi.height, mi.widthStep, mi.imageData));
}
/// <summary>
/// Converts an IplImage* to an Emgu Image (note: in EmguCV an OpenCV
/// IplImage* corresponds to the IntPtr type).
/// NOTE(review): despite the name this returns an Image&lt;T, byte&gt;, not a
/// System.Drawing.Bitmap, and it wraps the native buffer without copying.
/// </summary>
/// <param name="ptrImage">Pointer to the native IplImage.</param>
/// <returns>The wrapping Image object.</returns>
public static Image<T, byte> ConvertIntPrToBitmap<T>(IntPtr ptrImage) where T : struct, IColor
{
    // Marshal the IplImage pointer into an MIplImage header.
    MIplImage mi = (MIplImage)Marshal.PtrToStructure(ptrImage, typeof(MIplImage));
    Image<T, byte> image = new Image<T, byte>(mi.width, mi.height, mi.widthStep, mi.imageData);
    return image;
}
/// <summary>
/// Get the binary mask for the blobs listed in the CvBlobs
/// </summary>
/// <param name="blobs">The blobs</param>
/// <returns>The binary mask for the specific blobs</returns>
public Image <Gray, Byte> DrawBlobsMask(CvBlobs blobs)
{
    // Read the label image header; NETFX_CORE lacks the non-generic
    // PtrToStructure overload, hence the preprocessor split.
#if NETFX_CORE
    MIplImage img = Marshal.PtrToStructure <MIplImage>(Ptr);
#else
    MIplImage img = (MIplImage)Marshal.PtrToStructure(Ptr, typeof(MIplImage));
#endif
    Image <Gray, Byte> mask = new Image <Gray, byte>(img.Width, img.Height);
    // Render only the listed blobs into the mask.
    cvbCvFilterLabels(Ptr, mask, blobs);
    return (mask);
}
/// <summary>
/// Draws 1-pixel rectangle outlines on the image for every coordinate set in
/// <paramref name="sign_coords"/>. Each entry is {x0, y0, x1, y1}
/// (top-left / bottom-right corners).
/// </summary>
/// <param name="img">Image drawn on in place (3-channel BGR).</param>
/// <param name="sign_coords">Rectangles to outline.</param>
/// <param name="type">Outline colour: 0 = red, 1 = blue, 2 = green (stored in BGR order).</param>
public static void DrawRectangles(Image <Bgr, byte> img, List <int[]> sign_coords, int type = 0)
{
    unsafe
    {
        MIplImage m = img.MIplImage;
        byte *dataPtr = (byte *)m.imageData.ToPointer(); // Pointer to the image
        int width = img.Width;
        int height = img.Height;
        int nChan = m.nChannels; // number of channels - 3

        // Pick the outline colour in BGR order.
        // (Fixed the misplaced ';' statement terminators of the original and
        // replaced type.Equals(0) — which boxes the int — with plain ==.)
        byte[] colors = new byte[3];
        if (type == 0)
        {
            colors = new byte[] { 0, 0, 255 };
        }
        else if (type == 1)
        {
            colors = new byte[] { 255, 0, 0 };
        }
        else if (type == 2)
        {
            colors = new byte[] { 0, 255, 0 };
        }

        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                foreach (int[] sign in sign_coords)
                {
                    // Pixel lies on the rectangle border:
                    // top/bottom edges (horizontal lines) or
                    // left/right edges (vertical lines).
                    if (x > sign[0] && x < sign[2] && (y == sign[1] || y == sign[3]) ||
                        y > sign[1] && y < sign[3] && (x == sign[0] || x == sign[2]))
                    {
                        // get pixel address and write the colour
                        (dataPtr + y * m.widthStep + x * nChan)[0] = colors[0];
                        (dataPtr + y * m.widthStep + x * nChan)[1] = colors[1];
                        (dataPtr + y * m.widthStep + x * nChan)[2] = colors[2];
                    }
                }
            }
        }
    }
}
/// <summary>
/// Grabs the current tracker frame from the PS Move camera and wraps it as a
/// Bgr image (no pixel copy — the buffer is owned by the native tracker).
/// NOTE(review): the local is named rgb32Image but the data is wrapped as
/// 3-channel Bgr — confirm the tracker's actual pixel format.
/// </summary>
/// <returns>The current camera frame, or null if the camera handle is not open.</returns>
public Image <Bgr, Byte> GetImage()
{
    if (_camera.Handle == IntPtr.Zero)
    {
        return (null);
    }
    IntPtr frame = PsMoveApi.psmove_tracker_get_frame(_camera.Handle);
    MIplImage rgb32Image = (MIplImage)Marshal.PtrToStructure(frame, typeof(MIplImage));
    return (new Image <Bgr, byte>(rgb32Image.width, rgb32Image.height, rgb32Image.widthStep, rgb32Image.imageData));
}
/// <summary>
/// Merges the first two images of imageList1 side by side (separated by a
/// white gutter) into a fixed 405x200 canvas and shows the result in an
/// OpenCV window. Requires at least one item selected in listView1.
/// NOTE(review): the white gutter covers columns [img0.Width, img0.Width+6)
/// but the second image starts at img0.Width+5 — the ranges overlap by one
/// column; confirm the intended gutter width. The local 'mipl' is never used.
/// </summary>
private void button3_Click(object sender, EventArgs e)
{
    if (listView1.SelectedItems.Count == 0)
    {
        // "Please select the images to merge" / "Error" dialog (strings kept verbatim).
        MessageBox.Show("请选择要合并的图像", "错误框", MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
    else
    {
        MIplImage mipl = new MIplImage();
        // Destination canvas: 405x200, 8-bit, 3 channels.
        Image <Bgr, Byte> img = new Image <Bgr, Byte>(405, 200);
        IntPtr ptr = CvInvoke.cvCreateImage(new Size(405, 200), Emgu.CV.CvEnum.IPL_DEPTH.IPL_DEPTH_8U, 3);
        img.Ptr = ptr;
        // Snapshot all thumbnails out of the image list.
        List <Image> images = new List <Image>();
        for (int i = 0; i < imageList1.Images.Count; i++)
        {
            images.Add(imageList1.Images[i]);
        }
        Image <Bgr, Byte> img0 = new Image <Bgr, Byte>(new Bitmap(images[0]));
        MCvScalar scalar = new MCvScalar();
        // Copy the first image pixel by pixel into the left part of the canvas.
        for (int i = 0; i < img0.Height; i++)
        {
            for (int j = 0; j < img0.Width; j++)
            {
                scalar = CvInvoke.cvGet2D(img0.Ptr, i, j);
                CvInvoke.cvSet2D(img.Ptr, i, j, scalar);
            }
        }
        // Paint a white separator strip to the right of the first image.
        for (int i = 0; i < img0.Height; i++)
        {
            for (int j = img0.Width; j < img0.Width + 6; j++)
            {
                scalar.v0 = 255;
                scalar.v1 = 255;
                scalar.v2 = 255;
                CvInvoke.cvSet2D(img.Ptr, i, j, scalar);
            }
        }
        // Copy the second image (if any) into the remaining columns.
        if (images.Count > 1)
        {
            Image <Bgr, Byte> img1 = new Image <Bgr, Byte>(new Bitmap(images[1]));
            for (int i = 0; i < img1.Height; i++)
            {
                for (int j = img0.Width + 5; j < img.Width; j++)
                {
                    scalar = CvInvoke.cvGet2D(img1.Ptr, i, j - img0.Width - 5);
                    CvInvoke.cvSet2D(img.Ptr, i, j, scalar);
                }
            }
        }
        // Show the merged result ("合成" = "Merged"; window title kept verbatim).
        CvInvoke.cvShowImage("合成", img.Ptr);
    }
}
/// <summary>
/// Micro-benchmark: measures the cost of marshalling the MIplImage header
/// from a large Image 10000 times and traces the elapsed milliseconds.
/// </summary>
public void TestMarshalIplImage()
{
    Image <Bgr, Single> image = new Image <Bgr, float>(2041, 1023);
    // Use Stopwatch instead of DateTime.Now subtraction: monotonic and far
    // higher resolution, which matters for a timing loop like this.
    Stopwatch watch = Stopwatch.StartNew();
    for (int i = 0; i < 10000; i++)
    {
        // The assignment is intentionally unused — we are timing the
        // MIplImage property's marshalling cost, not using the result.
        MIplImage img = image.MIplImage;
    }
    watch.Stop();
    Trace.WriteLine(String.Format("Time: {0} milliseconds", watch.Elapsed.TotalMilliseconds));
}
/// <summary>
/// Sharpens a grayscale image with the 4-neighbour Laplacian kernel
/// (5*center - left - right - up - down), clamping results to [0, 255].
/// The 1-pixel border keeps the blank value from CopyBlank(). The result is
/// also stored in imgDst.
/// </summary>
/// <param name="image">8-bit grayscale source image (not modified).</param>
/// <returns>The sharpened image.</returns>
private Image <Gray, Byte> Sharpen(Image <Gray, Byte> image)
{
    Image <Gray, Byte> result = image.CopyBlank(); //copy a blank image
    MIplImage MIpImg = (MIplImage)System.Runtime.InteropServices.Marshal.PtrToStructure(image.Ptr, typeof(MIplImage));
    MIplImage MIpImgResult = (MIplImage)System.Runtime.InteropServices.Marshal.PtrToStructure(result.Ptr, typeof(MIplImage));
    int imageHeight = MIpImg.Height;
    int imageWidth = MIpImg.Width;
    unsafe
    {
        for (int height = 1; height < imageHeight - 1; height++)
        {
            // BUG FIX: the row bases were computed with imageWidth as the
            // stride, while the neighbour rows used WidthStep. IplImage rows
            // are WidthStep bytes apart (WidthStep >= width when rows are
            // padded), so WidthStep must be used for the row base too.
            //current_pixel line
            byte *currentPixel = (byte *)MIpImg.ImageData + MIpImg.WidthStep * height;
            //up_pixel line
            byte *uplinePixel = currentPixel - MIpImg.WidthStep;
            //down_pixel line
            byte *downlinePixel = currentPixel + MIpImg.WidthStep;
            //result current_pixel line
            byte *resultPixel = (byte *)MIpImgResult.ImageData + MIpImgResult.WidthStep * height;
            for (int width = 1; width < imageWidth - 1; width++)
            {
                //5*current_pixel-left_pixel-right_pixel-up_pixel-down_pixel
                int sharpValue = 5 * currentPixel[width] - currentPixel[width - 1] - currentPixel[width + 1] - uplinePixel[width] - downlinePixel[width];
                if (sharpValue < 0)
                {
                    sharpValue = 0; //Gray level 0~255
                }
                if (sharpValue > 255)
                {
                    sharpValue = 255; //Gray level 0~255
                }
                resultPixel[width] = (byte)sharpValue;
            }
        }
    }
    // imageBox2.Image = result;
    imgDst = result;
    return (result);
}
/// <summary>
/// Negative using memory (faster processing): inverts every BGR component of
/// the image in place via direct pointer access, scanning top-left to
/// bottom-right. Only runs for 3-channel images.
/// </summary>
/// <param name="img">Image inverted in place.</param>
internal static void NegativeMemory(Image <Bgr, byte> img)
{
    unsafe
    {
        // direct access to the image memory (sequential), top left -> bottom right
        MIplImage m = img.MIplImage;
        byte * dataPtr = (byte *)m.imageData.ToPointer(); // Pointer to the image
        byte blue, green, red; // removed unused 'gray' local from the original
        int width = img.Width;
        int height = img.Height;
        int nChan = m.nChannels; // number of channels - 3
        int padding = m.widthStep - m.nChannels * m.width; // alignment bytes (padding)
        int x, y;
        if (nChan == 3) // image in RGB
        {
            for (y = 0; y < height; y++)
            {
                for (x = 0; x < width; x++)
                {
                    // read the 3 components
                    blue = dataPtr[0];
                    green = dataPtr[1];
                    red = dataPtr[2];
                    // convert to negative
                    blue = (byte)(255 - (int)blue);
                    red = (byte)(255 - (int)red);
                    green = (byte)(255 - (int)green);
                    // store in the image
                    dataPtr[0] = blue;
                    dataPtr[1] = green;
                    dataPtr[2] = red;
                    // advance the pointer to the next pixel
                    dataPtr += nChan;
                }
                //at the end of the line advance the pointer by the alignment bytes (padding)
                dataPtr += padding;
            }
        }
    }
}
/// <summary>
/// Retrieve the depth map from Kinect (in mm)
/// </summary>
/// <returns>The depth map from Kinect (in mm), or null if no frame is available.</returns>
public Image <Gray, int> RetrieveDepthMap()
{
    IntPtr img = CvInvoke.cvRetrieveFrame(Ptr, (int)OpenNIDataType.DepthMap);
    if (img == IntPtr.Zero)
    {
        return (null);
    }
    MIplImage iplImage = (MIplImage)Marshal.PtrToStructure(img, typeof(MIplImage));

    // Wrap the native buffer directly (no copy) — valid only until the next frame is grabbed.
    Image <Gray, int> res = new Image <Gray, int>(iplImage.width, iplImage.height, iplImage.widthStep, iplImage.imageData);

    //inplace flip the image if necessary
    res._Flip(FlipType);
    return (res);
}
/// <summary>
/// Binarises near-grey pixels of ori_img: a pixel whose three channels all
/// lie within ±15 of their mean is forced to black (mean &lt; 200) or white
/// (mean &gt;= 200); coloured pixels are left untouched. Updates pre_img /
/// show_img and refreshes the result form.
/// </summary>
private static void EdgeFilter()
{
    Image <Rgb, Byte> img = new Image <Rgb, byte>(ori_img);
    MIplImage MIpImg = (MIplImage)System.Runtime.InteropServices.Marshal.PtrToStructure(img.Ptr, typeof(MIplImage));
    unsafe
    {
        int height = img.Height;
        int width = img.Width;
        int point;
        byte *npixel = (byte *)MIpImg.ImageData;
        for (int h = 0; h < height; h++)
        {
            for (int w = 0; w < width; w++)
            {
                point = w * 3; // 3 bytes per pixel within the current row
                // BUG FIX: the original divided three ints by the int 3
                // (integer division), silently dropping the fractional part of
                // the mean before storing it in a double.
                double avg = (npixel[point] + npixel[point + 1] + npixel[point + 2]) / 3.0;
                // "Grey" pixel: every channel within ±15 of the mean.
                if (npixel[point] > avg - 15 && npixel[point] < avg + 15 &&
                    npixel[point + 1] > avg - 15 && npixel[point + 1] < avg + 15 &&
                    npixel[point + 2] > avg - 15 && npixel[point + 2] < avg + 15)
                {
                    if (avg < 200)
                    {
                        npixel[point] = 0;
                        npixel[point + 1] = 0;
                        npixel[point + 2] = 0;
                    }
                    else
                    {
                        npixel[point] = 255;
                        npixel[point + 1] = 255;
                        npixel[point + 2] = 255;
                    }
                }
            }
            // advance to the next row (WidthStep accounts for row padding)
            npixel = npixel + MIpImg.WidthStep;
        }
    }
    //img.Save("edge_" + count + ".png");
    //ori_img = img.Bitmap;
    pre_img = show_img;
    show_img = img.Bitmap;
    rform.UpdateImage(show_img);
}
/// <summary>
/// Camera SDK frame callback: displays the incoming frame and, while
/// recording, pushes it back to the SDK recorder.
/// NOTE(review): pFrameBuffer is reinterpreted as an IplImage header via
/// PtrToStructure — confirm the SDK really delivers an IplImage here rather
/// than a raw pixel buffer described by pFrameHead.
/// </summary>
/// <param name="Grabber">Native grabber handle (unused here).</param>
/// <param name="pFrameBuffer">Pointer to the frame data.</param>
/// <param name="pFrameHead">Frame header supplied by the SDK.</param>
/// <param name="Context">User context pointer (unused here).</param>
private void CameraGrabberFrameCallback0(
    IntPtr Grabber,
    IntPtr pFrameBuffer,
    ref tSdkFrameHead pFrameHead,
    IntPtr Context)
{
    MIplImage IpImg = (MIplImage)Marshal.PtrToStructure(pFrameBuffer, typeof(MIplImage));
    //Mat mat = MIpImg.
    // Wrap the pixel data as a Mat and show it.
    Mat mat = CvInvoke.CvArrToMat(IpImg.ImageData);
    captureImageBox.Image = mat;
    if (m_bRecording)
    {
        // Forward the frame to the recorder on camera 0.
        MvApi.CameraPushFrame(m_hCamera[0], pFrameBuffer, ref pFrameHead);
    }
}
/// <summary>
/// Placeholder for an OpenCL-accelerated negative filter — the actual
/// implementation has not been written yet; currently it only sets up pointers
/// and sizes and swallows any exception into a message box.
/// </summary>
/// <param name="img">Image that would be inverted in place.</param>
internal static unsafe void NegativeImage(Image <Bgr, byte> img)
{
    try
    {
        // ADD YOUR CODE HERE
        MIplImage m = img.MIplImage;
        byte * dataPtr = (byte *)m.imageData.ToPointer(); // pointer to the pixel data (currently unused)
        int width = img.Width;
        int height = img.Height;
        int padding = m.widthStep - m.nChannels * m.width; // row alignment bytes (currently unused)
        // Sketch of the intended OpenCL buffer setup, kept for reference:
        //GCHandle dataGC = GCHandle.Alloc(dataPtr, GCHandleType.Pinned);
        //CLMem bufferFilter = OpenCLDriver.clCreateBuffer(ctx, CLMemFlags.ReadWrite | CLMemFlags.CopyHostPtr, new SizeT(arr.Length * sizeof(float)), arrGC.AddrOfPinnedObject(), ref err);
    }
    catch (Exception exc)
    {
        // NOTE(review): swallowing every exception into a MessageBox hides
        // failures from callers; reconsider once the filter is implemented.
        MessageBox.Show(exc.ToString());
    }
}
private void border1pxToolStripMenuItem_Click(object sender, EventArgs e)
{
    if (img == null) // verify if the image is already opened
    {
        return;
    }
    Cursor = Cursors.WaitCursor; // clock cursor

    // NOTE(review): the original also read imgUndo.MIplImage into an unused
    // local (removed). If the copy is meant as an undo snapshot it should be
    // stored in a form-level field — confirm against the rest of the form.
    Image imgUndo = img.Copy();

    // Draw a 1-pixel border on the image in place.
    ImageClass.Border(img);

    ImageViewer.Image = img.Bitmap;
    ImageViewer.Refresh(); // refresh image on the screen
    Cursor = Cursors.Default; // normal cursor
}
private void digitosBarraToolStripMenuItem_Click(object sender, EventArgs e)
{
    if (img == null) // verify if the image is already opened
    {
        return;
    }
    Cursor = Cursors.WaitCursor; // clock cursor

    // NOTE(review): the original declared an unused int[] matrix and an unused
    // MIplImage local, and the actual bar-digit processing
    // (ImageClass.RectangleIterative / ImageClass.DigitosBarra) was commented
    // out — this handler currently only redraws the image. Restore the
    // processing calls when they are ready.
    Image imgUndo = img.Copy();

    ImageViewer.Image = img.Bitmap;
    ImageViewer.Refresh(); // refresh image on the screen
    Cursor = Cursors.Default; // normal cursor
}
/// <summary>
/// Remaps the input image through the precomputed XD_Table / YD_Table lookup
/// tables: output pixel (x, y) is read from source position
/// (XD_Table[y, x], YD_Table[y, x]).
/// NOTE(review): source addressing assumes contiguous rows
/// (WidthStep == Width for this 8-bit image) — confirm for padded widths.
/// </summary>
/// <param name="inputImg">8-bit grayscale source image (not modified).</param>
/// <param name="aFocalLinPixels">Focal length in pixels (unused here; the tables are precomputed).</param>
/// <returns>The remapped image.</returns>
public Image <Gray, Byte> Test(Image <Gray, Byte> inputImg, double aFocalLinPixels)
{
    Image <Gray, Byte> resultImg = inputImg.CopyBlank();
    MIplImage MIpImg1 = (MIplImage)System.Runtime.InteropServices.Marshal.PtrToStructure(inputImg.Ptr, typeof(MIplImage));
    MIplImage MIpImg2 = (MIplImage)System.Runtime.InteropServices.Marshal.PtrToStructure(resultImg.Ptr, typeof(MIplImage));
    int imageHeight = MIpImg1.Height;
    int imageWidth = MIpImg1.Width;
    unsafe
    {
        byte *srcBase = (byte *)MIpImg1.ImageData;
        byte *npixel2 = (byte *)MIpImg2.ImageData;
        for (int y = 0; y < imageHeight; y++)
        {
            for (int x = 0; x < imageWidth; x++)
            {
                // BUG FIX: the original cast the computed offset itself to a
                // pointer (reading arbitrary memory, likely an access
                // violation) and applied it one pixel late; address the source
                // relative to the image base, before the copy, instead.
                byte *npixel1 = srcBase + imageWidth * YD_Table[y, x] + XD_Table[y, x];
                npixel2[0] = npixel1[0];
                npixel2++;
            }
        }
    }
    return (resultImg);
}
/// <summary>
/// Capture a Gray image frame
/// </summary>
/// <returns>A Gray image frame, or null if no frame could be captured.</returns>
public virtual Image <Gray, Byte> QueryGrayFrame()
{
    IntPtr img = CvInvoke.cvQueryFrame(Ptr);
    // BUG FIX: cvQueryFrame returns NULL at end-of-stream or on failure; the
    // original passed it straight to PtrToStructure, which throws. Return null
    // instead, consistent with the sibling Retrieve*Frame methods.
    if (img == IntPtr.Zero)
    {
        return (null);
    }
    MIplImage iplImage = (MIplImage)Marshal.PtrToStructure(img, typeof(MIplImage));

    Image <Gray, Byte> res;
    if (iplImage.nChannels == 3)
    {
        //if the image captured is Bgr, convert it to Grayscale
        res = new Image <Gray, Byte>(iplImage.width, iplImage.height);
        CvInvoke.cvCvtColor(img, res.Ptr, Emgu.CV.CvEnum.COLOR_CONVERSION.CV_BGR2GRAY);
    }
    else
    {
        // Wrap the capture buffer directly (no copy) — valid only until the next frame.
        res = new Image <Gray, byte>(iplImage.width, iplImage.height, iplImage.widthStep, iplImage.imageData);
    }

    //inplace flip the image if necessary
    res._Flip(FlipType);
    return (res);
}