/// <summary>
/// Converts an OpenCV Mat to an NDArray, dispatching on the element type T.
/// NOTE(review): only the Int32 path actually reads pixel data. The Byte path's
/// GetArray call is commented out (the Vec3b buffer is returned zero-filled) and
/// the Single path always throws — presumably work in progress; confirm before use.
/// </summary>
private static NDArray MatToNdarray <T>(Mat mat) where T : struct {
    switch (typeof(T).Name) {
    case "Byte": {
        var data = new Vec3b[mat.Total()];
        // Pixel copy disabled — 'data' stays all zeros.
        //mat.GetArray(0, 0, data);
        return(Vec3bToNdarray(data, mat.Height, mat.Width, mat.Channels()));
    }
    case "Int32":
        unsafe {
            var data = new Vec3i[mat.Total()];
            // NOTE(review): *position is the first index (the row), not a flat
            // element index — for a 2-D mat this only fills data[0..rows-1],
            // each slot being overwritten once per column. Verify intent.
            mat.ForEachAsVec3i((value, position) => data[*position] = *value);
            return(Vec3iToNdarray(data, mat.Height, mat.Width, mat.Channels()));
        }
    case "Single":
        unsafe {
            // NOTE(review): allocates Vec3d (not Vec3f) and then throws — unimplemented.
            var data = new Vec3d[mat.Total()];
            //mat.GetArray(0, 0, data);
            throw new NotImplementedException("");
        }
    default:
        throw new NotImplementedException("");
    }
}
//int ph_image_digest(const CImg<uint8_t> &img, double sigma, double gamma, // Digest &digest, int N)
/// <summary>
/// Computes the pHash radial-variance digest of an image (port of ph_image_digest).
/// </summary>
/// <param name="img">Input image with 1 or 3+ channels; not modified.</param>
/// <param name="sigma">Gaussian blur sigma.</param>
/// <param name="gamma">Gamma exponent applied after max-normalization.</param>
/// <param name="N">Number of radon projection angles.</param>
/// <returns>The DCT digest of the radon feature vector.</returns>
/// <exception cref="ArgumentException">Thrown when the image has 0 or 2 channels.</exception>
internal static Digest ph_image_digest(Mat img, double sigma, double gamma, int N)
{
    Mat graysc;
    if (img.Channels() >= 3)
    {
        // Use the luma component, mirroring the C++ original's RGBtoYCbCr channel 0.
        graysc = Utils.GetBrightnessComponent(img);
    }
    else if (img.Channels() == 1)
    {
        graysc = img;
    }
    else
    {
        throw new ArgumentException("Image should have 1 or 3 channels", nameof(img));
    }

    // FIX: the instance GaussianBlur extension returns a *new* Mat; the previous
    // code discarded that result, so the blur was never applied. Assigning it also
    // stops the single-channel path from mutating the caller's img.
    graysc = graysc.GaussianBlur(Size.Zero, sigma, sigma, BorderTypes.Replicate);

    // (graysc / graysc.max()).pow(gamma)
    graysc.MinMaxLoc(out double _, out double max_val);
    graysc /= max_val;
    Cv2.Pow(graysc, gamma, graysc);

    var projs = ph_radon_projections(graysc, N);
    var features = ph_feature_vector(projs);
    var digest = ph_dct(features);
    return(digest);
}
/// <summary>
/// Builds a PixelFormat descriptor for a Mat; supports 8-bit unsigned and
/// 32-bit float images with 1, 3 or 4 channels.
/// </summary>
public PixelFormat GetPixelFormat(Mat image)
{
    int nChannels = image.Channels();
    var matType = image.Type();
    var ranges = new Range <double> [nChannels];
    var pixelChannels = nChannels == 1 ? PixelChannels.Gray : PixelChannels.Unknown;

    bool is8U = matType.Equals(MatType.MakeType(MatType.CV_8U, 1)) ||
                matType.Equals(MatType.MakeType(MatType.CV_8U, 3)) ||
                matType.Equals(MatType.MakeType(MatType.CV_8U, 4));
    if (is8U)
    {
        for (int i = 0; i < nChannels; ++i)
        {
            ranges[i] = new Range <double>(byte.MinValue, byte.MaxValue);
        }
        return new PixelFormat((PixelType)((int)ChannelType.U8 | nChannels),
            pixelChannels, typeof(byte), ranges, ColorSpace.Unknown);
    }

    bool is32F = matType.Equals(MatType.MakeType(MatType.CV_32F, 1)) ||
                 matType.Equals(MatType.MakeType(MatType.CV_32F, 3)) ||
                 matType.Equals(MatType.MakeType(MatType.CV_32F, 4));
    if (is32F)
    {
        for (int i = 0; i < nChannels; ++i)
        {
            ranges[i] = new Range <double>(float.MinValue, float.MaxValue);
        }
        return new PixelFormat((PixelType)((int)ChannelType.F32 | nChannels),
            pixelChannels, typeof(float), ranges, ColorSpace.Unknown);
    }

    throw new Exception("PixelFormat is not supported.");
}
/// <summary>
/// Converts an 8-bit Mat (1 or 3 channels) to a System.Drawing.Bitmap that wraps
/// the Mat's pixel buffer directly (no pixel copy). Returns null for unsupported
/// depth or channel counts.
/// </summary>
private System.Drawing.Bitmap ConvertMatToBitmap(Mat cvImg)
{
    // Check bit depth (original: 检查图像位深) — only 8-bit unsigned is supported.
    if (cvImg.Depth() != MatType.CV_8U)
    {
        return null;
    }

    System.Drawing.Imaging.PixelFormat pf;
    switch (cvImg.Channels())
    {
        case 3: // color image (彩色图像)
            pf = System.Drawing.Imaging.PixelFormat.Format24bppRgb;
            break;
        case 1: // grayscale image (灰度图像)
            pf = System.Drawing.Imaging.PixelFormat.Format8bppIndexed;
            break;
        default:
            return null;
    }

    return new System.Drawing.Bitmap(cvImg.Cols, cvImg.Rows, (int)cvImg.Step(), pf, cvImg.Data);
}
/// <summary>
/// Height, Width, Channels — the channel dimension is omitted for
/// single-channel images.
/// </summary>
public static int[] Shape(Mat mat)
{
    int c = mat.Channels();
    return c > 1
        ? new int[] { mat.Height, mat.Width, c }
        : new int[] { mat.Height, mat.Width };
}
/// <summary>
/// Wraps a Mat's sample data in a new AudioFrame.
/// NOTE(review): assumes a planar Mat stores one channel per row (so Step() is
/// one plane) and an interleaved Mat fits all channels in the first Step() bytes
/// — confirm against the Mat layout produced by the companion conversion.
/// </summary>
/// <param name="mat">Mat holding the audio samples.</param>
/// <param name="srctFormat">FFmpeg sample format of the data.</param>
/// <param name="sampleRate">Sample rate in Hz.</param>
private static AudioFrame MatToAudioFrame(Mat mat, AVSampleFormat srctFormat, int sampleRate)
{
    // Multi-channel Mat → channel count from channels; otherwise one channel per row.
    int channels = mat.Channels() > 1 ? mat.Channels() : mat.Height;
    AudioFrame frame = new AudioFrame(srctFormat, channels, mat.Width, sampleRate);
    bool isPlanar = ffmpeg.av_sample_fmt_is_planar(srctFormat) > 0;
    int stride = (int)mat.Step();
    // Planar: copy each plane (row) into its own frame.Data[i];
    // interleaved: a single stride-sized copy into frame.Data[0].
    for (int i = 0; i < (isPlanar ? channels : 1); i++)
    {
        FFmpegHelper.CopyMemory(frame.Data[i], mat.Data + i * stride, (uint)stride);
    }
    return(frame);
}
/// <summary>
/// Converts an image to grayscale into 'gray': 3-channel via RGB2GRAY,
/// 4-channel via RGBA2GRAY, 1-channel is copied as-is. Other channel counts
/// leave 'gray' untouched.
/// </summary>
static void GetGray(Mat image, Mat gray)
{
    switch (image.Channels())
    {
        case 3:
            Cv2.CvtColor(image, gray, ColorConversionCodes.RGB2GRAY);
            break;
        case 4:
            Cv2.CvtColor(image, gray, ColorConversionCodes.RGBA2GRAY);
            break;
        case 1:
            image.CopyTo(gray);
            break;
    }
}
// Builds a black/white Mat from the buffer (original: bufferをもとに白黒Matデータ作成).
// A source byte of exactly 255 maps every channel of that pixel to 255,
// anything else maps to 0.
private void KinectImagetoMat(Mat mat, byte[] buffer)
{
    int channel = mat.Channels();
    int depth = mat.Depth();
    unsafe
    {
        byte *matPtr = mat.DataPointer;
        int pixelCount = this.imageWidth * this.imageHeight;
        for (int i = 0; i < pixelCount; i++)
        {
            byte v = buffer[i] == 255 ? (byte)255 : (byte)0;
            for (int j = 0; j < channel; j++)
            {
                *(matPtr + i * channel + j) = v;
            }
        }
    }
}
//! [scan-iterator]
//! [scan-random]
/// <summary>
/// Applies a lookup table to every pixel using random (At/Get/Set) access.
/// Handles 1- and 3-channel 8-bit images; other channel counts are left untouched.
/// </summary>
public static Mat ScanImageAndReduceRandomAccess(Mat I, byte[] table)
{
    int rows = I.Rows;
    int cols = I.Cols;
    switch (I.Channels())
    {
        case 1:
            for (int r = 0; r < rows; ++r)
            {
                for (int c = 0; c < cols; ++c)
                {
                    I.Set(r, c, table[I.At <byte>(r, c)]);
                }
            }
            break;
        case 3:
            for (int r = 0; r < rows; ++r)
            {
                for (int c = 0; c < cols; ++c)
                {
                    Vec3b px = I.Get <Vec3b>(r, c);
                    px.Item0 = table[px.Item0];
                    px.Item1 = table[px.Item1];
                    px.Item2 = table[px.Item2];
                    I.Set <Vec3b>(r, c, px);
                }
            }
            break;
    }
    return I;
}
/// <summary>
/// Applies a lookup table to every byte of the image via raw pointer access.
/// When the data is continuous the whole buffer is scanned as one long row.
/// </summary>
public static Mat ScanImageAndReduceC(Mat I, byte[] table)
{
    int rowCount = I.Rows;
    int colCount = I.Cols * I.Channels();
    if (I.IsContinuous())
    {
        // Flatten: one pass over the entire buffer.
        colCount *= rowCount;
        rowCount = 1;
    }

    unsafe
    {
        for (int r = 0; r < rowCount; ++r)
        {
            byte *row = (byte *)I.Ptr(r).ToPointer();
            for (int c = 0; c < colCount; ++c)
            {
                row[c] = table[row[c]];
            }
        }
    }
    return I;
}
/// <summary>
/// Applies a lookup table to every pixel using Mat.ForEach* — OpenCVSharp has no
/// "iterator" equivalent, so ForEach visits each element instead.
/// <para>
/// The callback receives pv, a pointer to the element value, and pp, an array of
/// indices: pp[0] row, pp[1] column, pp[2] z-index.
/// </para>
/// </summary>
/// <param name="I">8-bit image with 1 or 3 channels.</param>
/// <param name="table">256-entry lookup table.</param>
/// <returns>The same Mat, modified in place.</returns>
public static Mat ScanImageAndReduceForEach(Mat I, byte[] table)
{
    int channels = I.Channels();
    if (channels == 1)
    {
        unsafe
        {
            I.ForEachAsByte((pv, pp) => *pv = table[*pv]);
        }
    }
    else if (channels == 3)
    {
        unsafe
        {
            I.ForEachAsVec3b((pv, pp) =>
            {
                pv->Item0 = table[pv->Item0];
                pv->Item1 = table[pv->Item1];
                pv->Item2 = table[pv->Item2];
            });
        }
    }
    return I;
}
/// <summary>
/// Crops a text region out of the image, converts it to grayscale, writes it to a
/// cache file on disk, and runs OCR on that file.
/// (Original note: each sharpen pass would rewrite the image file.)
/// </summary>
/// <param name="input_img">Source image the text was segmented from (original size).</param>
/// <param name="x">Left coordinate of the crop.</param>
/// <param name="y">Top coordinate of the crop.</param>
/// <param name="width">Crop width (to the right).</param>
/// <param name="height">Crop height (downward).</param>
/// <returns>Recognized text with spaces removed, or "无法识别" ("unrecognizable") when OCR returns nothing.</returns>
public static string Seg_img_to_text(Mat input_img, double x, double y, int width, int height)
{
    //x *= input_img.Width; y *= input_img.Height;
    //0.09 0.262
    //width = 71; height = 32;
    /* Cache-file workaround (original comment: "caching — a clumsy approach, but
       what is the byte in Pix.LoadFromMemory anyway?").
       NOTE(review): hard-coded absolute path; this breaks on any other machine. */
    string imgPath = @"D:\XD\1-dis\pic\cache\seged_image.jpg";
    // 3x3 sharpening kernel — currently unused; the Filter2D call below is commented out.
    using (InputArray kernel = InputArray.Create <double>(new double[3, 3] { { -1, -1, -1 }, { -1, 9, -1 }, { -1, -1, -1 } }))
    using (Mat dst = new Mat(input_img, new OpenCvSharp.Rect((int)x, (int)y, width, height)))
    {
        // If inaccurate, draw the bounding rectangle instead (original note).
        //https://www.itdaan.com/tw/46038a31b3020fce189dd3f30699ac0e
        if (dst.Channels() != 1)
        {
            Cv2.CvtColor(dst, dst, ColorConversionCodes.BGR2GRAY);
        }
        //OTSU = Cv2.Threshold(dst, dst, 200, 255, ThresholdTypes.Otsu);
        //Cv2.Resize(dst, dst, new Size(dst.Width * 5, dst.Height * 5), 0, 0, InterpolationFlags.Cubic);
        //Cv2.Filter2D(dst, dst, dst.Depth(), kernel, new Point(-1, -1), 0);
        //Cv2.Threshold(dst, dst, 125, 255, ThresholdTypes.Binary);
        Cv2.ImWrite(imgPath, dst);
    }
    string strResult = ImageToText(imgPath);
    if (string.IsNullOrEmpty(strResult))
    {
        strResult = "无法识别";
    }
    // Strip interior spaces (original note: why does Chinese OCR output spaces?).
    strResult = strResult.Replace(" ", "");
    return(strResult);
}
/// <summary>
/// For each candidate column x in lmerged, measures the per-pixel difference
/// between the columns at (x - offset) and (x + offset) over the full image
/// height, and returns the x values whose normalized difference is at least
/// diffPixelRate.
/// NOTE(review): assumes offset &lt;= x &lt; width - offset for every candidate;
/// out-of-range values would read outside the row — confirm with callers.
/// </summary>
private unsafe static IEnumerable <int> Y(Mat mat, IEnumerable <int> lmerged, double diffPixelRate, int offset)
{
    List <int> ret = new List <int>();
    byte * p = (byte *)mat.Data.ToPointer();
    int width = mat.Width;
    int height = mat.Height;
    long step = mat.Step();          // bytes per row (may include padding)
    int channels = mat.Channels();
    foreach (var x in lmerged)
    {
        double totalDistance = 0;
        for (int y = 0; y < height; ++y)
        {
            for (int c = 0; c < channels; ++c)
            {
                byte left = *(p + (y * step) + ((x - offset) * channels) + c);
                byte right = *(p + (y * step) + ((x + offset) * channels) + c);
                totalDistance += Math.Abs(left - right);
            }
        }
        // Normalize by the maximum possible distance: 255 per channel per pixel.
        double distancePerPixel = totalDistance / (255.0 * channels) / height;
        if (distancePerPixel >= diffPixelRate)
        {
            ret.Add(x);
        }
    }
    return(ret);
}
/// <summary>
/// Shows the pixel value at (X, Y) in the status bar
/// (original: 輝度値をステータスバーへ表示).
/// </summary>
/// <param name="mat">Image to sample; ignored when null or without data.</param>
/// <param name="X">X coordinate in the image.</param>
/// <param name="Y">Y coordinate in the image.</param>
private void DrawBrightValue(Mat mat, int X, int Y)
{
    // Do nothing when there is no image data.
    if (mat == null)
    {
        return;
    }
    if (mat.Data == IntPtr.Zero)
    {
        return;
    }

    string lblText = "(" + X.ToString() + ", " + Y.ToString() + ") = ";
    if (mat.Channels() == 1)
    {
        // Monochrome: a single byte value.
        lblText += mat.At <byte>(Y, X).ToString();
    }
    else
    {
        // Color: components printed in reverse stored order (indices 2, 1, 0 —
        // presumably BGR storage shown as R, G, B).
        var px = mat.At <OpenCvSharp.CPlusPlus.Vec3b>(Y, X);
        lblText += "(" + px[2].ToString() + ", " + px[1].ToString() + ", " + px[0].ToString() + ")";
    }
    lblBright.Text = lblText;
}
/// <summary>
/// Detects circular spots via the Hough circle transform (converting to
/// grayscale first when needed) and returns them as a sorted Spot list.
/// </summary>
public static List <Spot> Circles(Mat img)
{
    var gray = img.Clone();
    if (img.Channels() > 1)
    {
        Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY);
    }
    //Cv2.Flip(gray, gray, FlipMode.Y);

    var circles = Cv2.HoughCircles(gray, HoughModes.Gradient, 1, 10, 70, 10, 3, 10);
    List <Spot> spots = new List <Spot>();
    foreach (var circle in circles)
    {
        var center = new System.Windows.Point(circle.Center.X, circle.Center.Y);
        // Spot diameter = twice the detected radius.
        spots.Add(new Spot(center, circle.Radius * 2, Brushes.Yellow));
    }
    return PatternAnalyser.SortList(spots, true);
}
/// <summary>
/// Detects keypoints and computes descriptors for a single-channel image.
/// </summary>
/// <returns><c>true</c>, if features were extracted, <c>false</c> otherwise.</returns>
/// <param name="image">Input image; must be non-empty and single-channel.</param>
/// <param name="keypoints">Receives the detected keypoints.</param>
/// <param name="descriptors">Receives the computed descriptors.</param>
bool extractFeatures(Mat image, ref KeyPoint[] keypoints, ref Mat descriptors)
{
    // Reject empty or non-grayscale input.
    if (image.Total() == 0)
    {
        return false;
    }
    if (image.Channels() != 1)
    {
        return false;
    }

    keypoints = m_detector.Detect(image, null);
    if (keypoints.Length == 0)
    {
        return false;
    }

    m_extractor.Compute(image, ref keypoints, descriptors);
    //m_detector.DetectAndCompute(image, ref keypoints, descriptors);
    // m_extractor.compute (image, keypoints, descriptors);

    // Compute may drop keypoints; fail when none survived.
    return keypoints.Length != 0;
}
/// <summary>
/// Crops a fixed 1100x1080 region (starting at x=500) out of outimg into the
/// 'crop' and 'outputimg' fields, ensures outputimg has 3 channels, then runs
/// the thread/Holes1 processing steps. Exceptions are logged, not rethrown.
/// </summary>
void crop_image(Mat outimg)
{
    try
    {
        // FIX: removed a dead full copy of outimg ('copy') that was allocated,
        // written via CopyTo, and never read (and never disposed).
        OpenCvSharp.Rect rect = new OpenCvSharp.Rect(500, 0, 1100, 1080);
        crop = new Mat(outimg, rect);
        crop.CopyTo(outputimg);
        // Cv2.ImWrite("crop" + ".bmp", crop);
        if (outputimg.Channels() == 1)
        {
            Cv2.CvtColor(outputimg, outputimg, ColorConversionCodes.GRAY2BGR);
        }
        thread(crop);
        Holes1(crop);
    }
    catch (Exception Ex)
    {
        //MessageBox.Show(Ex.Message.ToString());
        log.Error("Error Message: " + Ex.Message.ToString(), Ex);
    }
}
/// <summary>
/// Reduces color count in the image
/// </summary>
/// <param name="source">Source image; left unmodified (a clone is processed).</param>
/// <param name="div">Output colors count</param>
/// <returns>A new Mat with every channel value snapped to the middle of its div-sized bucket.</returns>
public static unsafe Mat ColorReduced(this Mat source, int div = 64)
{
    // prepare
    Mat image = source.Clone();

    // FIX (perf): Channels() was previously re-queried for every pixel of every
    // row; it is invariant, so hoist it out of the loops.
    int channels = image.Channels();

    // "manual" implementation: walk each row with a raw byte pointer.
    for (int j = 0; j < image.Rows; j++)
    {
        // get the address of row j
        byte *b = (byte *)image.Ptr(j).ToPointer();
        for (int i = 0; i < image.Cols; i++)
        {
            for (int c = 0; c < channels; c++, b++)
            {
                *b = (byte)(*b / div * div + div / 2);
            }
        }
    }

    // this should run in parallel when possible
    //image.ForEachAsByte((byte* b, int* p) => *b = (byte)(*b / div * div + div / 2));

    return image;
}
/// <summary>
/// OpenCVのMatをSystem.Drawing.Bitmapに変換する
/// </summary>
/// <param name="src">変換するMat</param>
/// <returns>System.Drawing.Bitmap</returns>
#else
/// <summary>
/// Converts Mat to System.Drawing.Bitmap
/// </summary>
/// <param name="src">Mat</param>
/// <returns></returns>
#endif
public static Bitmap ToBitmap(this Mat src)
{
    if (src == null)
    {
        throw new ArgumentNullException(nameof(src));
    }

    // Map the channel count to the matching GDI+ pixel format.
    PixelFormat pf;
    int channels = src.Channels();
    if (channels == 1)
    {
        pf = PixelFormat.Format8bppIndexed;
    }
    else if (channels == 3)
    {
        pf = PixelFormat.Format24bppRgb;
    }
    else if (channels == 4)
    {
        pf = PixelFormat.Format32bppArgb;
    }
    else
    {
        throw new ArgumentException("Number of channels must be 1, 3 or 4.", nameof(src));
    }
    return ToBitmap(src, pf);
}
// Image binarization (original: 画像二値化).
/// <summary>
/// Produces a binary mask: pixels whose H/S/V components are all strictly within
/// ±range of colorHsv become 255, everything else 0.
/// </summary>
Mat Converter(Mat colorImage, Scalar colorHsv, int range)
{
    int channel = colorImage.Channels();
    int imageW = colorImage.Width;
    int imageH = colorImage.Height;

    // FIX: Mat.CvtColor returns a *new* Mat rather than converting in place;
    // the previous code discarded the result, so the HSV comparison below
    // actually ran against the original BGR data.
    colorImage = colorImage.CvtColor(OpenCvSharp.ColorConversion.BgrToHsv);

    Mat grayImage = new Mat(imageH, imageW, MatType.CV_8UC1);
    unsafe
    {
        byte *matPtr = grayImage.DataPointer;
        byte *colorPtr = colorImage.DataPointer;
        for (int i = 0; i < imageW * imageH; i++)
        {
            // All three components must lie strictly inside the ±range window.
            bool match =
                *(colorPtr + i * channel) < (colorHsv.Val0 + range) && *(colorPtr + i * channel) > (colorHsv.Val0 - range) &&
                (*(colorPtr + i * channel + 1) < (colorHsv.Val1 + range) && *(colorPtr + i * channel + 1) > (colorHsv.Val1 - range)) &&
                (*(colorPtr + i * channel + 2) < (colorHsv.Val2 + range) && *(colorPtr + i * channel + 2) > (colorHsv.Val2 - range));
            *(matPtr + i) = match ? (byte)255 : (byte)0;
        }
    }
    return(grayImage);
}
/// <summary>
/// Produces an inverted binary mask from the color image: 0 where the first
/// channel of a pixel is non-zero, 255 where it is zero.
/// </summary>
Mat Converter(Mat colorImage)
{
    int channel = colorImage.Channels();
    int imageW = colorImage.Width;
    int imageH = colorImage.Height;
    //colorImage.CvtColor(OpenCvSharp.ColorConversion.BgrToHsv);
    Mat grayImage = new Mat(imageH, imageW, MatType.CV_8UC1);
    unsafe
    {
        byte *matPtr = grayImage.DataPointer;
        byte *colorPtr = colorImage.DataPointer;
        for (int i = 0; i < imageW * imageH; i++)
        {
            // FIX: removed a dead local ('red', an average of the first two
            // channels) that was computed every iteration but never read.
            if (0 < *(colorPtr + i * channel))
            {
                *(matPtr + i) = 0;
            }
            else
            {
                *(matPtr + i) = 255;
            }
        }
    }
    return(grayImage);
}
/// <summary>
/// Copies a Mat's pixel buffer into a new ImageSharp Image; the pixel type is
/// chosen by channel count (1 → L8, 3 → Rgb24, 4 → Argb32).
/// NOTE(review): no channel reordering is performed — a BGR(A) Mat is loaded
/// byte-for-byte; confirm the expected channel order with callers.
/// </summary>
public static unsafe Image ToImage(this Mat src)
{
    if (src == null)
    {
        throw new ArgumentNullException(nameof(src));
    }

    var byteCount = (int)(src.DataEnd.ToInt64() - src.Data.ToInt64());
    var pixels = new Span<byte>(src.Data.ToPointer(), byteCount);
    int w = src.Width;
    int h = src.Height;

    switch (src.Channels())
    {
        case 1:
            return Image.LoadPixelData<L8>(pixels, w, h);
        case 3:
            return Image.LoadPixelData<Rgb24>(pixels, w, h);
        case 4:
            return Image.LoadPixelData<Argb32>(pixels, w, h);
        default:
            throw new ArgumentException("Number of channels must be 1, 3 or 4.", nameof(src));
    }
}
/// <summary>
/// Computes the morphological skeleton of a single-channel image: repeatedly
/// erode the working image and accumulate the difference between it and its
/// opening until no foreground pixels remain.
/// </summary>
public Mat MakeSkeleton(
    [InputPin(Description = "", PropertyMode = PropertyMode.Never)] Mat image
)
{
    if (image.Channels() != 1)
    {
        throw new Exception("Input image should be a single channel image.");
    }

    Mat img = image.Clone();
    Mat skel = Mat.Zeros(img.Size(), MatType.CV_8UC1);
    Mat element = OpenCvWrapper.GetStructuringElement(new Size(3, 3), MorphShapes.Cross);

    while (true)
    {
        Mat eroded = img.Erode(element);
        Mat opened = eroded.Dilate(element);
        Mat diff = OpenCvWrapper.Subtract(img, opened);
        skel = OpenCvWrapper.BitwiseOr(skel, diff);
        eroded.CopyTo(img);
        if (OpenCvWrapper.CountNonZero(img) == 0)
        {
            break;
        }
    }
    return skel;
}
/// <summary>
/// Computes the median value of one color channel over a single grid segment of
/// the image. The image is divided into an s_HORIZONTAL_SEGMENT_COUNT x
/// s_VERTICAL_SEGMENT_COUNT grid; xOffset/yOffset select the segment and
/// channelOffset selects the channel.
/// </summary>
private unsafe static CoverSegmentColorInfo Median(Mat mat, int xOffset, int yOffset, int s_HORIZONTAL_SEGMENT_COUNT, int s_VERTICAL_SEGMENT_COUNT, int channelOffset)
{
    byte *p = (byte *)mat.Data.ToPointer();
    int channels = mat.Channels();
    long step = mat.Step();          // bytes per row, may include padding
    int horizontalPixelCount = mat.Width / s_HORIZONTAL_SEGMENT_COUNT;
    int verticalPixelCount = mat.Height / s_VERTICAL_SEGMENT_COUNT;
    int totalPixelCount = horizontalPixelCount * verticalPixelCount;

    // Collect the selected channel of every pixel in the segment into a
    // presized array (replaces the previous List + OrderBy/Skip/Take chain).
    var values = new int[totalPixelCount];
    int n = 0;
    int yInclusiveBegin = yOffset * verticalPixelCount;
    int yExclusiveEnd = (yOffset + 1) * verticalPixelCount;
    int xInclusiveBegin = xOffset * horizontalPixelCount;
    int xExclusiveEnd = (xOffset + 1) * horizontalPixelCount;
    for (int y = yInclusiveBegin; y < yExclusiveEnd; ++y)
    {
        long ystep = y * step;
        for (int x = xInclusiveBegin; x < xExclusiveEnd; ++x)
        {
            values[n++] = *(p + ystep + x * channels + channelOffset);
        }
    }

    // Median = element at index count/2 of the sorted values (upper median for
    // even counts) — identical to the original OrderBy(...).Skip(count/2).First().
    Array.Sort(values);
    return new CoverSegmentColorInfo(values[totalPixelCount / 2]);
}
/// <summary>
/// Asserts that two images have the same type and that their absolute difference
/// stays within abs_error per pixel on average (checked per channel for
/// multi-channel images). Both being null counts as equal.
/// </summary>
protected static void ImageEquals(Mat img1, Mat img2, double abs_error)
{
    if (img1 == null && img2 == null)
    {
        return;
    }
    Assert.NotNull(img1);
    Assert.NotNull(img2);
    Assert.Equal(img1.Type(), img2.Type());

    // Total allowed difference over the whole image.
    double allowedSum = abs_error * img1.Width * img1.Height;
    using (var diff = new Mat())
    {
        Cv2.Absdiff(img1, img2, diff);
        if (img1.Channels() == 1)
        {
            Assert.False(Cv2.Sum(diff).Val0 > allowedSum);
            return;
        }

        var planes = Cv2.Split(diff);
        try
        {
            foreach (var plane in planes)
            {
                Assert.False(Cv2.Sum(plane).Val0 > allowedSum);
            }
        }
        finally
        {
            foreach (var plane in planes)
            {
                plane.Dispose();
            }
        }
    }
}
/// <summary>
/// Loads a bitmap, converts it to a grayscale Mat, and runs the cascade
/// classifier, storing the detected locations. Returns false on any failure.
/// </summary>
public bool Load(Bitmap bitmap)
{
    Bitmap = bitmap;
    try
    {
        Mat = Bitmap.ToMat();
        if (Mat.Channels() > 1)
        {
            // FIX: CvtColor returns a new Mat rather than converting in place;
            // the result was previously discarded, so detection silently ran on
            // the original color image (the commented line below shows the
            // intended chained usage).
            Mat = Mat.CvtColor(ColorConversionCodes.BGR2GRAY);
        }
        // Mat = bitmap.ToMat().CvtColor(ColorConversionCodes.BGR2GRAY);
        Locations = Classifier.DetectMultiScale(
            Mat,
            Configuration.ScaleFactor,
            Configuration.MinimumNeighbors,
            Configuration.HaarDetectionType,
            Configuration.MinimumSize,
            Configuration.MaximumSize).ToList();
    }
    catch (Exception)
    {
        // Best-effort load: swallow and report failure via the return value.
        return false;
    }
    IsLoaded = true;
    return true;
}
/// <summary>
/// Converts the input image to YCrCb (unless it is already single-channel) and
/// returns each channel plane as a separate ImageFile.
/// </summary>
public List <ImageFile> CreateYCbGrLayers(byte[] file)
{
    Mat input = ConvertToMat(file);
    var converted = new Mat();
    if (input.Channels() == 1)
    {
        converted = input;
    }
    else
    {
        Cv2.CvtColor(input, converted, ColorConversionCodes.BGR2YCrCb);
    }
    //Cv2.ApplyColorMap(converted, coloredImage, ColormapTypes.Winter);

    var files = new List <ImageFile>();
    foreach (var plane in Cv2.Split(converted))
    {
        files.Add(new ImageFile { File = ConvertToByte(plane) });
    }
    return files;
}
/// <summary>
/// Drag-and-drop handler: loads the dropped file unchanged and, when it has an
/// alpha channel, shows the alpha plane and the RGB part in separate windows;
/// otherwise informs the user the image has no alpha channel.
/// </summary>
private void frmMain_DragDrop(object sender, DragEventArgs e)
{
    var path = ((System.Array)e.Data.GetData(DataFormats.FileDrop)).GetValue(0).ToString();
    Mat m = new Mat(path, ImreadModes.Unchanged);
    if (m.Channels() == 4)
    {
        List <Mat> ms = new List <Mat>(m.Split());
        Mat mrgb = new Mat();
        Cv2.Merge(ms.Take(3).ToArray(), mrgb);
        Cv2.ImShow("Alpha", ms[3]);
        Cv2.ImShow("RGB", mrgb);
        mrgb.Dispose();
        for (int i = 0; i < 4; ++i)
        {
            ms[i].Dispose();
        }
    }
    else
    {
        // "The image has no alpha channel!" (user-facing message, kept in Chinese)
        MessageBox.Show("图片没有Alpha通道!");
    }
    m.Dispose();
}
/// <summary>
/// Convert Mat to NdArray
/// </summary>
/// <param name="mat">OpenCvSharp Mat object</param>
/// <returns>NDArray object; shape (rows, cols) for single-channel mats, otherwise (rows, cols, channels)</returns>
public static NDArray ToNdArray(this Mat mat)
{
    NDArray array = np.zeros(1);
    int channel = mat.Channels();
    // FIX: size the staging buffer by the actual per-channel element size.
    // The previous "cols * rows * channel" only covered 8-bit images, so
    // 16/32/64-bit mats were truncated at the Marshal.Copy below.
    byte[] data = new byte[mat.Cols * mat.Rows * channel * (int)mat.ElemSize1()];
    Marshal.Copy(mat.DataStart, data, 0, data.Length);
    int depth = mat.Depth();
    switch (depth)
    {
        case CV_8U:
            array = new NDArray(data);
            break;
        case CV_8S:
            // NOTE(review): signed 8-bit data is exposed as raw unsigned bytes here.
            array = new NDArray(data);
            break;
        case CV_16U:
            ushort[] UshortData = new ushort[data.Length / 2];
            // FIX: BlockCopy's count is in *bytes*; the previous data.Length / 2
            // copied only half the image.
            Buffer.BlockCopy(data, 0, UshortData, 0, data.Length);
            array = new NDArray(UshortData);
            break;
        case CV_16S:
            short[] shortData = new short[data.Length / 2];
            Buffer.BlockCopy(data, 0, shortData, 0, data.Length);
            array = new NDArray(shortData);
            break;
        case CV_32S:
            int[] intData = new int[data.Length / 4];
            Buffer.BlockCopy(data, 0, intData, 0, data.Length);
            array = new NDArray(intData);
            break;
        case CV_32F:
            float[] floatData = new float[data.Length / 4];
            Buffer.BlockCopy(data, 0, floatData, 0, data.Length);
            array = new NDArray(floatData);
            // array = array.astype(np.float32, false);
            break;
        case CV_64F:
            double[] doubleData = new double[data.Length / 8];
            Buffer.BlockCopy(data, 0, doubleData, 0, data.Length);
            array = new NDArray(doubleData);
            // array = array.astype(np.float64, false);
            break;
        case CV_USRTYPE1:
            array = np.zeros(1);
            throw new Exception("Can not support User Type!");
    }
    array = channel == 1 ? array.reshape(mat.Rows, mat.Cols) : array.reshape(mat.Rows, mat.Cols, channel);
    return(array);
}
/// <summary>
/// Reads the pixel at (r, c) as a System.Drawing.Color. Single-channel mats are
/// returned as a gray color; multi-channel mats are read as a Vec3b in OpenCV's
/// BGR storage order.
/// </summary>
public static Color GetPixel(this Mat mat, int r, int c)
{
    if (mat.Channels() == 1)
    {
        var v = mat.Get <byte>(r, c);
        return Color.FromArgb(v, v, v);
    }

    // FIX: Vec3b holds (B, G, R); the components were previously passed to
    // FromArgb(red, green, blue) in stored order, swapping red and blue.
    var vec3b = mat.Get <Vec3b>(r, c);
    return Color.FromArgb(vec3b.Item2, vec3b.Item1, vec3b.Item0);
}
// Calibration (original comment: キャリブレーション)
/// <summary>
/// Runs one calibration update: clones the incoming source image, refreshes the
/// calibration points, and rebuilds the perspective-projected "back" and "floor"
/// images stored on this instance.
/// </summary>
public void CaliblationUpdate(ShadowPackage proccesedMatPckage)
{
    Mat srcImg = new Mat();  // input image (入力画像)
    srcImg = proccesedMatPckage.srcMat.Clone();
    this.imgChannles = srcImg.Channels();

    #region
    // fragCutImg == 1 presumably flags (re)initialization — rebuild the crop
    // rectangle and cache the image size, then clear the flag. TODO confirm.
    if (this.fragCutImg == 1)
    {
        this.cutRect = new CutRect(srcImg);
        this.fragCutImg = 0;
        this.imageWidth = srcImg.Width;
        this.imageHeight = srcImg.Height;
    }
    #endregion

    // Fetch the calibration coordinates (座標の取得); indices 0-3 appear to be
    // back-in, back-out, floor-in, floor-out — verify against _getPtForCalib.
    this.backIn_Pt = this.changePt(this._getPtForCalib(0));
    this.backOut_Pt = this.changePt(this._getPtForCalib(1));
    this.floorIn_Pt = this.changePt(this._getPtForCalib(2));
    this.floorOut_Pt = this.changePt(this._getPtForCalib(3));
    this.backOut_Pt = this.changePtRange(this.backOut_Pt);
    this.floorOut_Pt = this.changePtRange(this.floorOut_Pt);

    //back
    #region
    this.backIn_Pt = this.changePtRange(this.backIn_Pt);
    this.backDstImg = this.PerspectiveProject(srcImg, this.backIn_Pt, this.backOut_Pt).Clone();
    this.backDstImg = cutRect.CutImage(this.backDstImg,this.backOut_Pt).Clone();
    #endregion

    //floor
    #region
    this.floorIn_Pt = this.changePtRange(this.floorIn_Pt);
    this.floorDstImg = this.PerspectiveProject(srcImg, this.floorIn_Pt, this.floorOut_Pt).Clone();
    this.floorDstImg = cutRect.CutImage(this.floorDstImg, this.floorOut_Pt).Clone();
    #endregion

    srcImg.Dispose();
}
/// <summary>
/// Produces an inverted binary mask from the color image: 0 where the first
/// channel of a pixel is non-zero, 255 where it is zero.
/// </summary>
Mat Converter(Mat colorImage)
{
    int channel = colorImage.Channels();
    int imageW = colorImage.Width;
    int imageH = colorImage.Height;
    //colorImage.CvtColor(OpenCvSharp.ColorConversion.BgrToHsv);
    Mat grayImage = new Mat(imageH, imageW, MatType.CV_8UC1);
    unsafe
    {
        byte* matPtr = grayImage.DataPointer;
        byte* colorPtr = colorImage.DataPointer;
        for (int i = 0; i < imageW * imageH; i++)
        {
            // FIX: removed a dead local ('red', an average of the first two
            // channels) that was computed every iteration but never read.
            if (0 < *(colorPtr + i * channel))
            {
                *(matPtr + i) = 0;
            }
            else
            {
                *(matPtr + i) = 255;
            }
        }
    }
    return grayImage;
}
/// <summary>
/// BitmapSourceをMatに変換する.
/// </summary>
/// <param name="src">変換するBitmapSource</param>
/// <param name="dst">出力先のMat</param>
#else
/// <summary>
/// Converts BitmapSource to Mat
/// </summary>
/// <param name="src">Input BitmapSource</param>
/// <param name="dst">Output Mat</param>
#endif
public static void ToMat(this BitmapSource src, Mat dst)
{
    if (src == null)
        throw new ArgumentNullException("src");
    if (dst == null)
        throw new ArgumentNullException("dst");
    if (src.PixelWidth != dst.Width || src.PixelHeight != dst.Height)
        throw new ArgumentException("size of src must be equal to size of dst");
    if (dst.Dims() > 2)
        throw new ArgumentException("Mat dimensions must be 2");

    int w = src.PixelWidth;
    int h = src.PixelHeight;
    int bpp = src.Format.BitsPerPixel;
    int channels = WriteableBitmapConverter.GetOptimumChannels(src.Format);
    if (dst.Channels() != channels)
    {
        throw new ArgumentException("nChannels of dst is invalid", "dst");
    }

    bool submat = dst.IsSubmatrix();
    bool continuous = dst.IsContinuous();

    unsafe
    {
        byte* p = (byte*)(dst.Data);
        long step = dst.Step();

        // 1bpp: copy manually, bit by bit (original: 1bppは手作業でコピー)
        if (bpp == 1)
        {
            if (submat)
                throw new NotImplementedException("submatrix not supported");

            // Copy the BitmapImage data into an array; each byte packs
            // 8 horizontal pixels (要素1つに横8ピクセル分のデータ).
            int stride = (w / 8) + 1;
            byte[] pixels = new byte[h * stride];
            src.CopyPixels(pixels, stride, 0);
            int x = 0;
            for (int y = 0; y < h; y++)
            {
                int offset = y * stride;
                // examine each byte of this row (この行の各バイトを調べていく)
                for (int bytePos = 0; bytePos < stride; bytePos++)
                {
                    if (x < w)
                    {
                        // unpack the 8 bits of the current byte, MSB first;
                        // set bits become 255, clear bits become 0
                        byte b = pixels[offset + bytePos];
                        for (int i = 0; i < 8; i++)
                        {
                            if (x >= w)
                            {
                                break;
                            }
                            p[step * y + x] = ((b & 0x80) == 0x80) ? (byte)255 : (byte)0;
                            b <<= 1;
                            x++;
                        }
                    }
                }
                // next row (次の行へ)
                x = 0;
            }
        }
        // 8bpp
        /*else if (bpp == 8)
        {
            int stride = w;
            byte[] pixels = new byte[h * stride];
            src.CopyPixels(pixels, stride, 0);
            for (int y = 0; y < h; y++)
            {
                for (int x = 0; x < w; x++)
                {
                    p[step * y + x] = pixels[y * stride + x];
                }
            }
        }*/
        // 24bpp, 32bpp, ...
        else
        {
            int stride = w * ((bpp + 7) / 8);
            if (!submat && continuous)
            {
                // Continuous non-submatrix: one bulk copy into the Mat buffer.
                long imageSize = dst.DataEnd.ToInt64() - dst.Data.ToInt64();
                if (imageSize < 0)
                    throw new OpenCvSharpException("The mat has invalid data pointer");
                if (imageSize > Int32.MaxValue)
                    throw new OpenCvSharpException("Too big mat data");
                src.CopyPixels(Int32Rect.Empty, dst.Data, (int)imageSize, stride);
            }
            else
            {
                // Copy one 1px-high rectangle (≒ one row) at a time
                // (高さ1pxの矩形ごと(≒1行ごと)にコピー).
                var roi = new Int32Rect { X = 0, Y = 0, Width = w, Height = 1 };
                IntPtr dstData = dst.Data;
                for (int y = 0; y < h; y++)
                {
                    roi.Y = y;
                    src.CopyPixels(roi, dstData, stride, stride);
                    dstData = new IntPtr(dstData.ToInt64() + stride);
                }
            }
        }
    }
}
/// <summary>
/// Detects the largest barcode-like region in an image file and OCRs it:
/// grayscale → gradient (Sobel x minus y) → blur/threshold → morphological
/// close → erode/dilate → largest-contour bounding box → crop → getBarcodeText,
/// with a threshold-based retry when the first OCR attempt returns nothing.
/// NOTE(review): shows several windows and blocks on WaitKey(0) at the end —
/// UI side effects are part of the contract.
/// </summary>
/// <param name="fileName">Path of the image to process.</param>
/// <param name="thresh">Binarization threshold applied to the blurred gradient.</param>
/// <param name="debug">When true, shows intermediate images.</param>
/// <param name="rotation">Optional pre-rotation in the units expected by rotateImage.</param>
/// <returns>The decoded barcode text (may be empty when OCR fails).</returns>
/// <exception cref="NotSupportedException">When no contours are found.</exception>
private static string detectBarcode(string fileName, double thresh, bool debug = false, double rotation = 0)
{
    Console.WriteLine("\nProcessing: {0}", fileName);

    // load the image and convert it to grayscale
    var image = new Mat(fileName);
    if (rotation != 0)
    {
        rotateImage(image, image, rotation, 1);
    }
    if (debug)
    {
        Cv2.ImShow("Source", image);
        Cv2.WaitKey(1); // do events
    }

    var gray = new Mat();
    var channels = image.Channels();
    if (channels > 1)
    {
        Cv2.CvtColor(image, gray, ColorConversion.BgrToGray);
    }
    else
    {
        image.CopyTo(gray);
    }

    // compute the Scharr gradient magnitude representation of the images
    // in both the x and y direction
    var gradX = new Mat();
    Cv2.Sobel(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0, ksize: -1);
    //Cv2.Scharr(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0);
    var gradY = new Mat();
    Cv2.Sobel(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1, ksize: -1);
    //Cv2.Scharr(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1);

    // subtract the y-gradient from the x-gradient
    var gradient = new Mat();
    Cv2.Subtract(gradX, gradY, gradient);
    Cv2.ConvertScaleAbs(gradient, gradient);
    if (debug)
    {
        Cv2.ImShow("Gradient", gradient);
        Cv2.WaitKey(1); // do events
    }

    // blur and threshold the image
    var blurred = new Mat();
    Cv2.Blur(gradient, blurred, new Size(9, 9));
    var threshImage = new Mat();
    Cv2.Threshold(blurred, threshImage, thresh, 255, ThresholdType.Binary);
    if (debug)
    {
        Cv2.ImShow("Thresh", threshImage);
        Cv2.WaitKey(1); // do events
    }

    // construct a closing kernel and apply it to the thresholded image
    var kernel = Cv2.GetStructuringElement(StructuringElementShape.Rect, new Size(21, 7));
    var closed = new Mat();
    Cv2.MorphologyEx(threshImage, closed, MorphologyOperation.Close, kernel);
    if (debug)
    {
        Cv2.ImShow("Closed", closed);
        Cv2.WaitKey(1); // do events
    }

    // perform a series of erosions and dilations
    Cv2.Erode(closed, closed, null, iterations: 4);
    Cv2.Dilate(closed, closed, null, iterations: 4);
    if (debug)
    {
        Cv2.ImShow("Erode & Dilate", closed);
        Cv2.WaitKey(1); // do events
    }

    //find the contours in the thresholded image, then sort the contours
    //by their area, keeping only the largest one
    Point[][] contours;
    HiearchyIndex[] hierarchyIndexes;
    Cv2.FindContours(
        closed,
        out contours,
        out hierarchyIndexes,
        mode: ContourRetrieval.CComp,
        method: ContourChain.ApproxSimple);
    if (contours.Length == 0)
    {
        throw new NotSupportedException("Couldn't find any object in the image.");
    }

    // Walk the top-level sibling chain, keeping the largest bounding rectangle.
    var contourIndex = 0;
    var previousArea = 0;
    var biggestContourRect = Cv2.BoundingRect(contours[0]);
    while ((contourIndex >= 0))
    {
        var contour = contours[contourIndex];
        var boundingRect = Cv2.BoundingRect(contour); //Find bounding rect for each contour
        var boundingRectArea = boundingRect.Width * boundingRect.Height;
        if (boundingRectArea > previousArea)
        {
            biggestContourRect = boundingRect;
            previousArea = boundingRectArea;
        }
        contourIndex = hierarchyIndexes[contourIndex].Next;
    }

    /*biggestContourRect.Width += 10;
    biggestContourRect.Height += 10;
    biggestContourRect.Left -= 5;
    biggestContourRect.Top -= 5;*/

    var barcode = new Mat(image, biggestContourRect); //Crop the image
    Cv2.CvtColor(barcode, barcode, ColorConversion.BgrToGray);
    Cv2.ImShow("Barcode", barcode);
    Cv2.WaitKey(1); // do events

    var barcodeClone = barcode.Clone();
    var barcodeText = getBarcodeText(barcodeClone);
    if (string.IsNullOrWhiteSpace(barcodeText))
    {
        // Retry with a fixed two-step threshold when the first OCR pass fails.
        Console.WriteLine("Enhancing the barcode...");
        //Cv2.AdaptiveThreshold(barcode, barcode, 255, //AdaptiveThresholdType.GaussianC, ThresholdType.Binary, 9, 1);
        //var th = 119;
        var th = 100;
        Cv2.Threshold(barcode, barcode, th, 255, ThresholdType.ToZero);
        Cv2.Threshold(barcode, barcode, th, 255, ThresholdType.Binary);
        barcodeText = getBarcodeText(barcode);
    }

    // Outline the detected region on the source image.
    Cv2.Rectangle(image,
        new Point(biggestContourRect.X, biggestContourRect.Y),
        new Point(biggestContourRect.X + biggestContourRect.Width, biggestContourRect.Y + biggestContourRect.Height),
        new Scalar(0, 255, 0),
        2);
    if (debug)
    {
        Cv2.ImShow("Segmented Source", image);
        Cv2.WaitKey(1); // do events
    }

    Cv2.WaitKey(0);
    Cv2.DestroyAllWindows();

    return barcodeText;
}
/// <summary>
/// MatをWriteableBitmapに変換する.
/// 返却値を新たに生成せず引数で指定したWriteableBitmapに格納するので、メモリ効率が良い。
/// </summary>
/// <param name="src">変換するMat</param>
/// <param name="dst">変換結果を設定するWriteableBitmap</param>
#else
/// <summary>
/// Converts Mat to WriteableBitmap.
/// This method is more efficient because new instance of WriteableBitmap is not allocated.
/// </summary>
/// <param name="src">Input Mat</param>
/// <param name="dst">Output WriteableBitmap</param>
#endif
public static void ToWriteableBitmap(Mat src, WriteableBitmap dst)
{
    if (src == null)
        throw new ArgumentNullException("src");
    if (dst == null)
        throw new ArgumentNullException("dst");
    if (src.Width != dst.PixelWidth || src.Height != dst.PixelHeight)
        throw new ArgumentException("size of src must be equal to size of dst");
    //if (src.Depth != BitDepth.U8)
    //throw new ArgumentException("bit depth of src must be BitDepth.U8", "src");
    if (src.Dims() > 2)
        throw new ArgumentException("Mat dimensions must be 2");

    int w = src.Width;
    int h = src.Height;
    int bpp = dst.Format.BitsPerPixel;
    int channels = GetOptimumChannels(dst.Format);
    if (src.Channels() != channels)
    {
        throw new ArgumentException("channels of dst != channels of PixelFormat", "dst");
    }

    bool submat = src.IsSubmatrix();
    bool continuous = src.IsContinuous();
    unsafe
    {
        byte* pSrc = (byte*)(src.Data);
        int sstep = (int)src.Step();

        // 1bpp: pack bits manually (original: 手作業で移し替える)
        if (bpp == 1)
        {
            if (submat)
                throw new NotImplementedException("submatrix not supported");

            int stride = w / 8 + 1;
            if (stride < 2)
                stride = 2;
            byte[] pixels = new byte[h * stride];
            for (int x = 0, y = 0; y < h; y++)
            {
                int offset = y * stride;
                for (int bytePos = 0; bytePos < stride; bytePos++)
                {
                    if (x < w)
                    {
                        byte b = 0;
                        // Pack the next 8 horizontal pixels into one byte,
                        // setting each bit whose source pixel is non-zero
                        // (現在の位置から横8ピクセル分、ビットが立っているか調べる).
                        for (int i = 0; i < 8; i++)
                        {
                            b <<= 1;
                            if (x < w && pSrc[sstep * y + x] != 0)
                            {
                                b |= 1;
                            }
                            x++;
                        }
                        pixels[offset + bytePos] = b;
                    }
                }
                x = 0;
            }
            dst.WritePixels(new Int32Rect(0, 0, w, h), pixels, stride, 0);
            return;
        }

        // Continuous non-submatrix: single bulk copy (一気にコピー)
        if (!submat && continuous)
        {
            long imageSize = src.DataEnd.ToInt64() - src.Data.ToInt64();
            if (imageSize < 0)
                throw new OpenCvSharpException("The mat has invalid data pointer");
            if (imageSize > Int32.MaxValue)
                throw new OpenCvSharpException("Too big mat data");
            dst.WritePixels(new Int32Rect(0, 0, w, h), src.Data, (int)imageSize, sstep);
            return;
        }

        // Otherwise copy row by row into the back buffer (一列ごとにコピー)
        try
        {
            dst.Lock();
            int dstep = dst.BackBufferStride;
            byte* pDst = (byte*)dst.BackBuffer;
            for (int y = 0; y < h; y++)
            {
                long offsetSrc = (y * sstep);
                long offsetDst = (y * dstep);
                Utility.CopyMemory(pDst + offsetDst, pSrc + offsetSrc, w * channels);
            }
        }
        finally
        {
            dst.Unlock();
        }
    }
}
// Image binarization (original: 画像二値化).
/// <summary>
/// Produces a binary mask: pixels whose H/S/V components are all strictly within
/// ±range of colorHsv become 255, everything else 0.
/// </summary>
Mat Converter(Mat colorImage, Scalar colorHsv, int range)
{
    int channel = colorImage.Channels();
    int imageW = colorImage.Width;
    int imageH = colorImage.Height;

    // FIX: Mat.CvtColor returns a *new* Mat rather than converting in place;
    // the previous code discarded the result, so the HSV comparison below
    // actually ran against the original BGR data.
    colorImage = colorImage.CvtColor(OpenCvSharp.ColorConversion.BgrToHsv);

    Mat grayImage = new Mat(imageH, imageW, MatType.CV_8UC1);
    unsafe
    {
        byte* matPtr = grayImage.DataPointer;
        byte* colorPtr = colorImage.DataPointer;
        for (int i = 0; i < imageW * imageH; i++)
        {
            // All three components must lie strictly inside the ±range window.
            bool match =
                *(colorPtr + i * channel) < (colorHsv.Val0 + range) && *(colorPtr + i * channel) > (colorHsv.Val0 - range) &&
                (*(colorPtr + i * channel + 1) < (colorHsv.Val1 + range) && *(colorPtr + i * channel + 1) > (colorHsv.Val1 - range)) &&
                (*(colorPtr + i * channel + 2) < (colorHsv.Val2 + range) && *(colorPtr + i * channel + 2) > (colorHsv.Val2 - range));
            *(matPtr + i) = match ? (byte)255 : (byte)0;
        }
    }
    return grayImage;
}
// Update is called once per frame.
// Calibration workflow: mouse buttons position the on-screen crosses and add
// Kinect/projector point pairs; A adds a pair at the current cursors, S solves the
// transformation (needs >= 8 pairs), D toggles result preview, F saves the calibration
// to XML, Q removes the last point pair. Each frame the raw depth map is rendered
// to scTex, and (when enabled) the solved mapping is visualized on screenTx.
void Update()
{
    if (!runCalibration)
        return;

    if (Input.GetMouseButton(0) || Input.GetMouseButton(1) || Input.GetMouseButton(2))
    {
        if (Input.GetMouseButton(0))
        {
            // Left button: move the blue cross to the mouse position, mapped from the
            // centered 640x480 screen region into local (-320..320, -240..240) space.
            GameObject bc = GameObject.FindGameObjectWithTag("BlueCross");
            bc.transform.localPosition = new Vector3(
                Map(Input.mousePosition.x, Screen.width / 2.0f - 320.0f, Screen.width / 2.0f + 320.0f, 0.0f, 640.0f) - 320.0f,
                -Map(Input.mousePosition.y, Screen.height / 2.0f + 240.0f, Screen.height / 2.0f - 240.0f, 0.0f, 480.0f) + 240.0f,
                0.0f);
        }
        else if (Input.GetMouseButton(1))
        {
            // Right button: move the yellow cross the same way and arm the middle-click add.
            GameObject yc = GameObject.FindGameObjectWithTag("YellowCross");
            yc.transform.localPosition = new Vector3(
                Map(Input.mousePosition.x, Screen.width / 2.0f - 320.0f, Screen.width / 2.0f + 320.0f, 0.0f, 640.0f) - 320.0f,
                -Map(Input.mousePosition.y, Screen.height / 2.0f + 240.0f, Screen.height / 2.0f - 240.0f, 0.0f, 480.0f) + 240.0f,
                0.0f);
            nextBt = true;
        }
        else if (Input.GetMouseButton(2) && nextBt == true)
        {
            // Middle button (only after a yellow-cross update): record the point pair.
            if (addKinectPoint())
            {
                addProjectorPoint();
                Debug.Log("Point Added! -> (" + kinectCoordinates.Count + ") ");
                nextBt = false;
            }
            else
            {
                Debug.Log("Kinect Point out of bounds!");
            }
        }
    }

    if (Input.GetKeyDown(KeyCode.A))
    {
        // Keyboard shortcut: add a point pair at the current cross positions.
        if (addKinectPoint())
        {
            addProjectorPoint();
            Debug.Log("Point Added! -> " + kinectCoordinates.Count);
        }
        else
        {
            Debug.Log("Kinect Point out of bounds!");
        }
    }

    if (Input.GetKeyDown(KeyCode.S))
    {
        // Solve the Kinect->projector transformation; at least 8 correspondences required.
        if (kinectCoordinates.Count >= 8)
        {
            Debug.Log("Starting Calibration...");
            findTransformation(kinectCoordinates, projectorCoordinates);
            foundResult = true;
        }
        else
        {
            Debug.Log("Not Enough Points!");
        }
    }

    if (Input.GetKeyDown(KeyCode.D) && foundResult == true)
    {
        // Toggle the result overlay; clear it when switching off.
        showResult = !showResult;
        if (!showResult)
        {
            screenTx.SetPixels32(resetPixels);
            screenTx.Apply(false);
        }
        Debug.Log("Show result toggle: " + showResult);
    }

    if (Input.GetKeyDown(KeyCode.F) && foundResult == true)
    {
        // Persist the calibration matrix and both point sets to XML.
        using (CvFileStorage fs = new CvFileStorage("KinectCalibration.xml", null, FileStorageMode.Write))
        {
            string nodeName = "calibResult";
            fs.Write(nodeName, result.ToCvMat());

            nodeName = "kinectPoints";
            Mat kinectPts = new Mat(1, kinectCoordinates.Count, MatType.CV_64FC3);
            for (int i = 0; i < kinectCoordinates.Count; i++)
            {
                kinectPts.Set<CvPoint3D64f>(0, i, (CvPoint3D64f)kinectCoordinates[i]);
            }
            fs.Write(nodeName, kinectPts.ToCvMat());

            nodeName = "projectorPoints";
            Mat projPts = new Mat(1, projectorCoordinates.Count, MatType.CV_64FC2);
            for (int i = 0; i < projectorCoordinates.Count; i++)
            {
                projPts.Set<CvPoint2D64f>(0, i, (CvPoint2D64f)projectorCoordinates[i]);
            }
            fs.Write(nodeName, projPts.ToCvMat());
            // FIX: removed redundant explicit fs.Dispose(); the using block disposes it.
        }
        Debug.Log("Calib Data saved!");
    }

    if (Input.GetKeyDown(KeyCode.Q))
    {
        delLastPoints();
    }

    if (kinect.GetDepthRaw())
    {
        try
        {
            // Grab the raw depth frame, keep an unscaled copy for world-space lookups,
            // then normalize to 8-bit for display.
            Mat src = DoDepthBuffer(kinect.usersDepthMap, KinectWrapper.GetDepthWidth(), KinectWrapper.GetDepthHeight());
            dBuffer = src.Clone();
            src.ConvertTo(src, OpenCvSharp.CPlusPlus.MatType.CV_8UC1, 255.0f / NUI_IMAGE_DEPTH_MAXIMUM);

            // Compose a grayscale RGBA image (depth in all color planes, opaque alpha).
            Mat show = new Mat(KinectWrapper.GetDepthHeight(), KinectWrapper.GetDepthWidth(), OpenCvSharp.CPlusPlus.MatType.CV_8UC4);
            Mat alpha = new Mat(KinectWrapper.GetDepthHeight(), KinectWrapper.GetDepthWidth(), OpenCvSharp.CPlusPlus.MatType.CV_8UC1, new Scalar(255));
            Mat[] planes = new Mat[4] { src, src, src, alpha };
            Cv2.Merge(planes, show);

            // Upload the raw bytes to the preview texture.
            int matSize = (int)show.Total() * show.Channels();
            byte[] rColors = new byte[matSize];
            Marshal.Copy(show.DataStart, rColors, 0, matSize);
            scTex.LoadRawTextureData(rColors);
            scTex.Apply(false);
            ScreenObject.GetComponent<RawImage>().texture = scTex;

            if (showResult)
            {
                screenTx.SetPixels32(resetPixels);
                long discarded = 0;
                long drawn = 0;
                long bounds = 0;

                // Start from an opaque black canvas.
                Color32[] txcl = new Color32[screenTx.height * screenTx.width];
                for (int i = 0; i < txcl.Length; i++)
                {
                    Color32 cCol = new Color32(0, 0, 0, 255);
                    txcl[i] = cCol;
                }
                screenTx.SetPixels32(txcl, 0);

                Color32 sccolor = Color.white;
                // Sample every 5th depth pixel, project it through the calibration and plot it.
                for (int i = 0; i < show.Rows; i += 5)
                {
                    for (int j = 0; j < show.Cols; j += 5)
                    {
                        CvPoint3D64f realVal = NuiTransformDepthImageToSkeleton((long)j, (long)i, dBuffer.Get<ushort>((int)i, (int)j));
                        if (realVal.Z < projThresh && realVal.Z > 1.0)
                        {
                            CvPoint2D64f scCoord = convertKinectToProjector(realVal);
                            if (scCoord.X > 0.0 && scCoord.X < Screen.width && scCoord.Y > 0.0 && scCoord.Y < Screen.height)
                            {
                                int X = Mathf.CeilToInt((float)scCoord.X);
                                int Y = Mathf.CeilToInt((float)scCoord.Y);
                                // NOTE(review): CeilToInt can round X up to Screen.width (or Y to
                                // Screen.height), which would index past the row — confirm whether
                                // screenTx dimensions guarantee this cannot overflow.
                                int arrPos = ((screenTx.height - Y) * screenTx.width) + X;
                                txcl[arrPos] = sccolor;
                                drawn++;
                            }
                            else
                            {
                                bounds++;
                            }
                        }
                        else
                        {
                            discarded++;
                        }
                    }
                }
                Debug.Log("Discarded: " + discarded + " Bounds: " + bounds + " Drawn: " + drawn);
                screenTx.SetPixels32(txcl, 0);
                screenTx.Apply(false);
            }
        }
        catch (System.Exception)
        {
            // FIX: was "throw e;", which resets the stack trace; "throw;" preserves it.
            throw;
        }
    }
}
/// <summary>
/// System.Drawing.BitmapからOpenCVのMatへ変換して返す.
/// </summary>
/// <param name="src">変換するSystem.Drawing.Bitmap</param>
/// <param name="dst">変換結果を格納するMat</param>
#else
/// <summary>
/// Converts System.Drawing.Bitmap to Mat
/// </summary>
/// <param name="src">System.Drawing.Bitmap object to be converted</param>
/// <param name="dst">A Mat object which is converted from System.Drawing.Bitmap</param>
#endif
public static unsafe void ToMat(this Bitmap src, Mat dst)
{
    if (src == null)
        throw new ArgumentNullException("src");
    if (dst == null)
        throw new ArgumentNullException("dst");
    if (dst.IsDisposed)
        throw new ArgumentException("The specified dst is disposed.", "dst");
    if (dst.Depth() != MatType.CV_8U)
        throw new NotSupportedException("Mat depth != CV_8U");
    if (dst.Dims() != 2)
        throw new NotSupportedException("Mat dims != 2");
    if (src.Width != dst.Width || src.Height != dst.Height)
        throw new ArgumentException("src.Size != dst.Size");

    int w = src.Width;
    int h = src.Height;
    Rectangle rect = new Rectangle(0, 0, w, h);
    BitmapData bd = null;
    try
    {
        bd = src.LockBits(rect, ImageLockMode.ReadOnly, src.PixelFormat);
        byte* p = (byte*)bd.Scan0.ToPointer();
        int sstep = bd.Stride;
        // FIX: removed unused local "offset" (sstep - w/8) left over from an earlier revision.
        uint dstep = (uint)dst.Step();
        IntPtr dstData = dst.Data;
        byte* dstPtr = (byte*)dstData.ToPointer();

        bool submat = dst.IsSubmatrix();
        bool continuous = dst.IsContinuous();

        switch (src.PixelFormat)
        {
            case PixelFormat.Format1bppIndexed:
            {
                if (dst.Channels() != 1)
                    throw new ArgumentException("Invalid nChannels");
                if (submat)
                    throw new NotImplementedException("submatrix not supported");

                int x = 0;
                int y;
                int bytePos;
                byte b;
                int i;
                for (y = 0; y < h; y++)
                {
                    // Bitmap rows are padded up to a 4-byte boundary;
                    // walk every byte of the row and unpack its bits.
                    for (bytePos = 0; bytePos < sstep; bytePos++)
                    {
                        if (x < w)
                        {
                            // Extract the 8 bits of this byte, MSB first;
                            // each bit becomes one 8-bit destination pixel (255 or 0).
                            b = p[bytePos];
                            for (i = 0; i < 8; i++)
                            {
                                if (x >= w)
                                {
                                    break;
                                }
                                dstPtr[dstep * y + x] = ((b & 0x80) == 0x80) ? (byte)255 : (byte)0;
                                b <<= 1;
                                x++;
                            }
                        }
                    }
                    // Advance to the next source row.
                    x = 0;
                    p += sstep;
                }
            }
            break;

            case PixelFormat.Format8bppIndexed:
            case PixelFormat.Format24bppRgb:
            {
                if (src.PixelFormat == PixelFormat.Format8bppIndexed)
                    if (dst.Channels() != 1)
                        throw new ArgumentException("Invalid nChannels");
                if (src.PixelFormat == PixelFormat.Format24bppRgb)
                    if (dst.Channels() != 3)
                        throw new ArgumentException("Invalid nChannels");

                // Strides match and the Mat is contiguous: copy the whole buffer at once.
                if (dstep == sstep && !submat && continuous)
                {
                    uint length = (uint)(dst.DataEnd.ToInt64() - dstData.ToInt64());
                    Utility.CopyMemory(dstData, bd.Scan0, length);
                }
                else
                {
                    // Otherwise copy row by row.
                    // FIX: copy only the w*channels meaningful bytes per row; copying dstep
                    // bytes could overread the source row when dstep > sstep.
                    byte* sp = (byte*)bd.Scan0;
                    byte* dp = (byte*)dst.Data;
                    uint rowBytes = (uint)(w * dst.Channels());
                    for (int y = 0; y < h; y++)
                    {
                        Utility.CopyMemory(dp, sp, rowBytes);
                        sp += sstep;
                        dp += dstep;
                    }
                }
            }
            break;

            case PixelFormat.Format32bppRgb:
            case PixelFormat.Format32bppArgb:
            case PixelFormat.Format32bppPArgb:
            {
                switch (dst.Channels())
                {
                    case 4:
                        // BGRA -> BGRA: bulk or row-by-row copy.
                        if (!submat && continuous)
                        {
                            uint length = (uint)(dst.DataEnd.ToInt64() - dstData.ToInt64());
                            Utility.CopyMemory(dstData, bd.Scan0, length);
                        }
                        else
                        {
                            // FIX: copy w*4 bytes per row (see 24bpp case above).
                            byte* sp = (byte*)bd.Scan0;
                            byte* dp = (byte*)dst.Data;
                            uint rowBytes = (uint)(w * 4);
                            for (int y = 0; y < h; y++)
                            {
                                Utility.CopyMemory(dp, sp, rowBytes);
                                sp += sstep;
                                dp += dstep;
                            }
                        }
                        break;
                    case 3:
                        // BGRA -> BGR: drop the alpha byte pixel by pixel.
                        for (int y = 0; y < h; y++)
                        {
                            for (int x = 0; x < w; x++)
                            {
                                dstPtr[y * dstep + x * 3 + 0] = p[y * sstep + x * 4 + 0];
                                dstPtr[y * dstep + x * 3 + 1] = p[y * sstep + x * 4 + 1];
                                dstPtr[y * dstep + x * 3 + 2] = p[y * sstep + x * 4 + 2];
                            }
                        }
                        break;
                    default:
                        throw new ArgumentException("Invalid nChannels");
                }
            }
            break;

            default:
                // FIX: unsupported formats previously fell through silently, leaving dst
                // untouched with no indication of failure.
                throw new NotImplementedException("PixelFormat is not supported: " + src.PixelFormat);
        }
    }
    finally
    {
        if (bd != null)
            src.UnlockBits(bd);
    }
}