/// <summary>
/// Maps depth data to real-world (camera-space) points paired with their color.
/// </summary>
/// <param name="depthMat">Depth image (16-bit)</param>
/// <param name="colorMat">Color image (BGR/BGRA)</param>
/// <returns>Camera-space points with their sampled color</returns>
public List<Tuple<CvPoint3D64f, CvColor>> DepthColorMatToRealPoints(CvMat depthMat, CvMat colorMat)
{
    List<Tuple<CvPoint3D64f, CvColor>> res = new List<Tuple<CvPoint3D64f, CvColor>>();
    int bytesPerPixel = colorMat.ElemChannels;
    CvSize colorSize = colorMat.GetSize();
    unsafe
    {
        short* depthArr = depthMat.DataInt16;
        byte* colorArr = colorMat.DataByte;
        for (int y = 0; y < depthHeight; ++y)
        {
            for (int x = 0; x < depthWidth; ++x)
            {
                int depthIndex = (y * depthWidth) + x;
                ushort depthVal = (ushort)depthArr[depthIndex];
                ColorSpacePoint colorPoint = this.MapDepthPointToColorSpace(x, y, depthVal, colorSize.Width, colorSize.Height);
                CameraSpacePoint cameraPoint = this.MapDepthPointToCameraSpace(x, y, depthVal);
                // make sure the depth pixel maps to a valid point in color space
                int colorX = (int)Math.Floor(colorPoint.X + 0.5);
                int colorY = (int)Math.Floor(colorPoint.Y + 0.5);
                if ((colorX >= 0) && (colorX < colorSize.Width) && (colorY >= 0) && (colorY < colorSize.Height))
                {
                    // calculate index into color array (pixels are stored BGR in memory)
                    int colorIndex = ((colorY * colorSize.Width) + colorX) * bytesPerPixel;
                    CvColor color = new CvColor(colorArr[colorIndex + 2], colorArr[colorIndex + 1], colorArr[colorIndex + 0]);
                    res.Add(Tuple.Create((CvPoint3D64f)cameraPoint.ToCvPoint3D(), color));
                }
            }
        }
    }
    return res;
}
/// <summary>
/// Creates a 256-entry lookup table (blue → cyan → green → yellow → red).
/// </summary>
/// <returns></returns>
private CvColor[] CreateLutData()
{
    CvColor[] lutData = new CvColor[256];
    for (int i = 0; i < lutData.Length; i++)
    {
        double r, g, b;
        if (i >= 0 && i <= 63)
        {
            r = 0; g = 255.0 / 63 * i; b = 255;
        }
        else if (i > 63 && i <= 127)
        {
            r = 0; g = 255; b = 255 - (255.0 / (127 - 63) * (i - 63));
        }
        else if (i > 127 && i <= 191)
        {
            r = 255.0 / (191 - 127) * (i - 127); g = 255; b = 0;
        }
        else // i > 191 && i < 256
        {
            r = 255; g = 255 - (255.0 / (255 - 191) * (i - 191)); b = 0;
        }
        lutData[i] = new CvColor((byte)r, (byte)g, (byte)b);
    }
    return lutData;
}
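// Usage sketch (an assumption, not part of the original source): applying the
// jet-style LUT above to an 8-bit grayscale IplImage by indexing each pixel's
// intensity. The names "gray", "colorized", and ApplyLut are hypothetical.
private IplImage ApplyLut(IplImage gray, CvColor[] lutData)
{
    IplImage colorized = new IplImage(gray.Size, BitDepth.U8, 3);
    for (int y = 0; y < gray.Height; y++)
    {
        for (int x = 0; x < gray.Width; x++)
        {
            byte v = (byte)gray[y, x].Val0;  // intensity 0..255
            colorized[y, x] = lutData[v];    // map intensity to a pseudo-color
        }
    }
    return colorized;
}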
private static void usingCInterface()
{
    using (var src = new IplImage(@"..\..\Images\Penguin.Png", LoadMode.AnyDepth | LoadMode.AnyColor))
    using (var dst = new IplImage(src.Size, src.Depth, src.NChannels))
    {
        for (var y = 0; y < src.Height; y++)
        {
            for (var x = 0; x < src.Width; x++)
            {
                CvColor pixel = src[y, x];
                dst[y, x] = new CvColor
                {
                    B = (byte)(255 - pixel.B),
                    G = (byte)(255 - pixel.G),
                    R = (byte)(255 - pixel.R)
                };
            }
        }

        // [C] Accessing Pixel
        // https://github.com/shimat/opencvsharp/wiki/%5BC%5D-Accessing-Pixel
        using (new CvWindow("C Interface: Src", image: src))
        using (new CvWindow("C Interface: Dst", image: dst))
        {
            Cv.WaitKey(0);
        }
    }
}
public byte[] CameraPose()
{
    //Create a defined registration pattern - in this case a cube
    var cube = CoordinateDefinition.Microcube();
    var yu2 = LatestYUVImage();
    var colorCv = new CvColor(yu2);

    //Find and draw (make sure it can be found)
    var markers = Vision.FindAruco(colorCv);
    if (!markers.Any())
    {
        return PoseFormatter.PoseToBytes(new double[4, 4]); //zeros
    }

    //Calculate pose
    var depth = LatestDepthImage();
    CameraSpacePoint[] _3dImage = new CameraSpacePoint[KinectSettings.COLOR_PIXEL_COUNT];
    KxBuffer.instance.coordinateMapper.MapColorFrameToCameraSpace(depth, _3dImage);
    var cvCameraSpace = new CvCameraSpace(_3dImage);
    var kxTransform = Vision.GetPoseFromImage(cube, cvCameraSpace, markers);
    var pose = kxTransform.CameraPose;
    return PoseFormatter.PoseToBytes(pose);
}
public EyeDetect()
{
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };

    const double Scale = 1.25;
    const double ScaleFactor = 2.5;
    const int MinNeighbors = 2;

    using (CvCapture cap = CvCapture.FromCamera(1))
    using (CvWindow w = new CvWindow("Eye Tracker"))
    {
        while (CvWindow.WaitKey(10) < 0)
        {
            using (IplImage img = cap.QueryFrame())
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
            {
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }

                using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("C:\\Program Files\\OpenCV\\data\\haarcascades\\haarcascade_eye.xml"))
                using (CvMemStorage storage = new CvMemStorage())
                {
                    storage.Clear();
                    Stopwatch watch = Stopwatch.StartNew();
                    CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                    watch.Stop();
                    //Console.WriteLine("detection time = {0}msn", watch.ElapsedMilliseconds);
                    for (int i = 0; i < eyes.Total; i++)
                    {
                        CvRect r = eyes[i].Value.Rect;
                        CvPoint center = new CvPoint
                        {
                            X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                            Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                        };
                        int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                        img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                    }
                }
                w.Image = img;
            }
        }
    }
}
/// <summary>
/// Plots the input features.
/// </summary>
/// <param name="points">Feature points, normalized to [0, 1]</param>
/// <param name="id_array">Class id per point (1 or 2)</param>
private void Debug_DrawInputFeature(CvPoint2D32f[] points, int[] id_array)
{
    using (IplImage pointsPlot = Cv.CreateImage(new CvSize(300, 300), BitDepth.U8, 3))
    {
        pointsPlot.Zero();
        for (int i = 0; i < id_array.Length; i++)
        {
            int x = (int)(points[i].X * 300);
            int y = (int)(300 - points[i].Y * 300);
            int res = id_array[i];
            CvColor color = new CvColor();
            if (res == 1)
            {
                color = CvColor.Red;
            }
            else if (res == 2)
            {
                color = CvColor.GreenYellow;
            }
            pointsPlot.Circle(x, y, 2, color, -1);
        }
        CvWindow.ShowImages(pointsPlot);
    }
}
public Text()
{
    // cvInitFont, cvPutText
    // Initialize fonts and draw text
    List<FontFace> font_face = new List<FontFace>((FontFace[])Enum.GetValues(typeof(FontFace)));
    font_face.Remove(FontFace.Italic);

    // (1) Allocate and initialize the image
    using (IplImage img = Cv.CreateImage(new CvSize(450, 600), BitDepth.U8, 3))
    {
        Cv.Zero(img);
        // (2) Initialize the font structures (one upright and one italic per face)
        CvFont[] font = new CvFont[font_face.Count * 2];
        for (int i = 0; i < font.Length; i += 2)
        {
            font[i] = new CvFont(font_face[i / 2], 1.0, 1.0);
            font[i + 1] = new CvFont(font_face[i / 2] | FontFace.Italic, 1.0, 1.0);
        }
        // (3) Draw the text with each font
        for (int i = 0; i < font.Length; i++)
        {
            CvColor rcolor = CvColor.Random();
            Cv.PutText(img, "OpenCV sample code", new CvPoint(15, (i + 1) * 30), font[i], rcolor);
        }
        // (4) Show the image; exit when a key is pressed
        using (CvWindow w = new CvWindow(img))
        {
            CvWindow.WaitKey(0);
        }
    }
}
private void histRenkli(IplImage src)
{
    for (int i = 0; i < 256; i++)
    {
        countRed[i] = 0;
        countBlue[i] = 0;
        countGreen[i] = 0;
    }
    for (int y = 0; y < src.Height; y++)
    {
        for (int x = 0; x < src.Width; x++)
        {
            // Index the histogram bins directly; the original scanned all 256
            // possible values for every pixel, doing the same work 256 times over.
            CvColor renk = src[y, x];
            countRed[renk.R]++;
            countBlue[renk.B]++;
            countGreen[renk.G]++;
        }
    }
    histCiz();
}
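// A possible shape for the drawing step (the original histCiz() is not shown
// here, so this is an illustrative sketch, not the author's code): scale one
// channel's counts to the plot height and draw them as vertical lines.
private void DrawHistogram(IplImage plot, int[] counts, CvColor color)
{
    plot.Zero();
    int max = 0;
    for (int i = 0; i < 256; i++)
    {
        if (counts[i] > max) max = counts[i];
    }
    if (max == 0) return;
    for (int i = 0; i < 256; i++)
    {
        int h = counts[i] * (plot.Height - 1) / max;
        plot.Line(new CvPoint(i, plot.Height - 1), new CvPoint(i, plot.Height - 1 - h), color);
    }
}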
private void SetBrightness(IplImage image, Int32 x, Int32 y, Double newValue)
{
    // Scale to [0, 255]; values above 1.0 saturate to 255.
    newValue = (newValue > 1.0) ? 255.0 : (newValue * 255.0);
    CvColor color = new CvColor((Byte)newValue, (Byte)newValue, (Byte)newValue);
    image[y, x] = color;
}
public FaceDetect()
{
    CheckMemoryLeak();

    // CvHaarClassifierCascade, cvHaarDetectObjects
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };

    const double Scale = 1.14;
    const double ScaleFactor = 1.0850;
    const int MinNeighbors = 2;

    using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
    {
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }

        using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))
        using (var storage = new CvMemStorage())
        {
            storage.Clear();

            // Detect faces
            Stopwatch watch = Stopwatch.StartNew();
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
            watch.Stop();
            Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);

            // Draw a circle around each detection
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
            }
        }

        // Show in a window
        CvWindow.ShowImages(img);
    }
}
public IplImage SkinDetection(IplImage src)
{
    skin = new IplImage(src.Size, BitDepth.U8, 3);
    IplImage output = new IplImage(src.Size, BitDepth.U8, 1);
    Cv.Copy(src, skin);

    //CvAdaptiveSkinDetector detector = new CvAdaptiveSkinDetector(1, MorphingMethod.ErodeDilate);
    //detector.Process(src, output);

    for (int x = 0; x < src.Width; x++)
    {
        for (int y = 0; y < src.Height; y++)
        {
            //if (output[y, x].Val0 != 0)
            //{
            //    skin[y, x] = CvColor.Green;
            //}
            CvColor Color = skin[y, x];
            if (Color.R < 100)
            {
                skin[y, x] = new CvColor(0, 255, 0);
            }
        }
    }
    return skin;
}
public static void DrawRect(this IplImage ipl, CvRect rect, CvColor color, int thickness)
{
    // Temporarily clear the ROI so the rectangle lands in absolute image coordinates.
    var roi = ipl.ROI;
    ipl.ResetROI();
    ipl.DrawRect(rect.X, rect.Y, rect.X + rect.Width, rect.Y + rect.Height, color, thickness);
    ipl.SetROI(roi);
}
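// Usage sketch (assumed context): because the extension clears the ROI first,
// the rectangle is drawn relative to the full image even while an ROI is set,
// and the ROI is restored afterwards.
using (var img = new IplImage(640, 480, BitDepth.U8, 3))
{
    img.SetROI(new CvRect(100, 100, 200, 200));
    img.DrawRect(new CvRect(10, 10, 50, 50), CvColor.Red, 2); // lands at (10,10) of the full image
    img.ResetROI();
}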
public System.Drawing.Bitmap FaceDetect(IplImage src)
{
    // CvHaarClassifierCascade, cvHaarDetectObjects
    // Use a cascade of Haar classifiers to detect faces
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };

    const double scale = 1.04;
    const double scaleFactor = 1.139;
    const int minNeighbors = 1;

    using (IplImage img = src.Clone())
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
    {
        // Create the image used for face detection
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }

        using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Environment.CurrentDirectory + "\\" + "haarcascade_frontalface_alt.xml"))
        using (CvMemStorage storage = new CvMemStorage())
        {
            storage.Clear();

            // Detect faces
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(20, 20));

            // Cover each detected face with a filled black circle
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                img.Circle(center, radius, new CvColor(0, 0, 0), -1, LineType.Link8, 0);
            }
        }

        FindFace = img.Clone();
        // Convert the resulting IplImage to a Bitmap and return it
        return FindFace.ToBitmap(System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    }
}
public int GetArucoMarkerCount()
{
    var yu2 = LatestYUVImage();
    var colorCv = new CvColor(yu2);
    //Find and draw (make sure it can be found)
    var markers = Vision.FindAruco(colorCv);
    return markers.Count;
}
private Double GetHue(IplImage image, Int32 x, Int32 y)
{
    CvColor cvColor = image[y, x];
    Single red = cvColor.R / 255.0f;
    Single green = cvColor.G / 255.0f;
    Single blue = cvColor.B / 255.0f;

    Single max = Math.Max(Math.Max(red, green), blue);
    Single min = Math.Min(Math.Min(red, green), blue);
    Single delta = max - min;
    if (delta == 0.0f)
    {
        return 0.0; // achromatic pixel: hue is undefined, report 0
    }

    Single value;
    if (max == red)
    {
        value = (60.0f * (green - blue)) / delta;
    }
    else if (max == green)
    {
        value = 120.0f + ((60.0f * (blue - red)) / delta);
    }
    else
    {
        value = 240.0f + ((60.0f * (red - green)) / delta);
    }
    // Wrap into [0, 360); the original subtracted 359, skewing the result by one degree.
    if (value >= 360.0f)
    {
        value -= 360.0f;
    }
    if (value < 0.0f)
    {
        value += 360.0f;
    }
    return (Double)(value / 360.0f); // normalized to [0, 1)
}
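// Illustrative sanity check (an assumption, not original code): under the
// normalized [0,1) convention above, pure red maps to 0.0, pure green to 1/3,
// and pure blue to 2/3. "probe" is a hypothetical throwaway image.
using (var probe = new IplImage(3, 1, BitDepth.U8, 3))
{
    probe[0, 0] = new CvColor(255, 0, 0); // red
    probe[0, 1] = new CvColor(0, 255, 0); // green
    probe[0, 2] = new CvColor(0, 0, 255); // blue
    Console.WriteLine("{0} {1} {2}", GetHue(probe, 0, 0), GetHue(probe, 1, 0), GetHue(probe, 2, 0));
}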
private void 얼굴검출ToolStripMenuItem_Click(object sender, EventArgs e)
{
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };

    const double scale = 1.04;
    const double scaleFactor = 1.139;
    const int minNeighbors = 2;

    using (IplImage img = src.Clone())
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
    {
        // Create the image used for face detection
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }

        //using (CvHaarClassifierCascade cascade = Cv.Load<CvHaarClassifierCascade>(Const.XmlHaarcascade)) // either works
        using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Application.StartupPath + "\\" + "haarcascade_frontalface_alt.xml"))
        using (CvMemStorage storage = new CvMemStorage())
        {
            storage.Clear();

            // Detect faces
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(30, 30), new CvSize(180, 180));

            // Draw a circle on each detected face
            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
            }
        }

        dst = img.Clone();
        pictureBoxIpl2.ImageIpl = dst;
    }
}
public void KnnSearch(CvPoint3D64f point, CvColor color, out int[] indices, out float[] dists, int knn)
{
    if (knn == 0)
    {
        indices = new int[0];
        dists = new float[0];
        return;
    }
    // Build the 6-D query vector: XYZ plus the color channels scaled so they
    // are comparable with the spatial coordinates.
    float[] input = new float[]
    {
        (float)point.X, (float)point.Y, (float)point.Z,
        (float)(color.R * _colorScale / 255), (float)(color.G * _colorScale / 255), (float)(color.B * _colorScale / 255)
    };
    _flannIndex.KnnSearch(input, out indices, out dists, knn, _searchParam);
}
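// Usage sketch (hypothetical values, called from the owning class): fetch the
// five nearest colored model points to a query point; the color channels are
// scaled by _colorScale / 255 internally.
int[] indices;
float[] dists;
KnnSearch(new CvPoint3D64f(0.10, 0.25, 1.50), new CvColor(200, 180, 160), out indices, out dists, 5);
// indices[0] / dists[0] now describe the closest model point, if any.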
public static void Run(string[] args)
{
    var xef = new Xef(@"../../../Resources/cube.xef");
    var depth = xef.LoadDepthFrame(0);
    var color = xef.LoadColorFrame(0);
    var cvColor = new CvColor(color);
    var cvDepth = new CvDepth(depth);

    //render kinect color to UI (using KinectX.Extensions;)
    cvColor.Show();
    Console.Read();
}
public static IplImage FaceDetect(IplImage src)
{
    IplImage FindFace;

    // CvHaarClassifierCascade, cvHaarDetectObjects
    // Use a cascade of Haar classifiers to detect faces
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };

    const double scale = 1;
    const double scaleFactor = 1.139;
    const int minNeighbors = 2;

    IplImage img = src.Clone();
    // The detection image must be single-channel: the original allocated
    // 3 channels, which Cv.Resize from the 1-channel "gray" cannot fill.
    IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1);

    // Create the image used for face detection
    using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
    {
        Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
        Cv.Resize(gray, smallImg, Interpolation.Linear);
        Cv.EqualizeHist(smallImg, smallImg);
    }

    using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("C:\\haarcascade_frontalface_default.xml"))
    using (CvMemStorage storage = new CvMemStorage())
    {
        storage.Clear();

        // Detect faces
        CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(24, 24));

        // Draw a circle on each detected face
        for (int i = 0; i < faces.Total; i++)
        {
            CvRect r = faces[i].Value.Rect;
            CvPoint center = new CvPoint
            {
                X = Cv.Round((r.X + r.Width * 0.5) * scale),
                Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
            };
            int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
            img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
        }
    }

    FindFace = img.Clone();
    return FindFace;
}
/// <summary>
/// Extracts MSER by C++-style code (cv::MSER)
/// </summary>
/// <param name="gray"></param>
/// <param name="dst"></param>
private void CppStyleMSER(Mat gray, Mat dst)
{
    MSER mser = new MSER();
    Point[][] contours = mser.Run(gray, null); // operator()
    foreach (Point[] pts in contours)
    {
        CvColor color = CvColor.Random();
        foreach (Point p in pts)
        {
            dst.Circle(p, 1, color);
        }
    }
}
public static (KxTransform Transform, List<Marker> Markers) Calibrate(CvColor cvColor, CvCameraSpace cs)
{
    //Define Board
    var cube = CoordinateDefinition.Microcube();
    //Look for Board
    var markers = Vision.FindAruco(cvColor);
    if (!markers.Any())
    {
        throw new Exception("No calibration pattern could be found in the image!");
    }
    //Calculate Camera Pose
    return (Vision.GetPoseFromImage(cube, cs, markers), markers);
}
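// Usage sketch (cvColor and cvCameraSpace are assumed to come from the
// surrounding capture code): Calibrate throws when no Aruco marker of the
// microcube definition is visible, so callers may want to guard the call.
try
{
    var (transform, markers) = Calibrate(cvColor, cvCameraSpace);
    Console.WriteLine("Found {0} markers", markers.Count);
}
catch (Exception ex)
{
    Console.WriteLine(ex.Message); // "No calibration pattern could be found in the image!"
}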
/// <summary>
/// Returns the colored voxels (camera-space points) of each user.
/// </summary>
/// <param name="depthMat">Depth image (16-bit)</param>
/// <param name="colorMat">Color image</param>
/// <param name="userMat">Body-index image (0xff = no user)</param>
/// <returns>Colored points keyed by player index</returns>
public Dictionary<int, List<Tuple<CvPoint3D64f, CvColor>>> GetEachUserColorPoints(CvMat depthMat, CvMat colorMat, CvMat userMat)
{
    Dictionary<int, List<Tuple<CvPoint3D64f, CvColor>>> res = new Dictionary<int, List<Tuple<CvPoint3D64f, CvColor>>>();
    List<Tuple<CvPoint3D64f, CvColor>> lis;
    int bytesPerPixel = colorMat.ElemChannels;
    CvSize colorSize = colorMat.GetSize();
    unsafe
    {
        short* depthArr = depthMat.DataInt16;
        byte* colorArr = colorMat.DataByte;
        byte* userArr = userMat.DataByte;
        for (int y = 0; y < depthHeight; ++y)
        {
            for (int x = 0; x < depthWidth; ++x)
            {
                int depthIndex = (y * depthWidth) + x;
                byte player = userArr[depthIndex];
                if (player != 0xff)
                {
                    ushort depthVal = (ushort)depthArr[depthIndex];
                    ColorSpacePoint colorPoint = this.MapDepthPointToColorSpace(x, y, depthVal, colorSize.Width, colorSize.Height);
                    CameraSpacePoint cameraPoint = this.MapDepthPointToCameraSpace(x, y, depthVal);
                    // make sure the depth pixel maps to a valid point in color space
                    int colorX = (int)Math.Floor(colorPoint.X + 0.5);
                    int colorY = (int)Math.Floor(colorPoint.Y + 0.5);
                    if ((colorX >= 0) && (colorX < colorSize.Width) && (colorY >= 0) && (colorY < colorSize.Height))
                    {
                        // calculate index into color array
                        int colorIndex = ((colorY * colorSize.Width) + colorX) * bytesPerPixel;
                        CvColor color = new CvColor(colorArr[colorIndex + 2], colorArr[colorIndex + 1], colorArr[colorIndex + 0]);
                        if (!res.TryGetValue((int)player, out lis))
                        {
                            lis = new List<Tuple<CvPoint3D64f, CvColor>>();
                            res[(int)player] = lis;
                        }
                        lis.Add(Tuple.Create((CvPoint3D64f)cameraPoint.ToCvPoint3D(), color));
                    }
                }
            }
        }
    }
    return res;
}
// Update is called once per frame
void Update()
{
    IplImage frame = Cv.QueryFrame(capture);
    imgBinary = new IplImage(frame.Size, BitDepth.U8, 1);
    imgLabel = new IplImage(frame.Size, BitDepth.F32, 1);
    imgRender = new IplImage(frame.Size, BitDepth.U8, 3);
    imgContour = new IplImage(frame.Size, BitDepth.U8, 3);
    imgPolygon = new IplImage(frame.Size, BitDepth.U8, 3);
    Color[] cols = new Color[texture.width * texture.height];

    Cv.CvtColor(frame, imgBinary, ColorConversion.BgrToGray);
    Cv.Threshold(imgBinary, imgBinary, 100, 255, ThresholdType.Binary);
    CvBlobs blobs = new CvBlobs();
    uint result = blobs.Label(imgBinary, imgLabel);
    foreach (KeyValuePair<uint, CvBlob> item in blobs)
    {
        CvBlob b = item.Value;
        //Console.WriteLine("{0} | Centroid:{1} Area:{2}", item.Key, b.Centroid, b.Area);
        CvContourChainCode cc = b.Contour;
        cc.RenderContourChainCode(imgContour);
        CvContourPolygon polygon = cc.ConvertChainCodesToPolygon();
        foreach (CvPoint p in polygon)
        {
            imgPolygon.Circle(p, 1, CvColor.Red, -1);
        }
    }
    blobs.RenderBlobs(imgLabel, frame, imgRender);

    for (int y = 0; y < texture.height; y++)
    {
        for (int x = 0; x < texture.width; x++)
        {
            CvColor col = imgRender.Get2D(y, x);
            cols[y * texture.width + x] = new Color(col.R / 255.0f, col.G / 255.0f, col.B / 255.0f, 1.0f);
        }
    }
    //int t2 = System.Environment.TickCount;
    texture.SetPixels(cols);
    //int t3 = System.Environment.TickCount;
    //Debug.Log("t2-t1=" + (t2 - t1) + " t3-t2=" + (t3 - t2));
    texture.Apply();
}
public Delaunay()
{
    CvRect rect = new CvRect(0, 0, 600, 600);
    CvColor activeFacetColor = new CvColor(255, 0, 0);
    CvColor delaunayColor = new CvColor(0, 0, 0);
    CvColor voronoiColor = new CvColor(0, 180, 0);
    CvColor bkgndColor = new CvColor(255, 255, 255);
    Random rand = new Random();

    using (CvMemStorage storage = new CvMemStorage(0))
    using (IplImage img = new IplImage(rect.Size, BitDepth.U8, 3))
    using (CvWindow window = new CvWindow("delaunay"))
    {
        img.Set(bkgndColor);
        CvSubdiv2D subdiv = new CvSubdiv2D(rect, storage);
        for (int i = 0; i < 200; i++)
        {
            CvPoint2D32f fp = new CvPoint2D32f
            {
                X = (float)rand.Next(5, rect.Width - 10),
                Y = (float)rand.Next(5, rect.Height - 10)
            };
            LocatePoint(subdiv, fp, img, activeFacetColor);
            window.Image = img;
            if (CvWindow.WaitKey(100) >= 0)
            {
                break;
            }
            subdiv.Insert(fp);
            subdiv.CalcVoronoi2D();
            img.Set(bkgndColor);
            DrawSubdiv(img, subdiv, delaunayColor, voronoiColor);
            window.Image = img;
            if (CvWindow.WaitKey(100) >= 0)
            {
                break;
            }
        }
        img.Set(bkgndColor);
        PaintVoronoi(subdiv, img);
        window.Image = img;
        CvWindow.WaitKey(0);
    }
}
/// <summary> /// /// </summary> /// <param name="imgHueMask"></param> /// <param name="imgRgbDst"></param> /// <param name="color"></param> private void DisplaySkinPoints(IplImage imgHueMask, IplImage imgRgbDst, CvColor color) { if (imgHueMask.Size != imgRgbDst.Size) throw new ArgumentException(); for (int y = 0; y < imgHueMask.Height; y++) { for (int x = 0; x < imgHueMask.Width; x++) { byte value = (byte)imgHueMask[y, x].Val0; if (value != 0) { imgRgbDst[y, x] = color; } } } }
/// <summary>
/// Converts the image to grayscale in place by averaging the channels.
/// </summary>
private IplImage GriYap(IplImage src)
{
    for (int y = 0; y < src.Height; y++)
    {
        for (int x = 0; x < src.Width; x++)
        {
            CvColor c = src[y, x];
            byte avg = (byte)((c.R + c.G + c.B) / 3); // compute the average once, not per channel
            src[y, x] = new CvColor(avg, avg, avg);
        }
    }
    return src;
}
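// Alternative sketch (not from the original source): BT.601 luminance
// weighting, which matches what Cv.CvtColor with BgrToGray computes and is
// usually perceptually better than the plain (R+G+B)/3 average used above.
private IplImage GriYapLuma(IplImage src)
{
    for (int y = 0; y < src.Height; y++)
    {
        for (int x = 0; x < src.Width; x++)
        {
            CvColor c = src[y, x];
            byte lum = (byte)(0.299 * c.R + 0.587 * c.G + 0.114 * c.B);
            src[y, x] = new CvColor(lum, lum, lum);
        }
    }
    return src;
}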
public void KnnSearch(CvPoint3D64f point, CvColor color, out int[] indices, out float[] dists, float radius, int maxResult)
{
    float[] input = new float[]
    {
        (float)point.X, (float)point.Y, (float)point.Z,
        (float)(color.R * _colorScale / 255), (float)(color.G * _colorScale / 255), (float)(color.B * _colorScale / 255)
    };
    int count = 0;
    const int divCount = 2;
    int maxResult2 = maxResult * 4;
    // Search with a shrinking radius (radius * 0.1^k) until enough neighbors are found.
    for (int k = divCount - 1; k >= 0; k--)
    {
        float coef = (float)Math.Pow(0.1, k);
        int[] indices2 = new int[maxResult2];
        float[] dists2 = new float[maxResult2];
        _flannIndex.RadiusSearch(input, indices2, dists2, radius * coef, maxResult2, _searchParam);
        // Count the filled result slots: an unfilled slot still holds index 0 and
        // distance 0. (The original condition tested dists2[count] == 0 twice,
        // which was presumably meant to be indices2[count] == 0.)
        for (count = 0; count < maxResult2; count++)
        {
            if (indices2[count] == 0 && dists2[count] == 0f && ModelPoints[0].Item1 != point)
            {
                break;
            }
        }
        if (count >= maxResult)
        {
            indices = new int[maxResult];
            dists = new float[maxResult];
            for (int j = 0; j < maxResult; j++)
            {
                indices[j] = indices2[j];
                dists[j] = dists2[j];
            }
            return;
        }
        if (k == 0)
        {
            indices = new int[count];
            dists = new float[count];
            for (int j = 0; j < count; j++)
            {
                indices[j] = indices2[j];
                dists[j] = dists2[j];
            }
            return;
        }
    }
    this.KnnSearch(point, color, out indices, out dists, count);
}
/// <summary>
/// Extracts MSER by C-style code (cvExtractMSER)
/// </summary>
/// <param name="imgGray"></param>
/// <param name="imgDst"></param>
private void CStyleMSER(IplImage imgGray, IplImage imgDst)
{
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvContour[] contours;
        CvMSERParams param = new CvMSERParams();
        Cv.ExtractMSER(imgGray, null, out contours, storage, param);
        foreach (CvContour c in contours)
        {
            CvColor color = CvColor.Random();
            for (int i = 0; i < c.Total; i++)
            {
                imgDst.Circle(c[i].Value, 1, color);
            }
        }
    }
}
void RenderImagePlane()
{
    IplImage frame = Cv.QueryFrame(capture);
    Color[] pixelArray = new Color[captureTexture.width * captureTexture.height];
    for (int y = 0; y < captureTexture.height; y++)
    {
        for (int x = 0; x < captureTexture.width; x++)
        {
            CvColor tempColor = frame.Get2D(y, x);
            pixelArray[y * captureTexture.width + x] = new Color(tempColor.R / 255.0f, tempColor.G / 255.0f, tempColor.B / 255.0f, 1.0f);
        }
    }
    captureTexture.SetPixels(pixelArray);
    captureTexture.Apply();
    renderer.material.mainTexture = captureTexture;
}
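// Possible speedup (an assumption about the surrounding Unity code, not the
// author's implementation): building a Color32[] and calling SetPixels32
// avoids the per-channel byte-to-float conversion that Color requires.
Color32[] pixels32 = new Color32[captureTexture.width * captureTexture.height];
for (int y = 0; y < captureTexture.height; y++)
{
    for (int x = 0; x < captureTexture.width; x++)
    {
        CvColor c = frame.Get2D(y, x);
        pixels32[y * captureTexture.width + x] = new Color32(c.R, c.G, c.B, 255);
    }
}
captureTexture.SetPixels32(pixels32);
captureTexture.Apply();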
protected int CountSkinPoints(IplImage imgHueMask, CvColor color)
{
    int count = 0;
    for (int y = 0; y < imgHueMask.Height; y++)
    {
        for (int x = 0; x < imgHueMask.Width; x++)
        {
            byte value = (byte)imgHueMask[y, x].Val0;
            if (value != 0)
            {
                count++;
                imgHueMask[y, x] = color;
            }
        }
    }
    return count;
}
private IplImage Gkanal(IplImage src)
{
    // Copy the green channel into all three channels of the output.
    IplImage Gkanal = Cv.CreateImage(new CvSize(src.Width, src.Height), BitDepth.U8, 3);
    for (int y = 0; y < src.Height; y++)
    {
        for (int x = 0; x < src.Width; x++)
        {
            CvColor c = src[y, x];
            Gkanal[y, x] = new CvColor(c.G, c.G, c.G);
        }
    }
    return Gkanal;
}
public MatTest()
{
    // Initialize matrices a and b.
    // a: specify the source data as a flat 1-D array, the orthodox way.
    double[] _a = new double[]
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
    };
    // b: specify via a 2-D array; for single-channel matrices this is easier.
    double[,] _b = new double[,]
    {
        {1, 4, 7},
        {2, 5, 8},
        {3, 6, 9}
    };
    // Array of colors (3-channel RGB)
    CvColor[,] _c = new CvColor[,]
    {
        {CvColor.Red, CvColor.Green, CvColor.Blue},
        {CvColor.Brown, CvColor.Cyan, CvColor.Pink},
        {CvColor.Magenta, CvColor.Navy, CvColor.Violet}
    };

    using (CvMat a = new CvMat(3, 3, MatrixType.F64C1, _a)) // source data is a 1-D array
    using (CvMat b = CvMat.FromArray(_b))                   // source data is a 2-D array
    using (CvMat c = CvMat.FromArray(_c, MatrixType.U8C3))  // multi-channel array
    {
        // Print the values of a and b
        Console.WriteLine("a : \n{0}", a);
        Console.WriteLine("b : \n{0}", b);
        // Matrix multiplication
        Console.WriteLine("A * B : \n{0}", a * b);
        // Addition and subtraction
        Console.WriteLine("a + b : \n{0}", a + b);
        Console.WriteLine("a - b : \n{0}", a - b);
        // Bitwise operations
        Console.WriteLine("a & b : \n{0}", a & b);
        Console.WriteLine("a | b : \n{0}", a | b);
        Console.WriteLine("~a : \n{0}", ~a);
    }

    // CvMat probably does not hold much memory, so instead of wrapping every
    // instance in using, leaving disposal to the GC may be acceptable.
    Console.WriteLine("press any key to quit");
    Console.Read();
}
public void FaceDetect()
{
    // CvHaarClassifierCascade, cvHaarDetectObjects
    // Use a cascade of Haar classifiers to detect faces
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };

    const double Scale = 1.14;
    const double ScaleFactor = 1.0850;
    const int MinNeighbors = 2;

    using (IplImage img = new IplImage(Application.dataPath + TestImageName, LoadMode.Color))
    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
    {
        // Create the image used for face detection
        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }

        //using (CvHaarClassifierCascade cascade = Cv.Load<CvHaarClassifierCascade>(Const.XmlHaarcascade)) // either works
        using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Application.dataPath + TestTextName))
        using (CvMemStorage storage = new CvMemStorage())
        {
            storage.Clear();

            // Detect faces
            Stopwatch watch = Stopwatch.StartNew();
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
            watch.Stop();
            // Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);
            UnityEngine.Debug.Log("detection time = " + watch.ElapsedMilliseconds + " ms");

            for (int i = 0; i < faces.Total; i++)
            {
                CvRect r = faces[i].Value.Rect;
                CvPoint center = new CvPoint
                {
                    X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                    Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                };
                int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
            }
        }

        // Show in a window
        CvWindow.ShowImages(img);
    }
}
/// <summary>
/// Plots a point as a small cross.
/// </summary>
/// <param name="img"></param>
/// <param name="center"></param>
/// <param name="color"></param>
/// <param name="d">Half-length of the cross arms</param>
private void DrawCross(IplImage img, CvPoint center, CvColor color, int d)
{
    img.Line(center.X - d, center.Y - d, center.X + d, center.Y + d, color, 1, 0);
    img.Line(center.X + d, center.Y - d, center.X - d, center.Y + d, color, 1, 0);
}
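// Usage sketch (hypothetical coordinates): in the classic OpenCV Kalman-filter
// demo a helper like this typically marks a measurement and a prediction in
// different colors so they can be compared frame by frame.
DrawCross(img, new CvPoint(120, 80), CvColor.Red, 3);   // measurement
DrawCross(img, new CvPoint(124, 84), CvColor.Green, 3); // prediction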
public PixelAccess()
{
    // IplImage
    // Load an 8-bit, 3-channel color image and modify its pixel data.

    // Load the image
    using (IplImage img = new IplImage(Const.ImageLenna, LoadMode.Color))
    {
        // (1) Read and modify the pixel values (R, G, B) one by one
        ///*
        // Slow but simple way
        {
            for (int y = 0; y < img.Height; y++)
            {
                for (int x = 0; x < img.Width; x++)
                {
                    CvColor c = img[y, x];
                    img[y, x] = new CvColor
                    {
                        B = (byte)Math.Round(c.B * 0.7 + 10),
                        G = (byte)Math.Round(c.G * 1.0),
                        R = (byte)Math.Round(c.R * 0.0),
                    };
                }
            }
        }
        //*/
        /*
        // Probably faster way using pointers
        unsafe
        {
            byte* ptr = (byte*)img.ImageData; // pointer to the pixel data
            for (int y = 0; y < img.Height; y++)
            {
                for (int x = 0; x < img.Width; x++)
                {
                    int offset = (img.WidthStep * y) + (x * 3);
                    byte b = ptr[offset + 0]; // B
                    byte g = ptr[offset + 1]; // G
                    byte r = ptr[offset + 2]; // R
                    ptr[offset + 0] = (byte)Math.Round(b * 0.7 + 10);
                    ptr[offset + 1] = (byte)Math.Round(g * 1.0);
                    ptr[offset + 2] = (byte)Math.Round(r * 0.0);
                }
            }
        }
        //*/
        /*
        // Way that avoids unsafe by using IntPtr and Marshal (suits VB.NET)
        {
            IntPtr ptr = img.ImageData;
            for (int y = 0; y < img.Height; y++)
            {
                for (int x = 0; x < img.Width; x++)
                {
                    int offset = (img.WidthStep * y) + (x * 3);
                    byte b = Marshal.ReadByte(ptr, offset + 0); // B
                    byte g = Marshal.ReadByte(ptr, offset + 1); // G
                    byte r = Marshal.ReadByte(ptr, offset + 2); // R
                    Marshal.WriteByte(ptr, offset + 0, (byte)Math.Round(b * 0.7 + 10));
                    Marshal.WriteByte(ptr, offset + 1, (byte)Math.Round(g * 1.0));
                    Marshal.WriteByte(ptr, offset + 2, (byte)Math.Round(r * 0.0));
                }
            }
        }
        //*/

        // (2) Show the result
        using (CvWindow w = new CvWindow("Image", WindowMode.AutoSize))
        {
            w.Image = img;
            Cv.WaitKey(0);
        }
    }
}
private void timer1_Tick(object sender, EventArgs e)
{
    // Capture starts.
    IplImage ipl1 = capture.QueryFrame();
    int l = 0;
    int r = 0;
    int m = 0;

    // Show the height and width of the captured camera image in the labels.
    labelWidth.Text = capture.FrameWidth.ToString();
    labelHeight.Text = capture.FrameHeight.ToString();

    if (ipl1 != null)
    {
        // Show the captured image.
        pictureBox1.Image = ipl1.ToBitmap();

        // A memory leak reportedly occurs here, so periodically force a
        // collection of memory the program no longer needs.
        if (GC.GetTotalMemory(false) > 600000)
        {
            GC.Collect();
        }

        // Image processing should be written from here.
        // Extract red color: a red-like pixel becomes white, everything else black.
        for (int y = 0; y < ipl1.Height; y++)
        {
            for (int x = 0; x < ipl1.Width; x++)
            {
                CvColor c = ipl1[y, x];
                if (c.R > 80 && c.B < 70 && c.G < 70)
                {
                    ipl1[y, x] = new CvColor() { B = 255, G = 255, R = 255 };
                    // Count red pixels per horizontal third of the frame.
                    if (x > 0 && x < ipl1.Width / 3)
                    {
                        l++;
                    }
                    else if (x > ipl1.Width / 3 && x < 2 * ipl1.Width / 3)
                    {
                        m++;
                    }
                    else if (x > 2 * ipl1.Width / 3 && x < ipl1.Width)
                    {
                        r++;
                    }
                    /*
                    sumX = sumX + x;
                    sumY = sumY + y;
                    PixelNum++;
                    */
                }
                else
                {
                    ipl1[y, x] = new CvColor() { B = 0, G = 0, R = 0 };
                }
            }
        }

        // Show the image in pictureBox2.
        pictureBox2.Image = ipl1.ToBitmap();

        //MidY = sumY / (PixelNum+1);
        //MidX = sumX / (PixelNum+1);
        //System.Console.WriteLine(MidY);
        //System.Console.WriteLine(MidX);
        //B1 = ipl1.Width / 3;
        //B2 = 2*ipl1.Width / 3;
        /*
        if (MidX > 0 && MidX < B1) { textBox1.Text = br.TurnLeft(); }
        else if (MidX > B1 && MidX < B2) { textBox1.Text = br.Forward(); }
        else if (MidX > B2 && MidX < ipl1.Width) { textBox1.Text = br.TurnRight(); }
        else { textBox1.Text = br.Stop(); }
        */
        /*
        int redpixel = m + l + r;
        int SumPixel = ipl1.Width * ipl1.Height;
        if (redpixel > SumPixel/2) { textBox1.Text = br.Back(); }
        else
        {
            if (l > m && l > r) { textBox1.Text = br.TurnLeft(); }
            else if (r > m && r > l) { textBox1.Text = br.TurnRight(); }
            else if (m > r && m > l) { textBox1.Text = br.Forward(); }
        }
        */

        // Steer toward the third of the frame that contains the most red pixels.
        if (l > m && l > r)
        {
            textBox1.Text = br.TurnLeft();
        }
        else if (r > m && r > l)
        {
            textBox1.Text = br.TurnRight();
        }
        else if (m > r && m > l)
        {
            textBox1.Text = br.Forward();
        }
    }
    else
    {
        timer1.Stop();
    }
}
public static IplImage FaceDe(IplImage src)
{
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };

    const double Scale = 1.04;
    const double ScaleFactor = 1.139;
    const int MinNeighbors = 2;

    //CvArr waraiotoko = Cv.LoadImage("j");
    IplImage warai = Cv.LoadImage("C:\\Users\\tamago\\Documents\\Visual Studio 2010\\project\\facematch_sample\\facematch_sample\\warai_flat.png");

    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(src.Width / Scale), Cv.Round(src.Height / Scale)), BitDepth.U8, 1))
    {
        // Create the image used for face detection
        using (IplImage gray = new IplImage(src.Size, BitDepth.U8, 1))
        {
            Cv.CvtColor(src, gray, ColorConversion.BgrToGray);
            Cv.Resize(gray, smallImg, Interpolation.Linear);
            Cv.EqualizeHist(smallImg, smallImg);
        }

        //using (CvHaarClassifierCascade cascade = Cv.Load<CvHaarClassifierCascade>(Const.XmlHaarcascade)) // either works
        using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("C:\\Users\\tamago\\Documents\\Visual Studio 2010\\project\\facematch_sample\\facematch_sample\\haarcascade_frontalface_alt.xml"))
        using (CvMemStorage storage = new CvMemStorage())
        {
            storage.Clear();

            // Detect faces
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));

            // Paste the laughing-man image over each detected face
            for (int d = 0; d < faces.Total; d++)
            {
                CvRect r = faces[d].Value.Rect;
                CvSize size = new CvSize(r.Width + 30, r.Height + 30);
                using (IplImage img_laugh_resized = new IplImage(size, warai.Depth, warai.NChannels))
                {
                    Cv.Resize(warai, img_laugh_resized, Interpolation.NearestNeighbor);
                    // Clamp the copy region to the source image. The original computed
                    // these bounds but never used them, so faces near the right or
                    // bottom edge could index out of range.
                    int xMax = (((r.X + img_laugh_resized.Width) > src.Width) ? src.Width - r.X : img_laugh_resized.Width);
                    int yMax = (((r.Y + img_laugh_resized.Height) > src.Height) ? src.Height - r.Y : img_laugh_resized.Height);
                    for (int j = 0; j < xMax; ++j)
                    {
                        for (int i = 0; i < yMax; ++i)
                        {
                            CvColor color = img_laugh_resized[i, j];
                            // Treat pixels with a zero green channel as transparent
                            if (color.G != 0)
                            {
                                src[r.Y + i, r.X + j] = color;
                            }
                        }
                    }
                }
            }
            return src;
        }
    }
}
public PixelAccess()
{
    using (IplImage img = new IplImage(FilePath.Image.Lenna, LoadMode.Color))
    {
        // easy method (slow)
        {
            for (int y = 0; y < img.Height; y++)
            {
                for (int x = 0; x < img.Width; x++)
                {
                    CvColor c = img[y, x];
                    img[y, x] = new CvColor
                    {
                        B = (byte)Math.Round(c.B * 0.7 + 10),
                        G = (byte)Math.Round(c.G * 1.0),
                        R = (byte)Math.Round(c.R * 0.0),
                    };
                }
            }
        }

        /*
        // fast operation
        unsafe
        {
            byte* ptr = (byte*)img.ImageData; // pointer to the pixel data
            for (int y = 0; y < img.Height; y++)
            {
                for (int x = 0; x < img.Width; x++)
                {
                    int offset = (img.WidthStep * y) + (x * 3);
                    byte b = ptr[offset + 0]; // B
                    byte g = ptr[offset + 1]; // G
                    byte r = ptr[offset + 2]; // R
                    ptr[offset + 0] = (byte)Math.Round(b * 0.7 + 10);
                    ptr[offset + 1] = (byte)Math.Round(g * 1.0);
                    ptr[offset + 2] = (byte)Math.Round(r * 0.0);
                }
            }
        }
        //*/

        /*
        // pointer operation by managed code
        {
            IntPtr ptr = img.ImageData;
            for (int y = 0; y < img.Height; y++)
            {
                for (int x = 0; x < img.Width; x++)
                {
                    int offset = (img.WidthStep * y) + (x * 3);
                    byte b = Marshal.ReadByte(ptr, offset + 0); // B
                    byte g = Marshal.ReadByte(ptr, offset + 1); // G
                    byte r = Marshal.ReadByte(ptr, offset + 2); // R
                    Marshal.WriteByte(ptr, offset + 0, (byte)Math.Round(b * 0.7 + 10));
                    Marshal.WriteByte(ptr, offset + 1, (byte)Math.Round(g * 1.0));
                    Marshal.WriteByte(ptr, offset + 2, (byte)Math.Round(r * 0.0));
                }
            }
        }
        //*/

        using (CvWindow w = new CvWindow("Image", WindowMode.AutoSize))
        {
            w.Image = img;
            Cv.WaitKey(0);
        }
    }
}
/// <summary> /// /// </summary> /// <param name="img"></param> /// <param name="fp"></param> /// <param name="color"></param> private void DrawSubdivPoint(IplImage img, CvPoint2D32f fp, CvColor color) { img.Circle(fp, 3, color, Cv.FILLED, LineType.AntiAlias, 0); }
public SVM()
{
    // CvSVM
    // Solve a 3-class classification problem on 2-D vectors with an SVM
    const int S = 1000;
    const int SIZE = 400;
    CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);

    // (1) Allocate and initialize the image
    using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
    {
        img.Zero();
        // (2) Generate the training data
        CvPoint[] pts = new CvPoint[S];
        int[] res = new int[S];
        for (int i = 0; i < S; i++)
        {
            pts[i].X = (int)(rng.RandInt() % SIZE);
            pts[i].Y = (int)(rng.RandInt() % SIZE);
            if (pts[i].Y > 50 * Math.Cos(pts[i].X * Cv.PI / 100) + 200)
            {
                img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                res[i] = 1;
            }
            else
            {
                if (pts[i].X > 200)
                {
                    img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                    img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                    res[i] = 2;
                }
                else
                {
                    img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                    img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                    res[i] = 3;
                }
            }
        }
        // (3) Display the training data
        Cv.NamedWindow("SVM", WindowMode.AutoSize);
        Cv.ShowImage("SVM", img);
        Cv.WaitKey(0);
        // (4) Build the training inputs (coordinates normalized to [0, 1])
        float[] data = new float[S * 2];
        for (int i = 0; i < S; i++)
        {
            data[i * 2] = ((float)pts[i].X) / SIZE;
            data[i * 2 + 1] = ((float)pts[i].Y) / SIZE;
        }
        // (5) Train the SVM
        using (CvSVM svm = new CvSVM())
        {
            CvMat data_mat = new CvMat(S, 2, MatrixType.F32C1, data);
            CvMat res_mat = new CvMat(S, 1, MatrixType.S32C1, res);
            CvTermCriteria criteria = new CvTermCriteria(1000, float.Epsilon);
            CvSVMParams param = new CvSVMParams(SVMType.CSvc, SVMKernelType.Rbf, 10.0, 8.0, 1.0, 10.0, 0.5, 0.1, null, criteria);
            svm.Train(data_mat, res_mat, null, null, param);
            // (6) Draw the decision regions
            for (int i = 0; i < SIZE; i++)
            {
                for (int j = 0; j < SIZE; j++)
                {
                    float[] a = { (float)j / SIZE, (float)i / SIZE };
                    CvMat m = new CvMat(1, 2, MatrixType.F32C1, a);
                    float ret = svm.Predict(m);
                    CvColor color = new CvColor();
                    switch ((int)ret)
                    {
                        case 1: color = new CvColor(100, 0, 0); break;
                        case 2: color = new CvColor(0, 100, 0); break;
                        case 3: color = new CvColor(0, 0, 100); break;
                    }
                    img[i, j] = color;
                }
            }
            // (7) Redraw the training data
            for (int i = 0; i < S; i++)
            {
                CvColor color = new CvColor();
                switch (res[i])
                {
                    case 1: color = new CvColor(255, 0, 0); break;
                    case 2: color = new CvColor(0, 255, 0); break;
                    case 3: color = new CvColor(0, 0, 255); break;
                }
                img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), color);
                img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), color);
            }
            // (8) Draw the support vectors
            int sv_num = svm.GetSupportVectorCount();
            for (int i = 0; i < sv_num; i++)
            {
                var support = svm.GetSupportVector(i);
                img.Circle(new CvPoint((int)(support[0] * SIZE), (int)(support[1] * SIZE)), 5, new CvColor(200, 200, 200));
            }
            // (9) Show the image
            Cv.NamedWindow("SVM", WindowMode.AutoSize);
            Cv.ShowImage("SVM", img);
            Cv.WaitKey(0);
            Cv.DestroyWindow("SVM");
        }
    }
}
/// <summary> /// /// </summary> /// <param name="img"></param> /// <param name="subdiv"></param> /// <param name="delaunay_color"></param> /// <param name="voronoi_color"></param> private void DrawSubdiv(IplImage img, CvSubdiv2D subdiv, CvColor delaunay_color, CvColor voronoi_color) { CvSeqReader reader = new CvSeqReader(); int total = subdiv.Edges.Total; int elem_size = subdiv.Edges.ElemSize; subdiv.Edges.StartRead(reader, false); for (int i = 0; i < total; i++) { //CvQuadEdge2D edge = (CvQuadEdge2D)reader.CvPtr; CvQuadEdge2D edge = CvQuadEdge2D.FromSeqReader(reader); if (Cv.IS_SET_ELEM(edge)) { DrawSubdivEdge(img, (CvSubdiv2DEdge)edge + 1, voronoi_color); DrawSubdivEdge(img, (CvSubdiv2DEdge)edge, delaunay_color); } //reader.NextSeqElem(elem_size); Cv.NEXT_SEQ_ELEM(elem_size, reader); } }