// Unity lifecycle hook: runs once before the first frame update.
// Allocates all OpenCV working buffers and tracking state used by the
// blob/hand-detection pipeline, then primes the hand detector.
void Start()
{
    // Un-mirror the webcam image
    //if (FlipLeftRightAxis)
    //{
    //    transform.localScale = new Vector3(-transform.localScale.x,
    //        transform.localScale.y, transform.localScale.z);
    //}
    //if (FlipUpDownAxis)
    //{
    //    transform.localScale = new Vector3(transform.localScale.x,
    //        -transform.localScale.y, transform.localScale.z);
    //}

    // Display/debug image, 4-channel (BGRA/RGBA).
    // NOTE(review): hard-coded 480x640 — presumably matches the Kinect depth
    // stream queried below; confirm against KinectWrapper.
    show = new Mat(480, 640, OpenCvSharp.CPlusPlus.MatType.CV_8UC4);

    // Load the cascades (face detection currently disabled).
    //haarCascade = CvHaarClassifierCascade.FromFile(Application.dataPath + "/Data/haarcascade_frontalface_default.xml");
    //cascade = new CascadeClassifier(Application.dataPath + "/opencvkinect/Data/haarcascade_frontalface_alt.xml");

    // Cache the depth-image dimensions reported by the Kinect plugin.
    imHeight = KinectWrapper.GetDepthHeight();
    imWidth = KinectWrapper.GetDepthWidth();
    //result = new Texture2D(480, 640, TextureFormat.RGBA32, false);

    // Single-channel background / foreground / thresholded-foreground buffers
    // sized to the region of interest.
    // NOTE(review): roiH/roiW are not initialized here — assumed set elsewhere
    // (field initializer or inspector) before Start runs; verify.
    bg = new Mat(roiH, roiW, MatType.CV_8UC1);
    fg = new Mat(roiH, roiW, MatType.CV_8UC1);
    fgthresh = new Mat(roiH, roiW, MatType.CV_8UC1);
    //showImg = new IplImage();
    //fgthreshImg = new IplImage();

    // Fresh tracking state: tracked objects and the blob set fed by labeling.
    tracks = new List<Trak>();
    blobs = new OpenCvSharp.Blob.CvBlobs();

    // Pending spawn positions accumulated per frame.
    foodAdd = new List<Vector2>();
    boulderAdd = new List<Vector2>();
    rippleAdd = new List<Vector2>();

    // Cache the two food-area anchor points from the Spawner singleton.
    foodPosA = Spawner.Instance.GetFoodAreaCoordinate(0);
    foodPosB = Spawner.Instance.GetFoodAreaCoordinate(1);

    // Background-thread tracking currently disabled; tracking presumably runs
    // on the main thread instead.
    //detectorThread = new Thread(new ThreadStart(DoTracking));
    //running = true;
    //detectorThread.Start();

    prepareHandDetector();
}
/// <summary>
/// Draws or prints information about blobs with the default alpha. (cvRenderBlobs)
/// </summary>
/// <param name="blobs">List of blobs.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="mode">Render mode. By default is CV_BLOB_RENDER_COLOR|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_ANGLE.</param>
public static void RenderBlobs(CvBlobs blobs, IplImage imgSource, IplImage imgDest, RenderBlobsMode mode)
{
    // Forward to the full overload, pinning alpha to fully opaque.
    const double defaultAlpha = 1.0;
    RenderBlobs(blobs, imgSource, imgDest, mode, defaultAlpha);
}
/// <summary>
/// Labels the connected components of a binary image and extracts their blobs
/// (port of cvBlob's cvLabel: contour-tracing based labeling). For every
/// non-zero region it records the external contour as a chain code, any
/// internal (hole) contours, the bounding box, area and raw moments
/// (M10/M01/M11/M20/M02), then finalizes each blob via SetMoments().
/// </summary>
/// <param name="img">Input binary image; must be 1-channel 8-bit (CV_8UC1). Zero = background.</param>
/// <param name="blobs">Blob collection to fill; its Labels buffer must already be allocated to the image size. Cleared before labeling.</param>
/// <returns>Number of labeled (foreground) pixels.</returns>
/// <exception cref="ArgumentNullException">img or blobs is null.</exception>
/// <exception cref="ArgumentException">img is not CV_8UC1, labels are missing or mis-sized, or the image data exceeds 2^31 bytes.</exception>
public static int Perform(Mat img, CvBlobs blobs)
{
    if (img == null)
    {
        throw new ArgumentNullException("img");
    }
    if (blobs == null)
    {
        throw new ArgumentNullException("blobs");
    }
    if (img.Type() != MatType.CV_8UC1)
    {
        throw new ArgumentException("'img' must be a 1-channel U8 image.");
    }

    // The label buffer is owned by the CvBlobs instance and must match the
    // image dimensions exactly.
    LabelData labels = blobs.Labels;
    if (labels == null)
    {
        throw new ArgumentException("");
    }
    //if(labels.GetLength(0) != h || labels.GetLength(1) != w)
    if (labels.Rows != img.Height || labels.Cols != img.Width)
    {
        throw new ArgumentException("img.Size != labels' size");
    }

    int numPixels = 0;
    blobs.Clear();

    int w = img.Cols;
    int h = img.Rows;
    // Row stride in bytes (may exceed w if rows are padded).
    int step = (int)img.Step();

    // Copy the native pixel data into a managed array so the scan below can
    // index it without pinning/unsafe access per pixel.
    byte[] imgIn;
    unsafe
    {
        byte *imgInPtr = (byte *)img.Data;
        if ((long)h * step > Int32.MaxValue)
        {
            throw new ArgumentException("Too big image (image data > 2^31)");
        }
        int length = h * step;
        imgIn = new byte[length];
        Marshal.Copy(new IntPtr(imgInPtr), imgIn, 0, imgIn.Length);
    }

    int label = 0;
    // Cache of the most recently touched blob to avoid repeated dictionary
    // lookups while filling runs of pixels with the same label.
    int lastLabel = 0;
    CvBlob lastBlob = null;

    // Raster scan: top-to-bottom, left-to-right.
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            // Background pixel: nothing to do.
            if (imgIn[x + y * step] == 0)
            {
                continue;
            }

            bool labeled = labels[y, x] != 0;

            // Start of a new external contour: an unlabeled foreground pixel
            // whose pixel above is background (or which sits on the top row).
            if (!labeled && ((y == 0) || (imgIn[x + (y - 1) * step] == 0)))
            {
                labeled = true;
                // Label contour.
                label++;
                // MarkerValue is reserved as a sentinel in the label buffer;
                // running into it means the label space is exhausted.
                if (label == MarkerValue)
                {
                    throw new Exception();
                }
                labels[y, x] = label;
                numPixels++;
                // XXX This is not necessary at all. I only do this for consistency.
                if (y > 0)
                {
                    labels[y - 1, x] = MarkerValue;
                }

                CvBlob blob = new CvBlob(label, x, y);
                blobs.Add(label, blob);
                lastLabel = label;
                lastBlob = blob;

                // Trace the external contour clockwise using the MovesE
                // move table; record it as a Freeman chain code.
                blob.Contour.StartingPoint = new Point(x, y);
                int direction = 1;
                int xx = x;
                int yy = y;
                bool contourEnd = false;
                do
                {
                    // Try up to 3 turn attempts before giving up a direction.
                    for (int numAttempts = 0; numAttempts < 3; numAttempts++)
                    {
                        bool found = false;
                        // Probe the 3 candidate neighbors for the current direction.
                        for (int i = 0; i < 3; i++)
                        {
                            int nx = xx + MovesE[direction, i, 0];
                            int ny = yy + MovesE[direction, i, 1];
                            if ((nx < w) && (nx >= 0) && (ny < h) && (ny >= 0))
                            {
                                if (imgIn[nx + ny * step] != 0)
                                {
                                    found = true;
                                    blob.Contour.ChainCode.Add((CvChainCode)MovesE[direction, i, 3]);
                                    xx = nx;
                                    yy = ny;
                                    direction = MovesE[direction, i, 2];
                                    break;
                                }
                                // Mark visited background neighbors so hole
                                // detection does not re-enter them.
                                labels[ny, nx] = MarkerValue;
                            }
                        }
                        if (!found)
                        {
                            // Dead end: rotate the search direction.
                            direction = (direction + 1) % 4;
                        }
                        else
                        {
                            // First visit to this contour pixel: label it and
                            // fold it into the blob statistics.
                            if (labels[yy, xx] != label)
                            {
                                labels[yy, xx] = label;
                                numPixels++;
                                // Grow the bounding box.
                                if (xx < blob.MinX)
                                {
                                    blob.MinX = xx;
                                }
                                else if (xx > blob.MaxX)
                                {
                                    blob.MaxX = xx;
                                }
                                if (yy < blob.MinY)
                                {
                                    blob.MinY = yy;
                                }
                                else if (yy > blob.MaxY)
                                {
                                    blob.MaxY = yy;
                                }
                                // Accumulate area and raw moments.
                                blob.Area++;
                                blob.M10 += xx;
                                blob.M01 += yy;
                                blob.M11 += xx * yy;
                                blob.M20 += xx * xx;
                                blob.M02 += yy * yy;
                            }
                            break;
                        }
                        // Back at the start point facing the start direction:
                        // the external contour is closed.
                        contourEnd = ((xx == x) && (yy == y) && (direction == 1));
                        if (contourEnd)
                        {
                            break;
                        }
                    }
                } while (!contourEnd);
            }

            // Start of an internal (hole) contour: the pixel below is
            // background and has not been visited/marked yet.
            if ((y + 1 < h) && (imgIn[x + (y + 1) * step] == 0) && (labels[y + 1, x] == 0))
            {
                labeled = true;
                // Label internal contour
                int l;
                CvBlob blob;
                if (labels[y, x] == 0)
                {
                    // Pixel not yet labeled: inherit the label from the left
                    // neighbor (guaranteed foreground in this configuration).
                    l = labels[y, x - 1];
                    labels[y, x] = l;
                    numPixels++;
                    if (l == lastLabel)
                    {
                        blob = lastBlob;
                    }
                    else
                    {
                        blob = blobs[l];
                        lastLabel = l;
                        lastBlob = blob;
                    }
                    if (blob == null)
                    {
                        throw new Exception();
                    }
                    blob.Area++;
                    blob.M10 += x;
                    blob.M01 += y;
                    blob.M11 += x * y;
                    blob.M20 += x * x;
                    blob.M02 += y * y;
                }
                else
                {
                    l = labels[y, x];
                    if (l == lastLabel)
                    {
                        blob = lastBlob;
                    }
                    else
                    {
                        blob = blobs[l];
                        lastLabel = l;
                        lastBlob = blob;
                    }
                }
                if (blob == null)
                {
                    throw new Exception();
                }
                // XXX This is not necessary (I believe). I only do this for consistency.
                labels[y + 1, x] = MarkerValue;

                // Trace the hole boundary with the internal move table MovesI.
                // NOTE(review): unlike the external trace, neighbor probes here
                // have no bounds check — presumably holes can never touch the
                // image border by construction; confirm.
                var contour = new CvContourChainCode
                {
                    StartingPoint = new Point(x, y)
                };
                int direction = 3;
                int xx = x;
                int yy = y;
                do
                {
                    for (int numAttempts = 0; numAttempts < 3; numAttempts++)
                    {
                        bool found = false;
                        for (int i = 0; i < 3; i++)
                        {
                            int nx = xx + MovesI[direction, i, 0];
                            int ny = yy + MovesI[direction, i, 1];
                            if (imgIn[nx + ny * step] != 0)
                            {
                                found = true;
                                contour.ChainCode.Add((CvChainCode)MovesI[direction, i, 3]);
                                xx = nx;
                                yy = ny;
                                direction = MovesI[direction, i, 2];
                                break;
                            }
                            labels[ny, nx] = MarkerValue;
                        }
                        if (!found)
                        {
                            direction = (direction + 1) % 4;
                        }
                        else
                        {
                            if (labels[yy, xx] == 0)
                            {
                                labels[yy, xx] = l;
                                numPixels++;
                                blob.Area++;
                                blob.M10 += xx;
                                blob.M01 += yy;
                                blob.M11 += xx * yy;
                                blob.M20 += xx * xx;
                                blob.M02 += yy * yy;
                            }
                            break;
                        }
                    }
                } while (!(xx == x && yy == y));
                blob.InternalContours.Add(contour);
            }

            //else if (!imageOut(x, y))
            if (!labeled)
            {
                // Internal pixel
                // Plain interior pixel: copy the label from the left neighbor
                // and accumulate statistics.
                int l = labels[y, x - 1];
                labels[y, x] = l;
                numPixels++;
                CvBlob blob;
                if (l == lastLabel)
                {
                    blob = lastBlob;
                }
                else
                {
                    blob = blobs[l];
                    lastLabel = l;
                    lastBlob = blob;
                }
                if (blob == null)
                {
                    throw new Exception();
                }
                blob.Area++;
                blob.M10 += x;
                blob.M01 += y;
                blob.M11 += x * y;
                blob.M20 += x * x;
                blob.M02 += y * y;
            }
        }
    }

    // Finalize derived moment values (centroids etc.) for every blob.
    foreach (var kv in blobs)
    {
        kv.Value.SetMoments();
    }
    // Keep the Mat alive past the unsafe pointer copy above.
    GC.KeepAlive(img);
    return(numPixels);
}
/// <summary>
/// Draws or prints information about blobs using the default render mode
/// (color + centroid + bounding box + angle) and full opacity. (cvRenderBlobs)
/// </summary>
/// <param name="blobs">List of blobs.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
public static void RenderBlobs(CvBlobs blobs, IplImage imgSource, IplImage imgDest)
{
    // Named flags instead of the magic constant (RenderBlobsMode)0x000f:
    // 0x0001 | 0x0002 | 0x0004 | 0x0008, matching cvBlob's documented default
    // CV_BLOB_RENDER_COLOR|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_ANGLE.
    RenderBlobs(blobs, imgSource, imgDest,
        RenderBlobsMode.Color | RenderBlobsMode.Centroid | RenderBlobsMode.BoundingBox | RenderBlobsMode.Angle,
        1.0);
}
/// <summary>
/// Draws or prints information about blobs using the default render mode
/// (color + centroid + bounding box + angle) and full opacity. (cvRenderBlobs)
/// </summary>
/// <param name="blobs">List of blobs.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
public static void RenderBlobs(CvBlobs blobs, Mat imgSource, Mat imgDest)
{
    // Fix: the original cast to "RenderBlobsModes" (plural), inconsistent with
    // the sibling IplImage overloads which use the RenderBlobsMode enum.
    // Also replaces the magic constant 0x000f with the documented default flags
    // CV_BLOB_RENDER_COLOR|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_ANGLE.
    RenderBlobs(blobs, imgSource, imgDest,
        RenderBlobsMode.Color | RenderBlobsMode.Centroid | RenderBlobsMode.BoundingBox | RenderBlobsMode.Angle,
        1.0);
}