// Use this for initialization
void Start ()
{
    // Two grayscale training faces, labelled 0 and 1.
    List<Mat> images = new List<Mat> ();
    images.Add (Highgui.imread (Utils.getFilePath ("facerec/facerec_0.bmp"), 0));
    images.Add (Highgui.imread (Utils.getFilePath ("facerec/facerec_1.bmp"), 0));

    List<int> labelsList = new List<int> ();
    labelsList.Add (0);
    labelsList.Add (1);
    MatOfInt labels = new MatOfInt ();
    labels.fromList (labelsList);

    // Sample to classify; its expected (actual) label is 0.
    Mat testSampleMat = Highgui.imread (Utils.getFilePath ("facerec/facerec_sample.bmp"), 0);
    int testSampleLabel = 0;

    // Train an Eigenface recognizer on the two images and classify the sample.
    int[] predictedLabel = new int[1];
    double[] predictedConfidence = new double[1];
    FaceRecognizer faceRecognizer = FaceRecognizer.createEigenFaceRecognizer ();
    faceRecognizer.train (images, labels);
    faceRecognizer.predict (testSampleMat, predictedLabel, predictedConfidence);

    Debug.Log ("Predicted class: " + predictedLabel [0] + " / " + "Actual class: " + testSampleLabel);
    Debug.Log ("Confidence: " + predictedConfidence [0]);

    // Compose the predicted face and the test sample side by side and caption them.
    Mat predictedMat = images [predictedLabel [0]];
    Mat baseMat = new Mat (testSampleMat.rows (), predictedMat.cols () + testSampleMat.cols (), CvType.CV_8UC1);
    predictedMat.copyTo (baseMat.submat (new OpenCVForUnity.Rect (0, 0, predictedMat.cols (), predictedMat.rows ())));
    testSampleMat.copyTo (baseMat.submat (new OpenCVForUnity.Rect (predictedMat.cols (), 0, testSampleMat.cols (), testSampleMat.rows ())));
    Core.putText (baseMat, "Predicted", new Point (10, baseMat.rows () - 5), Core.FONT_HERSHEY_SIMPLEX, 0.4, new Scalar (255), 1, Core.LINE_AA, false);
    Core.putText (baseMat, "TestSample", new Point (predictedMat.cols () + 10, baseMat.rows () - 5), Core.FONT_HERSHEY_SIMPLEX, 0.4, new Scalar (255), 1, Core.LINE_AA, false);

    // Show the composite on this GameObject's material.
    Texture2D texture = new Texture2D (baseMat.cols (), baseMat.rows (), TextureFormat.RGBA32, false);
    Utils.matToTexture2D (baseMat, texture);
    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
}
// Use this for initialization
void Start ()
{
    // Black 500x500 BGR canvas.
    Mat imgMat = new Mat (500, 500, CvType.CV_8UC3, new Scalar (0, 0, 0));
    Debug.Log ("imgMat dst ToString " + imgMat.ToString ());

    // Scatter 50 uniformly random points in [100, 400) and plot them as dots.
    int rand_num = 50;
    MatOfPoint pointsMat = new MatOfPoint ();
    pointsMat.alloc (rand_num);
    Core.randu (pointsMat, 100, 400);
    Point[] points = pointsMat.toArray ();
    foreach (Point pt in points) {
        Core.circle (imgMat, pt, 2, new Scalar (255, 255, 255), -1);
    }

    // Convex hull: indices into the point set -> actual hull points.
    MatOfInt hullInt = new MatOfInt ();
    Imgproc.convexHull (pointsMat, hullInt);

    List<Point> pointMatList = pointsMat.toList ();
    List<int> hullIntList = hullInt.toList ();
    List<Point> hullPointList = new List<Point> ();
    foreach (int hullIdx in hullIntList) {
        hullPointList.Add (pointMatList [hullIdx]);
    }

    MatOfPoint hullPointMat = new MatOfPoint ();
    hullPointMat.fromList (hullPointList);
    List<MatOfPoint> hullPoints = new List<MatOfPoint> ();
    hullPoints.Add (hullPointMat);

    // Outline the hull, convert for display and push to this GameObject's material.
    Imgproc.drawContours (imgMat, hullPoints, -1, new Scalar (0, 255, 0), 2);
    Imgproc.cvtColor (imgMat, imgMat, Imgproc.COLOR_BGR2RGB);
    Texture2D texture = new Texture2D (imgMat.cols (), imgMat.rows (), TextureFormat.RGBA32, false);
    Utils.matToTexture2D (imgMat, texture);
    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
}
/// <summary>
/// Scanning the specified frame, outs and net.
/// </summary>
/// <param name="frame">Frame.</param>
/// <param name="outs">Outs.</param>
/// <param name="net">Net.</param>
private void postscan(Mat frame, List <Mat> outs, Net net)
{
    string outLayerType = outBlobTypes[0];

    List <int> classIdsList = new List <int>();
    List <float> confidencesList = new List <float>();
    List <OpenCVForUnity.CoreModule.Rect> boxesList = new List <OpenCVForUnity.CoreModule.Rect>();

    if (outLayerType == "Region")
    {
        // Network produces output blobs with a shape NxC where N is a number of
        // detected objects and C is a number of classes + 5; the first 5 numbers
        // are [center_x, center_y, width, height, objectness], the rest class scores.
        foreach (Mat outMat in outs)
        {
            float[] posData = new float[5];
            float[] confData = new float[outMat.cols() - 5];
            int rowCount = outMat.rows();
            for (int row = 0; row < rowCount; row++)
            {
                outMat.get(row, 0, posData);
                outMat.get(row, 5, confData);

                // Argmax over the class scores. '>=' keeps the LAST maximal index
                // on ties, matching the original Aggregate-based implementation.
                int maxIdx = 0;
                for (int k = 1; k < confData.Length; k++)
                {
                    if (confData[k] >= confData[maxIdx])
                        maxIdx = k;
                }
                float confidence = confData[maxIdx];
                if (confidence > confThreshold)
                {
                    // Box comes back normalized; convert to pixel-space, top-left anchored.
                    int centerX = (int)(posData[0] * frame.cols());
                    int centerY = (int)(posData[1] * frame.rows());
                    int width = (int)(posData[2] * frame.cols());
                    int height = (int)(posData[3] * frame.rows());
                    int left = centerX - width / 2;
                    int top = centerY - height / 2;
                    classIdsList.Add(maxIdx);
                    confidencesList.Add(confidence);
                    boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                }
            }
        }
    }
    else
    {
        Debug.Log("Unknown output layer type: " + outLayerType);
    }

    // Non-maximum suppression over all candidate boxes.
    MatOfRect boxes = new MatOfRect();
    boxes.fromList(boxesList);
    MatOfFloat confidences = new MatOfFloat();
    confidences.fromList(confidencesList);
    MatOfInt indices = new MatOfInt();
    Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

    // Mini game: each class seen for the first time is recorded and the word counter updated.
    for (int i = 0; i < indices.total(); ++i)
    {
        int idx = (int)indices.get(i, 0)[0];
        int classId = classIdsList[idx];
        if (!minigameList.Contains(classId))
        {
            Debug.Log(classNames[classId]);
            minigameList.Add(classId);
            wordDisplay.text = minigameList.Count() > 1
                ? minigameList.Count().ToString() + " words"
                : minigameList.Count().ToString() + " word";
        }
    }

    indices.Dispose();
    boxes.Dispose();
    confidences.Dispose();
}
/// <summary>
/// Hands the pose estimation process: segments the selected colour blob, finds the
/// largest contour (assumed to be the hand), and counts fingers from convexity defects.
/// Draws the intermediate results onto <paramref name="rgbaMat"/> in place.
/// </summary>
public void handPoseEstimationProcess(Mat rgbaMat)
{
    //Imgproc.blur(mRgba, mRgba, new Size(5,5));
    // Light smoothing before colour segmentation.
    Imgproc.GaussianBlur (rgbaMat, rgbaMat, new OpenCVForUnity.Size (3, 3), 1, 1);
    //Imgproc.medianBlur(mRgba, mRgba, 3);

    // Nothing to do until the user has picked a colour to track.
    if (!isColorSelected)
        return;

    // NOTE(review): getContours() is called BEFORE process(); this presumably returns a
    // live list reference that process() clears and refills — confirm against the
    // detector implementation, otherwise stale contours from the previous frame are used.
    List<MatOfPoint> contours = detector.getContours ();
    detector.process (rgbaMat);

    // Debug.Log ("Contours count: " + contours.Count);

    if (contours.Count <= 0) {
        return;
    }

    // Pick the contour whose minimum-area rotated rectangle is largest (the hand).
    RotatedRect rect = Imgproc.minAreaRect (new MatOfPoint2f (contours [0].toArray ()));

    double boundWidth = rect.size.width;
    double boundHeight = rect.size.height;
    int boundPos = 0;

    for (int i = 1; i < contours.Count; i++) {
        rect = Imgproc.minAreaRect (new MatOfPoint2f (contours [i].toArray ()));
        if (rect.size.width * rect.size.height > boundWidth * boundHeight) {
            boundWidth = rect.size.width;
            boundHeight = rect.size.height;
            boundPos = i;
        }
    }

    OpenCVForUnity.Rect boundRect = Imgproc.boundingRect (new MatOfPoint (contours [boundPos].toArray ()));
    Imgproc.rectangle (rgbaMat, boundRect.tl (), boundRect.br (), CONTOUR_COLOR_WHITE, 2, 8, 0);

    // 'a' is a horizontal cut at 70% of the bounding-box height; defects below it
    // (the wrist area) are ignored when counting fingers.
    double a = boundRect.br ().y - boundRect.tl ().y;
    a = a * 0.7;
    a = boundRect.tl ().y + a;

    //Core.rectangle( mRgba, boundRect.tl(), boundRect.br(), CONTOUR_COLOR, 2, 8, 0 );
    Imgproc.rectangle (rgbaMat, boundRect.tl (), new Point (boundRect.br ().x, a), CONTOUR_COLOR, 2, 8, 0);

    // Simplify the hand contour, then compute its convex hull and convexity defects.
    MatOfPoint2f pointMat = new MatOfPoint2f ();
    Imgproc.approxPolyDP (new MatOfPoint2f (contours [boundPos].toArray ()), pointMat, 3, true);
    contours [boundPos] = new MatOfPoint (pointMat.toArray ());

    MatOfInt hull = new MatOfInt ();
    MatOfInt4 convexDefect = new MatOfInt4 ();
    Imgproc.convexHull (new MatOfPoint (contours [boundPos].toArray ()), hull);

    // convexityDefects needs at least 3 hull points.
    if (hull.toArray ().Length < 3)
        return;

    Imgproc.convexityDefects (new MatOfPoint (contours [boundPos] .toArray ()), hull, convexDefect);

    // Hull indices -> hull points (used for drawing the hull outline below).
    List<MatOfPoint> hullPoints = new List<MatOfPoint> ();
    List<Point> listPo = new List<Point> ();
    for (int j = 0; j < hull.toList().Count; j++) {
        listPo.Add (contours [boundPos].toList () [hull.toList () [j]]);
    }

    MatOfPoint e = new MatOfPoint ();
    e.fromList (listPo);
    hullPoints.Add (e);

    // Each defect is 4 ints: [start_idx, end_idx, farthest_point_idx, depth].
    // Keep the far points of deep defects above the wrist line 'a' — one per finger gap.
    List<MatOfPoint> defectPoints = new List<MatOfPoint> ();
    List<Point> listPoDefect = new List<Point> ();
    for (int j = 0; j < convexDefect.toList().Count; j = j + 4) {
        Point farPoint = contours [boundPos].toList () [convexDefect.toList () [j + 2]];
        int depth = convexDefect.toList () [j + 3];
        if (depth > threasholdSlider.value && farPoint.y < a) {
            listPoDefect.Add (contours [boundPos].toList () [convexDefect.toList () [j + 2]]);
        }
        // Debug.Log ("defects [" + j + "] " + convexDefect.toList () [j + 3]);
    }

    MatOfPoint e2 = new MatOfPoint ();
    // NOTE(review): e2 is filled from listPo (hull points), not listPoDefect — looks like
    // a copy/paste slip, but defectPoints is never drawn, so behavior is unaffected.
    e2.fromList (listPo);
    defectPoints.Add (e2);

    // Debug.Log ("hull: " + hull.toList ());
    // Debug.Log ("defects: " + convexDefect.toList ());

    Imgproc.drawContours (rgbaMat, hullPoints, -1, CONTOUR_COLOR, 3);

    // Finger count ~ number of kept defects, clamped to 5 (one hand).
    this.numberOfFingers = listPoDefect.Count;
    if (this.numberOfFingers > 5)
        this.numberOfFingers = 5;

    // Debug.Log ("numberOfFingers " + numberOfFingers);
    numberOfFingersText.text = numberOfFingers.ToString ();

    // Mark each detected finger gap.
    foreach (Point p in listPoDefect) {
        Imgproc.circle (rgbaMat, p, 6, new Scalar (255, 0, 255, 255), -1);
    }
}
/// <summary>
/// Postprocess the specified frame, outs and net: decodes the network's raw output
/// blobs into class ids, confidences and boxes, applies per-class NMS when needed,
/// and draws the surviving detections onto the frame.
/// </summary>
/// <param name="frame">Frame the detections are scaled to and drawn on.</param>
/// <param name="outs">Raw output blobs from the forward pass.</param>
/// <param name="net">Net (used to inspect layer/output structure).</param>
/// <param name="backend">Backend.</param>
protected virtual void postprocess(Mat frame, List <Mat> outs, Net net, int backend = Dnn.DNN_BACKEND_OPENCV)
{
    MatOfInt outLayers = net.getUnconnectedOutLayers();
    string outLayerType = outBlobTypes[0];

    List <int> classIdsList = new List <int>();
    List <float> confidencesList = new List <float>();
    List <Rect2d> boxesList = new List <Rect2d>();

    if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1)
    {
        // Faster-RCNN or R-FCN
        // Network produces output blob with a shape 1x1xNx7 where N is a number of
        // detections and an every detection is a vector of values
        // [batchId, classId, confidence, left, top, right, bottom]
        if (outs.Count == 1)
        {
            outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);
            //Debug.Log ("outs[i].ToString() " + outs [0].ToString ());
            float[] data = new float[7];
            for (int i = 0; i < outs[0].rows(); i++)
            {
                outs[0].get(i, 0, data);
                float confidence = data[2];
                if (confidence > confThreshold)
                {
                    int class_id = (int)(data[1]);
                    // Coordinates come back normalized; scale to frame pixels.
                    float left = data[3] * frame.cols();
                    float top = data[4] * frame.rows();
                    float right = data[5] * frame.cols();
                    float bottom = data[6] * frame.rows();
                    float width = right - left + 1f;
                    float height = bottom - top + 1f;
                    classIdsList.Add((int)(class_id) - 1); // Skip 0th background class id.
                    confidencesList.Add((float)confidence);
                    boxesList.Add(new Rect2d(left, top, width, height));
                }
            }
        }
    }
    else if (outLayerType == "DetectionOutput")
    {
        // SSD-style DetectionOutput layer.
        // Network produces output blob with a shape 1x1xNx7 where N is a number of
        // detections and an every detection is a vector of values
        // [batchId, classId, confidence, left, top, right, bottom]
        if (outs.Count == 1)
        {
            outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);
            //Debug.Log ("outs[i].ToString() " + outs [0].ToString ());
            float[] data = new float[7];
            for (int i = 0; i < outs[0].rows(); i++)
            {
                outs[0].get(i, 0, data);
                float confidence = data[2];
                if (confidence > confThreshold)
                {
                    int class_id = (int)(data[1]);
                    float left = data[3] * frame.cols();
                    float top = data[4] * frame.rows();
                    float right = data[5] * frame.cols();
                    float bottom = data[6] * frame.rows();
                    float width = right - left + 1f;
                    float height = bottom - top + 1f;
                    classIdsList.Add((int)(class_id) - 1); // Skip 0th background class id.
                    confidencesList.Add((float)confidence);
                    boxesList.Add(new Rect2d(left, top, width, height));
                }
            }
        }
    }
    else if (outLayerType == "Region")
    {
        // YOLO-style Region layer.
        for (int i = 0; i < outs.Count; ++i)
        {
            // Network produces output blob with a shape NxC where N is a number of
            // detected objects and C is a number of classes + 4 where the first 4
            // numbers are [center_x, center_y, width, height]
            //Debug.Log ("outs[i].ToString() "+outs[i].ToString());
            float[] positionData = new float[5];
            float[] confidenceData = new float[outs[i].cols() - 5];
            for (int p = 0; p < outs[i].rows(); p++)
            {
                outs[i].get(p, 0, positionData);
                outs[i].get(p, 5, confidenceData);
                // LINQ argmax over the class scores (last maximal index wins on ties).
                int maxIdx = confidenceData.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? max : working).I;
                float confidence = confidenceData[maxIdx];
                if (confidence > confThreshold)
                {
                    // Center-anchored normalized box -> top-left anchored pixel box.
                    float centerX = positionData[0] * frame.cols();
                    float centerY = positionData[1] * frame.rows();
                    float width = positionData[2] * frame.cols();
                    float height = positionData[3] * frame.rows();
                    float left = centerX - width / 2;
                    float top = centerY - height / 2;
                    classIdsList.Add(maxIdx);
                    confidencesList.Add((float)confidence);
                    boxesList.Add(new Rect2d(left, top, width, height));
                }
            }
        }
    }
    else
    {
        Debug.Log("Unknown output layer type: " + outLayerType);
    }

    // NMS is used inside the Region layer only on DNN_BACKEND_OPENCV; for other backends
    // we need NMS here. NMS is also required if the number of outputs > 1.
    if (outLayers.total() > 1 || (outLayerType == "Region" && backend != Dnn.DNN_BACKEND_OPENCV))
    {
        // Group detection indices by class id, then run per-class NMS.
        Dictionary <int, List <int> > class2indices = new Dictionary <int, List <int> >();
        for (int i = 0; i < classIdsList.Count; i++)
        {
            if (confidencesList[i] >= confThreshold)
            {
                if (!class2indices.ContainsKey(classIdsList[i]))
                {
                    class2indices.Add(classIdsList[i], new List <int>());
                }
                class2indices[classIdsList[i]].Add(i);
            }
        }
        List <Rect2d> nmsBoxesList = new List <Rect2d>();
        List <float> nmsConfidencesList = new List <float>();
        List <int> nmsClassIdsList = new List <int>();
        foreach (int key in class2indices.Keys)
        {
            List <Rect2d> localBoxesList = new List <Rect2d>();
            List <float> localConfidencesList = new List <float>();
            List <int> classIndicesList = class2indices[key];
            for (int i = 0; i < classIndicesList.Count; i++)
            {
                localBoxesList.Add(boxesList[classIndicesList[i]]);
                localConfidencesList.Add(confidencesList[classIndicesList[i]]);
            }
            using (MatOfRect2d localBoxes = new MatOfRect2d(localBoxesList.ToArray()))
            using (MatOfFloat localConfidences = new MatOfFloat(localConfidencesList.ToArray()))
            using (MatOfInt nmsIndices = new MatOfInt())
            {
                Dnn.NMSBoxes(localBoxes, localConfidences, confThreshold, nmsThreshold, nmsIndices);
                for (int i = 0; i < nmsIndices.total(); i++)
                {
                    int idx = (int)nmsIndices.get(i, 0)[0];
                    nmsBoxesList.Add(localBoxesList[idx]);
                    nmsConfidencesList.Add(localConfidencesList[idx]);
                    nmsClassIdsList.Add(key);
                }
            }
        }
        // Replace the raw candidates with the NMS survivors.
        boxesList = nmsBoxesList;
        classIdsList = nmsClassIdsList;
        confidencesList = nmsConfidencesList;
    }

    // Draw every surviving detection on the frame.
    for (int idx = 0; idx < boxesList.Count; ++idx)
    {
        Rect2d box = boxesList[idx];
        drawPred(classIdsList[idx], confidencesList[idx], box.x, box.y, box.x + box.width, box.y + box.height, frame);
    }
}
/// <summary>
/// Postprocess override for the face detector: decodes boxes/landmarks from the three
/// output blobs, filters by score with NMS, and draws boxes plus 5-point landmarks.
/// Reuses cached instance Mats (boxes_m_c1, boxes_m_c4, confidences_m, ...) across frames.
/// </summary>
protected override void postprocess(Mat frame, List <Mat> outs, Net net, int backend = Dnn.DNN_BACKEND_OPENCV)
{
    // # Decode bboxes and landmarks
    Mat dets = pb.decode(outs[0], outs[1], outs[2]);

    // # Ignore low scores + NMS
    int num = dets.rows();
    // Lazily allocate the reusable buffers, sized from the first frame's row count.
    // NOTE(review): assumes dets.rows() is the same on every frame — confirm against pb.decode.
    if (boxes_m_c1 == null)
    {
        boxes_m_c1 = new Mat(num, 4, CvType.CV_64FC1);
    }
    if (boxes_m_c4 == null)
    {
        boxes_m_c4 = new Mat(num, 1, CvType.CV_64FC4);
    }
    if (confidences_m == null)
    {
        confidences_m = new Mat(num, 1, CvType.CV_32FC1);
    }
    if (boxes == null)
    {
        boxes = new MatOfRect2d(boxes_m_c4);
    }
    if (confidences == null)
    {
        confidences = new MatOfFloat(confidences_m);
    }
    if (indices == null)
    {
        indices = new MatOfInt();
    }

    // First 4 columns of dets are the box corners.
    Mat bboxes = dets.colRange(0, 4);
    bboxes.convertTo(boxes_m_c1, CvType.CV_64FC1);

    // x1,y1,x2,y2 => x,y,w,h (subtract top-left from bottom-right, in place).
    Mat boxes_m_0_2 = boxes_m_c1.colRange(0, 2);
    Mat boxes_m_2_4 = boxes_m_c1.colRange(2, 4);
    Core.subtract(boxes_m_2_4, boxes_m_0_2, boxes_m_2_4);

    // Reinterpret the Nx4 CV_64FC1 data as Nx1 CV_64FC4 (same bytes, raw copy)
    // so it can be wrapped by MatOfRect2d for NMSBoxes.
    MatUtils.copyToMat(new IntPtr(boxes_m_c1.dataAddr()), boxes_m_c4);

    // Column 14 holds the detection score.
    Mat scores = dets.colRange(14, 15);
    scores.copyTo(confidences_m);

    Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

    // # Draw bounding boxes and landmarks on the original image
    for (int i = 0; i < indices.total(); ++i)
    {
        int idx = (int)indices.get(i, 0)[0];

        float[] bbox_arr = new float[4];
        bboxes.get(idx, 0, bbox_arr);
        float[] confidence_arr = new float[1];
        confidences.get(idx, 0, confidence_arr);
        drawPred(0, confidence_arr[0], bbox_arr[0], bbox_arr[1], bbox_arr[2], bbox_arr[3], frame);

        // Columns 4..13 are five (x, y) facial landmark coordinates.
        Mat landmarks = dets.colRange(4, 14);
        float[] landmarks_arr = new float[10];
        landmarks.get(idx, 0, landmarks_arr);
        Point[] points = new Point[] {
            new Point(landmarks_arr[0], landmarks_arr[1]),
            new Point(landmarks_arr[2], landmarks_arr[3]),
            new Point(landmarks_arr[4], landmarks_arr[5]),
            new Point(landmarks_arr[6], landmarks_arr[7]),
            new Point(landmarks_arr[8], landmarks_arr[9])
        };
        drawPredPoints(points, frame);
    }
}
/// <summary>
/// Handles selection changes in the image list: loads the selected file,
/// binarizes it, cleans it up with open/close morphology, finds connected
/// components and records each component's normalized (YOLO-style) box in
/// DetectList, drawing the boxes on the preview images.
/// </summary>
private void ImgList_SelectionChanged(object sender, SelectionChangedEventArgs e)
{
    // Guard: also fires with no selection when the list is cleared.
    if (ImgList == null || ImgList.SelectedItem == null)
    {
        return;
    }
    if (DetectList != null)
    {
        DetectList.Clear();
    }

    int pos = ImgList.SelectedIndex;
    CurrentFN = fileEntries[pos];

    Mat OriImageMat = new Mat(CurrentFN);
    var ThrImageMat = new Mat();

    // Grayscale -> fixed threshold from the slider value.
    using (Mat GrayImage = OriImageMat.CvtColor(ColorConversionCodes.BGR2GRAY))
    {
        Cv2.Threshold(GrayImage, ThrImageMat, int.Parse(SliderValue), 255, ThresholdTypes.Binary);
    }

    // BUGFIX: the previous code built the kernel as a CV_32F Mat over an int[] buffer,
    // which reinterprets the raw int bytes as floats. Use a proper 3x3 rect element.
    using (Mat kernel = Cv2.GetStructuringElement(MorphShapes.Rect, new OpenCvSharp.Size(3, 3)))
    {
        Cv2.MorphologyEx(ThrImageMat, ThrImageMat, MorphTypes.Open, kernel);
        Cv2.MorphologyEx(ThrImageMat, ThrImageMat, MorphTypes.Close, kernel);
    }
    Cv2.BitwiseNot(ThrImageMat, ThrImageMat);

    var label = new MatOfInt();
    var stats = new MatOfInt();
    var centroids = new MatOfDouble();
    var nLabels = Cv2.ConnectedComponentsWithStats(ThrImageMat, label, stats, centroids, PixelConnectivity.Connectivity8, MatType.CV_32S);
    var statsIndexer = stats.GetGenericIndexer <int>();

    // BUGFIX: start at 1 — label 0 is the background component and would
    // otherwise produce a box covering (almost) the whole image.
    for (int i = 1; i < nLabels; i++)
    {
        // Components smaller than the area threshold are treated as noise and skipped.
        if (statsIndexer[i, 4] < 90)
        {
            continue;
        }
        var rect = new OpenCvSharp.Rect
        {
            X = statsIndexer[i, 0],
            Y = statsIndexer[i, 1],
            Width = statsIndexer[i, 2],
            Height = statsIndexer[i, 3]
        };
        Cv2.Rectangle(OriImageMat, rect, new Scalar(33, 113, 243), 3);

        obj temp = new obj();
        // Normalized center coordinates x,y in (0..1).
        // BUGFIX: center is x + w/2; the old code computed (x + w)/2.
        temp.X = (rect.X + rect.Width / 2.0) / OriImageMat.Width;
        temp.Y = (rect.Y + rect.Height / 2.0) / OriImageMat.Height;
        temp.W = (double)rect.Width / (double)OriImageMat.Width;
        temp.H = (double)rect.Height / (double)OriImageMat.Height;
        DetectList.Add(temp);
    }

    OriginalImg.Source = OriImageMat.ToWriteableBitmap();
    ThresholdImg.Source = ThrImageMat.ToWriteableBitmap();
}
/// <summary>
/// Gets the marker identifier.
/// </summary>
/// <returns>The marker id, or -1 if the image is not a valid marker.</returns>
/// <param name="markerImage">Candidate marker image (square, grayscale); thresholded in place.</param>
/// <param name="nRotations">Output: number of 90-degree rotations needed to read the id.</param>
/// <param name="markerDesign">Reference bit design of the marker (size x size).</param>
public static int getMarkerId (Mat markerImage, MatOfInt nRotations, byte[,] markerDesign)
{
    Mat grey = markerImage;

    // Binarize with Otsu so lighting does not affect the bit sampling below.
    Imgproc.threshold (grey, grey, 125, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);

    // Markers are divided in (size+2)x(size+2) regions, of which the inner size x size
    // belongs to marker info; the external border should be entirely black.
    int size = markerDesign.GetLength (0);
    int cellSize = markerImage.rows () / (size + 2);

    for (int y = 0; y < (size + 2); y++) {
        int inc = size + 1;
        if (y == 0 || y == (size + 1))
            inc = 1; // for first and last row, check the whole border

        for (int x = 0; x < (size + 2); x += inc) {
            int cellX = x * cellSize;
            int cellY = y * cellSize;
            using (Mat cell = new Mat (grey, new OpenCVForUnity.Rect (cellX, cellY, cellSize, cellSize))) {
                if (Core.countNonZero (cell) > (cellSize * cellSize) / 2) {
                    return -1; // cannot be a marker: a border element is not black
                }
            }
        }
    }

    // For each inner cell, determine whether it is black (0) or white (1).
    Mat bitMatrix = Mat.zeros (size, size, CvType.CV_8UC1);
    for (int y = 0; y < size; y++) {
        for (int x = 0; x < size; x++) {
            int cellX = (x + 1) * cellSize;
            int cellY = (y + 1) * cellSize;
            using (Mat cell = new Mat (grey, new OpenCVForUnity.Rect (cellX, cellY, cellSize, cellSize))) {
                if (Core.countNonZero (cell) > (cellSize * cellSize) / 2)
                    bitMatrix.put (y, x, new byte[]{ 1 }); //bitMatrix.at<uchar> (y, x) = 1;
            }
        }
    }

    // Check all 4 rotations and keep the one with the smallest hamming distance
    // to the reference design. rotations[0] aliases bitMatrix.
    Mat[] rotations = new Mat[4];
    int[] distances = new int[4];
    rotations [0] = bitMatrix;
    distances [0] = hammDistMarker (rotations [0], markerDesign);
    int first = distances [0];   // smallest distance found so far
    int second = 0;              // rotation index that produced it
    for (int i = 1; i < 4; i++) {
        rotations [i] = rotate (rotations [i - 1]);
        distances [i] = hammDistMarker (rotations [i], markerDesign);
        if (distances [i] < first) {
            first = distances [i];
            second = i;
        }
    }

    nRotations.fromArray (second);

    // Only an exact match (distance 0) yields a valid id.
    int id = (first == 0) ? mat2id (rotations [second]) : -1;

    // BUGFIX: the old code leaked bitMatrix and the rotation mats when no rotation
    // matched exactly (and leaked the placeholder Mats it pre-allocated for the
    // rotations array). Dispose each Mat exactly once on every path.
    for (int i = 0; i < rotations.Length; i++) {
        rotations [i].Dispose ();
    }
    return id;
}
//public delegate void Process(int[] tgrdeteced);
// Detects the tangram pieces in a camera frame on a background thread (UniRx) and
// invokes 'prc' on the main thread with the classification result and the shapes found.
// NOTE(review): works on instance-level Mats (rgbaMat, rgbMat, hsvMat, ...) shared with
// the debug path; all access is serialized through 'mut'.
void tagramDetect(Mat t_rgbaMat, Action <TangramResultModel, List <MyShape> > prc)
{
    List <MyShape> lms = new List <MyShape>();
    System.Diagnostics.Stopwatch watch = null;
    long elapsedMs;
    TangramResultModel trm = null;

    // Run the detection off the main thread, then marshal the result back below.
    Observable.Start(() =>
    {
        mut.WaitOne();

        // Normalize the incoming frame to the working resolution, then (optionally)
        // deskew via the calibrated perspective warp and crop to the play area.
        Imgproc.resize(t_rgbaMat, rgbaMat, new Size(nW_goc, nH_goc));
        watch = System.Diagnostics.Stopwatch.StartNew();
        if (warp != null)
        {
            warp.Init(rgbaMat);
            Mat wMat = warp.warpPerspective(rgbaMat);
            rgbaMat = wMat.submat(0, nH, 0, nW);
        }
        else
        {
            rgbaMat = rgbaMat.submat(0, nH, 0, nW);
        }

        // Fresh debug canvases for this frame.
        all_thresh = Mat.zeros(nH, nW, CvType.CV_8UC3);
        all_thresh_afct = Mat.zeros(nH, nW, CvType.CV_8UC3);
        dbMat = Mat.zeros(nH, nW, CvType.CV_8UC3);
        all_thresh_af = Mat.zeros(nH, nW, CvType.CV_8UC3);

        // Several brightness/contrast variants; different pieces segment better in
        // different ones (see the per-colour inRange selection below).
        rgbaMat.copyTo(rgbMat);
        rgbMat.convertTo(rgbMat2, CvType.CV_8UC3, 0.8, 60);
        rgbMat2.copyTo(rgbMat2copy);
        rgbMat.convertTo(rgbMat3, CvType.CV_8UC3, 1, 60);
        rgbMat.convertTo(rgbMat4, CvType.CV_8UC3, 1.25, 35);
        rgbMat.convertTo(rgbMat, CvType.CV_8UC3, 1.25, 35);
        Imgproc.cvtColor(rgbMat, hsvMat, Imgproc.COLOR_RGB2HSV);
        Imgproc.cvtColor(rgbMat2, hsvMat2, Imgproc.COLOR_RGB2HSV);
        Imgproc.cvtColor(rgbMat3, hsvMat3, Imgproc.COLOR_RGB2HSV);
        // NOTE(review): hsvMat4 is computed from rgbMat3 (not rgbMat4) — confirm intended.
        Imgproc.cvtColor(rgbMat3, hsvMat4, Imgproc.COLOR_RGB2HSV);
        watch.Stop();
        elapsedMs = watch.ElapsedMilliseconds;

        // NOTE(review): 'markers' is never used afterwards.
        Mat markers = Mat.zeros(rgbaMat.size(), CvType.CV_32SC1);
        watch = System.Diagnostics.Stopwatch.StartNew();

        // Pass 1: build one binary mask per piece colour.
        for (int obj_i = 0; obj_i < ls_obj.Length; obj_i++)
        {
            var obj = ls_obj[obj_i];
            // Orange/yellow/green segment on the darkened variant, the rest on hsvMat.
            if (obj_i == (int)tgr.ORANGE | obj_i == (int)tgr.YELLOW | obj_i == (int)tgr.GREEN)
            {
                Core.inRange(hsvMat2, obj.getHSVmin(), obj.getHSVmax(), thresholdMat);
            }
            else if (obj_i == (int)tgr.LIGHTBLUE)
            {
                Core.inRange(hsvMat, obj.getHSVmin(), obj.getHSVmax(), thresholdMat);
            }
            else
            {
                Core.inRange(hsvMat, obj.getHSVmin(), obj.getHSVmax(), thresholdMat);
            }
            if (obj_i == (int)tgr.RED)
            {
                // Red wraps around hue 0; merge the low-hue band into the mask.
                Core.inRange(hsvMat, new Scalar(0, 20, 45), new Scalar(5, 255, 255), thresholdMat2);
                thresholdMat2.copyTo(thresholdMat, thresholdMat2);
            }
            thresholdMatArr[obj_i] = thresholdMat.clone();
        }
        //thresholdMatArr[(int)tgr.LIGHTBLUE].setTo(new Scalar(0), thresholdMatArr[(int)tgr.BLUE]);
        //thresholdMatArr[(int)tgr.LIGHTBLUE].setTo(new Scalar(0), thresholdMatArr[(int)tgr.GREEN]);

        // Pass 2: per colour — erode, find contours, pick the best candidate shape.
        for (int obj_i = 0; obj_i < ls_obj.Length; obj_i++)
        {
            var obj = ls_obj[obj_i];
            all_cts.Clear();
            thresholdMat = thresholdMatArr[obj_i];
            if (toggle_db[obj_i] == true)
            {
                all_thresh.setTo(obj.ColorRGB, thresholdMat);
            }
            // NOTE(review): 'true |' makes this branch unconditional; the LIGHTBLUE/PURPLE
            // branch below then just repeats the same erode (same src/dst) — confirm intended.
            if (true | obj_i == (int)tgr.PURPLE | obj_i == (int)tgr.YELLOW | obj_i == (int)tgr.RED | obj_i == (int)tgr.GREEN | obj_i == (int)tgr.ORANGE)
            {
                Imgproc.erode(thresholdMat, thresholdMat2, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5)), new Point(-1, -1), 1);
            }
            if (obj_i == (int)tgr.LIGHTBLUE | obj_i == (int)tgr.PURPLE)
            {
                Imgproc.erode(thresholdMat, thresholdMat2, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5)), new Point(-1, -1), 1);
            }
            if (toggle_db[obj_i] == true)
            {
                all_thresh_af.setTo(obj.ColorRGB, thresholdMat2);
            }
            // Label map for debugging: each piece gets value obj_i + 1.
            all_thresh_afct.setTo(new Scalar(obj_i + 1), thresholdMat2);
            color_filter.Add(thresholdMat2.clone());
            Imgproc.findContours(thresholdMat2, all_cts, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
            Scalar c = obj.getColor();

            // Drop contours whose area is implausible for this piece.
            for (int ct_i = 0; ct_i < all_cts.Count; ct_i++)
            {
                double area = Imgproc.contourArea(all_cts[ct_i]);
                // if (area < MIN_OBJECT_AREA)
                if (area < MIN_OBJECT_AREAS[obj_i] * 0.55)
                {
                    all_cts.RemoveAt(ct_i);
                    ct_i--;
                }
                if (area > MAX_OBJECT_AREAS[obj_i] * 1.3)
                {
                    all_cts.RemoveAt(ct_i);
                    ct_i--;
                }
            }

            MyShape chon = null;            // candidate chosen for this colour
            MyShape ms = new MyShape();
            float dt = 1000000;             // NOTE(review): never updated, so the LAST valid candidate wins — confirm intended
            for (int ct_i = 0; ct_i < all_cts.Count; ct_i++)
            {
                var ct = all_cts[ct_i];
                // Polygonal simplification of the raw contour.
                var peri = Imgproc.arcLength(new MatOfPoint2f(ct.toArray()), true);
                var epsilon = 0.1 * peri;
                if (obj_i == (int)tgr.ORANGE || obj_i == (int)tgr.YELLOW)
                {
                    epsilon = 0.065 * peri;
                }
                Imgproc.approxPolyDP(new MatOfPoint2f(ct.toArray()), approx_ct, epsilon, true);

                // Convex hull of the raw contour (indices -> points).
                MatOfInt pts_cvh = new MatOfInt();
                Imgproc.convexHull(ct, pts_cvh, true);
                var cvh_numPts = pts_cvh.toArray().Length;
                Point[] cvh_pts = new Point[cvh_numPts];
                var ct_pts = ct.toArray();
                for (int i = 0; i < cvh_numPts; i++)
                {
                    var i1 = pts_cvh.toArray()[i];
                    var p1 = ct_pts[i1];
                    cvh_pts[i] = p1;
                    try
                    {
                        if (debug == true)
                        {
                            var i2 = pts_cvh.toArray()[(i + 1) % cvh_numPts];
                            var p2 = ct_pts[i2];
                            Imgproc.circle(rgbMat2, p1, 1, c, 2);
                        }
                    }
                    catch (Exception e)
                    {
                        Utilities.LogFormat("Here3:{0},{1},{2}", rgbMat2 == null, p1 == null, c == null);
                        Utilities.Log("Exception is {0}", e.ToString());
                        Utilities.Log("Trace is {0}", e.StackTrace.ToString());
                    }
                }

                // Simplify the hull as well; its vertices are the shape corners.
                MatOfPoint2f approx_cvh = new MatOfPoint2f();
                var epsilon2 = peri * 0.1;
                if (obj_i == (int)tgr.ORANGE)
                {
                    epsilon2 = peri * 0.065;
                }
                Imgproc.approxPolyDP(new MatOfPoint2f(cvh_pts), approx_cvh, epsilon2, true);
                var ct_ori = new MatOfPoint(ct.toArray());
                MatOfPoint approx_ct2 = new MatOfPoint(approx_ct.toArray());
                List <MatOfPoint> approx_cvh2 = new List <MatOfPoint>();
                approx_cvh2.Add(new MatOfPoint(approx_cvh.toArray()));

                // Centroid of the simplified hull.
                var mu = Imgproc.moments(approx_cvh2[0], true);
                cterTgr.x = mu.m10 / mu.m00;
                cterTgr.y = mu.m01 / mu.m00;

                // Tangram pieces are triangles or quadrilaterals only.
                if (approx_ct2.size().height == 3 | approx_ct2.size().height == 4)
                {
                    var points = approx_cvh2[0].toArray();
                    var numpoints = points.Length;
                    ms._id = obj_i;
                    ms.ps = new Point[numpoints];
                    // Per-piece scale factor: inflate corners away from the centroid to
                    // compensate for the erosion above.
                    double rat = 1.16;
                    if (obj_i == (int)tgr.PURPLE) { rat = 1.20; }
                    else if (obj_i == (int)tgr.LIGHTBLUE) { rat = 1.20; }
                    else if (obj_i == (int)tgr.RED | obj_i == (int)tgr.BLUE) { rat = 1.09; }
                    else if (obj_i == (int)tgr.YELLOW) { rat = 1.10; }
                    else if (obj_i == (int)tgr.ORANGE) { rat = 1.10; }
                    else if (obj_i == (int)tgr.GREEN) { rat = 1.10; }

                    var ind_huyen = 0;  // index of the hypotenuse start (triangles)
                    var max = -1d;
                    if (numpoints == 3 || numpoints == 4)
                    {
                        for (int p_i = 0; p_i < numpoints; p_i++)
                        {
                            var p = points[p_i];
                            var p2 = points[(p_i + 1) % numpoints];
                            // Inflate the corner away from the centroid and round to 2 decimals.
                            var vect = p - cterTgr;
                            vect = vect * rat;
                            var p_new = cterTgr + vect;
                            points[p_i].x = (int)(p_new.x * 100) / 100f;
                            points[p_i].y = (int)(p_new.y * 100) / 100f;
                            if (numpoints == 4)
                            {
                                ms.ps[p_i] = p_new;
                            }
                            if (numpoints == 3)
                            {
                                // Track the longest edge = the hypotenuse.
                                var vt = p2 - p;
                                var length = vt.x * vt.x + vt.y * vt.y;
                                if (length > max)
                                {
                                    ind_huyen = p_i;
                                    max = length;
                                }
                            }
                        }
                    }
                    if (numpoints == 3)
                    {
                        // Canonical triangle order: right-angle corner first, then the
                        // two acute (hypotenuse) corners.
                        var i_nhon1 = ind_huyen;
                        var i_nhon2 = (ind_huyen + 1) % numpoints;
                        var i_vuong = (ind_huyen + 2) % numpoints;
                        ms.ps[0] = points[i_vuong];
                        ms.ps[1] = points[i_nhon1];
                        ms.ps[2] = points[i_nhon2];
                    }
                    else if (numpoints == 4)
                    {
                        if (obj_i == (int)tgr.ORANGE)
                        {
                            // Parallelogram: identify the acute corner via the longer
                            // diagonal, rotate the order to start there, then decide the
                            // flip state from the adjacent edge lengths.
                            var vt_cheo1 = ms.ps[0] - ms.ps[2];
                            var vt_cheo2 = ms.ps[1] - ms.ps[3];
                            var leng_cheo1 = vt_cheo1.x * vt_cheo1.x + vt_cheo1.y * vt_cheo1.y;
                            var leng_cheo2 = vt_cheo2.x * vt_cheo2.x + vt_cheo2.y * vt_cheo2.y;
                            var i_nhon = 0;
                            if (leng_cheo2 > leng_cheo1)
                            {
                                i_nhon = 1;
                            }
                            ms.ps[0] = points[i_nhon];
                            ms.ps[1] = points[(i_nhon + 1)];
                            ms.ps[2] = points[(i_nhon + 2)];
                            ms.ps[3] = points[(i_nhon + 3) % numpoints];
                            var i_prvNhon = (i_nhon + 4 - 1) % numpoints;
                            var i_aftNhon = i_nhon + 1;
                            var vt_prvNhon = points[i_prvNhon] - points[i_nhon];
                            var vt_aftNhon = points[i_aftNhon] - points[i_nhon];
                            var len_prvNhon = vt_prvNhon.x * vt_prvNhon.x + vt_prvNhon.y * vt_prvNhon.y;
                            var len_aftNhon = vt_aftNhon.x * vt_aftNhon.x + vt_aftNhon.y * vt_aftNhon.y;
                            Imgproc.line(dbMat, points[i_prvNhon], points[i_nhon], c, 1);
                            if (len_prvNhon > len_aftNhon)
                            {
                                ms.isFlip = true;
                                Imgproc.putText(dbMat, " IsFLIP", ms.ps[3], 1, 1, c, 1);
                            }
                            else
                            {
                                ms.isFlip = false;
                                Imgproc.putText(dbMat, " IsNOTFLIP", ms.ps[3], 1, 1, c, 1);
                            }
                        }
                    }
                    // Prefer the candidate closest to the image center.
                    var centerMat = new Point(rgbMat.width() / 2f, rgbMat.height() / 2f);
                    var vtLech = centerMat - cterTgr;
                    var dt2 = vtLech.x * vtLech.x + vtLech.y * vtLech.y;
                    if (dt2 < dt)
                    {
                        chon = ms;
                    }
                }
                try
                {
                    Imgproc.circle(rgbMat, cterTgr, 1, c, 1);
                    Imgproc.putText(rgbMat, mu.m00.ToString(), cterTgr, 1, 1, c, 1);
                }
                catch (Exception e)
                {
                    Utilities.LogFormat("Here2:{0},{1},{2}", rgbMat == null, cterTgr == null, c == null);
                    Utilities.Log("Exception is {0}", e.ToString());
                    Utilities.Log("Trace is {0}", e.StackTrace.ToString());
                }
                //if (approx_ct2.size().height == 3 | approx_ct2.size().height == 4) break;
            }

            // Draw the chosen shape (if any) on the debug canvases and keep it.
            if (chon != null)
            {
                lms.Add(chon);
                var ps = chon.ps;
                for (int i = 0; i < ps.Length; i++)
                {
                    var p1 = ps[i];
                    var p2 = ps[(i + 1) % ps.Length];
                    try
                    {
                        Imgproc.line(rgbMat2, p1, p2, c, 1);
                        Imgproc.line(all_thresh_afct, p1, p2, new Scalar(255, 255, 255), 1);
                        Imgproc.line(dbMat, p1, p2, c, 1);
                        Imgproc.circle(dbMat, p1, 1, c);
                    }
                    catch (Exception e)
                    {
                        Utilities.LogFormat("Here1:{0},{1},{2}", rgbMat2 == null, p1 == null, p2 == null);
                        Utilities.Log("Exception is {0}", e.ToString());
                        Utilities.Log("Trace is {0}", e.StackTrace.ToString());
                    }
                }
            }
            watch.Stop();
            elapsedMs = watch.ElapsedMilliseconds;
        }

        // Classify the collected shapes against the known tangram figures.
        TangramShape msl = new TangramShape();
        msl.datas = lms;
        var json = JsonUtility.ToJson(msl); // NOTE(review): 'json' is never used
        watch = System.Diagnostics.Stopwatch.StartNew();
        trm = tangramFeatureModelList.Detect(msl.datas.ToArray());
        watch.Stop();
        elapsedMs = watch.ElapsedMilliseconds;
        mut.ReleaseMutex();
    }).ObserveOnMainThread().Subscribe((rx) =>
    {
        // Back on the main thread: report the result, then refresh the debug textures.
        prc(trm, lms);
        if (debug == true)
        {
            mut.WaitOne();
            if (texture != null && debug == true)
            {
                Utils.matToTexture2D(dbMat, texture);
            }
            if (dbText1 != null && debug == true)
            {
                Utils.matToTexture2D(rgbMat2copy, dbText1);
            }
            if (dbText2 != null && debug == true)
            {
                Utils.matToTexture2D(rgbMat3, dbText2);
            }
            if (dbText3 != null && debug == true)
            {
                Utils.matToTexture2D(rgbMat4, dbText3);
            }
            if (dbText4 != null && debug == true)
            {
                Utils.matToTexture2D(rgbMat, dbText4);
            }
            // Scale the small label ids (1..7) into a visible grey range.
            all_thresh_afct = all_thresh_afct * 25;
            Imgproc.cvtColor(rgbMat2, rgbMat2, Imgproc.COLOR_RGBA2RGB);
            Imgproc.cvtColor(all_thresh, all_thresh, Imgproc.COLOR_RGBA2RGB);
            Mat a = new Mat(all_thresh.size(), CvType.CV_8UC3);
            Core.addWeighted(all_thresh, 0.2, rgbMat2, 0.8, 0, a);
            if (dbText5 != null && debug == true)
            {
                Utils.matToTexture2D(a, dbText5);
            }
            if (dbText6 != null && debug == true)
            {
                Utils.matToTexture2D(all_thresh, dbText6);
            }
            if (dbText7 != null && debug == true)
            {
                Utils.matToTexture2D(all_thresh_afct, dbText7);
            }
            if (dbText8 != null && debug == true)
            {
                Utils.matToTexture2D(all_thresh_af, dbText8);
            }
            mut.ReleaseMutex();
        }
    });
}
public void FHTSample() { const string imPath = @"_data\image\building.jpg"; using (var image = new Mat(imPath, ImreadModes.Grayscale)) using (var hough = new Mat()) using (var canny = new Mat()) { Cv2.Canny(image, canny, 50, 200, 3); CvXImgProc.FastHoughTransform(canny, hough, MatType.CV_32S /*C1*/, AngleRangeOption.ARO_315_135, HoughOP.FHT_ADD, HoughDeskewOption.DESKEW); var lines = new List <Vec4i>(); GetLocalExtr(lines, canny, hough, 255f * 0.3f * Math.Min(canny.Rows, canny.Cols), 50); var cannyColor = new Mat(); Cv2.CvtColor(canny, cannyColor, ColorConversionCodes.GRAY2BGR); for (var i = 0; i < lines.Count; i++) { var line = lines[i]; Cv2.Line(cannyColor, new Point(line.Item0, line.Item1), new Point(line.Item2, line.Item3), Scalar.Red); } //cannyColor.SaveImage("cannycolor.png"); ShowImagesWhenDebugMode(image, canny, cannyColor); } bool GetLocalExtr(List <Vec4i> lines, Mat src, Mat fht, float minWeight, int maxCount) { const int MAX_LEN = 10_000; var weightedPoints = new List <KeyValuePair <int, Point> >(); for (var y = 0; y < fht.Rows; ++y) { if (weightedPoints.Count > MAX_LEN) { break; } var fhtMat = new MatOfInt(fht); var fhtIndexer = fhtMat.GetIndexer(); var pLineY = Math.Max(y - 1, 0); var cLineY = y; var nLineY = Math.Min(y + 1, fht.Rows - 1); for (var x = 0; x < fht.Cols; ++x) { if (weightedPoints.Count > MAX_LEN) { break; } var value = fhtIndexer[cLineY, x]; if (value >= minWeight) { var isLocalMax = 0; var start = Math.Max(x - 1, 0); var end = Math.Min(x + 1, fht.Cols - 1); for (var xx = start; xx < end; ++xx) { var pLine = fhtIndexer[pLineY, xx]; var cLine = fhtIndexer[cLineY, xx]; var nLine = fhtIndexer[nLineY, xx]; if (!incIfGreater(value, pLine, ref isLocalMax) || !incIfGreater(value, cLine, ref isLocalMax) || !incIfGreater(value, nLine, ref isLocalMax)) { isLocalMax = 0; break; } } if (isLocalMax > 0) { weightedPoints.Add(new KeyValuePair <int, Point>(value, new Point(x, y))); } } } } if (weightedPoints.Count == 0) { return(true); } // Sort 
WeightedPoints weightedPoints = weightedPoints.OrderByDescending(x => x.Key).ToList(); weightedPoints = weightedPoints.Take(maxCount).ToList(); for (var i = 0; i < weightedPoints.Count; i++) { lines.Add(CvXImgProc.HoughPoint2Line(weightedPoints[i].Value, src)); } return(true); } bool incIfGreater(int a, int b, ref int value) { if (/*value == 0 || */ a < b) { return(false); } if (a > b) { ++(value); } return(true); } }
private void HandPoseEstimationProcess(Mat rgbaMat) { // rgbaMat.copyTo(mRgba); float DOWNSCALE_RATIO = 1.0f; if (enableDownScale) { mRgba = imageOptimizationHelper.GetDownScaleMat(rgbaMat); DOWNSCALE_RATIO = imageOptimizationHelper.downscaleRatio; } else { // mRgba = rgbaMat; rgbaMat.copyTo(mRgba); DOWNSCALE_RATIO = 1.0f; } // Imgproc.blur(mRgba, mRgba, new Size(5,5)); Imgproc.GaussianBlur(mRgba, mRgba, new Size(3, 3), 1, 1); // Imgproc.medianBlur(mRgba, mRgba, 3); if (!isColorSelected) { return; } List <MatOfPoint> contours = detector.GetContours(); detector.Process(mRgba); // Debug.Log ("Contours count: " + contours.Count); if (contours.Count <= 0) { return; } RotatedRect rect = Imgproc.minAreaRect(new MatOfPoint2f(contours[0].toArray())); double boundWidth = rect.size.width; double boundHeight = rect.size.height; int boundPos = 0; for (int i = 1; i < contours.Count; i++) { rect = Imgproc.minAreaRect(new MatOfPoint2f(contours[i].toArray())); if (rect.size.width * rect.size.height > boundWidth * boundHeight) { boundWidth = rect.size.width; boundHeight = rect.size.height; boundPos = i; } } MatOfPoint contour = contours[boundPos]; OpenCVForUnity.CoreModule.Rect boundRect = Imgproc.boundingRect(new MatOfPoint(contour.toArray())); Imgproc.rectangle(mRgba, boundRect.tl(), boundRect.br(), CONTOUR_COLOR_WHITE, 2, 8, 0); // Debug.Log ( // " Row start [" + // (int)boundRect.tl ().y + "] row end [" + // (int)boundRect.br ().y + "] Col start [" + // (int)boundRect.tl ().x + "] Col end [" + // (int)boundRect.br ().x + "]"); double a = boundRect.br().y - boundRect.tl().y; a = a * 0.7; a = boundRect.tl().y + a; // Debug.Log (" A [" + a + "] br y - tl y = [" + (boundRect.br ().y - boundRect.tl ().y) + "]"); // Imgproc.rectangle(mRgba, boundRect.tl(), new Point(boundRect.br().x, a), CONTOUR_COLOR, 2, 8, 0); MatOfPoint2f pointMat = new MatOfPoint2f(); Imgproc.approxPolyDP(new MatOfPoint2f(contour.toArray()), pointMat, 3, true); contour = new MatOfPoint(pointMat.toArray()); 
MatOfInt hull = new MatOfInt(); MatOfInt4 convexDefect = new MatOfInt4(); Imgproc.convexHull(new MatOfPoint(contour.toArray()), hull); if (hull.toArray().Length < 3) { return; } Imgproc.convexityDefects(new MatOfPoint(contour.toArray()), hull, convexDefect); List <MatOfPoint> hullPoints = new List <MatOfPoint>(); List <Point> listPo = new List <Point>(); for (int j = 0; j < hull.toList().Count; j++) { listPo.Add(contour.toList()[hull.toList()[j]] * DOWNSCALE_RATIO); } /* * MatOfPoint e = new MatOfPoint(); * e.fromList(listPo); * hullPoints.Add(e); * * List<Point> listPoDefect = new List<Point>(); * * if (convexDefect.rows() > 0) * { * List<int> convexDefectList = convexDefect.toList(); * List<Point> contourList = contour.toList(); * for (int j = 0; j < convexDefectList.Count; j = j + 4) * { * Point farPoint = contourList[convexDefectList[j + 2]]; * int depth = convexDefectList[j + 3]; * if (depth > threshholdDetect && farPoint.y < a) * { * listPoDefect.Add(contourList[convexDefectList[j + 2]]); * Imgproc.line(rgbaMat, farPoint, listPo[convexDefectList[j + 2]], new Scalar(255, 0, 0, 255),1,1); * } * // Debug.Log ("convexDefectList [" + j + "] " + convexDefectList [j + 3]); * } * }*/ // Debug.Log ("hull: " + hull.toList ()); // if (convexDefect.rows () > 0) { // Debug.Log ("defects: " + convexDefect.toList ()); // } //Imgproc.drawContours (rgbaMat, hullPoints, -1, CONTOUR_COLOR, 3); for (int p = 0; p < listPo.Count; p++) { if (p % 2 == 0) { Imgproc.circle(rgbaMat, listPo[p], 6, new Scalar(255, 0, 0, 255), -1); // Imgproc.putText(rgbaMat,p.ToString(),listPo[p],1,1,new Scalar(255,0,0,255)); // check if close List <Point> fLMscaled = OpenCVForUnityUtils.ConvertVector2ListToPointList(facePoints); for (int q = 0; q < fLMscaled.Count; q++) { if (ifLessThanDPoint(listPo[p], fLMscaled[q], 8)) { //Point1 = listPo[p]; //Point2 = fLMscaled[q]; handPoint = p; facePoint = q; print(Point1 + " " + Point2); } } if (p == handPoint && facePoint != 0) { Point1 = listPo[p]; Point2 = 
fLMscaled[facePoint]; Imgproc.line(rgbaMat, Point1, Point2, new Scalar(255, 255, 255, 255)); } } } // int defectsTotal = (int)convexDefect.total(); // Debug.Log ("Defect total " + defectsTotal); /* numberOfFingers = listPoDefect.Count; * if (numberOfFingers > 5) * numberOfFingers = 5;/ * * // Debug.Log ("numberOfFingers " + numberOfFingers); * * // Imgproc.putText (rgbaMat, "" + numberOfFingers, new Point (rgbaMat.cols () / 2, rgbaMat.rows () / 2), Imgproc.FONT_HERSHEY_PLAIN, 4.0, new Scalar (255, 255, 255, 255), 6, Imgproc.LINE_AA, false); * * * /* foreach (Point p in listPoDefect) { * * Point tempp = GetNearestL(p, listPo); * tempp = ConvertDownscale(tempp, DOWNSCALE_RATIO); * Point p2 = ConvertDownscale(p, DOWNSCALE_RATIO); * * Imgproc.circle (rgbaMat, tempp, 6, new Scalar (0, 0, 255, 255), -1); * Imgproc.circle(rgbaMat, p2, 6, new Scalar(255, 0, 255, 255), -1); * }*/ }
// // C++: static Ptr_FREAK cv::xfeatures2d::FREAK::create(bool orientationNormalized = true, bool scaleNormalized = true, float patternScale = 22.0f, int nOctaves = 4, vector_int selectedPairs = std::vector<int>()) // //javadoc: FREAK::create(orientationNormalized, scaleNormalized, patternScale, nOctaves, selectedPairs) public static FREAK create(bool orientationNormalized, bool scaleNormalized, float patternScale, int nOctaves, MatOfInt selectedPairs) { if (selectedPairs != null) { selectedPairs.ThrowIfDisposed(); } #if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER Mat selectedPairs_mat = selectedPairs; FREAK retVal = FREAK.__fromPtr__(xfeatures2d_FREAK_create_10(orientationNormalized, scaleNormalized, patternScale, nOctaves, selectedPairs_mat.nativeObj)); return(retVal); #else return(null); #endif }
//============================================================ //======================outer defect========================== static void FindContour_and_outer_defect(Mat img, List <Point[]> contours_final, ref int nLabels, out int[,] stats, string mode) { // variable OpenCvSharp.Point[][] temp = new Point[1][]; //0: 內圈 ; 1: 外圈 OpenCvSharp.Point[] contour_now; int out_defect_size_min = 0; if (mode == "inner") { contour_now = contours_final[0]; } else { contour_now = contours_final[1]; } // Convex hull var ellipsecontour = Cv2.FitEllipse(contour_now); Mat convex_mask_img = Mat.Zeros(img.Size(), MatType.CV_8UC1); Cv2.Ellipse(convex_mask_img, ellipsecontour, 255, -1); // Contour temp[0] = contour_now; Mat contour_mask_img = Mat.Zeros(img.Size(), MatType.CV_8UC1); Cv2.DrawContours(contour_mask_img, temp, -1, 255, -1); Mat diff_image = contour_mask_img ^ convex_mask_img; //Opening Mat kernel = Mat.Ones(5, 5, MatType.CV_8UC1);//改變凹角大小 diff_image = diff_image.MorphologyEx(MorphTypes.Open, kernel); //=========================吃掉邊界======================================= //temp[0] = contour_now; //Cv2.DrawContours(diff_image, temp, -1, 0, 4); //================================================================ convex_mask_img.SaveImage("./" + mode + "convex" + ".jpg"); contour_mask_img.SaveImage("./" + mode + "contour" + ".jpg"); diff_image.SaveImage("./" + mode + "mask" + ".jpg"); //Connected Component var labelMat = new MatOfInt(); var statsMat = new MatOfInt();// Row: number of labels Column: 5 var centroidsMat = new MatOfDouble(); nLabels = Cv2.ConnectedComponentsWithStats(diff_image, labelMat, statsMat, centroidsMat); var labels = labelMat.ToRectangularArray(); stats = statsMat.ToRectangularArray(); var centroids = centroidsMat.ToRectangularArray(); }
// // C++: vector_int cv::xfeatures2d::PCTSignatures::getInitSeedIndexes() // /** * Initial seeds (initial number of clusters) for the k-means algorithm. * return automatically generated */ public MatOfInt getInitSeedIndexes() { ThrowIfDisposed(); return(MatOfInt.fromNativeAddr(xfeatures2d_PCTSignatures_getInitSeedIndexes_10(nativeObj))); }
/// <summary> /// Get result form all output /// </summary> /// <param name="output"></param> /// <param name="image"></param> /// <param name="threshold"></param> /// <param name="nmsThreshold">threshold for nms</param> /// <param name="nms">Enable Non-maximum suppression or not</param> private static void GetResult(IEnumerable <Mat> output, Mat image, float threshold, float nmsThreshold, bool nms = true) { //for nms List <int> classIds = new List <int>(); List <float> confidences = new List <float>(); List <float> probabilities = new List <float>(); List <Rect2d> boxes = new List <Rect2d>(); var w = image.width(); var h = image.height(); /* * YOLO3 COCO trainval output * 0 1 : center 2 3 : w/h * 4 : confidence 5 ~ 84 : class probability */ const int prefix = 5; //skip 0~4 foreach (Mat prob in output) { for (int i = 0; i < prob.rows(); i++) { var confidence = (float)prob.get(i, 4)[0]; if (confidence > threshold) { //get classes probability Core.MinMaxLocResult minAndMax = Core.minMaxLoc(prob.row(i).colRange(prefix, prob.cols())); int classes = (int)minAndMax.maxLoc.x; var probability = (float)prob.get(i, classes + prefix)[0]; if (probability > threshold) //more accuracy, you can cancel it { //get center and width/height float centerX = (float)prob.get(i, 0)[0] * w; float centerY = (float)prob.get(i, 1)[0] * h; float width = (float)prob.get(i, 2)[0] * w; float height = (float)prob.get(i, 3)[0] * h; if (!nms) { // draw result (if don't use NMSBoxes) Draw(image, classes, confidence, probability, centerX, centerY, width, height); continue; } //put data to list for NMSBoxes classIds.Add(classes); confidences.Add(confidence); probabilities.Add(probability); boxes.Add(new Rect2d(centerX, centerY, width, height)); } } } } if (!nms) { return; } //using non-maximum suppression to reduce overlapping low confidence box MatOfRect2d bboxes = new MatOfRect2d(); MatOfFloat scores = new MatOfFloat(); MatOfInt indices = new MatOfInt(); bboxes.fromList(boxes); 
scores.fromList(probabilities); Dnn.NMSBoxes(bboxes, scores, threshold, nmsThreshold, indices); int[] indicesA = indices.toArray(); foreach (var i in indicesA) { var box = boxes[i]; Draw(image, classIds[i], confidences[i], probabilities[i], box.x, box.y, box.width, box.height); } }
void Process() { string imText = "DRAW PATTERN"; Core.putText(frame_pot, imText, new Point(110, 50), Core.FONT_HERSHEY_COMPLEX, 1.0, new Scalar(255, 0, 0), 2); Mat hierarchy = new Mat (); List<MatOfPoint> contours = new List<MatOfPoint> (); MatOfPoint maxitem = new MatOfPoint (); MatOfInt hullInt = new MatOfInt (); frameclone = frame_thresh_final.clone (); Imgproc.findContours (frameclone, contours, hierarchy, Imgproc.RETR_LIST , Imgproc.CHAIN_APPROX_NONE); maxitem = contours [0]; n = 0; for(int i=0; i<contours.Count; i++){ if(contours[i].total() > maxitem.total()){ maxitem = contours[i]; n=i; } } OpenCVForUnity.Rect bRect = Imgproc.boundingRect (maxitem); int bRect_height = bRect.height; int bRect_width = bRect.width; if (bRect_height < 200 || bRect_width < 200) return; // Drawing Contours on the Frame //Imgproc.drawContours (frame_pot, contours, n, new Scalar(0, 255, 0), 2); Imgproc.convexHull (maxitem, hullInt); List<Point> maxitemPointList = maxitem.toList (); List<int> hullIntList = hullInt.toList (); List<Point> hullPointList = new List<Point> (); for (int j=0; j < hullInt.toList().Count; j++) { hullPointList.Add (maxitemPointList [hullIntList [j]]); } MatOfPoint hullPointMat = new MatOfPoint (); hullPointMat.fromList (hullPointList); List<MatOfPoint> hullPoints = new List<MatOfPoint> (); hullPoints.Add (hullPointMat); // Drawing Convex Hull on the Frame //Imgproc.drawContours (frame_pot, hullPoints, -1, new Scalar (0, 0, 255), 2); MatOfInt4 convexityDef = new MatOfInt4 (); Imgproc.convexityDefects (maxitem, hullInt, convexityDef); List<int> conDefIntList = convexityDef.toList (); List<Point> startpts = new List<Point> (); List<Point> farpts = new List<Point> (); List<Point> endpts = new List<Point> (); int tolerance = (int)(bRect_height/6); //Debug.Log ("Tolerance: " + tolerance); int[] defarray = new int[100]; int coordX = 10000, coordY = 10000; int x1 = (int) sphere1.transform.position.x; int y1 = (int) sphere1.transform.position.y; int x2 = (int) 
sphere2.transform.position.x; int y2 = (int) sphere2.transform.position.y; int x3 = (int) sphere3.transform.position.x; int y3 = (int) sphere3.transform.position.y; int x4 = (int) sphere4.transform.position.x; int y4 = (int) sphere4.transform.position.y; Point pointer = new Point(); for(int i=0; i < conDefIntList.Count/4 ; i++) { startpts.Add(maxitemPointList[conDefIntList[4*i]]); endpts.Add(maxitemPointList[conDefIntList[4*i+1]]); farpts.Add(maxitemPointList[conDefIntList[4*i+2]]); Point s = startpts[i]; Point e = endpts[i]; Point f = farpts[i]; if (GetDistance(s,f) > tolerance) { //Core.circle(frame_pot, s, 15, new Scalar(255, 225, 0), -1); if (s.y < coordY) { pointer = s; coordY = (int) s.y; coordX = (int) s.x; } } } Core.circle(frame_pot, pointer, 15, new Scalar(255, 225, 0), -1); coordX = coordX - 240; coordY = -coordY + 320; if (coordX > x1-50 && coordX < x1+50 && coordY > y1-50 && coordY < y1+50) { if (previous.Equals('1')) return; input += "1"; AddLine(previous, '1'); previous = '1'; Material mat1 = sphere1.GetComponent<Renderer>().material; mat1.color = Color.yellow; StartCoroutine(WaitAndChangeColor("1")); } else if (coordX > x2-50 && coordX < x2+50 && coordY > y2-50 && coordY < y2+50) { if (previous.Equals('2')) return; input += "2"; AddLine(previous, '2'); previous = '2'; Material mat2 = sphere2.GetComponent<Renderer>().material; mat2.color = Color.yellow; StartCoroutine(WaitAndChangeColor("2")); } else if (coordX > x3-50 && coordX < x3+50 && coordY > y3-50 && coordY < y3+50) { if (previous.Equals('3')) return; input += "3"; AddLine(previous, '3'); previous = '3'; Material mat3 = sphere3.GetComponent<Renderer>().material; mat3.color = Color.yellow; StartCoroutine(WaitAndChangeColor("3")); } else if (coordX > x4-50 && coordX < x4+50 && coordY > y4-50 && coordY < y4+50) { if (previous.Equals('4')) return; input += "4"; AddLine(previous, '4'); previous = '4'; Material mat4 = sphere4.GetComponent<Renderer>().material; mat4.color = Color.yellow; 
StartCoroutine(WaitAndChangeColor("4")); } if (input.Length == password.Length) { auth = true; if (input.Equals(password)) { correct = true; } else { correct = false; } } }
/// <summary> /// Postprocess the specified frame, outs and net. /// </summary> /// <param name="frame">Frame.</param> /// <param name="outs">Outs.</param> /// <param name="net">Net.</param> private void postprocess(Mat frame, List <Mat> outs, Net net) { string outLayerType = outBlobTypes[0]; List <int> classIdsList = new List <int>(); List <float> confidencesList = new List <float>(); List <OpenCVForUnity.CoreModule.Rect> boxesList = new List <OpenCVForUnity.CoreModule.Rect>(); if (outLayerType == "Region") { for (int i = 0; i < outs.Count; ++i) { // Network produces output blob with a shape NxC where N is a number of // detected objects and C is a number of classes + 4 where the first 4 // numbers are [center_x, center_y, width, height] //Debug.Log("outs[i].ToString() " + outs[i].ToString()); float[] positionData = new float[5]; float[] confidenceData = new float[outs[i].cols() - 5]; for (int p = 0; p < outs[i].rows(); p++) { outs[i].get(p, 0, positionData); outs[i].get(p, 5, confidenceData); int maxIdx = confidenceData.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? 
max : working).I; float confidence = confidenceData[maxIdx]; if (confidence > confThreshold) { int centerX = (int)(positionData[0] * frame.cols()); int centerY = (int)(positionData[1] * frame.rows()); int width = (int)(positionData[2] * frame.cols()); int height = (int)(positionData[3] * frame.rows()); int left = centerX - width / 2; int top = centerY - height / 2; classIdsList.Add(maxIdx); confidencesList.Add((float)confidence); boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height)); } } } } else { Debug.Log("Unknown output layer type: " + outLayerType); } MatOfRect boxes = new MatOfRect(); boxes.fromList(boxesList); MatOfFloat confidences = new MatOfFloat(); confidences.fromList(confidencesList); MatOfInt indices = new MatOfInt(); Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices); //Check the Language selected switch (menuVariables.GetLanguage()) { case "EN": vocOffset = 0; break; case "ES": vocOffset = 80; break; case "FR": vocOffset = 160; break; case "DE": vocOffset = 240; break; case "IT": vocOffset = 320; break; default: vocOffset = 0; break; } //Draw the bouding box only if its in the center of the image (On Cursor) for (int i = 0; i < indices.total(); ++i) { int idx = (int)indices.get(i, 0)[0]; OpenCVForUnity.CoreModule.Rect box = boxesList[idx]; if (isOnCursor(box, cursorObject.GetComponent <Cursor>())) { if (minigameList[wordFoundCounter] == classIdsList[idx]) { drawPred(vocOffset + classIdsList[idx], confidencesList[idx], box.x, box.y, box.x + box.width, box.y + box.height, frame); //Update the text summarizing the object encountered vocIDList.Add(classIdsList[idx]); //vocLearn.text += classNames[classIdsList[idx]] + "\t" + classNames[240 + classIdsList[idx]] + "\t" + classNames[160 + classIdsList[idx]] + "\t" + classNames[320 + classIdsList[idx]] + "\n"; EnglishText.text += "\n" + classNames[classIdsList[idx]]; SpanishText.text += "\n" + classNames[80 + classIdsList[idx]]; FrenchText.text += "\n" + 
classNames[160 + classIdsList[idx]]; GermanText.text += "\n" + classNames[240 + classIdsList[idx]]; ItalianText.text += "\n" + classNames[320 + classIdsList[idx]]; wordFound = true; Debug.Log("You found the" + classNames[classIdsList[idx]]); } } } indices.Dispose(); boxes.Dispose(); confidences.Dispose(); }
// // C++: static Ptr_BRISK cv::BRISK::create(int thresh, int octaves, vector_float radiusList, vector_int numberList, float dMax = 5.85f, float dMin = 8.2f, vector_int indexChange = std::vector<int>()) // /** * The BRISK constructor for a custom pattern, detection threshold and octaves * * param thresh AGAST detection threshold score. * param octaves detection octaves. Use 0 to do single scale. * param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for * keypoint scale 1). * param numberList defines the number of sampling points on the sampling circle. Must be the same * size as radiusList.. * param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint * scale 1). * param dMin threshold for the long pairings used for orientation determination (in pixels for * keypoint scale 1). * param indexChange index remapping of the bits. * return automatically generated */ public static BRISK create(int thresh, int octaves, MatOfFloat radiusList, MatOfInt numberList, float dMax, float dMin, MatOfInt indexChange) { if (radiusList != null) { radiusList.ThrowIfDisposed(); } if (numberList != null) { numberList.ThrowIfDisposed(); } if (indexChange != null) { indexChange.ThrowIfDisposed(); } Mat radiusList_mat = radiusList; Mat numberList_mat = numberList; Mat indexChange_mat = indexChange; return(BRISK.__fromPtr__(features2d_BRISK_create_10(thresh, octaves, radiusList_mat.nativeObj, numberList_mat.nativeObj, dMax, dMin, indexChange_mat.nativeObj))); }
/// <summary> /// Process /// </summary> /// <returns></returns> private async void Process() { float DOWNSCALE_RATIO = 1.0f; while (true) { // Check TaskCancel if (tokenSource.Token.IsCancellationRequested) { break; } rgbaMat = webCamTextureToMatHelper.GetMat(); // Debug.Log ("rgbaMat.ToString() " + rgbaMat.ToString ()); Mat downScaleRgbaMat = null; DOWNSCALE_RATIO = 1.0f; if (enableDownScale) { downScaleRgbaMat = imageOptimizationHelper.GetDownScaleMat(rgbaMat); DOWNSCALE_RATIO = imageOptimizationHelper.downscaleRatio; } else { downScaleRgbaMat = rgbaMat; DOWNSCALE_RATIO = 1.0f; } Imgproc.cvtColor(downScaleRgbaMat, bgrMat, Imgproc.COLOR_RGBA2BGR); await Task.Run(() => { // detect faces on the downscale image if (!enableSkipFrame || !imageOptimizationHelper.IsCurrentFrameSkipped()) { if (net == null) { Imgproc.putText(rgbaMat, "model file is not loaded.", new Point(5, rgbaMat.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false); Imgproc.putText(rgbaMat, "Please read console message.", new Point(5, rgbaMat.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false); } else { // Create a 4D blob from a frame. Size inpSize = new Size(inpWidth > 0 ? inpWidth : bgrMat.cols(), inpHeight > 0 ? inpHeight : bgrMat.rows()); Mat blob = Dnn.blobFromImage(bgrMat, scale, inpSize, mean, swapRB, false); // Run a model. 
net.setInput(blob); if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1) { // Faster-RCNN or R-FCN Imgproc.resize(bgrMat, bgrMat, inpSize); Mat imInfo = new Mat(1, 3, CvType.CV_32FC1); imInfo.put(0, 0, new float[] { (float)inpSize.height, (float)inpSize.width, 1.6f }); net.setInput(imInfo, "im_info"); } TickMeter tm = new TickMeter(); tm.start(); List <Mat> outs = new List <Mat>(); net.forward(outs, outBlobNames); tm.stop(); // Debug.Log ("Inference time, ms: " + tm.getTimeMilli ()); postprocess(bgrMat, outs, net); for (int i = 0; i < outs.Count; i++) { outs[i].Dispose(); } blob.Dispose(); if (enableDownScale) { for (int i = 0; i < _boxesList.Count; ++i) { var rect = _boxesList[i]; _boxesList[i] = new OpenCVForUnity.CoreModule.Rect( (int)(rect.x * DOWNSCALE_RATIO), (int)(rect.y * DOWNSCALE_RATIO), (int)(rect.width * DOWNSCALE_RATIO), (int)(rect.height * DOWNSCALE_RATIO)); } } } //Imgproc.rectangle(rgbaMat, new Point(0, 0), new Point(rgbaMat.width(), rgbaMat.height()), new Scalar(0, 0, 0, 0), -1); MatOfRect boxes = new MatOfRect(); boxes.fromList(_boxesList); MatOfFloat confidences = new MatOfFloat(); confidences.fromList(_confidencesList); MatOfInt indices = new MatOfInt(); Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices); // Debug.Log ("indices.dump () "+indices.dump ()); // Debug.Log ("indices.ToString () "+indices.ToString()); for (int i = 0; i < indices.total(); ++i) { int idx = (int)indices.get(i, 0)[0]; OpenCVForUnity.CoreModule.Rect box = _boxesList[idx]; drawPred(_classIdsList[idx], _confidencesList[idx], box.x, box.y, box.x + box.width, box.y + box.height, rgbaMat); } indices.Dispose(); boxes.Dispose(); confidences.Dispose(); } }); Utils.fastMatToTexture2D(rgbaMat, texture); Thread.Sleep(10); } }
// // C++: static Ptr_FREAK cv::xfeatures2d::FREAK::create(bool orientationNormalized = true, bool scaleNormalized = true, float patternScale = 22.0f, int nOctaves = 4, vector_int selectedPairs = std::vector<int>()) // /** * param orientationNormalized Enable orientation normalization. * param scaleNormalized Enable scale normalization. * param patternScale Scaling of the description pattern. * param nOctaves Number of octaves covered by the detected keypoints. * param selectedPairs (Optional) user defined selected pairs indexes, * return automatically generated */ public static FREAK create(bool orientationNormalized, bool scaleNormalized, float patternScale, int nOctaves, MatOfInt selectedPairs) { if (selectedPairs != null) { selectedPairs.ThrowIfDisposed(); } Mat selectedPairs_mat = selectedPairs; return(FREAK.__fromPtr__(xfeatures2d_FREAK_create_10(orientationNormalized, scaleNormalized, patternScale, nOctaves, selectedPairs_mat.nativeObj))); }
/// <summary> /// Hands the pose estimation process. /// </summary> public void handPoseEstimationProcess(Mat rgbaMat) { //Imgproc.blur(mRgba, mRgba, new Size(5,5)); Imgproc.GaussianBlur(rgbaMat, rgbaMat, new OpenCVForUnity.Size(3, 3), 1, 1); //Imgproc.medianBlur(mRgba, mRgba, 3); if (!isColorSelected) { return; } List <MatOfPoint> contours = detector.getContours(); detector.process(rgbaMat); // Debug.Log ("Contours count: " + contours.Count); if (contours.Count <= 0) { return; } RotatedRect rect = Imgproc.minAreaRect(new MatOfPoint2f(contours [0].toArray())); double boundWidth = rect.size.width; double boundHeight = rect.size.height; int boundPos = 0; for (int i = 1; i < contours.Count; i++) { rect = Imgproc.minAreaRect(new MatOfPoint2f(contours [i].toArray())); if (rect.size.width * rect.size.height > boundWidth * boundHeight) { boundWidth = rect.size.width; boundHeight = rect.size.height; boundPos = i; } } OpenCVForUnity.Rect boundRect = Imgproc.boundingRect(new MatOfPoint(contours [boundPos].toArray())); Imgproc.rectangle(rgbaMat, boundRect.tl(), boundRect.br(), CONTOUR_COLOR_WHITE, 2, 8, 0); // Debug.Log ( // " Row start [" + // (int)boundRect.tl ().y + "] row end [" + // (int)boundRect.br ().y + "] Col start [" + // (int)boundRect.tl ().x + "] Col end [" + // (int)boundRect.br ().x + "]"); double a = boundRect.br().y - boundRect.tl().y; a = a * 0.7; a = boundRect.tl().y + a; // Debug.Log ( // " A [" + a + "] br y - tl y = [" + (boundRect.br ().y - boundRect.tl ().y) + "]"); //Core.rectangle( mRgba, boundRect.tl(), boundRect.br(), CONTOUR_COLOR, 2, 8, 0 ); Imgproc.rectangle(rgbaMat, boundRect.tl(), new Point(boundRect.br().x, a), CONTOUR_COLOR, 2, 8, 0); MatOfPoint2f pointMat = new MatOfPoint2f(); Imgproc.approxPolyDP(new MatOfPoint2f(contours [boundPos].toArray()), pointMat, 3, true); contours [boundPos] = new MatOfPoint(pointMat.toArray()); MatOfInt hull = new MatOfInt(); MatOfInt4 convexDefect = new MatOfInt4(); Imgproc.convexHull(new MatOfPoint(contours 
[boundPos].toArray()), hull); if (hull.toArray().Length < 3) { return; } Imgproc.convexityDefects(new MatOfPoint(contours [boundPos].toArray()), hull, convexDefect); List <MatOfPoint> hullPoints = new List <MatOfPoint> (); List <Point> listPo = new List <Point> (); for (int j = 0; j < hull.toList().Count; j++) { listPo.Add(contours [boundPos].toList() [hull.toList() [j]]); } MatOfPoint e = new MatOfPoint(); e.fromList(listPo); hullPoints.Add(e); List <MatOfPoint> defectPoints = new List <MatOfPoint> (); List <Point> listPoDefect = new List <Point> (); for (int j = 0; j < convexDefect.toList().Count; j = j + 4) { Point farPoint = contours [boundPos].toList() [convexDefect.toList() [j + 2]]; int depth = convexDefect.toList() [j + 3]; if (depth > threasholdSlider.value && farPoint.y < a) { listPoDefect.Add(contours [boundPos].toList() [convexDefect.toList() [j + 2]]); } // Debug.Log ("defects [" + j + "] " + convexDefect.toList () [j + 3]); } MatOfPoint e2 = new MatOfPoint(); e2.fromList(listPo); defectPoints.Add(e2); // Debug.Log ("hull: " + hull.toList ()); // Debug.Log ("defects: " + convexDefect.toList ()); Imgproc.drawContours(rgbaMat, hullPoints, -1, CONTOUR_COLOR, 3); // int defectsTotal = (int)convexDefect.total(); // Debug.Log ("Defect total " + defectsTotal); this.numberOfFingers = listPoDefect.Count; if (this.numberOfFingers > 5) { this.numberOfFingers = 5; } // Debug.Log ("numberOfFingers " + numberOfFingers); // Core.putText (mRgba, "" + numberOfFingers, new Point (mRgba.cols () / 2, mRgba.rows () / 2), Core.FONT_HERSHEY_PLAIN, 4.0, new Scalar (255, 255, 255, 255), 6, Core.LINE_AA, false); numberOfFingersText.text = numberOfFingers.ToString(); foreach (Point p in listPoDefect) { Imgproc.circle(rgbaMat, p, 6, new Scalar(255, 0, 255, 255), -1); } }
/// <summary> /// computes the connected components labeled image of boolean image. /// image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0 /// represents the background label. ltype specifies the output label image type, an important /// consideration based on the total number of labels or alternatively the total number of /// pixels in the source image. /// </summary> /// <param name="image">the image to be labeled</param> /// <param name="labels">destination labeled rectangular array</param> /// <param name="connectivity">8 or 4 for 8-way or 4-way connectivity respectively</param> /// <returns>The number of labels</returns> public static int ConnectedComponents(InputArray image, out int[,] labels, PixelConnectivity connectivity) { using (var labelsMat = new MatOfInt()) { int result = ConnectedComponents(image, labelsMat, connectivity, MatType.CV_32S); labels = labelsMat.ToRectangularArray(); return result; } }
/// <summary>
/// Recognizes the markers: warps each candidate to canonical form, matches it against the
/// known designs, rotates its corners into canonical order, and refines the corners to
/// sub-pixel accuracy. On return <paramref name="detectedMarkers"/> holds only the
/// recognized markers.
/// </summary>
/// <param name="grayscale">Grayscale source image the markers were detected in.</param>
/// <param name="detectedMarkers">Candidate markers; replaced in place by the recognized set.</param>
void recognizeMarkers(Mat grayscale, List <Marker> detectedMarkers)
{
    List <Marker> goodMarkers = new List <Marker> ();

    // Identify the markers
    for (int i = 0; i < detectedMarkers.Count; i++) {
        Marker marker = detectedMarkers [i];

        // Find the perspective transformation that brings current marker to rectangular form
        Mat markerTransform = Imgproc.getPerspectiveTransform(new MatOfPoint2f(marker.points.toArray()), m_markerCorners2d);

        // Transform image to get a canonical marker image
        Imgproc.warpPerspective(grayscale, canonicalMarkerImage, markerTransform, markerSize);

        // BUG FIX: the transform Mat was previously leaked on every iteration.
        markerTransform.Dispose();

        for (int p = 0; p < m_markerDesigns.Count; p++) {
            MatOfInt nRotations = new MatOfInt(0);
            int id = Marker.getMarkerId(canonicalMarkerImage, nRotations, m_markerDesigns [p]);
            if (id != -1) {
                marker.id = id;

                // Sort the points so that they are always in the same order no matter the
                // camera orientation. Equivalent of:
                // std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());
                int shift = 4 - nRotations.toArray() [0];
                List <Point> markerPointsList = marker.points.toList();
                markerPointsList = markerPointsList.Skip(shift).Concat(markerPointsList.Take(shift)).ToList();
                marker.points.fromList(markerPointsList);

                goodMarkers.Add(marker);
                nRotations.Dispose();
                // BUG FIX: stop after the first matching design; continuing could add the
                // same marker again and re-rotate its already-canonical corners.
                break;
            }
            nRotations.Dispose();
        }
    }

    // Refine marker corners using sub pixel accuracy
    if (goodMarkers.Count > 0) {
        List <Point> preciseCornersPoint = new List <Point> (4 * goodMarkers.Count);
        for (int i = 0; i < preciseCornersPoint.Capacity; i++) {
            preciseCornersPoint.Add(new Point(0, 0));
        }

        // Gather all corners (4 per marker) into one flat list for cornerSubPix.
        for (int i = 0; i < goodMarkers.Count; i++) {
            List <Point> markerPointsList = goodMarkers [i].points.toList();
            for (int c = 0; c < 4; c++) {
                preciseCornersPoint [i * 4 + c] = markerPointsList [c];
            }
        }

        MatOfPoint2f preciseCorners = new MatOfPoint2f(preciseCornersPoint.ToArray());
        TermCriteria termCriteria = new TermCriteria(TermCriteria.MAX_ITER | TermCriteria.EPS, 30, 0.01);
        Imgproc.cornerSubPix(grayscale, preciseCorners, new Size(5, 5), new Size(-1, -1), termCriteria);
        preciseCornersPoint = preciseCorners.toList();

        // Copy refined corners position back to markers
        for (int i = 0; i < goodMarkers.Count; i++) {
            Marker marker = goodMarkers [i];
            List <Point> markerPointsList = marker.points.toList();
            for (int c = 0; c < 4; c++) {
                markerPointsList [c] = preciseCornersPoint [i * 4 + c];
            }
            // BUG FIX: toList() returns a copy, so the refined corners must be written
            // back explicitly; previously the refinement result was silently discarded.
            marker.points.fromList(markerPointsList);
        }
        preciseCorners.Dispose();
    }

    detectedMarkers.Clear();
    detectedMarkers.AddRange(goodMarkers);
}
/// <summary>
/// Gets the marker identifier by thresholding the canonical marker image, checking that
/// the outer border is black, reading the inner size x size bit matrix, and matching it
/// (in all four rotations) against the expected design.
/// </summary>
/// <returns>The marker identifier, or -1 if the image is not a valid marker.</returns>
/// <param name="markerImage">Canonical marker image. NOTE: it is thresholded in place.</param>
/// <param name="nRotations">Receives the rotation count (0-3) that best matches the design.</param>
/// <param name="markerDesign">Expected size x size bit pattern of the marker.</param>
public static int getMarkerId (Mat markerImage, MatOfInt nRotations, byte[,] markerDesign)
{
    Mat grey = markerImage;

    // Threshold image (modifies the caller's markerImage in place, as before).
    Imgproc.threshold (grey, grey, 125, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);

    // Markers are divided in (size+2)x(size+2) regions, of which the inner size x size
    // belongs to marker info; the external border should be entirely black.
    int size = markerDesign.GetLength (0);
    int cellSize = markerImage.rows () / (size + 2);

    for (int y = 0; y < (size + 2); y++) {
        int inc = size + 1;
        if (y == 0 || y == (size + 1))
            inc = 1; // for first and last row, check the whole border

        for (int x = 0; x < (size + 2); x += inc) {
            int cellX = x * cellSize;
            int cellY = y * cellSize;
            Mat cell = new Mat (grey, new OpenCVForUnity.Rect (cellX, cellY, cellSize, cellSize));
            int nZ = Core.countNonZero (cell);
            cell.Dispose ();
            if (nZ > (cellSize * cellSize) / 2) {
                return -1; // can not be a marker because the border element is not black!
            }
        }
    }

    Mat bitMatrix = Mat.zeros (size, size, CvType.CV_8UC1);

    // Get information (for each inner square, determine if it is black or white).
    for (int y = 0; y < size; y++) {
        for (int x = 0; x < size; x++) {
            int cellX = (x + 1) * cellSize;
            int cellY = (y + 1) * cellSize;
            Mat cell = new Mat (grey, new OpenCVForUnity.Rect (cellX, cellY, cellSize, cellSize));
            int nZ = Core.countNonZero (cell);
            if (nZ > (cellSize * cellSize) / 2)
                bitMatrix.put (y, x, new byte[]{ 1 });
            cell.Dispose ();
        }
    }

    // Check all possible rotations and keep the one with the smallest hamming distance.
    // BUG FIX: the original preallocated four `new Mat ()` instances that were immediately
    // overwritten (leaked), and leaked bitMatrix plus all rotations on the -1 return path
    // while double-disposing rotations[0] (== bitMatrix) on the success path.
    Mat[] rotations = new Mat[4];
    int[] distances = new int[4];
    rotations [0] = bitMatrix;
    distances [0] = hammDistMarker (rotations [0], markerDesign);
    int bestDistance = distances [0];
    int bestRotation = 0;
    for (int i = 1; i < 4; i++) {
        rotations [i] = rotate (rotations [i - 1]);
        distances [i] = hammDistMarker (rotations [i], markerDesign);
        if (distances [i] < bestDistance) {
            bestDistance = distances [i];
            bestRotation = i;
        }
    }

    nRotations.fromArray (bestRotation);

    // Only a perfect match (distance 0) yields a valid id.
    int id = (bestDistance == 0) ? mat2id (rotations [bestRotation]) : -1;

    // Dispose every rotation exactly once; rotations[0] is bitMatrix.
    for (int i = 0; i < rotations.Length; i++) {
        rotations [i].Dispose ();
    }
    return id;
}
/// <summary>
/// Finds the largest contour in the thresholded frame, analyzes its convexity defects to
/// locate two reference palm points, and positions/scales/rotates the five ring objects
/// between them. Writes results to the posX_new/posY_new, scale1..scale5 and angle_rot fields.
/// </summary>
void Process ()
{
    Mat hierarchy = new Mat ();
    List<MatOfPoint> contours = new List<MatOfPoint> ();
    MatOfPoint maxitem = new MatOfPoint ();
    MatOfInt hullInt = new MatOfInt ();

    frameclone = frame_thresh_final.clone ();
    Imgproc.findContours (frameclone, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE);

    // BUG FIX: the original indexed contours[0] unconditionally, which throws
    // ArgumentOutOfRangeException on frames where no contour is found.
    if (contours.Count == 0) {
        return;
    }

    // Pick the contour with the most points as the hand candidate; n remembers its index.
    maxitem = contours [0];
    n = 0;
    for (int i = 0; i < contours.Count; i++) {
        if (contours [i].total () > maxitem.total ()) {
            maxitem = contours [i];
            n = i;
        }
    }

    OpenCVForUnity.Rect bRect = Imgproc.boundingRect (maxitem);
    int bRect_height = bRect.height;
    Imgproc.drawContours (frame_thresh_final, contours, n, new Scalar (255, 255, 255), -1);

    // Convex hull of the largest contour (kept for the optional debug draw below).
    Imgproc.convexHull (maxitem, hullInt);
    List<Point> maxitemPointList = maxitem.toList ();
    List<int> hullIntList = hullInt.toList ();
    List<Point> hullPointList = new List<Point> ();
    for (int j = 0; j < hullIntList.Count; j++) {
        hullPointList.Add (maxitemPointList [hullIntList [j]]);
    }
    MatOfPoint hullPointMat = new MatOfPoint ();
    hullPointMat.fromList (hullPointList);
    List<MatOfPoint> hullPoints = new List<MatOfPoint> ();
    hullPoints.Add (hullPointMat);
    //Imgproc.drawContours (frame, hullPoints, -1, new Scalar (0, 255, 0), 2);

    MatOfInt4 convexityDef = new MatOfInt4 ();
    Imgproc.convexityDefects (maxitem, hullInt, convexityDef);
    List<int> conDefIntList = convexityDef.toList ();

    List<Point> startpts = new List<Point> ();
    List<Point> farpts = new List<Point> ();
    List<Point> endpts = new List<Point> ();

    // Track the two leftmost accepted defect points (smallest x), defx1 <= defx2.
    double defx1 = 1000, defx2 = 1000;
    int countx1 = 0, countx2 = 0;
    int tolerance = (int)(bRect_height / 5.5);
    int count = 0, index = 0;
    double angleTol = 95.0;
    // NOTE(review): fixed capacity of 100 accepted defects; overruns would throw — confirm
    // the expected defect counts stay below this.
    int[] defarray = new int[100];

    // Each defect is 4 ints: start index, end index, farthest-point index, depth.
    for (int i = 0; i < conDefIntList.Count / 4; i++) {
        startpts.Add (maxitemPointList [conDefIntList [4 * i]]);
        endpts.Add (maxitemPointList [conDefIntList [4 * i + 1]]);
        farpts.Add (maxitemPointList [conDefIntList [4 * i + 2]]);
        Point s = startpts [i];
        Point e = endpts [i];
        Point f = farpts [i];
        // Accept only sharp, sufficiently deep defects (likely finger valleys).
        if (GetAngle (s, f, e) < angleTol && GetDistance (s, f) > tolerance && GetDistance (e, f) > tolerance) {
            if (f.x < defx1) {
                defx2 = defx1;
                countx2 = countx1;
                defx1 = f.x;
                countx1 = count;
            } else if (f.x < defx2) {
                defx2 = f.x;
                countx2 = count;
            }
            defarray [count] = index;
            count++;
        }
        index++;
    }

    // BUG FIX: guard against an empty defect list before indexing farpts.
    if (farpts.Count == 0) {
        return;
    }

    Point point1 = farpts [defarray [countx1]];
    Point point2 = farpts [defarray [countx2]];
    point1.y -= 5;

    // Midpoint of the two reference points, mapped from 480x640 image coords to world coords.
    double posX = (point1.x + point2.x) / 2.0;
    double posY = (point1.y + point2.y) / 2.0;
    posX_new = (float)(posX - 240);
    posY_new = (float)(-posY + 320);

    double dist = Math.Sqrt (Math.Pow (point1.x - point2.x, 2) + Math.Pow (point1.y - point2.y, 2));
    scale1 = dist * 500000 / 640.0;
    scale2 = dist * 700 / 640.0;
    scale3 = dist * 600 / 640.0;
    scale4 = dist * 15 / 640.0;
    scale5 = dist * 70 / 640.0;

    // Rotation of the segment point1->point2 relative to horizontal.
    Point point3 = new Point (point1.x, point2.y);
    angle_rot = GetAngle (point1, point2, point3);

    Vector3 ringCenter = new Vector3 (posX_new, posY_new, 0.0f);
    double[] ringScales = { scale1, scale2, scale3, scale4, scale5 };
    for (int i = 0; i < 5; i++) {
        float s = (float)ringScales [i];
        // Ring 0 keeps the original 1.5x depth scale; the others are uniform.
        float sz = (i == 0) ? (float)(ringScales [i] * 1.5) : s;
        ringObj [i].transform.position = ringCenter;
        ringObj [i].transform.localScale = new Vector3 (s, s, sz);
        ringObj [i].transform.RotateAround (ringCenter, Vector3.forward, (float)angle_rot);
    }
}
/*=============================================*
 * From per-contour vertices to hand-gesture classification
 *=============================================*/
/// <summary>
/// Contours to hand gesture: draws the bounding box, arm-inclusive hull, palm outline,
/// hand outline and finger segments for the given contour onto rgbaMat.
/// Any exception raised by the list surgery below is caught and only logged.
/// </summary>
/// <param name="rgbaMat">Rgba mat the overlays are drawn on.</param>
/// <param name="contour">Contour of the hand candidate.</param>
private static void _contourToHandGesture(Mat rgbaMat, MatOfPoint contour)
{
    try {
        // Prepare for inspecting the vertices.
        _pointOfVertices(rgbaMat, contour);

        // Get and draw the bounding rectangle of the reference contour.
        OpenCVForUnity.Rect boundRect = Imgproc.boundingRect(new MatOfPoint(contour.toArray()));
        Imgproc.rectangle(rgbaMat, boundRect.tl(), boundRect.br(), HGColorSpuiter.ColorToScalar(ContourRangeColor), 2, 8, 0);

        /*=============================================*
         * Get the extent of the hand including the arm
         *=============================================*/
        // Convex hull of the hand including the arm.
        MatOfInt hull = new MatOfInt();
        Imgproc.convexHull(new MatOfPoint(contour.toArray()), hull);

        // Collect hull vertices, merging near-duplicates.
        List <Point> armPointList = new List <Point>();
        for (int j = 0; j < hull.toList().Count; j++) {
            Point armPoint = contour.toList()[hull.toList()[j]];
            bool addFlag = true;
            foreach (Point point in armPointList.ToArray()) {
                // Vertices closer than 1/10 of the bounding box are treated as noise and merged.
                double distance = Mathf.Sqrt((float)((armPoint.x - point.x) * (armPoint.x - point.x) + (armPoint.y - point.y) * (armPoint.y - point.y)));
                if (distance <= Mathf.Min((float)boundRect.width, (float)boundRect.height) / 10) {
                    addFlag = false;
                    break;
                }
            }
            if (addFlag) {
                armPointList.Add(armPoint);
            }
        }
        MatOfPoint armMatOfPoint = new MatOfPoint();
        armMatOfPoint.fromList(armPointList);
        List <MatOfPoint> armPoints = new List <MatOfPoint>();
        armPoints.Add(armMatOfPoint);
        // Draw the arm-inclusive hand outline.
        Imgproc.drawContours(rgbaMat, armPoints, -1, HGColorSpuiter.ColorToScalar(ArmRangeColor), 3);

        // If the arm-inclusive hull degenerates to a triangle, further analysis is unreliable.
        if (hull.toArray().Length < 3) {
            return;
        }

        /*=============================================*
         * Get the extent of the palm
         *=============================================*/
        // Keep only the concave (defect) points of the hull to approximate the palm.
        MatOfInt4 convexDefect = new MatOfInt4();
        Imgproc.convexityDefects(new MatOfPoint(contour.toArray()), hull, convexDefect);

        // Filter defect points by depth and vertical position.
        List <Point> palmPointList = new List <Point>();
        for (int j = 0; j < convexDefect.toList().Count; j = j + 4) {
            Point farPoint = contour.toList()[convexDefect.toList()[j + 2]];
            int depth = convexDefect.toList()[j + 3];
            if (depth > depthThreashold && farPoint.y < boundRect.br().y - boundRect.tl().y) {
                palmPointList.Add(contour.toList()[convexDefect.toList()[j + 2]]);
            }
        }
        MatOfPoint palmMatOfPoint = new MatOfPoint();
        palmMatOfPoint.fromList(palmPointList);
        List <MatOfPoint> palmPoints = new List <MatOfPoint>();
        palmPoints.Add(palmMatOfPoint);
        // Draw the palm outline.
        Imgproc.drawContours(rgbaMat, palmPoints, -1, HGColorSpuiter.ColorToScalar(PalmRangeColor), 3);

        /*=============================================*
         * Get the extent of palm + fingertips
         *=============================================*/
        // Build the wrist-excluded hand outline from the arm points, anchored on palm points.
        // NOTE(review): throws (and is swallowed by the catch below) when palmPointList is
        // empty or armPointList has fewer than 2 entries — confirm that is acceptable.
        List <Point> handPointList = new List <Point>();
        handPointList.AddRange(armPointList.ToArray());
        handPointList.Reverse();
        handPointList.RemoveAt(0);
        handPointList.Insert(0, palmPointList.ToArray()[0]);
        handPointList.RemoveAt(handPointList.Count - 1);
        handPointList.Insert(handPointList.Count, palmPointList.ToArray()[palmPointList.Count - 1]);
        MatOfPoint handMatOfPoint = new MatOfPoint();
        handMatOfPoint.fromList(handPointList);
        List <MatOfPoint> handPoints = new List <MatOfPoint>();
        handPoints.Add(handMatOfPoint);
        Imgproc.drawContours(rgbaMat, handPoints, -1, HGColorSpuiter.ColorToScalar(HandRangeColor), 3);

        /*=============================================*
         * Get the fingertip positions
         *=============================================*/
        // Midpoint between each pair of adjacent palm vertices (wrapping around).
        List <Point> palmCenterPoints = new List <Point>();
        for (int i = 0; i < palmPointList.Count; i++) {
            Point palmPoint = palmPointList.ToArray()[i];
            Point palmPointNext = new Point();
            if (i + 1 < palmPointList.Count) {
                palmPointNext = palmPointList.ToArray()[i + 1];
            } else {
                palmPointNext = palmPointList.ToArray()[0];
            }
            Point palmCenterPoint = new Point((palmPoint.x + palmPointNext.x) / 2, (palmPoint.y + palmPointNext.y) / 2);
            palmCenterPoints.Add(palmCenterPoint);
        }

        // Connect each palm midpoint to the nearest hand vertex (at most 5 fingers).
        for (int i = 0; i < palmCenterPoints.Count && i + 1 < handPointList.Count && i < 5; i++) {
            Point palmPoint = palmCenterPoints.ToArray()[i];
            List <Point> fingerList = new List <Point>();
            fingerList.Add(palmPoint);
            fingerList.Add(handPointList.ToArray()[i + 1]);
            MatOfPoint fingerPoint = new MatOfPoint();
            fingerPoint.fromList(fingerList);
            List <MatOfPoint> fingerPoints = new List <MatOfPoint>();
            fingerPoints.Add(fingerPoint);
            Imgproc.drawContours(rgbaMat, fingerPoints, -1, HGColorSpuiter.ColorToScalar(FingerRangeColor), 3);
        }

        // Imgproc.putText(rgbaMat, "", new Point(2, rgbaMat.rows()-30), Core.FONT_HERSHEY_SIMPLEX, 1.0, HGColorSpuiter.ColorToScalar(Color.black), 2, Imgproc.LINE_AA, false);
    } catch (System.Exception e) {
        Debug.Log(e.Message);
    }
}
/// <summary>
/// Computes the connected-components labeling of a boolean image with 4- or 8-way
/// connectivity and packages the per-component statistics and centroids into a
/// <see cref="ConnectedComponents"/> result. Label 0 is the background.
/// </summary>
/// <param name="image">The image to be labeled.</param>
/// <param name="connectivity">8 or 4 for 8-way or 4-way connectivity respectively.</param>
/// <returns>The labeling result with one Blob per label.</returns>
public static ConnectedComponents ConnectedComponentsEx(
    InputArray image, PixelConnectivity connectivity = PixelConnectivity.Connectivity8)
{
    using (var labelMat = new MatOfInt())
    using (var statMat = new MatOfInt())
    using (var centroidMat = new MatOfDouble())
    {
        int labelCount = ConnectedComponentsWithStats(
            image, labelMat, statMat, centroidMat, connectivity, MatType.CV_32S);

        var labelGrid = labelMat.ToRectangularArray();
        var stats = statMat.ToRectangularArray();
        var centroids = centroidMat.ToRectangularArray();

        // stats columns are [left, top, width, height, area]; one row per label.
        var blobs = new ConnectedComponents.Blob[labelCount];
        for (int label = 0; label < labelCount; label++)
        {
            blobs[label] = new ConnectedComponents.Blob
            {
                Label = label,
                Left = stats[label, 0],
                Top = stats[label, 1],
                Width = stats[label, 2],
                Height = stats[label, 3],
                Area = stats[label, 4],
                Centroid = new Point2d(centroids[label, 0], centroids[label, 1]),
            };
        }
        return new ConnectedComponents(blobs, labelGrid, labelCount);
    }
}
/// <summary>
/// Hand pose estimation: blurs the frame, finds the largest detected contour, draws its
/// bounding box and convex hull, and counts fingers from the convexity defects (capped at 5).
/// Results are written to the pointaX/pointbX/pointbY and numberOfFingers fields.
/// </summary>
/// <param name="rgbaMat">RGBA camera frame; drawn on in place.</param>
public void handPoseEstimationProcess(Mat rgbaMat)
{
    //Imgproc.blur(mRgba, mRgba, new Size(5,5));
    Imgproc.GaussianBlur(rgbaMat, rgbaMat, new OpenCVForUnity.Size(3, 3), 1, 1);
    //Imgproc.medianBlur(mRgba, mRgba, 3);

    // Nothing to track until the user has picked a target color.
    if (!isColorSelected) {
        return;
    }

    // NOTE(review): getContours() is called BEFORE process(rgbaMat), so the contours used
    // below appear to come from the previous frame — confirm against the detector contract.
    List <MatOfPoint> contours = detector.getContours();
    detector.process(rgbaMat);

    //Debug.Log(contours + " | " + contours.Count);
    //string[] output = contours.ToArray();

    for (int i = 0; i < contours.Count; i++) {
        //Debug.Log("MatOfPoint2f " + new MatOfPoint2f(contours[i].toArray()) + " | " + i);
        //Debug.Log("MatOfPoint " + contours [i] + " | " + i);
        //Imgproc.circle(rgbaMat, contours[i], 6, new Scalar(0, 255, 0, 255), -1);
        //Debug.Log ("kotka" + MatOfPoint.ReferenceEquals(x, y));
    }

    if (contours.Count <= 0) {
        return;
    }

    // Find the contour whose min-area rectangle has the largest area.
    RotatedRect rect = Imgproc.minAreaRect(new MatOfPoint2f(contours[0].toArray()));
    double boundWidth = rect.size.width;
    double boundHeight = rect.size.height;
    int boundPos = 0;
    for (int i = 1; i < contours.Count; i++) {
        rect = Imgproc.minAreaRect(new MatOfPoint2f(contours[i].toArray()));
        if (rect.size.width * rect.size.height > boundWidth * boundHeight) {
            boundWidth = rect.size.width;
            boundHeight = rect.size.height;
            boundPos = i;
        }
    }

    // Draw the bounding box of the chosen contour.
    OpenCVForUnity.Rect boundRect = Imgproc.boundingRect(new MatOfPoint(contours[boundPos].toArray()));
    Imgproc.rectangle(rgbaMat, boundRect.tl(), boundRect.br(), CONTOUR_COLOR_WHITE, 2, 8, 0);

    //tochkaX = boundRect.tl ().x;
    //tochkaY = boundRect.tl ().y;
    Imgproc.circle(rgbaMat, boundRect.tl(), 6, new Scalar(0, 255, 0, 255), -1);
    Imgproc.circle(rgbaMat, boundRect.br(), 6, new Scalar(0, 255, 0, 255), -1);

    pointbX = boundRect.br().x;
    pointbY = boundRect.br().y;
    pointaX = boundRect.x;
    // NOTE(review): pointbY is assigned twice — this second assignment overwrites the
    // bottom-right y with the top-left y. Likely a typo for pointaY; confirm the field set.
    pointbY = boundRect.y;

    // a = top 70% of the bounding box; defects below this line are ignored.
    double a = boundRect.br().y - boundRect.tl().y;
    a = a * 0.7;
    a = boundRect.tl().y + a;
    Imgproc.rectangle(rgbaMat, boundRect.tl(), new Point(boundRect.br().x, a), CONTOUR_COLOR, 2, 8, 0);

    // Simplify the contour before hull/defect analysis.
    MatOfPoint2f pointMat = new MatOfPoint2f();
    Imgproc.approxPolyDP(new MatOfPoint2f(contours[boundPos].toArray()), pointMat, 3, true);
    contours[boundPos] = new MatOfPoint(pointMat.toArray());

    MatOfInt hull = new MatOfInt();
    MatOfInt4 convexDefect = new MatOfInt4();
    Imgproc.convexHull(new MatOfPoint(contours[boundPos].toArray()), hull);

    // A hull with fewer than 3 points cannot have convexity defects.
    if (hull.toArray().Length < 3) {
        return;
    }

    Imgproc.convexityDefects(new MatOfPoint(contours[boundPos].toArray()), hull, convexDefect);

    // Collect the hull vertices for drawing.
    List <MatOfPoint> hullPoints = new List <MatOfPoint>();
    List <Point> listPo = new List <Point>();
    for (int j = 0; j < hull.toList().Count; j++) {
        listPo.Add(contours[boundPos].toList()[hull.toList()[j]]);
    }
    MatOfPoint e = new MatOfPoint();
    e.fromList(listPo);
    hullPoints.Add(e);

    // Each defect is 4 ints: start, end, farthest point, depth. Keep deep defects in the
    // upper 70% of the box as finger valleys.
    List <MatOfPoint> defectPoints = new List <MatOfPoint>();
    List <Point> listPoDefect = new List <Point>();
    for (int j = 0; j < convexDefect.toList().Count; j = j + 4) {
        Point farPoint = contours[boundPos].toList()[convexDefect.toList()[j + 2]];
        int depth = convexDefect.toList()[j + 3];
        if (depth > 8700 && farPoint.y < a) {
            listPoDefect.Add(contours[boundPos].toList()[convexDefect.toList()[j + 2]]);
        }
    }

    // NOTE(review): e2 is filled from listPo (hull points), not listPoDefect — defectPoints
    // duplicates hullPoints and is never drawn; possibly intended to hold listPoDefect.
    MatOfPoint e2 = new MatOfPoint();
    e2.fromList(listPo);
    defectPoints.Add(e2);

    Imgproc.drawContours(rgbaMat, hullPoints, -1, CONTOUR_COLOR, 3);

    // Finger count = number of accepted defects, capped at 5.
    this.numberOfFingers = listPoDefect.Count;
    if (this.numberOfFingers > 5) {
        this.numberOfFingers = 5;
    }

    foreach (Point p in listPoDefect) {
        Imgproc.circle(rgbaMat, p, 6, new Scalar(255, 0, 255, 255), -1);
    }
}
/// <summary>
/// Recognizes the markers: warps each candidate to canonical form, matches it against the
/// single known design, rotates its corners into canonical order, and refines the corners
/// to sub-pixel accuracy. On return <paramref name="detectedMarkers"/> holds only the
/// recognized markers.
/// </summary>
/// <param name="grayscale">Grayscale source image the markers were detected in.</param>
/// <param name="detectedMarkers">Candidate markers; replaced in place by the recognized set.</param>
void recognizeMarkers (Mat grayscale, List<Marker> detectedMarkers)
{
    List<Marker> goodMarkers = new List<Marker> ();

    // Identify the markers
    for (int i = 0; i < detectedMarkers.Count; i++) {
        Marker marker = detectedMarkers [i];

        // Find the perspective transformation that brings current marker to rectangular form
        Mat markerTransform = Imgproc.getPerspectiveTransform (new MatOfPoint2f (marker.points.toArray ()), m_markerCorners2d);

        // Transform image to get a canonical marker image
        Imgproc.warpPerspective (grayscale, canonicalMarkerImage, markerTransform, markerSize);

        // BUG FIX: the transform Mat was previously leaked on every iteration.
        markerTransform.Dispose ();

        MatOfInt nRotations = new MatOfInt (0);
        int id = Marker.getMarkerId (canonicalMarkerImage, nRotations, m_markerDesign);
        if (id != -1) {
            marker.id = id;

            // Sort the points so that they are always in the same order no matter the
            // camera orientation. Equivalent of:
            // std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());
            int shift = 4 - nRotations.toArray () [0];
            List<Point> markerPointsList = marker.points.toList ();
            markerPointsList = markerPointsList.Skip (shift).Concat (markerPointsList.Take (shift)).ToList ();
            marker.points.fromList (markerPointsList);

            goodMarkers.Add (marker);
        }
        nRotations.Dispose ();
    }

    // Refine marker corners using sub pixel accuracy
    if (goodMarkers.Count > 0) {
        List<Point> preciseCornersPoint = new List<Point> (4 * goodMarkers.Count);
        for (int i = 0; i < preciseCornersPoint.Capacity; i++) {
            preciseCornersPoint.Add (new Point (0, 0));
        }

        // Gather all corners (4 per marker) into one flat list for cornerSubPix.
        for (int i = 0; i < goodMarkers.Count; i++) {
            List<Point> markerPointsList = goodMarkers [i].points.toList ();
            for (int c = 0; c < 4; c++) {
                preciseCornersPoint [i * 4 + c] = markerPointsList [c];
            }
        }

        MatOfPoint2f preciseCorners = new MatOfPoint2f (preciseCornersPoint.ToArray ());
        TermCriteria termCriteria = new TermCriteria (TermCriteria.MAX_ITER | TermCriteria.EPS, 30, 0.01);
        Imgproc.cornerSubPix (grayscale, preciseCorners, new Size (5, 5), new Size (-1, -1), termCriteria);
        preciseCornersPoint = preciseCorners.toList ();

        // Copy refined corners position back to markers
        for (int i = 0; i < goodMarkers.Count; i++) {
            Marker marker = goodMarkers [i];
            List<Point> markerPointsList = marker.points.toList ();
            for (int c = 0; c < 4; c++) {
                markerPointsList [c] = preciseCornersPoint [i * 4 + c];
            }
            // BUG FIX: toList() returns a copy, so the refined corners must be written
            // back explicitly; previously the refinement result was silently discarded.
            marker.points.fromList (markerPointsList);
        }
        preciseCorners.Dispose ();
    }

    detectedMarkers.Clear ();
    detectedMarkers.AddRange (goodMarkers);
}
/// <summary>
/// Postprocess the specified frame, outs and net: decodes the network output blobs into
/// class ids, confidences and boxes (handling Faster-RCNN/R-FCN, DetectionOutput and
/// Region/YOLO layouts), applies non-maximum suppression, and draws the surviving
/// predictions on the frame via drawPred.
/// </summary>
/// <param name="frame">Frame the detections are drawn on.</param>
/// <param name="outs">Output blobs from the network forward pass.</param>
/// <param name="net">Net the blobs came from (used to detect the Faster-RCNN case).</param>
private void postprocess(Mat frame, List <Mat> outs, Net net)
{
    string outLayerType = outBlobTypes[0];

    List <int> classIdsList = new List <int>();
    List <float> confidencesList = new List <float>();
    List <OpenCVForUnity.CoreModule.Rect> boxesList = new List <OpenCVForUnity.CoreModule.Rect>();

    if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1) {
        // Faster-RCNN or R-FCN
        // Network produces output blob with a shape 1x1xNx7 where N is a number of
        // detections and an every detection is a vector of values
        // [batchId, classId, confidence, left, top, right, bottom]
        if (outs.Count == 1) {
            outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);

            //Debug.Log ("outs[i].ToString() " + outs [0].ToString ());

            float[] data = new float[7];
            for (int i = 0; i < outs[0].rows(); i++) {
                outs[0].get(i, 0, data);
                float confidence = data[2];
                if (confidence > confThreshold) {
                    int class_id = (int)(data[1]);
                    // Coordinates are normalized; scale them to the frame size.
                    int left = (int)(data[3] * frame.cols());
                    int top = (int)(data[4] * frame.rows());
                    int right = (int)(data[5] * frame.cols());
                    int bottom = (int)(data[6] * frame.rows());
                    int width = right - left + 1;
                    int height = bottom - top + 1;
                    classIdsList.Add((int)(class_id) - 0);
                    confidencesList.Add((float)confidence);
                    boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                }
            }
        }
    } else if (outLayerType == "DetectionOutput") {
        // Network produces output blob with a shape 1x1xNx7 where N is a number of
        // detections and an every detection is a vector of values
        // [batchId, classId, confidence, left, top, right, bottom]
        if (outs.Count == 1) {
            outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);

            // Debug.Log ("outs[i].ToString() " + outs [0].ToString ());

            float[] data = new float[7];
            for (int i = 0; i < outs[0].rows(); i++) {
                outs[0].get(i, 0, data);
                float confidence = data[2];
                if (confidence > confThreshold) {
                    int class_id = (int)(data[1]);
                    int left = (int)(data[3] * frame.cols());
                    int top = (int)(data[4] * frame.rows());
                    int right = (int)(data[5] * frame.cols());
                    int bottom = (int)(data[6] * frame.rows());
                    int width = right - left + 1;
                    int height = bottom - top + 1;
                    classIdsList.Add((int)(class_id) - 0);
                    confidencesList.Add((float)confidence);
                    boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                }
            }
        }
    } else if (outLayerType == "Region") {
        for (int i = 0; i < outs.Count; ++i) {
            // Network produces output blob with a shape NxC where N is a number of
            // detected objects and C is a number of classes + 4 where the first 4
            // numbers are [center_x, center_y, width, height]

            //Debug.Log ("outs[i].ToString() "+outs[i].ToString());

            float[] positionData = new float[5];
            float[] confidenceData = new float[outs[i].cols() - 5];

            for (int p = 0; p < outs[i].rows(); p++) {
                outs[i].get(p, 0, positionData);
                outs[i].get(p, 5, confidenceData);

                // Argmax over the per-class scores.
                int maxIdx = confidenceData.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? max : working).I;
                float confidence = confidenceData[maxIdx];
                if (confidence > confThreshold) {
                    // Convert normalized center/size to a top-left anchored pixel box.
                    int centerX = (int)(positionData[0] * frame.cols());
                    int centerY = (int)(positionData[1] * frame.rows());
                    int width = (int)(positionData[2] * frame.cols());
                    int height = (int)(positionData[3] * frame.rows());
                    int left = centerX - width / 2;
                    int top = centerY - height / 2;

                    classIdsList.Add(maxIdx);
                    confidencesList.Add((float)confidence);
                    boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                }
            }
        }
    } else {
        Debug.Log("Unknown output layer type: " + outLayerType);
    }

    // Non-maximum suppression across all collected boxes.
    MatOfRect boxes = new MatOfRect();
    boxes.fromList(boxesList);

    MatOfFloat confidences = new MatOfFloat();
    confidences.fromList(confidencesList);

    MatOfInt indices = new MatOfInt();
    Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

    // Debug.Log ("indices.dump () "+indices.dump ());
    // Debug.Log ("indices.ToString () "+indices.ToString());

    for (int i = 0; i < indices.total(); ++i) {
        int idx = (int)indices.get(i, 0)[0];
        OpenCVForUnity.CoreModule.Rect box = boxesList[idx];
        drawPred(classIdsList[idx], confidencesList[idx], box.x, box.y, box.x + box.width, box.y + box.height, frame);
    }

    indices.Dispose();
    boxes.Dispose();
    confidences.Dispose();
}
/// <summary>
/// Computes convex hull for a set of 2D points.
/// </summary>
/// <param name="points">The input 2D point set, represented by CV_32SC2 or CV_32FC2 matrix</param>
/// <param name="clockwise">If true, the output convex hull will be oriented clockwise,
/// otherwise it will be oriented counter-clockwise. Here, the usual screen coordinate
/// system is assumed - the origin is at the top-left corner, x axis is oriented to the right,
/// and y axis is oriented downwards.</param>
/// <returns>The output convex hull. It is a vector of 0-based point
/// indices of the hull points in the original array (since the set of convex hull
/// points is a subset of the original point set).</returns>
public int[] ConvexHullIndices(InputArray points, bool clockwise = false)
{
    // BUG FIX: the MatOfInt result buffer is IDisposable and was previously leaked;
    // copy the indices out and dispose the native Mat deterministically.
    using (var dst = new MatOfInt())
    {
        Cv2.ConvexHull(points, dst, clockwise, false);
        return dst.ToArray();
    }
}
// Use this for initialization: loads the input image, runs EAST text detection followed by
// per-box text recognition, draws the detected quads and recognized words, and shows the
// result on this GameObject's texture.
void Run()
{
    //if true, The error log of the Native side OpenCV will be displayed on the Unity Editor Console.
    Utils.setDebugMode(true);

    Mat img = Imgcodecs.imread(image_filepath, Imgcodecs.IMREAD_COLOR);
    if (img.empty()) {
        // Fall back to a black placeholder so the rest of the pipeline still runs.
        Debug.LogError(image_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
        img = new Mat(368, 368, CvType.CV_8UC3, new Scalar(0, 0, 0));
    }

    //Adust Quad.transform.localScale.
    gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
    Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

    // Fit the orthographic camera to the image aspect ratio.
    float imageWidth = img.width();
    float imageHeight = img.height();
    float widthScale = (float)Screen.width / imageWidth;
    float heightScale = (float)Screen.height / imageHeight;
    if (widthScale < heightScale) {
        Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
    } else {
        Camera.main.orthographicSize = imageHeight / 2;
    }

    Net detector = null;
    Net recognizer = null;

    if (string.IsNullOrEmpty(detectionmodel_filepath) || string.IsNullOrEmpty(recognitionmodel_filepath)) {
        Debug.LogError(detectionmodel_filepath + " or " + recognitionmodel_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
    } else {
        detector = Dnn.readNet(detectionmodel_filepath);
        recognizer = Dnn.readNet(recognitionmodel_filepath);
    }

    if (detector == null || recognizer == null) {
        Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
        Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
    } else {
        TickMeter tickMeter = new TickMeter();

        List <Mat> outs = new List <Mat>();
        // EAST detector output layers: score map and geometry map.
        List <string> outNames = new List <string>();
        outNames.Add("feature_fusion/Conv_7/Sigmoid");
        outNames.Add("feature_fusion/concat_3");

        // Create a 4D blob from a frame.
        Size inpSize = new Size(inpWidth > 0 ? inpWidth : img.cols(), inpHeight > 0 ? inpHeight : img.rows());
        Mat blob = Dnn.blobFromImage(img, 1.0, inpSize, new Scalar(123.68, 116.78, 103.94), true, false); // blobFromImage(frame, blob, 1.0, Size(inpWidth, inpHeight), Scalar(123.68, 116.78, 103.94), true, false);

        // Run detection model.
        detector.setInput(blob);
        tickMeter.start();
        detector.forward(outs, outNames);
        tickMeter.stop();

        Mat scores = outs[0];
        Mat geometry = outs[1];

        // Decode predicted bounding boxes.
        List <RotatedRect> boxes = new List <RotatedRect>();
        List <float> confidences = new List <float>();
        decodeBoundingBoxes(scores, geometry, confThreshold, boxes, confidences);

        // Apply non-maximum suppression procedure.
        MatOfRotatedRect boxesMat = new MatOfRotatedRect(boxes.ToArray());
        MatOfFloat confidencesMat = new MatOfFloat(confidences.ToArray());
        MatOfInt indicesMat = new MatOfInt();
        Dnn.NMSBoxesRotated(boxesMat, confidencesMat, confThreshold, nmsThreshold, indicesMat);
        List <int> indices = indicesMat.toList();

        // Scale factors from network input size back to the original image.
        // NOTE(review): if inpWidth/inpHeight are declared as ints this is integer division
        // and truncates the ratio — confirm the field types.
        Point ratio = new Point(img.cols() / inpWidth, img.rows() / inpHeight);

        // Render text.
        for (int i = 0; i < indices.Count; ++i) {
            RotatedRect box = boxes[indices[i]];

            Point[] vertices = new Point[4];
            box.points(vertices);

            // Map the box vertices back to original image coordinates.
            for (int j = 0; j < 4; ++j) {
                vertices[j].x *= ratio.x;
                vertices[j].y *= ratio.y;
            }

            for (int j = 0; j < 4; ++j) {
                Imgproc.line(img, vertices[j], vertices[(j + 1) % 4], new Scalar(0, 255, 0), 1);
            }

            if (recognizer != null) {
                // Rectify the quad, grayscale it, and run the recognizer on the crop.
                Mat cropped = new Mat();
                fourPointsTransform(img, vertices, cropped);

                //Debug.Log(cropped);

                Imgproc.cvtColor(cropped, cropped, Imgproc.COLOR_BGR2GRAY);

                Mat blobCrop = Dnn.blobFromImage(cropped, 1.0 / 127.5, new Size(), Scalar.all(127.5));
                recognizer.setInput(blobCrop);

                //Debug.Log(blobCrop);

                tickMeter.start();
                Mat result = recognizer.forward();
                tickMeter.stop();

                string wordRecognized;
                decodeText(result, out wordRecognized);
                Imgproc.putText(img, wordRecognized, vertices[1], Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(255, 0, 0), 1, Imgproc.LINE_AA, false);
                Debug.Log(wordRecognized);

                cropped.Dispose();
                blobCrop.Dispose();
                result.Dispose();
            }
        }

        Debug.Log("Inference time, ms: " + tickMeter.getTimeMilli());

        for (int i = 0; i < outs.Count; i++) {
            outs[i].Dispose();
        }
        blob.Dispose();
        detector.Dispose();
        recognizer.Dispose();
    }

    // Convert BGR -> RGB for Unity texture display.
    Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);

    Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

    Utils.matToTexture2D(img, texture);

    gameObject.GetComponent <Renderer>().material.mainTexture = texture;

    Utils.setDebugMode(false);
}