public Vector4 DetectMarkers(Mat rgbMat)
{
    Mat ids = new Mat();
    List<Mat> corners = new List<Mat>();
    List<Mat> rejectedCorners = new List<Mat>();
    Mat rvecs = new Mat();
    //Mat rotMat = new Mat(3, 3, CvType.CV_64FC1);
    Mat tvecs = new Mat();
    DetectorParameters detectorParams = DetectorParameters.create();
    Dictionary dictionary = Aruco.getPredefinedDictionary((int)dictionaryId);
    Vector4 ARM = Vector4.zero;

    // detect markers
    Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs);

    // if at least one marker was detected
    if (ids.total() > 0)
    {
        // estimate pose, from marker to camera
        Aruco.estimatePoseSingleMarkers(corners, markerSize, camMatrix, distCoeffs, rvecs, tvecs);

        // Marker centre location
        double[] tvecArr = tvecs.get(0, 0);

        // image flip + coordinates transformation (OpenCV to Unity)
        ARM = new Vector4((float)tvecArr[0] + 0.005f, (float)tvecArr[1] + 0.005f, -(float)tvecArr[2], 1f);
        Debug.Log("raw ARM " + ARM.ToString("f5"));

        //// Rotation and convert to Unity matrix format
        //double[] rvecArr = rvecs.get(0, 0);
        //Mat rvec = new Mat(3, 1, CvType.CV_64FC1);
        //rvec.put(0, 0, rvecArr);
        //Calib3d.Rodrigues(rvec, rotMat);
        //double[] rotMatArr = new double[rotMat.total()];
        //rotMat.get(0, 0, rotMatArr);

        //// Transformation matrix
        //Matrix4x4 transformationM = new Matrix4x4();
        //transformationM.SetRow(0, new Vector4((float)rotMatArr[0], (float)rotMatArr[1], (float)rotMatArr[2], (float)tvecArr[0]));
        //transformationM.SetRow(1, new Vector4((float)rotMatArr[3], (float)rotMatArr[4], (float)rotMatArr[5], (float)tvecArr[1]));
        //transformationM.SetRow(2, new Vector4((float)rotMatArr[6], (float)rotMatArr[7], (float)rotMatArr[8], (float)tvecArr[2]));
        //transformationM.SetRow(3, new Vector4(0, 0, 0, 1));
        //Debug.Log("transformationM " + transformationM.ToString());

        success = true;
    }
    else
    {
        Debug.Log("not detected");
        success = false;
    }

    return ARM;
}
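// The Vector4 returned by DetectMarkers() above is a point in the camera's local (already OpenCV-to-Unity flipped) space,
// and the method sets a `success` flag rather than throwing. A minimal usage sketch, assuming the AR object is parented
// under the camera whose camMatrix/distCoeffs were used; `detector` and `arObject` are illustrative names, not part of the snippet above.
Vector4 markerPoint = detector.DetectMarkers(rgbMat);
if (detector.success)
{
    // Position the child object at the marker centre, expressed in the camera's local space.
    arObject.transform.localPosition = new Vector3(markerPoint.x, markerPoint.y, markerPoint.z);
}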
private void DetectARUcoMarker() { if (downscaleRatio == 1) { downScaleRgbaMat = rgbaMat4Thread; } else { Imgproc.resize(rgbaMat4Thread, downScaleRgbaMat, new Size(), 1.0 / downscaleRatio, 1.0 / downscaleRatio, Imgproc.INTER_LINEAR); } Imgproc.cvtColor(downScaleRgbaMat, grayMat, Imgproc.COLOR_RGBA2GRAY); // Detect markers and estimate Pose Aruco.detectMarkers(grayMat, dictionary, corners, ids, detectorParams, rejected); if (applyEstimationPose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < ids.total(); i++) { //This example can display ARObject on only first detected marker. if (i == 0) { // Position double[] tvec = tvecs.get(i, 0); // Rotation double[] rv = rvecs.get(i, 0); Mat rvec = new Mat(3, 1, CvType.CV_64FC1); rvec.put(0, 0, rv [0]); rvec.put(1, 0, rv [1]); rvec.put(2, 0, rv [2]); Calib3d.Rodrigues(rvec, rotMat); transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0) [0], (float)rotMat.get(0, 1) [0], (float)rotMat.get(0, 2) [0], (float)tvec [0])); transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0) [0], (float)rotMat.get(1, 1) [0], (float)rotMat.get(1, 2) [0], (float)tvec [1])); transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0) [0], (float)rotMat.get(2, 1) [0], (float)rotMat.get(2, 2) [0], (float)(tvec [2] / downscaleRatio))); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); // Right-handed coordinates system (OpenCV) to left-handed one (Unity) ARM = invertYM * transformationM; // Apply Z axis inverted matrix. ARM = ARM * invertZM; hasUpdatedARTransformMatrix = true; break; } } } }
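// The snippet above multiplies by invertYM and invertZM fields that are initialized elsewhere in the class.
// For reference, a sketch of those reflection matrices as they are defined explicitly in the Start() example
// later in this collection (shown standalone here):
Matrix4x4 invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1)); // flip Y: right-handed (OpenCV) to left-handed (Unity)
Matrix4x4 invertZM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1)); // flip Z: adjust the AR object's forward axis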
private void EstimatePoseCanonicalMarker(Mat Img)
{
    Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs);

    for (int i = 0; i < ids.total(); i++)
    {
        using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
        using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
        {
            // In this example we are processing with RGB color image, so Axis-color correspondences are X: blue, Y: green, Z: red. (Usually X: red, Y: green, Z: blue)
            Aruco.drawAxis(Img, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f);

            // Update the AR object depending on the marker id
            UpdateARObjectTransform(rvec, tvec, list_ids[i]);
        }
    }
}
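// UpdateARObjectTransform() is not shown above. A minimal sketch of what such a helper could look like, assembled
// from the rvec/tvec handling used elsewhere in this collection; the per-id object lookup (`arObjects`) and the
// `arCamera` reference are assumptions, not part of the original code.
private void UpdateARObjectTransform(Mat rvec, Mat tvec, int markerId)
{
    // Read the single translation/rotation vector out of the 1x1 sub-mats.
    double[] tvecArr = tvec.get(0, 0);
    double[] rvecArr = rvec.get(0, 0);

    // Rotation vector -> 3x3 rotation matrix.
    Mat rotMat = new Mat(3, 3, CvType.CV_64FC1);
    using (Mat rvec3x1 = new Mat(3, 1, CvType.CV_64FC1))
    {
        rvec3x1.put(0, 0, rvecArr);
        Calib3d.Rodrigues(rvec3x1, rotMat);
    }

    // OpenCV extrinsics -> Unity Matrix4x4 (row by row).
    Matrix4x4 transformationM = new Matrix4x4();
    transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0)[0], (float)rotMat.get(0, 1)[0], (float)rotMat.get(0, 2)[0], (float)tvecArr[0]));
    transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0)[0], (float)rotMat.get(1, 1)[0], (float)rotMat.get(1, 2)[0], (float)tvecArr[1]));
    transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0)[0], (float)rotMat.get(2, 1)[0], (float)rotMat.get(2, 2)[0], (float)tvecArr[2]));
    transformationM.SetRow(3, new Vector4(0, 0, 0, 1));

    // Right-handed (OpenCV) to left-handed (Unity), then into the camera's space.
    Matrix4x4 ARM = invertYM * transformationM * invertZM;
    ARM = arCamera.transform.localToWorldMatrix * ARM;

    // Hypothetical mapping from marker id to the GameObject that should follow it.
    if (arObjects.TryGetValue(markerId, out GameObject arObject))
    {
        ARUtils.SetTransformFromMatrix(arObject.transform, ref ARM);
    }
}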
private void DetectARUcoMarker()
{
    Imgproc.cvtColor(downScaleFrameMat, grayMat, Imgproc.COLOR_RGBA2GRAY);

    // Detect markers and estimate Pose
    Aruco.detectMarkers(grayMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs);

    if (applyEstimationPose && ids.total() > 0)
    {
        Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs);

        for (int i = 0; i < ids.total(); i++)
        {
            //This example can display ARObject on only first detected marker.
            if (i == 0)
            {
                // Convert to unity pose data.
                double[] rvecArr = new double[3];
                rvecs.get(0, 0, rvecArr);
                double[] tvecArr = new double[3];
                tvecs.get(0, 0, tvecArr);
                tvecArr[2] /= imageOptimizationHelper.downscaleRatio;
                PoseData poseData = ARUtils.ConvertRvecTvecToPoseData(rvecArr, tvecArr);

                // Changes in pos/rot below these thresholds are ignored.
                if (enableLowPassFilter)
                {
                    ARUtils.LowpassPoseData(ref oldPoseData, ref poseData, positionLowPass, rotationLowPass);
                }
                oldPoseData = poseData;

                // Create transform matrix.
                transformationM = Matrix4x4.TRS(poseData.pos, poseData.rot, Vector3.one);

                // Right-handed coordinates system (OpenCV) to left-handed one (Unity)
                // https://stackoverflow.com/questions/30234945/change-handedness-of-a-row-major-4x4-transformation-matrix
                ARM = invertYM * transformationM * invertYM;

                // Apply Y-axis and Z-axis reflection matrix. (Adjust the posture of the AR object)
                ARM = ARM * invertYM * invertZM;

                hasUpdatedARTransformMatrix = true;
                break;
            }
        }
    }
}
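// Several snippets here call ARUtils.ConvertRvecTvecToPoseData() without showing it. A minimal sketch of an
// equivalent conversion, assuming PoseData is a simple struct with pos/rot fields; this is illustrative math,
// not necessarily the library's exact implementation.
public static PoseData ConvertRvecTvecToPoseData(double[] rvecArr, double[] tvecArr)
{
    PoseData data = new PoseData();

    // Translation maps directly (still in OpenCV's right-handed camera frame at this point).
    data.pos = new Vector3((float)tvecArr[0], (float)tvecArr[1], (float)tvecArr[2]);

    // Rodrigues rotation vector: direction is the rotation axis, magnitude is the angle in radians.
    Vector3 axis = new Vector3((float)rvecArr[0], (float)rvecArr[1], (float)rvecArr[2]);
    float angleRad = axis.magnitude;
    data.rot = (angleRad > Mathf.Epsilon)
        ? Quaternion.AngleAxis(angleRad * Mathf.Rad2Deg, axis / angleRad)
        : Quaternion.identity;

    return data;
}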
private void EstimatePoseMarker(GameObject ARGameObject) { Aruco.estimatePoseSingleMarkers(cornersAruco, markerLength, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < idsAruco.total(); i++) { using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) { if (i == 0) { UpdateARObjectTransform(rvec, tvec, ARGameObject); } } } }
private void EstimatePoseChArUcoDiamondMarker(Mat rgbMat) { Aruco.estimatePoseSingleMarkers(diamondCorners, diamondSquareLength, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < rvecs.total(); i++) { using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) { // In this example we are processing with RGB color image, so Axis-color correspondences are X: blue, Y: green, Z: red. (Usually X: red, Y: green, Z: blue) Aruco.drawAxis(rgbMat, camMatrix, distCoeffs, rvec, tvec, diamondSquareLength * 0.5f); // This example can display the ARObject on only first detected marker. if (i == 0) { UpdateARObjectTransform(rvec, tvec); } } } }
private void DetectARUcoMarker()
{
    // Detect markers and estimate Pose
    Aruco.detectMarkers(downScaleMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs);

    if (applyEstimationPose && ids.total() > 0)
    {
        Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs);

        for (int i = 0; i < ids.total(); i++)
        {
            //This example can display ARObject on only first detected marker.
            if (i == 0)
            {
                // Convert to unity pose data.
                double[] rvecArr = new double[3];
                rvecs.get(0, 0, rvecArr);
                double[] tvecArr = new double[3];
                tvecs.get(0, 0, tvecArr);
                tvecArr[2] /= DOWNSCALE_RATIO;
                PoseData poseData = ARUtils.ConvertRvecTvecToPoseData(rvecArr, tvecArr);

                // Create transform matrix.
                transformationM = Matrix4x4.TRS(poseData.pos, poseData.rot, Vector3.one);

                // Right-handed coordinates system (OpenCV) to left-handed one (Unity)
                // https://stackoverflow.com/questions/30234945/change-handedness-of-a-row-major-4x4-transformation-matrix
                ARM = invertYM * transformationM * invertYM;

                // Apply Y-axis and Z-axis reflection matrix. (Adjust the posture of the AR object)
                ARM = ARM * invertYM * invertZM;

                hasUpdatedARTransformMatrix = true;
                break;
            }
        }
    }
}
public void OnFrameMatAcquired(Mat grayMat, Matrix4x4 projectionMatrix, Matrix4x4 cameraToWorldMatrix, CameraIntrinsics cameraIntrinsics) { isDetectingInFrameArrivedThread = true; DebugUtils.VideoTick(); Mat downScaleMat = null; float DOWNSCALE_RATIO; if (enableDownScale) { downScaleMat = imageOptimizationHelper.GetDownScaleMat(grayMat); DOWNSCALE_RATIO = imageOptimizationHelper.downscaleRatio; } else { downScaleMat = grayMat; DOWNSCALE_RATIO = 1.0f; } Mat camMatrix = null; MatOfDouble distCoeffs = null; if (useStoredCameraParameters) { camMatrix = this.camMatrix; distCoeffs = this.distCoeffs; } else { camMatrix = CreateCameraMatrix(cameraIntrinsics.FocalLengthX, cameraIntrinsics.FocalLengthY, cameraIntrinsics.PrincipalPointX / DOWNSCALE_RATIO, cameraIntrinsics.PrincipalPointY / DOWNSCALE_RATIO); distCoeffs = new MatOfDouble(cameraIntrinsics.RadialDistK1, cameraIntrinsics.RadialDistK2, cameraIntrinsics.RadialDistK3, cameraIntrinsics.TangentialDistP1, cameraIntrinsics.TangentialDistP2); } if (enableDetection) { // Detect markers and estimate Pose Aruco.detectMarkers(downScaleMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs); if (applyEstimationPose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < ids.total(); i++) { //This example can display ARObject on only first detected marker. if (i == 0) { // Convert to unity pose data. double[] rvecArr = new double[3]; rvecs.get(0, 0, rvecArr); double[] tvecArr = new double[3]; tvecs.get(0, 0, tvecArr); tvecArr[2] /= DOWNSCALE_RATIO; PoseData poseData = ARUtils.ConvertRvecTvecToPoseData(rvecArr, tvecArr); // Create transform matrix. transformationM = Matrix4x4.TRS(poseData.pos, poseData.rot, Vector3.one); lock (sync) { // Right-handed coordinates system (OpenCV) to left-handed one (Unity) ARM = invertYM * transformationM; // Apply Z-axis inverted matrix. ARM = ARM * invertZM; } hasUpdatedARTransformMatrix = true; break; } } } } Mat rgbMat4preview = null; if (displayCameraPreview) { rgbMat4preview = new Mat(); Imgproc.cvtColor(downScaleMat, rgbMat4preview, Imgproc.COLOR_GRAY2RGB); if (ids.total() > 0) { Aruco.drawDetectedMarkers(rgbMat4preview, corners, ids, new Scalar(0, 255, 0)); if (applyEstimationPose) { for (int i = 0; i < ids.total(); i++) { using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) { // In this example we are processing with RGB color image, so Axis-color correspondences are X: blue, Y: green, Z: red. (Usually X: red, Y: green, Z: blue) Calib3d.drawFrameAxes(rgbMat4preview, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f); } } } } } DebugUtils.TrackTick(); Enqueue(() => { if (!webCamTextureToMatHelper.IsPlaying()) { return; } if (displayCameraPreview && rgbMat4preview != null) { Utils.fastMatToTexture2D(rgbMat4preview, texture); rgbMat4preview.Dispose(); } if (applyEstimationPose) { if (hasUpdatedARTransformMatrix) { hasUpdatedARTransformMatrix = false; lock (sync) { // Apply camera transform matrix. ARM = cameraToWorldMatrix * invertZM * ARM; if (enableLerpFilter) { arGameObject.SetMatrix4x4(ARM); } else { ARUtils.SetTransformFromMatrix(arGameObject.transform, ref ARM); } } } } grayMat.Dispose(); }); isDetectingInFrameArrivedThread = false; }
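// CreateCameraMatrix() is called above but not shown. A plausible sketch, mirroring the inline 3x3
// intrinsic-matrix construction used in the still-image examples below; the method body is an assumption.
private Mat CreateCameraMatrix(float fx, float fy, float cx, float cy)
{
    // Standard 3x3 pinhole intrinsic matrix:
    // | fx  0  cx |
    // |  0  fy cy |
    // |  0  0   1 |
    Mat camMatrix = new Mat(3, 3, CvType.CV_64FC1);
    camMatrix.put(0, 0, fx); camMatrix.put(0, 1, 0);  camMatrix.put(0, 2, cx);
    camMatrix.put(1, 0, 0);  camMatrix.put(1, 1, fy); camMatrix.put(1, 2, cy);
    camMatrix.put(2, 0, 0);  camMatrix.put(2, 1, 0);  camMatrix.put(2, 2, 1.0f);
    return camMatrix;
}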
private void DetectMarkers() { Utils.texture2DToMat(imgTexture, rgbMat); Debug.Log("imgMat dst ToString " + rgbMat.ToString()); gameObject.transform.localScale = new Vector3(imgTexture.width, imgTexture.height, 1); Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation); float width = rgbMat.width(); float height = rgbMat.height(); float imageSizeScale = 1.0f; float widthScale = (float)Screen.width / width; float heightScale = (float)Screen.height / height; if (widthScale < heightScale) { Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2; imageSizeScale = (float)Screen.height / (float)Screen.width; } else { Camera.main.orthographicSize = height / 2; } // set camera parameters. int max_d = (int)Mathf.Max(width, height); double fx = max_d; double fy = max_d; double cx = width / 2.0f; double cy = height / 2.0f; Mat camMatrix = new Mat(3, 3, CvType.CV_64FC1); camMatrix.put(0, 0, fx); camMatrix.put(0, 1, 0); camMatrix.put(0, 2, cx); camMatrix.put(1, 0, 0); camMatrix.put(1, 1, fy); camMatrix.put(1, 2, cy); camMatrix.put(2, 0, 0); camMatrix.put(2, 1, 0); camMatrix.put(2, 2, 1.0f); Debug.Log("camMatrix " + camMatrix.dump()); MatOfDouble distCoeffs = new MatOfDouble(0, 0, 0, 0); Debug.Log("distCoeffs " + distCoeffs.dump()); // calibration camera matrix values. Size imageSize = new Size(width * imageSizeScale, height * imageSizeScale); double apertureWidth = 0; double apertureHeight = 0; double[] fovx = new double[1]; double[] fovy = new double[1]; double[] focalLength = new double[1]; Point principalPoint = new Point(0, 0); double[] aspectratio = new double[1]; Calib3d.calibrationMatrixValues(camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio); Debug.Log("imageSize " + imageSize.ToString()); Debug.Log("apertureWidth " + apertureWidth); Debug.Log("apertureHeight " + apertureHeight); Debug.Log("fovx " + fovx [0]); Debug.Log("fovy " + fovy [0]); Debug.Log("focalLength " + focalLength [0]); Debug.Log("principalPoint " + principalPoint.ToString()); Debug.Log("aspectratio " + aspectratio [0]); // To convert the difference of the FOV value of the OpenCV and Unity. double fovXScale = (2.0 * Mathf.Atan((float)(imageSize.width / (2.0 * fx)))) / (Mathf.Atan2((float)cx, (float)fx) + Mathf.Atan2((float)(imageSize.width - cx), (float)fx)); double fovYScale = (2.0 * Mathf.Atan((float)(imageSize.height / (2.0 * fy)))) / (Mathf.Atan2((float)cy, (float)fy) + Mathf.Atan2((float)(imageSize.height - cy), (float)fy)); Debug.Log("fovXScale " + fovXScale); Debug.Log("fovYScale " + fovYScale); // Adjust Unity Camera FOV https://github.com/opencv/opencv/commit/8ed1945ccd52501f5ab22bdec6aa1f91f1e2cfd4 if (widthScale < heightScale) { arCamera.fieldOfView = (float)(fovx [0] * fovXScale); } else { arCamera.fieldOfView = (float)(fovy [0] * fovYScale); } // Display objects near the camera. arCamera.nearClipPlane = 0.01f; Mat ids = new Mat(); List <Mat> corners = new List <Mat> (); List <Mat> rejectedCorners = new List <Mat> (); Mat rvecs = new Mat(); Mat tvecs = new Mat(); Mat rotMat = new Mat(3, 3, CvType.CV_64FC1); DetectorParameters detectorParams = DetectorParameters.create(); Dictionary dictionary = Aruco.getPredefinedDictionary((int)dictionaryId); // detect markers. 
Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs); // if at least one marker detected if (ids.total() > 0) { Aruco.drawDetectedMarkers(rgbMat, corners, ids, new Scalar(0, 255, 0)); // estimate pose. if (applyEstimationPose) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < ids.total(); i++) { using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1))) { // In this example we are processing with RGB color image, so Axis-color correspondences are X: blue, Y: green, Z: red. (Usually X: red, Y: green, Z: blue) Aruco.drawAxis(rgbMat, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f); } // This example can display the ARObject on only first detected marker. if (i == 0) { // Get translation vector double[] tvecArr = tvecs.get(i, 0); // Get rotation vector double[] rvecArr = rvecs.get(i, 0); Mat rvec = new Mat(3, 1, CvType.CV_64FC1); rvec.put(0, 0, rvecArr); // Convert rotation vector to rotation matrix. Calib3d.Rodrigues(rvec, rotMat); double[] rotMatArr = new double[rotMat.total()]; rotMat.get(0, 0, rotMatArr); // Convert OpenCV camera extrinsic parameters to Unity Matrix4x4. Matrix4x4 transformationM = new Matrix4x4(); // from OpenCV transformationM.SetRow(0, new Vector4((float)rotMatArr [0], (float)rotMatArr [1], (float)rotMatArr [2], (float)tvecArr [0])); transformationM.SetRow(1, new Vector4((float)rotMatArr [3], (float)rotMatArr [4], (float)rotMatArr [5], (float)tvecArr [1])); transformationM.SetRow(2, new Vector4((float)rotMatArr [6], (float)rotMatArr [7], (float)rotMatArr [8], (float)tvecArr [2])); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); Debug.Log("transformationM " + transformationM.ToString()); Matrix4x4 invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1)); Debug.Log("invertYM " + invertYM.ToString()); // right-handed coordinates system (OpenCV) to left-handed one (Unity) // https://stackoverflow.com/questions/30234945/change-handedness-of-a-row-major-4x4-transformation-matrix Matrix4x4 ARM = invertYM * transformationM * invertYM; if (shouldMoveARCamera) { ARM = arGameObject.transform.localToWorldMatrix * ARM.inverse; Debug.Log("ARM " + ARM.ToString()); ARUtils.SetTransformFromMatrix(arCamera.transform, ref ARM); } else { ARM = arCamera.transform.localToWorldMatrix * ARM; Debug.Log("ARM " + ARM.ToString()); ARUtils.SetTransformFromMatrix(arGameObject.transform, ref ARM); } } } } } if (showRejectedCorners && rejectedCorners.Count > 0) { Aruco.drawDetectedMarkers(rgbMat, rejectedCorners, new Mat(), new Scalar(255, 0, 0)); } Utils.matToTexture2D(rgbMat, texture); }
// Use this for initialization void Start() { Mat rgbMat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3); Utils.texture2DToMat(imgTexture, rgbMat); Debug.Log("imgMat dst ToString " + rgbMat.ToString()); gameObject.transform.localScale = new Vector3(imgTexture.width, imgTexture.height, 1); Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation); float width = rgbMat.width(); float height = rgbMat.height(); float imageSizeScale = 1.0f; float widthScale = (float)Screen.width / width; float heightScale = (float)Screen.height / height; if (widthScale < heightScale) { Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2; imageSizeScale = (float)Screen.height / (float)Screen.width; } else { Camera.main.orthographicSize = height / 2; } // set cameraparam. int max_d = (int)Mathf.Max(width, height); double fx = max_d; double fy = max_d; double cx = width / 2.0f; double cy = height / 2.0f; Mat camMatrix = new Mat(3, 3, CvType.CV_64FC1); camMatrix.put(0, 0, fx); camMatrix.put(0, 1, 0); camMatrix.put(0, 2, cx); camMatrix.put(1, 0, 0); camMatrix.put(1, 1, fy); camMatrix.put(1, 2, cy); camMatrix.put(2, 0, 0); camMatrix.put(2, 1, 0); camMatrix.put(2, 2, 1.0f); Debug.Log("camMatrix " + camMatrix.dump()); MatOfDouble distCoeffs = new MatOfDouble(0, 0, 0, 0); Debug.Log("distCoeffs " + distCoeffs.dump()); // calibration camera. Size imageSize = new Size(width * imageSizeScale, height * imageSizeScale); double apertureWidth = 0; double apertureHeight = 0; double[] fovx = new double[1]; double[] fovy = new double[1]; double[] focalLength = new double[1]; Point principalPoint = new Point(0, 0); double[] aspectratio = new double[1]; Calib3d.calibrationMatrixValues(camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio); Debug.Log("imageSize " + imageSize.ToString()); Debug.Log("apertureWidth " + apertureWidth); Debug.Log("apertureHeight " + apertureHeight); Debug.Log("fovx " + fovx [0]); Debug.Log("fovy " + fovy [0]); Debug.Log("focalLength " + focalLength [0]); Debug.Log("principalPoint " + principalPoint.ToString()); Debug.Log("aspectratio " + aspectratio [0]); // To convert the difference of the FOV value of the OpenCV and Unity. double fovXScale = (2.0 * Mathf.Atan((float)(imageSize.width / (2.0 * fx)))) / (Mathf.Atan2((float)cx, (float)fx) + Mathf.Atan2((float)(imageSize.width - cx), (float)fx)); double fovYScale = (2.0 * Mathf.Atan((float)(imageSize.height / (2.0 * fy)))) / (Mathf.Atan2((float)cy, (float)fy) + Mathf.Atan2((float)(imageSize.height - cy), (float)fy)); Debug.Log("fovXScale " + fovXScale); Debug.Log("fovYScale " + fovYScale); // Adjust Unity Camera FOV https://github.com/opencv/opencv/commit/8ed1945ccd52501f5ab22bdec6aa1f91f1e2cfd4 if (widthScale < heightScale) { ARCamera.fieldOfView = (float)(fovx [0] * fovXScale); } else { ARCamera.fieldOfView = (float)(fovy [0] * fovYScale); } Mat ids = new Mat(); List <Mat> corners = new List <Mat> (); List <Mat> rejected = new List <Mat> (); Mat rvecs = new Mat(); Mat tvecs = new Mat(); Mat rotMat = new Mat(3, 3, CvType.CV_64FC1); DetectorParameters detectorParams = DetectorParameters.create(); Dictionary dictionary = Aruco.getPredefinedDictionary(dictionaryId); // detect markers. Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejected); // estimate pose. 
if (applyEstimationPose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); } if (ids.total() > 0) { Aruco.drawDetectedMarkers(rgbMat, corners, ids, new Scalar(255, 0, 0)); if (applyEstimationPose) { for (int i = 0; i < ids.total(); i++) { // Debug.Log ("ids.dump() " + ids.dump ()); Aruco.drawAxis(rgbMat, camMatrix, distCoeffs, rvecs, tvecs, markerLength * 0.5f); // This example can display ARObject on only first detected marker. if (i == 0) { // position double[] tvec = tvecs.get(i, 0); // rotation double[] rv = rvecs.get(i, 0); Mat rvec = new Mat(3, 1, CvType.CV_64FC1); rvec.put(0, 0, rv[0]); rvec.put(1, 0, rv[1]); rvec.put(2, 0, rv[2]); Calib3d.Rodrigues(rvec, rotMat); Matrix4x4 transformationM = new Matrix4x4(); // from OpenCV transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0) [0], (float)rotMat.get(0, 1) [0], (float)rotMat.get(0, 2) [0], (float)tvec [0])); transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0) [0], (float)rotMat.get(1, 1) [0], (float)rotMat.get(1, 2) [0], (float)tvec [1])); transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0) [0], (float)rotMat.get(2, 1) [0], (float)rotMat.get(2, 2) [0], (float)tvec [2])); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); Debug.Log("transformationM " + transformationM.ToString()); Matrix4x4 invertZM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1)); Debug.Log("invertZM " + invertZM.ToString()); Matrix4x4 invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1)); Debug.Log("invertYM " + invertYM.ToString()); // right-handed coordinates system (OpenCV) to left-handed one (Unity) Matrix4x4 ARM = invertYM * transformationM; // Apply Z axis inverted matrix. ARM = ARM * invertZM; if (shouldMoveARCamera) { ARM = ARGameObject.transform.localToWorldMatrix * ARM.inverse; Debug.Log("ARM " + ARM.ToString()); ARUtils.SetTransformFromMatrix(ARCamera.transform, ref ARM); } else { ARM = ARCamera.transform.localToWorldMatrix * ARM; Debug.Log("ARM " + ARM.ToString()); ARUtils.SetTransformFromMatrix(ARGameObject.transform, ref ARM); } } } } } if (showRejected && rejected.Count > 0) { Aruco.drawDetectedMarkers(rgbMat, rejected, new Mat(), new Scalar(0, 0, 255)); } Texture2D texture = new Texture2D(rgbMat.cols(), rgbMat.rows(), TextureFormat.RGBA32, false); Utils.matToTexture2D(rgbMat, texture); gameObject.GetComponent <Renderer> ().material.mainTexture = texture; }
// Update is called once per frame void Update() { if (webCamTextureToMatHelper.IsPlaying() && webCamTextureToMatHelper.DidUpdateThisFrame()) { Mat rgbaMat = webCamTextureToMatHelper.GetMat(); Imgproc.cvtColor(rgbaMat, rgbMat, Imgproc.COLOR_RGBA2RGB); // detect markers. Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejected); // estimate pose. if (applyEstimationPose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); } if (ids.total() > 0) { Aruco.drawDetectedMarkers(rgbMat, corners, ids, new Scalar(255, 0, 0)); if (applyEstimationPose) { for (int i = 0; i < ids.total(); i++) { Aruco.drawAxis(rgbMat, camMatrix, distCoeffs, rvecs, tvecs, markerLength * 0.5f); // This example can display ARObject on only first detected marker. if (i == 0) { // position double[] tvec = tvecs.get(i, 0); // rotation double[] rv = rvecs.get(i, 0); Mat rvec = new Mat(3, 1, CvType.CV_64FC1); rvec.put(0, 0, rv [0]); rvec.put(1, 0, rv [1]); rvec.put(2, 0, rv [2]); Calib3d.Rodrigues(rvec, rotMat); transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0) [0], (float)rotMat.get(0, 1) [0], (float)rotMat.get(0, 2) [0], (float)tvec [0])); transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0) [0], (float)rotMat.get(1, 1) [0], (float)rotMat.get(1, 2) [0], (float)tvec [1])); transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0) [0], (float)rotMat.get(2, 1) [0], (float)rotMat.get(2, 2) [0], (float)tvec [2])); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); // right-handed coordinates system (OpenCV) to left-handed one (Unity) ARM = invertYM * transformationM; // Apply Z axis inverted matrix. ARM = ARM * invertZM; if (shouldMoveARCamera) { ARM = ARGameObject.transform.localToWorldMatrix * ARM.inverse; ARUtils.SetTransformFromMatrix(ARCamera.transform, ref ARM); } else { ARM = ARCamera.transform.localToWorldMatrix * ARM; ARUtils.SetTransformFromMatrix(ARGameObject.transform, ref ARM); } } } } } if (showRejected && rejected.Count > 0) { Aruco.drawDetectedMarkers(rgbMat, rejected, new Mat(), new Scalar(0, 0, 255)); } Imgproc.putText(rgbaMat, "W:" + rgbaMat.width() + " H:" + rgbaMat.height() + " SO:" + Screen.orientation, new Point(5, rgbaMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false); Utils.matToTexture2D(rgbMat, texture, webCamTextureToMatHelper.GetBufferColors()); } }
// Update is called once per frame void Update() { if (webCamTextureToMatHelper.IsPlaying() && webCamTextureToMatHelper.DidUpdateThisFrame()) { Mat rgbaMat = webCamTextureToMatHelper.GetMat(); Imgproc.cvtColor(rgbaMat, rgbMat, Imgproc.COLOR_RGBA2RGB); // detect markers and estimate pose Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejected); // Aruco.detectMarkers (imgMat, dictionary, corners, ids); if (estimatePose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); } // draw results if (ids.total() > 0) { Aruco.drawDetectedMarkers(rgbMat, corners, ids, new Scalar(255, 0, 0)); if (estimatePose) { for (int i = 0; i < ids.total(); i++) { Aruco.drawAxis(rgbMat, camMatrix, distCoeffs, rvecs, tvecs, markerLength * 0.5f); //This example can display ARObject on only first detected marker. if (i == 0) { // position double[] tvec = tvecs.get(i, 0); Vector4 pos = new Vector4((float)tvec [0], (float)tvec [1], (float)tvec [2], 0); // from OpenCV // right-handed coordinates system (OpenCV) to left-handed one (Unity) ARGameObject.transform.localPosition = new Vector3(pos.x, -pos.y, pos.z); // rotation double[] rv = rvecs.get(i, 0); Mat rvec = new Mat(3, 1, CvType.CV_64FC1); rvec.put(0, 0, rv[0]); rvec.put(1, 0, rv[1]); rvec.put(2, 0, rv[2]); Calib3d.Rodrigues(rvec, rotMat); Vector3 forward = new Vector3((float)rotMat.get(0, 2) [0], (float)rotMat.get(1, 2) [0], (float)rotMat.get(2, 2) [0]); // from OpenCV Vector3 up = new Vector3((float)rotMat.get(0, 1) [0], (float)rotMat.get(1, 1) [0], (float)rotMat.get(2, 1) [0]); // from OpenCV // right-handed coordinates system (OpenCV) to left-handed one (Unity) Quaternion rot = Quaternion.LookRotation(new Vector3(forward.x, -forward.y, forward.z), new Vector3(up.x, -up.y, up.z)); ARGameObject.transform.localRotation = rot; // Apply Z axis inverted matrix. invertZM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1)); transformationM = ARGameObject.transform.localToWorldMatrix * invertZM; // Apply camera transform matrix. transformationM = ARCamera.transform.localToWorldMatrix * transformationM; ARUtils.SetTransformFromMatrix(ARGameObject.transform, ref transformationM); } } } } if (showRejected && rejected.Count > 0) { Aruco.drawDetectedMarkers(rgbMat, rejected, new Mat(), new Scalar(0, 0, 255)); } Imgproc.putText(rgbaMat, "W:" + rgbaMat.width() + " H:" + rgbaMat.height() + " SO:" + Screen.orientation, new Point(5, rgbaMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false); Utils.matToTexture2D(rgbMat, texture, webCamTextureToMatHelper.GetBufferColors()); } }
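// The column extraction and LookRotation conversion above can be factored into a small helper. A sketch that
// mirrors the math used in the snippet; the helper name is illustrative.
private static Quaternion QuaternionFromOpenCVRotMat(Mat rotMat)
{
    // Third column of the rotation matrix = marker's Z (forward) axis, second column = Y (up) axis.
    Vector3 forward = new Vector3((float)rotMat.get(0, 2)[0], (float)rotMat.get(1, 2)[0], (float)rotMat.get(2, 2)[0]);
    Vector3 up      = new Vector3((float)rotMat.get(0, 1)[0], (float)rotMat.get(1, 1)[0], (float)rotMat.get(2, 1)[0]);

    // Flip Y to move from OpenCV's right-handed frame to Unity's left-handed frame.
    forward.y = -forward.y;
    up.y = -up.y;

    return Quaternion.LookRotation(forward, up);
}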
// Update is called once per frame void Update() { if (webCamTextureToMatHelper.isPlaying() && webCamTextureToMatHelper.didUpdateThisFrame()) { Mat rgbaMat = webCamTextureToMatHelper.GetMat(); Imgproc.cvtColor(rgbaMat, rgbMat, Imgproc.COLOR_RGBA2RGB); // detect markers and estimate pose Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejected); // Aruco.detectMarkers (imgMat, dictionary, corners, ids); if (estimatePose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); } // draw results if (ids.total() > 0) { Aruco.drawDetectedMarkers(rgbMat, corners, ids, new Scalar(255, 0, 0)); if (estimatePose) { for (int i = 0; i < ids.total(); i++) { Aruco.drawAxis(rgbMat, camMatrix, distCoeffs, rvecs, tvecs, markerLength * 0.5f); //This sample can display ARObject on only first detected marker. if (i == 0) { Calib3d.Rodrigues(rvecs, rotMat); transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0) [0], (float)rotMat.get(0, 1) [0], (float)rotMat.get(0, 2) [0], (float)tvecs.get(0, 0) [0])); transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0) [0], (float)rotMat.get(1, 1) [0], (float)rotMat.get(1, 2) [0], (float)tvecs.get(0, 0) [1])); transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0) [0], (float)rotMat.get(2, 1) [0], (float)rotMat.get(2, 2) [0], (float)tvecs.get(0, 0) [2])); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); if (shouldMoveARCamera) { ARM = ARGameObject.transform.localToWorldMatrix * invertZM * transformationM.inverse * invertYM; // Debug.Log ("ARM " + ARM.ToString ()); ARUtils.SetTransformFromMatrix(ARCamera.transform, ref ARM); } else { ARM = ARCamera.transform.localToWorldMatrix * invertYM * transformationM * invertZM; // Debug.Log ("ARM " + ARM.ToString ()); ARUtils.SetTransformFromMatrix(ARGameObject.transform, ref ARM); } } } } } if (showRejected && rejected.Count > 0) { Aruco.drawDetectedMarkers(rgbMat, rejected, new Mat(), new Scalar(0, 0, 255)); } Imgproc.putText(rgbaMat, "W:" + rgbaMat.width() + " H:" + rgbaMat.height() + " SO:" + Screen.orientation, new Point(5, rgbaMat.rows() - 10), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false); Utils.matToTexture2D(rgbMat, texture, webCamTextureToMatHelper.GetBufferColors()); } }
public List <ZOArucoTrackerDetection> DetectMarkers(Mat rgbMat) { List <ZOArucoTrackerDetection> results = new List <ZOArucoTrackerDetection>(); // Debug.Log("imgMat dst ToString " + rgbMat.ToString()); float width = rgbMat.width(); float height = rgbMat.height(); float imageSizeScale = 1.0f; float widthScale = (float)Screen.width / width; float heightScale = (float)Screen.height / height; if (widthScale < heightScale) { // Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2; imageSizeScale = (float)Screen.height / (float)Screen.width; } else { // Camera.main.orthographicSize = height / 2; } // set camera parameters. int max_d = (int)Mathf.Max(width, height); double fx = max_d; double fy = max_d; double cx = width / 2.0f; double cy = height / 2.0f; Mat camMatrix = new Mat(3, 3, CvType.CV_64FC1); camMatrix.put(0, 0, fx); camMatrix.put(0, 1, 0); camMatrix.put(0, 2, cx); camMatrix.put(1, 0, 0); camMatrix.put(1, 1, fy); camMatrix.put(1, 2, cy); camMatrix.put(2, 0, 0); camMatrix.put(2, 1, 0); camMatrix.put(2, 2, 1.0f); // Debug.Log("camMatrix " + camMatrix.dump()); MatOfDouble distCoeffs = new MatOfDouble(0, 0, 0, 0); // Debug.Log("distCoeffs " + distCoeffs.dump()); // calibration camera matrix values. Size imageSize = new Size(width * imageSizeScale, height * imageSizeScale); double apertureWidth = 0; double apertureHeight = 0; double[] fovx = new double[1]; double[] fovy = new double[1]; double[] focalLength = new double[1]; Point principalPoint = new Point(0, 0); double[] aspectratio = new double[1]; Calib3d.calibrationMatrixValues(camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio); // Debug.Log("imageSize " + imageSize.ToString()); // Debug.Log("apertureWidth " + apertureWidth); // Debug.Log("apertureHeight " + apertureHeight); // Debug.Log("fovx " + fovx[0]); // Debug.Log("fovy " + fovy[0]); // Debug.Log("focalLength " + focalLength[0]); // Debug.Log("principalPoint " + principalPoint.ToString()); // Debug.Log("aspectratio " + aspectratio[0]); // To convert the difference of the FOV value of the OpenCV and Unity. double fovXScale = (2.0 * Mathf.Atan((float)(imageSize.width / (2.0 * fx)))) / (Mathf.Atan2((float)cx, (float)fx) + Mathf.Atan2((float)(imageSize.width - cx), (float)fx)); double fovYScale = (2.0 * Mathf.Atan((float)(imageSize.height / (2.0 * fy)))) / (Mathf.Atan2((float)cy, (float)fy) + Mathf.Atan2((float)(imageSize.height - cy), (float)fy)); // Debug.Log("fovXScale " + fovXScale); // Debug.Log("fovYScale " + fovYScale); Mat ids = new Mat(); List <Mat> corners = new List <Mat>(); List <Mat> rejectedCorners = new List <Mat>(); Mat rvecs = new Mat(); Mat tvecs = new Mat(); Mat rotMat = new Mat(3, 3, CvType.CV_64FC1); DetectorParameters detectorParams = DetectorParameters.create(); Dictionary dictionary = Aruco.getPredefinedDictionary((int)dictionaryId); // detect markers. Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs); // Debug.Log("INFO: Number of markers detected: " + ids.total()); // if at least one marker detected if (ids.total() > 0) { if (_debug) { Aruco.drawDetectedMarkers(rgbMat, corners, ids, new Scalar(0, 255, 0)); } // estimate pose. 
Aruco.estimatePoseSingleMarkers(corners, _markerLengthMeters, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < ids.total(); i++) { // Get translation vector double[] tvecArr = tvecs.get(i, 0); // Get rotation vector double[] rvecArr = rvecs.get(i, 0); Mat rvec = new Mat(3, 1, CvType.CV_64FC1); rvec.put(0, 0, rvecArr); // Convert rotation vector to rotation matrix. Calib3d.Rodrigues(rvec, rotMat); double[] rotMatArr = new double[rotMat.total()]; rotMat.get(0, 0, rotMatArr); // Convert OpenCV camera extrinsic parameters to Unity Matrix4x4. Matrix4x4 transformationM = new Matrix4x4(); // from OpenCV transformationM.SetRow(0, new Vector4((float)rotMatArr[0], (float)rotMatArr[1], (float)rotMatArr[2], (float)tvecArr[0])); transformationM.SetRow(1, new Vector4((float)rotMatArr[3], (float)rotMatArr[4], (float)rotMatArr[5], (float)tvecArr[1])); transformationM.SetRow(2, new Vector4((float)rotMatArr[6], (float)rotMatArr[7], (float)rotMatArr[8], (float)tvecArr[2])); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); // Debug.Log("transformationM " + transformationM.ToString()); ZOArucoTrackerDetection detection = new ZOArucoTrackerDetection(); int [] currentId = new int[1]; // ids.get(0, i, currentId); ids.get(i, 0, currentId); detection.arucoId = currentId[0]; detection.transform = transformationM; results.Add(detection); } } return(results); }
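// ZOArucoTrackerDetection.transform above is still the raw OpenCV extrinsic matrix (right-handed, camera-relative).
// A sketch of how a caller might turn one detection into a Unity camera-space pose, reusing the invertYM conjugation
// from the other examples; `tracker` is an assumed instance of the class that owns DetectMarkers().
foreach (ZOArucoTrackerDetection detection in tracker.DetectMarkers(rgbMat))
{
    // Change handedness: right-handed (OpenCV) to left-handed (Unity). Result is still relative to the camera, not the world.
    Matrix4x4 unityPose = invertYM * detection.transform * invertYM;

    Vector3 cameraSpacePosition = unityPose.GetColumn(3);
    Quaternion cameraSpaceRotation = Quaternion.LookRotation(unityPose.GetColumn(2), unityPose.GetColumn(1));

    Debug.Log("marker " + detection.arucoId + " at " + cameraSpacePosition.ToString("f3"));
}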
private void ARUcoDetect() { if (DOWNSCALE_RATIO == 1) { downScaleRgbaMat = rgbaMat4Thread; } else { Imgproc.resize(rgbaMat4Thread, downScaleRgbaMat, new Size(), 1.0 / DOWNSCALE_RATIO, 1.0 / DOWNSCALE_RATIO, Imgproc.INTER_LINEAR); } Imgproc.cvtColor(downScaleRgbaMat, rgbMat, Imgproc.COLOR_RGBA2RGB); // Detect markers and estimate Pose Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejected); if (estimatePose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < ids.total(); i++) { //This example can display ARObject on only first detected marker. if (i == 0) { // Position double[] tvec = tvecs.get(i, 0); // Rotation double[] rv = rvecs.get(i, 0); Mat rvec = new Mat(3, 1, CvType.CV_64FC1); rvec.put(0, 0, rv [0]); rvec.put(1, 0, rv [1]); rvec.put(2, 0, rv [2]); Calib3d.Rodrigues(rvec, rotMat); transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0) [0], (float)rotMat.get(0, 1) [0], (float)rotMat.get(0, 2) [0], (float)tvec [0])); transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0) [0], (float)rotMat.get(1, 1) [0], (float)rotMat.get(1, 2) [0], (float)tvec [1])); transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0) [0], (float)rotMat.get(2, 1) [0], (float)rotMat.get(2, 2) [0], (float)(tvec [2] / DOWNSCALE_RATIO))); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); // Right-handed coordinates system (OpenCV) to left-handed one (Unity) ARM = invertYM * transformationM; // Apply Z axis inverted matrix. ARM = ARM * invertZM; ARTransMatrixUpdated = true; break; } } } /* * // Draw results * if (ids.total () > 0) { * Aruco.drawDetectedMarkers (rgbMat, corners, ids, new Scalar (255, 0, 0)); * * if (estimatePose) { * for (int i = 0; i < ids.total (); i++) { * Aruco.drawAxis (rgbMat, camMatrix, distCoeffs, rvecs, tvecs, markerLength * 0.5f); * } * } * } */ }
void OnRenderObject() { if (pointCloudVisualizer == null) { return; } if (vertices == null || uintColorData == null) { return; } pointCloudBuffer.SetData(vertices); colorBuffer.SetData(uintColorData); pointCloudVisualizer.SetBuffer("_PointCloudData", pointCloudBuffer); pointCloudVisualizer.SetBuffer("_ColorData", colorBuffer); //Aruco if (applyAruco) { //transformation Matrix is static if (isStatic) { //using earlier frames, decide transformationM if (!matrixCofigured) { while (frameCount > 0) { imgTexture.LoadRawTextureData(byteColorData); Utils.texture2DToMat(imgTexture, rgbMat); //Upside down Core.flip(rgbMat, rgbMat, 0); Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejected, camMatrix, distCoeffs); if (ids.total() == 1) { Debug.Log("detected"); frameCount--; } } Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); // position tvec = tvecs.get(0, 0); // rotation rvec = new Mat(rvecs, range, range); Calib3d.Rodrigues(rvec, rotMat); //from marker coordinate(opencv) to camera coordinate(opencv) transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0)[0], (float)rotMat.get(0, 1)[0], (float)rotMat.get(0, 2)[0], (float)tvec[0])); transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0)[0], (float)rotMat.get(1, 1)[0], (float)rotMat.get(1, 2)[0], (float)tvec[1])); transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0)[0], (float)rotMat.get(2, 1)[0], (float)rotMat.get(2, 2)[0], (float)tvec[2])); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); // right-handed coordinates system (OpenCV) to left-handed one (Unity) // from camera coordinate(opencv) to camera coordinate(unity) transformationM = invertYM * transformationM; // Apply Z axis inverted matrix. // from marker coordinate(Unity) to camera coordinate(Unity) transformationM = transformationM * invertZM; //from camera coordinate(unity) to marker coordinate(unity) //rotate 90 degrees around the x-axis(alignM) //from marker coordinate(unity) to world coordinate(unity) transformationM = markerOrigin.transform.localToWorldMatrix * alignM * transformationM.inverse; //transformationM: the matrix which transforms camera coordinate(unity) into world coordinate(unity) //move RealSense model ARUtils.SetTransformFromMatrix(viewCamera.transform, ref transformationM); matrixCofigured = true; Debug.Log("Transformation Matrix is configured"); if (drawMesh) { meshDrawer.draw(corners, vertices, ref transformationM, width, markerLength); } } } else { imgTexture.LoadRawTextureData(byteColorData); Utils.texture2DToMat(imgTexture, rgbMat); Core.flip(rgbMat, rgbMat, 0); Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejected, camMatrix, distCoeffs); if (ids.total() > 0) { Debug.Log("detected: " + ids.total()); Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); // position tvec = tvecs.get(0, 0); // rotation rvec = new Mat(rvecs, range, range); Calib3d.Rodrigues(rvec, rotMat); transformationM.SetRow(0, new Vector4((float)rotMat.get(0, 0)[0], (float)rotMat.get(0, 1)[0], (float)rotMat.get(0, 2)[0], (float)tvec[0])); transformationM.SetRow(1, new Vector4((float)rotMat.get(1, 0)[0], (float)rotMat.get(1, 1)[0], (float)rotMat.get(1, 2)[0], (float)tvec[1])); transformationM.SetRow(2, new Vector4((float)rotMat.get(2, 0)[0], (float)rotMat.get(2, 1)[0], (float)rotMat.get(2, 2)[0], (float)tvec[2])); transformationM.SetRow(3, new Vector4(0, 0, 0, 1)); transformationM = invertYM * transformationM; transformationM = transformationM 
* invertZM; transformationM = markerOrigin.transform.localToWorldMatrix * alignM * transformationM.inverse; ARUtils.SetTransformFromMatrix(viewCamera.transform, ref transformationM); } } } else { transformationM = Matrix4x4.TRS(viewCamera.transform.position, viewCamera.transform.rotation, viewCamera.transform.lossyScale); } pointCloudVisualizer.SetMatrix("_transformationM", transformationM); trs = Matrix4x4.TRS(transform.position, transform.rotation, transform.lossyScale); pointCloudVisualizer.SetMatrix("_trs", trs); pointCloudVisualizer.SetPass(0); Graphics.DrawProcedural(MeshTopology.Points, dataLength); }
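// The point-cloud example above multiplies by an alignM matrix described only in a comment as a 90-degree rotation
// around the X axis. A sketch of how such a field might be initialized; the sign of the rotation depends on how the
// marker is mounted and is an assumption here.
Matrix4x4 alignM = Matrix4x4.TRS(Vector3.zero, Quaternion.Euler(90f, 0f, 0f), Vector3.one);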
public void OnFrameMatAcquired(Mat bgraMat, Matrix4x4 projectionMatrix, Matrix4x4 cameraToWorldMatrix) { downScaleFrameMat = imageOptimizationHelper.GetDownScaleMat(bgraMat); if (enableDetection) { Imgproc.cvtColor(downScaleFrameMat, grayMat, Imgproc.COLOR_BGRA2GRAY); // Detect markers and estimate Pose Aruco.detectMarkers(grayMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs); if (applyEstimationPose && ids.total() > 0) { Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); for (int i = 0; i < ids.total(); i++) { //This example can display ARObject on only first detected marker. if (i == 0) { // Convert to unity pose data. double[] rvecArr = new double[3]; rvecs.get(0, 0, rvecArr); double[] tvecArr = new double[3]; tvecs.get(0, 0, tvecArr); tvecArr[2] /= imageOptimizationHelper.downscaleRatio; PoseData poseData = ARUtils.ConvertRvecTvecToPoseData(rvecArr, tvecArr); // Changes in pos/rot below these thresholds are ignored. if (enableLowPassFilter) { ARUtils.LowpassPoseData(ref oldPoseData, ref poseData, positionLowPass, rotationLowPass); } oldPoseData = poseData; // Create transform matrix. transformationM = Matrix4x4.TRS(poseData.pos, poseData.rot, Vector3.one); lock (sync){ // Right-handed coordinates system (OpenCV) to left-handed one (Unity) ARM = invertYM * transformationM; // Apply Z-axis inverted matrix. ARM = ARM * invertZM; } hasUpdatedARTransformMatrix = true; break; } } } } Mat rgbMat4preview = null; if (displayCameraPreview) { rgbMat4preview = new Mat(); Imgproc.cvtColor(downScaleFrameMat, rgbMat4preview, Imgproc.COLOR_BGRA2RGB); if (ids.total() > 0) { Aruco.drawDetectedMarkers(rgbMat4preview, corners, ids, new Scalar(0, 255, 0)); if (applyEstimationPose) { for (int i = 0; i < ids.total(); i++) { using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.Rect(0, i, 1, 1))) using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.Rect(0, i, 1, 1))) { // In this example we are processing with RGB color image, so Axis-color correspondences are X: blue, Y: green, Z: red. (Usually X: red, Y: green, Z: blue) Aruco.drawAxis(rgbMat4preview, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f); } } } } } UnityEngine.WSA.Application.InvokeOnAppThread(() => { if (!webCamTextureToMatHelper.IsPlaying()) { return; } if (displayCameraPreview && rgbMat4preview != null) { OpenCVForUnity.Utils.fastMatToTexture2D(rgbMat4preview, texture); } if (applyEstimationPose) { if (hasUpdatedARTransformMatrix) { hasUpdatedARTransformMatrix = false; lock (sync){ // Apply camera transform matrix. ARM = cameraToWorldMatrix * invertZM * ARM; ARUtils.SetTransformFromMatrix(arGameObject.transform, ref ARM); } } } bgraMat.Dispose(); if (rgbMat4preview != null) { rgbMat4preview.Dispose(); } }, false); }
/// <summary>
/// Looks for markers in the most recent ZED image, and updates all registered MarkerObjects accordingly.
/// </summary>
private void ImageUpdated(Camera cam, Mat camMat, Mat imageMat)
{
    Dictionary predict = Aruco.getPredefinedDictionary((int)markerDictionary); //Load the selected pre-defined dictionary.

    //Create empty structures to hold the output of Aruco.detectMarkers.
    List<Mat> corners = new List<Mat>();
    Mat ids = new Mat();
    DetectorParameters detectparams = DetectorParameters.create();
    List<Mat> rejectedpoints = new List<Mat>(); //There is no overload for detectMarkers that will take camMat without this also.

    //Call OpenCV to tell us which markers were detected, and give their 2D positions.
    Aruco.detectMarkers(imageMat, predict, corners, ids, detectparams, rejectedpoints, camMat);

    //Make matrices to hold the output rotations and translations of each detected marker.
    Mat rotvectors = new Mat();
    Mat transvectors = new Mat();

    //Convert the 2D corner positions into a 3D pose for each detected marker.
    Aruco.estimatePoseSingleMarkers(corners, markerWidthMeters, camMat, new Mat(), rotvectors, transvectors);

    //Now we have ids, rotvectors and transvectors, which are all vertical arrays holding info about each detection:
    // - ids: An Nx1 array (N = number of markers detected) where each slot is the ID of a detected marker in the dictionary.
    // - rotvectors: An Nx3 array where each row is for an individual detection: the first row is the rotation of the marker
    //   listed in the first row of ids, etc. The columns are the X, Y and Z angles of that marker's rotation, BUT they are not
    //   directly usable in Unity because they're calculated very differently. We'll deal with that soon.
    // - transvectors: An array like rotvectors with each row corresponding to a detected marker, holding a double[3] with the X, Y and Z
    //   positions. These three values are usable in Unity - they're just relative to the camera, not the world, which is easy to fix.

    //Convert the matrix of IDs into a List, to simplify things for those not familiar with using matrices.
    List<int> detectedIDs = new List<int>();
    for (int i = 0; i < ids.height(); i++)
    {
        int id = (int)ids.get(i, 0)[0];
        if (!detectedIDs.Contains(id)) { detectedIDs.Add(id); }
    }

    //We'll go through every ID that's been registered into registeredMarkers, and see if we found any markers in the scene with that ID.
    Dictionary<int, List<sl.Pose>> detectedWorldPoses = new Dictionary<int, List<sl.Pose>>(); //Key is marker ID, value is world space poses.

    //foreach (int id in registeredMarkers.Keys)
    for (int index = 0; index < transvectors.rows(); index++)
    {
        int id = (int)ids.get(index, 0)[0];

        if (!registeredMarkers.ContainsKey(id) || registeredMarkers[id].Count == 0)
        {
            continue; //Don't waste time if the list is empty. Can happen if markers are added, then removed.
        }

        if (detectedIDs.Contains(id)) //At least one MarkerObject needs to be updated. Convert pose to world space and call MarkerDetected() on it.
        {
            //Translation is just pose relative to camera. But we need to flip Y because of OpenCV's different coordinate system.
            Vector3 localpos;
            localpos.x = (float)transvectors.get(index, 0)[0];
            localpos.y = -(float)transvectors.get(index, 0)[1];
            localpos.z = (float)transvectors.get(index, 0)[2];

            Vector3 worldpos = cam.transform.TransformPoint(localpos); //Convert from local to world space.

            //Because of different coordinate frame, we need to flip the Y direction, which is pointing down instead of up.
            //We need to do this before we calculate the 3x3 rotation matrix soon, as that makes it muuuch harder to work with.
            double[] flip = rotvectors.get(index, 0);
            flip[1] = -flip[1];
            rotvectors.put(index, 0, flip);

            //Convert this rotation vector to a 3x3 matrix, which will hold values we can use in Unity.
            Mat rotmatrix = new Mat();
            Calib3d.Rodrigues(rotvectors.row(index), rotmatrix);

            //This new 3x3 matrix holds a vector pointing right in the first column, a vector pointing up in the second,
            //and a vector pointing forward in the third column. Rows 0, 1 and 2 are the X, Y and Z values of each vector.
            //We'll grab the forward and up vectors, which we can put into Quaternion.LookRotation() to get a representative Quaternion.
            Vector3 forward;
            forward.x = (float)rotmatrix.get(2, 0)[0];
            forward.y = (float)rotmatrix.get(2, 1)[0];
            forward.z = (float)rotmatrix.get(2, 2)[0];

            Vector3 up;
            up.x = (float)rotmatrix.get(1, 0)[0];
            up.y = (float)rotmatrix.get(1, 1)[0];
            up.z = (float)rotmatrix.get(1, 2)[0];

            Quaternion rot = Quaternion.LookRotation(forward, up);

            //Compensate for flip on Z axis.
            rot *= Quaternion.Euler(0, 0, 180);

            //Convert from local space to world space by multiplying the camera's world rotation with it.
            Quaternion worldrot = cam.transform.rotation * rot;

            if (!detectedWorldPoses.ContainsKey(id))
            {
                detectedWorldPoses.Add(id, new List<sl.Pose>());
            }
            detectedWorldPoses[id].Add(new sl.Pose() { translation = worldpos, rotation = worldrot });

            foreach (MarkerObject marker in registeredMarkers[id])
            {
                marker.MarkerDetectedSingle(worldpos, worldrot);
            }
        }
    }

    //Call the event that gives all marker world poses, if any listeners.
    if (OnMarkersDetected != null)
    {
        OnMarkersDetected.Invoke(detectedWorldPoses);
    }

    //foreach (int detectedid in detectedWorldPoses.Keys)
    foreach (int key in registeredMarkers.Keys)
    {
        if (detectedWorldPoses.ContainsKey(key))
        {
            foreach (MarkerObject marker in registeredMarkers[key])
            {
                marker.MarkerDetectedAll(detectedWorldPoses[key]);
            }
        }
        else
        {
            foreach (MarkerObject marker in registeredMarkers[key])
            {
                marker.MarkerNotDetected();
            }
        }
    }
}
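// For context, a minimal sketch of a MarkerObject consumer using the callback names visible above
// (MarkerDetectedSingle / MarkerDetectedAll / MarkerNotDetected). Whether these are virtual methods on a
// MonoBehaviour-derived base class is an assumption; only the call signatures come from the snippet.
public class FollowMarkerObject : MarkerObject
{
    public override void MarkerDetectedSingle(Vector3 worldPosition, Quaternion worldRotation)
    {
        // Snap this object to the first detected instance of its marker.
        transform.SetPositionAndRotation(worldPosition, worldRotation);
        gameObject.SetActive(true);
    }

    public override void MarkerDetectedAll(List<sl.Pose> worldPoses)
    {
        // All world-space poses for this marker id are available here if multiple instances should be handled.
    }

    public override void MarkerNotDetected()
    {
        // Hide the object when its marker is not visible this frame.
        gameObject.SetActive(false);
    }
}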