private void EstimatePoseGridBoard(Mat rgbMat)
{
    int valid = Aruco.estimatePoseBoard(corners, ids, gridBoard, camMatrix, distCoeffs, rvec, tvec);

    // If at least one board marker was detected.
    if (valid > 0)
    {
        // Because this example processes an RGB image (OpenCV assumes BGR), the axis-color
        // correspondences are X: blue, Y: green, Z: red (usually X: red, Y: green, Z: blue).
        Calib3d.drawFrameAxes(rgbMat, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f);

        UpdateARObjectTransform(rvec, tvec);
    }
}
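// UpdateARObjectTransform is not shown in this section. A minimal sketch of what it
// presumably does, assuming the ARUtils.ConvertRvecTvecToPoseData helper from this
// plugin and the transformationM / invertYM / invertZM / hasUpdatedARTransformMatrix
// fields used elsewhere in this file:
private void UpdateARObjectTransform(Mat rvec, Mat tvec)
{
    // Convert the OpenCV rotation/translation vectors to Unity pose data.
    double[] rvecArr = new double[3];
    rvec.get(0, 0, rvecArr);
    double[] tvecArr = new double[3];
    tvec.get(0, 0, tvecArr);
    PoseData poseData = ARUtils.ConvertRvecTvecToPoseData(rvecArr, tvecArr);

    // Build the transform matrix and convert right-handed (OpenCV) to left-handed (Unity).
    transformationM = Matrix4x4.TRS(poseData.pos, poseData.rot, Vector3.one);
    ARM = invertYM * transformationM * invertZM;

    hasUpdatedARTransformMatrix = true;
}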
private void OnDetectionDone()
{
    DebugUtils.TrackTick();

    if (displayCameraPreview)
    {
        Imgproc.cvtColor(downScaleMat, rgbMat4preview, Imgproc.COLOR_GRAY2RGB);

        if (ids.total() > 0)
        {
            Aruco.drawDetectedMarkers(rgbMat4preview, corners, ids, new Scalar(0, 255, 0));

            if (applyEstimationPose)
            {
                for (int i = 0; i < ids.total(); i++)
                {
                    using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
                    using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
                    {
                        // Because this example processes an RGB image (OpenCV assumes BGR), the axis-color
                        // correspondences are X: blue, Y: green, Z: red (usually X: red, Y: green, Z: blue).
                        Calib3d.drawFrameAxes(rgbMat4preview, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f);
                    }
                }
            }
        }

        Utils.fastMatToTexture2D(rgbMat4preview, texture);
    }

    if (applyEstimationPose && hasUpdatedARTransformMatrix)
    {
        hasUpdatedARTransformMatrix = false;

        // Apply the cameraToWorld matrix with the Z-axis inverted.
        ARM = arCamera.cameraToWorldMatrix * invertZM * ARM;

        if (enableLerpFilter)
        {
            arGameObject.SetMatrix4x4(ARM);
        }
        else
        {
            ARUtils.SetTransformFromMatrix(arGameObject.transform, ref ARM);
        }
    }

    isDetecting = false;
}
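// invertYM and invertZM are referenced throughout this file but not initialized in this
// section. A sketch of how such handedness-flip matrices are typically declared
// (assumption: plain Y-flip and Z-flip scale matrices, matching the inline invertYM
// built later in DetectMarkers):
private Matrix4x4 invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1));
private Matrix4x4 invertZM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1));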
private void EstimatePoseChArUcoDiamondMarker(Mat rgbMat)
{
    Aruco.estimatePoseSingleMarkers(diamondCorners, diamondSquareLength, camMatrix, distCoeffs, rvecs, tvecs);

    for (int i = 0; i < rvecs.total(); i++)
    {
        using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
        using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
        {
            // Because this example processes an RGB image (OpenCV assumes BGR), the axis-color
            // correspondences are X: blue, Y: green, Z: red (usually X: red, Y: green, Z: blue).
            Calib3d.drawFrameAxes(rgbMat, camMatrix, distCoeffs, rvec, tvec, diamondSquareLength * 0.5f);

            // This example displays the AR object only on the first detected marker.
            if (i == 0)
            {
                UpdateARObjectTransform(rvec, tvec);
            }
        }
    }
}
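// diamondCorners / diamondIds are assumed to be filled by a preceding detection step.
// A minimal sketch of that step, assuming the usual two-stage flow (detectMarkers, then
// detectCharucoDiamond) and a hypothetical squareLength/markerLength ratio field named
// diamondSquareMarkerLengthRate; the method name is also hypothetical:
private void DetectChArUcoDiamond(Mat rgbMat)
{
    Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejectedCorners);

    if (ids.total() > 0)
    {
        Aruco.detectCharucoDiamond(rgbMat, corners, ids, diamondSquareMarkerLengthRate,
            diamondCorners, diamondIds, camMatrix, distCoeffs);
    }
}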
public void OnFrameMatAcquired(Mat grayMat, Matrix4x4 projectionMatrix, Matrix4x4 cameraToWorldMatrix, CameraIntrinsics cameraIntrinsics)
{
    isDetectingInFrameArrivedThread = true;

    DebugUtils.VideoTick();

    Mat downScaleMat = null;
    float DOWNSCALE_RATIO;
    if (enableDownScale)
    {
        downScaleMat = imageOptimizationHelper.GetDownScaleMat(grayMat);
        DOWNSCALE_RATIO = imageOptimizationHelper.downscaleRatio;
    }
    else
    {
        downScaleMat = grayMat;
        DOWNSCALE_RATIO = 1.0f;
    }

    Mat camMatrix = null;
    MatOfDouble distCoeffs = null;
    if (useStoredCameraParameters)
    {
        camMatrix = this.camMatrix;
        distCoeffs = this.distCoeffs;
    }
    else
    {
        camMatrix = CreateCameraMatrix(cameraIntrinsics.FocalLengthX, cameraIntrinsics.FocalLengthY,
            cameraIntrinsics.PrincipalPointX / DOWNSCALE_RATIO, cameraIntrinsics.PrincipalPointY / DOWNSCALE_RATIO);
        distCoeffs = new MatOfDouble(cameraIntrinsics.RadialDistK1, cameraIntrinsics.RadialDistK2,
            cameraIntrinsics.RadialDistK3, cameraIntrinsics.TangentialDistP1, cameraIntrinsics.TangentialDistP2);
    }

    if (enableDetection)
    {
        // Detect markers and estimate pose.
        Aruco.detectMarkers(downScaleMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs);

        if (applyEstimationPose && ids.total() > 0)
        {
            Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs);

            for (int i = 0; i < ids.total(); i++)
            {
                // This example displays the AR object only on the first detected marker.
                if (i == 0)
                {
                    // Convert to Unity pose data.
                    double[] rvecArr = new double[3];
                    rvecs.get(0, 0, rvecArr);
                    double[] tvecArr = new double[3];
                    tvecs.get(0, 0, tvecArr);
                    // Compensate the depth for the downscaled input image.
                    tvecArr[2] /= DOWNSCALE_RATIO;
                    PoseData poseData = ARUtils.ConvertRvecTvecToPoseData(rvecArr, tvecArr);

                    // Create the transform matrix.
                    transformationM = Matrix4x4.TRS(poseData.pos, poseData.rot, Vector3.one);

                    lock (sync)
                    {
                        // Right-handed coordinate system (OpenCV) to left-handed one (Unity).
                        ARM = invertYM * transformationM;
                        // Apply the Z-axis inverting matrix.
                        ARM = ARM * invertZM;
                    }

                    hasUpdatedARTransformMatrix = true;
                    break;
                }
            }
        }
    }

    Mat rgbMat4preview = null;
    if (displayCameraPreview)
    {
        rgbMat4preview = new Mat();
        Imgproc.cvtColor(downScaleMat, rgbMat4preview, Imgproc.COLOR_GRAY2RGB);

        if (ids.total() > 0)
        {
            Aruco.drawDetectedMarkers(rgbMat4preview, corners, ids, new Scalar(0, 255, 0));

            if (applyEstimationPose)
            {
                for (int i = 0; i < ids.total(); i++)
                {
                    using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
                    using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
                    {
                        // Because this example processes an RGB image (OpenCV assumes BGR), the axis-color
                        // correspondences are X: blue, Y: green, Z: red (usually X: red, Y: green, Z: blue).
                        Calib3d.drawFrameAxes(rgbMat4preview, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f);
                    }
                }
            }
        }
    }

    DebugUtils.TrackTick();

    Enqueue(() =>
    {
        if (!webCamTextureToMatHelper.IsPlaying())
            return;

        if (displayCameraPreview && rgbMat4preview != null)
        {
            Utils.fastMatToTexture2D(rgbMat4preview, texture);
            rgbMat4preview.Dispose();
        }

        if (applyEstimationPose && hasUpdatedARTransformMatrix)
        {
            hasUpdatedARTransformMatrix = false;

            lock (sync)
            {
                // Apply the camera transform matrix.
                ARM = cameraToWorldMatrix * invertZM * ARM;

                if (enableLerpFilter)
                {
                    arGameObject.SetMatrix4x4(ARM);
                }
                else
                {
                    ARUtils.SetTransformFromMatrix(arGameObject.transform, ref ARM);
                }
            }
        }

        grayMat.Dispose();
    });

    isDetectingInFrameArrivedThread = false;
}
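// CreateCameraMatrix is not shown in this section. A minimal sketch of a helper that
// builds the 3x3 pinhole intrinsic matrix this callback expects (assumption: it simply
// packs fx, fy, cx, cy into a CV_64FC1 Mat, mirroring the camMatrix construction in
// DetectMarkers below):
private Mat CreateCameraMatrix(float fx, float fy, float cx, float cy)
{
    Mat camMatrix = new Mat(3, 3, CvType.CV_64FC1);
    camMatrix.put(0, 0, fx);
    camMatrix.put(0, 1, 0);
    camMatrix.put(0, 2, cx);
    camMatrix.put(1, 0, 0);
    camMatrix.put(1, 1, fy);
    camMatrix.put(1, 2, cy);
    camMatrix.put(2, 0, 0);
    camMatrix.put(2, 1, 0);
    camMatrix.put(2, 2, 1.0f);
    return camMatrix;
}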
private void DetectMarkers()
{
    Utils.texture2DToMat(imgTexture, rgbMat);
    Debug.Log("imgMat dst ToString " + rgbMat.ToString());

    gameObject.transform.localScale = new Vector3(imgTexture.width, imgTexture.height, 1);
    Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

    float width = rgbMat.width();
    float height = rgbMat.height();

    float imageSizeScale = 1.0f;
    float widthScale = (float)Screen.width / width;
    float heightScale = (float)Screen.height / height;
    if (widthScale < heightScale)
    {
        Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
        imageSizeScale = (float)Screen.height / (float)Screen.width;
    }
    else
    {
        Camera.main.orthographicSize = height / 2;
    }

    // Set camera parameters: approximate the focal length by the larger image dimension
    // and place the principal point at the image center.
    int max_d = (int)Mathf.Max(width, height);
    double fx = max_d;
    double fy = max_d;
    double cx = width / 2.0f;
    double cy = height / 2.0f;
    Mat camMatrix = new Mat(3, 3, CvType.CV_64FC1);
    camMatrix.put(0, 0, fx);
    camMatrix.put(0, 1, 0);
    camMatrix.put(0, 2, cx);
    camMatrix.put(1, 0, 0);
    camMatrix.put(1, 1, fy);
    camMatrix.put(1, 2, cy);
    camMatrix.put(2, 0, 0);
    camMatrix.put(2, 1, 0);
    camMatrix.put(2, 2, 1.0f);
    Debug.Log("camMatrix " + camMatrix.dump());

    MatOfDouble distCoeffs = new MatOfDouble(0, 0, 0, 0);
    Debug.Log("distCoeffs " + distCoeffs.dump());

    // Calibration camera matrix values.
    Size imageSize = new Size(width * imageSizeScale, height * imageSizeScale);
    double apertureWidth = 0;
    double apertureHeight = 0;
    double[] fovx = new double[1];
    double[] fovy = new double[1];
    double[] focalLength = new double[1];
    Point principalPoint = new Point(0, 0);
    double[] aspectratio = new double[1];

    Calib3d.calibrationMatrixValues(camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio);

    Debug.Log("imageSize " + imageSize.ToString());
    Debug.Log("apertureWidth " + apertureWidth);
    Debug.Log("apertureHeight " + apertureHeight);
    Debug.Log("fovx " + fovx[0]);
    Debug.Log("fovy " + fovy[0]);
    Debug.Log("focalLength " + focalLength[0]);
    Debug.Log("principalPoint " + principalPoint.ToString());
    Debug.Log("aspectratio " + aspectratio[0]);

    // Convert between the FOV conventions of OpenCV and Unity: OpenCV measures the FOV
    // around the principal point, while Unity assumes a centered, symmetric frustum.
    double fovXScale = (2.0 * Mathf.Atan((float)(imageSize.width / (2.0 * fx)))) / (Mathf.Atan2((float)cx, (float)fx) + Mathf.Atan2((float)(imageSize.width - cx), (float)fx));
    double fovYScale = (2.0 * Mathf.Atan((float)(imageSize.height / (2.0 * fy)))) / (Mathf.Atan2((float)cy, (float)fy) + Mathf.Atan2((float)(imageSize.height - cy), (float)fy));
    Debug.Log("fovXScale " + fovXScale);
    Debug.Log("fovYScale " + fovYScale);

    // Adjust the Unity camera FOV. https://github.com/opencv/opencv/commit/8ed1945ccd52501f5ab22bdec6aa1f91f1e2cfd4
    if (widthScale < heightScale)
    {
        arCamera.fieldOfView = (float)(fovx[0] * fovXScale);
    }
    else
    {
        arCamera.fieldOfView = (float)(fovy[0] * fovYScale);
    }
    // Display objects near the camera.
    arCamera.nearClipPlane = 0.01f;

    Mat ids = new Mat();
    List<Mat> corners = new List<Mat>();
    List<Mat> rejectedCorners = new List<Mat>();
    Mat rvecs = new Mat();
    Mat tvecs = new Mat();
    Mat rotMat = new Mat(3, 3, CvType.CV_64FC1);
    DetectorParameters detectorParams = DetectorParameters.create();
    Dictionary dictionary = Aruco.getPredefinedDictionary((int)dictionaryId);

    // Detect markers.
    Aruco.detectMarkers(rgbMat, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs);

    // If at least one marker was detected.
    if (ids.total() > 0)
    {
        Aruco.drawDetectedMarkers(rgbMat, corners, ids, new Scalar(0, 255, 0));

        // Estimate pose.
        if (applyEstimationPose)
        {
            Aruco.estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs);

            for (int i = 0; i < ids.total(); i++)
            {
                using (Mat rvec = new Mat(rvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
                using (Mat tvec = new Mat(tvecs, new OpenCVForUnity.CoreModule.Rect(0, i, 1, 1)))
                {
                    // Because this example processes an RGB image (OpenCV assumes BGR), the axis-color
                    // correspondences are X: blue, Y: green, Z: red (usually X: red, Y: green, Z: blue).
                    Calib3d.drawFrameAxes(rgbMat, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f);
                }

                // This example displays the AR object only on the first detected marker.
                if (i == 0)
                {
                    // Get the translation vector.
                    double[] tvecArr = tvecs.get(i, 0);
                    // Get the rotation vector.
                    double[] rvecArr = rvecs.get(i, 0);
                    Mat rvec = new Mat(3, 1, CvType.CV_64FC1);
                    rvec.put(0, 0, rvecArr);

                    // Convert the rotation vector to a rotation matrix.
                    Calib3d.Rodrigues(rvec, rotMat);
                    double[] rotMatArr = new double[rotMat.total()];
                    rotMat.get(0, 0, rotMatArr);

                    // Convert the OpenCV camera extrinsic parameters to a Unity Matrix4x4.
                    Matrix4x4 transformationM = new Matrix4x4(); // from OpenCV
                    transformationM.SetRow(0, new Vector4((float)rotMatArr[0], (float)rotMatArr[1], (float)rotMatArr[2], (float)tvecArr[0]));
                    transformationM.SetRow(1, new Vector4((float)rotMatArr[3], (float)rotMatArr[4], (float)rotMatArr[5], (float)tvecArr[1]));
                    transformationM.SetRow(2, new Vector4((float)rotMatArr[6], (float)rotMatArr[7], (float)rotMatArr[8], (float)tvecArr[2]));
                    transformationM.SetRow(3, new Vector4(0, 0, 0, 1));
                    Debug.Log("transformationM " + transformationM.ToString());

                    Matrix4x4 invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1));
                    Debug.Log("invertYM " + invertYM.ToString());

                    // Right-handed coordinate system (OpenCV) to left-handed one (Unity).
                    // https://stackoverflow.com/questions/30234945/change-handedness-of-a-row-major-4x4-transformation-matrix
                    Matrix4x4 ARM = invertYM * transformationM * invertYM;

                    if (shouldMoveARCamera)
                    {
                        ARM = arGameObject.transform.localToWorldMatrix * ARM.inverse;
                        Debug.Log("ARM " + ARM.ToString());
                        ARUtils.SetTransformFromMatrix(arCamera.transform, ref ARM);
                    }
                    else
                    {
                        ARM = arCamera.transform.localToWorldMatrix * ARM;
                        Debug.Log("ARM " + ARM.ToString());
                        ARUtils.SetTransformFromMatrix(arGameObject.transform, ref ARM);
                    }
                }
            }
        }
    }

    if (showRejectedCorners && rejectedCorners.Count > 0)
    {
        Aruco.drawDetectedMarkers(rgbMat, rejectedCorners, new Mat(), new Scalar(255, 0, 0));
    }

    Utils.matToTexture2D(rgbMat, texture);
}
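// ARUtils.SetTransformFromMatrix is part of this plugin and not shown here. A minimal
// sketch of the standard Unity idiom it presumably wraps (assumption: it decomposes a
// TRS matrix into localPosition / localRotation / localScale); the method name is
// hypothetical:
private static void ApplyMatrixToTransform(Transform transform, ref Matrix4x4 matrix)
{
    // Translation is the fourth column; rotation is derived from the basis vectors.
    transform.localPosition = matrix.GetColumn(3);
    transform.localRotation = Quaternion.LookRotation(matrix.GetColumn(2), matrix.GetColumn(1));
    transform.localScale = new Vector3(matrix.GetColumn(0).magnitude, matrix.GetColumn(1).magnitude, matrix.GetColumn(2).magnitude);
}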