/// <summary>
/// Get the intrinsic calibration parameters for a given camera, aligned with the current device orientation.
///
/// For example, if the device orientation is portrait and the camera intrinsics are reported in landscape,
/// this function swaps the intrinsic x and y components and reports the intrinsics in portrait mode.
///
/// The intrinsics are as specified by the TangoCameraIntrinsics struct and are accessed via the API.
/// </summary>
/// <param name="cameraId">The camera ID to retrieve the calibration intrinsics for.</param>
/// <param name="alignedIntrinsics">
/// A TangoCameraIntrinsics filled with calibration intrinsics for the camera, aligned with the current
/// device orientation.
/// </param>
public static void GetDeviceOrientationAlignedIntrinsics(TangoEnums.TangoCameraId cameraId,
                                                         TangoCameraIntrinsics alignedIntrinsics)
{
    // Read the raw intrinsics for the requested camera.
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    GetIntrinsics(cameraId, intrinsics);

    Tango.OrientationManager.Rotation rotation = TangoSupport.RotateFromAToB(
        AndroidHelper.GetDisplayRotation(),
        AndroidHelper.GetColorCameraRotation());

    switch (rotation)
    {
    case Tango.OrientationManager.Rotation.ROTATION_90:
        alignedIntrinsics.cx = intrinsics.cy;
        alignedIntrinsics.cy = intrinsics.width - intrinsics.cx;
        alignedIntrinsics.fx = intrinsics.fy;
        alignedIntrinsics.fy = intrinsics.fx;
        alignedIntrinsics.width = intrinsics.height;
        alignedIntrinsics.height = intrinsics.width;
        break;
    case Tango.OrientationManager.Rotation.ROTATION_180:
        alignedIntrinsics.cx = intrinsics.width - intrinsics.cx;
        alignedIntrinsics.cy = intrinsics.height - intrinsics.cy;
        alignedIntrinsics.fx = intrinsics.fx;
        alignedIntrinsics.fy = intrinsics.fy;
        alignedIntrinsics.width = intrinsics.width;
        alignedIntrinsics.height = intrinsics.height;
        break;
    case Tango.OrientationManager.Rotation.ROTATION_270:
        alignedIntrinsics.cx = intrinsics.height - intrinsics.cy;
        alignedIntrinsics.cy = intrinsics.cx;
        alignedIntrinsics.fx = intrinsics.fy;
        alignedIntrinsics.fy = intrinsics.fx;
        alignedIntrinsics.width = intrinsics.height;
        alignedIntrinsics.height = intrinsics.width;
        break;
    default:
        alignedIntrinsics.cx = intrinsics.cx;
        alignedIntrinsics.cy = intrinsics.cy;
        alignedIntrinsics.fx = intrinsics.fx;
        alignedIntrinsics.fy = intrinsics.fy;
        alignedIntrinsics.width = intrinsics.width;
        alignedIntrinsics.height = intrinsics.height;
        break;
    }

    alignedIntrinsics.distortion0 = intrinsics.distortion0;
    alignedIntrinsics.distortion1 = intrinsics.distortion1;
    alignedIntrinsics.distortion2 = intrinsics.distortion2;
    alignedIntrinsics.distortion3 = intrinsics.distortion3;
    alignedIntrinsics.distortion4 = intrinsics.distortion4;
    alignedIntrinsics.camera_id = intrinsics.camera_id;
    alignedIntrinsics.calibration_type = intrinsics.calibration_type;
}
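// Illustrative usage sketch (not part of the SDK): deriving the vertical field of view of the color camera
// as currently displayed from the orientation-aligned intrinsics. Assumes it is called after the Tango
// service has connected; the method name and the Camera.main fallback are placeholders for this example.
private static float _GetAlignedVerticalFovDegrees()
{
    TangoCameraIntrinsics aligned = new TangoCameraIntrinsics();
    GetDeviceOrientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, aligned);

    if (aligned.height == 0 || aligned.fy == 0)
    {
        // Intrinsics are not available yet; fall back to the Unity camera's configured FOV.
        return Camera.main.fieldOfView;
    }

    // Pinhole model: vertical FOV = 2 * atan((height / 2) / fy).
    return 2.0f * Mathf.Rad2Deg * Mathf.Atan((float)aligned.height * 0.5f / (float)aligned.fy);
}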
/// <summary> /// Get the intrinsic calibration parameters for a given camera. /// /// The intrinsics are as specified by the TangoCameraIntrinsics struct and are accessed via the API. /// </summary> /// <param name="cameraId">The camera ID to retrieve the calibration intrinsics for.</param> /// <param name="intrinsics">A TangoCameraIntrinsics filled with calibration intrinsics for the camera.</param> public static void GetIntrinsics(TangoEnums.TangoCameraId cameraId, TangoCameraIntrinsics intrinsics) { int returnValue = API.TangoService_getCameraIntrinsics(cameraId, intrinsics); #if UNITY_EDITOR // In editor, base 'intrinsics' off of properties of emulation camera. if (cameraId == TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR && EmulatedEnvironmentRenderHelper.m_emulationCamera != null) { // Instantiate any resources that we haven't yet. _InternResourcesForEmulation(); EmulatedEnvironmentRenderHelper.m_emulationCamera.targetTexture = m_emulatedColorRenderTexture; intrinsics.width = (uint)EMULATED_CAMERA_WIDTH; intrinsics.height = (uint)EMULATED_CAMERA_HEIGHT; float fov = EmulatedEnvironmentRenderHelper.m_emulationCamera.fieldOfView; float focalLengthInPixels = (1 / Mathf.Tan(fov * 0.5f * Mathf.Deg2Rad)) * (intrinsics.height * 0.5f); intrinsics.fy = intrinsics.fx = focalLengthInPixels; intrinsics.cx = intrinsics.width / 2f; intrinsics.cy = intrinsics.height / 2f; } #endif if (returnValue != Common.ErrorType.TANGO_SUCCESS) { Debug.Log("IntrinsicsProviderAPI.TangoService_getCameraIntrinsics() failed!"); } }
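// Worked example (illustrative): the emulated focal length above comes from the pinhole relation
// fy = (height / 2) / tan(fov / 2). For a 60-degree vertical FOV and a 720-pixel-high emulated image,
// fy = 360 / tan(30 deg) ≈ 360 / 0.5774 ≈ 623.5 pixels. The emulated principal point is placed at the
// image center, and the distortion coefficients are simply left untouched by the emulation path.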
/// <summary>
/// Calculates the depth in the color camera space at a user-specified
/// location using nearest-neighbor interpolation.
/// </summary>
/// <returns>
/// Common.ErrorType.TANGO_SUCCESS on success and
/// Common.ErrorType.TANGO_INVALID on invalid input.
/// </returns>
/// <param name="pointCloud">
/// The point cloud. Cannot be null and must have at least one point.
/// </param>
/// <param name="pointCount">
/// The number of points to read from the point cloud.
/// </param>
/// <param name="timestamp">The timestamp of the depth points.</param>
/// <param name="cameraIntrinsics">
/// The camera intrinsics for the color camera. Cannot be null.
/// </param>
/// <param name="matrix">
/// Transformation matrix of the color camera with respect to the Unity
/// World frame.
/// </param>
/// <param name="uvCoordinates">
/// The UV coordinates for the user selection. This is expected to be
/// between (0.0, 0.0) and (1.0, 1.0).
/// </param>
/// <param name="colorCameraPoint">
/// The point (x, y, z), where (x, y) is the back-projection of the UV
/// coordinates to the color camera space and z is the z coordinate of
/// the point in the point cloud nearest to the user selection after
/// projection onto the image plane. If there is not a point cloud point
/// close to the user selection after projection onto the image plane,
/// then the point will be set to (0.0, 0.0, 0.0) and isValidPoint will
/// be set to false.
/// </param>
/// <param name="isValidPoint">
/// A flag valued true if there is a point cloud point close to the user
/// selection after projection onto the image plane and valued false
/// otherwise.
/// </param>
public static int GetDepthAtPointNearestNeighbor(
    Vector3[] pointCloud, int pointCount, double timestamp,
    TangoCameraIntrinsics cameraIntrinsics, ref Matrix4x4 matrix,
    Vector2 uvCoordinates, out Vector3 colorCameraPoint,
    out bool isValidPoint)
{
    GCHandle pointCloudHandle = GCHandle.Alloc(pointCloud, GCHandleType.Pinned);

    TangoXYZij pointCloudXyzIj = new TangoXYZij();
    pointCloudXyzIj.timestamp = timestamp;
    pointCloudXyzIj.xyz_count = pointCount;
    pointCloudXyzIj.xyz = pointCloudHandle.AddrOfPinnedObject();

    DMatrix4x4 doubleMatrix = new DMatrix4x4(matrix);

    // Unity has Y pointing screen up; Tango camera has Y pointing
    // screen down.
    float[] uvCoordinatesArray = new float[2];
    uvCoordinatesArray[0] = uvCoordinates.x;
    uvCoordinatesArray[1] = 1.0f - uvCoordinates.y;

    int isValidPointInteger;

    int returnValue = TangoSupportAPI.TangoSupport_getDepthAtPointNearestNeighborMatrixTransform(
        pointCloudXyzIj, cameraIntrinsics, ref doubleMatrix,
        uvCoordinatesArray, out colorCameraPoint, out isValidPointInteger);

    isValidPoint = isValidPointInteger != 0;

    pointCloudHandle.Free();

    return returnValue;
}
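// Illustrative usage sketch (not part of the SDK): querying the depth under a screen touch. The fields
// m_points, m_pointsCount, m_depthTimestamp and m_colorCameraTUnityWorld are assumed to be maintained by a
// point cloud listener elsewhere, and the wrapper is assumed to live on TangoSupport.
private bool _TryGetDepthAtTouch(Vector2 screenPosition, TangoCameraIntrinsics colorCameraIntrinsics,
                                 out Vector3 colorCameraPoint)
{
    // Normalize the touch position into the (0,0)..(1,1) UV range expected by the support call.
    Vector2 uv = new Vector2(screenPosition.x / Screen.width, screenPosition.y / Screen.height);

    bool isValidPoint;
    int result = TangoSupport.GetDepthAtPointNearestNeighbor(
        m_points, m_pointsCount, m_depthTimestamp, colorCameraIntrinsics,
        ref m_colorCameraTUnityWorld, uv, out colorCameraPoint, out isValidPoint);

    return result == Common.ErrorType.TANGO_SUCCESS && isValidPoint;
}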
/// <summary> /// Calculates the depth in the color camera space at a user-specified /// location using bilateral filtering weighted by both spatial distance /// from the user coordinate and by intensity similarity. /// </summary> /// <returns> /// Common.ErrorType.TANGO_SUCCESS on success, /// Common.ErrorType.TANGO_INVALID on invalid input, and /// Common.ErrorType.TANGO_ERROR on failure. /// </returns> /// <param name="pointCloud"> /// The point cloud. Cannot be null and must have at least one point. /// </param> /// <param name="pointCount"> /// The number of points to read from the point cloud. /// </param> /// <param name="timestamp">The timestamp of the depth points.</param> /// <param name="cameraIntrinsics"> /// The camera intrinsics for the color camera. Cannot be null. /// </param> /// <param name="colorImage"> /// The color image buffer. Cannot be null. /// </param> /// <param name="matrix"> /// Transformation matrix of the color camera with respect to the Unity /// World frame. /// </param> /// <param name="uvCoordinates"> /// The UV coordinates for the user selection. This is expected to be /// between (0.0, 0.0) and (1.0, 1.0). /// </param> /// <param name="colorCameraPoint"> /// The point (x, y, z), where (x, y) is the back-projection of the UV /// coordinates to the color camera space and z is the z coordinate of /// the point in the point cloud nearest to the user selection after /// projection onto the image plane. If there is not a point cloud point /// close to the user selection after projection onto the image plane, /// then the point will be set to (0.0, 0.0, 0.0) and isValidPoint will /// be set to false. /// </param> /// <param name="isValidPoint"> /// A flag valued true if there is a point cloud point close to the user /// selection after projection onto the image plane and valued false /// otherwise. /// </param> public static int ScreenCoordinateToWorldBilateral( Vector3[] pointCloud, int pointCount, double timestamp, TangoCameraIntrinsics cameraIntrinsics, TangoImageBuffer colorImage, ref Matrix4x4 matrix, Vector2 uvCoordinates, out Vector3 colorCameraPoint, out bool isValidPoint) { GCHandle pointCloudHandle = GCHandle.Alloc(pointCloud, GCHandleType.Pinned); TangoXYZij pointCloudXyzIj = new TangoXYZij(); pointCloudXyzIj.timestamp = timestamp; pointCloudXyzIj.xyz_count = pointCount; pointCloudXyzIj.xyz = pointCloudHandle.AddrOfPinnedObject(); DMatrix4x4 doubleMatrix = new DMatrix4x4(matrix); // Unity has Y pointing screen up; Tango camera has Y pointing // screen down. Vector2 uvCoordinatesTango = new Vector2(uvCoordinates.x, 1.0f - uvCoordinates.y); int isValidPointInteger; int returnValue = TangoSupportAPI.TangoSupport_getDepthAtPointBilateralCameraIntrinsicsMatrixTransform( pointCloudXyzIj, cameraIntrinsics, colorImage, ref doubleMatrix, ref uvCoordinatesTango, out colorCameraPoint, out isValidPointInteger); isValidPoint = isValidPointInteger != 0; pointCloudHandle.Free(); return(returnValue); }
public static int TangoSupport_fitPlaneModelNearPointMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates, out DVector3 intersectionPoint, double[] planeModel) { intersectionPoint = new DVector3(); return(Common.ErrorType.TANGO_SUCCESS); }
/// <summary> /// Get the camera/sensor intrinsics. /// </summary> /// <param name="cameraId">Camera identifier.</param> /// <param name="intrinsics">Camera intrinsics data.</param> public static void GetIntrinsics(TangoEnums.TangoCameraId cameraId, [Out] TangoCameraIntrinsics intrinsics) { int returnValue = VideoOverlayAPI.TangoService_getCameraIntrinsics(cameraId, intrinsics); if (returnValue != Common.ErrorType.TANGO_SUCCESS) { Debug.Log("IntrinsicsProviderAPI.TangoService_getCameraIntrinsics() failed!"); } }
public static int TangoSupport_getDepthAtPointNearestNeighborMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates, out Vector3 colorCameraPoint, out int isValidPoint) { colorCameraPoint = Vector3.zero; isValidPoint = 1; return(Common.ErrorType.TANGO_SUCCESS); }
public static int GetDepthAtPointNearestNeighbor( Vector3[] pointCloud, int pointCount, double timestamp, TangoCameraIntrinsics cameraIntrinsics, ref Matrix4x4 matrix, Vector2 uvCoordinates, out Vector3 colorCameraPoint, out bool isValidPoint) { return(ScreenCoordinateToWorldNearestNeighbor(pointCloud, pointCount, timestamp, cameraIntrinsics, ref matrix, uvCoordinates, out colorCameraPoint, out isValidPoint)); }
/// <summary> /// Pass the camera intrinsics to both PostProcess and ARScreen shader. /// /// The camera intrinsics are needed to distort and undo distort the camera image. /// </summary> /// <param name="intrinsics">Color camera intrinsics.</param> internal void SetupIntrinsic(TangoCameraIntrinsics intrinsics) { m_postProcessMaterial.SetFloat("_Width", (float)intrinsics.width); m_postProcessMaterial.SetFloat("_Height", (float)intrinsics.height); m_postProcessMaterial.SetFloat("_Fx", (float)intrinsics.fx); m_postProcessMaterial.SetFloat("_Fy", (float)intrinsics.fy); m_postProcessMaterial.SetFloat("_Cx", (float)intrinsics.cx); m_postProcessMaterial.SetFloat("_Cy", (float)intrinsics.cy); m_postProcessMaterial.SetFloat("_K0", (float)intrinsics.distortion0); m_postProcessMaterial.SetFloat("_K1", (float)intrinsics.distortion1); m_postProcessMaterial.SetFloat("_K2", (float)intrinsics.distortion2); }
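// Illustrative sketch (not part of the SDK): the shader parameters set above are typically used with a
// three-coefficient polynomial radial distortion model. The CPU-side equivalent below shows the mapping
// from an undistorted pixel to its distorted position, assuming the common polynomial form
// r_d = r_u * (1 + k0*r_u^2 + k1*r_u^4 + k2*r_u^6); the shader may organize the math differently.
private static Vector2 _DistortPixel(Vector2 undistortedPixel, TangoCameraIntrinsics i)
{
    // Move to normalized camera coordinates relative to the principal point.
    double x = (undistortedPixel.x - i.cx) / i.fx;
    double y = (undistortedPixel.y - i.cy) / i.fy;

    double r2 = (x * x) + (y * y);
    double scale = 1.0 + (i.distortion0 * r2) + (i.distortion1 * r2 * r2) + (i.distortion2 * r2 * r2 * r2);

    // Back to pixel coordinates.
    return new Vector2(
        (float)((x * scale * i.fx) + i.cx),
        (float)((y * scale * i.fy) + i.cy));
}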
/// <summary> /// Fits a plane to a point cloud near a user-specified location. This /// occurs in two passes. First, all points in cloud within /// <c>maxPixelDistance</c> to <c>uvCoordinates</c> after projection are kept. Then a /// plane is fit to the subset cloud using RANSAC. After the initial fit /// all inliers from the original cloud are used to refine the plane /// model. /// </summary> /// <returns> /// Common.ErrorType.TANGO_SUCCESS on success, /// Common.ErrorType.TANGO_INVALID on invalid input, and /// Common.ErrorType.TANGO_ERROR on failure. /// </returns> /// <param name="pointCloud"> /// The point cloud. Cannot be null and must have at least three points. /// </param> /// <param name="pointCount"> /// The number of points to read from the point cloud. /// </param> /// <param name="timestamp">The timestamp of the point cloud.</param> /// <param name="cameraIntrinsics"> /// The camera intrinsics for the color camera. Cannot be null. /// </param> /// <param name="matrix"> /// Transformation matrix of the color camera with respect to the Unity /// World frame. /// </param> /// <param name="uvCoordinates"> /// The UV coordinates for the user selection. This is expected to be /// between (0.0, 0.0) and (1.0, 1.0). /// </param> /// <param name="intersectionPoint"> /// The output point in depth camera coordinates that the user selected. /// </param> /// <param name="plane">The plane fit.</param> public static int FitPlaneModelNearClick( Vector3[] pointCloud, int pointCount, double timestamp, TangoCameraIntrinsics cameraIntrinsics, ref Matrix4x4 matrix, Vector2 uvCoordinates, out Vector3 intersectionPoint, out Plane plane) { GCHandle pointCloudHandle = GCHandle.Alloc(pointCloud, GCHandleType.Pinned); TangoXYZij pointCloudXyzIj = new TangoXYZij(); pointCloudXyzIj.timestamp = timestamp; pointCloudXyzIj.xyz_count = pointCount; pointCloudXyzIj.xyz = pointCloudHandle.AddrOfPinnedObject(); DMatrix4x4 doubleMatrix = new DMatrix4x4(matrix); // Unity has Y pointing screen up; Tango camera has Y pointing // screen down. Vector2 uvCoordinatesTango = new Vector2(uvCoordinates.x, 1.0f - uvCoordinates.y); DVector3 doubleIntersectionPoint = new DVector3(); double[] planeArray = new double[4]; int returnValue = TangoSupportAPI.TangoSupport_fitPlaneModelNearPointMatrixTransform( pointCloudXyzIj, cameraIntrinsics, ref doubleMatrix, ref uvCoordinatesTango, out doubleIntersectionPoint, planeArray); if (returnValue != Common.ErrorType.TANGO_SUCCESS) { intersectionPoint = new Vector3(0.0f, 0.0f, 0.0f); plane = new Plane(new Vector3(0.0f, 0.0f, 0.0f), 0.0f); } else { intersectionPoint = doubleIntersectionPoint.ToVector3(); Vector3 normal = new Vector3((float)planeArray[0], (float)planeArray[1], (float)planeArray[2]); float distance = (float)planeArray[3] / normal.magnitude; plane = new Plane(normal, distance); } pointCloudHandle.Free(); return(returnValue); }
/// <summary> /// Calculate the camera extrinsics for this device. /// </summary> private void _UpdateExtrinsics() { TangoCoordinateFramePair pair; TangoPoseData imu_T_devicePose = new TangoPoseData(); pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE; PoseProvider.GetPoseAtTime(imu_T_devicePose, 0, pair); TangoPoseData imu_T_depthCameraPose = new TangoPoseData(); pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH; PoseProvider.GetPoseAtTime(imu_T_depthCameraPose, 0, pair); TangoPoseData imu_T_colorCameraPose = new TangoPoseData(); pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR; PoseProvider.GetPoseAtTime(imu_T_colorCameraPose, 0, pair); // Convert into matrix form to combine the poses. Matrix4x4 device_T_imu = Matrix4x4.Inverse(imu_T_devicePose.ToMatrix4x4()); m_device_T_depthCamera = device_T_imu * imu_T_depthCameraPose.ToMatrix4x4(); m_device_T_colorCamera = device_T_imu * imu_T_colorCameraPose.ToMatrix4x4(); m_unityWorld_T_startService.SetColumn(0, new Vector4(1, 0, 0, 0)); m_unityWorld_T_startService.SetColumn(1, new Vector4(0, 0, 1, 0)); m_unityWorld_T_startService.SetColumn(2, new Vector4(0, 1, 0, 0)); m_unityWorld_T_startService.SetColumn(3, new Vector4(0, 0, 0, 1)); // Update the camera intrinsics too. TangoCameraIntrinsics colorCameraIntrinsics = new TangoCameraIntrinsics(); VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorCameraIntrinsics); m_colorCameraIntrinsics.calibration_type = (int)colorCameraIntrinsics.calibration_type; m_colorCameraIntrinsics.width = colorCameraIntrinsics.width; m_colorCameraIntrinsics.height = colorCameraIntrinsics.height; m_colorCameraIntrinsics.cx = colorCameraIntrinsics.cx; m_colorCameraIntrinsics.cy = colorCameraIntrinsics.cy; m_colorCameraIntrinsics.fx = colorCameraIntrinsics.fx; m_colorCameraIntrinsics.fy = colorCameraIntrinsics.fy; m_colorCameraIntrinsics.distortion0 = colorCameraIntrinsics.distortion0; m_colorCameraIntrinsics.distortion1 = colorCameraIntrinsics.distortion1; m_colorCameraIntrinsics.distortion2 = colorCameraIntrinsics.distortion2; m_colorCameraIntrinsics.distortion3 = colorCameraIntrinsics.distortion3; m_colorCameraIntrinsics.distortion4 = colorCameraIntrinsics.distortion4; }
/// <summary> /// Set the AR screen rendering distortion parameters. This affects correcting for the curvature of the lens. /// </summary> /// <param name="rectifyImage">If <c>true</c>, rectify the AR screen image when rendering.</param> public static void SetARScreenDistortion(bool rectifyImage) { #if UNITY_EDITOR // There is no distortion in emulation. #else if (!rectifyImage) { API.TangoUnity_setRenderTextureDistortion(null); } else { TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics(); GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics); API.TangoUnity_setRenderTextureDistortion(intrinsics); } #endif }
/// <summary>
/// Get the intrinsic calibration parameters for a given camera, aligned with the current device orientation.
///
/// For example, if the device orientation is portrait and the camera intrinsics are reported in landscape,
/// this function swaps the intrinsic x and y components and reports the intrinsics in portrait mode.
///
/// The intrinsics are as specified by the TangoCameraIntrinsics struct. Intrinsics are read from the
/// on-device intrinsics file (typically <code>/sdcard/config/calibration.xml</code>, but to ensure
/// compatibility applications should only access these parameters via the API), or default internal model
/// parameters corresponding to the device are used if the calibration.xml file is not found.
/// </summary>
/// <param name="cameraId">The camera ID to retrieve the calibration intrinsics for.</param>
/// <param name="alignedIntrinsics">
/// A TangoCameraIntrinsics filled with calibration intrinsics for the camera, aligned with the current
/// device orientation.
/// </param>
public static void GetDeviceOientationAlignedIntrinsics(TangoEnums.TangoCameraId cameraId,
                                                        TangoCameraIntrinsics alignedIntrinsics)
{
    // Read the raw intrinsics for the requested camera.
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    GetIntrinsics(cameraId, intrinsics);

    float intrinsicsRatio = (float)intrinsics.width / (float)intrinsics.height;
    bool isLandscape = (AndroidHelper.GetDefaultOrientation() +
                        (int)AndroidHelper.GetDisplayRotation()) % 2 == 0;

    // If the intrinsics aspect ratio and the camera render orientation don't agree, swap the intrinsics
    // so they align with the camera render orientation.
    if ((!isLandscape && intrinsicsRatio > 1.0f) || (isLandscape && intrinsicsRatio < 1.0f))
    {
        alignedIntrinsics.cx = intrinsics.cy;
        alignedIntrinsics.cy = intrinsics.cx;
        alignedIntrinsics.fx = intrinsics.fy;
        alignedIntrinsics.fy = intrinsics.fx;
        alignedIntrinsics.height = intrinsics.width;
        alignedIntrinsics.width = intrinsics.height;
    }
    else
    {
        alignedIntrinsics.cx = intrinsics.cx;
        alignedIntrinsics.cy = intrinsics.cy;
        alignedIntrinsics.fx = intrinsics.fx;
        alignedIntrinsics.fy = intrinsics.fy;
        alignedIntrinsics.height = intrinsics.height;
        alignedIntrinsics.width = intrinsics.width;
    }

    alignedIntrinsics.distortion0 = intrinsics.distortion0;
    alignedIntrinsics.distortion1 = intrinsics.distortion1;
    alignedIntrinsics.distortion2 = intrinsics.distortion2;
    alignedIntrinsics.distortion3 = intrinsics.distortion3;
    alignedIntrinsics.distortion4 = intrinsics.distortion4;
    alignedIntrinsics.camera_id = intrinsics.camera_id;
    alignedIntrinsics.calibration_type = intrinsics.calibration_type;
}
/// <summary>
/// Update AR screen rendering and the attached Camera's projection matrix.
/// </summary>
/// <param name="displayRotation">Activity (screen) rotation.</param>
/// <param name="colorCameraRotation">Color camera sensor rotation.</param>
private void _SetRenderAndCamera(OrientationManager.Rotation displayRotation,
                                 OrientationManager.Rotation colorCameraRotation)
{
    float cameraRatio = (float)Screen.width / (float)Screen.height;
    float cameraWidth = (float)Screen.width;
    float cameraHeight = (float)Screen.height;
    bool needToFlipCameraRatio = false;

    // Compute whether the current display orientation is landscape or portrait.
    // AndroidHelper.GetDefaultOrientation() returns 1 if the device's default orientation is portrait and
    // 2 if it is landscape. Adding the default orientation to the display's rotation away from that default
    // gives us the current display orientation (landscape vs. portrait).
    bool isLandscape = (AndroidHelper.GetDefaultOrientation() + (int)displayRotation) % 2 == 0;

#if !UNITY_EDITOR
    // Most of the time we don't need to flip the camera width and height. However, the Unity camera can lag
    // a couple of frames behind Android's display-changed callback, in which case we do need to flip them.
    //
    // This does not happen in the editor, because the emulated device never rotates.
    needToFlipCameraRatio = (!isLandscape & (cameraRatio > 1.0f)) || (isLandscape & (cameraRatio < 1.0f));

    if (needToFlipCameraRatio)
    {
        cameraRatio = 1.0f / cameraRatio;
        float tmp = cameraWidth;
        cameraWidth = cameraHeight;
        cameraHeight = tmp;
    }
#endif

    TangoCameraIntrinsics alignedIntrinsics = new TangoCameraIntrinsics();
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetDeviceOientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR,
                                                              alignedIntrinsics);
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

    if (alignedIntrinsics.width != 0 && alignedIntrinsics.height != 0)
    {
        // The camera to which this script is attached is an Augmented Reality camera. The color camera
        // image must fill that camera's viewport. That means we must clip the color camera image to make
        // its ratio the same as the Unity camera. If we don't do this the color camera image will be
        // stretched non-uniformly, making a circle into an ellipse.
        float widthRatio = (float)cameraWidth / (float)alignedIntrinsics.width;
        float heightRatio = (float)cameraHeight / (float)alignedIntrinsics.height;

        if (widthRatio >= heightRatio)
        {
            m_uOffset = 0;
            m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
        }
        else
        {
            m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
            m_vOffset = 0;
        }

        // Note that we pass in the non-rotated intrinsics here, because the YUV conversion still operates
        // on the native buffer layout.
        OrientationManager.Rotation rotation = TangoSupport.RotateFromAToB(displayRotation, colorCameraRotation);
        _MaterialUpdateForIntrinsics(m_uOffset, m_vOffset, rotation);
        _CameraUpdateForIntrinsics(m_camera, alignedIntrinsics, m_uOffset, m_vOffset);
        if (m_arCameraPostProcess != null)
        {
            m_arCameraPostProcess.SetupIntrinsic(intrinsics);
        }
    }
    else
    {
        Debug.LogError("AR camera intrinsics are not valid.");
    }
}
public static int TangoSupport_fitPlaneModelNearClickMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics intrinsics, ref Matrix4x4 colorCameraTUnityWorld, float[] uvCoordinates, double[] intersectionPoint, double[] planeModel) { return(Common.ErrorType.TANGO_SUCCESS); }
/// <summary> /// Get the intrinsic calibration parameters for a given camera. /// /// The intrinsics are as specified by the TangoCameraIntrinsics struct. Intrinsics are read from the /// on-device intrinsics file (typically <code>/sdcard/config/calibration.xml</code>, but to ensure /// compatibility applications should only access these parameters via the API), or default internal model /// parameters corresponding to the device are used if the calibration.xml file is not found. /// </summary> /// <param name="cameraId">The camera ID to retrieve the calibration intrinsics for.</param> /// <param name="intrinsics">A TangoCameraIntrinsics filled with calibration intrinsics for the camera.</param> public static void GetIntrinsics(TangoEnums.TangoCameraId cameraId, TangoCameraIntrinsics intrinsics) { int returnValue = API.TangoService_getCameraIntrinsics(cameraId, intrinsics); #if UNITY_EDITOR // In editor, base 'intrinsics' off of properties of emulation camera. if (cameraId == TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR && EmulatedEnvironmentRenderHelper.m_emulationCamera != null) { // Instantiate any resources that we haven't yet. _InternResourcesForEmulation(); EmulatedEnvironmentRenderHelper.m_emulationCamera.targetTexture = m_emulatedColorRenderTexture; intrinsics.width = (uint)EMULATED_CAMERA_WIDTH; intrinsics.height = (uint)EMULATED_CAMERA_HEIGHT; float fov = EmulatedEnvironmentRenderHelper.m_emulationCamera.fieldOfView; float focalLengthInPixels = (1 / Mathf.Tan(fov * 0.5f * Mathf.Deg2Rad)) * (intrinsics.height * 0.5f); intrinsics.fy = intrinsics.fx = focalLengthInPixels; intrinsics.cx = intrinsics.width / 2f; intrinsics.cy = intrinsics.height / 2f; } #endif if (returnValue != Common.ErrorType.TANGO_SUCCESS) { Debug.Log("IntrinsicsProviderAPI.TangoService_getCameraIntrinsics() failed!"); } }
/// <summary> /// This is called when successfully connected to the Tango service. /// </summary> public void OnTangoServiceConnected() { #if UNITY_EDITOR // Format needs to be ARGB32 in editor to use Texture2D.ReadPixels() in emulation // in Unity 4.6. RenderTexture environmentMap = new RenderTexture(EMULATED_CAMERA_WIDTH, EMULATED_CAMERA_HEIGHT, 0, RenderTextureFormat.ARGB32); environmentMap.Create(); m_environmentMap = environmentMap; #else TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics(); VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics); m_environmentMap = new Texture2D((int)intrinsics.width, (int)intrinsics.height, TextureFormat.RGBA32, false); #endif }
public static extern int TangoSupport_getDepthAtPointNearestNeighborMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates, out Vector3 colorCameraPoint, [Out, MarshalAs(UnmanagedType.I4)] out int isValidPoint);
/// <summary> /// Sets up extrinsic matrixes and camera intrinsics for this hardware. /// </summary> private void _SetUpCameraData() { if (m_cameraDataSetUp) { return; } double timestamp = 0.0; TangoCoordinateFramePair pair; TangoPoseData poseData = new TangoPoseData(); // Query the extrinsics between IMU and device frame. pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE; PoseProvider.GetPoseAtTime(poseData, timestamp, pair); m_imuTDevice = poseData.ToMatrix4x4(); // Query the extrinsics between IMU and depth camera frame. pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH; PoseProvider.GetPoseAtTime(poseData, timestamp, pair); m_imuTDepthCamera = poseData.ToMatrix4x4(); // Also get the camera intrinsics m_colorCameraIntrinsics = new TangoCameraIntrinsics(); VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics); m_cameraDataSetUp = true; }
public static extern int TangoSupport_fitPlaneModelNearClickMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics intrinsics, ref Matrix4x4 matrix, [In, MarshalAs(UnmanagedType.LPArray, SizeConst = 2)] float[] uvCoordinates, [Out, MarshalAs(UnmanagedType.LPArray, SizeConst = 3)] double[] intersectionPoint, [Out, MarshalAs(UnmanagedType.LPArray, SizeConst = 4)] double[] planeModel);
public static void TangoUnity_setRenderTextureDistortion(TangoCameraIntrinsics intrinsics) { }
public static int GetDepthAtPointNearestNeighbor( Vector3[] pointCloud, int pointCount, double timestamp, TangoCameraIntrinsics cameraIntrinsics, ref Matrix4x4 matrix, Vector2 uvCoordinates, out Vector3 colorCameraPoint, out bool isValidPoint) { return ScreenCoordinateToWorldNearestNeighbor(pointCloud, pointCount, timestamp, cameraIntrinsics, ref matrix, uvCoordinates, out colorCameraPoint, out isValidPoint); }
/// <summary> /// Fits a plane to a point cloud near a user-specified location. This /// occurs in two passes. First, all points in cloud within /// <c>maxPixelDistance</c> to <c>uvCoordinates</c> after projection are kept. Then a /// plane is fit to the subset cloud using RANSAC. After the initial fit /// all inliers from the original cloud are used to refine the plane /// model. /// </summary> /// <returns> /// Common.ErrorType.TANGO_SUCCESS on success, /// Common.ErrorType.TANGO_INVALID on invalid input, and /// Common.ErrorType.TANGO_ERROR on failure. /// </returns> /// <param name="pointCloud"> /// The point cloud. Cannot be null and must have at least three points. /// </param> /// <param name="pointCount"> /// The number of points to read from the point cloud. /// </param> /// <param name="timestamp">The timestamp of the point cloud.</param> /// <param name="cameraIntrinsics"> /// The camera intrinsics for the color camera. Cannot be null. /// </param> /// <param name="matrix"> /// Transformation matrix of the color camera with respect to the Unity /// World frame. /// </param> /// <param name="uvCoordinates"> /// The UV coordinates for the user selection. This is expected to be /// between (0.0, 0.0) and (1.0, 1.0). /// </param> /// <param name="intersectionPoint"> /// The output point in depth camera coordinates that the user selected. /// </param> /// <param name="plane">The plane fit.</param> public static int FitPlaneModelNearClick( Vector3[] pointCloud, int pointCount, double timestamp, TangoCameraIntrinsics cameraIntrinsics, ref Matrix4x4 matrix, Vector2 uvCoordinates, out Vector3 intersectionPoint, out Plane plane) { GCHandle pointCloudHandle = GCHandle.Alloc(pointCloud, GCHandleType.Pinned); TangoXYZij pointCloudXyzIj = new TangoXYZij(); pointCloudXyzIj.timestamp = timestamp; pointCloudXyzIj.xyz_count = pointCount; pointCloudXyzIj.xyz = pointCloudHandle.AddrOfPinnedObject(); DMatrix4x4 doubleMatrix = new DMatrix4x4(matrix); // Unity has Y pointing screen up; Tango camera has Y pointing // screen down. Vector2 uvCoordinatesTango = new Vector2(uvCoordinates.x, 1.0f - uvCoordinates.y); DVector3 doubleIntersectionPoint = new DVector3(); double[] planeArray = new double[4]; int returnValue = TangoSupportAPI.TangoSupport_fitPlaneModelNearPointMatrixTransform( pointCloudXyzIj, cameraIntrinsics, ref doubleMatrix, ref uvCoordinatesTango, out doubleIntersectionPoint, planeArray); if (returnValue != Common.ErrorType.TANGO_SUCCESS) { intersectionPoint = new Vector3(0.0f, 0.0f, 0.0f); plane = new Plane(new Vector3(0.0f, 0.0f, 0.0f), 0.0f); } else { intersectionPoint = doubleIntersectionPoint.ToVector3(); Vector3 normal = new Vector3((float)planeArray[0], (float)planeArray[1], (float)planeArray[2]); float distance = (float)planeArray[3] / normal.magnitude; plane = new Plane(normal, distance); } pointCloudHandle.Free(); return returnValue; }
/// <summary> /// Sets up extrinsic matrixes and camera intrinsics for this hardware. /// </summary> private void _SetUpCameraData() { if (m_cameraDataSetUp) { return; } double timestamp = 0.0; TangoCoordinateFramePair pair; TangoPoseData poseData = new TangoPoseData(); // Query the extrinsics between IMU and device frame. pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE; PoseProvider.GetPoseAtTime(poseData, timestamp, pair); Vector3 position = new Vector3((float)poseData.translation[0], (float)poseData.translation[1], (float)poseData.translation[2]); Quaternion quat = new Quaternion((float)poseData.orientation[0], (float)poseData.orientation[1], (float)poseData.orientation[2], (float)poseData.orientation[3]); m_imuTDevice = Matrix4x4.TRS(position, quat, new Vector3(1.0f, 1.0f, 1.0f)); // Query the extrinsics between IMU and color camera frame. pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR; PoseProvider.GetPoseAtTime(poseData, timestamp, pair); position = new Vector3((float)poseData.translation[0], (float)poseData.translation[1], (float)poseData.translation[2]); quat = new Quaternion((float)poseData.orientation[0], (float)poseData.orientation[1], (float)poseData.orientation[2], (float)poseData.orientation[3]); m_imuTColorCamera = Matrix4x4.TRS(position, quat, new Vector3(1.0f, 1.0f, 1.0f)); // Query the extrinsics between IMU and depth camera frame. pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH; PoseProvider.GetPoseAtTime(poseData, timestamp, pair); position = new Vector3((float)poseData.translation[0], (float)poseData.translation[1], (float)poseData.translation[2]); quat = new Quaternion((float)poseData.orientation[0], (float)poseData.orientation[1], (float)poseData.orientation[2], (float)poseData.orientation[3]); m_imuTDepthCamera = Matrix4x4.TRS(position, quat, new Vector3(1.0f, 1.0f, 1.0f)); // Also get the camera intrinsics m_colorCameraIntrinsics = new TangoCameraIntrinsics(); VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics); m_cameraDataSetUp = true; }
/// <summary> /// Set up the size of ARScreen based on camera intrinsics. /// </summary> private void _SetCameraIntrinsics() { TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics(); VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics); if (intrinsics.width != 0 && intrinsics.height != 0) { m_arCameraPostProcess.SetupIntrinsic(intrinsics); Camera.main.projectionMatrix = ProjectionMatrixForCameraIntrinsics((float)intrinsics.width, (float)intrinsics.height, (float)intrinsics.fx, (float)intrinsics.fy, (float)intrinsics.cx, (float)intrinsics.cy, 0.1f, 1000.0f); // Here we are scaling the image plane to make sure the image plane's ratio is set as the // color camera image ratio. // If we don't do this, because we are drawing the texture fullscreen, the image plane will // be set to the screen's ratio. float widthRatio = (float)Screen.width / (float)intrinsics.width; float heightRatio = (float)Screen.height / (float)intrinsics.height; if (widthRatio >= heightRatio) { float normalizedOffset = ((widthRatio / heightRatio) - 1.0f) / 2.0f; _SetScreenVertices(0, normalizedOffset); } else { float normalizedOffset = ((heightRatio / widthRatio) - 1.0f) / 2.0f; _SetScreenVertices(normalizedOffset, 0); } } else { m_arCameraPostProcess.enabled = false; } }
/// <summary> /// This is called when successfully connected to the Tango service. /// </summary> public void OnTangoServiceConnected() { // Calculate the camera extrinsics. TangoCoordinateFramePair pair; TangoPoseData imu_T_devicePose = new TangoPoseData(); pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE; PoseProvider.GetPoseAtTime(imu_T_devicePose, 0, pair); TangoPoseData imu_T_depthCameraPose = new TangoPoseData(); pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH; PoseProvider.GetPoseAtTime(imu_T_depthCameraPose, 0, pair); TangoPoseData imu_T_colorCameraPose = new TangoPoseData(); pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU; pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR; PoseProvider.GetPoseAtTime(imu_T_colorCameraPose, 0, pair); // Convert into matrix form to combine the poses. Matrix4x4 device_T_imu = Matrix4x4.Inverse(imu_T_devicePose.ToMatrix4x4()); m_device_T_depthCamera = device_T_imu * imu_T_depthCameraPose.ToMatrix4x4(); m_device_T_colorCamera = device_T_imu * imu_T_colorCameraPose.ToMatrix4x4(); // Update the camera intrinsics. TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics(); Status status; APICameraCalibration colorCameraCalibration; VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics); colorCameraCalibration.calibration_type = (int)intrinsics.calibration_type; colorCameraCalibration.width = intrinsics.width; colorCameraCalibration.height = intrinsics.height; colorCameraCalibration.cx = intrinsics.cx; colorCameraCalibration.cy = intrinsics.cy; colorCameraCalibration.fx = intrinsics.fx; colorCameraCalibration.fy = intrinsics.fy; colorCameraCalibration.distortion0 = intrinsics.distortion0; colorCameraCalibration.distortion1 = intrinsics.distortion1; colorCameraCalibration.distortion2 = intrinsics.distortion2; colorCameraCalibration.distortion3 = intrinsics.distortion3; colorCameraCalibration.distortion4 = intrinsics.distortion4; status = (Status)API.Tango3DR_setColorCalibration(m_context, ref colorCameraCalibration); if (status != Status.SUCCESS) { Debug.Log("Unable to set color calibration." + Environment.StackTrace); } APICameraCalibration depthCameraCalibration; VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_DEPTH, intrinsics); depthCameraCalibration.calibration_type = (int)intrinsics.calibration_type; depthCameraCalibration.width = intrinsics.width; depthCameraCalibration.height = intrinsics.height; depthCameraCalibration.cx = intrinsics.cx; depthCameraCalibration.cy = intrinsics.cy; depthCameraCalibration.fx = intrinsics.fx; depthCameraCalibration.fy = intrinsics.fy; depthCameraCalibration.distortion0 = intrinsics.distortion0; depthCameraCalibration.distortion1 = intrinsics.distortion1; depthCameraCalibration.distortion2 = intrinsics.distortion2; depthCameraCalibration.distortion3 = intrinsics.distortion3; depthCameraCalibration.distortion4 = intrinsics.distortion4; status = (Status)API.Tango3DR_setDepthCalibration(m_context, ref depthCameraCalibration); if (status != Status.SUCCESS) { Debug.Log("Unable to set depth calibration." + Environment.StackTrace); } }
/// <summary> /// Update a camera so its perspective lines up with the color camera's perspective. /// </summary> /// <param name="cam">Camera to update.</param> /// <param name="intrinsics">Tango camera intrinsics for the color camera.</param> /// <param name="uOffset">U texture coordinate clipping.</param> /// <param name="vOffset">V texture coordinate clipping.</param> private static void _CameraUpdateForIntrinsics(Camera cam, TangoCameraIntrinsics intrinsics, float uOffset, float vOffset) { float cx = (float)intrinsics.cx; float cy = (float)intrinsics.cy; float width = (float)intrinsics.width; float height = (float)intrinsics.height; float xscale = cam.nearClipPlane / (float)intrinsics.fx; float yscale = cam.nearClipPlane / (float)intrinsics.fy; float pixelLeft = -cx + (uOffset * width); float pixelRight = width - cx - (uOffset * width); // OpenGL coordinates has y pointing downwards so we negate this term. float pixelBottom = -height + cy + (vOffset * height); float pixelTop = cy - (vOffset * height); cam.projectionMatrix = _Frustum(pixelLeft * xscale, pixelRight * xscale, pixelBottom * yscale, pixelTop * yscale, cam.nearClipPlane, cam.farClipPlane); }
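// Illustrative sketch (not part of the SDK): the _Frustum helper referenced above is not shown in this
// file. An off-center projection built from the clipped frustum bounds typically follows the standard
// glFrustum layout, as in this hedged reconstruction.
private static Matrix4x4 _FrustumSketch(float left, float right, float bottom, float top,
                                        float near, float far)
{
    Matrix4x4 m = Matrix4x4.zero;
    m[0, 0] = 2.0f * near / (right - left);
    m[0, 2] = (right + left) / (right - left);
    m[1, 1] = 2.0f * near / (top - bottom);
    m[1, 2] = (top + bottom) / (top - bottom);
    m[2, 2] = -(far + near) / (far - near);
    m[2, 3] = -2.0f * far * near / (far - near);
    m[3, 2] = -1.0f;
    return m;
}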
public static extern int TangoService_getCameraIntrinsics(TangoEnums.TangoCameraId cameraId, [Out] TangoCameraIntrinsics intrinsics);
public static extern int TangoSupport_fitPlaneModelNearPointMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates, out DVector3 intersectionPoint, [Out, MarshalAs(UnmanagedType.LPArray, SizeConst = 4)] double[] planeModel);
/// <summary> /// Calculates the depth in the color camera space at a user-specified /// location using bilateral filtering weighted by both spatial distance /// from the user coordinate and by intensity similarity. /// </summary> /// <returns> /// Common.ErrorType.TANGO_SUCCESS on success, /// Common.ErrorType.TANGO_INVALID on invalid input, and /// Common.ErrorType.TANGO_ERROR on failure. /// </returns> /// <param name="pointCloud"> /// The point cloud. Cannot be null and must have at least one point. /// </param> /// <param name="pointCount"> /// The number of points to read from the point cloud. /// </param> /// <param name="timestamp">The timestamp of the depth points.</param> /// <param name="cameraIntrinsics"> /// The camera intrinsics for the color camera. Cannot be null. /// </param> /// <param name="colorImage"> /// The color image buffer. Cannot be null. /// </param> /// <param name="matrix"> /// Transformation matrix of the color camera with respect to the Unity /// World frame. /// </param> /// <param name="uvCoordinates"> /// The UV coordinates for the user selection. This is expected to be /// between (0.0, 0.0) and (1.0, 1.0). /// </param> /// <param name="colorCameraPoint"> /// The point (x, y, z), where (x, y) is the back-projection of the UV /// coordinates to the color camera space and z is the z coordinate of /// the point in the point cloud nearest to the user selection after /// projection onto the image plane. If there is not a point cloud point /// close to the user selection after projection onto the image plane, /// then the point will be set to (0.0, 0.0, 0.0) and isValidPoint will /// be set to false. /// </param> /// <param name="isValidPoint"> /// A flag valued true if there is a point cloud point close to the user /// selection after projection onto the image plane and valued false /// otherwise. /// </param> public static int ScreenCoordinateToWorldBilateral( Vector3[] pointCloud, int pointCount, double timestamp, TangoCameraIntrinsics cameraIntrinsics, TangoImageBuffer colorImage, ref Matrix4x4 matrix, Vector2 uvCoordinates, out Vector3 colorCameraPoint, out bool isValidPoint) { GCHandle pointCloudHandle = GCHandle.Alloc(pointCloud, GCHandleType.Pinned); TangoXYZij pointCloudXyzIj = new TangoXYZij(); pointCloudXyzIj.timestamp = timestamp; pointCloudXyzIj.xyz_count = pointCount; pointCloudXyzIj.xyz = pointCloudHandle.AddrOfPinnedObject(); DMatrix4x4 doubleMatrix = new DMatrix4x4(matrix); // Unity has Y pointing screen up; Tango camera has Y pointing // screen down. Vector2 uvCoordinatesTango = new Vector2(uvCoordinates.x, 1.0f - uvCoordinates.y); int isValidPointInteger; int returnValue = TangoSupportAPI.TangoSupport_getDepthAtPointBilateralCameraIntrinsicsMatrixTransform( pointCloudXyzIj, cameraIntrinsics, colorImage, ref doubleMatrix, ref uvCoordinatesTango, out colorCameraPoint, out isValidPointInteger); isValidPoint = isValidPointInteger != 0; pointCloudHandle.Free(); return returnValue; }
/// <summary> /// Given a screen coordinate, finds a plane that most closely fits the /// depth values in that area. /// /// This function is slow, as it looks at every single point in the point /// cloud. Avoid calling this more than once a frame. This also assumes the /// Unity camera intrinsics match the device's color camera. /// </summary> /// <returns><c>true</c>, if a plane was found; <c>false</c> otherwise.</returns> /// <param name="cam">The Unity camera.</param> /// <param name="pos">The point in screen space to perform detection on.</param> /// <param name="planeCenter">Filled in with the center of the plane in Unity world space.</param> /// <param name="plane">Filled in with a model of the plane in Unity world space.</param> public bool FindPlane(Camera cam, Vector2 pos, out Vector3 planeCenter, out Plane plane) { if (m_pointsCount == 0) { // No points to check, maybe not connected to the service yet planeCenter = Vector3.zero; plane = new Plane(); return false; } Matrix4x4 colorCameraTUnityWorld = m_colorCameraTUnityCamera * cam.transform.worldToLocalMatrix; Vector2 normalizedPos = cam.ScreenToViewportPoint(pos); // If the camera has a TangoARScreen attached, it is not displaying the entire color camera image. Correct // the normalized coordinates by taking the clipping into account. TangoARScreen arScreen = cam.gameObject.GetComponent<TangoARScreen>(); if (arScreen != null) { normalizedPos = arScreen.ViewportPointToCameraImagePoint(normalizedPos); } TangoCameraIntrinsics alignedIntrinsics = new TangoCameraIntrinsics(); VideoOverlayProvider.GetDeviceOientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, alignedIntrinsics); int returnValue = TangoSupport.FitPlaneModelNearClick( m_points, m_pointsCount, m_depthTimestamp, alignedIntrinsics, ref colorCameraTUnityWorld, normalizedPos, out planeCenter, out plane); if (returnValue == Common.ErrorType.TANGO_SUCCESS) { return true; } else { return false; } }
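// Illustrative usage sketch (not part of the SDK): calling FindPlane in response to a screen tap and
// orienting a marker on the detected surface. m_pointCloud (a reference to the component that defines
// FindPlane) and m_marker are assumptions for this example.
private void Update()
{
    if (Input.touchCount == 1 && Input.GetTouch(0).phase == TouchPhase.Began)
    {
        Vector3 planeCenter;
        Plane plane;
        if (m_pointCloud.FindPlane(Camera.main, Input.GetTouch(0).position, out planeCenter, out plane))
        {
            // Place the marker at the plane center with its up axis along the plane normal.
            m_marker.transform.position = planeCenter;
            m_marker.transform.rotation = Quaternion.FromToRotation(Vector3.up, plane.normal);
        }
    }
}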
/// <summary>
/// This is called when successfully connected to the Tango service.
/// </summary>
public void OnTangoServiceConnected()
{
    // Set up the size of ARScreen based on camera intrinsics.
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

    if (intrinsics.width != 0 && intrinsics.height != 0)
    {
        Camera camera = GetComponent<Camera>();
        if (camera != null)
        {
            // If this script is attached to a camera, then the camera is an Augmented Reality camera. The
            // color camera image then must fill the viewport. That means we must clip the color camera
            // image to make its ratio the same as the Unity camera. If we don't do this the color camera
            // image will be stretched non-uniformly, making a circle into an ellipse.
            float widthRatio = (float)camera.pixelWidth / (float)intrinsics.width;
            float heightRatio = (float)camera.pixelHeight / (float)intrinsics.height;
            if (widthRatio >= heightRatio)
            {
                m_uOffset = 0;
                m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
            }
            else
            {
                m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
                m_vOffset = 0;
            }

            m_arCameraPostProcess.SetupIntrinsic(intrinsics);
            _MeshUpdateForIntrinsics(GetComponent<MeshFilter>().mesh, m_uOffset, m_vOffset);
            _CameraUpdateForIntrinsics(camera, intrinsics, m_uOffset, m_vOffset);
        }
    }
    else
    {
        m_uOffset = 0;
        m_vOffset = 0;
        m_arCameraPostProcess.enabled = false;
    }
}
public static int TangoSupport_fitPlaneModelNearPointMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates, out DVector3 intersectionPoint, double[] planeModel) { intersectionPoint = new DVector3(); return Common.ErrorType.TANGO_SUCCESS; }
public static int TangoService_getCameraIntrinsics(TangoEnums.TangoCameraId cameraId, [Out] TangoCameraIntrinsics intrinsics) { return(Common.ErrorType.TANGO_SUCCESS); }
public static int TangoSupport_getDepthAtPointNearestNeighborMatrixTransform( TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates, out Vector3 colorCameraPoint, out int isValidPoint) { colorCameraPoint = Vector3.zero; isValidPoint = 1; return Common.ErrorType.TANGO_SUCCESS; }
/// <summary> /// Set up the size of ARScreen based on camera intrinsics. /// </summary> private void _SetCameraIntrinsics() { TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics(); VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics); float verticalFOV = 2.0f * Mathf.Rad2Deg * Mathf.Atan((intrinsics.height * 0.5f) / (float)intrinsics.fy); if (!float.IsNaN(verticalFOV)) { m_renderCamera.projectionMatrix = ProjectionMatrixForCameraIntrinsics((float)intrinsics.width, (float)intrinsics.height, (float)intrinsics.fx, (float)intrinsics.fy, (float)intrinsics.cx, (float)intrinsics.cy, 0.1f, 1000.0f); // Here we are scaling the image plane to make sure the image plane's ratio is set as the // color camera image ratio. // If we don't do this, because we are drawing the texture fullscreen, the image plane will // be set to the screen's ratio. float widthRatio = (float)Screen.width / (float)intrinsics.width; float heightRatio = (float)Screen.height / (float)intrinsics.height; if (widthRatio >= heightRatio) { float normalizedOffset = ((widthRatio / heightRatio) - 1.0f) / 2.0f; _SetScreenVertices(0, normalizedOffset); } else { float normalizedOffset = ((heightRatio / widthRatio) - 1.0f) / 2.0f; _SetScreenVertices(normalizedOffset, 0); } } }
/// <summary>
/// This callback is called after the user approved or declined the permission to use Motion Tracking.
/// </summary>
/// <param name="permissionsGranted">If the permissions were granted.</param>
private void _OnTangoApplicationPermissionsEvent( bool permissionsGranted )
{
    if( permissionsGranted )
    {
        m_tangoApplication.InitApplication();
        m_tangoApplication.InitProviders( string.Empty );
        m_tangoApplication.ConnectToService();

        //m_readyToDraw = true;

        // Create the colour buffer texture based on the camera intrinsics.
        TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics( TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics );

        m_cameraProjection = TangoUtility.ProjectionMatrixForCameraIntrinsics( (float)intrinsics.width,
                                                                               (float)intrinsics.height,
                                                                               (float)intrinsics.fx,
                                                                               (float)intrinsics.fy,
                                                                               (float)intrinsics.cx,
                                                                               (float)intrinsics.cy,
                                                                               0.1f, 1000.0f );
        m_colourBuffer = new Texture2D( (int)intrinsics.width, (int)intrinsics.height );

        TangoUtility.InitExtrinsics( m_request );
    }
    else
    {
        AndroidHelper.ShowAndroidToastMessage( "Motion Tracking Permissions Needed", true );
    }
}