/// <summary>
/// Set up the size of ARScreen based on camera intrinsics.
/// </summary>
private void _SetCameraIntrinsics()
{
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

    // Bug fix: guard against uninitialized intrinsics (all zeros before the service is
    // ready). A width/height of 0 caused a division by zero in the ratio math below, and
    // fy == 0 with a nonzero height yields an invalid 180-degree FOV that the previous
    // IsNaN check did not catch. This mirrors the guards used by sibling code paths.
    if (intrinsics.width == 0 || intrinsics.height == 0)
    {
        return;
    }

    float verticalFOV = 2.0f * Mathf.Rad2Deg * Mathf.Atan((intrinsics.height * 0.5f) / (float)intrinsics.fy);

    // A valid Unity camera field of view is strictly between 0 and 180 degrees; these
    // comparisons also reject NaN (all comparisons against NaN are false).
    if (verticalFOV > 0.0f && verticalFOV < 180.0f)
    {
        m_renderCamera.fieldOfView = verticalFOV;

        // Here we are scaling the image plane to make sure the image plane's ratio is set as the
        // color camera image ratio.
        // If we don't do this, because we are drawing the texture fullscreen, the image plane will
        // be set to the screen's ratio.
        float widthRatio = (float)Screen.width / (float)intrinsics.width;
        float heightRatio = (float)Screen.height / (float)intrinsics.height;
        if (widthRatio >= heightRatio)
        {
            float normalizedOffset = (widthRatio / heightRatio - 1.0f) / 2.0f;
            _SetScreenVertices(0, normalizedOffset);
        }
        else
        {
            float normalizedOffset = (heightRatio / widthRatio - 1.0f) / 2.0f;
            _SetScreenVertices(normalizedOffset, 0);
        }
    }
}
/// <summary>
/// Pushes the latest color camera frame into the mesh texture.
/// </summary>
private void _UpdateTexture()
{
    // Ask the Tango layer to render the newest camera frame into our native texture.
    int nativeTextureId = m_texture.GetNativeTextureID();
    VideoOverlayProvider.RenderLatestFrame(
        nativeTextureId, m_texture.width, m_texture.height, ref m_textureTimestamp);

    // Rendering the frame alters OpenGL state outside of Unity; re-sync Unity's view of it.
    GL.InvalidateState();

    this.renderer.material.mainTexture = m_texture;
}
/// <summary>
/// Sets up extrinsic matrixes and camera intrinsics for this hardware.
/// Fills m_imuTDevice, m_imuTDepthCamera, m_deviceTDepthCamera and
/// m_colorCameraIntrinsics; runs at most once (guarded by m_cameraDataSetUp).
/// </summary>
private void _SetUpCameraData()
{
    // Extrinsics/intrinsics are fixed for the hardware; only query once.
    if (m_cameraDataSetUp)
    {
        return;
    }

    double timestamp = 0.0; // NOTE(review): presumably 0.0 requests the latest pose — confirm against PoseProvider docs.
    TangoCoordinateFramePair pair;
    TangoPoseData poseData = new TangoPoseData();

    // Query the extrinsics between IMU and device frame.
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
    m_imuTDevice = DMatrix4x4.FromMatrix4x4(poseData.ToMatrix4x4());

    // Query the extrinsics between IMU and depth camera frame.
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
    PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
    m_imuTDepthCamera = DMatrix4x4.FromMatrix4x4(poseData.ToMatrix4x4());

    // Compose device-to-depth-camera: deviceTDepth = (imuTDevice)^-1 * imuTDepthCamera.
    m_deviceTDepthCamera = m_imuTDevice.Inverse * m_imuTDepthCamera;

    // Also get the camera intrinsics
    m_colorCameraIntrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);

    m_cameraDataSetUp = true;
}
/// <summary>
/// Unity per-frame callback; renders the newest color camera frame and records its timestamp.
/// </summary>
private void Update()
{
    var colorCamera = TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR;
    m_screenUpdateTime = VideoOverlayProvider.RenderLatestFrame(colorCamera);

    // Rendering the latest frame changes a bunch of OpenGL state. Ensure Unity knows the current OpenGL state.
    GL.InvalidateState();
}
/// <summary>
/// This will be called when a new frame is available from the camera.
///
/// The first scan-line of the color image is reserved for metadata instead of image pixels.
/// </summary>
/// <param name="cameraId">Camera identifier.</param>
public void OnTangoCameraTextureAvailable(TangoEnums.TangoCameraId cameraId)
{
    // Only react to color camera frames, and only while rendering is enabled.
    if (!IsRendering)
    {
        return;
    }

    if (cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
    {
        return;
    }

    m_screenUpdateTime = VideoOverlayProvider.UpdateARScreen(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR);
}
/// <summary>
/// Render the background texture at the end of frame.
/// </summary>
/// <returns>Yield return.</returns>
private IEnumerator RenderBackgoundTexture()
{
    // Wait until init background finished.
    while (!is_initBackgroundFinished)
    {
        yield return new WaitForSeconds(0.1f);
    }

    while (true)
    {
        // If the background is not initialized we are not rendering.
        // Bug fix: the original only yielded here and then fell through, rendering
        // with a zero renderer handle anyway; 'continue' re-checks next iteration.
        if (m_videoOverlayRendererHandler == System.IntPtr.Zero)
        {
            yield return null;
            continue;
        }

        // Wait for end of frame to not interfere with rendering thread on mac.
        yield return new WaitForEndOfFrame();

        VideoOverlayProvider.RenderLatestFrame(
            m_backgroundTexture.GetNativeTextureID(),
            m_backgroundTexture.width,
            m_backgroundTexture.height,
            ref timestamp);

#if (UNITY_EDITOR || UNITY_STANDALONE_OSX)
        // Issue plugin event to render right after rendering thread finished rendering.
        Plugin.IssuePluginEvent(Plugin.PluginEvents.RenderFrameEvent);
#elif (UNITY_IPHONE || UNITY_ANDROID)
        GL.InvalidateState();
#else
#error platform is not supported
#endif

        // Yield now to let the owning class stop this coroutine if needed.
        yield return null;
    }
}
/// <summary>
/// Set up the size of ARScreen based on camera intrinsics.
/// </summary>
private void _SetCameraIntrinsics()
{
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

    // Intrinsics of zero width/height indicate the service has not provided valid data yet.
    if (intrinsics.width == 0 || intrinsics.height == 0)
    {
        return;
    }

    Camera.main.projectionMatrix = ProjectionMatrixForCameraIntrinsics(
        (float)intrinsics.width,
        (float)intrinsics.height,
        (float)intrinsics.fx,
        (float)intrinsics.fy,
        (float)intrinsics.cx,
        (float)intrinsics.cy,
        0.1f,
        1000.0f);

    // Scale the image plane so its aspect ratio matches the color camera image;
    // drawing the texture fullscreen would otherwise force it to the screen's ratio.
    float widthRatio = (float)Screen.width / (float)intrinsics.width;
    float heightRatio = (float)Screen.height / (float)intrinsics.height;

    if (widthRatio >= heightRatio)
    {
        float offset = ((widthRatio / heightRatio) - 1.0f) / 2.0f;
        _SetScreenVertices(0, offset);
    }
    else
    {
        float offset = ((heightRatio / widthRatio) - 1.0f) / 2.0f;
        _SetScreenVertices(offset, 0);
    }
}
/// <summary>
/// Camera.OnPreRender() hook: pulls the latest camera frame into the fixed-size texture.
/// </summary>
public sealed override void OnPreRender()
{
    int nativeTextureId = m_texture.GetNativeTextureID();
    VideoOverlayProvider.RenderLatestFrame(nativeTextureId, TEX_WIDTH, TEX_HEIGHT, ref m_timestamp);

    // Native rendering mutates GL state; tell Unity its cached state is stale.
    GL.InvalidateState();
}
/// <summary>
/// Sets up extrinsic matrixes and camera intrinsics for this hardware.
/// Fills m_imuTDevice, m_imuTColorCamera, m_imuTDepthCamera and
/// m_colorCameraIntrinsics; runs at most once (guarded by m_cameraDataSetUp).
/// </summary>
private void _SetUpCameraData()
{
    if (m_cameraDataSetUp)
    {
        return;
    }

    double timestamp = 0.0; // NOTE(review): presumably 0.0 requests the latest pose — confirm against PoseProvider docs.
    TangoCoordinateFramePair pair;
    TangoPoseData poseData = new TangoPoseData();

    // Query the extrinsics between IMU and device frame.
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
    m_imuTDevice = _PoseToMatrix(poseData);

    // Query the extrinsics between IMU and color camera frame.
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
    PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
    m_imuTColorCamera = _PoseToMatrix(poseData);

    // Query the extrinsics between IMU and depth camera frame.
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
    PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
    m_imuTDepthCamera = _PoseToMatrix(poseData);

    // Also get the camera intrinsics
    m_colorCameraIntrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);

    m_cameraDataSetUp = true;
}

/// <summary>
/// Converts a Tango pose (translation + orientation) into a Unity transform
/// matrix with unit scale. Extracted to remove triplicated conversion code.
/// </summary>
/// <param name="poseData">Pose to convert.</param>
/// <returns>The pose as a Matrix4x4.</returns>
private static Matrix4x4 _PoseToMatrix(TangoPoseData poseData)
{
    Vector3 position = new Vector3((float)poseData.translation[0],
                                   (float)poseData.translation[1],
                                   (float)poseData.translation[2]);
    Quaternion orientation = new Quaternion((float)poseData.orientation[0],
                                            (float)poseData.orientation[1],
                                            (float)poseData.orientation[2],
                                            (float)poseData.orientation[3]);
    return Matrix4x4.TRS(position, orientation, new Vector3(1.0f, 1.0f, 1.0f));
}
/// <summary>
/// Per-frame update: feeds the color texture, depth texture and blend factor to the
/// material's shader, then renders the latest camera frame into the color texture.
/// </summary>
private void Update()
{
    Material material = renderer.material;
    material.SetTexture("_ColorTex", m_texture);
    material.SetTexture("_DepthTex", m_depthTexture.GetDepthTexture(m_isUsingFilter, m_depthTexMaxLength, m_filterLevel));
    material.SetFloat("_BlendValue", m_blendValue);

    VideoOverlayProvider.RenderLatestFrame(
        m_texture.GetNativeTextureID(), m_texture.width, m_texture.height, ref m_timestamp);

    // Native rendering mutates GL state; tell Unity its cached state is stale.
    GL.InvalidateState();
}
/// <summary>
/// This will be called when a new frame is available from the camera.
///
/// The first scan-line of the color image is reserved for metadata instead of image pixels.
/// </summary>
/// <param name="cameraId">Camera identifier.</param>
public void OnExperimentalTangoImageAvailable(TangoEnums.TangoCameraId cameraId)
{
    // Ignore callbacks for any camera other than the color camera.
    if (cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
    {
        return;
    }

    m_screenUpdateTime = VideoOverlayProvider.RenderLatestFrame(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR);

    // Rendering the latest frame changes a bunch of OpenGL state. Ensure Unity knows the current OpenGL state.
    GL.InvalidateState();
}
/// <summary>
/// Per-frame update: renders the latest camera frame, then binds the color and
/// depth textures for the post-process occlusion shader.
/// </summary>
private void Update()
{
    VideoOverlayProvider.RenderLatestFrame(m_texture.GetNativeTextureID(), TEX_WIDTH, TEX_HEIGHT, ref m_timestamp);

    var depthTexture = m_depthTexture.GetDepthTexture(m_isUsingFilter, Camera.main.farClipPlane, 1);
    m_depthOcclusionMaterial.SetTexture("_ColorTex", m_texture);
    m_depthOcclusionMaterial.SetTexture("_DepthTex", depthTexture);

    // Native rendering mutates GL state; tell Unity its cached state is stale.
    GL.InvalidateState();
}
/// <summary>
/// Unity OnEnable callback. Skips the very first activation; on every re-enable
/// after that, (re)attaches the AR screen command buffer to this object's camera.
/// </summary>
void OnEnable()
{
    if (firstActivation)
    {
        firstActivation = false;
        return;
    }

    // Assign the AR screen command buffer to both forward and deferred paths.
    Camera camera = transform.GetComponent<Camera>();
    buf = VideoOverlayProvider.CreateARScreenCommandBuffer();
    camera.AddCommandBuffer(CameraEvent.BeforeForwardOpaque, buf);
    camera.AddCommandBuffer(CameraEvent.BeforeGBuffer, buf);
}
/// <summary>
/// This is called when successfully connected to the Tango Service.
/// Allocates the environment map sized to the color camera image.
/// </summary>
public void OnTangoServiceConnected()
{
#if UNITY_EDITOR
    // Format needs to be ARGB32 in editor to use Texture2D.ReadPixels() in emulation
    // in Unity 4.6.
    RenderTexture emulatedEnvironmentMap = new RenderTexture(
        EMULATED_CAMERA_WIDTH, EMULATED_CAMERA_HEIGHT, 0, RenderTextureFormat.ARGB32);
    emulatedEnvironmentMap.Create();
    m_environmentMap = emulatedEnvironmentMap;
#else
    // On device, size the map from the actual color camera intrinsics.
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);
    m_environmentMap = new Texture2D((int)intrinsics.width, (int)intrinsics.height, TextureFormat.RGBA32, false);
#endif
}
/// <summary>
/// Update AR screen material with camera texture size data
/// (and distortion parameters if using distortion post-process filter).
/// </summary>
/// <param name="uOffset">U texcoord offset.</param>
/// <param name="vOffset">V texcoord offset.</param>
/// <param name="colorCameraRDisplay">Rotation of the display with respect to the color camera.</param>
private static void _MaterialUpdateForIntrinsics(
    float uOffset, float vOffset, OrientationManager.Rotation colorCameraRDisplay)
{
    // Inset the quad's UVs by the offsets on each side, then rotate each corner
    // to account for display-vs-camera orientation.
    float uMin = 0 + uOffset;
    float uMax = 1 - uOffset;
    float vMin = 0 + vOffset;
    float vMax = 1 - vOffset;

    Vector2[] uvs =
    {
        new Vector2(uMin, vMin),
        new Vector2(uMin, vMax),
        new Vector2(uMax, vMin),
        new Vector2(uMax, vMax),
    };

    for (int i = 0; i < uvs.Length; ++i)
    {
        uvs[i] = _GetUnityUvBasedOnRotation(uvs[i], colorCameraRDisplay);
    }

    VideoOverlayProvider.SetARScreenUVs(uvs);
}
/// <summary>
/// This is called when successfully connected to the Tango service.
/// Attaches the AR screen command buffer and enables rendering, unless the
/// video overlay is disabled in the application settings.
/// </summary>
public void OnTangoServiceConnected()
{
    // Video overlay disabled: nothing to draw.
    if (!m_tangoApplication.EnableVideoOverlay)
    {
        IsRendering = false;
        return;
    }

    CommandBuffer commandBuffer = VideoOverlayProvider.CreateARScreenCommandBuffer();
    m_camera.AddCommandBuffer(CameraEvent.BeforeForwardOpaque, commandBuffer);
    m_camera.AddCommandBuffer(CameraEvent.BeforeGBuffer, commandBuffer);

    _SetRenderAndCamera(AndroidHelper.GetDisplayRotation(), AndroidHelper.GetColorCameraRotation());
    IsRendering = true;
}
/// <summary>
/// Init the background texture at end of the frame.
/// </summary>
/// <returns>Yield return.</returns>
private IEnumerator InitBackground()
{
    // Defer to end of frame so we do not interfere with the rendering thread on mac.
    yield return new WaitForEndOfFrame();

    VideoOverlayProvider.Init();

#if (UNITY_EDITOR || UNITY_STANDALONE_OSX)
    // Issue a plugin event so initialization runs right after the rendering thread finishes.
    Plugin.IssuePluginEvent(Plugin.PluginEvents.InitBackgroundEvent);
#elif (UNITY_IPHONE || UNITY_ANDROID)
    GL.InvalidateState();
#else
#error platform is not supported
#endif

    is_initBackgroundFinished = true;
}
/// <summary>
/// Unity per-frame callback: handles the back/escape key, then renders the latest
/// camera frame and updates the transformation for its timestamp.
/// </summary>
private void Update()
{
    if (Input.GetKeyDown(KeyCode.Escape))
    {
        if (m_tangoApplication != null)
        {
            m_tangoApplication.Shutdown();
        }

        // This is a temporary fix for a lifecycle issue where calling
        // Application.Quit() here, and restarting the application immediately,
        // results in a hard crash.
        AndroidHelper.AndroidQuit();
    }

    double frameTimestamp = VideoOverlayProvider.RenderLatestFrame(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR);
    _UpdateTransformation(frameTimestamp);

    // Native rendering mutates GL state; tell Unity its cached state is stale.
    GL.InvalidateState();
}
/// <summary>
/// Sets up extrinsic matrixes and camera intrinsics for this hardware.
/// In the editor, fixed convention-swap matrices are used instead of querying
/// the (non-existent) device. Runs at most once (guarded by m_cameraDataSetUp).
/// </summary>
private void _SetUpCameraData()
{
    if (m_cameraDataSetUp)
    {
        return;
    }

    double timestamp = 0.0; // NOTE(review): presumably 0.0 requests the latest pose — confirm against PoseProvider docs.
    TangoCoordinateFramePair pair;
    TangoPoseData poseData = new TangoPoseData();

#if UNITY_EDITOR
    // Constant matrixes representing just the convention swap.
    m_imuTDevice.SetColumn(0, new Vector4(0.0f, 1.0f, 0.0f, 0.0f));
    m_imuTDevice.SetColumn(1, new Vector4(-1.0f, 0.0f, 0.0f, 0.0f));
    m_imuTDevice.SetColumn(2, new Vector4(0.0f, 0.0f, 1.0f, 0.0f));
    m_imuTDevice.SetColumn(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
    m_imuTDepthCamera.SetColumn(0, new Vector4(0.0f, 1.0f, 0.0f, 0.0f));
    m_imuTDepthCamera.SetColumn(1, new Vector4(1.0f, 0.0f, 0.0f, 0.0f));
    m_imuTDepthCamera.SetColumn(2, new Vector4(0.0f, 0.0f, -1.0f, 0.0f));
    m_imuTDepthCamera.SetColumn(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
#else
    // Query the extrinsics between IMU and device frame.
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
    m_imuTDevice = poseData.ToMatrix4x4();

    // Query the extrinsics between IMU and depth camera frame.
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
    PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
    m_imuTDepthCamera = poseData.ToMatrix4x4();
#endif

    // Also get the camera intrinsics
    m_colorCameraIntrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);

    m_cameraDataSetUp = true;
}
/// <summary>
/// Render the background texture when called.
/// </summary>
/// <returns>Is background updated.</returns>
public bool RenderBackgoundTextureOnDemand()
{
    // Cannot render until the background has been initialized.
    if (!is_initBackgroundFinished)
    {
        return false;
    }

    VideoOverlayProvider.RenderLatestFrame(
        m_backgroundTexture.GetNativeTextureID(),
        m_backgroundTexture.width,
        m_backgroundTexture.height,
        ref timestamp);

#if (UNITY_EDITOR || UNITY_STANDALONE_OSX)
    // Issue a plugin event to render right after the rendering thread finished rendering.
    Plugin.IssuePluginEvent(Plugin.PluginEvents.RenderFrameEvent);
#elif (UNITY_IPHONE || UNITY_ANDROID)
    GL.InvalidateState();
#else
#error platform is not supported
#endif

    return true;
}
/// <summary>
/// Sets the target camera's field of view from the color camera intrinsics,
/// either vertically (height/fy) or horizontally (width/fx).
/// </summary>
/// <param name="vertical">True to derive the vertical FOV, false for horizontal.</param>
void SetFov(bool vertical)
{
    TangoCameraIntrinsics ccIntrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, ccIntrinsics);

    float fov;
    if (vertical)
    {
        fov = 2f * Mathf.Atan(0.5f * ccIntrinsics.height / (float)ccIntrinsics.fy) * Mathf.Rad2Deg;
    }
    else
    {
        fov = 2f * Mathf.Atan(0.5f * ccIntrinsics.width / (float)ccIntrinsics.fx) * Mathf.Rad2Deg;
    }

    if (float.IsNaN(fov) || float.IsInfinity(fov))
    {
        // Tango API itself should have produced a warning message for this case
        return;
    }

    targetCamera.fieldOfView = fov;
    Debug.Log("FOV is set: " + targetCamera.fieldOfView);
}
/// <summary>
/// This is called when successfully connected to the Tango service.
/// Computes the U/V clipping offsets so the color camera image fills the
/// attached camera's viewport without non-uniform stretching.
/// </summary>
public void OnTangoServiceConnected()
{
    // Set up the size of ARScreen based on camera intrinsics.
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

    if (intrinsics.width != 0 && intrinsics.height != 0)
    {
        Camera camera = GetComponent<Camera>();
        if (camera != null)
        {
            // If this script is attached to a camera, then the camera is an Augmented Reality camera. The color
            // camera image then must fill the viewport. That means we must clip the color camera image to make
            // its ratio the same as the Unity camera. If we don't do this the color camera image will be
            // stretched non-uniformly, making a circle into an ellipse.
            float widthRatio = (float)camera.pixelWidth / (float)intrinsics.width;
            float heightRatio = (float)camera.pixelHeight / (float)intrinsics.height;
            if (widthRatio >= heightRatio)
            {
                // Viewport aspect is wider than the camera image: clip vertically (V).
                m_uOffset = 0;
                m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
            }
            else
            {
                // Viewport aspect is taller than the camera image: clip horizontally (U).
                m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
                m_vOffset = 0;
            }

            m_arCameraPostProcess.SetupIntrinsic(intrinsics);
            _MeshUpdateForIntrinsics(GetComponent<MeshFilter>().mesh, m_uOffset, m_vOffset);
            _CameraUpdateForIntrinsics(camera, intrinsics, m_uOffset, m_vOffset);
        }
    }
    else
    {
        // Intrinsics not valid (e.g. service not ready): no clipping, and the
        // distortion post-process is disabled.
        m_uOffset = 0;
        m_vOffset = 0;
        m_arCameraPostProcess.enabled = false;
    }
}
/// @cond
/// <summary>
/// Initialize the AR Screen.
/// </summary>
public void Start()
{
    m_camera = GetComponent<Camera>();
    m_arCameraPostProcess = gameObject.GetComponent<ARCameraPostProcess>();

    TangoApplication tangoApplication = FindObjectOfType<TangoApplication>();
    if (tangoApplication != null)
    {
        // Bug fix: the OnDisplayChanged subscription used to run before the null
        // check above, throwing NullReferenceException when the scene has no
        // TangoApplication.
        tangoApplication.OnDisplayChanged += _OnDisplayChanged;
        tangoApplication.Register(this);

        // If already connected to a service, then do initialization now.
        if (tangoApplication.IsServiceConnected)
        {
            OnTangoServiceConnected();
        }

        // Attach the AR screen command buffer for both forward and deferred paths.
        CommandBuffer buf = VideoOverlayProvider.CreateARScreenCommandBuffer();
        m_camera.AddCommandBuffer(CameraEvent.BeforeForwardOpaque, buf);
        m_camera.AddCommandBuffer(CameraEvent.BeforeGBuffer, buf);
    }

    if (m_enableOcclusion)
    {
        TangoPointCloud pointCloud = FindObjectOfType<TangoPointCloud>();
        if (pointCloud != null)
        {
            Renderer renderer = pointCloud.GetComponent<Renderer>();
            renderer.enabled = true;
            renderer.material.shader = m_occlusionShader;
            pointCloud.m_updatePointsMesh = true;
        }
        else
        {
            Debug.Log("Point Cloud data is not available, occlusion is not possible.");
        }
    }
}
/// <summary>
/// This is called when successfully connected to the Tango service.
/// Computes the U/V clipping offsets so the color camera image fills the
/// attached camera's viewport without non-uniform stretching, then pushes the
/// intrinsics to the material, mesh and camera.
/// </summary>
public void OnTangoServiceConnected()
{
    // Set up the size of ARScreen based on camera intrinsics.
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

    if (intrinsics.width != 0 && intrinsics.height != 0)
    {
        // The camera to which this script is attached is an Augmented Reality camera. The color camera
        // image must fill that camera's viewport. That means we must clip the color camera image to make
        // its ratio the same as the Unity camera. If we don't do this the color camera image will be
        // stretched non-uniformly, making a circle into an ellipse.
        float widthRatio = (float)m_camera.pixelWidth / (float)intrinsics.width;
        float heightRatio = (float)m_camera.pixelHeight / (float)intrinsics.height;
        if (widthRatio >= heightRatio)
        {
            // Viewport aspect is wider than the camera image: clip vertically (V).
            m_uOffset = 0;
            m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
        }
        else
        {
            // Viewport aspect is taller than the camera image: clip horizontally (U).
            m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
            m_vOffset = 0;
        }

        _MaterialUpdateForIntrinsics(m_screenMaterial, m_arCameraPostProcess, intrinsics);
        _MeshUpdateForIntrinsics(m_screenSpaceMesh, m_uOffset, m_vOffset);
        _CameraUpdateForIntrinsics(m_camera, intrinsics, m_uOffset, m_vOffset);
    }
    else
    {
        // Intrinsics not valid (e.g. service not ready): no clipping, and disable
        // the distortion post-process if one is attached.
        m_uOffset = 0;
        m_vOffset = 0;
        if (m_arCameraPostProcess != null)
        {
            m_arCameraPostProcess.enabled = false;
        }
    }
}
/// <summary>
/// Given a screen coordinate, finds a plane that most closely fits the
/// depth values in that area.
///
/// This function is slow, as it looks at every single point in the point
/// cloud. Avoid calling this more than once a frame. This also assumes the
/// Unity camera intrinsics match the device's color camera.
/// </summary>
/// <returns><c>true</c>, if a plane was found; <c>false</c> otherwise.</returns>
/// <param name="cam">The Unity camera.</param>
/// <param name="pos">The point in screen space to perform detection on.</param>
/// <param name="planeCenter">Filled in with the center of the plane in Unity world space.</param>
/// <param name="plane">Filled in with a model of the plane in Unity world space.</param>
public bool FindPlane(Camera cam, Vector2 pos, out Vector3 planeCenter, out Plane plane)
{
    if (m_pointsCount == 0)
    {
        // No points to check, maybe not connected to the service yet
        planeCenter = Vector3.zero;
        plane = new Plane();
        return(false);
    }

    // Build the transform from Unity world space into the color camera frame
    // (name reads right-to-left: colorCamera_T_UnityWorld).
    Matrix4x4 colorCameraTUnityWorld = m_colorCameraTUnityCamera * cam.transform.worldToLocalMatrix;
    Vector2 normalizedPos = cam.ScreenToViewportPoint(pos);

    // If the camera has a TangoARScreen attached, it is not displaying the entire color camera image. Correct
    // the normalized coordinates by taking the clipping into account.
    TangoARScreen arScreen = cam.gameObject.GetComponent<TangoARScreen>();
    if (arScreen != null)
    {
        normalizedPos = arScreen.ViewportPointToCameraImagePoint(normalizedPos);
    }

    TangoCameraIntrinsics alignedIntrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetDeviceOrientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR,
                                                               alignedIntrinsics);

    // Delegate the actual plane fit to the Tango support library.
    int returnValue = TangoSupport.FitPlaneModelNearClick(
        m_points, m_pointsCount, m_depthTimestamp, alignedIntrinsics, ref colorCameraTUnityWorld,
        normalizedPos, out planeCenter, out plane);

    if (returnValue == Common.ErrorType.TANGO_SUCCESS)
    {
        return(true);
    }
    else
    {
        return(false);
    }
}
/// <summary>
/// Update AR screen rendering and attached Camera's projection matrix.
/// </summary>
/// <param name="displayRotation">Activity (screen) rotation.</param>
/// <param name="colorCameraRotation">Color camera sensor rotation.</param>
private void _SetRenderAndCamera(OrientationManager.Rotation displayRotation,
                                 OrientationManager.Rotation colorCameraRotation)
{
    float cameraWidth = (float)Screen.width;
    float cameraHeight = (float)Screen.height;

#pragma warning disable 0219
    // Here we are computing if current display orientation is landscape or portrait.
    // AndroidHelper.GetAndroidDefaultOrientation() returns 1 if device default orientation is in portrait,
    // returns 2 if device default orientation is landscape. Adding device default orientation with
    // how much the display is rotated from default orientation will get us the result of current display
    // orientation. (landscape vs. portrait)
    bool isLandscape = (AndroidHelper.GetDefaultOrientation() + (int)displayRotation) % 2 == 0;
    bool needToFlipCameraRatio = false;
    float cameraRatio = (float)Screen.width / (float)Screen.height;
#pragma warning restore 0219

#if !UNITY_EDITOR
    // In most cases, we don't need to flip the camera width and height. However, in some cases Unity camera
    // only updates a couple of frames after the display changed callback from Android; thus, we need to flip the width
    // and height in this case.
    //
    // This does not happen in the editor, because the emulated device does not ever rotate.
    needToFlipCameraRatio = (!isLandscape & (cameraRatio > 1.0f)) || (isLandscape & (cameraRatio < 1.0f));

    if (needToFlipCameraRatio)
    {
        cameraRatio = 1.0f / cameraRatio;
        float tmp = cameraWidth;
        cameraWidth = cameraHeight;
        cameraHeight = tmp;
    }
#endif

    // Two sets of intrinsics: display-orientation-aligned for projection/clipping,
    // raw (non-aligned) for the distortion post-process below.
    TangoCameraIntrinsics alignedIntrinsics = new TangoCameraIntrinsics();
    TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetDeviceOrientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR,
                                                               alignedIntrinsics);
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

    if (alignedIntrinsics.width != 0 && alignedIntrinsics.height != 0)
    {
        // The camera to which this script is attached is an Augmented Reality camera. The color camera
        // image must fill that camera's viewport. That means we must clip the color camera image to make
        // its ratio the same as the Unity camera. If we don't do this the color camera image will be
        // stretched non-uniformly, making a circle into an ellipse.
        float widthRatio = (float)cameraWidth / (float)alignedIntrinsics.width;
        float heightRatio = (float)cameraHeight / (float)alignedIntrinsics.height;
        if (widthRatio >= heightRatio)
        {
            // Screen aspect is wider than the camera image: clip vertically (V).
            m_uOffset = 0;
            m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
        }
        else
        {
            // Screen aspect is taller than the camera image: clip horizontally (U).
            m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
            m_vOffset = 0;
        }

        // Note that here we are passing in non-inverted intrinsics, because the YUV conversion is still operating
        // on native buffer layout.
        OrientationManager.Rotation rotation = TangoSupport.RotateFromAToB(displayRotation, colorCameraRotation);
        _MaterialUpdateForIntrinsics(m_uOffset, m_vOffset, rotation);
        _CameraUpdateForIntrinsics(m_camera, alignedIntrinsics, m_uOffset, m_vOffset);
        if (m_arCameraPostProcess != null)
        {
            m_arCameraPostProcess.SetupIntrinsic(intrinsics);
        }
    }
    else
    {
        Debug.LogError("AR Camera intrinsic is not valid.");
    }
}
/// <summary>
/// Unity OnDisable callback: turns off AR screen distortion when this component is disabled.
/// </summary>
private void OnDisable()
{
    const bool enableDistortion = false;
    VideoOverlayProvider.SetARScreenDistortion(enableDistortion);
}
/// <summary>
/// Unity per-frame callback: renders the latest color camera frame.
/// </summary>
void Update()
{
    // The returned timestamp was stored in an unused local; we only need the
    // side effect of rendering, so discard it.
    VideoOverlayProvider.RenderLatestFrame(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR);

    // Native rendering mutates GL state; tell Unity its cached state is stale.
    GL.InvalidateState();
}
/// <summary>
/// Connects this object's texture to the given Tango camera so frames are
/// streamed into it.
/// </summary>
/// <param name="cameraId">Camera identifier to connect the texture to.</param>
public void SetTargetCameraTexture(TangoEnums.TangoCameraId cameraId)
{
    var nativeTextureId = m_texture.GetNativeTextureID();
    VideoOverlayProvider.ConnectTexture(cameraId, nativeTextureId);
}
/// <summary>
/// Camera.OnPreRender() hook: renders the latest color camera frame before this camera draws.
/// </summary>
public sealed override void OnPreRender()
{
    var colorCamera = TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR;
    VideoOverlayProvider.RenderLatestFrame(colorCamera);

    // Native rendering mutates GL state; tell Unity its cached state is stale.
    GL.InvalidateState();
}