Ejemplo n.º 1
0
    /// <summary>
    /// Set up the size of ARScreen based on camera intrinsics.
    ///
    /// Sets the render camera's vertical field of view from the color camera's
    /// pinhole model, then clips the screen quad so the camera image keeps its
    /// native aspect ratio instead of being stretched to the screen's ratio.
    /// </summary>
    private void _SetCameraIntrinsics()
    {
        TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();

        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);

        // Reject unusable intrinsics up front.  The previous NaN-only check let
        // fy == 0 with a nonzero height slip through as a bogus 180-degree FOV
        // (height / 0 -> Infinity, Atan(Infinity) -> pi/2).  This guard also
        // matches the width/height validation used elsewhere in this codebase.
        if (intrinsics.width == 0 || intrinsics.height == 0 || intrinsics.fy == 0)
        {
            return;
        }

        // Vertical FOV from the pinhole model: fov = 2 * atan((h / 2) / fy).
        float verticalFOV = 2.0f * Mathf.Rad2Deg * Mathf.Atan((intrinsics.height * 0.5f) / (float)intrinsics.fy);

        m_renderCamera.fieldOfView = verticalFOV;

        // Here we are scaling the image plane to make sure the image plane's ratio is set as the
        // color camera image ratio.
        // If we don't do this, because we are drawing the texture fullscreen, the image plane will
        // be set to the screen's ratio.
        float widthRatio  = (float)Screen.width / (float)intrinsics.width;
        float heightRatio = (float)Screen.height / (float)intrinsics.height;
        if (widthRatio >= heightRatio)
        {
            float normalizedOffset = (widthRatio / heightRatio - 1.0f) / 2.0f;
            _SetScreenVertices(0, normalizedOffset);
        }
        else
        {
            float normalizedOffset = (heightRatio / widthRatio - 1.0f) / 2.0f;
            _SetScreenVertices(normalizedOffset, 0);
        }
    }
Ejemplo n.º 2
0
    /// <summary>
    /// Sets up extrinsic matrixes and camera intrinsics for this hardware.
    ///
    /// Queries the fixed IMU-to-device and IMU-to-depth-camera transforms,
    /// derives the device-to-depth-camera transform from them, and caches the
    /// color camera intrinsics.  Runs at most once per session.
    /// </summary>
    private void _SetUpCameraData()
    {
        if (m_cameraDataSetUp)
        {
            return;
        }

        // Extrinsics are queried at timestamp 0, which returns the fixed
        // factory-calibrated transforms.  Both queries use the IMU base frame.
        double queryTimestamp = 0.0;
        TangoPoseData pose = new TangoPoseData();
        TangoCoordinateFramePair framePair;
        framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;

        // IMU-to-device transform.
        framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(pose, queryTimestamp, framePair);
        m_imuTDevice = DMatrix4x4.FromMatrix4x4(pose.ToMatrix4x4());

        // IMU-to-depth-camera transform.
        framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
        PoseProvider.GetPoseAtTime(pose, queryTimestamp, framePair);
        m_imuTDepthCamera = DMatrix4x4.FromMatrix4x4(pose.ToMatrix4x4());

        // Chain the two extrinsics: device-to-depth = inverse(imu-to-device) * imu-to-depth.
        m_deviceTDepthCamera = m_imuTDevice.Inverse * m_imuTDepthCamera;

        // Cache the color camera intrinsics as well.
        m_colorCameraIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);

        m_cameraDataSetUp = true;
    }
Ejemplo n.º 3
0
    /// <summary>
    /// Set up the size of ARScreen based on camera intrinsics.
    ///
    /// Builds the main camera's projection matrix from the color camera's
    /// intrinsics, then clips the screen quad so the camera image keeps its
    /// native aspect ratio instead of being stretched to the screen's ratio.
    /// </summary>
    private void _SetCameraIntrinsics()
    {
        TangoCameraIntrinsics colorIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorIntrinsics);

        // Zero width or height means the service has not yet produced valid intrinsics.
        if (colorIntrinsics.width == 0 || colorIntrinsics.height == 0)
        {
            return;
        }

        Camera.main.projectionMatrix = ProjectionMatrixForCameraIntrinsics(
            (float)colorIntrinsics.width,
            (float)colorIntrinsics.height,
            (float)colorIntrinsics.fx,
            (float)colorIntrinsics.fy,
            (float)colorIntrinsics.cx,
            (float)colorIntrinsics.cy,
            0.1f,
            1000.0f);

        // Here we are scaling the image plane to make sure the image plane's ratio is set as the
        // color camera image ratio.
        // If we don't do this, because we are drawing the texture fullscreen, the image plane will
        // be set to the screen's ratio.
        float widthRatio  = (float)Screen.width / (float)colorIntrinsics.width;
        float heightRatio = (float)Screen.height / (float)colorIntrinsics.height;

        if (widthRatio >= heightRatio)
        {
            _SetScreenVertices(0, ((widthRatio / heightRatio) - 1.0f) / 2.0f);
        }
        else
        {
            _SetScreenVertices(((heightRatio / widthRatio) - 1.0f) / 2.0f, 0);
        }
    }
Ejemplo n.º 4
0
    /// <summary>
    /// Sets up extrinsic matrixes and camera intrinsics for this hardware.
    ///
    /// Queries the fixed IMU-to-device, IMU-to-color-camera, and
    /// IMU-to-depth-camera transforms and caches the color camera intrinsics.
    /// Runs at most once per session.
    /// </summary>
    private void _SetUpCameraData()
    {
        if (m_cameraDataSetUp)
        {
            return;
        }

        // Timestamp 0 returns the fixed factory-calibrated extrinsics.
        double timestamp = 0.0;
        TangoCoordinateFramePair pair;
        TangoPoseData            poseData = new TangoPoseData();

        // Query the extrinsics between IMU and device frame.
        pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
        m_imuTDevice = _PoseToMatrix(poseData);

        // Query the extrinsics between IMU and color camera frame.
        pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
        PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
        m_imuTColorCamera = _PoseToMatrix(poseData);

        // Query the extrinsics between IMU and depth camera frame.
        pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
        PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
        m_imuTDepthCamera = _PoseToMatrix(poseData);

        // Also get the camera intrinsics
        m_colorCameraIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);

        m_cameraDataSetUp = true;
    }

    /// <summary>
    /// Converts a Tango pose into a unit-scale Unity transformation matrix.
    /// Extracted because the same conversion was previously repeated for each
    /// of the three extrinsics queries above.
    /// </summary>
    /// <returns>TRS matrix for the pose with identity scale.</returns>
    /// <param name="poseData">Pose whose translation and orientation to convert.</param>
    private static Matrix4x4 _PoseToMatrix(TangoPoseData poseData)
    {
        Vector3 position = new Vector3((float)poseData.translation[0],
                                       (float)poseData.translation[1],
                                       (float)poseData.translation[2]);
        Quaternion rotation = new Quaternion((float)poseData.orientation[0],
                                             (float)poseData.orientation[1],
                                             (float)poseData.orientation[2],
                                             (float)poseData.orientation[3]);
        return Matrix4x4.TRS(position, rotation, Vector3.one);
    }
Ejemplo n.º 5
0
 /// <summary>
 /// Pass the camera intrinsics to both PostProcess and ARScreen shader.
 ///
 /// The camera intrinsics are needed to distort and undo distort the camera image.
 /// </summary>
 /// <param name="intrinsics">Color camera intrinsics.</param>
 internal void SetupIntrinsic(TangoCameraIntrinsics intrinsics)
 {
     // Shader uniform names paired index-for-index with the intrinsic values
     // they receive.  Keeping the mapping tabular makes omissions obvious.
     string[] uniformNames =
     {
         "_Width", "_Height", "_Fx", "_Fy", "_Cx", "_Cy", "_K0", "_K1", "_K2"
     };
     double[] uniformValues =
     {
         intrinsics.width, intrinsics.height,
         intrinsics.fx, intrinsics.fy,
         intrinsics.cx, intrinsics.cy,
         intrinsics.distortion0, intrinsics.distortion1, intrinsics.distortion2
     };

     for (int i = 0; i < uniformNames.Length; i++)
     {
         m_postProcessMaterial.SetFloat(uniformNames[i], (float)uniformValues[i]);
     }
 }
Ejemplo n.º 6
0
    /// <summary>
    /// This is called when successfully connected to the Tango Service.
    ///
    /// Allocates the environment map texture: a RenderTexture in the editor
    /// (for emulation) or a Texture2D sized to the color camera image on device.
    /// </summary>
    public void OnTangoServiceConnected()
    {
#if UNITY_EDITOR
        // Format needs to be ARGB32 in editor to use Texture2D.ReadPixels() in emulation
        // in Unity 4.6.
        RenderTexture emulatedMap = new RenderTexture(
            EMULATED_CAMERA_WIDTH, EMULATED_CAMERA_HEIGHT, 0, RenderTextureFormat.ARGB32);
        emulatedMap.Create();
        m_environmentMap = emulatedMap;
#else
        // On device, size the map to match the color camera image exactly.
        TangoCameraIntrinsics colorIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorIntrinsics);
        m_environmentMap = new Texture2D((int)colorIntrinsics.width, (int)colorIntrinsics.height, TextureFormat.RGBA32, false);
#endif
    }
Ejemplo n.º 7
0
    /// <summary>
    /// Sets up extrinsic matrixes and camera intrinsics for this hardware.
    ///
    /// In the editor the extrinsics are constant convention-swap matrices; on
    /// device they are queried from the Tango service.  Runs at most once.
    /// </summary>
    private void _SetUpCameraData()
    {
        if (m_cameraDataSetUp)
        {
            return;
        }

#if UNITY_EDITOR
        // Constant matrixes representing just the convention swap.
        m_imuTDevice.SetColumn(0, new Vector4(0.0f, 1.0f, 0.0f, 0.0f));
        m_imuTDevice.SetColumn(1, new Vector4(-1.0f, 0.0f, 0.0f, 0.0f));
        m_imuTDevice.SetColumn(2, new Vector4(0.0f, 0.0f, 1.0f, 0.0f));
        m_imuTDevice.SetColumn(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));

        m_imuTDepthCamera.SetColumn(0, new Vector4(0.0f, 1.0f, 0.0f, 0.0f));
        m_imuTDepthCamera.SetColumn(1, new Vector4(1.0f, 0.0f, 0.0f, 0.0f));
        m_imuTDepthCamera.SetColumn(2, new Vector4(0.0f, 0.0f, -1.0f, 0.0f));
        m_imuTDepthCamera.SetColumn(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
#else
        // These locals are only needed on device; declaring them here (rather
        // than before the #if) avoids unused-variable warnings in editor builds.
        // Timestamp 0 returns the fixed factory-calibrated extrinsics.
        double timestamp = 0.0;
        TangoCoordinateFramePair pair;
        TangoPoseData            poseData = new TangoPoseData();

        // Query the extrinsics between IMU and device frame.
        pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
        m_imuTDevice = poseData.ToMatrix4x4();

        // Query the extrinsics between IMU and depth camera frame.
        pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
        PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
        m_imuTDepthCamera = poseData.ToMatrix4x4();
#endif

        // Also get the camera intrinsics
        m_colorCameraIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);

        m_cameraDataSetUp = true;
    }
Ejemplo n.º 8
0
        /// <summary>
        /// Sets the target camera's field of view from the color camera's
        /// pinhole model: fov = 2 * atan((dimension / 2) / focal length).
        /// </summary>
        /// <param name="vertical">If true, use height/fy; otherwise width/fx.</param>
        void SetFov(bool vertical)
        {
            TangoCameraIntrinsics colorIntrinsics = new TangoCameraIntrinsics();
            VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorIntrinsics);

            float fovDegrees;
            if (vertical)
            {
                fovDegrees = 2f * Mathf.Atan(0.5f * colorIntrinsics.height / (float)colorIntrinsics.fy) * Mathf.Rad2Deg;
            }
            else
            {
                fovDegrees = 2f * Mathf.Atan(0.5f * colorIntrinsics.width / (float)colorIntrinsics.fx) * Mathf.Rad2Deg;
            }

            // NaN/Infinity means the intrinsics were invalid; the Tango API
            // itself should have produced a warning message for that case.
            if (!float.IsNaN(fovDegrees) && !float.IsInfinity(fovDegrees))
            {
                targetCamera.fieldOfView = fovDegrees;
                Debug.Log("FOV is set: " + targetCamera.fieldOfView);
            }
        }
Ejemplo n.º 9
0
    /// <summary>
    /// This is called when succesfully connected to the Tango service.
    ///
    /// Computes the U/V clipping offsets that make the color camera image fill
    /// the attached camera's viewport without non-uniform stretching, then
    /// pushes the intrinsics into the post-process, mesh, and projection matrix.
    /// </summary>
    public void OnTangoServiceConnected()
    {
        // Set up the size of ARScreen based on camera intrinsics.
        TangoCameraIntrinsics colorIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorIntrinsics);

        if (colorIntrinsics.width == 0 || colorIntrinsics.height == 0)
        {
            // No valid intrinsics yet; disable distortion correction entirely.
            m_uOffset = 0;
            m_vOffset = 0;
            m_arCameraPostProcess.enabled = false;
            return;
        }

        Camera camera = GetComponent<Camera>();
        if (camera == null)
        {
            return;
        }

        // If this script is attached to a camera, then the camera is an Augmented Reality camera.  The color
        // camera image then must fill the viewport.  That means we must clip the color camera image to make
        // its ratio the same as the Unity camera.  If we don't do this the color camera image will be
        // stretched non-uniformly, making a circle into an ellipse.
        float widthRatio  = (float)camera.pixelWidth / (float)colorIntrinsics.width;
        float heightRatio = (float)camera.pixelHeight / (float)colorIntrinsics.height;
        if (widthRatio >= heightRatio)
        {
            m_uOffset = 0;
            m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
        }
        else
        {
            m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
            m_vOffset = 0;
        }

        m_arCameraPostProcess.SetupIntrinsic(colorIntrinsics);
        _MeshUpdateForIntrinsics(GetComponent<MeshFilter>().mesh, m_uOffset, m_vOffset);
        _CameraUpdateForIntrinsics(camera, colorIntrinsics, m_uOffset, m_vOffset);
    }
Ejemplo n.º 10
0
    /// <summary>
    /// Update a camera so its perspective lines up with the color camera's perspective.
    ///
    /// Builds an off-center frustum: the image-space clipping edges (adjusted by
    /// the U/V offsets) are projected onto the near clip plane via the focal lengths.
    /// </summary>
    /// <param name="cam">Camera to update.</param>
    /// <param name="intrinsics">Tango camera intrinsics for the color camera.</param>
    /// <param name="uOffset">U texture coordinate clipping.</param>
    /// <param name="vOffset">V texture coordinate clipping.</param>
    private static void _CameraUpdateForIntrinsics(Camera cam, TangoCameraIntrinsics intrinsics, float uOffset, float vOffset)
    {
        float imageWidth  = (float)intrinsics.width;
        float imageHeight = (float)intrinsics.height;
        float principalX  = (float)intrinsics.cx;
        float principalY  = (float)intrinsics.cy;

        // Pixel coordinates are mapped onto the near plane through the focal lengths.
        float near   = cam.nearClipPlane;
        float xscale = near / (float)intrinsics.fx;
        float yscale = near / (float)intrinsics.fy;

        // Amount clipped off each side of the image, in pixels.
        float clipU = uOffset * imageWidth;
        float clipV = vOffset * imageHeight;

        float pixelLeft  = -principalX + clipU;
        float pixelRight = imageWidth - principalX - clipU;

        // OpenGL coordinates has y pointing downwards so we negate this term.
        float pixelBottom = -imageHeight + principalY + clipV;
        float pixelTop    = principalY - clipV;

        cam.projectionMatrix = _Frustum(pixelLeft * xscale, pixelRight * xscale,
                                        pixelBottom * yscale, pixelTop * yscale,
                                        near, cam.farClipPlane);
    }
Ejemplo n.º 11
0
    /// <summary>
    /// Given a screen coordinate, finds a plane that most closely fits the
    /// depth values in that area.
    ///
    /// This function is slow, as it looks at every single point in the point
    /// cloud. Avoid calling this more than once a frame. This also assumes the
    /// Unity camera intrinsics match the device's color camera.
    /// </summary>
    /// <returns><c>true</c>, if a plane was found; <c>false</c> otherwise.</returns>
    /// <param name="cam">The Unity camera.</param>
    /// <param name="pos">The point in screen space to perform detection on.</param>
    /// <param name="planeCenter">Filled in with the center of the plane in Unity world space.</param>
    /// <param name="plane">Filled in with a model of the plane in Unity world space.</param>
    public bool FindPlane(Camera cam, Vector2 pos, out Vector3 planeCenter, out Plane plane)
    {
        if (m_pointsCount == 0)
        {
            // No points to check, maybe not connected to the service yet.
            planeCenter = Vector3.zero;
            plane = new Plane();
            return false;
        }

        Matrix4x4 colorCameraTUnityWorld = m_colorCameraTUnityCamera * cam.transform.worldToLocalMatrix;
        Vector2 normalizedPos = cam.ScreenToViewportPoint(pos);

        // If the camera has a TangoARScreen attached, it is not displaying the entire color camera image.
        // Correct the normalized coordinates by taking the clipping into account.
        TangoARScreen arScreen = cam.gameObject.GetComponent<TangoARScreen>();
        if (arScreen != null)
        {
            normalizedPos = arScreen.ViewportPointToCameraImagePoint(normalizedPos);
        }

        TangoCameraIntrinsics alignedIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetDeviceOrientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR,
                                                                   alignedIntrinsics);

        int fitResult = TangoSupport.FitPlaneModelNearClick(
            m_points, m_pointsCount, m_depthTimestamp, alignedIntrinsics, ref colorCameraTUnityWorld,
            normalizedPos, out planeCenter, out plane);

        return fitResult == Common.ErrorType.TANGO_SUCCESS;
    }
    /// <summary>
    /// This is called when succesfully connected to the Tango service.
    ///
    /// Computes the U/V clipping offsets that make the color camera image fill
    /// this camera's viewport without non-uniform stretching, then pushes the
    /// intrinsics into the screen material, mesh, and projection matrix.
    /// </summary>
    public void OnTangoServiceConnected()
    {
        // Set up the size of ARScreen based on camera intrinsics.
        TangoCameraIntrinsics colorIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorIntrinsics);

        if (colorIntrinsics.width == 0 || colorIntrinsics.height == 0)
        {
            // No valid intrinsics yet; disable distortion correction entirely.
            m_uOffset = 0;
            m_vOffset = 0;
            if (m_arCameraPostProcess != null)
            {
                m_arCameraPostProcess.enabled = false;
            }

            return;
        }

        // The camera to which this script is attached is an Augmented Reality camera.  The color camera
        // image must fill that camera's viewport.  That means we must clip the color camera image to make
        // its ratio the same as the Unity camera.  If we don't do this the color camera image will be
        // stretched non-uniformly, making a circle into an ellipse.
        float widthRatio  = (float)m_camera.pixelWidth / (float)colorIntrinsics.width;
        float heightRatio = (float)m_camera.pixelHeight / (float)colorIntrinsics.height;
        if (widthRatio >= heightRatio)
        {
            m_uOffset = 0;
            m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
        }
        else
        {
            m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
            m_vOffset = 0;
        }

        _MaterialUpdateForIntrinsics(m_screenMaterial, m_arCameraPostProcess, colorIntrinsics);
        _MeshUpdateForIntrinsics(m_screenSpaceMesh, m_uOffset, m_vOffset);
        _CameraUpdateForIntrinsics(m_camera, colorIntrinsics, m_uOffset, m_vOffset);
    }
Ejemplo n.º 13
0
    /// <summary>
    /// Update AR screen rendering and attached Camera's projection matrix.
    ///
    /// Determines the effective camera image dimensions (compensating for
    /// delayed Unity camera rotation on device), computes the U/V clipping
    /// offsets so the color image fills the viewport without stretching, and
    /// pushes the intrinsics into the material, camera, and post-process.
    /// </summary>
    /// <param name="displayRotation">Activity (screen) rotation.</param>
    /// <param name="colorCameraRotation">Color camera sensor rotation.</param>
    private void _SetRenderAndCamera(OrientationManager.Rotation displayRotation,
                                     OrientationManager.Rotation colorCameraRotation)
    {
        // Start from the Unity screen dimensions; these may be swapped below
        // when the rotation has not yet propagated to the Unity camera.
        float cameraWidth  = (float)Screen.width;
        float cameraHeight = (float)Screen.height;

        #pragma warning disable 0219
        // Here we are computing if current display orientation is landscape or portrait.
        // AndroidHelper.GetAndroidDefaultOrientation() returns 1 if device default orientation is in portrait,
        // returns 2 if device default orientation is landscape. Adding device default orientation with
        // how much the display is rotated from default orientation will get us the result of current display
        // orientation. (landscape vs. portrait)
        bool  isLandscape           = (AndroidHelper.GetDefaultOrientation() + (int)displayRotation) % 2 == 0;
        bool  needToFlipCameraRatio = false;
        float cameraRatio           = (float)Screen.width / (float)Screen.height;
        #pragma warning restore 0219

#if !UNITY_EDITOR
        // In most cases, we don't need to flip the camera width and height. However, in some cases Unity camera
        // only updates a couple of frames after the display changed callback from Android; thus, we need to flip the width
        // and height in this case.
        //
        // This does not happen in the editor, because the emulated device does not ever rotate.
        needToFlipCameraRatio = (!isLandscape & (cameraRatio > 1.0f)) || (isLandscape & (cameraRatio < 1.0f));

        if (needToFlipCameraRatio)
        {
            // Swap width and height so the ratio matches the actual orientation.
            cameraRatio = 1.0f / cameraRatio;
            float tmp = cameraWidth;
            cameraWidth  = cameraHeight;
            cameraHeight = tmp;
        }
#endif

        // Aligned intrinsics account for the device orientation; the raw
        // intrinsics are still needed for the YUV post-process (see below).
        TangoCameraIntrinsics alignedIntrinsics = new TangoCameraIntrinsics();
        TangoCameraIntrinsics intrinsics        = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetDeviceOrientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR,
                                                                   alignedIntrinsics);
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR,
                                           intrinsics);

        if (alignedIntrinsics.width != 0 && alignedIntrinsics.height != 0)
        {
            // The camera to which this script is attached is an Augmented Reality camera.  The color camera
            // image must fill that camera's viewport.  That means we must clip the color camera image to make
            // its ratio the same as the Unity camera.  If we don't do this the color camera image will be
            // stretched non-uniformly, making a circle into an ellipse.
            float widthRatio  = (float)cameraWidth / (float)alignedIntrinsics.width;
            float heightRatio = (float)cameraHeight / (float)alignedIntrinsics.height;

            if (widthRatio >= heightRatio)
            {
                m_uOffset = 0;
                m_vOffset = (1 - (heightRatio / widthRatio)) / 2;
            }
            else
            {
                m_uOffset = (1 - (widthRatio / heightRatio)) / 2;
                m_vOffset = 0;
            }

            // Note that here we are passing in non-inverted intrinsics, because the YUV conversion is still operating
            // on native buffer layout.
            OrientationManager.Rotation rotation = TangoSupport.RotateFromAToB(displayRotation, colorCameraRotation);
            _MaterialUpdateForIntrinsics(m_uOffset, m_vOffset, rotation);
            _CameraUpdateForIntrinsics(m_camera, alignedIntrinsics, m_uOffset, m_vOffset);
            if (m_arCameraPostProcess != null)
            {
                m_arCameraPostProcess.SetupIntrinsic(intrinsics);
            }
        }
        else
        {
            Debug.LogError("AR Camera intrinsic is not valid.");
        }
    }
 // NOTE(review): these are native (P/Invoke) method declarations. Their
 // [DllImport] attributes are not visible in this excerpt — confirm they are
 // present in the full source, as extern methods require them.
 public static extern void TangoUnity_setRenderTextureDistortion(TangoCameraIntrinsics intrinsics);

 // Native query for the given camera's intrinsics; fills 'intrinsics' as an
 // output parameter and returns a Tango error code (presumably 0 == success —
 // TODO confirm against the Tango C API).
 public static extern int TangoService_getCameraIntrinsics(
     TangoEnums.TangoCameraId cameraId, [Out] TangoCameraIntrinsics intrinsics);
 /// <summary>
 /// Update AR screen material with camera texture size data
 /// (and distortion parameters if using distortion post-process filter).
 /// </summary>
 /// <param name="mat">Material to update.</param>
 /// <param name="arPostProcess">ARCameraPostProcess script that handles distortion for this material instance
 /// (null if none).</param>
 /// <param name="intrinsics">Tango camera intrinsics for the color camera.</param>
 private static void _MaterialUpdateForIntrinsics(Material mat, ARCameraPostProcess arPostProcess, TangoCameraIntrinsics intrinsics)
 {
     if (arPostProcess == null)
     {
         // Not handling distortion: the material only needs the camera image dimensions.
         mat.SetFloat("_Width", (float)intrinsics.width);
         mat.SetFloat("_Height", (float)intrinsics.height);
         return;
     }

     // ARCameraPostProcess takes care of setting everything up for all materials involved.
     arPostProcess.SetupIntrinsic(intrinsics, mat);
 }