ToMatrix4x4() private method

Convert this pose into matrix form.
Signature: private Matrix4x4 ToMatrix4x4()
Returns a UnityEngine.Matrix4x4 representing this pose.
Esempio n. 1
0
    /// <summary>
    /// Queries the most recent device pose from the Tango service, converts it
    /// into Unity world coordinates, and refreshes the cached position fields
    /// (`currPos` and the formatted `position` string).
    /// </summary>
    public void updatePosition()
    {
        // Timestamp 0.0 asks the pose provider for the latest available pose
        // for the frame pair `fp`.
        Tango.TangoPoseData data = new Tango.TangoPoseData();
        Tango.PoseProvider.GetPoseAtTime(data, 0.0, fp);
        Matrix4x4 ssTdevice = data.ToMatrix4x4();

        // Unity world w.r.t. Tango start-of-service: swaps the Y and Z axes.
        // (This permutation matrix is symmetric, so columns equal rows.)
        Matrix4x4 uwTss = new Matrix4x4();
        uwTss.SetColumn(0, new Vector4(1f, 0f, 0f, 0f));
        uwTss.SetColumn(1, new Vector4(0f, 0f, 1f, 0f));
        uwTss.SetColumn(2, new Vector4(0f, 1f, 0f, 0f));
        uwTss.SetColumn(3, new Vector4(0f, 0f, 0f, 1f));

        // Device w.r.t. Unity camera: negates the Z axis only (diagonal matrix).
        Matrix4x4 dTuc = new Matrix4x4();
        dTuc.SetColumn(0, new Vector4(1f, 0f, 0f, 0f));
        dTuc.SetColumn(1, new Vector4(0f, 1f, 0f, 0f));
        dTuc.SetColumn(2, new Vector4(0f, 0f, -1f, 0f));
        dTuc.SetColumn(3, new Vector4(0f, 0f, 0f, 1f));

        // Chain: Unity world <- start of service <- device <- Unity camera.
        Matrix4x4 uwTuc = uwTss * ssTdevice * dTuc;

        // The fourth column of a homogeneous transform is its translation.
        currPos  = uwTuc.GetColumn(3);
        position = "x: " + System.Math.Round(currPos.x, 3)
                 + ", y: " + System.Math.Round(currPos.y, 3)
                 + ", z: " + System.Math.Round(currPos.z, 3);
    }
        /// <summary>
        /// Calculate the camera extrinsics for this device.
        ///
        /// Queries the IMU-relative poses of the device, depth camera, and
        /// color camera, derives the device-relative camera transforms, fills
        /// in the Unity-world/start-of-service axis swap, and caches the color
        /// camera intrinsics.
        /// </summary>
        private void _UpdateExtrinsics()
        {
            // All three queries share the IMU base frame; only the target
            // frame changes between them.
            TangoCoordinateFramePair pair;
            pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;

            TangoPoseData imuTDevicePose = new TangoPoseData();
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            PoseProvider.GetPoseAtTime(imuTDevicePose, 0, pair);

            TangoPoseData imuTDepthPose = new TangoPoseData();
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
            PoseProvider.GetPoseAtTime(imuTDepthPose, 0, pair);

            TangoPoseData imuTColorPose = new TangoPoseData();
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
            PoseProvider.GetPoseAtTime(imuTColorPose, 0, pair);

            // Combine in matrix form: device_T_camera = (imu_T_device)^-1 * imu_T_camera.
            Matrix4x4 deviceTImu = Matrix4x4.Inverse(imuTDevicePose.ToMatrix4x4());
            m_device_T_depthCamera = deviceTImu * imuTDepthPose.ToMatrix4x4();
            m_device_T_colorCamera = deviceTImu * imuTColorPose.ToMatrix4x4();

            // Unity world w.r.t. start of service: swaps the Y and Z axes.
            m_unityWorld_T_startService.SetColumn(0, new Vector4(1, 0, 0, 0));
            m_unityWorld_T_startService.SetColumn(1, new Vector4(0, 0, 1, 0));
            m_unityWorld_T_startService.SetColumn(2, new Vector4(0, 1, 0, 0));
            m_unityWorld_T_startService.SetColumn(3, new Vector4(0, 0, 0, 1));

            // Copy the current color camera intrinsics field by field into the
            // cached member struct.
            TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
            VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, intrinsics);
            m_colorCameraIntrinsics.calibration_type = (int)intrinsics.calibration_type;
            m_colorCameraIntrinsics.width = intrinsics.width;
            m_colorCameraIntrinsics.height = intrinsics.height;
            m_colorCameraIntrinsics.cx = intrinsics.cx;
            m_colorCameraIntrinsics.cy = intrinsics.cy;
            m_colorCameraIntrinsics.fx = intrinsics.fx;
            m_colorCameraIntrinsics.fy = intrinsics.fy;
            m_colorCameraIntrinsics.distortion0 = intrinsics.distortion0;
            m_colorCameraIntrinsics.distortion1 = intrinsics.distortion1;
            m_colorCameraIntrinsics.distortion2 = intrinsics.distortion2;
            m_colorCameraIntrinsics.distortion3 = intrinsics.distortion3;
            m_colorCameraIntrinsics.distortion4 = intrinsics.distortion4;
        }
Esempio n. 3
0
        /// <summary>
        /// This will be called when a new frame is available from the camera.
        ///
        /// The first scan-line of the color image is reserved for metadata instead of image pixels.
        /// </summary>
        /// <param name="cameraId">Camera identifier.</param>
        /// <param name="imageBuffer">Image buffer.</param>
        public void OnTangoImageMultithreadedAvailable(TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
        {
            // Ignore callbacks while disabled, and frames from any camera
            // other than the color camera.
            if (!m_enabled || cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
            {
                return;
            }

            // Query the device pose at the image timestamp, against either the
            // area description frame or the start-of-service frame.
            TangoCoordinateFramePair pair;
            pair.baseFrame = m_useAreaDescriptionPose
                ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
                : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

            TangoPoseData world_T_devicePose = new TangoPoseData();
            PoseProvider.GetPoseAtTime(world_T_devicePose, imageBuffer.timestamp, pair);

            // Drop the frame if no valid pose exists for its timestamp.
            if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                Debug.Log(string.Format("Time {0} has bad status code {1}",
                                        imageBuffer.timestamp, world_T_devicePose.status_code)
                          + Environment.StackTrace);
                return;
            }

            // The 3D Reconstruction library can not handle a left handed transformation during update.  Instead,
            // transform into the Unity world space via the external_T_tango config.
            Matrix4x4 world_T_colorCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_colorCamera;

            _UpdateColor(imageBuffer, world_T_colorCamera);
        }
Esempio n. 4
0
        /// <summary>
        /// Convert a TangoPoseData into the Unity coordinate system. This only
        /// works on TangoPoseData that describes the device with respect to the
        /// start of service or area description. The result position and
        /// rotation can be used to set Unity's Transform.position and
        /// Transform.rotation.
        /// </summary>
        /// <param name="poseData">
        /// The input pose data that is going to be converted, please note that
        /// the pose data has to be in the start of service with respect to
        /// device frame.
        /// </param>
        /// <param name="position">The result position data.</param>
        /// <param name="rotation">The result rotation data.</param>
        public static void TangoPoseToWorldTransform(TangoPoseData poseData,
                                                     out Vector3 position,
                                                     out Quaternion rotation)
        {
            // Validate the input; every rejection shares the same fallback of
            // zero position / identity rotation, so collect the reason first.
            string invalidReason = null;
            if (poseData == null)
            {
                invalidReason = "Invalid poseData.";
            }
            else if (poseData.framePair.targetFrame != TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE)
            {
                invalidReason = "Invalid target frame of the poseData.";
            }
            else if (poseData.framePair.baseFrame !=
                     TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE &&
                     poseData.framePair.baseFrame !=
                     TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION)
            {
                invalidReason = "Invalid base frame of the poseData.";
            }

            if (invalidReason != null)
            {
                Debug.Log(invalidReason + "\n" + Environment.StackTrace);
                position = Vector3.zero;
                rotation = Quaternion.identity;
                return;
            }

            // unityWorld_T_unityCamera = uw_T_ss * ss_T_device * device_T_uc * pose rotation.
            Matrix4x4 unityWorldTUnityCamera =
                UNITY_WORLD_T_START_SERVICE *
                poseData.ToMatrix4x4() *
                DEVICE_T_UNITY_CAMERA *
                m_devicePoseRotation;

            // Extract final position, rotation from the combined transform.
            position = unityWorldTUnityCamera.GetColumn(3);
            rotation = Quaternion.LookRotation(unityWorldTUnityCamera.GetColumn(2),
                                               unityWorldTUnityCamera.GetColumn(1));
        }
        /// <summary>
        /// This is called each time new depth data is available.
        ///
        /// On the Tango tablet, the depth callback occurs at 5 Hz.
        /// </summary>
        /// <param name="tangoDepth">Tango depth.</param>
        public void OnTangoDepthMultithreadedAvailable(TangoXYZij tangoDepth)
        {
            // Nothing to do while this component is disabled.
            if (!m_enabled)
            {
                return;
            }

            // Pose of the device at the depth timestamp, relative to either
            // the area description frame or the start-of-service frame.
            TangoCoordinateFramePair pair;
            pair.baseFrame = m_useAreaDescriptionPose
                ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
                : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

            TangoPoseData world_T_devicePose = new TangoPoseData();
            PoseProvider.GetPoseAtTime(world_T_devicePose, tangoDepth.timestamp, pair);

            // Drop this depth frame if no valid pose exists for its timestamp.
            if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                Debug.Log(string.Format("Time {0} has bad status code {1}",
                                        tangoDepth.timestamp, world_T_devicePose.status_code)
                          + Environment.StackTrace);
                return;
            }

            // NOTE: The 3D Reconstruction library does not handle left handed matrices correctly.  For now, transform
            // into the Unity world space after extraction.
            Matrix4x4 world_T_depthCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_depthCamera;

            _UpdateDepth(tangoDepth, world_T_depthCamera);
        }
Esempio n. 6
0
        /// <summary>
        /// Convert a TangoPoseData into the Unity coordinate system. This only
        /// works on TangoPoseData that describes the device with respect to the
        /// start of service or area description. The result position and
        /// rotation can be used to set Unity's Transform.position and
        /// Transform.rotation.
        /// </summary>
        /// <param name="poseData">
        /// The input pose data that is going to be converted, please note that
        /// the pose data has to be in the start of service with respect to
        /// device frame.
        /// </param>
        /// <param name="position">The result position data.</param>
        /// <param name="rotation">The result rotation data.</param>
        public static void TangoPoseToWorldTransform(TangoPoseData poseData,
                                                     out Vector3 position,
                                                     out Quaternion rotation)
        {
            // Default outputs used by every rejection path below; overwritten
            // only when the pose is accepted.
            position = Vector3.zero;
            rotation = Quaternion.identity;

            if (poseData == null)
            {
                Debug.Log("Invalid poseData.\n" + Environment.StackTrace);
                return;
            }

            if (poseData.framePair.targetFrame != TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE)
            {
                Debug.Log("Invalid target frame of the poseData.\n" + Environment.StackTrace);
                return;
            }

            // The base frame must be one of the two world-like frames.
            bool baseIsWorld =
                poseData.framePair.baseFrame == TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE ||
                poseData.framePair.baseFrame == TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
            if (!baseIsWorld)
            {
                Debug.Log("Invalid base frame of the poseData.\n" + Environment.StackTrace);
                return;
            }

            // Compose world_T_unityCamera and extract position and rotation.
            Matrix4x4 startServiceTDevice = poseData.ToMatrix4x4();
            Matrix4x4 uwTuc = UNITY_WORLD_T_START_SERVICE *
                              startServiceTDevice *
                              DEVICE_T_UNITY_CAMERA *
                              m_devicePoseRotation;

            position = uwTuc.GetColumn(3);
            rotation = Quaternion.LookRotation(uwTuc.GetColumn(2), uwTuc.GetColumn(1));
        }
    /// <summary>
    /// Callback that gets called when depth is available from the Tango Service.
    ///
    /// Transforms the incoming point cloud into Unity world space, updates the
    /// cached point array (and optionally the debug mesh), and can trigger
    /// floor detection.
    /// </summary>
    /// <param name="pointCloud">Depth information from Tango.</param>
    public void OnTangoPointCloudAvailable(TangoPointCloudData pointCloud)
    {
        // Calculate the time since the last successful depth data
        // collection, in milliseconds (timestamps are in seconds).
        if (m_depthTimestamp != 0.0)
        {
            m_depthDeltaTime = (float)((pointCloud.m_timestamp - m_depthTimestamp) * 1000.0);
        }
        
        // Fill in the data to draw the point cloud.
        m_pointsCount = pointCloud.m_numPoints;
        if (m_pointsCount > 0)
        {
            // One-time setup of extrinsics/intrinsics (no-op after first call).
            _SetUpCameraData();
            TangoCoordinateFramePair pair;
            TangoPoseData poseData = new TangoPoseData();

            // Query pose to transform point cloud to world coordinates, here we are using the timestamp
            // that we get from depth.
            if (m_useAreaDescriptionPose)
            {
                pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            }
            else
            {
                pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
                pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            }

            PoseProvider.GetPoseAtTime(poseData, pointCloud.m_timestamp, pair);
            // Silently skip this cloud when no valid pose exists; previously
            // computed state (points, mesh, timestamp) is left untouched.
            if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                return;
            }

            Matrix4x4 startServiceTDevice = poseData.ToMatrix4x4();

            // The transformation matrix that represents the pointcloud's pose. 
            // Explanation: 
            // The pointcloud which is in Depth camera's frame, is put in unity world's 
            // coordinate system(wrt unity world).
            // Then we are extracting the position and rotation from uwTuc matrix and applying it to 
            // the PointCloud's transform.
            Matrix4x4 unityWorldTDepthCamera = m_unityWorldTStartService * startServiceTDevice * Matrix4x4.Inverse(m_imuTDevice) * m_imuTDepthCamera;
            // Points are baked into world space below, so this object's own
            // transform stays at identity.
            transform.position = Vector3.zero;
            transform.rotation = Quaternion.identity;

            // Add offset to the pointcloud depending on the offset from TangoDeltaPoseController
            Matrix4x4 unityWorldOffsetTDepthCamera;
            if (m_tangoDeltaPoseController != null)
            {
                unityWorldOffsetTDepthCamera = m_tangoDeltaPoseController.UnityWorldOffset * unityWorldTDepthCamera;
            }
            else
            {
                unityWorldOffsetTDepthCamera = unityWorldTDepthCamera;
            }

            // Converting points array to world space.
            m_overallZ = 0;
            for (int i = 0; i < m_pointsCount; ++i)
            {
                Vector3 point = pointCloud[i];
                m_points[i] = unityWorldOffsetTDepthCamera.MultiplyPoint3x4(point);
                // Accumulates z from the untransformed (camera-frame) point,
                // not the world-space result.
                m_overallZ += point.z;
            }

            // Average camera-frame z over all points in this cloud.
            m_overallZ = m_overallZ / m_pointsCount;
            m_depthTimestamp = pointCloud.m_timestamp;

            if (m_updatePointsMesh)
            {
                // Need to update indicies too!
                int[] indices = new int[m_pointsCount];
                for (int i = 0; i < m_pointsCount; ++i)
                {
                    indices[i] = i;
                }

                m_mesh.Clear();
                m_mesh.vertices = m_points;
                m_mesh.SetIndices(indices, MeshTopology.Points, 0);
            }

            // The color should be pose relative, we need to store enough info to go back to pose values.
            m_renderer.material.SetMatrix("depthCameraTUnityWorld", unityWorldOffsetTDepthCamera.inverse);

            // Try to find the floor using this set of depth points if requested.
            if (m_findFloorWithDepth)
            {
                _FindFloorWithDepth();
            }
        }
        else
        {
            // No points this frame: reset the average depth only.
            m_overallZ = 0;
        }
    }
    /// <summary>
    /// Sets up extrinsic matrixes and camera intrinsics for this hardware.
    /// </summary>
    private void _SetUpCameraData()
    {
        // These values are fixed for the hardware; only query them once.
        if (m_cameraDataSetUp)
        {
            return;
        }

        // Both extrinsic queries use the IMU as the base frame.
        TangoCoordinateFramePair pair;
        pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
        TangoPoseData poseData = new TangoPoseData();

        // Query the extrinsics between IMU and device frame (timestamp 0.0).
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(poseData, 0.0, pair);
        m_imuTDevice = poseData.ToMatrix4x4();

        // Query the extrinsics between IMU and depth camera frame.
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
        PoseProvider.GetPoseAtTime(poseData, 0.0, pair);
        m_imuTDepthCamera = poseData.ToMatrix4x4();

        // Also cache the color camera intrinsics.
        m_colorCameraIntrinsics = new TangoCameraIntrinsics();
        VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);

        m_cameraDataSetUp = true;
    }
    /// @endcond
    /// <summary>
    /// Updates the transformation to the pose for that timestamp.
    /// </summary>
    /// <param name="timestamp">Time in seconds to update the transformation to.</param>
    private void _UpdateTransformation(double timestamp)
    {
        // Device pose relative to either the area description frame or the
        // start-of-service frame, depending on configuration.
        TangoCoordinateFramePair pair;
        pair.baseFrame = m_useAreaDescriptionPose
            ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
            : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

        TangoPoseData pose = new TangoPoseData();
        PoseProvider.GetPoseAtTime(pose, timestamp, pair);

        if (pose.status_code == TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
        {
            // Pose of the device in the world-like base frame, in matrix form.
            Matrix4x4 worldTDevice = pose.ToMatrix4x4();

            // Camera pose in the Unity world, taking configured offsets into
            // account.
            Matrix4x4 worldTCamera = m_uwTss * worldTDevice * m_dTuc * TangoSupport.m_colorCameraPoseRotation;

            // Extract final position, rotation.
            m_tangoPosition = worldTCamera.GetColumn(3);
            m_tangoRotation = Quaternion.LookRotation(worldTCamera.GetColumn(2), worldTCamera.GetColumn(1));

            // Reset the pose counter when the status just became valid.
            if (pose.status_code != m_poseStatus)
            {
                m_poseCount = 0;
            }

            m_poseCount++;

            // Remember the time of the last valid pose.
            m_poseTimestamp = timestamp;
        }

        m_poseStatus = pose.status_code;

        // Apply the (possibly unchanged) position and rotation.
        transform.position = m_tangoPosition;
        transform.rotation = m_tangoRotation;
    }
    /// <summary>
    /// Gets device and camera extrinsics necessary for the transformations done
    /// by this controller. Extrinsics queries use GetPoseAtTime() with a
    /// specific frame pair, and can only be done after the Tango service is
    /// connected.
    ///
    /// The transform for the device with respect to the color camera frame is
    /// not directly queryable from API, so we use the IMU frame to get the
    /// transformation between the two.
    /// </summary>
    private void _SetCameraExtrinsics()
    {
        // Both queries are based in the IMU frame.
        TangoCoordinateFramePair pair;
        pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
        TangoPoseData poseData = new TangoPoseData();

        // Device frame with respect to IMU frame (timestamp 0.0).
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(poseData, 0.0, pair);
        Matrix4x4 imuTDevice = poseData.ToMatrix4x4();

        // Color camera frame with respect to IMU frame.
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
        PoseProvider.GetPoseAtTime(poseData, 0.0, pair);
        Matrix4x4 imuTColor = poseData.ToMatrix4x4();

        // Unity camera w.r.t. the color camera: negates the Y axis only.
        // (Diagonal matrix, so rows and columns are interchangeable.)
        Matrix4x4 cTuc = new Matrix4x4();
        cTuc.SetRow(0, new Vector4(1.0f, 0.0f, 0.0f, 0.0f));
        cTuc.SetRow(1, new Vector4(0.0f, -1.0f, 0.0f, 0.0f));
        cTuc.SetRow(2, new Vector4(0.0f, 0.0f, 1.0f, 0.0f));
        cTuc.SetRow(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));

        // device_T_unityCamera = (imu_T_device)^-1 * imu_T_color * color_T_unityCamera.
        m_dTuc = Matrix4x4.Inverse(imuTDevice) * imuTColor * cTuc;
    }
        /// <summary>
        /// Calculate the camera extrinsics for this device.
        /// </summary>
        private void _UpdateExtrinsics()
        {
            // The three extrinsic queries all use the IMU base frame.
            TangoCoordinateFramePair framePair;
            framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;

            // imu_T_device.
            TangoPoseData devicePose = new TangoPoseData();
            framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            PoseProvider.GetPoseAtTime(devicePose, 0, framePair);

            // imu_T_depthCamera.
            TangoPoseData depthPose = new TangoPoseData();
            framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
            PoseProvider.GetPoseAtTime(depthPose, 0, framePair);

            // imu_T_colorCamera.
            TangoPoseData colorPose = new TangoPoseData();
            framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
            PoseProvider.GetPoseAtTime(colorPose, 0, framePair);

            // device_T_camera = (imu_T_device)^-1 * imu_T_camera.
            Matrix4x4 deviceTImu = Matrix4x4.Inverse(devicePose.ToMatrix4x4());
            m_device_T_depthCamera = deviceTImu * depthPose.ToMatrix4x4();
            m_device_T_colorCamera = deviceTImu * colorPose.ToMatrix4x4();

            // Unity world w.r.t. start of service: swaps the Y and Z axes.
            m_unityWorld_T_startService.SetColumn(0, new Vector4(1, 0, 0, 0));
            m_unityWorld_T_startService.SetColumn(1, new Vector4(0, 0, 1, 0));
            m_unityWorld_T_startService.SetColumn(2, new Vector4(0, 1, 0, 0));
            m_unityWorld_T_startService.SetColumn(3, new Vector4(0, 0, 0, 1));

            // Refresh the cached color camera intrinsics, field by field.
            TangoCameraIntrinsics ci = new TangoCameraIntrinsics();
            VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, ci);
            m_colorCameraIntrinsics.calibration_type = (int)ci.calibration_type;
            m_colorCameraIntrinsics.width = ci.width;
            m_colorCameraIntrinsics.height = ci.height;
            m_colorCameraIntrinsics.cx = ci.cx;
            m_colorCameraIntrinsics.cy = ci.cy;
            m_colorCameraIntrinsics.fx = ci.fx;
            m_colorCameraIntrinsics.fy = ci.fy;
            m_colorCameraIntrinsics.distortion0 = ci.distortion0;
            m_colorCameraIntrinsics.distortion1 = ci.distortion1;
            m_colorCameraIntrinsics.distortion2 = ci.distortion2;
            m_colorCameraIntrinsics.distortion3 = ci.distortion3;
            m_colorCameraIntrinsics.distortion4 = ci.distortion4;
        }
        /// <summary>
        /// This will be called when a new frame is available from the camera.
        /// 
        /// The first scan-line of the color image is reserved for metadata instead of image pixels.
        /// </summary>
        /// <param name="cameraId">Camera identifier.</param>
        /// <param name="imageBuffer">Image buffer.</param>
        public void OnTangoImageMultithreadedAvailable(TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
        {
            if (!m_enabled)
            {
                return;
            }

            // Only frames from the color camera are handled here.
            if (cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
            {
                return;
            }

            // Select the world-like base frame, then query the device pose at
            // the image timestamp.
            TangoCoordinateFramePair pair;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            if (m_useAreaDescriptionPose)
            {
                pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
            }
            else
            {
                pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            }

            TangoPoseData world_T_devicePose = new TangoPoseData();
            PoseProvider.GetPoseAtTime(world_T_devicePose, imageBuffer.timestamp, pair);

            // Skip the frame when no valid pose is available for it.
            if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                Debug.Log(string.Format("Time {0} has bad status code {1}",
                                        imageBuffer.timestamp, world_T_devicePose.status_code)
                          + Environment.StackTrace);
                return;
            }

            // NOTE: The 3D Reconstruction library does not handle left handed matrices correctly.  For now, transform
            // into the Unity world space after extraction.
            Matrix4x4 world_T_colorCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_colorCamera;

            _UpdateColor(imageBuffer, world_T_colorCamera);
        }
        /// <summary>
        /// This is called each time new depth data is available.
        /// 
        /// On the Tango tablet, the depth callback occurs at 5 Hz.
        /// </summary>
        /// <param name="pointCloud">Tango depth.</param>
        public void OnTangoPointCloudMultithreadedAvailable(ref TangoPointCloudIntPtr pointCloud)
        {
            // Skip work while disabled or when the native point buffer is null.
            if (!m_enabled || pointCloud.m_points == IntPtr.Zero)
            {
                return;
            }

            // Device pose at the point cloud timestamp, against either the
            // area description frame or the start-of-service frame.
            TangoCoordinateFramePair pair;
            pair.baseFrame = m_useAreaDescriptionPose
                ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
                : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

            TangoPoseData world_T_devicePose = new TangoPoseData();
            PoseProvider.GetPoseAtTime(world_T_devicePose, pointCloud.m_timestamp, pair);

            // Drop this cloud if no valid pose exists for its timestamp.
            if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                Debug.LogFormat("Time {0} has bad status code {1}{2}", 
                                pointCloud.m_timestamp, world_T_devicePose.status_code, Environment.StackTrace);
                return;
            }

            // The 3D Reconstruction library can not handle a left handed transformation during update.  Instead,
            // transform into the Unity world space via the external_T_tango config.
            Matrix4x4 world_T_depthCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_depthCamera;

            _UpdateDepth(pointCloud, world_T_depthCamera);
        }
        /// <summary>
        /// This is called when successfully connected to the Tango service.
        ///
        /// Queries the fixed device extrinsics (IMU-to-device, IMU-to-depth-camera,
        /// IMU-to-color-camera), combines them into the device-to-camera matrices,
        /// and pushes the color and depth camera intrinsics into the 3D
        /// Reconstruction context.
        /// </summary>
        public void OnTangoServiceConnected()
        {
            // Calculate the camera extrinsics.  Timestamp 0 returns the fixed
            // factory-calibration pose for these frame pairs.
            TangoCoordinateFramePair pair;

            TangoPoseData imu_T_devicePose = new TangoPoseData();
            pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            PoseProvider.GetPoseAtTime(imu_T_devicePose, 0, pair);

            TangoPoseData imu_T_depthCameraPose = new TangoPoseData();
            pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
            PoseProvider.GetPoseAtTime(imu_T_depthCameraPose, 0, pair);

            TangoPoseData imu_T_colorCameraPose = new TangoPoseData();
            pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
            PoseProvider.GetPoseAtTime(imu_T_colorCameraPose, 0, pair);

            // Convert into matrix form to combine the poses:
            // device_T_camera = inverse(imu_T_device) * imu_T_camera.
            Matrix4x4 device_T_imu = Matrix4x4.Inverse(imu_T_devicePose.ToMatrix4x4());
            m_device_T_depthCamera = device_T_imu * imu_T_depthCameraPose.ToMatrix4x4();
            m_device_T_colorCamera = device_T_imu * imu_T_colorCameraPose.ToMatrix4x4();

            // Update the camera intrinsics in the 3D Reconstruction context.
            APICameraCalibration colorCameraCalibration =
                _GetCameraCalibration(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR);
            Status status = (Status)API.Tango3DR_setColorCalibration(m_context, ref colorCameraCalibration);
            if (status != Status.SUCCESS)
            {
                Debug.Log("Unable to set color calibration." + Environment.StackTrace);
            }

            APICameraCalibration depthCameraCalibration =
                _GetCameraCalibration(TangoEnums.TangoCameraId.TANGO_CAMERA_DEPTH);
            status = (Status)API.Tango3DR_setDepthCalibration(m_context, ref depthCameraCalibration);
            if (status != Status.SUCCESS)
            {
                Debug.Log("Unable to set depth calibration." + Environment.StackTrace);
            }
        }

        /// <summary>
        /// Query the intrinsics of the given camera and copy them into the
        /// calibration struct expected by the 3D Reconstruction API.
        /// </summary>
        /// <param name="cameraId">The camera whose intrinsics should be queried.</param>
        /// <returns>A fully populated calibration struct for that camera.</returns>
        private static APICameraCalibration _GetCameraCalibration(TangoEnums.TangoCameraId cameraId)
        {
            TangoCameraIntrinsics intrinsics = new TangoCameraIntrinsics();
            VideoOverlayProvider.GetIntrinsics(cameraId, intrinsics);

            APICameraCalibration calibration;
            calibration.calibration_type = (int)intrinsics.calibration_type;
            calibration.width = intrinsics.width;
            calibration.height = intrinsics.height;
            calibration.cx = intrinsics.cx;
            calibration.cy = intrinsics.cy;
            calibration.fx = intrinsics.fx;
            calibration.fy = intrinsics.fy;
            calibration.distortion0 = intrinsics.distortion0;
            calibration.distortion1 = intrinsics.distortion1;
            calibration.distortion2 = intrinsics.distortion2;
            calibration.distortion3 = intrinsics.distortion3;
            calibration.distortion4 = intrinsics.distortion4;
            return calibration;
        }
    /// <summary>
    /// Correct all saved marks when loop closure happens.
    /// 
    /// When the Tango service is in learning mode, drift accumulates over time, but when the system sees a
    /// pre-existing area it performs an operation that corrects all previously saved poses
    /// (the poses you can query with GetPoseAtTime). This operation is called loop closure. When loop closure
    /// happens, we need to re-query all previously saved marker positions in order to achieve the best result.
    /// This function does that querying based on each marker's saved timestamp.
    /// </summary>
    private void _UpdateMarkersForLoopClosures()
    {
        // Adjust each mark's position every time a loop closure is detected.
        foreach (GameObject obj in m_markerList)
        {
            ARMarker tempMarker = obj.GetComponent<ARMarker>();

            // A timestamp of -1 marks a marker that was never anchored to a device pose.
            if (tempMarker.m_timestamp == -1.0f)
            {
                continue;
            }

            TangoCoordinateFramePair pair;
            TangoPoseData relocalizedPose = new TangoPoseData();

            pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            PoseProvider.GetPoseAtTime(relocalizedPose, tempMarker.m_timestamp, pair);

            // Skip this marker if the corrected pose could not be queried; using an
            // invalid pose would move the marker to a garbage location.
            if (relocalizedPose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                continue;
            }

            // Compute the Unity-world transform of the device at the marker's
            // timestamp, then re-apply the marker's saved device-relative offset.
            Matrix4x4 uwTDevice = m_poseController.m_uwTss
                                  * relocalizedPose.ToMatrix4x4()
                                  * m_poseController.m_dTuc;

            Matrix4x4 uwTMarker = uwTDevice * tempMarker.m_deviceTMarker;

            obj.transform.position = uwTMarker.GetColumn(3);
            obj.transform.rotation = Quaternion.LookRotation(uwTMarker.GetColumn(2), uwTMarker.GetColumn(1));
        }
    }