/// <summary>
/// Raise a Tango pose event if there is new data.
///
/// Pose callbacks arrive on a non-Unity thread, so all access to the pending
/// pose fields is guarded by m_lockObject; this method is presumably invoked
/// from the main Unity thread — TODO confirm against the caller.
/// </summary>
internal void SendPoseIfAvailable()
{
#if UNITY_EDITOR
    // In the editor there is no Tango service; optionally synthesize poses from
    // the mouse/keyboard emulation layer instead.
    if (TangoApplication.m_mouseEmulationViaPoseUpdates)
    {
        PoseProvider.UpdateTangoEmulation();
        lock (m_lockObject)
        {
            if (m_onTangoPoseAvailable != null)
            {
                // Emulate both a motion-tracking pose and an area-learning pose
                // (same device target, different base frames).
                FillEmulatedPoseData(ref m_motionTrackingData,
                                     TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE,
                                     TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE);
                FillEmulatedPoseData(ref m_areaLearningData,
                                     TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION,
                                     TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE);
                m_isDirty = true;
            }
        }
    }
#endif

    if (m_isDirty)
    {
        if (m_onTangoPoseAvailable != null)
        {
            // NOTE: If this becomes a performance issue, this could be changed to use
            // Interlocked.CompareExchange to "consume" the motion tracking data.
            lock (m_lockObject)
            {
                // Each pending pose is dispatched at most once: after the event fires,
                // the object is returned to the pool and the slot cleared to null.
                if (m_motionTrackingData != null)
                {
                    m_onTangoPoseAvailable(m_motionTrackingData);
                    m_poseDataPool.Push(m_motionTrackingData);
                    m_motionTrackingData = null;
                }

                if (m_areaLearningData != null)
                {
                    m_onTangoPoseAvailable(m_areaLearningData);
                    m_poseDataPool.Push(m_areaLearningData);
                    m_areaLearningData = null;
                }

                if (m_relocalizationData != null)
                {
                    m_onTangoPoseAvailable(m_relocalizationData);
                    m_poseDataPool.Push(m_relocalizationData);
                    m_relocalizationData = null;
                }
            }
        }

        m_isDirty = false;
    }
}
/// <summary>
/// Register to get Tango pose callbacks for specific reference frames.
///
/// NOTE: Tango pose callbacks happen on a different thread than the main
/// Unity thread.
/// </summary>
/// <param name="framePairs">The reference frames to get callbacks for.</param>
internal static void SetCallback(TangoCoordinateFramePair[] framePairs)
{
    // Registering twice is not supported; only hook up the provider callback
    // when no callback is currently installed.
    if (m_poseAvailableCallback == null)
    {
        Debug.Log("PoseListener.SetCallback()");
        m_poseAvailableCallback = new PoseProvider.APIOnPoseAvailable(_OnPoseAvailable);
        PoseProvider.SetCallback(framePairs, m_poseAvailableCallback);
    }
    else
    {
        Debug.Log("PoseListener.SetCallback() called when callback is already set.");
    }
}
/// <summary>
/// Raise a Tango pose event if there is new data.
/// </summary>
/// <param name="emulateAreaDescriptions">If set, Area description poses are emulated.</param>
internal void SendPoseIfAvailable(bool emulateAreaDescriptions)
{
#if UNITY_EDITOR
    // Drive the editor-only emulation, then pull fresh emulated poses.
    // The lock protects the shared pose buffers against the callback thread.
    PoseProvider.UpdateTangoEmulation();
    lock (m_lockObject)
    {
        if (m_onTangoPoseAvailable != null)
        {
            TangoCoordinateFramePair framePair;

            // Motion tracking: start-of-service -> device at the latest time (timestamp 0).
            framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            PoseProvider.GetPoseAtTime(m_motionTrackingData, 0, framePair);
            m_isMotionTrackingPoseAvailable = true;

            if (emulateAreaDescriptions)
            {
                // Area learning: area-description -> device.
                framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
                PoseProvider.GetPoseAtTime(m_areaLearningData, 0, framePair);
                m_isAreaLearningPoseAvailable = true;
            }
        }
    }
#endif

    if (m_onTangoPoseAvailable != null)
    {
        // NOTE: If this becomes a performance issue, this could be changed to use
        // Interlocked.CompareExchange to "consume" the motion tracking data.
        lock (m_lockObject)
        {
            // Fire each pose event at most once per availability flag, clearing the
            // flag after dispatch so the same data is not delivered again.
            if (m_isMotionTrackingPoseAvailable)
            {
                m_onTangoPoseAvailable(m_motionTrackingData);
                m_isMotionTrackingPoseAvailable = false;
            }

            if (m_isAreaLearningPoseAvailable)
            {
                m_onTangoPoseAvailable(m_areaLearningData);
                m_isAreaLearningPoseAvailable = false;
            }

            if (m_isRelocalizaitonPoseAvailable)
            {
                m_onTangoPoseAvailable(m_relocalizationData);
                m_isRelocalizaitonPoseAvailable = false;
            }
        }
    }
}
/// <summary>
/// Calculate the camera extrinsics for this device.
///
/// Queries the fixed IMU-relative poses of the device, depth camera, and
/// color camera, combines them into device-relative transforms, and also
/// refreshes the cached color camera intrinsics.
/// </summary>
private void _UpdateExtrinsics()
{
    // All three queries share the IMU base frame; extrinsics are constant,
    // so timestamp 0 (latest) is used for each.
    TangoCoordinateFramePair framePair;
    framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;

    TangoPoseData imuToDevice = new TangoPoseData();
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(imuToDevice, 0, framePair);

    TangoPoseData imuToDepthCamera = new TangoPoseData();
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
    PoseProvider.GetPoseAtTime(imuToDepthCamera, 0, framePair);

    TangoPoseData imuToColorCamera = new TangoPoseData();
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
    PoseProvider.GetPoseAtTime(imuToColorCamera, 0, framePair);

    // Combine poses in matrix form: device_T_x = inverse(imu_T_device) * imu_T_x.
    Matrix4x4 deviceFromImu = Matrix4x4.Inverse(imuToDevice.ToMatrix4x4());
    m_device_T_depthCamera = deviceFromImu * imuToDepthCamera.ToMatrix4x4();
    m_device_T_colorCamera = deviceFromImu * imuToColorCamera.ToMatrix4x4();

    // Unity-world from start-of-service: swaps the Y and Z axes
    // (handedness conversion between the two coordinate conventions).
    m_unityWorld_T_startService.SetColumn(0, new Vector4(1, 0, 0, 0));
    m_unityWorld_T_startService.SetColumn(1, new Vector4(0, 0, 1, 0));
    m_unityWorld_T_startService.SetColumn(2, new Vector4(0, 1, 0, 0));
    m_unityWorld_T_startService.SetColumn(3, new Vector4(0, 0, 0, 1));

    // Update the camera intrinsics too, copying field-by-field into the cached struct.
    TangoCameraIntrinsics colorCameraIntrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorCameraIntrinsics);
    m_colorCameraIntrinsics.calibration_type = (int)colorCameraIntrinsics.calibration_type;
    m_colorCameraIntrinsics.width = colorCameraIntrinsics.width;
    m_colorCameraIntrinsics.height = colorCameraIntrinsics.height;
    m_colorCameraIntrinsics.cx = colorCameraIntrinsics.cx;
    m_colorCameraIntrinsics.cy = colorCameraIntrinsics.cy;
    m_colorCameraIntrinsics.fx = colorCameraIntrinsics.fx;
    m_colorCameraIntrinsics.fy = colorCameraIntrinsics.fy;
    m_colorCameraIntrinsics.distortion0 = colorCameraIntrinsics.distortion0;
    m_colorCameraIntrinsics.distortion1 = colorCameraIntrinsics.distortion1;
    m_colorCameraIntrinsics.distortion2 = colorCameraIntrinsics.distortion2;
    m_colorCameraIntrinsics.distortion3 = colorCameraIntrinsics.distortion3;
    m_colorCameraIntrinsics.distortion4 = colorCameraIntrinsics.distortion4;
}
/// <summary>
/// Populates the UUID list from a comma-separated string of UUID names.
/// </summary>
/// <param name="uuidNames">Comma-separated UUID names.</param>
public void PopulateUUIDList(string uuidNames)
{
    string[] splitNames = uuidNames.Split(',');
    UUIDs = new UUIDUnityHolder[splitNames.Length];
    count = splitNames.Length;
    for (int i = 0; i < count; ++i)
    {
        // The array was just allocated, so every slot needs a fresh holder.
        UUIDs[i] = new Tango.UUIDUnityHolder();

        // Following three calls should be done in the same order always.
        // (Original code encoded the string to UTF-8 bytes and immediately decoded
        // them back — an identity round trip for valid strings — so the name is
        // now passed through directly.)
        UUIDs[i].SetDataUUID(splitNames[i]);
        PoseProvider.GetAreaDescriptionMetaData(UUIDs[i]);
        UUIDs[i].PrepareUUIDMetaData();
    }
}
/// <summary>
/// This will be called when a new frame is available from the camera.
///
/// The first scan-line of the color image is reserved for metadata instead of image pixels.
/// </summary>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
public void OnTangoImageMultithreadedAvailable(TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
{
    // Only process color camera frames while enabled.
    if (!m_enabled || cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
    {
        return;
    }

    // Build World T device: the base frame depends on whether localized
    // (area description) poses were requested.
    TangoCoordinateFramePair framePair;
    framePair.baseFrame = m_useAreaDescriptionPose
        ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
        : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    TangoPoseData world_T_devicePose = new TangoPoseData();
    PoseProvider.GetPoseAtTime(world_T_devicePose, imageBuffer.timestamp, framePair);

    if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        Debug.Log(string.Format("Time {0} has bad status code {1}", imageBuffer.timestamp, world_T_devicePose.status_code) + Environment.StackTrace);
        return;
    }

    // The 3D Reconstruction library can not handle a left handed transformation during update. Instead,
    // transform into the Unity world space via the external_T_tango config.
    Matrix4x4 world_T_colorCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_colorCamera;
    _UpdateColor(imageBuffer, world_T_colorCamera);
}
/// <summary>
/// INTERNAL USE: Update the Tango emulation state for color camera data.
/// </summary>
/// <param name="useByteBufferMethod">Whether to update emulation for byte-buffer method.</param>
internal static void UpdateTangoEmulation(bool useByteBufferMethod)
{
    // A valid timestamp is required before anything else; without one the
    // frame is skipped entirely.
    if (!PoseProvider.GetTimestampForColorEmulation(out m_lastColorEmulationTime))
    {
        Debug.LogError("Couldn't get a valid timestamp with which to emulate color camera. " + "Color camera emulation will be skipped this frame.");
        return;
    }

    // Query the emulated device pose (start-of-service -> device) at that time.
    TangoCoordinateFramePair framePair;
    framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    TangoPoseData emulatedPose = new TangoPoseData();
    PoseProvider.GetPoseAtTime(emulatedPose, m_lastColorEmulationTime, framePair);
    if (emulatedPose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        return;
    }

    // Convert the pose into a Unity-space position/rotation for the render camera.
    Vector3 cameraPosition;
    Quaternion cameraRotation;
    TangoSupport.TangoPoseToWorldTransform(emulatedPose, out cameraPosition, out cameraRotation);

    // Instantiate any resources that we haven't yet.
    if (!m_emulationIsInitialized)
    {
        _InitializeResourcesForEmulation();
        m_emulationIsInitialized = true;
    }

    // Render the emulated environment from the emulated camera pose.
    EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedColorRenderTexture,
                                                              EmulatedEnvironmentRenderHelper.EmulatedDataType.COLOR_CAMERA,
                                                              cameraPosition, cameraRotation);

    m_emulationIsDirty = true;
}
/// <summary>
/// Stop getting Tango pose callbacks.
///
/// Clears the provider callback (if one was registered), the pose event
/// subscribers, and all pending pose data/availability state.
/// </summary>
internal static void Reset()
{
    // Avoid calling into tango_client_api before the correct library is loaded.
    if (m_poseAvailableCallback != null)
    {
        PoseProvider.ClearCallback();
    }

    m_poseAvailableCallback = null;
    m_onTangoPoseAvailable = null;

    // Replace any pending pose data with fresh objects and clear the flags.
    m_motionTrackingData = new TangoPoseData();
    m_areaLearningData = new TangoPoseData();
    m_relocalizationData = new TangoPoseData();
    m_isMotionTrackingPoseAvailable = false;
    m_isAreaLearningPoseAvailable = false;
    m_isRelocalizaitonPoseAvailable = false;

#if UNITY_EDITOR
    // Sentinel meaning "no emulated relocalization seen yet".
    m_mostRecentEmulatedRelocalizationTimestamp = -1;
#endif
}
/// <summary>
/// This is called each time new depth data is available.
///
/// On the Tango tablet, the depth callback occurs at 5 Hz.
/// </summary>
/// <param name="tangoDepth">Tango depth.</param>
public void OnTangoDepthMultithreadedAvailable(TangoXYZij tangoDepth)
{
    if (!m_enabled)
    {
        return;
    }

    // Build World T device: base frame depends on whether localized
    // (area description) poses were requested.
    TangoCoordinateFramePair framePair;
    framePair.baseFrame = m_useAreaDescriptionPose
        ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
        : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    TangoPoseData world_T_devicePose = new TangoPoseData();
    PoseProvider.GetPoseAtTime(world_T_devicePose, tangoDepth.timestamp, framePair);

    if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        Debug.Log(string.Format("Time {0} has bad status code {1}", tangoDepth.timestamp, world_T_devicePose.status_code) + Environment.StackTrace);
        return;
    }

    // NOTE: The 3D Reconstruction library does not handle left handed matrices correctly. For now, transform
    // into the Unity world space after extraction.
    Matrix4x4 world_T_depthCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_depthCamera;
    _UpdateDepth(tangoDepth, world_T_depthCamera);
}
/// <summary>
/// Fill out <c>poseData</c> with emulated values from Tango.
/// </summary>
/// <param name="poseData">The poseData to fill out.</param>
/// <param name="baseFrame">Base frame to set.</param>
/// <param name="targetFrame">Target frame to set.</param>
private void FillEmulatedPoseData(ref TangoPoseData poseData,
                                  TangoEnums.TangoCoordinateFrameType baseFrame,
                                  TangoEnums.TangoCoordinateFrameType targetFrame)
{
    // Pull the current emulated transform from the provider.
    Vector3 emulatedPosition;
    Quaternion emulatedRotation;
    PoseProvider.GetTangoEmulation(out emulatedPosition, out emulatedRotation);

    poseData.framePair.baseFrame = baseFrame;
    poseData.framePair.targetFrame = targetFrame;
    poseData.timestamp = Time.time * 1000; // timestamp is in ms, time is in sec.
    poseData.version = 0; // Not actually used
    poseData.status_code = TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID;

    // Copy position (x, y, z) and rotation quaternion (x, y, z, w) components.
    poseData.translation[0] = emulatedPosition.x;
    poseData.translation[1] = emulatedPosition.y;
    poseData.translation[2] = emulatedPosition.z;
    poseData.orientation[0] = emulatedRotation.x;
    poseData.orientation[1] = emulatedRotation.y;
    poseData.orientation[2] = emulatedRotation.z;
    poseData.orientation[3] = emulatedRotation.w;
}
/// <summary>
/// INTERNAL USE: Update the Tango emulation state for depth data.
///
/// Renders the emulated environment from the emulated device pose into a
/// depth render texture, reads it back to the CPU, and unprojects each texel
/// into a camera-space point cloud stored in m_emulatedPointCloud.
/// </summary>
internal static void UpdateTangoEmulation()
{
    m_emulatedPointCloud.Clear();

    // Timestamp shall be something in the past, and we'll emulate the depth cloud based on it.
    if (!PoseProvider.GetTimestampForDepthEmulation(out m_lastDepthEmulationTime))
    {
        Debug.LogError("Couldn't get a valid timestamp with which to emulate depth data. " + "Depth emulation will be skipped this frame.");
        return;
    }

    // Get emulated position and rotation in Unity space.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(poseData, m_lastDepthEmulationTime, pair);
    if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        // No valid pose at this timestamp; skip depth emulation for this frame.
        return;
    }

    Vector3 position;
    Quaternion rotation;
    TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

    // Instantiate any resources that we haven't yet.
    _InternResourcesForEmulation();

    // Render emulated depth camera data.
    EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedDepthTexture,
                                                              EmulatedEnvironmentRenderHelper.EmulatedDataType.DEPTH,
                                                              position, rotation);

    // Capture rendered depth points from texture. RenderTexture.active must be set
    // before ReadPixels, which copies from the active render target.
    RenderTexture.active = m_emulatedDepthTexture;
    m_emulationCaptureTexture.ReadPixels(new Rect(0, 0, m_emulatedDepthTexture.width, m_emulatedDepthTexture.height), 0, 0);
    m_emulationCaptureTexture.Apply();

    // Extract captured data.
    Color32[] depthDataAsColors = m_emulationCaptureTexture.GetPixels32();

    // Convert depth texture to positions in camera space. The GPU projection matrix
    // is used so the unprojection matches what was actually rendered.
    Matrix4x4 projectionMatrix = GL.GetGPUProjectionMatrix(EmulatedEnvironmentRenderHelper.m_emulationCamera.projectionMatrix, false);
    Matrix4x4 reverseProjectionMatrix = projectionMatrix.inverse;

    float width = m_emulationCaptureTexture.width;
    float height = m_emulationCaptureTexture.height;
    for (int yTexel = 0; yTexel < height; yTexel++)
    {
        for (int xTexel = 0; xTexel < width; xTexel++)
        {
            Color32 depthAsColor = depthDataAsColors[xTexel + (yTexel * m_emulationCaptureTexture.width)];

            // Depth is packed into the red (integer part, biased by 128) and
            // green (fractional part) channels by the emulation shader —
            // NOTE(review): packing scheme inferred from this decode; confirm
            // against the depth emulation shader.
            float clipSpaceZ = (depthAsColor.r - 128f) + (depthAsColor.g / 255f);
            float ndcSpaceZ = (clipSpaceZ - projectionMatrix.m23) / projectionMatrix.m22;
            float perspectiveDivisor = ndcSpaceZ * projectionMatrix.m32;

            // Texel center -> normalized device coordinates in [-1, 1].
            float ndcSpaceX = (((xTexel + 0.5f) / width) * 2f) - 1;
            float ndcSpaceY = (((yTexel + 0.5f) / height) * 2f) - 1;

            Vector4 clipSpacePos = new Vector4(ndcSpaceX * perspectiveDivisor,
                                               ndcSpaceY * perspectiveDivisor,
                                               clipSpaceZ,
                                               perspectiveDivisor);

            // Unproject back to view space, then flip Y/Z into the expected
            // depth-camera convention.
            Vector4 viewSpacePos = reverseProjectionMatrix * clipSpacePos;
            Vector3 emulatedDepthPos = new Vector3(viewSpacePos.x, -viewSpacePos.y, -viewSpacePos.z);

            // Keep only points within the emulated sensor's valid range.
            if (emulatedDepthPos.z > MIN_POINT_DISTANCE && emulatedDepthPos.z < MAX_POINT_DISTANCE)
            {
                m_emulatedPointCloud.Add(emulatedDepthPos);
            }
        }
    }

    m_emulationIsDirty = true;
}
/// <summary>
/// Detect one or more markers in the input image.
/// </summary>
/// <param name="imageBuffer">
/// The input image buffer.
/// </param>
/// <param name="cameraId">
/// Camera that is used for detecting markers, can be TangoEnums.TangoCameraId.TANGO_CAMERA_FISHEYE or
/// TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR.
/// </param>
/// <param name="markerType">
/// Target marker's type. Current support marker types are QR marker and Alvar marker.
/// </param>
/// <param name="markerSize">
/// Physical size of marker's length.
/// </param>
/// <param name="markers">
/// The returned marker list.
/// </param>
/// <returns>
/// <c>true</c> on success; <c>false</c> on invalid input or failure of the native detection call.
/// </returns>
public static bool DetectMarkers(TangoUnityImageData imageBuffer, TangoEnums.TangoCameraId cameraId,
                                 MarkerType markerType, double markerSize, List<Marker> markers)
{
    if (markers == null)
    {
        Debug.Log("markers is null. " + Environment.StackTrace);
        return false;
    }

    // Clear any existing marker
    markers.Clear();

    // Copy the managed image metadata into the native buffer struct, pinning the
    // pixel data so the native detector can read it without the GC moving it.
    TangoImageBuffer buffer = new TangoImageBuffer();
    GCHandle gchandle = GCHandle.Alloc(imageBuffer.data, GCHandleType.Pinned);
    buffer.data = gchandle.AddrOfPinnedObject();
    buffer.format = imageBuffer.format;
    buffer.frame_number = imageBuffer.frame_number;
    buffer.height = imageBuffer.height;
    buffer.stride = imageBuffer.stride;
    buffer.timestamp = imageBuffer.timestamp;
    buffer.width = imageBuffer.width;

    // Get the device pose (start-of-service -> color camera) at the image timestamp.
    // NOTE(review): the pose status is not checked here; an invalid pose would feed
    // default translation/orientation into the detector — confirm this is intended.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
    PoseProvider.GetPoseAtTime(poseData, buffer.timestamp, pair);

    APIMarkerList rawAPIMarkerList = new APIMarkerList();
    APIMarkerParam rawMarkerParam = new APIMarkerParam(markerType, markerSize);

    int ret;
    try
    {
        ret = TangoSupportAPI.TangoSupport_detectMarkers(ref buffer, cameraId,
                                                         ref poseData.translation,
                                                         ref poseData.orientation,
                                                         ref rawMarkerParam,
                                                         ref rawAPIMarkerList);
    }
    finally
    {
        // Always release the pinned handle, even if the native call throws.
        gchandle.Free();
    }

    if (ret != Common.ErrorType.TANGO_SUCCESS)
    {
        return false;
    }

    if (rawAPIMarkerList.markerCount != 0)
    {
        List<APIMarker> apiMarkers = new List<APIMarker>();
        MarshallingHelper.MarshalUnmanagedStructArrayToList<TangoSupport.APIMarker>(
            rawAPIMarkerList.markers, rawAPIMarkerList.markerCount, apiMarkers);

        for (int i = 0; i < apiMarkers.Count; ++i)
        {
            APIMarker apiMarker = apiMarkers[i];
            Marker marker = new Marker();
            marker.m_type = apiMarker.m_type;
            marker.m_timestamp = apiMarker.m_timestamp;
            marker.m_content = apiMarker.m_content;

            // Convert 2D corner points from pixel space to UV space.
            marker.m_corner2DP0.x = apiMarker.m_corner2DP0.x / buffer.width;
            marker.m_corner2DP0.y = apiMarker.m_corner2DP0.y / buffer.height;
            marker.m_corner2DP1.x = apiMarker.m_corner2DP1.x / buffer.width;
            marker.m_corner2DP1.y = apiMarker.m_corner2DP1.y / buffer.height;
            marker.m_corner2DP2.x = apiMarker.m_corner2DP2.x / buffer.width;
            marker.m_corner2DP2.y = apiMarker.m_corner2DP2.y / buffer.height;
            marker.m_corner2DP3.x = apiMarker.m_corner2DP3.x / buffer.width;
            marker.m_corner2DP3.y = apiMarker.m_corner2DP3.y / buffer.height;

            // Convert 3D corner points from Start of Service space to Unity World space.
            marker.m_corner3DP0 = GetMarkerInUnitySpace(apiMarker.m_corner3DP0);
            marker.m_corner3DP1 = GetMarkerInUnitySpace(apiMarker.m_corner3DP1);
            marker.m_corner3DP2 = GetMarkerInUnitySpace(apiMarker.m_corner3DP2);
            marker.m_corner3DP3 = GetMarkerInUnitySpace(apiMarker.m_corner3DP3);

            // Convert pose from Start of Service to Unity World space.
            Vector3 translation = new Vector3(
                (float)apiMarker.m_translation.x,
                (float)apiMarker.m_translation.y,
                (float)apiMarker.m_translation.z);
            Quaternion orientation = new Quaternion(
                (float)apiMarker.m_rotation.x,
                (float)apiMarker.m_rotation.y,
                (float)apiMarker.m_rotation.z,
                (float)apiMarker.m_rotation.w);

            Matrix4x4 ss_T_marker = Matrix4x4.TRS(translation, orientation, Vector3.one);

            // Note that UNITY_WORLD_T_START_SERVICE is involutory matrix. The actually transform
            // we wanted to multiply on the right hand side is START_SERVICE_T_UNITY_WORLD.
            Matrix4x4 uw_T_u_marker = TangoSupport.UNITY_WORLD_T_START_SERVICE *
                ss_T_marker * TangoSupport.UNITY_WORLD_T_START_SERVICE;
            marker.m_translation = uw_T_u_marker.GetColumn(3);
            marker.m_orientation = Quaternion.LookRotation(uw_T_u_marker.GetColumn(2), uw_T_u_marker.GetColumn(1));

            // Add the marker to the output list
            markers.Add(marker);
        }
    }

    TangoSupportAPI.TangoSupport_freeMarkerList(ref rawAPIMarkerList);

    // BUGFIX: the original returned false here even though detection succeeded,
    // contradicting the documented "success" return contract.
    return true;
}
/// <summary>
/// Populates the meta data key values pairs.
///
/// Delegates to PoseProvider, which fills m_KeyValuePairs from the native
/// metadata referenced by meta_data_pointer.
/// </summary>
public void PopulateMetaDataKeyValues()
{
    PoseProvider.PopulateAreaDescriptionMetaDataKeyValues(meta_data_pointer, ref m_KeyValuePairs);
}
/// <summary>
/// Raise a Tango pose event if there is new data.
/// </summary>
/// <param name="emulateAreaDescriptions">If set, Area description poses are emulated.</param>
internal static void SendIfAvailable(bool emulateAreaDescriptions)
{
    // No callback registered means the listener is not active; nothing to send.
    if (m_poseAvailableCallback == null)
    {
        return;
    }

#if UNITY_EDITOR
    lock (m_lockObject)
    {
        // Only refresh emulated poses when the emulation layer produced new data
        // since the last frame.
        if (PoseProvider.m_emulationIsDirty)
        {
            PoseProvider.m_emulationIsDirty = false;

            if (m_onTangoPoseAvailable != null)
            {
                TangoCoordinateFramePair framePair;

                // Motion tracking pose: start-of-service -> device, latest (timestamp 0).
                framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
                framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
                PoseProvider.GetPoseAtTime(m_motionTrackingData, 0, framePair);
                m_isMotionTrackingPoseAvailable = true;

                if (emulateAreaDescriptions)
                {
                    // Area learning pose: only flagged available when valid.
                    framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
                    PoseProvider.GetPoseAtTime(m_areaLearningData, 0, framePair);
                    if (m_areaLearningData.status_code == TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
                    {
                        m_isAreaLearningPoseAvailable = true;
                    }

                    // Relocalization pose: area-description -> start-of-service. Deduplicate
                    // by timestamp so each relocalization event is raised only once.
                    framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
                    PoseProvider.GetPoseAtTime(m_relocalizationData, 0, framePair);
                    if (m_relocalizationData.status_code == TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID
                        && m_relocalizationData.timestamp != m_mostRecentEmulatedRelocalizationTimestamp)
                    {
                        m_mostRecentEmulatedRelocalizationTimestamp = m_relocalizationData.timestamp;
                        m_isRelocalizaitonPoseAvailable = true;
                    }
                }
            }
        }
    }
#endif

    if (m_onTangoPoseAvailable != null)
    {
        // NOTE: If this becomes a performance issue, this could be changed to use
        // Interlocked.CompareExchange to "consume" the motion tracking data.
        lock (m_lockObject)
        {
            // Dispatch each available pose once, clearing its flag after delivery.
            if (m_isMotionTrackingPoseAvailable)
            {
                m_onTangoPoseAvailable(m_motionTrackingData);
                m_isMotionTrackingPoseAvailable = false;
            }

            if (m_isAreaLearningPoseAvailable)
            {
                m_onTangoPoseAvailable(m_areaLearningData);
                m_isAreaLearningPoseAvailable = false;
            }

            if (m_isRelocalizaitonPoseAvailable)
            {
                m_onTangoPoseAvailable(m_relocalizationData);
                m_isRelocalizaitonPoseAvailable = false;
            }
        }
    }
}
/// <summary>
/// INTERNAL USE: Update the Tango emulation state for depth data.
///
/// Make this this is only called once per frame.
///
/// Lazily creates the render/capture textures and depth shader (editor-only
/// asset lookup), renders the emulated environment, reads the result back,
/// and unprojects each texel into the emulated point cloud.
/// </summary>
internal static void UpdateTangoEmulation()
{
    m_emulatedPointCloud.Clear();

    // Timestamp shall be something in the past, and we'll emulate the depth cloud based on it.
    m_lastDepthEmulationTime = PoseProvider.GetTimestampForDepthEmulation();

    // Get emulated position and rotation in Unity space.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(poseData, m_lastDepthEmulationTime, pair);
    if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        // No valid pose at this time; skip this frame's depth emulation.
        return;
    }

    Vector3 position;
    Quaternion rotation;
    TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

    // Instantiate any resources that we haven't yet.
    if (m_emulatedDepthTexture == null)
    {
        m_emulatedDepthTexture = new RenderTexture(NUM_X_DEPTH_SAMPLES, NUM_Y_DEPTH_SAMPLES, 24, RenderTextureFormat.ARGB32);
    }

    if (m_emulationCaptureTexture == null)
    {
        m_emulationCaptureTexture = new Texture2D(NUM_X_DEPTH_SAMPLES, NUM_Y_DEPTH_SAMPLES, TextureFormat.ARGB32, false);
    }

    if (m_emulatedDepthShader == null)
    {
        // Find depth shader by searching for it in project. This is editor-only
        // AssetDatabase usage; if the asset is missing the shader stays null.
        string[] foundAssetGuids = UnityEditor.AssetDatabase.FindAssets("DepthEmulation t:Shader");
        if (foundAssetGuids.Length > 0)
        {
            string assetPath = UnityEditor.AssetDatabase.GUIDToAssetPath(foundAssetGuids[0]);
            m_emulatedDepthShader = UnityEditor.AssetDatabase.LoadAssetAtPath(assetPath, typeof(Shader)) as Shader;
        }
    }

    // Render emulated depth camera data.
    EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedDepthTexture,
                                                              m_emulatedDepthShader,
                                                              position, rotation);

    // Capture rendered depth points from texture. RenderTexture.active must be
    // set before ReadPixels, which copies from the active render target.
    RenderTexture.active = m_emulatedDepthTexture;
    m_emulationCaptureTexture.ReadPixels(new Rect(0, 0, m_emulatedDepthTexture.width, m_emulatedDepthTexture.height), 0, 0);
    m_emulationCaptureTexture.Apply();

    // Extract captured data.
    Color32[] depthDataAsColors = m_emulationCaptureTexture.GetPixels32();

    // Convert depth texture to positions in camera space, using the GPU projection
    // matrix so the unprojection matches the rendered output.
    Matrix4x4 projectionMatrix = GL.GetGPUProjectionMatrix(EmulatedEnvironmentRenderHelper.m_emulationCamera.projectionMatrix, false);
    Matrix4x4 reverseProjectionMatrix = projectionMatrix.inverse;

    float width = m_emulationCaptureTexture.width;
    float height = m_emulationCaptureTexture.height;
    for (int yTexel = 0; yTexel < height; yTexel++)
    {
        for (int xTexel = 0; xTexel < width; xTexel++)
        {
            Color32 depthAsColor = depthDataAsColors[xTexel + (yTexel * m_emulationCaptureTexture.width)];

            // Depth is packed across the red (integer part, biased by 128) and
            // green (fractional part) channels — NOTE(review): packing inferred
            // from this decode; confirm against the depth emulation shader.
            float clipSpaceZ = (depthAsColor.r - 128f) + (depthAsColor.g / 255f);
            float ndcSpaceZ = (clipSpaceZ - projectionMatrix.m23) / projectionMatrix.m22;
            float perspectiveDivisor = ndcSpaceZ * projectionMatrix.m32;

            // Texel center -> normalized device coordinates in [-1, 1].
            float ndcSpaceX = (((xTexel + 0.5f) / width) * 2f) - 1;
            float ndcSpaceY = (((yTexel + 0.5f) / height) * 2f) - 1;

            Vector4 clipSpacePos = new Vector4(ndcSpaceX * perspectiveDivisor,
                                               ndcSpaceY * perspectiveDivisor,
                                               clipSpaceZ,
                                               perspectiveDivisor);

            // Unproject to view space, then flip Y/Z into the depth-camera convention.
            Vector4 viewSpacePos = reverseProjectionMatrix * clipSpacePos;
            Vector3 emulatedDepthPos = new Vector3(viewSpacePos.x, -viewSpacePos.y, -viewSpacePos.z);

            // Keep only points within the emulated sensor's valid range.
            if (emulatedDepthPos.z > MIN_POINT_DISTANCE && emulatedDepthPos.z < MAX_POINT_DISTANCE)
            {
                m_emulatedPointCloud.Add(emulatedDepthPos);
            }
        }
    }
}